Bluetooth: Use lmp_no_flush_capable where applicable
[GitHub/mt8127/android_kernel_alcatel_ttab.git] / net / bluetooth / l2cap_core.c
1 /*
2 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (C) 2000-2001 Qualcomm Incorporated
4 Copyright (C) 2009-2010 Gustavo F. Padovan <gustavo@padovan.org>
5 Copyright (C) 2010 Google Inc.
6 Copyright (C) 2011 ProFUSION Embedded Systems
7 Copyright (c) 2012 Code Aurora Forum. All rights reserved.
8
9 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
10
11 This program is free software; you can redistribute it and/or modify
12 it under the terms of the GNU General Public License version 2 as
13 published by the Free Software Foundation;
14
15 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
16 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
18 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
19 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
20 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
21 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
22 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
23
24 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
25 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
26 SOFTWARE IS DISCLAIMED.
27 */
28
29 /* Bluetooth L2CAP core. */
30
31 #include <linux/module.h>
32
33 #include <linux/debugfs.h>
34 #include <linux/crc16.h>
35
36 #include <net/bluetooth/bluetooth.h>
37 #include <net/bluetooth/hci_core.h>
38 #include <net/bluetooth/l2cap.h>
39 #include <net/bluetooth/smp.h>
40 #include <net/bluetooth/a2mp.h>
41
/* When set, ERTM and streaming modes are treated as unsupported
 * (see l2cap_mode_supported()).
 */
bool disable_ertm;

/* Feature mask advertised for this host; ERTM/streaming bits are added
 * at lookup time unless disable_ertm is set.
 */
static u32 l2cap_feat_mask = L2CAP_FEAT_FIXED_CHAN;
/* Fixed-channel bitmap: only the L2CAP signalling channel by default. */
static u8 l2cap_fixed_chan[8] = { L2CAP_FC_L2CAP, };

/* Global list of every L2CAP channel, protected by chan_list_lock. */
static LIST_HEAD(chan_list);
static DEFINE_RWLOCK(chan_list_lock);

/* Forward declarations for signalling helpers referenced before their
 * definitions.
 */
static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
				u8 code, u8 ident, u16 dlen, void *data);
static void l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len,
								void *data);
static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data);
static void l2cap_send_disconn_req(struct l2cap_conn *conn,
				struct l2cap_chan *chan, int err);

static void l2cap_tx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
		     struct sk_buff_head *skbs, u8 event);
61 /* ---- L2CAP channels ---- */
62
63 static struct l2cap_chan *__l2cap_get_chan_by_dcid(struct l2cap_conn *conn, u16 cid)
64 {
65 struct l2cap_chan *c;
66
67 list_for_each_entry(c, &conn->chan_l, list) {
68 if (c->dcid == cid)
69 return c;
70 }
71 return NULL;
72 }
73
74 static struct l2cap_chan *__l2cap_get_chan_by_scid(struct l2cap_conn *conn, u16 cid)
75 {
76 struct l2cap_chan *c;
77
78 list_for_each_entry(c, &conn->chan_l, list) {
79 if (c->scid == cid)
80 return c;
81 }
82 return NULL;
83 }
84
/* Find channel with given SCID.
 * Returns locked channel. */
static struct l2cap_chan *l2cap_get_chan_by_scid(struct l2cap_conn *conn, u16 cid)
{
	struct l2cap_chan *c;

	/* chan_lock serializes the list walk; the per-channel lock is
	 * taken before chan_lock is dropped so the channel cannot go
	 * away under the caller, who must l2cap_chan_unlock() it.
	 */
	mutex_lock(&conn->chan_lock);
	c = __l2cap_get_chan_by_scid(conn, cid);
	if (c)
		l2cap_chan_lock(c);
	mutex_unlock(&conn->chan_lock);

	return c;
}
99
100 static struct l2cap_chan *__l2cap_get_chan_by_ident(struct l2cap_conn *conn, u8 ident)
101 {
102 struct l2cap_chan *c;
103
104 list_for_each_entry(c, &conn->chan_l, list) {
105 if (c->ident == ident)
106 return c;
107 }
108 return NULL;
109 }
110
111 static struct l2cap_chan *__l2cap_global_chan_by_addr(__le16 psm, bdaddr_t *src)
112 {
113 struct l2cap_chan *c;
114
115 list_for_each_entry(c, &chan_list, global_l) {
116 if (c->sport == psm && !bacmp(&bt_sk(c->sk)->src, src))
117 return c;
118 }
119 return NULL;
120 }
121
/* Bind @chan to PSM @psm on local address @src.
 *
 * A non-zero @psm is used as-is after checking it is not already bound
 * on @src; @psm == 0 requests dynamic allocation from the odd values in
 * 0x1001-0x10ff. Returns 0 on success, -EADDRINUSE if the explicit PSM
 * is taken, or -EINVAL if the dynamic range is exhausted.
 */
int l2cap_add_psm(struct l2cap_chan *chan, bdaddr_t *src, __le16 psm)
{
	int err;

	write_lock(&chan_list_lock);

	if (psm && __l2cap_global_chan_by_addr(psm, src)) {
		err = -EADDRINUSE;
		goto done;
	}

	if (psm) {
		chan->psm = psm;
		chan->sport = psm;
		err = 0;
	} else {
		u16 p;

		/* Dynamic PSMs must be odd (LSB of the low octet set),
		 * hence the stride of 2 starting at 0x1001.
		 */
		err = -EINVAL;
		for (p = 0x1001; p < 0x1100; p += 2)
			if (!__l2cap_global_chan_by_addr(cpu_to_le16(p), src)) {
				chan->psm   = cpu_to_le16(p);
				chan->sport = cpu_to_le16(p);
				err = 0;
				break;
			}
	}

done:
	write_unlock(&chan_list_lock);
	return err;
}
154
155 int l2cap_add_scid(struct l2cap_chan *chan, __u16 scid)
156 {
157 write_lock(&chan_list_lock);
158
159 chan->scid = scid;
160
161 write_unlock(&chan_list_lock);
162
163 return 0;
164 }
165
166 static u16 l2cap_alloc_cid(struct l2cap_conn *conn)
167 {
168 u16 cid = L2CAP_CID_DYN_START;
169
170 for (; cid < L2CAP_CID_DYN_END; cid++) {
171 if (!__l2cap_get_chan_by_scid(conn, cid))
172 return cid;
173 }
174
175 return 0;
176 }
177
/* Move @chan to @state and notify the channel ops.
 * Lockless variant; l2cap_state_change() wraps it with the socket lock.
 */
static void __l2cap_state_change(struct l2cap_chan *chan, int state)
{
	BT_DBG("chan %p %s -> %s", chan, state_to_string(chan->state),
						state_to_string(state));

	chan->state = state;
	chan->ops->state_change(chan, state);
}
186
/* Move @chan to @state while holding the socket lock. */
static void l2cap_state_change(struct l2cap_chan *chan, int state)
{
	struct sock *sk = chan->sk;

	lock_sock(sk);
	__l2cap_state_change(chan, state);
	release_sock(sk);
}
195
/* Record @err on the channel's socket; caller holds the socket lock. */
static inline void __l2cap_chan_set_err(struct l2cap_chan *chan, int err)
{
	struct sock *sk = chan->sk;

	sk->sk_err = err;
}
202
/* Record @err on the channel's socket, taking the socket lock. */
static inline void l2cap_chan_set_err(struct l2cap_chan *chan, int err)
{
	struct sock *sk = chan->sk;

	lock_sock(sk);
	__l2cap_chan_set_err(chan, err);
	release_sock(sk);
}
211
/* Arm the ERTM retransmission timer, but only while the monitor timer
 * is not pending and a retransmission timeout has been negotiated.
 */
static void __set_retrans_timer(struct l2cap_chan *chan)
{
	if (!delayed_work_pending(&chan->monitor_timer) &&
	    chan->retrans_timeout) {
		l2cap_set_timer(chan, &chan->retrans_timer,
				msecs_to_jiffies(chan->retrans_timeout));
	}
}
220
/* Arm the ERTM monitor timer (if a timeout is configured), cancelling
 * any pending retransmission timer first — the two are exclusive.
 */
static void __set_monitor_timer(struct l2cap_chan *chan)
{
	__clear_retrans_timer(chan);
	if (chan->monitor_timeout) {
		l2cap_set_timer(chan, &chan->monitor_timer,
				msecs_to_jiffies(chan->monitor_timeout));
	}
}
229
230 static struct sk_buff *l2cap_ertm_seq_in_queue(struct sk_buff_head *head,
231 u16 seq)
232 {
233 struct sk_buff *skb;
234
235 skb_queue_walk(head, skb) {
236 if (bt_cb(skb)->control.txseq == seq)
237 return skb;
238 }
239
240 return NULL;
241 }
242
243 /* ---- L2CAP sequence number lists ---- */
244
245 /* For ERTM, ordered lists of sequence numbers must be tracked for
246 * SREJ requests that are received and for frames that are to be
247 * retransmitted. These seq_list functions implement a singly-linked
248 * list in an array, where membership in the list can also be checked
249 * in constant time. Items can also be added to the tail of the list
250 * and removed from the head in constant time, without further memory
251 * allocs or frees.
252 */
253
/* Allocate and reset a sequence-number list able to hold @size entries.
 * Returns 0 on success or -ENOMEM.
 */
static int l2cap_seq_list_init(struct l2cap_seq_list *seq_list, u16 size)
{
	size_t alloc_size, i;

	/* Allocated size is a power of 2 to map sequence numbers
	 * (which may be up to 14 bits) in to a smaller array that is
	 * sized for the negotiated ERTM transmit windows.
	 */
	alloc_size = roundup_pow_of_two(size);

	seq_list->list = kmalloc(sizeof(u16) * alloc_size, GFP_KERNEL);
	if (!seq_list->list)
		return -ENOMEM;

	/* Power-of-two size lets "seq & mask" index the array. */
	seq_list->mask = alloc_size - 1;
	seq_list->head = L2CAP_SEQ_LIST_CLEAR;
	seq_list->tail = L2CAP_SEQ_LIST_CLEAR;
	for (i = 0; i < alloc_size; i++)
		seq_list->list[i] = L2CAP_SEQ_LIST_CLEAR;

	return 0;
}
276
/* Release the backing array of a sequence-number list. */
static inline void l2cap_seq_list_free(struct l2cap_seq_list *seq_list)
{
	kfree(seq_list->list);
}
281
/* Test whether @seq is currently a member of @seq_list. */
static inline bool l2cap_seq_list_contains(struct l2cap_seq_list *seq_list,
					   u16 seq)
{
	/* Constant-time check for list membership */
	return seq_list->list[seq & seq_list->mask] != L2CAP_SEQ_LIST_CLEAR;
}
288
/* Remove @seq from @seq_list.
 *
 * The list is a singly-linked chain stored in an array: list[x & mask]
 * holds the successor of x. Removing the head is O(1); removing an
 * interior element walks the chain to find the predecessor. Returns
 * @seq on success, or L2CAP_SEQ_LIST_CLEAR if the list is empty or
 * @seq is not on it.
 */
static u16 l2cap_seq_list_remove(struct l2cap_seq_list *seq_list, u16 seq)
{
	u16 mask = seq_list->mask;

	if (seq_list->head == L2CAP_SEQ_LIST_CLEAR) {
		/* In case someone tries to pop the head of an empty list */
		return L2CAP_SEQ_LIST_CLEAR;
	} else if (seq_list->head == seq) {
		/* Head can be removed in constant time */
		seq_list->head = seq_list->list[seq & mask];
		seq_list->list[seq & mask] = L2CAP_SEQ_LIST_CLEAR;

		/* Removing the last element empties the list entirely. */
		if (seq_list->head == L2CAP_SEQ_LIST_TAIL) {
			seq_list->head = L2CAP_SEQ_LIST_CLEAR;
			seq_list->tail = L2CAP_SEQ_LIST_CLEAR;
		}
	} else {
		/* Walk the list to find the sequence number */
		u16 prev = seq_list->head;
		while (seq_list->list[prev & mask] != seq) {
			prev = seq_list->list[prev & mask];
			if (prev == L2CAP_SEQ_LIST_TAIL)
				return L2CAP_SEQ_LIST_CLEAR;
		}

		/* Unlink the number from the list and clear it */
		seq_list->list[prev & mask] = seq_list->list[seq & mask];
		seq_list->list[seq & mask] = L2CAP_SEQ_LIST_CLEAR;
		if (seq_list->tail == seq)
			seq_list->tail = prev;
	}
	return seq;
}
322
/* Pop and return the head of @seq_list (L2CAP_SEQ_LIST_CLEAR if empty). */
static inline u16 l2cap_seq_list_pop(struct l2cap_seq_list *seq_list)
{
	/* Remove the head in constant time */
	return l2cap_seq_list_remove(seq_list, seq_list->head);
}
328
329 static void l2cap_seq_list_clear(struct l2cap_seq_list *seq_list)
330 {
331 u16 i;
332
333 if (seq_list->head == L2CAP_SEQ_LIST_CLEAR)
334 return;
335
336 for (i = 0; i <= seq_list->mask; i++)
337 seq_list->list[i] = L2CAP_SEQ_LIST_CLEAR;
338
339 seq_list->head = L2CAP_SEQ_LIST_CLEAR;
340 seq_list->tail = L2CAP_SEQ_LIST_CLEAR;
341 }
342
/* Append @seq to the tail of @seq_list; duplicates are ignored. */
static void l2cap_seq_list_append(struct l2cap_seq_list *seq_list, u16 seq)
{
	u16 mask = seq_list->mask;

	/* All appends happen in constant time */

	/* Already a member — membership is tracked in the slot itself. */
	if (seq_list->list[seq & mask] != L2CAP_SEQ_LIST_CLEAR)
		return;

	if (seq_list->tail == L2CAP_SEQ_LIST_CLEAR)
		seq_list->head = seq;
	else
		seq_list->list[seq_list->tail & mask] = seq;

	seq_list->tail = seq;
	seq_list->list[seq & mask] = L2CAP_SEQ_LIST_TAIL;
}
360
/* Delayed-work handler for the channel timer: close the channel with an
 * error derived from the state it timed out in, then drop the reference
 * held by the timer.
 */
static void l2cap_chan_timeout(struct work_struct *work)
{
	struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
							chan_timer.work);
	struct l2cap_conn *conn = chan->conn;
	int reason;

	BT_DBG("chan %p state %s", chan, state_to_string(chan->state));

	/* Lock order: conn->chan_lock, then the channel lock. */
	mutex_lock(&conn->chan_lock);
	l2cap_chan_lock(chan);

	if (chan->state == BT_CONNECTED || chan->state == BT_CONFIG)
		reason = ECONNREFUSED;
	else if (chan->state == BT_CONNECT &&
					chan->sec_level != BT_SECURITY_SDP)
		reason = ECONNREFUSED;
	else
		reason = ETIMEDOUT;

	l2cap_chan_close(chan, reason);

	l2cap_chan_unlock(chan);

	chan->ops->close(chan);
	mutex_unlock(&conn->chan_lock);

	/* Balances the hold taken when the timer was armed. */
	l2cap_chan_put(chan);
}
390
/* Allocate a new channel, link it on the global list, and return it in
 * BT_OPEN state with one reference held. Returns NULL on allocation
 * failure.
 */
struct l2cap_chan *l2cap_chan_create(void)
{
	struct l2cap_chan *chan;

	chan = kzalloc(sizeof(*chan), GFP_ATOMIC);
	if (!chan)
		return NULL;

	mutex_init(&chan->lock);

	write_lock(&chan_list_lock);
	list_add(&chan->global_l, &chan_list);
	write_unlock(&chan_list_lock);

	INIT_DELAYED_WORK(&chan->chan_timer, l2cap_chan_timeout);

	chan->state = BT_OPEN;

	atomic_set(&chan->refcnt, 1);

	/* This flag is cleared in l2cap_chan_ready() */
	set_bit(CONF_NOT_COMPLETE, &chan->conf_state);

	BT_DBG("chan %p", chan);

	return chan;
}
418
/* Unlink @chan from the global list and drop the creation reference. */
void l2cap_chan_destroy(struct l2cap_chan *chan)
{
	write_lock(&chan_list_lock);
	list_del(&chan->global_l);
	write_unlock(&chan_list_lock);

	l2cap_chan_put(chan);
}
427
/* Initialize @chan with the default ERTM/security parameters used
 * before any configuration is negotiated.
 */
void l2cap_chan_set_defaults(struct l2cap_chan *chan)
{
	chan->fcs  = L2CAP_FCS_CRC16;
	chan->max_tx = L2CAP_DEFAULT_MAX_TX;
	chan->tx_win = L2CAP_DEFAULT_TX_WINDOW;
	chan->tx_win_max = L2CAP_DEFAULT_TX_WINDOW;
	chan->ack_win = L2CAP_DEFAULT_TX_WINDOW;
	chan->sec_level = BT_SECURITY_LOW;

	set_bit(FLAG_FORCE_ACTIVE, &chan->flags);
}
439
/* Attach @chan to @conn: pick CIDs and MTUs according to the channel
 * type, seed the local QoS parameters, take a channel reference and
 * link it on the connection's channel list. Caller holds
 * conn->chan_lock (see l2cap_chan_add()).
 */
static void __l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan)
{
	BT_DBG("conn %p, psm 0x%2.2x, dcid 0x%4.4x", conn,
	       __le16_to_cpu(chan->psm), chan->dcid);

	conn->disc_reason = HCI_ERROR_REMOTE_USER_TERM;

	chan->conn = conn;

	switch (chan->chan_type) {
	case L2CAP_CHAN_CONN_ORIENTED:
		if (conn->hcon->type == LE_LINK) {
			/* LE connection */
			chan->omtu = L2CAP_DEFAULT_MTU;
			chan->scid = L2CAP_CID_LE_DATA;
			chan->dcid = L2CAP_CID_LE_DATA;
		} else {
			/* Alloc CID for connection-oriented socket */
			chan->scid = l2cap_alloc_cid(conn);
			chan->omtu = L2CAP_DEFAULT_MTU;
		}
		break;

	case L2CAP_CHAN_CONN_LESS:
		/* Connectionless socket */
		chan->scid = L2CAP_CID_CONN_LESS;
		chan->dcid = L2CAP_CID_CONN_LESS;
		chan->omtu = L2CAP_DEFAULT_MTU;
		break;

	case L2CAP_CHAN_CONN_FIX_A2MP:
		chan->scid = L2CAP_CID_A2MP;
		chan->dcid = L2CAP_CID_A2MP;
		chan->omtu = L2CAP_A2MP_DEFAULT_MTU;
		chan->imtu = L2CAP_A2MP_DEFAULT_MTU;
		break;

	default:
		/* Raw socket can send/recv signalling messages only */
		chan->scid = L2CAP_CID_SIGNALING;
		chan->dcid = L2CAP_CID_SIGNALING;
		chan->omtu = L2CAP_DEFAULT_MTU;
	}

	/* Default (best-effort) local quality-of-service parameters. */
	chan->local_id		= L2CAP_BESTEFFORT_ID;
	chan->local_stype	= L2CAP_SERV_BESTEFFORT;
	chan->local_msdu	= L2CAP_DEFAULT_MAX_SDU_SIZE;
	chan->local_sdu_itime	= L2CAP_DEFAULT_SDU_ITIME;
	chan->local_acc_lat	= L2CAP_DEFAULT_ACC_LAT;
	chan->local_flush_to	= L2CAP_DEFAULT_FLUSH_TO;

	/* Reference dropped in l2cap_chan_del(). */
	l2cap_chan_hold(chan);

	list_add(&chan->list, &conn->chan_l);
}
495
/* Attach @chan to @conn under the connection's channel-list lock. */
void l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan)
{
	mutex_lock(&conn->chan_lock);
	__l2cap_chan_add(conn, chan);
	mutex_unlock(&conn->chan_lock);
}
502
503 void l2cap_chan_del(struct l2cap_chan *chan, int err)
504 {
505 struct l2cap_conn *conn = chan->conn;
506
507 __clear_chan_timer(chan);
508
509 BT_DBG("chan %p, conn %p, err %d", chan, conn, err);
510
511 if (conn) {
512 /* Delete from channel list */
513 list_del(&chan->list);
514
515 l2cap_chan_put(chan);
516
517 chan->conn = NULL;
518
519 if (chan->chan_type != L2CAP_CHAN_CONN_FIX_A2MP)
520 hci_conn_put(conn->hcon);
521 }
522
523 if (chan->ops->teardown)
524 chan->ops->teardown(chan, err);
525
526 if (test_bit(CONF_NOT_COMPLETE, &chan->conf_state))
527 return;
528
529 switch(chan->mode) {
530 case L2CAP_MODE_BASIC:
531 break;
532
533 case L2CAP_MODE_ERTM:
534 __clear_retrans_timer(chan);
535 __clear_monitor_timer(chan);
536 __clear_ack_timer(chan);
537
538 skb_queue_purge(&chan->srej_q);
539
540 l2cap_seq_list_free(&chan->srej_list);
541 l2cap_seq_list_free(&chan->retrans_list);
542
543 /* fall through */
544
545 case L2CAP_MODE_STREAMING:
546 skb_queue_purge(&chan->tx_q);
547 break;
548 }
549
550 return;
551 }
552
/* Close @chan with @reason, following the state machine: connected
 * ACL channels get a disconnect request, pending incoming connections
 * (BT_CONNECT2) are refused, everything else is deleted directly.
 * Caller holds the channel lock.
 */
void l2cap_chan_close(struct l2cap_chan *chan, int reason)
{
	struct l2cap_conn *conn = chan->conn;
	struct sock *sk = chan->sk;

	BT_DBG("chan %p state %s sk %p", chan,
	       state_to_string(chan->state), sk);

	switch (chan->state) {
	case BT_LISTEN:
		if (chan->ops->teardown)
			chan->ops->teardown(chan, 0);
		break;

	case BT_CONNECTED:
	case BT_CONFIG:
		if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED &&
					conn->hcon->type == ACL_LINK) {
			/* Re-arm the timer so a peer that never answers
			 * the disconnect request still gets cleaned up.
			 */
			__set_chan_timer(chan, sk->sk_sndtimeo);
			l2cap_send_disconn_req(conn, chan, reason);
		} else
			l2cap_chan_del(chan, reason);
		break;

	case BT_CONNECT2:
		if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED &&
					conn->hcon->type == ACL_LINK) {
			struct l2cap_conn_rsp rsp;
			__u16 result;

			/* Refuse the pending incoming connection with a
			 * result matching why it was still pending.
			 */
			if (test_bit(BT_SK_DEFER_SETUP, &bt_sk(sk)->flags))
				result = L2CAP_CR_SEC_BLOCK;
			else
				result = L2CAP_CR_BAD_PSM;
			l2cap_state_change(chan, BT_DISCONN);

			rsp.scid   = cpu_to_le16(chan->dcid);
			rsp.dcid   = cpu_to_le16(chan->scid);
			rsp.result = cpu_to_le16(result);
			rsp.status = __constant_cpu_to_le16(L2CAP_CS_NO_INFO);
			l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
							sizeof(rsp), &rsp);
		}

		l2cap_chan_del(chan, reason);
		break;

	case BT_CONNECT:
	case BT_DISCONN:
		l2cap_chan_del(chan, reason);
		break;

	default:
		if (chan->ops->teardown)
			chan->ops->teardown(chan, 0);
		break;
	}
}
611
/* Map the channel type, PSM and security level to the HCI
 * authentication requirement used when securing the link.
 */
static inline u8 l2cap_get_auth_type(struct l2cap_chan *chan)
{
	if (chan->chan_type == L2CAP_CHAN_RAW) {
		switch (chan->sec_level) {
		case BT_SECURITY_HIGH:
			return HCI_AT_DEDICATED_BONDING_MITM;
		case BT_SECURITY_MEDIUM:
			return HCI_AT_DEDICATED_BONDING;
		default:
			return HCI_AT_NO_BONDING;
		}
	} else if (chan->psm == __constant_cpu_to_le16(L2CAP_PSM_SDP)) {
		/* SDP connections are downgraded from LOW to the SDP
		 * security level and never require bonding.
		 */
		if (chan->sec_level == BT_SECURITY_LOW)
			chan->sec_level = BT_SECURITY_SDP;

		if (chan->sec_level == BT_SECURITY_HIGH)
			return HCI_AT_NO_BONDING_MITM;
		else
			return HCI_AT_NO_BONDING;
	} else {
		switch (chan->sec_level) {
		case BT_SECURITY_HIGH:
			return HCI_AT_GENERAL_BONDING_MITM;
		case BT_SECURITY_MEDIUM:
			return HCI_AT_GENERAL_BONDING;
		default:
			return HCI_AT_NO_BONDING;
		}
	}
}
642
/* Service level security */
/* Request link security appropriate for @chan; returns the result of
 * hci_conn_security() (non-zero when the link already satisfies the
 * requirement).
 */
int l2cap_chan_check_security(struct l2cap_chan *chan)
{
	struct l2cap_conn *conn = chan->conn;
	__u8 auth_type;

	auth_type = l2cap_get_auth_type(chan);

	return hci_conn_security(conn->hcon, chan->sec_level, auth_type);
}
653
/* Allocate the next signalling-command identifier for @conn,
 * wrapping within the kernel-reserved range 1-128.
 */
static u8 l2cap_get_ident(struct l2cap_conn *conn)
{
	u8 id;

	/* Get next available identificator.
	 *    1 - 128 are used by kernel.
	 *  129 - 199 are reserved.
	 *  200 - 254 are used by utilities like l2ping, etc.
	 */

	spin_lock(&conn->lock);

	if (++conn->tx_ident > 128)
		conn->tx_ident = 1;

	id = conn->tx_ident;

	spin_unlock(&conn->lock);

	return id;
}
675
/* Build and transmit a signalling command on @conn's signalling
 * channel at maximum priority. Commands are sent non-flushable when
 * the controller supports it; send failures (allocation) are silent.
 */
static void l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len, void *data)
{
	struct sk_buff *skb = l2cap_build_cmd(conn, code, ident, len, data);
	u8 flags;

	BT_DBG("code 0x%2.2x", code);

	if (!skb)
		return;

	if (lmp_no_flush_capable(conn->hcon->hdev))
		flags = ACL_START_NO_FLUSH;
	else
		flags = ACL_START;

	bt_cb(skb)->force_active = BT_POWER_FORCE_ACTIVE_ON;
	skb->priority = HCI_PRIO_MAX;

	hci_send_acl(conn->hchan, skb, flags);
}
696
/* Transmit @skb on @chan's ACL link, choosing the non-flushable start
 * flag unless the channel was explicitly marked flushable or the
 * controller lacks support.
 */
static void l2cap_do_send(struct l2cap_chan *chan, struct sk_buff *skb)
{
	struct hci_conn *hcon = chan->conn->hcon;
	u16 flags;

	BT_DBG("chan %p, skb %p len %d priority %u", chan, skb, skb->len,
							skb->priority);

	if (!test_bit(FLAG_FLUSHABLE, &chan->flags) &&
					lmp_no_flush_capable(hcon->hdev))
		flags = ACL_START_NO_FLUSH;
	else
		flags = ACL_START;

	bt_cb(skb)->force_active = test_bit(FLAG_FORCE_ACTIVE, &chan->flags);
	hci_send_acl(chan->conn->hchan, skb, flags);
}
714
/* Decode a 16-bit enhanced control field into @control, zeroing the
 * fields that do not apply to the decoded frame type.
 */
static void __unpack_enhanced_control(u16 enh, struct l2cap_ctrl *control)
{
	control->reqseq = (enh & L2CAP_CTRL_REQSEQ) >> L2CAP_CTRL_REQSEQ_SHIFT;
	control->final = (enh & L2CAP_CTRL_FINAL) >> L2CAP_CTRL_FINAL_SHIFT;

	if (enh & L2CAP_CTRL_FRAME_TYPE) {
		/* S-Frame */
		control->sframe = 1;
		control->poll = (enh & L2CAP_CTRL_POLL) >> L2CAP_CTRL_POLL_SHIFT;
		control->super = (enh & L2CAP_CTRL_SUPERVISE) >> L2CAP_CTRL_SUPER_SHIFT;

		control->sar = 0;
		control->txseq = 0;
	} else {
		/* I-Frame */
		control->sframe = 0;
		control->sar = (enh & L2CAP_CTRL_SAR) >> L2CAP_CTRL_SAR_SHIFT;
		control->txseq = (enh & L2CAP_CTRL_TXSEQ) >> L2CAP_CTRL_TXSEQ_SHIFT;

		control->poll = 0;
		control->super = 0;
	}
}
738
/* Decode a 32-bit extended control field into @control, zeroing the
 * fields that do not apply to the decoded frame type.
 */
static void __unpack_extended_control(u32 ext, struct l2cap_ctrl *control)
{
	control->reqseq = (ext & L2CAP_EXT_CTRL_REQSEQ) >> L2CAP_EXT_CTRL_REQSEQ_SHIFT;
	control->final = (ext & L2CAP_EXT_CTRL_FINAL) >> L2CAP_EXT_CTRL_FINAL_SHIFT;

	if (ext & L2CAP_EXT_CTRL_FRAME_TYPE) {
		/* S-Frame */
		control->sframe = 1;
		control->poll = (ext & L2CAP_EXT_CTRL_POLL) >> L2CAP_EXT_CTRL_POLL_SHIFT;
		control->super = (ext & L2CAP_EXT_CTRL_SUPERVISE) >> L2CAP_EXT_CTRL_SUPER_SHIFT;

		control->sar = 0;
		control->txseq = 0;
	} else {
		/* I-Frame */
		control->sframe = 0;
		control->sar = (ext & L2CAP_EXT_CTRL_SAR) >> L2CAP_EXT_CTRL_SAR_SHIFT;
		control->txseq = (ext & L2CAP_EXT_CTRL_TXSEQ) >> L2CAP_EXT_CTRL_TXSEQ_SHIFT;

		control->poll = 0;
		control->super = 0;
	}
}
762
/* Parse @skb's control field (enhanced or extended, per channel flags)
 * into bt_cb(skb)->control and strip it from the skb data.
 */
static inline void __unpack_control(struct l2cap_chan *chan,
				    struct sk_buff *skb)
{
	if (test_bit(FLAG_EXT_CTRL, &chan->flags)) {
		__unpack_extended_control(get_unaligned_le32(skb->data),
					  &bt_cb(skb)->control);
		skb_pull(skb, L2CAP_EXT_CTRL_SIZE);
	} else {
		__unpack_enhanced_control(get_unaligned_le16(skb->data),
					  &bt_cb(skb)->control);
		skb_pull(skb, L2CAP_ENH_CTRL_SIZE);
	}
}
776
/* Encode @control as a 32-bit extended control field. */
static u32 __pack_extended_control(struct l2cap_ctrl *control)
{
	u32 packed;

	packed = control->reqseq << L2CAP_EXT_CTRL_REQSEQ_SHIFT;
	packed |= control->final << L2CAP_EXT_CTRL_FINAL_SHIFT;

	if (control->sframe) {
		packed |= control->poll << L2CAP_EXT_CTRL_POLL_SHIFT;
		packed |= control->super << L2CAP_EXT_CTRL_SUPER_SHIFT;
		packed |= L2CAP_EXT_CTRL_FRAME_TYPE;
	} else {
		packed |= control->sar << L2CAP_EXT_CTRL_SAR_SHIFT;
		packed |= control->txseq << L2CAP_EXT_CTRL_TXSEQ_SHIFT;
	}

	return packed;
}
795
/* Encode @control as a 16-bit enhanced control field. */
static u16 __pack_enhanced_control(struct l2cap_ctrl *control)
{
	u16 packed;

	packed = control->reqseq << L2CAP_CTRL_REQSEQ_SHIFT;
	packed |= control->final << L2CAP_CTRL_FINAL_SHIFT;

	if (control->sframe) {
		packed |= control->poll << L2CAP_CTRL_POLL_SHIFT;
		packed |= control->super << L2CAP_CTRL_SUPER_SHIFT;
		packed |= L2CAP_CTRL_FRAME_TYPE;
	} else {
		packed |= control->sar << L2CAP_CTRL_SAR_SHIFT;
		packed |= control->txseq << L2CAP_CTRL_TXSEQ_SHIFT;
	}

	return packed;
}
814
/* Write @control into @skb just past the L2CAP header, using the
 * control-field width configured on @chan.
 */
static inline void __pack_control(struct l2cap_chan *chan,
				  struct l2cap_ctrl *control,
				  struct sk_buff *skb)
{
	if (test_bit(FLAG_EXT_CTRL, &chan->flags)) {
		put_unaligned_le32(__pack_extended_control(control),
				   skb->data + L2CAP_HDR_SIZE);
	} else {
		put_unaligned_le16(__pack_enhanced_control(control),
				   skb->data + L2CAP_HDR_SIZE);
	}
}
827
828 static inline unsigned int __ertm_hdr_size(struct l2cap_chan *chan)
829 {
830 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
831 return L2CAP_EXT_HDR_SIZE;
832 else
833 return L2CAP_ENH_HDR_SIZE;
834 }
835
/* Build an S-frame PDU for @chan carrying the already-packed @control
 * field, appending an FCS when CRC16 is in use. Returns the skb at
 * maximum priority, or ERR_PTR(-ENOMEM).
 */
static struct sk_buff *l2cap_create_sframe_pdu(struct l2cap_chan *chan,
					       u32 control)
{
	struct sk_buff *skb;
	struct l2cap_hdr *lh;
	int hlen = __ertm_hdr_size(chan);

	if (chan->fcs == L2CAP_FCS_CRC16)
		hlen += L2CAP_FCS_SIZE;

	skb = bt_skb_alloc(hlen, GFP_KERNEL);

	if (!skb)
		return ERR_PTR(-ENOMEM);

	lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
	lh->len = cpu_to_le16(hlen - L2CAP_HDR_SIZE);
	lh->cid = cpu_to_le16(chan->dcid);

	if (test_bit(FLAG_EXT_CTRL, &chan->flags))
		put_unaligned_le32(control, skb_put(skb, L2CAP_EXT_CTRL_SIZE));
	else
		put_unaligned_le16(control, skb_put(skb, L2CAP_ENH_CTRL_SIZE));

	if (chan->fcs == L2CAP_FCS_CRC16) {
		/* FCS covers the header and control field written above. */
		u16 fcs = crc16(0, (u8 *)skb->data, skb->len);
		put_unaligned_le16(fcs, skb_put(skb, L2CAP_FCS_SIZE));
	}

	skb->priority = HCI_PRIO_MAX;
	return skb;
}
868
/* Transmit the supervisory frame described by @control on @chan,
 * updating ERTM bookkeeping (F-bit, RNR-sent flag, last acked
 * sequence number and the ack timer) as a side effect.
 */
static void l2cap_send_sframe(struct l2cap_chan *chan,
			      struct l2cap_ctrl *control)
{
	struct sk_buff *skb;
	u32 control_field;

	BT_DBG("chan %p, control %p", chan, control);

	if (!control->sframe)
		return;

	/* A pending F-bit is carried on any non-poll S-frame. */
	if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state) &&
	    !control->poll)
		control->final = 1;

	if (control->super == L2CAP_SUPER_RR)
		clear_bit(CONN_RNR_SENT, &chan->conn_state);
	else if (control->super == L2CAP_SUPER_RNR)
		set_bit(CONN_RNR_SENT, &chan->conn_state);

	/* Any frame other than SREJ acknowledges up to reqseq. */
	if (control->super != L2CAP_SUPER_SREJ) {
		chan->last_acked_seq = control->reqseq;
		__clear_ack_timer(chan);
	}

	BT_DBG("reqseq %d, final %d, poll %d, super %d", control->reqseq,
	       control->final, control->poll, control->super);

	if (test_bit(FLAG_EXT_CTRL, &chan->flags))
		control_field = __pack_extended_control(control);
	else
		control_field = __pack_enhanced_control(control);

	skb = l2cap_create_sframe_pdu(chan, control_field);
	if (!IS_ERR(skb))
		l2cap_do_send(chan, skb);
}
906
/* Send an RR (or RNR when the local side is busy) S-frame
 * acknowledging up to the current buffer_seq, with the P-bit set per
 * @poll.
 */
static void l2cap_send_rr_or_rnr(struct l2cap_chan *chan, bool poll)
{
	struct l2cap_ctrl control;

	BT_DBG("chan %p, poll %d", chan, poll);

	memset(&control, 0, sizeof(control));
	control.sframe = 1;
	control.poll = poll;

	if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state))
		control.super = L2CAP_SUPER_RNR;
	else
		control.super = L2CAP_SUPER_RR;

	control.reqseq = chan->buffer_seq;
	l2cap_send_sframe(chan, &control);
}
925
926 static inline int __l2cap_no_conn_pending(struct l2cap_chan *chan)
927 {
928 return !test_bit(CONF_CONNECT_PEND, &chan->conf_state);
929 }
930
/* Send an L2CAP connect request for @chan, recording the command
 * identifier and marking the connect as pending.
 */
static void l2cap_send_conn_req(struct l2cap_chan *chan)
{
	struct l2cap_conn *conn = chan->conn;
	struct l2cap_conn_req req;

	req.scid = cpu_to_le16(chan->scid);
	req.psm  = chan->psm;

	/* Remember the ident so the response can be matched back. */
	chan->ident = l2cap_get_ident(conn);

	set_bit(CONF_CONNECT_PEND, &chan->conf_state);

	l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_REQ, sizeof(req), &req);
}
945
/* Mark @chan fully connected: reset configuration state, stop the
 * channel timer and notify the ops layer.
 */
static void l2cap_chan_ready(struct l2cap_chan *chan)
{
	/* This clears all conf flags, including CONF_NOT_COMPLETE */
	chan->conf_state = 0;
	__clear_chan_timer(chan);

	chan->state = BT_CONNECTED;

	chan->ops->ready(chan);
}
956
/* Kick off channel establishment on @chan: LE links are ready
 * immediately; on BR/EDR the feature mask must be known (requesting it
 * if necessary) and security satisfied before the connect request is
 * sent.
 */
static void l2cap_do_start(struct l2cap_chan *chan)
{
	struct l2cap_conn *conn = chan->conn;

	if (conn->hcon->type == LE_LINK) {
		l2cap_chan_ready(chan);
		return;
	}

	if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) {
		/* Wait until the feature-mask exchange completes. */
		if (!(conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE))
			return;

		if (l2cap_chan_check_security(chan) &&
				__l2cap_no_conn_pending(chan))
			l2cap_send_conn_req(chan);
	} else {
		/* First channel on this connection: query the remote
		 * feature mask; l2cap_conn_start() resumes later.
		 */
		struct l2cap_info_req req;
		req.type = __constant_cpu_to_le16(L2CAP_IT_FEAT_MASK);

		conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
		conn->info_ident = l2cap_get_ident(conn);

		schedule_delayed_work(&conn->info_timer, L2CAP_INFO_TIMEOUT);

		l2cap_send_cmd(conn, conn->info_ident,
					L2CAP_INFO_REQ, sizeof(req), &req);
	}
}
986
987 static inline int l2cap_mode_supported(__u8 mode, __u32 feat_mask)
988 {
989 u32 local_feat_mask = l2cap_feat_mask;
990 if (!disable_ertm)
991 local_feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING;
992
993 switch (mode) {
994 case L2CAP_MODE_ERTM:
995 return L2CAP_FEAT_ERTM & feat_mask & local_feat_mask;
996 case L2CAP_MODE_STREAMING:
997 return L2CAP_FEAT_STREAMING & feat_mask & local_feat_mask;
998 default:
999 return 0x00;
1000 }
1001 }
1002
/* Send a disconnect request for @chan (or just change state for A2MP
 * channels), stopping ERTM timers and moving the channel to
 * BT_DISCONN with @err recorded on the socket.
 */
static void l2cap_send_disconn_req(struct l2cap_conn *conn, struct l2cap_chan *chan, int err)
{
	struct sock *sk = chan->sk;
	struct l2cap_disconn_req req;

	if (!conn)
		return;

	if (chan->mode == L2CAP_MODE_ERTM) {
		__clear_retrans_timer(chan);
		__clear_monitor_timer(chan);
		__clear_ack_timer(chan);
	}

	/* A2MP channels have no peer CID to disconnect. */
	if (chan->chan_type == L2CAP_CHAN_CONN_FIX_A2MP) {
		__l2cap_state_change(chan, BT_DISCONN);
		return;
	}

	req.dcid = cpu_to_le16(chan->dcid);
	req.scid = cpu_to_le16(chan->scid);
	l2cap_send_cmd(conn, l2cap_get_ident(conn),
			L2CAP_DISCONN_REQ, sizeof(req), &req);

	lock_sock(sk);
	__l2cap_state_change(chan, BT_DISCONN);
	__l2cap_chan_set_err(chan, err);
	release_sock(sk);
}
1032
1033 /* ---- L2CAP connections ---- */
/* Resume establishment of every connection-oriented channel on @conn,
 * typically after the information (feature-mask) exchange finishes:
 * BT_CONNECT channels get their connect request sent (or are closed
 * if their mode is unsupported), and BT_CONNECT2 channels are answered
 * with success, pending, or an authorization result.
 */
static void l2cap_conn_start(struct l2cap_conn *conn)
{
	struct l2cap_chan *chan, *tmp;

	BT_DBG("conn %p", conn);

	mutex_lock(&conn->chan_lock);

	list_for_each_entry_safe(chan, tmp, &conn->chan_l, list) {
		struct sock *sk = chan->sk;

		l2cap_chan_lock(chan);

		if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
			l2cap_chan_unlock(chan);
			continue;
		}

		if (chan->state == BT_CONNECT) {
			/* Wait for security; skip channels with a
			 * connect request already outstanding.
			 */
			if (!l2cap_chan_check_security(chan) ||
					!__l2cap_no_conn_pending(chan)) {
				l2cap_chan_unlock(chan);
				continue;
			}

			/* State-2 devices cannot fall back to basic mode;
			 * close if the negotiated mode is unsupported.
			 */
			if (!l2cap_mode_supported(chan->mode, conn->feat_mask)
					&& test_bit(CONF_STATE2_DEVICE,
					&chan->conf_state)) {
				l2cap_chan_close(chan, ECONNRESET);
				l2cap_chan_unlock(chan);
				continue;
			}

			l2cap_send_conn_req(chan);

		} else if (chan->state == BT_CONNECT2) {
			struct l2cap_conn_rsp rsp;
			char buf[128];
			rsp.scid = cpu_to_le16(chan->dcid);
			rsp.dcid = cpu_to_le16(chan->scid);

			if (l2cap_chan_check_security(chan)) {
				lock_sock(sk);
				if (test_bit(BT_SK_DEFER_SETUP,
					     &bt_sk(sk)->flags)) {
					/* Userspace must authorize: report
					 * pending and wake the listener.
					 */
					struct sock *parent = bt_sk(sk)->parent;
					rsp.result = __constant_cpu_to_le16(L2CAP_CR_PEND);
					rsp.status = __constant_cpu_to_le16(L2CAP_CS_AUTHOR_PEND);
					if (parent)
						parent->sk_data_ready(parent, 0);

				} else {
					__l2cap_state_change(chan, BT_CONFIG);
					rsp.result = __constant_cpu_to_le16(L2CAP_CR_SUCCESS);
					rsp.status = __constant_cpu_to_le16(L2CAP_CS_NO_INFO);
				}
				release_sock(sk);
			} else {
				rsp.result = __constant_cpu_to_le16(L2CAP_CR_PEND);
				rsp.status = __constant_cpu_to_le16(L2CAP_CS_AUTHEN_PEND);
			}

			l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
							sizeof(rsp), &rsp);

			/* Only start configuration once, and only after a
			 * successful response.
			 */
			if (test_bit(CONF_REQ_SENT, &chan->conf_state) ||
					rsp.result != L2CAP_CR_SUCCESS) {
				l2cap_chan_unlock(chan);
				continue;
			}

			set_bit(CONF_REQ_SENT, &chan->conf_state);
			l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
						l2cap_build_conf_req(chan, buf), buf);
			chan->num_conf_req++;
		}

		l2cap_chan_unlock(chan);
	}

	mutex_unlock(&conn->chan_lock);
}
1116
/* Find socket with cid and source/destination bdaddr.
 * Returns closest match, locked.
 */
static struct l2cap_chan *l2cap_global_chan_by_scid(int state, u16 cid,
						    bdaddr_t *src,
						    bdaddr_t *dst)
{
	struct l2cap_chan *c, *c1 = NULL;

	read_lock(&chan_list_lock);

	list_for_each_entry(c, &chan_list, global_l) {
		struct sock *sk = c->sk;

		/* state == 0 matches channels in any state. */
		if (state && c->state != state)
			continue;

		if (c->scid == cid) {
			int src_match, dst_match;
			int src_any, dst_any;

			/* Exact match. */
			src_match = !bacmp(&bt_sk(sk)->src, src);
			dst_match = !bacmp(&bt_sk(sk)->dst, dst);
			if (src_match && dst_match) {
				read_unlock(&chan_list_lock);
				return c;
			}

			/* Closest match */
			src_any = !bacmp(&bt_sk(sk)->src, BDADDR_ANY);
			dst_any = !bacmp(&bt_sk(sk)->dst, BDADDR_ANY);
			if ((src_match && dst_any) || (src_any && dst_match) ||
			    (src_any && dst_any))
				c1 = c;
		}
	}

	read_unlock(&chan_list_lock);

	/* Fall back to the best wildcard match, if any. */
	return c1;
}
1159
/* Accept an incoming LE connection: if a socket is listening on the LE
 * data CID, spawn a child channel, attach it to @conn and mark it
 * ready.
 */
static void l2cap_le_conn_ready(struct l2cap_conn *conn)
{
	struct sock *parent, *sk;
	struct l2cap_chan *chan, *pchan;

	BT_DBG("");

	/* Check if we have socket listening on cid */
	pchan = l2cap_global_chan_by_scid(BT_LISTEN, L2CAP_CID_LE_DATA,
					  conn->src, conn->dst);
	if (!pchan)
		return;

	parent = pchan->sk;

	lock_sock(parent);

	chan = pchan->ops->new_connection(pchan);
	if (!chan)
		goto clean;

	sk = chan->sk;

	/* Reference dropped when the channel is deleted. */
	hci_conn_hold(conn->hcon);

	bacpy(&bt_sk(sk)->src, conn->src);
	bacpy(&bt_sk(sk)->dst, conn->dst);

	bt_accept_enqueue(parent, sk);

	l2cap_chan_add(conn, chan);

	l2cap_chan_ready(chan);

clean:
	release_sock(parent);
}
1197
/* Called once the underlying HCI link is fully established: advance
 * every channel on the connection to its next state.
 */
static void l2cap_conn_ready(struct l2cap_conn *conn)
{
	struct l2cap_chan *chan;

	BT_DBG("conn %p", conn);

	/* Incoming LE links may have a listener waiting for them. */
	if (!conn->hcon->out && conn->hcon->type == LE_LINK)
		l2cap_le_conn_ready(conn);

	/* Outgoing LE links kick off SMP security negotiation now. */
	if (conn->hcon->out && conn->hcon->type == LE_LINK)
		smp_conn_security(conn, conn->hcon->pending_sec_level);

	mutex_lock(&conn->chan_lock);

	list_for_each_entry(chan, &conn->chan_l, list) {

		l2cap_chan_lock(chan);

		/* A2MP fixed channels manage their own state. */
		if (chan->chan_type == L2CAP_CHAN_CONN_FIX_A2MP) {
			l2cap_chan_unlock(chan);
			continue;
		}

		if (conn->hcon->type == LE_LINK) {
			/* LE channels are ready once security is in place. */
			if (smp_conn_security(conn, chan->sec_level))
				l2cap_chan_ready(chan);

		} else if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
			/* Connectionless/raw channels need no L2CAP setup:
			 * go straight to BT_CONNECTED.
			 */
			struct sock *sk = chan->sk;
			__clear_chan_timer(chan);
			lock_sock(sk);
			__l2cap_state_change(chan, BT_CONNECTED);
			sk->sk_state_change(sk);
			release_sock(sk);

		} else if (chan->state == BT_CONNECT)
			l2cap_do_start(chan);

		l2cap_chan_unlock(chan);
	}

	mutex_unlock(&conn->chan_lock);
}
1241
/* Notify sockets that we cannot guarantee reliability anymore */
static void l2cap_conn_unreliable(struct l2cap_conn *conn, int err)
{
	struct l2cap_chan *chan;

	BT_DBG("conn %p", conn);

	mutex_lock(&conn->chan_lock);

	list_for_each_entry(chan, &conn->chan_l, list) {
		/* Only channels that demanded reliable delivery are told. */
		if (test_bit(FLAG_FORCE_RELIABLE, &chan->flags))
			__l2cap_chan_set_err(chan, err);
	}

	mutex_unlock(&conn->chan_lock);
}
1258
/* Information-request exchange timed out: give up waiting for the
 * remote's feature mask and start pending connections with defaults.
 */
static void l2cap_info_timeout(struct work_struct *work)
{
	struct l2cap_conn *conn = container_of(work, struct l2cap_conn,
					       info_timer.work);

	/* Mark the exchange as done so l2cap_conn_start() proceeds. */
	conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
	conn->info_ident = 0;

	l2cap_conn_start(conn);
}
1269
/* Tear down an L2CAP connection: kill every channel, stop the timers
 * and free the connection object attached to the HCI link.
 */
static void l2cap_conn_del(struct hci_conn *hcon, int err)
{
	struct l2cap_conn *conn = hcon->l2cap_data;
	struct l2cap_chan *chan, *l;

	if (!conn)
		return;

	BT_DBG("hcon %p conn %p, err %d", hcon, conn, err);

	/* Drop any partially reassembled frame. */
	kfree_skb(conn->rx_skb);

	mutex_lock(&conn->chan_lock);

	/* Kill channels */
	list_for_each_entry_safe(chan, l, &conn->chan_l, list) {
		/* Hold a reference so the channel outlives l2cap_chan_del()
		 * until ops->close() has run.
		 */
		l2cap_chan_hold(chan);
		l2cap_chan_lock(chan);

		l2cap_chan_del(chan, err);

		l2cap_chan_unlock(chan);

		chan->ops->close(chan);
		l2cap_chan_put(chan);
	}

	mutex_unlock(&conn->chan_lock);

	hci_chan_del(conn->hchan);

	/* The timers were only armed if the matching exchange started. */
	if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT)
		cancel_delayed_work_sync(&conn->info_timer);

	if (test_and_clear_bit(HCI_CONN_LE_SMP_PEND, &hcon->flags)) {
		cancel_delayed_work_sync(&conn->security_timer);
		smp_chan_destroy(conn);
	}

	hcon->l2cap_data = NULL;
	kfree(conn);
}
1312
/* SMP pairing took too long: destroy the SMP context and drop the
 * whole LE connection.
 */
static void security_timeout(struct work_struct *work)
{
	struct l2cap_conn *conn = container_of(work, struct l2cap_conn,
					       security_timer.work);

	BT_DBG("conn %p", conn);

	/* Clearing the pending bit here also stops l2cap_conn_del() from
	 * cancel_delayed_work_sync()-ing this very work item.
	 */
	if (test_and_clear_bit(HCI_CONN_LE_SMP_PEND, &conn->hcon->flags)) {
		smp_chan_destroy(conn);
		l2cap_conn_del(conn->hcon, ETIMEDOUT);
	}
}
1325
/* Allocate and initialise the l2cap_conn object for an HCI link.
 * Returns the existing connection if one is already attached, or NULL
 * on allocation failure or a non-zero HCI status.
 */
static struct l2cap_conn *l2cap_conn_add(struct hci_conn *hcon, u8 status)
{
	struct l2cap_conn *conn = hcon->l2cap_data;
	struct hci_chan *hchan;

	if (conn || status)
		return conn;

	hchan = hci_chan_create(hcon);
	if (!hchan)
		return NULL;

	conn = kzalloc(sizeof(struct l2cap_conn), GFP_ATOMIC);
	if (!conn) {
		hci_chan_del(hchan);
		return NULL;
	}

	hcon->l2cap_data = conn;
	conn->hcon = hcon;
	conn->hchan = hchan;

	BT_DBG("hcon %p conn %p hchan %p", hcon, conn, hchan);

	/* LE links may advertise a dedicated (usually smaller) data MTU. */
	if (hcon->hdev->le_mtu && hcon->type == LE_LINK)
		conn->mtu = hcon->hdev->le_mtu;
	else
		conn->mtu = hcon->hdev->acl_mtu;

	conn->src = &hcon->hdev->bdaddr;
	conn->dst = &hcon->dst;

	conn->feat_mask = 0;

	spin_lock_init(&conn->lock);
	mutex_init(&conn->chan_lock);

	INIT_LIST_HEAD(&conn->chan_l);

	/* Only one of the two delayed works is ever used, depending on
	 * the link type.
	 */
	if (hcon->type == LE_LINK)
		INIT_DELAYED_WORK(&conn->security_timer, security_timeout);
	else
		INIT_DELAYED_WORK(&conn->info_timer, l2cap_info_timeout);

	conn->disc_reason = HCI_ERROR_REMOTE_USER_TERM;

	return conn;
}
1374
1375 /* ---- Socket interface ---- */
1376
1377 /* Find socket with psm and source / destination bdaddr.
1378 * Returns closest match.
1379 */
1380 static struct l2cap_chan *l2cap_global_chan_by_psm(int state, __le16 psm,
1381 bdaddr_t *src,
1382 bdaddr_t *dst)
1383 {
1384 struct l2cap_chan *c, *c1 = NULL;
1385
1386 read_lock(&chan_list_lock);
1387
1388 list_for_each_entry(c, &chan_list, global_l) {
1389 struct sock *sk = c->sk;
1390
1391 if (state && c->state != state)
1392 continue;
1393
1394 if (c->psm == psm) {
1395 int src_match, dst_match;
1396 int src_any, dst_any;
1397
1398 /* Exact match. */
1399 src_match = !bacmp(&bt_sk(sk)->src, src);
1400 dst_match = !bacmp(&bt_sk(sk)->dst, dst);
1401 if (src_match && dst_match) {
1402 read_unlock(&chan_list_lock);
1403 return c;
1404 }
1405
1406 /* Closest match */
1407 src_any = !bacmp(&bt_sk(sk)->src, BDADDR_ANY);
1408 dst_any = !bacmp(&bt_sk(sk)->dst, BDADDR_ANY);
1409 if ((src_match && dst_any) || (src_any && dst_match) ||
1410 (src_any && dst_any))
1411 c1 = c;
1412 }
1413 }
1414
1415 read_unlock(&chan_list_lock);
1416
1417 return c1;
1418 }
1419
/* Initiate an outgoing L2CAP connection on @chan to @dst.
 *
 * Resolves the local adapter, validates PSM/CID and channel mode,
 * creates (or reuses) the HCI link and starts the L2CAP-level connect.
 * Returns 0 on success or when a connect is already in progress,
 * otherwise a negative errno.
 */
int l2cap_chan_connect(struct l2cap_chan *chan, __le16 psm, u16 cid,
		       bdaddr_t *dst, u8 dst_type)
{
	struct sock *sk = chan->sk;
	bdaddr_t *src = &bt_sk(sk)->src;
	struct l2cap_conn *conn;
	struct hci_conn *hcon;
	struct hci_dev *hdev;
	__u8 auth_type;
	int err;

	BT_DBG("%s -> %s (type %u) psm 0x%2.2x", batostr(src), batostr(dst),
	       dst_type, __le16_to_cpu(chan->psm));

	hdev = hci_get_route(dst, src);
	if (!hdev)
		return -EHOSTUNREACH;

	hci_dev_lock(hdev);

	l2cap_chan_lock(chan);

	/* PSM must be odd and lsb of upper byte must be 0 */
	if ((__le16_to_cpu(psm) & 0x0101) != 0x0001 && !cid &&
	    chan->chan_type != L2CAP_CHAN_RAW) {
		err = -EINVAL;
		goto done;
	}

	/* Connection-oriented channels need either a PSM or a fixed CID. */
	if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED && !(psm || cid)) {
		err = -EINVAL;
		goto done;
	}

	/* ERTM and streaming modes may be administratively disabled. */
	switch (chan->mode) {
	case L2CAP_MODE_BASIC:
		break;
	case L2CAP_MODE_ERTM:
	case L2CAP_MODE_STREAMING:
		if (!disable_ertm)
			break;
		/* fall through */
	default:
		err = -ENOTSUPP;
		goto done;
	}

	switch (chan->state) {
	case BT_CONNECT:
	case BT_CONNECT2:
	case BT_CONFIG:
		/* Already connecting */
		err = 0;
		goto done;

	case BT_CONNECTED:
		/* Already connected */
		err = -EISCONN;
		goto done;

	case BT_OPEN:
	case BT_BOUND:
		/* Can connect */
		break;

	default:
		err = -EBADFD;
		goto done;
	}

	/* Set destination address and psm */
	lock_sock(sk);
	bacpy(&bt_sk(sk)->dst, dst);
	release_sock(sk);

	chan->psm = psm;
	chan->dcid = cid;

	auth_type = l2cap_get_auth_type(chan);

	/* The LE data CID selects an LE link; everything else is ACL. */
	if (chan->dcid == L2CAP_CID_LE_DATA)
		hcon = hci_connect(hdev, LE_LINK, dst, dst_type,
				   chan->sec_level, auth_type);
	else
		hcon = hci_connect(hdev, ACL_LINK, dst, dst_type,
				   chan->sec_level, auth_type);

	if (IS_ERR(hcon)) {
		err = PTR_ERR(hcon);
		goto done;
	}

	conn = l2cap_conn_add(hcon, 0);
	if (!conn) {
		hci_conn_put(hcon);
		err = -ENOMEM;
		goto done;
	}

	/* An LE link carries at most one channel: refuse if busy. */
	if (hcon->type == LE_LINK) {
		err = 0;

		if (!list_empty(&conn->chan_l)) {
			err = -EBUSY;
			hci_conn_put(hcon);
		}

		if (err)
			goto done;
	}

	/* Update source addr of the socket */
	bacpy(src, conn->src);

	/* NOTE(review): the channel lock is dropped around
	 * l2cap_chan_add() - presumably for lock ordering with
	 * conn->chan_lock; confirm against l2cap_chan_add().
	 */
	l2cap_chan_unlock(chan);
	l2cap_chan_add(conn, chan);
	l2cap_chan_lock(chan);

	l2cap_state_change(chan, BT_CONNECT);
	__set_chan_timer(chan, sk->sk_sndtimeo);

	if (hcon->state == BT_CONNECTED) {
		if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
			__clear_chan_timer(chan);
			if (l2cap_chan_check_security(chan))
				l2cap_state_change(chan, BT_CONNECTED);
		} else
			l2cap_do_start(chan);
	}

	err = 0;

done:
	l2cap_chan_unlock(chan);
	hci_dev_unlock(hdev);
	hci_dev_put(hdev);
	return err;
}
1558
/* Wait (interruptibly) until every outstanding ERTM I-frame has been
 * acked by the remote or the socket reports an error.  Called with the
 * socket locked; the lock is dropped around each sleep so incoming
 * acks can be processed.
 *
 * NOTE(review): there is no overall bound on the wait - the timeout is
 * re-armed on every loop iteration while frames remain unacked.
 */
int __l2cap_wait_ack(struct sock *sk)
{
	struct l2cap_chan *chan = l2cap_pi(sk)->chan;
	DECLARE_WAITQUEUE(wait, current);
	int err = 0;
	int timeo = HZ/5;

	add_wait_queue(sk_sleep(sk), &wait);
	set_current_state(TASK_INTERRUPTIBLE);
	while (chan->unacked_frames > 0 && chan->conn) {
		if (!timeo)
			timeo = HZ/5;

		if (signal_pending(current)) {
			err = sock_intr_errno(timeo);
			break;
		}

		release_sock(sk);
		timeo = schedule_timeout(timeo);
		lock_sock(sk);
		set_current_state(TASK_INTERRUPTIBLE);

		err = sock_error(sk);
		if (err)
			break;
	}
	set_current_state(TASK_RUNNING);
	remove_wait_queue(sk_sleep(sk), &wait);
	return err;
}
1590
1591 static void l2cap_monitor_timeout(struct work_struct *work)
1592 {
1593 struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
1594 monitor_timer.work);
1595
1596 BT_DBG("chan %p", chan);
1597
1598 l2cap_chan_lock(chan);
1599
1600 if (!chan->conn) {
1601 l2cap_chan_unlock(chan);
1602 l2cap_chan_put(chan);
1603 return;
1604 }
1605
1606 l2cap_tx(chan, NULL, NULL, L2CAP_EV_MONITOR_TO);
1607
1608 l2cap_chan_unlock(chan);
1609 l2cap_chan_put(chan);
1610 }
1611
1612 static void l2cap_retrans_timeout(struct work_struct *work)
1613 {
1614 struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
1615 retrans_timer.work);
1616
1617 BT_DBG("chan %p", chan);
1618
1619 l2cap_chan_lock(chan);
1620
1621 if (!chan->conn) {
1622 l2cap_chan_unlock(chan);
1623 l2cap_chan_put(chan);
1624 return;
1625 }
1626
1627 l2cap_tx(chan, NULL, NULL, L2CAP_EV_RETRANS_TO);
1628 l2cap_chan_unlock(chan);
1629 l2cap_chan_put(chan);
1630 }
1631
1632 static void l2cap_streaming_send(struct l2cap_chan *chan,
1633 struct sk_buff_head *skbs)
1634 {
1635 struct sk_buff *skb;
1636 struct l2cap_ctrl *control;
1637
1638 BT_DBG("chan %p, skbs %p", chan, skbs);
1639
1640 skb_queue_splice_tail_init(skbs, &chan->tx_q);
1641
1642 while (!skb_queue_empty(&chan->tx_q)) {
1643
1644 skb = skb_dequeue(&chan->tx_q);
1645
1646 bt_cb(skb)->control.retries = 1;
1647 control = &bt_cb(skb)->control;
1648
1649 control->reqseq = 0;
1650 control->txseq = chan->next_tx_seq;
1651
1652 __pack_control(chan, control, skb);
1653
1654 if (chan->fcs == L2CAP_FCS_CRC16) {
1655 u16 fcs = crc16(0, (u8 *) skb->data, skb->len);
1656 put_unaligned_le16(fcs, skb_put(skb, L2CAP_FCS_SIZE));
1657 }
1658
1659 l2cap_do_send(chan, skb);
1660
1661 BT_DBG("Sent txseq %u", control->txseq);
1662
1663 chan->next_tx_seq = __next_seq(chan, chan->next_tx_seq);
1664 chan->frames_sent++;
1665 }
1666 }
1667
/* Transmit queued I-frames, up to the remote's TX window.  Returns the
 * number of frames sent, 0 when transmission is blocked (remote busy
 * or wrong TX state), or -ENOTCONN.
 */
static int l2cap_ertm_send(struct l2cap_chan *chan)
{
	struct sk_buff *skb, *tx_skb;
	struct l2cap_ctrl *control;
	int sent = 0;

	BT_DBG("chan %p", chan);

	if (chan->state != BT_CONNECTED)
		return -ENOTCONN;

	if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state))
		return 0;

	while (chan->tx_send_head &&
	       chan->unacked_frames < chan->remote_tx_win &&
	       chan->tx_state == L2CAP_TX_STATE_XMIT) {

		skb = chan->tx_send_head;

		bt_cb(skb)->control.retries = 1;
		control = &bt_cb(skb)->control;

		/* Piggy-back a pending F-bit on this frame. */
		if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state))
			control->final = 1;

		/* Every I-frame also acks everything received so far. */
		control->reqseq = chan->buffer_seq;
		chan->last_acked_seq = chan->buffer_seq;
		control->txseq = chan->next_tx_seq;

		__pack_control(chan, control, skb);

		if (chan->fcs == L2CAP_FCS_CRC16) {
			u16 fcs = crc16(0, (u8 *) skb->data, skb->len);
			put_unaligned_le16(fcs, skb_put(skb, L2CAP_FCS_SIZE));
		}

		/* Clone after data has been modified. Data is assumed to be
		   read-only (for locking purposes) on cloned sk_buffs.
		 */
		tx_skb = skb_clone(skb, GFP_KERNEL);

		if (!tx_skb)
			break;

		__set_retrans_timer(chan);

		chan->next_tx_seq = __next_seq(chan, chan->next_tx_seq);
		chan->unacked_frames++;
		chan->frames_sent++;
		sent++;

		/* Advance tx_send_head; the original skb stays on tx_q for
		 * possible retransmission.
		 */
		if (skb_queue_is_last(&chan->tx_q, skb))
			chan->tx_send_head = NULL;
		else
			chan->tx_send_head = skb_queue_next(&chan->tx_q, skb);

		l2cap_do_send(chan, tx_skb);
		BT_DBG("Sent txseq %u", control->txseq);
	}

	BT_DBG("Sent %d, %u unacked, %u in ERTM queue", sent,
	       chan->unacked_frames, skb_queue_len(&chan->tx_q));

	return sent;
}
1734
/* Retransmit every sequence number currently on the retrans_list,
 * disconnecting the channel when a frame exceeds max_tx retries.
 */
static void l2cap_ertm_resend(struct l2cap_chan *chan)
{
	struct l2cap_ctrl control;
	struct sk_buff *skb;
	struct sk_buff *tx_skb;
	u16 seq;

	BT_DBG("chan %p", chan);

	if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state))
		return;

	while (chan->retrans_list.head != L2CAP_SEQ_LIST_CLEAR) {
		seq = l2cap_seq_list_pop(&chan->retrans_list);

		skb = l2cap_ertm_seq_in_queue(&chan->tx_q, seq);
		if (!skb) {
			BT_DBG("Error: Can't retransmit seq %d, frame missing",
			       seq);
			continue;
		}

		bt_cb(skb)->control.retries++;
		control = bt_cb(skb)->control;

		if (chan->max_tx != 0 &&
		    bt_cb(skb)->control.retries > chan->max_tx) {
			BT_DBG("Retry limit exceeded (%d)", chan->max_tx);
			l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
			l2cap_seq_list_clear(&chan->retrans_list);
			break;
		}

		/* Refresh the ack and F-bit for the retransmission. */
		control.reqseq = chan->buffer_seq;
		if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state))
			control.final = 1;
		else
			control.final = 0;

		if (skb_cloned(skb)) {
			/* Cloned sk_buffs are read-only, so we need a
			 * writeable copy
			 */
			tx_skb = skb_copy(skb, GFP_ATOMIC);
		} else {
			tx_skb = skb_clone(skb, GFP_ATOMIC);
		}

		if (!tx_skb) {
			l2cap_seq_list_clear(&chan->retrans_list);
			break;
		}

		/* Update skb contents */
		if (test_bit(FLAG_EXT_CTRL, &chan->flags)) {
			put_unaligned_le32(__pack_extended_control(&control),
					   tx_skb->data + L2CAP_HDR_SIZE);
		} else {
			put_unaligned_le16(__pack_enhanced_control(&control),
					   tx_skb->data + L2CAP_HDR_SIZE);
		}

		if (chan->fcs == L2CAP_FCS_CRC16) {
			u16 fcs = crc16(0, (u8 *) tx_skb->data, tx_skb->len);
			put_unaligned_le16(fcs, skb_put(tx_skb,
							L2CAP_FCS_SIZE));
		}

		l2cap_do_send(chan, tx_skb);

		BT_DBG("Resent txseq %d", control.txseq);

		chan->last_acked_seq = chan->buffer_seq;
	}
}
1810
/* Queue a single sequence number (control->reqseq) for retransmission
 * and send it right away.
 */
static void l2cap_retransmit(struct l2cap_chan *chan,
			     struct l2cap_ctrl *control)
{
	BT_DBG("chan %p, control %p", chan, control);

	l2cap_seq_list_append(&chan->retrans_list, control->reqseq);
	l2cap_ertm_resend(chan);
}
1819
/* Retransmit every unacked frame starting at control->reqseq, up to
 * (but not including) tx_send_head.
 */
static void l2cap_retransmit_all(struct l2cap_chan *chan,
				 struct l2cap_ctrl *control)
{
	struct sk_buff *skb;

	BT_DBG("chan %p, control %p", chan, control);

	/* A poll from the remote must be answered with the F-bit set. */
	if (control->poll)
		set_bit(CONN_SEND_FBIT, &chan->conn_state);

	l2cap_seq_list_clear(&chan->retrans_list);

	if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state))
		return;

	if (chan->unacked_frames) {
		/* Find the first frame the remote did not ack. */
		skb_queue_walk(&chan->tx_q, skb) {
			if (bt_cb(skb)->control.txseq == control->reqseq ||
			    skb == chan->tx_send_head)
				break;
		}

		/* Queue it and every later sent frame for retransmission. */
		skb_queue_walk_from(&chan->tx_q, skb) {
			if (skb == chan->tx_send_head)
				break;

			l2cap_seq_list_append(&chan->retrans_list,
					      bt_cb(skb)->control.txseq);
		}

		l2cap_ertm_resend(chan);
	}
}
1853
/* Decide how to acknowledge received I-frames: send RNR when locally
 * busy, let outgoing I-frames piggy-back the ack when possible, send
 * an explicit RR once roughly 3/4 of the ack window is pending, and
 * otherwise (re)arm the ack timer.
 */
static void l2cap_send_ack(struct l2cap_chan *chan)
{
	struct l2cap_ctrl control;
	u16 frames_to_ack = __seq_offset(chan, chan->buffer_seq,
					 chan->last_acked_seq);
	int threshold;

	BT_DBG("chan %p last_acked_seq %d buffer_seq %d",
	       chan, chan->last_acked_seq, chan->buffer_seq);

	memset(&control, 0, sizeof(control));
	control.sframe = 1;

	if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state) &&
	    chan->rx_state == L2CAP_RX_STATE_RECV) {
		/* Locally busy: tell the remote to stop sending. */
		__clear_ack_timer(chan);
		control.super = L2CAP_SUPER_RNR;
		control.reqseq = chan->buffer_seq;
		l2cap_send_sframe(chan, &control);
	} else {
		if (!test_bit(CONN_REMOTE_BUSY, &chan->conn_state)) {
			l2cap_ertm_send(chan);
			/* If any i-frames were sent, they included an ack */
			if (chan->buffer_seq == chan->last_acked_seq)
				frames_to_ack = 0;
		}

		/* Ack now if the window is 3/4ths full.
		 * Calculate without mul or div
		 */
		threshold = chan->ack_win;
		threshold += threshold << 1;
		threshold >>= 2;

		BT_DBG("frames_to_ack %u, threshold %d", frames_to_ack,
		       threshold);

		if (frames_to_ack >= threshold) {
			__clear_ack_timer(chan);
			control.super = L2CAP_SUPER_RR;
			control.reqseq = chan->buffer_seq;
			l2cap_send_sframe(chan, &control);
			frames_to_ack = 0;
		}

		/* Below threshold: defer the ack to the ack timer. */
		if (frames_to_ack)
			__set_ack_timer(chan);
	}
}
1903
/* Copy @len bytes of user data from @msg into @skb, allocating
 * continuation fragments (up to conn->mtu bytes each) as needed.
 * Returns the number of bytes copied or a negative errno; on error the
 * caller frees the head skb, which owns any fragments already attached.
 */
static inline int l2cap_skbuff_fromiovec(struct l2cap_chan *chan,
					 struct msghdr *msg, int len,
					 int count, struct sk_buff *skb)
{
	struct l2cap_conn *conn = chan->conn;
	struct sk_buff **frag;
	int sent = 0;

	/* The first @count bytes go straight into the head skb. */
	if (memcpy_fromiovec(skb_put(skb, count), msg->msg_iov, count))
		return -EFAULT;

	sent += count;
	len -= count;

	/* Continuation fragments (no L2CAP header) */
	frag = &skb_shinfo(skb)->frag_list;
	while (len) {
		struct sk_buff *tmp;

		count = min_t(unsigned int, conn->mtu, len);

		tmp = chan->ops->alloc_skb(chan, count,
					   msg->msg_flags & MSG_DONTWAIT);
		if (IS_ERR(tmp))
			return PTR_ERR(tmp);

		*frag = tmp;

		if (memcpy_fromiovec(skb_put(*frag, count), msg->msg_iov, count))
			return -EFAULT;

		(*frag)->priority = skb->priority;

		sent += count;
		len -= count;

		/* Keep the head skb's length accounting in sync. */
		skb->len += (*frag)->len;
		skb->data_len += (*frag)->len;

		frag = &(*frag)->next;
	}

	return sent;
}
1948
/* Build a connectionless PDU: L2CAP header plus 2-byte PSM, followed
 * by the user data from @msg.
 */
static struct sk_buff *l2cap_create_connless_pdu(struct l2cap_chan *chan,
						 struct msghdr *msg, size_t len,
						 u32 priority)
{
	struct l2cap_conn *conn = chan->conn;
	struct sk_buff *skb;
	int err, count, hlen = L2CAP_HDR_SIZE + L2CAP_PSMLEN_SIZE;
	struct l2cap_hdr *lh;

	BT_DBG("chan %p len %zu priority %u", chan, len, priority);

	/* Head skb holds the header and as much payload as fits in the
	 * MTU; the rest becomes continuation fragments.
	 */
	count = min_t(unsigned int, (conn->mtu - hlen), len);

	skb = chan->ops->alloc_skb(chan, count + hlen,
				   msg->msg_flags & MSG_DONTWAIT);
	if (IS_ERR(skb))
		return skb;

	skb->priority = priority;

	/* Create L2CAP header */
	lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
	lh->cid = cpu_to_le16(chan->dcid);
	lh->len = cpu_to_le16(len + L2CAP_PSMLEN_SIZE);
	put_unaligned(chan->psm, skb_put(skb, L2CAP_PSMLEN_SIZE));

	err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
	if (unlikely(err < 0)) {
		kfree_skb(skb);
		return ERR_PTR(err);
	}
	return skb;
}
1982
1983 static struct sk_buff *l2cap_create_basic_pdu(struct l2cap_chan *chan,
1984 struct msghdr *msg, size_t len,
1985 u32 priority)
1986 {
1987 struct l2cap_conn *conn = chan->conn;
1988 struct sk_buff *skb;
1989 int err, count;
1990 struct l2cap_hdr *lh;
1991
1992 BT_DBG("chan %p len %zu", chan, len);
1993
1994 count = min_t(unsigned int, (conn->mtu - L2CAP_HDR_SIZE), len);
1995
1996 skb = chan->ops->alloc_skb(chan, count + L2CAP_HDR_SIZE,
1997 msg->msg_flags & MSG_DONTWAIT);
1998 if (IS_ERR(skb))
1999 return skb;
2000
2001 skb->priority = priority;
2002
2003 /* Create L2CAP header */
2004 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
2005 lh->cid = cpu_to_le16(chan->dcid);
2006 lh->len = cpu_to_le16(len);
2007
2008 err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
2009 if (unlikely(err < 0)) {
2010 kfree_skb(skb);
2011 return ERR_PTR(err);
2012 }
2013 return skb;
2014 }
2015
/* Build one I-frame PDU for ERTM/streaming.  The control field is
 * zero-filled here and patched at transmit time; a non-zero @sdulen
 * adds the SDU-length field used by the first segment of an SDU.
 */
static struct sk_buff *l2cap_create_iframe_pdu(struct l2cap_chan *chan,
					       struct msghdr *msg, size_t len,
					       u16 sdulen)
{
	struct l2cap_conn *conn = chan->conn;
	struct sk_buff *skb;
	int err, count, hlen;
	struct l2cap_hdr *lh;

	BT_DBG("chan %p len %zu", chan, len);

	if (!conn)
		return ERR_PTR(-ENOTCONN);

	/* hlen includes the basic L2CAP header plus the control field
	 * (see the L2CAP_HDR_SIZE subtraction below).
	 */
	hlen = __ertm_hdr_size(chan);

	if (sdulen)
		hlen += L2CAP_SDULEN_SIZE;

	if (chan->fcs == L2CAP_FCS_CRC16)
		hlen += L2CAP_FCS_SIZE;

	count = min_t(unsigned int, (conn->mtu - hlen), len);

	skb = chan->ops->alloc_skb(chan, count + hlen,
				   msg->msg_flags & MSG_DONTWAIT);
	if (IS_ERR(skb))
		return skb;

	/* Create L2CAP header */
	lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
	lh->cid = cpu_to_le16(chan->dcid);
	lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));

	/* Control header is populated later */
	if (test_bit(FLAG_EXT_CTRL, &chan->flags))
		put_unaligned_le32(0, skb_put(skb, L2CAP_EXT_CTRL_SIZE));
	else
		put_unaligned_le16(0, skb_put(skb, L2CAP_ENH_CTRL_SIZE));

	if (sdulen)
		put_unaligned_le16(sdulen, skb_put(skb, L2CAP_SDULEN_SIZE));

	err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
	if (unlikely(err < 0)) {
		kfree_skb(skb);
		return ERR_PTR(err);
	}

	bt_cb(skb)->control.fcs = chan->fcs;
	bt_cb(skb)->control.retries = 0;
	return skb;
}
2069
/* Split an SDU from @msg into one or more I-frame PDUs on @seg_queue,
 * applying SAR (start/continue/end) markings.  Returns 0 or a negative
 * errno; the queue is purged on error.
 */
static int l2cap_segment_sdu(struct l2cap_chan *chan,
			     struct sk_buff_head *seg_queue,
			     struct msghdr *msg, size_t len)
{
	struct sk_buff *skb;
	u16 sdu_len;
	size_t pdu_len;
	u8 sar;

	BT_DBG("chan %p, msg %p, len %zu", chan, msg, len);

	/* It is critical that ERTM PDUs fit in a single HCI fragment,
	 * so fragmented skbs are not used. The HCI layer's handling
	 * of fragmented skbs is not compatible with ERTM's queueing.
	 */

	/* PDU size is derived from the HCI MTU */
	pdu_len = chan->conn->mtu;

	pdu_len = min_t(size_t, pdu_len, L2CAP_BREDR_MAX_PAYLOAD);

	/* Adjust for largest possible L2CAP overhead. */
	if (chan->fcs)
		pdu_len -= L2CAP_FCS_SIZE;

	pdu_len -= __ertm_hdr_size(chan);

	/* Remote device may have requested smaller PDUs */
	pdu_len = min_t(size_t, pdu_len, chan->remote_mps);

	if (len <= pdu_len) {
		/* Fits in one PDU: no SDU-length field needed. */
		sar = L2CAP_SAR_UNSEGMENTED;
		sdu_len = 0;
		pdu_len = len;
	} else {
		sar = L2CAP_SAR_START;
		sdu_len = len;
		pdu_len -= L2CAP_SDULEN_SIZE;
	}

	while (len > 0) {
		skb = l2cap_create_iframe_pdu(chan, msg, pdu_len, sdu_len);

		if (IS_ERR(skb)) {
			__skb_queue_purge(seg_queue);
			return PTR_ERR(skb);
		}

		bt_cb(skb)->control.sar = sar;
		__skb_queue_tail(seg_queue, skb);

		len -= pdu_len;
		if (sdu_len) {
			/* Only the START segment carries the SDU length;
			 * later segments get that space back for payload.
			 */
			sdu_len = 0;
			pdu_len += L2CAP_SDULEN_SIZE;
		}

		if (len <= pdu_len) {
			sar = L2CAP_SAR_END;
			pdu_len = len;
		} else {
			sar = L2CAP_SAR_CONTINUE;
		}
	}

	return 0;
}
2137
/* Entry point for sending user data on @chan.  Dispatches on channel
 * type and mode; returns the number of bytes queued/sent or a negative
 * errno.
 */
int l2cap_chan_send(struct l2cap_chan *chan, struct msghdr *msg, size_t len,
		    u32 priority)
{
	struct sk_buff *skb;
	int err;
	struct sk_buff_head seg_queue;

	/* Connectionless channel */
	if (chan->chan_type == L2CAP_CHAN_CONN_LESS) {
		skb = l2cap_create_connless_pdu(chan, msg, len, priority);
		if (IS_ERR(skb))
			return PTR_ERR(skb);

		l2cap_do_send(chan, skb);
		return len;
	}

	switch (chan->mode) {
	case L2CAP_MODE_BASIC:
		/* Check outgoing MTU */
		if (len > chan->omtu)
			return -EMSGSIZE;

		/* Create a basic PDU */
		skb = l2cap_create_basic_pdu(chan, msg, len, priority);
		if (IS_ERR(skb))
			return PTR_ERR(skb);

		l2cap_do_send(chan, skb);
		err = len;
		break;

	case L2CAP_MODE_ERTM:
	case L2CAP_MODE_STREAMING:
		/* Check outgoing MTU */
		if (len > chan->omtu) {
			err = -EMSGSIZE;
			break;
		}

		__skb_queue_head_init(&seg_queue);

		/* Do segmentation before calling in to the state machine,
		 * since it's possible to block while waiting for memory
		 * allocation.
		 */
		err = l2cap_segment_sdu(chan, &seg_queue, msg, len);

		/* The channel could have been closed while segmenting,
		 * check that it is still connected.
		 */
		if (chan->state != BT_CONNECTED) {
			__skb_queue_purge(&seg_queue);
			err = -ENOTCONN;
		}

		if (err)
			break;

		/* ERTM goes through the TX state machine; streaming
		 * transmits immediately.
		 */
		if (chan->mode == L2CAP_MODE_ERTM)
			l2cap_tx(chan, NULL, &seg_queue, L2CAP_EV_DATA_REQUEST);
		else
			l2cap_streaming_send(chan, &seg_queue);

		err = len;

		/* If the skbs were not queued for sending, they'll still be in
		 * seg_queue and need to be purged.
		 */
		__skb_queue_purge(&seg_queue);
		break;

	default:
		BT_DBG("bad state %1.1x", chan->mode);
		err = -EBADFD;
	}

	return err;
}
2217
/* SREJ every missing frame between the one we expected and @txseq,
 * skipping frames already buffered out-of-order in srej_q.
 */
static void l2cap_send_srej(struct l2cap_chan *chan, u16 txseq)
{
	struct l2cap_ctrl control;
	u16 seq;

	BT_DBG("chan %p, txseq %u", chan, txseq);

	memset(&control, 0, sizeof(control));
	control.sframe = 1;
	control.super = L2CAP_SUPER_SREJ;

	for (seq = chan->expected_tx_seq; seq != txseq;
	     seq = __next_seq(chan, seq)) {
		if (!l2cap_ertm_seq_in_queue(&chan->srej_q, seq)) {
			control.reqseq = seq;
			l2cap_send_sframe(chan, &control);
			/* Remember what we asked for. */
			l2cap_seq_list_append(&chan->srej_list, seq);
		}
	}

	chan->expected_tx_seq = __next_seq(chan, txseq);
}
2240
/* Re-send the SREJ for the most recently requested missing frame. */
static void l2cap_send_srej_tail(struct l2cap_chan *chan)
{
	struct l2cap_ctrl control;

	BT_DBG("chan %p", chan);

	/* Nothing outstanding: nothing to re-request. */
	if (chan->srej_list.tail == L2CAP_SEQ_LIST_CLEAR)
		return;

	memset(&control, 0, sizeof(control));
	control.sframe = 1;
	control.super = L2CAP_SUPER_SREJ;
	control.reqseq = chan->srej_list.tail;
	l2cap_send_sframe(chan, &control);
}
2256
/* Re-send SREJs for every outstanding missing frame up to (but not
 * including) @txseq, cycling each entry back onto the list.
 */
static void l2cap_send_srej_list(struct l2cap_chan *chan, u16 txseq)
{
	struct l2cap_ctrl control;
	u16 initial_head;
	u16 seq;

	BT_DBG("chan %p, txseq %u", chan, txseq);

	memset(&control, 0, sizeof(control));
	control.sframe = 1;
	control.super = L2CAP_SUPER_SREJ;

	/* Capture initial list head to allow only one pass through the list. */
	initial_head = chan->srej_list.head;

	do {
		seq = l2cap_seq_list_pop(&chan->srej_list);
		if (seq == txseq || seq == L2CAP_SEQ_LIST_CLEAR)
			break;

		control.reqseq = seq;
		l2cap_send_sframe(chan, &control);
		/* Still missing: put it back on the list. */
		l2cap_seq_list_append(&chan->srej_list, seq);
	} while (chan->srej_list.head != initial_head);
}
2282
/* Process an incoming ack (reqseq): free every newly-acked frame from
 * the TX queue and stop the retransmission timer when nothing remains
 * outstanding.
 */
static void l2cap_process_reqseq(struct l2cap_chan *chan, u16 reqseq)
{
	struct sk_buff *acked_skb;
	u16 ackseq;

	BT_DBG("chan %p, reqseq %u", chan, reqseq);

	/* Nothing outstanding, or this ack was already processed. */
	if (chan->unacked_frames == 0 || reqseq == chan->expected_ack_seq)
		return;

	BT_DBG("expected_ack_seq %u, unacked_frames %u",
	       chan->expected_ack_seq, chan->unacked_frames);

	for (ackseq = chan->expected_ack_seq; ackseq != reqseq;
	     ackseq = __next_seq(chan, ackseq)) {

		acked_skb = l2cap_ertm_seq_in_queue(&chan->tx_q, ackseq);
		if (acked_skb) {
			skb_unlink(acked_skb, &chan->tx_q);
			kfree_skb(acked_skb);
			chan->unacked_frames--;
		}
	}

	chan->expected_ack_seq = reqseq;

	/* Everything acked: no retransmission pending. */
	if (chan->unacked_frames == 0)
		__clear_retrans_timer(chan);

	BT_DBG("unacked_frames %u", chan->unacked_frames);
}
2314
/* Abandon the SREJ_SENT receive state: reset the expected sequence,
 * forget pending SREJs, drop buffered out-of-order frames and return
 * to the plain RECV state.
 */
static void l2cap_abort_rx_srej_sent(struct l2cap_chan *chan)
{
	BT_DBG("chan %p", chan);

	chan->expected_tx_seq = chan->buffer_seq;
	l2cap_seq_list_clear(&chan->srej_list);
	skb_queue_purge(&chan->srej_q);
	chan->rx_state = L2CAP_RX_STATE_RECV;
}
2324
/* TX state machine, XMIT state: I-frames may be transmitted freely. */
static void l2cap_tx_state_xmit(struct l2cap_chan *chan,
				struct l2cap_ctrl *control,
				struct sk_buff_head *skbs, u8 event)
{
	BT_DBG("chan %p, control %p, skbs %p, event %d", chan, control, skbs,
	       event);

	switch (event) {
	case L2CAP_EV_DATA_REQUEST:
		if (chan->tx_send_head == NULL)
			chan->tx_send_head = skb_peek(skbs);

		skb_queue_splice_tail_init(skbs, &chan->tx_q);
		l2cap_ertm_send(chan);
		break;
	case L2CAP_EV_LOCAL_BUSY_DETECTED:
		BT_DBG("Enter LOCAL_BUSY");
		set_bit(CONN_LOCAL_BUSY, &chan->conn_state);

		if (chan->rx_state == L2CAP_RX_STATE_SREJ_SENT) {
			/* The SREJ_SENT state must be aborted if we are to
			 * enter the LOCAL_BUSY state.
			 */
			l2cap_abort_rx_srej_sent(chan);
		}

		/* l2cap_send_ack() emits RNR while local-busy is set. */
		l2cap_send_ack(chan);

		break;
	case L2CAP_EV_LOCAL_BUSY_CLEAR:
		BT_DBG("Exit LOCAL_BUSY");
		clear_bit(CONN_LOCAL_BUSY, &chan->conn_state);

		if (test_bit(CONN_RNR_SENT, &chan->conn_state)) {
			struct l2cap_ctrl local_control;

			/* Poll the remote to recover from the busy period. */
			memset(&local_control, 0, sizeof(local_control));
			local_control.sframe = 1;
			local_control.super = L2CAP_SUPER_RR;
			local_control.poll = 1;
			local_control.reqseq = chan->buffer_seq;
			l2cap_send_sframe(chan, &local_control);

			chan->retry_count = 1;
			__set_monitor_timer(chan);
			chan->tx_state = L2CAP_TX_STATE_WAIT_F;
		}
		break;
	case L2CAP_EV_RECV_REQSEQ_AND_FBIT:
		l2cap_process_reqseq(chan, control->reqseq);
		break;
	case L2CAP_EV_EXPLICIT_POLL:
		l2cap_send_rr_or_rnr(chan, 1);
		chan->retry_count = 1;
		__set_monitor_timer(chan);
		__clear_ack_timer(chan);
		chan->tx_state = L2CAP_TX_STATE_WAIT_F;
		break;
	case L2CAP_EV_RETRANS_TO:
		/* Retransmission timeout: poll the remote (P=1). */
		l2cap_send_rr_or_rnr(chan, 1);
		chan->retry_count = 1;
		__set_monitor_timer(chan);
		chan->tx_state = L2CAP_TX_STATE_WAIT_F;
		break;
	case L2CAP_EV_RECV_FBIT:
		/* Nothing to process */
		break;
	default:
		break;
	}
}
2396
/* TX state machine, WAIT_F state: a poll (P=1) is outstanding and new
 * transmissions are held until a frame with the F-bit set arrives.
 */
static void l2cap_tx_state_wait_f(struct l2cap_chan *chan,
				  struct l2cap_ctrl *control,
				  struct sk_buff_head *skbs, u8 event)
{
	BT_DBG("chan %p, control %p, skbs %p, event %d", chan, control, skbs,
	       event);

	switch (event) {
	case L2CAP_EV_DATA_REQUEST:
		if (chan->tx_send_head == NULL)
			chan->tx_send_head = skb_peek(skbs);
		/* Queue data, but don't send. */
		skb_queue_splice_tail_init(skbs, &chan->tx_q);
		break;
	case L2CAP_EV_LOCAL_BUSY_DETECTED:
		BT_DBG("Enter LOCAL_BUSY");
		set_bit(CONN_LOCAL_BUSY, &chan->conn_state);

		if (chan->rx_state == L2CAP_RX_STATE_SREJ_SENT) {
			/* The SREJ_SENT state must be aborted if we are to
			 * enter the LOCAL_BUSY state.
			 */
			l2cap_abort_rx_srej_sent(chan);
		}

		/* l2cap_send_ack() emits RNR while local-busy is set. */
		l2cap_send_ack(chan);

		break;
	case L2CAP_EV_LOCAL_BUSY_CLEAR:
		BT_DBG("Exit LOCAL_BUSY");
		clear_bit(CONN_LOCAL_BUSY, &chan->conn_state);

		if (test_bit(CONN_RNR_SENT, &chan->conn_state)) {
			struct l2cap_ctrl local_control;
			/* Poll the remote to recover from the busy period. */
			memset(&local_control, 0, sizeof(local_control));
			local_control.sframe = 1;
			local_control.super = L2CAP_SUPER_RR;
			local_control.poll = 1;
			local_control.reqseq = chan->buffer_seq;
			l2cap_send_sframe(chan, &local_control);

			chan->retry_count = 1;
			__set_monitor_timer(chan);
			chan->tx_state = L2CAP_TX_STATE_WAIT_F;
		}
		break;
	case L2CAP_EV_RECV_REQSEQ_AND_FBIT:
		l2cap_process_reqseq(chan, control->reqseq);

		/* Fall through */

	case L2CAP_EV_RECV_FBIT:
		if (control && control->final) {
			/* Poll answered: back to normal transmission. */
			__clear_monitor_timer(chan);
			if (chan->unacked_frames > 0)
				__set_retrans_timer(chan);
			chan->retry_count = 0;
			chan->tx_state = L2CAP_TX_STATE_XMIT;
			BT_DBG("recv fbit tx_state 0x2.2%x", chan->tx_state);
		}
		break;
	case L2CAP_EV_EXPLICIT_POLL:
		/* Ignore */
		break;
	case L2CAP_EV_MONITOR_TO:
		/* No F-bit yet: re-poll until max_tx is exhausted. */
		if (chan->max_tx == 0 || chan->retry_count < chan->max_tx) {
			l2cap_send_rr_or_rnr(chan, 1);
			__set_monitor_timer(chan);
			chan->retry_count++;
		} else {
			l2cap_send_disconn_req(chan->conn, chan, ECONNABORTED);
		}
		break;
	default:
		break;
	}
}
2474
2475 static void l2cap_tx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
2476 struct sk_buff_head *skbs, u8 event)
2477 {
2478 BT_DBG("chan %p, control %p, skbs %p, event %d, state %d",
2479 chan, control, skbs, event, chan->tx_state);
2480
2481 switch (chan->tx_state) {
2482 case L2CAP_TX_STATE_XMIT:
2483 l2cap_tx_state_xmit(chan, control, skbs, event);
2484 break;
2485 case L2CAP_TX_STATE_WAIT_F:
2486 l2cap_tx_state_wait_f(chan, control, skbs, event);
2487 break;
2488 default:
2489 /* Ignore event */
2490 break;
2491 }
2492 }
2493
2494 static void l2cap_pass_to_tx(struct l2cap_chan *chan,
2495 struct l2cap_ctrl *control)
2496 {
2497 BT_DBG("chan %p, control %p", chan, control);
2498 l2cap_tx(chan, control, NULL, L2CAP_EV_RECV_REQSEQ_AND_FBIT);
2499 }
2500
2501 static void l2cap_pass_to_tx_fbit(struct l2cap_chan *chan,
2502 struct l2cap_ctrl *control)
2503 {
2504 BT_DBG("chan %p, control %p", chan, control);
2505 l2cap_tx(chan, control, NULL, L2CAP_EV_RECV_FBIT);
2506 }
2507
2508 /* Copy frame to all raw sockets on that connection */
2509 static void l2cap_raw_recv(struct l2cap_conn *conn, struct sk_buff *skb)
2510 {
2511 struct sk_buff *nskb;
2512 struct l2cap_chan *chan;
2513
2514 BT_DBG("conn %p", conn);
2515
2516 mutex_lock(&conn->chan_lock);
2517
2518 list_for_each_entry(chan, &conn->chan_l, list) {
2519 struct sock *sk = chan->sk;
2520 if (chan->chan_type != L2CAP_CHAN_RAW)
2521 continue;
2522
2523 /* Don't send frame to the socket it came from */
2524 if (skb->sk == sk)
2525 continue;
2526 nskb = skb_clone(skb, GFP_ATOMIC);
2527 if (!nskb)
2528 continue;
2529
2530 if (chan->ops->recv(chan, nskb))
2531 kfree_skb(nskb);
2532 }
2533
2534 mutex_unlock(&conn->chan_lock);
2535 }
2536
2537 /* ---- L2CAP signalling commands ---- */
/* Allocate and fill a signalling command PDU: L2CAP header + command
 * header + dlen payload bytes from data.  Payload that does not fit in
 * the connection MTU is carried in continuation fragments chained on
 * the skb's frag_list.
 *
 * Returns the skb on success or NULL on allocation failure.
 */
static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn, u8 code,
				       u8 ident, u16 dlen, void *data)
{
	struct sk_buff *skb, **frag;
	struct l2cap_cmd_hdr *cmd;
	struct l2cap_hdr *lh;
	int len, count;

	BT_DBG("conn %p, code 0x%2.2x, ident 0x%2.2x, len %u",
	       conn, code, ident, dlen);

	len = L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE + dlen;
	count = min_t(unsigned int, conn->mtu, len);

	skb = bt_skb_alloc(count, GFP_ATOMIC);
	if (!skb)
		return NULL;

	lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
	lh->len = cpu_to_le16(L2CAP_CMD_HDR_SIZE + dlen);

	/* Signalling uses a fixed CID that differs for LE links */
	if (conn->hcon->type == LE_LINK)
		lh->cid = __constant_cpu_to_le16(L2CAP_CID_LE_SIGNALING);
	else
		lh->cid = __constant_cpu_to_le16(L2CAP_CID_SIGNALING);

	cmd = (struct l2cap_cmd_hdr *) skb_put(skb, L2CAP_CMD_HDR_SIZE);
	cmd->code = code;
	cmd->ident = ident;
	cmd->len = cpu_to_le16(dlen);

	if (dlen) {
		/* The first fragment also carries both headers */
		count -= L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE;
		memcpy(skb_put(skb, count), data, count);
		data += count;
	}

	/* Remaining payload bytes still to copy */
	len -= skb->len;

	/* Continuation fragments (no L2CAP header) */
	frag = &skb_shinfo(skb)->frag_list;
	while (len) {
		count = min_t(unsigned int, conn->mtu, len);

		*frag = bt_skb_alloc(count, GFP_ATOMIC);
		if (!*frag)
			goto fail;

		memcpy(skb_put(*frag, count), data, count);

		len -= count;
		data += count;

		frag = &(*frag)->next;
	}

	return skb;

fail:
	/* Frees the head skb and any fragments already chained to it */
	kfree_skb(skb);
	return NULL;
}
2600
/* Decode the configuration option at *ptr and advance *ptr past it.
 * Options of length 1/2/4 are returned by value in *val (read
 * little-endian, unaligned-safe); any other length returns a pointer to
 * the raw option bytes in *val instead.
 *
 * NOTE(review): opt->len comes from the remote peer and is not checked
 * against the remaining buffer length here; callers bound their loops
 * only by the total length, so a crafted option length can advance *ptr
 * and *val past the buffer.  TODO: confirm callers validate this.
 *
 * Returns the total number of bytes consumed (header + value).
 */
static inline int l2cap_get_conf_opt(void **ptr, int *type, int *olen, unsigned long *val)
{
	struct l2cap_conf_opt *opt = *ptr;
	int len;

	len = L2CAP_CONF_OPT_SIZE + opt->len;
	*ptr += len;

	*type = opt->type;
	*olen = opt->len;

	switch (opt->len) {
	case 1:
		*val = *((u8 *) opt->val);
		break;

	case 2:
		*val = get_unaligned_le16(opt->val);
		break;

	case 4:
		*val = get_unaligned_le32(opt->val);
		break;

	default:
		/* Odd-sized option: hand back a pointer to the raw bytes */
		*val = (unsigned long) opt->val;
		break;
	}

	BT_DBG("type 0x%2.2x len %u val 0x%lx", *type, opt->len, *val);
	return len;
}
2633
/* Append one configuration option at *ptr and advance *ptr past it.
 * Values of length 1/2/4 are stored little-endian from val; any other
 * length treats val as a pointer to len raw bytes to copy.
 *
 * NOTE(review): no output bound is checked here -- the caller must
 * guarantee the buffer has room for L2CAP_CONF_OPT_SIZE + len bytes.
 */
static void l2cap_add_conf_opt(void **ptr, u8 type, u8 len, unsigned long val)
{
	struct l2cap_conf_opt *opt = *ptr;

	BT_DBG("type 0x%2.2x len %u val 0x%lx", type, len, val);

	opt->type = type;
	opt->len = len;

	switch (len) {
	case 1:
		*((u8 *) opt->val) = val;
		break;

	case 2:
		put_unaligned_le16(val, opt->val);
		break;

	case 4:
		put_unaligned_le32(val, opt->val);
		break;

	default:
		/* val is a pointer to the raw option data in this case */
		memcpy(opt->val, (void *) val, len);
		break;
	}

	*ptr += L2CAP_CONF_OPT_SIZE + len;
}
2663
2664 static void l2cap_add_opt_efs(void **ptr, struct l2cap_chan *chan)
2665 {
2666 struct l2cap_conf_efs efs;
2667
2668 switch (chan->mode) {
2669 case L2CAP_MODE_ERTM:
2670 efs.id = chan->local_id;
2671 efs.stype = chan->local_stype;
2672 efs.msdu = cpu_to_le16(chan->local_msdu);
2673 efs.sdu_itime = cpu_to_le32(chan->local_sdu_itime);
2674 efs.acc_lat = __constant_cpu_to_le32(L2CAP_DEFAULT_ACC_LAT);
2675 efs.flush_to = __constant_cpu_to_le32(L2CAP_DEFAULT_FLUSH_TO);
2676 break;
2677
2678 case L2CAP_MODE_STREAMING:
2679 efs.id = 1;
2680 efs.stype = L2CAP_SERV_BESTEFFORT;
2681 efs.msdu = cpu_to_le16(chan->local_msdu);
2682 efs.sdu_itime = cpu_to_le32(chan->local_sdu_itime);
2683 efs.acc_lat = 0;
2684 efs.flush_to = 0;
2685 break;
2686
2687 default:
2688 return;
2689 }
2690
2691 l2cap_add_conf_opt(ptr, L2CAP_CONF_EFS, sizeof(efs),
2692 (unsigned long) &efs);
2693 }
2694
2695 static void l2cap_ack_timeout(struct work_struct *work)
2696 {
2697 struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
2698 ack_timer.work);
2699 u16 frames_to_ack;
2700
2701 BT_DBG("chan %p", chan);
2702
2703 l2cap_chan_lock(chan);
2704
2705 frames_to_ack = __seq_offset(chan, chan->buffer_seq,
2706 chan->last_acked_seq);
2707
2708 if (frames_to_ack)
2709 l2cap_send_rr_or_rnr(chan, 0);
2710
2711 l2cap_chan_unlock(chan);
2712 l2cap_chan_put(chan);
2713 }
2714
2715 int l2cap_ertm_init(struct l2cap_chan *chan)
2716 {
2717 int err;
2718
2719 chan->next_tx_seq = 0;
2720 chan->expected_tx_seq = 0;
2721 chan->expected_ack_seq = 0;
2722 chan->unacked_frames = 0;
2723 chan->buffer_seq = 0;
2724 chan->frames_sent = 0;
2725 chan->last_acked_seq = 0;
2726 chan->sdu = NULL;
2727 chan->sdu_last_frag = NULL;
2728 chan->sdu_len = 0;
2729
2730 skb_queue_head_init(&chan->tx_q);
2731
2732 if (chan->mode != L2CAP_MODE_ERTM)
2733 return 0;
2734
2735 chan->rx_state = L2CAP_RX_STATE_RECV;
2736 chan->tx_state = L2CAP_TX_STATE_XMIT;
2737
2738 INIT_DELAYED_WORK(&chan->retrans_timer, l2cap_retrans_timeout);
2739 INIT_DELAYED_WORK(&chan->monitor_timer, l2cap_monitor_timeout);
2740 INIT_DELAYED_WORK(&chan->ack_timer, l2cap_ack_timeout);
2741
2742 skb_queue_head_init(&chan->srej_q);
2743
2744 err = l2cap_seq_list_init(&chan->srej_list, chan->tx_win);
2745 if (err < 0)
2746 return err;
2747
2748 err = l2cap_seq_list_init(&chan->retrans_list, chan->remote_tx_win);
2749 if (err < 0)
2750 l2cap_seq_list_free(&chan->srej_list);
2751
2752 return err;
2753 }
2754
2755 static inline __u8 l2cap_select_mode(__u8 mode, __u16 remote_feat_mask)
2756 {
2757 switch (mode) {
2758 case L2CAP_MODE_STREAMING:
2759 case L2CAP_MODE_ERTM:
2760 if (l2cap_mode_supported(mode, remote_feat_mask))
2761 return mode;
2762 /* fall through */
2763 default:
2764 return L2CAP_MODE_BASIC;
2765 }
2766 }
2767
2768 static inline bool __l2cap_ews_supported(struct l2cap_chan *chan)
2769 {
2770 return enable_hs && chan->conn->feat_mask & L2CAP_FEAT_EXT_WINDOW;
2771 }
2772
2773 static inline bool __l2cap_efs_supported(struct l2cap_chan *chan)
2774 {
2775 return enable_hs && chan->conn->feat_mask & L2CAP_FEAT_EXT_FLOW;
2776 }
2777
2778 static inline void l2cap_txwin_setup(struct l2cap_chan *chan)
2779 {
2780 if (chan->tx_win > L2CAP_DEFAULT_TX_WINDOW &&
2781 __l2cap_ews_supported(chan)) {
2782 /* use extended control field */
2783 set_bit(FLAG_EXT_CTRL, &chan->flags);
2784 chan->tx_win_max = L2CAP_DEFAULT_EXT_WINDOW;
2785 } else {
2786 chan->tx_win = min_t(u16, chan->tx_win,
2787 L2CAP_DEFAULT_TX_WINDOW);
2788 chan->tx_win_max = L2CAP_DEFAULT_TX_WINDOW;
2789 }
2790 chan->ack_win = chan->tx_win;
2791 }
2792
/* Build our configure request for a channel into data.  On the very
 * first request the channel mode is settled against the remote's
 * feature mask; then MTU, RFC, EFS, FCS and EWS options are emitted as
 * appropriate for the chosen mode.
 *
 * Returns the length of the request written into data.
 */
static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data)
{
	struct l2cap_conf_req *req = data;
	struct l2cap_conf_rfc rfc = { .mode = chan->mode };
	void *ptr = req->data;
	u16 size;

	BT_DBG("chan %p", chan);

	/* Mode is only (re)negotiated on the first exchange */
	if (chan->num_conf_req || chan->num_conf_rsp)
		goto done;

	switch (chan->mode) {
	case L2CAP_MODE_STREAMING:
	case L2CAP_MODE_ERTM:
		/* STATE2 devices keep their configured mode as-is */
		if (test_bit(CONF_STATE2_DEVICE, &chan->conf_state))
			break;

		if (__l2cap_efs_supported(chan))
			set_bit(FLAG_EFS_ENABLE, &chan->flags);

		/* fall through */
	default:
		chan->mode = l2cap_select_mode(rfc.mode, chan->conn->feat_mask);
		break;
	}

done:
	/* MTU option is only needed when we deviate from the default */
	if (chan->imtu != L2CAP_DEFAULT_MTU)
		l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->imtu);

	switch (chan->mode) {
	case L2CAP_MODE_BASIC:
		/* Old-style remotes get no RFC option at all */
		if (!(chan->conn->feat_mask & L2CAP_FEAT_ERTM) &&
		    !(chan->conn->feat_mask & L2CAP_FEAT_STREAMING))
			break;

		rfc.mode            = L2CAP_MODE_BASIC;
		rfc.txwin_size      = 0;
		rfc.max_transmit    = 0;
		rfc.retrans_timeout = 0;
		rfc.monitor_timeout = 0;
		rfc.max_pdu_size    = 0;

		l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
				   (unsigned long) &rfc);
		break;

	case L2CAP_MODE_ERTM:
		rfc.mode            = L2CAP_MODE_ERTM;
		rfc.max_transmit    = chan->max_tx;
		rfc.retrans_timeout = 0;
		rfc.monitor_timeout = 0;

		/* PDU size is bounded by the link MTU minus worst-case
		 * header overhead (extended header + SDU length + FCS).
		 */
		size = min_t(u16, L2CAP_DEFAULT_MAX_PDU_SIZE, chan->conn->mtu -
			     L2CAP_EXT_HDR_SIZE -
			     L2CAP_SDULEN_SIZE -
			     L2CAP_FCS_SIZE);
		rfc.max_pdu_size = cpu_to_le16(size);

		l2cap_txwin_setup(chan);

		rfc.txwin_size = min_t(u16, chan->tx_win,
				       L2CAP_DEFAULT_TX_WINDOW);

		l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
				   (unsigned long) &rfc);

		if (test_bit(FLAG_EFS_ENABLE, &chan->flags))
			l2cap_add_opt_efs(&ptr, chan);

		if (!(chan->conn->feat_mask & L2CAP_FEAT_FCS))
			break;

		/* Ask to drop the FCS if neither side needs it */
		if (chan->fcs == L2CAP_FCS_NONE ||
		    test_bit(CONF_NO_FCS_RECV, &chan->conf_state)) {
			chan->fcs = L2CAP_FCS_NONE;
			l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1, chan->fcs);
		}

		/* Extended window sizes travel in a separate EWS option */
		if (test_bit(FLAG_EXT_CTRL, &chan->flags))
			l2cap_add_conf_opt(&ptr, L2CAP_CONF_EWS, 2,
					   chan->tx_win);
		break;

	case L2CAP_MODE_STREAMING:
		l2cap_txwin_setup(chan);
		rfc.mode            = L2CAP_MODE_STREAMING;
		rfc.txwin_size      = 0;
		rfc.max_transmit    = 0;
		rfc.retrans_timeout = 0;
		rfc.monitor_timeout = 0;

		size = min_t(u16, L2CAP_DEFAULT_MAX_PDU_SIZE, chan->conn->mtu -
			     L2CAP_EXT_HDR_SIZE -
			     L2CAP_SDULEN_SIZE -
			     L2CAP_FCS_SIZE);
		rfc.max_pdu_size = cpu_to_le16(size);

		l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
				   (unsigned long) &rfc);

		if (test_bit(FLAG_EFS_ENABLE, &chan->flags))
			l2cap_add_opt_efs(&ptr, chan);

		if (!(chan->conn->feat_mask & L2CAP_FEAT_FCS))
			break;

		if (chan->fcs == L2CAP_FCS_NONE ||
		    test_bit(CONF_NO_FCS_RECV, &chan->conf_state)) {
			chan->fcs = L2CAP_FCS_NONE;
			l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1, chan->fcs);
		}
		break;
	}

	req->dcid  = cpu_to_le16(chan->dcid);
	req->flags = __constant_cpu_to_le16(0);

	return ptr - data;
}
2914
/* Parse the remote's accumulated configure request (chan->conf_req,
 * chan->conf_len bytes) and build our configure response into data.
 * May adjust the channel mode to what the remote proposed and adopt
 * remote ERTM/streaming/EFS parameters.
 *
 * NOTE(review): options parsed here come from the peer; option values
 * are trusted once the length matches -- see the bounds caveat on
 * l2cap_get_conf_opt().
 *
 * Returns the response length, or -ECONNREFUSED when negotiation must
 * fail (unsupported mode/EFS, or retries exhausted).
 */
static int l2cap_parse_conf_req(struct l2cap_chan *chan, void *data)
{
	struct l2cap_conf_rsp *rsp = data;
	void *ptr = rsp->data;
	void *req = chan->conf_req;
	int len = chan->conf_len;
	int type, hint, olen;
	unsigned long val;
	struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
	struct l2cap_conf_efs efs;
	u8 remote_efs = 0;
	u16 mtu = L2CAP_DEFAULT_MTU;
	u16 result = L2CAP_CONF_SUCCESS;
	u16 size;

	BT_DBG("chan %p", chan);

	/* First pass: collect every option the remote sent */
	while (len >= L2CAP_CONF_OPT_SIZE) {
		len -= l2cap_get_conf_opt(&req, &type, &olen, &val);

		hint = type & L2CAP_CONF_HINT;
		type &= L2CAP_CONF_MASK;

		switch (type) {
		case L2CAP_CONF_MTU:
			mtu = val;
			break;

		case L2CAP_CONF_FLUSH_TO:
			chan->flush_to = val;
			break;

		case L2CAP_CONF_QOS:
			break;

		case L2CAP_CONF_RFC:
			if (olen == sizeof(rfc))
				memcpy(&rfc, (void *) val, olen);
			break;

		case L2CAP_CONF_FCS:
			if (val == L2CAP_FCS_NONE)
				set_bit(CONF_NO_FCS_RECV, &chan->conf_state);
			break;

		case L2CAP_CONF_EFS:
			remote_efs = 1;
			if (olen == sizeof(efs))
				memcpy(&efs, (void *) val, olen);
			break;

		case L2CAP_CONF_EWS:
			/* Extended window requires high-speed support */
			if (!enable_hs)
				return -ECONNREFUSED;

			set_bit(FLAG_EXT_CTRL, &chan->flags);
			set_bit(CONF_EWS_RECV, &chan->conf_state);
			chan->tx_win_max = L2CAP_DEFAULT_EXT_WINDOW;
			chan->remote_tx_win = val;
			break;

		default:
			/* Hints may be ignored; other unknown options are
			 * reported back in an UNKNOWN response.
			 */
			if (hint)
				break;

			result = L2CAP_CONF_UNKNOWN;
			*((u8 *) ptr++) = type;
			break;
		}
	}

	/* Mode is only renegotiated during the first exchange */
	if (chan->num_conf_rsp || chan->num_conf_req > 1)
		goto done;

	switch (chan->mode) {
	case L2CAP_MODE_STREAMING:
	case L2CAP_MODE_ERTM:
		if (!test_bit(CONF_STATE2_DEVICE, &chan->conf_state)) {
			chan->mode = l2cap_select_mode(rfc.mode,
						       chan->conn->feat_mask);
			break;
		}

		if (remote_efs) {
			if (__l2cap_efs_supported(chan))
				set_bit(FLAG_EFS_ENABLE, &chan->flags);
			else
				return -ECONNREFUSED;
		}

		/* STATE2 devices refuse any mode other than their own */
		if (chan->mode != rfc.mode)
			return -ECONNREFUSED;

		break;
	}

done:
	if (chan->mode != rfc.mode) {
		result = L2CAP_CONF_UNACCEPT;
		rfc.mode = chan->mode;

		/* Only one renegotiation round is allowed */
		if (chan->num_conf_rsp == 1)
			return -ECONNREFUSED;

		l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
				   sizeof(rfc), (unsigned long) &rfc);
	}

	if (result == L2CAP_CONF_SUCCESS) {
		/* Configure output options and let the other side know
		 * which ones we don't like. */

		if (mtu < L2CAP_DEFAULT_MIN_MTU)
			result = L2CAP_CONF_UNACCEPT;
		else {
			chan->omtu = mtu;
			set_bit(CONF_MTU_DONE, &chan->conf_state);
		}
		l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->omtu);

		if (remote_efs) {
			/* Service types must be compatible (or one side
			 * carries no traffic) for the EFS to be accepted.
			 */
			if (chan->local_stype != L2CAP_SERV_NOTRAFIC &&
			    efs.stype != L2CAP_SERV_NOTRAFIC &&
			    efs.stype != chan->local_stype) {

				result = L2CAP_CONF_UNACCEPT;

				if (chan->num_conf_req >= 1)
					return -ECONNREFUSED;

				l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS,
						   sizeof(efs),
						   (unsigned long) &efs);
			} else {
				/* Send PENDING Conf Rsp */
				result = L2CAP_CONF_PENDING;
				set_bit(CONF_LOC_CONF_PEND, &chan->conf_state);
			}
		}

		switch (rfc.mode) {
		case L2CAP_MODE_BASIC:
			chan->fcs = L2CAP_FCS_NONE;
			set_bit(CONF_MODE_DONE, &chan->conf_state);
			break;

		case L2CAP_MODE_ERTM:
			/* EWS, when received, supersedes the RFC window */
			if (!test_bit(CONF_EWS_RECV, &chan->conf_state))
				chan->remote_tx_win = rfc.txwin_size;
			else
				rfc.txwin_size = L2CAP_DEFAULT_TX_WINDOW;

			chan->remote_max_tx = rfc.max_transmit;

			size = min_t(u16, le16_to_cpu(rfc.max_pdu_size),
				     chan->conn->mtu -
				     L2CAP_EXT_HDR_SIZE -
				     L2CAP_SDULEN_SIZE -
				     L2CAP_FCS_SIZE);
			rfc.max_pdu_size = cpu_to_le16(size);
			chan->remote_mps = size;

			rfc.retrans_timeout =
				__constant_cpu_to_le16(L2CAP_DEFAULT_RETRANS_TO);
			rfc.monitor_timeout =
				__constant_cpu_to_le16(L2CAP_DEFAULT_MONITOR_TO);

			set_bit(CONF_MODE_DONE, &chan->conf_state);

			l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
					   sizeof(rfc), (unsigned long) &rfc);

			if (test_bit(FLAG_EFS_ENABLE, &chan->flags)) {
				chan->remote_id = efs.id;
				chan->remote_stype = efs.stype;
				chan->remote_msdu = le16_to_cpu(efs.msdu);
				chan->remote_flush_to =
					le32_to_cpu(efs.flush_to);
				chan->remote_acc_lat =
					le32_to_cpu(efs.acc_lat);
				chan->remote_sdu_itime =
					le32_to_cpu(efs.sdu_itime);
				l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS,
						   sizeof(efs), (unsigned long) &efs);
			}
			break;

		case L2CAP_MODE_STREAMING:
			size = min_t(u16, le16_to_cpu(rfc.max_pdu_size),
				     chan->conn->mtu -
				     L2CAP_EXT_HDR_SIZE -
				     L2CAP_SDULEN_SIZE -
				     L2CAP_FCS_SIZE);
			rfc.max_pdu_size = cpu_to_le16(size);
			chan->remote_mps = size;

			set_bit(CONF_MODE_DONE, &chan->conf_state);

			l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
					   sizeof(rfc), (unsigned long) &rfc);

			break;

		default:
			result = L2CAP_CONF_UNACCEPT;

			memset(&rfc, 0, sizeof(rfc));
			rfc.mode = chan->mode;
		}

		if (result == L2CAP_CONF_SUCCESS)
			set_bit(CONF_OUTPUT_DONE, &chan->conf_state);
	}
	rsp->scid   = cpu_to_le16(chan->dcid);
	rsp->result = cpu_to_le16(result);
	rsp->flags  = __constant_cpu_to_le16(0);

	return ptr - data;
}
3134
/* Parse the remote's configure response (rsp, len bytes), adopt the
 * values it adjusted, and build the follow-up configure request into
 * data.  *result may be updated (e.g. to UNACCEPT for a too-small MTU).
 *
 * NOTE(review): options are echoed into the caller's fixed-size buffer
 * (callers pass e.g. u8 req[128]) with no bound on ptr, so a response
 * stuffed with options could overflow it.  TODO: confirm and bound.
 *
 * Returns the request length, or -ECONNREFUSED on an unacceptable
 * mode or EFS service type.
 */
static int l2cap_parse_conf_rsp(struct l2cap_chan *chan, void *rsp, int len, void *data, u16 *result)
{
	struct l2cap_conf_req *req = data;
	void *ptr = req->data;
	int type, olen;
	unsigned long val;
	struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
	struct l2cap_conf_efs efs;

	BT_DBG("chan %p, rsp %p, len %d, req %p", chan, rsp, len, data);

	while (len >= L2CAP_CONF_OPT_SIZE) {
		len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);

		switch (type) {
		case L2CAP_CONF_MTU:
			/* Reject an MTU below the spec minimum but keep
			 * negotiating with the minimum value.
			 */
			if (val < L2CAP_DEFAULT_MIN_MTU) {
				*result = L2CAP_CONF_UNACCEPT;
				chan->imtu = L2CAP_DEFAULT_MIN_MTU;
			} else
				chan->imtu = val;
			l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->imtu);
			break;

		case L2CAP_CONF_FLUSH_TO:
			chan->flush_to = val;
			l2cap_add_conf_opt(&ptr, L2CAP_CONF_FLUSH_TO,
					   2, chan->flush_to);
			break;

		case L2CAP_CONF_RFC:
			if (olen == sizeof(rfc))
				memcpy(&rfc, (void *)val, olen);

			/* STATE2 devices never accept a mode change */
			if (test_bit(CONF_STATE2_DEVICE, &chan->conf_state) &&
			    rfc.mode != chan->mode)
				return -ECONNREFUSED;

			chan->fcs = 0;

			l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
					   sizeof(rfc), (unsigned long) &rfc);
			break;

		case L2CAP_CONF_EWS:
			chan->ack_win = min_t(u16, val, chan->ack_win);
			l2cap_add_conf_opt(&ptr, L2CAP_CONF_EWS, 2,
					   chan->tx_win);
			break;

		case L2CAP_CONF_EFS:
			if (olen == sizeof(efs))
				memcpy(&efs, (void *)val, olen);

			/* Service types must be compatible or carry no
			 * traffic on one side.
			 */
			if (chan->local_stype != L2CAP_SERV_NOTRAFIC &&
			    efs.stype != L2CAP_SERV_NOTRAFIC &&
			    efs.stype != chan->local_stype)
				return -ECONNREFUSED;

			l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS,
					   sizeof(efs), (unsigned long) &efs);
			break;
		}
	}

	/* A basic-mode channel cannot be upgraded by the response */
	if (chan->mode == L2CAP_MODE_BASIC && chan->mode != rfc.mode)
		return -ECONNREFUSED;

	chan->mode = rfc.mode;

	if (*result == L2CAP_CONF_SUCCESS || *result == L2CAP_CONF_PENDING) {
		switch (rfc.mode) {
		case L2CAP_MODE_ERTM:
			chan->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
			chan->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
			chan->mps    = le16_to_cpu(rfc.max_pdu_size);
			/* Without extended control the RFC window bounds
			 * the ack window too.
			 */
			if (!test_bit(FLAG_EXT_CTRL, &chan->flags))
				chan->ack_win = min_t(u16, chan->ack_win,
						      rfc.txwin_size);

			if (test_bit(FLAG_EFS_ENABLE, &chan->flags)) {
				chan->local_msdu = le16_to_cpu(efs.msdu);
				chan->local_sdu_itime =
					le32_to_cpu(efs.sdu_itime);
				chan->local_acc_lat = le32_to_cpu(efs.acc_lat);
				chan->local_flush_to =
					le32_to_cpu(efs.flush_to);
			}
			break;

		case L2CAP_MODE_STREAMING:
			chan->mps    = le16_to_cpu(rfc.max_pdu_size);
		}
	}

	req->dcid   = cpu_to_le16(chan->dcid);
	req->flags  = __constant_cpu_to_le16(0);

	return ptr - data;
}
3235
3236 static int l2cap_build_conf_rsp(struct l2cap_chan *chan, void *data, u16 result, u16 flags)
3237 {
3238 struct l2cap_conf_rsp *rsp = data;
3239 void *ptr = rsp->data;
3240
3241 BT_DBG("chan %p", chan);
3242
3243 rsp->scid = cpu_to_le16(chan->dcid);
3244 rsp->result = cpu_to_le16(result);
3245 rsp->flags = cpu_to_le16(flags);
3246
3247 return ptr - data;
3248 }
3249
3250 void __l2cap_connect_rsp_defer(struct l2cap_chan *chan)
3251 {
3252 struct l2cap_conn_rsp rsp;
3253 struct l2cap_conn *conn = chan->conn;
3254 u8 buf[128];
3255
3256 rsp.scid = cpu_to_le16(chan->dcid);
3257 rsp.dcid = cpu_to_le16(chan->scid);
3258 rsp.result = __constant_cpu_to_le16(L2CAP_CR_SUCCESS);
3259 rsp.status = __constant_cpu_to_le16(L2CAP_CS_NO_INFO);
3260 l2cap_send_cmd(conn, chan->ident,
3261 L2CAP_CONN_RSP, sizeof(rsp), &rsp);
3262
3263 if (test_and_set_bit(CONF_REQ_SENT, &chan->conf_state))
3264 return;
3265
3266 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
3267 l2cap_build_conf_req(chan, buf), buf);
3268 chan->num_conf_req++;
3269 }
3270
/* Extract the final ERTM/streaming parameters from a successful
 * configure response, falling back to sane defaults when the remote
 * omitted the RFC or extended-window options.  No-op for other modes.
 */
static void l2cap_conf_rfc_get(struct l2cap_chan *chan, void *rsp, int len)
{
	int type, olen;
	unsigned long val;
	/* Use sane default values in case a misbehaving remote device
	 * did not send an RFC or extended window size option.
	 */
	u16 txwin_ext = chan->ack_win;
	struct l2cap_conf_rfc rfc = {
		.mode = chan->mode,
		.retrans_timeout = __constant_cpu_to_le16(L2CAP_DEFAULT_RETRANS_TO),
		.monitor_timeout = __constant_cpu_to_le16(L2CAP_DEFAULT_MONITOR_TO),
		.max_pdu_size = cpu_to_le16(chan->imtu),
		.txwin_size = min_t(u16, chan->ack_win, L2CAP_DEFAULT_TX_WINDOW),
	};

	BT_DBG("chan %p, rsp %p, len %d", chan, rsp, len);

	if ((chan->mode != L2CAP_MODE_ERTM) && (chan->mode != L2CAP_MODE_STREAMING))
		return;

	while (len >= L2CAP_CONF_OPT_SIZE) {
		len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);

		switch (type) {
		case L2CAP_CONF_RFC:
			if (olen == sizeof(rfc))
				memcpy(&rfc, (void *)val, olen);
			break;
		case L2CAP_CONF_EWS:
			txwin_ext = val;
			break;
		}
	}

	switch (rfc.mode) {
	case L2CAP_MODE_ERTM:
		chan->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
		chan->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
		chan->mps = le16_to_cpu(rfc.max_pdu_size);
		/* The ack window comes from EWS with extended control,
		 * otherwise from the RFC option's window size.
		 */
		if (test_bit(FLAG_EXT_CTRL, &chan->flags))
			chan->ack_win = min_t(u16, chan->ack_win, txwin_ext);
		else
			chan->ack_win = min_t(u16, chan->ack_win,
					      rfc.txwin_size);
		break;
	case L2CAP_MODE_STREAMING:
		chan->mps = le16_to_cpu(rfc.max_pdu_size);
	}
}
3321
3322 static inline int l2cap_command_rej(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
3323 {
3324 struct l2cap_cmd_rej_unk *rej = (struct l2cap_cmd_rej_unk *) data;
3325
3326 if (rej->reason != L2CAP_REJ_NOT_UNDERSTOOD)
3327 return 0;
3328
3329 if ((conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) &&
3330 cmd->ident == conn->info_ident) {
3331 cancel_delayed_work(&conn->info_timer);
3332
3333 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
3334 conn->info_ident = 0;
3335
3336 l2cap_conn_start(conn);
3337 }
3338
3339 return 0;
3340 }
3341
/* Handle an incoming connection request: look up a listening channel on
 * the requested PSM, run link-security checks, create the child channel
 * and answer with success / pending / refused.  On a pending reply
 * without feature info, an information request is also sent; on
 * success, configuration is kicked off.
 *
 * Locking: takes conn->chan_lock then the parent socket lock.
 * Always returns 0 (the response carries the actual result).
 */
static inline int l2cap_connect_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
{
	struct l2cap_conn_req *req = (struct l2cap_conn_req *) data;
	struct l2cap_conn_rsp rsp;
	struct l2cap_chan *chan = NULL, *pchan;
	struct sock *parent, *sk = NULL;
	int result, status = L2CAP_CS_NO_INFO;

	u16 dcid = 0, scid = __le16_to_cpu(req->scid);
	__le16 psm = req->psm;

	BT_DBG("psm 0x%2.2x scid 0x%4.4x", __le16_to_cpu(psm), scid);

	/* Check if we have socket listening on psm */
	pchan = l2cap_global_chan_by_psm(BT_LISTEN, psm, conn->src, conn->dst);
	if (!pchan) {
		result = L2CAP_CR_BAD_PSM;
		goto sendresp;
	}

	parent = pchan->sk;

	mutex_lock(&conn->chan_lock);
	lock_sock(parent);

	/* Check if the ACL is secure enough (if not SDP) */
	if (psm != __constant_cpu_to_le16(L2CAP_PSM_SDP) &&
	    !hci_conn_check_link_mode(conn->hcon)) {
		conn->disc_reason = HCI_ERROR_AUTH_FAILURE;
		result = L2CAP_CR_SEC_BLOCK;
		goto response;
	}

	result = L2CAP_CR_NO_MEM;

	/* Check if we already have channel with that dcid */
	if (__l2cap_get_chan_by_dcid(conn, scid))
		goto response;

	/* Create the child channel from the listening one */
	chan = pchan->ops->new_connection(pchan);
	if (!chan)
		goto response;

	sk = chan->sk;

	hci_conn_hold(conn->hcon);

	bacpy(&bt_sk(sk)->src, conn->src);
	bacpy(&bt_sk(sk)->dst, conn->dst);
	chan->psm  = psm;
	chan->dcid = scid;

	bt_accept_enqueue(parent, sk);

	__l2cap_chan_add(conn, chan);

	dcid = chan->scid;

	__set_chan_timer(chan, sk->sk_sndtimeo);

	chan->ident = cmd->ident;

	/* Decide success vs pending based on security and setup state */
	if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE) {
		if (l2cap_chan_check_security(chan)) {
			if (test_bit(BT_SK_DEFER_SETUP, &bt_sk(sk)->flags)) {
				__l2cap_state_change(chan, BT_CONNECT2);
				result = L2CAP_CR_PEND;
				status = L2CAP_CS_AUTHOR_PEND;
				parent->sk_data_ready(parent, 0);
			} else {
				__l2cap_state_change(chan, BT_CONFIG);
				result = L2CAP_CR_SUCCESS;
				status = L2CAP_CS_NO_INFO;
			}
		} else {
			__l2cap_state_change(chan, BT_CONNECT2);
			result = L2CAP_CR_PEND;
			status = L2CAP_CS_AUTHEN_PEND;
		}
	} else {
		__l2cap_state_change(chan, BT_CONNECT2);
		result = L2CAP_CR_PEND;
		status = L2CAP_CS_NO_INFO;
	}

response:
	release_sock(parent);
	mutex_unlock(&conn->chan_lock);

sendresp:
	rsp.scid   = cpu_to_le16(scid);
	rsp.dcid   = cpu_to_le16(dcid);
	rsp.result = cpu_to_le16(result);
	rsp.status = cpu_to_le16(status);
	l2cap_send_cmd(conn, cmd->ident, L2CAP_CONN_RSP, sizeof(rsp), &rsp);

	/* First pending reply: go query the remote's feature mask */
	if (result == L2CAP_CR_PEND && status == L2CAP_CS_NO_INFO) {
		struct l2cap_info_req info;
		info.type = __constant_cpu_to_le16(L2CAP_IT_FEAT_MASK);

		conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
		conn->info_ident = l2cap_get_ident(conn);

		schedule_delayed_work(&conn->info_timer, L2CAP_INFO_TIMEOUT);

		l2cap_send_cmd(conn, conn->info_ident,
			       L2CAP_INFO_REQ, sizeof(info), &info);
	}

	/* Immediate success: start configuration right away */
	if (chan && !test_bit(CONF_REQ_SENT, &chan->conf_state) &&
	    result == L2CAP_CR_SUCCESS) {
		u8 buf[128];
		set_bit(CONF_REQ_SENT, &chan->conf_state);
		l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
			       l2cap_build_conf_req(chan, buf), buf);
		chan->num_conf_req++;
	}

	return 0;
}
3462
/* Handle a connection response to our connection request.  The channel
 * is looked up by source CID (or by command ident while a CID has not
 * been assigned yet).  Success moves the channel to BT_CONFIG and sends
 * the first configure request; pending just flags the channel; any
 * other result tears the channel down.
 *
 * Returns 0 on success or -EFAULT when no matching channel exists.
 */
static inline int l2cap_connect_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
{
	struct l2cap_conn_rsp *rsp = (struct l2cap_conn_rsp *) data;
	u16 scid, dcid, result, status;
	struct l2cap_chan *chan;
	u8 req[128];
	int err;

	scid   = __le16_to_cpu(rsp->scid);
	dcid   = __le16_to_cpu(rsp->dcid);
	result = __le16_to_cpu(rsp->result);
	status = __le16_to_cpu(rsp->status);

	BT_DBG("dcid 0x%4.4x scid 0x%4.4x result 0x%2.2x status 0x%2.2x",
	       dcid, scid, result, status);

	mutex_lock(&conn->chan_lock);

	if (scid) {
		chan = __l2cap_get_chan_by_scid(conn, scid);
		if (!chan) {
			err = -EFAULT;
			goto unlock;
		}
	} else {
		/* No source CID yet: match on the command identifier */
		chan = __l2cap_get_chan_by_ident(conn, cmd->ident);
		if (!chan) {
			err = -EFAULT;
			goto unlock;
		}
	}

	err = 0;

	l2cap_chan_lock(chan);

	switch (result) {
	case L2CAP_CR_SUCCESS:
		l2cap_state_change(chan, BT_CONFIG);
		chan->ident = 0;
		chan->dcid = dcid;
		clear_bit(CONF_CONNECT_PEND, &chan->conf_state);

		/* Only send the first configure request once */
		if (test_and_set_bit(CONF_REQ_SENT, &chan->conf_state))
			break;

		l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
			       l2cap_build_conf_req(chan, req), req);
		chan->num_conf_req++;
		break;

	case L2CAP_CR_PEND:
		set_bit(CONF_CONNECT_PEND, &chan->conf_state);
		break;

	default:
		/* Refused (or unknown result): drop the channel */
		l2cap_chan_del(chan, ECONNREFUSED);
		break;
	}

	l2cap_chan_unlock(chan);

unlock:
	mutex_unlock(&conn->chan_lock);

	return err;
}
3530
3531 static inline void set_default_fcs(struct l2cap_chan *chan)
3532 {
3533 /* FCS is enabled only in ERTM or streaming mode, if one or both
3534 * sides request it.
3535 */
3536 if (chan->mode != L2CAP_MODE_ERTM && chan->mode != L2CAP_MODE_STREAMING)
3537 chan->fcs = L2CAP_FCS_NONE;
3538 else if (!test_bit(CONF_NO_FCS_RECV, &chan->conf_state))
3539 chan->fcs = L2CAP_FCS_CRC16;
3540 }
3541
/* Handle a configure request.  Options may arrive split over several
 * requests (continuation flag); fragments are accumulated in
 * chan->conf_req until the final one, which is then parsed and
 * answered.  When both directions complete, ERTM state is initialised
 * and the channel made ready.
 *
 * Returns 0, a negative errno from ENOENT (unknown CID), or the error
 * propagated from l2cap_ertm_init().
 */
static inline int l2cap_config_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u16 cmd_len, u8 *data)
{
	struct l2cap_conf_req *req = (struct l2cap_conf_req *) data;
	u16 dcid, flags;
	u8 rsp[64];
	struct l2cap_chan *chan;
	int len, err = 0;

	dcid  = __le16_to_cpu(req->dcid);
	flags = __le16_to_cpu(req->flags);

	BT_DBG("dcid 0x%4.4x flags 0x%2.2x", dcid, flags);

	chan = l2cap_get_chan_by_scid(conn, dcid);
	if (!chan)
		return -ENOENT;

	/* Config is only legal while connecting/configuring */
	if (chan->state != BT_CONFIG && chan->state != BT_CONNECT2) {
		struct l2cap_cmd_rej_cid rej;

		rej.reason = __constant_cpu_to_le16(L2CAP_REJ_INVALID_CID);
		rej.scid = cpu_to_le16(chan->scid);
		rej.dcid = cpu_to_le16(chan->dcid);

		l2cap_send_cmd(conn, cmd->ident, L2CAP_COMMAND_REJ,
			       sizeof(rej), &rej);
		goto unlock;
	}

	/* Reject if config buffer is too small. */
	len = cmd_len - sizeof(*req);
	if (len < 0 || chan->conf_len + len > sizeof(chan->conf_req)) {
		l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
			       l2cap_build_conf_rsp(chan, rsp,
						    L2CAP_CONF_REJECT, flags), rsp);
		goto unlock;
	}

	/* Store config. */
	memcpy(chan->conf_req + chan->conf_len, req->data, len);
	chan->conf_len += len;

	if (flags & L2CAP_CONF_FLAG_CONTINUATION) {
		/* Incomplete config. Send empty response. */
		l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
			       l2cap_build_conf_rsp(chan, rsp,
						    L2CAP_CONF_SUCCESS, flags), rsp);
		goto unlock;
	}

	/* Complete config. */
	len = l2cap_parse_conf_req(chan, rsp);
	if (len < 0) {
		l2cap_send_disconn_req(conn, chan, ECONNRESET);
		goto unlock;
	}

	l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP, len, rsp);
	chan->num_conf_rsp++;

	/* Reset config buffer. */
	chan->conf_len = 0;

	if (!test_bit(CONF_OUTPUT_DONE, &chan->conf_state))
		goto unlock;

	/* Both directions configured: finish channel setup */
	if (test_bit(CONF_INPUT_DONE, &chan->conf_state)) {
		set_default_fcs(chan);

		if (chan->mode == L2CAP_MODE_ERTM ||
		    chan->mode == L2CAP_MODE_STREAMING)
			err = l2cap_ertm_init(chan);

		if (err < 0)
			l2cap_send_disconn_req(chan->conn, chan, -err);
		else
			l2cap_chan_ready(chan);

		goto unlock;
	}

	if (!test_and_set_bit(CONF_REQ_SENT, &chan->conf_state)) {
		u8 buf[64];
		l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
			       l2cap_build_conf_req(chan, buf), buf);
		chan->num_conf_req++;
	}

	/* Got Conf Rsp PENDING from remote side and asume we sent
	   Conf Rsp PENDING in the code above */
	if (test_bit(CONF_REM_CONF_PEND, &chan->conf_state) &&
	    test_bit(CONF_LOC_CONF_PEND, &chan->conf_state)) {

		/* check compatibility */

		clear_bit(CONF_LOC_CONF_PEND, &chan->conf_state);
		set_bit(CONF_OUTPUT_DONE, &chan->conf_state);

		l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
			       l2cap_build_conf_rsp(chan, rsp,
						    L2CAP_CONF_SUCCESS, flags), rsp);
	}

unlock:
	l2cap_chan_unlock(chan);
	return err;
}
3649
3650 static inline int l2cap_config_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
3651 {
3652 struct l2cap_conf_rsp *rsp = (struct l2cap_conf_rsp *)data;
3653 u16 scid, flags, result;
3654 struct l2cap_chan *chan;
3655 int len = le16_to_cpu(cmd->len) - sizeof(*rsp);
3656 int err = 0;
3657
3658 scid = __le16_to_cpu(rsp->scid);
3659 flags = __le16_to_cpu(rsp->flags);
3660 result = __le16_to_cpu(rsp->result);
3661
3662 BT_DBG("scid 0x%4.4x flags 0x%2.2x result 0x%2.2x len %d", scid, flags,
3663 result, len);
3664
3665 chan = l2cap_get_chan_by_scid(conn, scid);
3666 if (!chan)
3667 return 0;
3668
3669 switch (result) {
3670 case L2CAP_CONF_SUCCESS:
3671 l2cap_conf_rfc_get(chan, rsp->data, len);
3672 clear_bit(CONF_REM_CONF_PEND, &chan->conf_state);
3673 break;
3674
3675 case L2CAP_CONF_PENDING:
3676 set_bit(CONF_REM_CONF_PEND, &chan->conf_state);
3677
3678 if (test_bit(CONF_LOC_CONF_PEND, &chan->conf_state)) {
3679 char buf[64];
3680
3681 len = l2cap_parse_conf_rsp(chan, rsp->data, len,
3682 buf, &result);
3683 if (len < 0) {
3684 l2cap_send_disconn_req(conn, chan, ECONNRESET);
3685 goto done;
3686 }
3687
3688 /* check compatibility */
3689
3690 clear_bit(CONF_LOC_CONF_PEND, &chan->conf_state);
3691 set_bit(CONF_OUTPUT_DONE, &chan->conf_state);
3692
3693 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
3694 l2cap_build_conf_rsp(chan, buf,
3695 L2CAP_CONF_SUCCESS, 0x0000), buf);
3696 }
3697 goto done;
3698
3699 case L2CAP_CONF_UNACCEPT:
3700 if (chan->num_conf_rsp <= L2CAP_CONF_MAX_CONF_RSP) {
3701 char req[64];
3702
3703 if (len > sizeof(req) - sizeof(struct l2cap_conf_req)) {
3704 l2cap_send_disconn_req(conn, chan, ECONNRESET);
3705 goto done;
3706 }
3707
3708 /* throw out any old stored conf requests */
3709 result = L2CAP_CONF_SUCCESS;
3710 len = l2cap_parse_conf_rsp(chan, rsp->data, len,
3711 req, &result);
3712 if (len < 0) {
3713 l2cap_send_disconn_req(conn, chan, ECONNRESET);
3714 goto done;
3715 }
3716
3717 l2cap_send_cmd(conn, l2cap_get_ident(conn),
3718 L2CAP_CONF_REQ, len, req);
3719 chan->num_conf_req++;
3720 if (result != L2CAP_CONF_SUCCESS)
3721 goto done;
3722 break;
3723 }
3724
3725 default:
3726 l2cap_chan_set_err(chan, ECONNRESET);
3727
3728 __set_chan_timer(chan, L2CAP_DISC_REJ_TIMEOUT);
3729 l2cap_send_disconn_req(conn, chan, ECONNRESET);
3730 goto done;
3731 }
3732
3733 if (flags & L2CAP_CONF_FLAG_CONTINUATION)
3734 goto done;
3735
3736 set_bit(CONF_INPUT_DONE, &chan->conf_state);
3737
3738 if (test_bit(CONF_OUTPUT_DONE, &chan->conf_state)) {
3739 set_default_fcs(chan);
3740
3741 if (chan->mode == L2CAP_MODE_ERTM ||
3742 chan->mode == L2CAP_MODE_STREAMING)
3743 err = l2cap_ertm_init(chan);
3744
3745 if (err < 0)
3746 l2cap_send_disconn_req(chan->conn, chan, -err);
3747 else
3748 l2cap_chan_ready(chan);
3749 }
3750
3751 done:
3752 l2cap_chan_unlock(chan);
3753 return err;
3754 }
3755
3756 static inline int l2cap_disconnect_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
3757 {
3758 struct l2cap_disconn_req *req = (struct l2cap_disconn_req *) data;
3759 struct l2cap_disconn_rsp rsp;
3760 u16 dcid, scid;
3761 struct l2cap_chan *chan;
3762 struct sock *sk;
3763
3764 scid = __le16_to_cpu(req->scid);
3765 dcid = __le16_to_cpu(req->dcid);
3766
3767 BT_DBG("scid 0x%4.4x dcid 0x%4.4x", scid, dcid);
3768
3769 mutex_lock(&conn->chan_lock);
3770
3771 chan = __l2cap_get_chan_by_scid(conn, dcid);
3772 if (!chan) {
3773 mutex_unlock(&conn->chan_lock);
3774 return 0;
3775 }
3776
3777 l2cap_chan_lock(chan);
3778
3779 sk = chan->sk;
3780
3781 rsp.dcid = cpu_to_le16(chan->scid);
3782 rsp.scid = cpu_to_le16(chan->dcid);
3783 l2cap_send_cmd(conn, cmd->ident, L2CAP_DISCONN_RSP, sizeof(rsp), &rsp);
3784
3785 lock_sock(sk);
3786 sk->sk_shutdown = SHUTDOWN_MASK;
3787 release_sock(sk);
3788
3789 l2cap_chan_hold(chan);
3790 l2cap_chan_del(chan, ECONNRESET);
3791
3792 l2cap_chan_unlock(chan);
3793
3794 chan->ops->close(chan);
3795 l2cap_chan_put(chan);
3796
3797 mutex_unlock(&conn->chan_lock);
3798
3799 return 0;
3800 }
3801
3802 static inline int l2cap_disconnect_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
3803 {
3804 struct l2cap_disconn_rsp *rsp = (struct l2cap_disconn_rsp *) data;
3805 u16 dcid, scid;
3806 struct l2cap_chan *chan;
3807
3808 scid = __le16_to_cpu(rsp->scid);
3809 dcid = __le16_to_cpu(rsp->dcid);
3810
3811 BT_DBG("dcid 0x%4.4x scid 0x%4.4x", dcid, scid);
3812
3813 mutex_lock(&conn->chan_lock);
3814
3815 chan = __l2cap_get_chan_by_scid(conn, scid);
3816 if (!chan) {
3817 mutex_unlock(&conn->chan_lock);
3818 return 0;
3819 }
3820
3821 l2cap_chan_lock(chan);
3822
3823 l2cap_chan_hold(chan);
3824 l2cap_chan_del(chan, 0);
3825
3826 l2cap_chan_unlock(chan);
3827
3828 chan->ops->close(chan);
3829 l2cap_chan_put(chan);
3830
3831 mutex_unlock(&conn->chan_lock);
3832
3833 return 0;
3834 }
3835
3836 static inline int l2cap_information_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
3837 {
3838 struct l2cap_info_req *req = (struct l2cap_info_req *) data;
3839 u16 type;
3840
3841 type = __le16_to_cpu(req->type);
3842
3843 BT_DBG("type 0x%4.4x", type);
3844
3845 if (type == L2CAP_IT_FEAT_MASK) {
3846 u8 buf[8];
3847 u32 feat_mask = l2cap_feat_mask;
3848 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
3849 rsp->type = __constant_cpu_to_le16(L2CAP_IT_FEAT_MASK);
3850 rsp->result = __constant_cpu_to_le16(L2CAP_IR_SUCCESS);
3851 if (!disable_ertm)
3852 feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING
3853 | L2CAP_FEAT_FCS;
3854 if (enable_hs)
3855 feat_mask |= L2CAP_FEAT_EXT_FLOW
3856 | L2CAP_FEAT_EXT_WINDOW;
3857
3858 put_unaligned_le32(feat_mask, rsp->data);
3859 l2cap_send_cmd(conn, cmd->ident,
3860 L2CAP_INFO_RSP, sizeof(buf), buf);
3861 } else if (type == L2CAP_IT_FIXED_CHAN) {
3862 u8 buf[12];
3863 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
3864
3865 if (enable_hs)
3866 l2cap_fixed_chan[0] |= L2CAP_FC_A2MP;
3867 else
3868 l2cap_fixed_chan[0] &= ~L2CAP_FC_A2MP;
3869
3870 rsp->type = __constant_cpu_to_le16(L2CAP_IT_FIXED_CHAN);
3871 rsp->result = __constant_cpu_to_le16(L2CAP_IR_SUCCESS);
3872 memcpy(rsp->data, l2cap_fixed_chan, sizeof(l2cap_fixed_chan));
3873 l2cap_send_cmd(conn, cmd->ident,
3874 L2CAP_INFO_RSP, sizeof(buf), buf);
3875 } else {
3876 struct l2cap_info_rsp rsp;
3877 rsp.type = cpu_to_le16(type);
3878 rsp.result = __constant_cpu_to_le16(L2CAP_IR_NOTSUPP);
3879 l2cap_send_cmd(conn, cmd->ident,
3880 L2CAP_INFO_RSP, sizeof(rsp), &rsp);
3881 }
3882
3883 return 0;
3884 }
3885
3886 static inline int l2cap_information_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
3887 {
3888 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) data;
3889 u16 type, result;
3890
3891 type = __le16_to_cpu(rsp->type);
3892 result = __le16_to_cpu(rsp->result);
3893
3894 BT_DBG("type 0x%4.4x result 0x%2.2x", type, result);
3895
3896 /* L2CAP Info req/rsp are unbound to channels, add extra checks */
3897 if (cmd->ident != conn->info_ident ||
3898 conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE)
3899 return 0;
3900
3901 cancel_delayed_work(&conn->info_timer);
3902
3903 if (result != L2CAP_IR_SUCCESS) {
3904 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
3905 conn->info_ident = 0;
3906
3907 l2cap_conn_start(conn);
3908
3909 return 0;
3910 }
3911
3912 switch (type) {
3913 case L2CAP_IT_FEAT_MASK:
3914 conn->feat_mask = get_unaligned_le32(rsp->data);
3915
3916 if (conn->feat_mask & L2CAP_FEAT_FIXED_CHAN) {
3917 struct l2cap_info_req req;
3918 req.type = __constant_cpu_to_le16(L2CAP_IT_FIXED_CHAN);
3919
3920 conn->info_ident = l2cap_get_ident(conn);
3921
3922 l2cap_send_cmd(conn, conn->info_ident,
3923 L2CAP_INFO_REQ, sizeof(req), &req);
3924 } else {
3925 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
3926 conn->info_ident = 0;
3927
3928 l2cap_conn_start(conn);
3929 }
3930 break;
3931
3932 case L2CAP_IT_FIXED_CHAN:
3933 conn->fixed_chan_mask = rsp->data[0];
3934 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
3935 conn->info_ident = 0;
3936
3937 l2cap_conn_start(conn);
3938 break;
3939 }
3940
3941 return 0;
3942 }
3943
3944 static inline int l2cap_create_channel_req(struct l2cap_conn *conn,
3945 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
3946 void *data)
3947 {
3948 struct l2cap_create_chan_req *req = data;
3949 struct l2cap_create_chan_rsp rsp;
3950 u16 psm, scid;
3951
3952 if (cmd_len != sizeof(*req))
3953 return -EPROTO;
3954
3955 if (!enable_hs)
3956 return -EINVAL;
3957
3958 psm = le16_to_cpu(req->psm);
3959 scid = le16_to_cpu(req->scid);
3960
3961 BT_DBG("psm 0x%2.2x, scid 0x%4.4x, amp_id %d", psm, scid, req->amp_id);
3962
3963 /* Placeholder: Always reject */
3964 rsp.dcid = 0;
3965 rsp.scid = cpu_to_le16(scid);
3966 rsp.result = __constant_cpu_to_le16(L2CAP_CR_NO_MEM);
3967 rsp.status = __constant_cpu_to_le16(L2CAP_CS_NO_INFO);
3968
3969 l2cap_send_cmd(conn, cmd->ident, L2CAP_CREATE_CHAN_RSP,
3970 sizeof(rsp), &rsp);
3971
3972 return 0;
3973 }
3974
/* A Create Channel Response carries the same payload as a Connect
 * Response, so reuse that handler.
 */
static inline int l2cap_create_channel_rsp(struct l2cap_conn *conn,
					struct l2cap_cmd_hdr *cmd, void *data)
{
	BT_DBG("conn %p", conn);

	return l2cap_connect_rsp(conn, cmd, data);
}
3982
3983 static void l2cap_send_move_chan_rsp(struct l2cap_conn *conn, u8 ident,
3984 u16 icid, u16 result)
3985 {
3986 struct l2cap_move_chan_rsp rsp;
3987
3988 BT_DBG("icid 0x%4.4x, result 0x%4.4x", icid, result);
3989
3990 rsp.icid = cpu_to_le16(icid);
3991 rsp.result = cpu_to_le16(result);
3992
3993 l2cap_send_cmd(conn, ident, L2CAP_MOVE_CHAN_RSP, sizeof(rsp), &rsp);
3994 }
3995
3996 static void l2cap_send_move_chan_cfm(struct l2cap_conn *conn,
3997 struct l2cap_chan *chan,
3998 u16 icid, u16 result)
3999 {
4000 struct l2cap_move_chan_cfm cfm;
4001 u8 ident;
4002
4003 BT_DBG("icid 0x%4.4x, result 0x%4.4x", icid, result);
4004
4005 ident = l2cap_get_ident(conn);
4006 if (chan)
4007 chan->ident = ident;
4008
4009 cfm.icid = cpu_to_le16(icid);
4010 cfm.result = cpu_to_le16(result);
4011
4012 l2cap_send_cmd(conn, ident, L2CAP_MOVE_CHAN_CFM, sizeof(cfm), &cfm);
4013 }
4014
4015 static void l2cap_send_move_chan_cfm_rsp(struct l2cap_conn *conn, u8 ident,
4016 u16 icid)
4017 {
4018 struct l2cap_move_chan_cfm_rsp rsp;
4019
4020 BT_DBG("icid 0x%4.4x", icid);
4021
4022 rsp.icid = cpu_to_le16(icid);
4023 l2cap_send_cmd(conn, ident, L2CAP_MOVE_CHAN_CFM_RSP, sizeof(rsp), &rsp);
4024 }
4025
4026 static inline int l2cap_move_channel_req(struct l2cap_conn *conn,
4027 struct l2cap_cmd_hdr *cmd,
4028 u16 cmd_len, void *data)
4029 {
4030 struct l2cap_move_chan_req *req = data;
4031 u16 icid = 0;
4032 u16 result = L2CAP_MR_NOT_ALLOWED;
4033
4034 if (cmd_len != sizeof(*req))
4035 return -EPROTO;
4036
4037 icid = le16_to_cpu(req->icid);
4038
4039 BT_DBG("icid 0x%4.4x, dest_amp_id %d", icid, req->dest_amp_id);
4040
4041 if (!enable_hs)
4042 return -EINVAL;
4043
4044 /* Placeholder: Always refuse */
4045 l2cap_send_move_chan_rsp(conn, cmd->ident, icid, result);
4046
4047 return 0;
4048 }
4049
4050 static inline int l2cap_move_channel_rsp(struct l2cap_conn *conn,
4051 struct l2cap_cmd_hdr *cmd,
4052 u16 cmd_len, void *data)
4053 {
4054 struct l2cap_move_chan_rsp *rsp = data;
4055 u16 icid, result;
4056
4057 if (cmd_len != sizeof(*rsp))
4058 return -EPROTO;
4059
4060 icid = le16_to_cpu(rsp->icid);
4061 result = le16_to_cpu(rsp->result);
4062
4063 BT_DBG("icid 0x%4.4x, result 0x%4.4x", icid, result);
4064
4065 /* Placeholder: Always unconfirmed */
4066 l2cap_send_move_chan_cfm(conn, NULL, icid, L2CAP_MC_UNCONFIRMED);
4067
4068 return 0;
4069 }
4070
4071 static inline int l2cap_move_channel_confirm(struct l2cap_conn *conn,
4072 struct l2cap_cmd_hdr *cmd,
4073 u16 cmd_len, void *data)
4074 {
4075 struct l2cap_move_chan_cfm *cfm = data;
4076 u16 icid, result;
4077
4078 if (cmd_len != sizeof(*cfm))
4079 return -EPROTO;
4080
4081 icid = le16_to_cpu(cfm->icid);
4082 result = le16_to_cpu(cfm->result);
4083
4084 BT_DBG("icid 0x%4.4x, result 0x%4.4x", icid, result);
4085
4086 l2cap_send_move_chan_cfm_rsp(conn, cmd->ident, icid);
4087
4088 return 0;
4089 }
4090
4091 static inline int l2cap_move_channel_confirm_rsp(struct l2cap_conn *conn,
4092 struct l2cap_cmd_hdr *cmd,
4093 u16 cmd_len, void *data)
4094 {
4095 struct l2cap_move_chan_cfm_rsp *rsp = data;
4096 u16 icid;
4097
4098 if (cmd_len != sizeof(*rsp))
4099 return -EPROTO;
4100
4101 icid = le16_to_cpu(rsp->icid);
4102
4103 BT_DBG("icid 0x%4.4x", icid);
4104
4105 return 0;
4106 }
4107
4108 static inline int l2cap_check_conn_param(u16 min, u16 max, u16 latency,
4109 u16 to_multiplier)
4110 {
4111 u16 max_latency;
4112
4113 if (min > max || min < 6 || max > 3200)
4114 return -EINVAL;
4115
4116 if (to_multiplier < 10 || to_multiplier > 3200)
4117 return -EINVAL;
4118
4119 if (max >= to_multiplier * 8)
4120 return -EINVAL;
4121
4122 max_latency = (to_multiplier * 8 / max) - 1;
4123 if (latency > 499 || latency > max_latency)
4124 return -EINVAL;
4125
4126 return 0;
4127 }
4128
4129 static inline int l2cap_conn_param_update_req(struct l2cap_conn *conn,
4130 struct l2cap_cmd_hdr *cmd, u8 *data)
4131 {
4132 struct hci_conn *hcon = conn->hcon;
4133 struct l2cap_conn_param_update_req *req;
4134 struct l2cap_conn_param_update_rsp rsp;
4135 u16 min, max, latency, to_multiplier, cmd_len;
4136 int err;
4137
4138 if (!(hcon->link_mode & HCI_LM_MASTER))
4139 return -EINVAL;
4140
4141 cmd_len = __le16_to_cpu(cmd->len);
4142 if (cmd_len != sizeof(struct l2cap_conn_param_update_req))
4143 return -EPROTO;
4144
4145 req = (struct l2cap_conn_param_update_req *) data;
4146 min = __le16_to_cpu(req->min);
4147 max = __le16_to_cpu(req->max);
4148 latency = __le16_to_cpu(req->latency);
4149 to_multiplier = __le16_to_cpu(req->to_multiplier);
4150
4151 BT_DBG("min 0x%4.4x max 0x%4.4x latency: 0x%4.4x Timeout: 0x%4.4x",
4152 min, max, latency, to_multiplier);
4153
4154 memset(&rsp, 0, sizeof(rsp));
4155
4156 err = l2cap_check_conn_param(min, max, latency, to_multiplier);
4157 if (err)
4158 rsp.result = __constant_cpu_to_le16(L2CAP_CONN_PARAM_REJECTED);
4159 else
4160 rsp.result = __constant_cpu_to_le16(L2CAP_CONN_PARAM_ACCEPTED);
4161
4162 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONN_PARAM_UPDATE_RSP,
4163 sizeof(rsp), &rsp);
4164
4165 if (!err)
4166 hci_le_conn_update(hcon, min, max, latency, to_multiplier);
4167
4168 return 0;
4169 }
4170
4171 static inline int l2cap_bredr_sig_cmd(struct l2cap_conn *conn,
4172 struct l2cap_cmd_hdr *cmd, u16 cmd_len, u8 *data)
4173 {
4174 int err = 0;
4175
4176 switch (cmd->code) {
4177 case L2CAP_COMMAND_REJ:
4178 l2cap_command_rej(conn, cmd, data);
4179 break;
4180
4181 case L2CAP_CONN_REQ:
4182 err = l2cap_connect_req(conn, cmd, data);
4183 break;
4184
4185 case L2CAP_CONN_RSP:
4186 err = l2cap_connect_rsp(conn, cmd, data);
4187 break;
4188
4189 case L2CAP_CONF_REQ:
4190 err = l2cap_config_req(conn, cmd, cmd_len, data);
4191 break;
4192
4193 case L2CAP_CONF_RSP:
4194 err = l2cap_config_rsp(conn, cmd, data);
4195 break;
4196
4197 case L2CAP_DISCONN_REQ:
4198 err = l2cap_disconnect_req(conn, cmd, data);
4199 break;
4200
4201 case L2CAP_DISCONN_RSP:
4202 err = l2cap_disconnect_rsp(conn, cmd, data);
4203 break;
4204
4205 case L2CAP_ECHO_REQ:
4206 l2cap_send_cmd(conn, cmd->ident, L2CAP_ECHO_RSP, cmd_len, data);
4207 break;
4208
4209 case L2CAP_ECHO_RSP:
4210 break;
4211
4212 case L2CAP_INFO_REQ:
4213 err = l2cap_information_req(conn, cmd, data);
4214 break;
4215
4216 case L2CAP_INFO_RSP:
4217 err = l2cap_information_rsp(conn, cmd, data);
4218 break;
4219
4220 case L2CAP_CREATE_CHAN_REQ:
4221 err = l2cap_create_channel_req(conn, cmd, cmd_len, data);
4222 break;
4223
4224 case L2CAP_CREATE_CHAN_RSP:
4225 err = l2cap_create_channel_rsp(conn, cmd, data);
4226 break;
4227
4228 case L2CAP_MOVE_CHAN_REQ:
4229 err = l2cap_move_channel_req(conn, cmd, cmd_len, data);
4230 break;
4231
4232 case L2CAP_MOVE_CHAN_RSP:
4233 err = l2cap_move_channel_rsp(conn, cmd, cmd_len, data);
4234 break;
4235
4236 case L2CAP_MOVE_CHAN_CFM:
4237 err = l2cap_move_channel_confirm(conn, cmd, cmd_len, data);
4238 break;
4239
4240 case L2CAP_MOVE_CHAN_CFM_RSP:
4241 err = l2cap_move_channel_confirm_rsp(conn, cmd, cmd_len, data);
4242 break;
4243
4244 default:
4245 BT_ERR("Unknown BR/EDR signaling command 0x%2.2x", cmd->code);
4246 err = -EINVAL;
4247 break;
4248 }
4249
4250 return err;
4251 }
4252
4253 static inline int l2cap_le_sig_cmd(struct l2cap_conn *conn,
4254 struct l2cap_cmd_hdr *cmd, u8 *data)
4255 {
4256 switch (cmd->code) {
4257 case L2CAP_COMMAND_REJ:
4258 return 0;
4259
4260 case L2CAP_CONN_PARAM_UPDATE_REQ:
4261 return l2cap_conn_param_update_req(conn, cmd, data);
4262
4263 case L2CAP_CONN_PARAM_UPDATE_RSP:
4264 return 0;
4265
4266 default:
4267 BT_ERR("Unknown LE signaling command 0x%2.2x", cmd->code);
4268 return -EINVAL;
4269 }
4270 }
4271
4272 static inline void l2cap_sig_channel(struct l2cap_conn *conn,
4273 struct sk_buff *skb)
4274 {
4275 u8 *data = skb->data;
4276 int len = skb->len;
4277 struct l2cap_cmd_hdr cmd;
4278 int err;
4279
4280 l2cap_raw_recv(conn, skb);
4281
4282 while (len >= L2CAP_CMD_HDR_SIZE) {
4283 u16 cmd_len;
4284 memcpy(&cmd, data, L2CAP_CMD_HDR_SIZE);
4285 data += L2CAP_CMD_HDR_SIZE;
4286 len -= L2CAP_CMD_HDR_SIZE;
4287
4288 cmd_len = le16_to_cpu(cmd.len);
4289
4290 BT_DBG("code 0x%2.2x len %d id 0x%2.2x", cmd.code, cmd_len, cmd.ident);
4291
4292 if (cmd_len > len || !cmd.ident) {
4293 BT_DBG("corrupted command");
4294 break;
4295 }
4296
4297 if (conn->hcon->type == LE_LINK)
4298 err = l2cap_le_sig_cmd(conn, &cmd, data);
4299 else
4300 err = l2cap_bredr_sig_cmd(conn, &cmd, cmd_len, data);
4301
4302 if (err) {
4303 struct l2cap_cmd_rej_unk rej;
4304
4305 BT_ERR("Wrong link type (%d)", err);
4306
4307 /* FIXME: Map err to a valid reason */
4308 rej.reason = __constant_cpu_to_le16(L2CAP_REJ_NOT_UNDERSTOOD);
4309 l2cap_send_cmd(conn, cmd.ident, L2CAP_COMMAND_REJ, sizeof(rej), &rej);
4310 }
4311
4312 data += cmd_len;
4313 len -= cmd_len;
4314 }
4315
4316 kfree_skb(skb);
4317 }
4318
4319 static int l2cap_check_fcs(struct l2cap_chan *chan, struct sk_buff *skb)
4320 {
4321 u16 our_fcs, rcv_fcs;
4322 int hdr_size;
4323
4324 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
4325 hdr_size = L2CAP_EXT_HDR_SIZE;
4326 else
4327 hdr_size = L2CAP_ENH_HDR_SIZE;
4328
4329 if (chan->fcs == L2CAP_FCS_CRC16) {
4330 skb_trim(skb, skb->len - L2CAP_FCS_SIZE);
4331 rcv_fcs = get_unaligned_le16(skb->data + skb->len);
4332 our_fcs = crc16(0, skb->data - hdr_size, skb->len + hdr_size);
4333
4334 if (our_fcs != rcv_fcs)
4335 return -EBADMSG;
4336 }
4337 return 0;
4338 }
4339
4340 static void l2cap_send_i_or_rr_or_rnr(struct l2cap_chan *chan)
4341 {
4342 struct l2cap_ctrl control;
4343
4344 BT_DBG("chan %p", chan);
4345
4346 memset(&control, 0, sizeof(control));
4347 control.sframe = 1;
4348 control.final = 1;
4349 control.reqseq = chan->buffer_seq;
4350 set_bit(CONN_SEND_FBIT, &chan->conn_state);
4351
4352 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
4353 control.super = L2CAP_SUPER_RNR;
4354 l2cap_send_sframe(chan, &control);
4355 }
4356
4357 if (test_and_clear_bit(CONN_REMOTE_BUSY, &chan->conn_state) &&
4358 chan->unacked_frames > 0)
4359 __set_retrans_timer(chan);
4360
4361 /* Send pending iframes */
4362 l2cap_ertm_send(chan);
4363
4364 if (!test_bit(CONN_LOCAL_BUSY, &chan->conn_state) &&
4365 test_bit(CONN_SEND_FBIT, &chan->conn_state)) {
4366 /* F-bit wasn't sent in an s-frame or i-frame yet, so
4367 * send it now.
4368 */
4369 control.super = L2CAP_SUPER_RR;
4370 l2cap_send_sframe(chan, &control);
4371 }
4372 }
4373
4374 static void append_skb_frag(struct sk_buff *skb,
4375 struct sk_buff *new_frag, struct sk_buff **last_frag)
4376 {
4377 /* skb->len reflects data in skb as well as all fragments
4378 * skb->data_len reflects only data in fragments
4379 */
4380 if (!skb_has_frag_list(skb))
4381 skb_shinfo(skb)->frag_list = new_frag;
4382
4383 new_frag->next = NULL;
4384
4385 (*last_frag)->next = new_frag;
4386 *last_frag = new_frag;
4387
4388 skb->len += new_frag->len;
4389 skb->data_len += new_frag->len;
4390 skb->truesize += new_frag->truesize;
4391 }
4392
4393 static int l2cap_reassemble_sdu(struct l2cap_chan *chan, struct sk_buff *skb,
4394 struct l2cap_ctrl *control)
4395 {
4396 int err = -EINVAL;
4397
4398 switch (control->sar) {
4399 case L2CAP_SAR_UNSEGMENTED:
4400 if (chan->sdu)
4401 break;
4402
4403 err = chan->ops->recv(chan, skb);
4404 break;
4405
4406 case L2CAP_SAR_START:
4407 if (chan->sdu)
4408 break;
4409
4410 chan->sdu_len = get_unaligned_le16(skb->data);
4411 skb_pull(skb, L2CAP_SDULEN_SIZE);
4412
4413 if (chan->sdu_len > chan->imtu) {
4414 err = -EMSGSIZE;
4415 break;
4416 }
4417
4418 if (skb->len >= chan->sdu_len)
4419 break;
4420
4421 chan->sdu = skb;
4422 chan->sdu_last_frag = skb;
4423
4424 skb = NULL;
4425 err = 0;
4426 break;
4427
4428 case L2CAP_SAR_CONTINUE:
4429 if (!chan->sdu)
4430 break;
4431
4432 append_skb_frag(chan->sdu, skb,
4433 &chan->sdu_last_frag);
4434 skb = NULL;
4435
4436 if (chan->sdu->len >= chan->sdu_len)
4437 break;
4438
4439 err = 0;
4440 break;
4441
4442 case L2CAP_SAR_END:
4443 if (!chan->sdu)
4444 break;
4445
4446 append_skb_frag(chan->sdu, skb,
4447 &chan->sdu_last_frag);
4448 skb = NULL;
4449
4450 if (chan->sdu->len != chan->sdu_len)
4451 break;
4452
4453 err = chan->ops->recv(chan, chan->sdu);
4454
4455 if (!err) {
4456 /* Reassembly complete */
4457 chan->sdu = NULL;
4458 chan->sdu_last_frag = NULL;
4459 chan->sdu_len = 0;
4460 }
4461 break;
4462 }
4463
4464 if (err) {
4465 kfree_skb(skb);
4466 kfree_skb(chan->sdu);
4467 chan->sdu = NULL;
4468 chan->sdu_last_frag = NULL;
4469 chan->sdu_len = 0;
4470 }
4471
4472 return err;
4473 }
4474
4475 void l2cap_chan_busy(struct l2cap_chan *chan, int busy)
4476 {
4477 u8 event;
4478
4479 if (chan->mode != L2CAP_MODE_ERTM)
4480 return;
4481
4482 event = busy ? L2CAP_EV_LOCAL_BUSY_DETECTED : L2CAP_EV_LOCAL_BUSY_CLEAR;
4483 l2cap_tx(chan, NULL, NULL, event);
4484 }
4485
4486 static int l2cap_rx_queued_iframes(struct l2cap_chan *chan)
4487 {
4488 int err = 0;
4489 /* Pass sequential frames to l2cap_reassemble_sdu()
4490 * until a gap is encountered.
4491 */
4492
4493 BT_DBG("chan %p", chan);
4494
4495 while (!test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
4496 struct sk_buff *skb;
4497 BT_DBG("Searching for skb with txseq %d (queue len %d)",
4498 chan->buffer_seq, skb_queue_len(&chan->srej_q));
4499
4500 skb = l2cap_ertm_seq_in_queue(&chan->srej_q, chan->buffer_seq);
4501
4502 if (!skb)
4503 break;
4504
4505 skb_unlink(skb, &chan->srej_q);
4506 chan->buffer_seq = __next_seq(chan, chan->buffer_seq);
4507 err = l2cap_reassemble_sdu(chan, skb, &bt_cb(skb)->control);
4508 if (err)
4509 break;
4510 }
4511
4512 if (skb_queue_empty(&chan->srej_q)) {
4513 chan->rx_state = L2CAP_RX_STATE_RECV;
4514 l2cap_send_ack(chan);
4515 }
4516
4517 return err;
4518 }
4519
4520 static void l2cap_handle_srej(struct l2cap_chan *chan,
4521 struct l2cap_ctrl *control)
4522 {
4523 struct sk_buff *skb;
4524
4525 BT_DBG("chan %p, control %p", chan, control);
4526
4527 if (control->reqseq == chan->next_tx_seq) {
4528 BT_DBG("Invalid reqseq %d, disconnecting", control->reqseq);
4529 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
4530 return;
4531 }
4532
4533 skb = l2cap_ertm_seq_in_queue(&chan->tx_q, control->reqseq);
4534
4535 if (skb == NULL) {
4536 BT_DBG("Seq %d not available for retransmission",
4537 control->reqseq);
4538 return;
4539 }
4540
4541 if (chan->max_tx != 0 && bt_cb(skb)->control.retries >= chan->max_tx) {
4542 BT_DBG("Retry limit exceeded (%d)", chan->max_tx);
4543 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
4544 return;
4545 }
4546
4547 clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
4548
4549 if (control->poll) {
4550 l2cap_pass_to_tx(chan, control);
4551
4552 set_bit(CONN_SEND_FBIT, &chan->conn_state);
4553 l2cap_retransmit(chan, control);
4554 l2cap_ertm_send(chan);
4555
4556 if (chan->tx_state == L2CAP_TX_STATE_WAIT_F) {
4557 set_bit(CONN_SREJ_ACT, &chan->conn_state);
4558 chan->srej_save_reqseq = control->reqseq;
4559 }
4560 } else {
4561 l2cap_pass_to_tx_fbit(chan, control);
4562
4563 if (control->final) {
4564 if (chan->srej_save_reqseq != control->reqseq ||
4565 !test_and_clear_bit(CONN_SREJ_ACT,
4566 &chan->conn_state))
4567 l2cap_retransmit(chan, control);
4568 } else {
4569 l2cap_retransmit(chan, control);
4570 if (chan->tx_state == L2CAP_TX_STATE_WAIT_F) {
4571 set_bit(CONN_SREJ_ACT, &chan->conn_state);
4572 chan->srej_save_reqseq = control->reqseq;
4573 }
4574 }
4575 }
4576 }
4577
4578 static void l2cap_handle_rej(struct l2cap_chan *chan,
4579 struct l2cap_ctrl *control)
4580 {
4581 struct sk_buff *skb;
4582
4583 BT_DBG("chan %p, control %p", chan, control);
4584
4585 if (control->reqseq == chan->next_tx_seq) {
4586 BT_DBG("Invalid reqseq %d, disconnecting", control->reqseq);
4587 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
4588 return;
4589 }
4590
4591 skb = l2cap_ertm_seq_in_queue(&chan->tx_q, control->reqseq);
4592
4593 if (chan->max_tx && skb &&
4594 bt_cb(skb)->control.retries >= chan->max_tx) {
4595 BT_DBG("Retry limit exceeded (%d)", chan->max_tx);
4596 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
4597 return;
4598 }
4599
4600 clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
4601
4602 l2cap_pass_to_tx(chan, control);
4603
4604 if (control->final) {
4605 if (!test_and_clear_bit(CONN_REJ_ACT, &chan->conn_state))
4606 l2cap_retransmit_all(chan, control);
4607 } else {
4608 l2cap_retransmit_all(chan, control);
4609 l2cap_ertm_send(chan);
4610 if (chan->tx_state == L2CAP_TX_STATE_WAIT_F)
4611 set_bit(CONN_REJ_ACT, &chan->conn_state);
4612 }
4613 }
4614
4615 static u8 l2cap_classify_txseq(struct l2cap_chan *chan, u16 txseq)
4616 {
4617 BT_DBG("chan %p, txseq %d", chan, txseq);
4618
4619 BT_DBG("last_acked_seq %d, expected_tx_seq %d", chan->last_acked_seq,
4620 chan->expected_tx_seq);
4621
4622 if (chan->rx_state == L2CAP_RX_STATE_SREJ_SENT) {
4623 if (__seq_offset(chan, txseq, chan->last_acked_seq) >=
4624 chan->tx_win) {
4625 /* See notes below regarding "double poll" and
4626 * invalid packets.
4627 */
4628 if (chan->tx_win <= ((chan->tx_win_max + 1) >> 1)) {
4629 BT_DBG("Invalid/Ignore - after SREJ");
4630 return L2CAP_TXSEQ_INVALID_IGNORE;
4631 } else {
4632 BT_DBG("Invalid - in window after SREJ sent");
4633 return L2CAP_TXSEQ_INVALID;
4634 }
4635 }
4636
4637 if (chan->srej_list.head == txseq) {
4638 BT_DBG("Expected SREJ");
4639 return L2CAP_TXSEQ_EXPECTED_SREJ;
4640 }
4641
4642 if (l2cap_ertm_seq_in_queue(&chan->srej_q, txseq)) {
4643 BT_DBG("Duplicate SREJ - txseq already stored");
4644 return L2CAP_TXSEQ_DUPLICATE_SREJ;
4645 }
4646
4647 if (l2cap_seq_list_contains(&chan->srej_list, txseq)) {
4648 BT_DBG("Unexpected SREJ - not requested");
4649 return L2CAP_TXSEQ_UNEXPECTED_SREJ;
4650 }
4651 }
4652
4653 if (chan->expected_tx_seq == txseq) {
4654 if (__seq_offset(chan, txseq, chan->last_acked_seq) >=
4655 chan->tx_win) {
4656 BT_DBG("Invalid - txseq outside tx window");
4657 return L2CAP_TXSEQ_INVALID;
4658 } else {
4659 BT_DBG("Expected");
4660 return L2CAP_TXSEQ_EXPECTED;
4661 }
4662 }
4663
4664 if (__seq_offset(chan, txseq, chan->last_acked_seq) <
4665 __seq_offset(chan, chan->expected_tx_seq,
4666 chan->last_acked_seq)){
4667 BT_DBG("Duplicate - expected_tx_seq later than txseq");
4668 return L2CAP_TXSEQ_DUPLICATE;
4669 }
4670
4671 if (__seq_offset(chan, txseq, chan->last_acked_seq) >= chan->tx_win) {
4672 /* A source of invalid packets is a "double poll" condition,
4673 * where delays cause us to send multiple poll packets. If
4674 * the remote stack receives and processes both polls,
4675 * sequence numbers can wrap around in such a way that a
4676 * resent frame has a sequence number that looks like new data
4677 * with a sequence gap. This would trigger an erroneous SREJ
4678 * request.
4679 *
4680 * Fortunately, this is impossible with a tx window that's
4681 * less than half of the maximum sequence number, which allows
4682 * invalid frames to be safely ignored.
4683 *
4684 * With tx window sizes greater than half of the tx window
4685 * maximum, the frame is invalid and cannot be ignored. This
4686 * causes a disconnect.
4687 */
4688
4689 if (chan->tx_win <= ((chan->tx_win_max + 1) >> 1)) {
4690 BT_DBG("Invalid/Ignore - txseq outside tx window");
4691 return L2CAP_TXSEQ_INVALID_IGNORE;
4692 } else {
4693 BT_DBG("Invalid - txseq outside tx window");
4694 return L2CAP_TXSEQ_INVALID;
4695 }
4696 } else {
4697 BT_DBG("Unexpected - txseq indicates missing frames");
4698 return L2CAP_TXSEQ_UNEXPECTED;
4699 }
4700 }
4701
4702 static int l2cap_rx_state_recv(struct l2cap_chan *chan,
4703 struct l2cap_ctrl *control,
4704 struct sk_buff *skb, u8 event)
4705 {
4706 int err = 0;
4707 bool skb_in_use = 0;
4708
4709 BT_DBG("chan %p, control %p, skb %p, event %d", chan, control, skb,
4710 event);
4711
4712 switch (event) {
4713 case L2CAP_EV_RECV_IFRAME:
4714 switch (l2cap_classify_txseq(chan, control->txseq)) {
4715 case L2CAP_TXSEQ_EXPECTED:
4716 l2cap_pass_to_tx(chan, control);
4717
4718 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
4719 BT_DBG("Busy, discarding expected seq %d",
4720 control->txseq);
4721 break;
4722 }
4723
4724 chan->expected_tx_seq = __next_seq(chan,
4725 control->txseq);
4726
4727 chan->buffer_seq = chan->expected_tx_seq;
4728 skb_in_use = 1;
4729
4730 err = l2cap_reassemble_sdu(chan, skb, control);
4731 if (err)
4732 break;
4733
4734 if (control->final) {
4735 if (!test_and_clear_bit(CONN_REJ_ACT,
4736 &chan->conn_state)) {
4737 control->final = 0;
4738 l2cap_retransmit_all(chan, control);
4739 l2cap_ertm_send(chan);
4740 }
4741 }
4742
4743 if (!test_bit(CONN_LOCAL_BUSY, &chan->conn_state))
4744 l2cap_send_ack(chan);
4745 break;
4746 case L2CAP_TXSEQ_UNEXPECTED:
4747 l2cap_pass_to_tx(chan, control);
4748
4749 /* Can't issue SREJ frames in the local busy state.
4750 * Drop this frame, it will be seen as missing
4751 * when local busy is exited.
4752 */
4753 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
4754 BT_DBG("Busy, discarding unexpected seq %d",
4755 control->txseq);
4756 break;
4757 }
4758
4759 /* There was a gap in the sequence, so an SREJ
4760 * must be sent for each missing frame. The
4761 * current frame is stored for later use.
4762 */
4763 skb_queue_tail(&chan->srej_q, skb);
4764 skb_in_use = 1;
4765 BT_DBG("Queued %p (queue len %d)", skb,
4766 skb_queue_len(&chan->srej_q));
4767
4768 clear_bit(CONN_SREJ_ACT, &chan->conn_state);
4769 l2cap_seq_list_clear(&chan->srej_list);
4770 l2cap_send_srej(chan, control->txseq);
4771
4772 chan->rx_state = L2CAP_RX_STATE_SREJ_SENT;
4773 break;
4774 case L2CAP_TXSEQ_DUPLICATE:
4775 l2cap_pass_to_tx(chan, control);
4776 break;
4777 case L2CAP_TXSEQ_INVALID_IGNORE:
4778 break;
4779 case L2CAP_TXSEQ_INVALID:
4780 default:
4781 l2cap_send_disconn_req(chan->conn, chan,
4782 ECONNRESET);
4783 break;
4784 }
4785 break;
4786 case L2CAP_EV_RECV_RR:
4787 l2cap_pass_to_tx(chan, control);
4788 if (control->final) {
4789 clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
4790
4791 if (!test_and_clear_bit(CONN_REJ_ACT,
4792 &chan->conn_state)) {
4793 control->final = 0;
4794 l2cap_retransmit_all(chan, control);
4795 }
4796
4797 l2cap_ertm_send(chan);
4798 } else if (control->poll) {
4799 l2cap_send_i_or_rr_or_rnr(chan);
4800 } else {
4801 if (test_and_clear_bit(CONN_REMOTE_BUSY,
4802 &chan->conn_state) &&
4803 chan->unacked_frames)
4804 __set_retrans_timer(chan);
4805
4806 l2cap_ertm_send(chan);
4807 }
4808 break;
4809 case L2CAP_EV_RECV_RNR:
4810 set_bit(CONN_REMOTE_BUSY, &chan->conn_state);
4811 l2cap_pass_to_tx(chan, control);
4812 if (control && control->poll) {
4813 set_bit(CONN_SEND_FBIT, &chan->conn_state);
4814 l2cap_send_rr_or_rnr(chan, 0);
4815 }
4816 __clear_retrans_timer(chan);
4817 l2cap_seq_list_clear(&chan->retrans_list);
4818 break;
4819 case L2CAP_EV_RECV_REJ:
4820 l2cap_handle_rej(chan, control);
4821 break;
4822 case L2CAP_EV_RECV_SREJ:
4823 l2cap_handle_srej(chan, control);
4824 break;
4825 default:
4826 break;
4827 }
4828
4829 if (skb && !skb_in_use) {
4830 BT_DBG("Freeing %p", skb);
4831 kfree_skb(skb);
4832 }
4833
4834 return err;
4835 }
4836
4837 static int l2cap_rx_state_srej_sent(struct l2cap_chan *chan,
4838 struct l2cap_ctrl *control,
4839 struct sk_buff *skb, u8 event)
4840 {
4841 int err = 0;
4842 u16 txseq = control->txseq;
4843 bool skb_in_use = 0;
4844
4845 BT_DBG("chan %p, control %p, skb %p, event %d", chan, control, skb,
4846 event);
4847
4848 switch (event) {
4849 case L2CAP_EV_RECV_IFRAME:
4850 switch (l2cap_classify_txseq(chan, txseq)) {
4851 case L2CAP_TXSEQ_EXPECTED:
4852 /* Keep frame for reassembly later */
4853 l2cap_pass_to_tx(chan, control);
4854 skb_queue_tail(&chan->srej_q, skb);
4855 skb_in_use = 1;
4856 BT_DBG("Queued %p (queue len %d)", skb,
4857 skb_queue_len(&chan->srej_q));
4858
4859 chan->expected_tx_seq = __next_seq(chan, txseq);
4860 break;
4861 case L2CAP_TXSEQ_EXPECTED_SREJ:
4862 l2cap_seq_list_pop(&chan->srej_list);
4863
4864 l2cap_pass_to_tx(chan, control);
4865 skb_queue_tail(&chan->srej_q, skb);
4866 skb_in_use = 1;
4867 BT_DBG("Queued %p (queue len %d)", skb,
4868 skb_queue_len(&chan->srej_q));
4869
4870 err = l2cap_rx_queued_iframes(chan);
4871 if (err)
4872 break;
4873
4874 break;
4875 case L2CAP_TXSEQ_UNEXPECTED:
4876 /* Got a frame that can't be reassembled yet.
4877 * Save it for later, and send SREJs to cover
4878 * the missing frames.
4879 */
4880 skb_queue_tail(&chan->srej_q, skb);
4881 skb_in_use = 1;
4882 BT_DBG("Queued %p (queue len %d)", skb,
4883 skb_queue_len(&chan->srej_q));
4884
4885 l2cap_pass_to_tx(chan, control);
4886 l2cap_send_srej(chan, control->txseq);
4887 break;
4888 case L2CAP_TXSEQ_UNEXPECTED_SREJ:
4889 /* This frame was requested with an SREJ, but
4890 * some expected retransmitted frames are
4891 * missing. Request retransmission of missing
4892 * SREJ'd frames.
4893 */
4894 skb_queue_tail(&chan->srej_q, skb);
4895 skb_in_use = 1;
4896 BT_DBG("Queued %p (queue len %d)", skb,
4897 skb_queue_len(&chan->srej_q));
4898
4899 l2cap_pass_to_tx(chan, control);
4900 l2cap_send_srej_list(chan, control->txseq);
4901 break;
4902 case L2CAP_TXSEQ_DUPLICATE_SREJ:
4903 /* We've already queued this frame. Drop this copy. */
4904 l2cap_pass_to_tx(chan, control);
4905 break;
4906 case L2CAP_TXSEQ_DUPLICATE:
4907 /* Expecting a later sequence number, so this frame
4908 * was already received. Ignore it completely.
4909 */
4910 break;
4911 case L2CAP_TXSEQ_INVALID_IGNORE:
4912 break;
4913 case L2CAP_TXSEQ_INVALID:
4914 default:
4915 l2cap_send_disconn_req(chan->conn, chan,
4916 ECONNRESET);
4917 break;
4918 }
4919 break;
4920 case L2CAP_EV_RECV_RR:
4921 l2cap_pass_to_tx(chan, control);
4922 if (control->final) {
4923 clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
4924
4925 if (!test_and_clear_bit(CONN_REJ_ACT,
4926 &chan->conn_state)) {
4927 control->final = 0;
4928 l2cap_retransmit_all(chan, control);
4929 }
4930
4931 l2cap_ertm_send(chan);
4932 } else if (control->poll) {
4933 if (test_and_clear_bit(CONN_REMOTE_BUSY,
4934 &chan->conn_state) &&
4935 chan->unacked_frames) {
4936 __set_retrans_timer(chan);
4937 }
4938
4939 set_bit(CONN_SEND_FBIT, &chan->conn_state);
4940 l2cap_send_srej_tail(chan);
4941 } else {
4942 if (test_and_clear_bit(CONN_REMOTE_BUSY,
4943 &chan->conn_state) &&
4944 chan->unacked_frames)
4945 __set_retrans_timer(chan);
4946
4947 l2cap_send_ack(chan);
4948 }
4949 break;
4950 case L2CAP_EV_RECV_RNR:
4951 set_bit(CONN_REMOTE_BUSY, &chan->conn_state);
4952 l2cap_pass_to_tx(chan, control);
4953 if (control->poll) {
4954 l2cap_send_srej_tail(chan);
4955 } else {
4956 struct l2cap_ctrl rr_control;
4957 memset(&rr_control, 0, sizeof(rr_control));
4958 rr_control.sframe = 1;
4959 rr_control.super = L2CAP_SUPER_RR;
4960 rr_control.reqseq = chan->buffer_seq;
4961 l2cap_send_sframe(chan, &rr_control);
4962 }
4963
4964 break;
4965 case L2CAP_EV_RECV_REJ:
4966 l2cap_handle_rej(chan, control);
4967 break;
4968 case L2CAP_EV_RECV_SREJ:
4969 l2cap_handle_srej(chan, control);
4970 break;
4971 }
4972
4973 if (skb && !skb_in_use) {
4974 BT_DBG("Freeing %p", skb);
4975 kfree_skb(skb);
4976 }
4977
4978 return err;
4979 }
4980
4981 static bool __valid_reqseq(struct l2cap_chan *chan, u16 reqseq)
4982 {
4983 /* Make sure reqseq is for a packet that has been sent but not acked */
4984 u16 unacked;
4985
4986 unacked = __seq_offset(chan, chan->next_tx_seq, chan->expected_ack_seq);
4987 return __seq_offset(chan, chan->next_tx_seq, reqseq) <= unacked;
4988 }
4989
4990 static int l2cap_rx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
4991 struct sk_buff *skb, u8 event)
4992 {
4993 int err = 0;
4994
4995 BT_DBG("chan %p, control %p, skb %p, event %d, state %d", chan,
4996 control, skb, event, chan->rx_state);
4997
4998 if (__valid_reqseq(chan, control->reqseq)) {
4999 switch (chan->rx_state) {
5000 case L2CAP_RX_STATE_RECV:
5001 err = l2cap_rx_state_recv(chan, control, skb, event);
5002 break;
5003 case L2CAP_RX_STATE_SREJ_SENT:
5004 err = l2cap_rx_state_srej_sent(chan, control, skb,
5005 event);
5006 break;
5007 default:
5008 /* shut it down */
5009 break;
5010 }
5011 } else {
5012 BT_DBG("Invalid reqseq %d (next_tx_seq %d, expected_ack_seq %d",
5013 control->reqseq, chan->next_tx_seq,
5014 chan->expected_ack_seq);
5015 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
5016 }
5017
5018 return err;
5019 }
5020
5021 static int l2cap_stream_rx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
5022 struct sk_buff *skb)
5023 {
5024 int err = 0;
5025
5026 BT_DBG("chan %p, control %p, skb %p, state %d", chan, control, skb,
5027 chan->rx_state);
5028
5029 if (l2cap_classify_txseq(chan, control->txseq) ==
5030 L2CAP_TXSEQ_EXPECTED) {
5031 l2cap_pass_to_tx(chan, control);
5032
5033 BT_DBG("buffer_seq %d->%d", chan->buffer_seq,
5034 __next_seq(chan, chan->buffer_seq));
5035
5036 chan->buffer_seq = __next_seq(chan, chan->buffer_seq);
5037
5038 l2cap_reassemble_sdu(chan, skb, control);
5039 } else {
5040 if (chan->sdu) {
5041 kfree_skb(chan->sdu);
5042 chan->sdu = NULL;
5043 }
5044 chan->sdu_last_frag = NULL;
5045 chan->sdu_len = 0;
5046
5047 if (skb) {
5048 BT_DBG("Freeing %p", skb);
5049 kfree_skb(skb);
5050 }
5051 }
5052
5053 chan->last_acked_seq = control->txseq;
5054 chan->expected_tx_seq = __next_seq(chan, control->txseq);
5055
5056 return err;
5057 }
5058
5059 static int l2cap_data_rcv(struct l2cap_chan *chan, struct sk_buff *skb)
5060 {
5061 struct l2cap_ctrl *control = &bt_cb(skb)->control;
5062 u16 len;
5063 u8 event;
5064
5065 __unpack_control(chan, skb);
5066
5067 len = skb->len;
5068
5069 /*
5070 * We can just drop the corrupted I-frame here.
5071 * Receiver will miss it and start proper recovery
5072 * procedures and ask for retransmission.
5073 */
5074 if (l2cap_check_fcs(chan, skb))
5075 goto drop;
5076
5077 if (!control->sframe && control->sar == L2CAP_SAR_START)
5078 len -= L2CAP_SDULEN_SIZE;
5079
5080 if (chan->fcs == L2CAP_FCS_CRC16)
5081 len -= L2CAP_FCS_SIZE;
5082
5083 if (len > chan->mps) {
5084 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
5085 goto drop;
5086 }
5087
5088 if (!control->sframe) {
5089 int err;
5090
5091 BT_DBG("iframe sar %d, reqseq %d, final %d, txseq %d",
5092 control->sar, control->reqseq, control->final,
5093 control->txseq);
5094
5095 /* Validate F-bit - F=0 always valid, F=1 only
5096 * valid in TX WAIT_F
5097 */
5098 if (control->final && chan->tx_state != L2CAP_TX_STATE_WAIT_F)
5099 goto drop;
5100
5101 if (chan->mode != L2CAP_MODE_STREAMING) {
5102 event = L2CAP_EV_RECV_IFRAME;
5103 err = l2cap_rx(chan, control, skb, event);
5104 } else {
5105 err = l2cap_stream_rx(chan, control, skb);
5106 }
5107
5108 if (err)
5109 l2cap_send_disconn_req(chan->conn, chan,
5110 ECONNRESET);
5111 } else {
5112 const u8 rx_func_to_event[4] = {
5113 L2CAP_EV_RECV_RR, L2CAP_EV_RECV_REJ,
5114 L2CAP_EV_RECV_RNR, L2CAP_EV_RECV_SREJ
5115 };
5116
5117 /* Only I-frames are expected in streaming mode */
5118 if (chan->mode == L2CAP_MODE_STREAMING)
5119 goto drop;
5120
5121 BT_DBG("sframe reqseq %d, final %d, poll %d, super %d",
5122 control->reqseq, control->final, control->poll,
5123 control->super);
5124
5125 if (len != 0) {
5126 BT_ERR("%d", len);
5127 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
5128 goto drop;
5129 }
5130
5131 /* Validate F and P bits */
5132 if (control->final && (control->poll ||
5133 chan->tx_state != L2CAP_TX_STATE_WAIT_F))
5134 goto drop;
5135
5136 event = rx_func_to_event[control->super];
5137 if (l2cap_rx(chan, control, skb, event))
5138 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
5139 }
5140
5141 return 0;
5142
5143 drop:
5144 kfree_skb(skb);
5145 return 0;
5146 }
5147
/* Deliver an inbound data frame to the channel identified by @cid.
 *
 * The channel returned by l2cap_get_chan_by_scid() (or freshly created
 * for A2MP and locked here) is held locked for the whole delivery and
 * unlocked at the done label.  The skb is either handed to the channel
 * (recv consumed it / ERTM path consumed it) or freed at drop.
 */
static void l2cap_data_channel(struct l2cap_conn *conn, u16 cid,
			       struct sk_buff *skb)
{
	struct l2cap_chan *chan;

	chan = l2cap_get_chan_by_scid(conn, cid);
	if (!chan) {
		if (cid == L2CAP_CID_A2MP) {
			/* A2MP data may arrive before the fixed channel
			 * exists; create it on demand.
			 */
			chan = a2mp_channel_create(conn, skb);
			if (!chan) {
				kfree_skb(skb);
				return;
			}

			l2cap_chan_lock(chan);
		} else {
			BT_DBG("unknown cid 0x%4.4x", cid);
			/* Drop packet and return */
			kfree_skb(skb);
			return;
		}
	}

	BT_DBG("chan %p, len %d", chan, skb->len);

	if (chan->state != BT_CONNECTED)
		goto drop;

	switch (chan->mode) {
	case L2CAP_MODE_BASIC:
		/* If socket recv buffers overflows we drop data here
		 * which is *bad* because L2CAP has to be reliable.
		 * But we don't have any other choice. L2CAP doesn't
		 * provide flow control mechanism. */

		if (chan->imtu < skb->len)
			goto drop;

		if (!chan->ops->recv(chan, skb))
			goto done;
		break;

	case L2CAP_MODE_ERTM:
	case L2CAP_MODE_STREAMING:
		/* l2cap_data_rcv() consumes the skb on every path. */
		l2cap_data_rcv(chan, skb);
		goto done;

	default:
		BT_DBG("chan %p: bad mode 0x%2.2x", chan, chan->mode);
		break;
	}

drop:
	kfree_skb(skb);

done:
	l2cap_chan_unlock(chan);
}
5206
5207 static void l2cap_conless_channel(struct l2cap_conn *conn, __le16 psm,
5208 struct sk_buff *skb)
5209 {
5210 struct l2cap_chan *chan;
5211
5212 chan = l2cap_global_chan_by_psm(0, psm, conn->src, conn->dst);
5213 if (!chan)
5214 goto drop;
5215
5216 BT_DBG("chan %p, len %d", chan, skb->len);
5217
5218 if (chan->state != BT_BOUND && chan->state != BT_CONNECTED)
5219 goto drop;
5220
5221 if (chan->imtu < skb->len)
5222 goto drop;
5223
5224 if (!chan->ops->recv(chan, skb))
5225 return;
5226
5227 drop:
5228 kfree_skb(skb);
5229 }
5230
5231 static void l2cap_att_channel(struct l2cap_conn *conn, u16 cid,
5232 struct sk_buff *skb)
5233 {
5234 struct l2cap_chan *chan;
5235
5236 chan = l2cap_global_chan_by_scid(0, cid, conn->src, conn->dst);
5237 if (!chan)
5238 goto drop;
5239
5240 BT_DBG("chan %p, len %d", chan, skb->len);
5241
5242 if (chan->state != BT_BOUND && chan->state != BT_CONNECTED)
5243 goto drop;
5244
5245 if (chan->imtu < skb->len)
5246 goto drop;
5247
5248 if (!chan->ops->recv(chan, skb))
5249 return;
5250
5251 drop:
5252 kfree_skb(skb);
5253 }
5254
5255 static void l2cap_recv_frame(struct l2cap_conn *conn, struct sk_buff *skb)
5256 {
5257 struct l2cap_hdr *lh = (void *) skb->data;
5258 u16 cid, len;
5259 __le16 psm;
5260
5261 skb_pull(skb, L2CAP_HDR_SIZE);
5262 cid = __le16_to_cpu(lh->cid);
5263 len = __le16_to_cpu(lh->len);
5264
5265 if (len != skb->len) {
5266 kfree_skb(skb);
5267 return;
5268 }
5269
5270 BT_DBG("len %d, cid 0x%4.4x", len, cid);
5271
5272 switch (cid) {
5273 case L2CAP_CID_LE_SIGNALING:
5274 case L2CAP_CID_SIGNALING:
5275 l2cap_sig_channel(conn, skb);
5276 break;
5277
5278 case L2CAP_CID_CONN_LESS:
5279 psm = get_unaligned((__le16 *) skb->data);
5280 skb_pull(skb, L2CAP_PSMLEN_SIZE);
5281 l2cap_conless_channel(conn, psm, skb);
5282 break;
5283
5284 case L2CAP_CID_LE_DATA:
5285 l2cap_att_channel(conn, cid, skb);
5286 break;
5287
5288 case L2CAP_CID_SMP:
5289 if (smp_sig_channel(conn, skb))
5290 l2cap_conn_del(conn->hcon, EACCES);
5291 break;
5292
5293 default:
5294 l2cap_data_channel(conn, cid, skb);
5295 break;
5296 }
5297 }
5298
5299 /* ---- L2CAP interface with lower layer (HCI) ---- */
5300
5301 int l2cap_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr)
5302 {
5303 int exact = 0, lm1 = 0, lm2 = 0;
5304 struct l2cap_chan *c;
5305
5306 BT_DBG("hdev %s, bdaddr %s", hdev->name, batostr(bdaddr));
5307
5308 /* Find listening sockets and check their link_mode */
5309 read_lock(&chan_list_lock);
5310 list_for_each_entry(c, &chan_list, global_l) {
5311 struct sock *sk = c->sk;
5312
5313 if (c->state != BT_LISTEN)
5314 continue;
5315
5316 if (!bacmp(&bt_sk(sk)->src, &hdev->bdaddr)) {
5317 lm1 |= HCI_LM_ACCEPT;
5318 if (test_bit(FLAG_ROLE_SWITCH, &c->flags))
5319 lm1 |= HCI_LM_MASTER;
5320 exact++;
5321 } else if (!bacmp(&bt_sk(sk)->src, BDADDR_ANY)) {
5322 lm2 |= HCI_LM_ACCEPT;
5323 if (test_bit(FLAG_ROLE_SWITCH, &c->flags))
5324 lm2 |= HCI_LM_MASTER;
5325 }
5326 }
5327 read_unlock(&chan_list_lock);
5328
5329 return exact ? lm1 : lm2;
5330 }
5331
5332 int l2cap_connect_cfm(struct hci_conn *hcon, u8 status)
5333 {
5334 struct l2cap_conn *conn;
5335
5336 BT_DBG("hcon %p bdaddr %s status %d", hcon, batostr(&hcon->dst), status);
5337
5338 if (!status) {
5339 conn = l2cap_conn_add(hcon, status);
5340 if (conn)
5341 l2cap_conn_ready(conn);
5342 } else
5343 l2cap_conn_del(hcon, bt_to_errno(status));
5344
5345 return 0;
5346 }
5347
5348 int l2cap_disconn_ind(struct hci_conn *hcon)
5349 {
5350 struct l2cap_conn *conn = hcon->l2cap_data;
5351
5352 BT_DBG("hcon %p", hcon);
5353
5354 if (!conn)
5355 return HCI_ERROR_REMOTE_USER_TERM;
5356 return conn->disc_reason;
5357 }
5358
5359 int l2cap_disconn_cfm(struct hci_conn *hcon, u8 reason)
5360 {
5361 BT_DBG("hcon %p reason %d", hcon, reason);
5362
5363 l2cap_conn_del(hcon, bt_to_errno(reason));
5364 return 0;
5365 }
5366
5367 static inline void l2cap_check_encryption(struct l2cap_chan *chan, u8 encrypt)
5368 {
5369 if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED)
5370 return;
5371
5372 if (encrypt == 0x00) {
5373 if (chan->sec_level == BT_SECURITY_MEDIUM) {
5374 __set_chan_timer(chan, L2CAP_ENC_TIMEOUT);
5375 } else if (chan->sec_level == BT_SECURITY_HIGH)
5376 l2cap_chan_close(chan, ECONNREFUSED);
5377 } else {
5378 if (chan->sec_level == BT_SECURITY_MEDIUM)
5379 __clear_chan_timer(chan);
5380 }
5381 }
5382
/* HCI security (authentication/encryption) change notification.
 *
 * Walks every channel on the connection under conn->chan_lock, taking
 * each channel lock in turn, and advances or aborts channels whose
 * progress was gated on the security procedure: LE channels become
 * ready, BT_CONNECT channels send their connection request, and
 * BT_CONNECT2 channels answer the pending connection response.
 */
int l2cap_security_cfm(struct hci_conn *hcon, u8 status, u8 encrypt)
{
	struct l2cap_conn *conn = hcon->l2cap_data;
	struct l2cap_chan *chan;

	if (!conn)
		return 0;

	BT_DBG("conn %p status 0x%2.2x encrypt %u", conn, status, encrypt);

	if (hcon->type == LE_LINK) {
		/* Encryption success on LE triggers SMP key distribution. */
		if (!status && encrypt)
			smp_distribute_keys(conn, 0);
		cancel_delayed_work(&conn->security_timer);
	}

	mutex_lock(&conn->chan_lock);

	list_for_each_entry(chan, &conn->chan_l, list) {
		l2cap_chan_lock(chan);

		BT_DBG("chan %p scid 0x%4.4x state %s", chan, chan->scid,
		       state_to_string(chan->state));

		/* A2MP fixed channels are not affected by link security. */
		if (chan->chan_type == L2CAP_CHAN_CONN_FIX_A2MP) {
			l2cap_chan_unlock(chan);
			continue;
		}

		if (chan->scid == L2CAP_CID_LE_DATA) {
			if (!status && encrypt) {
				chan->sec_level = hcon->sec_level;
				l2cap_chan_ready(chan);
			}

			l2cap_chan_unlock(chan);
			continue;
		}

		/* A pending connect will be resolved by its own path. */
		if (test_bit(CONF_CONNECT_PEND, &chan->conf_state)) {
			l2cap_chan_unlock(chan);
			continue;
		}

		if (!status && (chan->state == BT_CONNECTED ||
				chan->state == BT_CONFIG)) {
			struct sock *sk = chan->sk;

			/* Security done: resume the suspended socket. */
			clear_bit(BT_SK_SUSPEND, &bt_sk(sk)->flags);
			sk->sk_state_change(sk);

			l2cap_check_encryption(chan, encrypt);
			l2cap_chan_unlock(chan);
			continue;
		}

		if (chan->state == BT_CONNECT) {
			if (!status) {
				l2cap_send_conn_req(chan);
			} else {
				__set_chan_timer(chan, L2CAP_DISC_TIMEOUT);
			}
		} else if (chan->state == BT_CONNECT2) {
			struct sock *sk = chan->sk;
			struct l2cap_conn_rsp rsp;
			__u16 res, stat;

			lock_sock(sk);

			if (!status) {
				if (test_bit(BT_SK_DEFER_SETUP,
					     &bt_sk(sk)->flags)) {
					/* Userspace must authorize first. */
					struct sock *parent = bt_sk(sk)->parent;
					res = L2CAP_CR_PEND;
					stat = L2CAP_CS_AUTHOR_PEND;
					if (parent)
						parent->sk_data_ready(parent, 0);
				} else {
					__l2cap_state_change(chan, BT_CONFIG);
					res = L2CAP_CR_SUCCESS;
					stat = L2CAP_CS_NO_INFO;
				}
			} else {
				/* Security failed: reject and schedule
				 * disconnect.
				 */
				__l2cap_state_change(chan, BT_DISCONN);
				__set_chan_timer(chan, L2CAP_DISC_TIMEOUT);
				res = L2CAP_CR_SEC_BLOCK;
				stat = L2CAP_CS_NO_INFO;
			}

			release_sock(sk);

			rsp.scid = cpu_to_le16(chan->dcid);
			rsp.dcid = cpu_to_le16(chan->scid);
			rsp.result = cpu_to_le16(res);
			rsp.status = cpu_to_le16(stat);
			l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
				       sizeof(rsp), &rsp);

			/* Successful accept: kick off configuration. */
			if (!test_bit(CONF_REQ_SENT, &chan->conf_state) &&
			    res == L2CAP_CR_SUCCESS) {
				char buf[128];
				set_bit(CONF_REQ_SENT, &chan->conf_state);
				l2cap_send_cmd(conn, l2cap_get_ident(conn),
					       L2CAP_CONF_REQ,
					       l2cap_build_conf_req(chan, buf),
					       buf);
				chan->num_conf_req++;
			}
		}

		l2cap_chan_unlock(chan);
	}

	mutex_unlock(&conn->chan_lock);

	return 0;
}
5500
/* Reassemble ACL fragments into complete L2CAP frames.
 *
 * Start fragments (no ACL_CONT flag) carry the basic L2CAP header,
 * which gives the total frame length; a complete frame is dispatched
 * immediately, otherwise a reassembly buffer (conn->rx_skb/rx_len) is
 * allocated and continuation fragments are appended until the frame is
 * complete.  Framing errors mark the connection unreliable and reset
 * the reassembly state.  The input @skb is always freed here (the
 * complete-frame fast path hands it off and returns early instead);
 * conn->rx_skb ownership passes to l2cap_recv_frame() when complete.
 */
int l2cap_recv_acldata(struct hci_conn *hcon, struct sk_buff *skb, u16 flags)
{
	struct l2cap_conn *conn = hcon->l2cap_data;

	if (!conn)
		conn = l2cap_conn_add(hcon, 0);

	if (!conn)
		goto drop;

	BT_DBG("conn %p len %d flags 0x%x", conn, skb->len, flags);

	if (!(flags & ACL_CONT)) {
		struct l2cap_hdr *hdr;
		int len;

		/* A new start fragment while one is pending means the
		 * previous frame was truncated; discard it.
		 */
		if (conn->rx_len) {
			BT_ERR("Unexpected start frame (len %d)", skb->len);
			kfree_skb(conn->rx_skb);
			conn->rx_skb = NULL;
			conn->rx_len = 0;
			l2cap_conn_unreliable(conn, ECOMM);
		}

		/* Start fragment always begin with Basic L2CAP header */
		if (skb->len < L2CAP_HDR_SIZE) {
			BT_ERR("Frame is too short (len %d)", skb->len);
			l2cap_conn_unreliable(conn, ECOMM);
			goto drop;
		}

		hdr = (struct l2cap_hdr *) skb->data;
		len = __le16_to_cpu(hdr->len) + L2CAP_HDR_SIZE;

		if (len == skb->len) {
			/* Complete frame received */
			l2cap_recv_frame(conn, skb);
			return 0;
		}

		BT_DBG("Start: total len %d, frag len %d", len, skb->len);

		if (skb->len > len) {
			BT_ERR("Frame is too long (len %d, expected len %d)",
			       skb->len, len);
			l2cap_conn_unreliable(conn, ECOMM);
			goto drop;
		}

		/* Allocate skb for the complete frame (with header) */
		conn->rx_skb = bt_skb_alloc(len, GFP_ATOMIC);
		if (!conn->rx_skb)
			goto drop;

		skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
					  skb->len);
		conn->rx_len = len - skb->len;
	} else {
		BT_DBG("Cont: frag len %d (expecting %d)", skb->len, conn->rx_len);

		/* Continuation without a pending start fragment. */
		if (!conn->rx_len) {
			BT_ERR("Unexpected continuation frame (len %d)", skb->len);
			l2cap_conn_unreliable(conn, ECOMM);
			goto drop;
		}

		if (skb->len > conn->rx_len) {
			BT_ERR("Fragment is too long (len %d, expected %d)",
			       skb->len, conn->rx_len);
			kfree_skb(conn->rx_skb);
			conn->rx_skb = NULL;
			conn->rx_len = 0;
			l2cap_conn_unreliable(conn, ECOMM);
			goto drop;
		}

		skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
					  skb->len);
		conn->rx_len -= skb->len;

		if (!conn->rx_len) {
			/* Complete frame received */
			l2cap_recv_frame(conn, conn->rx_skb);
			conn->rx_skb = NULL;
		}
	}

drop:
	/* The fragment's data has been copied or rejected either way. */
	kfree_skb(skb);
	return 0;
}
5592
5593 static int l2cap_debugfs_show(struct seq_file *f, void *p)
5594 {
5595 struct l2cap_chan *c;
5596
5597 read_lock(&chan_list_lock);
5598
5599 list_for_each_entry(c, &chan_list, global_l) {
5600 struct sock *sk = c->sk;
5601
5602 seq_printf(f, "%s %s %d %d 0x%4.4x 0x%4.4x %d %d %d %d\n",
5603 batostr(&bt_sk(sk)->src),
5604 batostr(&bt_sk(sk)->dst),
5605 c->state, __le16_to_cpu(c->psm),
5606 c->scid, c->dcid, c->imtu, c->omtu,
5607 c->sec_level, c->mode);
5608 }
5609
5610 read_unlock(&chan_list_lock);
5611
5612 return 0;
5613 }
5614
5615 static int l2cap_debugfs_open(struct inode *inode, struct file *file)
5616 {
5617 return single_open(file, l2cap_debugfs_show, inode->i_private);
5618 }
5619
/* File operations for the "l2cap" debugfs entry (read-only seq_file). */
static const struct file_operations l2cap_debugfs_fops = {
	.open		= l2cap_debugfs_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};
5626
/* dentry of the "l2cap" debugfs file; created in l2cap_init(). */
static struct dentry *l2cap_debugfs;
5628
5629 int __init l2cap_init(void)
5630 {
5631 int err;
5632
5633 err = l2cap_init_sockets();
5634 if (err < 0)
5635 return err;
5636
5637 if (bt_debugfs) {
5638 l2cap_debugfs = debugfs_create_file("l2cap", 0444,
5639 bt_debugfs, NULL, &l2cap_debugfs_fops);
5640 if (!l2cap_debugfs)
5641 BT_ERR("Failed to create L2CAP debug file");
5642 }
5643
5644 return 0;
5645 }
5646
5647 void l2cap_exit(void)
5648 {
5649 debugfs_remove(l2cap_debugfs);
5650 l2cap_cleanup_sockets();
5651 }
5652
/* Runtime knob: setting disable_ertm forces Basic mode negotiation. */
module_param(disable_ertm, bool, 0644);
MODULE_PARM_DESC(disable_ertm, "Disable enhanced retransmission mode");