Bluetooth: Refactor l2cap_send_sframe
net/bluetooth/l2cap_core.c
1 /*
2 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (C) 2000-2001 Qualcomm Incorporated
4 Copyright (C) 2009-2010 Gustavo F. Padovan <gustavo@padovan.org>
5 Copyright (C) 2010 Google Inc.
6 Copyright (C) 2011 ProFUSION Embedded Systems
7 Copyright (c) 2012 Code Aurora Forum. All rights reserved.
8
9 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
10
11 This program is free software; you can redistribute it and/or modify
12 it under the terms of the GNU General Public License version 2 as
13 published by the Free Software Foundation;
14
15 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
16 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
18 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
19 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
20 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
21 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
22 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
23
24 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
25 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
26 SOFTWARE IS DISCLAIMED.
27 */
28
29 /* Bluetooth L2CAP core. */
30
31 #include <linux/module.h>
32
33 #include <linux/types.h>
34 #include <linux/capability.h>
35 #include <linux/errno.h>
36 #include <linux/kernel.h>
37 #include <linux/sched.h>
38 #include <linux/slab.h>
39 #include <linux/poll.h>
40 #include <linux/fcntl.h>
41 #include <linux/init.h>
42 #include <linux/interrupt.h>
43 #include <linux/socket.h>
44 #include <linux/skbuff.h>
45 #include <linux/list.h>
46 #include <linux/device.h>
47 #include <linux/debugfs.h>
48 #include <linux/seq_file.h>
49 #include <linux/uaccess.h>
50 #include <linux/crc16.h>
51 #include <net/sock.h>
52
53 #include <asm/unaligned.h>
54
55 #include <net/bluetooth/bluetooth.h>
56 #include <net/bluetooth/hci_core.h>
57 #include <net/bluetooth/l2cap.h>
58 #include <net/bluetooth/smp.h>
59
60 bool disable_ertm;
61
62 static u32 l2cap_feat_mask = L2CAP_FEAT_FIXED_CHAN;
63 static u8 l2cap_fixed_chan[8] = { L2CAP_FC_L2CAP, };
64
65 static LIST_HEAD(chan_list);
66 static DEFINE_RWLOCK(chan_list_lock);
67
68 static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
69 u8 code, u8 ident, u16 dlen, void *data);
70 static void l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len,
71 void *data);
72 static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data);
73 static void l2cap_send_disconn_req(struct l2cap_conn *conn,
74 struct l2cap_chan *chan, int err);
75
76 static int l2cap_tx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
77 struct sk_buff_head *skbs, u8 event);
78
79 /* ---- L2CAP channels ---- */
80
81 static struct l2cap_chan *__l2cap_get_chan_by_dcid(struct l2cap_conn *conn, u16 cid)
82 {
83 struct l2cap_chan *c;
84
85 list_for_each_entry(c, &conn->chan_l, list) {
86 if (c->dcid == cid)
87 return c;
88 }
89 return NULL;
90 }
91
92 static struct l2cap_chan *__l2cap_get_chan_by_scid(struct l2cap_conn *conn, u16 cid)
93 {
94 struct l2cap_chan *c;
95
96 list_for_each_entry(c, &conn->chan_l, list) {
97 if (c->scid == cid)
98 return c;
99 }
100 return NULL;
101 }
102
103 /* Find channel with given SCID.
104 * Returns locked channel. */
105 static struct l2cap_chan *l2cap_get_chan_by_scid(struct l2cap_conn *conn, u16 cid)
106 {
107 struct l2cap_chan *c;
108
109 mutex_lock(&conn->chan_lock);
110 c = __l2cap_get_chan_by_scid(conn, cid);
111 if (c)
112 l2cap_chan_lock(c);
113 mutex_unlock(&conn->chan_lock);
114
115 return c;
116 }
117
118 static struct l2cap_chan *__l2cap_get_chan_by_ident(struct l2cap_conn *conn, u8 ident)
119 {
120 struct l2cap_chan *c;
121
122 list_for_each_entry(c, &conn->chan_l, list) {
123 if (c->ident == ident)
124 return c;
125 }
126 return NULL;
127 }
128
129 static struct l2cap_chan *__l2cap_global_chan_by_addr(__le16 psm, bdaddr_t *src)
130 {
131 struct l2cap_chan *c;
132
133 list_for_each_entry(c, &chan_list, global_l) {
134 if (c->sport == psm && !bacmp(&bt_sk(c->sk)->src, src))
135 return c;
136 }
137 return NULL;
138 }
139
140 int l2cap_add_psm(struct l2cap_chan *chan, bdaddr_t *src, __le16 psm)
141 {
142 int err;
143
144 write_lock(&chan_list_lock);
145
146 if (psm && __l2cap_global_chan_by_addr(psm, src)) {
147 err = -EADDRINUSE;
148 goto done;
149 }
150
151 if (psm) {
152 chan->psm = psm;
153 chan->sport = psm;
154 err = 0;
155 } else {
156 u16 p;
157
158 err = -EINVAL;
159 for (p = 0x1001; p < 0x1100; p += 2)
160 if (!__l2cap_global_chan_by_addr(cpu_to_le16(p), src)) {
161 chan->psm = cpu_to_le16(p);
162 chan->sport = cpu_to_le16(p);
163 err = 0;
164 break;
165 }
166 }
167
168 done:
169 write_unlock(&chan_list_lock);
170 return err;
171 }
172
173 int l2cap_add_scid(struct l2cap_chan *chan, __u16 scid)
174 {
175 write_lock(&chan_list_lock);
176
177 chan->scid = scid;
178
179 write_unlock(&chan_list_lock);
180
181 return 0;
182 }
183
184 static u16 l2cap_alloc_cid(struct l2cap_conn *conn)
185 {
186 u16 cid = L2CAP_CID_DYN_START;
187
188 for (; cid < L2CAP_CID_DYN_END; cid++) {
189 if (!__l2cap_get_chan_by_scid(conn, cid))
190 return cid;
191 }
192
193 return 0;
194 }
195
196 static void __l2cap_state_change(struct l2cap_chan *chan, int state)
197 {
198 BT_DBG("chan %p %s -> %s", chan, state_to_string(chan->state),
199 state_to_string(state));
200
201 chan->state = state;
202 chan->ops->state_change(chan->data, state);
203 }
204
205 static void l2cap_state_change(struct l2cap_chan *chan, int state)
206 {
207 struct sock *sk = chan->sk;
208
209 lock_sock(sk);
210 __l2cap_state_change(chan, state);
211 release_sock(sk);
212 }
213
214 static inline void __l2cap_chan_set_err(struct l2cap_chan *chan, int err)
215 {
216 struct sock *sk = chan->sk;
217
218 sk->sk_err = err;
219 }
220
221 static inline void l2cap_chan_set_err(struct l2cap_chan *chan, int err)
222 {
223 struct sock *sk = chan->sk;
224
225 lock_sock(sk);
226 __l2cap_chan_set_err(chan, err);
227 release_sock(sk);
228 }
229
230 static struct sk_buff *l2cap_ertm_seq_in_queue(struct sk_buff_head *head,
231 u16 seq)
232 {
233 struct sk_buff *skb;
234
235 skb_queue_walk(head, skb) {
236 if (bt_cb(skb)->control.txseq == seq)
237 return skb;
238 }
239
240 return NULL;
241 }
242
243 /* ---- L2CAP sequence number lists ---- */
244
245 /* For ERTM, ordered lists of sequence numbers must be tracked for
246 * SREJ requests that are received and for frames that are to be
247 * retransmitted. These seq_list functions implement a singly-linked
248 * list in an array, where membership in the list can also be checked
249 * in constant time. Items can also be added to the tail of the list
250 * and removed from the head in constant time, without further memory
251 * allocs or frees.
252 */
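/* Illustration (example values, not from the original source): a window
 * size of 63 rounds up to a 64-entry array, so mask is 0x3F and sequence
 * number n lives in slot (n & 0x3F).  Each slot holds the next sequence
 * number in the list, L2CAP_SEQ_LIST_TAIL if n is the last element, or
 * L2CAP_SEQ_LIST_CLEAR if n is not queued at all.
 */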
253
254 static int l2cap_seq_list_init(struct l2cap_seq_list *seq_list, u16 size)
255 {
256 size_t alloc_size, i;
257
258 /* Allocated size is a power of 2 to map sequence numbers
259 * (which may be up to 14 bits) in to a smaller array that is
260 * sized for the negotiated ERTM transmit windows.
261 */
262 alloc_size = roundup_pow_of_two(size);
263
264 seq_list->list = kmalloc(sizeof(u16) * alloc_size, GFP_KERNEL);
265 if (!seq_list->list)
266 return -ENOMEM;
267
268 seq_list->mask = alloc_size - 1;
269 seq_list->head = L2CAP_SEQ_LIST_CLEAR;
270 seq_list->tail = L2CAP_SEQ_LIST_CLEAR;
271 for (i = 0; i < alloc_size; i++)
272 seq_list->list[i] = L2CAP_SEQ_LIST_CLEAR;
273
274 return 0;
275 }
276
277 static inline void l2cap_seq_list_free(struct l2cap_seq_list *seq_list)
278 {
279 kfree(seq_list->list);
280 }
281
282 static inline bool l2cap_seq_list_contains(struct l2cap_seq_list *seq_list,
283 u16 seq)
284 {
285 /* Constant-time check for list membership */
286 return seq_list->list[seq & seq_list->mask] != L2CAP_SEQ_LIST_CLEAR;
287 }
288
289 static u16 l2cap_seq_list_remove(struct l2cap_seq_list *seq_list, u16 seq)
290 {
291 u16 mask = seq_list->mask;
292
293 if (seq_list->head == L2CAP_SEQ_LIST_CLEAR) {
294 /* In case someone tries to pop the head of an empty list */
295 return L2CAP_SEQ_LIST_CLEAR;
296 } else if (seq_list->head == seq) {
297 /* Head can be removed in constant time */
298 seq_list->head = seq_list->list[seq & mask];
299 seq_list->list[seq & mask] = L2CAP_SEQ_LIST_CLEAR;
300
301 if (seq_list->head == L2CAP_SEQ_LIST_TAIL) {
302 seq_list->head = L2CAP_SEQ_LIST_CLEAR;
303 seq_list->tail = L2CAP_SEQ_LIST_CLEAR;
304 }
305 } else {
306 /* Walk the list to find the sequence number */
307 u16 prev = seq_list->head;
308 while (seq_list->list[prev & mask] != seq) {
309 prev = seq_list->list[prev & mask];
310 if (prev == L2CAP_SEQ_LIST_TAIL)
311 return L2CAP_SEQ_LIST_CLEAR;
312 }
313
314 /* Unlink the number from the list and clear it */
315 seq_list->list[prev & mask] = seq_list->list[seq & mask];
316 seq_list->list[seq & mask] = L2CAP_SEQ_LIST_CLEAR;
317 if (seq_list->tail == seq)
318 seq_list->tail = prev;
319 }
320 return seq;
321 }
322
323 static inline u16 l2cap_seq_list_pop(struct l2cap_seq_list *seq_list)
324 {
325 /* Remove the head in constant time */
326 return l2cap_seq_list_remove(seq_list, seq_list->head);
327 }
328
329 static void l2cap_seq_list_clear(struct l2cap_seq_list *seq_list)
330 {
331 u16 i;
332
333 if (seq_list->head == L2CAP_SEQ_LIST_CLEAR)
334 return;
335
336 for (i = 0; i <= seq_list->mask; i++)
337 seq_list->list[i] = L2CAP_SEQ_LIST_CLEAR;
338
339 seq_list->head = L2CAP_SEQ_LIST_CLEAR;
340 seq_list->tail = L2CAP_SEQ_LIST_CLEAR;
341 }
342
343 static void l2cap_seq_list_append(struct l2cap_seq_list *seq_list, u16 seq)
344 {
345 u16 mask = seq_list->mask;
346
347 /* All appends happen in constant time */
348
349 if (seq_list->list[seq & mask] != L2CAP_SEQ_LIST_CLEAR)
350 return;
351
352 if (seq_list->tail == L2CAP_SEQ_LIST_CLEAR)
353 seq_list->head = seq;
354 else
355 seq_list->list[seq_list->tail & mask] = seq;
356
357 seq_list->tail = seq;
358 seq_list->list[seq & mask] = L2CAP_SEQ_LIST_TAIL;
359 }
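/* Example walk-through (hypothetical values): appending 5 to an empty
 * list sets head = tail = 5 and list[5 & mask] = L2CAP_SEQ_LIST_TAIL.
 * Appending 9 then sets list[5 & mask] = 9, tail = 9 and
 * list[9 & mask] = L2CAP_SEQ_LIST_TAIL.  A subsequent pop returns 5 and
 * advances head to 9 without touching any other slot.
 */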
360
361 static void l2cap_chan_timeout(struct work_struct *work)
362 {
363 struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
364 chan_timer.work);
365 struct l2cap_conn *conn = chan->conn;
366 int reason;
367
368 BT_DBG("chan %p state %s", chan, state_to_string(chan->state));
369
370 mutex_lock(&conn->chan_lock);
371 l2cap_chan_lock(chan);
372
373 if (chan->state == BT_CONNECTED || chan->state == BT_CONFIG)
374 reason = ECONNREFUSED;
375 else if (chan->state == BT_CONNECT &&
376 chan->sec_level != BT_SECURITY_SDP)
377 reason = ECONNREFUSED;
378 else
379 reason = ETIMEDOUT;
380
381 l2cap_chan_close(chan, reason);
382
383 l2cap_chan_unlock(chan);
384
385 chan->ops->close(chan->data);
386 mutex_unlock(&conn->chan_lock);
387
388 l2cap_chan_put(chan);
389 }
390
391 struct l2cap_chan *l2cap_chan_create(void)
392 {
393 struct l2cap_chan *chan;
394
395 chan = kzalloc(sizeof(*chan), GFP_ATOMIC);
396 if (!chan)
397 return NULL;
398
399 mutex_init(&chan->lock);
400
401 write_lock(&chan_list_lock);
402 list_add(&chan->global_l, &chan_list);
403 write_unlock(&chan_list_lock);
404
405 INIT_DELAYED_WORK(&chan->chan_timer, l2cap_chan_timeout);
406
407 chan->state = BT_OPEN;
408
409 atomic_set(&chan->refcnt, 1);
410
411 /* This flag is cleared in l2cap_chan_ready() */
412 set_bit(CONF_NOT_COMPLETE, &chan->conf_state);
413
414 BT_DBG("chan %p", chan);
415
416 return chan;
417 }
418
419 void l2cap_chan_destroy(struct l2cap_chan *chan)
420 {
421 write_lock(&chan_list_lock);
422 list_del(&chan->global_l);
423 write_unlock(&chan_list_lock);
424
425 l2cap_chan_put(chan);
426 }
427
428 void l2cap_chan_set_defaults(struct l2cap_chan *chan)
429 {
430 chan->fcs = L2CAP_FCS_CRC16;
431 chan->max_tx = L2CAP_DEFAULT_MAX_TX;
432 chan->tx_win = L2CAP_DEFAULT_TX_WINDOW;
433 chan->tx_win_max = L2CAP_DEFAULT_TX_WINDOW;
434 chan->sec_level = BT_SECURITY_LOW;
435
436 set_bit(FLAG_FORCE_ACTIVE, &chan->flags);
437 }
438
439 static void __l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan)
440 {
441 BT_DBG("conn %p, psm 0x%2.2x, dcid 0x%4.4x", conn,
442 __le16_to_cpu(chan->psm), chan->dcid);
443
444 conn->disc_reason = HCI_ERROR_REMOTE_USER_TERM;
445
446 chan->conn = conn;
447
448 switch (chan->chan_type) {
449 case L2CAP_CHAN_CONN_ORIENTED:
450 if (conn->hcon->type == LE_LINK) {
451 /* LE connection */
452 chan->omtu = L2CAP_LE_DEFAULT_MTU;
453 chan->scid = L2CAP_CID_LE_DATA;
454 chan->dcid = L2CAP_CID_LE_DATA;
455 } else {
456 /* Alloc CID for connection-oriented socket */
457 chan->scid = l2cap_alloc_cid(conn);
458 chan->omtu = L2CAP_DEFAULT_MTU;
459 }
460 break;
461
462 case L2CAP_CHAN_CONN_LESS:
463 /* Connectionless socket */
464 chan->scid = L2CAP_CID_CONN_LESS;
465 chan->dcid = L2CAP_CID_CONN_LESS;
466 chan->omtu = L2CAP_DEFAULT_MTU;
467 break;
468
469 default:
470 /* Raw socket can send/recv signalling messages only */
471 chan->scid = L2CAP_CID_SIGNALING;
472 chan->dcid = L2CAP_CID_SIGNALING;
473 chan->omtu = L2CAP_DEFAULT_MTU;
474 }
475
476 chan->local_id = L2CAP_BESTEFFORT_ID;
477 chan->local_stype = L2CAP_SERV_BESTEFFORT;
478 chan->local_msdu = L2CAP_DEFAULT_MAX_SDU_SIZE;
479 chan->local_sdu_itime = L2CAP_DEFAULT_SDU_ITIME;
480 chan->local_acc_lat = L2CAP_DEFAULT_ACC_LAT;
481 chan->local_flush_to = L2CAP_DEFAULT_FLUSH_TO;
482
483 l2cap_chan_hold(chan);
484
485 list_add(&chan->list, &conn->chan_l);
486 }
487
488 static void l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan)
489 {
490 mutex_lock(&conn->chan_lock);
491 __l2cap_chan_add(conn, chan);
492 mutex_unlock(&conn->chan_lock);
493 }
494
495 static void l2cap_chan_del(struct l2cap_chan *chan, int err)
496 {
497 struct sock *sk = chan->sk;
498 struct l2cap_conn *conn = chan->conn;
499 struct sock *parent = bt_sk(sk)->parent;
500
501 __clear_chan_timer(chan);
502
503 BT_DBG("chan %p, conn %p, err %d", chan, conn, err);
504
505 if (conn) {
506 /* Delete from channel list */
507 list_del(&chan->list);
508
509 l2cap_chan_put(chan);
510
511 chan->conn = NULL;
512 hci_conn_put(conn->hcon);
513 }
514
515 lock_sock(sk);
516
517 __l2cap_state_change(chan, BT_CLOSED);
518 sock_set_flag(sk, SOCK_ZAPPED);
519
520 if (err)
521 __l2cap_chan_set_err(chan, err);
522
523 if (parent) {
524 bt_accept_unlink(sk);
525 parent->sk_data_ready(parent, 0);
526 } else
527 sk->sk_state_change(sk);
528
529 release_sock(sk);
530
531 if (test_bit(CONF_NOT_COMPLETE, &chan->conf_state))
532 return;
533
534 skb_queue_purge(&chan->tx_q);
535
536 if (chan->mode == L2CAP_MODE_ERTM) {
537 struct srej_list *l, *tmp;
538
539 __clear_retrans_timer(chan);
540 __clear_monitor_timer(chan);
541 __clear_ack_timer(chan);
542
543 skb_queue_purge(&chan->srej_q);
544
545 l2cap_seq_list_free(&chan->srej_list);
546 l2cap_seq_list_free(&chan->retrans_list);
547 list_for_each_entry_safe(l, tmp, &chan->srej_l, list) {
548 list_del(&l->list);
549 kfree(l);
550 }
551 }
552 }
553
554 static void l2cap_chan_cleanup_listen(struct sock *parent)
555 {
556 struct sock *sk;
557
558 BT_DBG("parent %p", parent);
559
560 /* Close not yet accepted channels */
561 while ((sk = bt_accept_dequeue(parent, NULL))) {
562 struct l2cap_chan *chan = l2cap_pi(sk)->chan;
563
564 l2cap_chan_lock(chan);
565 __clear_chan_timer(chan);
566 l2cap_chan_close(chan, ECONNRESET);
567 l2cap_chan_unlock(chan);
568
569 chan->ops->close(chan->data);
570 }
571 }
572
573 void l2cap_chan_close(struct l2cap_chan *chan, int reason)
574 {
575 struct l2cap_conn *conn = chan->conn;
576 struct sock *sk = chan->sk;
577
578 BT_DBG("chan %p state %s sk %p", chan,
579 state_to_string(chan->state), sk);
580
581 switch (chan->state) {
582 case BT_LISTEN:
583 lock_sock(sk);
584 l2cap_chan_cleanup_listen(sk);
585
586 __l2cap_state_change(chan, BT_CLOSED);
587 sock_set_flag(sk, SOCK_ZAPPED);
588 release_sock(sk);
589 break;
590
591 case BT_CONNECTED:
592 case BT_CONFIG:
593 if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED &&
594 conn->hcon->type == ACL_LINK) {
595 __set_chan_timer(chan, sk->sk_sndtimeo);
596 l2cap_send_disconn_req(conn, chan, reason);
597 } else
598 l2cap_chan_del(chan, reason);
599 break;
600
601 case BT_CONNECT2:
602 if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED &&
603 conn->hcon->type == ACL_LINK) {
604 struct l2cap_conn_rsp rsp;
605 __u16 result;
606
607 if (test_bit(BT_SK_DEFER_SETUP, &bt_sk(sk)->flags))
608 result = L2CAP_CR_SEC_BLOCK;
609 else
610 result = L2CAP_CR_BAD_PSM;
611 l2cap_state_change(chan, BT_DISCONN);
612
613 rsp.scid = cpu_to_le16(chan->dcid);
614 rsp.dcid = cpu_to_le16(chan->scid);
615 rsp.result = cpu_to_le16(result);
616 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
617 l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
618 sizeof(rsp), &rsp);
619 }
620
621 l2cap_chan_del(chan, reason);
622 break;
623
624 case BT_CONNECT:
625 case BT_DISCONN:
626 l2cap_chan_del(chan, reason);
627 break;
628
629 default:
630 lock_sock(sk);
631 sock_set_flag(sk, SOCK_ZAPPED);
632 release_sock(sk);
633 break;
634 }
635 }
636
637 static inline u8 l2cap_get_auth_type(struct l2cap_chan *chan)
638 {
639 if (chan->chan_type == L2CAP_CHAN_RAW) {
640 switch (chan->sec_level) {
641 case BT_SECURITY_HIGH:
642 return HCI_AT_DEDICATED_BONDING_MITM;
643 case BT_SECURITY_MEDIUM:
644 return HCI_AT_DEDICATED_BONDING;
645 default:
646 return HCI_AT_NO_BONDING;
647 }
648 } else if (chan->psm == cpu_to_le16(0x0001)) {
649 if (chan->sec_level == BT_SECURITY_LOW)
650 chan->sec_level = BT_SECURITY_SDP;
651
652 if (chan->sec_level == BT_SECURITY_HIGH)
653 return HCI_AT_NO_BONDING_MITM;
654 else
655 return HCI_AT_NO_BONDING;
656 } else {
657 switch (chan->sec_level) {
658 case BT_SECURITY_HIGH:
659 return HCI_AT_GENERAL_BONDING_MITM;
660 case BT_SECURITY_MEDIUM:
661 return HCI_AT_GENERAL_BONDING;
662 default:
663 return HCI_AT_NO_BONDING;
664 }
665 }
666 }
667
668 /* Service level security */
669 int l2cap_chan_check_security(struct l2cap_chan *chan)
670 {
671 struct l2cap_conn *conn = chan->conn;
672 __u8 auth_type;
673
674 auth_type = l2cap_get_auth_type(chan);
675
676 return hci_conn_security(conn->hcon, chan->sec_level, auth_type);
677 }
678
679 static u8 l2cap_get_ident(struct l2cap_conn *conn)
680 {
681 u8 id;
682
683 /* Get next available identifier.
684 * 1 - 128 are used by kernel.
685 * 129 - 199 are reserved.
686 * 200 - 254 are used by utilities like l2ping, etc.
687 */
688
689 spin_lock(&conn->lock);
690
691 if (++conn->tx_ident > 128)
692 conn->tx_ident = 1;
693
694 id = conn->tx_ident;
695
696 spin_unlock(&conn->lock);
697
698 return id;
699 }
700
701 static void l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len, void *data)
702 {
703 struct sk_buff *skb = l2cap_build_cmd(conn, code, ident, len, data);
704 u8 flags;
705
706 BT_DBG("code 0x%2.2x", code);
707
708 if (!skb)
709 return;
710
711 if (lmp_no_flush_capable(conn->hcon->hdev))
712 flags = ACL_START_NO_FLUSH;
713 else
714 flags = ACL_START;
715
716 bt_cb(skb)->force_active = BT_POWER_FORCE_ACTIVE_ON;
717 skb->priority = HCI_PRIO_MAX;
718
719 hci_send_acl(conn->hchan, skb, flags);
720 }
721
722 static void l2cap_do_send(struct l2cap_chan *chan, struct sk_buff *skb)
723 {
724 struct hci_conn *hcon = chan->conn->hcon;
725 u16 flags;
726
727 BT_DBG("chan %p, skb %p len %d priority %u", chan, skb, skb->len,
728 skb->priority);
729
730 if (!test_bit(FLAG_FLUSHABLE, &chan->flags) &&
731 lmp_no_flush_capable(hcon->hdev))
732 flags = ACL_START_NO_FLUSH;
733 else
734 flags = ACL_START;
735
736 bt_cb(skb)->force_active = test_bit(FLAG_FORCE_ACTIVE, &chan->flags);
737 hci_send_acl(chan->conn->hchan, skb, flags);
738 }
739
740 static void __unpack_enhanced_control(u16 enh, struct l2cap_ctrl *control)
741 {
742 control->reqseq = (enh & L2CAP_CTRL_REQSEQ) >> L2CAP_CTRL_REQSEQ_SHIFT;
743 control->final = (enh & L2CAP_CTRL_FINAL) >> L2CAP_CTRL_FINAL_SHIFT;
744
745 if (enh & L2CAP_CTRL_FRAME_TYPE) {
746 /* S-Frame */
747 control->sframe = 1;
748 control->poll = (enh & L2CAP_CTRL_POLL) >> L2CAP_CTRL_POLL_SHIFT;
749 control->super = (enh & L2CAP_CTRL_SUPERVISE) >> L2CAP_CTRL_SUPER_SHIFT;
750
751 control->sar = 0;
752 control->txseq = 0;
753 } else {
754 /* I-Frame */
755 control->sframe = 0;
756 control->sar = (enh & L2CAP_CTRL_SAR) >> L2CAP_CTRL_SAR_SHIFT;
757 control->txseq = (enh & L2CAP_CTRL_TXSEQ) >> L2CAP_CTRL_TXSEQ_SHIFT;
758
759 control->poll = 0;
760 control->super = 0;
761 }
762 }
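/* The enhanced (16-bit) control field decoded above follows the
 * L2CAP_CTRL_* layout: bit 0 selects the frame type, bit 7 is the Final
 * bit and bits 8-13 carry ReqSeq; I-frames put TxSeq in bits 1-6 and SAR
 * in bits 14-15, while S-frames use bits 2-3 for the supervisory
 * function and bit 4 for the Poll bit.
 */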
763
764 static void __unpack_extended_control(u32 ext, struct l2cap_ctrl *control)
765 {
766 control->reqseq = (ext & L2CAP_EXT_CTRL_REQSEQ) >> L2CAP_EXT_CTRL_REQSEQ_SHIFT;
767 control->final = (ext & L2CAP_EXT_CTRL_FINAL) >> L2CAP_EXT_CTRL_FINAL_SHIFT;
768
769 if (ext & L2CAP_EXT_CTRL_FRAME_TYPE) {
770 /* S-Frame */
771 control->sframe = 1;
772 control->poll = (ext & L2CAP_EXT_CTRL_POLL) >> L2CAP_EXT_CTRL_POLL_SHIFT;
773 control->super = (ext & L2CAP_EXT_CTRL_SUPERVISE) >> L2CAP_EXT_CTRL_SUPER_SHIFT;
774
775 control->sar = 0;
776 control->txseq = 0;
777 } else {
778 /* I-Frame */
779 control->sframe = 0;
780 control->sar = (ext & L2CAP_EXT_CTRL_SAR) >> L2CAP_EXT_CTRL_SAR_SHIFT;
781 control->txseq = (ext & L2CAP_EXT_CTRL_TXSEQ) >> L2CAP_EXT_CTRL_TXSEQ_SHIFT;
782
783 control->poll = 0;
784 control->super = 0;
785 }
786 }
787
788 static inline void __unpack_control(struct l2cap_chan *chan,
789 struct sk_buff *skb)
790 {
791 if (test_bit(FLAG_EXT_CTRL, &chan->flags)) {
792 __unpack_extended_control(get_unaligned_le32(skb->data),
793 &bt_cb(skb)->control);
794 } else {
795 __unpack_enhanced_control(get_unaligned_le16(skb->data),
796 &bt_cb(skb)->control);
797 }
798 }
799
800 static u32 __pack_extended_control(struct l2cap_ctrl *control)
801 {
802 u32 packed;
803
804 packed = control->reqseq << L2CAP_EXT_CTRL_REQSEQ_SHIFT;
805 packed |= control->final << L2CAP_EXT_CTRL_FINAL_SHIFT;
806
807 if (control->sframe) {
808 packed |= control->poll << L2CAP_EXT_CTRL_POLL_SHIFT;
809 packed |= control->super << L2CAP_EXT_CTRL_SUPER_SHIFT;
810 packed |= L2CAP_EXT_CTRL_FRAME_TYPE;
811 } else {
812 packed |= control->sar << L2CAP_EXT_CTRL_SAR_SHIFT;
813 packed |= control->txseq << L2CAP_EXT_CTRL_TXSEQ_SHIFT;
814 }
815
816 return packed;
817 }
818
819 static u16 __pack_enhanced_control(struct l2cap_ctrl *control)
820 {
821 u16 packed;
822
823 packed = control->reqseq << L2CAP_CTRL_REQSEQ_SHIFT;
824 packed |= control->final << L2CAP_CTRL_FINAL_SHIFT;
825
826 if (control->sframe) {
827 packed |= control->poll << L2CAP_CTRL_POLL_SHIFT;
828 packed |= control->super << L2CAP_CTRL_SUPER_SHIFT;
829 packed |= L2CAP_CTRL_FRAME_TYPE;
830 } else {
831 packed |= control->sar << L2CAP_CTRL_SAR_SHIFT;
832 packed |= control->txseq << L2CAP_CTRL_TXSEQ_SHIFT;
833 }
834
835 return packed;
836 }
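/* Example (hypothetical values): packing an RR S-frame that acknowledges
 * reqseq 5 with no Final or Poll bit set yields
 * (5 << L2CAP_CTRL_REQSEQ_SHIFT) | L2CAP_CTRL_FRAME_TYPE = 0x0501,
 * since L2CAP_SUPER_RR is 0.
 */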
837
838 static inline void __pack_control(struct l2cap_chan *chan,
839 struct l2cap_ctrl *control,
840 struct sk_buff *skb)
841 {
842 if (test_bit(FLAG_EXT_CTRL, &chan->flags)) {
843 put_unaligned_le32(__pack_extended_control(control),
844 skb->data + L2CAP_HDR_SIZE);
845 } else {
846 put_unaligned_le16(__pack_enhanced_control(control),
847 skb->data + L2CAP_HDR_SIZE);
848 }
849 }
850
851 static struct sk_buff *l2cap_create_sframe_pdu(struct l2cap_chan *chan,
852 u32 control)
853 {
854 struct sk_buff *skb;
855 struct l2cap_hdr *lh;
856 int hlen;
857
858 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
859 hlen = L2CAP_EXT_HDR_SIZE;
860 else
861 hlen = L2CAP_ENH_HDR_SIZE;
862
863 if (chan->fcs == L2CAP_FCS_CRC16)
864 hlen += L2CAP_FCS_SIZE;
865
866 skb = bt_skb_alloc(hlen, GFP_KERNEL);
867
868 if (!skb)
869 return ERR_PTR(-ENOMEM);
870
871 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
872 lh->len = cpu_to_le16(hlen - L2CAP_HDR_SIZE);
873 lh->cid = cpu_to_le16(chan->dcid);
874
875 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
876 put_unaligned_le32(control, skb_put(skb, L2CAP_EXT_CTRL_SIZE));
877 else
878 put_unaligned_le16(control, skb_put(skb, L2CAP_ENH_CTRL_SIZE));
879
880 if (chan->fcs == L2CAP_FCS_CRC16) {
881 u16 fcs = crc16(0, (u8 *)skb->data, skb->len);
882 put_unaligned_le16(fcs, skb_put(skb, L2CAP_FCS_SIZE));
883 }
884
885 skb->priority = HCI_PRIO_MAX;
886 return skb;
887 }
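/* The PDU built above is a basic L2CAP header (length + channel ID)
 * followed by the 2- or 4-octet control field and, when CRC16 FCS is in
 * use, a trailing 2-octet FCS computed over everything before it.
 */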
888
889 static void l2cap_send_sframe(struct l2cap_chan *chan,
890 struct l2cap_ctrl *control)
891 {
892 struct sk_buff *skb;
893 u32 control_field;
894
895 BT_DBG("chan %p, control %p", chan, control);
896
897 if (!control->sframe)
898 return;
899
900 if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state) &&
901 !control->poll)
902 control->final = 1;
903
904 if (control->super == L2CAP_SUPER_RR)
905 clear_bit(CONN_RNR_SENT, &chan->conn_state);
906 else if (control->super == L2CAP_SUPER_RNR)
907 set_bit(CONN_RNR_SENT, &chan->conn_state);
908
909 if (control->super != L2CAP_SUPER_SREJ) {
910 chan->last_acked_seq = control->reqseq;
911 __clear_ack_timer(chan);
912 }
913
914 BT_DBG("reqseq %d, final %d, poll %d, super %d", control->reqseq,
915 control->final, control->poll, control->super);
916
917 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
918 control_field = __pack_extended_control(control);
919 else
920 control_field = __pack_enhanced_control(control);
921
922 skb = l2cap_create_sframe_pdu(chan, control_field);
923 if (!IS_ERR(skb))
924 l2cap_do_send(chan, skb);
925 }
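/* A typical caller fills a struct l2cap_ctrl on the stack and lets this
 * function handle the Final bit and RNR bookkeeping, e.g. (sketch
 * mirroring the LOCAL_BUSY_CLEAR handling further down in this file):
 *
 *	struct l2cap_ctrl local_control;
 *
 *	memset(&local_control, 0, sizeof(local_control));
 *	local_control.sframe = 1;
 *	local_control.super = L2CAP_SUPER_RR;
 *	local_control.poll = 1;
 *	local_control.reqseq = chan->buffer_seq;
 *	l2cap_send_sframe(chan, &local_control);
 */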
926
927 static inline void l2cap_send_rr_or_rnr(struct l2cap_chan *chan, u32 control)
928 {
929 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
930 control |= __set_ctrl_super(chan, L2CAP_SUPER_RNR);
931 set_bit(CONN_RNR_SENT, &chan->conn_state);
932 } else
933 control |= __set_ctrl_super(chan, L2CAP_SUPER_RR);
934
935 control |= __set_reqseq(chan, chan->buffer_seq);
936 }
937
938 static inline int __l2cap_no_conn_pending(struct l2cap_chan *chan)
939 {
940 return !test_bit(CONF_CONNECT_PEND, &chan->conf_state);
941 }
942
943 static void l2cap_send_conn_req(struct l2cap_chan *chan)
944 {
945 struct l2cap_conn *conn = chan->conn;
946 struct l2cap_conn_req req;
947
948 req.scid = cpu_to_le16(chan->scid);
949 req.psm = chan->psm;
950
951 chan->ident = l2cap_get_ident(conn);
952
953 set_bit(CONF_CONNECT_PEND, &chan->conf_state);
954
955 l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_REQ, sizeof(req), &req);
956 }
957
958 static void l2cap_chan_ready(struct l2cap_chan *chan)
959 {
960 struct sock *sk = chan->sk;
961 struct sock *parent;
962
963 lock_sock(sk);
964
965 parent = bt_sk(sk)->parent;
966
967 BT_DBG("sk %p, parent %p", sk, parent);
968
969 /* This clears all conf flags, including CONF_NOT_COMPLETE */
970 chan->conf_state = 0;
971 __clear_chan_timer(chan);
972
973 __l2cap_state_change(chan, BT_CONNECTED);
974 sk->sk_state_change(sk);
975
976 if (parent)
977 parent->sk_data_ready(parent, 0);
978
979 release_sock(sk);
980 }
981
982 static void l2cap_do_start(struct l2cap_chan *chan)
983 {
984 struct l2cap_conn *conn = chan->conn;
985
986 if (conn->hcon->type == LE_LINK) {
987 l2cap_chan_ready(chan);
988 return;
989 }
990
991 if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) {
992 if (!(conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE))
993 return;
994
995 if (l2cap_chan_check_security(chan) &&
996 __l2cap_no_conn_pending(chan))
997 l2cap_send_conn_req(chan);
998 } else {
999 struct l2cap_info_req req;
1000 req.type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
1001
1002 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
1003 conn->info_ident = l2cap_get_ident(conn);
1004
1005 schedule_delayed_work(&conn->info_timer, L2CAP_INFO_TIMEOUT);
1006
1007 l2cap_send_cmd(conn, conn->info_ident,
1008 L2CAP_INFO_REQ, sizeof(req), &req);
1009 }
1010 }
1011
1012 static inline int l2cap_mode_supported(__u8 mode, __u32 feat_mask)
1013 {
1014 u32 local_feat_mask = l2cap_feat_mask;
1015 if (!disable_ertm)
1016 local_feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING;
1017
1018 switch (mode) {
1019 case L2CAP_MODE_ERTM:
1020 return L2CAP_FEAT_ERTM & feat_mask & local_feat_mask;
1021 case L2CAP_MODE_STREAMING:
1022 return L2CAP_FEAT_STREAMING & feat_mask & local_feat_mask;
1023 default:
1024 return 0x00;
1025 }
1026 }
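/* For example, a channel requesting L2CAP_MODE_ERTM is only reported as
 * supported when ERTM is enabled locally (disable_ertm not set) and the
 * remote feature mask learned from the information request also
 * advertises L2CAP_FEAT_ERTM; l2cap_conn_start() below closes channels
 * that insist on an unsupported mode (CONF_STATE2_DEVICE).
 */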
1027
1028 static void l2cap_send_disconn_req(struct l2cap_conn *conn, struct l2cap_chan *chan, int err)
1029 {
1030 struct sock *sk = chan->sk;
1031 struct l2cap_disconn_req req;
1032
1033 if (!conn)
1034 return;
1035
1036 if (chan->mode == L2CAP_MODE_ERTM) {
1037 __clear_retrans_timer(chan);
1038 __clear_monitor_timer(chan);
1039 __clear_ack_timer(chan);
1040 }
1041
1042 req.dcid = cpu_to_le16(chan->dcid);
1043 req.scid = cpu_to_le16(chan->scid);
1044 l2cap_send_cmd(conn, l2cap_get_ident(conn),
1045 L2CAP_DISCONN_REQ, sizeof(req), &req);
1046
1047 lock_sock(sk);
1048 __l2cap_state_change(chan, BT_DISCONN);
1049 __l2cap_chan_set_err(chan, err);
1050 release_sock(sk);
1051 }
1052
1053 /* ---- L2CAP connections ---- */
1054 static void l2cap_conn_start(struct l2cap_conn *conn)
1055 {
1056 struct l2cap_chan *chan, *tmp;
1057
1058 BT_DBG("conn %p", conn);
1059
1060 mutex_lock(&conn->chan_lock);
1061
1062 list_for_each_entry_safe(chan, tmp, &conn->chan_l, list) {
1063 struct sock *sk = chan->sk;
1064
1065 l2cap_chan_lock(chan);
1066
1067 if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
1068 l2cap_chan_unlock(chan);
1069 continue;
1070 }
1071
1072 if (chan->state == BT_CONNECT) {
1073 if (!l2cap_chan_check_security(chan) ||
1074 !__l2cap_no_conn_pending(chan)) {
1075 l2cap_chan_unlock(chan);
1076 continue;
1077 }
1078
1079 if (!l2cap_mode_supported(chan->mode, conn->feat_mask)
1080 && test_bit(CONF_STATE2_DEVICE,
1081 &chan->conf_state)) {
1082 l2cap_chan_close(chan, ECONNRESET);
1083 l2cap_chan_unlock(chan);
1084 continue;
1085 }
1086
1087 l2cap_send_conn_req(chan);
1088
1089 } else if (chan->state == BT_CONNECT2) {
1090 struct l2cap_conn_rsp rsp;
1091 char buf[128];
1092 rsp.scid = cpu_to_le16(chan->dcid);
1093 rsp.dcid = cpu_to_le16(chan->scid);
1094
1095 if (l2cap_chan_check_security(chan)) {
1096 lock_sock(sk);
1097 if (test_bit(BT_SK_DEFER_SETUP,
1098 &bt_sk(sk)->flags)) {
1099 struct sock *parent = bt_sk(sk)->parent;
1100 rsp.result = cpu_to_le16(L2CAP_CR_PEND);
1101 rsp.status = cpu_to_le16(L2CAP_CS_AUTHOR_PEND);
1102 if (parent)
1103 parent->sk_data_ready(parent, 0);
1104
1105 } else {
1106 __l2cap_state_change(chan, BT_CONFIG);
1107 rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
1108 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
1109 }
1110 release_sock(sk);
1111 } else {
1112 rsp.result = cpu_to_le16(L2CAP_CR_PEND);
1113 rsp.status = cpu_to_le16(L2CAP_CS_AUTHEN_PEND);
1114 }
1115
1116 l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
1117 sizeof(rsp), &rsp);
1118
1119 if (test_bit(CONF_REQ_SENT, &chan->conf_state) ||
1120 rsp.result != L2CAP_CR_SUCCESS) {
1121 l2cap_chan_unlock(chan);
1122 continue;
1123 }
1124
1125 set_bit(CONF_REQ_SENT, &chan->conf_state);
1126 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
1127 l2cap_build_conf_req(chan, buf), buf);
1128 chan->num_conf_req++;
1129 }
1130
1131 l2cap_chan_unlock(chan);
1132 }
1133
1134 mutex_unlock(&conn->chan_lock);
1135 }
1136
1137 /* Find socket with cid and source/destination bdaddr.
1138 * Returns closest match, locked.
1139 */
1140 static struct l2cap_chan *l2cap_global_chan_by_scid(int state, u16 cid,
1141 bdaddr_t *src,
1142 bdaddr_t *dst)
1143 {
1144 struct l2cap_chan *c, *c1 = NULL;
1145
1146 read_lock(&chan_list_lock);
1147
1148 list_for_each_entry(c, &chan_list, global_l) {
1149 struct sock *sk = c->sk;
1150
1151 if (state && c->state != state)
1152 continue;
1153
1154 if (c->scid == cid) {
1155 int src_match, dst_match;
1156 int src_any, dst_any;
1157
1158 /* Exact match. */
1159 src_match = !bacmp(&bt_sk(sk)->src, src);
1160 dst_match = !bacmp(&bt_sk(sk)->dst, dst);
1161 if (src_match && dst_match) {
1162 read_unlock(&chan_list_lock);
1163 return c;
1164 }
1165
1166 /* Closest match */
1167 src_any = !bacmp(&bt_sk(sk)->src, BDADDR_ANY);
1168 dst_any = !bacmp(&bt_sk(sk)->dst, BDADDR_ANY);
1169 if ((src_match && dst_any) || (src_any && dst_match) ||
1170 (src_any && dst_any))
1171 c1 = c;
1172 }
1173 }
1174
1175 read_unlock(&chan_list_lock);
1176
1177 return c1;
1178 }
1179
1180 static void l2cap_le_conn_ready(struct l2cap_conn *conn)
1181 {
1182 struct sock *parent, *sk;
1183 struct l2cap_chan *chan, *pchan;
1184
1185 BT_DBG("");
1186
1187 /* Check if we have socket listening on cid */
1188 pchan = l2cap_global_chan_by_scid(BT_LISTEN, L2CAP_CID_LE_DATA,
1189 conn->src, conn->dst);
1190 if (!pchan)
1191 return;
1192
1193 parent = pchan->sk;
1194
1195 lock_sock(parent);
1196
1197 /* Check for backlog size */
1198 if (sk_acceptq_is_full(parent)) {
1199 BT_DBG("backlog full %d", parent->sk_ack_backlog);
1200 goto clean;
1201 }
1202
1203 chan = pchan->ops->new_connection(pchan->data);
1204 if (!chan)
1205 goto clean;
1206
1207 sk = chan->sk;
1208
1209 hci_conn_hold(conn->hcon);
1210
1211 bacpy(&bt_sk(sk)->src, conn->src);
1212 bacpy(&bt_sk(sk)->dst, conn->dst);
1213
1214 bt_accept_enqueue(parent, sk);
1215
1216 l2cap_chan_add(conn, chan);
1217
1218 __set_chan_timer(chan, sk->sk_sndtimeo);
1219
1220 __l2cap_state_change(chan, BT_CONNECTED);
1221 parent->sk_data_ready(parent, 0);
1222
1223 clean:
1224 release_sock(parent);
1225 }
1226
1227 static void l2cap_conn_ready(struct l2cap_conn *conn)
1228 {
1229 struct l2cap_chan *chan;
1230
1231 BT_DBG("conn %p", conn);
1232
1233 if (!conn->hcon->out && conn->hcon->type == LE_LINK)
1234 l2cap_le_conn_ready(conn);
1235
1236 if (conn->hcon->out && conn->hcon->type == LE_LINK)
1237 smp_conn_security(conn, conn->hcon->pending_sec_level);
1238
1239 mutex_lock(&conn->chan_lock);
1240
1241 list_for_each_entry(chan, &conn->chan_l, list) {
1242
1243 l2cap_chan_lock(chan);
1244
1245 if (conn->hcon->type == LE_LINK) {
1246 if (smp_conn_security(conn, chan->sec_level))
1247 l2cap_chan_ready(chan);
1248
1249 } else if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
1250 struct sock *sk = chan->sk;
1251 __clear_chan_timer(chan);
1252 lock_sock(sk);
1253 __l2cap_state_change(chan, BT_CONNECTED);
1254 sk->sk_state_change(sk);
1255 release_sock(sk);
1256
1257 } else if (chan->state == BT_CONNECT)
1258 l2cap_do_start(chan);
1259
1260 l2cap_chan_unlock(chan);
1261 }
1262
1263 mutex_unlock(&conn->chan_lock);
1264 }
1265
1266 /* Notify sockets that we cannot guarantee reliability anymore */
1267 static void l2cap_conn_unreliable(struct l2cap_conn *conn, int err)
1268 {
1269 struct l2cap_chan *chan;
1270
1271 BT_DBG("conn %p", conn);
1272
1273 mutex_lock(&conn->chan_lock);
1274
1275 list_for_each_entry(chan, &conn->chan_l, list) {
1276 if (test_bit(FLAG_FORCE_RELIABLE, &chan->flags))
1277 __l2cap_chan_set_err(chan, err);
1278 }
1279
1280 mutex_unlock(&conn->chan_lock);
1281 }
1282
1283 static void l2cap_info_timeout(struct work_struct *work)
1284 {
1285 struct l2cap_conn *conn = container_of(work, struct l2cap_conn,
1286 info_timer.work);
1287
1288 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
1289 conn->info_ident = 0;
1290
1291 l2cap_conn_start(conn);
1292 }
1293
1294 static void l2cap_conn_del(struct hci_conn *hcon, int err)
1295 {
1296 struct l2cap_conn *conn = hcon->l2cap_data;
1297 struct l2cap_chan *chan, *l;
1298
1299 if (!conn)
1300 return;
1301
1302 BT_DBG("hcon %p conn %p, err %d", hcon, conn, err);
1303
1304 kfree_skb(conn->rx_skb);
1305
1306 mutex_lock(&conn->chan_lock);
1307
1308 /* Kill channels */
1309 list_for_each_entry_safe(chan, l, &conn->chan_l, list) {
1310 l2cap_chan_hold(chan);
1311 l2cap_chan_lock(chan);
1312
1313 l2cap_chan_del(chan, err);
1314
1315 l2cap_chan_unlock(chan);
1316
1317 chan->ops->close(chan->data);
1318 l2cap_chan_put(chan);
1319 }
1320
1321 mutex_unlock(&conn->chan_lock);
1322
1323 hci_chan_del(conn->hchan);
1324
1325 if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT)
1326 cancel_delayed_work_sync(&conn->info_timer);
1327
1328 if (test_and_clear_bit(HCI_CONN_LE_SMP_PEND, &hcon->flags)) {
1329 cancel_delayed_work_sync(&conn->security_timer);
1330 smp_chan_destroy(conn);
1331 }
1332
1333 hcon->l2cap_data = NULL;
1334 kfree(conn);
1335 }
1336
1337 static void security_timeout(struct work_struct *work)
1338 {
1339 struct l2cap_conn *conn = container_of(work, struct l2cap_conn,
1340 security_timer.work);
1341
1342 l2cap_conn_del(conn->hcon, ETIMEDOUT);
1343 }
1344
1345 static struct l2cap_conn *l2cap_conn_add(struct hci_conn *hcon, u8 status)
1346 {
1347 struct l2cap_conn *conn = hcon->l2cap_data;
1348 struct hci_chan *hchan;
1349
1350 if (conn || status)
1351 return conn;
1352
1353 hchan = hci_chan_create(hcon);
1354 if (!hchan)
1355 return NULL;
1356
1357 conn = kzalloc(sizeof(struct l2cap_conn), GFP_ATOMIC);
1358 if (!conn) {
1359 hci_chan_del(hchan);
1360 return NULL;
1361 }
1362
1363 hcon->l2cap_data = conn;
1364 conn->hcon = hcon;
1365 conn->hchan = hchan;
1366
1367 BT_DBG("hcon %p conn %p hchan %p", hcon, conn, hchan);
1368
1369 if (hcon->hdev->le_mtu && hcon->type == LE_LINK)
1370 conn->mtu = hcon->hdev->le_mtu;
1371 else
1372 conn->mtu = hcon->hdev->acl_mtu;
1373
1374 conn->src = &hcon->hdev->bdaddr;
1375 conn->dst = &hcon->dst;
1376
1377 conn->feat_mask = 0;
1378
1379 spin_lock_init(&conn->lock);
1380 mutex_init(&conn->chan_lock);
1381
1382 INIT_LIST_HEAD(&conn->chan_l);
1383
1384 if (hcon->type == LE_LINK)
1385 INIT_DELAYED_WORK(&conn->security_timer, security_timeout);
1386 else
1387 INIT_DELAYED_WORK(&conn->info_timer, l2cap_info_timeout);
1388
1389 conn->disc_reason = HCI_ERROR_REMOTE_USER_TERM;
1390
1391 return conn;
1392 }
1393
1394 /* ---- Socket interface ---- */
1395
1396 /* Find socket with psm and source / destination bdaddr.
1397 * Returns closest match.
1398 */
1399 static struct l2cap_chan *l2cap_global_chan_by_psm(int state, __le16 psm,
1400 bdaddr_t *src,
1401 bdaddr_t *dst)
1402 {
1403 struct l2cap_chan *c, *c1 = NULL;
1404
1405 read_lock(&chan_list_lock);
1406
1407 list_for_each_entry(c, &chan_list, global_l) {
1408 struct sock *sk = c->sk;
1409
1410 if (state && c->state != state)
1411 continue;
1412
1413 if (c->psm == psm) {
1414 int src_match, dst_match;
1415 int src_any, dst_any;
1416
1417 /* Exact match. */
1418 src_match = !bacmp(&bt_sk(sk)->src, src);
1419 dst_match = !bacmp(&bt_sk(sk)->dst, dst);
1420 if (src_match && dst_match) {
1421 read_unlock(&chan_list_lock);
1422 return c;
1423 }
1424
1425 /* Closest match */
1426 src_any = !bacmp(&bt_sk(sk)->src, BDADDR_ANY);
1427 dst_any = !bacmp(&bt_sk(sk)->dst, BDADDR_ANY);
1428 if ((src_match && dst_any) || (src_any && dst_match) ||
1429 (src_any && dst_any))
1430 c1 = c;
1431 }
1432 }
1433
1434 read_unlock(&chan_list_lock);
1435
1436 return c1;
1437 }
1438
1439 int l2cap_chan_connect(struct l2cap_chan *chan, __le16 psm, u16 cid,
1440 bdaddr_t *dst, u8 dst_type)
1441 {
1442 struct sock *sk = chan->sk;
1443 bdaddr_t *src = &bt_sk(sk)->src;
1444 struct l2cap_conn *conn;
1445 struct hci_conn *hcon;
1446 struct hci_dev *hdev;
1447 __u8 auth_type;
1448 int err;
1449
1450 BT_DBG("%s -> %s (type %u) psm 0x%2.2x", batostr(src), batostr(dst),
1451 dst_type, __le16_to_cpu(chan->psm));
1452
1453 hdev = hci_get_route(dst, src);
1454 if (!hdev)
1455 return -EHOSTUNREACH;
1456
1457 hci_dev_lock(hdev);
1458
1459 l2cap_chan_lock(chan);
1460
1461 /* PSM must be odd and lsb of upper byte must be 0 */
1462 if ((__le16_to_cpu(psm) & 0x0101) != 0x0001 && !cid &&
1463 chan->chan_type != L2CAP_CHAN_RAW) {
1464 err = -EINVAL;
1465 goto done;
1466 }
1467
1468 if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED && !(psm || cid)) {
1469 err = -EINVAL;
1470 goto done;
1471 }
1472
1473 switch (chan->mode) {
1474 case L2CAP_MODE_BASIC:
1475 break;
1476 case L2CAP_MODE_ERTM:
1477 case L2CAP_MODE_STREAMING:
1478 if (!disable_ertm)
1479 break;
1480 /* fall through */
1481 default:
1482 err = -ENOTSUPP;
1483 goto done;
1484 }
1485
1486 lock_sock(sk);
1487
1488 switch (sk->sk_state) {
1489 case BT_CONNECT:
1490 case BT_CONNECT2:
1491 case BT_CONFIG:
1492 /* Already connecting */
1493 err = 0;
1494 release_sock(sk);
1495 goto done;
1496
1497 case BT_CONNECTED:
1498 /* Already connected */
1499 err = -EISCONN;
1500 release_sock(sk);
1501 goto done;
1502
1503 case BT_OPEN:
1504 case BT_BOUND:
1505 /* Can connect */
1506 break;
1507
1508 default:
1509 err = -EBADFD;
1510 release_sock(sk);
1511 goto done;
1512 }
1513
1514 /* Set destination address and psm */
1515 bacpy(&bt_sk(sk)->dst, dst);
1516
1517 release_sock(sk);
1518
1519 chan->psm = psm;
1520 chan->dcid = cid;
1521
1522 auth_type = l2cap_get_auth_type(chan);
1523
1524 if (chan->dcid == L2CAP_CID_LE_DATA)
1525 hcon = hci_connect(hdev, LE_LINK, dst, dst_type,
1526 chan->sec_level, auth_type);
1527 else
1528 hcon = hci_connect(hdev, ACL_LINK, dst, dst_type,
1529 chan->sec_level, auth_type);
1530
1531 if (IS_ERR(hcon)) {
1532 err = PTR_ERR(hcon);
1533 goto done;
1534 }
1535
1536 conn = l2cap_conn_add(hcon, 0);
1537 if (!conn) {
1538 hci_conn_put(hcon);
1539 err = -ENOMEM;
1540 goto done;
1541 }
1542
1543 if (hcon->type == LE_LINK) {
1544 err = 0;
1545
1546 if (!list_empty(&conn->chan_l)) {
1547 err = -EBUSY;
1548 hci_conn_put(hcon);
1549 }
1550
1551 if (err)
1552 goto done;
1553 }
1554
1555 /* Update source addr of the socket */
1556 bacpy(src, conn->src);
1557
1558 l2cap_chan_unlock(chan);
1559 l2cap_chan_add(conn, chan);
1560 l2cap_chan_lock(chan);
1561
1562 l2cap_state_change(chan, BT_CONNECT);
1563 __set_chan_timer(chan, sk->sk_sndtimeo);
1564
1565 if (hcon->state == BT_CONNECTED) {
1566 if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
1567 __clear_chan_timer(chan);
1568 if (l2cap_chan_check_security(chan))
1569 l2cap_state_change(chan, BT_CONNECTED);
1570 } else
1571 l2cap_do_start(chan);
1572 }
1573
1574 err = 0;
1575
1576 done:
1577 l2cap_chan_unlock(chan);
1578 hci_dev_unlock(hdev);
1579 hci_dev_put(hdev);
1580 return err;
1581 }
1582
1583 int __l2cap_wait_ack(struct sock *sk)
1584 {
1585 struct l2cap_chan *chan = l2cap_pi(sk)->chan;
1586 DECLARE_WAITQUEUE(wait, current);
1587 int err = 0;
1588 int timeo = HZ/5;
1589
1590 add_wait_queue(sk_sleep(sk), &wait);
1591 set_current_state(TASK_INTERRUPTIBLE);
1592 while (chan->unacked_frames > 0 && chan->conn) {
1593 if (!timeo)
1594 timeo = HZ/5;
1595
1596 if (signal_pending(current)) {
1597 err = sock_intr_errno(timeo);
1598 break;
1599 }
1600
1601 release_sock(sk);
1602 timeo = schedule_timeout(timeo);
1603 lock_sock(sk);
1604 set_current_state(TASK_INTERRUPTIBLE);
1605
1606 err = sock_error(sk);
1607 if (err)
1608 break;
1609 }
1610 set_current_state(TASK_RUNNING);
1611 remove_wait_queue(sk_sleep(sk), &wait);
1612 return err;
1613 }
1614
1615 static void l2cap_monitor_timeout(struct work_struct *work)
1616 {
1617 struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
1618 monitor_timer.work);
1619
1620 BT_DBG("chan %p", chan);
1621
1622 l2cap_chan_lock(chan);
1623
1624 if (chan->retry_count >= chan->remote_max_tx) {
1625 l2cap_send_disconn_req(chan->conn, chan, ECONNABORTED);
1626 l2cap_chan_unlock(chan);
1627 l2cap_chan_put(chan);
1628 return;
1629 }
1630
1631 chan->retry_count++;
1632 __set_monitor_timer(chan);
1633
1634 l2cap_send_rr_or_rnr(chan, L2CAP_CTRL_POLL);
1635 l2cap_chan_unlock(chan);
1636 l2cap_chan_put(chan);
1637 }
1638
1639 static void l2cap_retrans_timeout(struct work_struct *work)
1640 {
1641 struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
1642 retrans_timer.work);
1643
1644 BT_DBG("chan %p", chan);
1645
1646 l2cap_chan_lock(chan);
1647
1648 chan->retry_count = 1;
1649 __set_monitor_timer(chan);
1650
1651 set_bit(CONN_WAIT_F, &chan->conn_state);
1652
1653 l2cap_send_rr_or_rnr(chan, L2CAP_CTRL_POLL);
1654
1655 l2cap_chan_unlock(chan);
1656 l2cap_chan_put(chan);
1657 }
1658
1659 static void l2cap_drop_acked_frames(struct l2cap_chan *chan)
1660 {
1661 struct sk_buff *skb;
1662
1663 while ((skb = skb_peek(&chan->tx_q)) &&
1664 chan->unacked_frames) {
1665 if (bt_cb(skb)->control.txseq == chan->expected_ack_seq)
1666 break;
1667
1668 skb = skb_dequeue(&chan->tx_q);
1669 kfree_skb(skb);
1670
1671 chan->unacked_frames--;
1672 }
1673
1674 if (!chan->unacked_frames)
1675 __clear_retrans_timer(chan);
1676 }
1677
1678 static int l2cap_streaming_send(struct l2cap_chan *chan,
1679 struct sk_buff_head *skbs)
1680 {
1681 struct sk_buff *skb;
1682 struct l2cap_ctrl *control;
1683
1684 BT_DBG("chan %p, skbs %p", chan, skbs);
1685
1686 if (chan->state != BT_CONNECTED)
1687 return -ENOTCONN;
1688
1689 skb_queue_splice_tail_init(skbs, &chan->tx_q);
1690
1691 while (!skb_queue_empty(&chan->tx_q)) {
1692
1693 skb = skb_dequeue(&chan->tx_q);
1694
1695 bt_cb(skb)->control.retries = 1;
1696 control = &bt_cb(skb)->control;
1697
1698 control->reqseq = 0;
1699 control->txseq = chan->next_tx_seq;
1700
1701 __pack_control(chan, control, skb);
1702
1703 if (chan->fcs == L2CAP_FCS_CRC16) {
1704 u16 fcs = crc16(0, (u8 *) skb->data, skb->len);
1705 put_unaligned_le16(fcs, skb_put(skb, L2CAP_FCS_SIZE));
1706 }
1707
1708 l2cap_do_send(chan, skb);
1709
1710 BT_DBG("Sent txseq %d", (int)control->txseq);
1711
1712 chan->next_tx_seq = __next_seq(chan, chan->next_tx_seq);
1713 chan->frames_sent++;
1714 }
1715
1716 return 0;
1717 }
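/* In streaming mode each frame is sent and immediately forgotten:
 * nothing stays on tx_q for retransmission, so txseq only lets the
 * receiver detect missing PDUs.
 */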
1718
1719 static void l2cap_retransmit_one_frame(struct l2cap_chan *chan, u16 tx_seq)
1720 {
1721 struct sk_buff *skb, *tx_skb;
1722 u16 fcs;
1723 u32 control;
1724
1725 skb = skb_peek(&chan->tx_q);
1726 if (!skb)
1727 return;
1728
1729 while (bt_cb(skb)->control.txseq != tx_seq) {
1730 if (skb_queue_is_last(&chan->tx_q, skb))
1731 return;
1732
1733 skb = skb_queue_next(&chan->tx_q, skb);
1734 }
1735
1736 if (bt_cb(skb)->control.retries == chan->remote_max_tx &&
1737 chan->remote_max_tx) {
1738 l2cap_send_disconn_req(chan->conn, chan, ECONNABORTED);
1739 return;
1740 }
1741
1742 tx_skb = skb_clone(skb, GFP_ATOMIC);
1743 bt_cb(skb)->control.retries++;
1744
1745 control = __get_control(chan, tx_skb->data + L2CAP_HDR_SIZE);
1746 control &= __get_sar_mask(chan);
1747
1748 if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state))
1749 control |= __set_ctrl_final(chan);
1750
1751 control |= __set_reqseq(chan, chan->buffer_seq);
1752 control |= __set_txseq(chan, tx_seq);
1753
1754 __put_control(chan, control, tx_skb->data + L2CAP_HDR_SIZE);
1755
1756 if (chan->fcs == L2CAP_FCS_CRC16) {
1757 fcs = crc16(0, (u8 *)tx_skb->data,
1758 tx_skb->len - L2CAP_FCS_SIZE);
1759 put_unaligned_le16(fcs,
1760 tx_skb->data + tx_skb->len - L2CAP_FCS_SIZE);
1761 }
1762
1763 l2cap_do_send(chan, tx_skb);
1764 }
1765
1766 static int l2cap_ertm_send(struct l2cap_chan *chan)
1767 {
1768 struct sk_buff *skb, *tx_skb;
1769 struct l2cap_ctrl *control;
1770 int sent = 0;
1771
1772 BT_DBG("chan %p", chan);
1773
1774 if (chan->state != BT_CONNECTED)
1775 return -ENOTCONN;
1776
1777 if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state))
1778 return 0;
1779
1780 while (chan->tx_send_head &&
1781 chan->unacked_frames < chan->remote_tx_win &&
1782 chan->tx_state == L2CAP_TX_STATE_XMIT) {
1783
1784 skb = chan->tx_send_head;
1785
1786 bt_cb(skb)->control.retries = 1;
1787 control = &bt_cb(skb)->control;
1788
1789 if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state))
1790 control->final = 1;
1791
1792 control->reqseq = chan->buffer_seq;
1793 chan->last_acked_seq = chan->buffer_seq;
1794 control->txseq = chan->next_tx_seq;
1795
1796 __pack_control(chan, control, skb);
1797
1798 if (chan->fcs == L2CAP_FCS_CRC16) {
1799 u16 fcs = crc16(0, (u8 *) skb->data, skb->len);
1800 put_unaligned_le16(fcs, skb_put(skb, L2CAP_FCS_SIZE));
1801 }
1802
1803 /* Clone after data has been modified. Data is assumed to be
1804 read-only (for locking purposes) on cloned sk_buffs.
1805 */
1806 tx_skb = skb_clone(skb, GFP_KERNEL);
1807
1808 if (!tx_skb)
1809 break;
1810
1811 __set_retrans_timer(chan);
1812
1813 chan->next_tx_seq = __next_seq(chan, chan->next_tx_seq);
1814 chan->unacked_frames++;
1815 chan->frames_sent++;
1816 sent++;
1817
1818 if (skb_queue_is_last(&chan->tx_q, skb))
1819 chan->tx_send_head = NULL;
1820 else
1821 chan->tx_send_head = skb_queue_next(&chan->tx_q, skb);
1822
1823 l2cap_do_send(chan, tx_skb);
1824 BT_DBG("Sent txseq %d", (int)control->txseq);
1825 }
1826
1827 BT_DBG("Sent %d, %d unacked, %d in ERTM queue", sent,
1828 (int) chan->unacked_frames, skb_queue_len(&chan->tx_q));
1829
1830 return sent;
1831 }
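/* Note that only a clone of each I-frame leaves the queue: the original
 * skb stays on tx_q so it can be retransmitted until the peer's ReqSeq
 * acknowledges it, and sending stops once unacked_frames reaches the
 * remote transmit window (remote_tx_win).
 */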
1832
1833 static int l2cap_retransmit_frames(struct l2cap_chan *chan)
1834 {
1835 int ret;
1836
1837 if (!skb_queue_empty(&chan->tx_q))
1838 chan->tx_send_head = chan->tx_q.next;
1839
1840 chan->next_tx_seq = chan->expected_ack_seq;
1841 ret = l2cap_ertm_send(chan);
1842 return ret;
1843 }
1844
1845 static void __l2cap_send_ack(struct l2cap_chan *chan)
1846 {
1847 u32 control = 0;
1848
1849 control |= __set_reqseq(chan, chan->buffer_seq);
1850
1851 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
1852 control |= __set_ctrl_super(chan, L2CAP_SUPER_RNR);
1853 set_bit(CONN_RNR_SENT, &chan->conn_state);
1854 return;
1855 }
1856
1857 if (l2cap_ertm_send(chan) > 0)
1858 return;
1859
1860 control |= __set_ctrl_super(chan, L2CAP_SUPER_RR);
1861 }
1862
1863 static void l2cap_send_ack(struct l2cap_chan *chan)
1864 {
1865 __clear_ack_timer(chan);
1866 __l2cap_send_ack(chan);
1867 }
1868
1869 static void l2cap_send_srejtail(struct l2cap_chan *chan)
1870 {
1871 struct srej_list *tail;
1872 u32 control;
1873
1874 control = __set_ctrl_super(chan, L2CAP_SUPER_SREJ);
1875 control |= __set_ctrl_final(chan);
1876
1877 tail = list_entry((&chan->srej_l)->prev, struct srej_list, list);
1878 control |= __set_reqseq(chan, tail->tx_seq);
1879 }
1880
1881 static inline int l2cap_skbuff_fromiovec(struct l2cap_chan *chan,
1882 struct msghdr *msg, int len,
1883 int count, struct sk_buff *skb)
1884 {
1885 struct l2cap_conn *conn = chan->conn;
1886 struct sk_buff **frag;
1887 int sent = 0;
1888
1889 if (memcpy_fromiovec(skb_put(skb, count), msg->msg_iov, count))
1890 return -EFAULT;
1891
1892 sent += count;
1893 len -= count;
1894
1895 /* Continuation fragments (no L2CAP header) */
1896 frag = &skb_shinfo(skb)->frag_list;
1897 while (len) {
1898 struct sk_buff *tmp;
1899
1900 count = min_t(unsigned int, conn->mtu, len);
1901
1902 tmp = chan->ops->alloc_skb(chan, count,
1903 msg->msg_flags & MSG_DONTWAIT);
1904 if (IS_ERR(tmp))
1905 return PTR_ERR(tmp);
1906
1907 *frag = tmp;
1908
1909 if (memcpy_fromiovec(skb_put(*frag, count), msg->msg_iov, count))
1910 return -EFAULT;
1911
1912 (*frag)->priority = skb->priority;
1913
1914 sent += count;
1915 len -= count;
1916
1917 skb->len += (*frag)->len;
1918 skb->data_len += (*frag)->len;
1919
1920 frag = &(*frag)->next;
1921 }
1922
1923 return sent;
1924 }
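/* The first skb carries the L2CAP header plus up to one HCI-MTU worth of
 * payload; any remainder is copied into continuation skbs of at most
 * conn->mtu bytes chained on the frag_list of the first one, so each
 * buffer can go out as a separate ACL fragment.
 */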
1925
1926 static struct sk_buff *l2cap_create_connless_pdu(struct l2cap_chan *chan,
1927 struct msghdr *msg, size_t len,
1928 u32 priority)
1929 {
1930 struct l2cap_conn *conn = chan->conn;
1931 struct sk_buff *skb;
1932 int err, count, hlen = L2CAP_HDR_SIZE + L2CAP_PSMLEN_SIZE;
1933 struct l2cap_hdr *lh;
1934
1935 BT_DBG("chan %p len %d priority %u", chan, (int)len, priority);
1936
1937 count = min_t(unsigned int, (conn->mtu - hlen), len);
1938
1939 skb = chan->ops->alloc_skb(chan, count + hlen,
1940 msg->msg_flags & MSG_DONTWAIT);
1941 if (IS_ERR(skb))
1942 return skb;
1943
1944 skb->priority = priority;
1945
1946 /* Create L2CAP header */
1947 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
1948 lh->cid = cpu_to_le16(chan->dcid);
1949 lh->len = cpu_to_le16(len + L2CAP_PSMLEN_SIZE);
1950 put_unaligned(chan->psm, skb_put(skb, L2CAP_PSMLEN_SIZE));
1951
1952 err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
1953 if (unlikely(err < 0)) {
1954 kfree_skb(skb);
1955 return ERR_PTR(err);
1956 }
1957 return skb;
1958 }
1959
1960 static struct sk_buff *l2cap_create_basic_pdu(struct l2cap_chan *chan,
1961 struct msghdr *msg, size_t len,
1962 u32 priority)
1963 {
1964 struct l2cap_conn *conn = chan->conn;
1965 struct sk_buff *skb;
1966 int err, count;
1967 struct l2cap_hdr *lh;
1968
1969 BT_DBG("chan %p len %d", chan, (int)len);
1970
1971 count = min_t(unsigned int, (conn->mtu - L2CAP_HDR_SIZE), len);
1972
1973 skb = chan->ops->alloc_skb(chan, count + L2CAP_HDR_SIZE,
1974 msg->msg_flags & MSG_DONTWAIT);
1975 if (IS_ERR(skb))
1976 return skb;
1977
1978 skb->priority = priority;
1979
1980 /* Create L2CAP header */
1981 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
1982 lh->cid = cpu_to_le16(chan->dcid);
1983 lh->len = cpu_to_le16(len);
1984
1985 err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
1986 if (unlikely(err < 0)) {
1987 kfree_skb(skb);
1988 return ERR_PTR(err);
1989 }
1990 return skb;
1991 }
1992
1993 static struct sk_buff *l2cap_create_iframe_pdu(struct l2cap_chan *chan,
1994 struct msghdr *msg, size_t len,
1995 u16 sdulen)
1996 {
1997 struct l2cap_conn *conn = chan->conn;
1998 struct sk_buff *skb;
1999 int err, count, hlen;
2000 struct l2cap_hdr *lh;
2001
2002 BT_DBG("chan %p len %d", chan, (int)len);
2003
2004 if (!conn)
2005 return ERR_PTR(-ENOTCONN);
2006
2007 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
2008 hlen = L2CAP_EXT_HDR_SIZE;
2009 else
2010 hlen = L2CAP_ENH_HDR_SIZE;
2011
2012 if (sdulen)
2013 hlen += L2CAP_SDULEN_SIZE;
2014
2015 if (chan->fcs == L2CAP_FCS_CRC16)
2016 hlen += L2CAP_FCS_SIZE;
2017
2018 count = min_t(unsigned int, (conn->mtu - hlen), len);
2019
2020 skb = chan->ops->alloc_skb(chan, count + hlen,
2021 msg->msg_flags & MSG_DONTWAIT);
2022 if (IS_ERR(skb))
2023 return skb;
2024
2025 /* Create L2CAP header */
2026 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
2027 lh->cid = cpu_to_le16(chan->dcid);
2028 lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
2029
2030 /* Control header is populated later */
2031 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
2032 put_unaligned_le32(0, skb_put(skb, L2CAP_EXT_CTRL_SIZE));
2033 else
2034 put_unaligned_le16(0, skb_put(skb, L2CAP_ENH_CTRL_SIZE));
2035
2036 if (sdulen)
2037 put_unaligned_le16(sdulen, skb_put(skb, L2CAP_SDULEN_SIZE));
2038
2039 err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
2040 if (unlikely(err < 0)) {
2041 kfree_skb(skb);
2042 return ERR_PTR(err);
2043 }
2044
2045 bt_cb(skb)->control.fcs = chan->fcs;
2046 bt_cb(skb)->control.retries = 0;
2047 return skb;
2048 }
2049
2050 static int l2cap_segment_sdu(struct l2cap_chan *chan,
2051 struct sk_buff_head *seg_queue,
2052 struct msghdr *msg, size_t len)
2053 {
2054 struct sk_buff *skb;
2055 u16 sdu_len;
2056 size_t pdu_len;
2057 int err = 0;
2058 u8 sar;
2059
2060 BT_DBG("chan %p, msg %p, len %d", chan, msg, (int)len);
2061
2062 /* It is critical that ERTM PDUs fit in a single HCI fragment,
2063 * so fragmented skbs are not used. The HCI layer's handling
2064 * of fragmented skbs is not compatible with ERTM's queueing.
2065 */
2066
2067 /* PDU size is derived from the HCI MTU */
2068 pdu_len = chan->conn->mtu;
2069
2070 pdu_len = min_t(size_t, pdu_len, L2CAP_BREDR_MAX_PAYLOAD);
2071
2072 /* Adjust for largest possible L2CAP overhead. */
2073 pdu_len -= L2CAP_EXT_HDR_SIZE + L2CAP_FCS_SIZE;
2074
2075 /* Remote device may have requested smaller PDUs */
2076 pdu_len = min_t(size_t, pdu_len, chan->remote_mps);
2077
2078 if (len <= pdu_len) {
2079 sar = L2CAP_SAR_UNSEGMENTED;
2080 sdu_len = 0;
2081 pdu_len = len;
2082 } else {
2083 sar = L2CAP_SAR_START;
2084 sdu_len = len;
2085 pdu_len -= L2CAP_SDULEN_SIZE;
2086 }
2087
2088 while (len > 0) {
2089 skb = l2cap_create_iframe_pdu(chan, msg, pdu_len, sdu_len);
2090
2091 if (IS_ERR(skb)) {
2092 __skb_queue_purge(seg_queue);
2093 return PTR_ERR(skb);
2094 }
2095
2096 bt_cb(skb)->control.sar = sar;
2097 __skb_queue_tail(seg_queue, skb);
2098
2099 len -= pdu_len;
2100 if (sdu_len) {
2101 sdu_len = 0;
2102 pdu_len += L2CAP_SDULEN_SIZE;
2103 }
2104
2105 if (len <= pdu_len) {
2106 sar = L2CAP_SAR_END;
2107 pdu_len = len;
2108 } else {
2109 sar = L2CAP_SAR_CONTINUE;
2110 }
2111 }
2112
2113 return err;
2114 }
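/* Worked example (hypothetical sizes): segmenting a 1500 byte SDU with
 * an effective PDU payload of 500 bytes produces a SAR_START PDU that
 * carries the 2-byte SDU length plus 498 bytes of data, two
 * SAR_CONTINUE PDUs of 500 bytes each, and a SAR_END PDU with the
 * remaining 2 bytes.
 */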
2115
2116 int l2cap_chan_send(struct l2cap_chan *chan, struct msghdr *msg, size_t len,
2117 u32 priority)
2118 {
2119 struct sk_buff *skb;
2120 int err;
2121 struct sk_buff_head seg_queue;
2122
2123 /* Connectionless channel */
2124 if (chan->chan_type == L2CAP_CHAN_CONN_LESS) {
2125 skb = l2cap_create_connless_pdu(chan, msg, len, priority);
2126 if (IS_ERR(skb))
2127 return PTR_ERR(skb);
2128
2129 l2cap_do_send(chan, skb);
2130 return len;
2131 }
2132
2133 switch (chan->mode) {
2134 case L2CAP_MODE_BASIC:
2135 /* Check outgoing MTU */
2136 if (len > chan->omtu)
2137 return -EMSGSIZE;
2138
2139 /* Create a basic PDU */
2140 skb = l2cap_create_basic_pdu(chan, msg, len, priority);
2141 if (IS_ERR(skb))
2142 return PTR_ERR(skb);
2143
2144 l2cap_do_send(chan, skb);
2145 err = len;
2146 break;
2147
2148 case L2CAP_MODE_ERTM:
2149 case L2CAP_MODE_STREAMING:
2150 /* Check outgoing MTU */
2151 if (len > chan->omtu) {
2152 err = -EMSGSIZE;
2153 break;
2154 }
2155
2156 __skb_queue_head_init(&seg_queue);
2157
2158 /* Do segmentation before calling in to the state machine,
2159 * since it's possible to block while waiting for memory
2160 * allocation.
2161 */
2162 err = l2cap_segment_sdu(chan, &seg_queue, msg, len);
2163
2164 /* The channel could have been closed while segmenting,
2165 * check that it is still connected.
2166 */
2167 if (chan->state != BT_CONNECTED) {
2168 __skb_queue_purge(&seg_queue);
2169 err = -ENOTCONN;
2170 }
2171
2172 if (err)
2173 break;
2174
2175 if (chan->mode == L2CAP_MODE_ERTM)
2176 err = l2cap_tx(chan, NULL, &seg_queue,
2177 L2CAP_EV_DATA_REQUEST);
2178 else
2179 err = l2cap_streaming_send(chan, &seg_queue);
2180
2181 if (!err)
2182 err = len;
2183
2184 /* If the skbs were not queued for sending, they'll still be in
2185 * seg_queue and need to be purged.
2186 */
2187 __skb_queue_purge(&seg_queue);
2188 break;
2189
2190 default:
2191 BT_DBG("bad state %1.1x", chan->mode);
2192 err = -EBADFD;
2193 }
2194
2195 return err;
2196 }
2197
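/* Drop frames from the transmit queue that are acknowledged by the
 * received reqseq, advance expected_ack_seq, and stop the
 * retransmission timer once nothing is left unacked.
 */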
2198 static void l2cap_process_reqseq(struct l2cap_chan *chan, u16 reqseq)
2199 {
2200 struct sk_buff *acked_skb;
2201 u16 ackseq;
2202
2203 BT_DBG("chan %p, reqseq %d", chan, reqseq);
2204
2205 if (chan->unacked_frames == 0 || reqseq == chan->expected_ack_seq)
2206 return;
2207
2208 BT_DBG("expected_ack_seq %d, unacked_frames %d",
2209 chan->expected_ack_seq, chan->unacked_frames);
2210
2211 for (ackseq = chan->expected_ack_seq; ackseq != reqseq;
2212 ackseq = __next_seq(chan, ackseq)) {
2213
2214 acked_skb = l2cap_ertm_seq_in_queue(&chan->tx_q, ackseq);
2215 if (acked_skb) {
2216 skb_unlink(acked_skb, &chan->tx_q);
2217 kfree_skb(acked_skb);
2218 chan->unacked_frames--;
2219 }
2220 }
2221
2222 chan->expected_ack_seq = reqseq;
2223
2224 if (chan->unacked_frames == 0)
2225 __clear_retrans_timer(chan);
2226
2227 BT_DBG("unacked_frames %d", (int) chan->unacked_frames);
2228 }
2229
2230 static void l2cap_abort_rx_srej_sent(struct l2cap_chan *chan)
2231 {
2232 BT_DBG("chan %p", chan);
2233
2234 chan->expected_tx_seq = chan->buffer_seq;
2235 l2cap_seq_list_clear(&chan->srej_list);
2236 skb_queue_purge(&chan->srej_q);
2237 chan->rx_state = L2CAP_RX_STATE_RECV;
2238 }
2239
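/* ERTM transmit state machine, XMIT state: new data is queued and sent
 * immediately; local-busy transitions, explicit polls and
 * retransmission timeouts move the channel into the WAIT_F state.
 */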
2240 static int l2cap_tx_state_xmit(struct l2cap_chan *chan,
2241 struct l2cap_ctrl *control,
2242 struct sk_buff_head *skbs, u8 event)
2243 {
2244 int err = 0;
2245
2246 BT_DBG("chan %p, control %p, skbs %p, event %d", chan, control, skbs,
2247 event);
2248
2249 switch (event) {
2250 case L2CAP_EV_DATA_REQUEST:
2251 if (chan->tx_send_head == NULL)
2252 chan->tx_send_head = skb_peek(skbs);
2253
2254 skb_queue_splice_tail_init(skbs, &chan->tx_q);
2255 l2cap_ertm_send(chan);
2256 break;
2257 case L2CAP_EV_LOCAL_BUSY_DETECTED:
2258 BT_DBG("Enter LOCAL_BUSY");
2259 set_bit(CONN_LOCAL_BUSY, &chan->conn_state);
2260
2261 if (chan->rx_state == L2CAP_RX_STATE_SREJ_SENT) {
2262 /* The SREJ_SENT state must be aborted if we are to
2263 * enter the LOCAL_BUSY state.
2264 */
2265 l2cap_abort_rx_srej_sent(chan);
2266 }
2267
2268 l2cap_send_ack(chan);
2269
2270 break;
2271 case L2CAP_EV_LOCAL_BUSY_CLEAR:
2272 BT_DBG("Exit LOCAL_BUSY");
2273 clear_bit(CONN_LOCAL_BUSY, &chan->conn_state);
2274
2275 if (test_bit(CONN_RNR_SENT, &chan->conn_state)) {
2276 struct l2cap_ctrl local_control;
2277
2278 memset(&local_control, 0, sizeof(local_control));
2279 local_control.sframe = 1;
2280 local_control.super = L2CAP_SUPER_RR;
2281 local_control.poll = 1;
2282 local_control.reqseq = chan->buffer_seq;
2283 l2cap_send_sframe(chan, &local_control);
2284
2285 chan->retry_count = 1;
2286 __set_monitor_timer(chan);
2287 chan->tx_state = L2CAP_TX_STATE_WAIT_F;
2288 }
2289 break;
2290 case L2CAP_EV_RECV_REQSEQ_AND_FBIT:
2291 l2cap_process_reqseq(chan, control->reqseq);
2292 break;
2293 case L2CAP_EV_EXPLICIT_POLL:
2294 l2cap_send_rr_or_rnr(chan, 1);
2295 chan->retry_count = 1;
2296 __set_monitor_timer(chan);
2297 __clear_ack_timer(chan);
2298 chan->tx_state = L2CAP_TX_STATE_WAIT_F;
2299 break;
2300 case L2CAP_EV_RETRANS_TO:
2301 l2cap_send_rr_or_rnr(chan, 1);
2302 chan->retry_count = 1;
2303 __set_monitor_timer(chan);
2304 chan->tx_state = L2CAP_TX_STATE_WAIT_F;
2305 break;
2306 case L2CAP_EV_RECV_FBIT:
2307 /* Nothing to process */
2308 break;
2309 default:
2310 break;
2311 }
2312
2313 return err;
2314 }
2315
2316 static int l2cap_tx_state_wait_f(struct l2cap_chan *chan,
2317 struct l2cap_ctrl *control,
2318 struct sk_buff_head *skbs, u8 event)
2319 {
2320 int err = 0;
2321
2322 BT_DBG("chan %p, control %p, skbs %p, event %d", chan, control, skbs,
2323 event);
2324
2325 switch (event) {
2326 case L2CAP_EV_DATA_REQUEST:
2327 if (chan->tx_send_head == NULL)
2328 chan->tx_send_head = skb_peek(skbs);
2329 /* Queue data, but don't send. */
2330 skb_queue_splice_tail_init(skbs, &chan->tx_q);
2331 break;
2332 case L2CAP_EV_LOCAL_BUSY_DETECTED:
2333 BT_DBG("Enter LOCAL_BUSY");
2334 set_bit(CONN_LOCAL_BUSY, &chan->conn_state);
2335
2336 if (chan->rx_state == L2CAP_RX_STATE_SREJ_SENT) {
2337 /* The SREJ_SENT state must be aborted if we are to
2338 * enter the LOCAL_BUSY state.
2339 */
2340 l2cap_abort_rx_srej_sent(chan);
2341 }
2342
2343 l2cap_send_ack(chan);
2344
2345 break;
2346 case L2CAP_EV_LOCAL_BUSY_CLEAR:
2347 BT_DBG("Exit LOCAL_BUSY");
2348 clear_bit(CONN_LOCAL_BUSY, &chan->conn_state);
2349
2350 if (test_bit(CONN_RNR_SENT, &chan->conn_state)) {
2351 struct l2cap_ctrl local_control;
2352 memset(&local_control, 0, sizeof(local_control));
2353 local_control.sframe = 1;
2354 local_control.super = L2CAP_SUPER_RR;
2355 local_control.poll = 1;
2356 local_control.reqseq = chan->buffer_seq;
2357 l2cap_send_sframe(chan, &local_control);
2358
2359 chan->retry_count = 1;
2360 __set_monitor_timer(chan);
2361 chan->tx_state = L2CAP_TX_STATE_WAIT_F;
2362 }
2363 break;
2364 case L2CAP_EV_RECV_REQSEQ_AND_FBIT:
2365 l2cap_process_reqseq(chan, control->reqseq);
2366
2367 /* Fall through */
2368
2369 case L2CAP_EV_RECV_FBIT:
2370 if (control && control->final) {
2371 __clear_monitor_timer(chan);
2372 if (chan->unacked_frames > 0)
2373 __set_retrans_timer(chan);
2374 chan->retry_count = 0;
2375 chan->tx_state = L2CAP_TX_STATE_XMIT;
2376 BT_DBG("recv fbit tx_state 0x2.2%x", chan->tx_state);
2377 }
2378 break;
2379 case L2CAP_EV_EXPLICIT_POLL:
2380 /* Ignore */
2381 break;
2382 case L2CAP_EV_MONITOR_TO:
2383 if (chan->max_tx == 0 || chan->retry_count < chan->max_tx) {
2384 l2cap_send_rr_or_rnr(chan, 1);
2385 __set_monitor_timer(chan);
2386 chan->retry_count++;
2387 } else {
2388 l2cap_send_disconn_req(chan->conn, chan, ECONNABORTED);
2389 }
2390 break;
2391 default:
2392 break;
2393 }
2394
2395 return err;
2396 }
2397
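/* Dispatch a transmit-side event to the handler for the current ERTM
 * transmit state (XMIT or WAIT_F).
 */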
2398 static int l2cap_tx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
2399 struct sk_buff_head *skbs, u8 event)
2400 {
2401 int err = 0;
2402
2403 BT_DBG("chan %p, control %p, skbs %p, event %d, state %d",
2404 chan, control, skbs, event, chan->tx_state);
2405
2406 switch (chan->tx_state) {
2407 case L2CAP_TX_STATE_XMIT:
2408 err = l2cap_tx_state_xmit(chan, control, skbs, event);
2409 break;
2410 case L2CAP_TX_STATE_WAIT_F:
2411 err = l2cap_tx_state_wait_f(chan, control, skbs, event);
2412 break;
2413 default:
2414 /* Ignore event */
2415 break;
2416 }
2417
2418 return err;
2419 }
2420
2421 /* Copy frame to all raw sockets on that connection */
2422 static void l2cap_raw_recv(struct l2cap_conn *conn, struct sk_buff *skb)
2423 {
2424 struct sk_buff *nskb;
2425 struct l2cap_chan *chan;
2426
2427 BT_DBG("conn %p", conn);
2428
2429 mutex_lock(&conn->chan_lock);
2430
2431 list_for_each_entry(chan, &conn->chan_l, list) {
2432 struct sock *sk = chan->sk;
2433 if (chan->chan_type != L2CAP_CHAN_RAW)
2434 continue;
2435
2436 /* Don't send frame to the socket it came from */
2437 if (skb->sk == sk)
2438 continue;
2439 nskb = skb_clone(skb, GFP_ATOMIC);
2440 if (!nskb)
2441 continue;
2442
2443 if (chan->ops->recv(chan->data, nskb))
2444 kfree_skb(nskb);
2445 }
2446
2447 mutex_unlock(&conn->chan_lock);
2448 }
2449
2450 /* ---- L2CAP signalling commands ---- */
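/* Build a signalling PDU: L2CAP header, command header and payload.
 * If the command does not fit in one buffer of conn->mtu bytes, the
 * remainder is carried in continuation fragments on the frag_list.
 */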
2451 static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
2452 u8 code, u8 ident, u16 dlen, void *data)
2453 {
2454 struct sk_buff *skb, **frag;
2455 struct l2cap_cmd_hdr *cmd;
2456 struct l2cap_hdr *lh;
2457 int len, count;
2458
2459 BT_DBG("conn %p, code 0x%2.2x, ident 0x%2.2x, len %d",
2460 conn, code, ident, dlen);
2461
2462 len = L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE + dlen;
2463 count = min_t(unsigned int, conn->mtu, len);
2464
2465 skb = bt_skb_alloc(count, GFP_ATOMIC);
2466 if (!skb)
2467 return NULL;
2468
2469 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
2470 lh->len = cpu_to_le16(L2CAP_CMD_HDR_SIZE + dlen);
2471
2472 if (conn->hcon->type == LE_LINK)
2473 lh->cid = cpu_to_le16(L2CAP_CID_LE_SIGNALING);
2474 else
2475 lh->cid = cpu_to_le16(L2CAP_CID_SIGNALING);
2476
2477 cmd = (struct l2cap_cmd_hdr *) skb_put(skb, L2CAP_CMD_HDR_SIZE);
2478 cmd->code = code;
2479 cmd->ident = ident;
2480 cmd->len = cpu_to_le16(dlen);
2481
2482 if (dlen) {
2483 count -= L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE;
2484 memcpy(skb_put(skb, count), data, count);
2485 data += count;
2486 }
2487
2488 len -= skb->len;
2489
2490 /* Continuation fragments (no L2CAP header) */
2491 frag = &skb_shinfo(skb)->frag_list;
2492 while (len) {
2493 count = min_t(unsigned int, conn->mtu, len);
2494
2495 *frag = bt_skb_alloc(count, GFP_ATOMIC);
2496 if (!*frag)
2497 goto fail;
2498
2499 memcpy(skb_put(*frag, count), data, count);
2500
2501 len -= count;
2502 data += count;
2503
2504 frag = &(*frag)->next;
2505 }
2506
2507 return skb;
2508
2509 fail:
2510 kfree_skb(skb);
2511 return NULL;
2512 }
2513
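/* Configuration options are encoded as {type, length, value} triplets;
 * for example an MTU option is type 0x01, length 2, followed by a
 * little-endian u16. l2cap_get_conf_opt() reads the option at *ptr and
 * advances the pointer; l2cap_add_conf_opt() appends an option at *ptr.
 */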
2514 static inline int l2cap_get_conf_opt(void **ptr, int *type, int *olen, unsigned long *val)
2515 {
2516 struct l2cap_conf_opt *opt = *ptr;
2517 int len;
2518
2519 len = L2CAP_CONF_OPT_SIZE + opt->len;
2520 *ptr += len;
2521
2522 *type = opt->type;
2523 *olen = opt->len;
2524
2525 switch (opt->len) {
2526 case 1:
2527 *val = *((u8 *) opt->val);
2528 break;
2529
2530 case 2:
2531 *val = get_unaligned_le16(opt->val);
2532 break;
2533
2534 case 4:
2535 *val = get_unaligned_le32(opt->val);
2536 break;
2537
2538 default:
2539 *val = (unsigned long) opt->val;
2540 break;
2541 }
2542
2543 BT_DBG("type 0x%2.2x len %d val 0x%lx", *type, opt->len, *val);
2544 return len;
2545 }
2546
2547 static void l2cap_add_conf_opt(void **ptr, u8 type, u8 len, unsigned long val)
2548 {
2549 struct l2cap_conf_opt *opt = *ptr;
2550
2551 BT_DBG("type 0x%2.2x len %d val 0x%lx", type, len, val);
2552
2553 opt->type = type;
2554 opt->len = len;
2555
2556 switch (len) {
2557 case 1:
2558 *((u8 *) opt->val) = val;
2559 break;
2560
2561 case 2:
2562 put_unaligned_le16(val, opt->val);
2563 break;
2564
2565 case 4:
2566 put_unaligned_le32(val, opt->val);
2567 break;
2568
2569 default:
2570 memcpy(opt->val, (void *) val, len);
2571 break;
2572 }
2573
2574 *ptr += L2CAP_CONF_OPT_SIZE + len;
2575 }
2576
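/* Append an Extended Flow Specification option built from the
 * channel's local parameters: ERTM uses the configured service type,
 * while streaming mode advertises a best-effort service.
 */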
2577 static void l2cap_add_opt_efs(void **ptr, struct l2cap_chan *chan)
2578 {
2579 struct l2cap_conf_efs efs;
2580
2581 switch (chan->mode) {
2582 case L2CAP_MODE_ERTM:
2583 efs.id = chan->local_id;
2584 efs.stype = chan->local_stype;
2585 efs.msdu = cpu_to_le16(chan->local_msdu);
2586 efs.sdu_itime = cpu_to_le32(chan->local_sdu_itime);
2587 efs.acc_lat = cpu_to_le32(L2CAP_DEFAULT_ACC_LAT);
2588 efs.flush_to = cpu_to_le32(L2CAP_DEFAULT_FLUSH_TO);
2589 break;
2590
2591 case L2CAP_MODE_STREAMING:
2592 efs.id = 1;
2593 efs.stype = L2CAP_SERV_BESTEFFORT;
2594 efs.msdu = cpu_to_le16(chan->local_msdu);
2595 efs.sdu_itime = cpu_to_le32(chan->local_sdu_itime);
2596 efs.acc_lat = 0;
2597 efs.flush_to = 0;
2598 break;
2599
2600 default:
2601 return;
2602 }
2603
2604 l2cap_add_conf_opt(ptr, L2CAP_CONF_EFS, sizeof(efs),
2605 (unsigned long) &efs);
2606 }
2607
2608 static void l2cap_ack_timeout(struct work_struct *work)
2609 {
2610 struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
2611 ack_timer.work);
2612
2613 BT_DBG("chan %p", chan);
2614
2615 l2cap_chan_lock(chan);
2616
2617 __l2cap_send_ack(chan);
2618
2619 l2cap_chan_unlock(chan);
2620
2621 l2cap_chan_put(chan);
2622 }
2623
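/* Reset sequence numbers, counters and queues for ERTM or streaming
 * mode. ERTM additionally initializes the retransmission, monitor and
 * ack timers and the SREJ/retransmission sequence lists.
 */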
2624 static inline int l2cap_ertm_init(struct l2cap_chan *chan)
2625 {
2626 int err;
2627
2628 chan->next_tx_seq = 0;
2629 chan->expected_tx_seq = 0;
2630 chan->expected_ack_seq = 0;
2631 chan->unacked_frames = 0;
2632 chan->buffer_seq = 0;
2633 chan->num_acked = 0;
2634 chan->frames_sent = 0;
2635 chan->last_acked_seq = 0;
2636 chan->sdu = NULL;
2637 chan->sdu_last_frag = NULL;
2638 chan->sdu_len = 0;
2639
2640 skb_queue_head_init(&chan->tx_q);
2641
2642 if (chan->mode != L2CAP_MODE_ERTM)
2643 return 0;
2644
2645 chan->rx_state = L2CAP_RX_STATE_RECV;
2646 chan->tx_state = L2CAP_TX_STATE_XMIT;
2647
2648 INIT_DELAYED_WORK(&chan->retrans_timer, l2cap_retrans_timeout);
2649 INIT_DELAYED_WORK(&chan->monitor_timer, l2cap_monitor_timeout);
2650 INIT_DELAYED_WORK(&chan->ack_timer, l2cap_ack_timeout);
2651
2652 skb_queue_head_init(&chan->srej_q);
2653
2654 INIT_LIST_HEAD(&chan->srej_l);
2655 err = l2cap_seq_list_init(&chan->srej_list, chan->tx_win);
2656 if (err < 0)
2657 return err;
2658
2659 err = l2cap_seq_list_init(&chan->retrans_list, chan->remote_tx_win);
2660 if (err < 0)
2661 l2cap_seq_list_free(&chan->srej_list);
2662
2663 return err;
2664 }
2665
2666 static inline __u8 l2cap_select_mode(__u8 mode, __u16 remote_feat_mask)
2667 {
2668 switch (mode) {
2669 case L2CAP_MODE_STREAMING:
2670 case L2CAP_MODE_ERTM:
2671 if (l2cap_mode_supported(mode, remote_feat_mask))
2672 return mode;
2673 /* fall through */
2674 default:
2675 return L2CAP_MODE_BASIC;
2676 }
2677 }
2678
2679 static inline bool __l2cap_ews_supported(struct l2cap_chan *chan)
2680 {
2681 return enable_hs && chan->conn->feat_mask & L2CAP_FEAT_EXT_WINDOW;
2682 }
2683
2684 static inline bool __l2cap_efs_supported(struct l2cap_chan *chan)
2685 {
2686 return enable_hs && chan->conn->feat_mask & L2CAP_FEAT_EXT_FLOW;
2687 }
2688
2689 static inline void l2cap_txwin_setup(struct l2cap_chan *chan)
2690 {
2691 if (chan->tx_win > L2CAP_DEFAULT_TX_WINDOW &&
2692 __l2cap_ews_supported(chan)) {
2693 /* use extended control field */
2694 set_bit(FLAG_EXT_CTRL, &chan->flags);
2695 chan->tx_win_max = L2CAP_DEFAULT_EXT_WINDOW;
2696 } else {
2697 chan->tx_win = min_t(u16, chan->tx_win,
2698 L2CAP_DEFAULT_TX_WINDOW);
2699 chan->tx_win_max = L2CAP_DEFAULT_TX_WINDOW;
2700 }
2701 }
2702
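/* Build our Configuration Request: select a mode supported by the
 * remote feature mask and add the MTU, RFC and, when applicable, FCS,
 * EFS and extended window size options.
 */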
2703 static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data)
2704 {
2705 struct l2cap_conf_req *req = data;
2706 struct l2cap_conf_rfc rfc = { .mode = chan->mode };
2707 void *ptr = req->data;
2708 u16 size;
2709
2710 BT_DBG("chan %p", chan);
2711
2712 if (chan->num_conf_req || chan->num_conf_rsp)
2713 goto done;
2714
2715 switch (chan->mode) {
2716 case L2CAP_MODE_STREAMING:
2717 case L2CAP_MODE_ERTM:
2718 if (test_bit(CONF_STATE2_DEVICE, &chan->conf_state))
2719 break;
2720
2721 if (__l2cap_efs_supported(chan))
2722 set_bit(FLAG_EFS_ENABLE, &chan->flags);
2723
2724 /* fall through */
2725 default:
2726 chan->mode = l2cap_select_mode(rfc.mode, chan->conn->feat_mask);
2727 break;
2728 }
2729
2730 done:
2731 if (chan->imtu != L2CAP_DEFAULT_MTU)
2732 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->imtu);
2733
2734 switch (chan->mode) {
2735 case L2CAP_MODE_BASIC:
2736 if (!(chan->conn->feat_mask & L2CAP_FEAT_ERTM) &&
2737 !(chan->conn->feat_mask & L2CAP_FEAT_STREAMING))
2738 break;
2739
2740 rfc.mode = L2CAP_MODE_BASIC;
2741 rfc.txwin_size = 0;
2742 rfc.max_transmit = 0;
2743 rfc.retrans_timeout = 0;
2744 rfc.monitor_timeout = 0;
2745 rfc.max_pdu_size = 0;
2746
2747 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
2748 (unsigned long) &rfc);
2749 break;
2750
2751 case L2CAP_MODE_ERTM:
2752 rfc.mode = L2CAP_MODE_ERTM;
2753 rfc.max_transmit = chan->max_tx;
2754 rfc.retrans_timeout = 0;
2755 rfc.monitor_timeout = 0;
2756
2757 size = min_t(u16, L2CAP_DEFAULT_MAX_PDU_SIZE, chan->conn->mtu -
2758 L2CAP_EXT_HDR_SIZE -
2759 L2CAP_SDULEN_SIZE -
2760 L2CAP_FCS_SIZE);
2761 rfc.max_pdu_size = cpu_to_le16(size);
2762
2763 l2cap_txwin_setup(chan);
2764
2765 rfc.txwin_size = min_t(u16, chan->tx_win,
2766 L2CAP_DEFAULT_TX_WINDOW);
2767
2768 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
2769 (unsigned long) &rfc);
2770
2771 if (test_bit(FLAG_EFS_ENABLE, &chan->flags))
2772 l2cap_add_opt_efs(&ptr, chan);
2773
2774 if (!(chan->conn->feat_mask & L2CAP_FEAT_FCS))
2775 break;
2776
2777 if (chan->fcs == L2CAP_FCS_NONE ||
2778 test_bit(CONF_NO_FCS_RECV, &chan->conf_state)) {
2779 chan->fcs = L2CAP_FCS_NONE;
2780 l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1, chan->fcs);
2781 }
2782
2783 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
2784 l2cap_add_conf_opt(&ptr, L2CAP_CONF_EWS, 2,
2785 chan->tx_win);
2786 break;
2787
2788 case L2CAP_MODE_STREAMING:
2789 rfc.mode = L2CAP_MODE_STREAMING;
2790 rfc.txwin_size = 0;
2791 rfc.max_transmit = 0;
2792 rfc.retrans_timeout = 0;
2793 rfc.monitor_timeout = 0;
2794
2795 size = min_t(u16, L2CAP_DEFAULT_MAX_PDU_SIZE, chan->conn->mtu -
2796 L2CAP_EXT_HDR_SIZE -
2797 L2CAP_SDULEN_SIZE -
2798 L2CAP_FCS_SIZE);
2799 rfc.max_pdu_size = cpu_to_le16(size);
2800
2801 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
2802 (unsigned long) &rfc);
2803
2804 if (test_bit(FLAG_EFS_ENABLE, &chan->flags))
2805 l2cap_add_opt_efs(&ptr, chan);
2806
2807 if (!(chan->conn->feat_mask & L2CAP_FEAT_FCS))
2808 break;
2809
2810 if (chan->fcs == L2CAP_FCS_NONE ||
2811 test_bit(CONF_NO_FCS_RECV, &chan->conf_state)) {
2812 chan->fcs = L2CAP_FCS_NONE;
2813 l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1, chan->fcs);
2814 }
2815 break;
2816 }
2817
2818 req->dcid = cpu_to_le16(chan->dcid);
2819 req->flags = cpu_to_le16(0);
2820
2821 return ptr - data;
2822 }
2823
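/* Parse the peer's accumulated Configuration Request (chan->conf_req)
 * and build our Configuration Response, accepting or adjusting the
 * MTU, RFC mode, FCS, EFS and extended window options.
 */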
2824 static int l2cap_parse_conf_req(struct l2cap_chan *chan, void *data)
2825 {
2826 struct l2cap_conf_rsp *rsp = data;
2827 void *ptr = rsp->data;
2828 void *req = chan->conf_req;
2829 int len = chan->conf_len;
2830 int type, hint, olen;
2831 unsigned long val;
2832 struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
2833 struct l2cap_conf_efs efs;
2834 u8 remote_efs = 0;
2835 u16 mtu = L2CAP_DEFAULT_MTU;
2836 u16 result = L2CAP_CONF_SUCCESS;
2837 u16 size;
2838
2839 BT_DBG("chan %p", chan);
2840
2841 while (len >= L2CAP_CONF_OPT_SIZE) {
2842 len -= l2cap_get_conf_opt(&req, &type, &olen, &val);
2843
2844 hint = type & L2CAP_CONF_HINT;
2845 type &= L2CAP_CONF_MASK;
2846
2847 switch (type) {
2848 case L2CAP_CONF_MTU:
2849 mtu = val;
2850 break;
2851
2852 case L2CAP_CONF_FLUSH_TO:
2853 chan->flush_to = val;
2854 break;
2855
2856 case L2CAP_CONF_QOS:
2857 break;
2858
2859 case L2CAP_CONF_RFC:
2860 if (olen == sizeof(rfc))
2861 memcpy(&rfc, (void *) val, olen);
2862 break;
2863
2864 case L2CAP_CONF_FCS:
2865 if (val == L2CAP_FCS_NONE)
2866 set_bit(CONF_NO_FCS_RECV, &chan->conf_state);
2867 break;
2868
2869 case L2CAP_CONF_EFS:
2870 remote_efs = 1;
2871 if (olen == sizeof(efs))
2872 memcpy(&efs, (void *) val, olen);
2873 break;
2874
2875 case L2CAP_CONF_EWS:
2876 if (!enable_hs)
2877 return -ECONNREFUSED;
2878
2879 set_bit(FLAG_EXT_CTRL, &chan->flags);
2880 set_bit(CONF_EWS_RECV, &chan->conf_state);
2881 chan->tx_win_max = L2CAP_DEFAULT_EXT_WINDOW;
2882 chan->remote_tx_win = val;
2883 break;
2884
2885 default:
2886 if (hint)
2887 break;
2888
2889 result = L2CAP_CONF_UNKNOWN;
2890 *((u8 *) ptr++) = type;
2891 break;
2892 }
2893 }
2894
2895 if (chan->num_conf_rsp || chan->num_conf_req > 1)
2896 goto done;
2897
2898 switch (chan->mode) {
2899 case L2CAP_MODE_STREAMING:
2900 case L2CAP_MODE_ERTM:
2901 if (!test_bit(CONF_STATE2_DEVICE, &chan->conf_state)) {
2902 chan->mode = l2cap_select_mode(rfc.mode,
2903 chan->conn->feat_mask);
2904 break;
2905 }
2906
2907 if (remote_efs) {
2908 if (__l2cap_efs_supported(chan))
2909 set_bit(FLAG_EFS_ENABLE, &chan->flags);
2910 else
2911 return -ECONNREFUSED;
2912 }
2913
2914 if (chan->mode != rfc.mode)
2915 return -ECONNREFUSED;
2916
2917 break;
2918 }
2919
2920 done:
2921 if (chan->mode != rfc.mode) {
2922 result = L2CAP_CONF_UNACCEPT;
2923 rfc.mode = chan->mode;
2924
2925 if (chan->num_conf_rsp == 1)
2926 return -ECONNREFUSED;
2927
2928 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
2929 sizeof(rfc), (unsigned long) &rfc);
2930 }
2931
2932 if (result == L2CAP_CONF_SUCCESS) {
2933 /* Configure output options and let the other side know
2934 * which ones we don't like. */
2935
2936 if (mtu < L2CAP_DEFAULT_MIN_MTU)
2937 result = L2CAP_CONF_UNACCEPT;
2938 else {
2939 chan->omtu = mtu;
2940 set_bit(CONF_MTU_DONE, &chan->conf_state);
2941 }
2942 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->omtu);
2943
2944 if (remote_efs) {
2945 if (chan->local_stype != L2CAP_SERV_NOTRAFIC &&
2946 efs.stype != L2CAP_SERV_NOTRAFIC &&
2947 efs.stype != chan->local_stype) {
2948
2949 result = L2CAP_CONF_UNACCEPT;
2950
2951 if (chan->num_conf_req >= 1)
2952 return -ECONNREFUSED;
2953
2954 l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS,
2955 sizeof(efs),
2956 (unsigned long) &efs);
2957 } else {
2958 /* Send PENDING Conf Rsp */
2959 result = L2CAP_CONF_PENDING;
2960 set_bit(CONF_LOC_CONF_PEND, &chan->conf_state);
2961 }
2962 }
2963
2964 switch (rfc.mode) {
2965 case L2CAP_MODE_BASIC:
2966 chan->fcs = L2CAP_FCS_NONE;
2967 set_bit(CONF_MODE_DONE, &chan->conf_state);
2968 break;
2969
2970 case L2CAP_MODE_ERTM:
2971 if (!test_bit(CONF_EWS_RECV, &chan->conf_state))
2972 chan->remote_tx_win = rfc.txwin_size;
2973 else
2974 rfc.txwin_size = L2CAP_DEFAULT_TX_WINDOW;
2975
2976 chan->remote_max_tx = rfc.max_transmit;
2977
2978 size = min_t(u16, le16_to_cpu(rfc.max_pdu_size),
2979 chan->conn->mtu -
2980 L2CAP_EXT_HDR_SIZE -
2981 L2CAP_SDULEN_SIZE -
2982 L2CAP_FCS_SIZE);
2983 rfc.max_pdu_size = cpu_to_le16(size);
2984 chan->remote_mps = size;
2985
2986 rfc.retrans_timeout =
2987 __constant_cpu_to_le16(L2CAP_DEFAULT_RETRANS_TO);
2988 rfc.monitor_timeout =
2989 __constant_cpu_to_le16(L2CAP_DEFAULT_MONITOR_TO);
2990
2991 set_bit(CONF_MODE_DONE, &chan->conf_state);
2992
2993 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
2994 sizeof(rfc), (unsigned long) &rfc);
2995
2996 if (test_bit(FLAG_EFS_ENABLE, &chan->flags)) {
2997 chan->remote_id = efs.id;
2998 chan->remote_stype = efs.stype;
2999 chan->remote_msdu = le16_to_cpu(efs.msdu);
3000 chan->remote_flush_to =
3001 le32_to_cpu(efs.flush_to);
3002 chan->remote_acc_lat =
3003 le32_to_cpu(efs.acc_lat);
3004 chan->remote_sdu_itime =
3005 le32_to_cpu(efs.sdu_itime);
3006 l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS,
3007 sizeof(efs), (unsigned long) &efs);
3008 }
3009 break;
3010
3011 case L2CAP_MODE_STREAMING:
3012 size = min_t(u16, le16_to_cpu(rfc.max_pdu_size),
3013 chan->conn->mtu -
3014 L2CAP_EXT_HDR_SIZE -
3015 L2CAP_SDULEN_SIZE -
3016 L2CAP_FCS_SIZE);
3017 rfc.max_pdu_size = cpu_to_le16(size);
3018 chan->remote_mps = size;
3019
3020 set_bit(CONF_MODE_DONE, &chan->conf_state);
3021
3022 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
3023 sizeof(rfc), (unsigned long) &rfc);
3024
3025 break;
3026
3027 default:
3028 result = L2CAP_CONF_UNACCEPT;
3029
3030 memset(&rfc, 0, sizeof(rfc));
3031 rfc.mode = chan->mode;
3032 }
3033
3034 if (result == L2CAP_CONF_SUCCESS)
3035 set_bit(CONF_OUTPUT_DONE, &chan->conf_state);
3036 }
3037 rsp->scid = cpu_to_le16(chan->dcid);
3038 rsp->result = cpu_to_le16(result);
3039 rsp->flags = cpu_to_le16(0x0000);
3040
3041 return ptr - data;
3042 }
3043
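/* Parse the peer's Configuration Response and build a follow-up
 * Configuration Request with the adjusted values; refuse the
 * connection if the negotiated mode is not acceptable.
 */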
3044 static int l2cap_parse_conf_rsp(struct l2cap_chan *chan, void *rsp, int len, void *data, u16 *result)
3045 {
3046 struct l2cap_conf_req *req = data;
3047 void *ptr = req->data;
3048 int type, olen;
3049 unsigned long val;
3050 struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
3051 struct l2cap_conf_efs efs;
3052
3053 BT_DBG("chan %p, rsp %p, len %d, req %p", chan, rsp, len, data);
3054
3055 while (len >= L2CAP_CONF_OPT_SIZE) {
3056 len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);
3057
3058 switch (type) {
3059 case L2CAP_CONF_MTU:
3060 if (val < L2CAP_DEFAULT_MIN_MTU) {
3061 *result = L2CAP_CONF_UNACCEPT;
3062 chan->imtu = L2CAP_DEFAULT_MIN_MTU;
3063 } else
3064 chan->imtu = val;
3065 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->imtu);
3066 break;
3067
3068 case L2CAP_CONF_FLUSH_TO:
3069 chan->flush_to = val;
3070 l2cap_add_conf_opt(&ptr, L2CAP_CONF_FLUSH_TO,
3071 2, chan->flush_to);
3072 break;
3073
3074 case L2CAP_CONF_RFC:
3075 if (olen == sizeof(rfc))
3076 memcpy(&rfc, (void *)val, olen);
3077
3078 if (test_bit(CONF_STATE2_DEVICE, &chan->conf_state) &&
3079 rfc.mode != chan->mode)
3080 return -ECONNREFUSED;
3081
3082 chan->fcs = 0;
3083
3084 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
3085 sizeof(rfc), (unsigned long) &rfc);
3086 break;
3087
3088 case L2CAP_CONF_EWS:
3089 chan->tx_win = min_t(u16, val,
3090 L2CAP_DEFAULT_EXT_WINDOW);
3091 l2cap_add_conf_opt(&ptr, L2CAP_CONF_EWS, 2,
3092 chan->tx_win);
3093 break;
3094
3095 case L2CAP_CONF_EFS:
3096 if (olen == sizeof(efs))
3097 memcpy(&efs, (void *)val, olen);
3098
3099 if (chan->local_stype != L2CAP_SERV_NOTRAFIC &&
3100 efs.stype != L2CAP_SERV_NOTRAFIC &&
3101 efs.stype != chan->local_stype)
3102 return -ECONNREFUSED;
3103
3104 l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS,
3105 sizeof(efs), (unsigned long) &efs);
3106 break;
3107 }
3108 }
3109
3110 if (chan->mode == L2CAP_MODE_BASIC && chan->mode != rfc.mode)
3111 return -ECONNREFUSED;
3112
3113 chan->mode = rfc.mode;
3114
3115 if (*result == L2CAP_CONF_SUCCESS || *result == L2CAP_CONF_PENDING) {
3116 switch (rfc.mode) {
3117 case L2CAP_MODE_ERTM:
3118 chan->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
3119 chan->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
3120 chan->mps = le16_to_cpu(rfc.max_pdu_size);
3121
3122 if (test_bit(FLAG_EFS_ENABLE, &chan->flags)) {
3123 chan->local_msdu = le16_to_cpu(efs.msdu);
3124 chan->local_sdu_itime =
3125 le32_to_cpu(efs.sdu_itime);
3126 chan->local_acc_lat = le32_to_cpu(efs.acc_lat);
3127 chan->local_flush_to =
3128 le32_to_cpu(efs.flush_to);
3129 }
3130 break;
3131
3132 case L2CAP_MODE_STREAMING:
3133 chan->mps = le16_to_cpu(rfc.max_pdu_size);
3134 }
3135 }
3136
3137 req->dcid = cpu_to_le16(chan->dcid);
3138 req->flags = cpu_to_le16(0x0000);
3139
3140 return ptr - data;
3141 }
3142
3143 static int l2cap_build_conf_rsp(struct l2cap_chan *chan, void *data, u16 result, u16 flags)
3144 {
3145 struct l2cap_conf_rsp *rsp = data;
3146 void *ptr = rsp->data;
3147
3148 BT_DBG("chan %p", chan);
3149
3150 rsp->scid = cpu_to_le16(chan->dcid);
3151 rsp->result = cpu_to_le16(result);
3152 rsp->flags = cpu_to_le16(flags);
3153
3154 return ptr - data;
3155 }
3156
3157 void __l2cap_connect_rsp_defer(struct l2cap_chan *chan)
3158 {
3159 struct l2cap_conn_rsp rsp;
3160 struct l2cap_conn *conn = chan->conn;
3161 u8 buf[128];
3162
3163 rsp.scid = cpu_to_le16(chan->dcid);
3164 rsp.dcid = cpu_to_le16(chan->scid);
3165 rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
3166 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
3167 l2cap_send_cmd(conn, chan->ident,
3168 L2CAP_CONN_RSP, sizeof(rsp), &rsp);
3169
3170 if (test_and_set_bit(CONF_REQ_SENT, &chan->conf_state))
3171 return;
3172
3173 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
3174 l2cap_build_conf_req(chan, buf), buf);
3175 chan->num_conf_req++;
3176 }
3177
3178 static void l2cap_conf_rfc_get(struct l2cap_chan *chan, void *rsp, int len)
3179 {
3180 int type, olen;
3181 unsigned long val;
3182 struct l2cap_conf_rfc rfc;
3183
3184 BT_DBG("chan %p, rsp %p, len %d", chan, rsp, len);
3185
3186 if ((chan->mode != L2CAP_MODE_ERTM) && (chan->mode != L2CAP_MODE_STREAMING))
3187 return;
3188
3189 while (len >= L2CAP_CONF_OPT_SIZE) {
3190 len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);
3191
3192 switch (type) {
3193 case L2CAP_CONF_RFC:
3194 if (olen == sizeof(rfc))
3195 memcpy(&rfc, (void *)val, olen);
3196 goto done;
3197 }
3198 }
3199
3200 /* Use sane default values in case a misbehaving remote device
3201 * did not send an RFC option.
3202 */
3203 rfc.mode = chan->mode;
3204 rfc.retrans_timeout = cpu_to_le16(L2CAP_DEFAULT_RETRANS_TO);
3205 rfc.monitor_timeout = cpu_to_le16(L2CAP_DEFAULT_MONITOR_TO);
3206 rfc.max_pdu_size = cpu_to_le16(chan->imtu);
3207
3208 BT_ERR("Expected RFC option was not found, using defaults");
3209
3210 done:
3211 switch (rfc.mode) {
3212 case L2CAP_MODE_ERTM:
3213 chan->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
3214 chan->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
3215 chan->mps = le16_to_cpu(rfc.max_pdu_size);
3216 break;
3217 case L2CAP_MODE_STREAMING:
3218 chan->mps = le16_to_cpu(rfc.max_pdu_size);
3219 }
3220 }
3221
3222 static inline int l2cap_command_rej(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
3223 {
3224 struct l2cap_cmd_rej_unk *rej = (struct l2cap_cmd_rej_unk *) data;
3225
3226 if (rej->reason != L2CAP_REJ_NOT_UNDERSTOOD)
3227 return 0;
3228
3229 if ((conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) &&
3230 cmd->ident == conn->info_ident) {
3231 cancel_delayed_work(&conn->info_timer);
3232
3233 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
3234 conn->info_ident = 0;
3235
3236 l2cap_conn_start(conn);
3237 }
3238
3239 return 0;
3240 }
3241
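/* Handle an incoming Connection Request: look up a listening channel
 * on the PSM, check link security and the accept backlog, create the
 * child channel and answer with a Connection Response carrying a
 * success, pending or error result.
 */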
3242 static inline int l2cap_connect_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
3243 {
3244 struct l2cap_conn_req *req = (struct l2cap_conn_req *) data;
3245 struct l2cap_conn_rsp rsp;
3246 struct l2cap_chan *chan = NULL, *pchan;
3247 struct sock *parent, *sk = NULL;
3248 int result, status = L2CAP_CS_NO_INFO;
3249
3250 u16 dcid = 0, scid = __le16_to_cpu(req->scid);
3251 __le16 psm = req->psm;
3252
3253 BT_DBG("psm 0x%2.2x scid 0x%4.4x", __le16_to_cpu(psm), scid);
3254
3255 /* Check if we have a socket listening on this PSM */
3256 pchan = l2cap_global_chan_by_psm(BT_LISTEN, psm, conn->src, conn->dst);
3257 if (!pchan) {
3258 result = L2CAP_CR_BAD_PSM;
3259 goto sendresp;
3260 }
3261
3262 parent = pchan->sk;
3263
3264 mutex_lock(&conn->chan_lock);
3265 lock_sock(parent);
3266
3267 /* Check if the ACL is secure enough (if not SDP) */
3268 if (psm != cpu_to_le16(0x0001) &&
3269 !hci_conn_check_link_mode(conn->hcon)) {
3270 conn->disc_reason = HCI_ERROR_AUTH_FAILURE;
3271 result = L2CAP_CR_SEC_BLOCK;
3272 goto response;
3273 }
3274
3275 result = L2CAP_CR_NO_MEM;
3276
3277 /* Check for backlog size */
3278 if (sk_acceptq_is_full(parent)) {
3279 BT_DBG("backlog full %d", parent->sk_ack_backlog);
3280 goto response;
3281 }
3282
3283 chan = pchan->ops->new_connection(pchan->data);
3284 if (!chan)
3285 goto response;
3286
3287 sk = chan->sk;
3288
3289 /* Check if we already have a channel with that dcid */
3290 if (__l2cap_get_chan_by_dcid(conn, scid)) {
3291 sock_set_flag(sk, SOCK_ZAPPED);
3292 chan->ops->close(chan->data);
3293 goto response;
3294 }
3295
3296 hci_conn_hold(conn->hcon);
3297
3298 bacpy(&bt_sk(sk)->src, conn->src);
3299 bacpy(&bt_sk(sk)->dst, conn->dst);
3300 chan->psm = psm;
3301 chan->dcid = scid;
3302
3303 bt_accept_enqueue(parent, sk);
3304
3305 __l2cap_chan_add(conn, chan);
3306
3307 dcid = chan->scid;
3308
3309 __set_chan_timer(chan, sk->sk_sndtimeo);
3310
3311 chan->ident = cmd->ident;
3312
3313 if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE) {
3314 if (l2cap_chan_check_security(chan)) {
3315 if (test_bit(BT_SK_DEFER_SETUP, &bt_sk(sk)->flags)) {
3316 __l2cap_state_change(chan, BT_CONNECT2);
3317 result = L2CAP_CR_PEND;
3318 status = L2CAP_CS_AUTHOR_PEND;
3319 parent->sk_data_ready(parent, 0);
3320 } else {
3321 __l2cap_state_change(chan, BT_CONFIG);
3322 result = L2CAP_CR_SUCCESS;
3323 status = L2CAP_CS_NO_INFO;
3324 }
3325 } else {
3326 __l2cap_state_change(chan, BT_CONNECT2);
3327 result = L2CAP_CR_PEND;
3328 status = L2CAP_CS_AUTHEN_PEND;
3329 }
3330 } else {
3331 __l2cap_state_change(chan, BT_CONNECT2);
3332 result = L2CAP_CR_PEND;
3333 status = L2CAP_CS_NO_INFO;
3334 }
3335
3336 response:
3337 release_sock(parent);
3338 mutex_unlock(&conn->chan_lock);
3339
3340 sendresp:
3341 rsp.scid = cpu_to_le16(scid);
3342 rsp.dcid = cpu_to_le16(dcid);
3343 rsp.result = cpu_to_le16(result);
3344 rsp.status = cpu_to_le16(status);
3345 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONN_RSP, sizeof(rsp), &rsp);
3346
3347 if (result == L2CAP_CR_PEND && status == L2CAP_CS_NO_INFO) {
3348 struct l2cap_info_req info;
3349 info.type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
3350
3351 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
3352 conn->info_ident = l2cap_get_ident(conn);
3353
3354 schedule_delayed_work(&conn->info_timer, L2CAP_INFO_TIMEOUT);
3355
3356 l2cap_send_cmd(conn, conn->info_ident,
3357 L2CAP_INFO_REQ, sizeof(info), &info);
3358 }
3359
3360 if (chan && !test_bit(CONF_REQ_SENT, &chan->conf_state) &&
3361 result == L2CAP_CR_SUCCESS) {
3362 u8 buf[128];
3363 set_bit(CONF_REQ_SENT, &chan->conf_state);
3364 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
3365 l2cap_build_conf_req(chan, buf), buf);
3366 chan->num_conf_req++;
3367 }
3368
3369 return 0;
3370 }
3371
3372 static inline int l2cap_connect_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
3373 {
3374 struct l2cap_conn_rsp *rsp = (struct l2cap_conn_rsp *) data;
3375 u16 scid, dcid, result, status;
3376 struct l2cap_chan *chan;
3377 u8 req[128];
3378 int err;
3379
3380 scid = __le16_to_cpu(rsp->scid);
3381 dcid = __le16_to_cpu(rsp->dcid);
3382 result = __le16_to_cpu(rsp->result);
3383 status = __le16_to_cpu(rsp->status);
3384
3385 BT_DBG("dcid 0x%4.4x scid 0x%4.4x result 0x%2.2x status 0x%2.2x",
3386 dcid, scid, result, status);
3387
3388 mutex_lock(&conn->chan_lock);
3389
3390 if (scid) {
3391 chan = __l2cap_get_chan_by_scid(conn, scid);
3392 if (!chan) {
3393 err = -EFAULT;
3394 goto unlock;
3395 }
3396 } else {
3397 chan = __l2cap_get_chan_by_ident(conn, cmd->ident);
3398 if (!chan) {
3399 err = -EFAULT;
3400 goto unlock;
3401 }
3402 }
3403
3404 err = 0;
3405
3406 l2cap_chan_lock(chan);
3407
3408 switch (result) {
3409 case L2CAP_CR_SUCCESS:
3410 l2cap_state_change(chan, BT_CONFIG);
3411 chan->ident = 0;
3412 chan->dcid = dcid;
3413 clear_bit(CONF_CONNECT_PEND, &chan->conf_state);
3414
3415 if (test_and_set_bit(CONF_REQ_SENT, &chan->conf_state))
3416 break;
3417
3418 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
3419 l2cap_build_conf_req(chan, req), req);
3420 chan->num_conf_req++;
3421 break;
3422
3423 case L2CAP_CR_PEND:
3424 set_bit(CONF_CONNECT_PEND, &chan->conf_state);
3425 break;
3426
3427 default:
3428 l2cap_chan_del(chan, ECONNREFUSED);
3429 break;
3430 }
3431
3432 l2cap_chan_unlock(chan);
3433
3434 unlock:
3435 mutex_unlock(&conn->chan_lock);
3436
3437 return err;
3438 }
3439
3440 static inline void set_default_fcs(struct l2cap_chan *chan)
3441 {
3442 /* FCS is enabled only in ERTM or streaming mode, if one or both
3443 * sides request it.
3444 */
3445 if (chan->mode != L2CAP_MODE_ERTM && chan->mode != L2CAP_MODE_STREAMING)
3446 chan->fcs = L2CAP_FCS_NONE;
3447 else if (!test_bit(CONF_NO_FCS_RECV, &chan->conf_state))
3448 chan->fcs = L2CAP_FCS_CRC16;
3449 }
3450
3451 static inline int l2cap_config_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u16 cmd_len, u8 *data)
3452 {
3453 struct l2cap_conf_req *req = (struct l2cap_conf_req *) data;
3454 u16 dcid, flags;
3455 u8 rsp[64];
3456 struct l2cap_chan *chan;
3457 int len, err = 0;
3458
3459 dcid = __le16_to_cpu(req->dcid);
3460 flags = __le16_to_cpu(req->flags);
3461
3462 BT_DBG("dcid 0x%4.4x flags 0x%2.2x", dcid, flags);
3463
3464 chan = l2cap_get_chan_by_scid(conn, dcid);
3465 if (!chan)
3466 return -ENOENT;
3467
3468 if (chan->state != BT_CONFIG && chan->state != BT_CONNECT2) {
3469 struct l2cap_cmd_rej_cid rej;
3470
3471 rej.reason = cpu_to_le16(L2CAP_REJ_INVALID_CID);
3472 rej.scid = cpu_to_le16(chan->scid);
3473 rej.dcid = cpu_to_le16(chan->dcid);
3474
3475 l2cap_send_cmd(conn, cmd->ident, L2CAP_COMMAND_REJ,
3476 sizeof(rej), &rej);
3477 goto unlock;
3478 }
3479
3480 /* Reject if config buffer is too small. */
3481 len = cmd_len - sizeof(*req);
3482 if (len < 0 || chan->conf_len + len > sizeof(chan->conf_req)) {
3483 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
3484 l2cap_build_conf_rsp(chan, rsp,
3485 L2CAP_CONF_REJECT, flags), rsp);
3486 goto unlock;
3487 }
3488
3489 /* Store config. */
3490 memcpy(chan->conf_req + chan->conf_len, req->data, len);
3491 chan->conf_len += len;
3492
3493 if (flags & 0x0001) {
3494 /* Incomplete config. Send empty response. */
3495 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
3496 l2cap_build_conf_rsp(chan, rsp,
3497 L2CAP_CONF_SUCCESS, 0x0001), rsp);
3498 goto unlock;
3499 }
3500
3501 /* Complete config. */
3502 len = l2cap_parse_conf_req(chan, rsp);
3503 if (len < 0) {
3504 l2cap_send_disconn_req(conn, chan, ECONNRESET);
3505 goto unlock;
3506 }
3507
3508 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP, len, rsp);
3509 chan->num_conf_rsp++;
3510
3511 /* Reset config buffer. */
3512 chan->conf_len = 0;
3513
3514 if (!test_bit(CONF_OUTPUT_DONE, &chan->conf_state))
3515 goto unlock;
3516
3517 if (test_bit(CONF_INPUT_DONE, &chan->conf_state)) {
3518 set_default_fcs(chan);
3519
3520 l2cap_state_change(chan, BT_CONNECTED);
3521
3522 if (chan->mode == L2CAP_MODE_ERTM ||
3523 chan->mode == L2CAP_MODE_STREAMING)
3524 err = l2cap_ertm_init(chan);
3525
3526 if (err < 0)
3527 l2cap_send_disconn_req(chan->conn, chan, -err);
3528 else
3529 l2cap_chan_ready(chan);
3530
3531 goto unlock;
3532 }
3533
3534 if (!test_and_set_bit(CONF_REQ_SENT, &chan->conf_state)) {
3535 u8 buf[64];
3536 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
3537 l2cap_build_conf_req(chan, buf), buf);
3538 chan->num_conf_req++;
3539 }
3540
3541 /* Got Conf Rsp PENDING from remote side and assume we sent
3542 * Conf Rsp PENDING in the code above */
3543 if (test_bit(CONF_REM_CONF_PEND, &chan->conf_state) &&
3544 test_bit(CONF_LOC_CONF_PEND, &chan->conf_state)) {
3545
3546 /* check compatibility */
3547
3548 clear_bit(CONF_LOC_CONF_PEND, &chan->conf_state);
3549 set_bit(CONF_OUTPUT_DONE, &chan->conf_state);
3550
3551 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
3552 l2cap_build_conf_rsp(chan, rsp,
3553 L2CAP_CONF_SUCCESS, 0x0000), rsp);
3554 }
3555
3556 unlock:
3557 l2cap_chan_unlock(chan);
3558 return err;
3559 }
3560
3561 static inline int l2cap_config_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
3562 {
3563 struct l2cap_conf_rsp *rsp = (struct l2cap_conf_rsp *)data;
3564 u16 scid, flags, result;
3565 struct l2cap_chan *chan;
3566 int len = le16_to_cpu(cmd->len) - sizeof(*rsp);
3567 int err = 0;
3568
3569 scid = __le16_to_cpu(rsp->scid);
3570 flags = __le16_to_cpu(rsp->flags);
3571 result = __le16_to_cpu(rsp->result);
3572
3573 BT_DBG("scid 0x%4.4x flags 0x%2.2x result 0x%2.2x len %d", scid, flags,
3574 result, len);
3575
3576 chan = l2cap_get_chan_by_scid(conn, scid);
3577 if (!chan)
3578 return 0;
3579
3580 switch (result) {
3581 case L2CAP_CONF_SUCCESS:
3582 l2cap_conf_rfc_get(chan, rsp->data, len);
3583 clear_bit(CONF_REM_CONF_PEND, &chan->conf_state);
3584 break;
3585
3586 case L2CAP_CONF_PENDING:
3587 set_bit(CONF_REM_CONF_PEND, &chan->conf_state);
3588
3589 if (test_bit(CONF_LOC_CONF_PEND, &chan->conf_state)) {
3590 char buf[64];
3591
3592 len = l2cap_parse_conf_rsp(chan, rsp->data, len,
3593 buf, &result);
3594 if (len < 0) {
3595 l2cap_send_disconn_req(conn, chan, ECONNRESET);
3596 goto done;
3597 }
3598
3599 /* check compatibility */
3600
3601 clear_bit(CONF_LOC_CONF_PEND, &chan->conf_state);
3602 set_bit(CONF_OUTPUT_DONE, &chan->conf_state);
3603
3604 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
3605 l2cap_build_conf_rsp(chan, buf,
3606 L2CAP_CONF_SUCCESS, 0x0000), buf);
3607 }
3608 goto done;
3609
3610 case L2CAP_CONF_UNACCEPT:
3611 if (chan->num_conf_rsp <= L2CAP_CONF_MAX_CONF_RSP) {
3612 char req[64];
3613
3614 if (len > sizeof(req) - sizeof(struct l2cap_conf_req)) {
3615 l2cap_send_disconn_req(conn, chan, ECONNRESET);
3616 goto done;
3617 }
3618
3619 /* throw out any old stored conf requests */
3620 result = L2CAP_CONF_SUCCESS;
3621 len = l2cap_parse_conf_rsp(chan, rsp->data, len,
3622 req, &result);
3623 if (len < 0) {
3624 l2cap_send_disconn_req(conn, chan, ECONNRESET);
3625 goto done;
3626 }
3627
3628 l2cap_send_cmd(conn, l2cap_get_ident(conn),
3629 L2CAP_CONF_REQ, len, req);
3630 chan->num_conf_req++;
3631 if (result != L2CAP_CONF_SUCCESS)
3632 goto done;
3633 break;
3634 }
3635
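/* fall through when the config retry limit is exceeded */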
3636 default:
3637 l2cap_chan_set_err(chan, ECONNRESET);
3638
3639 __set_chan_timer(chan, L2CAP_DISC_REJ_TIMEOUT);
3640 l2cap_send_disconn_req(conn, chan, ECONNRESET);
3641 goto done;
3642 }
3643
3644 if (flags & 0x01)
3645 goto done;
3646
3647 set_bit(CONF_INPUT_DONE, &chan->conf_state);
3648
3649 if (test_bit(CONF_OUTPUT_DONE, &chan->conf_state)) {
3650 set_default_fcs(chan);
3651
3652 l2cap_state_change(chan, BT_CONNECTED);
3653 if (chan->mode == L2CAP_MODE_ERTM ||
3654 chan->mode == L2CAP_MODE_STREAMING)
3655 err = l2cap_ertm_init(chan);
3656
3657 if (err < 0)
3658 l2cap_send_disconn_req(chan->conn, chan, -err);
3659 else
3660 l2cap_chan_ready(chan);
3661 }
3662
3663 done:
3664 l2cap_chan_unlock(chan);
3665 return err;
3666 }
3667
3668 static inline int l2cap_disconnect_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
3669 {
3670 struct l2cap_disconn_req *req = (struct l2cap_disconn_req *) data;
3671 struct l2cap_disconn_rsp rsp;
3672 u16 dcid, scid;
3673 struct l2cap_chan *chan;
3674 struct sock *sk;
3675
3676 scid = __le16_to_cpu(req->scid);
3677 dcid = __le16_to_cpu(req->dcid);
3678
3679 BT_DBG("scid 0x%4.4x dcid 0x%4.4x", scid, dcid);
3680
3681 mutex_lock(&conn->chan_lock);
3682
3683 chan = __l2cap_get_chan_by_scid(conn, dcid);
3684 if (!chan) {
3685 mutex_unlock(&conn->chan_lock);
3686 return 0;
3687 }
3688
3689 l2cap_chan_lock(chan);
3690
3691 sk = chan->sk;
3692
3693 rsp.dcid = cpu_to_le16(chan->scid);
3694 rsp.scid = cpu_to_le16(chan->dcid);
3695 l2cap_send_cmd(conn, cmd->ident, L2CAP_DISCONN_RSP, sizeof(rsp), &rsp);
3696
3697 lock_sock(sk);
3698 sk->sk_shutdown = SHUTDOWN_MASK;
3699 release_sock(sk);
3700
3701 l2cap_chan_hold(chan);
3702 l2cap_chan_del(chan, ECONNRESET);
3703
3704 l2cap_chan_unlock(chan);
3705
3706 chan->ops->close(chan->data);
3707 l2cap_chan_put(chan);
3708
3709 mutex_unlock(&conn->chan_lock);
3710
3711 return 0;
3712 }
3713
3714 static inline int l2cap_disconnect_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
3715 {
3716 struct l2cap_disconn_rsp *rsp = (struct l2cap_disconn_rsp *) data;
3717 u16 dcid, scid;
3718 struct l2cap_chan *chan;
3719
3720 scid = __le16_to_cpu(rsp->scid);
3721 dcid = __le16_to_cpu(rsp->dcid);
3722
3723 BT_DBG("dcid 0x%4.4x scid 0x%4.4x", dcid, scid);
3724
3725 mutex_lock(&conn->chan_lock);
3726
3727 chan = __l2cap_get_chan_by_scid(conn, scid);
3728 if (!chan) {
3729 mutex_unlock(&conn->chan_lock);
3730 return 0;
3731 }
3732
3733 l2cap_chan_lock(chan);
3734
3735 l2cap_chan_hold(chan);
3736 l2cap_chan_del(chan, 0);
3737
3738 l2cap_chan_unlock(chan);
3739
3740 chan->ops->close(chan->data);
3741 l2cap_chan_put(chan);
3742
3743 mutex_unlock(&conn->chan_lock);
3744
3745 return 0;
3746 }
3747
3748 static inline int l2cap_information_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
3749 {
3750 struct l2cap_info_req *req = (struct l2cap_info_req *) data;
3751 u16 type;
3752
3753 type = __le16_to_cpu(req->type);
3754
3755 BT_DBG("type 0x%4.4x", type);
3756
3757 if (type == L2CAP_IT_FEAT_MASK) {
3758 u8 buf[8];
3759 u32 feat_mask = l2cap_feat_mask;
3760 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
3761 rsp->type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
3762 rsp->result = cpu_to_le16(L2CAP_IR_SUCCESS);
3763 if (!disable_ertm)
3764 feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING
3765 | L2CAP_FEAT_FCS;
3766 if (enable_hs)
3767 feat_mask |= L2CAP_FEAT_EXT_FLOW
3768 | L2CAP_FEAT_EXT_WINDOW;
3769
3770 put_unaligned_le32(feat_mask, rsp->data);
3771 l2cap_send_cmd(conn, cmd->ident,
3772 L2CAP_INFO_RSP, sizeof(buf), buf);
3773 } else if (type == L2CAP_IT_FIXED_CHAN) {
3774 u8 buf[12];
3775 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
3776
3777 if (enable_hs)
3778 l2cap_fixed_chan[0] |= L2CAP_FC_A2MP;
3779 else
3780 l2cap_fixed_chan[0] &= ~L2CAP_FC_A2MP;
3781
3782 rsp->type = cpu_to_le16(L2CAP_IT_FIXED_CHAN);
3783 rsp->result = cpu_to_le16(L2CAP_IR_SUCCESS);
3784 memcpy(rsp->data, l2cap_fixed_chan, sizeof(l2cap_fixed_chan));
3785 l2cap_send_cmd(conn, cmd->ident,
3786 L2CAP_INFO_RSP, sizeof(buf), buf);
3787 } else {
3788 struct l2cap_info_rsp rsp;
3789 rsp.type = cpu_to_le16(type);
3790 rsp.result = cpu_to_le16(L2CAP_IR_NOTSUPP);
3791 l2cap_send_cmd(conn, cmd->ident,
3792 L2CAP_INFO_RSP, sizeof(rsp), &rsp);
3793 }
3794
3795 return 0;
3796 }
3797
3798 static inline int l2cap_information_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
3799 {
3800 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) data;
3801 u16 type, result;
3802
3803 type = __le16_to_cpu(rsp->type);
3804 result = __le16_to_cpu(rsp->result);
3805
3806 BT_DBG("type 0x%4.4x result 0x%2.2x", type, result);
3807
3808 /* L2CAP Info req/rsp are unbound to channels, add extra checks */
3809 if (cmd->ident != conn->info_ident ||
3810 conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE)
3811 return 0;
3812
3813 cancel_delayed_work(&conn->info_timer);
3814
3815 if (result != L2CAP_IR_SUCCESS) {
3816 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
3817 conn->info_ident = 0;
3818
3819 l2cap_conn_start(conn);
3820
3821 return 0;
3822 }
3823
3824 switch (type) {
3825 case L2CAP_IT_FEAT_MASK:
3826 conn->feat_mask = get_unaligned_le32(rsp->data);
3827
3828 if (conn->feat_mask & L2CAP_FEAT_FIXED_CHAN) {
3829 struct l2cap_info_req req;
3830 req.type = cpu_to_le16(L2CAP_IT_FIXED_CHAN);
3831
3832 conn->info_ident = l2cap_get_ident(conn);
3833
3834 l2cap_send_cmd(conn, conn->info_ident,
3835 L2CAP_INFO_REQ, sizeof(req), &req);
3836 } else {
3837 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
3838 conn->info_ident = 0;
3839
3840 l2cap_conn_start(conn);
3841 }
3842 break;
3843
3844 case L2CAP_IT_FIXED_CHAN:
3845 conn->fixed_chan_mask = rsp->data[0];
3846 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
3847 conn->info_ident = 0;
3848
3849 l2cap_conn_start(conn);
3850 break;
3851 }
3852
3853 return 0;
3854 }
3855
3856 static inline int l2cap_create_channel_req(struct l2cap_conn *conn,
3857 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
3858 void *data)
3859 {
3860 struct l2cap_create_chan_req *req = data;
3861 struct l2cap_create_chan_rsp rsp;
3862 u16 psm, scid;
3863
3864 if (cmd_len != sizeof(*req))
3865 return -EPROTO;
3866
3867 if (!enable_hs)
3868 return -EINVAL;
3869
3870 psm = le16_to_cpu(req->psm);
3871 scid = le16_to_cpu(req->scid);
3872
3873 BT_DBG("psm %d, scid %d, amp_id %d", psm, scid, req->amp_id);
3874
3875 /* Placeholder: Always reject */
3876 rsp.dcid = 0;
3877 rsp.scid = cpu_to_le16(scid);
3878 rsp.result = __constant_cpu_to_le16(L2CAP_CR_NO_MEM);
3879 rsp.status = __constant_cpu_to_le16(L2CAP_CS_NO_INFO);
3880
3881 l2cap_send_cmd(conn, cmd->ident, L2CAP_CREATE_CHAN_RSP,
3882 sizeof(rsp), &rsp);
3883
3884 return 0;
3885 }
3886
3887 static inline int l2cap_create_channel_rsp(struct l2cap_conn *conn,
3888 struct l2cap_cmd_hdr *cmd, void *data)
3889 {
3890 BT_DBG("conn %p", conn);
3891
3892 return l2cap_connect_rsp(conn, cmd, data);
3893 }
3894
3895 static void l2cap_send_move_chan_rsp(struct l2cap_conn *conn, u8 ident,
3896 u16 icid, u16 result)
3897 {
3898 struct l2cap_move_chan_rsp rsp;
3899
3900 BT_DBG("icid %d, result %d", icid, result);
3901
3902 rsp.icid = cpu_to_le16(icid);
3903 rsp.result = cpu_to_le16(result);
3904
3905 l2cap_send_cmd(conn, ident, L2CAP_MOVE_CHAN_RSP, sizeof(rsp), &rsp);
3906 }
3907
3908 static void l2cap_send_move_chan_cfm(struct l2cap_conn *conn,
3909 struct l2cap_chan *chan, u16 icid, u16 result)
3910 {
3911 struct l2cap_move_chan_cfm cfm;
3912 u8 ident;
3913
3914 BT_DBG("icid %d, result %d", icid, result);
3915
3916 ident = l2cap_get_ident(conn);
3917 if (chan)
3918 chan->ident = ident;
3919
3920 cfm.icid = cpu_to_le16(icid);
3921 cfm.result = cpu_to_le16(result);
3922
3923 l2cap_send_cmd(conn, ident, L2CAP_MOVE_CHAN_CFM, sizeof(cfm), &cfm);
3924 }
3925
3926 static void l2cap_send_move_chan_cfm_rsp(struct l2cap_conn *conn, u8 ident,
3927 u16 icid)
3928 {
3929 struct l2cap_move_chan_cfm_rsp rsp;
3930
3931 BT_DBG("icid %d", icid);
3932
3933 rsp.icid = cpu_to_le16(icid);
3934 l2cap_send_cmd(conn, ident, L2CAP_MOVE_CHAN_CFM_RSP, sizeof(rsp), &rsp);
3935 }
3936
3937 static inline int l2cap_move_channel_req(struct l2cap_conn *conn,
3938 struct l2cap_cmd_hdr *cmd, u16 cmd_len, void *data)
3939 {
3940 struct l2cap_move_chan_req *req = data;
3941 u16 icid = 0;
3942 u16 result = L2CAP_MR_NOT_ALLOWED;
3943
3944 if (cmd_len != sizeof(*req))
3945 return -EPROTO;
3946
3947 icid = le16_to_cpu(req->icid);
3948
3949 BT_DBG("icid %d, dest_amp_id %d", icid, req->dest_amp_id);
3950
3951 if (!enable_hs)
3952 return -EINVAL;
3953
3954 /* Placeholder: Always refuse */
3955 l2cap_send_move_chan_rsp(conn, cmd->ident, icid, result);
3956
3957 return 0;
3958 }
3959
3960 static inline int l2cap_move_channel_rsp(struct l2cap_conn *conn,
3961 struct l2cap_cmd_hdr *cmd, u16 cmd_len, void *data)
3962 {
3963 struct l2cap_move_chan_rsp *rsp = data;
3964 u16 icid, result;
3965
3966 if (cmd_len != sizeof(*rsp))
3967 return -EPROTO;
3968
3969 icid = le16_to_cpu(rsp->icid);
3970 result = le16_to_cpu(rsp->result);
3971
3972 BT_DBG("icid %d, result %d", icid, result);
3973
3974 /* Placeholder: Always unconfirmed */
3975 l2cap_send_move_chan_cfm(conn, NULL, icid, L2CAP_MC_UNCONFIRMED);
3976
3977 return 0;
3978 }
3979
3980 static inline int l2cap_move_channel_confirm(struct l2cap_conn *conn,
3981 struct l2cap_cmd_hdr *cmd, u16 cmd_len, void *data)
3982 {
3983 struct l2cap_move_chan_cfm *cfm = data;
3984 u16 icid, result;
3985
3986 if (cmd_len != sizeof(*cfm))
3987 return -EPROTO;
3988
3989 icid = le16_to_cpu(cfm->icid);
3990 result = le16_to_cpu(cfm->result);
3991
3992 BT_DBG("icid %d, result %d", icid, result);
3993
3994 l2cap_send_move_chan_cfm_rsp(conn, cmd->ident, icid);
3995
3996 return 0;
3997 }
3998
3999 static inline int l2cap_move_channel_confirm_rsp(struct l2cap_conn *conn,
4000 struct l2cap_cmd_hdr *cmd, u16 cmd_len, void *data)
4001 {
4002 struct l2cap_move_chan_cfm_rsp *rsp = data;
4003 u16 icid;
4004
4005 if (cmd_len != sizeof(*rsp))
4006 return -EPROTO;
4007
4008 icid = le16_to_cpu(rsp->icid);
4009
4010 BT_DBG("icid %d", icid);
4011
4012 return 0;
4013 }
4014
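/* Validate LE connection parameter update values. The ranges match the
 * units of the Connection Parameter Update Request: min/max connection
 * interval in 1.25 ms steps (e.g. min = 6 is 7.5 ms), supervision
 * timeout in 10 ms steps, and the slave latency must leave room for
 * the supervision timeout.
 */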
4015 static inline int l2cap_check_conn_param(u16 min, u16 max, u16 latency,
4016 u16 to_multiplier)
4017 {
4018 u16 max_latency;
4019
4020 if (min > max || min < 6 || max > 3200)
4021 return -EINVAL;
4022
4023 if (to_multiplier < 10 || to_multiplier > 3200)
4024 return -EINVAL;
4025
4026 if (max >= to_multiplier * 8)
4027 return -EINVAL;
4028
4029 max_latency = (to_multiplier * 8 / max) - 1;
4030 if (latency > 499 || latency > max_latency)
4031 return -EINVAL;
4032
4033 return 0;
4034 }
4035
4036 static inline int l2cap_conn_param_update_req(struct l2cap_conn *conn,
4037 struct l2cap_cmd_hdr *cmd, u8 *data)
4038 {
4039 struct hci_conn *hcon = conn->hcon;
4040 struct l2cap_conn_param_update_req *req;
4041 struct l2cap_conn_param_update_rsp rsp;
4042 u16 min, max, latency, to_multiplier, cmd_len;
4043 int err;
4044
4045 if (!(hcon->link_mode & HCI_LM_MASTER))
4046 return -EINVAL;
4047
4048 cmd_len = __le16_to_cpu(cmd->len);
4049 if (cmd_len != sizeof(struct l2cap_conn_param_update_req))
4050 return -EPROTO;
4051
4052 req = (struct l2cap_conn_param_update_req *) data;
4053 min = __le16_to_cpu(req->min);
4054 max = __le16_to_cpu(req->max);
4055 latency = __le16_to_cpu(req->latency);
4056 to_multiplier = __le16_to_cpu(req->to_multiplier);
4057
4058 BT_DBG("min 0x%4.4x max 0x%4.4x latency: 0x%4.4x Timeout: 0x%4.4x",
4059 min, max, latency, to_multiplier);
4060
4061 memset(&rsp, 0, sizeof(rsp));
4062
4063 err = l2cap_check_conn_param(min, max, latency, to_multiplier);
4064 if (err)
4065 rsp.result = cpu_to_le16(L2CAP_CONN_PARAM_REJECTED);
4066 else
4067 rsp.result = cpu_to_le16(L2CAP_CONN_PARAM_ACCEPTED);
4068
4069 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONN_PARAM_UPDATE_RSP,
4070 sizeof(rsp), &rsp);
4071
4072 if (!err)
4073 hci_le_conn_update(hcon, min, max, latency, to_multiplier);
4074
4075 return 0;
4076 }
4077
4078 static inline int l2cap_bredr_sig_cmd(struct l2cap_conn *conn,
4079 struct l2cap_cmd_hdr *cmd, u16 cmd_len, u8 *data)
4080 {
4081 int err = 0;
4082
4083 switch (cmd->code) {
4084 case L2CAP_COMMAND_REJ:
4085 l2cap_command_rej(conn, cmd, data);
4086 break;
4087
4088 case L2CAP_CONN_REQ:
4089 err = l2cap_connect_req(conn, cmd, data);
4090 break;
4091
4092 case L2CAP_CONN_RSP:
4093 err = l2cap_connect_rsp(conn, cmd, data);
4094 break;
4095
4096 case L2CAP_CONF_REQ:
4097 err = l2cap_config_req(conn, cmd, cmd_len, data);
4098 break;
4099
4100 case L2CAP_CONF_RSP:
4101 err = l2cap_config_rsp(conn, cmd, data);
4102 break;
4103
4104 case L2CAP_DISCONN_REQ:
4105 err = l2cap_disconnect_req(conn, cmd, data);
4106 break;
4107
4108 case L2CAP_DISCONN_RSP:
4109 err = l2cap_disconnect_rsp(conn, cmd, data);
4110 break;
4111
4112 case L2CAP_ECHO_REQ:
4113 l2cap_send_cmd(conn, cmd->ident, L2CAP_ECHO_RSP, cmd_len, data);
4114 break;
4115
4116 case L2CAP_ECHO_RSP:
4117 break;
4118
4119 case L2CAP_INFO_REQ:
4120 err = l2cap_information_req(conn, cmd, data);
4121 break;
4122
4123 case L2CAP_INFO_RSP:
4124 err = l2cap_information_rsp(conn, cmd, data);
4125 break;
4126
4127 case L2CAP_CREATE_CHAN_REQ:
4128 err = l2cap_create_channel_req(conn, cmd, cmd_len, data);
4129 break;
4130
4131 case L2CAP_CREATE_CHAN_RSP:
4132 err = l2cap_create_channel_rsp(conn, cmd, data);
4133 break;
4134
4135 case L2CAP_MOVE_CHAN_REQ:
4136 err = l2cap_move_channel_req(conn, cmd, cmd_len, data);
4137 break;
4138
4139 case L2CAP_MOVE_CHAN_RSP:
4140 err = l2cap_move_channel_rsp(conn, cmd, cmd_len, data);
4141 break;
4142
4143 case L2CAP_MOVE_CHAN_CFM:
4144 err = l2cap_move_channel_confirm(conn, cmd, cmd_len, data);
4145 break;
4146
4147 case L2CAP_MOVE_CHAN_CFM_RSP:
4148 err = l2cap_move_channel_confirm_rsp(conn, cmd, cmd_len, data);
4149 break;
4150
4151 default:
4152 BT_ERR("Unknown BR/EDR signaling command 0x%2.2x", cmd->code);
4153 err = -EINVAL;
4154 break;
4155 }
4156
4157 return err;
4158 }
4159
4160 static inline int l2cap_le_sig_cmd(struct l2cap_conn *conn,
4161 struct l2cap_cmd_hdr *cmd, u8 *data)
4162 {
4163 switch (cmd->code) {
4164 case L2CAP_COMMAND_REJ:
4165 return 0;
4166
4167 case L2CAP_CONN_PARAM_UPDATE_REQ:
4168 return l2cap_conn_param_update_req(conn, cmd, data);
4169
4170 case L2CAP_CONN_PARAM_UPDATE_RSP:
4171 return 0;
4172
4173 default:
4174 BT_ERR("Unknown LE signaling command 0x%2.2x", cmd->code);
4175 return -EINVAL;
4176 }
4177 }
4178
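/* Walk all commands in a signalling PDU, dispatch each one to the
 * BR/EDR or LE handler depending on the link type, and reply with a
 * Command Reject if a handler fails.
 */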
4179 static inline void l2cap_sig_channel(struct l2cap_conn *conn,
4180 struct sk_buff *skb)
4181 {
4182 u8 *data = skb->data;
4183 int len = skb->len;
4184 struct l2cap_cmd_hdr cmd;
4185 int err;
4186
4187 l2cap_raw_recv(conn, skb);
4188
4189 while (len >= L2CAP_CMD_HDR_SIZE) {
4190 u16 cmd_len;
4191 memcpy(&cmd, data, L2CAP_CMD_HDR_SIZE);
4192 data += L2CAP_CMD_HDR_SIZE;
4193 len -= L2CAP_CMD_HDR_SIZE;
4194
4195 cmd_len = le16_to_cpu(cmd.len);
4196
4197 BT_DBG("code 0x%2.2x len %d id 0x%2.2x", cmd.code, cmd_len, cmd.ident);
4198
4199 if (cmd_len > len || !cmd.ident) {
4200 BT_DBG("corrupted command");
4201 break;
4202 }
4203
4204 if (conn->hcon->type == LE_LINK)
4205 err = l2cap_le_sig_cmd(conn, &cmd, data);
4206 else
4207 err = l2cap_bredr_sig_cmd(conn, &cmd, cmd_len, data);
4208
4209 if (err) {
4210 struct l2cap_cmd_rej_unk rej;
4211
4212 BT_ERR("Wrong link type (%d)", err);
4213
4214 /* FIXME: Map err to a valid reason */
4215 rej.reason = cpu_to_le16(L2CAP_REJ_NOT_UNDERSTOOD);
4216 l2cap_send_cmd(conn, cmd.ident, L2CAP_COMMAND_REJ, sizeof(rej), &rej);
4217 }
4218
4219 data += cmd_len;
4220 len -= cmd_len;
4221 }
4222
4223 kfree_skb(skb);
4224 }
4225
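/* When the CRC16 FCS is in use, trim the trailing checksum from the
 * skb and verify it over the header and payload.
 */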
4226 static int l2cap_check_fcs(struct l2cap_chan *chan, struct sk_buff *skb)
4227 {
4228 u16 our_fcs, rcv_fcs;
4229 int hdr_size;
4230
4231 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
4232 hdr_size = L2CAP_EXT_HDR_SIZE;
4233 else
4234 hdr_size = L2CAP_ENH_HDR_SIZE;
4235
4236 if (chan->fcs == L2CAP_FCS_CRC16) {
4237 skb_trim(skb, skb->len - L2CAP_FCS_SIZE);
4238 rcv_fcs = get_unaligned_le16(skb->data + skb->len);
4239 our_fcs = crc16(0, skb->data - hdr_size, skb->len + hdr_size);
4240
4241 if (our_fcs != rcv_fcs)
4242 return -EBADMSG;
4243 }
4244 return 0;
4245 }
4246
4247 static inline void l2cap_send_i_or_rr_or_rnr(struct l2cap_chan *chan)
4248 {
4249 u32 control = 0;
4250
4251 chan->frames_sent = 0;
4252
4253 control |= __set_reqseq(chan, chan->buffer_seq);
4254
4255 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
4256 control |= __set_ctrl_super(chan, L2CAP_SUPER_RNR);
4257 set_bit(CONN_RNR_SENT, &chan->conn_state);
4258 }
4259
4260 if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state))
4261 l2cap_retransmit_frames(chan);
4262
4263 l2cap_ertm_send(chan);
4264
4265 if (!test_bit(CONN_LOCAL_BUSY, &chan->conn_state) &&
4266 chan->frames_sent == 0) {
4267 control |= __set_ctrl_super(chan, L2CAP_SUPER_RR);
4268 }
4269 }
4270
4271 static int l2cap_add_to_srej_queue(struct l2cap_chan *chan, struct sk_buff *skb, u16 tx_seq, u8 sar)
4272 {
4273 struct sk_buff *next_skb;
4274 int tx_seq_offset, next_tx_seq_offset;
4275
4276 bt_cb(skb)->control.txseq = tx_seq;
4277 bt_cb(skb)->control.sar = sar;
4278
4279 next_skb = skb_peek(&chan->srej_q);
4280
4281 tx_seq_offset = __seq_offset(chan, tx_seq, chan->buffer_seq);
4282
4283 while (next_skb) {
4284 if (bt_cb(next_skb)->control.txseq == tx_seq)
4285 return -EINVAL;
4286
4287 next_tx_seq_offset = __seq_offset(chan,
4288 bt_cb(next_skb)->control.txseq, chan->buffer_seq);
4289
4290 if (next_tx_seq_offset > tx_seq_offset) {
4291 __skb_queue_before(&chan->srej_q, next_skb, skb);
4292 return 0;
4293 }
4294
4295 if (skb_queue_is_last(&chan->srej_q, next_skb))
4296 next_skb = NULL;
4297 else
4298 next_skb = skb_queue_next(&chan->srej_q, next_skb);
4299 }
4300
4301 __skb_queue_tail(&chan->srej_q, skb);
4302
4303 return 0;
4304 }
4305
4306 static void append_skb_frag(struct sk_buff *skb,
4307 struct sk_buff *new_frag, struct sk_buff **last_frag)
4308 {
4309 /* skb->len reflects data in skb as well as all fragments
4310 * skb->data_len reflects only data in fragments
4311 */
4312 if (!skb_has_frag_list(skb))
4313 skb_shinfo(skb)->frag_list = new_frag;
4314
4315 new_frag->next = NULL;
4316
4317 (*last_frag)->next = new_frag;
4318 *last_frag = new_frag;
4319
4320 skb->len += new_frag->len;
4321 skb->data_len += new_frag->len;
4322 skb->truesize += new_frag->truesize;
4323 }
4324
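/* Reassemble an SDU from its SAR fragments.  Unsegmented frames are passed
 * up directly; a START frame carries the total SDU length, and CONTINUE/END
 * frames are chained onto chan->sdu until the announced length is reached.
 * Any error discards the partial SDU.
 */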
4325 static int l2cap_reassemble_sdu(struct l2cap_chan *chan, struct sk_buff *skb, u32 control)
4326 {
4327 int err = -EINVAL;
4328
4329 switch (__get_ctrl_sar(chan, control)) {
4330 case L2CAP_SAR_UNSEGMENTED:
4331 if (chan->sdu)
4332 break;
4333
4334 err = chan->ops->recv(chan->data, skb);
4335 break;
4336
4337 case L2CAP_SAR_START:
4338 if (chan->sdu)
4339 break;
4340
4341 chan->sdu_len = get_unaligned_le16(skb->data);
4342 skb_pull(skb, L2CAP_SDULEN_SIZE);
4343
4344 if (chan->sdu_len > chan->imtu) {
4345 err = -EMSGSIZE;
4346 break;
4347 }
4348
4349 if (skb->len >= chan->sdu_len)
4350 break;
4351
4352 chan->sdu = skb;
4353 chan->sdu_last_frag = skb;
4354
4355 skb = NULL;
4356 err = 0;
4357 break;
4358
4359 case L2CAP_SAR_CONTINUE:
4360 if (!chan->sdu)
4361 break;
4362
4363 append_skb_frag(chan->sdu, skb,
4364 &chan->sdu_last_frag);
4365 skb = NULL;
4366
4367 if (chan->sdu->len >= chan->sdu_len)
4368 break;
4369
4370 err = 0;
4371 break;
4372
4373 case L2CAP_SAR_END:
4374 if (!chan->sdu)
4375 break;
4376
4377 append_skb_frag(chan->sdu, skb,
4378 &chan->sdu_last_frag);
4379 skb = NULL;
4380
4381 if (chan->sdu->len != chan->sdu_len)
4382 break;
4383
4384 err = chan->ops->recv(chan->data, chan->sdu);
4385
4386 if (!err) {
4387 /* Reassembly complete */
4388 chan->sdu = NULL;
4389 chan->sdu_last_frag = NULL;
4390 chan->sdu_len = 0;
4391 }
4392 break;
4393 }
4394
4395 if (err) {
4396 kfree_skb(skb);
4397 kfree_skb(chan->sdu);
4398 chan->sdu = NULL;
4399 chan->sdu_last_frag = NULL;
4400 chan->sdu_len = 0;
4401 }
4402
4403 return err;
4404 }
4405
4406 static void l2cap_ertm_enter_local_busy(struct l2cap_chan *chan)
4407 {
4408 BT_DBG("chan %p, Enter local busy", chan);
4409
4410 set_bit(CONN_LOCAL_BUSY, &chan->conn_state);
4411 l2cap_seq_list_clear(&chan->srej_list);
4412
4413 __set_ack_timer(chan);
4414 }
4415
4416 static void l2cap_ertm_exit_local_busy(struct l2cap_chan *chan)
4417 {
4418 u32 control;
4419
4420 if (!test_bit(CONN_RNR_SENT, &chan->conn_state))
4421 goto done;
4422
4423 control = __set_reqseq(chan, chan->buffer_seq);
4424 control |= __set_ctrl_poll(chan);
4425 	control |= __set_ctrl_super(chan, L2CAP_SUPER_RR);
	l2cap_send_sframe(chan, control);
4426 	chan->retry_count = 1;
4427
4428 __clear_retrans_timer(chan);
4429 __set_monitor_timer(chan);
4430
4431 set_bit(CONN_WAIT_F, &chan->conn_state);
4432
4433 done:
4434 clear_bit(CONN_LOCAL_BUSY, &chan->conn_state);
4435 clear_bit(CONN_RNR_SENT, &chan->conn_state);
4436
4437 BT_DBG("chan %p, Exit local busy", chan);
4438 }
4439
4440 void l2cap_chan_busy(struct l2cap_chan *chan, int busy)
4441 {
4442 if (chan->mode == L2CAP_MODE_ERTM) {
4443 if (busy)
4444 l2cap_ertm_enter_local_busy(chan);
4445 else
4446 l2cap_ertm_exit_local_busy(chan);
4447 }
4448 }
4449
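/* After a selectively rejected frame finally arrives, deliver any queued
 * in-sequence frames from the SREJ queue until the next gap.
 */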
4450 static void l2cap_check_srej_gap(struct l2cap_chan *chan, u16 tx_seq)
4451 {
4452 struct sk_buff *skb;
4453 u32 control;
4454
4455 while ((skb = skb_peek(&chan->srej_q)) &&
4456 !test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
4457 int err;
4458
4459 if (bt_cb(skb)->control.txseq != tx_seq)
4460 break;
4461
4462 skb = skb_dequeue(&chan->srej_q);
4463 control = __set_ctrl_sar(chan, bt_cb(skb)->control.sar);
4464 err = l2cap_reassemble_sdu(chan, skb, control);
4465
4466 if (err < 0) {
4467 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
4468 break;
4469 }
4470
4471 chan->buffer_seq_srej = __next_seq(chan, chan->buffer_seq_srej);
4472 tx_seq = __next_seq(chan, tx_seq);
4473 }
4474 }
4475
4476 static void l2cap_resend_srejframe(struct l2cap_chan *chan, u16 tx_seq)
4477 {
4478 struct srej_list *l, *tmp;
4479 u32 control;
4480
4481 list_for_each_entry_safe(l, tmp, &chan->srej_l, list) {
4482 if (l->tx_seq == tx_seq) {
4483 list_del(&l->list);
4484 kfree(l);
4485 return;
4486 }
4487 control = __set_ctrl_super(chan, L2CAP_SUPER_SREJ);
4488 		control |= __set_reqseq(chan, l->tx_seq);
		l2cap_send_sframe(chan, control);
4489 		list_del(&l->list);
4490 list_add_tail(&l->list, &chan->srej_l);
4491 }
4492 }
4493
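/* Send an SREJ for every frame missing between expected_tx_seq and the
 * tx_seq that was actually received, remembering each requested sequence
 * number on the srej_l list.
 */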
4494 static int l2cap_send_srejframe(struct l2cap_chan *chan, u16 tx_seq)
4495 {
4496 struct srej_list *new;
4497 u32 control;
4498
4499 while (tx_seq != chan->expected_tx_seq) {
4500 control = __set_ctrl_super(chan, L2CAP_SUPER_SREJ);
4501 control |= __set_reqseq(chan, chan->expected_tx_seq);
4502 		l2cap_seq_list_append(&chan->srej_list, chan->expected_tx_seq);
		l2cap_send_sframe(chan, control);
4503 
4504 new = kzalloc(sizeof(struct srej_list), GFP_ATOMIC);
4505 if (!new)
4506 return -ENOMEM;
4507
4508 new->tx_seq = chan->expected_tx_seq;
4509
4510 chan->expected_tx_seq = __next_seq(chan, chan->expected_tx_seq);
4511
4512 list_add_tail(&new->list, &chan->srej_l);
4513 }
4514
4515 chan->expected_tx_seq = __next_seq(chan, chan->expected_tx_seq);
4516
4517 return 0;
4518 }
4519
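/* Handle an incoming I-frame: acknowledge the reqseq, drop frames outside
 * the tx window or received while locally busy, queue out-of-sequence
 * frames for SREJ recovery, and pass in-sequence frames to SDU reassembly,
 * acking after roughly tx_win/6 received frames.
 */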
4520 static inline int l2cap_data_channel_iframe(struct l2cap_chan *chan, u32 rx_control, struct sk_buff *skb)
4521 {
4522 u16 tx_seq = __get_txseq(chan, rx_control);
4523 u16 req_seq = __get_reqseq(chan, rx_control);
4524 u8 sar = __get_ctrl_sar(chan, rx_control);
4525 int tx_seq_offset, expected_tx_seq_offset;
4526 int num_to_ack = (chan->tx_win/6) + 1;
4527 int err = 0;
4528
4529 BT_DBG("chan %p len %d tx_seq %d rx_control 0x%8.8x", chan, skb->len,
4530 tx_seq, rx_control);
4531
4532 if (__is_ctrl_final(chan, rx_control) &&
4533 test_bit(CONN_WAIT_F, &chan->conn_state)) {
4534 __clear_monitor_timer(chan);
4535 if (chan->unacked_frames > 0)
4536 __set_retrans_timer(chan);
4537 clear_bit(CONN_WAIT_F, &chan->conn_state);
4538 }
4539
4540 chan->expected_ack_seq = req_seq;
4541 l2cap_drop_acked_frames(chan);
4542
4543 tx_seq_offset = __seq_offset(chan, tx_seq, chan->buffer_seq);
4544
4545 /* invalid tx_seq */
4546 if (tx_seq_offset >= chan->tx_win) {
4547 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
4548 goto drop;
4549 }
4550
4551 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
4552 if (!test_bit(CONN_RNR_SENT, &chan->conn_state))
4553 l2cap_send_ack(chan);
4554 goto drop;
4555 }
4556
4557 if (tx_seq == chan->expected_tx_seq)
4558 goto expected;
4559
4560 if (test_bit(CONN_SREJ_SENT, &chan->conn_state)) {
4561 struct srej_list *first;
4562
4563 first = list_first_entry(&chan->srej_l,
4564 struct srej_list, list);
4565 if (tx_seq == first->tx_seq) {
4566 l2cap_add_to_srej_queue(chan, skb, tx_seq, sar);
4567 l2cap_check_srej_gap(chan, tx_seq);
4568
4569 list_del(&first->list);
4570 kfree(first);
4571
4572 if (list_empty(&chan->srej_l)) {
4573 chan->buffer_seq = chan->buffer_seq_srej;
4574 clear_bit(CONN_SREJ_SENT, &chan->conn_state);
4575 l2cap_send_ack(chan);
4576 BT_DBG("chan %p, Exit SREJ_SENT", chan);
4577 }
4578 } else {
4579 struct srej_list *l;
4580
4581 /* duplicated tx_seq */
4582 if (l2cap_add_to_srej_queue(chan, skb, tx_seq, sar) < 0)
4583 goto drop;
4584
4585 list_for_each_entry(l, &chan->srej_l, list) {
4586 if (l->tx_seq == tx_seq) {
4587 l2cap_resend_srejframe(chan, tx_seq);
4588 return 0;
4589 }
4590 }
4591
4592 err = l2cap_send_srejframe(chan, tx_seq);
4593 if (err < 0) {
4594 l2cap_send_disconn_req(chan->conn, chan, -err);
4595 return err;
4596 }
4597 }
4598 } else {
4599 expected_tx_seq_offset = __seq_offset(chan,
4600 chan->expected_tx_seq, chan->buffer_seq);
4601
4602 /* duplicated tx_seq */
4603 if (tx_seq_offset < expected_tx_seq_offset)
4604 goto drop;
4605
4606 set_bit(CONN_SREJ_SENT, &chan->conn_state);
4607
4608 BT_DBG("chan %p, Enter SREJ", chan);
4609
4610 INIT_LIST_HEAD(&chan->srej_l);
4611 chan->buffer_seq_srej = chan->buffer_seq;
4612
4613 __skb_queue_head_init(&chan->srej_q);
4614 l2cap_add_to_srej_queue(chan, skb, tx_seq, sar);
4615
4616 /* Set P-bit only if there are some I-frames to ack. */
4617 if (__clear_ack_timer(chan))
4618 set_bit(CONN_SEND_PBIT, &chan->conn_state);
4619
4620 err = l2cap_send_srejframe(chan, tx_seq);
4621 if (err < 0) {
4622 l2cap_send_disconn_req(chan->conn, chan, -err);
4623 return err;
4624 }
4625 }
4626 return 0;
4627
4628 expected:
4629 chan->expected_tx_seq = __next_seq(chan, chan->expected_tx_seq);
4630
4631 if (test_bit(CONN_SREJ_SENT, &chan->conn_state)) {
4632 bt_cb(skb)->control.txseq = tx_seq;
4633 bt_cb(skb)->control.sar = sar;
4634 __skb_queue_tail(&chan->srej_q, skb);
4635 return 0;
4636 }
4637
4638 err = l2cap_reassemble_sdu(chan, skb, rx_control);
4639 chan->buffer_seq = __next_seq(chan, chan->buffer_seq);
4640
4641 if (err < 0) {
4642 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
4643 return err;
4644 }
4645
4646 if (__is_ctrl_final(chan, rx_control)) {
4647 if (!test_and_clear_bit(CONN_REJ_ACT, &chan->conn_state))
4648 l2cap_retransmit_frames(chan);
4649 }
4650
4652 chan->num_acked = (chan->num_acked + 1) % num_to_ack;
4653 if (chan->num_acked == num_to_ack - 1)
4654 l2cap_send_ack(chan);
4655 else
4656 __set_ack_timer(chan);
4657
4658 return 0;
4659
4660 drop:
4661 kfree_skb(skb);
4662 return 0;
4663 }
4664
4665 static inline void l2cap_data_channel_rrframe(struct l2cap_chan *chan, u32 rx_control)
4666 {
4667 BT_DBG("chan %p, req_seq %d ctrl 0x%8.8x", chan,
4668 __get_reqseq(chan, rx_control), rx_control);
4669
4670 chan->expected_ack_seq = __get_reqseq(chan, rx_control);
4671 l2cap_drop_acked_frames(chan);
4672
4673 if (__is_ctrl_poll(chan, rx_control)) {
4674 set_bit(CONN_SEND_FBIT, &chan->conn_state);
4675 if (test_bit(CONN_SREJ_SENT, &chan->conn_state)) {
4676 if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state) &&
4677 (chan->unacked_frames > 0))
4678 __set_retrans_timer(chan);
4679
4680 clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
4681 l2cap_send_srejtail(chan);
4682 } else {
4683 l2cap_send_i_or_rr_or_rnr(chan);
4684 }
4685
4686 } else if (__is_ctrl_final(chan, rx_control)) {
4687 clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
4688
4689 if (!test_and_clear_bit(CONN_REJ_ACT, &chan->conn_state))
4690 l2cap_retransmit_frames(chan);
4691
4692 } else {
4693 if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state) &&
4694 (chan->unacked_frames > 0))
4695 __set_retrans_timer(chan);
4696
4697 clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
4698 if (test_bit(CONN_SREJ_SENT, &chan->conn_state))
4699 l2cap_send_ack(chan);
4700 else
4701 l2cap_ertm_send(chan);
4702 }
4703 }
4704
4705 static inline void l2cap_data_channel_rejframe(struct l2cap_chan *chan, u32 rx_control)
4706 {
4707 u16 tx_seq = __get_reqseq(chan, rx_control);
4708
4709 BT_DBG("chan %p, req_seq %d ctrl 0x%8.8x", chan, tx_seq, rx_control);
4710
4711 clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
4712
4713 chan->expected_ack_seq = tx_seq;
4714 l2cap_drop_acked_frames(chan);
4715
4716 if (__is_ctrl_final(chan, rx_control)) {
4717 if (!test_and_clear_bit(CONN_REJ_ACT, &chan->conn_state))
4718 l2cap_retransmit_frames(chan);
4719 } else {
4720 l2cap_retransmit_frames(chan);
4721
4722 if (test_bit(CONN_WAIT_F, &chan->conn_state))
4723 set_bit(CONN_REJ_ACT, &chan->conn_state);
4724 }
4725 }
4726 static inline void l2cap_data_channel_srejframe(struct l2cap_chan *chan, u32 rx_control)
4727 {
4728 u16 tx_seq = __get_reqseq(chan, rx_control);
4729
4730 BT_DBG("chan %p, req_seq %d ctrl 0x%8.8x", chan, tx_seq, rx_control);
4731
4732 clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
4733
4734 if (__is_ctrl_poll(chan, rx_control)) {
4735 chan->expected_ack_seq = tx_seq;
4736 l2cap_drop_acked_frames(chan);
4737
4738 set_bit(CONN_SEND_FBIT, &chan->conn_state);
4739 l2cap_retransmit_one_frame(chan, tx_seq);
4740
4741 l2cap_ertm_send(chan);
4742
4743 if (test_bit(CONN_WAIT_F, &chan->conn_state)) {
4744 chan->srej_save_reqseq = tx_seq;
4745 set_bit(CONN_SREJ_ACT, &chan->conn_state);
4746 }
4747 } else if (__is_ctrl_final(chan, rx_control)) {
4748 if (test_bit(CONN_SREJ_ACT, &chan->conn_state) &&
4749 chan->srej_save_reqseq == tx_seq)
4750 clear_bit(CONN_SREJ_ACT, &chan->conn_state);
4751 else
4752 l2cap_retransmit_one_frame(chan, tx_seq);
4753 } else {
4754 l2cap_retransmit_one_frame(chan, tx_seq);
4755 if (test_bit(CONN_WAIT_F, &chan->conn_state)) {
4756 chan->srej_save_reqseq = tx_seq;
4757 set_bit(CONN_SREJ_ACT, &chan->conn_state);
4758 }
4759 }
4760 }
4761
4762 static inline void l2cap_data_channel_rnrframe(struct l2cap_chan *chan, u32 rx_control)
4763 {
4764 u16 tx_seq = __get_reqseq(chan, rx_control);
4765
4766 BT_DBG("chan %p, req_seq %d ctrl 0x%8.8x", chan, tx_seq, rx_control);
4767
4768 set_bit(CONN_REMOTE_BUSY, &chan->conn_state);
4769 chan->expected_ack_seq = tx_seq;
4770 l2cap_drop_acked_frames(chan);
4771
4772 if (__is_ctrl_poll(chan, rx_control))
4773 set_bit(CONN_SEND_FBIT, &chan->conn_state);
4774
4775 if (!test_bit(CONN_SREJ_SENT, &chan->conn_state)) {
4776 __clear_retrans_timer(chan);
4777 if (__is_ctrl_poll(chan, rx_control))
4778 l2cap_send_rr_or_rnr(chan, L2CAP_CTRL_FINAL);
4779 return;
4780 }
4781
4782 if (__is_ctrl_poll(chan, rx_control)) {
4783 l2cap_send_srejtail(chan);
4784 } else {
4785 		rx_control = __set_ctrl_super(chan, L2CAP_SUPER_RR);
		l2cap_send_sframe(chan, rx_control);
4786 	}
4787 }
4788
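/* Dispatch a supervisory frame (RR/REJ/SREJ/RNR) to its handler, clearing
 * the WAIT_F state when the F-bit acknowledges an outstanding poll, and
 * free the skb.
 */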
4789 static inline int l2cap_data_channel_sframe(struct l2cap_chan *chan, u32 rx_control, struct sk_buff *skb)
4790 {
4791 BT_DBG("chan %p rx_control 0x%8.8x len %d", chan, rx_control, skb->len);
4792
4793 if (__is_ctrl_final(chan, rx_control) &&
4794 test_bit(CONN_WAIT_F, &chan->conn_state)) {
4795 __clear_monitor_timer(chan);
4796 if (chan->unacked_frames > 0)
4797 __set_retrans_timer(chan);
4798 clear_bit(CONN_WAIT_F, &chan->conn_state);
4799 }
4800
4801 switch (__get_ctrl_super(chan, rx_control)) {
4802 case L2CAP_SUPER_RR:
4803 l2cap_data_channel_rrframe(chan, rx_control);
4804 break;
4805
4806 case L2CAP_SUPER_REJ:
4807 l2cap_data_channel_rejframe(chan, rx_control);
4808 break;
4809
4810 case L2CAP_SUPER_SREJ:
4811 l2cap_data_channel_srejframe(chan, rx_control);
4812 break;
4813
4814 case L2CAP_SUPER_RNR:
4815 l2cap_data_channel_rnrframe(chan, rx_control);
4816 break;
4817 }
4818
4819 kfree_skb(skb);
4820 return 0;
4821 }
4822
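/* Entry point for ERTM data: validate the FCS, payload length and reqseq,
 * then hand the PDU to the I-frame or S-frame handler.
 */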
4823 static int l2cap_ertm_data_rcv(struct l2cap_chan *chan, struct sk_buff *skb)
4824 {
4825 u32 control;
4826 u16 req_seq;
4827 int len, next_tx_seq_offset, req_seq_offset;
4828
4829 __unpack_control(chan, skb);
4830
4831 control = __get_control(chan, skb->data);
4832 skb_pull(skb, __ctrl_size(chan));
4833 len = skb->len;
4834
4835 /*
4836 * We can just drop the corrupted I-frame here.
4837 	 * The receiver will notice the missing frame, start the normal
4838 	 * recovery procedure and request retransmission.
4839 */
4840 if (l2cap_check_fcs(chan, skb))
4841 goto drop;
4842
4843 if (__is_sar_start(chan, control) && !__is_sframe(chan, control))
4844 len -= L2CAP_SDULEN_SIZE;
4845
4846 if (chan->fcs == L2CAP_FCS_CRC16)
4847 len -= L2CAP_FCS_SIZE;
4848
4849 if (len > chan->mps) {
4850 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
4851 goto drop;
4852 }
4853
4854 req_seq = __get_reqseq(chan, control);
4855
4856 req_seq_offset = __seq_offset(chan, req_seq, chan->expected_ack_seq);
4857
4858 next_tx_seq_offset = __seq_offset(chan, chan->next_tx_seq,
4859 chan->expected_ack_seq);
4860
4861 /* check for invalid req-seq */
4862 if (req_seq_offset > next_tx_seq_offset) {
4863 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
4864 goto drop;
4865 }
4866
4867 if (!__is_sframe(chan, control)) {
4868 if (len < 0) {
4869 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
4870 goto drop;
4871 }
4872
4873 l2cap_data_channel_iframe(chan, control, skb);
4874 } else {
4875 if (len != 0) {
4876 			BT_ERR("Trailing bytes: %d in sframe", len);
4877 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
4878 goto drop;
4879 }
4880
4881 l2cap_data_channel_sframe(chan, control, skb);
4882 }
4883
4884 return 0;
4885
4886 drop:
4887 kfree_skb(skb);
4888 return 0;
4889 }
4890
4891 static inline int l2cap_data_channel(struct l2cap_conn *conn, u16 cid, struct sk_buff *skb)
4892 {
4893 struct l2cap_chan *chan;
4894 u32 control;
4895 u16 tx_seq;
4896 int len;
4897
4898 chan = l2cap_get_chan_by_scid(conn, cid);
4899 if (!chan) {
4900 BT_DBG("unknown cid 0x%4.4x", cid);
4901 /* Drop packet and return */
4902 kfree_skb(skb);
4903 return 0;
4904 }
4905
4906 BT_DBG("chan %p, len %d", chan, skb->len);
4907
4908 if (chan->state != BT_CONNECTED)
4909 goto drop;
4910
4911 switch (chan->mode) {
4912 case L2CAP_MODE_BASIC:
4913 		/* If the socket recv buffer overflows we drop data here,
4914 		 * which is *bad* because L2CAP has to be reliable.
4915 		 * But we don't have any other choice: basic mode L2CAP
4916 		 * doesn't provide a flow control mechanism. */
4917
4918 if (chan->imtu < skb->len)
4919 goto drop;
4920
4921 if (!chan->ops->recv(chan->data, skb))
4922 goto done;
4923 break;
4924
4925 case L2CAP_MODE_ERTM:
4926 l2cap_ertm_data_rcv(chan, skb);
4927
4928 goto done;
4929
4930 case L2CAP_MODE_STREAMING:
4931 control = __get_control(chan, skb->data);
4932 skb_pull(skb, __ctrl_size(chan));
4933 len = skb->len;
4934
4935 if (l2cap_check_fcs(chan, skb))
4936 goto drop;
4937
4938 if (__is_sar_start(chan, control))
4939 len -= L2CAP_SDULEN_SIZE;
4940
4941 if (chan->fcs == L2CAP_FCS_CRC16)
4942 len -= L2CAP_FCS_SIZE;
4943
4944 if (len > chan->mps || len < 0 || __is_sframe(chan, control))
4945 goto drop;
4946
4947 tx_seq = __get_txseq(chan, control);
4948
4949 if (chan->expected_tx_seq != tx_seq) {
4950 /* Frame(s) missing - must discard partial SDU */
4951 kfree_skb(chan->sdu);
4952 chan->sdu = NULL;
4953 chan->sdu_last_frag = NULL;
4954 chan->sdu_len = 0;
4955
4956 /* TODO: Notify userland of missing data */
4957 }
4958
4959 chan->expected_tx_seq = __next_seq(chan, tx_seq);
4960
4961 if (l2cap_reassemble_sdu(chan, skb, control) == -EMSGSIZE)
4962 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
4963
4964 goto done;
4965
4966 default:
4967 BT_DBG("chan %p: bad mode 0x%2.2x", chan, chan->mode);
4968 break;
4969 }
4970
4971 drop:
4972 kfree_skb(skb);
4973
4974 done:
4975 l2cap_chan_unlock(chan);
4976
4977 return 0;
4978 }
4979
4980 static inline int l2cap_conless_channel(struct l2cap_conn *conn, __le16 psm, struct sk_buff *skb)
4981 {
4982 struct l2cap_chan *chan;
4983
4984 chan = l2cap_global_chan_by_psm(0, psm, conn->src, conn->dst);
4985 if (!chan)
4986 goto drop;
4987
4988 BT_DBG("chan %p, len %d", chan, skb->len);
4989
4990 if (chan->state != BT_BOUND && chan->state != BT_CONNECTED)
4991 goto drop;
4992
4993 if (chan->imtu < skb->len)
4994 goto drop;
4995
4996 if (!chan->ops->recv(chan->data, skb))
4997 return 0;
4998
4999 drop:
5000 kfree_skb(skb);
5001
5002 return 0;
5003 }
5004
5005 static inline int l2cap_att_channel(struct l2cap_conn *conn, u16 cid,
5006 struct sk_buff *skb)
5007 {
5008 struct l2cap_chan *chan;
5009
5010 chan = l2cap_global_chan_by_scid(0, cid, conn->src, conn->dst);
5011 if (!chan)
5012 goto drop;
5013
5014 BT_DBG("chan %p, len %d", chan, skb->len);
5015
5016 if (chan->state != BT_BOUND && chan->state != BT_CONNECTED)
5017 goto drop;
5018
5019 if (chan->imtu < skb->len)
5020 goto drop;
5021
5022 if (!chan->ops->recv(chan->data, skb))
5023 return 0;
5024
5025 drop:
5026 kfree_skb(skb);
5027
5028 return 0;
5029 }
5030
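/* Demultiplex a complete L2CAP frame by CID: signaling, connectionless,
 * ATT, SMP, or a connection-oriented data channel.
 */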
5031 static void l2cap_recv_frame(struct l2cap_conn *conn, struct sk_buff *skb)
5032 {
5033 struct l2cap_hdr *lh = (void *) skb->data;
5034 u16 cid, len;
5035 __le16 psm;
5036
5037 skb_pull(skb, L2CAP_HDR_SIZE);
5038 cid = __le16_to_cpu(lh->cid);
5039 len = __le16_to_cpu(lh->len);
5040
5041 if (len != skb->len) {
5042 kfree_skb(skb);
5043 return;
5044 }
5045
5046 BT_DBG("len %d, cid 0x%4.4x", len, cid);
5047
5048 switch (cid) {
5049 case L2CAP_CID_LE_SIGNALING:
5050 case L2CAP_CID_SIGNALING:
5051 l2cap_sig_channel(conn, skb);
5052 break;
5053
5054 case L2CAP_CID_CONN_LESS:
5055 psm = get_unaligned((__le16 *) skb->data);
5056 skb_pull(skb, 2);
5057 l2cap_conless_channel(conn, psm, skb);
5058 break;
5059
5060 case L2CAP_CID_LE_DATA:
5061 l2cap_att_channel(conn, cid, skb);
5062 break;
5063
5064 case L2CAP_CID_SMP:
5065 if (smp_sig_channel(conn, skb))
5066 l2cap_conn_del(conn->hcon, EACCES);
5067 break;
5068
5069 default:
5070 l2cap_data_channel(conn, cid, skb);
5071 break;
5072 }
5073 }
5074
5075 /* ---- L2CAP interface with lower layer (HCI) ---- */
5076
5077 int l2cap_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr)
5078 {
5079 int exact = 0, lm1 = 0, lm2 = 0;
5080 struct l2cap_chan *c;
5081
5082 BT_DBG("hdev %s, bdaddr %s", hdev->name, batostr(bdaddr));
5083
5084 /* Find listening sockets and check their link_mode */
5085 read_lock(&chan_list_lock);
5086 list_for_each_entry(c, &chan_list, global_l) {
5087 struct sock *sk = c->sk;
5088
5089 if (c->state != BT_LISTEN)
5090 continue;
5091
5092 if (!bacmp(&bt_sk(sk)->src, &hdev->bdaddr)) {
5093 lm1 |= HCI_LM_ACCEPT;
5094 if (test_bit(FLAG_ROLE_SWITCH, &c->flags))
5095 lm1 |= HCI_LM_MASTER;
5096 exact++;
5097 } else if (!bacmp(&bt_sk(sk)->src, BDADDR_ANY)) {
5098 lm2 |= HCI_LM_ACCEPT;
5099 if (test_bit(FLAG_ROLE_SWITCH, &c->flags))
5100 lm2 |= HCI_LM_MASTER;
5101 }
5102 }
5103 read_unlock(&chan_list_lock);
5104
5105 return exact ? lm1 : lm2;
5106 }
5107
5108 int l2cap_connect_cfm(struct hci_conn *hcon, u8 status)
5109 {
5110 struct l2cap_conn *conn;
5111
5112 BT_DBG("hcon %p bdaddr %s status %d", hcon, batostr(&hcon->dst), status);
5113
5114 if (!status) {
5115 conn = l2cap_conn_add(hcon, status);
5116 if (conn)
5117 l2cap_conn_ready(conn);
5118 } else
5119 l2cap_conn_del(hcon, bt_to_errno(status));
5120
5121 return 0;
5122 }
5123
5124 int l2cap_disconn_ind(struct hci_conn *hcon)
5125 {
5126 struct l2cap_conn *conn = hcon->l2cap_data;
5127
5128 BT_DBG("hcon %p", hcon);
5129
5130 if (!conn)
5131 return HCI_ERROR_REMOTE_USER_TERM;
5132 return conn->disc_reason;
5133 }
5134
5135 int l2cap_disconn_cfm(struct hci_conn *hcon, u8 reason)
5136 {
5137 BT_DBG("hcon %p reason %d", hcon, reason);
5138
5139 l2cap_conn_del(hcon, bt_to_errno(reason));
5140 return 0;
5141 }
5142
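/* On loss of encryption, give a connection-oriented channel a grace period
 * (medium security) or close it immediately (high security).
 */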
5143 static inline void l2cap_check_encryption(struct l2cap_chan *chan, u8 encrypt)
5144 {
5145 if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED)
5146 return;
5147
5148 if (encrypt == 0x00) {
5149 if (chan->sec_level == BT_SECURITY_MEDIUM) {
5150 __set_chan_timer(chan, L2CAP_ENC_TIMEOUT);
5151 } else if (chan->sec_level == BT_SECURITY_HIGH)
5152 l2cap_chan_close(chan, ECONNREFUSED);
5153 } else {
5154 if (chan->sec_level == BT_SECURITY_MEDIUM)
5155 __clear_chan_timer(chan);
5156 }
5157 }
5158
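/* Handle an encryption change on the underlying ACL link: resume pending
 * channel setup when encryption succeeded, or tear the channels down when
 * it failed.
 */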
5159 int l2cap_security_cfm(struct hci_conn *hcon, u8 status, u8 encrypt)
5160 {
5161 struct l2cap_conn *conn = hcon->l2cap_data;
5162 struct l2cap_chan *chan;
5163
5164 if (!conn)
5165 return 0;
5166
5167 BT_DBG("conn %p", conn);
5168
5169 if (hcon->type == LE_LINK) {
5170 if (!status && encrypt)
5171 smp_distribute_keys(conn, 0);
5172 cancel_delayed_work(&conn->security_timer);
5173 }
5174
5175 mutex_lock(&conn->chan_lock);
5176
5177 list_for_each_entry(chan, &conn->chan_l, list) {
5178 l2cap_chan_lock(chan);
5179
5180 BT_DBG("chan->scid %d", chan->scid);
5181
5182 if (chan->scid == L2CAP_CID_LE_DATA) {
5183 if (!status && encrypt) {
5184 chan->sec_level = hcon->sec_level;
5185 l2cap_chan_ready(chan);
5186 }
5187
5188 l2cap_chan_unlock(chan);
5189 continue;
5190 }
5191
5192 if (test_bit(CONF_CONNECT_PEND, &chan->conf_state)) {
5193 l2cap_chan_unlock(chan);
5194 continue;
5195 }
5196
5197 if (!status && (chan->state == BT_CONNECTED ||
5198 chan->state == BT_CONFIG)) {
5199 struct sock *sk = chan->sk;
5200
5201 clear_bit(BT_SK_SUSPEND, &bt_sk(sk)->flags);
5202 sk->sk_state_change(sk);
5203
5204 l2cap_check_encryption(chan, encrypt);
5205 l2cap_chan_unlock(chan);
5206 continue;
5207 }
5208
5209 if (chan->state == BT_CONNECT) {
5210 if (!status) {
5211 l2cap_send_conn_req(chan);
5212 } else {
5213 __set_chan_timer(chan, L2CAP_DISC_TIMEOUT);
5214 }
5215 } else if (chan->state == BT_CONNECT2) {
5216 struct sock *sk = chan->sk;
5217 struct l2cap_conn_rsp rsp;
5218 __u16 res, stat;
5219
5220 lock_sock(sk);
5221
5222 if (!status) {
5223 if (test_bit(BT_SK_DEFER_SETUP,
5224 &bt_sk(sk)->flags)) {
5225 struct sock *parent = bt_sk(sk)->parent;
5226 res = L2CAP_CR_PEND;
5227 stat = L2CAP_CS_AUTHOR_PEND;
5228 if (parent)
5229 parent->sk_data_ready(parent, 0);
5230 } else {
5231 __l2cap_state_change(chan, BT_CONFIG);
5232 res = L2CAP_CR_SUCCESS;
5233 stat = L2CAP_CS_NO_INFO;
5234 }
5235 } else {
5236 __l2cap_state_change(chan, BT_DISCONN);
5237 __set_chan_timer(chan, L2CAP_DISC_TIMEOUT);
5238 res = L2CAP_CR_SEC_BLOCK;
5239 stat = L2CAP_CS_NO_INFO;
5240 }
5241
5242 release_sock(sk);
5243
5244 rsp.scid = cpu_to_le16(chan->dcid);
5245 rsp.dcid = cpu_to_le16(chan->scid);
5246 rsp.result = cpu_to_le16(res);
5247 rsp.status = cpu_to_le16(stat);
5248 l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
5249 sizeof(rsp), &rsp);
5250 }
5251
5252 l2cap_chan_unlock(chan);
5253 }
5254
5255 mutex_unlock(&conn->chan_lock);
5256
5257 return 0;
5258 }
5259
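/* Reassemble L2CAP frames from incoming ACL fragments.  A start fragment
 * carries the Basic L2CAP header and announces the total frame length;
 * continuation fragments are appended to conn->rx_skb until the frame is
 * complete and can be passed to l2cap_recv_frame().
 */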
5260 int l2cap_recv_acldata(struct hci_conn *hcon, struct sk_buff *skb, u16 flags)
5261 {
5262 struct l2cap_conn *conn = hcon->l2cap_data;
5263
5264 if (!conn)
5265 conn = l2cap_conn_add(hcon, 0);
5266
5267 if (!conn)
5268 goto drop;
5269
5270 BT_DBG("conn %p len %d flags 0x%x", conn, skb->len, flags);
5271
5272 if (!(flags & ACL_CONT)) {
5273 struct l2cap_hdr *hdr;
5274 int len;
5275
5276 if (conn->rx_len) {
5277 BT_ERR("Unexpected start frame (len %d)", skb->len);
5278 kfree_skb(conn->rx_skb);
5279 conn->rx_skb = NULL;
5280 conn->rx_len = 0;
5281 l2cap_conn_unreliable(conn, ECOMM);
5282 }
5283
5284 		/* A start fragment always begins with the Basic L2CAP header */
5285 if (skb->len < L2CAP_HDR_SIZE) {
5286 BT_ERR("Frame is too short (len %d)", skb->len);
5287 l2cap_conn_unreliable(conn, ECOMM);
5288 goto drop;
5289 }
5290
5291 hdr = (struct l2cap_hdr *) skb->data;
5292 len = __le16_to_cpu(hdr->len) + L2CAP_HDR_SIZE;
5293
5294 if (len == skb->len) {
5295 /* Complete frame received */
5296 l2cap_recv_frame(conn, skb);
5297 return 0;
5298 }
5299
5300 BT_DBG("Start: total len %d, frag len %d", len, skb->len);
5301
5302 if (skb->len > len) {
5303 BT_ERR("Frame is too long (len %d, expected len %d)",
5304 skb->len, len);
5305 l2cap_conn_unreliable(conn, ECOMM);
5306 goto drop;
5307 }
5308
5309 /* Allocate skb for the complete frame (with header) */
5310 conn->rx_skb = bt_skb_alloc(len, GFP_ATOMIC);
5311 if (!conn->rx_skb)
5312 goto drop;
5313
5314 skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
5315 skb->len);
5316 conn->rx_len = len - skb->len;
5317 } else {
5318 BT_DBG("Cont: frag len %d (expecting %d)", skb->len, conn->rx_len);
5319
5320 if (!conn->rx_len) {
5321 BT_ERR("Unexpected continuation frame (len %d)", skb->len);
5322 l2cap_conn_unreliable(conn, ECOMM);
5323 goto drop;
5324 }
5325
5326 if (skb->len > conn->rx_len) {
5327 BT_ERR("Fragment is too long (len %d, expected %d)",
5328 skb->len, conn->rx_len);
5329 kfree_skb(conn->rx_skb);
5330 conn->rx_skb = NULL;
5331 conn->rx_len = 0;
5332 l2cap_conn_unreliable(conn, ECOMM);
5333 goto drop;
5334 }
5335
5336 skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
5337 skb->len);
5338 conn->rx_len -= skb->len;
5339
5340 if (!conn->rx_len) {
5341 /* Complete frame received */
5342 l2cap_recv_frame(conn, conn->rx_skb);
5343 conn->rx_skb = NULL;
5344 }
5345 }
5346
5347 drop:
5348 kfree_skb(skb);
5349 return 0;
5350 }
5351
5352 static int l2cap_debugfs_show(struct seq_file *f, void *p)
5353 {
5354 struct l2cap_chan *c;
5355
5356 read_lock(&chan_list_lock);
5357
5358 list_for_each_entry(c, &chan_list, global_l) {
5359 struct sock *sk = c->sk;
5360
5361 seq_printf(f, "%s %s %d %d 0x%4.4x 0x%4.4x %d %d %d %d\n",
5362 batostr(&bt_sk(sk)->src),
5363 batostr(&bt_sk(sk)->dst),
5364 c->state, __le16_to_cpu(c->psm),
5365 c->scid, c->dcid, c->imtu, c->omtu,
5366 c->sec_level, c->mode);
5367 }
5368
5369 read_unlock(&chan_list_lock);
5370
5371 return 0;
5372 }
5373
5374 static int l2cap_debugfs_open(struct inode *inode, struct file *file)
5375 {
5376 return single_open(file, l2cap_debugfs_show, inode->i_private);
5377 }
5378
5379 static const struct file_operations l2cap_debugfs_fops = {
5380 .open = l2cap_debugfs_open,
5381 .read = seq_read,
5382 .llseek = seq_lseek,
5383 .release = single_release,
5384 };
5385
5386 static struct dentry *l2cap_debugfs;
5387
5388 int __init l2cap_init(void)
5389 {
5390 int err;
5391
5392 err = l2cap_init_sockets();
5393 if (err < 0)
5394 return err;
5395
5396 if (bt_debugfs) {
5397 l2cap_debugfs = debugfs_create_file("l2cap", 0444,
5398 bt_debugfs, NULL, &l2cap_debugfs_fops);
5399 if (!l2cap_debugfs)
5400 BT_ERR("Failed to create L2CAP debug file");
5401 }
5402
5403 return 0;
5404 }
5405
5406 void l2cap_exit(void)
5407 {
5408 debugfs_remove(l2cap_debugfs);
5409 l2cap_cleanup_sockets();
5410 }
5411
5412 module_param(disable_ertm, bool, 0644);
5413 MODULE_PARM_DESC(disable_ertm, "Disable enhanced retransmission mode");