Bluetooth: Add LE SMP Cryptoolbox functions
[GitHub/mt8127/android_kernel_alcatel_ttab.git] / net / bluetooth / l2cap_core.c
1 /*
2 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (C) 2000-2001 Qualcomm Incorporated
4 Copyright (C) 2009-2010 Gustavo F. Padovan <gustavo@padovan.org>
5 Copyright (C) 2010 Google Inc.
6
7 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
8
9 This program is free software; you can redistribute it and/or modify
10 it under the terms of the GNU General Public License version 2 as
11 published by the Free Software Foundation;
12
13 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
14 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
15 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
16 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
17 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
18 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
19 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
20 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
21
22 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
23 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
24 SOFTWARE IS DISCLAIMED.
25 */
26
27 /* Bluetooth L2CAP core. */
28
29 #include <linux/module.h>
30
31 #include <linux/types.h>
32 #include <linux/capability.h>
33 #include <linux/errno.h>
34 #include <linux/kernel.h>
35 #include <linux/sched.h>
36 #include <linux/slab.h>
37 #include <linux/poll.h>
38 #include <linux/fcntl.h>
39 #include <linux/init.h>
40 #include <linux/interrupt.h>
41 #include <linux/socket.h>
42 #include <linux/skbuff.h>
43 #include <linux/list.h>
44 #include <linux/device.h>
45 #include <linux/debugfs.h>
46 #include <linux/seq_file.h>
47 #include <linux/uaccess.h>
48 #include <linux/crc16.h>
49 #include <net/sock.h>
50
51 #include <asm/system.h>
52 #include <asm/unaligned.h>
53
54 #include <net/bluetooth/bluetooth.h>
55 #include <net/bluetooth/hci_core.h>
56 #include <net/bluetooth/l2cap.h>
57 #include <net/bluetooth/smp.h>
58
/* Module parameter: when non-zero, ERTM and streaming modes are neither
 * advertised nor negotiated (see l2cap_mode_supported()). */
int disable_ertm;

/* Local feature mask reported in Information Responses. */
static u32 l2cap_feat_mask = L2CAP_FEAT_FIXED_CHAN;
/* Supported fixed channels bitmap; 0x02 presumably marks the signalling
 * channel — TODO confirm against the fixed-channels info response code. */
static u8 l2cap_fixed_chan[8] = { 0x02, };

/* Workqueue backing l2cap_busy_work() (local-busy handling). */
static struct workqueue_struct *_busy_wq;

/* All L2CAP channels in the system, protected by chan_list_lock. */
static LIST_HEAD(chan_list);
static DEFINE_RWLOCK(chan_list_lock);

static void l2cap_busy_work(struct work_struct *work);

static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
				u8 code, u8 ident, u16 dlen, void *data);
static void l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len,
								void *data);
static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data);
static void l2cap_send_disconn_req(struct l2cap_conn *conn,
				struct l2cap_chan *chan, int err);

static int l2cap_ertm_data_rcv(struct sock *sk, struct sk_buff *skb);
81 /* ---- L2CAP channels ---- */
82
/* Take a reference on @c; paired with chan_put(). */
static inline void chan_hold(struct l2cap_chan *c)
{
	atomic_inc(&c->refcnt);
}
87
/* Drop a reference on @c; frees the channel when the last one is gone. */
static inline void chan_put(struct l2cap_chan *c)
{
	if (atomic_dec_and_test(&c->refcnt))
		kfree(c);
}
93
/* Look up a channel on @conn by destination CID.
 * Caller must hold conn->chan_lock. Returns NULL if not found. */
static struct l2cap_chan *__l2cap_get_chan_by_dcid(struct l2cap_conn *conn, u16 cid)
{
	struct l2cap_chan *c;

	list_for_each_entry(c, &conn->chan_l, list) {
		if (c->dcid == cid)
			return c;
	}
	return NULL;

}
105
/* Look up a channel on @conn by source CID.
 * Caller must hold conn->chan_lock. Returns NULL if not found. */
static struct l2cap_chan *__l2cap_get_chan_by_scid(struct l2cap_conn *conn, u16 cid)
{
	struct l2cap_chan *c;

	list_for_each_entry(c, &conn->chan_l, list) {
		if (c->scid == cid)
			return c;
	}
	return NULL;
}
116
/* Find channel with given SCID.
 * On success the channel's socket is returned bh-locked; the caller is
 * responsible for bh_unlock_sock(). chan_lock is held only for the
 * lookup itself. */
static struct l2cap_chan *l2cap_get_chan_by_scid(struct l2cap_conn *conn, u16 cid)
{
	struct l2cap_chan *c;

	read_lock(&conn->chan_lock);
	c = __l2cap_get_chan_by_scid(conn, cid);
	if (c)
		bh_lock_sock(c->sk);
	read_unlock(&conn->chan_lock);
	return c;
}
130
/* Look up a channel on @conn by signalling command identifier.
 * Caller must hold conn->chan_lock. Returns NULL if not found. */
static struct l2cap_chan *__l2cap_get_chan_by_ident(struct l2cap_conn *conn, u8 ident)
{
	struct l2cap_chan *c;

	list_for_each_entry(c, &conn->chan_l, list) {
		if (c->ident == ident)
			return c;
	}
	return NULL;
}
141
/* As __l2cap_get_chan_by_ident(), but takes chan_lock itself and
 * returns the channel with its socket bh-locked (caller unlocks). */
static inline struct l2cap_chan *l2cap_get_chan_by_ident(struct l2cap_conn *conn, u8 ident)
{
	struct l2cap_chan *c;

	read_lock(&conn->chan_lock);
	c = __l2cap_get_chan_by_ident(conn, ident);
	if (c)
		bh_lock_sock(c->sk);
	read_unlock(&conn->chan_lock);
	return c;
}
153
154 static struct l2cap_chan *__l2cap_global_chan_by_addr(__le16 psm, bdaddr_t *src)
155 {
156 struct l2cap_chan *c;
157
158 list_for_each_entry(c, &chan_list, global_l) {
159 if (c->sport == psm && !bacmp(&bt_sk(c->sk)->src, src))
160 goto found;
161 }
162
163 c = NULL;
164 found:
165 return c;
166 }
167
168 int l2cap_add_psm(struct l2cap_chan *chan, bdaddr_t *src, __le16 psm)
169 {
170 int err;
171
172 write_lock_bh(&chan_list_lock);
173
174 if (psm && __l2cap_global_chan_by_addr(psm, src)) {
175 err = -EADDRINUSE;
176 goto done;
177 }
178
179 if (psm) {
180 chan->psm = psm;
181 chan->sport = psm;
182 err = 0;
183 } else {
184 u16 p;
185
186 err = -EINVAL;
187 for (p = 0x1001; p < 0x1100; p += 2)
188 if (!__l2cap_global_chan_by_addr(cpu_to_le16(p), src)) {
189 chan->psm = cpu_to_le16(p);
190 chan->sport = cpu_to_le16(p);
191 err = 0;
192 break;
193 }
194 }
195
196 done:
197 write_unlock_bh(&chan_list_lock);
198 return err;
199 }
200
/* Assign a fixed source CID to @chan. Always succeeds (returns 0).
 * chan_list_lock serialises against concurrent PSM/CID assignment. */
int l2cap_add_scid(struct l2cap_chan *chan, __u16 scid)
{
	write_lock_bh(&chan_list_lock);

	chan->scid = scid;

	write_unlock_bh(&chan_list_lock);

	return 0;
}
211
212 static u16 l2cap_alloc_cid(struct l2cap_conn *conn)
213 {
214 u16 cid = L2CAP_CID_DYN_START;
215
216 for (; cid < L2CAP_CID_DYN_END; cid++) {
217 if (!__l2cap_get_chan_by_scid(conn, cid))
218 return cid;
219 }
220
221 return 0;
222 }
223
224 static void l2cap_set_timer(struct l2cap_chan *chan, struct timer_list *timer, long timeout)
225 {
226 BT_DBG("chan %p state %d timeout %ld", chan->sk, chan->state, timeout);
227
228 if (!mod_timer(timer, jiffies + timeout))
229 chan_hold(chan);
230 }
231
/* Cancel @timer; if it was pending and successfully deleted, drop the
 * reference taken when it was armed in l2cap_set_timer(). */
static void l2cap_clear_timer(struct l2cap_chan *chan, struct timer_list *timer)
{
	BT_DBG("chan %p state %d", chan, chan->state);

	if (timer_pending(timer) && del_timer(timer))
		chan_put(chan);
}
239
/* Move @chan to @state and notify the owner via its state_change op. */
static void l2cap_state_change(struct l2cap_chan *chan, int state)
{
	chan->state = state;
	chan->ops->state_change(chan->data, state);
}
245
/* Channel timer callback (runs in timer/softirq context).
 * Closes the channel with a reason derived from its state, then drops
 * the reference taken when the timer was armed. */
static void l2cap_chan_timeout(unsigned long arg)
{
	struct l2cap_chan *chan = (struct l2cap_chan *) arg;
	struct sock *sk = chan->sk;
	int reason;

	BT_DBG("chan %p state %d", chan, chan->state);

	bh_lock_sock(sk);

	if (sock_owned_by_user(sk)) {
		/* sk is owned by user. Try again later */
		__set_chan_timer(chan, HZ / 5);
		bh_unlock_sock(sk);
		chan_put(chan);
		return;
	}

	if (chan->state == BT_CONNECTED || chan->state == BT_CONFIG)
		reason = ECONNREFUSED;
	else if (chan->state == BT_CONNECT &&
				chan->sec_level != BT_SECURITY_SDP)
		reason = ECONNREFUSED;
	else
		reason = ETIMEDOUT;

	l2cap_chan_close(chan, reason);

	bh_unlock_sock(sk);

	/* Notify the owner outside the socket lock */
	chan->ops->close(chan->data);
	chan_put(chan);
}
279
280 struct l2cap_chan *l2cap_chan_create(struct sock *sk)
281 {
282 struct l2cap_chan *chan;
283
284 chan = kzalloc(sizeof(*chan), GFP_ATOMIC);
285 if (!chan)
286 return NULL;
287
288 chan->sk = sk;
289
290 write_lock_bh(&chan_list_lock);
291 list_add(&chan->global_l, &chan_list);
292 write_unlock_bh(&chan_list_lock);
293
294 setup_timer(&chan->chan_timer, l2cap_chan_timeout, (unsigned long) chan);
295
296 chan->state = BT_OPEN;
297
298 atomic_set(&chan->refcnt, 1);
299
300 return chan;
301 }
302
/* Unlink @chan from the global list and drop the creation reference.
 * The channel is freed once all other references are released. */
void l2cap_chan_destroy(struct l2cap_chan *chan)
{
	write_lock_bh(&chan_list_lock);
	list_del(&chan->global_l);
	write_unlock_bh(&chan_list_lock);

	chan_put(chan);
}
311
/* Attach @chan to @conn and assign CIDs/outgoing MTU by channel type.
 * Caller must hold conn->chan_lock for writing. */
static void __l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan)
{
	BT_DBG("conn %p, psm 0x%2.2x, dcid 0x%4.4x", conn,
			chan->psm, chan->dcid);

	/* 0x13: default disconnect reason (remote user terminated) */
	conn->disc_reason = 0x13;

	chan->conn = conn;

	if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED) {
		if (conn->hcon->type == LE_LINK) {
			/* LE connection: data flows on the fixed LE CID */
			chan->omtu = L2CAP_LE_DEFAULT_MTU;
			chan->scid = L2CAP_CID_LE_DATA;
			chan->dcid = L2CAP_CID_LE_DATA;
		} else {
			/* Alloc CID for connection-oriented socket */
			chan->scid = l2cap_alloc_cid(conn);
			chan->omtu = L2CAP_DEFAULT_MTU;
		}
	} else if (chan->chan_type == L2CAP_CHAN_CONN_LESS) {
		/* Connectionless socket */
		chan->scid = L2CAP_CID_CONN_LESS;
		chan->dcid = L2CAP_CID_CONN_LESS;
		chan->omtu = L2CAP_DEFAULT_MTU;
	} else {
		/* Raw socket can send/recv signalling messages only */
		chan->scid = L2CAP_CID_SIGNALING;
		chan->dcid = L2CAP_CID_SIGNALING;
		chan->omtu = L2CAP_DEFAULT_MTU;
	}

	/* Reference held on behalf of conn->chan_l membership;
	 * dropped in l2cap_chan_del(). */
	chan_hold(chan);

	list_add(&chan->list, &conn->chan_l);
}
348
/* Delete channel.
 * Must be called on the locked socket. Detaches @chan from its
 * connection, zaps the socket, wakes whoever is waiting on it, and —
 * once configuration had completed — purges queues and ERTM state. */
static void l2cap_chan_del(struct l2cap_chan *chan, int err)
{
	struct sock *sk = chan->sk;
	struct l2cap_conn *conn = chan->conn;
	struct sock *parent = bt_sk(sk)->parent;

	__clear_chan_timer(chan);

	BT_DBG("chan %p, conn %p, err %d", chan, conn, err);

	if (conn) {
		/* Delete from channel list */
		write_lock_bh(&conn->chan_lock);
		list_del(&chan->list);
		write_unlock_bh(&conn->chan_lock);
		/* Drop the reference taken in __l2cap_chan_add() */
		chan_put(chan);

		chan->conn = NULL;
		hci_conn_put(conn->hcon);
	}

	l2cap_state_change(chan, BT_CLOSED);
	sock_set_flag(sk, SOCK_ZAPPED);

	if (err)
		sk->sk_err = err;

	if (parent) {
		/* Not yet accepted: unlink from the listener's queue
		 * and wake the listener */
		bt_accept_unlink(sk);
		parent->sk_data_ready(parent, 0);
	} else
		sk->sk_state_change(sk);

	/* Queues and ERTM machinery are only live once both config
	 * directions completed; nothing more to tear down otherwise. */
	if (!(chan->conf_state & L2CAP_CONF_OUTPUT_DONE &&
			chan->conf_state & L2CAP_CONF_INPUT_DONE))
		return;

	skb_queue_purge(&chan->tx_q);

	if (chan->mode == L2CAP_MODE_ERTM) {
		struct srej_list *l, *tmp;

		__clear_retrans_timer(chan);
		__clear_monitor_timer(chan);
		__clear_ack_timer(chan);

		skb_queue_purge(&chan->srej_q);
		skb_queue_purge(&chan->busy_q);

		list_for_each_entry_safe(l, tmp, &chan->srej_l, list) {
			list_del(&l->list);
			kfree(l);
		}
	}
}
406
/* Tear down every connection still sitting on @parent's accept queue
 * (called when a listening socket is closed). */
static void l2cap_chan_cleanup_listen(struct sock *parent)
{
	struct sock *sk;

	BT_DBG("parent %p", parent);

	/* Close not yet accepted channels */
	while ((sk = bt_accept_dequeue(parent, NULL))) {
		struct l2cap_chan *chan = l2cap_pi(sk)->chan;
		__clear_chan_timer(chan);
		lock_sock(sk);
		l2cap_chan_close(chan, ECONNRESET);
		release_sock(sk);
		chan->ops->close(chan->data);
	}
}
423
/* Close @chan with error @reason, driving the proper shutdown for its
 * current state: listening sockets clean their accept queue, connected
 * ACL channels send a Disconnect Request, half-open incoming channels
 * send a rejecting Connection Response. Caller holds the socket lock. */
void l2cap_chan_close(struct l2cap_chan *chan, int reason)
{
	struct l2cap_conn *conn = chan->conn;
	struct sock *sk = chan->sk;

	BT_DBG("chan %p state %d socket %p", chan, chan->state, sk->sk_socket);

	switch (chan->state) {
	case BT_LISTEN:
		l2cap_chan_cleanup_listen(sk);

		l2cap_state_change(chan, BT_CLOSED);
		sock_set_flag(sk, SOCK_ZAPPED);
		break;

	case BT_CONNECTED:
	case BT_CONFIG:
		if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED &&
					conn->hcon->type == ACL_LINK) {
			/* Orderly disconnect: wait (bounded by the chan
			 * timer) for the Disconnect Response */
			__clear_chan_timer(chan);
			__set_chan_timer(chan, sk->sk_sndtimeo);
			l2cap_send_disconn_req(conn, chan, reason);
		} else
			l2cap_chan_del(chan, reason);
		break;

	case BT_CONNECT2:
		if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED &&
					conn->hcon->type == ACL_LINK) {
			struct l2cap_conn_rsp rsp;
			__u16 result;

			/* Peer's Connection Request is still pending:
			 * answer it with a rejection */
			if (bt_sk(sk)->defer_setup)
				result = L2CAP_CR_SEC_BLOCK;
			else
				result = L2CAP_CR_BAD_PSM;
			l2cap_state_change(chan, BT_DISCONN);

			rsp.scid = cpu_to_le16(chan->dcid);
			rsp.dcid = cpu_to_le16(chan->scid);
			rsp.result = cpu_to_le16(result);
			rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
			l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
							sizeof(rsp), &rsp);
		}

		l2cap_chan_del(chan, reason);
		break;

	case BT_CONNECT:
	case BT_DISCONN:
		l2cap_chan_del(chan, reason);
		break;

	default:
		sock_set_flag(sk, SOCK_ZAPPED);
		break;
	}
}
483
484 static inline u8 l2cap_get_auth_type(struct l2cap_chan *chan)
485 {
486 if (chan->chan_type == L2CAP_CHAN_RAW) {
487 switch (chan->sec_level) {
488 case BT_SECURITY_HIGH:
489 return HCI_AT_DEDICATED_BONDING_MITM;
490 case BT_SECURITY_MEDIUM:
491 return HCI_AT_DEDICATED_BONDING;
492 default:
493 return HCI_AT_NO_BONDING;
494 }
495 } else if (chan->psm == cpu_to_le16(0x0001)) {
496 if (chan->sec_level == BT_SECURITY_LOW)
497 chan->sec_level = BT_SECURITY_SDP;
498
499 if (chan->sec_level == BT_SECURITY_HIGH)
500 return HCI_AT_NO_BONDING_MITM;
501 else
502 return HCI_AT_NO_BONDING;
503 } else {
504 switch (chan->sec_level) {
505 case BT_SECURITY_HIGH:
506 return HCI_AT_GENERAL_BONDING_MITM;
507 case BT_SECURITY_MEDIUM:
508 return HCI_AT_GENERAL_BONDING;
509 default:
510 return HCI_AT_NO_BONDING;
511 }
512 }
513 }
514
/* Service level security */
/* Request link security appropriate for @chan; returns the result of
 * hci_conn_security() (non-zero when security is already satisfied). */
static inline int l2cap_check_security(struct l2cap_chan *chan)
{
	struct l2cap_conn *conn = chan->conn;
	__u8 auth_type;

	auth_type = l2cap_get_auth_type(chan);

	return hci_conn_security(conn->hcon, chan->sec_level, auth_type);
}
525
/* Allocate the next signalling command identifier for @conn,
 * wrapping within the kernel-reserved 1..128 range. */
static u8 l2cap_get_ident(struct l2cap_conn *conn)
{
	u8 id;

	/* Get next available identificator.
	 * 1 - 128 are used by kernel.
	 * 129 - 199 are reserved.
	 * 200 - 254 are used by utilities like l2ping, etc.
	 */

	spin_lock_bh(&conn->lock);

	if (++conn->tx_ident > 128)
		conn->tx_ident = 1;

	id = conn->tx_ident;

	spin_unlock_bh(&conn->lock);

	return id;
}
547
/* Build and transmit one L2CAP signalling command on @conn.
 * Silently drops the command when the skb cannot be built. */
static void l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len, void *data)
{
	struct sk_buff *skb = l2cap_build_cmd(conn, code, ident, len, data);
	u8 flags;

	BT_DBG("code 0x%2.2x", code);

	if (!skb)
		return;

	/* Signalling must survive controller flushes when the
	 * controller supports non-flushable packets */
	if (lmp_no_flush_capable(conn->hcon->hdev))
		flags = ACL_START_NO_FLUSH;
	else
		flags = ACL_START;

	bt_cb(skb)->force_active = BT_POWER_FORCE_ACTIVE_ON;

	hci_send_acl(conn->hcon, skb, flags);
}
567
/* Build and send an ERTM S-frame carrying @control on @chan.
 * Consumes the pending F-bit/P-bit requests from conn_state and
 * appends an FCS when the channel uses CRC16. No-op unless the
 * channel is BT_CONNECTED. */
static inline void l2cap_send_sframe(struct l2cap_chan *chan, u16 control)
{
	struct sk_buff *skb;
	struct l2cap_hdr *lh;
	struct l2cap_conn *conn = chan->conn;
	int count, hlen = L2CAP_HDR_SIZE + 2;	/* header + control field */
	u8 flags;

	if (chan->state != BT_CONNECTED)
		return;

	if (chan->fcs == L2CAP_FCS_CRC16)
		hlen += 2;	/* room for the FCS */

	BT_DBG("chan %p, control 0x%2.2x", chan, control);

	count = min_t(unsigned int, conn->mtu, hlen);
	control |= L2CAP_CTRL_FRAME_TYPE;

	/* Piggy-back a pending Final bit, then clear the request */
	if (chan->conn_state & L2CAP_CONN_SEND_FBIT) {
		control |= L2CAP_CTRL_FINAL;
		chan->conn_state &= ~L2CAP_CONN_SEND_FBIT;
	}

	/* Same for a pending Poll bit */
	if (chan->conn_state & L2CAP_CONN_SEND_PBIT) {
		control |= L2CAP_CTRL_POLL;
		chan->conn_state &= ~L2CAP_CONN_SEND_PBIT;
	}

	skb = bt_skb_alloc(count, GFP_ATOMIC);
	if (!skb)
		return;

	lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
	lh->len = cpu_to_le16(hlen - L2CAP_HDR_SIZE);
	lh->cid = cpu_to_le16(chan->dcid);
	put_unaligned_le16(control, skb_put(skb, 2));

	if (chan->fcs == L2CAP_FCS_CRC16) {
		/* FCS covers everything before the FCS field itself */
		u16 fcs = crc16(0, (u8 *)lh, count - 2);
		put_unaligned_le16(fcs, skb_put(skb, 2));
	}

	if (lmp_no_flush_capable(conn->hcon->hdev))
		flags = ACL_START_NO_FLUSH;
	else
		flags = ACL_START;

	bt_cb(skb)->force_active = chan->force_active;

	hci_send_acl(chan->conn->hcon, skb, flags);
}
620
/* Send an RR (Receiver Ready) S-frame, or RNR (Receiver Not Ready)
 * while we are locally busy, acknowledging up to buffer_seq. */
static inline void l2cap_send_rr_or_rnr(struct l2cap_chan *chan, u16 control)
{
	if (chan->conn_state & L2CAP_CONN_LOCAL_BUSY) {
		control |= L2CAP_SUPER_RCV_NOT_READY;
		/* Remember we told the peer we are busy */
		chan->conn_state |= L2CAP_CONN_RNR_SENT;
	} else
		control |= L2CAP_SUPER_RCV_READY;

	control |= chan->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT;

	l2cap_send_sframe(chan, control);
}
633
/* True when no Connection Request is outstanding for @chan. */
static inline int __l2cap_no_conn_pending(struct l2cap_chan *chan)
{
	return !(chan->conf_state & L2CAP_CONF_CONNECT_PEND);
}
638
/* Start connection setup for @chan: send a Connection Request once the
 * peer's feature mask is known and security allows it; otherwise kick
 * off (or keep waiting on) the Information Request exchange. */
static void l2cap_do_start(struct l2cap_chan *chan)
{
	struct l2cap_conn *conn = chan->conn;

	if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) {
		/* Feature exchange in flight: wait for it to finish */
		if (!(conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE))
			return;

		if (l2cap_check_security(chan) &&
				__l2cap_no_conn_pending(chan)) {
			struct l2cap_conn_req req;
			req.scid = cpu_to_le16(chan->scid);
			req.psm = chan->psm;

			chan->ident = l2cap_get_ident(conn);
			chan->conf_state |= L2CAP_CONF_CONNECT_PEND;

			l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_REQ,
							sizeof(req), &req);
		}
	} else {
		/* First channel on this link: query the peer's features */
		struct l2cap_info_req req;
		req.type = cpu_to_le16(L2CAP_IT_FEAT_MASK);

		conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
		conn->info_ident = l2cap_get_ident(conn);

		mod_timer(&conn->info_timer, jiffies +
					msecs_to_jiffies(L2CAP_INFO_TIMEOUT));

		l2cap_send_cmd(conn, conn->info_ident,
					L2CAP_INFO_REQ, sizeof(req), &req);
	}
}
673
674 static inline int l2cap_mode_supported(__u8 mode, __u32 feat_mask)
675 {
676 u32 local_feat_mask = l2cap_feat_mask;
677 if (!disable_ertm)
678 local_feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING;
679
680 switch (mode) {
681 case L2CAP_MODE_ERTM:
682 return L2CAP_FEAT_ERTM & feat_mask & local_feat_mask;
683 case L2CAP_MODE_STREAMING:
684 return L2CAP_FEAT_STREAMING & feat_mask & local_feat_mask;
685 default:
686 return 0x00;
687 }
688 }
689
/* Send a Disconnect Request for @chan, stop its ERTM timers and move
 * it to BT_DISCONN with @err recorded on the socket. Caller holds the
 * socket lock. */
static void l2cap_send_disconn_req(struct l2cap_conn *conn, struct l2cap_chan *chan, int err)
{
	struct sock *sk;
	struct l2cap_disconn_req req;

	if (!conn)
		return;

	sk = chan->sk;

	if (chan->mode == L2CAP_MODE_ERTM) {
		__clear_retrans_timer(chan);
		__clear_monitor_timer(chan);
		__clear_ack_timer(chan);
	}

	req.dcid = cpu_to_le16(chan->dcid);
	req.scid = cpu_to_le16(chan->scid);
	l2cap_send_cmd(conn, l2cap_get_ident(conn),
			L2CAP_DISCONN_REQ, sizeof(req), &req);

	l2cap_state_change(chan, BT_DISCONN);
	sk->sk_err = err;
}
714
715 /* ---- L2CAP connections ---- */
716 static void l2cap_conn_start(struct l2cap_conn *conn)
717 {
718 struct l2cap_chan *chan, *tmp;
719
720 BT_DBG("conn %p", conn);
721
722 read_lock(&conn->chan_lock);
723
724 list_for_each_entry_safe(chan, tmp, &conn->chan_l, list) {
725 struct sock *sk = chan->sk;
726
727 bh_lock_sock(sk);
728
729 if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
730 bh_unlock_sock(sk);
731 continue;
732 }
733
734 if (chan->state == BT_CONNECT) {
735 struct l2cap_conn_req req;
736
737 if (!l2cap_check_security(chan) ||
738 !__l2cap_no_conn_pending(chan)) {
739 bh_unlock_sock(sk);
740 continue;
741 }
742
743 if (!l2cap_mode_supported(chan->mode,
744 conn->feat_mask)
745 && chan->conf_state &
746 L2CAP_CONF_STATE2_DEVICE) {
747 /* l2cap_chan_close() calls list_del(chan)
748 * so release the lock */
749 read_unlock_bh(&conn->chan_lock);
750 l2cap_chan_close(chan, ECONNRESET);
751 read_lock_bh(&conn->chan_lock);
752 bh_unlock_sock(sk);
753 continue;
754 }
755
756 req.scid = cpu_to_le16(chan->scid);
757 req.psm = chan->psm;
758
759 chan->ident = l2cap_get_ident(conn);
760 chan->conf_state |= L2CAP_CONF_CONNECT_PEND;
761
762 l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_REQ,
763 sizeof(req), &req);
764
765 } else if (chan->state == BT_CONNECT2) {
766 struct l2cap_conn_rsp rsp;
767 char buf[128];
768 rsp.scid = cpu_to_le16(chan->dcid);
769 rsp.dcid = cpu_to_le16(chan->scid);
770
771 if (l2cap_check_security(chan)) {
772 if (bt_sk(sk)->defer_setup) {
773 struct sock *parent = bt_sk(sk)->parent;
774 rsp.result = cpu_to_le16(L2CAP_CR_PEND);
775 rsp.status = cpu_to_le16(L2CAP_CS_AUTHOR_PEND);
776 parent->sk_data_ready(parent, 0);
777
778 } else {
779 l2cap_state_change(chan, BT_CONFIG);
780 rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
781 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
782 }
783 } else {
784 rsp.result = cpu_to_le16(L2CAP_CR_PEND);
785 rsp.status = cpu_to_le16(L2CAP_CS_AUTHEN_PEND);
786 }
787
788 l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
789 sizeof(rsp), &rsp);
790
791 if (chan->conf_state & L2CAP_CONF_REQ_SENT ||
792 rsp.result != L2CAP_CR_SUCCESS) {
793 bh_unlock_sock(sk);
794 continue;
795 }
796
797 chan->conf_state |= L2CAP_CONF_REQ_SENT;
798 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
799 l2cap_build_conf_req(chan, buf), buf);
800 chan->num_conf_req++;
801 }
802
803 bh_unlock_sock(sk);
804 }
805
806 read_unlock(&conn->chan_lock);
807 }
808
809 /* Find socket with cid and source bdaddr.
810 * Returns closest match, locked.
811 */
812 static struct l2cap_chan *l2cap_global_chan_by_scid(int state, __le16 cid, bdaddr_t *src)
813 {
814 struct l2cap_chan *c, *c1 = NULL;
815
816 read_lock(&chan_list_lock);
817
818 list_for_each_entry(c, &chan_list, global_l) {
819 struct sock *sk = c->sk;
820
821 if (state && c->state != state)
822 continue;
823
824 if (c->scid == cid) {
825 /* Exact match. */
826 if (!bacmp(&bt_sk(sk)->src, src)) {
827 read_unlock(&chan_list_lock);
828 return c;
829 }
830
831 /* Closest match */
832 if (!bacmp(&bt_sk(sk)->src, BDADDR_ANY))
833 c1 = c;
834 }
835 }
836
837 read_unlock(&chan_list_lock);
838
839 return c1;
840 }
841
/* Handle an incoming LE connection: if a socket is listening on the
 * LE data CID, spawn a child channel, attach it to @conn and mark it
 * connected, then wake the listener. */
static void l2cap_le_conn_ready(struct l2cap_conn *conn)
{
	struct sock *parent, *sk;
	struct l2cap_chan *chan, *pchan;

	BT_DBG("");

	/* Check if we have socket listening on cid */
	pchan = l2cap_global_chan_by_scid(BT_LISTEN, L2CAP_CID_LE_DATA,
							conn->src);
	if (!pchan)
		return;

	parent = pchan->sk;

	bh_lock_sock(parent);

	/* Check for backlog size */
	if (sk_acceptq_is_full(parent)) {
		BT_DBG("backlog full %d", parent->sk_ack_backlog);
		goto clean;
	}

	chan = pchan->ops->new_connection(pchan->data);
	if (!chan)
		goto clean;

	sk = chan->sk;

	write_lock_bh(&conn->chan_lock);

	hci_conn_hold(conn->hcon);

	bacpy(&bt_sk(sk)->src, conn->src);
	bacpy(&bt_sk(sk)->dst, conn->dst);

	bt_accept_enqueue(parent, sk);

	__l2cap_chan_add(conn, chan);

	__set_chan_timer(chan, sk->sk_sndtimeo);

	l2cap_state_change(chan, BT_CONNECTED);
	parent->sk_data_ready(parent, 0);

	write_unlock_bh(&conn->chan_lock);

/* Success also falls through here: the parent lock is released on
 * every path. */
clean:
	bh_unlock_sock(parent);
}
892
/* Link-level connection established on @conn: accept an incoming LE
 * connection if one is pending, mark LE and connectionless channels
 * connected, and start setup for outgoing ACL channels. */
static void l2cap_conn_ready(struct l2cap_conn *conn)
{
	struct l2cap_chan *chan;

	BT_DBG("conn %p", conn);

	if (!conn->hcon->out && conn->hcon->type == LE_LINK)
		l2cap_le_conn_ready(conn);

	read_lock(&conn->chan_lock);

	list_for_each_entry(chan, &conn->chan_l, list) {
		struct sock *sk = chan->sk;

		bh_lock_sock(sk);

		if (conn->hcon->type == LE_LINK) {
			__clear_chan_timer(chan);
			l2cap_state_change(chan, BT_CONNECTED);
			sk->sk_state_change(sk);
			/* Elevate link security per the channel's needs */
			if (smp_conn_security(conn, chan->sec_level))
				BT_DBG("Insufficient security");
		}

		if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
			/* Connectionless/raw channels are up as soon as
			 * the link is */
			__clear_chan_timer(chan);
			l2cap_state_change(chan, BT_CONNECTED);
			sk->sk_state_change(sk);

		} else if (chan->state == BT_CONNECT)
			l2cap_do_start(chan);

		bh_unlock_sock(sk);
	}

	read_unlock(&conn->chan_lock);
}
930
931 /* Notify sockets that we cannot guaranty reliability anymore */
932 static void l2cap_conn_unreliable(struct l2cap_conn *conn, int err)
933 {
934 struct l2cap_chan *chan;
935
936 BT_DBG("conn %p", conn);
937
938 read_lock(&conn->chan_lock);
939
940 list_for_each_entry(chan, &conn->chan_l, list) {
941 struct sock *sk = chan->sk;
942
943 if (chan->force_reliable)
944 sk->sk_err = err;
945 }
946
947 read_unlock(&conn->chan_lock);
948 }
949
/* Information Request timer expired: treat the feature exchange as
 * finished and resume connection setup for waiting channels. */
static void l2cap_info_timeout(unsigned long arg)
{
	struct l2cap_conn *conn = (void *) arg;

	conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
	conn->info_ident = 0;

	l2cap_conn_start(conn);
}
959
/* Create (or return the existing) L2CAP connection for @hcon.
 * Returns NULL on allocation failure, or the existing conn (which may
 * be NULL) when one is present or @status reports an error. */
static struct l2cap_conn *l2cap_conn_add(struct hci_conn *hcon, u8 status)
{
	struct l2cap_conn *conn = hcon->l2cap_data;

	if (conn || status)
		return conn;

	conn = kzalloc(sizeof(struct l2cap_conn), GFP_ATOMIC);
	if (!conn)
		return NULL;

	hcon->l2cap_data = conn;
	conn->hcon = hcon;

	BT_DBG("hcon %p conn %p", hcon, conn);

	/* Use the LE-specific MTU when available on an LE link */
	if (hcon->hdev->le_mtu && hcon->type == LE_LINK)
		conn->mtu = hcon->hdev->le_mtu;
	else
		conn->mtu = hcon->hdev->acl_mtu;

	conn->src = &hcon->hdev->bdaddr;
	conn->dst = &hcon->dst;

	conn->feat_mask = 0;

	spin_lock_init(&conn->lock);
	rwlock_init(&conn->chan_lock);

	INIT_LIST_HEAD(&conn->chan_l);

	/* LE links have no feature-mask exchange, hence no info timer */
	if (hcon->type != LE_LINK)
		setup_timer(&conn->info_timer, l2cap_info_timeout,
						(unsigned long) conn);

	/* 0x13: default disconnect reason (remote user terminated) */
	conn->disc_reason = 0x13;

	return conn;
}
999
/* Tear down the L2CAP connection attached to @hcon: delete every
 * channel with error @err, stop the info timer and free the conn. */
static void l2cap_conn_del(struct hci_conn *hcon, int err)
{
	struct l2cap_conn *conn = hcon->l2cap_data;
	struct l2cap_chan *chan, *l;
	struct sock *sk;

	if (!conn)
		return;

	BT_DBG("hcon %p conn %p, err %d", hcon, conn, err);

	kfree_skb(conn->rx_skb);

	/* Kill channels */
	list_for_each_entry_safe(chan, l, &conn->chan_l, list) {
		sk = chan->sk;
		bh_lock_sock(sk);
		l2cap_chan_del(chan, err);
		bh_unlock_sock(sk);
		chan->ops->close(chan->data);
	}

	/* The info timer only exists once a feature request was sent */
	if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT)
		del_timer_sync(&conn->info_timer);

	hcon->l2cap_data = NULL;
	kfree(conn);
}
1028
/* Locked wrapper around __l2cap_chan_add(). */
static inline void l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan)
{
	write_lock_bh(&conn->chan_lock);
	__l2cap_chan_add(conn, chan);
	write_unlock_bh(&conn->chan_lock);
}
1035
1036 /* ---- Socket interface ---- */
1037
1038 /* Find socket with psm and source bdaddr.
1039 * Returns closest match.
1040 */
1041 static struct l2cap_chan *l2cap_global_chan_by_psm(int state, __le16 psm, bdaddr_t *src)
1042 {
1043 struct l2cap_chan *c, *c1 = NULL;
1044
1045 read_lock(&chan_list_lock);
1046
1047 list_for_each_entry(c, &chan_list, global_l) {
1048 struct sock *sk = c->sk;
1049
1050 if (state && c->state != state)
1051 continue;
1052
1053 if (c->psm == psm) {
1054 /* Exact match. */
1055 if (!bacmp(&bt_sk(sk)->src, src)) {
1056 read_unlock(&chan_list_lock);
1057 return c;
1058 }
1059
1060 /* Closest match */
1061 if (!bacmp(&bt_sk(sk)->src, BDADDR_ANY))
1062 c1 = c;
1063 }
1064 }
1065
1066 read_unlock(&chan_list_lock);
1067
1068 return c1;
1069 }
1070
/* Initiate an outgoing connection for @chan: route to an adapter,
 * create (or reuse) the ACL/LE link, attach the channel and start
 * L2CAP-level setup once the link is up.
 * Returns 0 on success or a negative errno. */
int l2cap_chan_connect(struct l2cap_chan *chan)
{
	struct sock *sk = chan->sk;
	bdaddr_t *src = &bt_sk(sk)->src;
	bdaddr_t *dst = &bt_sk(sk)->dst;
	struct l2cap_conn *conn;
	struct hci_conn *hcon;
	struct hci_dev *hdev;
	__u8 auth_type;
	int err;

	BT_DBG("%s -> %s psm 0x%2.2x", batostr(src), batostr(dst),
							chan->psm);

	hdev = hci_get_route(dst, src);
	if (!hdev)
		return -EHOSTUNREACH;

	hci_dev_lock_bh(hdev);

	auth_type = l2cap_get_auth_type(chan);

	/* The LE data CID selects an LE link; everything else is ACL */
	if (chan->dcid == L2CAP_CID_LE_DATA)
		hcon = hci_connect(hdev, LE_LINK, dst,
					chan->sec_level, auth_type);
	else
		hcon = hci_connect(hdev, ACL_LINK, dst,
					chan->sec_level, auth_type);

	if (IS_ERR(hcon)) {
		err = PTR_ERR(hcon);
		goto done;
	}

	conn = l2cap_conn_add(hcon, 0);
	if (!conn) {
		hci_conn_put(hcon);
		err = -ENOMEM;
		goto done;
	}

	/* Update source addr of the socket */
	bacpy(src, conn->src);

	l2cap_chan_add(conn, chan);

	l2cap_state_change(chan, BT_CONNECT);
	__set_chan_timer(chan, sk->sk_sndtimeo);

	if (hcon->state == BT_CONNECTED) {
		/* Link already up: skip straight to channel setup */
		if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
			__clear_chan_timer(chan);
			if (l2cap_check_security(chan))
				l2cap_state_change(chan, BT_CONNECTED);
		} else
			l2cap_do_start(chan);
	}

	err = 0;

done:
	hci_dev_unlock_bh(hdev);
	hci_dev_put(hdev);
	return err;
}
1136
/* Block (interruptibly) until every transmitted ERTM frame has been
 * acknowledged or the connection goes away. Called with the socket
 * locked; the lock is released while sleeping.
 * Returns 0, a socket error, or -ERESTARTSYS/-EINTR on signal. */
int __l2cap_wait_ack(struct sock *sk)
{
	struct l2cap_chan *chan = l2cap_pi(sk)->chan;
	DECLARE_WAITQUEUE(wait, current);
	int err = 0;
	int timeo = HZ/5;

	add_wait_queue(sk_sleep(sk), &wait);
	while ((chan->unacked_frames > 0 && chan->conn)) {
		set_current_state(TASK_INTERRUPTIBLE);

		if (!timeo)
			timeo = HZ/5;

		if (signal_pending(current)) {
			err = sock_intr_errno(timeo);
			break;
		}

		/* Drop the socket lock while waiting for acks to arrive */
		release_sock(sk);
		timeo = schedule_timeout(timeo);
		lock_sock(sk);

		err = sock_error(sk);
		if (err)
			break;
	}
	set_current_state(TASK_RUNNING);
	remove_wait_queue(sk_sleep(sk), &wait);
	return err;
}
1168
/* ERTM monitor timer: the peer has not answered our poll. Retry with
 * another RR/RNR poll, or give up and disconnect after remote_max_tx
 * attempts. */
static void l2cap_monitor_timeout(unsigned long arg)
{
	struct l2cap_chan *chan = (void *) arg;
	struct sock *sk = chan->sk;

	BT_DBG("chan %p", chan);

	bh_lock_sock(sk);
	if (chan->retry_count >= chan->remote_max_tx) {
		l2cap_send_disconn_req(chan->conn, chan, ECONNABORTED);
		bh_unlock_sock(sk);
		return;
	}

	chan->retry_count++;
	__set_monitor_timer(chan);

	l2cap_send_rr_or_rnr(chan, L2CAP_CTRL_POLL);
	bh_unlock_sock(sk);
}
1189
/* ERTM retransmission timer: an I-frame went unacknowledged. Enter
 * the WAIT_F state and poll the peer; the monitor timer takes over
 * the retry counting from here. */
static void l2cap_retrans_timeout(unsigned long arg)
{
	struct l2cap_chan *chan = (void *) arg;
	struct sock *sk = chan->sk;

	BT_DBG("chan %p", chan);

	bh_lock_sock(sk);
	chan->retry_count = 1;
	__set_monitor_timer(chan);

	chan->conn_state |= L2CAP_CONN_WAIT_F;

	l2cap_send_rr_or_rnr(chan, L2CAP_CTRL_POLL);
	bh_unlock_sock(sk);
}
1206
1207 static void l2cap_drop_acked_frames(struct l2cap_chan *chan)
1208 {
1209 struct sk_buff *skb;
1210
1211 while ((skb = skb_peek(&chan->tx_q)) &&
1212 chan->unacked_frames) {
1213 if (bt_cb(skb)->tx_seq == chan->expected_ack_seq)
1214 break;
1215
1216 skb = skb_dequeue(&chan->tx_q);
1217 kfree_skb(skb);
1218
1219 chan->unacked_frames--;
1220 }
1221
1222 if (!chan->unacked_frames)
1223 __clear_retrans_timer(chan);
1224 }
1225
/* Hand a fully built L2CAP frame to the HCI layer for transmission
 * on @chan's link, choosing flushable vs non-flushable ACL flags. */
void l2cap_do_send(struct l2cap_chan *chan, struct sk_buff *skb)
{
	struct hci_conn *hcon = chan->conn->hcon;
	u16 flags;

	BT_DBG("chan %p, skb %p len %d", chan, skb, skb->len);

	if (!chan->flushable && lmp_no_flush_capable(hcon->hdev))
		flags = ACL_START_NO_FLUSH;
	else
		flags = ACL_START;

	bt_cb(skb)->force_active = chan->force_active;
	hci_send_acl(hcon, skb, flags);
}
1241
/* Flush the tx queue in streaming mode: stamp each frame with the
 * next TxSeq (modulo 64), recompute the FCS if enabled, and send.
 * Streaming mode keeps no copy for retransmission. */
void l2cap_streaming_send(struct l2cap_chan *chan)
{
	struct sk_buff *skb;
	u16 control, fcs;

	while ((skb = skb_dequeue(&chan->tx_q))) {
		/* Patch the sequence number into the control field */
		control = get_unaligned_le16(skb->data + L2CAP_HDR_SIZE);
		control |= chan->next_tx_seq << L2CAP_CTRL_TXSEQ_SHIFT;
		put_unaligned_le16(control, skb->data + L2CAP_HDR_SIZE);

		if (chan->fcs == L2CAP_FCS_CRC16) {
			/* FCS covers everything but its own trailing bytes */
			fcs = crc16(0, (u8 *)skb->data, skb->len - 2);
			put_unaligned_le16(fcs, skb->data + skb->len - 2);
		}

		l2cap_do_send(chan, skb);

		chan->next_tx_seq = (chan->next_tx_seq + 1) % 64;
	}
}
1262
1263 static void l2cap_retransmit_one_frame(struct l2cap_chan *chan, u8 tx_seq)
1264 {
1265 struct sk_buff *skb, *tx_skb;
1266 u16 control, fcs;
1267
1268 skb = skb_peek(&chan->tx_q);
1269 if (!skb)
1270 return;
1271
1272 do {
1273 if (bt_cb(skb)->tx_seq == tx_seq)
1274 break;
1275
1276 if (skb_queue_is_last(&chan->tx_q, skb))
1277 return;
1278
1279 } while ((skb = skb_queue_next(&chan->tx_q, skb)));
1280
1281 if (chan->remote_max_tx &&
1282 bt_cb(skb)->retries == chan->remote_max_tx) {
1283 l2cap_send_disconn_req(chan->conn, chan, ECONNABORTED);
1284 return;
1285 }
1286
1287 tx_skb = skb_clone(skb, GFP_ATOMIC);
1288 bt_cb(skb)->retries++;
1289 control = get_unaligned_le16(tx_skb->data + L2CAP_HDR_SIZE);
1290 control &= L2CAP_CTRL_SAR;
1291
1292 if (chan->conn_state & L2CAP_CONN_SEND_FBIT) {
1293 control |= L2CAP_CTRL_FINAL;
1294 chan->conn_state &= ~L2CAP_CONN_SEND_FBIT;
1295 }
1296
1297 control |= (chan->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT)
1298 | (tx_seq << L2CAP_CTRL_TXSEQ_SHIFT);
1299
1300 put_unaligned_le16(control, tx_skb->data + L2CAP_HDR_SIZE);
1301
1302 if (chan->fcs == L2CAP_FCS_CRC16) {
1303 fcs = crc16(0, (u8 *)tx_skb->data, tx_skb->len - 2);
1304 put_unaligned_le16(fcs, tx_skb->data + tx_skb->len - 2);
1305 }
1306
1307 l2cap_do_send(chan, tx_skb);
1308 }
1309
1310 int l2cap_ertm_send(struct l2cap_chan *chan)
1311 {
1312 struct sk_buff *skb, *tx_skb;
1313 u16 control, fcs;
1314 int nsent = 0;
1315
1316 if (chan->state != BT_CONNECTED)
1317 return -ENOTCONN;
1318
1319 while ((skb = chan->tx_send_head) && (!l2cap_tx_window_full(chan))) {
1320
1321 if (chan->remote_max_tx &&
1322 bt_cb(skb)->retries == chan->remote_max_tx) {
1323 l2cap_send_disconn_req(chan->conn, chan, ECONNABORTED);
1324 break;
1325 }
1326
1327 tx_skb = skb_clone(skb, GFP_ATOMIC);
1328
1329 bt_cb(skb)->retries++;
1330
1331 control = get_unaligned_le16(tx_skb->data + L2CAP_HDR_SIZE);
1332 control &= L2CAP_CTRL_SAR;
1333
1334 if (chan->conn_state & L2CAP_CONN_SEND_FBIT) {
1335 control |= L2CAP_CTRL_FINAL;
1336 chan->conn_state &= ~L2CAP_CONN_SEND_FBIT;
1337 }
1338 control |= (chan->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT)
1339 | (chan->next_tx_seq << L2CAP_CTRL_TXSEQ_SHIFT);
1340 put_unaligned_le16(control, tx_skb->data + L2CAP_HDR_SIZE);
1341
1342
1343 if (chan->fcs == L2CAP_FCS_CRC16) {
1344 fcs = crc16(0, (u8 *)skb->data, tx_skb->len - 2);
1345 put_unaligned_le16(fcs, skb->data + tx_skb->len - 2);
1346 }
1347
1348 l2cap_do_send(chan, tx_skb);
1349
1350 __set_retrans_timer(chan);
1351
1352 bt_cb(skb)->tx_seq = chan->next_tx_seq;
1353 chan->next_tx_seq = (chan->next_tx_seq + 1) % 64;
1354
1355 if (bt_cb(skb)->retries == 1)
1356 chan->unacked_frames++;
1357
1358 chan->frames_sent++;
1359
1360 if (skb_queue_is_last(&chan->tx_q, skb))
1361 chan->tx_send_head = NULL;
1362 else
1363 chan->tx_send_head = skb_queue_next(&chan->tx_q, skb);
1364
1365 nsent++;
1366 }
1367
1368 return nsent;
1369 }
1370
1371 static int l2cap_retransmit_frames(struct l2cap_chan *chan)
1372 {
1373 int ret;
1374
1375 if (!skb_queue_empty(&chan->tx_q))
1376 chan->tx_send_head = chan->tx_q.next;
1377
1378 chan->next_tx_seq = chan->expected_ack_seq;
1379 ret = l2cap_ertm_send(chan);
1380 return ret;
1381 }
1382
1383 static void l2cap_send_ack(struct l2cap_chan *chan)
1384 {
1385 u16 control = 0;
1386
1387 control |= chan->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT;
1388
1389 if (chan->conn_state & L2CAP_CONN_LOCAL_BUSY) {
1390 control |= L2CAP_SUPER_RCV_NOT_READY;
1391 chan->conn_state |= L2CAP_CONN_RNR_SENT;
1392 l2cap_send_sframe(chan, control);
1393 return;
1394 }
1395
1396 if (l2cap_ertm_send(chan) > 0)
1397 return;
1398
1399 control |= L2CAP_SUPER_RCV_READY;
1400 l2cap_send_sframe(chan, control);
1401 }
1402
1403 static void l2cap_send_srejtail(struct l2cap_chan *chan)
1404 {
1405 struct srej_list *tail;
1406 u16 control;
1407
1408 control = L2CAP_SUPER_SELECT_REJECT;
1409 control |= L2CAP_CTRL_FINAL;
1410
1411 tail = list_entry((&chan->srej_l)->prev, struct srej_list, list);
1412 control |= tail->tx_seq << L2CAP_CTRL_REQSEQ_SHIFT;
1413
1414 l2cap_send_sframe(chan, control);
1415 }
1416
/* Copy @len bytes of user data from @msg into @skb: the first @count
 * bytes go into skb itself (which already holds the L2CAP header),
 * the remainder is split into headerless continuation fragments of at
 * most conn->mtu bytes each, chained on skb's frag_list.
 *
 * Returns the number of bytes copied, or a negative errno.  On error
 * the caller frees @skb, which also releases any fragments already
 * chained to it.
 */
static inline int l2cap_skbuff_fromiovec(struct sock *sk, struct msghdr *msg, int len, int count, struct sk_buff *skb)
{
	struct l2cap_conn *conn = l2cap_pi(sk)->chan->conn;
	struct sk_buff **frag;
	int err, sent = 0;

	if (memcpy_fromiovec(skb_put(skb, count), msg->msg_iov, count))
		return -EFAULT;

	sent += count;
	len -= count;

	/* Continuation fragments (no L2CAP header) */
	frag = &skb_shinfo(skb)->frag_list;
	while (len) {
		count = min_t(unsigned int, conn->mtu, len);

		*frag = bt_skb_send_alloc(sk, count, msg->msg_flags & MSG_DONTWAIT, &err);
		if (!*frag)
			return err;
		if (memcpy_fromiovec(skb_put(*frag, count), msg->msg_iov, count))
			return -EFAULT;

		sent += count;
		len -= count;

		/* Advance to the next link of the fragment chain */
		frag = &(*frag)->next;
	}

	return sent;
}
1448
/* Build a connectionless PDU: basic L2CAP header plus a 2-byte PSM
 * field followed by the user payload.  Payload that does not fit the
 * first HCI fragment is chained via l2cap_skbuff_fromiovec().
 * Returns the skb or an ERR_PTR().
 */
struct sk_buff *l2cap_create_connless_pdu(struct l2cap_chan *chan, struct msghdr *msg, size_t len)
{
	struct sock *sk = chan->sk;
	struct l2cap_conn *conn = chan->conn;
	struct sk_buff *skb;
	int err, count, hlen = L2CAP_HDR_SIZE + 2;	/* +2 for the PSM */
	struct l2cap_hdr *lh;

	BT_DBG("sk %p len %d", sk, (int)len);

	/* First fragment carries the header; cap it at the ACL MTU */
	count = min_t(unsigned int, (conn->mtu - hlen), len);
	skb = bt_skb_send_alloc(sk, count + hlen,
			msg->msg_flags & MSG_DONTWAIT, &err);
	if (!skb)
		return ERR_PTR(err);

	/* Create L2CAP header */
	lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
	lh->cid = cpu_to_le16(chan->dcid);
	lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
	put_unaligned_le16(chan->psm, skb_put(skb, 2));

	err = l2cap_skbuff_fromiovec(sk, msg, len, count, skb);
	if (unlikely(err < 0)) {
		kfree_skb(skb);
		return ERR_PTR(err);
	}
	return skb;
}
1478
1479 struct sk_buff *l2cap_create_basic_pdu(struct l2cap_chan *chan, struct msghdr *msg, size_t len)
1480 {
1481 struct sock *sk = chan->sk;
1482 struct l2cap_conn *conn = chan->conn;
1483 struct sk_buff *skb;
1484 int err, count, hlen = L2CAP_HDR_SIZE;
1485 struct l2cap_hdr *lh;
1486
1487 BT_DBG("sk %p len %d", sk, (int)len);
1488
1489 count = min_t(unsigned int, (conn->mtu - hlen), len);
1490 skb = bt_skb_send_alloc(sk, count + hlen,
1491 msg->msg_flags & MSG_DONTWAIT, &err);
1492 if (!skb)
1493 return ERR_PTR(err);
1494
1495 /* Create L2CAP header */
1496 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
1497 lh->cid = cpu_to_le16(chan->dcid);
1498 lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
1499
1500 err = l2cap_skbuff_fromiovec(sk, msg, len, count, skb);
1501 if (unlikely(err < 0)) {
1502 kfree_skb(skb);
1503 return ERR_PTR(err);
1504 }
1505 return skb;
1506 }
1507
/* Build an ERTM/streaming I-frame PDU.
 *
 * Layout: L2CAP header | 16-bit control | 16-bit SDU length (only for
 * SAR start frames, i.e. when @sdulen != 0) | payload | 16-bit FCS
 * placeholder (when CRC16 is enabled).  The FCS is zero-filled here
 * and computed just before transmission.  Returns the skb or an
 * ERR_PTR().
 */
struct sk_buff *l2cap_create_iframe_pdu(struct l2cap_chan *chan, struct msghdr *msg, size_t len, u16 control, u16 sdulen)
{
	struct sock *sk = chan->sk;
	struct l2cap_conn *conn = chan->conn;
	struct sk_buff *skb;
	int err, count, hlen = L2CAP_HDR_SIZE + 2;	/* +2 for control */
	struct l2cap_hdr *lh;

	BT_DBG("sk %p len %d", sk, (int)len);

	if (!conn)
		return ERR_PTR(-ENOTCONN);

	if (sdulen)
		hlen += 2;	/* SAR start frame carries SDU length */

	if (chan->fcs == L2CAP_FCS_CRC16)
		hlen += 2;	/* room for the trailing FCS */

	count = min_t(unsigned int, (conn->mtu - hlen), len);
	skb = bt_skb_send_alloc(sk, count + hlen,
			msg->msg_flags & MSG_DONTWAIT, &err);
	if (!skb)
		return ERR_PTR(err);

	/* Create L2CAP header */
	lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
	lh->cid = cpu_to_le16(chan->dcid);
	lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
	put_unaligned_le16(control, skb_put(skb, 2));
	if (sdulen)
		put_unaligned_le16(sdulen, skb_put(skb, 2));

	err = l2cap_skbuff_fromiovec(sk, msg, len, count, skb);
	if (unlikely(err < 0)) {
		kfree_skb(skb);
		return ERR_PTR(err);
	}

	/* FCS placeholder; the real CRC is written at send time */
	if (chan->fcs == L2CAP_FCS_CRC16)
		put_unaligned_le16(0, skb_put(skb, 2));

	bt_cb(skb)->retries = 0;
	return skb;
}
1553
/* Segment an SDU larger than the remote MPS into a START / CONTINUE /
 * END sequence of I-frames.  Frames are built on a private queue
 * first so a mid-stream allocation failure leaves chan->tx_q
 * untouched.  Returns the number of payload bytes queued, or a
 * negative errno.
 */
int l2cap_sar_segment_sdu(struct l2cap_chan *chan, struct msghdr *msg, size_t len)
{
	struct sk_buff *skb;
	struct sk_buff_head sar_queue;
	u16 control;
	size_t size = 0;

	skb_queue_head_init(&sar_queue);
	/* SAR start frame carries the total SDU length (@len) */
	control = L2CAP_SDU_START;
	skb = l2cap_create_iframe_pdu(chan, msg, chan->remote_mps, control, len);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	__skb_queue_tail(&sar_queue, skb);
	len -= chan->remote_mps;
	size += chan->remote_mps;

	while (len > 0) {
		size_t buflen;

		if (len > chan->remote_mps) {
			control = L2CAP_SDU_CONTINUE;
			buflen = chan->remote_mps;
		} else {
			/* Last chunk of the SDU */
			control = L2CAP_SDU_END;
			buflen = len;
		}

		skb = l2cap_create_iframe_pdu(chan, msg, buflen, control, 0);
		if (IS_ERR(skb)) {
			/* Drop the partial sequence; tx_q is unchanged */
			skb_queue_purge(&sar_queue);
			return PTR_ERR(skb);
		}

		__skb_queue_tail(&sar_queue, skb);
		len -= buflen;
		size += buflen;
	}
	skb_queue_splice_tail(&sar_queue, &chan->tx_q);
	if (chan->tx_send_head == NULL)
		chan->tx_send_head = sar_queue.next;

	return size;
}
1598
/* Entry point for sending user data on a channel.
 *
 * Dispatches on channel type and mode: connectionless channels send a
 * single G-frame immediately; basic mode sends one PDU; ERTM and
 * streaming modes queue I-frames (segmenting when the SDU exceeds the
 * remote MPS) and then kick the appropriate transmit routine.
 * Returns the number of bytes accepted or a negative errno.
 */
int l2cap_chan_send(struct l2cap_chan *chan, struct msghdr *msg, size_t len)
{
	struct sk_buff *skb;
	u16 control;
	int err;

	/* Connectionless channel */
	if (chan->chan_type == L2CAP_CHAN_CONN_LESS) {
		skb = l2cap_create_connless_pdu(chan, msg, len);
		if (IS_ERR(skb))
			return PTR_ERR(skb);

		l2cap_do_send(chan, skb);
		return len;
	}

	switch (chan->mode) {
	case L2CAP_MODE_BASIC:
		/* Check outgoing MTU */
		if (len > chan->omtu)
			return -EMSGSIZE;

		/* Create a basic PDU */
		skb = l2cap_create_basic_pdu(chan, msg, len);
		if (IS_ERR(skb))
			return PTR_ERR(skb);

		l2cap_do_send(chan, skb);
		err = len;
		break;

	case L2CAP_MODE_ERTM:
	case L2CAP_MODE_STREAMING:
		/* Entire SDU fits into one PDU */
		if (len <= chan->remote_mps) {
			control = L2CAP_SDU_UNSEGMENTED;
			skb = l2cap_create_iframe_pdu(chan, msg, len, control,
									0);
			if (IS_ERR(skb))
				return PTR_ERR(skb);

			__skb_queue_tail(&chan->tx_q, skb);

			if (chan->tx_send_head == NULL)
				chan->tx_send_head = skb;

		} else {
			/* Segment SDU into multiples PDUs */
			err = l2cap_sar_segment_sdu(chan, msg, len);
			if (err < 0)
				return err;
		}

		if (chan->mode == L2CAP_MODE_STREAMING) {
			l2cap_streaming_send(chan);
			err = len;
			break;
		}

		/* Remote busy during a poll exchange: leave the frames
		 * queued; they go out once the condition clears.
		 */
		if ((chan->conn_state & L2CAP_CONN_REMOTE_BUSY) &&
				(chan->conn_state & L2CAP_CONN_WAIT_F)) {
			err = len;
			break;
		}

		err = l2cap_ertm_send(chan);
		if (err >= 0)
			err = len;

		break;

	default:
		BT_DBG("bad state %1.1x", chan->mode);
		err = -EBADFD;
	}

	return err;
}
1677
1678 static void l2cap_chan_ready(struct sock *sk)
1679 {
1680 struct sock *parent = bt_sk(sk)->parent;
1681 struct l2cap_chan *chan = l2cap_pi(sk)->chan;
1682
1683 BT_DBG("sk %p, parent %p", sk, parent);
1684
1685 chan->conf_state = 0;
1686 __clear_chan_timer(chan);
1687
1688 if (!parent) {
1689 /* Outgoing channel.
1690 * Wake up socket sleeping on connect.
1691 */
1692 l2cap_state_change(chan, BT_CONNECTED);
1693 sk->sk_state_change(sk);
1694 } else {
1695 /* Incoming channel.
1696 * Wake up socket sleeping on accept.
1697 */
1698 parent->sk_data_ready(parent, 0);
1699 }
1700 }
1701
/* Copy frame to all raw sockets on that connection.  Each raw channel
 * (except the one the frame came from) receives its own clone; a
 * clone allocation failure just skips that listener.
 */
static void l2cap_raw_recv(struct l2cap_conn *conn, struct sk_buff *skb)
{
	struct sk_buff *nskb;
	struct l2cap_chan *chan;

	BT_DBG("conn %p", conn);

	read_lock(&conn->chan_lock);
	list_for_each_entry(chan, &conn->chan_l, list) {
		struct sock *sk = chan->sk;
		if (chan->chan_type != L2CAP_CHAN_RAW)
			continue;

		/* Don't send frame to the socket it came from */
		if (skb->sk == sk)
			continue;
		nskb = skb_clone(skb, GFP_ATOMIC);
		if (!nskb)
			continue;

		/* recv() takes ownership on success; free if refused */
		if (chan->ops->recv(chan->data, nskb))
			kfree_skb(nskb);
	}
	read_unlock(&conn->chan_lock);
}
1728
/* ---- L2CAP signalling commands ---- */

/* Allocate and fill a signalling PDU: L2CAP header + command header +
 * @dlen bytes of @data.  The signalling CID depends on the link type
 * (LE vs BR/EDR).  Payload beyond the first conn->mtu bytes is split
 * into headerless fragments chained on frag_list.  Returns the skb,
 * or NULL on allocation failure.
 */
static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
					u8 code, u8 ident, u16 dlen, void *data)
{
	struct sk_buff *skb, **frag;
	struct l2cap_cmd_hdr *cmd;
	struct l2cap_hdr *lh;
	int len, count;

	BT_DBG("conn %p, code 0x%2.2x, ident 0x%2.2x, len %d",
			conn, code, ident, dlen);

	len = L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE + dlen;
	count = min_t(unsigned int, conn->mtu, len);

	skb = bt_skb_alloc(count, GFP_ATOMIC);
	if (!skb)
		return NULL;

	lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
	lh->len = cpu_to_le16(L2CAP_CMD_HDR_SIZE + dlen);

	/* Signalling channel differs between LE and BR/EDR links */
	if (conn->hcon->type == LE_LINK)
		lh->cid = cpu_to_le16(L2CAP_CID_LE_SIGNALING);
	else
		lh->cid = cpu_to_le16(L2CAP_CID_SIGNALING);

	cmd = (struct l2cap_cmd_hdr *) skb_put(skb, L2CAP_CMD_HDR_SIZE);
	cmd->code  = code;
	cmd->ident = ident;
	cmd->len   = cpu_to_le16(dlen);

	if (dlen) {
		/* Fill the rest of the first fragment with payload */
		count -= L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE;
		memcpy(skb_put(skb, count), data, count);
		data += count;
	}

	len -= skb->len;

	/* Continuation fragments (no L2CAP header) */
	frag = &skb_shinfo(skb)->frag_list;
	while (len) {
		count = min_t(unsigned int, conn->mtu, len);

		*frag = bt_skb_alloc(count, GFP_ATOMIC);
		if (!*frag)
			goto fail;

		memcpy(skb_put(*frag, count), data, count);

		len  -= count;
		data += count;

		frag = &(*frag)->next;
	}

	return skb;

fail:
	/* Freeing the head skb also frees any chained fragments */
	kfree_skb(skb);
	return NULL;
}
1792
1793 static inline int l2cap_get_conf_opt(void **ptr, int *type, int *olen, unsigned long *val)
1794 {
1795 struct l2cap_conf_opt *opt = *ptr;
1796 int len;
1797
1798 len = L2CAP_CONF_OPT_SIZE + opt->len;
1799 *ptr += len;
1800
1801 *type = opt->type;
1802 *olen = opt->len;
1803
1804 switch (opt->len) {
1805 case 1:
1806 *val = *((u8 *) opt->val);
1807 break;
1808
1809 case 2:
1810 *val = get_unaligned_le16(opt->val);
1811 break;
1812
1813 case 4:
1814 *val = get_unaligned_le32(opt->val);
1815 break;
1816
1817 default:
1818 *val = (unsigned long) opt->val;
1819 break;
1820 }
1821
1822 BT_DBG("type 0x%2.2x len %d val 0x%lx", *type, opt->len, *val);
1823 return len;
1824 }
1825
1826 static void l2cap_add_conf_opt(void **ptr, u8 type, u8 len, unsigned long val)
1827 {
1828 struct l2cap_conf_opt *opt = *ptr;
1829
1830 BT_DBG("type 0x%2.2x len %d val 0x%lx", type, len, val);
1831
1832 opt->type = type;
1833 opt->len = len;
1834
1835 switch (len) {
1836 case 1:
1837 *((u8 *) opt->val) = val;
1838 break;
1839
1840 case 2:
1841 put_unaligned_le16(val, opt->val);
1842 break;
1843
1844 case 4:
1845 put_unaligned_le32(val, opt->val);
1846 break;
1847
1848 default:
1849 memcpy(opt->val, (void *) val, len);
1850 break;
1851 }
1852
1853 *ptr += L2CAP_CONF_OPT_SIZE + len;
1854 }
1855
1856 static void l2cap_ack_timeout(unsigned long arg)
1857 {
1858 struct l2cap_chan *chan = (void *) arg;
1859
1860 bh_lock_sock(chan->sk);
1861 l2cap_send_ack(chan);
1862 bh_unlock_sock(chan->sk);
1863 }
1864
/* Initialize per-channel ERTM state: sequence counters, the three
 * ERTM timers (retransmission, monitor, ack), the SREJ/busy queues
 * and the backlog receive handler.
 */
static inline void l2cap_ertm_init(struct l2cap_chan *chan)
{
	struct sock *sk = chan->sk;

	/* All sequence-number state starts at zero */
	chan->expected_ack_seq = 0;
	chan->unacked_frames = 0;
	chan->buffer_seq = 0;
	chan->num_acked = 0;
	chan->frames_sent = 0;

	setup_timer(&chan->retrans_timer, l2cap_retrans_timeout,
							(unsigned long) chan);
	setup_timer(&chan->monitor_timer, l2cap_monitor_timeout,
							(unsigned long) chan);
	setup_timer(&chan->ack_timer, l2cap_ack_timeout, (unsigned long) chan);

	skb_queue_head_init(&chan->srej_q);
	skb_queue_head_init(&chan->busy_q);

	INIT_LIST_HEAD(&chan->srej_l);

	INIT_WORK(&chan->busy_work, l2cap_busy_work);

	/* ERTM frames queued on the backlog go through the ERTM path */
	sk->sk_backlog_rcv = l2cap_ertm_data_rcv;
}
1890
1891 static inline __u8 l2cap_select_mode(__u8 mode, __u16 remote_feat_mask)
1892 {
1893 switch (mode) {
1894 case L2CAP_MODE_STREAMING:
1895 case L2CAP_MODE_ERTM:
1896 if (l2cap_mode_supported(mode, remote_feat_mask))
1897 return mode;
1898 /* fall through */
1899 default:
1900 return L2CAP_MODE_BASIC;
1901 }
1902 }
1903
/* Build our Configure Request for @chan into @data.
 *
 * On the first request, downgrades the desired mode to one the remote
 * supports (unless the mode was fixed by the user).  Then emits MTU,
 * RFC and (for ERTM/streaming) FCS options as needed.  Returns the
 * number of bytes written.
 */
static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data)
{
	struct l2cap_conf_req *req = data;
	struct l2cap_conf_rfc rfc = { .mode = chan->mode };
	void *ptr = req->data;

	BT_DBG("chan %p", chan);

	/* Mode selection only happens on the very first exchange */
	if (chan->num_conf_req || chan->num_conf_rsp)
		goto done;

	switch (chan->mode) {
	case L2CAP_MODE_STREAMING:
	case L2CAP_MODE_ERTM:
		/* STATE2_DEVICE means the mode is mandated; keep it */
		if (chan->conf_state & L2CAP_CONF_STATE2_DEVICE)
			break;

		/* fall through */
	default:
		chan->mode = l2cap_select_mode(rfc.mode, chan->conn->feat_mask);
		break;
	}

done:
	/* Only advertise a non-default MTU */
	if (chan->imtu != L2CAP_DEFAULT_MTU)
		l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->imtu);

	switch (chan->mode) {
	case L2CAP_MODE_BASIC:
		/* Skip the RFC option when the remote knows neither
		 * ERTM nor streaming mode; basic is implicit then.
		 */
		if (!(chan->conn->feat_mask & L2CAP_FEAT_ERTM) &&
				!(chan->conn->feat_mask & L2CAP_FEAT_STREAMING))
			break;

		rfc.mode            = L2CAP_MODE_BASIC;
		rfc.txwin_size      = 0;
		rfc.max_transmit    = 0;
		rfc.retrans_timeout = 0;
		rfc.monitor_timeout = 0;
		rfc.max_pdu_size    = 0;

		l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
							(unsigned long) &rfc);
		break;

	case L2CAP_MODE_ERTM:
		rfc.mode            = L2CAP_MODE_ERTM;
		rfc.txwin_size      = chan->tx_win;
		rfc.max_transmit    = chan->max_tx;
		rfc.retrans_timeout = 0;
		rfc.monitor_timeout = 0;
		rfc.max_pdu_size    = cpu_to_le16(L2CAP_DEFAULT_MAX_PDU_SIZE);
		/* Clamp the PDU size so a frame plus ERTM overhead
		 * still fits in one ACL MTU.
		 */
		if (L2CAP_DEFAULT_MAX_PDU_SIZE > chan->conn->mtu - 10)
			rfc.max_pdu_size = cpu_to_le16(chan->conn->mtu - 10);

		l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
							(unsigned long) &rfc);

		if (!(chan->conn->feat_mask & L2CAP_FEAT_FCS))
			break;

		if (chan->fcs == L2CAP_FCS_NONE ||
				chan->conf_state & L2CAP_CONF_NO_FCS_RECV) {
			chan->fcs = L2CAP_FCS_NONE;
			l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1, chan->fcs);
		}
		break;

	case L2CAP_MODE_STREAMING:
		rfc.mode            = L2CAP_MODE_STREAMING;
		rfc.txwin_size      = 0;
		rfc.max_transmit    = 0;
		rfc.retrans_timeout = 0;
		rfc.monitor_timeout = 0;
		rfc.max_pdu_size    = cpu_to_le16(L2CAP_DEFAULT_MAX_PDU_SIZE);
		if (L2CAP_DEFAULT_MAX_PDU_SIZE > chan->conn->mtu - 10)
			rfc.max_pdu_size = cpu_to_le16(chan->conn->mtu - 10);

		l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
							(unsigned long) &rfc);

		if (!(chan->conn->feat_mask & L2CAP_FEAT_FCS))
			break;

		if (chan->fcs == L2CAP_FCS_NONE ||
				chan->conf_state & L2CAP_CONF_NO_FCS_RECV) {
			chan->fcs = L2CAP_FCS_NONE;
			l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1, chan->fcs);
		}
		break;
	}

	req->dcid  = cpu_to_le16(chan->dcid);
	req->flags = cpu_to_le16(0);

	return ptr - data;
}
2000
2001 static int l2cap_parse_conf_req(struct l2cap_chan *chan, void *data)
2002 {
2003 struct l2cap_conf_rsp *rsp = data;
2004 void *ptr = rsp->data;
2005 void *req = chan->conf_req;
2006 int len = chan->conf_len;
2007 int type, hint, olen;
2008 unsigned long val;
2009 struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
2010 u16 mtu = L2CAP_DEFAULT_MTU;
2011 u16 result = L2CAP_CONF_SUCCESS;
2012
2013 BT_DBG("chan %p", chan);
2014
2015 while (len >= L2CAP_CONF_OPT_SIZE) {
2016 len -= l2cap_get_conf_opt(&req, &type, &olen, &val);
2017
2018 hint = type & L2CAP_CONF_HINT;
2019 type &= L2CAP_CONF_MASK;
2020
2021 switch (type) {
2022 case L2CAP_CONF_MTU:
2023 mtu = val;
2024 break;
2025
2026 case L2CAP_CONF_FLUSH_TO:
2027 chan->flush_to = val;
2028 break;
2029
2030 case L2CAP_CONF_QOS:
2031 break;
2032
2033 case L2CAP_CONF_RFC:
2034 if (olen == sizeof(rfc))
2035 memcpy(&rfc, (void *) val, olen);
2036 break;
2037
2038 case L2CAP_CONF_FCS:
2039 if (val == L2CAP_FCS_NONE)
2040 chan->conf_state |= L2CAP_CONF_NO_FCS_RECV;
2041
2042 break;
2043
2044 default:
2045 if (hint)
2046 break;
2047
2048 result = L2CAP_CONF_UNKNOWN;
2049 *((u8 *) ptr++) = type;
2050 break;
2051 }
2052 }
2053
2054 if (chan->num_conf_rsp || chan->num_conf_req > 1)
2055 goto done;
2056
2057 switch (chan->mode) {
2058 case L2CAP_MODE_STREAMING:
2059 case L2CAP_MODE_ERTM:
2060 if (!(chan->conf_state & L2CAP_CONF_STATE2_DEVICE)) {
2061 chan->mode = l2cap_select_mode(rfc.mode,
2062 chan->conn->feat_mask);
2063 break;
2064 }
2065
2066 if (chan->mode != rfc.mode)
2067 return -ECONNREFUSED;
2068
2069 break;
2070 }
2071
2072 done:
2073 if (chan->mode != rfc.mode) {
2074 result = L2CAP_CONF_UNACCEPT;
2075 rfc.mode = chan->mode;
2076
2077 if (chan->num_conf_rsp == 1)
2078 return -ECONNREFUSED;
2079
2080 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
2081 sizeof(rfc), (unsigned long) &rfc);
2082 }
2083
2084
2085 if (result == L2CAP_CONF_SUCCESS) {
2086 /* Configure output options and let the other side know
2087 * which ones we don't like. */
2088
2089 if (mtu < L2CAP_DEFAULT_MIN_MTU)
2090 result = L2CAP_CONF_UNACCEPT;
2091 else {
2092 chan->omtu = mtu;
2093 chan->conf_state |= L2CAP_CONF_MTU_DONE;
2094 }
2095 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->omtu);
2096
2097 switch (rfc.mode) {
2098 case L2CAP_MODE_BASIC:
2099 chan->fcs = L2CAP_FCS_NONE;
2100 chan->conf_state |= L2CAP_CONF_MODE_DONE;
2101 break;
2102
2103 case L2CAP_MODE_ERTM:
2104 chan->remote_tx_win = rfc.txwin_size;
2105 chan->remote_max_tx = rfc.max_transmit;
2106
2107 if (le16_to_cpu(rfc.max_pdu_size) > chan->conn->mtu - 10)
2108 rfc.max_pdu_size = cpu_to_le16(chan->conn->mtu - 10);
2109
2110 chan->remote_mps = le16_to_cpu(rfc.max_pdu_size);
2111
2112 rfc.retrans_timeout =
2113 le16_to_cpu(L2CAP_DEFAULT_RETRANS_TO);
2114 rfc.monitor_timeout =
2115 le16_to_cpu(L2CAP_DEFAULT_MONITOR_TO);
2116
2117 chan->conf_state |= L2CAP_CONF_MODE_DONE;
2118
2119 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
2120 sizeof(rfc), (unsigned long) &rfc);
2121
2122 break;
2123
2124 case L2CAP_MODE_STREAMING:
2125 if (le16_to_cpu(rfc.max_pdu_size) > chan->conn->mtu - 10)
2126 rfc.max_pdu_size = cpu_to_le16(chan->conn->mtu - 10);
2127
2128 chan->remote_mps = le16_to_cpu(rfc.max_pdu_size);
2129
2130 chan->conf_state |= L2CAP_CONF_MODE_DONE;
2131
2132 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
2133 sizeof(rfc), (unsigned long) &rfc);
2134
2135 break;
2136
2137 default:
2138 result = L2CAP_CONF_UNACCEPT;
2139
2140 memset(&rfc, 0, sizeof(rfc));
2141 rfc.mode = chan->mode;
2142 }
2143
2144 if (result == L2CAP_CONF_SUCCESS)
2145 chan->conf_state |= L2CAP_CONF_OUTPUT_DONE;
2146 }
2147 rsp->scid = cpu_to_le16(chan->dcid);
2148 rsp->result = cpu_to_le16(result);
2149 rsp->flags = cpu_to_le16(0x0000);
2150
2151 return ptr - data;
2152 }
2153
2154 static int l2cap_parse_conf_rsp(struct l2cap_chan *chan, void *rsp, int len, void *data, u16 *result)
2155 {
2156 struct l2cap_conf_req *req = data;
2157 void *ptr = req->data;
2158 int type, olen;
2159 unsigned long val;
2160 struct l2cap_conf_rfc rfc;
2161
2162 BT_DBG("chan %p, rsp %p, len %d, req %p", chan, rsp, len, data);
2163
2164 while (len >= L2CAP_CONF_OPT_SIZE) {
2165 len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);
2166
2167 switch (type) {
2168 case L2CAP_CONF_MTU:
2169 if (val < L2CAP_DEFAULT_MIN_MTU) {
2170 *result = L2CAP_CONF_UNACCEPT;
2171 chan->imtu = L2CAP_DEFAULT_MIN_MTU;
2172 } else
2173 chan->imtu = val;
2174 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->imtu);
2175 break;
2176
2177 case L2CAP_CONF_FLUSH_TO:
2178 chan->flush_to = val;
2179 l2cap_add_conf_opt(&ptr, L2CAP_CONF_FLUSH_TO,
2180 2, chan->flush_to);
2181 break;
2182
2183 case L2CAP_CONF_RFC:
2184 if (olen == sizeof(rfc))
2185 memcpy(&rfc, (void *)val, olen);
2186
2187 if ((chan->conf_state & L2CAP_CONF_STATE2_DEVICE) &&
2188 rfc.mode != chan->mode)
2189 return -ECONNREFUSED;
2190
2191 chan->fcs = 0;
2192
2193 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
2194 sizeof(rfc), (unsigned long) &rfc);
2195 break;
2196 }
2197 }
2198
2199 if (chan->mode == L2CAP_MODE_BASIC && chan->mode != rfc.mode)
2200 return -ECONNREFUSED;
2201
2202 chan->mode = rfc.mode;
2203
2204 if (*result == L2CAP_CONF_SUCCESS) {
2205 switch (rfc.mode) {
2206 case L2CAP_MODE_ERTM:
2207 chan->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
2208 chan->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
2209 chan->mps = le16_to_cpu(rfc.max_pdu_size);
2210 break;
2211 case L2CAP_MODE_STREAMING:
2212 chan->mps = le16_to_cpu(rfc.max_pdu_size);
2213 }
2214 }
2215
2216 req->dcid = cpu_to_le16(chan->dcid);
2217 req->flags = cpu_to_le16(0x0000);
2218
2219 return ptr - data;
2220 }
2221
2222 static int l2cap_build_conf_rsp(struct l2cap_chan *chan, void *data, u16 result, u16 flags)
2223 {
2224 struct l2cap_conf_rsp *rsp = data;
2225 void *ptr = rsp->data;
2226
2227 BT_DBG("chan %p", chan);
2228
2229 rsp->scid = cpu_to_le16(chan->dcid);
2230 rsp->result = cpu_to_le16(result);
2231 rsp->flags = cpu_to_le16(flags);
2232
2233 return ptr - data;
2234 }
2235
2236 void __l2cap_connect_rsp_defer(struct l2cap_chan *chan)
2237 {
2238 struct l2cap_conn_rsp rsp;
2239 struct l2cap_conn *conn = chan->conn;
2240 u8 buf[128];
2241
2242 rsp.scid = cpu_to_le16(chan->dcid);
2243 rsp.dcid = cpu_to_le16(chan->scid);
2244 rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
2245 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
2246 l2cap_send_cmd(conn, chan->ident,
2247 L2CAP_CONN_RSP, sizeof(rsp), &rsp);
2248
2249 if (chan->conf_state & L2CAP_CONF_REQ_SENT)
2250 return;
2251
2252 chan->conf_state |= L2CAP_CONF_REQ_SENT;
2253 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
2254 l2cap_build_conf_req(chan, buf), buf);
2255 chan->num_conf_req++;
2256 }
2257
2258 static void l2cap_conf_rfc_get(struct l2cap_chan *chan, void *rsp, int len)
2259 {
2260 int type, olen;
2261 unsigned long val;
2262 struct l2cap_conf_rfc rfc;
2263
2264 BT_DBG("chan %p, rsp %p, len %d", chan, rsp, len);
2265
2266 if ((chan->mode != L2CAP_MODE_ERTM) && (chan->mode != L2CAP_MODE_STREAMING))
2267 return;
2268
2269 while (len >= L2CAP_CONF_OPT_SIZE) {
2270 len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);
2271
2272 switch (type) {
2273 case L2CAP_CONF_RFC:
2274 if (olen == sizeof(rfc))
2275 memcpy(&rfc, (void *)val, olen);
2276 goto done;
2277 }
2278 }
2279
2280 done:
2281 switch (rfc.mode) {
2282 case L2CAP_MODE_ERTM:
2283 chan->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
2284 chan->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
2285 chan->mps = le16_to_cpu(rfc.max_pdu_size);
2286 break;
2287 case L2CAP_MODE_STREAMING:
2288 chan->mps = le16_to_cpu(rfc.max_pdu_size);
2289 }
2290 }
2291
2292 static inline int l2cap_command_rej(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2293 {
2294 struct l2cap_cmd_rej *rej = (struct l2cap_cmd_rej *) data;
2295
2296 if (rej->reason != 0x0000)
2297 return 0;
2298
2299 if ((conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) &&
2300 cmd->ident == conn->info_ident) {
2301 del_timer(&conn->info_timer);
2302
2303 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
2304 conn->info_ident = 0;
2305
2306 l2cap_conn_start(conn);
2307 }
2308
2309 return 0;
2310 }
2311
/* Handle an incoming Connection Request.
 *
 * Looks up a listening channel for the requested PSM, performs
 * security and backlog checks, allocates the child channel, and
 * replies with success/pending/failure.  When the remote's feature
 * mask is still unknown, an Information Request is sent and the
 * connection is answered as pending.
 */
static inline int l2cap_connect_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
{
	struct l2cap_conn_req *req = (struct l2cap_conn_req *) data;
	struct l2cap_conn_rsp rsp;
	struct l2cap_chan *chan = NULL, *pchan;
	struct sock *parent, *sk = NULL;
	int result, status = L2CAP_CS_NO_INFO;

	u16 dcid = 0, scid = __le16_to_cpu(req->scid);
	__le16 psm = req->psm;

	BT_DBG("psm 0x%2.2x scid 0x%4.4x", psm, scid);

	/* Check if we have socket listening on psm */
	pchan = l2cap_global_chan_by_psm(BT_LISTEN, psm, conn->src);
	if (!pchan) {
		result = L2CAP_CR_BAD_PSM;
		goto sendresp;
	}

	parent = pchan->sk;

	bh_lock_sock(parent);

	/* Check if the ACL is secure enough (if not SDP) */
	if (psm != cpu_to_le16(0x0001) &&
				!hci_conn_check_link_mode(conn->hcon)) {
		/* 0x05: authentication failure disconnect reason */
		conn->disc_reason = 0x05;
		result = L2CAP_CR_SEC_BLOCK;
		goto response;
	}

	result = L2CAP_CR_NO_MEM;

	/* Check for backlog size */
	if (sk_acceptq_is_full(parent)) {
		BT_DBG("backlog full %d", parent->sk_ack_backlog);
		goto response;
	}

	chan = pchan->ops->new_connection(pchan->data);
	if (!chan)
		goto response;

	sk = chan->sk;

	write_lock_bh(&conn->chan_lock);

	/* Check if we already have channel with that dcid */
	if (__l2cap_get_chan_by_dcid(conn, scid)) {
		write_unlock_bh(&conn->chan_lock);
		sock_set_flag(sk, SOCK_ZAPPED);
		chan->ops->close(chan->data);
		goto response;
	}

	hci_conn_hold(conn->hcon);

	bacpy(&bt_sk(sk)->src, conn->src);
	bacpy(&bt_sk(sk)->dst, conn->dst);
	chan->psm  = psm;
	/* The remote's source CID is our destination CID */
	chan->dcid = scid;

	bt_accept_enqueue(parent, sk);

	__l2cap_chan_add(conn, chan);

	dcid = chan->scid;

	__set_chan_timer(chan, sk->sk_sndtimeo);

	chan->ident = cmd->ident;

	if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE) {
		if (l2cap_check_security(chan)) {
			if (bt_sk(sk)->defer_setup) {
				/* Userspace decides; report pending */
				l2cap_state_change(chan, BT_CONNECT2);
				result = L2CAP_CR_PEND;
				status = L2CAP_CS_AUTHOR_PEND;
				parent->sk_data_ready(parent, 0);
			} else {
				l2cap_state_change(chan, BT_CONFIG);
				result = L2CAP_CR_SUCCESS;
				status = L2CAP_CS_NO_INFO;
			}
		} else {
			/* Security procedure still in progress */
			l2cap_state_change(chan, BT_CONNECT2);
			result = L2CAP_CR_PEND;
			status = L2CAP_CS_AUTHEN_PEND;
		}
	} else {
		/* Feature mask unknown yet; answer pending */
		l2cap_state_change(chan, BT_CONNECT2);
		result = L2CAP_CR_PEND;
		status = L2CAP_CS_NO_INFO;
	}

	write_unlock_bh(&conn->chan_lock);

response:
	bh_unlock_sock(parent);

sendresp:
	rsp.scid   = cpu_to_le16(scid);
	rsp.dcid   = cpu_to_le16(dcid);
	rsp.result = cpu_to_le16(result);
	rsp.status = cpu_to_le16(status);
	l2cap_send_cmd(conn, cmd->ident, L2CAP_CONN_RSP, sizeof(rsp), &rsp);

	if (result == L2CAP_CR_PEND && status == L2CAP_CS_NO_INFO) {
		struct l2cap_info_req info;
		info.type = cpu_to_le16(L2CAP_IT_FEAT_MASK);

		conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
		conn->info_ident = l2cap_get_ident(conn);

		mod_timer(&conn->info_timer, jiffies +
					msecs_to_jiffies(L2CAP_INFO_TIMEOUT));

		l2cap_send_cmd(conn, conn->info_ident,
					L2CAP_INFO_REQ, sizeof(info), &info);
	}

	/* On immediate success, start configuration right away */
	if (chan && !(chan->conf_state & L2CAP_CONF_REQ_SENT) &&
				result == L2CAP_CR_SUCCESS) {
		u8 buf[128];
		chan->conf_state |= L2CAP_CONF_REQ_SENT;
		l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
					l2cap_build_conf_req(chan, buf), buf);
		chan->num_conf_req++;
	}

	return 0;
}
2445
/* Handle the remote's Connection Response to our Connection Request.
 *
 * On success, records the remote CID and starts configuration; on
 * pending, just flags the channel; on failure, tears the channel down
 * (deferred via a short timer if userspace currently owns the sock).
 *
 * NOTE(review): the bh_unlock_sock() at the end implies the channel
 * lookups (l2cap_get_chan_by_scid/_by_ident) return with the socket
 * locked — confirm against their definitions.
 */
static inline int l2cap_connect_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
{
	struct l2cap_conn_rsp *rsp = (struct l2cap_conn_rsp *) data;
	u16 scid, dcid, result, status;
	struct l2cap_chan *chan;
	struct sock *sk;
	u8 req[128];

	scid   = __le16_to_cpu(rsp->scid);
	dcid   = __le16_to_cpu(rsp->dcid);
	result = __le16_to_cpu(rsp->result);
	status = __le16_to_cpu(rsp->status);

	BT_DBG("dcid 0x%4.4x scid 0x%4.4x result 0x%2.2x status 0x%2.2x", dcid, scid, result, status);

	/* scid == 0 means the remote could not allocate a channel;
	 * fall back to matching by the command identifier.
	 */
	if (scid) {
		chan = l2cap_get_chan_by_scid(conn, scid);
		if (!chan)
			return -EFAULT;
	} else {
		chan = l2cap_get_chan_by_ident(conn, cmd->ident);
		if (!chan)
			return -EFAULT;
	}

	sk = chan->sk;

	switch (result) {
	case L2CAP_CR_SUCCESS:
		l2cap_state_change(chan, BT_CONFIG);
		chan->ident = 0;
		chan->dcid = dcid;
		chan->conf_state &= ~L2CAP_CONF_CONNECT_PEND;

		if (chan->conf_state & L2CAP_CONF_REQ_SENT)
			break;

		chan->conf_state |= L2CAP_CONF_REQ_SENT;

		l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
					l2cap_build_conf_req(chan, req), req);
		chan->num_conf_req++;
		break;

	case L2CAP_CR_PEND:
		chan->conf_state |= L2CAP_CONF_CONNECT_PEND;
		break;

	default:
		/* don't delete l2cap channel if sk is owned by user */
		if (sock_owned_by_user(sk)) {
			l2cap_state_change(chan, BT_DISCONN);
			__clear_chan_timer(chan);
			/* Retry deletion shortly (HZ/5 = 200ms) */
			__set_chan_timer(chan, HZ / 5);
			break;
		}

		l2cap_chan_del(chan, ECONNREFUSED);
		break;
	}

	bh_unlock_sock(sk);
	return 0;
}
2510
2511 static inline void set_default_fcs(struct l2cap_chan *chan)
2512 {
2513 struct l2cap_pinfo *pi = l2cap_pi(chan->sk);
2514
2515 /* FCS is enabled only in ERTM or streaming mode, if one or both
2516 * sides request it.
2517 */
2518 if (chan->mode != L2CAP_MODE_ERTM && chan->mode != L2CAP_MODE_STREAMING)
2519 chan->fcs = L2CAP_FCS_NONE;
2520 else if (!(pi->chan->conf_state & L2CAP_CONF_NO_FCS_RECV))
2521 chan->fcs = L2CAP_FCS_CRC16;
2522 }
2523
/* Handle an incoming L2CAP Configure Request.
 *
 * Config options may be fragmented across several requests (bit 0 of
 * 'flags' is the continuation flag); fragments accumulate in
 * chan->conf_req until the final one arrives, then the whole set is
 * parsed and answered.  Returns -ENOENT when the target channel does
 * not exist; protocol errors are answered on the air and return 0.
 */
static inline int l2cap_config_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u16 cmd_len, u8 *data)
{
	struct l2cap_conf_req *req = (struct l2cap_conf_req *) data;
	u16 dcid, flags;
	u8 rsp[64];
	struct l2cap_chan *chan;
	struct sock *sk;
	int len;

	dcid = __le16_to_cpu(req->dcid);
	flags = __le16_to_cpu(req->flags);

	BT_DBG("dcid 0x%4.4x flags 0x%2.2x", dcid, flags);

	/* NOTE(review): lookup appears to return with the socket bh-locked;
	 * every exit below goes through bh_unlock_sock(). */
	chan = l2cap_get_chan_by_scid(conn, dcid);
	if (!chan)
		return -ENOENT;

	sk = chan->sk;

	/* Config is only legal while in BT_CONFIG; 0x0002 is the
	 * "invalid CID in request" command-reject reason. */
	if (chan->state != BT_CONFIG) {
		struct l2cap_cmd_rej rej;

		rej.reason = cpu_to_le16(0x0002);
		l2cap_send_cmd(conn, cmd->ident, L2CAP_COMMAND_REJ,
				sizeof(rej), &rej);
		goto unlock;
	}

	/* Reject if config buffer is too small. */
	len = cmd_len - sizeof(*req);
	if (chan->conf_len + len > sizeof(chan->conf_req)) {
		l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
				l2cap_build_conf_rsp(chan, rsp,
					L2CAP_CONF_REJECT, flags), rsp);
		goto unlock;
	}

	/* Store config. */
	memcpy(chan->conf_req + chan->conf_len, req->data, len);
	chan->conf_len += len;

	if (flags & 0x0001) {
		/* Incomplete config. Send empty response. */
		l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
				l2cap_build_conf_rsp(chan, rsp,
					L2CAP_CONF_SUCCESS, 0x0001), rsp);
		goto unlock;
	}

	/* Complete config. */
	len = l2cap_parse_conf_req(chan, rsp);
	if (len < 0) {
		/* Options were unparseable/unacceptable: tear down. */
		l2cap_send_disconn_req(conn, chan, ECONNRESET);
		goto unlock;
	}

	l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP, len, rsp);
	chan->num_conf_rsp++;

	/* Reset config buffer. */
	chan->conf_len = 0;

	if (!(chan->conf_state & L2CAP_CONF_OUTPUT_DONE))
		goto unlock;

	/* Both directions configured: the channel is operational. */
	if (chan->conf_state & L2CAP_CONF_INPUT_DONE) {
		set_default_fcs(chan);

		l2cap_state_change(chan, BT_CONNECTED);

		chan->next_tx_seq = 0;
		chan->expected_tx_seq = 0;
		skb_queue_head_init(&chan->tx_q);
		if (chan->mode == L2CAP_MODE_ERTM)
			l2cap_ertm_init(chan);

		l2cap_chan_ready(sk);
		goto unlock;
	}

	/* We answered the peer but have not issued our own Config Request
	 * yet; do so now. */
	if (!(chan->conf_state & L2CAP_CONF_REQ_SENT)) {
		u8 buf[64];
		chan->conf_state |= L2CAP_CONF_REQ_SENT;
		l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
					l2cap_build_conf_req(chan, buf), buf);
		chan->num_conf_req++;
	}

unlock:
	bh_unlock_sock(sk);
	return 0;
}
2617
2618 static inline int l2cap_config_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2619 {
2620 struct l2cap_conf_rsp *rsp = (struct l2cap_conf_rsp *)data;
2621 u16 scid, flags, result;
2622 struct l2cap_chan *chan;
2623 struct sock *sk;
2624 int len = cmd->len - sizeof(*rsp);
2625
2626 scid = __le16_to_cpu(rsp->scid);
2627 flags = __le16_to_cpu(rsp->flags);
2628 result = __le16_to_cpu(rsp->result);
2629
2630 BT_DBG("scid 0x%4.4x flags 0x%2.2x result 0x%2.2x",
2631 scid, flags, result);
2632
2633 chan = l2cap_get_chan_by_scid(conn, scid);
2634 if (!chan)
2635 return 0;
2636
2637 sk = chan->sk;
2638
2639 switch (result) {
2640 case L2CAP_CONF_SUCCESS:
2641 l2cap_conf_rfc_get(chan, rsp->data, len);
2642 break;
2643
2644 case L2CAP_CONF_UNACCEPT:
2645 if (chan->num_conf_rsp <= L2CAP_CONF_MAX_CONF_RSP) {
2646 char req[64];
2647
2648 if (len > sizeof(req) - sizeof(struct l2cap_conf_req)) {
2649 l2cap_send_disconn_req(conn, chan, ECONNRESET);
2650 goto done;
2651 }
2652
2653 /* throw out any old stored conf requests */
2654 result = L2CAP_CONF_SUCCESS;
2655 len = l2cap_parse_conf_rsp(chan, rsp->data, len,
2656 req, &result);
2657 if (len < 0) {
2658 l2cap_send_disconn_req(conn, chan, ECONNRESET);
2659 goto done;
2660 }
2661
2662 l2cap_send_cmd(conn, l2cap_get_ident(conn),
2663 L2CAP_CONF_REQ, len, req);
2664 chan->num_conf_req++;
2665 if (result != L2CAP_CONF_SUCCESS)
2666 goto done;
2667 break;
2668 }
2669
2670 default:
2671 sk->sk_err = ECONNRESET;
2672 __set_chan_timer(chan, HZ * 5);
2673 l2cap_send_disconn_req(conn, chan, ECONNRESET);
2674 goto done;
2675 }
2676
2677 if (flags & 0x01)
2678 goto done;
2679
2680 chan->conf_state |= L2CAP_CONF_INPUT_DONE;
2681
2682 if (chan->conf_state & L2CAP_CONF_OUTPUT_DONE) {
2683 set_default_fcs(chan);
2684
2685 l2cap_state_change(chan, BT_CONNECTED);
2686 chan->next_tx_seq = 0;
2687 chan->expected_tx_seq = 0;
2688 skb_queue_head_init(&chan->tx_q);
2689 if (chan->mode == L2CAP_MODE_ERTM)
2690 l2cap_ertm_init(chan);
2691
2692 l2cap_chan_ready(sk);
2693 }
2694
2695 done:
2696 bh_unlock_sock(sk);
2697 return 0;
2698 }
2699
/* Handle an incoming L2CAP Disconnection Request.
 *
 * Acknowledges the request with a Disconnection Response and tears the
 * channel down.  When the socket is currently owned by a user-space
 * caller the teardown is deferred via a short channel timer instead of
 * deleting the channel under the user's lock.
 */
static inline int l2cap_disconnect_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
{
	struct l2cap_disconn_req *req = (struct l2cap_disconn_req *) data;
	struct l2cap_disconn_rsp rsp;
	u16 dcid, scid;
	struct l2cap_chan *chan;
	struct sock *sk;

	scid = __le16_to_cpu(req->scid);
	dcid = __le16_to_cpu(req->dcid);

	BT_DBG("scid 0x%4.4x dcid 0x%4.4x", scid, dcid);

	/* The peer's dcid is our local scid. NOTE(review): lookup appears
	 * to return with the socket bh-locked. */
	chan = l2cap_get_chan_by_scid(conn, dcid);
	if (!chan)
		return 0;

	sk = chan->sk;

	/* Echo the CID pair back, swapped into the peer's perspective. */
	rsp.dcid = cpu_to_le16(chan->scid);
	rsp.scid = cpu_to_le16(chan->dcid);
	l2cap_send_cmd(conn, cmd->ident, L2CAP_DISCONN_RSP, sizeof(rsp), &rsp);

	sk->sk_shutdown = SHUTDOWN_MASK;

	/* don't delete l2cap channel if sk is owned by user */
	if (sock_owned_by_user(sk)) {
		l2cap_state_change(chan, BT_DISCONN);
		__clear_chan_timer(chan);
		__set_chan_timer(chan, HZ / 5);
		bh_unlock_sock(sk);
		return 0;
	}

	l2cap_chan_del(chan, ECONNRESET);
	bh_unlock_sock(sk);

	/* Notify the owner (e.g. socket layer) after the lock is dropped. */
	chan->ops->close(chan->data);
	return 0;
}
2740
/* Handle an incoming L2CAP Disconnection Response.
 *
 * The peer acknowledged our Disconnection Request; finish tearing down
 * the channel.  Mirrors l2cap_disconnect_req(): teardown is deferred to
 * a short timer when user space currently owns the socket lock.
 */
static inline int l2cap_disconnect_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
{
	struct l2cap_disconn_rsp *rsp = (struct l2cap_disconn_rsp *) data;
	u16 dcid, scid;
	struct l2cap_chan *chan;
	struct sock *sk;

	scid = __le16_to_cpu(rsp->scid);
	dcid = __le16_to_cpu(rsp->dcid);

	BT_DBG("dcid 0x%4.4x scid 0x%4.4x", dcid, scid);

	/* NOTE(review): lookup appears to return with the socket bh-locked. */
	chan = l2cap_get_chan_by_scid(conn, scid);
	if (!chan)
		return 0;

	sk = chan->sk;

	/* don't delete l2cap channel if sk is owned by user */
	if (sock_owned_by_user(sk)) {
		l2cap_state_change(chan,BT_DISCONN);
		__clear_chan_timer(chan);
		__set_chan_timer(chan, HZ / 5);
		bh_unlock_sock(sk);
		return 0;
	}

	/* err == 0: this is a locally initiated, orderly shutdown. */
	l2cap_chan_del(chan, 0);
	bh_unlock_sock(sk);

	chan->ops->close(chan->data);
	return 0;
}
2774
2775 static inline int l2cap_information_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2776 {
2777 struct l2cap_info_req *req = (struct l2cap_info_req *) data;
2778 u16 type;
2779
2780 type = __le16_to_cpu(req->type);
2781
2782 BT_DBG("type 0x%4.4x", type);
2783
2784 if (type == L2CAP_IT_FEAT_MASK) {
2785 u8 buf[8];
2786 u32 feat_mask = l2cap_feat_mask;
2787 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
2788 rsp->type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
2789 rsp->result = cpu_to_le16(L2CAP_IR_SUCCESS);
2790 if (!disable_ertm)
2791 feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING
2792 | L2CAP_FEAT_FCS;
2793 put_unaligned_le32(feat_mask, rsp->data);
2794 l2cap_send_cmd(conn, cmd->ident,
2795 L2CAP_INFO_RSP, sizeof(buf), buf);
2796 } else if (type == L2CAP_IT_FIXED_CHAN) {
2797 u8 buf[12];
2798 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
2799 rsp->type = cpu_to_le16(L2CAP_IT_FIXED_CHAN);
2800 rsp->result = cpu_to_le16(L2CAP_IR_SUCCESS);
2801 memcpy(buf + 4, l2cap_fixed_chan, 8);
2802 l2cap_send_cmd(conn, cmd->ident,
2803 L2CAP_INFO_RSP, sizeof(buf), buf);
2804 } else {
2805 struct l2cap_info_rsp rsp;
2806 rsp.type = cpu_to_le16(type);
2807 rsp.result = cpu_to_le16(L2CAP_IR_NOTSUPP);
2808 l2cap_send_cmd(conn, cmd->ident,
2809 L2CAP_INFO_RSP, sizeof(rsp), &rsp);
2810 }
2811
2812 return 0;
2813 }
2814
/* Handle an incoming L2CAP Information Response.
 *
 * Part of the connection-setup information exchange: after the feature
 * mask arrives we optionally query the fixed-channel map, and once the
 * exchange is done (or fails) pending channels are started via
 * l2cap_conn_start().
 */
static inline int l2cap_information_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
{
	struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) data;
	u16 type, result;

	type = __le16_to_cpu(rsp->type);
	result = __le16_to_cpu(rsp->result);

	BT_DBG("type 0x%4.4x result 0x%2.2x", type, result);

	/* L2CAP Info req/rsp are unbound to channels, add extra checks */
	if (cmd->ident != conn->info_ident ||
			conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE)
		return 0;

	/* Response matched our outstanding request; stop the timeout. */
	del_timer(&conn->info_timer);

	if (result != L2CAP_IR_SUCCESS) {
		/* Peer cannot answer; treat the exchange as finished so
		 * channel setup is not blocked forever. */
		conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
		conn->info_ident = 0;

		l2cap_conn_start(conn);

		return 0;
	}

	if (type == L2CAP_IT_FEAT_MASK) {
		conn->feat_mask = get_unaligned_le32(rsp->data);

		if (conn->feat_mask & L2CAP_FEAT_FIXED_CHAN) {
			/* Peer supports fixed channels: ask for the map
			 * before declaring the exchange complete. */
			struct l2cap_info_req req;
			req.type = cpu_to_le16(L2CAP_IT_FIXED_CHAN);

			conn->info_ident = l2cap_get_ident(conn);

			l2cap_send_cmd(conn, conn->info_ident,
					L2CAP_INFO_REQ, sizeof(req), &req);
		} else {
			conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
			conn->info_ident = 0;

			l2cap_conn_start(conn);
		}
	} else if (type == L2CAP_IT_FIXED_CHAN) {
		conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
		conn->info_ident = 0;

		l2cap_conn_start(conn);
	}

	return 0;
}
2867
/* Validate LE connection parameters from a Connection Parameter Update
 * Request.
 *
 * Ranges follow the Bluetooth Core Specification: connection interval
 * 6..3200 (min <= max), supervision timeout multiplier 10..3200, slave
 * latency at most 499 and small enough that the supervision timeout
 * still exceeds the effective connection interval.
 *
 * Returns 0 when the parameters are acceptable, -EINVAL otherwise.
 */
static inline int l2cap_check_conn_param(u16 min, u16 max, u16 latency,
						u16 to_multiplier)
{
	u16 max_latency;

	/* Connection interval bounds, and min must not exceed max. */
	if (max < min || min < 6 || max > 3200)
		return -EINVAL;

	/* Supervision timeout multiplier bounds. */
	if (to_multiplier < 10 || to_multiplier > 3200)
		return -EINVAL;

	/* The supervision timeout must exceed the maximum interval. */
	if (to_multiplier * 8 <= max)
		return -EINVAL;

	max_latency = (to_multiplier * 8 / max) - 1;
	if (latency > 499 || latency > max_latency)
		return -EINVAL;

	return 0;
}
2888
2889 static inline int l2cap_conn_param_update_req(struct l2cap_conn *conn,
2890 struct l2cap_cmd_hdr *cmd, u8 *data)
2891 {
2892 struct hci_conn *hcon = conn->hcon;
2893 struct l2cap_conn_param_update_req *req;
2894 struct l2cap_conn_param_update_rsp rsp;
2895 u16 min, max, latency, to_multiplier, cmd_len;
2896 int err;
2897
2898 if (!(hcon->link_mode & HCI_LM_MASTER))
2899 return -EINVAL;
2900
2901 cmd_len = __le16_to_cpu(cmd->len);
2902 if (cmd_len != sizeof(struct l2cap_conn_param_update_req))
2903 return -EPROTO;
2904
2905 req = (struct l2cap_conn_param_update_req *) data;
2906 min = __le16_to_cpu(req->min);
2907 max = __le16_to_cpu(req->max);
2908 latency = __le16_to_cpu(req->latency);
2909 to_multiplier = __le16_to_cpu(req->to_multiplier);
2910
2911 BT_DBG("min 0x%4.4x max 0x%4.4x latency: 0x%4.4x Timeout: 0x%4.4x",
2912 min, max, latency, to_multiplier);
2913
2914 memset(&rsp, 0, sizeof(rsp));
2915
2916 err = l2cap_check_conn_param(min, max, latency, to_multiplier);
2917 if (err)
2918 rsp.result = cpu_to_le16(L2CAP_CONN_PARAM_REJECTED);
2919 else
2920 rsp.result = cpu_to_le16(L2CAP_CONN_PARAM_ACCEPTED);
2921
2922 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONN_PARAM_UPDATE_RSP,
2923 sizeof(rsp), &rsp);
2924
2925 if (!err)
2926 hci_le_conn_update(hcon, min, max, latency, to_multiplier);
2927
2928 return 0;
2929 }
2930
2931 static inline int l2cap_bredr_sig_cmd(struct l2cap_conn *conn,
2932 struct l2cap_cmd_hdr *cmd, u16 cmd_len, u8 *data)
2933 {
2934 int err = 0;
2935
2936 switch (cmd->code) {
2937 case L2CAP_COMMAND_REJ:
2938 l2cap_command_rej(conn, cmd, data);
2939 break;
2940
2941 case L2CAP_CONN_REQ:
2942 err = l2cap_connect_req(conn, cmd, data);
2943 break;
2944
2945 case L2CAP_CONN_RSP:
2946 err = l2cap_connect_rsp(conn, cmd, data);
2947 break;
2948
2949 case L2CAP_CONF_REQ:
2950 err = l2cap_config_req(conn, cmd, cmd_len, data);
2951 break;
2952
2953 case L2CAP_CONF_RSP:
2954 err = l2cap_config_rsp(conn, cmd, data);
2955 break;
2956
2957 case L2CAP_DISCONN_REQ:
2958 err = l2cap_disconnect_req(conn, cmd, data);
2959 break;
2960
2961 case L2CAP_DISCONN_RSP:
2962 err = l2cap_disconnect_rsp(conn, cmd, data);
2963 break;
2964
2965 case L2CAP_ECHO_REQ:
2966 l2cap_send_cmd(conn, cmd->ident, L2CAP_ECHO_RSP, cmd_len, data);
2967 break;
2968
2969 case L2CAP_ECHO_RSP:
2970 break;
2971
2972 case L2CAP_INFO_REQ:
2973 err = l2cap_information_req(conn, cmd, data);
2974 break;
2975
2976 case L2CAP_INFO_RSP:
2977 err = l2cap_information_rsp(conn, cmd, data);
2978 break;
2979
2980 default:
2981 BT_ERR("Unknown BR/EDR signaling command 0x%2.2x", cmd->code);
2982 err = -EINVAL;
2983 break;
2984 }
2985
2986 return err;
2987 }
2988
2989 static inline int l2cap_le_sig_cmd(struct l2cap_conn *conn,
2990 struct l2cap_cmd_hdr *cmd, u8 *data)
2991 {
2992 switch (cmd->code) {
2993 case L2CAP_COMMAND_REJ:
2994 return 0;
2995
2996 case L2CAP_CONN_PARAM_UPDATE_REQ:
2997 return l2cap_conn_param_update_req(conn, cmd, data);
2998
2999 case L2CAP_CONN_PARAM_UPDATE_RSP:
3000 return 0;
3001
3002 default:
3003 BT_ERR("Unknown LE signaling command 0x%2.2x", cmd->code);
3004 return -EINVAL;
3005 }
3006 }
3007
/* Process an skb received on the L2CAP signaling channel.
 *
 * The payload may contain several concatenated commands; each is copied
 * into a local header, validated, and dispatched to the LE or BR/EDR
 * handler depending on the link type.  A handler error is answered with
 * a Command Reject.  Consumes the skb.
 */
static inline void l2cap_sig_channel(struct l2cap_conn *conn,
						struct sk_buff *skb)
{
	u8 *data = skb->data;
	int len = skb->len;
	struct l2cap_cmd_hdr cmd;
	int err;

	/* Give raw sockets a copy of the signaling traffic first. */
	l2cap_raw_recv(conn, skb);

	while (len >= L2CAP_CMD_HDR_SIZE) {
		u16 cmd_len;
		memcpy(&cmd, data, L2CAP_CMD_HDR_SIZE);
		data += L2CAP_CMD_HDR_SIZE;
		len -= L2CAP_CMD_HDR_SIZE;

		cmd_len = le16_to_cpu(cmd.len);

		BT_DBG("code 0x%2.2x len %d id 0x%2.2x", cmd.code, cmd_len, cmd.ident);

		/* A command claiming more payload than remains, or an ident
		 * of 0 (reserved), means the packet is corrupted — stop
		 * parsing rather than walk off the buffer. */
		if (cmd_len > len || !cmd.ident) {
			BT_DBG("corrupted command");
			break;
		}

		if (conn->hcon->type == LE_LINK)
			err = l2cap_le_sig_cmd(conn, &cmd, data);
		else
			err = l2cap_bredr_sig_cmd(conn, &cmd, cmd_len, data);

		if (err) {
			struct l2cap_cmd_rej rej;

			BT_ERR("Wrong link type (%d)", err);

			/* FIXME: Map err to a valid reason */
			rej.reason = cpu_to_le16(0);
			l2cap_send_cmd(conn, cmd.ident, L2CAP_COMMAND_REJ, sizeof(rej), &rej);
		}

		/* Advance past this command's payload. */
		data += cmd_len;
		len -= cmd_len;
	}

	kfree_skb(skb);
}
3054
3055 static int l2cap_check_fcs(struct l2cap_chan *chan, struct sk_buff *skb)
3056 {
3057 u16 our_fcs, rcv_fcs;
3058 int hdr_size = L2CAP_HDR_SIZE + 2;
3059
3060 if (chan->fcs == L2CAP_FCS_CRC16) {
3061 skb_trim(skb, skb->len - 2);
3062 rcv_fcs = get_unaligned_le16(skb->data + skb->len);
3063 our_fcs = crc16(0, skb->data - hdr_size, skb->len + hdr_size);
3064
3065 if (our_fcs != rcv_fcs)
3066 return -EBADMSG;
3067 }
3068 return 0;
3069 }
3070
/* Answer a poll (P-bit) from the peer: send pending I-frames if we can,
 * otherwise an RR or RNR supervisory frame carrying our current reqseq.
 */
static inline void l2cap_send_i_or_rr_or_rnr(struct l2cap_chan *chan)
{
	u16 control = 0;

	chan->frames_sent = 0;

	control |= chan->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT;

	/* Locally busy: tell the peer to stop sending (RNR). */
	if (chan->conn_state & L2CAP_CONN_LOCAL_BUSY) {
		control |= L2CAP_SUPER_RCV_NOT_READY;
		l2cap_send_sframe(chan, control);
		chan->conn_state |= L2CAP_CONN_RNR_SENT;
	}

	/* Peer was busy: retransmit what it could not take earlier. */
	if (chan->conn_state & L2CAP_CONN_REMOTE_BUSY)
		l2cap_retransmit_frames(chan);

	l2cap_ertm_send(chan);

	/* Nothing was sent and we are not busy: acknowledge the poll with
	 * a plain RR so the peer's F-bit expectation is satisfied. */
	if (!(chan->conn_state & L2CAP_CONN_LOCAL_BUSY) &&
			chan->frames_sent == 0) {
		control |= L2CAP_SUPER_RCV_READY;
		l2cap_send_sframe(chan, control);
	}
}
3096
/* Insert an out-of-sequence I-frame into the SREJ queue, keeping the
 * queue ordered by tx_seq distance from buffer_seq (sequence numbers
 * are modulo 64, so offsets are normalized into [0, 63]).
 *
 * Returns 0 on insertion, -EINVAL if a frame with the same tx_seq is
 * already queued (duplicate).
 */
static int l2cap_add_to_srej_queue(struct l2cap_chan *chan, struct sk_buff *skb, u8 tx_seq, u8 sar)
{
	struct sk_buff *next_skb;
	int tx_seq_offset, next_tx_seq_offset;

	/* Stash sequence/SAR info in the skb control block for later
	 * reassembly. */
	bt_cb(skb)->tx_seq = tx_seq;
	bt_cb(skb)->sar = sar;

	next_skb = skb_peek(&chan->srej_q);
	if (!next_skb) {
		__skb_queue_tail(&chan->srej_q, skb);
		return 0;
	}

	/* Distance of the new frame from the receive window base. */
	tx_seq_offset = (tx_seq - chan->buffer_seq) % 64;
	if (tx_seq_offset < 0)
		tx_seq_offset += 64;

	do {
		if (bt_cb(next_skb)->tx_seq == tx_seq)
			return -EINVAL;

		next_tx_seq_offset = (bt_cb(next_skb)->tx_seq -
						chan->buffer_seq) % 64;
		if (next_tx_seq_offset < 0)
			next_tx_seq_offset += 64;

		/* First queued frame that lies beyond the new one marks
		 * the insertion point. */
		if (next_tx_seq_offset > tx_seq_offset) {
			__skb_queue_before(&chan->srej_q, next_skb, skb);
			return 0;
		}

		if (skb_queue_is_last(&chan->srej_q, next_skb))
			break;

	} while ((next_skb = skb_queue_next(&chan->srej_q, next_skb)));

	/* Largest offset so far: append at the tail. */
	__skb_queue_tail(&chan->srej_q, skb);

	return 0;
}
3138
/* Reassemble an SDU from ERTM I-frames according to the frame's SAR
 * (segmentation and reassembly) bits and deliver it upstream.
 *
 * State lives in chan: L2CAP_CONN_SAR_SDU marks reassembly in progress,
 * chan->sdu holds the partial SDU, L2CAP_CONN_SAR_RETRY marks a
 * delivery that failed and must be retried without re-copying data.
 * Returns 0 on success/drop, a negative error to signal the caller
 * (e.g. -ENOMEM triggers the local-busy path upstream).  Consumes skb
 * except when ->recv() takes ownership or an error defers it.
 */
static int l2cap_ertm_reassembly_sdu(struct l2cap_chan *chan, struct sk_buff *skb, u16 control)
{
	struct sk_buff *_skb;
	int err;

	switch (control & L2CAP_CTRL_SAR) {
	case L2CAP_SDU_UNSEGMENTED:
		/* An unsegmented frame while reassembling is a protocol
		 * violation. */
		if (chan->conn_state & L2CAP_CONN_SAR_SDU)
			goto drop;

		return chan->ops->recv(chan->data, skb);

	case L2CAP_SDU_START:
		/* A new START while reassembling is likewise invalid. */
		if (chan->conn_state & L2CAP_CONN_SAR_SDU)
			goto drop;

		/* First two payload bytes carry the total SDU length. */
		chan->sdu_len = get_unaligned_le16(skb->data);

		if (chan->sdu_len > chan->imtu)
			goto disconnect;

		chan->sdu = bt_skb_alloc(chan->sdu_len, GFP_ATOMIC);
		if (!chan->sdu)
			return -ENOMEM;

		/* pull sdu_len bytes only after alloc, because of Local Busy
		 * condition we have to be sure that this will be executed
		 * only once, i.e., when alloc does not fail */
		skb_pull(skb, 2);

		memcpy(skb_put(chan->sdu, skb->len), skb->data, skb->len);

		chan->conn_state |= L2CAP_CONN_SAR_SDU;
		chan->partial_sdu_len = skb->len;
		break;

	case L2CAP_SDU_CONTINUE:
		if (!(chan->conn_state & L2CAP_CONN_SAR_SDU))
			goto disconnect;

		if (!chan->sdu)
			goto disconnect;

		chan->partial_sdu_len += skb->len;
		if (chan->partial_sdu_len > chan->sdu_len)
			goto drop;

		memcpy(skb_put(chan->sdu, skb->len), skb->data, skb->len);

		break;

	case L2CAP_SDU_END:
		if (!(chan->conn_state & L2CAP_CONN_SAR_SDU))
			goto disconnect;

		if (!chan->sdu)
			goto disconnect;

		/* On a retry after a failed delivery the data was already
		 * copied in; skip straight to the delivery attempt. */
		if (!(chan->conn_state & L2CAP_CONN_SAR_RETRY)) {
			chan->partial_sdu_len += skb->len;

			if (chan->partial_sdu_len > chan->imtu)
				goto drop;

			if (chan->partial_sdu_len != chan->sdu_len)
				goto drop;

			memcpy(skb_put(chan->sdu, skb->len), skb->data, skb->len);
		}

		_skb = skb_clone(chan->sdu, GFP_ATOMIC);
		if (!_skb) {
			chan->conn_state |= L2CAP_CONN_SAR_RETRY;
			return -ENOMEM;
		}

		err = chan->ops->recv(chan->data, _skb);
		if (err < 0) {
			/* Upper layer refused the SDU: remember to retry
			 * delivery without touching the assembled data. */
			kfree_skb(_skb);
			chan->conn_state |= L2CAP_CONN_SAR_RETRY;
			return err;
		}

		chan->conn_state &= ~L2CAP_CONN_SAR_RETRY;
		chan->conn_state &= ~L2CAP_CONN_SAR_SDU;

		kfree_skb(chan->sdu);
		break;
	}

	kfree_skb(skb);
	return 0;

drop:
	kfree_skb(chan->sdu);
	chan->sdu = NULL;

	/* fall through: a dropped SDU in ERTM is unrecoverable, so the
	 * channel is disconnected as well */
disconnect:
	l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
	kfree_skb(skb);
	return 0;
}
3241
/* Try to drain the busy queue after a local-busy condition.
 *
 * Pushes queued frames through reassembly; if one still cannot be
 * delivered it is put back and -EBUSY is returned.  When the queue is
 * fully drained and an RNR had been sent, the peer is polled with an
 * RR+P frame and the local-busy state is cleared.  Returns 0 once the
 * busy condition is over.
 */
static int l2cap_try_push_rx_skb(struct l2cap_chan *chan)
{
	struct sk_buff *skb;
	u16 control;
	int err;

	while ((skb = skb_dequeue(&chan->busy_q))) {
		control = bt_cb(skb)->sar << L2CAP_CTRL_SAR_SHIFT;
		err = l2cap_ertm_reassembly_sdu(chan, skb, control);
		if (err < 0) {
			/* Still busy: requeue at the head so ordering is
			 * preserved for the next attempt. */
			skb_queue_head(&chan->busy_q, skb);
			return -EBUSY;
		}

		chan->buffer_seq = (chan->buffer_seq + 1) % 64;
	}

	if (!(chan->conn_state & L2CAP_CONN_RNR_SENT))
		goto done;

	/* We told the peer we were busy; poll it (RR with P-bit) so it
	 * resumes transmission, and await the F-bit response. */
	control = chan->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT;
	control |= L2CAP_SUPER_RCV_READY | L2CAP_CTRL_POLL;
	l2cap_send_sframe(chan, control);
	chan->retry_count = 1;

	__clear_retrans_timer(chan);
	__set_monitor_timer(chan);

	chan->conn_state |= L2CAP_CONN_WAIT_F;

done:
	chan->conn_state &= ~L2CAP_CONN_LOCAL_BUSY;
	chan->conn_state &= ~L2CAP_CONN_RNR_SENT;

	BT_DBG("chan %p, Exit local busy", chan);

	return 0;
}
3280
/* Workqueue handler for the ERTM local-busy condition.
 *
 * Repeatedly sleeps (releasing the socket lock while waiting) and
 * retries draining the busy queue; gives up and disconnects after
 * L2CAP_LOCAL_BUSY_TRIES attempts, on a pending signal, or on a socket
 * error.  NOTE(review): 'err' is computed but not used after the loop —
 * appears informational only.
 */
static void l2cap_busy_work(struct work_struct *work)
{
	DECLARE_WAITQUEUE(wait, current);
	struct l2cap_chan *chan =
		container_of(work, struct l2cap_chan, busy_work);
	struct sock *sk = chan->sk;
	int n_tries = 0, timeo = HZ/5, err;
	struct sk_buff *skb;

	lock_sock(sk);

	add_wait_queue(sk_sleep(sk), &wait);
	while ((skb = skb_peek(&chan->busy_q))) {
		set_current_state(TASK_INTERRUPTIBLE);

		/* Too many retries: the receiver is stuck; tear down. */
		if (n_tries++ > L2CAP_LOCAL_BUSY_TRIES) {
			err = -EBUSY;
			l2cap_send_disconn_req(chan->conn, chan, EBUSY);
			break;
		}

		if (!timeo)
			timeo = HZ/5;

		if (signal_pending(current)) {
			err = sock_intr_errno(timeo);
			break;
		}

		/* Drop the lock while sleeping so the receive path can
		 * make progress. */
		release_sock(sk);
		timeo = schedule_timeout(timeo);
		lock_sock(sk);

		err = sock_error(sk);
		if (err)
			break;

		/* 0 means the busy queue drained completely. */
		if (l2cap_try_push_rx_skb(chan) == 0)
			break;
	}

	set_current_state(TASK_RUNNING);
	remove_wait_queue(sk_sleep(sk), &wait);

	release_sock(sk);
}
3327
/* Deliver a received in-sequence I-frame, entering the local-busy state
 * if the upper layer cannot take it.
 *
 * While already busy the frame is simply appended to the busy queue.
 * On a fresh delivery failure we queue the frame, send RNR to throttle
 * the peer, and schedule l2cap_busy_work() to retry.  Returns >= 0 on
 * delivery, a negative error when entering/remaining busy.
 */
static int l2cap_push_rx_skb(struct l2cap_chan *chan, struct sk_buff *skb, u16 control)
{
	int sctrl, err;

	if (chan->conn_state & L2CAP_CONN_LOCAL_BUSY) {
		bt_cb(skb)->sar = control >> L2CAP_CTRL_SAR_SHIFT;
		__skb_queue_tail(&chan->busy_q, skb);
		return l2cap_try_push_rx_skb(chan);


	}

	err = l2cap_ertm_reassembly_sdu(chan, skb, control);
	if (err >= 0) {
		/* Delivered: advance the receive window. */
		chan->buffer_seq = (chan->buffer_seq + 1) % 64;
		return err;
	}

	/* Busy Condition */
	BT_DBG("chan %p, Enter local busy", chan);

	chan->conn_state |= L2CAP_CONN_LOCAL_BUSY;
	bt_cb(skb)->sar = control >> L2CAP_CTRL_SAR_SHIFT;
	__skb_queue_tail(&chan->busy_q, skb);

	/* Throttle the peer with an RNR supervisory frame. */
	sctrl = chan->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT;
	sctrl |= L2CAP_SUPER_RCV_NOT_READY;
	l2cap_send_sframe(chan, sctrl);

	chan->conn_state |= L2CAP_CONN_RNR_SENT;

	/* No point acking while we cannot receive. */
	__clear_ack_timer(chan);

	queue_work(_busy_wq, &chan->busy_work);

	return err;
}
3365
3366 static int l2cap_streaming_reassembly_sdu(struct l2cap_chan *chan, struct sk_buff *skb, u16 control)
3367 {
3368 struct sk_buff *_skb;
3369 int err = -EINVAL;
3370
3371 /*
3372 * TODO: We have to notify the userland if some data is lost with the
3373 * Streaming Mode.
3374 */
3375
3376 switch (control & L2CAP_CTRL_SAR) {
3377 case L2CAP_SDU_UNSEGMENTED:
3378 if (chan->conn_state & L2CAP_CONN_SAR_SDU) {
3379 kfree_skb(chan->sdu);
3380 break;
3381 }
3382
3383 err = chan->ops->recv(chan->data, skb);
3384 if (!err)
3385 return 0;
3386
3387 break;
3388
3389 case L2CAP_SDU_START:
3390 if (chan->conn_state & L2CAP_CONN_SAR_SDU) {
3391 kfree_skb(chan->sdu);
3392 break;
3393 }
3394
3395 chan->sdu_len = get_unaligned_le16(skb->data);
3396 skb_pull(skb, 2);
3397
3398 if (chan->sdu_len > chan->imtu) {
3399 err = -EMSGSIZE;
3400 break;
3401 }
3402
3403 chan->sdu = bt_skb_alloc(chan->sdu_len, GFP_ATOMIC);
3404 if (!chan->sdu) {
3405 err = -ENOMEM;
3406 break;
3407 }
3408
3409 memcpy(skb_put(chan->sdu, skb->len), skb->data, skb->len);
3410
3411 chan->conn_state |= L2CAP_CONN_SAR_SDU;
3412 chan->partial_sdu_len = skb->len;
3413 err = 0;
3414 break;
3415
3416 case L2CAP_SDU_CONTINUE:
3417 if (!(chan->conn_state & L2CAP_CONN_SAR_SDU))
3418 break;
3419
3420 memcpy(skb_put(chan->sdu, skb->len), skb->data, skb->len);
3421
3422 chan->partial_sdu_len += skb->len;
3423 if (chan->partial_sdu_len > chan->sdu_len)
3424 kfree_skb(chan->sdu);
3425 else
3426 err = 0;
3427
3428 break;
3429
3430 case L2CAP_SDU_END:
3431 if (!(chan->conn_state & L2CAP_CONN_SAR_SDU))
3432 break;
3433
3434 memcpy(skb_put(chan->sdu, skb->len), skb->data, skb->len);
3435
3436 chan->conn_state &= ~L2CAP_CONN_SAR_SDU;
3437 chan->partial_sdu_len += skb->len;
3438
3439 if (chan->partial_sdu_len > chan->imtu)
3440 goto drop;
3441
3442 if (chan->partial_sdu_len == chan->sdu_len) {
3443 _skb = skb_clone(chan->sdu, GFP_ATOMIC);
3444 err = chan->ops->recv(chan->data, _skb);
3445 if (err < 0)
3446 kfree_skb(_skb);
3447 }
3448 err = 0;
3449
3450 drop:
3451 kfree_skb(chan->sdu);
3452 break;
3453 }
3454
3455 kfree_skb(skb);
3456 return err;
3457 }
3458
3459 static void l2cap_check_srej_gap(struct l2cap_chan *chan, u8 tx_seq)
3460 {
3461 struct sk_buff *skb;
3462 u16 control;
3463
3464 while ((skb = skb_peek(&chan->srej_q))) {
3465 if (bt_cb(skb)->tx_seq != tx_seq)
3466 break;
3467
3468 skb = skb_dequeue(&chan->srej_q);
3469 control = bt_cb(skb)->sar << L2CAP_CTRL_SAR_SHIFT;
3470 l2cap_ertm_reassembly_sdu(chan, skb, control);
3471 chan->buffer_seq_srej =
3472 (chan->buffer_seq_srej + 1) % 64;
3473 tx_seq = (tx_seq + 1) % 64;
3474 }
3475 }
3476
3477 static void l2cap_resend_srejframe(struct l2cap_chan *chan, u8 tx_seq)
3478 {
3479 struct srej_list *l, *tmp;
3480 u16 control;
3481
3482 list_for_each_entry_safe(l, tmp, &chan->srej_l, list) {
3483 if (l->tx_seq == tx_seq) {
3484 list_del(&l->list);
3485 kfree(l);
3486 return;
3487 }
3488 control = L2CAP_SUPER_SELECT_REJECT;
3489 control |= l->tx_seq << L2CAP_CTRL_REQSEQ_SHIFT;
3490 l2cap_send_sframe(chan, control);
3491 list_del(&l->list);
3492 list_add_tail(&l->list, &chan->srej_l);
3493 }
3494 }
3495
3496 static void l2cap_send_srejframe(struct l2cap_chan *chan, u8 tx_seq)
3497 {
3498 struct srej_list *new;
3499 u16 control;
3500
3501 while (tx_seq != chan->expected_tx_seq) {
3502 control = L2CAP_SUPER_SELECT_REJECT;
3503 control |= chan->expected_tx_seq << L2CAP_CTRL_REQSEQ_SHIFT;
3504 l2cap_send_sframe(chan, control);
3505
3506 new = kzalloc(sizeof(struct srej_list), GFP_ATOMIC);
3507 new->tx_seq = chan->expected_tx_seq;
3508 chan->expected_tx_seq = (chan->expected_tx_seq + 1) % 64;
3509 list_add_tail(&new->list, &chan->srej_l);
3510 }
3511 chan->expected_tx_seq = (chan->expected_tx_seq + 1) % 64;
3512 }
3513
/* Process a received ERTM I-frame.
 *
 * Handles acknowledgement of our outstanding frames (reqseq), in-order
 * delivery, and the SREJ-based recovery of out-of-order frames.  All
 * sequence arithmetic is modulo 64.  Consumes or queues skb; returns 0
 * in all handled cases.
 */
static inline int l2cap_data_channel_iframe(struct l2cap_chan *chan, u16 rx_control, struct sk_buff *skb)
{
	u8 tx_seq = __get_txseq(rx_control);
	u8 req_seq = __get_reqseq(rx_control);
	u8 sar = rx_control >> L2CAP_CTRL_SAR_SHIFT;
	int tx_seq_offset, expected_tx_seq_offset;
	int num_to_ack = (chan->tx_win/6) + 1;
	int err = 0;

	BT_DBG("chan %p len %d tx_seq %d rx_control 0x%4.4x", chan, skb->len,
							tx_seq, rx_control);

	/* F-bit answering our poll: stop the monitor timer and fall back
	 * to the retransmission timer if frames are still unacked. */
	if (L2CAP_CTRL_FINAL & rx_control &&
			chan->conn_state & L2CAP_CONN_WAIT_F) {
		__clear_monitor_timer(chan);
		if (chan->unacked_frames > 0)
			__set_retrans_timer(chan);
		chan->conn_state &= ~L2CAP_CONN_WAIT_F;
	}

	/* The piggybacked reqseq acknowledges our outgoing frames. */
	chan->expected_ack_seq = req_seq;
	l2cap_drop_acked_frames(chan);

	if (tx_seq == chan->expected_tx_seq)
		goto expected;

	tx_seq_offset = (tx_seq - chan->buffer_seq) % 64;
	if (tx_seq_offset < 0)
		tx_seq_offset += 64;

	/* invalid tx_seq */
	if (tx_seq_offset >= chan->tx_win) {
		l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
		goto drop;
	}

	if (chan->conn_state & L2CAP_CONN_LOCAL_BUSY)
		goto drop;

	if (chan->conn_state & L2CAP_CONN_SREJ_SENT) {
		struct srej_list *first;

		first = list_first_entry(&chan->srej_l,
				struct srej_list, list);
		if (tx_seq == first->tx_seq) {
			/* The oldest missing frame arrived: queue it and
			 * flush any contiguous run that follows. */
			l2cap_add_to_srej_queue(chan, skb, tx_seq, sar);
			l2cap_check_srej_gap(chan, tx_seq);

			list_del(&first->list);
			kfree(first);

			/* All gaps recovered: leave SREJ_SENT state. */
			if (list_empty(&chan->srej_l)) {
				chan->buffer_seq = chan->buffer_seq_srej;
				chan->conn_state &= ~L2CAP_CONN_SREJ_SENT;
				l2cap_send_ack(chan);
				BT_DBG("chan %p, Exit SREJ_SENT", chan);
			}
		} else {
			struct srej_list *l;

			/* duplicated tx_seq */
			if (l2cap_add_to_srej_queue(chan, skb, tx_seq, sar) < 0)
				goto drop;

			/* A frame we already SREJ'd arrived out of turn:
			 * re-request everything still missing before it. */
			list_for_each_entry(l, &chan->srej_l, list) {
				if (l->tx_seq == tx_seq) {
					l2cap_resend_srejframe(chan, tx_seq);
					return 0;
				}
			}
			l2cap_send_srejframe(chan, tx_seq);
		}
	} else {
		expected_tx_seq_offset =
			(chan->expected_tx_seq - chan->buffer_seq) % 64;
		if (expected_tx_seq_offset < 0)
			expected_tx_seq_offset += 64;

		/* duplicated tx_seq */
		if (tx_seq_offset < expected_tx_seq_offset)
			goto drop;

		/* First gap detected: enter SREJ recovery. */
		chan->conn_state |= L2CAP_CONN_SREJ_SENT;

		BT_DBG("chan %p, Enter SREJ", chan);

		INIT_LIST_HEAD(&chan->srej_l);
		chan->buffer_seq_srej = chan->buffer_seq;

		__skb_queue_head_init(&chan->srej_q);
		__skb_queue_head_init(&chan->busy_q);
		l2cap_add_to_srej_queue(chan, skb, tx_seq, sar);

		chan->conn_state |= L2CAP_CONN_SEND_PBIT;

		l2cap_send_srejframe(chan, tx_seq);

		__clear_ack_timer(chan);
	}
	return 0;

expected:
	chan->expected_tx_seq = (chan->expected_tx_seq + 1) % 64;

	/* While recovering, even in-order frames are staged in the SREJ
	 * queue so delivery order is preserved. */
	if (chan->conn_state & L2CAP_CONN_SREJ_SENT) {
		bt_cb(skb)->tx_seq = tx_seq;
		bt_cb(skb)->sar = sar;
		__skb_queue_tail(&chan->srej_q, skb);
		return 0;
	}

	err = l2cap_push_rx_skb(chan, skb, rx_control);
	if (err < 0)
		return 0;

	if (rx_control & L2CAP_CTRL_FINAL) {
		if (chan->conn_state & L2CAP_CONN_REJ_ACT)
			chan->conn_state &= ~L2CAP_CONN_REJ_ACT;
		else
			l2cap_retransmit_frames(chan);
	}

	__set_ack_timer(chan);

	/* Ack proactively once enough frames have accumulated. */
	chan->num_acked = (chan->num_acked + 1) % num_to_ack;
	if (chan->num_acked == num_to_ack - 1)
		l2cap_send_ack(chan);

	return 0;

drop:
	kfree_skb(skb);
	return 0;
}
3648
/* Process a received RR (Receiver Ready) supervisory frame.
 *
 * The reqseq acknowledges our outstanding frames.  A P-bit requires a
 * response with the F-bit set; an F-bit answers a poll of ours; a plain
 * RR clears remote-busy and resumes transmission.
 */
static inline void l2cap_data_channel_rrframe(struct l2cap_chan *chan, u16 rx_control)
{
	BT_DBG("chan %p, req_seq %d ctrl 0x%4.4x", chan, __get_reqseq(rx_control),
						rx_control);

	chan->expected_ack_seq = __get_reqseq(rx_control);
	l2cap_drop_acked_frames(chan);

	if (rx_control & L2CAP_CTRL_POLL) {
		/* Peer polled us: we must answer with the F-bit set. */
		chan->conn_state |= L2CAP_CONN_SEND_FBIT;
		if (chan->conn_state & L2CAP_CONN_SREJ_SENT) {
			if ((chan->conn_state & L2CAP_CONN_REMOTE_BUSY) &&
					(chan->unacked_frames > 0))
				__set_retrans_timer(chan);

			chan->conn_state &= ~L2CAP_CONN_REMOTE_BUSY;
			l2cap_send_srejtail(chan);
		} else {
			l2cap_send_i_or_rr_or_rnr(chan);
		}

	} else if (rx_control & L2CAP_CTRL_FINAL) {
		/* F-bit answers our poll. */
		chan->conn_state &= ~L2CAP_CONN_REMOTE_BUSY;

		/* Only retransmit if this final was not already consumed
		 * by a REJ exchange. */
		if (chan->conn_state & L2CAP_CONN_REJ_ACT)
			chan->conn_state &= ~L2CAP_CONN_REJ_ACT;
		else
			l2cap_retransmit_frames(chan);

	} else {
		if ((chan->conn_state & L2CAP_CONN_REMOTE_BUSY) &&
				(chan->unacked_frames > 0))
			__set_retrans_timer(chan);

		chan->conn_state &= ~L2CAP_CONN_REMOTE_BUSY;
		if (chan->conn_state & L2CAP_CONN_SREJ_SENT)
			l2cap_send_ack(chan);
		else
			l2cap_ertm_send(chan);
	}
}
3690
/* Handle a received REJ supervisory frame in ERTM mode.
 *
 * A REJ acknowledges all frames below ReqSeq and asks for
 * retransmission of everything from ReqSeq onwards.
 */
static inline void l2cap_data_channel_rejframe(struct l2cap_chan *chan, u16 rx_control)
{
	u8 tx_seq = __get_reqseq(rx_control);

	BT_DBG("chan %p, req_seq %d ctrl 0x%4.4x", chan, tx_seq, rx_control);

	chan->conn_state &= ~L2CAP_CONN_REMOTE_BUSY;

	/* Frames below ReqSeq are implicitly acknowledged by the REJ */
	chan->expected_ack_seq = tx_seq;
	l2cap_drop_acked_frames(chan);

	if (rx_control & L2CAP_CTRL_FINAL) {
		/* F-bit REJ closes a poll exchange: retransmit only if we
		 * have not already done so for this REJ */
		if (chan->conn_state & L2CAP_CONN_REJ_ACT)
			chan->conn_state &= ~L2CAP_CONN_REJ_ACT;
		else
			l2cap_retransmit_frames(chan);
	} else {
		l2cap_retransmit_frames(chan);

		/* While waiting for the F-bit, remember this REJ has
		 * already been acted upon */
		if (chan->conn_state & L2CAP_CONN_WAIT_F)
			chan->conn_state |= L2CAP_CONN_REJ_ACT;
	}
}
/* Handle a received SREJ supervisory frame in ERTM mode.
 *
 * A SREJ asks for retransmission of the single I-frame with
 * TxSeq == ReqSeq.  With the P-bit it additionally acknowledges
 * earlier frames and demands an F-bit response.
 */
static inline void l2cap_data_channel_srejframe(struct l2cap_chan *chan, u16 rx_control)
{
	u8 tx_seq = __get_reqseq(rx_control);

	BT_DBG("chan %p, req_seq %d ctrl 0x%4.4x", chan, tx_seq, rx_control);

	chan->conn_state &= ~L2CAP_CONN_REMOTE_BUSY;

	if (rx_control & L2CAP_CTRL_POLL) {
		/* SREJ with P-bit also acks frames below ReqSeq */
		chan->expected_ack_seq = tx_seq;
		l2cap_drop_acked_frames(chan);

		/* Respond with the F-bit set on the retransmitted frame */
		chan->conn_state |= L2CAP_CONN_SEND_FBIT;
		l2cap_retransmit_one_frame(chan, tx_seq);

		l2cap_ertm_send(chan);

		/* Remember which frame we resent so a later F-bit SREJ
		 * for the same seq is not retransmitted twice */
		if (chan->conn_state & L2CAP_CONN_WAIT_F) {
			chan->srej_save_reqseq = tx_seq;
			chan->conn_state |= L2CAP_CONN_SREJ_ACT;
		}
	} else if (rx_control & L2CAP_CTRL_FINAL) {
		/* F-bit SREJ: skip retransmission if this exact seq was
		 * already resent during the poll exchange */
		if ((chan->conn_state & L2CAP_CONN_SREJ_ACT) &&
				chan->srej_save_reqseq == tx_seq)
			chan->conn_state &= ~L2CAP_CONN_SREJ_ACT;
		else
			l2cap_retransmit_one_frame(chan, tx_seq);
	} else {
		l2cap_retransmit_one_frame(chan, tx_seq);
		if (chan->conn_state & L2CAP_CONN_WAIT_F) {
			chan->srej_save_reqseq = tx_seq;
			chan->conn_state |= L2CAP_CONN_SREJ_ACT;
		}
	}
}
3749
/* Handle a received RNR (Receiver Not Ready) supervisory frame (ERTM).
 *
 * The peer signals it is busy: stop retransmitting, record the busy
 * state, and answer a P-bit poll with an F-bit response.
 */
static inline void l2cap_data_channel_rnrframe(struct l2cap_chan *chan, u16 rx_control)
{
	u8 tx_seq = __get_reqseq(rx_control);

	BT_DBG("chan %p, req_seq %d ctrl 0x%4.4x", chan, tx_seq, rx_control);

	chan->conn_state |= L2CAP_CONN_REMOTE_BUSY;
	/* RNR still acknowledges frames below ReqSeq */
	chan->expected_ack_seq = tx_seq;
	l2cap_drop_acked_frames(chan);

	if (rx_control & L2CAP_CTRL_POLL)
		chan->conn_state |= L2CAP_CONN_SEND_FBIT;

	if (!(chan->conn_state & L2CAP_CONN_SREJ_SENT)) {
		/* No SREJ recovery in progress: just stop the
		 * retransmission timer and answer the poll if any */
		__clear_retrans_timer(chan);
		if (rx_control & L2CAP_CTRL_POLL)
			l2cap_send_rr_or_rnr(chan, L2CAP_CTRL_FINAL);
		return;
	}

	/* SREJ recovery in progress: keep asking for the missing tail */
	if (rx_control & L2CAP_CTRL_POLL)
		l2cap_send_srejtail(chan);
	else
		l2cap_send_sframe(chan, L2CAP_SUPER_RCV_READY);
}
3775
/* Dispatch a received supervisory frame (RR/REJ/SREJ/RNR) to its
 * handler and free the skb.
 *
 * A frame carrying the F-bit while we are waiting for one (WAIT_F)
 * ends the monitor period first.  Always returns 0; the skb is
 * consumed here.
 */
static inline int l2cap_data_channel_sframe(struct l2cap_chan *chan, u16 rx_control, struct sk_buff *skb)
{
	BT_DBG("chan %p rx_control 0x%4.4x len %d", chan, rx_control, skb->len);

	if (L2CAP_CTRL_FINAL & rx_control &&
			chan->conn_state & L2CAP_CONN_WAIT_F) {
		/* Poll answered: back from monitor to retransmission
		 * timing if frames are still outstanding */
		__clear_monitor_timer(chan);
		if (chan->unacked_frames > 0)
			__set_retrans_timer(chan);
		chan->conn_state &= ~L2CAP_CONN_WAIT_F;
	}

	/* L2CAP_CTRL_SUPERVISE is a 2-bit field, so all four values are
	 * covered below */
	switch (rx_control & L2CAP_CTRL_SUPERVISE) {
	case L2CAP_SUPER_RCV_READY:
		l2cap_data_channel_rrframe(chan, rx_control);
		break;

	case L2CAP_SUPER_REJECT:
		l2cap_data_channel_rejframe(chan, rx_control);
		break;

	case L2CAP_SUPER_SELECT_REJECT:
		l2cap_data_channel_srejframe(chan, rx_control);
		break;

	case L2CAP_SUPER_RCV_NOT_READY:
		l2cap_data_channel_rnrframe(chan, rx_control);
		break;
	}

	kfree_skb(skb);
	return 0;
}
3809
/* Validate and dispatch one received ERTM PDU (I-frame or S-frame).
 *
 * Checks FCS, payload length against MPS, and that ReqSeq only
 * acknowledges frames we actually sent; invalid frames either get
 * silently dropped (FCS) or tear the channel down (protocol error).
 * Always returns 0 and consumes the skb on the drop paths.
 */
static int l2cap_ertm_data_rcv(struct sock *sk, struct sk_buff *skb)
{
	struct l2cap_chan *chan = l2cap_pi(sk)->chan;
	u16 control;
	u8 req_seq;
	int len, next_tx_seq_offset, req_seq_offset;

	control = get_unaligned_le16(skb->data);
	skb_pull(skb, 2);
	len = skb->len;

	/*
	 * We can just drop the corrupted I-frame here.
	 * Receiver will miss it and start proper recovery
	 * procedures and ask retransmission.
	 */
	if (l2cap_check_fcs(chan, skb))
		goto drop;

	/* The first segment of a segmented SDU carries a 2-byte
	 * SDU-length field that is not payload */
	if (__is_sar_start(control) && __is_iframe(control))
		len -= 2;

	/* Neither is the FCS trailer, when in use */
	if (chan->fcs == L2CAP_FCS_CRC16)
		len -= 2;

	if (len > chan->mps) {
		l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
		goto drop;
	}

	req_seq = __get_reqseq(control);
	/* Sequence numbers are 6-bit, so offsets wrap modulo 64 */
	req_seq_offset = (req_seq - chan->expected_ack_seq) % 64;
	if (req_seq_offset < 0)
		req_seq_offset += 64;

	next_tx_seq_offset =
		(chan->next_tx_seq - chan->expected_ack_seq) % 64;
	if (next_tx_seq_offset < 0)
		next_tx_seq_offset += 64;

	/* check for invalid req-seq: the peer may not acknowledge
	 * frames we never transmitted */
	if (req_seq_offset > next_tx_seq_offset) {
		l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
		goto drop;
	}

	if (__is_iframe(control)) {
		/* A negative len means the PDU was shorter than its
		 * mandatory headers/trailer */
		if (len < 0) {
			l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
			goto drop;
		}

		l2cap_data_channel_iframe(chan, control, skb);
	} else {
		/* S-frames must not carry any payload */
		if (len != 0) {
			BT_ERR("%d", len);
			l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
			goto drop;
		}

		l2cap_data_channel_sframe(chan, control, skb);
	}

	return 0;

drop:
	kfree_skb(skb);
	return 0;
}
3879
/* Deliver an incoming PDU on a connection-oriented channel, dispatching
 * by the channel's operating mode (basic / ERTM / streaming).
 *
 * NOTE(review): l2cap_get_chan_by_scid() is expected to return with the
 * channel's socket bh-locked -- the matching bh_unlock_sock() happens
 * at 'done' below.  Confirm against its definition earlier in the file.
 * Always returns 0; the skb is consumed on every path.
 */
static inline int l2cap_data_channel(struct l2cap_conn *conn, u16 cid, struct sk_buff *skb)
{
	struct l2cap_chan *chan;
	struct sock *sk = NULL;
	u16 control;
	u8 tx_seq;
	int len;

	chan = l2cap_get_chan_by_scid(conn, cid);
	if (!chan) {
		BT_DBG("unknown cid 0x%4.4x", cid);
		goto drop;
	}

	sk = chan->sk;

	BT_DBG("chan %p, len %d", chan, skb->len);

	if (chan->state != BT_CONNECTED)
		goto drop;

	switch (chan->mode) {
	case L2CAP_MODE_BASIC:
		/* If socket recv buffers overflows we drop data here
		 * which is *bad* because L2CAP has to be reliable.
		 * But we don't have any other choice. L2CAP doesn't
		 * provide flow control mechanism. */

		if (chan->imtu < skb->len)
			goto drop;

		if (!chan->ops->recv(chan->data, skb))
			goto done;
		break;

	case L2CAP_MODE_ERTM:
		/* If the socket is owned by user context, defer the
		 * frame to the backlog instead of processing it here */
		if (!sock_owned_by_user(sk)) {
			l2cap_ertm_data_rcv(sk, skb);
		} else {
			if (sk_add_backlog(sk, skb))
				goto drop;
		}

		goto done;

	case L2CAP_MODE_STREAMING:
		control = get_unaligned_le16(skb->data);
		skb_pull(skb, 2);
		len = skb->len;

		if (l2cap_check_fcs(chan, skb))
			goto drop;

		/* SAR-start frames carry a 2-byte SDU length field */
		if (__is_sar_start(control))
			len -= 2;

		if (chan->fcs == L2CAP_FCS_CRC16)
			len -= 2;

		/* Streaming mode carries I-frames only */
		if (len > chan->mps || len < 0 || __is_sframe(control))
			goto drop;

		tx_seq = __get_txseq(control);

		/* Streaming mode tolerates loss: just resync the
		 * expected sequence number on a gap */
		if (chan->expected_tx_seq == tx_seq)
			chan->expected_tx_seq = (chan->expected_tx_seq + 1) % 64;
		else
			chan->expected_tx_seq = (tx_seq + 1) % 64;

		l2cap_streaming_reassembly_sdu(chan, skb, control);

		goto done;

	default:
		BT_DBG("chan %p: bad mode 0x%2.2x", chan, chan->mode);
		break;
	}

drop:
	kfree_skb(skb);

done:
	if (sk)
		bh_unlock_sock(sk);

	return 0;
}
3967
3968 static inline int l2cap_conless_channel(struct l2cap_conn *conn, __le16 psm, struct sk_buff *skb)
3969 {
3970 struct sock *sk = NULL;
3971 struct l2cap_chan *chan;
3972
3973 chan = l2cap_global_chan_by_psm(0, psm, conn->src);
3974 if (!chan)
3975 goto drop;
3976
3977 sk = chan->sk;
3978
3979 bh_lock_sock(sk);
3980
3981 BT_DBG("sk %p, len %d", sk, skb->len);
3982
3983 if (chan->state != BT_BOUND && chan->state != BT_CONNECTED)
3984 goto drop;
3985
3986 if (l2cap_pi(sk)->chan->imtu < skb->len)
3987 goto drop;
3988
3989 if (!chan->ops->recv(chan->data, skb))
3990 goto done;
3991
3992 drop:
3993 kfree_skb(skb);
3994
3995 done:
3996 if (sk)
3997 bh_unlock_sock(sk);
3998 return 0;
3999 }
4000
4001 static inline int l2cap_att_channel(struct l2cap_conn *conn, __le16 cid, struct sk_buff *skb)
4002 {
4003 struct sock *sk = NULL;
4004 struct l2cap_chan *chan;
4005
4006 chan = l2cap_global_chan_by_scid(0, cid, conn->src);
4007 if (!chan)
4008 goto drop;
4009
4010 sk = chan->sk;
4011
4012 bh_lock_sock(sk);
4013
4014 BT_DBG("sk %p, len %d", sk, skb->len);
4015
4016 if (chan->state != BT_BOUND && chan->state != BT_CONNECTED)
4017 goto drop;
4018
4019 if (l2cap_pi(sk)->chan->imtu < skb->len)
4020 goto drop;
4021
4022 if (!chan->ops->recv(chan->data, skb))
4023 goto done;
4024
4025 drop:
4026 kfree_skb(skb);
4027
4028 done:
4029 if (sk)
4030 bh_unlock_sock(sk);
4031 return 0;
4032 }
4033
/* Dispatch one complete L2CAP frame to the proper channel handler
 * based on its destination CID (signaling, connectionless, ATT, SMP,
 * or a dynamically allocated data channel).
 *
 * Frames whose basic-header length disagrees with the actual payload
 * are discarded.  The skb is consumed by this function or its callees.
 */
static void l2cap_recv_frame(struct l2cap_conn *conn, struct sk_buff *skb)
{
	struct l2cap_hdr *lh = (void *) skb->data;
	u16 cid, len;
	__le16 psm;

	skb_pull(skb, L2CAP_HDR_SIZE);
	cid = __le16_to_cpu(lh->cid);
	len = __le16_to_cpu(lh->len);

	/* Reassembly in l2cap_recv_acldata() guarantees this normally
	 * holds; drop anything inconsistent */
	if (len != skb->len) {
		kfree_skb(skb);
		return;
	}

	BT_DBG("len %d, cid 0x%4.4x", len, cid);

	switch (cid) {
	case L2CAP_CID_LE_SIGNALING:
	case L2CAP_CID_SIGNALING:
		l2cap_sig_channel(conn, skb);
		break;

	case L2CAP_CID_CONN_LESS:
		/* Connectionless payload starts with a 2-byte PSM */
		psm = get_unaligned_le16(skb->data);
		skb_pull(skb, 2);
		l2cap_conless_channel(conn, psm, skb);
		break;

	case L2CAP_CID_LE_DATA:
		l2cap_att_channel(conn, cid, skb);
		break;

	case L2CAP_CID_SMP:
		/* An SMP failure kills the whole LE link */
		if (smp_sig_channel(conn, skb))
			l2cap_conn_del(conn->hcon, EACCES);
		break;

	default:
		l2cap_data_channel(conn, cid, skb);
		break;
	}
}
4077
4078 /* ---- L2CAP interface with lower layer (HCI) ---- */
4079
4080 static int l2cap_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
4081 {
4082 int exact = 0, lm1 = 0, lm2 = 0;
4083 struct l2cap_chan *c;
4084
4085 if (type != ACL_LINK)
4086 return -EINVAL;
4087
4088 BT_DBG("hdev %s, bdaddr %s", hdev->name, batostr(bdaddr));
4089
4090 /* Find listening sockets and check their link_mode */
4091 read_lock(&chan_list_lock);
4092 list_for_each_entry(c, &chan_list, global_l) {
4093 struct sock *sk = c->sk;
4094
4095 if (c->state != BT_LISTEN)
4096 continue;
4097
4098 if (!bacmp(&bt_sk(sk)->src, &hdev->bdaddr)) {
4099 lm1 |= HCI_LM_ACCEPT;
4100 if (c->role_switch)
4101 lm1 |= HCI_LM_MASTER;
4102 exact++;
4103 } else if (!bacmp(&bt_sk(sk)->src, BDADDR_ANY)) {
4104 lm2 |= HCI_LM_ACCEPT;
4105 if (c->role_switch)
4106 lm2 |= HCI_LM_MASTER;
4107 }
4108 }
4109 read_unlock(&chan_list_lock);
4110
4111 return exact ? lm1 : lm2;
4112 }
4113
4114 static int l2cap_connect_cfm(struct hci_conn *hcon, u8 status)
4115 {
4116 struct l2cap_conn *conn;
4117
4118 BT_DBG("hcon %p bdaddr %s status %d", hcon, batostr(&hcon->dst), status);
4119
4120 if (!(hcon->type == ACL_LINK || hcon->type == LE_LINK))
4121 return -EINVAL;
4122
4123 if (!status) {
4124 conn = l2cap_conn_add(hcon, status);
4125 if (conn)
4126 l2cap_conn_ready(conn);
4127 } else
4128 l2cap_conn_del(hcon, bt_err(status));
4129
4130 return 0;
4131 }
4132
4133 static int l2cap_disconn_ind(struct hci_conn *hcon)
4134 {
4135 struct l2cap_conn *conn = hcon->l2cap_data;
4136
4137 BT_DBG("hcon %p", hcon);
4138
4139 if ((hcon->type != ACL_LINK && hcon->type != LE_LINK) || !conn)
4140 return 0x13;
4141
4142 return conn->disc_reason;
4143 }
4144
4145 static int l2cap_disconn_cfm(struct hci_conn *hcon, u8 reason)
4146 {
4147 BT_DBG("hcon %p reason %d", hcon, reason);
4148
4149 if (!(hcon->type == ACL_LINK || hcon->type == LE_LINK))
4150 return -EINVAL;
4151
4152 l2cap_conn_del(hcon, bt_err(reason));
4153
4154 return 0;
4155 }
4156
4157 static inline void l2cap_check_encryption(struct l2cap_chan *chan, u8 encrypt)
4158 {
4159 if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED)
4160 return;
4161
4162 if (encrypt == 0x00) {
4163 if (chan->sec_level == BT_SECURITY_MEDIUM) {
4164 __clear_chan_timer(chan);
4165 __set_chan_timer(chan, HZ * 5);
4166 } else if (chan->sec_level == BT_SECURITY_HIGH)
4167 l2cap_chan_close(chan, ECONNREFUSED);
4168 } else {
4169 if (chan->sec_level == BT_SECURITY_MEDIUM)
4170 __clear_chan_timer(chan);
4171 }
4172 }
4173
/* HCI security-procedure-complete callback.
 *
 * Walks every channel on the connection and advances its state
 * machine: established channels re-check encryption, channels waiting
 * to connect (BT_CONNECT) send their Connection Request on success,
 * and incoming channels held for security (BT_CONNECT2) are answered
 * with success or a security-block rejection.  Always returns 0.
 */
static int l2cap_security_cfm(struct hci_conn *hcon, u8 status, u8 encrypt)
{
	struct l2cap_conn *conn = hcon->l2cap_data;
	struct l2cap_chan *chan;

	if (!conn)
		return 0;

	BT_DBG("conn %p", conn);

	read_lock(&conn->chan_lock);

	list_for_each_entry(chan, &conn->chan_l, list) {
		struct sock *sk = chan->sk;

		bh_lock_sock(sk);

		/* A channel with its own connect pending will be handled
		 * when its own security procedure completes */
		if (chan->conf_state & L2CAP_CONF_CONNECT_PEND) {
			bh_unlock_sock(sk);
			continue;
		}

		if (!status && (chan->state == BT_CONNECTED ||
						chan->state == BT_CONFIG)) {
			l2cap_check_encryption(chan, encrypt);
			bh_unlock_sock(sk);
			continue;
		}

		if (chan->state == BT_CONNECT) {
			if (!status) {
				/* Security done: now send the deferred
				 * Connection Request */
				struct l2cap_conn_req req;
				req.scid = cpu_to_le16(chan->scid);
				req.psm  = chan->psm;

				chan->ident = l2cap_get_ident(conn);
				chan->conf_state |= L2CAP_CONF_CONNECT_PEND;

				l2cap_send_cmd(conn, chan->ident,
					L2CAP_CONN_REQ, sizeof(req), &req);
			} else {
				/* Security failed: short timer to tear the
				 * channel down */
				__clear_chan_timer(chan);
				__set_chan_timer(chan, HZ / 10);
			}
		} else if (chan->state == BT_CONNECT2) {
			/* Incoming connection was held for security: answer
			 * the peer's Connection Request now */
			struct l2cap_conn_rsp rsp;
			__u16 result;

			if (!status) {
				l2cap_state_change(chan, BT_CONFIG);
				result = L2CAP_CR_SUCCESS;
			} else {
				l2cap_state_change(chan, BT_DISCONN);
				__set_chan_timer(chan, HZ / 10);
				result = L2CAP_CR_SEC_BLOCK;
			}

			rsp.scid   = cpu_to_le16(chan->dcid);
			rsp.dcid   = cpu_to_le16(chan->scid);
			rsp.result = cpu_to_le16(result);
			rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
			l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
							sizeof(rsp), &rsp);
		}

		bh_unlock_sock(sk);
	}

	read_unlock(&conn->chan_lock);

	return 0;
}
4246
/* Receive one ACL fragment from HCI and reassemble L2CAP frames.
 *
 * A start fragment (no ACL_CONT flag) carries the basic L2CAP header,
 * from which the full frame length is known; continuation fragments
 * are appended to conn->rx_skb until rx_len reaches zero, at which
 * point the complete frame is handed to l2cap_recv_frame().  Any
 * protocol violation marks the connection unreliable and drops the
 * fragment.  Always returns 0; the incoming skb is consumed.
 */
static int l2cap_recv_acldata(struct hci_conn *hcon, struct sk_buff *skb, u16 flags)
{
	struct l2cap_conn *conn = hcon->l2cap_data;

	/* First data on a new link: create the L2CAP connection lazily */
	if (!conn)
		conn = l2cap_conn_add(hcon, 0);

	if (!conn)
		goto drop;

	BT_DBG("conn %p len %d flags 0x%x", conn, skb->len, flags);

	if (!(flags & ACL_CONT)) {
		struct l2cap_hdr *hdr;
		struct l2cap_chan *chan;
		u16 cid;
		int len;

		/* A start fragment while reassembly is in progress means
		 * the previous frame was truncated: discard it */
		if (conn->rx_len) {
			BT_ERR("Unexpected start frame (len %d)", skb->len);
			kfree_skb(conn->rx_skb);
			conn->rx_skb = NULL;
			conn->rx_len = 0;
			l2cap_conn_unreliable(conn, ECOMM);
		}

		/* Start fragment always begin with Basic L2CAP header */
		if (skb->len < L2CAP_HDR_SIZE) {
			BT_ERR("Frame is too short (len %d)", skb->len);
			l2cap_conn_unreliable(conn, ECOMM);
			goto drop;
		}

		hdr = (struct l2cap_hdr *) skb->data;
		len = __le16_to_cpu(hdr->len) + L2CAP_HDR_SIZE;
		cid = __le16_to_cpu(hdr->cid);

		if (len == skb->len) {
			/* Complete frame received */
			l2cap_recv_frame(conn, skb);
			return 0;
		}

		BT_DBG("Start: total len %d, frag len %d", len, skb->len);

		if (skb->len > len) {
			BT_ERR("Frame is too long (len %d, expected len %d)",
				skb->len, len);
			l2cap_conn_unreliable(conn, ECOMM);
			goto drop;
		}

		/* NOTE(review): l2cap_get_chan_by_scid is expected to
		 * return with the socket bh-locked, hence the unlocks
		 * below -- confirm against its definition */
		chan = l2cap_get_chan_by_scid(conn, cid);

		if (chan && chan->sk) {
			struct sock *sk = chan->sk;

			/* Reject early if the full frame would exceed the
			 * channel's incoming MTU */
			if (chan->imtu < len - L2CAP_HDR_SIZE) {
				BT_ERR("Frame exceeding recv MTU (len %d, "
						"MTU %d)", len,
						chan->imtu);
				bh_unlock_sock(sk);
				l2cap_conn_unreliable(conn, ECOMM);
				goto drop;
			}
			bh_unlock_sock(sk);
		}

		/* Allocate skb for the complete frame (with header) */
		conn->rx_skb = bt_skb_alloc(len, GFP_ATOMIC);
		if (!conn->rx_skb)
			goto drop;

		skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
						skb->len);
		conn->rx_len = len - skb->len;
	} else {
		BT_DBG("Cont: frag len %d (expecting %d)", skb->len, conn->rx_len);

		/* Continuation without a pending start fragment */
		if (!conn->rx_len) {
			BT_ERR("Unexpected continuation frame (len %d)", skb->len);
			l2cap_conn_unreliable(conn, ECOMM);
			goto drop;
		}

		if (skb->len > conn->rx_len) {
			BT_ERR("Fragment is too long (len %d, expected %d)",
					skb->len, conn->rx_len);
			kfree_skb(conn->rx_skb);
			conn->rx_skb = NULL;
			conn->rx_len = 0;
			l2cap_conn_unreliable(conn, ECOMM);
			goto drop;
		}

		skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
						skb->len);
		conn->rx_len -= skb->len;

		if (!conn->rx_len) {
			/* Complete frame received */
			l2cap_recv_frame(conn, conn->rx_skb);
			conn->rx_skb = NULL;
		}
	}

drop:
	kfree_skb(skb);
	return 0;
}
4357
4358 static int l2cap_debugfs_show(struct seq_file *f, void *p)
4359 {
4360 struct l2cap_chan *c;
4361
4362 read_lock_bh(&chan_list_lock);
4363
4364 list_for_each_entry(c, &chan_list, global_l) {
4365 struct sock *sk = c->sk;
4366
4367 seq_printf(f, "%s %s %d %d 0x%4.4x 0x%4.4x %d %d %d %d\n",
4368 batostr(&bt_sk(sk)->src),
4369 batostr(&bt_sk(sk)->dst),
4370 c->state, __le16_to_cpu(c->psm),
4371 c->scid, c->dcid, c->imtu, c->omtu,
4372 c->sec_level, c->mode);
4373 }
4374
4375 read_unlock_bh(&chan_list_lock);
4376
4377 return 0;
4378 }
4379
/* debugfs open callback: bind the seq_file single-shot show routine */
static int l2cap_debugfs_open(struct inode *inode, struct file *file)
{
	return single_open(file, l2cap_debugfs_show, inode->i_private);
}
4384
/* File operations for the debugfs "l2cap" channel-dump file */
static const struct file_operations l2cap_debugfs_fops = {
	.open		= l2cap_debugfs_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};
4391
/* dentry of the debugfs "l2cap" file (NULL when not created) */
static struct dentry *l2cap_debugfs;

/* Callbacks registered with the HCI core so it can hand ACL events
 * and data up to L2CAP */
static struct hci_proto l2cap_hci_proto = {
	.name		= "L2CAP",
	.id		= HCI_PROTO_L2CAP,
	.connect_ind	= l2cap_connect_ind,
	.connect_cfm	= l2cap_connect_cfm,
	.disconn_ind	= l2cap_disconn_ind,
	.disconn_cfm	= l2cap_disconn_cfm,
	.security_cfm	= l2cap_security_cfm,
	.recv_acldata	= l2cap_recv_acldata
};
4404
4405 int __init l2cap_init(void)
4406 {
4407 int err;
4408
4409 err = l2cap_init_sockets();
4410 if (err < 0)
4411 return err;
4412
4413 _busy_wq = create_singlethread_workqueue("l2cap");
4414 if (!_busy_wq) {
4415 err = -ENOMEM;
4416 goto error;
4417 }
4418
4419 err = hci_register_proto(&l2cap_hci_proto);
4420 if (err < 0) {
4421 BT_ERR("L2CAP protocol registration failed");
4422 bt_sock_unregister(BTPROTO_L2CAP);
4423 goto error;
4424 }
4425
4426 if (bt_debugfs) {
4427 l2cap_debugfs = debugfs_create_file("l2cap", 0444,
4428 bt_debugfs, NULL, &l2cap_debugfs_fops);
4429 if (!l2cap_debugfs)
4430 BT_ERR("Failed to create L2CAP debug file");
4431 }
4432
4433 return 0;
4434
4435 error:
4436 destroy_workqueue(_busy_wq);
4437 l2cap_cleanup_sockets();
4438 return err;
4439 }
4440
/* Tear down the L2CAP layer in reverse order of l2cap_init():
 * debugfs entry, busy workqueue, HCI protocol hooks, sockets.
 */
void l2cap_exit(void)
{
	debugfs_remove(l2cap_debugfs);

	/* Finish any queued busy-state work before destroying the queue */
	flush_workqueue(_busy_wq);
	destroy_workqueue(_busy_wq);

	if (hci_unregister_proto(&l2cap_hci_proto) < 0)
		BT_ERR("L2CAP protocol unregistration failed");

	l2cap_cleanup_sockets();
}
4453
/* Module parameter: allow disabling ERTM at load time (and via sysfs) */
module_param(disable_ertm, bool, 0644);
MODULE_PARM_DESC(disable_ertm, "Disable enhanced retransmission mode");