/*
 * Patch subject: Bluetooth: EFS: add efs option in L2CAP conf req
 * File: net/bluetooth/l2cap_core.c
 */
1 /*
2 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (C) 2000-2001 Qualcomm Incorporated
4 Copyright (C) 2009-2010 Gustavo F. Padovan <gustavo@padovan.org>
5 Copyright (C) 2010 Google Inc.
6
7 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
8
9 This program is free software; you can redistribute it and/or modify
10 it under the terms of the GNU General Public License version 2 as
11 published by the Free Software Foundation;
12
13 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
14 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
15 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
16 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
17 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
18 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
19 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
20 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
21
22 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
23 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
24 SOFTWARE IS DISCLAIMED.
25 */
26
27 /* Bluetooth L2CAP core. */
28
29 #include <linux/module.h>
30
31 #include <linux/types.h>
32 #include <linux/capability.h>
33 #include <linux/errno.h>
34 #include <linux/kernel.h>
35 #include <linux/sched.h>
36 #include <linux/slab.h>
37 #include <linux/poll.h>
38 #include <linux/fcntl.h>
39 #include <linux/init.h>
40 #include <linux/interrupt.h>
41 #include <linux/socket.h>
42 #include <linux/skbuff.h>
43 #include <linux/list.h>
44 #include <linux/device.h>
45 #include <linux/debugfs.h>
46 #include <linux/seq_file.h>
47 #include <linux/uaccess.h>
48 #include <linux/crc16.h>
49 #include <net/sock.h>
50
51 #include <asm/system.h>
52 #include <asm/unaligned.h>
53
54 #include <net/bluetooth/bluetooth.h>
55 #include <net/bluetooth/hci_core.h>
56 #include <net/bluetooth/l2cap.h>
57 #include <net/bluetooth/smp.h>
58
/* Module switches (set at load time). */
int disable_ertm;	/* nonzero: do not offer ERTM/streaming modes */
int enable_hs;		/* nonzero: enable high-speed (AMP) support */

/* Locally supported feature mask advertised in information responses. */
static u32 l2cap_feat_mask = L2CAP_FEAT_FIXED_CHAN;
/* Supported fixed channels bitmap; 0x02 presumably marks the signalling
 * channel — TODO confirm against L2CAP_FC_* definitions. */
static u8 l2cap_fixed_chan[8] = { 0x02, };

/* All registered channels across every connection, guarded by
 * chan_list_lock. */
static LIST_HEAD(chan_list);
static DEFINE_RWLOCK(chan_list_lock);

/* Forward declarations for helpers used before their definitions. */
static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
				u8 code, u8 ident, u16 dlen, void *data);
static void l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len,
								void *data);
static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data);
static void l2cap_send_disconn_req(struct l2cap_conn *conn,
				struct l2cap_chan *chan, int err);

static int l2cap_ertm_data_rcv(struct sock *sk, struct sk_buff *skb);
77
78 /* ---- L2CAP channels ---- */
79
/* Take a reference on a channel. */
static inline void chan_hold(struct l2cap_chan *c)
{
	atomic_inc(&c->refcnt);
}
84
/* Drop a reference; frees the channel when the last one goes away. */
static inline void chan_put(struct l2cap_chan *c)
{
	if (atomic_dec_and_test(&c->refcnt))
		kfree(c);
}
90
91 static struct l2cap_chan *__l2cap_get_chan_by_dcid(struct l2cap_conn *conn, u16 cid)
92 {
93 struct l2cap_chan *c;
94
95 list_for_each_entry(c, &conn->chan_l, list) {
96 if (c->dcid == cid)
97 return c;
98 }
99 return NULL;
100
101 }
102
103 static struct l2cap_chan *__l2cap_get_chan_by_scid(struct l2cap_conn *conn, u16 cid)
104 {
105 struct l2cap_chan *c;
106
107 list_for_each_entry(c, &conn->chan_l, list) {
108 if (c->scid == cid)
109 return c;
110 }
111 return NULL;
112 }
113
/* Find channel with given SCID.
 * Returns the channel with its socket bh-locked, or NULL. */
static struct l2cap_chan *l2cap_get_chan_by_scid(struct l2cap_conn *conn, u16 cid)
{
	struct l2cap_chan *c;

	/* Lock the socket before dropping chan_lock so the channel cannot
	 * disappear between lookup and return. */
	read_lock(&conn->chan_lock);
	c = __l2cap_get_chan_by_scid(conn, cid);
	if (c)
		bh_lock_sock(c->sk);
	read_unlock(&conn->chan_lock);
	return c;
}
127
128 static struct l2cap_chan *__l2cap_get_chan_by_ident(struct l2cap_conn *conn, u8 ident)
129 {
130 struct l2cap_chan *c;
131
132 list_for_each_entry(c, &conn->chan_l, list) {
133 if (c->ident == ident)
134 return c;
135 }
136 return NULL;
137 }
138
/* Find channel by signalling identifier.
 * Returns the channel with its socket bh-locked, or NULL. */
static inline struct l2cap_chan *l2cap_get_chan_by_ident(struct l2cap_conn *conn, u8 ident)
{
	struct l2cap_chan *c;

	/* Same lock ordering as l2cap_get_chan_by_scid(). */
	read_lock(&conn->chan_lock);
	c = __l2cap_get_chan_by_ident(conn, ident);
	if (c)
		bh_lock_sock(c->sk);
	read_unlock(&conn->chan_lock);
	return c;
}
150
151 static struct l2cap_chan *__l2cap_global_chan_by_addr(__le16 psm, bdaddr_t *src)
152 {
153 struct l2cap_chan *c;
154
155 list_for_each_entry(c, &chan_list, global_l) {
156 if (c->sport == psm && !bacmp(&bt_sk(c->sk)->src, src))
157 goto found;
158 }
159
160 c = NULL;
161 found:
162 return c;
163 }
164
/*
 * Bind @chan to @psm on source address @src, or pick a free dynamic PSM
 * when @psm is zero.
 *
 * Returns 0 on success, -EADDRINUSE when the requested PSM/address pair
 * is already taken, -EINVAL when the dynamic range is exhausted.
 */
int l2cap_add_psm(struct l2cap_chan *chan, bdaddr_t *src, __le16 psm)
{
	int err;

	write_lock_bh(&chan_list_lock);

	if (psm && __l2cap_global_chan_by_addr(psm, src)) {
		err = -EADDRINUSE;
		goto done;
	}

	if (psm) {
		chan->psm = psm;
		chan->sport = psm;
		err = 0;
	} else {
		u16 p;

		err = -EINVAL;
		/* Scan odd values in 0x1001..0x10ff for an unbound
		 * dynamic PSM. */
		for (p = 0x1001; p < 0x1100; p += 2)
			if (!__l2cap_global_chan_by_addr(cpu_to_le16(p), src)) {
				chan->psm = cpu_to_le16(p);
				chan->sport = cpu_to_le16(p);
				err = 0;
				break;
			}
	}

done:
	write_unlock_bh(&chan_list_lock);
	return err;
}
197
/*
 * Assign a fixed source CID to @chan.  Always succeeds (returns 0);
 * the lock only serialises against concurrent global-list users.
 */
int l2cap_add_scid(struct l2cap_chan *chan, __u16 scid)
{
	write_lock_bh(&chan_list_lock);

	chan->scid = scid;

	write_unlock_bh(&chan_list_lock);

	return 0;
}
208
209 static u16 l2cap_alloc_cid(struct l2cap_conn *conn)
210 {
211 u16 cid = L2CAP_CID_DYN_START;
212
213 for (; cid < L2CAP_CID_DYN_END; cid++) {
214 if (!__l2cap_get_chan_by_scid(conn, cid))
215 return cid;
216 }
217
218 return 0;
219 }
220
221 static void l2cap_set_timer(struct l2cap_chan *chan, struct timer_list *timer, long timeout)
222 {
223 BT_DBG("chan %p state %d timeout %ld", chan->sk, chan->state, timeout);
224
225 if (!mod_timer(timer, jiffies + msecs_to_jiffies(timeout)))
226 chan_hold(chan);
227 }
228
/*
 * Cancel @timer if pending and drop the channel reference the pending
 * timer held (see l2cap_set_timer()).
 */
static void l2cap_clear_timer(struct l2cap_chan *chan, struct timer_list *timer)
{
	BT_DBG("chan %p state %d", chan, chan->state);

	/* Only drop the ref if we actually deactivated a pending timer;
	 * otherwise the handler (or nobody) owns it. */
	if (timer_pending(timer) && del_timer(timer))
		chan_put(chan);
}
236
/* Update the channel state and notify the socket layer via its ops. */
static void l2cap_state_change(struct l2cap_chan *chan, int state)
{
	chan->state = state;
	chan->ops->state_change(chan->data, state);
}
242
/*
 * chan_timer expiry handler: close the channel with an error code that
 * depends on the state it stalled in.  The pending timer held a channel
 * reference; every path below releases it via chan_put().
 */
static void l2cap_chan_timeout(unsigned long arg)
{
	struct l2cap_chan *chan = (struct l2cap_chan *) arg;
	struct sock *sk = chan->sk;
	int reason;

	BT_DBG("chan %p state %d", chan, chan->state);

	bh_lock_sock(sk);

	if (sock_owned_by_user(sk)) {
		/* sk is owned by user. Try again later */
		__set_chan_timer(chan, HZ / 5);
		bh_unlock_sock(sk);
		chan_put(chan);
		return;
	}

	/* Map the stalled state to a disconnect reason. */
	if (chan->state == BT_CONNECTED || chan->state == BT_CONFIG)
		reason = ECONNREFUSED;
	else if (chan->state == BT_CONNECT &&
			chan->sec_level != BT_SECURITY_SDP)
		reason = ECONNREFUSED;
	else
		reason = ETIMEDOUT;

	l2cap_chan_close(chan, reason);

	bh_unlock_sock(sk);

	chan->ops->close(chan->data);
	chan_put(chan);
}
276
/*
 * Allocate a channel bound to socket @sk, register it in the global list
 * and set up its state timer.  Returns NULL on allocation failure.
 * The caller owns the initial reference (refcnt = 1).
 */
struct l2cap_chan *l2cap_chan_create(struct sock *sk)
{
	struct l2cap_chan *chan;

	chan = kzalloc(sizeof(*chan), GFP_ATOMIC);
	if (!chan)
		return NULL;

	chan->sk = sk;

	write_lock_bh(&chan_list_lock);
	list_add(&chan->global_l, &chan_list);
	write_unlock_bh(&chan_list_lock);

	setup_timer(&chan->chan_timer, l2cap_chan_timeout, (unsigned long) chan);

	chan->state = BT_OPEN;

	atomic_set(&chan->refcnt, 1);

	return chan;
}
299
/*
 * Unregister @chan from the global list and drop the caller's reference;
 * memory is freed once the last remaining reference is released.
 */
void l2cap_chan_destroy(struct l2cap_chan *chan)
{
	write_lock_bh(&chan_list_lock);
	list_del(&chan->global_l);
	write_unlock_bh(&chan_list_lock);

	chan_put(chan);
}
308
/*
 * Attach @chan to @conn: assign source/destination CIDs and default MTU
 * by channel type, seed best-effort extended-flow-spec defaults, and link
 * the channel into the connection's channel list.
 * Caller must hold conn->chan_lock for writing.
 */
static void __l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan)
{
	BT_DBG("conn %p, psm 0x%2.2x, dcid 0x%4.4x", conn,
			chan->psm, chan->dcid);

	/* Default disconnect reason 0x13: remote user terminated. */
	conn->disc_reason = 0x13;

	chan->conn = conn;

	if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED) {
		if (conn->hcon->type == LE_LINK) {
			/* LE connection */
			chan->omtu = L2CAP_LE_DEFAULT_MTU;
			chan->scid = L2CAP_CID_LE_DATA;
			chan->dcid = L2CAP_CID_LE_DATA;
		} else {
			/* Alloc CID for connection-oriented socket */
			chan->scid = l2cap_alloc_cid(conn);
			chan->omtu = L2CAP_DEFAULT_MTU;
		}
	} else if (chan->chan_type == L2CAP_CHAN_CONN_LESS) {
		/* Connectionless socket */
		chan->scid = L2CAP_CID_CONN_LESS;
		chan->dcid = L2CAP_CID_CONN_LESS;
		chan->omtu = L2CAP_DEFAULT_MTU;
	} else {
		/* Raw socket can send/recv signalling messages only */
		chan->scid = L2CAP_CID_SIGNALING;
		chan->dcid = L2CAP_CID_SIGNALING;
		chan->omtu = L2CAP_DEFAULT_MTU;
	}

	/* Best-effort defaults for the extended flow specification (EFS). */
	chan->local_id = L2CAP_BESTEFFORT_ID;
	chan->local_stype = L2CAP_SERV_BESTEFFORT;
	chan->local_msdu = L2CAP_DEFAULT_MAX_SDU_SIZE;
	chan->local_sdu_itime = L2CAP_DEFAULT_SDU_ITIME;
	chan->local_acc_lat = L2CAP_DEFAULT_ACC_LAT;
	chan->local_flush_to = L2CAP_DEFAULT_FLUSH_TO;

	/* The connection's channel list owns its own reference. */
	chan_hold(chan);

	list_add(&chan->list, &conn->chan_l);
}
352
/* Delete channel.
 * Must be called on the locked socket.
 *
 * Unlinks @chan from its connection, marks it closed/zapped, propagates
 * @err to the socket, wakes the owner (or unlinks from the accept queue),
 * and — if configuration had completed — purges ERTM/streaming state.
 */
static void l2cap_chan_del(struct l2cap_chan *chan, int err)
{
	struct sock *sk = chan->sk;
	struct l2cap_conn *conn = chan->conn;
	struct sock *parent = bt_sk(sk)->parent;

	__clear_chan_timer(chan);

	BT_DBG("chan %p, conn %p, err %d", chan, conn, err);

	if (conn) {
		/* Delete from channel list */
		write_lock_bh(&conn->chan_lock);
		list_del(&chan->list);
		write_unlock_bh(&conn->chan_lock);
		/* Drop the reference held by the connection's list. */
		chan_put(chan);

		chan->conn = NULL;
		hci_conn_put(conn->hcon);
	}

	l2cap_state_change(chan, BT_CLOSED);
	sock_set_flag(sk, SOCK_ZAPPED);

	if (err)
		sk->sk_err = err;

	if (parent) {
		bt_accept_unlink(sk);
		parent->sk_data_ready(parent, 0);
	} else
		sk->sk_state_change(sk);

	/* Nothing more to tear down unless both config directions were
	 * completed (i.e. ERTM/streaming state may exist). */
	if (!(test_bit(CONF_OUTPUT_DONE, &chan->conf_state) &&
			test_bit(CONF_INPUT_DONE, &chan->conf_state)))
		return;

	skb_queue_purge(&chan->tx_q);

	if (chan->mode == L2CAP_MODE_ERTM) {
		struct srej_list *l, *tmp;

		__clear_retrans_timer(chan);
		__clear_monitor_timer(chan);
		__clear_ack_timer(chan);

		skb_queue_purge(&chan->srej_q);

		/* Free pending selective-reject list entries. */
		list_for_each_entry_safe(l, tmp, &chan->srej_l, list) {
			list_del(&l->list);
			kfree(l);
		}
	}
}
409
/*
 * Close every not-yet-accepted child channel of listening socket
 * @parent, notifying each channel's owner via ops->close().
 */
static void l2cap_chan_cleanup_listen(struct sock *parent)
{
	struct sock *sk;

	BT_DBG("parent %p", parent);

	/* Close not yet accepted channels */
	while ((sk = bt_accept_dequeue(parent, NULL))) {
		struct l2cap_chan *chan = l2cap_pi(sk)->chan;
		__clear_chan_timer(chan);
		lock_sock(sk);
		l2cap_chan_close(chan, ECONNRESET);
		release_sock(sk);
		chan->ops->close(chan->data);
	}
}
426
/*
 * Close @chan with error @reason, following the proper shutdown sequence
 * for its current state (disconnect request, connection-reject response,
 * or immediate deletion).  Must be called with the socket locked.
 */
void l2cap_chan_close(struct l2cap_chan *chan, int reason)
{
	struct l2cap_conn *conn = chan->conn;
	struct sock *sk = chan->sk;

	BT_DBG("chan %p state %d socket %p", chan, chan->state, sk->sk_socket);

	switch (chan->state) {
	case BT_LISTEN:
		l2cap_chan_cleanup_listen(sk);

		l2cap_state_change(chan, BT_CLOSED);
		sock_set_flag(sk, SOCK_ZAPPED);
		break;

	case BT_CONNECTED:
	case BT_CONFIG:
		/* Established ACL channels get an orderly disconnect
		 * request; everything else is torn down directly. */
		if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED &&
					conn->hcon->type == ACL_LINK) {
			__clear_chan_timer(chan);
			__set_chan_timer(chan, sk->sk_sndtimeo);
			l2cap_send_disconn_req(conn, chan, reason);
		} else
			l2cap_chan_del(chan, reason);
		break;

	case BT_CONNECT2:
		/* We received a connect request but never answered it;
		 * send a reject before deleting the channel. */
		if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED &&
					conn->hcon->type == ACL_LINK) {
			struct l2cap_conn_rsp rsp;
			__u16 result;

			if (bt_sk(sk)->defer_setup)
				result = L2CAP_CR_SEC_BLOCK;
			else
				result = L2CAP_CR_BAD_PSM;
			l2cap_state_change(chan, BT_DISCONN);

			rsp.scid = cpu_to_le16(chan->dcid);
			rsp.dcid = cpu_to_le16(chan->scid);
			rsp.result = cpu_to_le16(result);
			rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
			l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
							sizeof(rsp), &rsp);
		}

		l2cap_chan_del(chan, reason);
		break;

	case BT_CONNECT:
	case BT_DISCONN:
		l2cap_chan_del(chan, reason);
		break;

	default:
		sock_set_flag(sk, SOCK_ZAPPED);
		break;
	}
}
486
487 static inline u8 l2cap_get_auth_type(struct l2cap_chan *chan)
488 {
489 if (chan->chan_type == L2CAP_CHAN_RAW) {
490 switch (chan->sec_level) {
491 case BT_SECURITY_HIGH:
492 return HCI_AT_DEDICATED_BONDING_MITM;
493 case BT_SECURITY_MEDIUM:
494 return HCI_AT_DEDICATED_BONDING;
495 default:
496 return HCI_AT_NO_BONDING;
497 }
498 } else if (chan->psm == cpu_to_le16(0x0001)) {
499 if (chan->sec_level == BT_SECURITY_LOW)
500 chan->sec_level = BT_SECURITY_SDP;
501
502 if (chan->sec_level == BT_SECURITY_HIGH)
503 return HCI_AT_NO_BONDING_MITM;
504 else
505 return HCI_AT_NO_BONDING;
506 } else {
507 switch (chan->sec_level) {
508 case BT_SECURITY_HIGH:
509 return HCI_AT_GENERAL_BONDING_MITM;
510 case BT_SECURITY_MEDIUM:
511 return HCI_AT_GENERAL_BONDING;
512 default:
513 return HCI_AT_NO_BONDING;
514 }
515 }
516 }
517
/* Service level security */
/*
 * Request that the underlying link meet the channel's security level.
 * Returns the result of hci_conn_security() (nonzero when already
 * satisfied — confirm exact semantics against hci_core).
 */
static inline int l2cap_check_security(struct l2cap_chan *chan)
{
	struct l2cap_conn *conn = chan->conn;
	__u8 auth_type;

	auth_type = l2cap_get_auth_type(chan);

	return hci_conn_security(conn->hcon, chan->sec_level, auth_type);
}
528
/*
 * Allocate the next signalling identifier for @conn, wrapping within the
 * kernel-reserved range.
 */
static u8 l2cap_get_ident(struct l2cap_conn *conn)
{
	u8 id;

	/* Get next available identificator.
	 * 1 - 128 are used by kernel.
	 * 129 - 199 are reserved.
	 * 200 - 254 are used by utilities like l2ping, etc.
	 */

	spin_lock_bh(&conn->lock);

	if (++conn->tx_ident > 128)
		conn->tx_ident = 1;

	id = conn->tx_ident;

	spin_unlock_bh(&conn->lock);

	return id;
}
550
/*
 * Build and transmit a signalling command on @conn.  Allocation failure
 * is silently dropped (best-effort signalling).
 */
static void l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len, void *data)
{
	struct sk_buff *skb = l2cap_build_cmd(conn, code, ident, len, data);
	u8 flags;

	BT_DBG("code 0x%2.2x", code);

	if (!skb)
		return;

	/* Prefer non-flushable packets when the controller supports them. */
	if (lmp_no_flush_capable(conn->hcon->hdev))
		flags = ACL_START_NO_FLUSH;
	else
		flags = ACL_START;

	bt_cb(skb)->force_active = BT_POWER_FORCE_ACTIVE_ON;

	hci_send_acl(conn->hcon, skb, flags);
}
570
/*
 * Build and transmit an ERTM S-frame carrying @control on @chan.  Sets
 * the final/poll bits when pending and appends an FCS when CRC16 is in
 * use.  Silently returns if the channel is not connected or allocation
 * fails.
 */
static inline void l2cap_send_sframe(struct l2cap_chan *chan, u16 control)
{
	struct sk_buff *skb;
	struct l2cap_hdr *lh;
	struct l2cap_conn *conn = chan->conn;
	int count, hlen;
	u8 flags;

	if (chan->state != BT_CONNECTED)
		return;

	/* Header size depends on whether extended control fields are on. */
	if (test_bit(FLAG_EXT_CTRL, &chan->flags))
		hlen = L2CAP_EXT_HDR_SIZE;
	else
		hlen = L2CAP_ENH_HDR_SIZE;

	if (chan->fcs == L2CAP_FCS_CRC16)
		hlen += 2;

	BT_DBG("chan %p, control 0x%2.2x", chan, control);

	count = min_t(unsigned int, conn->mtu, hlen);

	control |= __set_sframe(chan);

	if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state))
		control |= __set_ctrl_final(chan);

	if (test_and_clear_bit(CONN_SEND_PBIT, &chan->conn_state))
		control |= __set_ctrl_poll(chan);

	skb = bt_skb_alloc(count, GFP_ATOMIC);
	if (!skb)
		return;

	lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
	lh->len = cpu_to_le16(hlen - L2CAP_HDR_SIZE);
	lh->cid = cpu_to_le16(chan->dcid);
	put_unaligned_le16(control, skb_put(skb, 2));

	if (chan->fcs == L2CAP_FCS_CRC16) {
		/* FCS covers everything except the FCS field itself. */
		u16 fcs = crc16(0, (u8 *)lh, count - 2);
		put_unaligned_le16(fcs, skb_put(skb, 2));
	}

	if (lmp_no_flush_capable(conn->hcon->hdev))
		flags = ACL_START_NO_FLUSH;
	else
		flags = ACL_START;

	bt_cb(skb)->force_active = test_bit(FLAG_FORCE_ACTIVE, &chan->flags);

	hci_send_acl(chan->conn->hcon, skb, flags);
}
625
/*
 * Send a supervisory frame acknowledging up to buffer_seq: RNR when we
 * are locally busy (and remember that we sent it), RR otherwise.
 */
static inline void l2cap_send_rr_or_rnr(struct l2cap_chan *chan, u16 control)
{
	if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
		control |= __set_ctrl_super(chan, L2CAP_SUPER_RNR);
		set_bit(CONN_RNR_SENT, &chan->conn_state);
	} else
		control |= __set_ctrl_super(chan, L2CAP_SUPER_RR);

	control |= __set_reqseq(chan, chan->buffer_seq);

	l2cap_send_sframe(chan, control);
}
638
/* True when no connect request is outstanding for this channel. */
static inline int __l2cap_no_conn_pending(struct l2cap_chan *chan)
{
	return !test_bit(CONF_CONNECT_PEND, &chan->conf_state);
}
643
/*
 * Kick off channel establishment: send a connect request once the remote
 * feature mask is known and security allows it; otherwise first query
 * the features via an information request.
 */
static void l2cap_do_start(struct l2cap_chan *chan)
{
	struct l2cap_conn *conn = chan->conn;

	if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) {
		/* Feature exchange in flight — wait for it to finish. */
		if (!(conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE))
			return;

		if (l2cap_check_security(chan) &&
				__l2cap_no_conn_pending(chan)) {
			struct l2cap_conn_req req;
			req.scid = cpu_to_le16(chan->scid);
			req.psm = chan->psm;

			chan->ident = l2cap_get_ident(conn);
			set_bit(CONF_CONNECT_PEND, &chan->conf_state);

			l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_REQ,
							sizeof(req), &req);
		}
	} else {
		struct l2cap_info_req req;
		req.type = cpu_to_le16(L2CAP_IT_FEAT_MASK);

		conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
		conn->info_ident = l2cap_get_ident(conn);

		mod_timer(&conn->info_timer, jiffies +
					msecs_to_jiffies(L2CAP_INFO_TIMEOUT));

		l2cap_send_cmd(conn, conn->info_ident,
					L2CAP_INFO_REQ, sizeof(req), &req);
	}
}
678
679 static inline int l2cap_mode_supported(__u8 mode, __u32 feat_mask)
680 {
681 u32 local_feat_mask = l2cap_feat_mask;
682 if (!disable_ertm)
683 local_feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING;
684
685 switch (mode) {
686 case L2CAP_MODE_ERTM:
687 return L2CAP_FEAT_ERTM & feat_mask & local_feat_mask;
688 case L2CAP_MODE_STREAMING:
689 return L2CAP_FEAT_STREAMING & feat_mask & local_feat_mask;
690 default:
691 return 0x00;
692 }
693 }
694
/*
 * Send a disconnect request for @chan, stop any ERTM timers, move the
 * channel to BT_DISCONN and record @err on the socket.
 */
static void l2cap_send_disconn_req(struct l2cap_conn *conn, struct l2cap_chan *chan, int err)
{
	struct sock *sk;
	struct l2cap_disconn_req req;

	if (!conn)
		return;

	sk = chan->sk;

	if (chan->mode == L2CAP_MODE_ERTM) {
		__clear_retrans_timer(chan);
		__clear_monitor_timer(chan);
		__clear_ack_timer(chan);
	}

	req.dcid = cpu_to_le16(chan->dcid);
	req.scid = cpu_to_le16(chan->scid);
	l2cap_send_cmd(conn, l2cap_get_ident(conn),
			L2CAP_DISCONN_REQ, sizeof(req), &req);

	l2cap_state_change(chan, BT_DISCONN);
	sk->sk_err = err;
}
719
720 /* ---- L2CAP connections ---- */
/*
 * Advance every connection-oriented channel on @conn after the feature
 * exchange completed: send connect requests for BT_CONNECT channels and
 * connect responses (plus the first config request) for BT_CONNECT2.
 */
static void l2cap_conn_start(struct l2cap_conn *conn)
{
	struct l2cap_chan *chan, *tmp;

	BT_DBG("conn %p", conn);

	read_lock(&conn->chan_lock);

	list_for_each_entry_safe(chan, tmp, &conn->chan_l, list) {
		struct sock *sk = chan->sk;

		bh_lock_sock(sk);

		if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
			bh_unlock_sock(sk);
			continue;
		}

		if (chan->state == BT_CONNECT) {
			struct l2cap_conn_req req;

			if (!l2cap_check_security(chan) ||
					!__l2cap_no_conn_pending(chan)) {
				bh_unlock_sock(sk);
				continue;
			}

			/* Channel demands a mode the remote lacks and may
			 * not fall back: abort it instead of connecting. */
			if (!l2cap_mode_supported(chan->mode, conn->feat_mask)
					&& test_bit(CONF_STATE2_DEVICE,
					&chan->conf_state)) {
				/* l2cap_chan_close() calls list_del(chan)
				 * so release the lock */
				read_unlock(&conn->chan_lock);
				l2cap_chan_close(chan, ECONNRESET);
				read_lock(&conn->chan_lock);
				bh_unlock_sock(sk);
				continue;
			}

			req.scid = cpu_to_le16(chan->scid);
			req.psm = chan->psm;

			chan->ident = l2cap_get_ident(conn);
			set_bit(CONF_CONNECT_PEND, &chan->conf_state);

			l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_REQ,
							sizeof(req), &req);

		} else if (chan->state == BT_CONNECT2) {
			struct l2cap_conn_rsp rsp;
			char buf[128];
			rsp.scid = cpu_to_le16(chan->dcid);
			rsp.dcid = cpu_to_le16(chan->scid);

			if (l2cap_check_security(chan)) {
				if (bt_sk(sk)->defer_setup) {
					/* Let userspace decide; signal the
					 * listener and answer "pending". */
					struct sock *parent = bt_sk(sk)->parent;
					rsp.result = cpu_to_le16(L2CAP_CR_PEND);
					rsp.status = cpu_to_le16(L2CAP_CS_AUTHOR_PEND);
					if (parent)
						parent->sk_data_ready(parent, 0);

				} else {
					l2cap_state_change(chan, BT_CONFIG);
					rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
					rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
				}
			} else {
				rsp.result = cpu_to_le16(L2CAP_CR_PEND);
				rsp.status = cpu_to_le16(L2CAP_CS_AUTHEN_PEND);
			}

			l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
							sizeof(rsp), &rsp);

			if (test_bit(CONF_REQ_SENT, &chan->conf_state) ||
					rsp.result != L2CAP_CR_SUCCESS) {
				bh_unlock_sock(sk);
				continue;
			}

			/* Kick off configuration for accepted channels. */
			set_bit(CONF_REQ_SENT, &chan->conf_state);
			l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
						l2cap_build_conf_req(chan, buf), buf);
			chan->num_conf_req++;
		}

		bh_unlock_sock(sk);
	}

	read_unlock(&conn->chan_lock);
}
813
/* Find socket with cid and source bdaddr.
 * Returns closest match, locked.
 * (A wildcard-bound channel is returned when no exact address match
 * exists; "locked" refers to the chan_list read lock held during the
 * scan — the result itself is returned unlocked.)
 */
static struct l2cap_chan *l2cap_global_chan_by_scid(int state, __le16 cid, bdaddr_t *src)
{
	struct l2cap_chan *c, *c1 = NULL;

	read_lock(&chan_list_lock);

	list_for_each_entry(c, &chan_list, global_l) {
		struct sock *sk = c->sk;

		if (state && c->state != state)
			continue;

		if (c->scid == cid) {
			/* Exact match. */
			if (!bacmp(&bt_sk(sk)->src, src)) {
				read_unlock(&chan_list_lock);
				return c;
			}

			/* Closest match */
			if (!bacmp(&bt_sk(sk)->src, BDADDR_ANY))
				c1 = c;
		}
	}

	read_unlock(&chan_list_lock);

	return c1;
}
846
/*
 * Incoming LE connection: find a listener on the LE data CID, spawn a
 * child channel, attach it to @conn and mark it connected.
 */
static void l2cap_le_conn_ready(struct l2cap_conn *conn)
{
	struct sock *parent, *sk;
	struct l2cap_chan *chan, *pchan;

	BT_DBG("");

	/* Check if we have socket listening on cid */
	pchan = l2cap_global_chan_by_scid(BT_LISTEN, L2CAP_CID_LE_DATA,
							conn->src);
	if (!pchan)
		return;

	parent = pchan->sk;

	bh_lock_sock(parent);

	/* Check for backlog size */
	if (sk_acceptq_is_full(parent)) {
		BT_DBG("backlog full %d", parent->sk_ack_backlog);
		goto clean;
	}

	chan = pchan->ops->new_connection(pchan->data);
	if (!chan)
		goto clean;

	sk = chan->sk;

	write_lock_bh(&conn->chan_lock);

	hci_conn_hold(conn->hcon);

	bacpy(&bt_sk(sk)->src, conn->src);
	bacpy(&bt_sk(sk)->dst, conn->dst);

	bt_accept_enqueue(parent, sk);

	__l2cap_chan_add(conn, chan);

	__set_chan_timer(chan, sk->sk_sndtimeo);

	l2cap_state_change(chan, BT_CONNECTED);
	parent->sk_data_ready(parent, 0);

	write_unlock_bh(&conn->chan_lock);

	/* Success path falls through: parent is unlocked either way. */
clean:
	bh_unlock_sock(parent);
}
897
/*
 * Mark the channel on @sk fully established: clear config state and the
 * state timer, move to BT_CONNECTED and wake the socket (and listener).
 */
static void l2cap_chan_ready(struct sock *sk)
{
	struct l2cap_chan *chan = l2cap_pi(sk)->chan;
	struct sock *parent = bt_sk(sk)->parent;

	BT_DBG("sk %p, parent %p", sk, parent);

	chan->conf_state = 0;
	__clear_chan_timer(chan);

	l2cap_state_change(chan, BT_CONNECTED);
	sk->sk_state_change(sk);

	if (parent)
		parent->sk_data_ready(parent, 0);
}
914
/*
 * The underlying link came up: handle LE incoming/outgoing security,
 * then advance every channel on @conn appropriate to its type/state.
 */
static void l2cap_conn_ready(struct l2cap_conn *conn)
{
	struct l2cap_chan *chan;

	BT_DBG("conn %p", conn);

	if (!conn->hcon->out && conn->hcon->type == LE_LINK)
		l2cap_le_conn_ready(conn);

	if (conn->hcon->out && conn->hcon->type == LE_LINK)
		smp_conn_security(conn, conn->hcon->pending_sec_level);

	read_lock(&conn->chan_lock);

	list_for_each_entry(chan, &conn->chan_l, list) {
		struct sock *sk = chan->sk;

		bh_lock_sock(sk);

		if (conn->hcon->type == LE_LINK) {
			/* LE channels become ready once SMP is satisfied. */
			if (smp_conn_security(conn, chan->sec_level))
				l2cap_chan_ready(sk);

		} else if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
			/* Connectionless/raw channels need no handshake. */
			__clear_chan_timer(chan);
			l2cap_state_change(chan, BT_CONNECTED);
			sk->sk_state_change(sk);

		} else if (chan->state == BT_CONNECT)
			l2cap_do_start(chan);

		bh_unlock_sock(sk);
	}

	read_unlock(&conn->chan_lock);
}
951
/* Notify sockets that we cannot guaranty reliability anymore */
static void l2cap_conn_unreliable(struct l2cap_conn *conn, int err)
{
	struct l2cap_chan *chan;

	BT_DBG("conn %p", conn);

	read_lock(&conn->chan_lock);

	/* Only channels that asked for reliable delivery get the error. */
	list_for_each_entry(chan, &conn->chan_l, list) {
		struct sock *sk = chan->sk;

		if (test_bit(FLAG_FORCE_RELIABLE, &chan->flags))
			sk->sk_err = err;
	}

	read_unlock(&conn->chan_lock);
}
970
/*
 * Information-request timer expired: give up on the feature exchange,
 * mark it done and start the pending channels anyway.
 */
static void l2cap_info_timeout(unsigned long arg)
{
	struct l2cap_conn *conn = (void *) arg;

	conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
	conn->info_ident = 0;

	l2cap_conn_start(conn);
}
980
/*
 * Tear down the L2CAP state of @hcon: delete every channel with @err,
 * stop the info/security timers and free the connection structure.
 */
static void l2cap_conn_del(struct hci_conn *hcon, int err)
{
	struct l2cap_conn *conn = hcon->l2cap_data;
	struct l2cap_chan *chan, *l;
	struct sock *sk;

	if (!conn)
		return;

	BT_DBG("hcon %p conn %p, err %d", hcon, conn, err);

	/* kfree_skb() tolerates NULL for a never-started reassembly. */
	kfree_skb(conn->rx_skb);

	/* Kill channels */
	list_for_each_entry_safe(chan, l, &conn->chan_l, list) {
		sk = chan->sk;
		bh_lock_sock(sk);
		l2cap_chan_del(chan, err);
		bh_unlock_sock(sk);
		chan->ops->close(chan->data);
	}

	if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT)
		del_timer_sync(&conn->info_timer);

	if (test_and_clear_bit(HCI_CONN_LE_SMP_PEND, &hcon->pend)) {
		del_timer(&conn->security_timer);
		smp_chan_destroy(conn);
	}

	hcon->l2cap_data = NULL;
	kfree(conn);
}
1014
/* SMP security procedure timed out: drop the whole connection. */
static void security_timeout(unsigned long arg)
{
	struct l2cap_conn *conn = (void *) arg;

	l2cap_conn_del(conn->hcon, ETIMEDOUT);
}
1021
/*
 * Get-or-create the L2CAP connection object for @hcon.  Returns the
 * existing one when present, NULL on allocation failure or when @status
 * is nonzero without existing state.
 */
static struct l2cap_conn *l2cap_conn_add(struct hci_conn *hcon, u8 status)
{
	struct l2cap_conn *conn = hcon->l2cap_data;

	if (conn || status)
		return conn;

	conn = kzalloc(sizeof(struct l2cap_conn), GFP_ATOMIC);
	if (!conn)
		return NULL;

	hcon->l2cap_data = conn;
	conn->hcon = hcon;

	BT_DBG("hcon %p conn %p", hcon, conn);

	/* LE links may have their own, smaller MTU. */
	if (hcon->hdev->le_mtu && hcon->type == LE_LINK)
		conn->mtu = hcon->hdev->le_mtu;
	else
		conn->mtu = hcon->hdev->acl_mtu;

	conn->src = &hcon->hdev->bdaddr;
	conn->dst = &hcon->dst;

	conn->feat_mask = 0;

	spin_lock_init(&conn->lock);
	rwlock_init(&conn->chan_lock);

	INIT_LIST_HEAD(&conn->chan_l);

	/* The two timers share storage conceptually: LE links use the SMP
	 * security timer, BR/EDR links the info-request timer. */
	if (hcon->type == LE_LINK)
		setup_timer(&conn->security_timer, security_timeout,
						(unsigned long) conn);
	else
		setup_timer(&conn->info_timer, l2cap_info_timeout,
						(unsigned long) conn);

	/* Default disconnect reason 0x13: remote user terminated. */
	conn->disc_reason = 0x13;

	return conn;
}
1064
/* Locked wrapper around __l2cap_chan_add(). */
static inline void l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan)
{
	write_lock_bh(&conn->chan_lock);
	__l2cap_chan_add(conn, chan);
	write_unlock_bh(&conn->chan_lock);
}
1071
1072 /* ---- Socket interface ---- */
1073
1074 /* Find socket with psm and source bdaddr.
1075 * Returns closest match.
1076 */
1077 static struct l2cap_chan *l2cap_global_chan_by_psm(int state, __le16 psm, bdaddr_t *src)
1078 {
1079 struct l2cap_chan *c, *c1 = NULL;
1080
1081 read_lock(&chan_list_lock);
1082
1083 list_for_each_entry(c, &chan_list, global_l) {
1084 struct sock *sk = c->sk;
1085
1086 if (state && c->state != state)
1087 continue;
1088
1089 if (c->psm == psm) {
1090 /* Exact match. */
1091 if (!bacmp(&bt_sk(sk)->src, src)) {
1092 read_unlock(&chan_list_lock);
1093 return c;
1094 }
1095
1096 /* Closest match */
1097 if (!bacmp(&bt_sk(sk)->src, BDADDR_ANY))
1098 c1 = c;
1099 }
1100 }
1101
1102 read_unlock(&chan_list_lock);
1103
1104 return c1;
1105 }
1106
/*
 * Initiate an outgoing connection for @chan: resolve the route, bring up
 * (or reuse) the ACL/LE link, attach the channel and start the L2CAP
 * handshake.  Returns 0 on success or a negative errno.
 */
int l2cap_chan_connect(struct l2cap_chan *chan)
{
	struct sock *sk = chan->sk;
	bdaddr_t *src = &bt_sk(sk)->src;
	bdaddr_t *dst = &bt_sk(sk)->dst;
	struct l2cap_conn *conn;
	struct hci_conn *hcon;
	struct hci_dev *hdev;
	__u8 auth_type;
	int err;

	BT_DBG("%s -> %s psm 0x%2.2x", batostr(src), batostr(dst),
							chan->psm);

	hdev = hci_get_route(dst, src);
	if (!hdev)
		return -EHOSTUNREACH;

	hci_dev_lock_bh(hdev);

	auth_type = l2cap_get_auth_type(chan);

	/* The LE data CID selects an LE link; everything else uses ACL. */
	if (chan->dcid == L2CAP_CID_LE_DATA)
		hcon = hci_connect(hdev, LE_LINK, dst,
					chan->sec_level, auth_type);
	else
		hcon = hci_connect(hdev, ACL_LINK, dst,
					chan->sec_level, auth_type);

	if (IS_ERR(hcon)) {
		err = PTR_ERR(hcon);
		goto done;
	}

	conn = l2cap_conn_add(hcon, 0);
	if (!conn) {
		hci_conn_put(hcon);
		err = -ENOMEM;
		goto done;
	}

	/* Update source addr of the socket */
	bacpy(src, conn->src);

	l2cap_chan_add(conn, chan);

	l2cap_state_change(chan, BT_CONNECT);
	__set_chan_timer(chan, sk->sk_sndtimeo);

	if (hcon->state == BT_CONNECTED) {
		/* Link already up: skip straight ahead. */
		if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
			__clear_chan_timer(chan);
			if (l2cap_check_security(chan))
				l2cap_state_change(chan, BT_CONNECTED);
		} else
			l2cap_do_start(chan);
	}

	err = 0;

done:
	hci_dev_unlock_bh(hdev);
	hci_dev_put(hdev);
	return err;
}
1172
/*
 * Sleep (interruptibly) until all outstanding ERTM frames on @sk have
 * been acknowledged, the connection drops, or a signal/socket error
 * interrupts the wait.  Must be called with the socket locked; the lock
 * is released around each sleep.  Returns 0 or a negative errno.
 */
int __l2cap_wait_ack(struct sock *sk)
{
	struct l2cap_chan *chan = l2cap_pi(sk)->chan;
	DECLARE_WAITQUEUE(wait, current);
	int err = 0;
	int timeo = HZ/5;

	add_wait_queue(sk_sleep(sk), &wait);
	set_current_state(TASK_INTERRUPTIBLE);
	while (chan->unacked_frames > 0 && chan->conn) {
		if (!timeo)
			timeo = HZ/5;

		if (signal_pending(current)) {
			err = sock_intr_errno(timeo);
			break;
		}

		release_sock(sk);
		timeo = schedule_timeout(timeo);
		lock_sock(sk);
		set_current_state(TASK_INTERRUPTIBLE);

		err = sock_error(sk);
		if (err)
			break;
	}
	set_current_state(TASK_RUNNING);
	remove_wait_queue(sk_sleep(sk), &wait);
	return err;
}
1204
/*
 * ERTM monitor timer: the remote has not responded to our poll.  Give up
 * after remote_max_tx attempts; otherwise poll again.
 */
static void l2cap_monitor_timeout(unsigned long arg)
{
	struct l2cap_chan *chan = (void *) arg;
	struct sock *sk = chan->sk;

	BT_DBG("chan %p", chan);

	bh_lock_sock(sk);
	if (chan->retry_count >= chan->remote_max_tx) {
		l2cap_send_disconn_req(chan->conn, chan, ECONNABORTED);
		bh_unlock_sock(sk);
		return;
	}

	chan->retry_count++;
	__set_monitor_timer(chan);

	l2cap_send_rr_or_rnr(chan, L2CAP_CTRL_POLL);
	bh_unlock_sock(sk);
}
1225
/*
 * ERTM retransmission timer: an I-frame went unacknowledged.  Start the
 * poll/monitor sequence and wait for the remote's F-bit response.
 */
static void l2cap_retrans_timeout(unsigned long arg)
{
	struct l2cap_chan *chan = (void *) arg;
	struct sock *sk = chan->sk;

	BT_DBG("chan %p", chan);

	bh_lock_sock(sk);
	chan->retry_count = 1;
	__set_monitor_timer(chan);

	set_bit(CONN_WAIT_F, &chan->conn_state);

	l2cap_send_rr_or_rnr(chan, L2CAP_CTRL_POLL);
	bh_unlock_sock(sk);
}
1242
/*
 * Free transmitted frames from the head of tx_q up to (but excluding)
 * expected_ack_seq — they have been acknowledged by the remote.  Stops
 * the retransmission timer once nothing is outstanding.
 */
static void l2cap_drop_acked_frames(struct l2cap_chan *chan)
{
	struct sk_buff *skb;

	while ((skb = skb_peek(&chan->tx_q)) &&
			chan->unacked_frames) {
		/* Head frame not yet acknowledged: stop here. */
		if (bt_cb(skb)->tx_seq == chan->expected_ack_seq)
			break;

		skb = skb_dequeue(&chan->tx_q);
		kfree_skb(skb);

		chan->unacked_frames--;
	}

	if (!chan->unacked_frames)
		__clear_retrans_timer(chan);
}
1261
/*
 * Hand a fully-built L2CAP frame to the HCI layer, choosing the
 * flushable/non-flushable packet boundary flag from channel and
 * controller capabilities.
 */
static void l2cap_do_send(struct l2cap_chan *chan, struct sk_buff *skb)
{
	struct hci_conn *hcon = chan->conn->hcon;
	u16 flags;

	BT_DBG("chan %p, skb %p len %d", chan, skb, skb->len);

	if (!test_bit(FLAG_FLUSHABLE, &chan->flags) &&
					lmp_no_flush_capable(hcon->hdev))
		flags = ACL_START_NO_FLUSH;
	else
		flags = ACL_START;

	bt_cb(skb)->force_active = test_bit(FLAG_FORCE_ACTIVE, &chan->flags);
	hci_send_acl(hcon, skb, flags);
}
1278
1279 static void l2cap_streaming_send(struct l2cap_chan *chan)
1280 {
1281 struct sk_buff *skb;
1282 u16 control, fcs;
1283
1284 while ((skb = skb_dequeue(&chan->tx_q))) {
1285 control = get_unaligned_le16(skb->data + L2CAP_HDR_SIZE);
1286 control |= __set_txseq(chan, chan->next_tx_seq);
1287 put_unaligned_le16(control, skb->data + L2CAP_HDR_SIZE);
1288
1289 if (chan->fcs == L2CAP_FCS_CRC16) {
1290 fcs = crc16(0, (u8 *)skb->data, skb->len - 2);
1291 put_unaligned_le16(fcs, skb->data + skb->len - 2);
1292 }
1293
1294 l2cap_do_send(chan, skb);
1295
1296 chan->next_tx_seq = (chan->next_tx_seq + 1) % 64;
1297 }
1298 }
1299
/* Retransmit the single I-frame with sequence number tx_seq (e.g. in
 * response to an SREJ).  The original skb stays on tx_q; a clone carrying
 * a rebuilt control field and FCS is handed to the controller.
 */
static void l2cap_retransmit_one_frame(struct l2cap_chan *chan, u16 tx_seq)
{
	struct sk_buff *skb, *tx_skb;
	u16 control, fcs;

	skb = skb_peek(&chan->tx_q);
	if (!skb)
		return;

	/* Walk the queue looking for the requested sequence number. */
	do {
		if (bt_cb(skb)->tx_seq == tx_seq)
			break;

		/* Not found: nothing to retransmit. */
		if (skb_queue_is_last(&chan->tx_q, skb))
			return;

	} while ((skb = skb_queue_next(&chan->tx_q, skb)));

	/* Frame already retransmitted the maximum number of times:
	 * abort the channel. */
	if (chan->remote_max_tx &&
			bt_cb(skb)->retries == chan->remote_max_tx) {
		l2cap_send_disconn_req(chan->conn, chan, ECONNABORTED);
		return;
	}

	/* NOTE(review): skb_clone() can return NULL under memory
	 * pressure and is dereferenced below unchecked — TODO confirm
	 * whether a NULL check should be added here. */
	tx_skb = skb_clone(skb, GFP_ATOMIC);
	bt_cb(skb)->retries++;
	/* Rebuild the control field: keep the SAR bits, refresh
	 * ReqSeq/TxSeq and add the F-bit if one is owed to the peer. */
	control = get_unaligned_le16(tx_skb->data + L2CAP_HDR_SIZE);
	control &= __get_sar_mask(chan);

	if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state))
		control |= __set_ctrl_final(chan);

	control |= __set_reqseq(chan, chan->buffer_seq);
	control |= __set_txseq(chan, tx_seq);

	put_unaligned_le16(control, tx_skb->data + L2CAP_HDR_SIZE);

	/* Recompute the FCS: the control field just changed. */
	if (chan->fcs == L2CAP_FCS_CRC16) {
		fcs = crc16(0, (u8 *)tx_skb->data, tx_skb->len - 2);
		put_unaligned_le16(fcs, tx_skb->data + tx_skb->len - 2);
	}

	l2cap_do_send(chan, tx_skb);
}
1344
1345 static int l2cap_ertm_send(struct l2cap_chan *chan)
1346 {
1347 struct sk_buff *skb, *tx_skb;
1348 u16 control, fcs;
1349 int nsent = 0;
1350
1351 if (chan->state != BT_CONNECTED)
1352 return -ENOTCONN;
1353
1354 while ((skb = chan->tx_send_head) && (!l2cap_tx_window_full(chan))) {
1355
1356 if (chan->remote_max_tx &&
1357 bt_cb(skb)->retries == chan->remote_max_tx) {
1358 l2cap_send_disconn_req(chan->conn, chan, ECONNABORTED);
1359 break;
1360 }
1361
1362 tx_skb = skb_clone(skb, GFP_ATOMIC);
1363
1364 bt_cb(skb)->retries++;
1365
1366 control = get_unaligned_le16(tx_skb->data + L2CAP_HDR_SIZE);
1367 control &= __get_sar_mask(chan);
1368
1369 if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state))
1370 control |= __set_ctrl_final(chan);
1371
1372 control |= __set_reqseq(chan, chan->buffer_seq);
1373 control |= __set_txseq(chan, chan->next_tx_seq);
1374 put_unaligned_le16(control, tx_skb->data + L2CAP_HDR_SIZE);
1375
1376
1377 if (chan->fcs == L2CAP_FCS_CRC16) {
1378 fcs = crc16(0, (u8 *)skb->data, tx_skb->len - 2);
1379 put_unaligned_le16(fcs, skb->data + tx_skb->len - 2);
1380 }
1381
1382 l2cap_do_send(chan, tx_skb);
1383
1384 __set_retrans_timer(chan);
1385
1386 bt_cb(skb)->tx_seq = chan->next_tx_seq;
1387 chan->next_tx_seq = (chan->next_tx_seq + 1) % 64;
1388
1389 if (bt_cb(skb)->retries == 1)
1390 chan->unacked_frames++;
1391
1392 chan->frames_sent++;
1393
1394 if (skb_queue_is_last(&chan->tx_q, skb))
1395 chan->tx_send_head = NULL;
1396 else
1397 chan->tx_send_head = skb_queue_next(&chan->tx_q, skb);
1398
1399 nsent++;
1400 }
1401
1402 return nsent;
1403 }
1404
1405 static int l2cap_retransmit_frames(struct l2cap_chan *chan)
1406 {
1407 int ret;
1408
1409 if (!skb_queue_empty(&chan->tx_q))
1410 chan->tx_send_head = chan->tx_q.next;
1411
1412 chan->next_tx_seq = chan->expected_ack_seq;
1413 ret = l2cap_ertm_send(chan);
1414 return ret;
1415 }
1416
1417 static void l2cap_send_ack(struct l2cap_chan *chan)
1418 {
1419 u16 control = 0;
1420
1421 control |= __set_reqseq(chan, chan->buffer_seq);
1422
1423 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
1424 control |= __set_ctrl_super(chan, L2CAP_SUPER_RNR);
1425 set_bit(CONN_RNR_SENT, &chan->conn_state);
1426 l2cap_send_sframe(chan, control);
1427 return;
1428 }
1429
1430 if (l2cap_ertm_send(chan) > 0)
1431 return;
1432
1433 control |= __set_ctrl_super(chan, L2CAP_SUPER_RR);
1434 l2cap_send_sframe(chan, control);
1435 }
1436
1437 static void l2cap_send_srejtail(struct l2cap_chan *chan)
1438 {
1439 struct srej_list *tail;
1440 u16 control;
1441
1442 control = __set_ctrl_super(chan, L2CAP_SUPER_SREJ);
1443 control |= __set_ctrl_final(chan);
1444
1445 tail = list_entry((&chan->srej_l)->prev, struct srej_list, list);
1446 control |= __set_reqseq(chan, tail->tx_seq);
1447
1448 l2cap_send_sframe(chan, control);
1449 }
1450
1451 static inline int l2cap_skbuff_fromiovec(struct sock *sk, struct msghdr *msg, int len, int count, struct sk_buff *skb)
1452 {
1453 struct l2cap_conn *conn = l2cap_pi(sk)->chan->conn;
1454 struct sk_buff **frag;
1455 int err, sent = 0;
1456
1457 if (memcpy_fromiovec(skb_put(skb, count), msg->msg_iov, count))
1458 return -EFAULT;
1459
1460 sent += count;
1461 len -= count;
1462
1463 /* Continuation fragments (no L2CAP header) */
1464 frag = &skb_shinfo(skb)->frag_list;
1465 while (len) {
1466 count = min_t(unsigned int, conn->mtu, len);
1467
1468 *frag = bt_skb_send_alloc(sk, count, msg->msg_flags & MSG_DONTWAIT, &err);
1469 if (!*frag)
1470 return err;
1471 if (memcpy_fromiovec(skb_put(*frag, count), msg->msg_iov, count))
1472 return -EFAULT;
1473
1474 sent += count;
1475 len -= count;
1476
1477 frag = &(*frag)->next;
1478 }
1479
1480 return sent;
1481 }
1482
1483 static struct sk_buff *l2cap_create_connless_pdu(struct l2cap_chan *chan, struct msghdr *msg, size_t len)
1484 {
1485 struct sock *sk = chan->sk;
1486 struct l2cap_conn *conn = chan->conn;
1487 struct sk_buff *skb;
1488 int err, count, hlen = L2CAP_HDR_SIZE + 2;
1489 struct l2cap_hdr *lh;
1490
1491 BT_DBG("sk %p len %d", sk, (int)len);
1492
1493 count = min_t(unsigned int, (conn->mtu - hlen), len);
1494 skb = bt_skb_send_alloc(sk, count + hlen,
1495 msg->msg_flags & MSG_DONTWAIT, &err);
1496 if (!skb)
1497 return ERR_PTR(err);
1498
1499 /* Create L2CAP header */
1500 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
1501 lh->cid = cpu_to_le16(chan->dcid);
1502 lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
1503 put_unaligned_le16(chan->psm, skb_put(skb, 2));
1504
1505 err = l2cap_skbuff_fromiovec(sk, msg, len, count, skb);
1506 if (unlikely(err < 0)) {
1507 kfree_skb(skb);
1508 return ERR_PTR(err);
1509 }
1510 return skb;
1511 }
1512
1513 static struct sk_buff *l2cap_create_basic_pdu(struct l2cap_chan *chan, struct msghdr *msg, size_t len)
1514 {
1515 struct sock *sk = chan->sk;
1516 struct l2cap_conn *conn = chan->conn;
1517 struct sk_buff *skb;
1518 int err, count, hlen = L2CAP_HDR_SIZE;
1519 struct l2cap_hdr *lh;
1520
1521 BT_DBG("sk %p len %d", sk, (int)len);
1522
1523 count = min_t(unsigned int, (conn->mtu - hlen), len);
1524 skb = bt_skb_send_alloc(sk, count + hlen,
1525 msg->msg_flags & MSG_DONTWAIT, &err);
1526 if (!skb)
1527 return ERR_PTR(err);
1528
1529 /* Create L2CAP header */
1530 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
1531 lh->cid = cpu_to_le16(chan->dcid);
1532 lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
1533
1534 err = l2cap_skbuff_fromiovec(sk, msg, len, count, skb);
1535 if (unlikely(err < 0)) {
1536 kfree_skb(skb);
1537 return ERR_PTR(err);
1538 }
1539 return skb;
1540 }
1541
/* Build an ERTM/streaming I-frame PDU.
 *
 * Layout: L2CAP header, control field (2 or 4 bytes depending on
 * FLAG_EXT_CTRL), optional 2-byte SDU length (first PDU of a segmented
 * SDU, when sdulen != 0), payload, and an optional 2-byte FCS placeholder
 * (filled in by the send path just before transmission).
 *
 * Returns the skb or an ERR_PTR.
 */
static struct sk_buff *l2cap_create_iframe_pdu(struct l2cap_chan *chan,
					struct msghdr *msg, size_t len,
					u16 control, u16 sdulen)
{
	struct sock *sk = chan->sk;
	struct l2cap_conn *conn = chan->conn;
	struct sk_buff *skb;
	int err, count, hlen;
	struct l2cap_hdr *lh;

	BT_DBG("sk %p len %d", sk, (int)len);

	if (!conn)
		return ERR_PTR(-ENOTCONN);

	/* Control field size depends on the negotiated control format. */
	if (test_bit(FLAG_EXT_CTRL, &chan->flags))
		hlen = L2CAP_EXT_HDR_SIZE;
	else
		hlen = L2CAP_ENH_HDR_SIZE;

	/* SAR start PDUs carry the total SDU length. */
	if (sdulen)
		hlen += 2;

	/* Reserve room for the FCS trailer. */
	if (chan->fcs == L2CAP_FCS_CRC16)
		hlen += 2;

	count = min_t(unsigned int, (conn->mtu - hlen), len);
	skb = bt_skb_send_alloc(sk, count + hlen,
			msg->msg_flags & MSG_DONTWAIT, &err);
	if (!skb)
		return ERR_PTR(err);

	/* Create L2CAP header */
	lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
	lh->cid = cpu_to_le16(chan->dcid);
	lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
	put_unaligned_le16(control, skb_put(skb, 2));
	if (sdulen)
		put_unaligned_le16(sdulen, skb_put(skb, 2));

	err = l2cap_skbuff_fromiovec(sk, msg, len, count, skb);
	if (unlikely(err < 0)) {
		kfree_skb(skb);
		return ERR_PTR(err);
	}

	/* FCS placeholder; the real value is computed at send time
	 * because the control field is rewritten per transmission. */
	if (chan->fcs == L2CAP_FCS_CRC16)
		put_unaligned_le16(0, skb_put(skb, 2));

	bt_cb(skb)->retries = 0;
	return skb;
}
1594
/* Segment an SDU larger than the remote MPS into a chain of I-frame PDUs
 * (SAR start / continue / end) and append them to the transmit queue.
 *
 * Returns the total number of payload bytes queued, or a negative errno;
 * on error nothing is left queued.
 */
static int l2cap_sar_segment_sdu(struct l2cap_chan *chan, struct msghdr *msg, size_t len)
{
	struct sk_buff *skb;
	struct sk_buff_head sar_queue;
	u16 control;
	size_t size = 0;

	skb_queue_head_init(&sar_queue);
	/* Start PDU: payload limited to remote_mps, and it carries the
	 * total SDU length (len) in the sdulen field. */
	control = __set_ctrl_sar(chan, L2CAP_SAR_START);
	skb = l2cap_create_iframe_pdu(chan, msg, chan->remote_mps, control, len);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	__skb_queue_tail(&sar_queue, skb);
	len -= chan->remote_mps;
	size += chan->remote_mps;

	/* Middle PDUs are SAR_CONTINUE; the last one is SAR_END. */
	while (len > 0) {
		size_t buflen;

		if (len > chan->remote_mps) {
			control = __set_ctrl_sar(chan, L2CAP_SAR_CONTINUE);
			buflen = chan->remote_mps;
		} else {
			control = __set_ctrl_sar(chan, L2CAP_SAR_END);
			buflen = len;
		}

		skb = l2cap_create_iframe_pdu(chan, msg, buflen, control, 0);
		if (IS_ERR(skb)) {
			skb_queue_purge(&sar_queue);
			return PTR_ERR(skb);
		}

		__skb_queue_tail(&sar_queue, skb);
		len -= buflen;
		size += buflen;
	}
	/* Queue the whole segmented SDU atomically and start sending
	 * from its first PDU if nothing else is pending. */
	skb_queue_splice_tail(&sar_queue, &chan->tx_q);
	if (chan->tx_send_head == NULL)
		chan->tx_send_head = sar_queue.next;

	return size;
}
1639
/* Send one SDU on a channel, dispatching on channel type and mode:
 * connectionless channels get a G-frame, basic mode a single B-frame,
 * and ERTM/streaming modes queue I-frames (segmenting when the SDU
 * exceeds the remote MPS) before kicking the mode's transmit routine.
 *
 * Returns the number of bytes accepted (len) or a negative errno.
 */
int l2cap_chan_send(struct l2cap_chan *chan, struct msghdr *msg, size_t len)
{
	struct sk_buff *skb;
	u16 control;
	int err;

	/* Connectionless channel */
	if (chan->chan_type == L2CAP_CHAN_CONN_LESS) {
		skb = l2cap_create_connless_pdu(chan, msg, len);
		if (IS_ERR(skb))
			return PTR_ERR(skb);

		l2cap_do_send(chan, skb);
		return len;
	}

	switch (chan->mode) {
	case L2CAP_MODE_BASIC:
		/* Check outgoing MTU */
		if (len > chan->omtu)
			return -EMSGSIZE;

		/* Create a basic PDU */
		skb = l2cap_create_basic_pdu(chan, msg, len);
		if (IS_ERR(skb))
			return PTR_ERR(skb);

		l2cap_do_send(chan, skb);
		err = len;
		break;

	case L2CAP_MODE_ERTM:
	case L2CAP_MODE_STREAMING:
		/* Entire SDU fits into one PDU */
		if (len <= chan->remote_mps) {
			control = __set_ctrl_sar(chan, L2CAP_SAR_UNSEGMENTED);
			skb = l2cap_create_iframe_pdu(chan, msg, len, control,
									0);
			if (IS_ERR(skb))
				return PTR_ERR(skb);

			__skb_queue_tail(&chan->tx_q, skb);

			if (chan->tx_send_head == NULL)
				chan->tx_send_head = skb;

		} else {
			/* Segment SDU into multiples PDUs */
			err = l2cap_sar_segment_sdu(chan, msg, len);
			if (err < 0)
				return err;
		}

		/* Streaming mode transmits immediately and never blocks. */
		if (chan->mode == L2CAP_MODE_STREAMING) {
			l2cap_streaming_send(chan);
			err = len;
			break;
		}

		/* ERTM: hold transmission while the peer is busy and we
		 * are still waiting for an F-bit in reply to our poll;
		 * the data stays queued and goes out later. */
		if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state) &&
				test_bit(CONN_WAIT_F, &chan->conn_state)) {
			err = len;
			break;
		}

		/* A non-negative send result means the SDU was accepted,
		 * even if the window closed before everything went out. */
		err = l2cap_ertm_send(chan);
		if (err >= 0)
			err = len;

		break;

	default:
		BT_DBG("bad state %1.1x", chan->mode);
		err = -EBADFD;
	}

	return err;
}
1718
1719 /* Copy frame to all raw sockets on that connection */
1720 static void l2cap_raw_recv(struct l2cap_conn *conn, struct sk_buff *skb)
1721 {
1722 struct sk_buff *nskb;
1723 struct l2cap_chan *chan;
1724
1725 BT_DBG("conn %p", conn);
1726
1727 read_lock(&conn->chan_lock);
1728 list_for_each_entry(chan, &conn->chan_l, list) {
1729 struct sock *sk = chan->sk;
1730 if (chan->chan_type != L2CAP_CHAN_RAW)
1731 continue;
1732
1733 /* Don't send frame to the socket it came from */
1734 if (skb->sk == sk)
1735 continue;
1736 nskb = skb_clone(skb, GFP_ATOMIC);
1737 if (!nskb)
1738 continue;
1739
1740 if (chan->ops->recv(chan->data, nskb))
1741 kfree_skb(nskb);
1742 }
1743 read_unlock(&conn->chan_lock);
1744 }
1745
1746 /* ---- L2CAP signalling commands ---- */
/* Build a signalling command skb: L2CAP header, command header, then the
 * dlen bytes at data.  The command is fragmented into ACL-MTU-sized
 * chunks, with continuation chunks chained on frag_list (no headers).
 *
 * Returns the skb, or NULL on allocation failure.
 */
static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
				u8 code, u8 ident, u16 dlen, void *data)
{
	struct sk_buff *skb, **frag;
	struct l2cap_cmd_hdr *cmd;
	struct l2cap_hdr *lh;
	int len, count;

	BT_DBG("conn %p, code 0x%2.2x, ident 0x%2.2x, len %d",
			conn, code, ident, dlen);

	len = L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE + dlen;
	count = min_t(unsigned int, conn->mtu, len);

	skb = bt_skb_alloc(count, GFP_ATOMIC);
	if (!skb)
		return NULL;

	lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
	lh->len = cpu_to_le16(L2CAP_CMD_HDR_SIZE + dlen);

	/* Signalling CID differs between BR/EDR and LE links. */
	if (conn->hcon->type == LE_LINK)
		lh->cid = cpu_to_le16(L2CAP_CID_LE_SIGNALING);
	else
		lh->cid = cpu_to_le16(L2CAP_CID_SIGNALING);

	cmd = (struct l2cap_cmd_hdr *) skb_put(skb, L2CAP_CMD_HDR_SIZE);
	cmd->code = code;
	cmd->ident = ident;
	cmd->len = cpu_to_le16(dlen);

	/* Copy as much payload as fits in the first fragment. */
	if (dlen) {
		count -= L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE;
		memcpy(skb_put(skb, count), data, count);
		data += count;
	}

	len -= skb->len;

	/* Continuation fragments (no L2CAP header) */
	frag = &skb_shinfo(skb)->frag_list;
	while (len) {
		count = min_t(unsigned int, conn->mtu, len);

		*frag = bt_skb_alloc(count, GFP_ATOMIC);
		if (!*frag)
			goto fail;

		memcpy(skb_put(*frag, count), data, count);

		len -= count;
		data += count;

		frag = &(*frag)->next;
	}

	return skb;

fail:
	/* Frees the head and every chained fragment. */
	kfree_skb(skb);
	return NULL;
}
1809
1810 static inline int l2cap_get_conf_opt(void **ptr, int *type, int *olen, unsigned long *val)
1811 {
1812 struct l2cap_conf_opt *opt = *ptr;
1813 int len;
1814
1815 len = L2CAP_CONF_OPT_SIZE + opt->len;
1816 *ptr += len;
1817
1818 *type = opt->type;
1819 *olen = opt->len;
1820
1821 switch (opt->len) {
1822 case 1:
1823 *val = *((u8 *) opt->val);
1824 break;
1825
1826 case 2:
1827 *val = get_unaligned_le16(opt->val);
1828 break;
1829
1830 case 4:
1831 *val = get_unaligned_le32(opt->val);
1832 break;
1833
1834 default:
1835 *val = (unsigned long) opt->val;
1836 break;
1837 }
1838
1839 BT_DBG("type 0x%2.2x len %d val 0x%lx", *type, opt->len, *val);
1840 return len;
1841 }
1842
1843 static void l2cap_add_conf_opt(void **ptr, u8 type, u8 len, unsigned long val)
1844 {
1845 struct l2cap_conf_opt *opt = *ptr;
1846
1847 BT_DBG("type 0x%2.2x len %d val 0x%lx", type, len, val);
1848
1849 opt->type = type;
1850 opt->len = len;
1851
1852 switch (len) {
1853 case 1:
1854 *((u8 *) opt->val) = val;
1855 break;
1856
1857 case 2:
1858 put_unaligned_le16(val, opt->val);
1859 break;
1860
1861 case 4:
1862 put_unaligned_le32(val, opt->val);
1863 break;
1864
1865 default:
1866 memcpy(opt->val, (void *) val, len);
1867 break;
1868 }
1869
1870 *ptr += L2CAP_CONF_OPT_SIZE + len;
1871 }
1872
1873 static void l2cap_add_opt_efs(void **ptr, struct l2cap_chan *chan)
1874 {
1875 struct l2cap_conf_efs efs;
1876
1877 switch(chan->mode) {
1878 case L2CAP_MODE_ERTM:
1879 efs.id = chan->local_id;
1880 efs.stype = chan->local_stype;
1881 efs.msdu = cpu_to_le16(chan->local_msdu);
1882 efs.sdu_itime = cpu_to_le32(chan->local_sdu_itime);
1883 efs.acc_lat = cpu_to_le32(L2CAP_DEFAULT_ACC_LAT);
1884 efs.flush_to = cpu_to_le32(L2CAP_DEFAULT_FLUSH_TO);
1885 break;
1886
1887 case L2CAP_MODE_STREAMING:
1888 efs.id = 1;
1889 efs.stype = L2CAP_SERV_BESTEFFORT;
1890 efs.msdu = cpu_to_le16(chan->local_msdu);
1891 efs.sdu_itime = cpu_to_le32(chan->local_sdu_itime);
1892 efs.acc_lat = 0;
1893 efs.flush_to = 0;
1894 break;
1895
1896 default:
1897 return;
1898 }
1899
1900 l2cap_add_conf_opt(ptr, L2CAP_CONF_EFS, sizeof(efs),
1901 (unsigned long) &efs);
1902 }
1903
1904 static void l2cap_ack_timeout(unsigned long arg)
1905 {
1906 struct l2cap_chan *chan = (void *) arg;
1907
1908 bh_lock_sock(chan->sk);
1909 l2cap_send_ack(chan);
1910 bh_unlock_sock(chan->sk);
1911 }
1912
1913 static inline void l2cap_ertm_init(struct l2cap_chan *chan)
1914 {
1915 struct sock *sk = chan->sk;
1916
1917 chan->expected_ack_seq = 0;
1918 chan->unacked_frames = 0;
1919 chan->buffer_seq = 0;
1920 chan->num_acked = 0;
1921 chan->frames_sent = 0;
1922
1923 setup_timer(&chan->retrans_timer, l2cap_retrans_timeout,
1924 (unsigned long) chan);
1925 setup_timer(&chan->monitor_timer, l2cap_monitor_timeout,
1926 (unsigned long) chan);
1927 setup_timer(&chan->ack_timer, l2cap_ack_timeout, (unsigned long) chan);
1928
1929 skb_queue_head_init(&chan->srej_q);
1930
1931 INIT_LIST_HEAD(&chan->srej_l);
1932
1933
1934 sk->sk_backlog_rcv = l2cap_ertm_data_rcv;
1935 }
1936
1937 static inline __u8 l2cap_select_mode(__u8 mode, __u16 remote_feat_mask)
1938 {
1939 switch (mode) {
1940 case L2CAP_MODE_STREAMING:
1941 case L2CAP_MODE_ERTM:
1942 if (l2cap_mode_supported(mode, remote_feat_mask))
1943 return mode;
1944 /* fall through */
1945 default:
1946 return L2CAP_MODE_BASIC;
1947 }
1948 }
1949
1950 static inline bool __l2cap_ews_supported(struct l2cap_chan *chan)
1951 {
1952 return enable_hs && chan->conn->feat_mask & L2CAP_FEAT_EXT_WINDOW;
1953 }
1954
1955 static inline bool __l2cap_efs_supported(struct l2cap_chan *chan)
1956 {
1957 return enable_hs && chan->conn->feat_mask & L2CAP_FEAT_EXT_FLOW;
1958 }
1959
1960 static inline void l2cap_txwin_setup(struct l2cap_chan *chan)
1961 {
1962 if (chan->tx_win > L2CAP_DEFAULT_TX_WINDOW &&
1963 __l2cap_ews_supported(chan))
1964 /* use extended control field */
1965 set_bit(FLAG_EXT_CTRL, &chan->flags);
1966 else
1967 chan->tx_win = min_t(u16, chan->tx_win,
1968 L2CAP_DEFAULT_TX_WINDOW);
1969 }
1970
/* Build a configuration request for the channel into data.
 *
 * On the first request the desired mode is reconciled with the remote
 * feature mask (and EFS enabled when both sides support it).  Then the
 * MTU, RFC, EFS, FCS and EWS options are appended as applicable to the
 * selected mode.  Returns the number of bytes written.
 */
static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data)
{
	struct l2cap_conf_req *req = data;
	struct l2cap_conf_rfc rfc = { .mode = chan->mode };
	void *ptr = req->data;

	BT_DBG("chan %p", chan);

	/* Mode selection only happens before any config exchange. */
	if (chan->num_conf_req || chan->num_conf_rsp)
		goto done;

	switch (chan->mode) {
	case L2CAP_MODE_STREAMING:
	case L2CAP_MODE_ERTM:
		/* A "state 2" device insists on its configured mode. */
		if (test_bit(CONF_STATE2_DEVICE, &chan->conf_state))
			break;

		if (__l2cap_efs_supported(chan))
			set_bit(FLAG_EFS_ENABLE, &chan->flags);

		/* fall through */
	default:
		chan->mode = l2cap_select_mode(rfc.mode, chan->conn->feat_mask);
		break;
	}

done:
	/* MTU option is only needed when differing from the default. */
	if (chan->imtu != L2CAP_DEFAULT_MTU)
		l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->imtu);

	switch (chan->mode) {
	case L2CAP_MODE_BASIC:
		/* If the remote supports neither ERTM nor streaming,
		 * basic mode is implicit and needs no RFC option. */
		if (!(chan->conn->feat_mask & L2CAP_FEAT_ERTM) &&
				!(chan->conn->feat_mask & L2CAP_FEAT_STREAMING))
			break;

		rfc.mode            = L2CAP_MODE_BASIC;
		rfc.txwin_size      = 0;
		rfc.max_transmit    = 0;
		rfc.retrans_timeout = 0;
		rfc.monitor_timeout = 0;
		rfc.max_pdu_size    = 0;

		l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
							(unsigned long) &rfc);
		break;

	case L2CAP_MODE_ERTM:
		rfc.mode            = L2CAP_MODE_ERTM;
		rfc.max_transmit    = chan->max_tx;
		/* Timeouts are dictated by the receiving side; 0 here. */
		rfc.retrans_timeout = 0;
		rfc.monitor_timeout = 0;
		rfc.max_pdu_size    = cpu_to_le16(L2CAP_DEFAULT_MAX_PDU_SIZE);
		/* PDU must fit in the ACL MTU with headers/FCS. */
		if (L2CAP_DEFAULT_MAX_PDU_SIZE > chan->conn->mtu - 10)
			rfc.max_pdu_size = cpu_to_le16(chan->conn->mtu - 10);

		l2cap_txwin_setup(chan);

		/* RFC option carries at most the standard window; a
		 * larger window goes in the EWS option below. */
		rfc.txwin_size = min_t(u16, chan->tx_win,
						L2CAP_DEFAULT_TX_WINDOW);

		l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
							(unsigned long) &rfc);

		if (test_bit(FLAG_EFS_ENABLE, &chan->flags))
			l2cap_add_opt_efs(&ptr, chan);

		if (!(chan->conn->feat_mask & L2CAP_FEAT_FCS))
			break;

		/* Request no FCS when we or the peer want it off. */
		if (chan->fcs == L2CAP_FCS_NONE ||
				test_bit(CONF_NO_FCS_RECV, &chan->conf_state)) {
			chan->fcs = L2CAP_FCS_NONE;
			l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1, chan->fcs);
		}

		if (test_bit(FLAG_EXT_CTRL, &chan->flags))
			l2cap_add_conf_opt(&ptr, L2CAP_CONF_EWS, 2,
								chan->tx_win);
		break;

	case L2CAP_MODE_STREAMING:
		rfc.mode            = L2CAP_MODE_STREAMING;
		rfc.txwin_size      = 0;
		rfc.max_transmit    = 0;
		rfc.retrans_timeout = 0;
		rfc.monitor_timeout = 0;
		rfc.max_pdu_size    = cpu_to_le16(L2CAP_DEFAULT_MAX_PDU_SIZE);
		if (L2CAP_DEFAULT_MAX_PDU_SIZE > chan->conn->mtu - 10)
			rfc.max_pdu_size = cpu_to_le16(chan->conn->mtu - 10);

		l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
							(unsigned long) &rfc);

		if (test_bit(FLAG_EFS_ENABLE, &chan->flags))
			l2cap_add_opt_efs(&ptr, chan);

		if (!(chan->conn->feat_mask & L2CAP_FEAT_FCS))
			break;

		if (chan->fcs == L2CAP_FCS_NONE ||
				test_bit(CONF_NO_FCS_RECV, &chan->conf_state)) {
			chan->fcs = L2CAP_FCS_NONE;
			l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1, chan->fcs);
		}
		break;
	}

	req->dcid  = cpu_to_le16(chan->dcid);
	req->flags = cpu_to_le16(0);

	return ptr - data;
}
2084
/* Parse the peer's configuration request (stored in chan->conf_req) and
 * build our response into data.  Unknown non-hint options are rejected
 * with L2CAP_CONF_UNKNOWN; otherwise the requested MTU/RFC/FCS/EWS values
 * are validated and the accepted output options are echoed back.
 * Returns the response length, or -ECONNREFUSED to abort the channel.
 */
static int l2cap_parse_conf_req(struct l2cap_chan *chan, void *data)
{
	struct l2cap_conf_rsp *rsp = data;
	void *ptr = rsp->data;
	void *req = chan->conf_req;
	int len = chan->conf_len;
	int type, hint, olen;
	unsigned long val;
	struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
	u16 mtu = L2CAP_DEFAULT_MTU;
	u16 result = L2CAP_CONF_SUCCESS;

	BT_DBG("chan %p", chan);

	while (len >= L2CAP_CONF_OPT_SIZE) {
		len -= l2cap_get_conf_opt(&req, &type, &olen, &val);

		/* Hint options may be ignored; others must be understood. */
		hint = type & L2CAP_CONF_HINT;
		type &= L2CAP_CONF_MASK;

		switch (type) {
		case L2CAP_CONF_MTU:
			mtu = val;
			break;

		case L2CAP_CONF_FLUSH_TO:
			chan->flush_to = val;
			break;

		case L2CAP_CONF_QOS:
			break;

		case L2CAP_CONF_RFC:
			if (olen == sizeof(rfc))
				memcpy(&rfc, (void *) val, olen);
			break;

		case L2CAP_CONF_FCS:
			if (val == L2CAP_FCS_NONE)
				set_bit(CONF_NO_FCS_RECV, &chan->conf_state);

			break;

		case L2CAP_CONF_EWS:
			/* Extended window needs high-speed support. */
			if (!enable_hs)
				return -ECONNREFUSED;

			set_bit(FLAG_EXT_CTRL, &chan->flags);
			set_bit(CONF_EWS_RECV, &chan->conf_state);
			chan->remote_tx_win = val;
			break;

		default:
			if (hint)
				break;

			/* Echo the unknown option type back. */
			result = L2CAP_CONF_UNKNOWN;
			*((u8 *) ptr++) = type;
			break;
		}
	}

	/* Mode may only be renegotiated on the first exchange. */
	if (chan->num_conf_rsp || chan->num_conf_req > 1)
		goto done;

	switch (chan->mode) {
	case L2CAP_MODE_STREAMING:
	case L2CAP_MODE_ERTM:
		if (!test_bit(CONF_STATE2_DEVICE, &chan->conf_state)) {
			chan->mode = l2cap_select_mode(rfc.mode,
					chan->conn->feat_mask);
			break;
		}

		/* State 2 device: the configured mode is mandatory. */
		if (chan->mode != rfc.mode)
			return -ECONNREFUSED;

		break;
	}

done:
	if (chan->mode != rfc.mode) {
		result = L2CAP_CONF_UNACCEPT;
		rfc.mode = chan->mode;

		/* One renegotiation attempt only. */
		if (chan->num_conf_rsp == 1)
			return -ECONNREFUSED;

		l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
					sizeof(rfc), (unsigned long) &rfc);
	}


	if (result == L2CAP_CONF_SUCCESS) {
		/* Configure output options and let the other side know
		 * which ones we don't like. */

		if (mtu < L2CAP_DEFAULT_MIN_MTU)
			result = L2CAP_CONF_UNACCEPT;
		else {
			chan->omtu = mtu;
			set_bit(CONF_MTU_DONE, &chan->conf_state);
		}
		l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->omtu);

		switch (rfc.mode) {
		case L2CAP_MODE_BASIC:
			chan->fcs = L2CAP_FCS_NONE;
			set_bit(CONF_MODE_DONE, &chan->conf_state);
			break;

		case L2CAP_MODE_ERTM:
			if (!test_bit(CONF_EWS_RECV, &chan->conf_state))
				chan->remote_tx_win = rfc.txwin_size;
			else
				rfc.txwin_size = L2CAP_DEFAULT_TX_WINDOW;

			chan->remote_max_tx = rfc.max_transmit;

			/* Cap the PDU to fit the ACL MTU with overhead. */
			if (le16_to_cpu(rfc.max_pdu_size) > chan->conn->mtu - 10)
				rfc.max_pdu_size = cpu_to_le16(chan->conn->mtu - 10);

			chan->remote_mps = le16_to_cpu(rfc.max_pdu_size);

			/* The receiving side dictates the timeouts; the
			 * rfc fields are little-endian on the wire, so
			 * store with cpu_to_le16() (not le16_to_cpu()). */
			rfc.retrans_timeout =
				cpu_to_le16(L2CAP_DEFAULT_RETRANS_TO);
			rfc.monitor_timeout =
				cpu_to_le16(L2CAP_DEFAULT_MONITOR_TO);

			set_bit(CONF_MODE_DONE, &chan->conf_state);

			l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
					sizeof(rfc), (unsigned long) &rfc);

			break;

		case L2CAP_MODE_STREAMING:
			if (le16_to_cpu(rfc.max_pdu_size) > chan->conn->mtu - 10)
				rfc.max_pdu_size = cpu_to_le16(chan->conn->mtu - 10);

			chan->remote_mps = le16_to_cpu(rfc.max_pdu_size);

			set_bit(CONF_MODE_DONE, &chan->conf_state);

			l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
					sizeof(rfc), (unsigned long) &rfc);

			break;

		default:
			result = L2CAP_CONF_UNACCEPT;

			memset(&rfc, 0, sizeof(rfc));
			rfc.mode = chan->mode;
		}

		if (result == L2CAP_CONF_SUCCESS)
			set_bit(CONF_OUTPUT_DONE, &chan->conf_state);
	}
	rsp->scid   = cpu_to_le16(chan->dcid);
	rsp->result = cpu_to_le16(result);
	rsp->flags  = cpu_to_le16(0x0000);

	return ptr - data;
}
2250
2251 static int l2cap_parse_conf_rsp(struct l2cap_chan *chan, void *rsp, int len, void *data, u16 *result)
2252 {
2253 struct l2cap_conf_req *req = data;
2254 void *ptr = req->data;
2255 int type, olen;
2256 unsigned long val;
2257 struct l2cap_conf_rfc rfc;
2258
2259 BT_DBG("chan %p, rsp %p, len %d, req %p", chan, rsp, len, data);
2260
2261 while (len >= L2CAP_CONF_OPT_SIZE) {
2262 len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);
2263
2264 switch (type) {
2265 case L2CAP_CONF_MTU:
2266 if (val < L2CAP_DEFAULT_MIN_MTU) {
2267 *result = L2CAP_CONF_UNACCEPT;
2268 chan->imtu = L2CAP_DEFAULT_MIN_MTU;
2269 } else
2270 chan->imtu = val;
2271 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->imtu);
2272 break;
2273
2274 case L2CAP_CONF_FLUSH_TO:
2275 chan->flush_to = val;
2276 l2cap_add_conf_opt(&ptr, L2CAP_CONF_FLUSH_TO,
2277 2, chan->flush_to);
2278 break;
2279
2280 case L2CAP_CONF_RFC:
2281 if (olen == sizeof(rfc))
2282 memcpy(&rfc, (void *)val, olen);
2283
2284 if (test_bit(CONF_STATE2_DEVICE, &chan->conf_state) &&
2285 rfc.mode != chan->mode)
2286 return -ECONNREFUSED;
2287
2288 chan->fcs = 0;
2289
2290 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
2291 sizeof(rfc), (unsigned long) &rfc);
2292 break;
2293
2294 case L2CAP_CONF_EWS:
2295 chan->tx_win = min_t(u16, val,
2296 L2CAP_DEFAULT_EXT_WINDOW);
2297 l2cap_add_conf_opt(&ptr, L2CAP_CONF_EWS,
2298 2, chan->tx_win);
2299 break;
2300 }
2301 }
2302
2303 if (chan->mode == L2CAP_MODE_BASIC && chan->mode != rfc.mode)
2304 return -ECONNREFUSED;
2305
2306 chan->mode = rfc.mode;
2307
2308 if (*result == L2CAP_CONF_SUCCESS) {
2309 switch (rfc.mode) {
2310 case L2CAP_MODE_ERTM:
2311 chan->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
2312 chan->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
2313 chan->mps = le16_to_cpu(rfc.max_pdu_size);
2314 break;
2315 case L2CAP_MODE_STREAMING:
2316 chan->mps = le16_to_cpu(rfc.max_pdu_size);
2317 }
2318 }
2319
2320 req->dcid = cpu_to_le16(chan->dcid);
2321 req->flags = cpu_to_le16(0x0000);
2322
2323 return ptr - data;
2324 }
2325
2326 static int l2cap_build_conf_rsp(struct l2cap_chan *chan, void *data, u16 result, u16 flags)
2327 {
2328 struct l2cap_conf_rsp *rsp = data;
2329 void *ptr = rsp->data;
2330
2331 BT_DBG("chan %p", chan);
2332
2333 rsp->scid = cpu_to_le16(chan->dcid);
2334 rsp->result = cpu_to_le16(result);
2335 rsp->flags = cpu_to_le16(flags);
2336
2337 return ptr - data;
2338 }
2339
2340 void __l2cap_connect_rsp_defer(struct l2cap_chan *chan)
2341 {
2342 struct l2cap_conn_rsp rsp;
2343 struct l2cap_conn *conn = chan->conn;
2344 u8 buf[128];
2345
2346 rsp.scid = cpu_to_le16(chan->dcid);
2347 rsp.dcid = cpu_to_le16(chan->scid);
2348 rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
2349 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
2350 l2cap_send_cmd(conn, chan->ident,
2351 L2CAP_CONN_RSP, sizeof(rsp), &rsp);
2352
2353 if (test_and_set_bit(CONF_REQ_SENT, &chan->conf_state))
2354 return;
2355
2356 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
2357 l2cap_build_conf_req(chan, buf), buf);
2358 chan->num_conf_req++;
2359 }
2360
2361 static void l2cap_conf_rfc_get(struct l2cap_chan *chan, void *rsp, int len)
2362 {
2363 int type, olen;
2364 unsigned long val;
2365 struct l2cap_conf_rfc rfc;
2366
2367 BT_DBG("chan %p, rsp %p, len %d", chan, rsp, len);
2368
2369 if ((chan->mode != L2CAP_MODE_ERTM) && (chan->mode != L2CAP_MODE_STREAMING))
2370 return;
2371
2372 while (len >= L2CAP_CONF_OPT_SIZE) {
2373 len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);
2374
2375 switch (type) {
2376 case L2CAP_CONF_RFC:
2377 if (olen == sizeof(rfc))
2378 memcpy(&rfc, (void *)val, olen);
2379 goto done;
2380 }
2381 }
2382
2383 done:
2384 switch (rfc.mode) {
2385 case L2CAP_MODE_ERTM:
2386 chan->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
2387 chan->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
2388 chan->mps = le16_to_cpu(rfc.max_pdu_size);
2389 break;
2390 case L2CAP_MODE_STREAMING:
2391 chan->mps = le16_to_cpu(rfc.max_pdu_size);
2392 }
2393 }
2394
2395 static inline int l2cap_command_rej(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2396 {
2397 struct l2cap_cmd_rej_unk *rej = (struct l2cap_cmd_rej_unk *) data;
2398
2399 if (rej->reason != L2CAP_REJ_NOT_UNDERSTOOD)
2400 return 0;
2401
2402 if ((conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) &&
2403 cmd->ident == conn->info_ident) {
2404 del_timer(&conn->info_timer);
2405
2406 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
2407 conn->info_ident = 0;
2408
2409 l2cap_conn_start(conn);
2410 }
2411
2412 return 0;
2413 }
2414
/* Handle an incoming L2CAP Connection Request (BR/EDR).
 *
 * Finds a listening channel for the requested PSM, enforces link
 * security (non-SDP PSMs) and the accept backlog, then creates a child
 * channel and answers with a Connection Response.  If the remote's
 * feature mask is still unknown the reply is "pending" and an
 * Information Request is sent first.
 *
 * Always returns 0: failures are reported to the peer through the
 * response's result field.
 */
static inline int l2cap_connect_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
{
	struct l2cap_conn_req *req = (struct l2cap_conn_req *) data;
	struct l2cap_conn_rsp rsp;
	struct l2cap_chan *chan = NULL, *pchan;
	struct sock *parent, *sk = NULL;
	int result, status = L2CAP_CS_NO_INFO;

	u16 dcid = 0, scid = __le16_to_cpu(req->scid);
	__le16 psm = req->psm;

	BT_DBG("psm 0x%2.2x scid 0x%4.4x", psm, scid);

	/* Check if we have socket listening on psm */
	pchan = l2cap_global_chan_by_psm(BT_LISTEN, psm, conn->src);
	if (!pchan) {
		result = L2CAP_CR_BAD_PSM;
		goto sendresp;
	}

	parent = pchan->sk;

	bh_lock_sock(parent);

	/* Check if the ACL is secure enough (if not SDP) */
	if (psm != cpu_to_le16(0x0001) &&
				!hci_conn_check_link_mode(conn->hcon)) {
		/* 0x05: presumably the HCI "authentication failure"
		 * disconnect reason — NOTE(review): confirm against the
		 * HCI error code table.
		 */
		conn->disc_reason = 0x05;
		result = L2CAP_CR_SEC_BLOCK;
		goto response;
	}

	result = L2CAP_CR_NO_MEM;

	/* Check for backlog size */
	if (sk_acceptq_is_full(parent)) {
		BT_DBG("backlog full %d", parent->sk_ack_backlog);
		goto response;
	}

	chan = pchan->ops->new_connection(pchan->data);
	if (!chan)
		goto response;

	sk = chan->sk;

	write_lock_bh(&conn->chan_lock);

	/* Check if we already have channel with that dcid */
	if (__l2cap_get_chan_by_dcid(conn, scid)) {
		write_unlock_bh(&conn->chan_lock);
		sock_set_flag(sk, SOCK_ZAPPED);
		chan->ops->close(chan->data);
		goto response;
	}

	hci_conn_hold(conn->hcon);

	bacpy(&bt_sk(sk)->src, conn->src);
	bacpy(&bt_sk(sk)->dst, conn->dst);
	chan->psm = psm;
	/* Peer's source CID is our destination CID, and vice versa */
	chan->dcid = scid;

	bt_accept_enqueue(parent, sk);

	__l2cap_chan_add(conn, chan);

	dcid = chan->scid;

	__set_chan_timer(chan, sk->sk_sndtimeo);

	chan->ident = cmd->ident;

	if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE) {
		if (l2cap_check_security(chan)) {
			if (bt_sk(sk)->defer_setup) {
				/* Userspace must authorize: answer "pending"
				 * and wake up the listening socket.
				 */
				l2cap_state_change(chan, BT_CONNECT2);
				result = L2CAP_CR_PEND;
				status = L2CAP_CS_AUTHOR_PEND;
				parent->sk_data_ready(parent, 0);
			} else {
				l2cap_state_change(chan, BT_CONFIG);
				result = L2CAP_CR_SUCCESS;
				status = L2CAP_CS_NO_INFO;
			}
		} else {
			l2cap_state_change(chan, BT_CONNECT2);
			result = L2CAP_CR_PEND;
			status = L2CAP_CS_AUTHEN_PEND;
		}
	} else {
		/* Feature discovery not finished yet: keep it pending */
		l2cap_state_change(chan, BT_CONNECT2);
		result = L2CAP_CR_PEND;
		status = L2CAP_CS_NO_INFO;
	}

	write_unlock_bh(&conn->chan_lock);

response:
	bh_unlock_sock(parent);

sendresp:
	rsp.scid = cpu_to_le16(scid);
	rsp.dcid = cpu_to_le16(dcid);
	rsp.result = cpu_to_le16(result);
	rsp.status = cpu_to_le16(status);
	l2cap_send_cmd(conn, cmd->ident, L2CAP_CONN_RSP, sizeof(rsp), &rsp);

	if (result == L2CAP_CR_PEND && status == L2CAP_CS_NO_INFO) {
		/* Kick off feature discovery so the pending connect can
		 * complete once the Information Response arrives.
		 */
		struct l2cap_info_req info;
		info.type = cpu_to_le16(L2CAP_IT_FEAT_MASK);

		conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
		conn->info_ident = l2cap_get_ident(conn);

		mod_timer(&conn->info_timer, jiffies +
					msecs_to_jiffies(L2CAP_INFO_TIMEOUT));

		l2cap_send_cmd(conn, conn->info_ident,
					L2CAP_INFO_REQ, sizeof(info), &info);
	}

	/* Accepted immediately: start configuration right away */
	if (chan && !test_bit(CONF_REQ_SENT, &chan->conf_state) &&
				result == L2CAP_CR_SUCCESS) {
		u8 buf[128];
		set_bit(CONF_REQ_SENT, &chan->conf_state);
		l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
					l2cap_build_conf_req(chan, buf), buf);
		chan->num_conf_req++;
	}

	return 0;
}
2548
/* Handle an L2CAP Connection Response from the remote.
 *
 * Locates the local channel either by our source CID (if the peer
 * echoed one back) or by the signaling ident of the original request,
 * then advances the state machine: SUCCESS moves to BT_CONFIG and
 * sends our first Configure Request, PEND just records the pending
 * state, anything else tears the channel down (deferred to the channel
 * timer if the socket is owned by user context).
 *
 * NOTE(review): the l2cap_get_chan_by_* helpers are assumed to return
 * with the channel's socket bh-locked — the unconditional
 * bh_unlock_sock() at the end relies on that; confirm in their
 * definitions.
 */
static inline int l2cap_connect_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
{
	struct l2cap_conn_rsp *rsp = (struct l2cap_conn_rsp *) data;
	u16 scid, dcid, result, status;
	struct l2cap_chan *chan;
	struct sock *sk;
	u8 req[128];

	scid = __le16_to_cpu(rsp->scid);
	dcid = __le16_to_cpu(rsp->dcid);
	result = __le16_to_cpu(rsp->result);
	status = __le16_to_cpu(rsp->status);

	BT_DBG("dcid 0x%4.4x scid 0x%4.4x result 0x%2.2x status 0x%2.2x", dcid, scid, result, status);

	if (scid) {
		chan = l2cap_get_chan_by_scid(conn, scid);
		if (!chan)
			return -EFAULT;
	} else {
		/* No source CID echoed back: match on the request ident */
		chan = l2cap_get_chan_by_ident(conn, cmd->ident);
		if (!chan)
			return -EFAULT;
	}

	sk = chan->sk;

	switch (result) {
	case L2CAP_CR_SUCCESS:
		l2cap_state_change(chan, BT_CONFIG);
		chan->ident = 0;
		chan->dcid = dcid;
		clear_bit(CONF_CONNECT_PEND, &chan->conf_state);

		/* Send the initial Configure Request only once */
		if (test_and_set_bit(CONF_REQ_SENT, &chan->conf_state))
			break;

		l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
					l2cap_build_conf_req(chan, req), req);
		chan->num_conf_req++;
		break;

	case L2CAP_CR_PEND:
		set_bit(CONF_CONNECT_PEND, &chan->conf_state);
		break;

	default:
		/* don't delete l2cap channel if sk is owned by user */
		if (sock_owned_by_user(sk)) {
			l2cap_state_change(chan, BT_DISCONN);
			__clear_chan_timer(chan);
			/* let the channel timer finish the teardown */
			__set_chan_timer(chan, HZ / 5);
			break;
		}

		l2cap_chan_del(chan, ECONNREFUSED);
		break;
	}

	bh_unlock_sock(sk);
	return 0;
}
2611
2612 static inline void set_default_fcs(struct l2cap_chan *chan)
2613 {
2614 /* FCS is enabled only in ERTM or streaming mode, if one or both
2615 * sides request it.
2616 */
2617 if (chan->mode != L2CAP_MODE_ERTM && chan->mode != L2CAP_MODE_STREAMING)
2618 chan->fcs = L2CAP_FCS_NONE;
2619 else if (!test_bit(CONF_NO_FCS_RECV, &chan->conf_state))
2620 chan->fcs = L2CAP_FCS_CRC16;
2621 }
2622
/* Handle an L2CAP Configure Request for one of our channels.
 *
 * Configuration options may arrive split over several requests (the
 * continuation flag, bit 0 of "flags", is set on all but the last), so
 * options accumulate in chan->conf_req until a final request is seen,
 * then they are parsed in one go and answered with a Configure
 * Response.  Once both directions are configured the channel goes
 * BT_CONNECTED.
 *
 * NOTE(review): l2cap_get_chan_by_scid() is assumed to return with the
 * socket bh-locked; the unlock label below depends on that.
 */
static inline int l2cap_config_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u16 cmd_len, u8 *data)
{
	struct l2cap_conf_req *req = (struct l2cap_conf_req *) data;
	u16 dcid, flags;
	u8 rsp[64];
	struct l2cap_chan *chan;
	struct sock *sk;
	int len;

	dcid = __le16_to_cpu(req->dcid);
	flags = __le16_to_cpu(req->flags);

	BT_DBG("dcid 0x%4.4x flags 0x%2.2x", dcid, flags);

	chan = l2cap_get_chan_by_scid(conn, dcid);
	if (!chan)
		return -ENOENT;

	sk = chan->sk;

	/* Config is only legal while the channel is being set up */
	if (chan->state != BT_CONFIG && chan->state != BT_CONNECT2) {
		struct l2cap_cmd_rej_cid rej;

		rej.reason = cpu_to_le16(L2CAP_REJ_INVALID_CID);
		rej.scid = cpu_to_le16(chan->scid);
		rej.dcid = cpu_to_le16(chan->dcid);

		l2cap_send_cmd(conn, cmd->ident, L2CAP_COMMAND_REJ,
				sizeof(rej), &rej);
		goto unlock;
	}

	/* Reject if config buffer is too small. */
	len = cmd_len - sizeof(*req);
	if (len < 0 || chan->conf_len + len > sizeof(chan->conf_req)) {
		l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
				l2cap_build_conf_rsp(chan, rsp,
					L2CAP_CONF_REJECT, flags), rsp);
		goto unlock;
	}

	/* Store config. */
	memcpy(chan->conf_req + chan->conf_len, req->data, len);
	chan->conf_len += len;

	if (flags & 0x0001) {
		/* Incomplete config. Send empty response. */
		l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
				l2cap_build_conf_rsp(chan, rsp,
					L2CAP_CONF_SUCCESS, 0x0001), rsp);
		goto unlock;
	}

	/* Complete config. */
	len = l2cap_parse_conf_req(chan, rsp);
	if (len < 0) {
		/* Unworkable option set: drop the connection */
		l2cap_send_disconn_req(conn, chan, ECONNRESET);
		goto unlock;
	}

	l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP, len, rsp);
	chan->num_conf_rsp++;

	/* Reset config buffer. */
	chan->conf_len = 0;

	/* Our own outgoing config must be accepted too before the
	 * channel can come up.
	 */
	if (!test_bit(CONF_OUTPUT_DONE, &chan->conf_state))
		goto unlock;

	if (test_bit(CONF_INPUT_DONE, &chan->conf_state)) {
		/* Both directions configured: bring the channel up */
		set_default_fcs(chan);

		l2cap_state_change(chan, BT_CONNECTED);

		chan->next_tx_seq = 0;
		chan->expected_tx_seq = 0;
		skb_queue_head_init(&chan->tx_q);
		if (chan->mode == L2CAP_MODE_ERTM)
			l2cap_ertm_init(chan);

		l2cap_chan_ready(sk);
		goto unlock;
	}

	/* We answered the peer first; make sure our own Configure
	 * Request has gone out as well.
	 */
	if (!test_and_set_bit(CONF_REQ_SENT, &chan->conf_state)) {
		u8 buf[64];
		l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
					l2cap_build_conf_req(chan, buf), buf);
		chan->num_conf_req++;
	}

unlock:
	bh_unlock_sock(sk);
	return 0;
}
2718
2719 static inline int l2cap_config_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2720 {
2721 struct l2cap_conf_rsp *rsp = (struct l2cap_conf_rsp *)data;
2722 u16 scid, flags, result;
2723 struct l2cap_chan *chan;
2724 struct sock *sk;
2725 int len = cmd->len - sizeof(*rsp);
2726
2727 scid = __le16_to_cpu(rsp->scid);
2728 flags = __le16_to_cpu(rsp->flags);
2729 result = __le16_to_cpu(rsp->result);
2730
2731 BT_DBG("scid 0x%4.4x flags 0x%2.2x result 0x%2.2x",
2732 scid, flags, result);
2733
2734 chan = l2cap_get_chan_by_scid(conn, scid);
2735 if (!chan)
2736 return 0;
2737
2738 sk = chan->sk;
2739
2740 switch (result) {
2741 case L2CAP_CONF_SUCCESS:
2742 l2cap_conf_rfc_get(chan, rsp->data, len);
2743 break;
2744
2745 case L2CAP_CONF_UNACCEPT:
2746 if (chan->num_conf_rsp <= L2CAP_CONF_MAX_CONF_RSP) {
2747 char req[64];
2748
2749 if (len > sizeof(req) - sizeof(struct l2cap_conf_req)) {
2750 l2cap_send_disconn_req(conn, chan, ECONNRESET);
2751 goto done;
2752 }
2753
2754 /* throw out any old stored conf requests */
2755 result = L2CAP_CONF_SUCCESS;
2756 len = l2cap_parse_conf_rsp(chan, rsp->data, len,
2757 req, &result);
2758 if (len < 0) {
2759 l2cap_send_disconn_req(conn, chan, ECONNRESET);
2760 goto done;
2761 }
2762
2763 l2cap_send_cmd(conn, l2cap_get_ident(conn),
2764 L2CAP_CONF_REQ, len, req);
2765 chan->num_conf_req++;
2766 if (result != L2CAP_CONF_SUCCESS)
2767 goto done;
2768 break;
2769 }
2770
2771 default:
2772 sk->sk_err = ECONNRESET;
2773 __set_chan_timer(chan, HZ * 5);
2774 l2cap_send_disconn_req(conn, chan, ECONNRESET);
2775 goto done;
2776 }
2777
2778 if (flags & 0x01)
2779 goto done;
2780
2781 set_bit(CONF_INPUT_DONE, &chan->conf_state);
2782
2783 if (test_bit(CONF_OUTPUT_DONE, &chan->conf_state)) {
2784 set_default_fcs(chan);
2785
2786 l2cap_state_change(chan, BT_CONNECTED);
2787 chan->next_tx_seq = 0;
2788 chan->expected_tx_seq = 0;
2789 skb_queue_head_init(&chan->tx_q);
2790 if (chan->mode == L2CAP_MODE_ERTM)
2791 l2cap_ertm_init(chan);
2792
2793 l2cap_chan_ready(sk);
2794 }
2795
2796 done:
2797 bh_unlock_sock(sk);
2798 return 0;
2799 }
2800
/* Handle an L2CAP Disconnection Request from the remote.
 *
 * Acknowledges with a Disconnection Response, marks the socket shut
 * down, and deletes the channel — unless the socket is currently owned
 * by user context, in which case the teardown is deferred to the
 * channel timer.
 *
 * NOTE(review): l2cap_get_chan_by_scid() is assumed to return with the
 * socket bh-locked, matching the unlocks below.
 */
static inline int l2cap_disconnect_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
{
	struct l2cap_disconn_req *req = (struct l2cap_disconn_req *) data;
	struct l2cap_disconn_rsp rsp;
	u16 dcid, scid;
	struct l2cap_chan *chan;
	struct sock *sk;

	scid = __le16_to_cpu(req->scid);
	dcid = __le16_to_cpu(req->dcid);

	BT_DBG("scid 0x%4.4x dcid 0x%4.4x", scid, dcid);

	/* The peer's destination CID is our source CID */
	chan = l2cap_get_chan_by_scid(conn, dcid);
	if (!chan)
		return 0;

	sk = chan->sk;

	/* Echo the CIDs back from the peer's point of view */
	rsp.dcid = cpu_to_le16(chan->scid);
	rsp.scid = cpu_to_le16(chan->dcid);
	l2cap_send_cmd(conn, cmd->ident, L2CAP_DISCONN_RSP, sizeof(rsp), &rsp);

	sk->sk_shutdown = SHUTDOWN_MASK;

	/* don't delete l2cap channel if sk is owned by user */
	if (sock_owned_by_user(sk)) {
		l2cap_state_change(chan, BT_DISCONN);
		__clear_chan_timer(chan);
		/* channel timer completes the teardown later */
		__set_chan_timer(chan, HZ / 5);
		bh_unlock_sock(sk);
		return 0;
	}

	l2cap_chan_del(chan, ECONNRESET);
	bh_unlock_sock(sk);

	chan->ops->close(chan->data);
	return 0;
}
2841
2842 static inline int l2cap_disconnect_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2843 {
2844 struct l2cap_disconn_rsp *rsp = (struct l2cap_disconn_rsp *) data;
2845 u16 dcid, scid;
2846 struct l2cap_chan *chan;
2847 struct sock *sk;
2848
2849 scid = __le16_to_cpu(rsp->scid);
2850 dcid = __le16_to_cpu(rsp->dcid);
2851
2852 BT_DBG("dcid 0x%4.4x scid 0x%4.4x", dcid, scid);
2853
2854 chan = l2cap_get_chan_by_scid(conn, scid);
2855 if (!chan)
2856 return 0;
2857
2858 sk = chan->sk;
2859
2860 /* don't delete l2cap channel if sk is owned by user */
2861 if (sock_owned_by_user(sk)) {
2862 l2cap_state_change(chan,BT_DISCONN);
2863 __clear_chan_timer(chan);
2864 __set_chan_timer(chan, HZ / 5);
2865 bh_unlock_sock(sk);
2866 return 0;
2867 }
2868
2869 l2cap_chan_del(chan, 0);
2870 bh_unlock_sock(sk);
2871
2872 chan->ops->close(chan->data);
2873 return 0;
2874 }
2875
2876 static inline int l2cap_information_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2877 {
2878 struct l2cap_info_req *req = (struct l2cap_info_req *) data;
2879 u16 type;
2880
2881 type = __le16_to_cpu(req->type);
2882
2883 BT_DBG("type 0x%4.4x", type);
2884
2885 if (type == L2CAP_IT_FEAT_MASK) {
2886 u8 buf[8];
2887 u32 feat_mask = l2cap_feat_mask;
2888 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
2889 rsp->type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
2890 rsp->result = cpu_to_le16(L2CAP_IR_SUCCESS);
2891 if (!disable_ertm)
2892 feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING
2893 | L2CAP_FEAT_FCS;
2894 if (enable_hs)
2895 feat_mask |= L2CAP_FEAT_EXT_FLOW
2896 | L2CAP_FEAT_EXT_WINDOW;
2897
2898 put_unaligned_le32(feat_mask, rsp->data);
2899 l2cap_send_cmd(conn, cmd->ident,
2900 L2CAP_INFO_RSP, sizeof(buf), buf);
2901 } else if (type == L2CAP_IT_FIXED_CHAN) {
2902 u8 buf[12];
2903 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
2904 rsp->type = cpu_to_le16(L2CAP_IT_FIXED_CHAN);
2905 rsp->result = cpu_to_le16(L2CAP_IR_SUCCESS);
2906 memcpy(buf + 4, l2cap_fixed_chan, 8);
2907 l2cap_send_cmd(conn, cmd->ident,
2908 L2CAP_INFO_RSP, sizeof(buf), buf);
2909 } else {
2910 struct l2cap_info_rsp rsp;
2911 rsp.type = cpu_to_le16(type);
2912 rsp.result = cpu_to_le16(L2CAP_IR_NOTSUPP);
2913 l2cap_send_cmd(conn, cmd->ident,
2914 L2CAP_INFO_RSP, sizeof(rsp), &rsp);
2915 }
2916
2917 return 0;
2918 }
2919
/* Handle an L2CAP Information Response (feature mask / fixed channels).
 *
 * Second half of the discovery handshake started by our Information
 * Request: a feature-mask answer may chain into a fixed-channel query,
 * and once discovery finishes (or fails) pending connections are
 * started via l2cap_conn_start().
 */
static inline int l2cap_information_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
{
	struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) data;
	u16 type, result;

	type = __le16_to_cpu(rsp->type);
	result = __le16_to_cpu(rsp->result);

	BT_DBG("type 0x%4.4x result 0x%2.2x", type, result);

	/* L2CAP Info req/rsp are unbound to channels, add extra checks */
	if (cmd->ident != conn->info_ident ||
			conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE)
		return 0;

	del_timer(&conn->info_timer);

	if (result != L2CAP_IR_SUCCESS) {
		/* Peer can't answer: give up on discovery and move on */
		conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
		conn->info_ident = 0;

		l2cap_conn_start(conn);

		return 0;
	}

	if (type == L2CAP_IT_FEAT_MASK) {
		conn->feat_mask = get_unaligned_le32(rsp->data);

		if (conn->feat_mask & L2CAP_FEAT_FIXED_CHAN) {
			/* Peer supports fixed channels: ask which ones */
			struct l2cap_info_req req;
			req.type = cpu_to_le16(L2CAP_IT_FIXED_CHAN);

			conn->info_ident = l2cap_get_ident(conn);

			l2cap_send_cmd(conn, conn->info_ident,
					L2CAP_INFO_REQ, sizeof(req), &req);
		} else {
			conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
			conn->info_ident = 0;

			l2cap_conn_start(conn);
		}
	} else if (type == L2CAP_IT_FIXED_CHAN) {
		/* Discovery complete */
		conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
		conn->info_ident = 0;

		l2cap_conn_start(conn);
	}

	return 0;
}
2972
2973 static inline int l2cap_check_conn_param(u16 min, u16 max, u16 latency,
2974 u16 to_multiplier)
2975 {
2976 u16 max_latency;
2977
2978 if (min > max || min < 6 || max > 3200)
2979 return -EINVAL;
2980
2981 if (to_multiplier < 10 || to_multiplier > 3200)
2982 return -EINVAL;
2983
2984 if (max >= to_multiplier * 8)
2985 return -EINVAL;
2986
2987 max_latency = (to_multiplier * 8 / max) - 1;
2988 if (latency > 499 || latency > max_latency)
2989 return -EINVAL;
2990
2991 return 0;
2992 }
2993
/* Handle an LE Connection Parameter Update Request.
 *
 * Only valid when we are master of the link.  The requested parameters
 * are range-checked; the peer always gets an accepted/rejected
 * response and, on success, the controller is asked to apply the new
 * parameters.
 */
static inline int l2cap_conn_param_update_req(struct l2cap_conn *conn,
					struct l2cap_cmd_hdr *cmd, u8 *data)
{
	struct hci_conn *hcon = conn->hcon;
	struct l2cap_conn_param_update_req *req;
	struct l2cap_conn_param_update_rsp rsp;
	u16 min, max, latency, to_multiplier, cmd_len;
	int err;

	/* Slaves must not act on this request */
	if (!(hcon->link_mode & HCI_LM_MASTER))
		return -EINVAL;

	cmd_len = __le16_to_cpu(cmd->len);
	if (cmd_len != sizeof(struct l2cap_conn_param_update_req))
		return -EPROTO;

	req = (struct l2cap_conn_param_update_req *) data;
	min = __le16_to_cpu(req->min);
	max = __le16_to_cpu(req->max);
	latency = __le16_to_cpu(req->latency);
	to_multiplier = __le16_to_cpu(req->to_multiplier);

	BT_DBG("min 0x%4.4x max 0x%4.4x latency: 0x%4.4x Timeout: 0x%4.4x",
						min, max, latency, to_multiplier);

	memset(&rsp, 0, sizeof(rsp));

	err = l2cap_check_conn_param(min, max, latency, to_multiplier);
	if (err)
		rsp.result = cpu_to_le16(L2CAP_CONN_PARAM_REJECTED);
	else
		rsp.result = cpu_to_le16(L2CAP_CONN_PARAM_ACCEPTED);

	l2cap_send_cmd(conn, cmd->ident, L2CAP_CONN_PARAM_UPDATE_RSP,
							sizeof(rsp), &rsp);

	/* Only push validated parameters down to the controller */
	if (!err)
		hci_le_conn_update(hcon, min, max, latency, to_multiplier);

	return 0;
}
3035
/* Dispatch one BR/EDR signaling command to its specific handler.
 *
 * Returns 0 or a negative error; a non-zero return causes the caller
 * (l2cap_sig_channel) to answer the peer with a Command Reject.
 */
static inline int l2cap_bredr_sig_cmd(struct l2cap_conn *conn,
			struct l2cap_cmd_hdr *cmd, u16 cmd_len, u8 *data)
{
	int err = 0;

	switch (cmd->code) {
	case L2CAP_COMMAND_REJ:
		l2cap_command_rej(conn, cmd, data);
		break;

	case L2CAP_CONN_REQ:
		err = l2cap_connect_req(conn, cmd, data);
		break;

	case L2CAP_CONN_RSP:
		err = l2cap_connect_rsp(conn, cmd, data);
		break;

	case L2CAP_CONF_REQ:
		err = l2cap_config_req(conn, cmd, cmd_len, data);
		break;

	case L2CAP_CONF_RSP:
		err = l2cap_config_rsp(conn, cmd, data);
		break;

	case L2CAP_DISCONN_REQ:
		err = l2cap_disconnect_req(conn, cmd, data);
		break;

	case L2CAP_DISCONN_RSP:
		err = l2cap_disconnect_rsp(conn, cmd, data);
		break;

	case L2CAP_ECHO_REQ:
		/* Echo the request payload back unchanged */
		l2cap_send_cmd(conn, cmd->ident, L2CAP_ECHO_RSP, cmd_len, data);
		break;

	case L2CAP_ECHO_RSP:
		break;

	case L2CAP_INFO_REQ:
		err = l2cap_information_req(conn, cmd, data);
		break;

	case L2CAP_INFO_RSP:
		err = l2cap_information_rsp(conn, cmd, data);
		break;

	default:
		BT_ERR("Unknown BR/EDR signaling command 0x%2.2x", cmd->code);
		err = -EINVAL;
		break;
	}

	return err;
}
3093
3094 static inline int l2cap_le_sig_cmd(struct l2cap_conn *conn,
3095 struct l2cap_cmd_hdr *cmd, u8 *data)
3096 {
3097 switch (cmd->code) {
3098 case L2CAP_COMMAND_REJ:
3099 return 0;
3100
3101 case L2CAP_CONN_PARAM_UPDATE_REQ:
3102 return l2cap_conn_param_update_req(conn, cmd, data);
3103
3104 case L2CAP_CONN_PARAM_UPDATE_RSP:
3105 return 0;
3106
3107 default:
3108 BT_ERR("Unknown LE signaling command 0x%2.2x", cmd->code);
3109 return -EINVAL;
3110 }
3111 }
3112
/* Parse and dispatch all signaling commands carried in one C-frame.
 *
 * A single signaling PDU may contain several commands back to back;
 * each is validated (length fits, ident non-zero) and routed to the LE
 * or BR/EDR dispatcher.  Handler errors are answered with a Command
 * Reject.  Consumes the skb.
 */
static inline void l2cap_sig_channel(struct l2cap_conn *conn,
						struct sk_buff *skb)
{
	u8 *data = skb->data;
	int len = skb->len;
	struct l2cap_cmd_hdr cmd;
	int err;

	l2cap_raw_recv(conn, skb);

	while (len >= L2CAP_CMD_HDR_SIZE) {
		u16 cmd_len;
		memcpy(&cmd, data, L2CAP_CMD_HDR_SIZE);
		data += L2CAP_CMD_HDR_SIZE;
		len -= L2CAP_CMD_HDR_SIZE;

		cmd_len = le16_to_cpu(cmd.len);

		BT_DBG("code 0x%2.2x len %d id 0x%2.2x", cmd.code, cmd_len, cmd.ident);

		/* Ident 0 is reserved; a length overrun means a truncated
		 * or corrupt PDU — stop parsing either way.
		 */
		if (cmd_len > len || !cmd.ident) {
			BT_DBG("corrupted command");
			break;
		}

		if (conn->hcon->type == LE_LINK)
			err = l2cap_le_sig_cmd(conn, &cmd, data);
		else
			err = l2cap_bredr_sig_cmd(conn, &cmd, cmd_len, data);

		if (err) {
			struct l2cap_cmd_rej_unk rej;

			BT_ERR("Wrong link type (%d)", err);

			/* FIXME: Map err to a valid reason */
			rej.reason = cpu_to_le16(L2CAP_REJ_NOT_UNDERSTOOD);
			l2cap_send_cmd(conn, cmd.ident, L2CAP_COMMAND_REJ, sizeof(rej), &rej);
		}

		/* Advance to the next command in the same PDU */
		data += cmd_len;
		len -= cmd_len;
	}

	kfree_skb(skb);
}
3159
/* Verify (and strip) the trailing FCS of a received frame.
 *
 * When CRC16 is in use the last two bytes carry the FCS.  skb_trim()
 * removes them from skb->len first; the read at skb->data + skb->len
 * still sees those bytes, since trimming a linear skb only shrinks the
 * length without touching the data.  The CRC is recomputed over the
 * L2CAP header (sitting just before skb->data) plus the payload.
 *
 * Returns 0 if the FCS matches or is not in use, -EBADMSG otherwise.
 */
static int l2cap_check_fcs(struct l2cap_chan *chan, struct sk_buff *skb)
{
	u16 our_fcs, rcv_fcs;
	int hdr_size;

	/* Extended control fields make the header two bytes longer */
	if (test_bit(FLAG_EXT_CTRL, &chan->flags))
		hdr_size = L2CAP_EXT_HDR_SIZE;
	else
		hdr_size = L2CAP_ENH_HDR_SIZE;

	if (chan->fcs == L2CAP_FCS_CRC16) {
		skb_trim(skb, skb->len - 2);
		rcv_fcs = get_unaligned_le16(skb->data + skb->len);
		our_fcs = crc16(0, skb->data - hdr_size, skb->len + hdr_size);

		if (our_fcs != rcv_fcs)
			return -EBADMSG;
	}
	return 0;
}
3180
/* Answer a poll (P=1) from the peer with I-frames, RR or RNR.
 *
 * Sends RNR if we are locally busy, retransmits if the peer was busy,
 * then tries to push pending I-frames; if nothing at all was sent the
 * peer's poll still needs an acknowledgment, so a plain RR goes out.
 */
static inline void l2cap_send_i_or_rr_or_rnr(struct l2cap_chan *chan)
{
	u16 control = 0;

	chan->frames_sent = 0;

	control |= __set_reqseq(chan, chan->buffer_seq);

	if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
		control |= __set_ctrl_super(chan, L2CAP_SUPER_RNR);
		l2cap_send_sframe(chan, control);
		set_bit(CONN_RNR_SENT, &chan->conn_state);
	}

	if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state))
		l2cap_retransmit_frames(chan);

	l2cap_ertm_send(chan);

	/* Nothing was sent and we are not busy: ack with an RR so the
	 * poll does not go unanswered.
	 */
	if (!test_bit(CONN_LOCAL_BUSY, &chan->conn_state) &&
			chan->frames_sent == 0) {
		control |= __set_ctrl_super(chan, L2CAP_SUPER_RR);
		l2cap_send_sframe(chan, control);
	}
}
3206
/* Insert an out-of-sequence I-frame into the SREJ reassembly queue.
 *
 * The queue is kept ordered by each frame's offset from buffer_seq,
 * computed modulo 64 (the ERTM sequence space).  Returns 0 on
 * insertion or -EINVAL if a frame with the same tx_seq is already
 * queued (duplicate).
 */
static int l2cap_add_to_srej_queue(struct l2cap_chan *chan, struct sk_buff *skb, u16 tx_seq, u8 sar)
{
	struct sk_buff *next_skb;
	int tx_seq_offset, next_tx_seq_offset;

	bt_cb(skb)->tx_seq = tx_seq;
	bt_cb(skb)->sar = sar;

	next_skb = skb_peek(&chan->srej_q);
	if (!next_skb) {
		__skb_queue_tail(&chan->srej_q, skb);
		return 0;
	}

	/* Distance of the new frame from the receive window start */
	tx_seq_offset = (tx_seq - chan->buffer_seq) % 64;
	if (tx_seq_offset < 0)
		tx_seq_offset += 64;

	do {
		if (bt_cb(next_skb)->tx_seq == tx_seq)
			return -EINVAL;

		next_tx_seq_offset = (bt_cb(next_skb)->tx_seq -
						chan->buffer_seq) % 64;
		if (next_tx_seq_offset < 0)
			next_tx_seq_offset += 64;

		/* First queued frame sorting after us: insert before it */
		if (next_tx_seq_offset > tx_seq_offset) {
			__skb_queue_before(&chan->srej_q, next_skb, skb);
			return 0;
		}

		if (skb_queue_is_last(&chan->srej_q, next_skb))
			break;

	} while ((next_skb = skb_queue_next(&chan->srej_q, next_skb)));

	/* Sorts after everything currently queued */
	__skb_queue_tail(&chan->srej_q, skb);

	return 0;
}
3248
/* Chain a continuation fragment onto a partially reassembled SDU.
 *
 * The fragment is linked into skb's frag_list via *last_frag — which
 * tracks the current tail so appending stays O(1) — and the parent's
 * length accounting is updated.
 */
static void append_skb_frag(struct sk_buff *skb,
			struct sk_buff *new_frag, struct sk_buff **last_frag)
{
	/* skb->len reflects data in skb as well as all fragments
	 * skb->data_len reflects only data in fragments
	 */
	if (!skb_has_frag_list(skb))
		skb_shinfo(skb)->frag_list = new_frag;

	new_frag->next = NULL;

	(*last_frag)->next = new_frag;
	*last_frag = new_frag;

	skb->len += new_frag->len;
	skb->data_len += new_frag->len;
	skb->truesize += new_frag->truesize;
}
3267
/* Feed one I-frame into SDU reassembly according to its SAR bits.
 *
 * Unsegmented frames are delivered immediately.  A "start" frame
 * carries the total SDU length and opens reassembly; "continue" and
 * "end" frames are chained onto chan->sdu via its frag list.  On
 * delivery or on any error the reassembly state is reset.
 *
 * Takes ownership of skb: it is either consumed by ops->recv, parked
 * in chan->sdu, or freed on error.  Returns 0 on success/progress,
 * -EMSGSIZE when the announced SDU exceeds the MTU, or -EINVAL for SAR
 * sequence violations.
 */
static int l2cap_reassemble_sdu(struct l2cap_chan *chan, struct sk_buff *skb, u16 control)
{
	int err = -EINVAL;

	switch (__get_ctrl_sar(chan, control)) {
	case L2CAP_SAR_UNSEGMENTED:
		/* A bare frame while reassembly is open is an error */
		if (chan->sdu)
			break;

		err = chan->ops->recv(chan->data, skb);
		break;

	case L2CAP_SAR_START:
		if (chan->sdu)
			break;

		/* First two payload bytes announce the full SDU length */
		chan->sdu_len = get_unaligned_le16(skb->data);
		skb_pull(skb, 2);

		if (chan->sdu_len > chan->imtu) {
			err = -EMSGSIZE;
			break;
		}

		/* err stays -EINVAL: a start frame must not already hold
		 * the complete SDU.
		 */
		if (skb->len >= chan->sdu_len)
			break;

		chan->sdu = skb;
		chan->sdu_last_frag = skb;

		/* skb is now owned by chan->sdu */
		skb = NULL;
		err = 0;
		break;

	case L2CAP_SAR_CONTINUE:
		if (!chan->sdu)
			break;

		append_skb_frag(chan->sdu, skb,
				&chan->sdu_last_frag);
		skb = NULL;

		/* Reaching the announced length early is an error */
		if (chan->sdu->len >= chan->sdu_len)
			break;

		err = 0;
		break;

	case L2CAP_SAR_END:
		if (!chan->sdu)
			break;

		append_skb_frag(chan->sdu, skb,
				&chan->sdu_last_frag);
		skb = NULL;

		/* Total must match exactly what the start frame promised */
		if (chan->sdu->len != chan->sdu_len)
			break;

		err = chan->ops->recv(chan->data, chan->sdu);

		if (!err) {
			/* Reassembly complete */
			chan->sdu = NULL;
			chan->sdu_last_frag = NULL;
			chan->sdu_len = 0;
		}
		break;
	}

	if (err) {
		/* skb is NULL here when it was absorbed above */
		kfree_skb(skb);
		kfree_skb(chan->sdu);
		chan->sdu = NULL;
		chan->sdu_last_frag = NULL;
		chan->sdu_len = 0;
	}

	return err;
}
3348
3349 static void l2cap_ertm_enter_local_busy(struct l2cap_chan *chan)
3350 {
3351 u16 control;
3352
3353 BT_DBG("chan %p, Enter local busy", chan);
3354
3355 set_bit(CONN_LOCAL_BUSY, &chan->conn_state);
3356
3357 control = __set_reqseq(chan, chan->buffer_seq);
3358 control |= __set_ctrl_super(chan, L2CAP_SUPER_RNR);
3359 l2cap_send_sframe(chan, control);
3360
3361 set_bit(CONN_RNR_SENT, &chan->conn_state);
3362
3363 __clear_ack_timer(chan);
3364 }
3365
/* Leave the local-busy condition announced earlier via RNR.
 *
 * If an RNR actually went out, poll the peer with an RR (P=1) so it
 * learns we can receive again, and arm the monitor timer while waiting
 * for the F-bit answer.  The busy bits are cleared in all cases.
 */
static void l2cap_ertm_exit_local_busy(struct l2cap_chan *chan)
{
	u16 control;

	if (!test_bit(CONN_RNR_SENT, &chan->conn_state))
		goto done;

	control = __set_reqseq(chan, chan->buffer_seq);
	control |= __set_ctrl_poll(chan);
	control |= __set_ctrl_super(chan, L2CAP_SUPER_RR);
	l2cap_send_sframe(chan, control);
	chan->retry_count = 1;

	/* Wait for the F-bit response under the monitor timer */
	__clear_retrans_timer(chan);
	__set_monitor_timer(chan);

	set_bit(CONN_WAIT_F, &chan->conn_state);

done:
	clear_bit(CONN_LOCAL_BUSY, &chan->conn_state);
	clear_bit(CONN_RNR_SENT, &chan->conn_state);

	BT_DBG("chan %p, Exit local busy", chan);
}
3390
3391 void l2cap_chan_busy(struct l2cap_chan *chan, int busy)
3392 {
3393 if (chan->mode == L2CAP_MODE_ERTM) {
3394 if (busy)
3395 l2cap_ertm_enter_local_busy(chan);
3396 else
3397 l2cap_ertm_exit_local_busy(chan);
3398 }
3399 }
3400
/* Deliver buffered frames after a selectively-rejected frame arrived.
 *
 * Pops frames from the SREJ queue for as long as they continue the
 * in-order sequence starting at tx_seq (and we are not locally busy),
 * reassembling each into the SDU; a reassembly error tears the whole
 * connection down.
 */
static void l2cap_check_srej_gap(struct l2cap_chan *chan, u16 tx_seq)
{
	struct sk_buff *skb;
	u16 control;

	while ((skb = skb_peek(&chan->srej_q)) &&
			!test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
		int err;

		/* Stop at the first remaining gap in the sequence */
		if (bt_cb(skb)->tx_seq != tx_seq)
			break;

		skb = skb_dequeue(&chan->srej_q);
		control = __set_ctrl_sar(chan, bt_cb(skb)->sar);
		err = l2cap_reassemble_sdu(chan, skb, control);

		if (err < 0) {
			l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
			break;
		}

		/* Sequence numbers advance modulo the 64-entry space */
		chan->buffer_seq_srej =
			(chan->buffer_seq_srej + 1) % 64;
		tx_seq = (tx_seq + 1) % 64;
	}
}
3427
/* Re-send SREJ frames for every sequence number still missing.
 *
 * Walks the pending-SREJ list: the entry matching tx_seq has now been
 * satisfied and is removed; every other entry gets its SREJ S-frame
 * retransmitted and is rotated to the back of the list so the original
 * ordering is preserved.
 */
static void l2cap_resend_srejframe(struct l2cap_chan *chan, u16 tx_seq)
{
	struct srej_list *l, *tmp;
	u16 control;

	list_for_each_entry_safe(l, tmp, &chan->srej_l, list) {
		if (l->tx_seq == tx_seq) {
			/* This gap was just filled: drop its entry */
			list_del(&l->list);
			kfree(l);
			return;
		}
		control = __set_ctrl_super(chan, L2CAP_SUPER_SREJ);
		control |= __set_reqseq(chan, l->tx_seq);
		l2cap_send_sframe(chan, control);
		/* rotate to the tail to keep list order stable */
		list_del(&l->list);
		list_add_tail(&l->list, &chan->srej_l);
	}
}
3446
3447 static void l2cap_send_srejframe(struct l2cap_chan *chan, u16 tx_seq)
3448 {
3449 struct srej_list *new;
3450 u16 control;
3451
3452 while (tx_seq != chan->expected_tx_seq) {
3453 control = __set_ctrl_super(chan, L2CAP_SUPER_SREJ);
3454 control |= __set_reqseq(chan, chan->expected_tx_seq);
3455 l2cap_send_sframe(chan, control);
3456
3457 new = kzalloc(sizeof(struct srej_list), GFP_ATOMIC);
3458 new->tx_seq = chan->expected_tx_seq;
3459 chan->expected_tx_seq = (chan->expected_tx_seq + 1) % 64;
3460 list_add_tail(&new->list, &chan->srej_l);
3461 }
3462 chan->expected_tx_seq = (chan->expected_tx_seq + 1) % 64;
3463 }
3464
/* Process a received I-frame in ERTM mode.
 *
 * Acknowledges the frames the peer reports via req_seq, validates
 * tx_seq against the receive window, and then either delivers/queues
 * the frame if it is the expected one, feeds it into an ongoing SREJ
 * recovery, or starts SREJ recovery when a sequence gap is detected.
 *
 * Takes ownership of skb.  Returns 0, or the negative error from
 * reassembly (which also triggers a disconnect).
 */
static inline int l2cap_data_channel_iframe(struct l2cap_chan *chan, u16 rx_control, struct sk_buff *skb)
{
	u16 tx_seq = __get_txseq(chan, rx_control);
	u16 req_seq = __get_reqseq(chan, rx_control);
	u8 sar = __get_ctrl_sar(chan, rx_control);
	int tx_seq_offset, expected_tx_seq_offset;
	int num_to_ack = (chan->tx_win/6) + 1;
	int err = 0;

	BT_DBG("chan %p len %d tx_seq %d rx_control 0x%4.4x", chan, skb->len,
							tx_seq, rx_control);

	/* An F-bit answers our poll: stop the monitor timer */
	if (__is_ctrl_final(chan, rx_control) &&
			test_bit(CONN_WAIT_F, &chan->conn_state)) {
		__clear_monitor_timer(chan);
		if (chan->unacked_frames > 0)
			__set_retrans_timer(chan);
		clear_bit(CONN_WAIT_F, &chan->conn_state);
	}

	/* The piggybacked req_seq acknowledges our outgoing frames */
	chan->expected_ack_seq = req_seq;
	l2cap_drop_acked_frames(chan);

	tx_seq_offset = (tx_seq - chan->buffer_seq) % 64;
	if (tx_seq_offset < 0)
		tx_seq_offset += 64;

	/* invalid tx_seq */
	if (tx_seq_offset >= chan->tx_win) {
		l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
		goto drop;
	}

	if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state))
		goto drop;

	if (tx_seq == chan->expected_tx_seq)
		goto expected;

	if (test_bit(CONN_SREJ_SENT, &chan->conn_state)) {
		struct srej_list *first;

		first = list_first_entry(&chan->srej_l,
				struct srej_list, list);
		if (tx_seq == first->tx_seq) {
			/* Oldest missing frame arrived: queue it and flush
			 * any in-order frames buffered behind it.
			 */
			l2cap_add_to_srej_queue(chan, skb, tx_seq, sar);
			l2cap_check_srej_gap(chan, tx_seq);

			list_del(&first->list);
			kfree(first);

			if (list_empty(&chan->srej_l)) {
				/* All gaps filled: leave SREJ recovery */
				chan->buffer_seq = chan->buffer_seq_srej;
				clear_bit(CONN_SREJ_SENT, &chan->conn_state);
				l2cap_send_ack(chan);
				BT_DBG("chan %p, Exit SREJ_SENT", chan);
			}
		} else {
			struct srej_list *l;

			/* duplicated tx_seq */
			if (l2cap_add_to_srej_queue(chan, skb, tx_seq, sar) < 0)
				goto drop;

			list_for_each_entry(l, &chan->srej_l, list) {
				if (l->tx_seq == tx_seq) {
					l2cap_resend_srejframe(chan, tx_seq);
					return 0;
				}
			}
			/* A new gap opened beyond the known ones */
			l2cap_send_srejframe(chan, tx_seq);
		}
	} else {
		expected_tx_seq_offset =
			(chan->expected_tx_seq - chan->buffer_seq) % 64;
		if (expected_tx_seq_offset < 0)
			expected_tx_seq_offset += 64;

		/* duplicated tx_seq */
		if (tx_seq_offset < expected_tx_seq_offset)
			goto drop;

		/* First out-of-order frame: enter SREJ recovery */
		set_bit(CONN_SREJ_SENT, &chan->conn_state);

		BT_DBG("chan %p, Enter SREJ", chan);

		INIT_LIST_HEAD(&chan->srej_l);
		chan->buffer_seq_srej = chan->buffer_seq;

		__skb_queue_head_init(&chan->srej_q);
		l2cap_add_to_srej_queue(chan, skb, tx_seq, sar);

		set_bit(CONN_SEND_PBIT, &chan->conn_state);

		l2cap_send_srejframe(chan, tx_seq);

		__clear_ack_timer(chan);
	}
	return 0;

expected:
	chan->expected_tx_seq = (chan->expected_tx_seq + 1) % 64;

	if (test_bit(CONN_SREJ_SENT, &chan->conn_state)) {
		/* During SREJ recovery even in-order frames are buffered
		 * until the gaps before them are filled.
		 */
		bt_cb(skb)->tx_seq = tx_seq;
		bt_cb(skb)->sar = sar;
		__skb_queue_tail(&chan->srej_q, skb);
		return 0;
	}

	err = l2cap_reassemble_sdu(chan, skb, rx_control);
	chan->buffer_seq = (chan->buffer_seq + 1) % 64;
	if (err < 0) {
		l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
		return err;
	}

	if (__is_ctrl_final(chan, rx_control)) {
		if (!test_and_clear_bit(CONN_REJ_ACT, &chan->conn_state))
			l2cap_retransmit_frames(chan);
	}

	__set_ack_timer(chan);

	/* Ack proactively after roughly a sixth of the tx window */
	chan->num_acked = (chan->num_acked + 1) % num_to_ack;
	if (chan->num_acked == num_to_ack - 1)
		l2cap_send_ack(chan);

	return 0;

drop:
	kfree_skb(skb);
	return 0;
}
3599
/* Process a Receiver Ready (RR) S-frame.
 *
 * req_seq acknowledges our transmitted I-frames.  A P-bit demands an
 * immediate answer (I-frames, RR or RNR); an F-bit answers a poll of
 * ours and may trigger retransmission; a plain RR clears remote-busy
 * and lets transmission resume.
 */
static inline void l2cap_data_channel_rrframe(struct l2cap_chan *chan, u16 rx_control)
{
	BT_DBG("chan %p, req_seq %d ctrl 0x%4.4x", chan,
				__get_reqseq(chan, rx_control), rx_control);

	chan->expected_ack_seq = __get_reqseq(chan, rx_control);
	l2cap_drop_acked_frames(chan);

	if (__is_ctrl_poll(chan, rx_control)) {
		/* Peer polls us: our next response must carry the F-bit */
		set_bit(CONN_SEND_FBIT, &chan->conn_state);
		if (test_bit(CONN_SREJ_SENT, &chan->conn_state)) {
			if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state) &&
					(chan->unacked_frames > 0))
				__set_retrans_timer(chan);

			clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
			l2cap_send_srejtail(chan);
		} else {
			l2cap_send_i_or_rr_or_rnr(chan);
		}

	} else if (__is_ctrl_final(chan, rx_control)) {
		clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);

		/* Retransmit unless a REJ already made us do so */
		if (!test_and_clear_bit(CONN_REJ_ACT, &chan->conn_state))
			l2cap_retransmit_frames(chan);

	} else {
		if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state) &&
				(chan->unacked_frames > 0))
			__set_retrans_timer(chan);

		clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
		if (test_bit(CONN_SREJ_SENT, &chan->conn_state))
			l2cap_send_ack(chan);
		else
			l2cap_ertm_send(chan);
	}
}
3639
/* Process a Reject (REJ) S-frame: the peer asks us to go back and
 * retransmit everything from req_seq onwards.  An F-bit means this REJ
 * answers a poll of ours, in which case retransmission may already
 * have happened (tracked via CONN_REJ_ACT).
 */
static inline void l2cap_data_channel_rejframe(struct l2cap_chan *chan, u16 rx_control)
{
	u16 tx_seq = __get_reqseq(chan, rx_control);

	BT_DBG("chan %p, req_seq %d ctrl 0x%4.4x", chan, tx_seq, rx_control);

	clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);

	/* Everything before req_seq is implicitly acknowledged */
	chan->expected_ack_seq = tx_seq;
	l2cap_drop_acked_frames(chan);

	if (__is_ctrl_final(chan, rx_control)) {
		if (!test_and_clear_bit(CONN_REJ_ACT, &chan->conn_state))
			l2cap_retransmit_frames(chan);
	} else {
		l2cap_retransmit_frames(chan);

		/* Remember we already retransmitted, in case the F-bit
		 * answer to our outstanding poll is still on its way.
		 */
		if (test_bit(CONN_WAIT_F, &chan->conn_state))
			set_bit(CONN_REJ_ACT, &chan->conn_state);
	}
}
/* Handle an incoming SREJ (Selective Reject) S-frame.
 *
 * The peer requests retransmission of the single I-frame numbered
 * req_seq.  The P/F bits select between poll response, final-bit
 * handling and plain selective retransmission.
 */
static inline void l2cap_data_channel_srejframe(struct l2cap_chan *chan, u16 rx_control)
{
	u16 tx_seq = __get_reqseq(chan, rx_control);

	BT_DBG("chan %p, req_seq %d ctrl 0x%4.4x", chan, tx_seq, rx_control);

	clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);

	if (__is_ctrl_poll(chan, rx_control)) {
		/* Poll: this SREJ also acknowledges frames up to tx_seq - 1. */
		chan->expected_ack_seq = tx_seq;
		l2cap_drop_acked_frames(chan);

		/* Our answer must carry the F-bit. */
		set_bit(CONN_SEND_FBIT, &chan->conn_state);
		l2cap_retransmit_one_frame(chan, tx_seq);

		l2cap_ertm_send(chan);

		if (test_bit(CONN_WAIT_F, &chan->conn_state)) {
			chan->srej_save_reqseq = tx_seq;
			set_bit(CONN_SREJ_ACT, &chan->conn_state);
		}
	} else if (__is_ctrl_final(chan, rx_control)) {
		/* F-bit: skip retransmission if this exact SREJ was already
		 * handled while we were waiting for the final bit. */
		if (test_bit(CONN_SREJ_ACT, &chan->conn_state) &&
				chan->srej_save_reqseq == tx_seq)
			clear_bit(CONN_SREJ_ACT, &chan->conn_state);
		else
			l2cap_retransmit_one_frame(chan, tx_seq);
	} else {
		l2cap_retransmit_one_frame(chan, tx_seq);
		if (test_bit(CONN_WAIT_F, &chan->conn_state)) {
			/* Remember which frame was SREJ'ed so a later F-bit
			 * response is not retransmitted a second time. */
			chan->srej_save_reqseq = tx_seq;
			set_bit(CONN_SREJ_ACT, &chan->conn_state);
		}
	}
}
3696
/* Handle an incoming RNR (Receiver Not Ready) S-frame.
 *
 * The peer acknowledges frames up to req_seq - 1 but cannot accept
 * further I-frames; mark it busy and hold retransmissions until it
 * becomes ready again.
 */
static inline void l2cap_data_channel_rnrframe(struct l2cap_chan *chan, u16 rx_control)
{
	u16 tx_seq = __get_reqseq(chan, rx_control);

	BT_DBG("chan %p, req_seq %d ctrl 0x%4.4x", chan, tx_seq, rx_control);

	set_bit(CONN_REMOTE_BUSY, &chan->conn_state);
	chan->expected_ack_seq = tx_seq;
	l2cap_drop_acked_frames(chan);

	/* A poll demands that our reply carries the F-bit. */
	if (__is_ctrl_poll(chan, rx_control))
		set_bit(CONN_SEND_FBIT, &chan->conn_state);

	if (!test_bit(CONN_SREJ_SENT, &chan->conn_state)) {
		/* Peer is busy: no point running the retransmit timer. */
		__clear_retrans_timer(chan);
		if (__is_ctrl_poll(chan, rx_control))
			l2cap_send_rr_or_rnr(chan, L2CAP_CTRL_FINAL);
		return;
	}

	/* An SREJ exchange is in progress. */
	if (__is_ctrl_poll(chan, rx_control)) {
		l2cap_send_srejtail(chan);
	} else {
		rx_control = __set_ctrl_super(chan, L2CAP_SUPER_RR);
		l2cap_send_sframe(chan, rx_control);
	}
}
3724
3725 static inline int l2cap_data_channel_sframe(struct l2cap_chan *chan, u16 rx_control, struct sk_buff *skb)
3726 {
3727 BT_DBG("chan %p rx_control 0x%4.4x len %d", chan, rx_control, skb->len);
3728
3729 if (__is_ctrl_final(chan, rx_control) &&
3730 test_bit(CONN_WAIT_F, &chan->conn_state)) {
3731 __clear_monitor_timer(chan);
3732 if (chan->unacked_frames > 0)
3733 __set_retrans_timer(chan);
3734 clear_bit(CONN_WAIT_F, &chan->conn_state);
3735 }
3736
3737 switch (__get_ctrl_super(chan, rx_control)) {
3738 case L2CAP_SUPER_RR:
3739 l2cap_data_channel_rrframe(chan, rx_control);
3740 break;
3741
3742 case L2CAP_SUPER_REJ:
3743 l2cap_data_channel_rejframe(chan, rx_control);
3744 break;
3745
3746 case L2CAP_SUPER_SREJ:
3747 l2cap_data_channel_srejframe(chan, rx_control);
3748 break;
3749
3750 case L2CAP_SUPER_RNR:
3751 l2cap_data_channel_rnrframe(chan, rx_control);
3752 break;
3753 }
3754
3755 kfree_skb(skb);
3756 return 0;
3757 }
3758
3759 static int l2cap_ertm_data_rcv(struct sock *sk, struct sk_buff *skb)
3760 {
3761 struct l2cap_chan *chan = l2cap_pi(sk)->chan;
3762 u16 control;
3763 u16 req_seq;
3764 int len, next_tx_seq_offset, req_seq_offset;
3765
3766 control = get_unaligned_le16(skb->data);
3767 skb_pull(skb, 2);
3768 len = skb->len;
3769
3770 /*
3771 * We can just drop the corrupted I-frame here.
3772 * Receiver will miss it and start proper recovery
3773 * procedures and ask retransmission.
3774 */
3775 if (l2cap_check_fcs(chan, skb))
3776 goto drop;
3777
3778 if (__is_sar_start(chan, control) && !__is_sframe(chan, control))
3779 len -= 2;
3780
3781 if (chan->fcs == L2CAP_FCS_CRC16)
3782 len -= 2;
3783
3784 if (len > chan->mps) {
3785 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
3786 goto drop;
3787 }
3788
3789 req_seq = __get_reqseq(chan, control);
3790 req_seq_offset = (req_seq - chan->expected_ack_seq) % 64;
3791 if (req_seq_offset < 0)
3792 req_seq_offset += 64;
3793
3794 next_tx_seq_offset =
3795 (chan->next_tx_seq - chan->expected_ack_seq) % 64;
3796 if (next_tx_seq_offset < 0)
3797 next_tx_seq_offset += 64;
3798
3799 /* check for invalid req-seq */
3800 if (req_seq_offset > next_tx_seq_offset) {
3801 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
3802 goto drop;
3803 }
3804
3805 if (!__is_sframe(chan, control)) {
3806 if (len < 0) {
3807 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
3808 goto drop;
3809 }
3810
3811 l2cap_data_channel_iframe(chan, control, skb);
3812 } else {
3813 if (len != 0) {
3814 BT_ERR("%d", len);
3815 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
3816 goto drop;
3817 }
3818
3819 l2cap_data_channel_sframe(chan, control, skb);
3820 }
3821
3822 return 0;
3823
3824 drop:
3825 kfree_skb(skb);
3826 return 0;
3827 }
3828
/* Dispatch an incoming data frame on a connection-oriented channel.
 *
 * Looks up the channel by source CID and feeds the skb to the
 * mode-specific receive path.  Returns 0 in all cases; the skb is
 * either handed upward or freed.
 *
 * NOTE(review): no bh_lock_sock() is visible here, yet the exit path
 * unlocks the socket - l2cap_get_chan_by_scid() presumably returns
 * with the socket locked; confirm against its definition.
 */
static inline int l2cap_data_channel(struct l2cap_conn *conn, u16 cid, struct sk_buff *skb)
{
	struct l2cap_chan *chan;
	struct sock *sk = NULL;
	u16 control;
	u16 tx_seq;
	int len;

	chan = l2cap_get_chan_by_scid(conn, cid);
	if (!chan) {
		BT_DBG("unknown cid 0x%4.4x", cid);
		goto drop;
	}

	sk = chan->sk;

	BT_DBG("chan %p, len %d", chan, skb->len);

	if (chan->state != BT_CONNECTED)
		goto drop;

	switch (chan->mode) {
	case L2CAP_MODE_BASIC:
		/* If socket recv buffers overflows we drop data here
		 * which is *bad* because L2CAP has to be reliable.
		 * But we don't have any other choice. L2CAP doesn't
		 * provide flow control mechanism. */

		if (chan->imtu < skb->len)
			goto drop;

		if (!chan->ops->recv(chan->data, skb))
			goto done;
		break;

	case L2CAP_MODE_ERTM:
		/* Process immediately unless a user context owns the
		 * socket; then defer via the socket backlog. */
		if (!sock_owned_by_user(sk)) {
			l2cap_ertm_data_rcv(sk, skb);
		} else {
			if (sk_add_backlog(sk, skb))
				goto drop;
		}

		goto done;

	case L2CAP_MODE_STREAMING:
		control = get_unaligned_le16(skb->data);
		skb_pull(skb, 2);
		len = skb->len;

		if (l2cap_check_fcs(chan, skb))
			goto drop;

		/* A SAR-start segment carries a 2-byte SDU length field. */
		if (__is_sar_start(chan, control))
			len -= 2;

		if (chan->fcs == L2CAP_FCS_CRC16)
			len -= 2;

		/* S-frames are not valid in streaming mode. */
		if (len > chan->mps || len < 0 || __is_sframe(chan, control))
			goto drop;

		tx_seq = __get_txseq(chan, control);

		if (chan->expected_tx_seq != tx_seq) {
			/* Frame(s) missing - must discard partial SDU */
			kfree_skb(chan->sdu);
			chan->sdu = NULL;
			chan->sdu_last_frag = NULL;
			chan->sdu_len = 0;

			/* TODO: Notify userland of missing data */
		}

		chan->expected_tx_seq = (tx_seq + 1) % 64;

		if (l2cap_reassemble_sdu(chan, skb, control) == -EMSGSIZE)
			l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);

		goto done;

	default:
		BT_DBG("chan %p: bad mode 0x%2.2x", chan, chan->mode);
		break;
	}

drop:
	kfree_skb(skb);

done:
	if (sk)
		bh_unlock_sock(sk);

	return 0;
}
3924
3925 static inline int l2cap_conless_channel(struct l2cap_conn *conn, __le16 psm, struct sk_buff *skb)
3926 {
3927 struct sock *sk = NULL;
3928 struct l2cap_chan *chan;
3929
3930 chan = l2cap_global_chan_by_psm(0, psm, conn->src);
3931 if (!chan)
3932 goto drop;
3933
3934 sk = chan->sk;
3935
3936 bh_lock_sock(sk);
3937
3938 BT_DBG("sk %p, len %d", sk, skb->len);
3939
3940 if (chan->state != BT_BOUND && chan->state != BT_CONNECTED)
3941 goto drop;
3942
3943 if (chan->imtu < skb->len)
3944 goto drop;
3945
3946 if (!chan->ops->recv(chan->data, skb))
3947 goto done;
3948
3949 drop:
3950 kfree_skb(skb);
3951
3952 done:
3953 if (sk)
3954 bh_unlock_sock(sk);
3955 return 0;
3956 }
3957
3958 static inline int l2cap_att_channel(struct l2cap_conn *conn, __le16 cid, struct sk_buff *skb)
3959 {
3960 struct sock *sk = NULL;
3961 struct l2cap_chan *chan;
3962
3963 chan = l2cap_global_chan_by_scid(0, cid, conn->src);
3964 if (!chan)
3965 goto drop;
3966
3967 sk = chan->sk;
3968
3969 bh_lock_sock(sk);
3970
3971 BT_DBG("sk %p, len %d", sk, skb->len);
3972
3973 if (chan->state != BT_BOUND && chan->state != BT_CONNECTED)
3974 goto drop;
3975
3976 if (chan->imtu < skb->len)
3977 goto drop;
3978
3979 if (!chan->ops->recv(chan->data, skb))
3980 goto done;
3981
3982 drop:
3983 kfree_skb(skb);
3984
3985 done:
3986 if (sk)
3987 bh_unlock_sock(sk);
3988 return 0;
3989 }
3990
3991 static void l2cap_recv_frame(struct l2cap_conn *conn, struct sk_buff *skb)
3992 {
3993 struct l2cap_hdr *lh = (void *) skb->data;
3994 u16 cid, len;
3995 __le16 psm;
3996
3997 skb_pull(skb, L2CAP_HDR_SIZE);
3998 cid = __le16_to_cpu(lh->cid);
3999 len = __le16_to_cpu(lh->len);
4000
4001 if (len != skb->len) {
4002 kfree_skb(skb);
4003 return;
4004 }
4005
4006 BT_DBG("len %d, cid 0x%4.4x", len, cid);
4007
4008 switch (cid) {
4009 case L2CAP_CID_LE_SIGNALING:
4010 case L2CAP_CID_SIGNALING:
4011 l2cap_sig_channel(conn, skb);
4012 break;
4013
4014 case L2CAP_CID_CONN_LESS:
4015 psm = get_unaligned_le16(skb->data);
4016 skb_pull(skb, 2);
4017 l2cap_conless_channel(conn, psm, skb);
4018 break;
4019
4020 case L2CAP_CID_LE_DATA:
4021 l2cap_att_channel(conn, cid, skb);
4022 break;
4023
4024 case L2CAP_CID_SMP:
4025 if (smp_sig_channel(conn, skb))
4026 l2cap_conn_del(conn->hcon, EACCES);
4027 break;
4028
4029 default:
4030 l2cap_data_channel(conn, cid, skb);
4031 break;
4032 }
4033 }
4034
4035 /* ---- L2CAP interface with lower layer (HCI) ---- */
4036
4037 static int l2cap_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
4038 {
4039 int exact = 0, lm1 = 0, lm2 = 0;
4040 struct l2cap_chan *c;
4041
4042 if (type != ACL_LINK)
4043 return -EINVAL;
4044
4045 BT_DBG("hdev %s, bdaddr %s", hdev->name, batostr(bdaddr));
4046
4047 /* Find listening sockets and check their link_mode */
4048 read_lock(&chan_list_lock);
4049 list_for_each_entry(c, &chan_list, global_l) {
4050 struct sock *sk = c->sk;
4051
4052 if (c->state != BT_LISTEN)
4053 continue;
4054
4055 if (!bacmp(&bt_sk(sk)->src, &hdev->bdaddr)) {
4056 lm1 |= HCI_LM_ACCEPT;
4057 if (test_bit(FLAG_ROLE_SWITCH, &c->flags))
4058 lm1 |= HCI_LM_MASTER;
4059 exact++;
4060 } else if (!bacmp(&bt_sk(sk)->src, BDADDR_ANY)) {
4061 lm2 |= HCI_LM_ACCEPT;
4062 if (test_bit(FLAG_ROLE_SWITCH, &c->flags))
4063 lm2 |= HCI_LM_MASTER;
4064 }
4065 }
4066 read_unlock(&chan_list_lock);
4067
4068 return exact ? lm1 : lm2;
4069 }
4070
4071 static int l2cap_connect_cfm(struct hci_conn *hcon, u8 status)
4072 {
4073 struct l2cap_conn *conn;
4074
4075 BT_DBG("hcon %p bdaddr %s status %d", hcon, batostr(&hcon->dst), status);
4076
4077 if (!(hcon->type == ACL_LINK || hcon->type == LE_LINK))
4078 return -EINVAL;
4079
4080 if (!status) {
4081 conn = l2cap_conn_add(hcon, status);
4082 if (conn)
4083 l2cap_conn_ready(conn);
4084 } else
4085 l2cap_conn_del(hcon, bt_to_errno(status));
4086
4087 return 0;
4088 }
4089
/* HCI callback: reason code to report for a disconnect indication.
 *
 * 0x13 is the HCI error code "Remote User Terminated Connection"
 * (Bluetooth Core Spec Vol 2 Part D), used as the default when no
 * L2CAP connection state exists for this link.
 */
static int l2cap_disconn_ind(struct hci_conn *hcon)
{
	struct l2cap_conn *conn = hcon->l2cap_data;

	BT_DBG("hcon %p", hcon);

	if ((hcon->type != ACL_LINK && hcon->type != LE_LINK) || !conn)
		return 0x13;

	return conn->disc_reason;
}
4101
4102 static int l2cap_disconn_cfm(struct hci_conn *hcon, u8 reason)
4103 {
4104 BT_DBG("hcon %p reason %d", hcon, reason);
4105
4106 if (!(hcon->type == ACL_LINK || hcon->type == LE_LINK))
4107 return -EINVAL;
4108
4109 l2cap_conn_del(hcon, bt_to_errno(reason));
4110
4111 return 0;
4112 }
4113
4114 static inline void l2cap_check_encryption(struct l2cap_chan *chan, u8 encrypt)
4115 {
4116 if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED)
4117 return;
4118
4119 if (encrypt == 0x00) {
4120 if (chan->sec_level == BT_SECURITY_MEDIUM) {
4121 __clear_chan_timer(chan);
4122 __set_chan_timer(chan, HZ * 5);
4123 } else if (chan->sec_level == BT_SECURITY_HIGH)
4124 l2cap_chan_close(chan, ECONNREFUSED);
4125 } else {
4126 if (chan->sec_level == BT_SECURITY_MEDIUM)
4127 __clear_chan_timer(chan);
4128 }
4129 }
4130
/* HCI security event: authentication/encryption state changed.
 *
 * Walk every channel on the connection and advance its state machine:
 * mark LE data channels ready, send connect requests that were waiting
 * for security, or answer deferred incoming connections.
 */
static int l2cap_security_cfm(struct hci_conn *hcon, u8 status, u8 encrypt)
{
	struct l2cap_conn *conn = hcon->l2cap_data;
	struct l2cap_chan *chan;

	if (!conn)
		return 0;

	BT_DBG("conn %p", conn);

	if (hcon->type == LE_LINK) {
		smp_distribute_keys(conn, 0);
		del_timer(&conn->security_timer);
	}

	read_lock(&conn->chan_lock);

	list_for_each_entry(chan, &conn->chan_l, list) {
		struct sock *sk = chan->sk;

		bh_lock_sock(sk);

		BT_DBG("chan->scid %d", chan->scid);

		if (chan->scid == L2CAP_CID_LE_DATA) {
			/* The LE data channel becomes usable once the link
			 * is successfully encrypted. */
			if (!status && encrypt) {
				chan->sec_level = hcon->sec_level;
				l2cap_chan_ready(sk);
			}

			bh_unlock_sock(sk);
			continue;
		}

		/* A connect request is already in flight for this channel. */
		if (test_bit(CONF_CONNECT_PEND, &chan->conf_state)) {
			bh_unlock_sock(sk);
			continue;
		}

		if (!status && (chan->state == BT_CONNECTED ||
						chan->state == BT_CONFIG)) {
			l2cap_check_encryption(chan, encrypt);
			bh_unlock_sock(sk);
			continue;
		}

		if (chan->state == BT_CONNECT) {
			if (!status) {
				/* Security is now sufficient: send the
				 * deferred connect request. */
				struct l2cap_conn_req req;
				req.scid = cpu_to_le16(chan->scid);
				req.psm = chan->psm;

				chan->ident = l2cap_get_ident(conn);
				set_bit(CONF_CONNECT_PEND, &chan->conf_state);

				l2cap_send_cmd(conn, chan->ident,
					L2CAP_CONN_REQ, sizeof(req), &req);
			} else {
				/* Security failed: schedule a quick teardown. */
				__clear_chan_timer(chan);
				__set_chan_timer(chan, HZ / 10);
			}
		} else if (chan->state == BT_CONNECT2) {
			struct l2cap_conn_rsp rsp;
			__u16 res, stat;

			if (!status) {
				if (bt_sk(sk)->defer_setup) {
					/* Let the listener decide; report
					 * authorization pending for now. */
					struct sock *parent = bt_sk(sk)->parent;
					res = L2CAP_CR_PEND;
					stat = L2CAP_CS_AUTHOR_PEND;
					if (parent)
						parent->sk_data_ready(parent, 0);
				} else {
					l2cap_state_change(chan, BT_CONFIG);
					res = L2CAP_CR_SUCCESS;
					stat = L2CAP_CS_NO_INFO;
				}
			} else {
				/* Security failed: block the connection and
				 * schedule channel teardown. */
				l2cap_state_change(chan, BT_DISCONN);
				__set_chan_timer(chan, HZ / 10);
				res = L2CAP_CR_SEC_BLOCK;
				stat = L2CAP_CS_NO_INFO;
			}

			rsp.scid = cpu_to_le16(chan->dcid);
			rsp.dcid = cpu_to_le16(chan->scid);
			rsp.result = cpu_to_le16(res);
			rsp.status = cpu_to_le16(stat);
			l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
							sizeof(rsp), &rsp);
		}

		bh_unlock_sock(sk);
	}

	read_unlock(&conn->chan_lock);

	return 0;
}
4230
/* HCI callback: ACL data received; reassemble fragments into complete
 * L2CAP frames.
 *
 * A start fragment (no ACL_CONT flag) carries the basic L2CAP header
 * with the total frame length; continuation fragments are appended to
 * conn->rx_skb until rx_len reaches zero, at which point the complete
 * frame is dispatched via l2cap_recv_frame().  Returns 0; the input
 * skb is always consumed.
 */
static int l2cap_recv_acldata(struct hci_conn *hcon, struct sk_buff *skb, u16 flags)
{
	struct l2cap_conn *conn = hcon->l2cap_data;

	if (!conn)
		conn = l2cap_conn_add(hcon, 0);

	if (!conn)
		goto drop;

	BT_DBG("conn %p len %d flags 0x%x", conn, skb->len, flags);

	if (!(flags & ACL_CONT)) {
		struct l2cap_hdr *hdr;
		struct l2cap_chan *chan;
		u16 cid;
		int len;

		/* A new start frame while reassembly is in progress means
		 * the previous frame was truncated - discard it. */
		if (conn->rx_len) {
			BT_ERR("Unexpected start frame (len %d)", skb->len);
			kfree_skb(conn->rx_skb);
			conn->rx_skb = NULL;
			conn->rx_len = 0;
			l2cap_conn_unreliable(conn, ECOMM);
		}

		/* Start fragment always begin with Basic L2CAP header */
		if (skb->len < L2CAP_HDR_SIZE) {
			BT_ERR("Frame is too short (len %d)", skb->len);
			l2cap_conn_unreliable(conn, ECOMM);
			goto drop;
		}

		hdr = (struct l2cap_hdr *) skb->data;
		len = __le16_to_cpu(hdr->len) + L2CAP_HDR_SIZE;
		cid = __le16_to_cpu(hdr->cid);

		if (len == skb->len) {
			/* Complete frame received */
			l2cap_recv_frame(conn, skb);
			return 0;
		}

		BT_DBG("Start: total len %d, frag len %d", len, skb->len);

		if (skb->len > len) {
			BT_ERR("Frame is too long (len %d, expected len %d)",
				skb->len, len);
			l2cap_conn_unreliable(conn, ECOMM);
			goto drop;
		}

		/* Reject frames exceeding the channel's receive MTU before
		 * allocating the reassembly buffer.
		 * NOTE(review): bh_unlock_sock() without a visible lock -
		 * l2cap_get_chan_by_scid() presumably returns with the
		 * socket locked; confirm against its definition. */
		chan = l2cap_get_chan_by_scid(conn, cid);

		if (chan && chan->sk) {
			struct sock *sk = chan->sk;

			if (chan->imtu < len - L2CAP_HDR_SIZE) {
				BT_ERR("Frame exceeding recv MTU (len %d, "
						"MTU %d)", len,
						chan->imtu);
				bh_unlock_sock(sk);
				l2cap_conn_unreliable(conn, ECOMM);
				goto drop;
			}
			bh_unlock_sock(sk);
		}

		/* Allocate skb for the complete frame (with header) */
		conn->rx_skb = bt_skb_alloc(len, GFP_ATOMIC);
		if (!conn->rx_skb)
			goto drop;

		skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
								skb->len);
		conn->rx_len = len - skb->len;
	} else {
		BT_DBG("Cont: frag len %d (expecting %d)", skb->len, conn->rx_len);

		/* Continuation without a pending start frame. */
		if (!conn->rx_len) {
			BT_ERR("Unexpected continuation frame (len %d)", skb->len);
			l2cap_conn_unreliable(conn, ECOMM);
			goto drop;
		}

		if (skb->len > conn->rx_len) {
			BT_ERR("Fragment is too long (len %d, expected %d)",
					skb->len, conn->rx_len);
			kfree_skb(conn->rx_skb);
			conn->rx_skb = NULL;
			conn->rx_len = 0;
			l2cap_conn_unreliable(conn, ECOMM);
			goto drop;
		}

		skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
								skb->len);
		conn->rx_len -= skb->len;

		if (!conn->rx_len) {
			/* Complete frame received */
			l2cap_recv_frame(conn, conn->rx_skb);
			conn->rx_skb = NULL;
		}
	}

drop:
	kfree_skb(skb);
	return 0;
}
4341
4342 static int l2cap_debugfs_show(struct seq_file *f, void *p)
4343 {
4344 struct l2cap_chan *c;
4345
4346 read_lock_bh(&chan_list_lock);
4347
4348 list_for_each_entry(c, &chan_list, global_l) {
4349 struct sock *sk = c->sk;
4350
4351 seq_printf(f, "%s %s %d %d 0x%4.4x 0x%4.4x %d %d %d %d\n",
4352 batostr(&bt_sk(sk)->src),
4353 batostr(&bt_sk(sk)->dst),
4354 c->state, __le16_to_cpu(c->psm),
4355 c->scid, c->dcid, c->imtu, c->omtu,
4356 c->sec_level, c->mode);
4357 }
4358
4359 read_unlock_bh(&chan_list_lock);
4360
4361 return 0;
4362 }
4363
/* Open handler: bind the seq_file show routine to the debugfs file. */
static int l2cap_debugfs_open(struct inode *inode, struct file *file)
{
	return single_open(file, l2cap_debugfs_show, inode->i_private);
}
4368
/* File operations for the "l2cap" debugfs entry (seq_file based). */
static const struct file_operations l2cap_debugfs_fops = {
	.open = l2cap_debugfs_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};

/* Dentry of the debugfs file; created in l2cap_init(). */
static struct dentry *l2cap_debugfs;
4377
/* Callbacks registered with the HCI core; these are L2CAP's entry
 * points for connection, disconnection, security and data events. */
static struct hci_proto l2cap_hci_proto = {
	.name = "L2CAP",
	.id = HCI_PROTO_L2CAP,
	.connect_ind = l2cap_connect_ind,
	.connect_cfm = l2cap_connect_cfm,
	.disconn_ind = l2cap_disconn_ind,
	.disconn_cfm = l2cap_disconn_cfm,
	.security_cfm = l2cap_security_cfm,
	.recv_acldata = l2cap_recv_acldata
};
4388
/* Register the L2CAP socket family, the HCI protocol hooks and the
 * debugfs entry.  Returns 0 on success or a negative errno.
 *
 * NOTE(review): on hci_register_proto() failure both
 * bt_sock_unregister() and l2cap_cleanup_sockets() run; if the cleanup
 * helper also unregisters the socket family this is a double
 * unregister - confirm against l2cap_cleanup_sockets().
 */
int __init l2cap_init(void)
{
	int err;

	err = l2cap_init_sockets();
	if (err < 0)
		return err;

	err = hci_register_proto(&l2cap_hci_proto);
	if (err < 0) {
		BT_ERR("L2CAP protocol registration failed");
		bt_sock_unregister(BTPROTO_L2CAP);
		goto error;
	}

	if (bt_debugfs) {
		/* Failure to create the debugfs file is not fatal. */
		l2cap_debugfs = debugfs_create_file("l2cap", 0444,
					bt_debugfs, NULL, &l2cap_debugfs_fops);
		if (!l2cap_debugfs)
			BT_ERR("Failed to create L2CAP debug file");
	}

	return 0;

error:
	l2cap_cleanup_sockets();
	return err;
}
4417
/* Module teardown: remove debugfs entry, unhook from the HCI core,
 * then release the socket layer (reverse of l2cap_init()). */
void l2cap_exit(void)
{
	debugfs_remove(l2cap_debugfs);

	if (hci_unregister_proto(&l2cap_hci_proto) < 0)
		BT_ERR("L2CAP protocol unregistration failed");

	l2cap_cleanup_sockets();
}
4427
/* Module parameters: tunable at load time and via /sys/module. */
module_param(disable_ertm, bool, 0644);
MODULE_PARM_DESC(disable_ertm, "Disable enhanced retransmission mode");

module_param(enable_hs, bool, 0644);
MODULE_PARM_DESC(enable_hs, "Enable High Speed");