Bluetooth: Add a flag to indicate that SMP is going on
net/bluetooth/l2cap_core.c
1/*
2 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (C) 2000-2001 Qualcomm Incorporated
4 Copyright (C) 2009-2010 Gustavo F. Padovan <gustavo@padovan.org>
5 Copyright (C) 2010 Google Inc.
6
7 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
8
9 This program is free software; you can redistribute it and/or modify
10 it under the terms of the GNU General Public License version 2 as
11 published by the Free Software Foundation;
12
13 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
14 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
15 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
16 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
17 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
18 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
19 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
20 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
21
22 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
23 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
24 SOFTWARE IS DISCLAIMED.
25*/
26
27/* Bluetooth L2CAP core. */
28
29#include <linux/module.h>
30
31#include <linux/types.h>
32#include <linux/capability.h>
33#include <linux/errno.h>
34#include <linux/kernel.h>
35#include <linux/sched.h>
36#include <linux/slab.h>
37#include <linux/poll.h>
38#include <linux/fcntl.h>
39#include <linux/init.h>
40#include <linux/interrupt.h>
41#include <linux/socket.h>
42#include <linux/skbuff.h>
43#include <linux/list.h>
44#include <linux/device.h>
45#include <linux/debugfs.h>
46#include <linux/seq_file.h>
47#include <linux/uaccess.h>
48#include <linux/crc16.h>
49#include <net/sock.h>
50
51#include <asm/system.h>
52#include <asm/unaligned.h>
53
54#include <net/bluetooth/bluetooth.h>
55#include <net/bluetooth/hci_core.h>
56#include <net/bluetooth/l2cap.h>
57#include <net/bluetooth/smp.h>
58
59int disable_ertm;
60
61static u32 l2cap_feat_mask = L2CAP_FEAT_FIXED_CHAN;
62static u8 l2cap_fixed_chan[8] = { 0x02, };
63
64static LIST_HEAD(chan_list);
65static DEFINE_RWLOCK(chan_list_lock);
66
67static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
68 u8 code, u8 ident, u16 dlen, void *data);
69static void l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len,
70 void *data);
71static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data);
72static void l2cap_send_disconn_req(struct l2cap_conn *conn,
73 struct l2cap_chan *chan, int err);
74
75static int l2cap_ertm_data_rcv(struct sock *sk, struct sk_buff *skb);
76
77/* ---- L2CAP channels ---- */
78
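/* Channel reference counting: chan_put() frees the channel when the last reference is dropped */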
79static inline void chan_hold(struct l2cap_chan *c)
80{
81 atomic_inc(&c->refcnt);
82}
83
84static inline void chan_put(struct l2cap_chan *c)
85{
86 if (atomic_dec_and_test(&c->refcnt))
87 kfree(c);
88}
89
90static struct l2cap_chan *__l2cap_get_chan_by_dcid(struct l2cap_conn *conn, u16 cid)
91{
92 struct l2cap_chan *c;
93
94 list_for_each_entry(c, &conn->chan_l, list) {
95 if (c->dcid == cid)
96 return c;
97 }
98 return NULL;
99
100}
101
102static struct l2cap_chan *__l2cap_get_chan_by_scid(struct l2cap_conn *conn, u16 cid)
103{
104 struct l2cap_chan *c;
105
106 list_for_each_entry(c, &conn->chan_l, list) {
107 if (c->scid == cid)
108 return c;
109 }
110 return NULL;
111}
112
 113/* Find channel with given SCID.
 114 * Returns the channel with its socket locked. */
115static struct l2cap_chan *l2cap_get_chan_by_scid(struct l2cap_conn *conn, u16 cid)
116{
117 struct l2cap_chan *c;
118
119 read_lock(&conn->chan_lock);
120 c = __l2cap_get_chan_by_scid(conn, cid);
121 if (c)
122 bh_lock_sock(c->sk);
123 read_unlock(&conn->chan_lock);
124 return c;
125}
126
127static struct l2cap_chan *__l2cap_get_chan_by_ident(struct l2cap_conn *conn, u8 ident)
128{
129 struct l2cap_chan *c;
130
131 list_for_each_entry(c, &conn->chan_l, list) {
132 if (c->ident == ident)
133 return c;
134 }
135 return NULL;
136}
137
138static inline struct l2cap_chan *l2cap_get_chan_by_ident(struct l2cap_conn *conn, u8 ident)
139{
140 struct l2cap_chan *c;
141
142 read_lock(&conn->chan_lock);
143 c = __l2cap_get_chan_by_ident(conn, ident);
144 if (c)
145 bh_lock_sock(c->sk);
146 read_unlock(&conn->chan_lock);
147 return c;
148}
149
150static struct l2cap_chan *__l2cap_global_chan_by_addr(__le16 psm, bdaddr_t *src)
151{
152 struct l2cap_chan *c;
153
154 list_for_each_entry(c, &chan_list, global_l) {
155 if (c->sport == psm && !bacmp(&bt_sk(c->sk)->src, src))
156 goto found;
157 }
158
159 c = NULL;
160found:
161 return c;
162}
163
164int l2cap_add_psm(struct l2cap_chan *chan, bdaddr_t *src, __le16 psm)
165{
166 int err;
167
168 write_lock_bh(&chan_list_lock);
169
170 if (psm && __l2cap_global_chan_by_addr(psm, src)) {
171 err = -EADDRINUSE;
172 goto done;
173 }
174
175 if (psm) {
176 chan->psm = psm;
177 chan->sport = psm;
178 err = 0;
179 } else {
180 u16 p;
181
182 err = -EINVAL;
183 for (p = 0x1001; p < 0x1100; p += 2)
184 if (!__l2cap_global_chan_by_addr(cpu_to_le16(p), src)) {
185 chan->psm = cpu_to_le16(p);
186 chan->sport = cpu_to_le16(p);
187 err = 0;
188 break;
189 }
190 }
191
192done:
193 write_unlock_bh(&chan_list_lock);
194 return err;
195}
196
197int l2cap_add_scid(struct l2cap_chan *chan, __u16 scid)
198{
199 write_lock_bh(&chan_list_lock);
200
201 chan->scid = scid;
202
203 write_unlock_bh(&chan_list_lock);
204
205 return 0;
206}
207
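/* Allocate the first free dynamic CID on this connection; returns 0 if none is available */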
208static u16 l2cap_alloc_cid(struct l2cap_conn *conn)
209{
210 u16 cid = L2CAP_CID_DYN_START;
211
212 for (; cid < L2CAP_CID_DYN_END; cid++) {
213 if (!__l2cap_get_chan_by_scid(conn, cid))
214 return cid;
215 }
216
217 return 0;
218}
219
220static void l2cap_set_timer(struct l2cap_chan *chan, struct timer_list *timer, long timeout)
221{
 222	BT_DBG("chan %p state %d timeout %ld", chan, chan->state, timeout);
223
224 if (!mod_timer(timer, jiffies + msecs_to_jiffies(timeout)))
225 chan_hold(chan);
226}
227
228static void l2cap_clear_timer(struct l2cap_chan *chan, struct timer_list *timer)
229{
230 BT_DBG("chan %p state %d", chan, chan->state);
231
232 if (timer_pending(timer) && del_timer(timer))
233 chan_put(chan);
234}
235
236static void l2cap_state_change(struct l2cap_chan *chan, int state)
237{
238 chan->state = state;
239 chan->ops->state_change(chan->data, state);
240}
241
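/* Channel timer expiry: pick an error code based on the channel state and close the channel */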
242static void l2cap_chan_timeout(unsigned long arg)
243{
244 struct l2cap_chan *chan = (struct l2cap_chan *) arg;
245 struct sock *sk = chan->sk;
246 int reason;
247
248 BT_DBG("chan %p state %d", chan, chan->state);
249
250 bh_lock_sock(sk);
251
252 if (sock_owned_by_user(sk)) {
253 /* sk is owned by user. Try again later */
254 __set_chan_timer(chan, HZ / 5);
255 bh_unlock_sock(sk);
256 chan_put(chan);
257 return;
258 }
259
260 if (chan->state == BT_CONNECTED || chan->state == BT_CONFIG)
261 reason = ECONNREFUSED;
262 else if (chan->state == BT_CONNECT &&
263 chan->sec_level != BT_SECURITY_SDP)
264 reason = ECONNREFUSED;
265 else
266 reason = ETIMEDOUT;
267
268 l2cap_chan_close(chan, reason);
269
270 bh_unlock_sock(sk);
271
272 chan->ops->close(chan->data);
273 chan_put(chan);
274}
275
276struct l2cap_chan *l2cap_chan_create(struct sock *sk)
277{
278 struct l2cap_chan *chan;
279
280 chan = kzalloc(sizeof(*chan), GFP_ATOMIC);
281 if (!chan)
282 return NULL;
283
284 chan->sk = sk;
285
286 write_lock_bh(&chan_list_lock);
287 list_add(&chan->global_l, &chan_list);
288 write_unlock_bh(&chan_list_lock);
289
290 setup_timer(&chan->chan_timer, l2cap_chan_timeout, (unsigned long) chan);
291
292 chan->state = BT_OPEN;
293
294 atomic_set(&chan->refcnt, 1);
295
296 return chan;
297}
298
299void l2cap_chan_destroy(struct l2cap_chan *chan)
300{
301 write_lock_bh(&chan_list_lock);
302 list_del(&chan->global_l);
303 write_unlock_bh(&chan_list_lock);
304
305 chan_put(chan);
306}
307
308static void __l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan)
309{
310 BT_DBG("conn %p, psm 0x%2.2x, dcid 0x%4.4x", conn,
311 chan->psm, chan->dcid);
312
313 conn->disc_reason = 0x13;
314
315 chan->conn = conn;
316
317 if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED) {
318 if (conn->hcon->type == LE_LINK) {
319 /* LE connection */
320 chan->omtu = L2CAP_LE_DEFAULT_MTU;
321 chan->scid = L2CAP_CID_LE_DATA;
322 chan->dcid = L2CAP_CID_LE_DATA;
323 } else {
324 /* Alloc CID for connection-oriented socket */
325 chan->scid = l2cap_alloc_cid(conn);
326 chan->omtu = L2CAP_DEFAULT_MTU;
327 }
328 } else if (chan->chan_type == L2CAP_CHAN_CONN_LESS) {
329 /* Connectionless socket */
330 chan->scid = L2CAP_CID_CONN_LESS;
331 chan->dcid = L2CAP_CID_CONN_LESS;
332 chan->omtu = L2CAP_DEFAULT_MTU;
333 } else {
334 /* Raw socket can send/recv signalling messages only */
335 chan->scid = L2CAP_CID_SIGNALING;
336 chan->dcid = L2CAP_CID_SIGNALING;
337 chan->omtu = L2CAP_DEFAULT_MTU;
338 }
339
340 chan_hold(chan);
341
342 list_add(&chan->list, &conn->chan_l);
343}
344
345/* Delete channel.
346 * Must be called on the locked socket. */
347static void l2cap_chan_del(struct l2cap_chan *chan, int err)
348{
349 struct sock *sk = chan->sk;
350 struct l2cap_conn *conn = chan->conn;
351 struct sock *parent = bt_sk(sk)->parent;
352
353 __clear_chan_timer(chan);
354
355 BT_DBG("chan %p, conn %p, err %d", chan, conn, err);
356
357 if (conn) {
358 /* Delete from channel list */
359 write_lock_bh(&conn->chan_lock);
360 list_del(&chan->list);
361 write_unlock_bh(&conn->chan_lock);
362 chan_put(chan);
363
364 chan->conn = NULL;
365 hci_conn_put(conn->hcon);
366 }
367
368 l2cap_state_change(chan, BT_CLOSED);
369 sock_set_flag(sk, SOCK_ZAPPED);
370
371 if (err)
372 sk->sk_err = err;
373
374 if (parent) {
375 bt_accept_unlink(sk);
376 parent->sk_data_ready(parent, 0);
377 } else
378 sk->sk_state_change(sk);
379
380 if (!(test_bit(CONF_OUTPUT_DONE, &chan->conf_state) &&
381 test_bit(CONF_INPUT_DONE, &chan->conf_state)))
382 return;
383
384 skb_queue_purge(&chan->tx_q);
385
386 if (chan->mode == L2CAP_MODE_ERTM) {
387 struct srej_list *l, *tmp;
388
389 __clear_retrans_timer(chan);
390 __clear_monitor_timer(chan);
391 __clear_ack_timer(chan);
392
393 skb_queue_purge(&chan->srej_q);
394
395 list_for_each_entry_safe(l, tmp, &chan->srej_l, list) {
396 list_del(&l->list);
397 kfree(l);
398 }
399 }
400}
401
402static void l2cap_chan_cleanup_listen(struct sock *parent)
403{
404 struct sock *sk;
405
406 BT_DBG("parent %p", parent);
407
408 /* Close not yet accepted channels */
409 while ((sk = bt_accept_dequeue(parent, NULL))) {
410 struct l2cap_chan *chan = l2cap_pi(sk)->chan;
411 __clear_chan_timer(chan);
412 lock_sock(sk);
413 l2cap_chan_close(chan, ECONNRESET);
414 release_sock(sk);
415 chan->ops->close(chan->data);
416 }
417}
418
419void l2cap_chan_close(struct l2cap_chan *chan, int reason)
420{
421 struct l2cap_conn *conn = chan->conn;
422 struct sock *sk = chan->sk;
423
424 BT_DBG("chan %p state %d socket %p", chan, chan->state, sk->sk_socket);
425
426 switch (chan->state) {
427 case BT_LISTEN:
428 l2cap_chan_cleanup_listen(sk);
429
430 l2cap_state_change(chan, BT_CLOSED);
431 sock_set_flag(sk, SOCK_ZAPPED);
432 break;
433
434 case BT_CONNECTED:
435 case BT_CONFIG:
436 if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED &&
437 conn->hcon->type == ACL_LINK) {
438 __clear_chan_timer(chan);
439 __set_chan_timer(chan, sk->sk_sndtimeo);
440 l2cap_send_disconn_req(conn, chan, reason);
441 } else
442 l2cap_chan_del(chan, reason);
443 break;
444
445 case BT_CONNECT2:
446 if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED &&
447 conn->hcon->type == ACL_LINK) {
448 struct l2cap_conn_rsp rsp;
449 __u16 result;
450
451 if (bt_sk(sk)->defer_setup)
452 result = L2CAP_CR_SEC_BLOCK;
453 else
454 result = L2CAP_CR_BAD_PSM;
455 l2cap_state_change(chan, BT_DISCONN);
456
457 rsp.scid = cpu_to_le16(chan->dcid);
458 rsp.dcid = cpu_to_le16(chan->scid);
459 rsp.result = cpu_to_le16(result);
460 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
461 l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
462 sizeof(rsp), &rsp);
463 }
464
465 l2cap_chan_del(chan, reason);
466 break;
467
468 case BT_CONNECT:
469 case BT_DISCONN:
470 l2cap_chan_del(chan, reason);
471 break;
472
473 default:
474 sock_set_flag(sk, SOCK_ZAPPED);
475 break;
476 }
477}
478
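/* Map the channel type, PSM and security level to an HCI authentication requirement */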
479static inline u8 l2cap_get_auth_type(struct l2cap_chan *chan)
480{
481 if (chan->chan_type == L2CAP_CHAN_RAW) {
482 switch (chan->sec_level) {
483 case BT_SECURITY_HIGH:
484 return HCI_AT_DEDICATED_BONDING_MITM;
485 case BT_SECURITY_MEDIUM:
486 return HCI_AT_DEDICATED_BONDING;
487 default:
488 return HCI_AT_NO_BONDING;
489 }
490 } else if (chan->psm == cpu_to_le16(0x0001)) {
491 if (chan->sec_level == BT_SECURITY_LOW)
492 chan->sec_level = BT_SECURITY_SDP;
493
494 if (chan->sec_level == BT_SECURITY_HIGH)
495 return HCI_AT_NO_BONDING_MITM;
496 else
497 return HCI_AT_NO_BONDING;
498 } else {
499 switch (chan->sec_level) {
500 case BT_SECURITY_HIGH:
501 return HCI_AT_GENERAL_BONDING_MITM;
502 case BT_SECURITY_MEDIUM:
503 return HCI_AT_GENERAL_BONDING;
504 default:
505 return HCI_AT_NO_BONDING;
506 }
507 }
508}
509
510/* Service level security */
511static inline int l2cap_check_security(struct l2cap_chan *chan)
512{
513 struct l2cap_conn *conn = chan->conn;
514 __u8 auth_type;
515
516 auth_type = l2cap_get_auth_type(chan);
517
518 return hci_conn_security(conn->hcon, chan->sec_level, auth_type);
519}
520
521static u8 l2cap_get_ident(struct l2cap_conn *conn)
522{
523 u8 id;
524
 526	/* Get next available identifier.
526 * 1 - 128 are used by kernel.
527 * 129 - 199 are reserved.
528 * 200 - 254 are used by utilities like l2ping, etc.
529 */
530
531 spin_lock_bh(&conn->lock);
532
533 if (++conn->tx_ident > 128)
534 conn->tx_ident = 1;
535
536 id = conn->tx_ident;
537
538 spin_unlock_bh(&conn->lock);
539
540 return id;
541}
542
543static void l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len, void *data)
544{
545 struct sk_buff *skb = l2cap_build_cmd(conn, code, ident, len, data);
546 u8 flags;
547
548 BT_DBG("code 0x%2.2x", code);
549
550 if (!skb)
551 return;
552
553 if (lmp_no_flush_capable(conn->hcon->hdev))
554 flags = ACL_START_NO_FLUSH;
555 else
556 flags = ACL_START;
557
558 bt_cb(skb)->force_active = BT_POWER_FORCE_ACTIVE_ON;
559
560 hci_send_acl(conn->hcon, skb, flags);
561}
562
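/* Build and send an S-frame (supervisory frame) carrying the given control field */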
563static inline void l2cap_send_sframe(struct l2cap_chan *chan, u16 control)
564{
565 struct sk_buff *skb;
566 struct l2cap_hdr *lh;
567 struct l2cap_conn *conn = chan->conn;
568 int count, hlen = L2CAP_HDR_SIZE + 2;
569 u8 flags;
570
571 if (chan->state != BT_CONNECTED)
572 return;
573
574 if (chan->fcs == L2CAP_FCS_CRC16)
575 hlen += 2;
576
577 BT_DBG("chan %p, control 0x%2.2x", chan, control);
578
579 count = min_t(unsigned int, conn->mtu, hlen);
580 control |= L2CAP_CTRL_FRAME_TYPE;
581
582 if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state))
583 control |= L2CAP_CTRL_FINAL;
584
585 if (test_and_clear_bit(CONN_SEND_PBIT, &chan->conn_state))
586 control |= L2CAP_CTRL_POLL;
587
588 skb = bt_skb_alloc(count, GFP_ATOMIC);
589 if (!skb)
590 return;
591
592 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
593 lh->len = cpu_to_le16(hlen - L2CAP_HDR_SIZE);
594 lh->cid = cpu_to_le16(chan->dcid);
595 put_unaligned_le16(control, skb_put(skb, 2));
596
597 if (chan->fcs == L2CAP_FCS_CRC16) {
598 u16 fcs = crc16(0, (u8 *)lh, count - 2);
599 put_unaligned_le16(fcs, skb_put(skb, 2));
600 }
601
602 if (lmp_no_flush_capable(conn->hcon->hdev))
603 flags = ACL_START_NO_FLUSH;
604 else
605 flags = ACL_START;
606
607 bt_cb(skb)->force_active = chan->force_active;
608
609 hci_send_acl(chan->conn->hcon, skb, flags);
610}
611
612static inline void l2cap_send_rr_or_rnr(struct l2cap_chan *chan, u16 control)
613{
614 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
615 control |= L2CAP_SUPER_RCV_NOT_READY;
616 set_bit(CONN_RNR_SENT, &chan->conn_state);
617 } else
618 control |= L2CAP_SUPER_RCV_READY;
619
620 control |= chan->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT;
621
622 l2cap_send_sframe(chan, control);
623}
624
625static inline int __l2cap_no_conn_pending(struct l2cap_chan *chan)
626{
627 return !test_bit(CONF_CONNECT_PEND, &chan->conf_state);
628}
629
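/* Kick off channel establishment: query the feature mask first, then send a Connect Request once security is satisfied */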
630static void l2cap_do_start(struct l2cap_chan *chan)
631{
632 struct l2cap_conn *conn = chan->conn;
633
634 if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) {
635 if (!(conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE))
636 return;
637
638 if (l2cap_check_security(chan) &&
639 __l2cap_no_conn_pending(chan)) {
640 struct l2cap_conn_req req;
641 req.scid = cpu_to_le16(chan->scid);
642 req.psm = chan->psm;
643
644 chan->ident = l2cap_get_ident(conn);
645 set_bit(CONF_CONNECT_PEND, &chan->conf_state);
646
647 l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_REQ,
648 sizeof(req), &req);
649 }
650 } else {
651 struct l2cap_info_req req;
652 req.type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
653
654 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
655 conn->info_ident = l2cap_get_ident(conn);
656
657 mod_timer(&conn->info_timer, jiffies +
658 msecs_to_jiffies(L2CAP_INFO_TIMEOUT));
659
660 l2cap_send_cmd(conn, conn->info_ident,
661 L2CAP_INFO_REQ, sizeof(req), &req);
662 }
663}
664
665static inline int l2cap_mode_supported(__u8 mode, __u32 feat_mask)
666{
667 u32 local_feat_mask = l2cap_feat_mask;
668 if (!disable_ertm)
669 local_feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING;
670
671 switch (mode) {
672 case L2CAP_MODE_ERTM:
673 return L2CAP_FEAT_ERTM & feat_mask & local_feat_mask;
674 case L2CAP_MODE_STREAMING:
675 return L2CAP_FEAT_STREAMING & feat_mask & local_feat_mask;
676 default:
677 return 0x00;
678 }
679}
680
681static void l2cap_send_disconn_req(struct l2cap_conn *conn, struct l2cap_chan *chan, int err)
682{
683 struct sock *sk;
684 struct l2cap_disconn_req req;
685
686 if (!conn)
687 return;
688
689 sk = chan->sk;
690
691 if (chan->mode == L2CAP_MODE_ERTM) {
692 __clear_retrans_timer(chan);
693 __clear_monitor_timer(chan);
694 __clear_ack_timer(chan);
695 }
696
697 req.dcid = cpu_to_le16(chan->dcid);
698 req.scid = cpu_to_le16(chan->scid);
699 l2cap_send_cmd(conn, l2cap_get_ident(conn),
700 L2CAP_DISCONN_REQ, sizeof(req), &req);
701
702 l2cap_state_change(chan, BT_DISCONN);
703 sk->sk_err = err;
704}
705
706/* ---- L2CAP connections ---- */
707static void l2cap_conn_start(struct l2cap_conn *conn)
708{
709 struct l2cap_chan *chan, *tmp;
710
711 BT_DBG("conn %p", conn);
712
713 read_lock(&conn->chan_lock);
714
715 list_for_each_entry_safe(chan, tmp, &conn->chan_l, list) {
716 struct sock *sk = chan->sk;
717
718 bh_lock_sock(sk);
719
720 if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
721 bh_unlock_sock(sk);
722 continue;
723 }
724
725 if (chan->state == BT_CONNECT) {
726 struct l2cap_conn_req req;
727
728 if (!l2cap_check_security(chan) ||
729 !__l2cap_no_conn_pending(chan)) {
730 bh_unlock_sock(sk);
731 continue;
732 }
733
734 if (!l2cap_mode_supported(chan->mode, conn->feat_mask)
735 && test_bit(CONF_STATE2_DEVICE,
736 &chan->conf_state)) {
737 /* l2cap_chan_close() calls list_del(chan)
738 * so release the lock */
739 read_unlock(&conn->chan_lock);
740 l2cap_chan_close(chan, ECONNRESET);
741 read_lock(&conn->chan_lock);
742 bh_unlock_sock(sk);
743 continue;
744 }
745
746 req.scid = cpu_to_le16(chan->scid);
747 req.psm = chan->psm;
748
749 chan->ident = l2cap_get_ident(conn);
750 set_bit(CONF_CONNECT_PEND, &chan->conf_state);
751
752 l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_REQ,
753 sizeof(req), &req);
754
755 } else if (chan->state == BT_CONNECT2) {
756 struct l2cap_conn_rsp rsp;
757 char buf[128];
758 rsp.scid = cpu_to_le16(chan->dcid);
759 rsp.dcid = cpu_to_le16(chan->scid);
760
761 if (l2cap_check_security(chan)) {
762 if (bt_sk(sk)->defer_setup) {
763 struct sock *parent = bt_sk(sk)->parent;
764 rsp.result = cpu_to_le16(L2CAP_CR_PEND);
765 rsp.status = cpu_to_le16(L2CAP_CS_AUTHOR_PEND);
766 if (parent)
767 parent->sk_data_ready(parent, 0);
768
769 } else {
770 l2cap_state_change(chan, BT_CONFIG);
771 rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
772 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
773 }
774 } else {
775 rsp.result = cpu_to_le16(L2CAP_CR_PEND);
776 rsp.status = cpu_to_le16(L2CAP_CS_AUTHEN_PEND);
777 }
778
779 l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
780 sizeof(rsp), &rsp);
781
782 if (test_bit(CONF_REQ_SENT, &chan->conf_state) ||
783 rsp.result != L2CAP_CR_SUCCESS) {
784 bh_unlock_sock(sk);
785 continue;
786 }
787
788 set_bit(CONF_REQ_SENT, &chan->conf_state);
789 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
790 l2cap_build_conf_req(chan, buf), buf);
791 chan->num_conf_req++;
792 }
793
794 bh_unlock_sock(sk);
795 }
796
797 read_unlock(&conn->chan_lock);
798}
799
 800/* Find channel with given CID and source bdaddr.
 801 * Returns closest match.
 802 */
803static struct l2cap_chan *l2cap_global_chan_by_scid(int state, __le16 cid, bdaddr_t *src)
804{
805 struct l2cap_chan *c, *c1 = NULL;
806
807 read_lock(&chan_list_lock);
808
809 list_for_each_entry(c, &chan_list, global_l) {
810 struct sock *sk = c->sk;
811
812 if (state && c->state != state)
813 continue;
814
815 if (c->scid == cid) {
816 /* Exact match. */
817 if (!bacmp(&bt_sk(sk)->src, src)) {
818 read_unlock(&chan_list_lock);
819 return c;
820 }
821
822 /* Closest match */
823 if (!bacmp(&bt_sk(sk)->src, BDADDR_ANY))
824 c1 = c;
825 }
826 }
827
828 read_unlock(&chan_list_lock);
829
830 return c1;
831}
832
833static void l2cap_le_conn_ready(struct l2cap_conn *conn)
834{
835 struct sock *parent, *sk;
836 struct l2cap_chan *chan, *pchan;
837
838 BT_DBG("");
839
 840	/* Check if we have a socket listening on this CID */
841 pchan = l2cap_global_chan_by_scid(BT_LISTEN, L2CAP_CID_LE_DATA,
842 conn->src);
843 if (!pchan)
844 return;
845
846 parent = pchan->sk;
847
848 bh_lock_sock(parent);
849
850 /* Check for backlog size */
851 if (sk_acceptq_is_full(parent)) {
852 BT_DBG("backlog full %d", parent->sk_ack_backlog);
853 goto clean;
854 }
855
856 chan = pchan->ops->new_connection(pchan->data);
857 if (!chan)
858 goto clean;
859
860 sk = chan->sk;
861
862 write_lock_bh(&conn->chan_lock);
863
864 hci_conn_hold(conn->hcon);
865
866 bacpy(&bt_sk(sk)->src, conn->src);
867 bacpy(&bt_sk(sk)->dst, conn->dst);
868
869 bt_accept_enqueue(parent, sk);
870
871 __l2cap_chan_add(conn, chan);
872
873 __set_chan_timer(chan, sk->sk_sndtimeo);
874
875 l2cap_state_change(chan, BT_CONNECTED);
876 parent->sk_data_ready(parent, 0);
877
878 write_unlock_bh(&conn->chan_lock);
879
880clean:
881 bh_unlock_sock(parent);
882}
883
884static void l2cap_chan_ready(struct sock *sk)
885{
886 struct l2cap_chan *chan = l2cap_pi(sk)->chan;
887 struct sock *parent = bt_sk(sk)->parent;
888
889 BT_DBG("sk %p, parent %p", sk, parent);
890
891 chan->conf_state = 0;
892 __clear_chan_timer(chan);
893
894 l2cap_state_change(chan, BT_CONNECTED);
895 sk->sk_state_change(sk);
896
897 if (parent)
898 parent->sk_data_ready(parent, 0);
899}
900
901static void l2cap_conn_ready(struct l2cap_conn *conn)
902{
903 struct l2cap_chan *chan;
904
905 BT_DBG("conn %p", conn);
906
907 if (!conn->hcon->out && conn->hcon->type == LE_LINK)
908 l2cap_le_conn_ready(conn);
909
910 read_lock(&conn->chan_lock);
911
912 list_for_each_entry(chan, &conn->chan_l, list) {
913 struct sock *sk = chan->sk;
914
915 bh_lock_sock(sk);
916
917 if (conn->hcon->type == LE_LINK) {
918 if (smp_conn_security(conn, chan->sec_level))
919 l2cap_chan_ready(sk);
920
921 } else if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
922 __clear_chan_timer(chan);
923 l2cap_state_change(chan, BT_CONNECTED);
924 sk->sk_state_change(sk);
925
926 } else if (chan->state == BT_CONNECT)
927 l2cap_do_start(chan);
928
929 bh_unlock_sock(sk);
930 }
931
932 read_unlock(&conn->chan_lock);
933}
934
 935/* Notify sockets that we cannot guarantee reliability anymore */
936static void l2cap_conn_unreliable(struct l2cap_conn *conn, int err)
937{
938 struct l2cap_chan *chan;
939
940 BT_DBG("conn %p", conn);
941
942 read_lock(&conn->chan_lock);
943
944 list_for_each_entry(chan, &conn->chan_l, list) {
945 struct sock *sk = chan->sk;
946
947 if (chan->force_reliable)
948 sk->sk_err = err;
949 }
950
951 read_unlock(&conn->chan_lock);
952}
953
954static void l2cap_info_timeout(unsigned long arg)
955{
956 struct l2cap_conn *conn = (void *) arg;
957
958 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
959 conn->info_ident = 0;
960
961 l2cap_conn_start(conn);
962}
963
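/* Tear down an L2CAP connection: delete all channels, stop pending timers and free the connection state */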
964static void l2cap_conn_del(struct hci_conn *hcon, int err)
965{
966 struct l2cap_conn *conn = hcon->l2cap_data;
967 struct l2cap_chan *chan, *l;
968 struct sock *sk;
969
970 if (!conn)
971 return;
972
973 BT_DBG("hcon %p conn %p, err %d", hcon, conn, err);
974
975 kfree_skb(conn->rx_skb);
976
977 /* Kill channels */
978 list_for_each_entry_safe(chan, l, &conn->chan_l, list) {
979 sk = chan->sk;
980 bh_lock_sock(sk);
981 l2cap_chan_del(chan, err);
982 bh_unlock_sock(sk);
983 chan->ops->close(chan->data);
984 }
985
986 if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT)
987 del_timer_sync(&conn->info_timer);
988
989 if (test_and_clear_bit(HCI_CONN_LE_SMP_PEND, &hcon->pend)) {
990 del_timer(&conn->security_timer);
991 hci_conn_put(hcon);
992 }
993
994 hcon->l2cap_data = NULL;
995 kfree(conn);
996}
997
998static void security_timeout(unsigned long arg)
999{
1000 struct l2cap_conn *conn = (void *) arg;
1001
1002 l2cap_conn_del(conn->hcon, ETIMEDOUT);
1003}
1004
1005static struct l2cap_conn *l2cap_conn_add(struct hci_conn *hcon, u8 status)
1006{
1007 struct l2cap_conn *conn = hcon->l2cap_data;
1008
1009 if (conn || status)
1010 return conn;
1011
1012 conn = kzalloc(sizeof(struct l2cap_conn), GFP_ATOMIC);
1013 if (!conn)
1014 return NULL;
1015
1016 hcon->l2cap_data = conn;
1017 conn->hcon = hcon;
1018
1019 BT_DBG("hcon %p conn %p", hcon, conn);
1020
1021 if (hcon->hdev->le_mtu && hcon->type == LE_LINK)
1022 conn->mtu = hcon->hdev->le_mtu;
1023 else
1024 conn->mtu = hcon->hdev->acl_mtu;
1025
1026 conn->src = &hcon->hdev->bdaddr;
1027 conn->dst = &hcon->dst;
1028
1029 conn->feat_mask = 0;
1030
1031 spin_lock_init(&conn->lock);
1032 rwlock_init(&conn->chan_lock);
1033
1034 INIT_LIST_HEAD(&conn->chan_l);
1035
1036 if (hcon->type == LE_LINK)
1037 setup_timer(&conn->security_timer, security_timeout,
1038 (unsigned long) conn);
1039 else
1040 setup_timer(&conn->info_timer, l2cap_info_timeout,
1041 (unsigned long) conn);
1042
1043 conn->disc_reason = 0x13;
1044
1045 return conn;
1046}
1047
1048static inline void l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan)
1049{
1050 write_lock_bh(&conn->chan_lock);
1051 __l2cap_chan_add(conn, chan);
1052 write_unlock_bh(&conn->chan_lock);
1053}
1054
1055/* ---- Socket interface ---- */
1056
 1057/* Find channel with given PSM and source bdaddr.
 1058 * Returns closest match.
 1059 */
1060static struct l2cap_chan *l2cap_global_chan_by_psm(int state, __le16 psm, bdaddr_t *src)
1061{
1062 struct l2cap_chan *c, *c1 = NULL;
1063
1064 read_lock(&chan_list_lock);
1065
1066 list_for_each_entry(c, &chan_list, global_l) {
1067 struct sock *sk = c->sk;
1068
1069 if (state && c->state != state)
1070 continue;
1071
1072 if (c->psm == psm) {
1073 /* Exact match. */
1074 if (!bacmp(&bt_sk(sk)->src, src)) {
1075 read_unlock(&chan_list_lock);
1076 return c;
1077 }
1078
1079 /* Closest match */
1080 if (!bacmp(&bt_sk(sk)->src, BDADDR_ANY))
1081 c1 = c;
1082 }
1083 }
1084
1085 read_unlock(&chan_list_lock);
1086
1087 return c1;
1088}
1089
1090int l2cap_chan_connect(struct l2cap_chan *chan)
1091{
1092 struct sock *sk = chan->sk;
1093 bdaddr_t *src = &bt_sk(sk)->src;
1094 bdaddr_t *dst = &bt_sk(sk)->dst;
1095 struct l2cap_conn *conn;
1096 struct hci_conn *hcon;
1097 struct hci_dev *hdev;
1098 __u8 auth_type;
1099 int err;
1100
1101 BT_DBG("%s -> %s psm 0x%2.2x", batostr(src), batostr(dst),
1102 chan->psm);
1103
1104 hdev = hci_get_route(dst, src);
1105 if (!hdev)
1106 return -EHOSTUNREACH;
1107
1108 hci_dev_lock_bh(hdev);
1109
1110 auth_type = l2cap_get_auth_type(chan);
1111
1112 if (chan->dcid == L2CAP_CID_LE_DATA)
1113 hcon = hci_connect(hdev, LE_LINK, dst,
1114 chan->sec_level, auth_type);
1115 else
1116 hcon = hci_connect(hdev, ACL_LINK, dst,
1117 chan->sec_level, auth_type);
1118
1119 if (IS_ERR(hcon)) {
1120 err = PTR_ERR(hcon);
1121 goto done;
1122 }
1123
1124 conn = l2cap_conn_add(hcon, 0);
1125 if (!conn) {
1126 hci_conn_put(hcon);
1127 err = -ENOMEM;
1128 goto done;
1129 }
1130
1131 /* Update source addr of the socket */
1132 bacpy(src, conn->src);
1133
1134 l2cap_chan_add(conn, chan);
1135
1136 l2cap_state_change(chan, BT_CONNECT);
1137 __set_chan_timer(chan, sk->sk_sndtimeo);
1138
1139 if (hcon->state == BT_CONNECTED) {
1140 if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
1141 __clear_chan_timer(chan);
1142 if (l2cap_check_security(chan))
1143 l2cap_state_change(chan, BT_CONNECTED);
1144 } else
1145 l2cap_do_start(chan);
1146 }
1147
1148 err = 0;
1149
1150done:
1151 hci_dev_unlock_bh(hdev);
1152 hci_dev_put(hdev);
1153 return err;
1154}
1155
1156int __l2cap_wait_ack(struct sock *sk)
1157{
1158 struct l2cap_chan *chan = l2cap_pi(sk)->chan;
1159 DECLARE_WAITQUEUE(wait, current);
1160 int err = 0;
1161 int timeo = HZ/5;
1162
1163 add_wait_queue(sk_sleep(sk), &wait);
1164 set_current_state(TASK_INTERRUPTIBLE);
1165 while (chan->unacked_frames > 0 && chan->conn) {
1166 if (!timeo)
1167 timeo = HZ/5;
1168
1169 if (signal_pending(current)) {
1170 err = sock_intr_errno(timeo);
1171 break;
1172 }
1173
1174 release_sock(sk);
1175 timeo = schedule_timeout(timeo);
1176 lock_sock(sk);
1177 set_current_state(TASK_INTERRUPTIBLE);
1178
1179 err = sock_error(sk);
1180 if (err)
1181 break;
1182 }
1183 set_current_state(TASK_RUNNING);
1184 remove_wait_queue(sk_sleep(sk), &wait);
1185 return err;
1186}
1187
1188static void l2cap_monitor_timeout(unsigned long arg)
1189{
1190 struct l2cap_chan *chan = (void *) arg;
1191 struct sock *sk = chan->sk;
1192
1193 BT_DBG("chan %p", chan);
1194
1195 bh_lock_sock(sk);
1196 if (chan->retry_count >= chan->remote_max_tx) {
1197 l2cap_send_disconn_req(chan->conn, chan, ECONNABORTED);
1198 bh_unlock_sock(sk);
1199 return;
1200 }
1201
1202 chan->retry_count++;
1203 __set_monitor_timer(chan);
1204
1205 l2cap_send_rr_or_rnr(chan, L2CAP_CTRL_POLL);
1206 bh_unlock_sock(sk);
1207}
1208
1209static void l2cap_retrans_timeout(unsigned long arg)
1210{
1211 struct l2cap_chan *chan = (void *) arg;
1212 struct sock *sk = chan->sk;
1213
1214 BT_DBG("chan %p", chan);
1215
1216 bh_lock_sock(sk);
1217 chan->retry_count = 1;
1218 __set_monitor_timer(chan);
1219
1220 set_bit(CONN_WAIT_F, &chan->conn_state);
1221
1222 l2cap_send_rr_or_rnr(chan, L2CAP_CTRL_POLL);
1223 bh_unlock_sock(sk);
1224}
1225
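/* Free transmitted frames the peer has acknowledged and stop the retransmission timer when none remain */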
1226static void l2cap_drop_acked_frames(struct l2cap_chan *chan)
1227{
1228 struct sk_buff *skb;
1229
1230 while ((skb = skb_peek(&chan->tx_q)) &&
1231 chan->unacked_frames) {
1232 if (bt_cb(skb)->tx_seq == chan->expected_ack_seq)
1233 break;
1234
1235 skb = skb_dequeue(&chan->tx_q);
1236 kfree_skb(skb);
1237
1238 chan->unacked_frames--;
1239 }
1240
1241 if (!chan->unacked_frames)
1242 __clear_retrans_timer(chan);
1243}
1244
1245void l2cap_do_send(struct l2cap_chan *chan, struct sk_buff *skb)
1246{
1247 struct hci_conn *hcon = chan->conn->hcon;
1248 u16 flags;
1249
1250 BT_DBG("chan %p, skb %p len %d", chan, skb, skb->len);
1251
1252 if (!chan->flushable && lmp_no_flush_capable(hcon->hdev))
1253 flags = ACL_START_NO_FLUSH;
1254 else
1255 flags = ACL_START;
1256
1257 bt_cb(skb)->force_active = chan->force_active;
1258 hci_send_acl(hcon, skb, flags);
1259}
1260
1261void l2cap_streaming_send(struct l2cap_chan *chan)
1262{
1263 struct sk_buff *skb;
1264 u16 control, fcs;
1265
1266 while ((skb = skb_dequeue(&chan->tx_q))) {
1267 control = get_unaligned_le16(skb->data + L2CAP_HDR_SIZE);
1268 control |= chan->next_tx_seq << L2CAP_CTRL_TXSEQ_SHIFT;
1269 put_unaligned_le16(control, skb->data + L2CAP_HDR_SIZE);
1270
1271 if (chan->fcs == L2CAP_FCS_CRC16) {
1272 fcs = crc16(0, (u8 *)skb->data, skb->len - 2);
1273 put_unaligned_le16(fcs, skb->data + skb->len - 2);
1274 }
1275
1276 l2cap_do_send(chan, skb);
1277
1278 chan->next_tx_seq = (chan->next_tx_seq + 1) % 64;
1279 }
1280}
1281
1282static void l2cap_retransmit_one_frame(struct l2cap_chan *chan, u8 tx_seq)
1283{
1284 struct sk_buff *skb, *tx_skb;
1285 u16 control, fcs;
1286
1287 skb = skb_peek(&chan->tx_q);
1288 if (!skb)
1289 return;
1290
1291 do {
1292 if (bt_cb(skb)->tx_seq == tx_seq)
1293 break;
1294
1295 if (skb_queue_is_last(&chan->tx_q, skb))
1296 return;
1297
1298 } while ((skb = skb_queue_next(&chan->tx_q, skb)));
1299
1300 if (chan->remote_max_tx &&
1301 bt_cb(skb)->retries == chan->remote_max_tx) {
1302 l2cap_send_disconn_req(chan->conn, chan, ECONNABORTED);
1303 return;
1304 }
1305
1306 tx_skb = skb_clone(skb, GFP_ATOMIC);
1307 bt_cb(skb)->retries++;
1308 control = get_unaligned_le16(tx_skb->data + L2CAP_HDR_SIZE);
1309 control &= L2CAP_CTRL_SAR;
1310
1311 if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state))
1312 control |= L2CAP_CTRL_FINAL;
1313
1314 control |= (chan->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT)
1315 | (tx_seq << L2CAP_CTRL_TXSEQ_SHIFT);
1316
1317 put_unaligned_le16(control, tx_skb->data + L2CAP_HDR_SIZE);
1318
1319 if (chan->fcs == L2CAP_FCS_CRC16) {
1320 fcs = crc16(0, (u8 *)tx_skb->data, tx_skb->len - 2);
1321 put_unaligned_le16(fcs, tx_skb->data + tx_skb->len - 2);
1322 }
1323
1324 l2cap_do_send(chan, tx_skb);
1325}
1326
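/* Send queued I-frames from tx_send_head while the transmit window has room; returns the number of frames sent */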
1327int l2cap_ertm_send(struct l2cap_chan *chan)
1328{
1329 struct sk_buff *skb, *tx_skb;
1330 u16 control, fcs;
1331 int nsent = 0;
1332
1333 if (chan->state != BT_CONNECTED)
1334 return -ENOTCONN;
1335
1336 while ((skb = chan->tx_send_head) && (!l2cap_tx_window_full(chan))) {
1337
1338 if (chan->remote_max_tx &&
1339 bt_cb(skb)->retries == chan->remote_max_tx) {
1340 l2cap_send_disconn_req(chan->conn, chan, ECONNABORTED);
1341 break;
1342 }
1343
1344 tx_skb = skb_clone(skb, GFP_ATOMIC);
1345
1346 bt_cb(skb)->retries++;
1347
1348 control = get_unaligned_le16(tx_skb->data + L2CAP_HDR_SIZE);
1349 control &= L2CAP_CTRL_SAR;
1350
1351 if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state))
1352 control |= L2CAP_CTRL_FINAL;
1353
1354 control |= (chan->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT)
1355 | (chan->next_tx_seq << L2CAP_CTRL_TXSEQ_SHIFT);
1356 put_unaligned_le16(control, tx_skb->data + L2CAP_HDR_SIZE);
1357
1358
1359 if (chan->fcs == L2CAP_FCS_CRC16) {
1360 fcs = crc16(0, (u8 *)skb->data, tx_skb->len - 2);
1361 put_unaligned_le16(fcs, skb->data + tx_skb->len - 2);
1362 }
1363
1364 l2cap_do_send(chan, tx_skb);
1365
1366 __set_retrans_timer(chan);
1367
1368 bt_cb(skb)->tx_seq = chan->next_tx_seq;
1369 chan->next_tx_seq = (chan->next_tx_seq + 1) % 64;
1370
1371 if (bt_cb(skb)->retries == 1)
1372 chan->unacked_frames++;
1373
1374 chan->frames_sent++;
1375
1376 if (skb_queue_is_last(&chan->tx_q, skb))
1377 chan->tx_send_head = NULL;
1378 else
1379 chan->tx_send_head = skb_queue_next(&chan->tx_q, skb);
1380
1381 nsent++;
1382 }
1383
1384 return nsent;
1385}
1386
1387static int l2cap_retransmit_frames(struct l2cap_chan *chan)
1388{
1389 int ret;
1390
1391 if (!skb_queue_empty(&chan->tx_q))
1392 chan->tx_send_head = chan->tx_q.next;
1393
1394 chan->next_tx_seq = chan->expected_ack_seq;
1395 ret = l2cap_ertm_send(chan);
1396 return ret;
1397}
1398
1399static void l2cap_send_ack(struct l2cap_chan *chan)
1400{
1401 u16 control = 0;
1402
1403 control |= chan->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT;
1404
1405 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
1406 control |= L2CAP_SUPER_RCV_NOT_READY;
1407 set_bit(CONN_RNR_SENT, &chan->conn_state);
1408 l2cap_send_sframe(chan, control);
1409 return;
1410 }
1411
1412 if (l2cap_ertm_send(chan) > 0)
1413 return;
1414
1415 control |= L2CAP_SUPER_RCV_READY;
1416 l2cap_send_sframe(chan, control);
1417}
1418
1419static void l2cap_send_srejtail(struct l2cap_chan *chan)
1420{
1421 struct srej_list *tail;
1422 u16 control;
1423
1424 control = L2CAP_SUPER_SELECT_REJECT;
1425 control |= L2CAP_CTRL_FINAL;
1426
1427 tail = list_entry((&chan->srej_l)->prev, struct srej_list, list);
1428 control |= tail->tx_seq << L2CAP_CTRL_REQSEQ_SHIFT;
1429
1430 l2cap_send_sframe(chan, control);
1431}
1432
1433static inline int l2cap_skbuff_fromiovec(struct sock *sk, struct msghdr *msg, int len, int count, struct sk_buff *skb)
1434{
1435 struct l2cap_conn *conn = l2cap_pi(sk)->chan->conn;
1436 struct sk_buff **frag;
1437 int err, sent = 0;
1438
1439 if (memcpy_fromiovec(skb_put(skb, count), msg->msg_iov, count))
1440 return -EFAULT;
1441
1442 sent += count;
1443 len -= count;
1444
1445 /* Continuation fragments (no L2CAP header) */
1446 frag = &skb_shinfo(skb)->frag_list;
1447 while (len) {
1448 count = min_t(unsigned int, conn->mtu, len);
1449
1450 *frag = bt_skb_send_alloc(sk, count, msg->msg_flags & MSG_DONTWAIT, &err);
1451 if (!*frag)
1452 return err;
1453 if (memcpy_fromiovec(skb_put(*frag, count), msg->msg_iov, count))
1454 return -EFAULT;
1455
1456 sent += count;
1457 len -= count;
1458
1459 frag = &(*frag)->next;
1460 }
1461
1462 return sent;
1463}
1464
1465struct sk_buff *l2cap_create_connless_pdu(struct l2cap_chan *chan, struct msghdr *msg, size_t len)
1466{
1467 struct sock *sk = chan->sk;
1468 struct l2cap_conn *conn = chan->conn;
1469 struct sk_buff *skb;
1470 int err, count, hlen = L2CAP_HDR_SIZE + 2;
1471 struct l2cap_hdr *lh;
1472
1473 BT_DBG("sk %p len %d", sk, (int)len);
1474
1475 count = min_t(unsigned int, (conn->mtu - hlen), len);
1476 skb = bt_skb_send_alloc(sk, count + hlen,
1477 msg->msg_flags & MSG_DONTWAIT, &err);
1478 if (!skb)
1479 return ERR_PTR(err);
1480
1481 /* Create L2CAP header */
1482 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
1483 lh->cid = cpu_to_le16(chan->dcid);
1484 lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
1485 put_unaligned_le16(chan->psm, skb_put(skb, 2));
1486
1487 err = l2cap_skbuff_fromiovec(sk, msg, len, count, skb);
1488 if (unlikely(err < 0)) {
1489 kfree_skb(skb);
1490 return ERR_PTR(err);
1491 }
1492 return skb;
1493}
1494
1495struct sk_buff *l2cap_create_basic_pdu(struct l2cap_chan *chan, struct msghdr *msg, size_t len)
1496{
1497 struct sock *sk = chan->sk;
1498 struct l2cap_conn *conn = chan->conn;
1499 struct sk_buff *skb;
1500 int err, count, hlen = L2CAP_HDR_SIZE;
1501 struct l2cap_hdr *lh;
1502
1503 BT_DBG("sk %p len %d", sk, (int)len);
1504
1505 count = min_t(unsigned int, (conn->mtu - hlen), len);
1506 skb = bt_skb_send_alloc(sk, count + hlen,
1507 msg->msg_flags & MSG_DONTWAIT, &err);
1508 if (!skb)
1509 return ERR_PTR(err);
1510
1511 /* Create L2CAP header */
1512 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
1513 lh->cid = cpu_to_le16(chan->dcid);
1514 lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
1515
1516 err = l2cap_skbuff_fromiovec(sk, msg, len, count, skb);
1517 if (unlikely(err < 0)) {
1518 kfree_skb(skb);
1519 return ERR_PTR(err);
1520 }
1521 return skb;
1522}
1523
1524struct sk_buff *l2cap_create_iframe_pdu(struct l2cap_chan *chan, struct msghdr *msg, size_t len, u16 control, u16 sdulen)
1525{
1526 struct sock *sk = chan->sk;
1527 struct l2cap_conn *conn = chan->conn;
1528 struct sk_buff *skb;
1529 int err, count, hlen = L2CAP_HDR_SIZE + 2;
1530 struct l2cap_hdr *lh;
1531
1532 BT_DBG("sk %p len %d", sk, (int)len);
1533
1534 if (!conn)
1535 return ERR_PTR(-ENOTCONN);
1536
1537 if (sdulen)
1538 hlen += 2;
1539
1540 if (chan->fcs == L2CAP_FCS_CRC16)
1541 hlen += 2;
1542
1543 count = min_t(unsigned int, (conn->mtu - hlen), len);
1544 skb = bt_skb_send_alloc(sk, count + hlen,
1545 msg->msg_flags & MSG_DONTWAIT, &err);
1546 if (!skb)
1547 return ERR_PTR(err);
1548
1549 /* Create L2CAP header */
1550 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
1551 lh->cid = cpu_to_le16(chan->dcid);
1552 lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
1553 put_unaligned_le16(control, skb_put(skb, 2));
1554 if (sdulen)
1555 put_unaligned_le16(sdulen, skb_put(skb, 2));
1556
1557 err = l2cap_skbuff_fromiovec(sk, msg, len, count, skb);
1558 if (unlikely(err < 0)) {
1559 kfree_skb(skb);
1560 return ERR_PTR(err);
1561 }
1562
1563 if (chan->fcs == L2CAP_FCS_CRC16)
1564 put_unaligned_le16(0, skb_put(skb, 2));
1565
1566 bt_cb(skb)->retries = 0;
1567 return skb;
1568}
1569
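/* Segment an SDU larger than the remote MPS into SAR start/continuation/end PDUs and queue them on tx_q */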
1570int l2cap_sar_segment_sdu(struct l2cap_chan *chan, struct msghdr *msg, size_t len)
1571{
1572 struct sk_buff *skb;
1573 struct sk_buff_head sar_queue;
1574 u16 control;
1575 size_t size = 0;
1576
1577 skb_queue_head_init(&sar_queue);
1578 control = L2CAP_SDU_START;
1579 skb = l2cap_create_iframe_pdu(chan, msg, chan->remote_mps, control, len);
1580 if (IS_ERR(skb))
1581 return PTR_ERR(skb);
1582
1583 __skb_queue_tail(&sar_queue, skb);
1584 len -= chan->remote_mps;
1585 size += chan->remote_mps;
1586
1587 while (len > 0) {
1588 size_t buflen;
1589
1590 if (len > chan->remote_mps) {
1591 control = L2CAP_SDU_CONTINUE;
1592 buflen = chan->remote_mps;
1593 } else {
1594 control = L2CAP_SDU_END;
1595 buflen = len;
1596 }
1597
1598 skb = l2cap_create_iframe_pdu(chan, msg, buflen, control, 0);
1599 if (IS_ERR(skb)) {
1600 skb_queue_purge(&sar_queue);
1601 return PTR_ERR(skb);
1602 }
1603
1604 __skb_queue_tail(&sar_queue, skb);
1605 len -= buflen;
1606 size += buflen;
1607 }
1608 skb_queue_splice_tail(&sar_queue, &chan->tx_q);
1609 if (chan->tx_send_head == NULL)
1610 chan->tx_send_head = sar_queue.next;
1611
1612 return size;
1613}
1614
1615int l2cap_chan_send(struct l2cap_chan *chan, struct msghdr *msg, size_t len)
1616{
1617 struct sk_buff *skb;
1618 u16 control;
1619 int err;
1620
1621 /* Connectionless channel */
1622 if (chan->chan_type == L2CAP_CHAN_CONN_LESS) {
1623 skb = l2cap_create_connless_pdu(chan, msg, len);
1624 if (IS_ERR(skb))
1625 return PTR_ERR(skb);
1626
1627 l2cap_do_send(chan, skb);
1628 return len;
1629 }
1630
1631 switch (chan->mode) {
1632 case L2CAP_MODE_BASIC:
1633 /* Check outgoing MTU */
1634 if (len > chan->omtu)
1635 return -EMSGSIZE;
1636
1637 /* Create a basic PDU */
1638 skb = l2cap_create_basic_pdu(chan, msg, len);
1639 if (IS_ERR(skb))
1640 return PTR_ERR(skb);
1641
1642 l2cap_do_send(chan, skb);
1643 err = len;
1644 break;
1645
1646 case L2CAP_MODE_ERTM:
1647 case L2CAP_MODE_STREAMING:
1648 /* Entire SDU fits into one PDU */
1649 if (len <= chan->remote_mps) {
1650 control = L2CAP_SDU_UNSEGMENTED;
1651 skb = l2cap_create_iframe_pdu(chan, msg, len, control,
1652 0);
1653 if (IS_ERR(skb))
1654 return PTR_ERR(skb);
1655
1656 __skb_queue_tail(&chan->tx_q, skb);
1657
1658 if (chan->tx_send_head == NULL)
1659 chan->tx_send_head = skb;
1660
1661 } else {
 1662			/* Segment SDU into multiple PDUs */
1663 err = l2cap_sar_segment_sdu(chan, msg, len);
1664 if (err < 0)
1665 return err;
1666 }
1667
1668 if (chan->mode == L2CAP_MODE_STREAMING) {
1669 l2cap_streaming_send(chan);
1670 err = len;
1671 break;
1672 }
1673
1674 if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state) &&
1675 test_bit(CONN_WAIT_F, &chan->conn_state)) {
1676 err = len;
1677 break;
1678 }
1679
1680 err = l2cap_ertm_send(chan);
1681 if (err >= 0)
1682 err = len;
1683
1684 break;
1685
1686 default:
 1687		BT_DBG("bad mode %1.1x", chan->mode);
1688 err = -EBADFD;
1689 }
1690
1691 return err;
1692}
1693
1694/* Copy frame to all raw sockets on that connection */
1695static void l2cap_raw_recv(struct l2cap_conn *conn, struct sk_buff *skb)
1696{
1697 struct sk_buff *nskb;
1698 struct l2cap_chan *chan;
1699
1700 BT_DBG("conn %p", conn);
1701
1702 read_lock(&conn->chan_lock);
1703 list_for_each_entry(chan, &conn->chan_l, list) {
1704 struct sock *sk = chan->sk;
1705 if (chan->chan_type != L2CAP_CHAN_RAW)
1706 continue;
1707
1708 /* Don't send frame to the socket it came from */
1709 if (skb->sk == sk)
1710 continue;
1711 nskb = skb_clone(skb, GFP_ATOMIC);
1712 if (!nskb)
1713 continue;
1714
1715 if (chan->ops->recv(chan->data, nskb))
1716 kfree_skb(nskb);
1717 }
1718 read_unlock(&conn->chan_lock);
1719}
1720
1721/* ---- L2CAP signalling commands ---- */
1722static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
1723 u8 code, u8 ident, u16 dlen, void *data)
1724{
1725 struct sk_buff *skb, **frag;
1726 struct l2cap_cmd_hdr *cmd;
1727 struct l2cap_hdr *lh;
1728 int len, count;
1729
1730 BT_DBG("conn %p, code 0x%2.2x, ident 0x%2.2x, len %d",
1731 conn, code, ident, dlen);
1732
1733 len = L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE + dlen;
1734 count = min_t(unsigned int, conn->mtu, len);
1735
1736 skb = bt_skb_alloc(count, GFP_ATOMIC);
1737 if (!skb)
1738 return NULL;
1739
1740 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
1741 lh->len = cpu_to_le16(L2CAP_CMD_HDR_SIZE + dlen);
1742
1743 if (conn->hcon->type == LE_LINK)
1744 lh->cid = cpu_to_le16(L2CAP_CID_LE_SIGNALING);
1745 else
1746 lh->cid = cpu_to_le16(L2CAP_CID_SIGNALING);
1747
1748 cmd = (struct l2cap_cmd_hdr *) skb_put(skb, L2CAP_CMD_HDR_SIZE);
1749 cmd->code = code;
1750 cmd->ident = ident;
1751 cmd->len = cpu_to_le16(dlen);
1752
1753 if (dlen) {
1754 count -= L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE;
1755 memcpy(skb_put(skb, count), data, count);
1756 data += count;
1757 }
1758
1759 len -= skb->len;
1760
1761 /* Continuation fragments (no L2CAP header) */
1762 frag = &skb_shinfo(skb)->frag_list;
1763 while (len) {
1764 count = min_t(unsigned int, conn->mtu, len);
1765
1766 *frag = bt_skb_alloc(count, GFP_ATOMIC);
1767 if (!*frag)
1768 goto fail;
1769
1770 memcpy(skb_put(*frag, count), data, count);
1771
1772 len -= count;
1773 data += count;
1774
1775 frag = &(*frag)->next;
1776 }
1777
1778 return skb;
1779
1780fail:
1781 kfree_skb(skb);
1782 return NULL;
1783}
1784
1785static inline int l2cap_get_conf_opt(void **ptr, int *type, int *olen, unsigned long *val)
1786{
1787 struct l2cap_conf_opt *opt = *ptr;
1788 int len;
1789
1790 len = L2CAP_CONF_OPT_SIZE + opt->len;
1791 *ptr += len;
1792
1793 *type = opt->type;
1794 *olen = opt->len;
1795
1796 switch (opt->len) {
1797 case 1:
1798 *val = *((u8 *) opt->val);
1799 break;
1800
1801 case 2:
1802 *val = get_unaligned_le16(opt->val);
1803 break;
1804
1805 case 4:
1806 *val = get_unaligned_le32(opt->val);
1807 break;
1808
1809 default:
1810 *val = (unsigned long) opt->val;
1811 break;
1812 }
1813
1814 BT_DBG("type 0x%2.2x len %d val 0x%lx", *type, opt->len, *val);
1815 return len;
1816}
1817
1818static void l2cap_add_conf_opt(void **ptr, u8 type, u8 len, unsigned long val)
1819{
1820 struct l2cap_conf_opt *opt = *ptr;
1821
1822 BT_DBG("type 0x%2.2x len %d val 0x%lx", type, len, val);
1823
1824 opt->type = type;
1825 opt->len = len;
1826
1827 switch (len) {
1828 case 1:
1829 *((u8 *) opt->val) = val;
1830 break;
1831
1832 case 2:
1833 put_unaligned_le16(val, opt->val);
1834 break;
1835
1836 case 4:
1837 put_unaligned_le32(val, opt->val);
1838 break;
1839
1840 default:
1841 memcpy(opt->val, (void *) val, len);
1842 break;
1843 }
1844
1845 *ptr += L2CAP_CONF_OPT_SIZE + len;
1846}
1847
1848static void l2cap_ack_timeout(unsigned long arg)
1849{
1850 struct l2cap_chan *chan = (void *) arg;
1851
1852 bh_lock_sock(chan->sk);
1853 l2cap_send_ack(chan);
1854 bh_unlock_sock(chan->sk);
1855}
1856
1857static inline void l2cap_ertm_init(struct l2cap_chan *chan)
1858{
1859 struct sock *sk = chan->sk;
1860
1861 chan->expected_ack_seq = 0;
1862 chan->unacked_frames = 0;
1863 chan->buffer_seq = 0;
1864 chan->num_acked = 0;
1865 chan->frames_sent = 0;
1866
1867 setup_timer(&chan->retrans_timer, l2cap_retrans_timeout,
1868 (unsigned long) chan);
1869 setup_timer(&chan->monitor_timer, l2cap_monitor_timeout,
1870 (unsigned long) chan);
1871 setup_timer(&chan->ack_timer, l2cap_ack_timeout, (unsigned long) chan);
1872
1873 skb_queue_head_init(&chan->srej_q);
1874
1875 INIT_LIST_HEAD(&chan->srej_l);
1876
1877
1878 sk->sk_backlog_rcv = l2cap_ertm_data_rcv;
1879}
1880
1881static inline __u8 l2cap_select_mode(__u8 mode, __u16 remote_feat_mask)
1882{
1883 switch (mode) {
1884 case L2CAP_MODE_STREAMING:
1885 case L2CAP_MODE_ERTM:
1886 if (l2cap_mode_supported(mode, remote_feat_mask))
1887 return mode;
1888 /* fall through */
1889 default:
1890 return L2CAP_MODE_BASIC;
1891 }
1892}
1893
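/* Build a Configure Request: select a mode and add MTU, RFC and (when applicable) FCS options */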
1894static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data)
1895{
1896 struct l2cap_conf_req *req = data;
1897 struct l2cap_conf_rfc rfc = { .mode = chan->mode };
1898 void *ptr = req->data;
1899
1900 BT_DBG("chan %p", chan);
1901
1902 if (chan->num_conf_req || chan->num_conf_rsp)
1903 goto done;
1904
1905 switch (chan->mode) {
1906 case L2CAP_MODE_STREAMING:
1907 case L2CAP_MODE_ERTM:
1908 if (test_bit(CONF_STATE2_DEVICE, &chan->conf_state))
1909 break;
1910
1911 /* fall through */
1912 default:
1913 chan->mode = l2cap_select_mode(rfc.mode, chan->conn->feat_mask);
1914 break;
1915 }
1916
1917done:
1918 if (chan->imtu != L2CAP_DEFAULT_MTU)
1919 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->imtu);
1920
1921 switch (chan->mode) {
1922 case L2CAP_MODE_BASIC:
1923 if (!(chan->conn->feat_mask & L2CAP_FEAT_ERTM) &&
1924 !(chan->conn->feat_mask & L2CAP_FEAT_STREAMING))
1925 break;
1926
1927 rfc.mode = L2CAP_MODE_BASIC;
1928 rfc.txwin_size = 0;
1929 rfc.max_transmit = 0;
1930 rfc.retrans_timeout = 0;
1931 rfc.monitor_timeout = 0;
1932 rfc.max_pdu_size = 0;
1933
1934 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
1935 (unsigned long) &rfc);
1936 break;
1937
1938 case L2CAP_MODE_ERTM:
1939 rfc.mode = L2CAP_MODE_ERTM;
1940 rfc.txwin_size = chan->tx_win;
1941 rfc.max_transmit = chan->max_tx;
1942 rfc.retrans_timeout = 0;
1943 rfc.monitor_timeout = 0;
1944 rfc.max_pdu_size = cpu_to_le16(L2CAP_DEFAULT_MAX_PDU_SIZE);
1945 if (L2CAP_DEFAULT_MAX_PDU_SIZE > chan->conn->mtu - 10)
1946 rfc.max_pdu_size = cpu_to_le16(chan->conn->mtu - 10);
1947
1948 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
1949 (unsigned long) &rfc);
1950
1951 if (!(chan->conn->feat_mask & L2CAP_FEAT_FCS))
1952 break;
1953
1954 if (chan->fcs == L2CAP_FCS_NONE ||
1955 test_bit(CONF_NO_FCS_RECV, &chan->conf_state)) {
1956 chan->fcs = L2CAP_FCS_NONE;
1957 l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1, chan->fcs);
1958 }
1959 break;
1960
1961 case L2CAP_MODE_STREAMING:
1962 rfc.mode = L2CAP_MODE_STREAMING;
1963 rfc.txwin_size = 0;
1964 rfc.max_transmit = 0;
1965 rfc.retrans_timeout = 0;
1966 rfc.monitor_timeout = 0;
1967 rfc.max_pdu_size = cpu_to_le16(L2CAP_DEFAULT_MAX_PDU_SIZE);
1968 if (L2CAP_DEFAULT_MAX_PDU_SIZE > chan->conn->mtu - 10)
1969 rfc.max_pdu_size = cpu_to_le16(chan->conn->mtu - 10);
1970
1971 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
1972 (unsigned long) &rfc);
1973
1974 if (!(chan->conn->feat_mask & L2CAP_FEAT_FCS))
1975 break;
1976
1977 if (chan->fcs == L2CAP_FCS_NONE ||
1978 test_bit(CONF_NO_FCS_RECV, &chan->conf_state)) {
1979 chan->fcs = L2CAP_FCS_NONE;
1980 l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1, chan->fcs);
1981 }
1982 break;
1983 }
1984
1985 req->dcid = cpu_to_le16(chan->dcid);
1986 req->flags = cpu_to_le16(0);
1987
1988 return ptr - data;
1989}
1990
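/* Parse the peer's Configure Request options and build our Configure Response */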
1991static int l2cap_parse_conf_req(struct l2cap_chan *chan, void *data)
1992{
1993 struct l2cap_conf_rsp *rsp = data;
1994 void *ptr = rsp->data;
1995 void *req = chan->conf_req;
1996 int len = chan->conf_len;
1997 int type, hint, olen;
1998 unsigned long val;
1999 struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
2000 u16 mtu = L2CAP_DEFAULT_MTU;
2001 u16 result = L2CAP_CONF_SUCCESS;
2002
2003 BT_DBG("chan %p", chan);
2004
2005 while (len >= L2CAP_CONF_OPT_SIZE) {
2006 len -= l2cap_get_conf_opt(&req, &type, &olen, &val);
2007
2008 hint = type & L2CAP_CONF_HINT;
2009 type &= L2CAP_CONF_MASK;
2010
2011 switch (type) {
2012 case L2CAP_CONF_MTU:
2013 mtu = val;
2014 break;
2015
2016 case L2CAP_CONF_FLUSH_TO:
2017 chan->flush_to = val;
2018 break;
2019
2020 case L2CAP_CONF_QOS:
2021 break;
2022
2023 case L2CAP_CONF_RFC:
2024 if (olen == sizeof(rfc))
2025 memcpy(&rfc, (void *) val, olen);
2026 break;
2027
2028 case L2CAP_CONF_FCS:
2029 if (val == L2CAP_FCS_NONE)
2030 set_bit(CONF_NO_FCS_RECV, &chan->conf_state);
2031
2032 break;
2033
2034 default:
2035 if (hint)
2036 break;
2037
2038 result = L2CAP_CONF_UNKNOWN;
2039 *((u8 *) ptr++) = type;
2040 break;
2041 }
2042 }
2043
2044 if (chan->num_conf_rsp || chan->num_conf_req > 1)
2045 goto done;
2046
2047 switch (chan->mode) {
2048 case L2CAP_MODE_STREAMING:
2049 case L2CAP_MODE_ERTM:
2050 if (!test_bit(CONF_STATE2_DEVICE, &chan->conf_state)) {
2051 chan->mode = l2cap_select_mode(rfc.mode,
2052 chan->conn->feat_mask);
2053 break;
2054 }
2055
2056 if (chan->mode != rfc.mode)
2057 return -ECONNREFUSED;
2058
2059 break;
2060 }
2061
2062done:
2063 if (chan->mode != rfc.mode) {
2064 result = L2CAP_CONF_UNACCEPT;
2065 rfc.mode = chan->mode;
2066
2067 if (chan->num_conf_rsp == 1)
2068 return -ECONNREFUSED;
2069
2070 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
2071 sizeof(rfc), (unsigned long) &rfc);
2072 }
2073
2074
2075 if (result == L2CAP_CONF_SUCCESS) {
2076 /* Configure output options and let the other side know
2077 * which ones we don't like. */
2078
2079 if (mtu < L2CAP_DEFAULT_MIN_MTU)
2080 result = L2CAP_CONF_UNACCEPT;
2081 else {
2082 chan->omtu = mtu;
2083 set_bit(CONF_MTU_DONE, &chan->conf_state);
2084 }
2085 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->omtu);
2086
2087 switch (rfc.mode) {
2088 case L2CAP_MODE_BASIC:
2089 chan->fcs = L2CAP_FCS_NONE;
2090 set_bit(CONF_MODE_DONE, &chan->conf_state);
2091 break;
2092
2093 case L2CAP_MODE_ERTM:
2094 chan->remote_tx_win = rfc.txwin_size;
2095 chan->remote_max_tx = rfc.max_transmit;
2096
2097 if (le16_to_cpu(rfc.max_pdu_size) > chan->conn->mtu - 10)
2098 rfc.max_pdu_size = cpu_to_le16(chan->conn->mtu - 10);
2099
2100 chan->remote_mps = le16_to_cpu(rfc.max_pdu_size);
2101
 2102			rfc.retrans_timeout =
 2103				cpu_to_le16(L2CAP_DEFAULT_RETRANS_TO);
 2104			rfc.monitor_timeout =
 2105				cpu_to_le16(L2CAP_DEFAULT_MONITOR_TO);
2106
2107 set_bit(CONF_MODE_DONE, &chan->conf_state);
2108
2109 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
2110 sizeof(rfc), (unsigned long) &rfc);
2111
2112 break;
2113
2114 case L2CAP_MODE_STREAMING:
2115 if (le16_to_cpu(rfc.max_pdu_size) > chan->conn->mtu - 10)
2116 rfc.max_pdu_size = cpu_to_le16(chan->conn->mtu - 10);
2117
2118 chan->remote_mps = le16_to_cpu(rfc.max_pdu_size);
2119
2120 set_bit(CONF_MODE_DONE, &chan->conf_state);
2121
2122 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
2123 sizeof(rfc), (unsigned long) &rfc);
2124
2125 break;
2126
2127 default:
2128 result = L2CAP_CONF_UNACCEPT;
2129
2130 memset(&rfc, 0, sizeof(rfc));
2131 rfc.mode = chan->mode;
2132 }
2133
2134 if (result == L2CAP_CONF_SUCCESS)
2135 set_bit(CONF_OUTPUT_DONE, &chan->conf_state);
2136 }
2137 rsp->scid = cpu_to_le16(chan->dcid);
2138 rsp->result = cpu_to_le16(result);
2139 rsp->flags = cpu_to_le16(0x0000);
2140
2141 return ptr - data;
2142}
2143
2144static int l2cap_parse_conf_rsp(struct l2cap_chan *chan, void *rsp, int len, void *data, u16 *result)
2145{
2146 struct l2cap_conf_req *req = data;
2147 void *ptr = req->data;
2148 int type, olen;
2149 unsigned long val;
2150 struct l2cap_conf_rfc rfc;
2151
2152 BT_DBG("chan %p, rsp %p, len %d, req %p", chan, rsp, len, data);
2153
2154 while (len >= L2CAP_CONF_OPT_SIZE) {
2155 len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);
2156
2157 switch (type) {
2158 case L2CAP_CONF_MTU:
2159 if (val < L2CAP_DEFAULT_MIN_MTU) {
2160 *result = L2CAP_CONF_UNACCEPT;
2161 chan->imtu = L2CAP_DEFAULT_MIN_MTU;
2162 } else
2163 chan->imtu = val;
2164 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->imtu);
2165 break;
2166
2167 case L2CAP_CONF_FLUSH_TO:
2168 chan->flush_to = val;
2169 l2cap_add_conf_opt(&ptr, L2CAP_CONF_FLUSH_TO,
2170 2, chan->flush_to);
2171 break;
2172
2173 case L2CAP_CONF_RFC:
2174 if (olen == sizeof(rfc))
2175 memcpy(&rfc, (void *)val, olen);
2176
2177 if (test_bit(CONF_STATE2_DEVICE, &chan->conf_state) &&
2178 rfc.mode != chan->mode)
2179 return -ECONNREFUSED;
2180
2181 chan->fcs = 0;
2182
2183 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
2184 sizeof(rfc), (unsigned long) &rfc);
2185 break;
2186 }
2187 }
2188
2189 if (chan->mode == L2CAP_MODE_BASIC && chan->mode != rfc.mode)
2190 return -ECONNREFUSED;
2191
2192 chan->mode = rfc.mode;
2193
2194 if (*result == L2CAP_CONF_SUCCESS) {
2195 switch (rfc.mode) {
2196 case L2CAP_MODE_ERTM:
2197 chan->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
2198 chan->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
2199 chan->mps = le16_to_cpu(rfc.max_pdu_size);
2200 break;
2201 case L2CAP_MODE_STREAMING:
2202 chan->mps = le16_to_cpu(rfc.max_pdu_size);
2203 }
2204 }
2205
2206 req->dcid = cpu_to_le16(chan->dcid);
2207 req->flags = cpu_to_le16(0x0000);
2208
2209 return ptr - data;
2210}
2211
2212static int l2cap_build_conf_rsp(struct l2cap_chan *chan, void *data, u16 result, u16 flags)
2213{
2214 struct l2cap_conf_rsp *rsp = data;
2215 void *ptr = rsp->data;
2216
2217 BT_DBG("chan %p", chan);
2218
2219 rsp->scid = cpu_to_le16(chan->dcid);
2220 rsp->result = cpu_to_le16(result);
2221 rsp->flags = cpu_to_le16(flags);
2222
2223 return ptr - data;
2224}
2225
2226void __l2cap_connect_rsp_defer(struct l2cap_chan *chan)
2227{
2228 struct l2cap_conn_rsp rsp;
2229 struct l2cap_conn *conn = chan->conn;
2230 u8 buf[128];
2231
2232 rsp.scid = cpu_to_le16(chan->dcid);
2233 rsp.dcid = cpu_to_le16(chan->scid);
2234 rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
2235 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
2236 l2cap_send_cmd(conn, chan->ident,
2237 L2CAP_CONN_RSP, sizeof(rsp), &rsp);
2238
2239 if (test_and_set_bit(CONF_REQ_SENT, &chan->conf_state))
2240 return;
2241
2242 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
2243 l2cap_build_conf_req(chan, buf), buf);
2244 chan->num_conf_req++;
2245}
2246
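/* Extract the RFC option from a successful Configure Response and cache
 * the negotiated retransmission/monitor timeouts and MPS on the channel
 * (ERTM and Streaming modes only).
 */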
2247static void l2cap_conf_rfc_get(struct l2cap_chan *chan, void *rsp, int len)
2248{
2249 int type, olen;
2250 unsigned long val;
2251 struct l2cap_conf_rfc rfc = { .mode = chan->mode };
2252
2253 BT_DBG("chan %p, rsp %p, len %d", chan, rsp, len);
2254
2255 if ((chan->mode != L2CAP_MODE_ERTM) && (chan->mode != L2CAP_MODE_STREAMING))
2256 return;
2257
2258 while (len >= L2CAP_CONF_OPT_SIZE) {
2259 len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);
2260
2261 switch (type) {
2262 case L2CAP_CONF_RFC:
2263 if (olen == sizeof(rfc))
2264 memcpy(&rfc, (void *)val, olen);
2265 goto done;
2266 }
2267 }
2268
2269done:
2270 switch (rfc.mode) {
2271 case L2CAP_MODE_ERTM:
2272 chan->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
2273 chan->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
2274 chan->mps = le16_to_cpu(rfc.max_pdu_size);
2275 break;
2276 case L2CAP_MODE_STREAMING:
2277 chan->mps = le16_to_cpu(rfc.max_pdu_size);
2278 }
2279}
2280
2281static inline int l2cap_command_rej(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2282{
2283 struct l2cap_cmd_rej_unk *rej = (struct l2cap_cmd_rej_unk *) data;
2284
2285 if (rej->reason != L2CAP_REJ_NOT_UNDERSTOOD)
2286 return 0;
2287
2288 if ((conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) &&
2289 cmd->ident == conn->info_ident) {
2290 del_timer(&conn->info_timer);
2291
2292 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
2293 conn->info_ident = 0;
2294
2295 l2cap_conn_start(conn);
2296 }
2297
2298 return 0;
2299}
2300
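/* Handle an incoming L2CAP Connection Request: look up a listening
 * channel for the PSM, verify link security and accept-queue space,
 * create the child channel and answer with success, pending or an
 * appropriate refusal.
 */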
2301static inline int l2cap_connect_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2302{
2303 struct l2cap_conn_req *req = (struct l2cap_conn_req *) data;
2304 struct l2cap_conn_rsp rsp;
2305 struct l2cap_chan *chan = NULL, *pchan;
2306 struct sock *parent, *sk = NULL;
2307 int result, status = L2CAP_CS_NO_INFO;
2308
2309 u16 dcid = 0, scid = __le16_to_cpu(req->scid);
2310 __le16 psm = req->psm;
2311
2312 BT_DBG("psm 0x%2.2x scid 0x%4.4x", psm, scid);
2313
2314 /* Check if we have a socket listening on this PSM */
2315 pchan = l2cap_global_chan_by_psm(BT_LISTEN, psm, conn->src);
2316 if (!pchan) {
2317 result = L2CAP_CR_BAD_PSM;
2318 goto sendresp;
2319 }
2320
2321 parent = pchan->sk;
2322
2323 bh_lock_sock(parent);
2324
2325 /* Check if the ACL is secure enough (if not SDP) */
2326 if (psm != cpu_to_le16(0x0001) &&
2327 !hci_conn_check_link_mode(conn->hcon)) {
2328 conn->disc_reason = 0x05;
2329 result = L2CAP_CR_SEC_BLOCK;
2330 goto response;
2331 }
2332
2333 result = L2CAP_CR_NO_MEM;
2334
2335 /* Check for backlog size */
2336 if (sk_acceptq_is_full(parent)) {
2337 BT_DBG("backlog full %d", parent->sk_ack_backlog);
2338 goto response;
2339 }
2340
2341 chan = pchan->ops->new_connection(pchan->data);
2342 if (!chan)
2343 goto response;
2344
2345 sk = chan->sk;
2346
2347 write_lock_bh(&conn->chan_lock);
2348
2349 /* Check if we already have a channel with that dcid */
2350 if (__l2cap_get_chan_by_dcid(conn, scid)) {
2351 write_unlock_bh(&conn->chan_lock);
2352 sock_set_flag(sk, SOCK_ZAPPED);
2353 chan->ops->close(chan->data);
2354 goto response;
2355 }
2356
2357 hci_conn_hold(conn->hcon);
2358
2359 bacpy(&bt_sk(sk)->src, conn->src);
2360 bacpy(&bt_sk(sk)->dst, conn->dst);
2361 chan->psm = psm;
2362 chan->dcid = scid;
2363
2364 bt_accept_enqueue(parent, sk);
2365
2366 __l2cap_chan_add(conn, chan);
2367
2368 dcid = chan->scid;
2369
2370 __set_chan_timer(chan, sk->sk_sndtimeo);
2371
2372 chan->ident = cmd->ident;
2373
2374 if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE) {
2375 if (l2cap_check_security(chan)) {
2376 if (bt_sk(sk)->defer_setup) {
2377 l2cap_state_change(chan, BT_CONNECT2);
2378 result = L2CAP_CR_PEND;
2379 status = L2CAP_CS_AUTHOR_PEND;
2380 parent->sk_data_ready(parent, 0);
2381 } else {
2382 l2cap_state_change(chan, BT_CONFIG);
2383 result = L2CAP_CR_SUCCESS;
2384 status = L2CAP_CS_NO_INFO;
2385 }
2386 } else {
2387 l2cap_state_change(chan, BT_CONNECT2);
2388 result = L2CAP_CR_PEND;
2389 status = L2CAP_CS_AUTHEN_PEND;
2390 }
2391 } else {
2392 l2cap_state_change(chan, BT_CONNECT2);
2393 result = L2CAP_CR_PEND;
2394 status = L2CAP_CS_NO_INFO;
2395 }
2396
2397 write_unlock_bh(&conn->chan_lock);
2398
2399response:
2400 bh_unlock_sock(parent);
2401
2402sendresp:
2403 rsp.scid = cpu_to_le16(scid);
2404 rsp.dcid = cpu_to_le16(dcid);
2405 rsp.result = cpu_to_le16(result);
2406 rsp.status = cpu_to_le16(status);
2407 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONN_RSP, sizeof(rsp), &rsp);
2408
2409 if (result == L2CAP_CR_PEND && status == L2CAP_CS_NO_INFO) {
2410 struct l2cap_info_req info;
2411 info.type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
2412
2413 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
2414 conn->info_ident = l2cap_get_ident(conn);
2415
2416 mod_timer(&conn->info_timer, jiffies +
2417 msecs_to_jiffies(L2CAP_INFO_TIMEOUT));
2418
2419 l2cap_send_cmd(conn, conn->info_ident,
2420 L2CAP_INFO_REQ, sizeof(info), &info);
2421 }
2422
2423 if (chan && !test_bit(CONF_REQ_SENT, &chan->conf_state) &&
2424 result == L2CAP_CR_SUCCESS) {
2425 u8 buf[128];
2426 set_bit(CONF_REQ_SENT, &chan->conf_state);
2427 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
2428 l2cap_build_conf_req(chan, buf), buf);
2429 chan->num_conf_req++;
2430 }
2431
2432 return 0;
2433}
2434
2435static inline int l2cap_connect_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2436{
2437 struct l2cap_conn_rsp *rsp = (struct l2cap_conn_rsp *) data;
2438 u16 scid, dcid, result, status;
2439 struct l2cap_chan *chan;
2440 struct sock *sk;
2441 u8 req[128];
2442
2443 scid = __le16_to_cpu(rsp->scid);
2444 dcid = __le16_to_cpu(rsp->dcid);
2445 result = __le16_to_cpu(rsp->result);
2446 status = __le16_to_cpu(rsp->status);
2447
2448 BT_DBG("dcid 0x%4.4x scid 0x%4.4x result 0x%2.2x status 0x%2.2x", dcid, scid, result, status);
2449
2450 if (scid) {
2451 chan = l2cap_get_chan_by_scid(conn, scid);
2452 if (!chan)
2453 return -EFAULT;
2454 } else {
2455 chan = l2cap_get_chan_by_ident(conn, cmd->ident);
2456 if (!chan)
2457 return -EFAULT;
2458 }
2459
2460 sk = chan->sk;
2461
2462 switch (result) {
2463 case L2CAP_CR_SUCCESS:
2464 l2cap_state_change(chan, BT_CONFIG);
2465 chan->ident = 0;
2466 chan->dcid = dcid;
2467 clear_bit(CONF_CONNECT_PEND, &chan->conf_state);
2468
2469 if (test_and_set_bit(CONF_REQ_SENT, &chan->conf_state))
2470 break;
2471
2472 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
2473 l2cap_build_conf_req(chan, req), req);
2474 chan->num_conf_req++;
2475 break;
2476
2477 case L2CAP_CR_PEND:
2478 set_bit(CONF_CONNECT_PEND, &chan->conf_state);
2479 break;
2480
2481 default:
2482 /* don't delete l2cap channel if sk is owned by user */
2483 if (sock_owned_by_user(sk)) {
2484 l2cap_state_change(chan, BT_DISCONN);
2485 __clear_chan_timer(chan);
2486 __set_chan_timer(chan, HZ / 5);
2487 break;
2488 }
2489
2490 l2cap_chan_del(chan, ECONNREFUSED);
2491 break;
2492 }
2493
2494 bh_unlock_sock(sk);
2495 return 0;
2496}
2497
2498static inline void set_default_fcs(struct l2cap_chan *chan)
2499{
2500 /* FCS is enabled only in ERTM or streaming mode, if one or both
2501 * sides request it.
2502 */
2503 if (chan->mode != L2CAP_MODE_ERTM && chan->mode != L2CAP_MODE_STREAMING)
2504 chan->fcs = L2CAP_FCS_NONE;
2505 else if (!test_bit(CONF_NO_FCS_RECV, &chan->conf_state))
2506 chan->fcs = L2CAP_FCS_CRC16;
2507}
2508
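/* Handle a Configure Request.  Option data may span several requests
 * (continuation flag set), so it is accumulated in chan->conf_req and
 * only parsed and answered once the final fragment arrives.
 */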
2509static inline int l2cap_config_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u16 cmd_len, u8 *data)
2510{
2511 struct l2cap_conf_req *req = (struct l2cap_conf_req *) data;
2512 u16 dcid, flags;
2513 u8 rsp[64];
2514 struct l2cap_chan *chan;
2515 struct sock *sk;
2516 int len;
2517
2518 dcid = __le16_to_cpu(req->dcid);
2519 flags = __le16_to_cpu(req->flags);
2520
2521 BT_DBG("dcid 0x%4.4x flags 0x%2.2x", dcid, flags);
2522
2523 chan = l2cap_get_chan_by_scid(conn, dcid);
2524 if (!chan)
2525 return -ENOENT;
2526
2527 sk = chan->sk;
2528
2529 if (chan->state != BT_CONFIG && chan->state != BT_CONNECT2) {
2530 struct l2cap_cmd_rej_cid rej;
2531
2532 rej.reason = cpu_to_le16(L2CAP_REJ_INVALID_CID);
2533 rej.scid = cpu_to_le16(chan->scid);
2534 rej.dcid = cpu_to_le16(chan->dcid);
2535
2536 l2cap_send_cmd(conn, cmd->ident, L2CAP_COMMAND_REJ,
2537 sizeof(rej), &rej);
2538 goto unlock;
2539 }
2540
2541 /* Reject if config buffer is too small. */
2542 len = cmd_len - sizeof(*req);
2543 if (len < 0 || chan->conf_len + len > sizeof(chan->conf_req)) {
2544 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
2545 l2cap_build_conf_rsp(chan, rsp,
2546 L2CAP_CONF_REJECT, flags), rsp);
2547 goto unlock;
2548 }
2549
2550 /* Store config. */
2551 memcpy(chan->conf_req + chan->conf_len, req->data, len);
2552 chan->conf_len += len;
2553
2554 if (flags & 0x0001) {
2555 /* Incomplete config. Send empty response. */
2556 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
2557 l2cap_build_conf_rsp(chan, rsp,
2558 L2CAP_CONF_SUCCESS, 0x0001), rsp);
2559 goto unlock;
2560 }
2561
2562 /* Complete config. */
2563 len = l2cap_parse_conf_req(chan, rsp);
2564 if (len < 0) {
2565 l2cap_send_disconn_req(conn, chan, ECONNRESET);
2566 goto unlock;
2567 }
2568
2569 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP, len, rsp);
2570 chan->num_conf_rsp++;
2571
2572 /* Reset config buffer. */
2573 chan->conf_len = 0;
2574
2575 if (!test_bit(CONF_OUTPUT_DONE, &chan->conf_state))
2576 goto unlock;
2577
2578 if (test_bit(CONF_INPUT_DONE, &chan->conf_state)) {
2579 set_default_fcs(chan);
2580
2581 l2cap_state_change(chan, BT_CONNECTED);
2582
2583 chan->next_tx_seq = 0;
2584 chan->expected_tx_seq = 0;
2585 skb_queue_head_init(&chan->tx_q);
2586 if (chan->mode == L2CAP_MODE_ERTM)
2587 l2cap_ertm_init(chan);
2588
2589 l2cap_chan_ready(sk);
2590 goto unlock;
2591 }
2592
2593 if (!test_and_set_bit(CONF_REQ_SENT, &chan->conf_state)) {
2594 u8 buf[64];
2595 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
2596 l2cap_build_conf_req(chan, buf), buf);
2597 chan->num_conf_req++;
2598 }
2599
2600unlock:
2601 bh_unlock_sock(sk);
2602 return 0;
2603}
2604
2605static inline int l2cap_config_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2606{
2607 struct l2cap_conf_rsp *rsp = (struct l2cap_conf_rsp *)data;
2608 u16 scid, flags, result;
2609 struct l2cap_chan *chan;
2610 struct sock *sk;
2611 int len = le16_to_cpu(cmd->len) - sizeof(*rsp);
2612
2613 scid = __le16_to_cpu(rsp->scid);
2614 flags = __le16_to_cpu(rsp->flags);
2615 result = __le16_to_cpu(rsp->result);
2616
2617 BT_DBG("scid 0x%4.4x flags 0x%2.2x result 0x%2.2x",
2618 scid, flags, result);
2619
2620 chan = l2cap_get_chan_by_scid(conn, scid);
2621 if (!chan)
2622 return 0;
2623
2624 sk = chan->sk;
2625
2626 switch (result) {
2627 case L2CAP_CONF_SUCCESS:
2628 l2cap_conf_rfc_get(chan, rsp->data, len);
2629 break;
2630
2631 case L2CAP_CONF_UNACCEPT:
2632 if (chan->num_conf_rsp <= L2CAP_CONF_MAX_CONF_RSP) {
2633 char req[64];
2634
2635 if (len > sizeof(req) - sizeof(struct l2cap_conf_req)) {
2636 l2cap_send_disconn_req(conn, chan, ECONNRESET);
2637 goto done;
2638 }
2639
2640 /* throw out any old stored conf requests */
2641 result = L2CAP_CONF_SUCCESS;
2642 len = l2cap_parse_conf_rsp(chan, rsp->data, len,
2643 req, &result);
2644 if (len < 0) {
2645 l2cap_send_disconn_req(conn, chan, ECONNRESET);
2646 goto done;
2647 }
2648
2649 l2cap_send_cmd(conn, l2cap_get_ident(conn),
2650 L2CAP_CONF_REQ, len, req);
2651 chan->num_conf_req++;
2652 if (result != L2CAP_CONF_SUCCESS)
2653 goto done;
2654 break;
2655 }
2656
2657 default:
2658 sk->sk_err = ECONNRESET;
2659 __set_chan_timer(chan, HZ * 5);
2660 l2cap_send_disconn_req(conn, chan, ECONNRESET);
2661 goto done;
2662 }
2663
2664 if (flags & 0x01)
2665 goto done;
2666
2667 set_bit(CONF_INPUT_DONE, &chan->conf_state);
2668
2669 if (test_bit(CONF_OUTPUT_DONE, &chan->conf_state)) {
2670 set_default_fcs(chan);
2671
2672 l2cap_state_change(chan, BT_CONNECTED);
2673 chan->next_tx_seq = 0;
2674 chan->expected_tx_seq = 0;
2675 skb_queue_head_init(&chan->tx_q);
2676 if (chan->mode == L2CAP_MODE_ERTM)
2677 l2cap_ertm_init(chan);
2678
2679 l2cap_chan_ready(sk);
2680 }
2681
2682done:
2683 bh_unlock_sock(sk);
2684 return 0;
2685}
2686
2687static inline int l2cap_disconnect_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2688{
2689 struct l2cap_disconn_req *req = (struct l2cap_disconn_req *) data;
2690 struct l2cap_disconn_rsp rsp;
2691 u16 dcid, scid;
2692 struct l2cap_chan *chan;
2693 struct sock *sk;
2694
2695 scid = __le16_to_cpu(req->scid);
2696 dcid = __le16_to_cpu(req->dcid);
2697
2698 BT_DBG("scid 0x%4.4x dcid 0x%4.4x", scid, dcid);
2699
2700 chan = l2cap_get_chan_by_scid(conn, dcid);
2701 if (!chan)
2702 return 0;
2703
2704 sk = chan->sk;
2705
2706 rsp.dcid = cpu_to_le16(chan->scid);
2707 rsp.scid = cpu_to_le16(chan->dcid);
2708 l2cap_send_cmd(conn, cmd->ident, L2CAP_DISCONN_RSP, sizeof(rsp), &rsp);
2709
2710 sk->sk_shutdown = SHUTDOWN_MASK;
2711
2712 /* don't delete l2cap channel if sk is owned by user */
2713 if (sock_owned_by_user(sk)) {
2714 l2cap_state_change(chan, BT_DISCONN);
2715 __clear_chan_timer(chan);
2716 __set_chan_timer(chan, HZ / 5);
2717 bh_unlock_sock(sk);
2718 return 0;
2719 }
2720
2721 l2cap_chan_del(chan, ECONNRESET);
2722 bh_unlock_sock(sk);
2723
2724 chan->ops->close(chan->data);
2725 return 0;
2726}
2727
2728static inline int l2cap_disconnect_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2729{
2730 struct l2cap_disconn_rsp *rsp = (struct l2cap_disconn_rsp *) data;
2731 u16 dcid, scid;
2732 struct l2cap_chan *chan;
2733 struct sock *sk;
2734
2735 scid = __le16_to_cpu(rsp->scid);
2736 dcid = __le16_to_cpu(rsp->dcid);
2737
2738 BT_DBG("dcid 0x%4.4x scid 0x%4.4x", dcid, scid);
2739
2740 chan = l2cap_get_chan_by_scid(conn, scid);
2741 if (!chan)
2742 return 0;
2743
2744 sk = chan->sk;
2745
2746 /* don't delete l2cap channel if sk is owned by user */
2747 if (sock_owned_by_user(sk)) {
2748 l2cap_state_change(chan, BT_DISCONN);
2749 __clear_chan_timer(chan);
2750 __set_chan_timer(chan, HZ / 5);
2751 bh_unlock_sock(sk);
2752 return 0;
2753 }
2754
2755 l2cap_chan_del(chan, 0);
2756 bh_unlock_sock(sk);
2757
2758 chan->ops->close(chan->data);
2759 return 0;
2760}
2761
2762static inline int l2cap_information_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2763{
2764 struct l2cap_info_req *req = (struct l2cap_info_req *) data;
2765 u16 type;
2766
2767 type = __le16_to_cpu(req->type);
2768
2769 BT_DBG("type 0x%4.4x", type);
2770
2771 if (type == L2CAP_IT_FEAT_MASK) {
2772 u8 buf[8];
2773 u32 feat_mask = l2cap_feat_mask;
2774 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
2775 rsp->type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
2776 rsp->result = cpu_to_le16(L2CAP_IR_SUCCESS);
2777 if (!disable_ertm)
2778 feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING
2779 | L2CAP_FEAT_FCS;
2780 put_unaligned_le32(feat_mask, rsp->data);
2781 l2cap_send_cmd(conn, cmd->ident,
2782 L2CAP_INFO_RSP, sizeof(buf), buf);
2783 } else if (type == L2CAP_IT_FIXED_CHAN) {
2784 u8 buf[12];
2785 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
2786 rsp->type = cpu_to_le16(L2CAP_IT_FIXED_CHAN);
2787 rsp->result = cpu_to_le16(L2CAP_IR_SUCCESS);
2788 memcpy(buf + 4, l2cap_fixed_chan, 8);
2789 l2cap_send_cmd(conn, cmd->ident,
2790 L2CAP_INFO_RSP, sizeof(buf), buf);
2791 } else {
2792 struct l2cap_info_rsp rsp;
2793 rsp.type = cpu_to_le16(type);
2794 rsp.result = cpu_to_le16(L2CAP_IR_NOTSUPP);
2795 l2cap_send_cmd(conn, cmd->ident,
2796 L2CAP_INFO_RSP, sizeof(rsp), &rsp);
2797 }
2798
2799 return 0;
2800}
2801
2802static inline int l2cap_information_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2803{
2804 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) data;
2805 u16 type, result;
2806
2807 type = __le16_to_cpu(rsp->type);
2808 result = __le16_to_cpu(rsp->result);
2809
2810 BT_DBG("type 0x%4.4x result 0x%2.2x", type, result);
2811
2812 /* L2CAP Info req/rsp are not bound to any channel, so add extra checks */
2813 if (cmd->ident != conn->info_ident ||
2814 conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE)
2815 return 0;
2816
2817 del_timer(&conn->info_timer);
2818
2819 if (result != L2CAP_IR_SUCCESS) {
2820 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
2821 conn->info_ident = 0;
2822
2823 l2cap_conn_start(conn);
2824
2825 return 0;
2826 }
2827
2828 if (type == L2CAP_IT_FEAT_MASK) {
2829 conn->feat_mask = get_unaligned_le32(rsp->data);
2830
2831 if (conn->feat_mask & L2CAP_FEAT_FIXED_CHAN) {
2832 struct l2cap_info_req req;
2833 req.type = cpu_to_le16(L2CAP_IT_FIXED_CHAN);
2834
2835 conn->info_ident = l2cap_get_ident(conn);
2836
2837 l2cap_send_cmd(conn, conn->info_ident,
2838 L2CAP_INFO_REQ, sizeof(req), &req);
2839 } else {
2840 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
2841 conn->info_ident = 0;
2842
2843 l2cap_conn_start(conn);
2844 }
2845 } else if (type == L2CAP_IT_FIXED_CHAN) {
2846 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
2847 conn->info_ident = 0;
2848
2849 l2cap_conn_start(conn);
2850 }
2851
2852 return 0;
2853}
2854
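/* Validate LE connection parameters against the allowed ranges:
 * connection interval 6..3200 (units of 1.25 ms, i.e. 7.5 ms .. 4 s),
 * supervision timeout 10..3200 (units of 10 ms) and a slave latency of
 * at most 499 and at most (to_multiplier * 8 / max) - 1; for example
 * max = 24, to_multiplier = 300 gives a latency limit of 99.
 */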
2855static inline int l2cap_check_conn_param(u16 min, u16 max, u16 latency,
2856 u16 to_multiplier)
2857{
2858 u16 max_latency;
2859
2860 if (min > max || min < 6 || max > 3200)
2861 return -EINVAL;
2862
2863 if (to_multiplier < 10 || to_multiplier > 3200)
2864 return -EINVAL;
2865
2866 if (max >= to_multiplier * 8)
2867 return -EINVAL;
2868
2869 max_latency = (to_multiplier * 8 / max) - 1;
2870 if (latency > 499 || latency > max_latency)
2871 return -EINVAL;
2872
2873 return 0;
2874}
2875
2876static inline int l2cap_conn_param_update_req(struct l2cap_conn *conn,
2877 struct l2cap_cmd_hdr *cmd, u8 *data)
2878{
2879 struct hci_conn *hcon = conn->hcon;
2880 struct l2cap_conn_param_update_req *req;
2881 struct l2cap_conn_param_update_rsp rsp;
2882 u16 min, max, latency, to_multiplier, cmd_len;
2883 int err;
2884
2885 if (!(hcon->link_mode & HCI_LM_MASTER))
2886 return -EINVAL;
2887
2888 cmd_len = __le16_to_cpu(cmd->len);
2889 if (cmd_len != sizeof(struct l2cap_conn_param_update_req))
2890 return -EPROTO;
2891
2892 req = (struct l2cap_conn_param_update_req *) data;
2893 min = __le16_to_cpu(req->min);
2894 max = __le16_to_cpu(req->max);
2895 latency = __le16_to_cpu(req->latency);
2896 to_multiplier = __le16_to_cpu(req->to_multiplier);
2897
2898 BT_DBG("min 0x%4.4x max 0x%4.4x latency: 0x%4.4x Timeout: 0x%4.4x",
2899 min, max, latency, to_multiplier);
2900
2901 memset(&rsp, 0, sizeof(rsp));
2902
2903 err = l2cap_check_conn_param(min, max, latency, to_multiplier);
2904 if (err)
2905 rsp.result = cpu_to_le16(L2CAP_CONN_PARAM_REJECTED);
2906 else
2907 rsp.result = cpu_to_le16(L2CAP_CONN_PARAM_ACCEPTED);
2908
2909 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONN_PARAM_UPDATE_RSP,
2910 sizeof(rsp), &rsp);
2911
2912 if (!err)
2913 hci_le_conn_update(hcon, min, max, latency, to_multiplier);
2914
2915 return 0;
2916}
2917
2918static inline int l2cap_bredr_sig_cmd(struct l2cap_conn *conn,
2919 struct l2cap_cmd_hdr *cmd, u16 cmd_len, u8 *data)
2920{
2921 int err = 0;
2922
2923 switch (cmd->code) {
2924 case L2CAP_COMMAND_REJ:
2925 l2cap_command_rej(conn, cmd, data);
2926 break;
2927
2928 case L2CAP_CONN_REQ:
2929 err = l2cap_connect_req(conn, cmd, data);
2930 break;
2931
2932 case L2CAP_CONN_RSP:
2933 err = l2cap_connect_rsp(conn, cmd, data);
2934 break;
2935
2936 case L2CAP_CONF_REQ:
2937 err = l2cap_config_req(conn, cmd, cmd_len, data);
2938 break;
2939
2940 case L2CAP_CONF_RSP:
2941 err = l2cap_config_rsp(conn, cmd, data);
2942 break;
2943
2944 case L2CAP_DISCONN_REQ:
2945 err = l2cap_disconnect_req(conn, cmd, data);
2946 break;
2947
2948 case L2CAP_DISCONN_RSP:
2949 err = l2cap_disconnect_rsp(conn, cmd, data);
2950 break;
2951
2952 case L2CAP_ECHO_REQ:
2953 l2cap_send_cmd(conn, cmd->ident, L2CAP_ECHO_RSP, cmd_len, data);
2954 break;
2955
2956 case L2CAP_ECHO_RSP:
2957 break;
2958
2959 case L2CAP_INFO_REQ:
2960 err = l2cap_information_req(conn, cmd, data);
2961 break;
2962
2963 case L2CAP_INFO_RSP:
2964 err = l2cap_information_rsp(conn, cmd, data);
2965 break;
2966
2967 default:
2968 BT_ERR("Unknown BR/EDR signaling command 0x%2.2x", cmd->code);
2969 err = -EINVAL;
2970 break;
2971 }
2972
2973 return err;
2974}
2975
2976static inline int l2cap_le_sig_cmd(struct l2cap_conn *conn,
2977 struct l2cap_cmd_hdr *cmd, u8 *data)
2978{
2979 switch (cmd->code) {
2980 case L2CAP_COMMAND_REJ:
2981 return 0;
2982
2983 case L2CAP_CONN_PARAM_UPDATE_REQ:
2984 return l2cap_conn_param_update_req(conn, cmd, data);
2985
2986 case L2CAP_CONN_PARAM_UPDATE_RSP:
2987 return 0;
2988
2989 default:
2990 BT_ERR("Unknown LE signaling command 0x%2.2x", cmd->code);
2991 return -EINVAL;
2992 }
2993}
2994
2995static inline void l2cap_sig_channel(struct l2cap_conn *conn,
2996 struct sk_buff *skb)
2997{
2998 u8 *data = skb->data;
2999 int len = skb->len;
3000 struct l2cap_cmd_hdr cmd;
3001 int err;
3002
3003 l2cap_raw_recv(conn, skb);
3004
3005 while (len >= L2CAP_CMD_HDR_SIZE) {
3006 u16 cmd_len;
3007 memcpy(&cmd, data, L2CAP_CMD_HDR_SIZE);
3008 data += L2CAP_CMD_HDR_SIZE;
3009 len -= L2CAP_CMD_HDR_SIZE;
3010
3011 cmd_len = le16_to_cpu(cmd.len);
3012
3013 BT_DBG("code 0x%2.2x len %d id 0x%2.2x", cmd.code, cmd_len, cmd.ident);
3014
3015 if (cmd_len > len || !cmd.ident) {
3016 BT_DBG("corrupted command");
3017 break;
3018 }
3019
3020 if (conn->hcon->type == LE_LINK)
3021 err = l2cap_le_sig_cmd(conn, &cmd, data);
3022 else
3023 err = l2cap_bredr_sig_cmd(conn, &cmd, cmd_len, data);
3024
3025 if (err) {
3026 struct l2cap_cmd_rej_unk rej;
3027
3028 BT_ERR("Wrong link type (%d)", err);
3029
3030 /* FIXME: Map err to a valid reason */
3031 rej.reason = cpu_to_le16(L2CAP_REJ_NOT_UNDERSTOOD);
3032 l2cap_send_cmd(conn, cmd.ident, L2CAP_COMMAND_REJ, sizeof(rej), &rej);
3033 }
3034
3035 data += cmd_len;
3036 len -= cmd_len;
3037 }
3038
3039 kfree_skb(skb);
3040}
3041
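/* When CRC16 FCS is in use, trim the 2-byte FCS from the tail of the
 * frame and recompute the checksum over the basic L2CAP header, the
 * control field and the payload; a mismatch makes the frame invalid.
 */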
3042static int l2cap_check_fcs(struct l2cap_chan *chan, struct sk_buff *skb)
3043{
3044 u16 our_fcs, rcv_fcs;
3045 int hdr_size = L2CAP_HDR_SIZE + 2;
3046
3047 if (chan->fcs == L2CAP_FCS_CRC16) {
3048 skb_trim(skb, skb->len - 2);
3049 rcv_fcs = get_unaligned_le16(skb->data + skb->len);
3050 our_fcs = crc16(0, skb->data - hdr_size, skb->len + hdr_size);
3051
3052 if (our_fcs != rcv_fcs)
3053 return -EBADMSG;
3054 }
3055 return 0;
3056}
3057
3058static inline void l2cap_send_i_or_rr_or_rnr(struct l2cap_chan *chan)
3059{
3060 u16 control = 0;
3061
3062 chan->frames_sent = 0;
3063
3064 control |= chan->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT;
3065
3066 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
3067 control |= L2CAP_SUPER_RCV_NOT_READY;
3068 l2cap_send_sframe(chan, control);
3069 set_bit(CONN_RNR_SENT, &chan->conn_state);
3070 }
3071
3072 if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state))
3073 l2cap_retransmit_frames(chan);
3074
3075 l2cap_ertm_send(chan);
3076
3077 if (!test_bit(CONN_LOCAL_BUSY, &chan->conn_state) &&
3078 chan->frames_sent == 0) {
3079 control |= L2CAP_SUPER_RCV_READY;
3080 l2cap_send_sframe(chan, control);
3081 }
3082}
3083
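/* Insert an out-of-sequence I-frame into the SREJ queue, keeping the
 * queue ordered by the modulo-64 distance of tx_seq from buffer_seq
 * (e.g. buffer_seq = 60, tx_seq = 2 sorts at offset 6).  A duplicate
 * tx_seq is rejected with -EINVAL.
 */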
3084static int l2cap_add_to_srej_queue(struct l2cap_chan *chan, struct sk_buff *skb, u8 tx_seq, u8 sar)
3085{
3086 struct sk_buff *next_skb;
3087 int tx_seq_offset, next_tx_seq_offset;
3088
3089 bt_cb(skb)->tx_seq = tx_seq;
3090 bt_cb(skb)->sar = sar;
3091
3092 next_skb = skb_peek(&chan->srej_q);
3093 if (!next_skb) {
3094 __skb_queue_tail(&chan->srej_q, skb);
3095 return 0;
3096 }
3097
3098 tx_seq_offset = (tx_seq - chan->buffer_seq) % 64;
3099 if (tx_seq_offset < 0)
3100 tx_seq_offset += 64;
3101
3102 do {
3103 if (bt_cb(next_skb)->tx_seq == tx_seq)
3104 return -EINVAL;
3105
3106 next_tx_seq_offset = (bt_cb(next_skb)->tx_seq -
3107 chan->buffer_seq) % 64;
3108 if (next_tx_seq_offset < 0)
3109 next_tx_seq_offset += 64;
3110
3111 if (next_tx_seq_offset > tx_seq_offset) {
3112 __skb_queue_before(&chan->srej_q, next_skb, skb);
3113 return 0;
3114 }
3115
3116 if (skb_queue_is_last(&chan->srej_q, next_skb))
3117 break;
3118
3119 } while ((next_skb = skb_queue_next(&chan->srej_q, next_skb)));
3120
3121 __skb_queue_tail(&chan->srej_q, skb);
3122
3123 return 0;
3124}
3125
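/* Reassemble ERTM SDUs according to the SAR bits in the control field:
 * unsegmented SDUs are delivered directly, while start/continuation/end
 * fragments are collected in chan->sdu and checked against the SDU
 * length and the channel MTU before delivery.
 */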
3126static int l2cap_ertm_reassembly_sdu(struct l2cap_chan *chan, struct sk_buff *skb, u16 control)
3127{
3128 struct sk_buff *_skb;
3129 int err;
3130
3131 switch (control & L2CAP_CTRL_SAR) {
3132 case L2CAP_SDU_UNSEGMENTED:
3133 if (test_bit(CONN_SAR_SDU, &chan->conn_state))
3134 goto drop;
3135
3136 return chan->ops->recv(chan->data, skb);
3137
3138 case L2CAP_SDU_START:
3139 if (test_bit(CONN_SAR_SDU, &chan->conn_state))
3140 goto drop;
3141
3142 chan->sdu_len = get_unaligned_le16(skb->data);
3143
3144 if (chan->sdu_len > chan->imtu)
3145 goto disconnect;
3146
3147 chan->sdu = bt_skb_alloc(chan->sdu_len, GFP_ATOMIC);
3148 if (!chan->sdu)
3149 return -ENOMEM;
3150
3151 /* Pull the 2-byte SDU length only after the allocation succeeds:
3152 * under the Local Busy condition this path may run again for the
3153 * same skb, so the pull must happen exactly once. */
3154 skb_pull(skb, 2);
3155
3156 memcpy(skb_put(chan->sdu, skb->len), skb->data, skb->len);
3157
3158 set_bit(CONN_SAR_SDU, &chan->conn_state);
3159 chan->partial_sdu_len = skb->len;
3160 break;
3161
3162 case L2CAP_SDU_CONTINUE:
3163 if (!test_bit(CONN_SAR_SDU, &chan->conn_state))
3164 goto disconnect;
3165
3166 if (!chan->sdu)
3167 goto disconnect;
3168
3169 chan->partial_sdu_len += skb->len;
3170 if (chan->partial_sdu_len > chan->sdu_len)
3171 goto drop;
3172
3173 memcpy(skb_put(chan->sdu, skb->len), skb->data, skb->len);
3174
3175 break;
3176
3177 case L2CAP_SDU_END:
3178 if (!test_bit(CONN_SAR_SDU, &chan->conn_state))
3179 goto disconnect;
3180
3181 if (!chan->sdu)
3182 goto disconnect;
3183
3184 chan->partial_sdu_len += skb->len;
3185
3186 if (chan->partial_sdu_len > chan->imtu)
3187 goto drop;
3188
3189 if (chan->partial_sdu_len != chan->sdu_len)
3190 goto drop;
3191
3192 memcpy(skb_put(chan->sdu, skb->len), skb->data, skb->len);
3193
3194 _skb = skb_clone(chan->sdu, GFP_ATOMIC);
3195 if (!_skb) {
3196 return -ENOMEM;
3197 }
3198
3199 err = chan->ops->recv(chan->data, _skb);
3200 if (err < 0) {
3201 kfree_skb(_skb);
3202 return err;
3203 }
3204
3205 clear_bit(CONN_SAR_SDU, &chan->conn_state);
3206
3207 kfree_skb(chan->sdu);
3208 break;
3209 }
3210
3211 kfree_skb(skb);
3212 return 0;
3213
3214drop:
3215 kfree_skb(chan->sdu);
3216 chan->sdu = NULL;
3217
3218disconnect:
3219 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
3220 kfree_skb(skb);
3221 return 0;
3222}
3223
3224static void l2cap_ertm_enter_local_busy(struct l2cap_chan *chan)
3225{
3226 u16 control;
3227
3228 BT_DBG("chan %p, Enter local busy", chan);
3229
3230 set_bit(CONN_LOCAL_BUSY, &chan->conn_state);
3231
3232 control = chan->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT;
3233 control |= L2CAP_SUPER_RCV_NOT_READY;
3234 l2cap_send_sframe(chan, control);
3235
3236 set_bit(CONN_RNR_SENT, &chan->conn_state);
3237
3238 __clear_ack_timer(chan);
3239}
3240
3241static void l2cap_ertm_exit_local_busy(struct l2cap_chan *chan)
3242{
3243 u16 control;
3244
3245 if (!test_bit(CONN_RNR_SENT, &chan->conn_state))
3246 goto done;
3247
3248 control = chan->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT;
3249 control |= L2CAP_SUPER_RCV_READY | L2CAP_CTRL_POLL;
3250 l2cap_send_sframe(chan, control);
3251 chan->retry_count = 1;
3252
3253 __clear_retrans_timer(chan);
3254 __set_monitor_timer(chan);
3255
3256 set_bit(CONN_WAIT_F, &chan->conn_state);
3257
3258done:
3259 clear_bit(CONN_LOCAL_BUSY, &chan->conn_state);
3260 clear_bit(CONN_RNR_SENT, &chan->conn_state);
3261
3262 BT_DBG("chan %p, Exit local busy", chan);
3263}
3264
3265void l2cap_chan_busy(struct l2cap_chan *chan, int busy)
3266{
3267 if (chan->mode == L2CAP_MODE_ERTM) {
3268 if (busy)
3269 l2cap_ertm_enter_local_busy(chan);
3270 else
3271 l2cap_ertm_exit_local_busy(chan);
3272 }
3273}
3274
3275static int l2cap_streaming_reassembly_sdu(struct l2cap_chan *chan, struct sk_buff *skb, u16 control)
3276{
3277 struct sk_buff *_skb;
3278 int err = -EINVAL;
3279
3280 /*
3281 * TODO: We have to notify the userland if some data is lost with the
3282 * Streaming Mode.
3283 */
3284
3285 switch (control & L2CAP_CTRL_SAR) {
3286 case L2CAP_SDU_UNSEGMENTED:
3287 if (test_bit(CONN_SAR_SDU, &chan->conn_state)) {
3288 kfree_skb(chan->sdu);
3289 break;
3290 }
3291
3292 err = chan->ops->recv(chan->data, skb);
3293 if (!err)
3294 return 0;
3295
3296 break;
3297
3298 case L2CAP_SDU_START:
3299 if (test_bit(CONN_SAR_SDU, &chan->conn_state)) {
3300 kfree_skb(chan->sdu);
3301 break;
3302 }
3303
3304 chan->sdu_len = get_unaligned_le16(skb->data);
3305 skb_pull(skb, 2);
3306
3307 if (chan->sdu_len > chan->imtu) {
3308 err = -EMSGSIZE;
3309 break;
3310 }
3311
3312 chan->sdu = bt_skb_alloc(chan->sdu_len, GFP_ATOMIC);
3313 if (!chan->sdu) {
3314 err = -ENOMEM;
3315 break;
3316 }
3317
3318 memcpy(skb_put(chan->sdu, skb->len), skb->data, skb->len);
3319
3320 set_bit(CONN_SAR_SDU, &chan->conn_state);
3321 chan->partial_sdu_len = skb->len;
3322 err = 0;
3323 break;
3324
3325 case L2CAP_SDU_CONTINUE:
3326 if (!test_bit(CONN_SAR_SDU, &chan->conn_state))
3327 break;
3328
3329 memcpy(skb_put(chan->sdu, skb->len), skb->data, skb->len);
3330
3331 chan->partial_sdu_len += skb->len;
3332 if (chan->partial_sdu_len > chan->sdu_len)
3333 kfree_skb(chan->sdu);
3334 else
3335 err = 0;
3336
3337 break;
3338
3339 case L2CAP_SDU_END:
3340 if (!test_bit(CONN_SAR_SDU, &chan->conn_state))
3341 break;
3342
3343 memcpy(skb_put(chan->sdu, skb->len), skb->data, skb->len);
3344
3345 clear_bit(CONN_SAR_SDU, &chan->conn_state);
3346 chan->partial_sdu_len += skb->len;
3347
3348 if (chan->partial_sdu_len > chan->imtu)
3349 goto drop;
3350
3351 if (chan->partial_sdu_len == chan->sdu_len) {
3352 _skb = skb_clone(chan->sdu, GFP_ATOMIC);
3353 err = _skb ? chan->ops->recv(chan->data, _skb) : -ENOMEM;
3354 if (err < 0)
3355 kfree_skb(_skb);
3356 }
3357 err = 0;
3358
3359drop:
3360 kfree_skb(chan->sdu);
3361 break;
3362 }
3363
3364 kfree_skb(skb);
3365 return err;
3366}
3367
3368static void l2cap_check_srej_gap(struct l2cap_chan *chan, u8 tx_seq)
3369{
3370 struct sk_buff *skb;
3371 u16 control;
3372
3373 while ((skb = skb_peek(&chan->srej_q)) &&
3374 !test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
3375 int err;
3376
3377 if (bt_cb(skb)->tx_seq != tx_seq)
3378 break;
3379
3380 skb = skb_dequeue(&chan->srej_q);
3381 control = bt_cb(skb)->sar << L2CAP_CTRL_SAR_SHIFT;
3382 err = l2cap_ertm_reassembly_sdu(chan, skb, control);
3383
3384 if (err < 0) {
3385 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
3386 break;
3387 }
3388
3389 chan->buffer_seq_srej =
3390 (chan->buffer_seq_srej + 1) % 64;
3391 tx_seq = (tx_seq + 1) % 64;
3392 }
3393}
3394
3395static void l2cap_resend_srejframe(struct l2cap_chan *chan, u8 tx_seq)
3396{
3397 struct srej_list *l, *tmp;
3398 u16 control;
3399
3400 list_for_each_entry_safe(l, tmp, &chan->srej_l, list) {
3401 if (l->tx_seq == tx_seq) {
3402 list_del(&l->list);
3403 kfree(l);
3404 return;
3405 }
3406 control = L2CAP_SUPER_SELECT_REJECT;
3407 control |= l->tx_seq << L2CAP_CTRL_REQSEQ_SHIFT;
3408 l2cap_send_sframe(chan, control);
3409 list_del(&l->list);
3410 list_add_tail(&l->list, &chan->srej_l);
3411 }
3412}
3413
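/* Send one SREJ S-frame for every sequence number between the expected
 * tx_seq and the one just received, remembering each requested frame on
 * the srej_l list until it is retransmitted.
 */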
3414static void l2cap_send_srejframe(struct l2cap_chan *chan, u8 tx_seq)
3415{
3416 struct srej_list *new;
3417 u16 control;
3418
3419 while (tx_seq != chan->expected_tx_seq) {
3420 control = L2CAP_SUPER_SELECT_REJECT;
3421 control |= chan->expected_tx_seq << L2CAP_CTRL_REQSEQ_SHIFT;
3422 l2cap_send_sframe(chan, control);
3423
3424 new = kzalloc(sizeof(struct srej_list), GFP_ATOMIC);
3425 new->tx_seq = chan->expected_tx_seq;
3426 chan->expected_tx_seq = (chan->expected_tx_seq + 1) % 64;
3427 list_add_tail(&new->list, &chan->srej_l);
3428 }
3429 chan->expected_tx_seq = (chan->expected_tx_seq + 1) % 64;
3430}
3431
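/* Receive one I-frame: acknowledged frames are dropped from the tx
 * queue, in-sequence frames are reassembled and acknowledged, and
 * out-of-sequence frames trigger or feed the SREJ recovery procedure.
 */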
3432static inline int l2cap_data_channel_iframe(struct l2cap_chan *chan, u16 rx_control, struct sk_buff *skb)
3433{
3434 u8 tx_seq = __get_txseq(rx_control);
3435 u8 req_seq = __get_reqseq(rx_control);
3436 u8 sar = rx_control >> L2CAP_CTRL_SAR_SHIFT;
3437 int tx_seq_offset, expected_tx_seq_offset;
3438 int num_to_ack = (chan->tx_win/6) + 1;
3439 int err = 0;
3440
3441 BT_DBG("chan %p len %d tx_seq %d rx_control 0x%4.4x", chan, skb->len,
3442 tx_seq, rx_control);
3443
3444 if (L2CAP_CTRL_FINAL & rx_control &&
3445 test_bit(CONN_WAIT_F, &chan->conn_state)) {
3446 __clear_monitor_timer(chan);
3447 if (chan->unacked_frames > 0)
3448 __set_retrans_timer(chan);
3449 clear_bit(CONN_WAIT_F, &chan->conn_state);
3450 }
3451
3452 chan->expected_ack_seq = req_seq;
3453 l2cap_drop_acked_frames(chan);
3454
3455 tx_seq_offset = (tx_seq - chan->buffer_seq) % 64;
3456 if (tx_seq_offset < 0)
3457 tx_seq_offset += 64;
3458
3459 /* invalid tx_seq */
3460 if (tx_seq_offset >= chan->tx_win) {
3461 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
3462 goto drop;
3463 }
3464
3465 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state))
3466 goto drop;
3467
3468 if (tx_seq == chan->expected_tx_seq)
3469 goto expected;
3470
3471 if (test_bit(CONN_SREJ_SENT, &chan->conn_state)) {
3472 struct srej_list *first;
3473
3474 first = list_first_entry(&chan->srej_l,
3475 struct srej_list, list);
3476 if (tx_seq == first->tx_seq) {
3477 l2cap_add_to_srej_queue(chan, skb, tx_seq, sar);
3478 l2cap_check_srej_gap(chan, tx_seq);
3479
3480 list_del(&first->list);
3481 kfree(first);
3482
3483 if (list_empty(&chan->srej_l)) {
3484 chan->buffer_seq = chan->buffer_seq_srej;
3485 clear_bit(CONN_SREJ_SENT, &chan->conn_state);
3486 l2cap_send_ack(chan);
3487 BT_DBG("chan %p, Exit SREJ_SENT", chan);
3488 }
3489 } else {
3490 struct srej_list *l;
3491
3492 /* duplicated tx_seq */
3493 if (l2cap_add_to_srej_queue(chan, skb, tx_seq, sar) < 0)
3494 goto drop;
3495
3496 list_for_each_entry(l, &chan->srej_l, list) {
3497 if (l->tx_seq == tx_seq) {
3498 l2cap_resend_srejframe(chan, tx_seq);
3499 return 0;
3500 }
3501 }
3502 l2cap_send_srejframe(chan, tx_seq);
3503 }
3504 } else {
3505 expected_tx_seq_offset =
3506 (chan->expected_tx_seq - chan->buffer_seq) % 64;
3507 if (expected_tx_seq_offset < 0)
3508 expected_tx_seq_offset += 64;
3509
3510 /* duplicated tx_seq */
3511 if (tx_seq_offset < expected_tx_seq_offset)
3512 goto drop;
3513
3514 set_bit(CONN_SREJ_SENT, &chan->conn_state);
3515
3516 BT_DBG("chan %p, Enter SREJ", chan);
3517
3518 INIT_LIST_HEAD(&chan->srej_l);
3519 chan->buffer_seq_srej = chan->buffer_seq;
3520
3521 __skb_queue_head_init(&chan->srej_q);
3522 l2cap_add_to_srej_queue(chan, skb, tx_seq, sar);
3523
3524 set_bit(CONN_SEND_PBIT, &chan->conn_state);
3525
3526 l2cap_send_srejframe(chan, tx_seq);
3527
3528 __clear_ack_timer(chan);
3529 }
3530 return 0;
3531
3532expected:
3533 chan->expected_tx_seq = (chan->expected_tx_seq + 1) % 64;
3534
3535 if (test_bit(CONN_SREJ_SENT, &chan->conn_state)) {
3536 bt_cb(skb)->tx_seq = tx_seq;
3537 bt_cb(skb)->sar = sar;
3538 __skb_queue_tail(&chan->srej_q, skb);
3539 return 0;
3540 }
3541
3542 err = l2cap_ertm_reassembly_sdu(chan, skb, rx_control);
3543 chan->buffer_seq = (chan->buffer_seq + 1) % 64;
3544 if (err < 0) {
3545 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
3546 return err;
3547 }
3548
3549 if (rx_control & L2CAP_CTRL_FINAL) {
3550 if (!test_and_clear_bit(CONN_REJ_ACT, &chan->conn_state))
3551 l2cap_retransmit_frames(chan);
3552 }
3553
3554 __set_ack_timer(chan);
3555
3556 chan->num_acked = (chan->num_acked + 1) % num_to_ack;
3557 if (chan->num_acked == num_to_ack - 1)
3558 l2cap_send_ack(chan);
3559
3560 return 0;
3561
3562drop:
3563 kfree_skb(skb);
3564 return 0;
3565}
3566
3567static inline void l2cap_data_channel_rrframe(struct l2cap_chan *chan, u16 rx_control)
3568{
3569 BT_DBG("chan %p, req_seq %d ctrl 0x%4.4x", chan, __get_reqseq(rx_control),
3570 rx_control);
3571
3572 chan->expected_ack_seq = __get_reqseq(rx_control);
3573 l2cap_drop_acked_frames(chan);
3574
3575 if (rx_control & L2CAP_CTRL_POLL) {
3576 set_bit(CONN_SEND_FBIT, &chan->conn_state);
3577 if (test_bit(CONN_SREJ_SENT, &chan->conn_state)) {
3578 if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state) &&
3579 (chan->unacked_frames > 0))
3580 __set_retrans_timer(chan);
3581
3582 clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
3583 l2cap_send_srejtail(chan);
3584 } else {
3585 l2cap_send_i_or_rr_or_rnr(chan);
3586 }
3587
3588 } else if (rx_control & L2CAP_CTRL_FINAL) {
3589 clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
3590
3591 if (!test_and_clear_bit(CONN_REJ_ACT, &chan->conn_state))
3592 l2cap_retransmit_frames(chan);
3593
3594 } else {
3595 if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state) &&
3596 (chan->unacked_frames > 0))
3597 __set_retrans_timer(chan);
3598
3599 clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
3600 if (test_bit(CONN_SREJ_SENT, &chan->conn_state))
3601 l2cap_send_ack(chan);
3602 else
3603 l2cap_ertm_send(chan);
3604 }
3605}
3606
3607static inline void l2cap_data_channel_rejframe(struct l2cap_chan *chan, u16 rx_control)
3608{
3609 u8 tx_seq = __get_reqseq(rx_control);
3610
3611 BT_DBG("chan %p, req_seq %d ctrl 0x%4.4x", chan, tx_seq, rx_control);
3612
3613 clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
3614
3615 chan->expected_ack_seq = tx_seq;
3616 l2cap_drop_acked_frames(chan);
3617
3618 if (rx_control & L2CAP_CTRL_FINAL) {
3619 if (!test_and_clear_bit(CONN_REJ_ACT, &chan->conn_state))
3620 l2cap_retransmit_frames(chan);
3621 } else {
3622 l2cap_retransmit_frames(chan);
3623
3624 if (test_bit(CONN_WAIT_F, &chan->conn_state))
3625 set_bit(CONN_REJ_ACT, &chan->conn_state);
3626 }
3627}
3628static inline void l2cap_data_channel_srejframe(struct l2cap_chan *chan, u16 rx_control)
3629{
3630 u8 tx_seq = __get_reqseq(rx_control);
3631
3632 BT_DBG("chan %p, req_seq %d ctrl 0x%4.4x", chan, tx_seq, rx_control);
3633
3634 clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
3635
3636 if (rx_control & L2CAP_CTRL_POLL) {
3637 chan->expected_ack_seq = tx_seq;
3638 l2cap_drop_acked_frames(chan);
3639
3640 set_bit(CONN_SEND_FBIT, &chan->conn_state);
3641 l2cap_retransmit_one_frame(chan, tx_seq);
3642
3643 l2cap_ertm_send(chan);
3644
3645 if (test_bit(CONN_WAIT_F, &chan->conn_state)) {
3646 chan->srej_save_reqseq = tx_seq;
3647 set_bit(CONN_SREJ_ACT, &chan->conn_state);
3648 }
3649 } else if (rx_control & L2CAP_CTRL_FINAL) {
3650 if (test_bit(CONN_SREJ_ACT, &chan->conn_state) &&
3651 chan->srej_save_reqseq == tx_seq)
3652 clear_bit(CONN_SREJ_ACT, &chan->conn_state);
3653 else
3654 l2cap_retransmit_one_frame(chan, tx_seq);
3655 } else {
3656 l2cap_retransmit_one_frame(chan, tx_seq);
3657 if (test_bit(CONN_WAIT_F, &chan->conn_state)) {
3658 chan->srej_save_reqseq = tx_seq;
3659 set_bit(CONN_SREJ_ACT, &chan->conn_state);
3660 }
3661 }
3662}
3663
3664static inline void l2cap_data_channel_rnrframe(struct l2cap_chan *chan, u16 rx_control)
3665{
3666 u8 tx_seq = __get_reqseq(rx_control);
3667
3668 BT_DBG("chan %p, req_seq %d ctrl 0x%4.4x", chan, tx_seq, rx_control);
3669
3670 set_bit(CONN_REMOTE_BUSY, &chan->conn_state);
3671 chan->expected_ack_seq = tx_seq;
3672 l2cap_drop_acked_frames(chan);
3673
3674 if (rx_control & L2CAP_CTRL_POLL)
3675 set_bit(CONN_SEND_FBIT, &chan->conn_state);
3676
3677 if (!test_bit(CONN_SREJ_SENT, &chan->conn_state)) {
3678 __clear_retrans_timer(chan);
3679 if (rx_control & L2CAP_CTRL_POLL)
3680 l2cap_send_rr_or_rnr(chan, L2CAP_CTRL_FINAL);
3681 return;
3682 }
3683
3684 if (rx_control & L2CAP_CTRL_POLL)
3685 l2cap_send_srejtail(chan);
3686 else
3687 l2cap_send_sframe(chan, L2CAP_SUPER_RCV_READY);
3688}
3689
3690static inline int l2cap_data_channel_sframe(struct l2cap_chan *chan, u16 rx_control, struct sk_buff *skb)
3691{
3692 BT_DBG("chan %p rx_control 0x%4.4x len %d", chan, rx_control, skb->len);
3693
3694 if (L2CAP_CTRL_FINAL & rx_control &&
3695 test_bit(CONN_WAIT_F, &chan->conn_state)) {
3696 __clear_monitor_timer(chan);
3697 if (chan->unacked_frames > 0)
3698 __set_retrans_timer(chan);
3699 clear_bit(CONN_WAIT_F, &chan->conn_state);
3700 }
3701
3702 switch (rx_control & L2CAP_CTRL_SUPERVISE) {
3703 case L2CAP_SUPER_RCV_READY:
3704 l2cap_data_channel_rrframe(chan, rx_control);
3705 break;
3706
3707 case L2CAP_SUPER_REJECT:
3708 l2cap_data_channel_rejframe(chan, rx_control);
3709 break;
3710
3711 case L2CAP_SUPER_SELECT_REJECT:
3712 l2cap_data_channel_srejframe(chan, rx_control);
3713 break;
3714
3715 case L2CAP_SUPER_RCV_NOT_READY:
3716 l2cap_data_channel_rnrframe(chan, rx_control);
3717 break;
3718 }
3719
3720 kfree_skb(skb);
3721 return 0;
3722}
3723
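/* Entry point for ERTM traffic on a channel: verify the FCS, the frame
 * length and the req_seq field, then dispatch the frame to the I-frame
 * or S-frame handler.
 */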
3724static int l2cap_ertm_data_rcv(struct sock *sk, struct sk_buff *skb)
3725{
3726 struct l2cap_chan *chan = l2cap_pi(sk)->chan;
3727 u16 control;
3728 u8 req_seq;
3729 int len, next_tx_seq_offset, req_seq_offset;
3730
3731 control = get_unaligned_le16(skb->data);
3732 skb_pull(skb, 2);
3733 len = skb->len;
3734
3735 /*
3736 * A frame with a bad FCS can simply be dropped here: the local
3737 * receiver will notice the gap in the sequence numbers and start
3738 * the recovery procedure, requesting retransmission from the peer.
3739 */
3740 if (l2cap_check_fcs(chan, skb))
3741 goto drop;
3742
3743 if (__is_sar_start(control) && __is_iframe(control))
3744 len -= 2;
3745
3746 if (chan->fcs == L2CAP_FCS_CRC16)
3747 len -= 2;
3748
3749 if (len > chan->mps) {
3750 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
3751 goto drop;
3752 }
3753
3754 req_seq = __get_reqseq(control);
3755 req_seq_offset = (req_seq - chan->expected_ack_seq) % 64;
3756 if (req_seq_offset < 0)
3757 req_seq_offset += 64;
3758
3759 next_tx_seq_offset =
3760 (chan->next_tx_seq - chan->expected_ack_seq) % 64;
3761 if (next_tx_seq_offset < 0)
3762 next_tx_seq_offset += 64;
3763
3764 /* check for invalid req-seq */
3765 if (req_seq_offset > next_tx_seq_offset) {
3766 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
3767 goto drop;
3768 }
3769
3770 if (__is_iframe(control)) {
3771 if (len < 0) {
3772 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
3773 goto drop;
3774 }
3775
3776 l2cap_data_channel_iframe(chan, control, skb);
3777 } else {
3778 if (len != 0) {
3779 BT_ERR("S-frame with non-zero payload length %d", len);
3780 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
3781 goto drop;
3782 }
3783
3784 l2cap_data_channel_sframe(chan, control, skb);
3785 }
3786
3787 return 0;
3788
3789drop:
3790 kfree_skb(skb);
3791 return 0;
3792}
3793
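/* Deliver incoming data on a connection-oriented channel according to
 * its mode: Basic mode frames go straight to the socket, ERTM frames
 * run through the full state machine (or the socket backlog when the
 * socket is owned by user context) and Streaming frames are sequence
 * checked and reassembled.
 */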
3794static inline int l2cap_data_channel(struct l2cap_conn *conn, u16 cid, struct sk_buff *skb)
3795{
3796 struct l2cap_chan *chan;
3797 struct sock *sk = NULL;
3798 u16 control;
3799 u8 tx_seq;
3800 int len;
3801
3802 chan = l2cap_get_chan_by_scid(conn, cid);
3803 if (!chan) {
3804 BT_DBG("unknown cid 0x%4.4x", cid);
3805 goto drop;
3806 }
3807
3808 sk = chan->sk;
3809
3810 BT_DBG("chan %p, len %d", chan, skb->len);
3811
3812 if (chan->state != BT_CONNECTED)
3813 goto drop;
3814
3815 switch (chan->mode) {
3816 case L2CAP_MODE_BASIC:
3817 /* If the socket receive buffer overflows we drop data here,
3818 * which is *bad* because L2CAP has to be reliable.
3819 * But there is no other choice: Basic mode L2CAP provides
3820 * no flow control mechanism. */
3821
3822 if (chan->imtu < skb->len)
3823 goto drop;
3824
3825 if (!chan->ops->recv(chan->data, skb))
3826 goto done;
3827 break;
3828
3829 case L2CAP_MODE_ERTM:
3830 if (!sock_owned_by_user(sk)) {
3831 l2cap_ertm_data_rcv(sk, skb);
3832 } else {
3833 if (sk_add_backlog(sk, skb))
3834 goto drop;
3835 }
3836
3837 goto done;
3838
3839 case L2CAP_MODE_STREAMING:
3840 control = get_unaligned_le16(skb->data);
3841 skb_pull(skb, 2);
3842 len = skb->len;
3843
3844 if (l2cap_check_fcs(chan, skb))
3845 goto drop;
3846
3847 if (__is_sar_start(control))
3848 len -= 2;
3849
3850 if (chan->fcs == L2CAP_FCS_CRC16)
3851 len -= 2;
3852
3853 if (len > chan->mps || len < 0 || __is_sframe(control))
3854 goto drop;
3855
3856 tx_seq = __get_txseq(control);
3857
3858 if (chan->expected_tx_seq == tx_seq)
3859 chan->expected_tx_seq = (chan->expected_tx_seq + 1) % 64;
3860 else
3861 chan->expected_tx_seq = (tx_seq + 1) % 64;
3862
3863 l2cap_streaming_reassembly_sdu(chan, skb, control);
3864
3865 goto done;
3866
3867 default:
3868 BT_DBG("chan %p: bad mode 0x%2.2x", chan, chan->mode);
3869 break;
3870 }
3871
3872drop:
3873 kfree_skb(skb);
3874
3875done:
3876 if (sk)
3877 bh_unlock_sock(sk);
3878
3879 return 0;
3880}
3881
3882static inline int l2cap_conless_channel(struct l2cap_conn *conn, __le16 psm, struct sk_buff *skb)
3883{
3884 struct sock *sk = NULL;
3885 struct l2cap_chan *chan;
3886
3887 chan = l2cap_global_chan_by_psm(0, psm, conn->src);
3888 if (!chan)
3889 goto drop;
3890
3891 sk = chan->sk;
3892
3893 bh_lock_sock(sk);
3894
3895 BT_DBG("sk %p, len %d", sk, skb->len);
3896
3897 if (chan->state != BT_BOUND && chan->state != BT_CONNECTED)
3898 goto drop;
3899
3900 if (chan->imtu < skb->len)
3901 goto drop;
3902
3903 if (!chan->ops->recv(chan->data, skb))
3904 goto done;
3905
3906drop:
3907 kfree_skb(skb);
3908
3909done:
3910 if (sk)
3911 bh_unlock_sock(sk);
3912 return 0;
3913}
3914
3915static inline int l2cap_att_channel(struct l2cap_conn *conn, __le16 cid, struct sk_buff *skb)
3916{
3917 struct sock *sk = NULL;
3918 struct l2cap_chan *chan;
3919
3920 chan = l2cap_global_chan_by_scid(0, cid, conn->src);
3921 if (!chan)
3922 goto drop;
3923
3924 sk = chan->sk;
3925
3926 bh_lock_sock(sk);
3927
3928 BT_DBG("sk %p, len %d", sk, skb->len);
3929
3930 if (chan->state != BT_BOUND && chan->state != BT_CONNECTED)
3931 goto drop;
3932
3933 if (chan->imtu < skb->len)
3934 goto drop;
3935
3936 if (!chan->ops->recv(chan->data, skb))
3937 goto done;
3938
3939drop:
3940 kfree_skb(skb);
3941
3942done:
3943 if (sk)
3944 bh_unlock_sock(sk);
3945 return 0;
3946}
3947
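/* Demultiplex a complete L2CAP frame by destination CID: signalling,
 * connectionless data, the LE ATT fixed channel and SMP are handled
 * in place, everything else goes to a connection-oriented channel.
 */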
3948static void l2cap_recv_frame(struct l2cap_conn *conn, struct sk_buff *skb)
3949{
3950 struct l2cap_hdr *lh = (void *) skb->data;
3951 u16 cid, len;
3952 __le16 psm;
3953
3954 skb_pull(skb, L2CAP_HDR_SIZE);
3955 cid = __le16_to_cpu(lh->cid);
3956 len = __le16_to_cpu(lh->len);
3957
3958 if (len != skb->len) {
3959 kfree_skb(skb);
3960 return;
3961 }
3962
3963 BT_DBG("len %d, cid 0x%4.4x", len, cid);
3964
3965 switch (cid) {
3966 case L2CAP_CID_LE_SIGNALING:
3967 case L2CAP_CID_SIGNALING:
3968 l2cap_sig_channel(conn, skb);
3969 break;
3970
3971 case L2CAP_CID_CONN_LESS:
3972 psm = get_unaligned_le16(skb->data);
3973 skb_pull(skb, 2);
3974 l2cap_conless_channel(conn, psm, skb);
3975 break;
3976
3977 case L2CAP_CID_LE_DATA:
3978 l2cap_att_channel(conn, cid, skb);
3979 break;
3980
3981 case L2CAP_CID_SMP:
3982 if (smp_sig_channel(conn, skb))
3983 l2cap_conn_del(conn->hcon, EACCES);
3984 break;
3985
3986 default:
3987 l2cap_data_channel(conn, cid, skb);
3988 break;
3989 }
3990}
3991
3992/* ---- L2CAP interface with lower layer (HCI) ---- */
3993
3994static int l2cap_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
3995{
3996 int exact = 0, lm1 = 0, lm2 = 0;
3997 struct l2cap_chan *c;
3998
3999 if (type != ACL_LINK)
4000 return -EINVAL;
4001
4002 BT_DBG("hdev %s, bdaddr %s", hdev->name, batostr(bdaddr));
4003
4004 /* Find listening sockets and check their link_mode */
4005 read_lock(&chan_list_lock);
4006 list_for_each_entry(c, &chan_list, global_l) {
4007 struct sock *sk = c->sk;
4008
4009 if (c->state != BT_LISTEN)
4010 continue;
4011
4012 if (!bacmp(&bt_sk(sk)->src, &hdev->bdaddr)) {
4013 lm1 |= HCI_LM_ACCEPT;
4014 if (c->role_switch)
4015 lm1 |= HCI_LM_MASTER;
4016 exact++;
4017 } else if (!bacmp(&bt_sk(sk)->src, BDADDR_ANY)) {
4018 lm2 |= HCI_LM_ACCEPT;
4019 if (c->role_switch)
4020 lm2 |= HCI_LM_MASTER;
4021 }
4022 }
4023 read_unlock(&chan_list_lock);
4024
4025 return exact ? lm1 : lm2;
4026}
4027
4028static int l2cap_connect_cfm(struct hci_conn *hcon, u8 status)
4029{
4030 struct l2cap_conn *conn;
4031
4032 BT_DBG("hcon %p bdaddr %s status %d", hcon, batostr(&hcon->dst), status);
4033
4034 if (!(hcon->type == ACL_LINK || hcon->type == LE_LINK))
4035 return -EINVAL;
4036
4037 if (!status) {
4038 conn = l2cap_conn_add(hcon, status);
4039 if (conn)
4040 l2cap_conn_ready(conn);
4041 } else
4042 l2cap_conn_del(hcon, bt_to_errno(status));
4043
4044 return 0;
4045}
4046
4047static int l2cap_disconn_ind(struct hci_conn *hcon)
4048{
4049 struct l2cap_conn *conn = hcon->l2cap_data;
4050
4051 BT_DBG("hcon %p", hcon);
4052
4053 if ((hcon->type != ACL_LINK && hcon->type != LE_LINK) || !conn)
4054 return 0x13;
4055
4056 return conn->disc_reason;
4057}
4058
4059static int l2cap_disconn_cfm(struct hci_conn *hcon, u8 reason)
4060{
4061 BT_DBG("hcon %p reason %d", hcon, reason);
4062
4063 if (!(hcon->type == ACL_LINK || hcon->type == LE_LINK))
4064 return -EINVAL;
4065
4066 l2cap_conn_del(hcon, bt_to_errno(reason));
4067
4068 return 0;
4069}
4070
4071static inline void l2cap_check_encryption(struct l2cap_chan *chan, u8 encrypt)
4072{
4073 if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED)
4074 return;
4075
4076 if (encrypt == 0x00) {
4077 if (chan->sec_level == BT_SECURITY_MEDIUM) {
4078 __clear_chan_timer(chan);
4079 __set_chan_timer(chan, HZ * 5);
4080 } else if (chan->sec_level == BT_SECURITY_HIGH)
4081 l2cap_chan_close(chan, ECONNREFUSED);
4082 } else {
4083 if (chan->sec_level == BT_SECURITY_MEDIUM)
4084 __clear_chan_timer(chan);
4085 }
4086}
4087
4088static int l2cap_security_cfm(struct hci_conn *hcon, u8 status, u8 encrypt)
4089{
4090 struct l2cap_conn *conn = hcon->l2cap_data;
4091 struct l2cap_chan *chan;
4092
4093 if (!conn)
4094 return 0;
4095
4096 BT_DBG("conn %p", conn);
4097
4098 read_lock(&conn->chan_lock);
4099
4100 list_for_each_entry(chan, &conn->chan_l, list) {
4101 struct sock *sk = chan->sk;
4102
4103 bh_lock_sock(sk);
4104
4105 BT_DBG("chan->scid %d", chan->scid);
4106
4107 if (chan->scid == L2CAP_CID_LE_DATA) {
4108 if (!status && encrypt) {
4109 chan->sec_level = hcon->sec_level;
4110 del_timer(&conn->security_timer);
4111 l2cap_chan_ready(sk);
4112 smp_distribute_keys(conn, 0);
4113 }
4114
4115 bh_unlock_sock(sk);
4116 continue;
4117 }
4118
4119 if (test_bit(CONF_CONNECT_PEND, &chan->conf_state)) {
4120 bh_unlock_sock(sk);
4121 continue;
4122 }
4123
4124 if (!status && (chan->state == BT_CONNECTED ||
4125 chan->state == BT_CONFIG)) {
4126 l2cap_check_encryption(chan, encrypt);
4127 bh_unlock_sock(sk);
4128 continue;
4129 }
4130
4131 if (chan->state == BT_CONNECT) {
4132 if (!status) {
4133 struct l2cap_conn_req req;
4134 req.scid = cpu_to_le16(chan->scid);
4135 req.psm = chan->psm;
4136
4137 chan->ident = l2cap_get_ident(conn);
4138 set_bit(CONF_CONNECT_PEND, &chan->conf_state);
4139
4140 l2cap_send_cmd(conn, chan->ident,
4141 L2CAP_CONN_REQ, sizeof(req), &req);
4142 } else {
4143 __clear_chan_timer(chan);
4144 __set_chan_timer(chan, HZ / 10);
4145 }
4146 } else if (chan->state == BT_CONNECT2) {
4147 struct l2cap_conn_rsp rsp;
4148 __u16 res, stat;
4149
4150 if (!status) {
4151 if (bt_sk(sk)->defer_setup) {
4152 struct sock *parent = bt_sk(sk)->parent;
4153 res = L2CAP_CR_PEND;
4154 stat = L2CAP_CS_AUTHOR_PEND;
4155 if (parent)
4156 parent->sk_data_ready(parent, 0);
4157 } else {
4158 l2cap_state_change(chan, BT_CONFIG);
4159 res = L2CAP_CR_SUCCESS;
4160 stat = L2CAP_CS_NO_INFO;
4161 }
4162 } else {
4163 l2cap_state_change(chan, BT_DISCONN);
4164 __set_chan_timer(chan, HZ / 10);
4165 res = L2CAP_CR_SEC_BLOCK;
4166 stat = L2CAP_CS_NO_INFO;
4167 }
4168
4169 rsp.scid = cpu_to_le16(chan->dcid);
4170 rsp.dcid = cpu_to_le16(chan->scid);
4171 rsp.result = cpu_to_le16(res);
4172 rsp.status = cpu_to_le16(stat);
4173 l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
4174 sizeof(rsp), &rsp);
4175 }
4176
4177 bh_unlock_sock(sk);
4178 }
4179
4180 read_unlock(&conn->chan_lock);
4181
4182 return 0;
4183}
4184
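/* Reassemble HCI ACL fragments into complete L2CAP frames: a start
 * fragment carries the basic header with the total frame length, and
 * continuation fragments are appended until rx_len reaches zero.
 */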
4185static int l2cap_recv_acldata(struct hci_conn *hcon, struct sk_buff *skb, u16 flags)
4186{
4187 struct l2cap_conn *conn = hcon->l2cap_data;
4188
4189 if (!conn)
4190 conn = l2cap_conn_add(hcon, 0);
4191
4192 if (!conn)
4193 goto drop;
4194
4195 BT_DBG("conn %p len %d flags 0x%x", conn, skb->len, flags);
4196
4197 if (!(flags & ACL_CONT)) {
4198 struct l2cap_hdr *hdr;
4199 struct l2cap_chan *chan;
4200 u16 cid;
4201 int len;
4202
4203 if (conn->rx_len) {
4204 BT_ERR("Unexpected start frame (len %d)", skb->len);
4205 kfree_skb(conn->rx_skb);
4206 conn->rx_skb = NULL;
4207 conn->rx_len = 0;
4208 l2cap_conn_unreliable(conn, ECOMM);
4209 }
4210
4211 /* A start fragment always begins with the basic L2CAP header */
4212 if (skb->len < L2CAP_HDR_SIZE) {
4213 BT_ERR("Frame is too short (len %d)", skb->len);
4214 l2cap_conn_unreliable(conn, ECOMM);
4215 goto drop;
4216 }
4217
4218 hdr = (struct l2cap_hdr *) skb->data;
4219 len = __le16_to_cpu(hdr->len) + L2CAP_HDR_SIZE;
4220 cid = __le16_to_cpu(hdr->cid);
4221
4222 if (len == skb->len) {
4223 /* Complete frame received */
4224 l2cap_recv_frame(conn, skb);
4225 return 0;
4226 }
4227
4228 BT_DBG("Start: total len %d, frag len %d", len, skb->len);
4229
4230 if (skb->len > len) {
4231 BT_ERR("Frame is too long (len %d, expected len %d)",
4232 skb->len, len);
4233 l2cap_conn_unreliable(conn, ECOMM);
4234 goto drop;
4235 }
4236
4237 chan = l2cap_get_chan_by_scid(conn, cid);
4238
4239 if (chan && chan->sk) {
4240 struct sock *sk = chan->sk;
4241
4242 if (chan->imtu < len - L2CAP_HDR_SIZE) {
4243 BT_ERR("Frame exceeding recv MTU (len %d, "
4244 "MTU %d)", len,
4245 chan->imtu);
4246 bh_unlock_sock(sk);
4247 l2cap_conn_unreliable(conn, ECOMM);
4248 goto drop;
4249 }
4250 bh_unlock_sock(sk);
4251 }
4252
4253 /* Allocate skb for the complete frame (with header) */
4254 conn->rx_skb = bt_skb_alloc(len, GFP_ATOMIC);
4255 if (!conn->rx_skb)
4256 goto drop;
4257
4258 skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
4259 skb->len);
4260 conn->rx_len = len - skb->len;
4261 } else {
4262 BT_DBG("Cont: frag len %d (expecting %d)", skb->len, conn->rx_len);
4263
4264 if (!conn->rx_len) {
4265 BT_ERR("Unexpected continuation frame (len %d)", skb->len);
4266 l2cap_conn_unreliable(conn, ECOMM);
4267 goto drop;
4268 }
4269
4270 if (skb->len > conn->rx_len) {
4271 BT_ERR("Fragment is too long (len %d, expected %d)",
4272 skb->len, conn->rx_len);
4273 kfree_skb(conn->rx_skb);
4274 conn->rx_skb = NULL;
4275 conn->rx_len = 0;
4276 l2cap_conn_unreliable(conn, ECOMM);
4277 goto drop;
4278 }
4279
4280 skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
4281 skb->len);
4282 conn->rx_len -= skb->len;
4283
4284 if (!conn->rx_len) {
4285 /* Complete frame received */
4286 l2cap_recv_frame(conn, conn->rx_skb);
4287 conn->rx_skb = NULL;
4288 }
4289 }
4290
4291drop:
4292 kfree_skb(skb);
4293 return 0;
4294}
4295
4296static int l2cap_debugfs_show(struct seq_file *f, void *p)
4297{
4298 struct l2cap_chan *c;
4299
4300 read_lock_bh(&chan_list_lock);
4301
4302 list_for_each_entry(c, &chan_list, global_l) {
4303 struct sock *sk = c->sk;
4304
4305 seq_printf(f, "%s %s %d %d 0x%4.4x 0x%4.4x %d %d %d %d\n",
4306 batostr(&bt_sk(sk)->src),
4307 batostr(&bt_sk(sk)->dst),
4308 c->state, __le16_to_cpu(c->psm),
4309 c->scid, c->dcid, c->imtu, c->omtu,
4310 c->sec_level, c->mode);
4311 }
4312
4313 read_unlock_bh(&chan_list_lock);
4314
4315 return 0;
4316}
4317
4318static int l2cap_debugfs_open(struct inode *inode, struct file *file)
4319{
4320 return single_open(file, l2cap_debugfs_show, inode->i_private);
4321}
4322
4323static const struct file_operations l2cap_debugfs_fops = {
4324 .open = l2cap_debugfs_open,
4325 .read = seq_read,
4326 .llseek = seq_lseek,
4327 .release = single_release,
4328};
4329
4330static struct dentry *l2cap_debugfs;
4331
4332static struct hci_proto l2cap_hci_proto = {
4333 .name = "L2CAP",
4334 .id = HCI_PROTO_L2CAP,
4335 .connect_ind = l2cap_connect_ind,
4336 .connect_cfm = l2cap_connect_cfm,
4337 .disconn_ind = l2cap_disconn_ind,
4338 .disconn_cfm = l2cap_disconn_cfm,
4339 .security_cfm = l2cap_security_cfm,
4340 .recv_acldata = l2cap_recv_acldata
4341};
4342
4343int __init l2cap_init(void)
4344{
4345 int err;
4346
4347 err = l2cap_init_sockets();
4348 if (err < 0)
4349 return err;
4350
4351 err = hci_register_proto(&l2cap_hci_proto);
4352 if (err < 0) {
4353 BT_ERR("L2CAP protocol registration failed");
4354 bt_sock_unregister(BTPROTO_L2CAP);
4355 goto error;
4356 }
4357
4358 if (bt_debugfs) {
4359 l2cap_debugfs = debugfs_create_file("l2cap", 0444,
4360 bt_debugfs, NULL, &l2cap_debugfs_fops);
4361 if (!l2cap_debugfs)
4362 BT_ERR("Failed to create L2CAP debug file");
4363 }
4364
4365 return 0;
4366
4367error:
4368 l2cap_cleanup_sockets();
4369 return err;
4370}
4371
4372void l2cap_exit(void)
4373{
4374 debugfs_remove(l2cap_debugfs);
4375
4376 if (hci_unregister_proto(&l2cap_hci_proto) < 0)
4377 BT_ERR("L2CAP protocol unregistration failed");
4378
4379 l2cap_cleanup_sockets();
4380}
4381
4382module_param(disable_ertm, bool, 0644);
4383MODULE_PARM_DESC(disable_ertm, "Disable enhanced retransmission mode");