Bluetooth: Remove some sk references from l2cap_core.c
[GitHub/mt8127/android_kernel_alcatel_ttab.git] / net / bluetooth / l2cap_core.c
... / ...
CommitLineData
1/*
2 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (C) 2000-2001 Qualcomm Incorporated
4 Copyright (C) 2009-2010 Gustavo F. Padovan <gustavo@padovan.org>
5 Copyright (C) 2010 Google Inc.
6
7 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
8
9 This program is free software; you can redistribute it and/or modify
10 it under the terms of the GNU General Public License version 2 as
11 published by the Free Software Foundation;
12
13 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
14 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
15 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
16 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
17 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
18 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
19 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
20 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
21
22 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
23 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
24 SOFTWARE IS DISCLAIMED.
25*/
26
27/* Bluetooth L2CAP core. */
28
29#include <linux/module.h>
30
31#include <linux/types.h>
32#include <linux/capability.h>
33#include <linux/errno.h>
34#include <linux/kernel.h>
35#include <linux/sched.h>
36#include <linux/slab.h>
37#include <linux/poll.h>
38#include <linux/fcntl.h>
39#include <linux/init.h>
40#include <linux/interrupt.h>
41#include <linux/socket.h>
42#include <linux/skbuff.h>
43#include <linux/list.h>
44#include <linux/device.h>
45#include <linux/debugfs.h>
46#include <linux/seq_file.h>
47#include <linux/uaccess.h>
48#include <linux/crc16.h>
49#include <net/sock.h>
50
51#include <asm/system.h>
52#include <asm/unaligned.h>
53
54#include <net/bluetooth/bluetooth.h>
55#include <net/bluetooth/hci_core.h>
56#include <net/bluetooth/l2cap.h>
57
/* Module parameter: when non-zero, ERTM and Streaming modes are not
 * advertised or negotiated (see l2cap_mode_supported()). */
int disable_ertm;

/* Locally supported feature mask; ERTM/Streaming bits are OR-ed in at
 * negotiation time unless disable_ertm is set. */
static u32 l2cap_feat_mask = L2CAP_FEAT_FIXED_CHAN;
/* Bitmap of supported fixed channels (bit 1 = L2CAP signalling). */
static u8 l2cap_fixed_chan[8] = { 0x02, };

/* Workqueue used by the ERTM local-busy handling (l2cap_busy_work). */
static struct workqueue_struct *_busy_wq;

/* Global list of all L2CAP sockets, protected by its rwlock. */
struct bt_sock_list l2cap_sk_list = {
	.lock = __RW_LOCK_UNLOCKED(l2cap_sk_list.lock)
};

static void l2cap_busy_work(struct work_struct *work);

static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
				u8 code, u8 ident, u16 dlen, void *data);
static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data);

static int l2cap_ertm_data_rcv(struct sock *sk, struct sk_buff *skb);
77/* ---- L2CAP channels ---- */
78static struct l2cap_chan *__l2cap_get_chan_by_dcid(struct l2cap_conn *conn, u16 cid)
79{
80 struct l2cap_chan *c;
81
82 list_for_each_entry(c, &conn->chan_l, list) {
83 struct sock *s = c->sk;
84 if (l2cap_pi(s)->dcid == cid)
85 return c;
86 }
87 return NULL;
88
89}
90
91static struct l2cap_chan *__l2cap_get_chan_by_scid(struct l2cap_conn *conn, u16 cid)
92{
93 struct l2cap_chan *c;
94
95 list_for_each_entry(c, &conn->chan_l, list) {
96 struct sock *s = c->sk;
97 if (l2cap_pi(s)->scid == cid)
98 return c;
99 }
100 return NULL;
101}
102
103/* Find channel with given SCID.
104 * Returns locked socket */
105static struct l2cap_chan *l2cap_get_chan_by_scid(struct l2cap_conn *conn, u16 cid)
106{
107 struct l2cap_chan *c;
108
109 read_lock(&conn->chan_lock);
110 c = __l2cap_get_chan_by_scid(conn, cid);
111 if (c)
112 bh_lock_sock(c->sk);
113 read_unlock(&conn->chan_lock);
114 return c;
115}
116
117static struct l2cap_chan *__l2cap_get_chan_by_ident(struct l2cap_conn *conn, u8 ident)
118{
119 struct l2cap_chan *c;
120
121 list_for_each_entry(c, &conn->chan_l, list) {
122 if (c->ident == ident)
123 return c;
124 }
125 return NULL;
126}
127
128static inline struct l2cap_chan *l2cap_get_chan_by_ident(struct l2cap_conn *conn, u8 ident)
129{
130 struct l2cap_chan *c;
131
132 read_lock(&conn->chan_lock);
133 c = __l2cap_get_chan_by_ident(conn, ident);
134 if (c)
135 bh_lock_sock(c->sk);
136 read_unlock(&conn->chan_lock);
137 return c;
138}
139
140static u16 l2cap_alloc_cid(struct l2cap_conn *conn)
141{
142 u16 cid = L2CAP_CID_DYN_START;
143
144 for (; cid < L2CAP_CID_DYN_END; cid++) {
145 if (!__l2cap_get_chan_by_scid(conn, cid))
146 return cid;
147 }
148
149 return 0;
150}
151
152static struct l2cap_chan *l2cap_chan_alloc(struct sock *sk)
153{
154 struct l2cap_chan *chan;
155
156 chan = kzalloc(sizeof(*chan), GFP_ATOMIC);
157 if (!chan)
158 return NULL;
159
160 chan->sk = sk;
161
162 return chan;
163}
164
165static void __l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan)
166{
167 struct sock *sk = chan->sk;
168
169 BT_DBG("conn %p, psm 0x%2.2x, dcid 0x%4.4x", conn,
170 l2cap_pi(sk)->psm, l2cap_pi(sk)->dcid);
171
172 conn->disc_reason = 0x13;
173
174 l2cap_pi(sk)->conn = conn;
175
176 if (sk->sk_type == SOCK_SEQPACKET || sk->sk_type == SOCK_STREAM) {
177 if (conn->hcon->type == LE_LINK) {
178 /* LE connection */
179 l2cap_pi(sk)->omtu = L2CAP_LE_DEFAULT_MTU;
180 l2cap_pi(sk)->scid = L2CAP_CID_LE_DATA;
181 l2cap_pi(sk)->dcid = L2CAP_CID_LE_DATA;
182 } else {
183 /* Alloc CID for connection-oriented socket */
184 l2cap_pi(sk)->scid = l2cap_alloc_cid(conn);
185 l2cap_pi(sk)->omtu = L2CAP_DEFAULT_MTU;
186 }
187 } else if (sk->sk_type == SOCK_DGRAM) {
188 /* Connectionless socket */
189 l2cap_pi(sk)->scid = L2CAP_CID_CONN_LESS;
190 l2cap_pi(sk)->dcid = L2CAP_CID_CONN_LESS;
191 l2cap_pi(sk)->omtu = L2CAP_DEFAULT_MTU;
192 } else {
193 /* Raw socket can send/recv signalling messages only */
194 l2cap_pi(sk)->scid = L2CAP_CID_SIGNALING;
195 l2cap_pi(sk)->dcid = L2CAP_CID_SIGNALING;
196 l2cap_pi(sk)->omtu = L2CAP_DEFAULT_MTU;
197 }
198
199 sock_hold(sk);
200
201 list_add(&chan->list, &conn->chan_l);
202}
203
/* Delete channel.
 * Must be called on the locked socket.
 *
 * Unlinks the channel from its connection (if any), marks the socket
 * closed/zapped, notifies the parent or state-change waiters, and frees
 * all ERTM resources before releasing the channel itself. */
void l2cap_chan_del(struct l2cap_chan *chan, int err)
{
	struct sock *sk = chan->sk;
	struct l2cap_conn *conn = l2cap_pi(sk)->conn;
	struct sock *parent = bt_sk(sk)->parent;

	l2cap_sock_clear_timer(sk);

	BT_DBG("chan %p, conn %p, err %d", chan, conn, err);

	if (conn) {
		/* Delete from channel list */
		write_lock_bh(&conn->chan_lock);
		list_del(&chan->list);
		write_unlock_bh(&conn->chan_lock);
		/* Drop the reference taken by __l2cap_chan_add() */
		__sock_put(sk);

		l2cap_pi(sk)->conn = NULL;
		hci_conn_put(conn->hcon);
	}

	sk->sk_state = BT_CLOSED;
	sock_set_flag(sk, SOCK_ZAPPED);

	if (err)
		sk->sk_err = err;

	if (parent) {
		/* Pending connection on a listening socket: detach and
		 * wake the listener. */
		bt_accept_unlink(sk);
		parent->sk_data_ready(parent, 0);
	} else
		sk->sk_state_change(sk);

	/* ERTM state (queues, timers, SREJ list) only exists once both
	 * configuration directions have completed. */
	if (!(l2cap_pi(sk)->conf_state & L2CAP_CONF_OUTPUT_DONE &&
			l2cap_pi(sk)->conf_state & L2CAP_CONF_INPUT_DONE))
		goto free;

	skb_queue_purge(TX_QUEUE(sk));

	if (l2cap_pi(sk)->mode == L2CAP_MODE_ERTM) {
		struct srej_list *l, *tmp;

		/* Stop all ERTM timers before dropping buffered frames. */
		del_timer(&chan->retrans_timer);
		del_timer(&chan->monitor_timer);
		del_timer(&chan->ack_timer);

		skb_queue_purge(&chan->srej_q);
		skb_queue_purge(&chan->busy_q);

		/* Free any outstanding selective-reject entries. */
		list_for_each_entry_safe(l, tmp, &chan->srej_l, list) {
			list_del(&l->list);
			kfree(l);
		}
	}

free:
	kfree(chan);
}
264
265static inline u8 l2cap_get_auth_type(struct sock *sk)
266{
267 if (sk->sk_type == SOCK_RAW) {
268 switch (l2cap_pi(sk)->sec_level) {
269 case BT_SECURITY_HIGH:
270 return HCI_AT_DEDICATED_BONDING_MITM;
271 case BT_SECURITY_MEDIUM:
272 return HCI_AT_DEDICATED_BONDING;
273 default:
274 return HCI_AT_NO_BONDING;
275 }
276 } else if (l2cap_pi(sk)->psm == cpu_to_le16(0x0001)) {
277 if (l2cap_pi(sk)->sec_level == BT_SECURITY_LOW)
278 l2cap_pi(sk)->sec_level = BT_SECURITY_SDP;
279
280 if (l2cap_pi(sk)->sec_level == BT_SECURITY_HIGH)
281 return HCI_AT_NO_BONDING_MITM;
282 else
283 return HCI_AT_NO_BONDING;
284 } else {
285 switch (l2cap_pi(sk)->sec_level) {
286 case BT_SECURITY_HIGH:
287 return HCI_AT_GENERAL_BONDING_MITM;
288 case BT_SECURITY_MEDIUM:
289 return HCI_AT_GENERAL_BONDING;
290 default:
291 return HCI_AT_NO_BONDING;
292 }
293 }
294}
295
296/* Service level security */
297static inline int l2cap_check_security(struct sock *sk)
298{
299 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
300 __u8 auth_type;
301
302 auth_type = l2cap_get_auth_type(sk);
303
304 return hci_conn_security(conn->hcon, l2cap_pi(sk)->sec_level,
305 auth_type);
306}
307
308u8 l2cap_get_ident(struct l2cap_conn *conn)
309{
310 u8 id;
311
312 /* Get next available identificator.
313 * 1 - 128 are used by kernel.
314 * 129 - 199 are reserved.
315 * 200 - 254 are used by utilities like l2ping, etc.
316 */
317
318 spin_lock_bh(&conn->lock);
319
320 if (++conn->tx_ident > 128)
321 conn->tx_ident = 1;
322
323 id = conn->tx_ident;
324
325 spin_unlock_bh(&conn->lock);
326
327 return id;
328}
329
330void l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len, void *data)
331{
332 struct sk_buff *skb = l2cap_build_cmd(conn, code, ident, len, data);
333 u8 flags;
334
335 BT_DBG("code 0x%2.2x", code);
336
337 if (!skb)
338 return;
339
340 if (lmp_no_flush_capable(conn->hcon->hdev))
341 flags = ACL_START_NO_FLUSH;
342 else
343 flags = ACL_START;
344
345 hci_send_acl(conn->hcon, skb, flags);
346}
347
/* Build and send an ERTM S-frame (supervisory frame) on @chan with the
 * given control field.  Pending F/P bits recorded in conn_state are
 * folded into the control field and then cleared.  The frame carries an
 * FCS trailer when CRC16 checksumming is negotiated. */
static inline void l2cap_send_sframe(struct l2cap_chan *chan, u16 control)
{
	struct sk_buff *skb;
	struct l2cap_hdr *lh;
	struct l2cap_pinfo *pi = l2cap_pi(chan->sk);
	struct l2cap_conn *conn = pi->conn;
	struct sock *sk = (struct sock *)pi;
	/* S-frame payload is just the 2-byte control field. */
	int count, hlen = L2CAP_HDR_SIZE + 2;
	u8 flags;

	if (sk->sk_state != BT_CONNECTED)
		return;

	if (pi->fcs == L2CAP_FCS_CRC16)
		hlen += 2;	/* room for the FCS trailer */

	BT_DBG("chan %p, control 0x%2.2x", chan, control);

	count = min_t(unsigned int, conn->mtu, hlen);
	control |= L2CAP_CTRL_FRAME_TYPE;

	/* Piggy-back a pending Final bit, then clear the request. */
	if (chan->conn_state & L2CAP_CONN_SEND_FBIT) {
		control |= L2CAP_CTRL_FINAL;
		chan->conn_state &= ~L2CAP_CONN_SEND_FBIT;
	}

	/* Likewise for a pending Poll bit. */
	if (chan->conn_state & L2CAP_CONN_SEND_PBIT) {
		control |= L2CAP_CTRL_POLL;
		chan->conn_state &= ~L2CAP_CONN_SEND_PBIT;
	}

	skb = bt_skb_alloc(count, GFP_ATOMIC);
	if (!skb)
		return;

	lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
	lh->len = cpu_to_le16(hlen - L2CAP_HDR_SIZE);
	lh->cid = cpu_to_le16(pi->dcid);
	put_unaligned_le16(control, skb_put(skb, 2));

	/* FCS covers the header and control field (everything but the
	 * FCS itself). */
	if (pi->fcs == L2CAP_FCS_CRC16) {
		u16 fcs = crc16(0, (u8 *)lh, count - 2);
		put_unaligned_le16(fcs, skb_put(skb, 2));
	}

	if (lmp_no_flush_capable(conn->hcon->hdev))
		flags = ACL_START_NO_FLUSH;
	else
		flags = ACL_START;

	hci_send_acl(pi->conn->hcon, skb, flags);
}
400
401static inline void l2cap_send_rr_or_rnr(struct l2cap_chan *chan, u16 control)
402{
403 if (chan->conn_state & L2CAP_CONN_LOCAL_BUSY) {
404 control |= L2CAP_SUPER_RCV_NOT_READY;
405 chan->conn_state |= L2CAP_CONN_RNR_SENT;
406 } else
407 control |= L2CAP_SUPER_RCV_READY;
408
409 control |= chan->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT;
410
411 l2cap_send_sframe(chan, control);
412}
413
414static inline int __l2cap_no_conn_pending(struct sock *sk)
415{
416 return !(l2cap_pi(sk)->conf_state & L2CAP_CONF_CONNECT_PEND);
417}
418
/* Kick off channel establishment on @chan.  If the remote feature mask
 * is already known (or being fetched), send a Connect Request once
 * security allows; otherwise start the feature-mask information
 * exchange first — l2cap_conn_start() resumes pending channels when the
 * exchange completes. */
static void l2cap_do_start(struct l2cap_chan *chan)
{
	struct sock *sk = chan->sk;
	struct l2cap_conn *conn = l2cap_pi(sk)->conn;

	if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) {
		/* Exchange still in flight: wait for its completion. */
		if (!(conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE))
			return;

		if (l2cap_check_security(sk) && __l2cap_no_conn_pending(sk)) {
			struct l2cap_conn_req req;
			req.scid = cpu_to_le16(l2cap_pi(sk)->scid);
			req.psm = l2cap_pi(sk)->psm;

			chan->ident = l2cap_get_ident(conn);
			l2cap_pi(sk)->conf_state |= L2CAP_CONF_CONNECT_PEND;

			l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_REQ,
							sizeof(req), &req);
		}
	} else {
		/* First channel on this link: request the remote feature
		 * mask and arm the info-exchange timeout. */
		struct l2cap_info_req req;
		req.type = cpu_to_le16(L2CAP_IT_FEAT_MASK);

		conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
		conn->info_ident = l2cap_get_ident(conn);

		mod_timer(&conn->info_timer, jiffies +
					msecs_to_jiffies(L2CAP_INFO_TIMEOUT));

		l2cap_send_cmd(conn, conn->info_ident,
					L2CAP_INFO_REQ, sizeof(req), &req);
	}
}
453
454static inline int l2cap_mode_supported(__u8 mode, __u32 feat_mask)
455{
456 u32 local_feat_mask = l2cap_feat_mask;
457 if (!disable_ertm)
458 local_feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING;
459
460 switch (mode) {
461 case L2CAP_MODE_ERTM:
462 return L2CAP_FEAT_ERTM & feat_mask & local_feat_mask;
463 case L2CAP_MODE_STREAMING:
464 return L2CAP_FEAT_STREAMING & feat_mask & local_feat_mask;
465 default:
466 return 0x00;
467 }
468}
469
/* Send a Disconnect Request for @chan and move its socket to
 * BT_DISCONN, recording @err on the socket.  Drops any queued outbound
 * data and stops the ERTM timers first.  No-op when @conn is NULL. */
void l2cap_send_disconn_req(struct l2cap_conn *conn, struct l2cap_chan *chan, int err)
{
	struct sock *sk;
	struct l2cap_disconn_req req;

	if (!conn)
		return;

	sk = chan->sk;

	/* Unsent frames are pointless once we are disconnecting. */
	skb_queue_purge(TX_QUEUE(sk));

	if (l2cap_pi(sk)->mode == L2CAP_MODE_ERTM) {
		/* Stop retransmission machinery for this channel. */
		del_timer(&chan->retrans_timer);
		del_timer(&chan->monitor_timer);
		del_timer(&chan->ack_timer);
	}

	req.dcid = cpu_to_le16(l2cap_pi(sk)->dcid);
	req.scid = cpu_to_le16(l2cap_pi(sk)->scid);
	l2cap_send_cmd(conn, l2cap_get_ident(conn),
			L2CAP_DISCONN_REQ, sizeof(req), &req);

	sk->sk_state = BT_DISCONN;
	sk->sk_err = err;
}
496
497/* ---- L2CAP connections ---- */
498static void l2cap_conn_start(struct l2cap_conn *conn)
499{
500 struct l2cap_chan *chan, *tmp;
501
502 BT_DBG("conn %p", conn);
503
504 read_lock(&conn->chan_lock);
505
506 list_for_each_entry_safe(chan, tmp, &conn->chan_l, list) {
507 struct sock *sk = chan->sk;
508
509 bh_lock_sock(sk);
510
511 if (sk->sk_type != SOCK_SEQPACKET &&
512 sk->sk_type != SOCK_STREAM) {
513 bh_unlock_sock(sk);
514 continue;
515 }
516
517 if (sk->sk_state == BT_CONNECT) {
518 struct l2cap_conn_req req;
519
520 if (!l2cap_check_security(sk) ||
521 !__l2cap_no_conn_pending(sk)) {
522 bh_unlock_sock(sk);
523 continue;
524 }
525
526 if (!l2cap_mode_supported(l2cap_pi(sk)->mode,
527 conn->feat_mask)
528 && l2cap_pi(sk)->conf_state &
529 L2CAP_CONF_STATE2_DEVICE) {
530 /* __l2cap_sock_close() calls list_del(chan)
531 * so release the lock */
532 read_unlock_bh(&conn->chan_lock);
533 __l2cap_sock_close(sk, ECONNRESET);
534 read_lock_bh(&conn->chan_lock);
535 bh_unlock_sock(sk);
536 continue;
537 }
538
539 req.scid = cpu_to_le16(l2cap_pi(sk)->scid);
540 req.psm = l2cap_pi(sk)->psm;
541
542 chan->ident = l2cap_get_ident(conn);
543 l2cap_pi(sk)->conf_state |= L2CAP_CONF_CONNECT_PEND;
544
545 l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_REQ,
546 sizeof(req), &req);
547
548 } else if (sk->sk_state == BT_CONNECT2) {
549 struct l2cap_conn_rsp rsp;
550 char buf[128];
551 rsp.scid = cpu_to_le16(l2cap_pi(sk)->dcid);
552 rsp.dcid = cpu_to_le16(l2cap_pi(sk)->scid);
553
554 if (l2cap_check_security(sk)) {
555 if (bt_sk(sk)->defer_setup) {
556 struct sock *parent = bt_sk(sk)->parent;
557 rsp.result = cpu_to_le16(L2CAP_CR_PEND);
558 rsp.status = cpu_to_le16(L2CAP_CS_AUTHOR_PEND);
559 parent->sk_data_ready(parent, 0);
560
561 } else {
562 sk->sk_state = BT_CONFIG;
563 rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
564 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
565 }
566 } else {
567 rsp.result = cpu_to_le16(L2CAP_CR_PEND);
568 rsp.status = cpu_to_le16(L2CAP_CS_AUTHEN_PEND);
569 }
570
571 l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
572 sizeof(rsp), &rsp);
573
574 if (l2cap_pi(sk)->conf_state & L2CAP_CONF_REQ_SENT ||
575 rsp.result != L2CAP_CR_SUCCESS) {
576 bh_unlock_sock(sk);
577 continue;
578 }
579
580 l2cap_pi(sk)->conf_state |= L2CAP_CONF_REQ_SENT;
581 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
582 l2cap_build_conf_req(chan, buf), buf);
583 chan->num_conf_req++;
584 }
585
586 bh_unlock_sock(sk);
587 }
588
589 read_unlock(&conn->chan_lock);
590}
591
/* Find socket with cid and source bdaddr.
 * Returns closest match, locked.
 *
 * An exact source-address match wins; otherwise a socket bound to
 * BDADDR_ANY with the right CID is used.  The returned socket is
 * bh-locked for the caller. */
static struct sock *l2cap_get_sock_by_scid(int state, __le16 cid, bdaddr_t *src)
{
	struct sock *s, *sk = NULL, *sk1 = NULL;
	struct hlist_node *node;

	read_lock(&l2cap_sk_list.lock);

	sk_for_each(sk, node, &l2cap_sk_list.head) {
		if (state && sk->sk_state != state)
			continue;

		if (l2cap_pi(sk)->scid == cid) {
			/* Exact match. */
			if (!bacmp(&bt_sk(sk)->src, src))
				break;

			/* Closest match */
			if (!bacmp(&bt_sk(sk)->src, BDADDR_ANY))
				sk1 = sk;
		}
	}
	/* node is non-NULL only when the loop broke on an exact match. */
	s = node ? sk : sk1;
	if (s)
		bh_lock_sock(s);
	read_unlock(&l2cap_sk_list.lock);

	return s;
}
623
/* Handle an incoming LE connection: if a socket is listening on the LE
 * data CID, create a child socket plus channel, attach them to @conn
 * and wake the listener.  The parent comes back bh-locked from
 * l2cap_get_sock_by_scid(); the clean label releases it on both the
 * success and the error paths. */
static void l2cap_le_conn_ready(struct l2cap_conn *conn)
{
	struct sock *parent, *uninitialized_var(sk);
	struct l2cap_chan *chan;

	BT_DBG("");

	/* Check if we have socket listening on cid */
	parent = l2cap_get_sock_by_scid(BT_LISTEN, L2CAP_CID_LE_DATA,
							conn->src);
	if (!parent)
		return;

	/* Check for backlog size */
	if (sk_acceptq_is_full(parent)) {
		BT_DBG("backlog full %d", parent->sk_ack_backlog);
		goto clean;
	}

	sk = l2cap_sock_alloc(sock_net(parent), NULL, BTPROTO_L2CAP, GFP_ATOMIC);
	if (!sk)
		goto clean;

	chan = l2cap_chan_alloc(sk);
	if (!chan) {
		l2cap_sock_kill(sk);
		goto clean;
	}

	write_lock_bh(&conn->chan_lock);

	/* The new channel pins the ACL link. */
	hci_conn_hold(conn->hcon);

	l2cap_sock_init(sk, parent);

	bacpy(&bt_sk(sk)->src, conn->src);
	bacpy(&bt_sk(sk)->dst, conn->dst);

	bt_accept_enqueue(parent, sk);

	__l2cap_chan_add(conn, chan);

	l2cap_pi(sk)->chan = chan;

	l2cap_sock_set_timer(sk, sk->sk_sndtimeo);

	sk->sk_state = BT_CONNECTED;
	parent->sk_data_ready(parent, 0);

	write_unlock_bh(&conn->chan_lock);

clean:
	bh_unlock_sock(parent);
}
678
/* Called when the underlying ACL/LE link reaches BT_CONNECTED: accept a
 * pending incoming LE connection, mark raw/connectionless sockets as
 * connected immediately, and start establishment for channels waiting
 * in BT_CONNECT. */
static void l2cap_conn_ready(struct l2cap_conn *conn)
{
	struct l2cap_chan *chan;

	BT_DBG("conn %p", conn);

	/* Incoming LE link: hand it to a listening LE socket. */
	if (!conn->hcon->out && conn->hcon->type == LE_LINK)
		l2cap_le_conn_ready(conn);

	read_lock(&conn->chan_lock);

	list_for_each_entry(chan, &conn->chan_l, list) {
		struct sock *sk = chan->sk;

		bh_lock_sock(sk);

		if (conn->hcon->type == LE_LINK) {
			/* LE channels are usable as soon as the link is up. */
			l2cap_sock_clear_timer(sk);
			sk->sk_state = BT_CONNECTED;
			sk->sk_state_change(sk);
		}

		if (sk->sk_type != SOCK_SEQPACKET &&
				sk->sk_type != SOCK_STREAM) {
			/* Raw/connectionless sockets need no channel setup. */
			l2cap_sock_clear_timer(sk);
			sk->sk_state = BT_CONNECTED;
			sk->sk_state_change(sk);
		} else if (sk->sk_state == BT_CONNECT)
			l2cap_do_start(chan);

		bh_unlock_sock(sk);
	}

	read_unlock(&conn->chan_lock);
}
714
715/* Notify sockets that we cannot guaranty reliability anymore */
716static void l2cap_conn_unreliable(struct l2cap_conn *conn, int err)
717{
718 struct l2cap_chan *chan;
719
720 BT_DBG("conn %p", conn);
721
722 read_lock(&conn->chan_lock);
723
724 list_for_each_entry(chan, &conn->chan_l, list) {
725 struct sock *sk = chan->sk;
726
727 if (l2cap_pi(sk)->force_reliable)
728 sk->sk_err = err;
729 }
730
731 read_unlock(&conn->chan_lock);
732}
733
734static void l2cap_info_timeout(unsigned long arg)
735{
736 struct l2cap_conn *conn = (void *) arg;
737
738 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
739 conn->info_ident = 0;
740
741 l2cap_conn_start(conn);
742}
743
/* Create and initialise the L2CAP connection object for @hcon, or
 * return the existing one.  Returns NULL on allocation failure or, when
 * @status is non-zero, whatever (possibly NULL) hcon->l2cap_data holds. */
static struct l2cap_conn *l2cap_conn_add(struct hci_conn *hcon, u8 status)
{
	struct l2cap_conn *conn = hcon->l2cap_data;

	if (conn || status)
		return conn;

	conn = kzalloc(sizeof(struct l2cap_conn), GFP_ATOMIC);
	if (!conn)
		return NULL;

	hcon->l2cap_data = conn;
	conn->hcon = hcon;

	BT_DBG("hcon %p conn %p", hcon, conn);

	/* Prefer the controller's LE MTU on LE links when it is set. */
	if (hcon->hdev->le_mtu && hcon->type == LE_LINK)
		conn->mtu = hcon->hdev->le_mtu;
	else
		conn->mtu = hcon->hdev->acl_mtu;

	conn->src = &hcon->hdev->bdaddr;
	conn->dst = &hcon->dst;

	conn->feat_mask = 0;

	spin_lock_init(&conn->lock);
	rwlock_init(&conn->chan_lock);

	INIT_LIST_HEAD(&conn->chan_l);

	/* LE links have no feature-mask information exchange. */
	if (hcon->type != LE_LINK)
		setup_timer(&conn->info_timer, l2cap_info_timeout,
						(unsigned long) conn);

	/* Default disconnect reason. */
	conn->disc_reason = 0x13;

	return conn;
}
783
/* Tear down the L2CAP connection attached to @hcon: close every channel
 * with error @err, stop the info timer and free the connection object. */
static void l2cap_conn_del(struct hci_conn *hcon, int err)
{
	struct l2cap_conn *conn = hcon->l2cap_data;
	struct l2cap_chan *chan, *l;
	struct sock *sk;

	if (!conn)
		return;

	BT_DBG("hcon %p conn %p, err %d", hcon, conn, err);

	/* Drop any partially reassembled incoming frame. */
	kfree_skb(conn->rx_skb);

	/* Kill channels */
	list_for_each_entry_safe(chan, l, &conn->chan_l, list) {
		sk = chan->sk;
		bh_lock_sock(sk);
		l2cap_chan_del(chan, err);
		bh_unlock_sock(sk);
		l2cap_sock_kill(sk);
	}

	/* The info timer only ever runs after a request was sent. */
	if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT)
		del_timer_sync(&conn->info_timer);

	hcon->l2cap_data = NULL;
	kfree(conn);
}
812
/* Locked wrapper around __l2cap_chan_add(): link @chan into @conn's
 * channel list under the write lock. */
static inline void l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan)
{
	write_lock_bh(&conn->chan_lock);
	__l2cap_chan_add(conn, chan);
	write_unlock_bh(&conn->chan_lock);
}
819
820/* ---- Socket interface ---- */
821
/* Find socket with psm and source bdaddr.
 * Returns closest match.
 *
 * Exact source-address match wins over a BDADDR_ANY binding.  Unlike
 * l2cap_get_sock_by_scid(), the returned socket is NOT locked. */
static struct sock *l2cap_get_sock_by_psm(int state, __le16 psm, bdaddr_t *src)
{
	struct sock *sk = NULL, *sk1 = NULL;
	struct hlist_node *node;

	read_lock(&l2cap_sk_list.lock);

	sk_for_each(sk, node, &l2cap_sk_list.head) {
		if (state && sk->sk_state != state)
			continue;

		if (l2cap_pi(sk)->psm == psm) {
			/* Exact match. */
			if (!bacmp(&bt_sk(sk)->src, src))
				break;

			/* Closest match */
			if (!bacmp(&bt_sk(sk)->src, BDADDR_ANY))
				sk1 = sk;
		}
	}

	read_unlock(&l2cap_sk_list.lock);

	/* node is non-NULL only when the loop broke on an exact match. */
	return node ? sk : sk1;
}
851
/* Establish an outgoing L2CAP connection for @sk: create (or reuse) the
 * ACL or LE link to the destination address, attach a channel to it and
 * either mark the socket connected (raw/dgram) or start channel
 * establishment.  Returns 0 on success or a negative errno. */
int l2cap_do_connect(struct sock *sk)
{
	bdaddr_t *src = &bt_sk(sk)->src;
	bdaddr_t *dst = &bt_sk(sk)->dst;
	struct l2cap_conn *conn;
	struct l2cap_chan *chan;
	struct hci_conn *hcon;
	struct hci_dev *hdev;
	__u8 auth_type;
	int err;

	BT_DBG("%s -> %s psm 0x%2.2x", batostr(src), batostr(dst),
							l2cap_pi(sk)->psm);

	hdev = hci_get_route(dst, src);
	if (!hdev)
		return -EHOSTUNREACH;

	hci_dev_lock_bh(hdev);

	auth_type = l2cap_get_auth_type(sk);

	/* The LE data CID selects an LE link; everything else uses ACL. */
	if (l2cap_pi(sk)->dcid == L2CAP_CID_LE_DATA)
		hcon = hci_connect(hdev, LE_LINK, dst,
					l2cap_pi(sk)->sec_level, auth_type);
	else
		hcon = hci_connect(hdev, ACL_LINK, dst,
					l2cap_pi(sk)->sec_level, auth_type);

	if (IS_ERR(hcon)) {
		err = PTR_ERR(hcon);
		goto done;
	}

	conn = l2cap_conn_add(hcon, 0);
	if (!conn) {
		hci_conn_put(hcon);
		err = -ENOMEM;
		goto done;
	}

	chan = l2cap_chan_alloc(sk);
	if (!chan) {
		hci_conn_put(hcon);
		err = -ENOMEM;
		goto done;
	}

	/* Update source addr of the socket */
	bacpy(src, conn->src);

	l2cap_chan_add(conn, chan);

	l2cap_pi(sk)->chan = chan;

	sk->sk_state = BT_CONNECT;
	l2cap_sock_set_timer(sk, sk->sk_sndtimeo);

	if (hcon->state == BT_CONNECTED) {
		/* Link already up: raw/dgram sockets are done, others
		 * start L2CAP channel establishment now. */
		if (sk->sk_type != SOCK_SEQPACKET &&
				sk->sk_type != SOCK_STREAM) {
			l2cap_sock_clear_timer(sk);
			if (l2cap_check_security(sk))
				sk->sk_state = BT_CONNECTED;
		} else
			l2cap_do_start(chan);
	}

	err = 0;

done:
	hci_dev_unlock_bh(hdev);
	hci_dev_put(hdev);
	return err;
}
927
/* Block (interruptibly) until every outstanding ERTM I-frame on @sk has
 * been acknowledged or the connection goes away.  Called with the
 * socket lock held; the lock is released while sleeping.  Returns 0 on
 * success or a negative errno (signal or socket error). */
int __l2cap_wait_ack(struct sock *sk)
{
	DECLARE_WAITQUEUE(wait, current);
	int err = 0;
	int timeo = HZ/5;

	add_wait_queue(sk_sleep(sk), &wait);
	while ((l2cap_pi(sk)->chan->unacked_frames > 0 && l2cap_pi(sk)->conn)) {
		set_current_state(TASK_INTERRUPTIBLE);

		/* Re-arm the polling interval once it has elapsed. */
		if (!timeo)
			timeo = HZ/5;

		if (signal_pending(current)) {
			err = sock_intr_errno(timeo);
			break;
		}

		/* Drop the socket lock while sleeping so the ack
		 * processing can make progress. */
		release_sock(sk);
		timeo = schedule_timeout(timeo);
		lock_sock(sk);

		err = sock_error(sk);
		if (err)
			break;
	}
	set_current_state(TASK_RUNNING);
	remove_wait_queue(sk_sleep(sk), &wait);
	return err;
}
958
959static void l2cap_monitor_timeout(unsigned long arg)
960{
961 struct l2cap_chan *chan = (void *) arg;
962 struct sock *sk = chan->sk;
963
964 BT_DBG("chan %p", chan);
965
966 bh_lock_sock(sk);
967 if (chan->retry_count >= chan->remote_max_tx) {
968 l2cap_send_disconn_req(l2cap_pi(sk)->conn, chan, ECONNABORTED);
969 bh_unlock_sock(sk);
970 return;
971 }
972
973 chan->retry_count++;
974 __mod_monitor_timer();
975
976 l2cap_send_rr_or_rnr(chan, L2CAP_CTRL_POLL);
977 bh_unlock_sock(sk);
978}
979
980static void l2cap_retrans_timeout(unsigned long arg)
981{
982 struct l2cap_chan *chan = (void *) arg;
983 struct sock *sk = chan->sk;
984
985 BT_DBG("chan %p", chan);
986
987 bh_lock_sock(sk);
988 chan->retry_count = 1;
989 __mod_monitor_timer();
990
991 chan->conn_state |= L2CAP_CONN_WAIT_F;
992
993 l2cap_send_rr_or_rnr(chan, L2CAP_CTRL_POLL);
994 bh_unlock_sock(sk);
995}
996
997static void l2cap_drop_acked_frames(struct l2cap_chan *chan)
998{
999 struct sock *sk = chan->sk;
1000 struct sk_buff *skb;
1001
1002 while ((skb = skb_peek(TX_QUEUE(sk))) &&
1003 chan->unacked_frames) {
1004 if (bt_cb(skb)->tx_seq == chan->expected_ack_seq)
1005 break;
1006
1007 skb = skb_dequeue(TX_QUEUE(sk));
1008 kfree_skb(skb);
1009
1010 chan->unacked_frames--;
1011 }
1012
1013 if (!chan->unacked_frames)
1014 del_timer(&chan->retrans_timer);
1015}
1016
1017void l2cap_do_send(struct sock *sk, struct sk_buff *skb)
1018{
1019 struct l2cap_pinfo *pi = l2cap_pi(sk);
1020 struct hci_conn *hcon = pi->conn->hcon;
1021 u16 flags;
1022
1023 BT_DBG("sk %p, skb %p len %d", sk, skb, skb->len);
1024
1025 if (!pi->flushable && lmp_no_flush_capable(hcon->hdev))
1026 flags = ACL_START_NO_FLUSH;
1027 else
1028 flags = ACL_START;
1029
1030 hci_send_acl(hcon, skb, flags);
1031}
1032
/* Streaming mode transmit: drain the whole transmit queue, stamping
 * each I-frame with the next TX sequence number (mod 64) and an FCS
 * trailer when CRC16 is negotiated.  Streaming mode never retransmits,
 * so frames are dequeued for good. */
void l2cap_streaming_send(struct l2cap_chan *chan)
{
	struct sock *sk = chan->sk;
	struct sk_buff *skb;
	struct l2cap_pinfo *pi = l2cap_pi(sk);
	u16 control, fcs;

	while ((skb = skb_dequeue(TX_QUEUE(sk)))) {
		/* Fill in the TX sequence number in the control field. */
		control = get_unaligned_le16(skb->data + L2CAP_HDR_SIZE);
		control |= chan->next_tx_seq << L2CAP_CTRL_TXSEQ_SHIFT;
		put_unaligned_le16(control, skb->data + L2CAP_HDR_SIZE);

		/* FCS trailer occupies the last two bytes of the frame. */
		if (pi->fcs == L2CAP_FCS_CRC16) {
			fcs = crc16(0, (u8 *)skb->data, skb->len - 2);
			put_unaligned_le16(fcs, skb->data + skb->len - 2);
		}

		l2cap_do_send(sk, skb);

		chan->next_tx_seq = (chan->next_tx_seq + 1) % 64;
	}
}
1055
/* Retransmit the single queued I-frame whose TX sequence number is
 * @tx_seq (selective reject handling).  Disconnects the channel when
 * the frame has already been sent remote_max_tx times. */
static void l2cap_retransmit_one_frame(struct l2cap_chan *chan, u8 tx_seq)
{
	struct sock *sk = chan->sk;
	struct l2cap_pinfo *pi = l2cap_pi(sk);
	struct sk_buff *skb, *tx_skb;
	u16 control, fcs;

	skb = skb_peek(TX_QUEUE(sk));
	if (!skb)
		return;

	/* Walk the queue looking for the frame with this sequence number. */
	do {
		if (bt_cb(skb)->tx_seq == tx_seq)
			break;

		if (skb_queue_is_last(TX_QUEUE(sk), skb))
			return;

	} while ((skb = skb_queue_next(TX_QUEUE(sk), skb)));

	if (chan->remote_max_tx &&
			bt_cb(skb)->retries == chan->remote_max_tx) {
		l2cap_send_disconn_req(pi->conn, chan, ECONNABORTED);
		return;
	}

	/* NOTE(review): skb_clone() can return NULL under memory
	 * pressure and is not checked here — confirm against upstream. */
	tx_skb = skb_clone(skb, GFP_ATOMIC);
	bt_cb(skb)->retries++;
	control = get_unaligned_le16(tx_skb->data + L2CAP_HDR_SIZE);

	/* Carry a pending Final bit on this retransmission. */
	if (chan->conn_state & L2CAP_CONN_SEND_FBIT) {
		control |= L2CAP_CTRL_FINAL;
		chan->conn_state &= ~L2CAP_CONN_SEND_FBIT;
	}

	/* Refresh ReqSeq and TxSeq in the control field. */
	control |= (chan->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT)
			| (tx_seq << L2CAP_CTRL_TXSEQ_SHIFT);

	put_unaligned_le16(control, tx_skb->data + L2CAP_HDR_SIZE);

	/* Recompute the FCS over the updated frame. */
	if (pi->fcs == L2CAP_FCS_CRC16) {
		fcs = crc16(0, (u8 *)tx_skb->data, tx_skb->len - 2);
		put_unaligned_le16(fcs, tx_skb->data + tx_skb->len - 2);
	}

	l2cap_do_send(sk, tx_skb);
}
1103
/* ERTM transmit: send queued I-frames starting at sk_send_head while
 * the transmit window allows, stamping sequence numbers and FCS, and
 * arming the retransmission timer.  Frames stay on the queue until
 * acknowledged (clones are transmitted).  Returns the number of frames
 * sent, or -ENOTCONN when the socket is not connected. */
int l2cap_ertm_send(struct l2cap_chan *chan)
{
	struct sk_buff *skb, *tx_skb;
	struct sock *sk = chan->sk;
	struct l2cap_pinfo *pi = l2cap_pi(sk);
	u16 control, fcs;
	int nsent = 0;

	if (sk->sk_state != BT_CONNECTED)
		return -ENOTCONN;

	while ((skb = sk->sk_send_head) && (!l2cap_tx_window_full(chan))) {

		/* Frame already sent remote_max_tx times: give up. */
		if (chan->remote_max_tx &&
				bt_cb(skb)->retries == chan->remote_max_tx) {
			l2cap_send_disconn_req(pi->conn, chan, ECONNABORTED);
			break;
		}

		tx_skb = skb_clone(skb, GFP_ATOMIC);

		bt_cb(skb)->retries++;

		/* Keep only the SAR bits; rebuild the rest of control. */
		control = get_unaligned_le16(tx_skb->data + L2CAP_HDR_SIZE);
		control &= L2CAP_CTRL_SAR;

		if (chan->conn_state & L2CAP_CONN_SEND_FBIT) {
			control |= L2CAP_CTRL_FINAL;
			chan->conn_state &= ~L2CAP_CONN_SEND_FBIT;
		}
		control |= (chan->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT)
				| (chan->next_tx_seq << L2CAP_CTRL_TXSEQ_SHIFT);
		put_unaligned_le16(control, tx_skb->data + L2CAP_HDR_SIZE);


		/* NOTE(review): the FCS is written through skb->data, not
		 * tx_skb->data; skb_clone() shares the data buffer so the
		 * clone should see it — confirm against upstream, which
		 * later switched to tx_skb here. */
		if (pi->fcs == L2CAP_FCS_CRC16) {
			fcs = crc16(0, (u8 *)skb->data, tx_skb->len - 2);
			put_unaligned_le16(fcs, skb->data + tx_skb->len - 2);
		}

		l2cap_do_send(sk, tx_skb);

		__mod_retrans_timer();

		bt_cb(skb)->tx_seq = chan->next_tx_seq;
		chan->next_tx_seq = (chan->next_tx_seq + 1) % 64;

		/* Count each queued frame once, on its first transmission. */
		if (bt_cb(skb)->retries == 1)
			chan->unacked_frames++;

		chan->frames_sent++;

		if (skb_queue_is_last(TX_QUEUE(sk), skb))
			sk->sk_send_head = NULL;
		else
			sk->sk_send_head = skb_queue_next(TX_QUEUE(sk), skb);

		nsent++;
	}

	return nsent;
}
1166
1167static int l2cap_retransmit_frames(struct l2cap_chan *chan)
1168{
1169 struct sock *sk = chan->sk;
1170 int ret;
1171
1172 if (!skb_queue_empty(TX_QUEUE(sk)))
1173 sk->sk_send_head = TX_QUEUE(sk)->next;
1174
1175 chan->next_tx_seq = chan->expected_ack_seq;
1176 ret = l2cap_ertm_send(chan);
1177 return ret;
1178}
1179
1180static void l2cap_send_ack(struct l2cap_chan *chan)
1181{
1182 u16 control = 0;
1183
1184 control |= chan->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT;
1185
1186 if (chan->conn_state & L2CAP_CONN_LOCAL_BUSY) {
1187 control |= L2CAP_SUPER_RCV_NOT_READY;
1188 chan->conn_state |= L2CAP_CONN_RNR_SENT;
1189 l2cap_send_sframe(chan, control);
1190 return;
1191 }
1192
1193 if (l2cap_ertm_send(chan) > 0)
1194 return;
1195
1196 control |= L2CAP_SUPER_RCV_READY;
1197 l2cap_send_sframe(chan, control);
1198}
1199
1200static void l2cap_send_srejtail(struct l2cap_chan *chan)
1201{
1202 struct srej_list *tail;
1203 u16 control;
1204
1205 control = L2CAP_SUPER_SELECT_REJECT;
1206 control |= L2CAP_CTRL_FINAL;
1207
1208 tail = list_entry((&chan->srej_l)->prev, struct srej_list, list);
1209 control |= tail->tx_seq << L2CAP_CTRL_REQSEQ_SHIFT;
1210
1211 l2cap_send_sframe(chan, control);
1212}
1213
/* Copy @len bytes of user data from @msg into @skb: the first @count
 * bytes go into the skb's linear area (after the caller-built header),
 * the remainder into MTU-sized fragments chained on frag_list.
 * Returns the number of bytes consumed or a negative errno; on error
 * the caller frees @skb, which releases any fragments already chained. */
static inline int l2cap_skbuff_fromiovec(struct sock *sk, struct msghdr *msg, int len, int count, struct sk_buff *skb)
{
	struct l2cap_conn *conn = l2cap_pi(sk)->conn;
	struct sk_buff **frag;
	int err, sent = 0;

	if (memcpy_fromiovec(skb_put(skb, count), msg->msg_iov, count))
		return -EFAULT;

	sent += count;
	len -= count;

	/* Continuation fragments (no L2CAP header) */
	frag = &skb_shinfo(skb)->frag_list;
	while (len) {
		count = min_t(unsigned int, conn->mtu, len);

		*frag = bt_skb_send_alloc(sk, count, msg->msg_flags & MSG_DONTWAIT, &err);
		if (!*frag)
			return err;
		if (memcpy_fromiovec(skb_put(*frag, count), msg->msg_iov, count))
			return -EFAULT;

		sent += count;
		len -= count;

		frag = &(*frag)->next;
	}

	return sent;
}
1245
/* Build a connectionless-channel PDU: basic L2CAP header, the 2-byte PSM,
 * then the payload copied from @msg (overflow beyond the connection MTU
 * is fragmented by l2cap_skbuff_fromiovec).  Returns the skb on success
 * or an ERR_PTR on allocation/copy failure.
 */
struct sk_buff *l2cap_create_connless_pdu(struct sock *sk, struct msghdr *msg, size_t len)
{
	struct l2cap_conn *conn = l2cap_pi(sk)->conn;
	struct sk_buff *skb;
	int err, count, hlen = L2CAP_HDR_SIZE + 2;	/* +2 for the PSM field */
	struct l2cap_hdr *lh;

	BT_DBG("sk %p len %d", sk, (int)len);

	count = min_t(unsigned int, (conn->mtu - hlen), len);
	skb = bt_skb_send_alloc(sk, count + hlen,
			msg->msg_flags & MSG_DONTWAIT, &err);
	if (!skb)
		return ERR_PTR(err);

	/* Create L2CAP header */
	lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
	lh->cid = cpu_to_le16(l2cap_pi(sk)->dcid);
	/* Header len covers the PSM plus the payload. */
	lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
	put_unaligned_le16(l2cap_pi(sk)->psm, skb_put(skb, 2));

	err = l2cap_skbuff_fromiovec(sk, msg, len, count, skb);
	if (unlikely(err < 0)) {
		kfree_skb(skb);
		return ERR_PTR(err);
	}
	return skb;
}
1274
/* Build a basic-mode PDU: plain L2CAP header followed by the payload from
 * @msg, fragmented at the connection MTU.  Returns the skb or an ERR_PTR
 * on allocation/copy failure.
 */
struct sk_buff *l2cap_create_basic_pdu(struct sock *sk, struct msghdr *msg, size_t len)
{
	struct l2cap_conn *conn = l2cap_pi(sk)->conn;
	struct sk_buff *skb;
	int err, count, hlen = L2CAP_HDR_SIZE;
	struct l2cap_hdr *lh;

	BT_DBG("sk %p len %d", sk, (int)len);

	count = min_t(unsigned int, (conn->mtu - hlen), len);
	skb = bt_skb_send_alloc(sk, count + hlen,
			msg->msg_flags & MSG_DONTWAIT, &err);
	if (!skb)
		return ERR_PTR(err);

	/* Create L2CAP header */
	lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
	lh->cid = cpu_to_le16(l2cap_pi(sk)->dcid);
	lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));

	err = l2cap_skbuff_fromiovec(sk, msg, len, count, skb);
	if (unlikely(err < 0)) {
		kfree_skb(skb);
		return ERR_PTR(err);
	}
	return skb;
}
1302
/* Build an ERTM/streaming I-frame PDU: L2CAP header, 2-byte control
 * field, an optional 2-byte SDU length (@sdulen != 0 marks a SAR start
 * frame), the payload from @msg, and 2 reserved bytes for the FCS when
 * CRC16 is configured.  The FCS bytes are written as 0 here; the real
 * checksum is presumably computed by the transmit path — the placeholder
 * only reserves the space.  Returns the skb or an ERR_PTR.
 */
struct sk_buff *l2cap_create_iframe_pdu(struct sock *sk, struct msghdr *msg, size_t len, u16 control, u16 sdulen)
{
	struct l2cap_conn *conn = l2cap_pi(sk)->conn;
	struct sk_buff *skb;
	int err, count, hlen = L2CAP_HDR_SIZE + 2;	/* +2 for the control field */
	struct l2cap_hdr *lh;

	BT_DBG("sk %p len %d", sk, (int)len);

	if (!conn)
		return ERR_PTR(-ENOTCONN);

	if (sdulen)
		hlen += 2;	/* SDU length field (SAR start frame) */

	if (l2cap_pi(sk)->fcs == L2CAP_FCS_CRC16)
		hlen += 2;	/* trailing FCS */

	count = min_t(unsigned int, (conn->mtu - hlen), len);
	skb = bt_skb_send_alloc(sk, count + hlen,
			msg->msg_flags & MSG_DONTWAIT, &err);
	if (!skb)
		return ERR_PTR(err);

	/* Create L2CAP header */
	lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
	lh->cid = cpu_to_le16(l2cap_pi(sk)->dcid);
	lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
	put_unaligned_le16(control, skb_put(skb, 2));
	if (sdulen)
		put_unaligned_le16(sdulen, skb_put(skb, 2));

	err = l2cap_skbuff_fromiovec(sk, msg, len, count, skb);
	if (unlikely(err < 0)) {
		kfree_skb(skb);
		return ERR_PTR(err);
	}

	/* Reserve space for the FCS (filled in later). */
	if (l2cap_pi(sk)->fcs == L2CAP_FCS_CRC16)
		put_unaligned_le16(0, skb_put(skb, 2));

	bt_cb(skb)->retries = 0;
	return skb;
}
1347
/* Segment an SDU from @msg into I-frame PDUs no larger than the remote
 * MPS: one START frame carrying the total SDU length, then CONTINUE
 * frames, then an END frame.  On success the whole list is spliced onto
 * the socket's transmit queue and the SDU size is returned; on failure
 * every PDU built so far is freed and a negative errno is returned.
 */
int l2cap_sar_segment_sdu(struct l2cap_chan *chan, struct msghdr *msg, size_t len)
{
	struct sock *sk = chan->sk;
	struct sk_buff *skb;
	struct sk_buff_head sar_queue;
	u16 control;
	size_t size = 0;

	skb_queue_head_init(&sar_queue);
	control = L2CAP_SDU_START;
	/* The START frame carries the total SDU length as sdulen. */
	skb = l2cap_create_iframe_pdu(sk, msg, chan->remote_mps, control, len);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	__skb_queue_tail(&sar_queue, skb);
	len -= chan->remote_mps;
	size += chan->remote_mps;

	while (len > 0) {
		size_t buflen;

		/* Last chunk becomes the END frame. */
		if (len > chan->remote_mps) {
			control = L2CAP_SDU_CONTINUE;
			buflen = chan->remote_mps;
		} else {
			control = L2CAP_SDU_END;
			buflen = len;
		}

		skb = l2cap_create_iframe_pdu(sk, msg, buflen, control, 0);
		if (IS_ERR(skb)) {
			/* Drop the partial segment list on failure. */
			skb_queue_purge(&sar_queue);
			return PTR_ERR(skb);
		}

		__skb_queue_tail(&sar_queue, skb);
		len -= buflen;
		size += buflen;
	}
	skb_queue_splice_tail(&sar_queue, TX_QUEUE(sk));
	if (sk->sk_send_head == NULL)
		sk->sk_send_head = sar_queue.next;

	return size;
}
1393
1394static void l2cap_chan_ready(struct sock *sk)
1395{
1396 struct sock *parent = bt_sk(sk)->parent;
1397
1398 BT_DBG("sk %p, parent %p", sk, parent);
1399
1400 l2cap_pi(sk)->conf_state = 0;
1401 l2cap_sock_clear_timer(sk);
1402
1403 if (!parent) {
1404 /* Outgoing channel.
1405 * Wake up socket sleeping on connect.
1406 */
1407 sk->sk_state = BT_CONNECTED;
1408 sk->sk_state_change(sk);
1409 } else {
1410 /* Incoming channel.
1411 * Wake up socket sleeping on accept.
1412 */
1413 parent->sk_data_ready(parent, 0);
1414 }
1415}
1416
/* Copy frame to all raw sockets on that connection */
static void l2cap_raw_recv(struct l2cap_conn *conn, struct sk_buff *skb)
{
	struct sk_buff *nskb;
	struct l2cap_chan *chan;

	BT_DBG("conn %p", conn);

	/* Walk the channel list under the read lock; only clone, never
	 * consume, the incoming skb.
	 */
	read_lock(&conn->chan_lock);
	list_for_each_entry(chan, &conn->chan_l, list) {
		struct sock *sk = chan->sk;
		if (sk->sk_type != SOCK_RAW)
			continue;

		/* Don't send frame to the socket it came from */
		if (skb->sk == sk)
			continue;
		/* A failed clone just skips this receiver. */
		nskb = skb_clone(skb, GFP_ATOMIC);
		if (!nskb)
			continue;

		if (sock_queue_rcv_skb(sk, nskb))
			kfree_skb(nskb);
	}
	read_unlock(&conn->chan_lock);
}
1443
1444/* ---- L2CAP signalling commands ---- */
/* Build a signalling command PDU: L2CAP header, command header, then
 * @dlen bytes of @data.  Whatever does not fit within the connection MTU
 * is chained as continuation fragments on the head skb's frag_list.
 * Returns NULL on any allocation failure (partial fragments are freed
 * along with the head skb).
 */
static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
		u8 code, u8 ident, u16 dlen, void *data)
{
	struct sk_buff *skb, **frag;
	struct l2cap_cmd_hdr *cmd;
	struct l2cap_hdr *lh;
	int len, count;

	BT_DBG("conn %p, code 0x%2.2x, ident 0x%2.2x, len %d",
			conn, code, ident, dlen);

	len = L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE + dlen;
	count = min_t(unsigned int, conn->mtu, len);

	skb = bt_skb_alloc(count, GFP_ATOMIC);
	if (!skb)
		return NULL;

	lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
	lh->len = cpu_to_le16(L2CAP_CMD_HDR_SIZE + dlen);

	/* LE links carry signalling on a dedicated fixed channel. */
	if (conn->hcon->type == LE_LINK)
		lh->cid = cpu_to_le16(L2CAP_CID_LE_SIGNALING);
	else
		lh->cid = cpu_to_le16(L2CAP_CID_SIGNALING);

	cmd = (struct l2cap_cmd_hdr *) skb_put(skb, L2CAP_CMD_HDR_SIZE);
	cmd->code = code;
	cmd->ident = ident;
	cmd->len = cpu_to_le16(dlen);

	if (dlen) {
		/* Whatever fits after the two headers goes in the head skb. */
		count -= L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE;
		memcpy(skb_put(skb, count), data, count);
		data += count;
	}

	len -= skb->len;

	/* Continuation fragments (no L2CAP header) */
	frag = &skb_shinfo(skb)->frag_list;
	while (len) {
		count = min_t(unsigned int, conn->mtu, len);

		*frag = bt_skb_alloc(count, GFP_ATOMIC);
		if (!*frag)
			goto fail;

		memcpy(skb_put(*frag, count), data, count);

		len -= count;
		data += count;

		frag = &(*frag)->next;
	}

	return skb;

fail:
	/* Freeing the head skb also frees any chained fragments. */
	kfree_skb(skb);
	return NULL;
}
1507
1508static inline int l2cap_get_conf_opt(void **ptr, int *type, int *olen, unsigned long *val)
1509{
1510 struct l2cap_conf_opt *opt = *ptr;
1511 int len;
1512
1513 len = L2CAP_CONF_OPT_SIZE + opt->len;
1514 *ptr += len;
1515
1516 *type = opt->type;
1517 *olen = opt->len;
1518
1519 switch (opt->len) {
1520 case 1:
1521 *val = *((u8 *) opt->val);
1522 break;
1523
1524 case 2:
1525 *val = get_unaligned_le16(opt->val);
1526 break;
1527
1528 case 4:
1529 *val = get_unaligned_le32(opt->val);
1530 break;
1531
1532 default:
1533 *val = (unsigned long) opt->val;
1534 break;
1535 }
1536
1537 BT_DBG("type 0x%2.2x len %d val 0x%lx", *type, opt->len, *val);
1538 return len;
1539}
1540
1541static void l2cap_add_conf_opt(void **ptr, u8 type, u8 len, unsigned long val)
1542{
1543 struct l2cap_conf_opt *opt = *ptr;
1544
1545 BT_DBG("type 0x%2.2x len %d val 0x%lx", type, len, val);
1546
1547 opt->type = type;
1548 opt->len = len;
1549
1550 switch (len) {
1551 case 1:
1552 *((u8 *) opt->val) = val;
1553 break;
1554
1555 case 2:
1556 put_unaligned_le16(val, opt->val);
1557 break;
1558
1559 case 4:
1560 put_unaligned_le32(val, opt->val);
1561 break;
1562
1563 default:
1564 memcpy(opt->val, (void *) val, len);
1565 break;
1566 }
1567
1568 *ptr += L2CAP_CONF_OPT_SIZE + len;
1569}
1570
1571static void l2cap_ack_timeout(unsigned long arg)
1572{
1573 struct l2cap_chan *chan = (void *) arg;
1574
1575 bh_lock_sock(chan->sk);
1576 l2cap_send_ack(chan);
1577 bh_unlock_sock(chan->sk);
1578}
1579
/* Initialize a channel's ERTM state: reset the sequence counters, arm
 * the retransmission/monitor/ack timers, set up the SREJ and busy queues
 * and the busy work item, and install the ERTM backlog receive handler
 * on the socket.
 */
static inline void l2cap_ertm_init(struct l2cap_chan *chan)
{
	struct sock *sk = chan->sk;

	chan->expected_ack_seq = 0;
	chan->unacked_frames = 0;
	chan->buffer_seq = 0;
	chan->num_acked = 0;
	chan->frames_sent = 0;

	setup_timer(&chan->retrans_timer, l2cap_retrans_timeout,
			(unsigned long) chan);
	setup_timer(&chan->monitor_timer, l2cap_monitor_timeout,
			(unsigned long) chan);
	setup_timer(&chan->ack_timer, l2cap_ack_timeout, (unsigned long) chan);

	skb_queue_head_init(&chan->srej_q);
	skb_queue_head_init(&chan->busy_q);

	INIT_LIST_HEAD(&chan->srej_l);

	INIT_WORK(&chan->busy_work, l2cap_busy_work);

	/* Frames queued while the socket is owned by user context go
	 * through the ERTM handler when the backlog is drained.
	 */
	sk->sk_backlog_rcv = l2cap_ertm_data_rcv;
}
1605
1606static inline __u8 l2cap_select_mode(__u8 mode, __u16 remote_feat_mask)
1607{
1608 switch (mode) {
1609 case L2CAP_MODE_STREAMING:
1610 case L2CAP_MODE_ERTM:
1611 if (l2cap_mode_supported(mode, remote_feat_mask))
1612 return mode;
1613 /* fall through */
1614 default:
1615 return L2CAP_MODE_BASIC;
1616 }
1617}
1618
/* Build our outgoing configuration request into @data: an MTU option when
 * it differs from the default, and a mode-dependent RFC option (plus an
 * FCS option when we want to disable the CRC).  Returns the number of
 * bytes written.  Mode selection against the remote feature mask happens
 * only on the first request of a negotiation.
 */
static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data)
{
	struct l2cap_pinfo *pi = l2cap_pi(chan->sk);
	struct l2cap_conf_req *req = data;
	struct l2cap_conf_rfc rfc = { .mode = pi->mode };
	void *ptr = req->data;

	BT_DBG("chan %p", chan);

	/* Renegotiation: keep the already-selected mode. */
	if (chan->num_conf_req || chan->num_conf_rsp)
		goto done;

	switch (pi->mode) {
	case L2CAP_MODE_STREAMING:
	case L2CAP_MODE_ERTM:
		/* A STATE2 device insists on its configured mode. */
		if (pi->conf_state & L2CAP_CONF_STATE2_DEVICE)
			break;

		/* fall through */
	default:
		pi->mode = l2cap_select_mode(rfc.mode, pi->conn->feat_mask);
		break;
	}

done:
	if (pi->imtu != L2CAP_DEFAULT_MTU)
		l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, pi->imtu);

	switch (pi->mode) {
	case L2CAP_MODE_BASIC:
		/* Only send an explicit basic-mode RFC when the remote
		 * understands the option at all.
		 */
		if (!(pi->conn->feat_mask & L2CAP_FEAT_ERTM) &&
				!(pi->conn->feat_mask & L2CAP_FEAT_STREAMING))
			break;

		rfc.mode = L2CAP_MODE_BASIC;
		rfc.txwin_size = 0;
		rfc.max_transmit = 0;
		rfc.retrans_timeout = 0;
		rfc.monitor_timeout = 0;
		rfc.max_pdu_size = 0;

		l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
							(unsigned long) &rfc);
		break;

	case L2CAP_MODE_ERTM:
		rfc.mode = L2CAP_MODE_ERTM;
		rfc.txwin_size = pi->tx_win;
		rfc.max_transmit = pi->max_tx;
		rfc.retrans_timeout = 0;
		rfc.monitor_timeout = 0;
		rfc.max_pdu_size = cpu_to_le16(L2CAP_DEFAULT_MAX_PDU_SIZE);
		/* Leave headroom for the I-frame overhead. */
		if (L2CAP_DEFAULT_MAX_PDU_SIZE > pi->conn->mtu - 10)
			rfc.max_pdu_size = cpu_to_le16(pi->conn->mtu - 10);

		l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
							(unsigned long) &rfc);

		if (!(pi->conn->feat_mask & L2CAP_FEAT_FCS))
			break;

		/* Ask to drop the FCS when we do not want it or the remote
		 * already offered to run without it.
		 */
		if (pi->fcs == L2CAP_FCS_NONE ||
				pi->conf_state & L2CAP_CONF_NO_FCS_RECV) {
			pi->fcs = L2CAP_FCS_NONE;
			l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1, pi->fcs);
		}
		break;

	case L2CAP_MODE_STREAMING:
		rfc.mode = L2CAP_MODE_STREAMING;
		rfc.txwin_size = 0;
		rfc.max_transmit = 0;
		rfc.retrans_timeout = 0;
		rfc.monitor_timeout = 0;
		rfc.max_pdu_size = cpu_to_le16(L2CAP_DEFAULT_MAX_PDU_SIZE);
		if (L2CAP_DEFAULT_MAX_PDU_SIZE > pi->conn->mtu - 10)
			rfc.max_pdu_size = cpu_to_le16(pi->conn->mtu - 10);

		l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
							(unsigned long) &rfc);

		if (!(pi->conn->feat_mask & L2CAP_FEAT_FCS))
			break;

		if (pi->fcs == L2CAP_FCS_NONE ||
				pi->conf_state & L2CAP_CONF_NO_FCS_RECV) {
			pi->fcs = L2CAP_FCS_NONE;
			l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1, pi->fcs);
		}
		break;
	}

	req->dcid = cpu_to_le16(pi->dcid);
	req->flags = cpu_to_le16(0);

	return ptr - data;
}
1716
1717static int l2cap_parse_conf_req(struct l2cap_chan *chan, void *data)
1718{
1719 struct l2cap_pinfo *pi = l2cap_pi(chan->sk);
1720 struct l2cap_conf_rsp *rsp = data;
1721 void *ptr = rsp->data;
1722 void *req = chan->conf_req;
1723 int len = chan->conf_len;
1724 int type, hint, olen;
1725 unsigned long val;
1726 struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
1727 u16 mtu = L2CAP_DEFAULT_MTU;
1728 u16 result = L2CAP_CONF_SUCCESS;
1729
1730 BT_DBG("chan %p", chan);
1731
1732 while (len >= L2CAP_CONF_OPT_SIZE) {
1733 len -= l2cap_get_conf_opt(&req, &type, &olen, &val);
1734
1735 hint = type & L2CAP_CONF_HINT;
1736 type &= L2CAP_CONF_MASK;
1737
1738 switch (type) {
1739 case L2CAP_CONF_MTU:
1740 mtu = val;
1741 break;
1742
1743 case L2CAP_CONF_FLUSH_TO:
1744 pi->flush_to = val;
1745 break;
1746
1747 case L2CAP_CONF_QOS:
1748 break;
1749
1750 case L2CAP_CONF_RFC:
1751 if (olen == sizeof(rfc))
1752 memcpy(&rfc, (void *) val, olen);
1753 break;
1754
1755 case L2CAP_CONF_FCS:
1756 if (val == L2CAP_FCS_NONE)
1757 pi->conf_state |= L2CAP_CONF_NO_FCS_RECV;
1758
1759 break;
1760
1761 default:
1762 if (hint)
1763 break;
1764
1765 result = L2CAP_CONF_UNKNOWN;
1766 *((u8 *) ptr++) = type;
1767 break;
1768 }
1769 }
1770
1771 if (chan->num_conf_rsp || chan->num_conf_req > 1)
1772 goto done;
1773
1774 switch (pi->mode) {
1775 case L2CAP_MODE_STREAMING:
1776 case L2CAP_MODE_ERTM:
1777 if (!(pi->conf_state & L2CAP_CONF_STATE2_DEVICE)) {
1778 pi->mode = l2cap_select_mode(rfc.mode,
1779 pi->conn->feat_mask);
1780 break;
1781 }
1782
1783 if (pi->mode != rfc.mode)
1784 return -ECONNREFUSED;
1785
1786 break;
1787 }
1788
1789done:
1790 if (pi->mode != rfc.mode) {
1791 result = L2CAP_CONF_UNACCEPT;
1792 rfc.mode = pi->mode;
1793
1794 if (chan->num_conf_rsp == 1)
1795 return -ECONNREFUSED;
1796
1797 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
1798 sizeof(rfc), (unsigned long) &rfc);
1799 }
1800
1801
1802 if (result == L2CAP_CONF_SUCCESS) {
1803 /* Configure output options and let the other side know
1804 * which ones we don't like. */
1805
1806 if (mtu < L2CAP_DEFAULT_MIN_MTU)
1807 result = L2CAP_CONF_UNACCEPT;
1808 else {
1809 pi->omtu = mtu;
1810 pi->conf_state |= L2CAP_CONF_MTU_DONE;
1811 }
1812 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, pi->omtu);
1813
1814 switch (rfc.mode) {
1815 case L2CAP_MODE_BASIC:
1816 pi->fcs = L2CAP_FCS_NONE;
1817 pi->conf_state |= L2CAP_CONF_MODE_DONE;
1818 break;
1819
1820 case L2CAP_MODE_ERTM:
1821 chan->remote_tx_win = rfc.txwin_size;
1822 chan->remote_max_tx = rfc.max_transmit;
1823
1824 if (le16_to_cpu(rfc.max_pdu_size) > pi->conn->mtu - 10)
1825 rfc.max_pdu_size = cpu_to_le16(pi->conn->mtu - 10);
1826
1827 chan->remote_mps = le16_to_cpu(rfc.max_pdu_size);
1828
1829 rfc.retrans_timeout =
1830 le16_to_cpu(L2CAP_DEFAULT_RETRANS_TO);
1831 rfc.monitor_timeout =
1832 le16_to_cpu(L2CAP_DEFAULT_MONITOR_TO);
1833
1834 pi->conf_state |= L2CAP_CONF_MODE_DONE;
1835
1836 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
1837 sizeof(rfc), (unsigned long) &rfc);
1838
1839 break;
1840
1841 case L2CAP_MODE_STREAMING:
1842 if (le16_to_cpu(rfc.max_pdu_size) > pi->conn->mtu - 10)
1843 rfc.max_pdu_size = cpu_to_le16(pi->conn->mtu - 10);
1844
1845 chan->remote_mps = le16_to_cpu(rfc.max_pdu_size);
1846
1847 pi->conf_state |= L2CAP_CONF_MODE_DONE;
1848
1849 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
1850 sizeof(rfc), (unsigned long) &rfc);
1851
1852 break;
1853
1854 default:
1855 result = L2CAP_CONF_UNACCEPT;
1856
1857 memset(&rfc, 0, sizeof(rfc));
1858 rfc.mode = pi->mode;
1859 }
1860
1861 if (result == L2CAP_CONF_SUCCESS)
1862 pi->conf_state |= L2CAP_CONF_OUTPUT_DONE;
1863 }
1864 rsp->scid = cpu_to_le16(pi->dcid);
1865 rsp->result = cpu_to_le16(result);
1866 rsp->flags = cpu_to_le16(0x0000);
1867
1868 return ptr - data;
1869}
1870
1871static int l2cap_parse_conf_rsp(struct sock *sk, void *rsp, int len, void *data, u16 *result)
1872{
1873 struct l2cap_pinfo *pi = l2cap_pi(sk);
1874 struct l2cap_conf_req *req = data;
1875 void *ptr = req->data;
1876 int type, olen;
1877 unsigned long val;
1878 struct l2cap_conf_rfc rfc;
1879
1880 BT_DBG("sk %p, rsp %p, len %d, req %p", sk, rsp, len, data);
1881
1882 while (len >= L2CAP_CONF_OPT_SIZE) {
1883 len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);
1884
1885 switch (type) {
1886 case L2CAP_CONF_MTU:
1887 if (val < L2CAP_DEFAULT_MIN_MTU) {
1888 *result = L2CAP_CONF_UNACCEPT;
1889 pi->imtu = L2CAP_DEFAULT_MIN_MTU;
1890 } else
1891 pi->imtu = val;
1892 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, pi->imtu);
1893 break;
1894
1895 case L2CAP_CONF_FLUSH_TO:
1896 pi->flush_to = val;
1897 l2cap_add_conf_opt(&ptr, L2CAP_CONF_FLUSH_TO,
1898 2, pi->flush_to);
1899 break;
1900
1901 case L2CAP_CONF_RFC:
1902 if (olen == sizeof(rfc))
1903 memcpy(&rfc, (void *)val, olen);
1904
1905 if ((pi->conf_state & L2CAP_CONF_STATE2_DEVICE) &&
1906 rfc.mode != pi->mode)
1907 return -ECONNREFUSED;
1908
1909 pi->fcs = 0;
1910
1911 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
1912 sizeof(rfc), (unsigned long) &rfc);
1913 break;
1914 }
1915 }
1916
1917 if (pi->mode == L2CAP_MODE_BASIC && pi->mode != rfc.mode)
1918 return -ECONNREFUSED;
1919
1920 pi->mode = rfc.mode;
1921
1922 if (*result == L2CAP_CONF_SUCCESS) {
1923 switch (rfc.mode) {
1924 case L2CAP_MODE_ERTM:
1925 pi->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
1926 pi->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
1927 pi->mps = le16_to_cpu(rfc.max_pdu_size);
1928 break;
1929 case L2CAP_MODE_STREAMING:
1930 pi->mps = le16_to_cpu(rfc.max_pdu_size);
1931 }
1932 }
1933
1934 req->dcid = cpu_to_le16(pi->dcid);
1935 req->flags = cpu_to_le16(0x0000);
1936
1937 return ptr - data;
1938}
1939
1940static int l2cap_build_conf_rsp(struct sock *sk, void *data, u16 result, u16 flags)
1941{
1942 struct l2cap_conf_rsp *rsp = data;
1943 void *ptr = rsp->data;
1944
1945 BT_DBG("sk %p", sk);
1946
1947 rsp->scid = cpu_to_le16(l2cap_pi(sk)->dcid);
1948 rsp->result = cpu_to_le16(result);
1949 rsp->flags = cpu_to_le16(flags);
1950
1951 return ptr - data;
1952}
1953
/* Complete a connection that was deferred at connect time: send the
 * pending success Connect Response, then kick off configuration with our
 * first Config Request unless one is already in flight.
 */
void __l2cap_connect_rsp_defer(struct sock *sk)
{
	struct l2cap_conn_rsp rsp;
	struct l2cap_conn *conn = l2cap_pi(sk)->conn;
	struct l2cap_chan *chan = l2cap_pi(sk)->chan;
	u8 buf[128];

	sk->sk_state = BT_CONFIG;

	rsp.scid = cpu_to_le16(l2cap_pi(sk)->dcid);
	rsp.dcid = cpu_to_le16(l2cap_pi(sk)->scid);
	rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
	rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
	/* chan->ident still holds the ident of the deferred Connect Req. */
	l2cap_send_cmd(conn, chan->ident,
				L2CAP_CONN_RSP, sizeof(rsp), &rsp);

	if (l2cap_pi(sk)->conf_state & L2CAP_CONF_REQ_SENT)
		return;

	l2cap_pi(sk)->conf_state |= L2CAP_CONF_REQ_SENT;
	l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
			l2cap_build_conf_req(chan, buf), buf);
	chan->num_conf_req++;
}
1978
1979static void l2cap_conf_rfc_get(struct sock *sk, void *rsp, int len)
1980{
1981 struct l2cap_pinfo *pi = l2cap_pi(sk);
1982 int type, olen;
1983 unsigned long val;
1984 struct l2cap_conf_rfc rfc;
1985
1986 BT_DBG("sk %p, rsp %p, len %d", sk, rsp, len);
1987
1988 if ((pi->mode != L2CAP_MODE_ERTM) && (pi->mode != L2CAP_MODE_STREAMING))
1989 return;
1990
1991 while (len >= L2CAP_CONF_OPT_SIZE) {
1992 len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);
1993
1994 switch (type) {
1995 case L2CAP_CONF_RFC:
1996 if (olen == sizeof(rfc))
1997 memcpy(&rfc, (void *)val, olen);
1998 goto done;
1999 }
2000 }
2001
2002done:
2003 switch (rfc.mode) {
2004 case L2CAP_MODE_ERTM:
2005 pi->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
2006 pi->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
2007 pi->mps = le16_to_cpu(rfc.max_pdu_size);
2008 break;
2009 case L2CAP_MODE_STREAMING:
2010 pi->mps = le16_to_cpu(rfc.max_pdu_size);
2011 }
2012}
2013
2014static inline int l2cap_command_rej(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2015{
2016 struct l2cap_cmd_rej *rej = (struct l2cap_cmd_rej *) data;
2017
2018 if (rej->reason != 0x0000)
2019 return 0;
2020
2021 if ((conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) &&
2022 cmd->ident == conn->info_ident) {
2023 del_timer(&conn->info_timer);
2024
2025 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
2026 conn->info_ident = 0;
2027
2028 l2cap_conn_start(conn);
2029 }
2030
2031 return 0;
2032}
2033
/* Handle an incoming Connect Request: find a listening socket for the
 * PSM, allocate a child socket and channel, register the channel on the
 * connection, and answer with success/pending/failure.  May also trigger
 * the initial feature-mask exchange and our first Config Request.
 */
static inline int l2cap_connect_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
{
	struct l2cap_conn_req *req = (struct l2cap_conn_req *) data;
	struct l2cap_conn_rsp rsp;
	struct l2cap_chan *chan = NULL;
	struct sock *parent, *sk = NULL;
	int result, status = L2CAP_CS_NO_INFO;

	u16 dcid = 0, scid = __le16_to_cpu(req->scid);
	__le16 psm = req->psm;

	BT_DBG("psm 0x%2.2x scid 0x%4.4x", psm, scid);

	/* Check if we have socket listening on psm */
	parent = l2cap_get_sock_by_psm(BT_LISTEN, psm, conn->src);
	if (!parent) {
		result = L2CAP_CR_BAD_PSM;
		goto sendresp;
	}

	bh_lock_sock(parent);

	/* Check if the ACL is secure enough (if not SDP) */
	if (psm != cpu_to_le16(0x0001) &&
				!hci_conn_check_link_mode(conn->hcon)) {
		conn->disc_reason = 0x05;
		result = L2CAP_CR_SEC_BLOCK;
		goto response;
	}

	result = L2CAP_CR_NO_MEM;

	/* Check for backlog size */
	if (sk_acceptq_is_full(parent)) {
		BT_DBG("backlog full %d", parent->sk_ack_backlog);
		goto response;
	}

	sk = l2cap_sock_alloc(sock_net(parent), NULL, BTPROTO_L2CAP, GFP_ATOMIC);
	if (!sk)
		goto response;

	chan = l2cap_chan_alloc(sk);
	if (!chan) {
		l2cap_sock_kill(sk);
		goto response;
	}

	write_lock_bh(&conn->chan_lock);

	/* Check if we already have channel with that dcid */
	if (__l2cap_get_chan_by_dcid(conn, scid)) {
		write_unlock_bh(&conn->chan_lock);
		/* Mark zapped so l2cap_sock_kill actually frees it. */
		sock_set_flag(sk, SOCK_ZAPPED);
		l2cap_sock_kill(sk);
		goto response;
	}

	hci_conn_hold(conn->hcon);

	l2cap_sock_init(sk, parent);
	bacpy(&bt_sk(sk)->src, conn->src);
	bacpy(&bt_sk(sk)->dst, conn->dst);
	l2cap_pi(sk)->psm = psm;
	l2cap_pi(sk)->dcid = scid;

	bt_accept_enqueue(parent, sk);

	/* Registers the channel and assigns our source CID. */
	__l2cap_chan_add(conn, chan);

	l2cap_pi(sk)->chan = chan;

	dcid = l2cap_pi(sk)->scid;

	l2cap_sock_set_timer(sk, sk->sk_sndtimeo);

	chan->ident = cmd->ident;

	if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE) {
		if (l2cap_check_security(sk)) {
			if (bt_sk(sk)->defer_setup) {
				sk->sk_state = BT_CONNECT2;
				result = L2CAP_CR_PEND;
				status = L2CAP_CS_AUTHOR_PEND;
				parent->sk_data_ready(parent, 0);
			} else {
				sk->sk_state = BT_CONFIG;
				result = L2CAP_CR_SUCCESS;
				status = L2CAP_CS_NO_INFO;
			}
		} else {
			sk->sk_state = BT_CONNECT2;
			result = L2CAP_CR_PEND;
			status = L2CAP_CS_AUTHEN_PEND;
		}
	} else {
		/* Feature exchange not finished yet: answer pending. */
		sk->sk_state = BT_CONNECT2;
		result = L2CAP_CR_PEND;
		status = L2CAP_CS_NO_INFO;
	}

	write_unlock_bh(&conn->chan_lock);

response:
	bh_unlock_sock(parent);

sendresp:
	rsp.scid = cpu_to_le16(scid);
	rsp.dcid = cpu_to_le16(dcid);
	rsp.result = cpu_to_le16(result);
	rsp.status = cpu_to_le16(status);
	l2cap_send_cmd(conn, cmd->ident, L2CAP_CONN_RSP, sizeof(rsp), &rsp);

	if (result == L2CAP_CR_PEND && status == L2CAP_CS_NO_INFO) {
		struct l2cap_info_req info;
		info.type = cpu_to_le16(L2CAP_IT_FEAT_MASK);

		conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
		conn->info_ident = l2cap_get_ident(conn);

		mod_timer(&conn->info_timer, jiffies +
					msecs_to_jiffies(L2CAP_INFO_TIMEOUT));

		l2cap_send_cmd(conn, conn->info_ident,
					L2CAP_INFO_REQ, sizeof(info), &info);
	}

	/* Channel accepted immediately: start configuration right away. */
	if (chan && !(l2cap_pi(sk)->conf_state & L2CAP_CONF_REQ_SENT) &&
				result == L2CAP_CR_SUCCESS) {
		u8 buf[128];
		l2cap_pi(sk)->conf_state |= L2CAP_CONF_REQ_SENT;
		l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
					l2cap_build_conf_req(chan, buf), buf);
		chan->num_conf_req++;
	}

	return 0;
}
2172
/* Handle a Connect Response for one of our outgoing connection attempts.
 * Success moves the channel to BT_CONFIG and sends our first Config
 * Request; pending just records the state; any other result tears the
 * channel down (deferred via a short timer when user context holds the
 * socket lock).
 */
static inline int l2cap_connect_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
{
	struct l2cap_conn_rsp *rsp = (struct l2cap_conn_rsp *) data;
	u16 scid, dcid, result, status;
	struct l2cap_chan *chan;
	struct sock *sk;
	u8 req[128];

	scid = __le16_to_cpu(rsp->scid);
	dcid = __le16_to_cpu(rsp->dcid);
	result = __le16_to_cpu(rsp->result);
	status = __le16_to_cpu(rsp->status);

	BT_DBG("dcid 0x%4.4x scid 0x%4.4x result 0x%2.2x status 0x%2.2x", dcid, scid, result, status);

	/* NOTE(review): the lookup helpers appear to return with the
	 * channel's socket bh-locked — the bh_unlock_sock() at the end
	 * pairs with that; confirm against their definitions.
	 */
	if (scid) {
		chan = l2cap_get_chan_by_scid(conn, scid);
		if (!chan)
			return -EFAULT;
	} else {
		/* No scid yet: match the response by command ident. */
		chan = l2cap_get_chan_by_ident(conn, cmd->ident);
		if (!chan)
			return -EFAULT;
	}

	sk = chan->sk;

	switch (result) {
	case L2CAP_CR_SUCCESS:
		sk->sk_state = BT_CONFIG;
		chan->ident = 0;
		l2cap_pi(sk)->dcid = dcid;
		l2cap_pi(sk)->conf_state &= ~L2CAP_CONF_CONNECT_PEND;

		if (l2cap_pi(sk)->conf_state & L2CAP_CONF_REQ_SENT)
			break;

		l2cap_pi(sk)->conf_state |= L2CAP_CONF_REQ_SENT;

		l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
					l2cap_build_conf_req(chan, req), req);
		chan->num_conf_req++;
		break;

	case L2CAP_CR_PEND:
		l2cap_pi(sk)->conf_state |= L2CAP_CONF_CONNECT_PEND;
		break;

	default:
		/* don't delete l2cap channel if sk is owned by user */
		if (sock_owned_by_user(sk)) {
			sk->sk_state = BT_DISCONN;
			l2cap_sock_clear_timer(sk);
			l2cap_sock_set_timer(sk, HZ / 5);
			break;
		}

		l2cap_chan_del(chan, ECONNREFUSED);
		break;
	}

	bh_unlock_sock(sk);
	return 0;
}
2237
2238static inline void set_default_fcs(struct l2cap_pinfo *pi)
2239{
2240 /* FCS is enabled only in ERTM or streaming mode, if one or both
2241 * sides request it.
2242 */
2243 if (pi->mode != L2CAP_MODE_ERTM && pi->mode != L2CAP_MODE_STREAMING)
2244 pi->fcs = L2CAP_FCS_NONE;
2245 else if (!(pi->conf_state & L2CAP_CONF_NO_FCS_RECV))
2246 pi->fcs = L2CAP_FCS_CRC16;
2247}
2248
/* Handle an incoming Config Request.  Requests may arrive split over
 * several commands (continuation flag); fragments are accumulated in
 * chan->conf_req until complete, then parsed and answered.  When both
 * directions are configured the channel becomes connected.
 */
static inline int l2cap_config_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u16 cmd_len, u8 *data)
{
	struct l2cap_conf_req *req = (struct l2cap_conf_req *) data;
	u16 dcid, flags;
	u8 rsp[64];
	struct l2cap_chan *chan;
	struct sock *sk;
	int len;

	dcid = __le16_to_cpu(req->dcid);
	flags = __le16_to_cpu(req->flags);

	BT_DBG("dcid 0x%4.4x flags 0x%2.2x", dcid, flags);

	chan = l2cap_get_chan_by_scid(conn, dcid);
	if (!chan)
		return -ENOENT;

	sk = chan->sk;

	/* Configuration is only legal while in BT_CONFIG. */
	if (sk->sk_state != BT_CONFIG) {
		struct l2cap_cmd_rej rej;

		rej.reason = cpu_to_le16(0x0002);
		l2cap_send_cmd(conn, cmd->ident, L2CAP_COMMAND_REJ,
				sizeof(rej), &rej);
		goto unlock;
	}

	/* Reject if config buffer is too small. */
	len = cmd_len - sizeof(*req);
	if (chan->conf_len + len > sizeof(chan->conf_req)) {
		l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
				l2cap_build_conf_rsp(sk, rsp,
					L2CAP_CONF_REJECT, flags), rsp);
		goto unlock;
	}

	/* Store config. */
	memcpy(chan->conf_req + chan->conf_len, req->data, len);
	chan->conf_len += len;

	if (flags & 0x0001) {
		/* Incomplete config. Send empty response. */
		l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
				l2cap_build_conf_rsp(sk, rsp,
					L2CAP_CONF_SUCCESS, 0x0001), rsp);
		goto unlock;
	}

	/* Complete config. */
	len = l2cap_parse_conf_req(chan, rsp);
	if (len < 0) {
		l2cap_send_disconn_req(conn, chan, ECONNRESET);
		goto unlock;
	}

	l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP, len, rsp);
	chan->num_conf_rsp++;

	/* Reset config buffer. */
	chan->conf_len = 0;

	if (!(l2cap_pi(sk)->conf_state & L2CAP_CONF_OUTPUT_DONE))
		goto unlock;

	/* Both directions configured: bring the channel up. */
	if (l2cap_pi(sk)->conf_state & L2CAP_CONF_INPUT_DONE) {
		set_default_fcs(l2cap_pi(sk));

		sk->sk_state = BT_CONNECTED;

		chan->next_tx_seq = 0;
		chan->expected_tx_seq = 0;
		__skb_queue_head_init(TX_QUEUE(sk));
		if (l2cap_pi(sk)->mode == L2CAP_MODE_ERTM)
			l2cap_ertm_init(chan);

		l2cap_chan_ready(sk);
		goto unlock;
	}

	/* Our own Config Request has not gone out yet: send it now. */
	if (!(l2cap_pi(sk)->conf_state & L2CAP_CONF_REQ_SENT)) {
		u8 buf[64];
		l2cap_pi(sk)->conf_state |= L2CAP_CONF_REQ_SENT;
		l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
					l2cap_build_conf_req(chan, buf), buf);
		chan->num_conf_req++;
	}

unlock:
	bh_unlock_sock(sk);
	return 0;
}
2342
/* Handle a Config Response to one of our Config Requests.  Unaccepted
 * parameters are renegotiated (up to a retry limit); any other failure
 * tears the connection down.  When both directions are configured the
 * channel becomes connected.
 */
static inline int l2cap_config_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
{
	struct l2cap_conf_rsp *rsp = (struct l2cap_conf_rsp *)data;
	u16 scid, flags, result;
	struct l2cap_chan *chan;
	struct sock *sk;
	int len = cmd->len - sizeof(*rsp);

	scid = __le16_to_cpu(rsp->scid);
	flags = __le16_to_cpu(rsp->flags);
	result = __le16_to_cpu(rsp->result);

	BT_DBG("scid 0x%4.4x flags 0x%2.2x result 0x%2.2x",
			scid, flags, result);

	chan = l2cap_get_chan_by_scid(conn, scid);
	if (!chan)
		return 0;

	sk = chan->sk;

	switch (result) {
	case L2CAP_CONF_SUCCESS:
		/* Record the negotiated RFC parameters. */
		l2cap_conf_rfc_get(sk, rsp->data, len);
		break;

	case L2CAP_CONF_UNACCEPT:
		if (chan->num_conf_rsp <= L2CAP_CONF_MAX_CONF_RSP) {
			char req[64];

			if (len > sizeof(req) - sizeof(struct l2cap_conf_req)) {
				l2cap_send_disconn_req(conn, chan, ECONNRESET);
				goto done;
			}

			/* throw out any old stored conf requests */
			result = L2CAP_CONF_SUCCESS;
			len = l2cap_parse_conf_rsp(sk, rsp->data,
							len, req, &result);
			if (len < 0) {
				l2cap_send_disconn_req(conn, chan, ECONNRESET);
				goto done;
			}

			l2cap_send_cmd(conn, l2cap_get_ident(conn),
						L2CAP_CONF_REQ, len, req);
			chan->num_conf_req++;
			if (result != L2CAP_CONF_SUCCESS)
				goto done;
			break;
		}
		/* fall through: retry limit exceeded, give up */

	default:
		sk->sk_err = ECONNRESET;
		l2cap_sock_set_timer(sk, HZ * 5);
		l2cap_send_disconn_req(conn, chan, ECONNRESET);
		goto done;
	}

	/* More option fragments to come; wait for the final response. */
	if (flags & 0x01)
		goto done;

	l2cap_pi(sk)->conf_state |= L2CAP_CONF_INPUT_DONE;

	if (l2cap_pi(sk)->conf_state & L2CAP_CONF_OUTPUT_DONE) {
		set_default_fcs(l2cap_pi(sk));

		sk->sk_state = BT_CONNECTED;
		chan->next_tx_seq = 0;
		chan->expected_tx_seq = 0;
		__skb_queue_head_init(TX_QUEUE(sk));
		if (l2cap_pi(sk)->mode == L2CAP_MODE_ERTM)
			l2cap_ertm_init(chan);

		l2cap_chan_ready(sk);
	}

done:
	bh_unlock_sock(sk);
	return 0;
}
2424
/* Handle an incoming Disconnect Request: acknowledge it, shut the socket
 * down and delete the channel.  If user context currently owns the
 * socket lock, deletion is deferred via a short timer instead.
 */
static inline int l2cap_disconnect_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
{
	struct l2cap_disconn_req *req = (struct l2cap_disconn_req *) data;
	struct l2cap_disconn_rsp rsp;
	u16 dcid, scid;
	struct l2cap_chan *chan;
	struct sock *sk;

	scid = __le16_to_cpu(req->scid);
	dcid = __le16_to_cpu(req->dcid);

	BT_DBG("scid 0x%4.4x dcid 0x%4.4x", scid, dcid);

	/* The peer's dcid is our scid. */
	chan = l2cap_get_chan_by_scid(conn, dcid);
	if (!chan)
		return 0;

	sk = chan->sk;

	rsp.dcid = cpu_to_le16(l2cap_pi(sk)->scid);
	rsp.scid = cpu_to_le16(l2cap_pi(sk)->dcid);
	l2cap_send_cmd(conn, cmd->ident, L2CAP_DISCONN_RSP, sizeof(rsp), &rsp);

	sk->sk_shutdown = SHUTDOWN_MASK;

	/* don't delete l2cap channel if sk is owned by user */
	if (sock_owned_by_user(sk)) {
		sk->sk_state = BT_DISCONN;
		l2cap_sock_clear_timer(sk);
		l2cap_sock_set_timer(sk, HZ / 5);
		bh_unlock_sock(sk);
		return 0;
	}

	l2cap_chan_del(chan, ECONNRESET);
	bh_unlock_sock(sk);

	l2cap_sock_kill(sk);
	return 0;
}
2465
/* Handle an incoming L2CAP Disconnection Response: the peer has confirmed
 * our disconnect request, so finish tearing down the channel.
 *
 * An unknown SCID is silently ignored. Always returns 0.
 *
 * NOTE(review): as with l2cap_disconnect_req(), the socket appears to come
 * back bh-locked from l2cap_get_chan_by_scid() — every exit unlocks it.
 */
static inline int l2cap_disconnect_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
{
	struct l2cap_disconn_rsp *rsp = (struct l2cap_disconn_rsp *) data;
	u16 dcid, scid;
	struct l2cap_chan *chan;
	struct sock *sk;

	scid = __le16_to_cpu(rsp->scid);
	dcid = __le16_to_cpu(rsp->dcid);

	BT_DBG("dcid 0x%4.4x scid 0x%4.4x", dcid, scid);

	chan = l2cap_get_chan_by_scid(conn, scid);
	if (!chan)
		return 0;

	sk = chan->sk;

	/* don't delete l2cap channel if sk is owned by user */
	if (sock_owned_by_user(sk)) {
		/* Defer teardown until userspace drops the socket lock. */
		sk->sk_state = BT_DISCONN;
		l2cap_sock_clear_timer(sk);
		l2cap_sock_set_timer(sk, HZ / 5);
		bh_unlock_sock(sk);
		return 0;
	}

	/* err == 0: this is an orderly, locally-initiated disconnect. */
	l2cap_chan_del(chan, 0);
	bh_unlock_sock(sk);

	l2cap_sock_kill(sk);
	return 0;
}
2499
2500static inline int l2cap_information_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2501{
2502 struct l2cap_info_req *req = (struct l2cap_info_req *) data;
2503 u16 type;
2504
2505 type = __le16_to_cpu(req->type);
2506
2507 BT_DBG("type 0x%4.4x", type);
2508
2509 if (type == L2CAP_IT_FEAT_MASK) {
2510 u8 buf[8];
2511 u32 feat_mask = l2cap_feat_mask;
2512 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
2513 rsp->type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
2514 rsp->result = cpu_to_le16(L2CAP_IR_SUCCESS);
2515 if (!disable_ertm)
2516 feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING
2517 | L2CAP_FEAT_FCS;
2518 put_unaligned_le32(feat_mask, rsp->data);
2519 l2cap_send_cmd(conn, cmd->ident,
2520 L2CAP_INFO_RSP, sizeof(buf), buf);
2521 } else if (type == L2CAP_IT_FIXED_CHAN) {
2522 u8 buf[12];
2523 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
2524 rsp->type = cpu_to_le16(L2CAP_IT_FIXED_CHAN);
2525 rsp->result = cpu_to_le16(L2CAP_IR_SUCCESS);
2526 memcpy(buf + 4, l2cap_fixed_chan, 8);
2527 l2cap_send_cmd(conn, cmd->ident,
2528 L2CAP_INFO_RSP, sizeof(buf), buf);
2529 } else {
2530 struct l2cap_info_rsp rsp;
2531 rsp.type = cpu_to_le16(type);
2532 rsp.result = cpu_to_le16(L2CAP_IR_NOTSUPP);
2533 l2cap_send_cmd(conn, cmd->ident,
2534 L2CAP_INFO_RSP, sizeof(rsp), &rsp);
2535 }
2536
2537 return 0;
2538}
2539
/* Handle an L2CAP Information Response and drive the connection's
 * information-gathering state machine: after a successful feature-mask
 * response we may chain a fixed-channel query; once done (or on failure)
 * mark the exchange complete and kick off pending channel setup.
 *
 * Always returns 0.
 */
static inline int l2cap_information_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
{
	struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) data;
	u16 type, result;

	type = __le16_to_cpu(rsp->type);
	result = __le16_to_cpu(rsp->result);

	BT_DBG("type 0x%4.4x result 0x%2.2x", type, result);

	/* L2CAP Info req/rsp are unbound to channels, add extra checks */
	if (cmd->ident != conn->info_ident ||
			conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE)
		return 0;

	del_timer(&conn->info_timer);

	if (result != L2CAP_IR_SUCCESS) {
		/* Peer refused; treat the exchange as finished and proceed
		 * with whatever channels were waiting on it. */
		conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
		conn->info_ident = 0;

		l2cap_conn_start(conn);

		return 0;
	}

	if (type == L2CAP_IT_FEAT_MASK) {
		conn->feat_mask = get_unaligned_le32(rsp->data);

		if (conn->feat_mask & L2CAP_FEAT_FIXED_CHAN) {
			/* Chain the follow-up fixed-channel query. */
			struct l2cap_info_req req;
			req.type = cpu_to_le16(L2CAP_IT_FIXED_CHAN);

			conn->info_ident = l2cap_get_ident(conn);

			l2cap_send_cmd(conn, conn->info_ident,
					L2CAP_INFO_REQ, sizeof(req), &req);
		} else {
			conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
			conn->info_ident = 0;

			l2cap_conn_start(conn);
		}
	} else if (type == L2CAP_IT_FIXED_CHAN) {
		conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
		conn->info_ident = 0;

		l2cap_conn_start(conn);
	}

	return 0;
}
2592
/* Validate a set of LE connection parameters (units per the LE Connection
 * Parameter Update Request): connection interval bounds, supervision
 * timeout, and slave latency.
 *
 * Returns 0 when the parameters are acceptable, -EINVAL otherwise.
 */
static inline int l2cap_check_conn_param(u16 min, u16 max, u16 latency,
							u16 to_multiplier)
{
	u16 max_latency;

	/* Connection interval limits, and min must not exceed max. */
	if (min < 6 || max > 3200 || min > max)
		return -EINVAL;

	/* Supervision timeout limits. */
	if (to_multiplier < 10 || to_multiplier > 3200)
		return -EINVAL;

	/* The timeout must be strictly longer than the interval. */
	if (max >= to_multiplier * 8)
		return -EINVAL;

	/* Latency is capped both absolutely and relative to the timeout. */
	max_latency = (to_multiplier * 8 / max) - 1;
	if (latency > 499 || latency > max_latency)
		return -EINVAL;

	return 0;
}
2613
/* Handle an LE Connection Parameter Update Request from the slave.
 *
 * Only a master may act on this; the parameters are validated, an
 * accept/reject response is sent, and on acceptance the controller is
 * asked to update the link.
 *
 * Returns 0 on success, -EINVAL if we are not master, -EPROTO on a
 * malformed request length.
 */
static inline int l2cap_conn_param_update_req(struct l2cap_conn *conn,
					struct l2cap_cmd_hdr *cmd, u8 *data)
{
	struct hci_conn *hcon = conn->hcon;
	struct l2cap_conn_param_update_req *req;
	struct l2cap_conn_param_update_rsp rsp;
	u16 min, max, latency, to_multiplier, cmd_len;
	int err;

	if (!(hcon->link_mode & HCI_LM_MASTER))
		return -EINVAL;

	/* Reject truncated or oversized requests before touching data. */
	cmd_len = __le16_to_cpu(cmd->len);
	if (cmd_len != sizeof(struct l2cap_conn_param_update_req))
		return -EPROTO;

	req = (struct l2cap_conn_param_update_req *) data;
	min = __le16_to_cpu(req->min);
	max = __le16_to_cpu(req->max);
	latency = __le16_to_cpu(req->latency);
	to_multiplier = __le16_to_cpu(req->to_multiplier);

	BT_DBG("min 0x%4.4x max 0x%4.4x latency: 0x%4.4x Timeout: 0x%4.4x",
						min, max, latency, to_multiplier);

	memset(&rsp, 0, sizeof(rsp));

	err = l2cap_check_conn_param(min, max, latency, to_multiplier);
	if (err)
		rsp.result = cpu_to_le16(L2CAP_CONN_PARAM_REJECTED);
	else
		rsp.result = cpu_to_le16(L2CAP_CONN_PARAM_ACCEPTED);

	l2cap_send_cmd(conn, cmd->ident, L2CAP_CONN_PARAM_UPDATE_RSP,
							sizeof(rsp), &rsp);

	/* Only push the new parameters to the controller if accepted. */
	if (!err)
		hci_le_conn_update(hcon, min, max, latency, to_multiplier);

	return 0;
}
2655
/* Dispatch a single BR/EDR signaling command to its handler.
 *
 * Echo requests are answered inline; echo responses are ignored. An
 * unknown opcode yields -EINVAL, which makes the caller send a Command
 * Reject. Returns the handler's result (0 on success).
 */
static inline int l2cap_bredr_sig_cmd(struct l2cap_conn *conn,
			struct l2cap_cmd_hdr *cmd, u16 cmd_len, u8 *data)
{
	int err = 0;

	switch (cmd->code) {
	case L2CAP_COMMAND_REJ:
		l2cap_command_rej(conn, cmd, data);
		break;

	case L2CAP_CONN_REQ:
		err = l2cap_connect_req(conn, cmd, data);
		break;

	case L2CAP_CONN_RSP:
		err = l2cap_connect_rsp(conn, cmd, data);
		break;

	case L2CAP_CONF_REQ:
		err = l2cap_config_req(conn, cmd, cmd_len, data);
		break;

	case L2CAP_CONF_RSP:
		err = l2cap_config_rsp(conn, cmd, data);
		break;

	case L2CAP_DISCONN_REQ:
		err = l2cap_disconnect_req(conn, cmd, data);
		break;

	case L2CAP_DISCONN_RSP:
		err = l2cap_disconnect_rsp(conn, cmd, data);
		break;

	case L2CAP_ECHO_REQ:
		/* Echo the payload straight back. */
		l2cap_send_cmd(conn, cmd->ident, L2CAP_ECHO_RSP, cmd_len, data);
		break;

	case L2CAP_ECHO_RSP:
		break;

	case L2CAP_INFO_REQ:
		err = l2cap_information_req(conn, cmd, data);
		break;

	case L2CAP_INFO_RSP:
		err = l2cap_information_rsp(conn, cmd, data);
		break;

	default:
		BT_ERR("Unknown BR/EDR signaling command 0x%2.2x", cmd->code);
		err = -EINVAL;
		break;
	}

	return err;
}
2713
2714static inline int l2cap_le_sig_cmd(struct l2cap_conn *conn,
2715 struct l2cap_cmd_hdr *cmd, u8 *data)
2716{
2717 switch (cmd->code) {
2718 case L2CAP_COMMAND_REJ:
2719 return 0;
2720
2721 case L2CAP_CONN_PARAM_UPDATE_REQ:
2722 return l2cap_conn_param_update_req(conn, cmd, data);
2723
2724 case L2CAP_CONN_PARAM_UPDATE_RSP:
2725 return 0;
2726
2727 default:
2728 BT_ERR("Unknown LE signaling command 0x%2.2x", cmd->code);
2729 return -EINVAL;
2730 }
2731}
2732
/* Process the signaling channel payload of one ACL frame.
 *
 * The skb may carry several concatenated commands; each is copied into a
 * local header, bounds-checked, and dispatched to the BR/EDR or LE
 * handler depending on link type. A failing handler triggers a Command
 * Reject back to the peer. Consumes the skb.
 */
static inline void l2cap_sig_channel(struct l2cap_conn *conn,
							struct sk_buff *skb)
{
	u8 *data = skb->data;
	int len = skb->len;
	struct l2cap_cmd_hdr cmd;
	int err;

	/* Give raw (sniffing) sockets a copy first. */
	l2cap_raw_recv(conn, skb);

	while (len >= L2CAP_CMD_HDR_SIZE) {
		u16 cmd_len;
		memcpy(&cmd, data, L2CAP_CMD_HDR_SIZE);
		data += L2CAP_CMD_HDR_SIZE;
		len -= L2CAP_CMD_HDR_SIZE;

		cmd_len = le16_to_cpu(cmd.len);

		BT_DBG("code 0x%2.2x len %d id 0x%2.2x", cmd.code, cmd_len, cmd.ident);

		/* A command longer than the remaining buffer, or with the
		 * reserved ident 0, means the frame is corrupt: bail out. */
		if (cmd_len > len || !cmd.ident) {
			BT_DBG("corrupted command");
			break;
		}

		if (conn->hcon->type == LE_LINK)
			err = l2cap_le_sig_cmd(conn, &cmd, data);
		else
			err = l2cap_bredr_sig_cmd(conn, &cmd, cmd_len, data);

		if (err) {
			struct l2cap_cmd_rej rej;

			BT_ERR("Wrong link type (%d)", err);

			/* FIXME: Map err to a valid reason */
			rej.reason = cpu_to_le16(0);
			l2cap_send_cmd(conn, cmd.ident, L2CAP_COMMAND_REJ, sizeof(rej), &rej);
		}

		/* Advance past this command's payload to the next one. */
		data += cmd_len;
		len -= cmd_len;
	}

	kfree_skb(skb);
}
2779
/* Verify a received frame's FCS (CRC-16 over L2CAP header + payload)
 * when the channel has FCS enabled; a no-op otherwise.
 *
 * The 2-byte FCS trailer is trimmed off first, then read from just past
 * the new tail. NOTE(review): this relies on skb_trim() only shrinking
 * the length while leaving the trailing bytes readable in the buffer.
 * The CRC input starts hdr_size bytes before skb->data, i.e. at the
 * L2CAP basic header.
 *
 * Returns 0 on match (or when FCS is off), -EBADMSG on mismatch.
 */
static int l2cap_check_fcs(struct l2cap_pinfo *pi, struct sk_buff *skb)
{
	u16 our_fcs, rcv_fcs;
	int hdr_size = L2CAP_HDR_SIZE + 2;

	if (pi->fcs == L2CAP_FCS_CRC16) {
		skb_trim(skb, skb->len - 2);
		rcv_fcs = get_unaligned_le16(skb->data + skb->len);
		our_fcs = crc16(0, skb->data - hdr_size, skb->len + hdr_size);

		if (our_fcs != rcv_fcs)
			return -EBADMSG;
	}
	return 0;
}
2795
/* Answer a poll (P-bit) from the peer: report local-busy with RNR,
 * retransmit if the peer was busy, flush pending I-frames, and if
 * nothing at all went out, send a plain RR so the poll is acknowledged.
 */
static inline void l2cap_send_i_or_rr_or_rnr(struct l2cap_chan *chan)
{
	u16 control = 0;

	/* Track whether anything gets transmitted below. */
	chan->frames_sent = 0;

	control |= chan->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT;

	if (chan->conn_state & L2CAP_CONN_LOCAL_BUSY) {
		control |= L2CAP_SUPER_RCV_NOT_READY;
		l2cap_send_sframe(chan, control);
		chan->conn_state |= L2CAP_CONN_RNR_SENT;
	}

	if (chan->conn_state & L2CAP_CONN_REMOTE_BUSY)
		l2cap_retransmit_frames(chan);

	l2cap_ertm_send(chan);

	/* Nothing sent and not busy: the poll still needs an answer. */
	if (!(chan->conn_state & L2CAP_CONN_LOCAL_BUSY) &&
			chan->frames_sent == 0) {
		control |= L2CAP_SUPER_RCV_READY;
		l2cap_send_sframe(chan, control);
	}
}
2821
/* Insert an out-of-order I-frame into the SREJ hold queue, keeping the
 * queue sorted by tx_seq distance from buffer_seq (modulo-64 arithmetic).
 *
 * Returns 0 on insertion, -EINVAL if a frame with the same tx_seq is
 * already queued (duplicate).
 */
static int l2cap_add_to_srej_queue(struct l2cap_chan *chan, struct sk_buff *skb, u8 tx_seq, u8 sar)
{
	struct sk_buff *next_skb;
	int tx_seq_offset, next_tx_seq_offset;

	/* Stash sequencing info in the skb's control block for later
	 * reassembly. */
	bt_cb(skb)->tx_seq = tx_seq;
	bt_cb(skb)->sar = sar;

	next_skb = skb_peek(&chan->srej_q);
	if (!next_skb) {
		__skb_queue_tail(&chan->srej_q, skb);
		return 0;
	}

	/* Distances are taken modulo the 64-entry sequence space. */
	tx_seq_offset = (tx_seq - chan->buffer_seq) % 64;
	if (tx_seq_offset < 0)
		tx_seq_offset += 64;

	do {
		if (bt_cb(next_skb)->tx_seq == tx_seq)
			return -EINVAL;

		next_tx_seq_offset = (bt_cb(next_skb)->tx_seq -
						chan->buffer_seq) % 64;
		if (next_tx_seq_offset < 0)
			next_tx_seq_offset += 64;

		/* First queued frame that sorts after us marks the spot. */
		if (next_tx_seq_offset > tx_seq_offset) {
			__skb_queue_before(&chan->srej_q, next_skb, skb);
			return 0;
		}

		if (skb_queue_is_last(&chan->srej_q, next_skb))
			break;

	} while ((next_skb = skb_queue_next(&chan->srej_q, next_skb)));

	/* Sorts after everything currently queued. */
	__skb_queue_tail(&chan->srej_q, skb);

	return 0;
}
2863
/* Reassemble one ERTM I-frame into an SDU according to its SAR bits and
 * deliver complete SDUs to the owning socket.
 *
 * Consumes @skb on every path. Returns 0 normally; a negative errno is
 * only returned for recoverable receive-buffer pressure (the caller
 * enters local-busy and retries later), in which case the SAR_RETRY flag
 * guarantees the END payload is not appended twice.
 */
static int l2cap_ertm_reassembly_sdu(struct l2cap_chan *chan, struct sk_buff *skb, u16 control)
{
	struct l2cap_pinfo *pi = l2cap_pi(chan->sk);
	struct sk_buff *_skb;
	int err;

	switch (control & L2CAP_CTRL_SAR) {
	case L2CAP_SDU_UNSEGMENTED:
		/* An unsegmented frame while an SDU is in progress is a
		 * protocol violation. */
		if (chan->conn_state & L2CAP_CONN_SAR_SDU)
			goto drop;

		err = sock_queue_rcv_skb(chan->sk, skb);
		if (!err)
			return err;

		/* Queueing failed (e.g. rcvbuf full): fall out of the
		 * switch, which frees the skb and reports success. */
		break;

	case L2CAP_SDU_START:
		if (chan->conn_state & L2CAP_CONN_SAR_SDU)
			goto drop;

		/* First two payload bytes announce the total SDU length. */
		chan->sdu_len = get_unaligned_le16(skb->data);

		if (chan->sdu_len > pi->imtu)
			goto disconnect;

		chan->sdu = bt_skb_alloc(chan->sdu_len, GFP_ATOMIC);
		if (!chan->sdu)
			return -ENOMEM;

		/* pull sdu_len bytes only after alloc, because of Local Busy
		 * condition we have to be sure that this will be executed
		 * only once, i.e., when alloc does not fail */
		skb_pull(skb, 2);

		memcpy(skb_put(chan->sdu, skb->len), skb->data, skb->len);

		chan->conn_state |= L2CAP_CONN_SAR_SDU;
		chan->partial_sdu_len = skb->len;
		break;

	case L2CAP_SDU_CONTINUE:
		if (!(chan->conn_state & L2CAP_CONN_SAR_SDU))
			goto disconnect;

		if (!chan->sdu)
			goto disconnect;

		chan->partial_sdu_len += skb->len;
		if (chan->partial_sdu_len > chan->sdu_len)
			goto drop;

		memcpy(skb_put(chan->sdu, skb->len), skb->data, skb->len);

		break;

	case L2CAP_SDU_END:
		if (!(chan->conn_state & L2CAP_CONN_SAR_SDU))
			goto disconnect;

		if (!chan->sdu)
			goto disconnect;

		/* Skip the append on a retry: it already happened before
		 * the previous delivery attempt failed. */
		if (!(chan->conn_state & L2CAP_CONN_SAR_RETRY)) {
			chan->partial_sdu_len += skb->len;

			if (chan->partial_sdu_len > pi->imtu)
				goto drop;

			if (chan->partial_sdu_len != chan->sdu_len)
				goto drop;

			memcpy(skb_put(chan->sdu, skb->len), skb->data, skb->len);
		}

		_skb = skb_clone(chan->sdu, GFP_ATOMIC);
		if (!_skb) {
			chan->conn_state |= L2CAP_CONN_SAR_RETRY;
			return -ENOMEM;
		}

		err = sock_queue_rcv_skb(chan->sk, _skb);
		if (err < 0) {
			kfree_skb(_skb);
			chan->conn_state |= L2CAP_CONN_SAR_RETRY;
			return err;
		}

		chan->conn_state &= ~L2CAP_CONN_SAR_RETRY;
		chan->conn_state &= ~L2CAP_CONN_SAR_SDU;

		kfree_skb(chan->sdu);
		break;
	}

	kfree_skb(skb);
	return 0;

drop:
	kfree_skb(chan->sdu);
	chan->sdu = NULL;

	/* Note: drop deliberately falls through to disconnect — a SAR
	 * violation both discards the partial SDU and tears the link down. */
disconnect:
	l2cap_send_disconn_req(pi->conn, chan, ECONNRESET);
	kfree_skb(skb);
	return 0;
}
2971
/* Try to drain the local-busy hold queue into the socket.
 *
 * Returns -EBUSY (frame re-queued at the head) if the receiver is still
 * unable to accept data, otherwise clears the local-busy state, and — if
 * an RNR had been sent — polls the peer with an RR+P so it resumes.
 */
static int l2cap_try_push_rx_skb(struct l2cap_chan *chan)
{
	struct sk_buff *skb;
	u16 control;
	int err;

	while ((skb = skb_dequeue(&chan->busy_q))) {
		control = bt_cb(skb)->sar << L2CAP_CTRL_SAR_SHIFT;
		err = l2cap_ertm_reassembly_sdu(chan, skb, control);
		if (err < 0) {
			/* Still busy: put the frame back and report it. */
			skb_queue_head(&chan->busy_q, skb);
			return -EBUSY;
		}

		chan->buffer_seq = (chan->buffer_seq + 1) % 64;
	}

	if (!(chan->conn_state & L2CAP_CONN_RNR_SENT))
		goto done;

	/* We told the peer "not ready" earlier; poll it to resume and wait
	 * for the F-bit acknowledging the poll. */
	control = chan->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT;
	control |= L2CAP_SUPER_RCV_READY | L2CAP_CTRL_POLL;
	l2cap_send_sframe(chan, control);
	chan->retry_count = 1;

	del_timer(&chan->retrans_timer);
	__mod_monitor_timer();

	chan->conn_state |= L2CAP_CONN_WAIT_F;

done:
	chan->conn_state &= ~L2CAP_CONN_LOCAL_BUSY;
	chan->conn_state &= ~L2CAP_CONN_RNR_SENT;

	BT_DBG("chan %p, Exit local busy", chan);

	return 0;
}
3010
/* Workqueue handler for the local-busy condition: repeatedly sleep and
 * retry pushing held frames to the socket until the queue drains, the
 * socket errors out, a signal arrives, or we give up and disconnect
 * after L2CAP_LOCAL_BUSY_TRIES attempts.
 *
 * Runs in process context, so it may sleep; the socket lock is dropped
 * around each schedule_timeout(). Note err is only used to decide when
 * to stop — its final value is discarded.
 */
static void l2cap_busy_work(struct work_struct *work)
{
	DECLARE_WAITQUEUE(wait, current);
	struct l2cap_chan *chan =
		container_of(work, struct l2cap_chan, busy_work);
	struct sock *sk = chan->sk;
	int n_tries = 0, timeo = HZ/5, err;
	struct sk_buff *skb;

	lock_sock(sk);

	add_wait_queue(sk_sleep(sk), &wait);
	while ((skb = skb_peek(&chan->busy_q))) {
		set_current_state(TASK_INTERRUPTIBLE);

		/* Too many retries: the receiver is wedged, give up. */
		if (n_tries++ > L2CAP_LOCAL_BUSY_TRIES) {
			err = -EBUSY;
			l2cap_send_disconn_req(l2cap_pi(sk)->conn, chan, EBUSY);
			break;
		}

		if (!timeo)
			timeo = HZ/5;

		if (signal_pending(current)) {
			err = sock_intr_errno(timeo);
			break;
		}

		/* Sleep without the socket lock so the reader can drain. */
		release_sock(sk);
		timeo = schedule_timeout(timeo);
		lock_sock(sk);

		err = sock_error(sk);
		if (err)
			break;

		if (l2cap_try_push_rx_skb(chan) == 0)
			break;
	}

	set_current_state(TASK_RUNNING);
	remove_wait_queue(sk_sleep(sk), &wait);

	release_sock(sk);
}
3057
/* Deliver an in-sequence I-frame to the socket, entering the local-busy
 * state when delivery fails.
 *
 * If already busy the frame just joins the hold queue. On a fresh
 * failure we queue the frame, send RNR to throttle the peer, stop
 * acking, and kick the busy workqueue to retry from process context.
 *
 * Returns the reassembly result (>= 0 on success, negative when the
 * busy path was taken).
 */
static int l2cap_push_rx_skb(struct l2cap_chan *chan, struct sk_buff *skb, u16 control)
{
	int sctrl, err;

	if (chan->conn_state & L2CAP_CONN_LOCAL_BUSY) {
		bt_cb(skb)->sar = control >> L2CAP_CTRL_SAR_SHIFT;
		__skb_queue_tail(&chan->busy_q, skb);
		return l2cap_try_push_rx_skb(chan);


	}

	err = l2cap_ertm_reassembly_sdu(chan, skb, control);
	if (err >= 0) {
		chan->buffer_seq = (chan->buffer_seq + 1) % 64;
		return err;
	}

	/* Busy Condition */
	BT_DBG("chan %p, Enter local busy", chan);

	chan->conn_state |= L2CAP_CONN_LOCAL_BUSY;
	bt_cb(skb)->sar = control >> L2CAP_CTRL_SAR_SHIFT;
	__skb_queue_tail(&chan->busy_q, skb);

	/* Tell the peer to stop sending until further notice. */
	sctrl = chan->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT;
	sctrl |= L2CAP_SUPER_RCV_NOT_READY;
	l2cap_send_sframe(chan, sctrl);

	chan->conn_state |= L2CAP_CONN_RNR_SENT;

	/* No point acking while we cannot accept frames. */
	del_timer(&chan->ack_timer);

	queue_work(_busy_wq, &chan->busy_work);

	return err;
}
3095
3096static int l2cap_streaming_reassembly_sdu(struct l2cap_chan *chan, struct sk_buff *skb, u16 control)
3097{
3098 struct l2cap_pinfo *pi = l2cap_pi(chan->sk);
3099 struct sk_buff *_skb;
3100 int err = -EINVAL;
3101
3102 /*
3103 * TODO: We have to notify the userland if some data is lost with the
3104 * Streaming Mode.
3105 */
3106
3107 switch (control & L2CAP_CTRL_SAR) {
3108 case L2CAP_SDU_UNSEGMENTED:
3109 if (chan->conn_state & L2CAP_CONN_SAR_SDU) {
3110 kfree_skb(chan->sdu);
3111 break;
3112 }
3113
3114 err = sock_queue_rcv_skb(chan->sk, skb);
3115 if (!err)
3116 return 0;
3117
3118 break;
3119
3120 case L2CAP_SDU_START:
3121 if (chan->conn_state & L2CAP_CONN_SAR_SDU) {
3122 kfree_skb(chan->sdu);
3123 break;
3124 }
3125
3126 chan->sdu_len = get_unaligned_le16(skb->data);
3127 skb_pull(skb, 2);
3128
3129 if (chan->sdu_len > pi->imtu) {
3130 err = -EMSGSIZE;
3131 break;
3132 }
3133
3134 chan->sdu = bt_skb_alloc(chan->sdu_len, GFP_ATOMIC);
3135 if (!chan->sdu) {
3136 err = -ENOMEM;
3137 break;
3138 }
3139
3140 memcpy(skb_put(chan->sdu, skb->len), skb->data, skb->len);
3141
3142 chan->conn_state |= L2CAP_CONN_SAR_SDU;
3143 chan->partial_sdu_len = skb->len;
3144 err = 0;
3145 break;
3146
3147 case L2CAP_SDU_CONTINUE:
3148 if (!(chan->conn_state & L2CAP_CONN_SAR_SDU))
3149 break;
3150
3151 memcpy(skb_put(chan->sdu, skb->len), skb->data, skb->len);
3152
3153 chan->partial_sdu_len += skb->len;
3154 if (chan->partial_sdu_len > chan->sdu_len)
3155 kfree_skb(chan->sdu);
3156 else
3157 err = 0;
3158
3159 break;
3160
3161 case L2CAP_SDU_END:
3162 if (!(chan->conn_state & L2CAP_CONN_SAR_SDU))
3163 break;
3164
3165 memcpy(skb_put(chan->sdu, skb->len), skb->data, skb->len);
3166
3167 chan->conn_state &= ~L2CAP_CONN_SAR_SDU;
3168 chan->partial_sdu_len += skb->len;
3169
3170 if (chan->partial_sdu_len > pi->imtu)
3171 goto drop;
3172
3173 if (chan->partial_sdu_len == chan->sdu_len) {
3174 _skb = skb_clone(chan->sdu, GFP_ATOMIC);
3175 err = sock_queue_rcv_skb(chan->sk, _skb);
3176 if (err < 0)
3177 kfree_skb(_skb);
3178 }
3179 err = 0;
3180
3181drop:
3182 kfree_skb(chan->sdu);
3183 break;
3184 }
3185
3186 kfree_skb(skb);
3187 return err;
3188}
3189
3190static void l2cap_check_srej_gap(struct l2cap_chan *chan, u8 tx_seq)
3191{
3192 struct sk_buff *skb;
3193 u16 control;
3194
3195 while ((skb = skb_peek(&chan->srej_q))) {
3196 if (bt_cb(skb)->tx_seq != tx_seq)
3197 break;
3198
3199 skb = skb_dequeue(&chan->srej_q);
3200 control = bt_cb(skb)->sar << L2CAP_CTRL_SAR_SHIFT;
3201 l2cap_ertm_reassembly_sdu(chan, skb, control);
3202 chan->buffer_seq_srej =
3203 (chan->buffer_seq_srej + 1) % 64;
3204 tx_seq = (tx_seq + 1) % 64;
3205 }
3206}
3207
/* The peer retransmitted a frame we had SREJ'd (@tx_seq): drop its entry
 * from the pending-SREJ list, resending the SREJ for every entry queued
 * ahead of it and rotating those entries to the list tail so the list
 * stays ordered by outstanding request.
 */
static void l2cap_resend_srejframe(struct l2cap_chan *chan, u8 tx_seq)
{
	struct srej_list *l, *tmp;
	u16 control;

	list_for_each_entry_safe(l, tmp, &chan->srej_l, list) {
		if (l->tx_seq == tx_seq) {
			/* Found the satisfied request: forget it. */
			list_del(&l->list);
			kfree(l);
			return;
		}
		/* Still missing: ask for it again and move it behind the
		 * newer requests. */
		control = L2CAP_SUPER_SELECT_REJECT;
		control |= l->tx_seq << L2CAP_CTRL_REQSEQ_SHIFT;
		l2cap_send_sframe(chan, control);
		list_del(&l->list);
		list_add_tail(&l->list, &chan->srej_l);
	}
}
3226
3227static void l2cap_send_srejframe(struct l2cap_chan *chan, u8 tx_seq)
3228{
3229 struct srej_list *new;
3230 u16 control;
3231
3232 while (tx_seq != chan->expected_tx_seq) {
3233 control = L2CAP_SUPER_SELECT_REJECT;
3234 control |= chan->expected_tx_seq << L2CAP_CTRL_REQSEQ_SHIFT;
3235 l2cap_send_sframe(chan, control);
3236
3237 new = kzalloc(sizeof(struct srej_list), GFP_ATOMIC);
3238 new->tx_seq = chan->expected_tx_seq;
3239 chan->expected_tx_seq = (chan->expected_tx_seq + 1) % 64;
3240 list_add_tail(&new->list, &chan->srej_l);
3241 }
3242 chan->expected_tx_seq = (chan->expected_tx_seq + 1) % 64;
3243}
3244
3245static inline int l2cap_data_channel_iframe(struct l2cap_chan *chan, u16 rx_control, struct sk_buff *skb)
3246{
3247 struct l2cap_pinfo *pi = l2cap_pi(chan->sk);
3248 u8 tx_seq = __get_txseq(rx_control);
3249 u8 req_seq = __get_reqseq(rx_control);
3250 u8 sar = rx_control >> L2CAP_CTRL_SAR_SHIFT;
3251 int tx_seq_offset, expected_tx_seq_offset;
3252 int num_to_ack = (pi->tx_win/6) + 1;
3253 int err = 0;
3254
3255 BT_DBG("chan %p len %d tx_seq %d rx_control 0x%4.4x", chan, skb->len,
3256 tx_seq, rx_control);
3257
3258 if (L2CAP_CTRL_FINAL & rx_control &&
3259 chan->conn_state & L2CAP_CONN_WAIT_F) {
3260 del_timer(&chan->monitor_timer);
3261 if (chan->unacked_frames > 0)
3262 __mod_retrans_timer();
3263 chan->conn_state &= ~L2CAP_CONN_WAIT_F;
3264 }
3265
3266 chan->expected_ack_seq = req_seq;
3267 l2cap_drop_acked_frames(chan);
3268
3269 if (tx_seq == chan->expected_tx_seq)
3270 goto expected;
3271
3272 tx_seq_offset = (tx_seq - chan->buffer_seq) % 64;
3273 if (tx_seq_offset < 0)
3274 tx_seq_offset += 64;
3275
3276 /* invalid tx_seq */
3277 if (tx_seq_offset >= pi->tx_win) {
3278 l2cap_send_disconn_req(pi->conn, chan, ECONNRESET);
3279 goto drop;
3280 }
3281
3282 if (chan->conn_state == L2CAP_CONN_LOCAL_BUSY)
3283 goto drop;
3284
3285 if (chan->conn_state & L2CAP_CONN_SREJ_SENT) {
3286 struct srej_list *first;
3287
3288 first = list_first_entry(&chan->srej_l,
3289 struct srej_list, list);
3290 if (tx_seq == first->tx_seq) {
3291 l2cap_add_to_srej_queue(chan, skb, tx_seq, sar);
3292 l2cap_check_srej_gap(chan, tx_seq);
3293
3294 list_del(&first->list);
3295 kfree(first);
3296
3297 if (list_empty(&chan->srej_l)) {
3298 chan->buffer_seq = chan->buffer_seq_srej;
3299 chan->conn_state &= ~L2CAP_CONN_SREJ_SENT;
3300 l2cap_send_ack(chan);
3301 BT_DBG("chan %p, Exit SREJ_SENT", chan);
3302 }
3303 } else {
3304 struct srej_list *l;
3305
3306 /* duplicated tx_seq */
3307 if (l2cap_add_to_srej_queue(chan, skb, tx_seq, sar) < 0)
3308 goto drop;
3309
3310 list_for_each_entry(l, &chan->srej_l, list) {
3311 if (l->tx_seq == tx_seq) {
3312 l2cap_resend_srejframe(chan, tx_seq);
3313 return 0;
3314 }
3315 }
3316 l2cap_send_srejframe(chan, tx_seq);
3317 }
3318 } else {
3319 expected_tx_seq_offset =
3320 (chan->expected_tx_seq - chan->buffer_seq) % 64;
3321 if (expected_tx_seq_offset < 0)
3322 expected_tx_seq_offset += 64;
3323
3324 /* duplicated tx_seq */
3325 if (tx_seq_offset < expected_tx_seq_offset)
3326 goto drop;
3327
3328 chan->conn_state |= L2CAP_CONN_SREJ_SENT;
3329
3330 BT_DBG("chan %p, Enter SREJ", chan);
3331
3332 INIT_LIST_HEAD(&chan->srej_l);
3333 chan->buffer_seq_srej = chan->buffer_seq;
3334
3335 __skb_queue_head_init(&chan->srej_q);
3336 __skb_queue_head_init(&chan->busy_q);
3337 l2cap_add_to_srej_queue(chan, skb, tx_seq, sar);
3338
3339 chan->conn_state |= L2CAP_CONN_SEND_PBIT;
3340
3341 l2cap_send_srejframe(chan, tx_seq);
3342
3343 del_timer(&chan->ack_timer);
3344 }
3345 return 0;
3346
3347expected:
3348 chan->expected_tx_seq = (chan->expected_tx_seq + 1) % 64;
3349
3350 if (chan->conn_state & L2CAP_CONN_SREJ_SENT) {
3351 bt_cb(skb)->tx_seq = tx_seq;
3352 bt_cb(skb)->sar = sar;
3353 __skb_queue_tail(&chan->srej_q, skb);
3354 return 0;
3355 }
3356
3357 err = l2cap_push_rx_skb(chan, skb, rx_control);
3358 if (err < 0)
3359 return 0;
3360
3361 if (rx_control & L2CAP_CTRL_FINAL) {
3362 if (chan->conn_state & L2CAP_CONN_REJ_ACT)
3363 chan->conn_state &= ~L2CAP_CONN_REJ_ACT;
3364 else
3365 l2cap_retransmit_frames(chan);
3366 }
3367
3368 __mod_ack_timer();
3369
3370 chan->num_acked = (chan->num_acked + 1) % num_to_ack;
3371 if (chan->num_acked == num_to_ack - 1)
3372 l2cap_send_ack(chan);
3373
3374 return 0;
3375
3376drop:
3377 kfree_skb(skb);
3378 return 0;
3379}
3380
/* Process a Receiver Ready (RR) S-frame: acknowledge frames up to
 * req_seq, then react to the P/F bits — a poll gets an immediate answer,
 * a final bit may trigger retransmission, and a plain RR clears
 * remote-busy and resumes sending.
 */
static inline void l2cap_data_channel_rrframe(struct l2cap_chan *chan, u16 rx_control)
{
	BT_DBG("chan %p, req_seq %d ctrl 0x%4.4x", chan, __get_reqseq(rx_control),
						rx_control);

	chan->expected_ack_seq = __get_reqseq(rx_control);
	l2cap_drop_acked_frames(chan);

	if (rx_control & L2CAP_CTRL_POLL) {
		/* Peer polls us: answer with the F-bit set. */
		chan->conn_state |= L2CAP_CONN_SEND_FBIT;
		if (chan->conn_state & L2CAP_CONN_SREJ_SENT) {
			if ((chan->conn_state & L2CAP_CONN_REMOTE_BUSY) &&
					(chan->unacked_frames > 0))
				__mod_retrans_timer();

			chan->conn_state &= ~L2CAP_CONN_REMOTE_BUSY;
			l2cap_send_srejtail(chan);
		} else {
			l2cap_send_i_or_rr_or_rnr(chan);
		}

	} else if (rx_control & L2CAP_CTRL_FINAL) {
		chan->conn_state &= ~L2CAP_CONN_REMOTE_BUSY;

		/* Answer to our REJ poll: retransmit unless the REJ was
		 * already acted upon. */
		if (chan->conn_state & L2CAP_CONN_REJ_ACT)
			chan->conn_state &= ~L2CAP_CONN_REJ_ACT;
		else
			l2cap_retransmit_frames(chan);

	} else {
		if ((chan->conn_state & L2CAP_CONN_REMOTE_BUSY) &&
				(chan->unacked_frames > 0))
			__mod_retrans_timer();

		chan->conn_state &= ~L2CAP_CONN_REMOTE_BUSY;
		if (chan->conn_state & L2CAP_CONN_SREJ_SENT)
			l2cap_send_ack(chan);
		else
			l2cap_ertm_send(chan);
	}
}
3422
/* Process a Reject (REJ) S-frame: the peer asks for a go-back-N
 * retransmission starting at req_seq. Everything before it is
 * acknowledged; frames from it onward are retransmitted, with the F-bit
 * and WAIT_F state guarding against doing so twice for the same REJ.
 */
static inline void l2cap_data_channel_rejframe(struct l2cap_chan *chan, u16 rx_control)
{
	u8 tx_seq = __get_reqseq(rx_control);

	BT_DBG("chan %p, req_seq %d ctrl 0x%4.4x", chan, tx_seq, rx_control);

	chan->conn_state &= ~L2CAP_CONN_REMOTE_BUSY;

	chan->expected_ack_seq = tx_seq;
	l2cap_drop_acked_frames(chan);

	if (rx_control & L2CAP_CTRL_FINAL) {
		/* REJ answers our poll: skip the retransmit if it was
		 * already done when the REJ first arrived. */
		if (chan->conn_state & L2CAP_CONN_REJ_ACT)
			chan->conn_state &= ~L2CAP_CONN_REJ_ACT;
		else
			l2cap_retransmit_frames(chan);
	} else {
		l2cap_retransmit_frames(chan);

		/* Remember we already acted, in case the poll answer
		 * carries the same REJ again. */
		if (chan->conn_state & L2CAP_CONN_WAIT_F)
			chan->conn_state |= L2CAP_CONN_REJ_ACT;
	}
}
/* Process a Selective Reject (SREJ) S-frame: retransmit exactly the
 * frame req_seq. The P-bit variant also acknowledges everything before
 * it and resumes sending; the SREJ_ACT/srej_save_reqseq pair prevents a
 * duplicate retransmission when the same SREJ returns with the F-bit.
 */
static inline void l2cap_data_channel_srejframe(struct l2cap_chan *chan, u16 rx_control)
{
	u8 tx_seq = __get_reqseq(rx_control);

	BT_DBG("chan %p, req_seq %d ctrl 0x%4.4x", chan, tx_seq, rx_control);

	chan->conn_state &= ~L2CAP_CONN_REMOTE_BUSY;

	if (rx_control & L2CAP_CTRL_POLL) {
		/* SREJ with P also acks frames up to tx_seq. */
		chan->expected_ack_seq = tx_seq;
		l2cap_drop_acked_frames(chan);

		chan->conn_state |= L2CAP_CONN_SEND_FBIT;
		l2cap_retransmit_one_frame(chan, tx_seq);

		l2cap_ertm_send(chan);

		if (chan->conn_state & L2CAP_CONN_WAIT_F) {
			chan->srej_save_reqseq = tx_seq;
			chan->conn_state |= L2CAP_CONN_SREJ_ACT;
		}
	} else if (rx_control & L2CAP_CTRL_FINAL) {
		/* Poll answer: retransmit only if this SREJ was not
		 * already handled. */
		if ((chan->conn_state & L2CAP_CONN_SREJ_ACT) &&
				chan->srej_save_reqseq == tx_seq)
			chan->conn_state &= ~L2CAP_CONN_SREJ_ACT;
		else
			l2cap_retransmit_one_frame(chan, tx_seq);
	} else {
		l2cap_retransmit_one_frame(chan, tx_seq);
		if (chan->conn_state & L2CAP_CONN_WAIT_F) {
			chan->srej_save_reqseq = tx_seq;
			chan->conn_state |= L2CAP_CONN_SREJ_ACT;
		}
	}
}
3481
/* Process a Receiver Not Ready (RNR) S-frame: the peer cannot accept
 * more I-frames. Mark remote-busy, take the piggybacked ack, stop the
 * retransmission timer, and answer any poll — with the outstanding SREJ
 * tail if we are in SREJ recovery, otherwise with RR/RNR as appropriate.
 */
static inline void l2cap_data_channel_rnrframe(struct l2cap_chan *chan, u16 rx_control)
{
	u8 tx_seq = __get_reqseq(rx_control);

	BT_DBG("chan %p, req_seq %d ctrl 0x%4.4x", chan, tx_seq, rx_control);

	chan->conn_state |= L2CAP_CONN_REMOTE_BUSY;
	chan->expected_ack_seq = tx_seq;
	l2cap_drop_acked_frames(chan);

	if (rx_control & L2CAP_CTRL_POLL)
		chan->conn_state |= L2CAP_CONN_SEND_FBIT;

	if (!(chan->conn_state & L2CAP_CONN_SREJ_SENT)) {
		/* No retransmissions while the peer is busy. */
		del_timer(&chan->retrans_timer);
		if (rx_control & L2CAP_CTRL_POLL)
			l2cap_send_rr_or_rnr(chan, L2CAP_CTRL_FINAL);
		return;
	}

	if (rx_control & L2CAP_CTRL_POLL)
		l2cap_send_srejtail(chan);
	else
		l2cap_send_sframe(chan, L2CAP_SUPER_RCV_READY);
}
3507
/* Dispatch a received ERTM S-frame (RR/REJ/SREJ/RNR) to its handler.
 *
 * An F-bit answering our poll first stops the monitor timer and resumes
 * retransmission. Consumes @skb (S-frames carry no payload to keep).
 * Always returns 0.
 */
static inline int l2cap_data_channel_sframe(struct l2cap_chan *chan, u16 rx_control, struct sk_buff *skb)
{
	BT_DBG("chan %p rx_control 0x%4.4x len %d", chan, rx_control, skb->len);

	if (L2CAP_CTRL_FINAL & rx_control &&
			chan->conn_state & L2CAP_CONN_WAIT_F) {
		del_timer(&chan->monitor_timer);
		if (chan->unacked_frames > 0)
			__mod_retrans_timer();
		chan->conn_state &= ~L2CAP_CONN_WAIT_F;
	}

	switch (rx_control & L2CAP_CTRL_SUPERVISE) {
	case L2CAP_SUPER_RCV_READY:
		l2cap_data_channel_rrframe(chan, rx_control);
		break;

	case L2CAP_SUPER_REJECT:
		l2cap_data_channel_rejframe(chan, rx_control);
		break;

	case L2CAP_SUPER_SELECT_REJECT:
		l2cap_data_channel_srejframe(chan, rx_control);
		break;

	case L2CAP_SUPER_RCV_NOT_READY:
		l2cap_data_channel_rnrframe(chan, rx_control);
		break;
	}

	kfree_skb(skb);
	return 0;
}
3541
/* Validate and dispatch one received ERTM frame for @sk.
 *
 * Strips the control field, verifies FCS, checks payload length against
 * the negotiated MPS and the req-seq against the send window, then hands
 * the frame to the I-frame or S-frame path. Invalid frames are either
 * silently dropped (FCS error — the ERTM recovery machinery will ask for
 * a retransmission) or cause a disconnect (protocol violations).
 *
 * Consumes @skb. Always returns 0 (also used as the sk backlog handler).
 */
static int l2cap_ertm_data_rcv(struct sock *sk, struct sk_buff *skb)
{
	struct l2cap_chan *chan = l2cap_pi(sk)->chan;
	struct l2cap_pinfo *pi = l2cap_pi(sk);
	u16 control;
	u8 req_seq;
	int len, next_tx_seq_offset, req_seq_offset;

	control = get_unaligned_le16(skb->data);
	skb_pull(skb, 2);
	len = skb->len;

	/*
	 * We can just drop the corrupted I-frame here.
	 * Receiver will miss it and start proper recovery
	 * procedures and ask retransmission.
	 */
	if (l2cap_check_fcs(pi, skb))
		goto drop;

	/* The SAR-start length field and the FCS trailer are not payload. */
	if (__is_sar_start(control) && __is_iframe(control))
		len -= 2;

	if (pi->fcs == L2CAP_FCS_CRC16)
		len -= 2;

	if (len > pi->mps) {
		l2cap_send_disconn_req(pi->conn, chan, ECONNRESET);
		goto drop;
	}

	req_seq = __get_reqseq(control);
	req_seq_offset = (req_seq - chan->expected_ack_seq) % 64;
	if (req_seq_offset < 0)
		req_seq_offset += 64;

	next_tx_seq_offset =
		(chan->next_tx_seq - chan->expected_ack_seq) % 64;
	if (next_tx_seq_offset < 0)
		next_tx_seq_offset += 64;

	/* check for invalid req-seq */
	if (req_seq_offset > next_tx_seq_offset) {
		l2cap_send_disconn_req(pi->conn, chan, ECONNRESET);
		goto drop;
	}

	if (__is_iframe(control)) {
		/* len went negative: frame shorter than its headers. */
		if (len < 0) {
			l2cap_send_disconn_req(pi->conn, chan, ECONNRESET);
			goto drop;
		}

		l2cap_data_channel_iframe(chan, control, skb);
	} else {
		/* S-frames must carry no payload at all. */
		if (len != 0) {
			BT_ERR("%d", len);
			l2cap_send_disconn_req(pi->conn, chan, ECONNRESET);
			goto drop;
		}

		l2cap_data_channel_sframe(chan, control, skb);
	}

	return 0;

drop:
	kfree_skb(skb);
	return 0;
}
3612
3613static inline int l2cap_data_channel(struct l2cap_conn *conn, u16 cid, struct sk_buff *skb)
3614{
3615 struct l2cap_chan *chan;
3616 struct sock *sk;
3617 struct l2cap_pinfo *pi;
3618 u16 control;
3619 u8 tx_seq;
3620 int len;
3621
3622 chan = l2cap_get_chan_by_scid(conn, cid);
3623 if (!chan) {
3624 BT_DBG("unknown cid 0x%4.4x", cid);
3625 goto drop;
3626 }
3627
3628 sk = chan->sk;
3629 pi = l2cap_pi(sk);
3630
3631 BT_DBG("chan %p, len %d", chan, skb->len);
3632
3633 if (sk->sk_state != BT_CONNECTED)
3634 goto drop;
3635
3636 switch (pi->mode) {
3637 case L2CAP_MODE_BASIC:
3638 /* If socket recv buffers overflows we drop data here
3639 * which is *bad* because L2CAP has to be reliable.
3640 * But we don't have any other choice. L2CAP doesn't
3641 * provide flow control mechanism. */
3642
3643 if (pi->imtu < skb->len)
3644 goto drop;
3645
3646 if (!sock_queue_rcv_skb(sk, skb))
3647 goto done;
3648 break;
3649
3650 case L2CAP_MODE_ERTM:
3651 if (!sock_owned_by_user(sk)) {
3652 l2cap_ertm_data_rcv(sk, skb);
3653 } else {
3654 if (sk_add_backlog(sk, skb))
3655 goto drop;
3656 }
3657
3658 goto done;
3659
3660 case L2CAP_MODE_STREAMING:
3661 control = get_unaligned_le16(skb->data);
3662 skb_pull(skb, 2);
3663 len = skb->len;
3664
3665 if (l2cap_check_fcs(pi, skb))
3666 goto drop;
3667
3668 if (__is_sar_start(control))
3669 len -= 2;
3670
3671 if (pi->fcs == L2CAP_FCS_CRC16)
3672 len -= 2;
3673
3674 if (len > pi->mps || len < 0 || __is_sframe(control))
3675 goto drop;
3676
3677 tx_seq = __get_txseq(control);
3678
3679 if (chan->expected_tx_seq == tx_seq)
3680 chan->expected_tx_seq = (chan->expected_tx_seq + 1) % 64;
3681 else
3682 chan->expected_tx_seq = (tx_seq + 1) % 64;
3683
3684 l2cap_streaming_reassembly_sdu(chan, skb, control);
3685
3686 goto done;
3687
3688 default:
3689 BT_DBG("chan %p: bad mode 0x%2.2x", chan, pi->mode);
3690 break;
3691 }
3692
3693drop:
3694 kfree_skb(skb);
3695
3696done:
3697 if (sk)
3698 bh_unlock_sock(sk);
3699
3700 return 0;
3701}
3702
/* Deliver a connectionless-channel payload addressed to @psm to a matching
 * listening/bound socket, if any.  The skb is always consumed: either
 * queued to the socket's receive buffer or freed.  Always returns 0.
 */
static inline int l2cap_conless_channel(struct l2cap_conn *conn, __le16 psm, struct sk_buff *skb)
{
	struct sock *sk;

	/* sk stays NULL when no socket matches, so the unlock at "done"
	 * is correctly skipped on that path. */
	sk = l2cap_get_sock_by_psm(0, psm, conn->src);
	if (!sk)
		goto drop;

	bh_lock_sock(sk);

	BT_DBG("sk %p, len %d", sk, skb->len);

	if (sk->sk_state != BT_BOUND && sk->sk_state != BT_CONNECTED)
		goto drop;

	/* Enforce the socket's incoming MTU before queueing. */
	if (l2cap_pi(sk)->imtu < skb->len)
		goto drop;

	/* sock_queue_rcv_skb() returns 0 on success and takes ownership. */
	if (!sock_queue_rcv_skb(sk, skb))
		goto done;

drop:
	kfree_skb(skb);

done:
	if (sk)
		bh_unlock_sock(sk);
	return 0;
}
3732
3733static void l2cap_recv_frame(struct l2cap_conn *conn, struct sk_buff *skb)
3734{
3735 struct l2cap_hdr *lh = (void *) skb->data;
3736 u16 cid, len;
3737 __le16 psm;
3738
3739 skb_pull(skb, L2CAP_HDR_SIZE);
3740 cid = __le16_to_cpu(lh->cid);
3741 len = __le16_to_cpu(lh->len);
3742
3743 if (len != skb->len) {
3744 kfree_skb(skb);
3745 return;
3746 }
3747
3748 BT_DBG("len %d, cid 0x%4.4x", len, cid);
3749
3750 switch (cid) {
3751 case L2CAP_CID_LE_SIGNALING:
3752 case L2CAP_CID_SIGNALING:
3753 l2cap_sig_channel(conn, skb);
3754 break;
3755
3756 case L2CAP_CID_CONN_LESS:
3757 psm = get_unaligned_le16(skb->data);
3758 skb_pull(skb, 2);
3759 l2cap_conless_channel(conn, psm, skb);
3760 break;
3761
3762 default:
3763 l2cap_data_channel(conn, cid, skb);
3764 break;
3765 }
3766}
3767
3768/* ---- L2CAP interface with lower layer (HCI) ---- */
3769
3770static int l2cap_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
3771{
3772 int exact = 0, lm1 = 0, lm2 = 0;
3773 register struct sock *sk;
3774 struct hlist_node *node;
3775
3776 if (type != ACL_LINK)
3777 return -EINVAL;
3778
3779 BT_DBG("hdev %s, bdaddr %s", hdev->name, batostr(bdaddr));
3780
3781 /* Find listening sockets and check their link_mode */
3782 read_lock(&l2cap_sk_list.lock);
3783 sk_for_each(sk, node, &l2cap_sk_list.head) {
3784 if (sk->sk_state != BT_LISTEN)
3785 continue;
3786
3787 if (!bacmp(&bt_sk(sk)->src, &hdev->bdaddr)) {
3788 lm1 |= HCI_LM_ACCEPT;
3789 if (l2cap_pi(sk)->role_switch)
3790 lm1 |= HCI_LM_MASTER;
3791 exact++;
3792 } else if (!bacmp(&bt_sk(sk)->src, BDADDR_ANY)) {
3793 lm2 |= HCI_LM_ACCEPT;
3794 if (l2cap_pi(sk)->role_switch)
3795 lm2 |= HCI_LM_MASTER;
3796 }
3797 }
3798 read_unlock(&l2cap_sk_list.lock);
3799
3800 return exact ? lm1 : lm2;
3801}
3802
3803static int l2cap_connect_cfm(struct hci_conn *hcon, u8 status)
3804{
3805 struct l2cap_conn *conn;
3806
3807 BT_DBG("hcon %p bdaddr %s status %d", hcon, batostr(&hcon->dst), status);
3808
3809 if (!(hcon->type == ACL_LINK || hcon->type == LE_LINK))
3810 return -EINVAL;
3811
3812 if (!status) {
3813 conn = l2cap_conn_add(hcon, status);
3814 if (conn)
3815 l2cap_conn_ready(conn);
3816 } else
3817 l2cap_conn_del(hcon, bt_err(status));
3818
3819 return 0;
3820}
3821
3822static int l2cap_disconn_ind(struct hci_conn *hcon)
3823{
3824 struct l2cap_conn *conn = hcon->l2cap_data;
3825
3826 BT_DBG("hcon %p", hcon);
3827
3828 if (hcon->type != ACL_LINK || !conn)
3829 return 0x13;
3830
3831 return conn->disc_reason;
3832}
3833
3834static int l2cap_disconn_cfm(struct hci_conn *hcon, u8 reason)
3835{
3836 BT_DBG("hcon %p reason %d", hcon, reason);
3837
3838 if (!(hcon->type == ACL_LINK || hcon->type == LE_LINK))
3839 return -EINVAL;
3840
3841 l2cap_conn_del(hcon, bt_err(reason));
3842
3843 return 0;
3844}
3845
3846static inline void l2cap_check_encryption(struct sock *sk, u8 encrypt)
3847{
3848 if (sk->sk_type != SOCK_SEQPACKET && sk->sk_type != SOCK_STREAM)
3849 return;
3850
3851 if (encrypt == 0x00) {
3852 if (l2cap_pi(sk)->sec_level == BT_SECURITY_MEDIUM) {
3853 l2cap_sock_clear_timer(sk);
3854 l2cap_sock_set_timer(sk, HZ * 5);
3855 } else if (l2cap_pi(sk)->sec_level == BT_SECURITY_HIGH)
3856 __l2cap_sock_close(sk, ECONNREFUSED);
3857 } else {
3858 if (l2cap_pi(sk)->sec_level == BT_SECURITY_MEDIUM)
3859 l2cap_sock_clear_timer(sk);
3860 }
3861}
3862
/* HCI security callback: authentication/encryption for the underlying
 * link finished with @status.  Walk every channel on the connection and
 * advance its connect state machine accordingly.  Runs with the
 * connection's channel list read-locked and each socket bh-locked.
 */
static int l2cap_security_cfm(struct hci_conn *hcon, u8 status, u8 encrypt)
{
	struct l2cap_conn *conn = hcon->l2cap_data;
	struct l2cap_chan *chan;

	if (!conn)
		return 0;

	BT_DBG("conn %p", conn);

	read_lock(&conn->chan_lock);

	list_for_each_entry(chan, &conn->chan_l, list) {
		struct sock *sk = chan->sk;

		bh_lock_sock(sk);

		/* A channel whose own connect request is still pending is
		 * handled by the response path, not here. */
		if (l2cap_pi(sk)->conf_state & L2CAP_CONF_CONNECT_PEND) {
			bh_unlock_sock(sk);
			continue;
		}

		/* Established or configuring channel: only the encryption
		 * state matters; sec failures are handled below per state. */
		if (!status && (sk->sk_state == BT_CONNECTED ||
					sk->sk_state == BT_CONFIG)) {
			l2cap_check_encryption(sk, encrypt);
			bh_unlock_sock(sk);
			continue;
		}

		if (sk->sk_state == BT_CONNECT) {
			if (!status) {
				/* Security complete: send the L2CAP connect
				 * request that was waiting on it. */
				struct l2cap_conn_req req;
				req.scid = cpu_to_le16(l2cap_pi(sk)->scid);
				req.psm = l2cap_pi(sk)->psm;

				chan->ident = l2cap_get_ident(conn);
				l2cap_pi(sk)->conf_state |= L2CAP_CONF_CONNECT_PEND;

				l2cap_send_cmd(conn, chan->ident,
					L2CAP_CONN_REQ, sizeof(req), &req);
			} else {
				/* Security failed: arm a short timer so the
				 * channel is torn down from timer context. */
				l2cap_sock_clear_timer(sk);
				l2cap_sock_set_timer(sk, HZ / 10);
			}
		} else if (sk->sk_state == BT_CONNECT2) {
			/* Incoming connection that was held for security:
			 * answer the peer now that the outcome is known. */
			struct l2cap_conn_rsp rsp;
			__u16 result;

			if (!status) {
				sk->sk_state = BT_CONFIG;
				result = L2CAP_CR_SUCCESS;
			} else {
				sk->sk_state = BT_DISCONN;
				l2cap_sock_set_timer(sk, HZ / 10);
				result = L2CAP_CR_SEC_BLOCK;
			}

			rsp.scid = cpu_to_le16(l2cap_pi(sk)->dcid);
			rsp.dcid = cpu_to_le16(l2cap_pi(sk)->scid);
			rsp.result = cpu_to_le16(result);
			rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
			l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
							sizeof(rsp), &rsp);
		}

		bh_unlock_sock(sk);
	}

	read_unlock(&conn->chan_lock);

	return 0;
}
3935
/* HCI ACL data callback: reassemble ACL fragments into complete L2CAP
 * frames and pass them to l2cap_recv_frame().  The incoming skb is
 * always consumed (forwarded, copied into conn->rx_skb, or freed).
 */
static int l2cap_recv_acldata(struct hci_conn *hcon, struct sk_buff *skb, u16 flags)
{
	struct l2cap_conn *conn = hcon->l2cap_data;

	/* ACL data can arrive before connect_cfm ran; create the
	 * connection object on demand. */
	if (!conn)
		conn = l2cap_conn_add(hcon, 0);

	if (!conn)
		goto drop;

	BT_DBG("conn %p len %d flags 0x%x", conn, skb->len, flags);

	if (!(flags & ACL_CONT)) {
		struct l2cap_hdr *hdr;
		struct l2cap_chan *chan;
		u16 cid;
		int len;

		/* A new start fragment while reassembly is in progress
		 * means the previous frame was cut short: discard it and
		 * mark the connection unreliable. */
		if (conn->rx_len) {
			BT_ERR("Unexpected start frame (len %d)", skb->len);
			kfree_skb(conn->rx_skb);
			conn->rx_skb = NULL;
			conn->rx_len = 0;
			l2cap_conn_unreliable(conn, ECOMM);
		}

		/* Start fragment always begin with Basic L2CAP header */
		if (skb->len < L2CAP_HDR_SIZE) {
			BT_ERR("Frame is too short (len %d)", skb->len);
			l2cap_conn_unreliable(conn, ECOMM);
			goto drop;
		}

		hdr = (struct l2cap_hdr *) skb->data;
		len = __le16_to_cpu(hdr->len) + L2CAP_HDR_SIZE;
		cid = __le16_to_cpu(hdr->cid);

		if (len == skb->len) {
			/* Complete frame received */
			l2cap_recv_frame(conn, skb);
			return 0;
		}

		BT_DBG("Start: total len %d, frag len %d", len, skb->len);

		if (skb->len > len) {
			BT_ERR("Frame is too long (len %d, expected len %d)",
				skb->len, len);
			l2cap_conn_unreliable(conn, ECOMM);
			goto drop;
		}

		/* Reject frames larger than the channel's receive MTU up
		 * front, before allocating a reassembly buffer.
		 * NOTE(review): the bh_unlock_sock() calls below have no
		 * visible matching lock here — l2cap_get_chan_by_scid()
		 * presumably returns with the sock bh-locked; confirm. */
		chan = l2cap_get_chan_by_scid(conn, cid);

		if (chan && chan->sk) {
			struct sock *sk = chan->sk;

			if (l2cap_pi(sk)->imtu < len - L2CAP_HDR_SIZE) {
				BT_ERR("Frame exceeding recv MTU (len %d, "
						"MTU %d)", len,
						l2cap_pi(sk)->imtu);
				bh_unlock_sock(sk);
				l2cap_conn_unreliable(conn, ECOMM);
				goto drop;
			}
			bh_unlock_sock(sk);
		}

		/* Allocate skb for the complete frame (with header) */
		conn->rx_skb = bt_skb_alloc(len, GFP_ATOMIC);
		if (!conn->rx_skb)
			goto drop;

		skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
								skb->len);
		conn->rx_len = len - skb->len;
	} else {
		BT_DBG("Cont: frag len %d (expecting %d)", skb->len, conn->rx_len);

		/* Continuation with no reassembly in progress. */
		if (!conn->rx_len) {
			BT_ERR("Unexpected continuation frame (len %d)", skb->len);
			l2cap_conn_unreliable(conn, ECOMM);
			goto drop;
		}

		/* Fragment overruns what the start frame announced. */
		if (skb->len > conn->rx_len) {
			BT_ERR("Fragment is too long (len %d, expected %d)",
					skb->len, conn->rx_len);
			kfree_skb(conn->rx_skb);
			conn->rx_skb = NULL;
			conn->rx_len = 0;
			l2cap_conn_unreliable(conn, ECOMM);
			goto drop;
		}

		skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
								skb->len);
		conn->rx_len -= skb->len;

		if (!conn->rx_len) {
			/* Complete frame received */
			l2cap_recv_frame(conn, conn->rx_skb);
			conn->rx_skb = NULL;
		}
	}

drop:
	/* Fall-through is intentional: the original fragment skb is freed
	 * on every path except complete-frame forwarding above. */
	kfree_skb(skb);
	return 0;
}
4046
4047static int l2cap_debugfs_show(struct seq_file *f, void *p)
4048{
4049 struct sock *sk;
4050 struct hlist_node *node;
4051
4052 read_lock_bh(&l2cap_sk_list.lock);
4053
4054 sk_for_each(sk, node, &l2cap_sk_list.head) {
4055 struct l2cap_pinfo *pi = l2cap_pi(sk);
4056
4057 seq_printf(f, "%s %s %d %d 0x%4.4x 0x%4.4x %d %d %d %d\n",
4058 batostr(&bt_sk(sk)->src),
4059 batostr(&bt_sk(sk)->dst),
4060 sk->sk_state, __le16_to_cpu(pi->psm),
4061 pi->scid, pi->dcid,
4062 pi->imtu, pi->omtu, pi->sec_level,
4063 pi->mode);
4064 }
4065
4066 read_unlock_bh(&l2cap_sk_list.lock);
4067
4068 return 0;
4069}
4070
/* debugfs open: bind l2cap_debugfs_show() to a single-shot seq_file. */
static int l2cap_debugfs_open(struct inode *inode, struct file *file)
{
	return single_open(file, l2cap_debugfs_show, inode->i_private);
}
4075
/* File operations for the debugfs "l2cap" socket-dump file. */
static const struct file_operations l2cap_debugfs_fops = {
	.open		= l2cap_debugfs_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};
4082
/* Handle of the debugfs entry created in l2cap_init(). */
static struct dentry *l2cap_debugfs;

/* L2CAP's registration record with the HCI core: the callbacks HCI
 * invokes for connection, disconnection, security and ACL data events
 * on behalf of this protocol. */
static struct hci_proto l2cap_hci_proto = {
	.name		= "L2CAP",
	.id		= HCI_PROTO_L2CAP,
	.connect_ind	= l2cap_connect_ind,
	.connect_cfm	= l2cap_connect_cfm,
	.disconn_ind	= l2cap_disconn_ind,
	.disconn_cfm	= l2cap_disconn_cfm,
	.security_cfm	= l2cap_security_cfm,
	.recv_acldata	= l2cap_recv_acldata
};
4095
4096int __init l2cap_init(void)
4097{
4098 int err;
4099
4100 err = l2cap_init_sockets();
4101 if (err < 0)
4102 return err;
4103
4104 _busy_wq = create_singlethread_workqueue("l2cap");
4105 if (!_busy_wq) {
4106 err = -ENOMEM;
4107 goto error;
4108 }
4109
4110 err = hci_register_proto(&l2cap_hci_proto);
4111 if (err < 0) {
4112 BT_ERR("L2CAP protocol registration failed");
4113 bt_sock_unregister(BTPROTO_L2CAP);
4114 goto error;
4115 }
4116
4117 if (bt_debugfs) {
4118 l2cap_debugfs = debugfs_create_file("l2cap", 0444,
4119 bt_debugfs, NULL, &l2cap_debugfs_fops);
4120 if (!l2cap_debugfs)
4121 BT_ERR("Failed to create L2CAP debug file");
4122 }
4123
4124 return 0;
4125
4126error:
4127 destroy_workqueue(_busy_wq);
4128 l2cap_cleanup_sockets();
4129 return err;
4130}
4131
/* Module teardown, in reverse order of l2cap_init(): remove the debugfs
 * file, drain and destroy the workqueue, unhook from HCI, and release
 * the socket layer.
 */
void l2cap_exit(void)
{
	debugfs_remove(l2cap_debugfs);

	/* Flush before destroying so queued work runs to completion. */
	flush_workqueue(_busy_wq);
	destroy_workqueue(_busy_wq);

	if (hci_unregister_proto(&l2cap_hci_proto) < 0)
		BT_ERR("L2CAP protocol unregistration failed");

	l2cap_cleanup_sockets();
}
4144
/* Runtime-tunable knob (mode 0644 in /sys/module/...): presumably
 * consulted when negotiating channel modes — confirm at usage sites. */
module_param(disable_ertm, bool, 0644);
MODULE_PARM_DESC(disable_ertm, "Disable enhanced retransmission mode");