Bluetooth: Don't lock sock inside l2cap_get_sock_by_scid()
[GitHub/mt8127/android_kernel_alcatel_ttab.git] / net / bluetooth / l2cap_core.c
1 /*
2 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (C) 2000-2001 Qualcomm Incorporated
4 Copyright (C) 2009-2010 Gustavo F. Padovan <gustavo@padovan.org>
5 Copyright (C) 2010 Google Inc.
6
7 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
8
9 This program is free software; you can redistribute it and/or modify
10 it under the terms of the GNU General Public License version 2 as
11 published by the Free Software Foundation;
12
13 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
14 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
15 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
16 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
17 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
18 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
19 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
20 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
21
22 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
23 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
24 SOFTWARE IS DISCLAIMED.
25 */
26
27 /* Bluetooth L2CAP core. */
28
29 #include <linux/module.h>
30
31 #include <linux/types.h>
32 #include <linux/capability.h>
33 #include <linux/errno.h>
34 #include <linux/kernel.h>
35 #include <linux/sched.h>
36 #include <linux/slab.h>
37 #include <linux/poll.h>
38 #include <linux/fcntl.h>
39 #include <linux/init.h>
40 #include <linux/interrupt.h>
41 #include <linux/socket.h>
42 #include <linux/skbuff.h>
43 #include <linux/list.h>
44 #include <linux/device.h>
45 #include <linux/debugfs.h>
46 #include <linux/seq_file.h>
47 #include <linux/uaccess.h>
48 #include <linux/crc16.h>
49 #include <net/sock.h>
50
51 #include <asm/system.h>
52 #include <asm/unaligned.h>
53
54 #include <net/bluetooth/bluetooth.h>
55 #include <net/bluetooth/hci_core.h>
56 #include <net/bluetooth/l2cap.h>
57
58 int disable_ertm;
59
60 static u32 l2cap_feat_mask = L2CAP_FEAT_FIXED_CHAN;
61 static u8 l2cap_fixed_chan[8] = { 0x02, };
62
63 static struct workqueue_struct *_busy_wq;
64
65 struct bt_sock_list l2cap_sk_list = {
66 .lock = __RW_LOCK_UNLOCKED(l2cap_sk_list.lock)
67 };
68
69 static void l2cap_busy_work(struct work_struct *work);
70
71 static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
72 u8 code, u8 ident, u16 dlen, void *data);
73 static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data);
74
75 static int l2cap_ertm_data_rcv(struct sock *sk, struct sk_buff *skb);
76
77 /* ---- L2CAP channels ---- */
78 static struct l2cap_chan *__l2cap_get_chan_by_dcid(struct l2cap_conn *conn, u16 cid)
79 {
80 struct l2cap_chan *c;
81
82 list_for_each_entry(c, &conn->chan_l, list) {
83 struct sock *s = c->sk;
84 if (l2cap_pi(s)->dcid == cid)
85 return c;
86 }
87 return NULL;
88
89 }
90
91 static struct l2cap_chan *__l2cap_get_chan_by_scid(struct l2cap_conn *conn, u16 cid)
92 {
93 struct l2cap_chan *c;
94
95 list_for_each_entry(c, &conn->chan_l, list) {
96 struct sock *s = c->sk;
97 if (l2cap_pi(s)->scid == cid)
98 return c;
99 }
100 return NULL;
101 }
102
/* Find channel with given SCID.
 * Returns the channel with its socket bh-locked (caller must
 * bh_unlock_sock(c->sk) when done), or NULL if no match. */
static struct l2cap_chan *l2cap_get_chan_by_scid(struct l2cap_conn *conn, u16 cid)
{
	struct l2cap_chan *c;

	read_lock(&conn->chan_lock);
	c = __l2cap_get_chan_by_scid(conn, cid);
	if (c)
		/* Lock the socket before dropping the list lock so the
		 * channel cannot be deleted under the caller. */
		bh_lock_sock(c->sk);
	read_unlock(&conn->chan_lock);
	return c;
}
116
117 static struct l2cap_chan *__l2cap_get_chan_by_ident(struct l2cap_conn *conn, u8 ident)
118 {
119 struct l2cap_chan *c;
120
121 list_for_each_entry(c, &conn->chan_l, list) {
122 if (c->ident == ident)
123 return c;
124 }
125 return NULL;
126 }
127
/* Find the channel whose pending signalling command used @ident.
 * Returns the channel with its socket bh-locked (caller unlocks),
 * or NULL if no match. */
static inline struct l2cap_chan *l2cap_get_chan_by_ident(struct l2cap_conn *conn, u8 ident)
{
	struct l2cap_chan *c;

	read_lock(&conn->chan_lock);
	c = __l2cap_get_chan_by_ident(conn, ident);
	if (c)
		/* Lock the socket before dropping the list lock so the
		 * channel cannot be deleted under the caller. */
		bh_lock_sock(c->sk);
	read_unlock(&conn->chan_lock);
	return c;
}
139
140 static u16 l2cap_alloc_cid(struct l2cap_conn *conn)
141 {
142 u16 cid = L2CAP_CID_DYN_START;
143
144 for (; cid < L2CAP_CID_DYN_END; cid++) {
145 if (!__l2cap_get_chan_by_scid(conn, cid))
146 return cid;
147 }
148
149 return 0;
150 }
151
152 static struct l2cap_chan *l2cap_chan_alloc(struct sock *sk)
153 {
154 struct l2cap_chan *chan;
155
156 chan = kzalloc(sizeof(*chan), GFP_ATOMIC);
157 if (!chan)
158 return NULL;
159
160 chan->sk = sk;
161
162 return chan;
163 }
164
/* Attach @chan to @conn: assign source/destination CIDs according to
 * socket type and link type, take a socket reference and link the
 * channel into the connection's channel list.
 * Caller must hold conn->chan_lock for writing. */
static void __l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan)
{
	struct sock *sk = chan->sk;

	BT_DBG("conn %p, psm 0x%2.2x, dcid 0x%4.4x", conn,
			l2cap_pi(sk)->psm, l2cap_pi(sk)->dcid);

	/* 0x13: remote user terminated connection (default reason) */
	conn->disc_reason = 0x13;

	l2cap_pi(sk)->conn = conn;

	if (sk->sk_type == SOCK_SEQPACKET || sk->sk_type == SOCK_STREAM) {
		if (conn->hcon->type == LE_LINK) {
			/* LE connection */
			l2cap_pi(sk)->omtu = L2CAP_LE_DEFAULT_MTU;
			l2cap_pi(sk)->scid = L2CAP_CID_LE_DATA;
			l2cap_pi(sk)->dcid = L2CAP_CID_LE_DATA;
		} else {
			/* Alloc CID for connection-oriented socket */
			l2cap_pi(sk)->scid = l2cap_alloc_cid(conn);
			l2cap_pi(sk)->omtu = L2CAP_DEFAULT_MTU;
		}
	} else if (sk->sk_type == SOCK_DGRAM) {
		/* Connectionless socket */
		l2cap_pi(sk)->scid = L2CAP_CID_CONN_LESS;
		l2cap_pi(sk)->dcid = L2CAP_CID_CONN_LESS;
		l2cap_pi(sk)->omtu = L2CAP_DEFAULT_MTU;
	} else {
		/* Raw socket can send/recv signalling messages only */
		l2cap_pi(sk)->scid = L2CAP_CID_SIGNALING;
		l2cap_pi(sk)->dcid = L2CAP_CID_SIGNALING;
		l2cap_pi(sk)->omtu = L2CAP_DEFAULT_MTU;
	}

	/* Reference dropped via __sock_put() in l2cap_chan_del() */
	sock_hold(sk);

	list_add(&chan->list, &conn->chan_l);
}
203
/* Delete channel.
 * Must be called on the locked socket.
 * Unlinks @chan from its connection, moves the socket to BT_CLOSED
 * (setting sk_err to @err if nonzero), wakes the owner or the
 * listening parent, tears down any ERTM state and frees @chan. */
void l2cap_chan_del(struct l2cap_chan *chan, int err)
{
	struct sock *sk = chan->sk;
	struct l2cap_conn *conn = l2cap_pi(sk)->conn;
	struct sock *parent = bt_sk(sk)->parent;

	l2cap_sock_clear_timer(sk);

	BT_DBG("chan %p, conn %p, err %d", chan, conn, err);

	if (conn) {
		/* Delete from channel list */
		write_lock_bh(&conn->chan_lock);
		list_del(&chan->list);
		write_unlock_bh(&conn->chan_lock);
		/* Drop the reference taken by __l2cap_chan_add() */
		__sock_put(sk);

		l2cap_pi(sk)->conn = NULL;
		hci_conn_put(conn->hcon);
	}

	sk->sk_state = BT_CLOSED;
	sock_set_flag(sk, SOCK_ZAPPED);

	if (err)
		sk->sk_err = err;

	if (parent) {
		/* Pending on a listener's accept queue: detach and wake it */
		bt_accept_unlink(sk);
		parent->sk_data_ready(parent, 0);
	} else
		sk->sk_state_change(sk);

	/* If configuration never completed, the ERTM machinery below was
	 * never armed; free the channel right away. */
	if (!(l2cap_pi(sk)->conf_state & L2CAP_CONF_OUTPUT_DONE &&
			l2cap_pi(sk)->conf_state & L2CAP_CONF_INPUT_DONE))
		goto free;

	skb_queue_purge(&chan->tx_q);

	if (l2cap_pi(sk)->mode == L2CAP_MODE_ERTM) {
		struct srej_list *l, *tmp;

		del_timer(&chan->retrans_timer);
		del_timer(&chan->monitor_timer);
		del_timer(&chan->ack_timer);

		skb_queue_purge(&chan->srej_q);
		skb_queue_purge(&chan->busy_q);

		/* Release pending SREJ bookkeeping entries */
		list_for_each_entry_safe(l, tmp, &chan->srej_l, list) {
			list_del(&l->list);
			kfree(l);
		}
	}

free:
	kfree(chan);
}
264
265 static inline u8 l2cap_get_auth_type(struct sock *sk)
266 {
267 if (sk->sk_type == SOCK_RAW) {
268 switch (l2cap_pi(sk)->sec_level) {
269 case BT_SECURITY_HIGH:
270 return HCI_AT_DEDICATED_BONDING_MITM;
271 case BT_SECURITY_MEDIUM:
272 return HCI_AT_DEDICATED_BONDING;
273 default:
274 return HCI_AT_NO_BONDING;
275 }
276 } else if (l2cap_pi(sk)->psm == cpu_to_le16(0x0001)) {
277 if (l2cap_pi(sk)->sec_level == BT_SECURITY_LOW)
278 l2cap_pi(sk)->sec_level = BT_SECURITY_SDP;
279
280 if (l2cap_pi(sk)->sec_level == BT_SECURITY_HIGH)
281 return HCI_AT_NO_BONDING_MITM;
282 else
283 return HCI_AT_NO_BONDING;
284 } else {
285 switch (l2cap_pi(sk)->sec_level) {
286 case BT_SECURITY_HIGH:
287 return HCI_AT_GENERAL_BONDING_MITM;
288 case BT_SECURITY_MEDIUM:
289 return HCI_AT_GENERAL_BONDING;
290 default:
291 return HCI_AT_NO_BONDING;
292 }
293 }
294 }
295
296 /* Service level security */
297 static inline int l2cap_check_security(struct sock *sk)
298 {
299 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
300 __u8 auth_type;
301
302 auth_type = l2cap_get_auth_type(sk);
303
304 return hci_conn_security(conn->hcon, l2cap_pi(sk)->sec_level,
305 auth_type);
306 }
307
/* Allocate the next signalling command identifier for @conn.
 *
 * Get next available identificator.
 * 1 - 128 are used by kernel.
 * 129 - 199 are reserved.
 * 200 - 254 are used by utilities like l2ping, etc.
 */
u8 l2cap_get_ident(struct l2cap_conn *conn)
{
	u8 id;

	spin_lock_bh(&conn->lock);

	/* Wrap back into the kernel range */
	if (++conn->tx_ident > 128)
		conn->tx_ident = 1;

	id = conn->tx_ident;

	spin_unlock_bh(&conn->lock);

	return id;
}
329
330 void l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len, void *data)
331 {
332 struct sk_buff *skb = l2cap_build_cmd(conn, code, ident, len, data);
333 u8 flags;
334
335 BT_DBG("code 0x%2.2x", code);
336
337 if (!skb)
338 return;
339
340 if (lmp_no_flush_capable(conn->hcon->hdev))
341 flags = ACL_START_NO_FLUSH;
342 else
343 flags = ACL_START;
344
345 hci_send_acl(conn->hcon, skb, flags);
346 }
347
/* Build and transmit an ERTM S-frame on @chan carrying @control.
 * Consumes any pending Final/Poll bit request recorded in
 * chan->conn_state and appends a CRC16 FCS when negotiated.
 * No-op unless the socket is BT_CONNECTED or on allocation failure. */
static inline void l2cap_send_sframe(struct l2cap_chan *chan, u16 control)
{
	struct sk_buff *skb;
	struct l2cap_hdr *lh;
	struct l2cap_pinfo *pi = l2cap_pi(chan->sk);
	struct l2cap_conn *conn = pi->conn;
	struct sock *sk = (struct sock *)pi;
	int count, hlen = L2CAP_HDR_SIZE + 2;	/* header + 16-bit control field */
	u8 flags;

	if (sk->sk_state != BT_CONNECTED)
		return;

	if (pi->fcs == L2CAP_FCS_CRC16)
		hlen += 2;	/* room for the FCS */

	BT_DBG("chan %p, control 0x%2.2x", chan, control);

	count = min_t(unsigned int, conn->mtu, hlen);
	control |= L2CAP_CTRL_FRAME_TYPE;

	/* Consume a requested Final bit, if any */
	if (chan->conn_state & L2CAP_CONN_SEND_FBIT) {
		control |= L2CAP_CTRL_FINAL;
		chan->conn_state &= ~L2CAP_CONN_SEND_FBIT;
	}

	/* Consume a requested Poll bit, if any */
	if (chan->conn_state & L2CAP_CONN_SEND_PBIT) {
		control |= L2CAP_CTRL_POLL;
		chan->conn_state &= ~L2CAP_CONN_SEND_PBIT;
	}

	skb = bt_skb_alloc(count, GFP_ATOMIC);
	if (!skb)
		return;

	lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
	lh->len = cpu_to_le16(hlen - L2CAP_HDR_SIZE);
	lh->cid = cpu_to_le16(pi->dcid);
	put_unaligned_le16(control, skb_put(skb, 2));

	if (pi->fcs == L2CAP_FCS_CRC16) {
		/* FCS covers the frame except the FCS field itself */
		u16 fcs = crc16(0, (u8 *)lh, count - 2);
		put_unaligned_le16(fcs, skb_put(skb, 2));
	}

	if (lmp_no_flush_capable(conn->hcon->hdev))
		flags = ACL_START_NO_FLUSH;
	else
		flags = ACL_START;

	hci_send_acl(pi->conn->hcon, skb, flags);
}
400
401 static inline void l2cap_send_rr_or_rnr(struct l2cap_chan *chan, u16 control)
402 {
403 if (chan->conn_state & L2CAP_CONN_LOCAL_BUSY) {
404 control |= L2CAP_SUPER_RCV_NOT_READY;
405 chan->conn_state |= L2CAP_CONN_RNR_SENT;
406 } else
407 control |= L2CAP_SUPER_RCV_READY;
408
409 control |= chan->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT;
410
411 l2cap_send_sframe(chan, control);
412 }
413
414 static inline int __l2cap_no_conn_pending(struct sock *sk)
415 {
416 return !(l2cap_pi(sk)->conf_state & L2CAP_CONF_CONNECT_PEND);
417 }
418
/* Kick off establishment of @chan: send a Connect Request once the
 * remote feature mask is known and security permits; otherwise start
 * the feature-mask Information Request exchange first. */
static void l2cap_do_start(struct l2cap_chan *chan)
{
	struct sock *sk = chan->sk;
	struct l2cap_conn *conn = l2cap_pi(sk)->conn;

	if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) {
		/* Feature exchange still in flight; l2cap_conn_start()
		 * retries once the response (or timeout) arrives. */
		if (!(conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE))
			return;

		if (l2cap_check_security(sk) && __l2cap_no_conn_pending(sk)) {
			struct l2cap_conn_req req;
			req.scid = cpu_to_le16(l2cap_pi(sk)->scid);
			req.psm = l2cap_pi(sk)->psm;

			chan->ident = l2cap_get_ident(conn);
			l2cap_pi(sk)->conf_state |= L2CAP_CONF_CONNECT_PEND;

			l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_REQ,
							sizeof(req), &req);
		}
	} else {
		/* Ask the remote for its feature mask before connecting */
		struct l2cap_info_req req;
		req.type = cpu_to_le16(L2CAP_IT_FEAT_MASK);

		conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
		conn->info_ident = l2cap_get_ident(conn);

		mod_timer(&conn->info_timer, jiffies +
					msecs_to_jiffies(L2CAP_INFO_TIMEOUT));

		l2cap_send_cmd(conn, conn->info_ident,
					L2CAP_INFO_REQ, sizeof(req), &req);
	}
}
453
454 static inline int l2cap_mode_supported(__u8 mode, __u32 feat_mask)
455 {
456 u32 local_feat_mask = l2cap_feat_mask;
457 if (!disable_ertm)
458 local_feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING;
459
460 switch (mode) {
461 case L2CAP_MODE_ERTM:
462 return L2CAP_FEAT_ERTM & feat_mask & local_feat_mask;
463 case L2CAP_MODE_STREAMING:
464 return L2CAP_FEAT_STREAMING & feat_mask & local_feat_mask;
465 default:
466 return 0x00;
467 }
468 }
469
/* Tear down @chan: flush its transmit queue, stop any ERTM timers,
 * send an L2CAP Disconnect Request and move the socket to BT_DISCONN
 * with sk_err set to @err.  No-op when @conn is NULL. */
void l2cap_send_disconn_req(struct l2cap_conn *conn, struct l2cap_chan *chan, int err)
{
	struct sock *sk;
	struct l2cap_disconn_req req;

	if (!conn)
		return;

	sk = chan->sk;

	skb_queue_purge(&chan->tx_q);

	if (l2cap_pi(sk)->mode == L2CAP_MODE_ERTM) {
		del_timer(&chan->retrans_timer);
		del_timer(&chan->monitor_timer);
		del_timer(&chan->ack_timer);
	}

	req.dcid = cpu_to_le16(l2cap_pi(sk)->dcid);
	req.scid = cpu_to_le16(l2cap_pi(sk)->scid);
	l2cap_send_cmd(conn, l2cap_get_ident(conn),
			L2CAP_DISCONN_REQ, sizeof(req), &req);

	sk->sk_state = BT_DISCONN;
	sk->sk_err = err;
}
496
497 /* ---- L2CAP connections ---- */
/* Drive connection setup for every channel on @conn: send Connect
 * Requests for outgoing channels in BT_CONNECT and Connect Responses
 * for incoming channels in BT_CONNECT2 (waiting on security). */
static void l2cap_conn_start(struct l2cap_conn *conn)
{
	struct l2cap_chan *chan, *tmp;

	BT_DBG("conn %p", conn);

	read_lock(&conn->chan_lock);

	list_for_each_entry_safe(chan, tmp, &conn->chan_l, list) {
		struct sock *sk = chan->sk;

		bh_lock_sock(sk);

		/* Only connection-oriented sockets need setup */
		if (sk->sk_type != SOCK_SEQPACKET &&
				sk->sk_type != SOCK_STREAM) {
			bh_unlock_sock(sk);
			continue;
		}

		if (sk->sk_state == BT_CONNECT) {
			struct l2cap_conn_req req;

			if (!l2cap_check_security(sk) ||
					!__l2cap_no_conn_pending(sk)) {
				bh_unlock_sock(sk);
				continue;
			}

			/* Requested mode unusable and the socket insists on
			 * it: close instead of falling back. */
			if (!l2cap_mode_supported(l2cap_pi(sk)->mode,
					conn->feat_mask)
					&& l2cap_pi(sk)->conf_state &
					L2CAP_CONF_STATE2_DEVICE) {
				/* __l2cap_sock_close() calls list_del(chan)
				 * so release the lock */
				/* NOTE(review): the _bh unlock/lock pair here
				 * does not match the plain read_lock() taken
				 * above — confirm intended calling context. */
				read_unlock_bh(&conn->chan_lock);
				__l2cap_sock_close(sk, ECONNRESET);
				read_lock_bh(&conn->chan_lock);
				bh_unlock_sock(sk);
				continue;
			}

			req.scid = cpu_to_le16(l2cap_pi(sk)->scid);
			req.psm = l2cap_pi(sk)->psm;

			chan->ident = l2cap_get_ident(conn);
			l2cap_pi(sk)->conf_state |= L2CAP_CONF_CONNECT_PEND;

			l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_REQ,
							sizeof(req), &req);

		} else if (sk->sk_state == BT_CONNECT2) {
			struct l2cap_conn_rsp rsp;
			char buf[128];
			rsp.scid = cpu_to_le16(l2cap_pi(sk)->dcid);
			rsp.dcid = cpu_to_le16(l2cap_pi(sk)->scid);

			if (l2cap_check_security(sk)) {
				if (bt_sk(sk)->defer_setup) {
					/* Userspace must accept first: answer
					 * "pending" and wake the listener. */
					struct sock *parent = bt_sk(sk)->parent;
					rsp.result = cpu_to_le16(L2CAP_CR_PEND);
					rsp.status = cpu_to_le16(L2CAP_CS_AUTHOR_PEND);
					parent->sk_data_ready(parent, 0);

				} else {
					sk->sk_state = BT_CONFIG;
					rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
					rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
				}
			} else {
				/* Security pending: authentication underway */
				rsp.result = cpu_to_le16(L2CAP_CR_PEND);
				rsp.status = cpu_to_le16(L2CAP_CS_AUTHEN_PEND);
			}

			l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
							sizeof(rsp), &rsp);

			/* Move to configuration only on a fresh success */
			if (l2cap_pi(sk)->conf_state & L2CAP_CONF_REQ_SENT ||
					rsp.result != L2CAP_CR_SUCCESS) {
				bh_unlock_sock(sk);
				continue;
			}

			l2cap_pi(sk)->conf_state |= L2CAP_CONF_REQ_SENT;
			l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
					l2cap_build_conf_req(chan, buf), buf);
			chan->num_conf_req++;
		}

		bh_unlock_sock(sk);
	}

	read_unlock(&conn->chan_lock);
}
591
/* Find socket with cid and source bdaddr.
 * Returns closest match: an exact source-address match wins over a
 * BDADDR_ANY wildcard bind.  The returned socket is NOT locked —
 * callers must take the socket lock themselves. */
static struct sock *l2cap_get_sock_by_scid(int state, __le16 cid, bdaddr_t *src)
{
	struct sock *sk = NULL, *sk1 = NULL;
	struct hlist_node *node;

	read_lock(&l2cap_sk_list.lock);

	sk_for_each(sk, node, &l2cap_sk_list.head) {
		if (state && sk->sk_state != state)
			continue;

		if (l2cap_pi(sk)->scid == cid) {
			/* Exact match. */
			if (!bacmp(&bt_sk(sk)->src, src))
				break;

			/* Closest match */
			if (!bacmp(&bt_sk(sk)->src, BDADDR_ANY))
				sk1 = sk;
		}
	}

	read_unlock(&l2cap_sk_list.lock);

	/* node is non-NULL only when the loop broke on an exact match */
	return node ? sk : sk1;
}
621
622 static void l2cap_le_conn_ready(struct l2cap_conn *conn)
623 {
624 struct sock *parent, *sk;
625 struct l2cap_chan *chan;
626
627 BT_DBG("");
628
629 /* Check if we have socket listening on cid */
630 parent = l2cap_get_sock_by_scid(BT_LISTEN, L2CAP_CID_LE_DATA,
631 conn->src);
632 if (!parent)
633 return;
634
635 /* Check for backlog size */
636 if (sk_acceptq_is_full(parent)) {
637 BT_DBG("backlog full %d", parent->sk_ack_backlog);
638 goto clean;
639 }
640
641 sk = l2cap_sock_alloc(sock_net(parent), NULL, BTPROTO_L2CAP, GFP_ATOMIC);
642 if (!sk)
643 goto clean;
644
645 chan = l2cap_chan_alloc(sk);
646 if (!chan) {
647 l2cap_sock_kill(sk);
648 goto clean;
649 }
650
651 write_lock_bh(&conn->chan_lock);
652
653 hci_conn_hold(conn->hcon);
654
655 l2cap_sock_init(sk, parent);
656
657 bacpy(&bt_sk(sk)->src, conn->src);
658 bacpy(&bt_sk(sk)->dst, conn->dst);
659
660 bt_accept_enqueue(parent, sk);
661
662 __l2cap_chan_add(conn, chan);
663
664 l2cap_pi(sk)->chan = chan;
665
666 l2cap_sock_set_timer(sk, sk->sk_sndtimeo);
667
668 sk->sk_state = BT_CONNECTED;
669 parent->sk_data_ready(parent, 0);
670
671 write_unlock_bh(&conn->chan_lock);
672
673 clean:
674 bh_unlock_sock(parent);
675 }
676
/* HCI link is up: hand incoming LE links to listeners, complete LE and
 * raw/datagram channels immediately, and start L2CAP-level setup for
 * pending SOCK_SEQPACKET/SOCK_STREAM channels. */
static void l2cap_conn_ready(struct l2cap_conn *conn)
{
	struct l2cap_chan *chan;

	BT_DBG("conn %p", conn);

	/* Incoming LE link: offer it to a listening LE socket */
	if (!conn->hcon->out && conn->hcon->type == LE_LINK)
		l2cap_le_conn_ready(conn);

	read_lock(&conn->chan_lock);

	list_for_each_entry(chan, &conn->chan_l, list) {
		struct sock *sk = chan->sk;

		bh_lock_sock(sk);

		/* LE has no L2CAP-level connect/config phase */
		if (conn->hcon->type == LE_LINK) {
			l2cap_sock_clear_timer(sk);
			sk->sk_state = BT_CONNECTED;
			sk->sk_state_change(sk);
		}

		if (sk->sk_type != SOCK_SEQPACKET &&
				sk->sk_type != SOCK_STREAM) {
			/* Raw/connectionless: ready as soon as the link is */
			l2cap_sock_clear_timer(sk);
			sk->sk_state = BT_CONNECTED;
			sk->sk_state_change(sk);
		} else if (sk->sk_state == BT_CONNECT)
			l2cap_do_start(chan);

		bh_unlock_sock(sk);
	}

	read_unlock(&conn->chan_lock);
}
712
713 /* Notify sockets that we cannot guaranty reliability anymore */
714 static void l2cap_conn_unreliable(struct l2cap_conn *conn, int err)
715 {
716 struct l2cap_chan *chan;
717
718 BT_DBG("conn %p", conn);
719
720 read_lock(&conn->chan_lock);
721
722 list_for_each_entry(chan, &conn->chan_l, list) {
723 struct sock *sk = chan->sk;
724
725 if (l2cap_pi(sk)->force_reliable)
726 sk->sk_err = err;
727 }
728
729 read_unlock(&conn->chan_lock);
730 }
731
/* Information Request timer expired: the remote never answered the
 * feature-mask query.  Mark the exchange done anyway and proceed with
 * channel setup using the (zero) defaults. */
static void l2cap_info_timeout(unsigned long arg)
{
	struct l2cap_conn *conn = (void *) arg;

	conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
	conn->info_ident = 0;

	l2cap_conn_start(conn);
}
741
/* Create the L2CAP connection object for @hcon, or return the one
 * already attached.  A nonzero @status suppresses creation.
 * Returns NULL on allocation failure. */
static struct l2cap_conn *l2cap_conn_add(struct hci_conn *hcon, u8 status)
{
	struct l2cap_conn *conn = hcon->l2cap_data;

	if (conn || status)
		return conn;

	conn = kzalloc(sizeof(struct l2cap_conn), GFP_ATOMIC);
	if (!conn)
		return NULL;

	hcon->l2cap_data = conn;
	conn->hcon = hcon;

	BT_DBG("hcon %p conn %p", hcon, conn);

	/* Prefer the controller's LE MTU on LE links when advertised */
	if (hcon->hdev->le_mtu && hcon->type == LE_LINK)
		conn->mtu = hcon->hdev->le_mtu;
	else
		conn->mtu = hcon->hdev->acl_mtu;

	conn->src = &hcon->hdev->bdaddr;
	conn->dst = &hcon->dst;

	conn->feat_mask = 0;

	spin_lock_init(&conn->lock);
	rwlock_init(&conn->chan_lock);

	INIT_LIST_HEAD(&conn->chan_l);

	/* The feature-mask info exchange only exists on BR/EDR links */
	if (hcon->type != LE_LINK)
		setup_timer(&conn->info_timer, l2cap_info_timeout,
						(unsigned long) conn);

	/* 0x13: remote user terminated connection (default reason) */
	conn->disc_reason = 0x13;

	return conn;
}
781
/* Destroy the L2CAP connection attached to @hcon, killing every
 * channel on it with error @err, then free the connection object. */
static void l2cap_conn_del(struct hci_conn *hcon, int err)
{
	struct l2cap_conn *conn = hcon->l2cap_data;
	struct l2cap_chan *chan, *l;
	struct sock *sk;

	if (!conn)
		return;

	BT_DBG("hcon %p conn %p, err %d", hcon, conn, err);

	/* Drop any partially reassembled frame */
	kfree_skb(conn->rx_skb);

	/* Kill channels */
	list_for_each_entry_safe(chan, l, &conn->chan_l, list) {
		sk = chan->sk;
		bh_lock_sock(sk);
		l2cap_chan_del(chan, err);
		bh_unlock_sock(sk);
		l2cap_sock_kill(sk);
	}

	if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT)
		del_timer_sync(&conn->info_timer);

	hcon->l2cap_data = NULL;
	kfree(conn);
}
810
/* Add @chan to @conn's channel list under the write lock. */
static inline void l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan)
{
	write_lock_bh(&conn->chan_lock);
	__l2cap_chan_add(conn, chan);
	write_unlock_bh(&conn->chan_lock);
}
817
818 /* ---- Socket interface ---- */
819
/* Find socket with psm and source bdaddr.
 * Returns closest match: an exact source-address match wins over a
 * BDADDR_ANY wildcard bind.  The returned socket is not locked. */
static struct sock *l2cap_get_sock_by_psm(int state, __le16 psm, bdaddr_t *src)
{
	struct sock *sk = NULL, *sk1 = NULL;
	struct hlist_node *node;

	read_lock(&l2cap_sk_list.lock);

	sk_for_each(sk, node, &l2cap_sk_list.head) {
		if (state && sk->sk_state != state)
			continue;

		if (l2cap_pi(sk)->psm == psm) {
			/* Exact match. */
			if (!bacmp(&bt_sk(sk)->src, src))
				break;

			/* Closest match */
			if (!bacmp(&bt_sk(sk)->src, BDADDR_ANY))
				sk1 = sk;
		}
	}

	read_unlock(&l2cap_sk_list.lock);

	/* node is non-NULL only when the loop broke on an exact match */
	return node ? sk : sk1;
}
849
/* Establish an outgoing L2CAP connection for @sk: resolve an HCI route
 * from the socket's addresses, create or reuse the ACL/LE link, attach
 * a new channel and — if the link is already up — start L2CAP-level
 * setup.  Returns 0 on success or a negative errno. */
int l2cap_do_connect(struct sock *sk)
{
	bdaddr_t *src = &bt_sk(sk)->src;
	bdaddr_t *dst = &bt_sk(sk)->dst;
	struct l2cap_conn *conn;
	struct l2cap_chan *chan;
	struct hci_conn *hcon;
	struct hci_dev *hdev;
	__u8 auth_type;
	int err;

	BT_DBG("%s -> %s psm 0x%2.2x", batostr(src), batostr(dst),
							l2cap_pi(sk)->psm);

	hdev = hci_get_route(dst, src);
	if (!hdev)
		return -EHOSTUNREACH;

	hci_dev_lock_bh(hdev);

	auth_type = l2cap_get_auth_type(sk);

	/* A pre-set LE data DCID selects an LE link, otherwise BR/EDR */
	if (l2cap_pi(sk)->dcid == L2CAP_CID_LE_DATA)
		hcon = hci_connect(hdev, LE_LINK, dst,
					l2cap_pi(sk)->sec_level, auth_type);
	else
		hcon = hci_connect(hdev, ACL_LINK, dst,
					l2cap_pi(sk)->sec_level, auth_type);

	if (IS_ERR(hcon)) {
		err = PTR_ERR(hcon);
		goto done;
	}

	conn = l2cap_conn_add(hcon, 0);
	if (!conn) {
		hci_conn_put(hcon);
		err = -ENOMEM;
		goto done;
	}

	chan = l2cap_chan_alloc(sk);
	if (!chan) {
		hci_conn_put(hcon);
		err = -ENOMEM;
		goto done;
	}

	/* Update source addr of the socket */
	bacpy(src, conn->src);

	l2cap_chan_add(conn, chan);

	l2cap_pi(sk)->chan = chan;

	sk->sk_state = BT_CONNECT;
	l2cap_sock_set_timer(sk, sk->sk_sndtimeo);

	if (hcon->state == BT_CONNECTED) {
		if (sk->sk_type != SOCK_SEQPACKET &&
				sk->sk_type != SOCK_STREAM) {
			/* Raw/datagram: connected once security passes */
			l2cap_sock_clear_timer(sk);
			if (l2cap_check_security(sk))
				sk->sk_state = BT_CONNECTED;
		} else
			l2cap_do_start(chan);
	}

	err = 0;

done:
	hci_dev_unlock_bh(hdev);
	hci_dev_put(hdev);
	return err;
}
925
/* Sleep (interruptibly, in HZ/5 slices) until all transmitted ERTM
 * frames have been acked or the connection goes away.  Called with the
 * socket locked; the lock is dropped and retaken around each wait.
 * Returns 0, a pending socket error, or an errno on signal. */
int __l2cap_wait_ack(struct sock *sk)
{
	DECLARE_WAITQUEUE(wait, current);
	int err = 0;
	int timeo = HZ/5;

	add_wait_queue(sk_sleep(sk), &wait);
	while ((l2cap_pi(sk)->chan->unacked_frames > 0 && l2cap_pi(sk)->conn)) {
		set_current_state(TASK_INTERRUPTIBLE);

		/* Re-arm the slice if the previous one fully elapsed */
		if (!timeo)
			timeo = HZ/5;

		if (signal_pending(current)) {
			err = sock_intr_errno(timeo);
			break;
		}

		/* Drop the socket lock while sleeping so acks can come in */
		release_sock(sk);
		timeo = schedule_timeout(timeo);
		lock_sock(sk);

		err = sock_error(sk);
		if (err)
			break;
	}
	set_current_state(TASK_RUNNING);
	remove_wait_queue(sk_sleep(sk), &wait);
	return err;
}
956
/* ERTM monitor timer: the peer did not respond to our poll.  Give up
 * (disconnect) after remote_max_tx retries, otherwise re-arm the
 * monitor and poll again with a P-bit RR/RNR. */
static void l2cap_monitor_timeout(unsigned long arg)
{
	struct l2cap_chan *chan = (void *) arg;
	struct sock *sk = chan->sk;

	BT_DBG("chan %p", chan);

	bh_lock_sock(sk);
	if (chan->retry_count >= chan->remote_max_tx) {
		l2cap_send_disconn_req(l2cap_pi(sk)->conn, chan, ECONNABORTED);
		bh_unlock_sock(sk);
		return;
	}

	chan->retry_count++;
	__mod_monitor_timer();

	l2cap_send_rr_or_rnr(chan, L2CAP_CTRL_POLL);
	bh_unlock_sock(sk);
}
977
/* ERTM retransmission timer: no acknowledgement arrived in time.
 * Enter the wait-for-Final-bit state and poll the peer with a P-bit
 * supervisory frame; the monitor timer takes over from here. */
static void l2cap_retrans_timeout(unsigned long arg)
{
	struct l2cap_chan *chan = (void *) arg;
	struct sock *sk = chan->sk;

	BT_DBG("chan %p", chan);

	bh_lock_sock(sk);
	chan->retry_count = 1;
	__mod_monitor_timer();

	chan->conn_state |= L2CAP_CONN_WAIT_F;

	l2cap_send_rr_or_rnr(chan, L2CAP_CTRL_POLL);
	bh_unlock_sock(sk);
}
994
/* Free frames at the head of the tx queue that the peer has acked
 * (everything before expected_ack_seq), and stop the retransmission
 * timer once no unacked frames remain. */
static void l2cap_drop_acked_frames(struct l2cap_chan *chan)
{
	struct sk_buff *skb;

	while ((skb = skb_peek(&chan->tx_q)) &&
			chan->unacked_frames) {
		/* First still-unacked frame reached: stop */
		if (bt_cb(skb)->tx_seq == chan->expected_ack_seq)
			break;

		skb = skb_dequeue(&chan->tx_q);
		kfree_skb(skb);

		chan->unacked_frames--;
	}

	if (!chan->unacked_frames)
		del_timer(&chan->retrans_timer);
}
1013
1014 void l2cap_do_send(struct sock *sk, struct sk_buff *skb)
1015 {
1016 struct l2cap_pinfo *pi = l2cap_pi(sk);
1017 struct hci_conn *hcon = pi->conn->hcon;
1018 u16 flags;
1019
1020 BT_DBG("sk %p, skb %p len %d", sk, skb, skb->len);
1021
1022 if (!pi->flushable && lmp_no_flush_capable(hcon->hdev))
1023 flags = ACL_START_NO_FLUSH;
1024 else
1025 flags = ACL_START;
1026
1027 hci_send_acl(hcon, skb, flags);
1028 }
1029
/* Streaming mode: transmit the entire tx queue, stamping each frame
 * with the next TxSeq and an optional CRC16 FCS.  Frames are not kept
 * for retransmission (streaming mode has none). */
void l2cap_streaming_send(struct l2cap_chan *chan)
{
	struct sock *sk = chan->sk;
	struct sk_buff *skb;
	struct l2cap_pinfo *pi = l2cap_pi(sk);
	u16 control, fcs;

	while ((skb = skb_dequeue(&chan->tx_q))) {
		/* Patch TxSeq into the pre-built control field */
		control = get_unaligned_le16(skb->data + L2CAP_HDR_SIZE);
		control |= chan->next_tx_seq << L2CAP_CTRL_TXSEQ_SHIFT;
		put_unaligned_le16(control, skb->data + L2CAP_HDR_SIZE);

		if (pi->fcs == L2CAP_FCS_CRC16) {
			/* FCS covers everything except its own 2 bytes */
			fcs = crc16(0, (u8 *)skb->data, skb->len - 2);
			put_unaligned_le16(fcs, skb->data + skb->len - 2);
		}

		l2cap_do_send(sk, skb);

		/* TxSeq is modulo 64 */
		chan->next_tx_seq = (chan->next_tx_seq + 1) % 64;
	}
}
1052
1053 static void l2cap_retransmit_one_frame(struct l2cap_chan *chan, u8 tx_seq)
1054 {
1055 struct sock *sk = chan->sk;
1056 struct l2cap_pinfo *pi = l2cap_pi(sk);
1057 struct sk_buff *skb, *tx_skb;
1058 u16 control, fcs;
1059
1060 skb = skb_peek(&chan->tx_q);
1061 if (!skb)
1062 return;
1063
1064 do {
1065 if (bt_cb(skb)->tx_seq == tx_seq)
1066 break;
1067
1068 if (skb_queue_is_last(&chan->tx_q, skb))
1069 return;
1070
1071 } while ((skb = skb_queue_next(&chan->tx_q, skb)));
1072
1073 if (chan->remote_max_tx &&
1074 bt_cb(skb)->retries == chan->remote_max_tx) {
1075 l2cap_send_disconn_req(pi->conn, chan, ECONNABORTED);
1076 return;
1077 }
1078
1079 tx_skb = skb_clone(skb, GFP_ATOMIC);
1080 bt_cb(skb)->retries++;
1081 control = get_unaligned_le16(tx_skb->data + L2CAP_HDR_SIZE);
1082
1083 if (chan->conn_state & L2CAP_CONN_SEND_FBIT) {
1084 control |= L2CAP_CTRL_FINAL;
1085 chan->conn_state &= ~L2CAP_CONN_SEND_FBIT;
1086 }
1087
1088 control |= (chan->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT)
1089 | (tx_seq << L2CAP_CTRL_TXSEQ_SHIFT);
1090
1091 put_unaligned_le16(control, tx_skb->data + L2CAP_HDR_SIZE);
1092
1093 if (pi->fcs == L2CAP_FCS_CRC16) {
1094 fcs = crc16(0, (u8 *)tx_skb->data, tx_skb->len - 2);
1095 put_unaligned_le16(fcs, tx_skb->data + tx_skb->len - 2);
1096 }
1097
1098 l2cap_do_send(sk, tx_skb);
1099 }
1100
1101 int l2cap_ertm_send(struct l2cap_chan *chan)
1102 {
1103 struct sk_buff *skb, *tx_skb;
1104 struct sock *sk = chan->sk;
1105 struct l2cap_pinfo *pi = l2cap_pi(sk);
1106 u16 control, fcs;
1107 int nsent = 0;
1108
1109 if (sk->sk_state != BT_CONNECTED)
1110 return -ENOTCONN;
1111
1112 while ((skb = chan->tx_send_head) && (!l2cap_tx_window_full(chan))) {
1113
1114 if (chan->remote_max_tx &&
1115 bt_cb(skb)->retries == chan->remote_max_tx) {
1116 l2cap_send_disconn_req(pi->conn, chan, ECONNABORTED);
1117 break;
1118 }
1119
1120 tx_skb = skb_clone(skb, GFP_ATOMIC);
1121
1122 bt_cb(skb)->retries++;
1123
1124 control = get_unaligned_le16(tx_skb->data + L2CAP_HDR_SIZE);
1125 control &= L2CAP_CTRL_SAR;
1126
1127 if (chan->conn_state & L2CAP_CONN_SEND_FBIT) {
1128 control |= L2CAP_CTRL_FINAL;
1129 chan->conn_state &= ~L2CAP_CONN_SEND_FBIT;
1130 }
1131 control |= (chan->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT)
1132 | (chan->next_tx_seq << L2CAP_CTRL_TXSEQ_SHIFT);
1133 put_unaligned_le16(control, tx_skb->data + L2CAP_HDR_SIZE);
1134
1135
1136 if (pi->fcs == L2CAP_FCS_CRC16) {
1137 fcs = crc16(0, (u8 *)skb->data, tx_skb->len - 2);
1138 put_unaligned_le16(fcs, skb->data + tx_skb->len - 2);
1139 }
1140
1141 l2cap_do_send(sk, tx_skb);
1142
1143 __mod_retrans_timer();
1144
1145 bt_cb(skb)->tx_seq = chan->next_tx_seq;
1146 chan->next_tx_seq = (chan->next_tx_seq + 1) % 64;
1147
1148 if (bt_cb(skb)->retries == 1)
1149 chan->unacked_frames++;
1150
1151 chan->frames_sent++;
1152
1153 if (skb_queue_is_last(&chan->tx_q, skb))
1154 chan->tx_send_head = NULL;
1155 else
1156 chan->tx_send_head = skb_queue_next(&chan->tx_q, skb);
1157
1158 nsent++;
1159 }
1160
1161 return nsent;
1162 }
1163
1164 static int l2cap_retransmit_frames(struct l2cap_chan *chan)
1165 {
1166 int ret;
1167
1168 if (!skb_queue_empty(&chan->tx_q))
1169 chan->tx_send_head = chan->tx_q.next;
1170
1171 chan->next_tx_seq = chan->expected_ack_seq;
1172 ret = l2cap_ertm_send(chan);
1173 return ret;
1174 }
1175
/* Acknowledge received I-frames.  While locally busy, send an RNR.
 * Otherwise try to piggy-back the ack (ReqSeq) on pending outgoing
 * data, falling back to an explicit RR when nothing was sent. */
static void l2cap_send_ack(struct l2cap_chan *chan)
{
	u16 control = 0;

	/* ReqSeq acks everything received up to buffer_seq */
	control |= chan->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT;

	if (chan->conn_state & L2CAP_CONN_LOCAL_BUSY) {
		control |= L2CAP_SUPER_RCV_NOT_READY;
		chan->conn_state |= L2CAP_CONN_RNR_SENT;
		l2cap_send_sframe(chan, control);
		return;
	}

	/* I-frames carry the ReqSeq ack implicitly */
	if (l2cap_ertm_send(chan) > 0)
		return;

	control |= L2CAP_SUPER_RCV_READY;
	l2cap_send_sframe(chan, control);
}
1195
1196 static void l2cap_send_srejtail(struct l2cap_chan *chan)
1197 {
1198 struct srej_list *tail;
1199 u16 control;
1200
1201 control = L2CAP_SUPER_SELECT_REJECT;
1202 control |= L2CAP_CTRL_FINAL;
1203
1204 tail = list_entry((&chan->srej_l)->prev, struct srej_list, list);
1205 control |= tail->tx_seq << L2CAP_CTRL_REQSEQ_SHIFT;
1206
1207 l2cap_send_sframe(chan, control);
1208 }
1209
/* Copy @len bytes of user data from @msg into @skb: the first @count
 * bytes fill skb's linear area, the remainder is chained as fragments
 * of at most conn->mtu bytes on skb's frag_list.
 * Returns bytes copied or a negative errno.
 * NOTE(review): fragments allocated before a later failure stay
 * attached to @skb — presumably freed by the caller's kfree_skb();
 * confirm against the callers. */
static inline int l2cap_skbuff_fromiovec(struct sock *sk, struct msghdr *msg, int len, int count, struct sk_buff *skb)
{
	struct l2cap_conn *conn = l2cap_pi(sk)->conn;
	struct sk_buff **frag;
	int err, sent = 0;

	if (memcpy_fromiovec(skb_put(skb, count), msg->msg_iov, count))
		return -EFAULT;

	sent += count;
	len -= count;

	/* Continuation fragments (no L2CAP header) */
	frag = &skb_shinfo(skb)->frag_list;
	while (len) {
		count = min_t(unsigned int, conn->mtu, len);

		*frag = bt_skb_send_alloc(sk, count, msg->msg_flags & MSG_DONTWAIT, &err);
		if (!*frag)
			return err;
		if (memcpy_fromiovec(skb_put(*frag, count), msg->msg_iov, count))
			return -EFAULT;

		sent += count;
		len -= count;

		frag = &(*frag)->next;
	}

	return sent;
}
1241
/* Build a connectionless-channel PDU: L2CAP header, 2-byte PSM, then
 * @len bytes of user data from @msg (fragmented to conn->mtu).
 * Returns the skb or an ERR_PTR on failure. */
struct sk_buff *l2cap_create_connless_pdu(struct sock *sk, struct msghdr *msg, size_t len)
{
	struct l2cap_conn *conn = l2cap_pi(sk)->conn;
	struct sk_buff *skb;
	int err, count, hlen = L2CAP_HDR_SIZE + 2;	/* header + PSM field */
	struct l2cap_hdr *lh;

	BT_DBG("sk %p len %d", sk, (int)len);

	/* Linear part carries at most one MTU's worth of payload */
	count = min_t(unsigned int, (conn->mtu - hlen), len);
	skb = bt_skb_send_alloc(sk, count + hlen,
			msg->msg_flags & MSG_DONTWAIT, &err);
	if (!skb)
		return ERR_PTR(err);

	/* Create L2CAP header */
	lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
	lh->cid = cpu_to_le16(l2cap_pi(sk)->dcid);
	lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
	put_unaligned_le16(l2cap_pi(sk)->psm, skb_put(skb, 2));

	err = l2cap_skbuff_fromiovec(sk, msg, len, count, skb);
	if (unlikely(err < 0)) {
		kfree_skb(skb);
		return ERR_PTR(err);
	}
	return skb;
}
1270
1271 struct sk_buff *l2cap_create_basic_pdu(struct sock *sk, struct msghdr *msg, size_t len)
1272 {
1273 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
1274 struct sk_buff *skb;
1275 int err, count, hlen = L2CAP_HDR_SIZE;
1276 struct l2cap_hdr *lh;
1277
1278 BT_DBG("sk %p len %d", sk, (int)len);
1279
1280 count = min_t(unsigned int, (conn->mtu - hlen), len);
1281 skb = bt_skb_send_alloc(sk, count + hlen,
1282 msg->msg_flags & MSG_DONTWAIT, &err);
1283 if (!skb)
1284 return ERR_PTR(err);
1285
1286 /* Create L2CAP header */
1287 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
1288 lh->cid = cpu_to_le16(l2cap_pi(sk)->dcid);
1289 lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
1290
1291 err = l2cap_skbuff_fromiovec(sk, msg, len, count, skb);
1292 if (unlikely(err < 0)) {
1293 kfree_skb(skb);
1294 return ERR_PTR(err);
1295 }
1296 return skb;
1297 }
1298
1299 struct sk_buff *l2cap_create_iframe_pdu(struct sock *sk, struct msghdr *msg, size_t len, u16 control, u16 sdulen)
1300 {
1301 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
1302 struct sk_buff *skb;
1303 int err, count, hlen = L2CAP_HDR_SIZE + 2;
1304 struct l2cap_hdr *lh;
1305
1306 BT_DBG("sk %p len %d", sk, (int)len);
1307
1308 if (!conn)
1309 return ERR_PTR(-ENOTCONN);
1310
1311 if (sdulen)
1312 hlen += 2;
1313
1314 if (l2cap_pi(sk)->fcs == L2CAP_FCS_CRC16)
1315 hlen += 2;
1316
1317 count = min_t(unsigned int, (conn->mtu - hlen), len);
1318 skb = bt_skb_send_alloc(sk, count + hlen,
1319 msg->msg_flags & MSG_DONTWAIT, &err);
1320 if (!skb)
1321 return ERR_PTR(err);
1322
1323 /* Create L2CAP header */
1324 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
1325 lh->cid = cpu_to_le16(l2cap_pi(sk)->dcid);
1326 lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
1327 put_unaligned_le16(control, skb_put(skb, 2));
1328 if (sdulen)
1329 put_unaligned_le16(sdulen, skb_put(skb, 2));
1330
1331 err = l2cap_skbuff_fromiovec(sk, msg, len, count, skb);
1332 if (unlikely(err < 0)) {
1333 kfree_skb(skb);
1334 return ERR_PTR(err);
1335 }
1336
1337 if (l2cap_pi(sk)->fcs == L2CAP_FCS_CRC16)
1338 put_unaligned_le16(0, skb_put(skb, 2));
1339
1340 bt_cb(skb)->retries = 0;
1341 return skb;
1342 }
1343
/* Segment an SDU of @len bytes into I-frames of at most chan->remote_mps
 * bytes and append them to the channel's transmit queue.
 *
 * The first frame carries SAR "start" plus the total SDU length, middle
 * frames "continue", the last one "end".  Frames are collected on a
 * private queue first so that an allocation failure mid-SDU can discard
 * the partial SDU without touching chan->tx_q.
 *
 * Returns the number of bytes queued or a negative errno.
 */
int l2cap_sar_segment_sdu(struct l2cap_chan *chan, struct msghdr *msg, size_t len)
{
	struct sock *sk = chan->sk;
	struct sk_buff *skb;
	struct sk_buff_head sar_queue;
	u16 control;
	size_t size = 0;

	skb_queue_head_init(&sar_queue);
	control = L2CAP_SDU_START;
	/* start frame: the sdulen argument carries the full SDU length */
	skb = l2cap_create_iframe_pdu(sk, msg, chan->remote_mps, control, len);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	__skb_queue_tail(&sar_queue, skb);
	len -= chan->remote_mps;
	size += chan->remote_mps;

	while (len > 0) {
		size_t buflen;

		if (len > chan->remote_mps) {
			control = L2CAP_SDU_CONTINUE;
			buflen = chan->remote_mps;
		} else {
			control = L2CAP_SDU_END;
			buflen = len;
		}

		skb = l2cap_create_iframe_pdu(sk, msg, buflen, control, 0);
		if (IS_ERR(skb)) {
			/* drop the partially built SDU */
			skb_queue_purge(&sar_queue);
			return PTR_ERR(skb);
		}

		__skb_queue_tail(&sar_queue, skb);
		len -= buflen;
		size += buflen;
	}
	skb_queue_splice_tail(&sar_queue, &chan->tx_q);
	/* if nothing was pending, transmission starts at our first frame */
	if (chan->tx_send_head == NULL)
		chan->tx_send_head = sar_queue.next;

	return size;
}
1389
1390 static void l2cap_chan_ready(struct sock *sk)
1391 {
1392 struct sock *parent = bt_sk(sk)->parent;
1393
1394 BT_DBG("sk %p, parent %p", sk, parent);
1395
1396 l2cap_pi(sk)->conf_state = 0;
1397 l2cap_sock_clear_timer(sk);
1398
1399 if (!parent) {
1400 /* Outgoing channel.
1401 * Wake up socket sleeping on connect.
1402 */
1403 sk->sk_state = BT_CONNECTED;
1404 sk->sk_state_change(sk);
1405 } else {
1406 /* Incoming channel.
1407 * Wake up socket sleeping on accept.
1408 */
1409 parent->sk_data_ready(parent, 0);
1410 }
1411 }
1412
1413 /* Copy frame to all raw sockets on that connection */
1414 static void l2cap_raw_recv(struct l2cap_conn *conn, struct sk_buff *skb)
1415 {
1416 struct sk_buff *nskb;
1417 struct l2cap_chan *chan;
1418
1419 BT_DBG("conn %p", conn);
1420
1421 read_lock(&conn->chan_lock);
1422 list_for_each_entry(chan, &conn->chan_l, list) {
1423 struct sock *sk = chan->sk;
1424 if (sk->sk_type != SOCK_RAW)
1425 continue;
1426
1427 /* Don't send frame to the socket it came from */
1428 if (skb->sk == sk)
1429 continue;
1430 nskb = skb_clone(skb, GFP_ATOMIC);
1431 if (!nskb)
1432 continue;
1433
1434 if (sock_queue_rcv_skb(sk, nskb))
1435 kfree_skb(nskb);
1436 }
1437 read_unlock(&conn->chan_lock);
1438 }
1439
1440 /* ---- L2CAP signalling commands ---- */
/* Allocate and fill an skb carrying one L2CAP signalling command.
 *
 * The head skb holds the L2CAP header, the command header and as much
 * of @data as fits in conn->mtu; the remainder is chained as raw
 * continuation fragments on frag_list (headers are not repeated).
 *
 * Returns the skb, or NULL on allocation failure.
 */
static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
		u8 code, u8 ident, u16 dlen, void *data)
{
	struct sk_buff *skb, **frag;
	struct l2cap_cmd_hdr *cmd;
	struct l2cap_hdr *lh;
	int len, count;

	BT_DBG("conn %p, code 0x%2.2x, ident 0x%2.2x, len %d",
			conn, code, ident, dlen);

	len = L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE + dlen;
	count = min_t(unsigned int, conn->mtu, len);

	skb = bt_skb_alloc(count, GFP_ATOMIC);
	if (!skb)
		return NULL;

	lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
	lh->len = cpu_to_le16(L2CAP_CMD_HDR_SIZE + dlen);

	/* LE links use a dedicated signalling channel id */
	if (conn->hcon->type == LE_LINK)
		lh->cid = cpu_to_le16(L2CAP_CID_LE_SIGNALING);
	else
		lh->cid = cpu_to_le16(L2CAP_CID_SIGNALING);

	cmd = (struct l2cap_cmd_hdr *) skb_put(skb, L2CAP_CMD_HDR_SIZE);
	cmd->code = code;
	cmd->ident = ident;
	cmd->len = cpu_to_le16(dlen);

	if (dlen) {
		/* count <= len, so after subtracting the two headers it
		 * never exceeds dlen */
		count -= L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE;
		memcpy(skb_put(skb, count), data, count);
		data += count;
	}

	len -= skb->len;

	/* Continuation fragments (no L2CAP header) */
	frag = &skb_shinfo(skb)->frag_list;
	while (len) {
		count = min_t(unsigned int, conn->mtu, len);

		*frag = bt_skb_alloc(count, GFP_ATOMIC);
		if (!*frag)
			goto fail;

		memcpy(skb_put(*frag, count), data, count);

		len -= count;
		data += count;

		frag = &(*frag)->next;
	}

	return skb;

fail:
	/* frees the head skb and every fragment already chained to it */
	kfree_skb(skb);
	return NULL;
}
1503
/* Decode one configuration option at *ptr and advance *ptr past it.
 *
 * Returns the total encoded size of the option.  Options of 1, 2 or 4
 * bytes are returned by value in *val; any other length returns a
 * pointer to the raw option payload (cast to unsigned long) instead.
 *
 * NOTE(review): there is no bounds checking here -- opt->len comes
 * straight off the air and the caller is trusted to stop while at least
 * L2CAP_CONF_OPT_SIZE bytes remain.  A crafted option length can make
 * *ptr walk past the end of the request buffer; verify the callers'
 * len accounting.
 */
static inline int l2cap_get_conf_opt(void **ptr, int *type, int *olen, unsigned long *val)
{
	struct l2cap_conf_opt *opt = *ptr;
	int len;

	len = L2CAP_CONF_OPT_SIZE + opt->len;
	*ptr += len;

	*type = opt->type;
	*olen = opt->len;

	switch (opt->len) {
	case 1:
		*val = *((u8 *) opt->val);
		break;

	case 2:
		*val = get_unaligned_le16(opt->val);
		break;

	case 4:
		*val = get_unaligned_le32(opt->val);
		break;

	default:
		/* variable-length option: hand back a pointer */
		*val = (unsigned long) opt->val;
		break;
	}

	BT_DBG("type 0x%2.2x len %d val 0x%lx", *type, opt->len, *val);
	return len;
}
1536
1537 static void l2cap_add_conf_opt(void **ptr, u8 type, u8 len, unsigned long val)
1538 {
1539 struct l2cap_conf_opt *opt = *ptr;
1540
1541 BT_DBG("type 0x%2.2x len %d val 0x%lx", type, len, val);
1542
1543 opt->type = type;
1544 opt->len = len;
1545
1546 switch (len) {
1547 case 1:
1548 *((u8 *) opt->val) = val;
1549 break;
1550
1551 case 2:
1552 put_unaligned_le16(val, opt->val);
1553 break;
1554
1555 case 4:
1556 put_unaligned_le32(val, opt->val);
1557 break;
1558
1559 default:
1560 memcpy(opt->val, (void *) val, len);
1561 break;
1562 }
1563
1564 *ptr += L2CAP_CONF_OPT_SIZE + len;
1565 }
1566
1567 static void l2cap_ack_timeout(unsigned long arg)
1568 {
1569 struct l2cap_chan *chan = (void *) arg;
1570
1571 bh_lock_sock(chan->sk);
1572 l2cap_send_ack(chan);
1573 bh_unlock_sock(chan->sk);
1574 }
1575
1576 static inline void l2cap_ertm_init(struct l2cap_chan *chan)
1577 {
1578 struct sock *sk = chan->sk;
1579
1580 chan->expected_ack_seq = 0;
1581 chan->unacked_frames = 0;
1582 chan->buffer_seq = 0;
1583 chan->num_acked = 0;
1584 chan->frames_sent = 0;
1585
1586 setup_timer(&chan->retrans_timer, l2cap_retrans_timeout,
1587 (unsigned long) chan);
1588 setup_timer(&chan->monitor_timer, l2cap_monitor_timeout,
1589 (unsigned long) chan);
1590 setup_timer(&chan->ack_timer, l2cap_ack_timeout, (unsigned long) chan);
1591
1592 skb_queue_head_init(&chan->srej_q);
1593 skb_queue_head_init(&chan->busy_q);
1594
1595 INIT_LIST_HEAD(&chan->srej_l);
1596
1597 INIT_WORK(&chan->busy_work, l2cap_busy_work);
1598
1599 sk->sk_backlog_rcv = l2cap_ertm_data_rcv;
1600 }
1601
1602 static inline __u8 l2cap_select_mode(__u8 mode, __u16 remote_feat_mask)
1603 {
1604 switch (mode) {
1605 case L2CAP_MODE_STREAMING:
1606 case L2CAP_MODE_ERTM:
1607 if (l2cap_mode_supported(mode, remote_feat_mask))
1608 return mode;
1609 /* fall through */
1610 default:
1611 return L2CAP_MODE_BASIC;
1612 }
1613 }
1614
/* Build our outgoing configuration request for @chan into @data.
 *
 * On the first request the desired mode may be downgraded to what the
 * remote supports (unless STATE2_DEVICE pins it).  Emits MTU, RFC and
 * optionally FCS options depending on the negotiated mode.
 *
 * Returns the number of bytes written to @data.
 */
static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data)
{
	struct l2cap_pinfo *pi = l2cap_pi(chan->sk);
	struct l2cap_conf_req *req = data;
	struct l2cap_conf_rfc rfc = { .mode = pi->mode };
	void *ptr = req->data;

	BT_DBG("chan %p", chan);

	/* Mode selection only happens on the very first exchange */
	if (chan->num_conf_req || chan->num_conf_rsp)
		goto done;

	switch (pi->mode) {
	case L2CAP_MODE_STREAMING:
	case L2CAP_MODE_ERTM:
		/* STATE2_DEVICE pins the mode; don't renegotiate it */
		if (pi->conf_state & L2CAP_CONF_STATE2_DEVICE)
			break;

		/* fall through */
	default:
		pi->mode = l2cap_select_mode(rfc.mode, pi->conn->feat_mask);
		break;
	}

done:
	if (pi->imtu != L2CAP_DEFAULT_MTU)
		l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, pi->imtu);

	switch (pi->mode) {
	case L2CAP_MODE_BASIC:
		/* Only advertise basic mode explicitly if the peer knows
		 * about ERTM/streaming at all */
		if (!(pi->conn->feat_mask & L2CAP_FEAT_ERTM) &&
				!(pi->conn->feat_mask & L2CAP_FEAT_STREAMING))
			break;

		rfc.mode = L2CAP_MODE_BASIC;
		rfc.txwin_size = 0;
		rfc.max_transmit = 0;
		rfc.retrans_timeout = 0;
		rfc.monitor_timeout = 0;
		rfc.max_pdu_size = 0;

		l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
							(unsigned long) &rfc);
		break;

	case L2CAP_MODE_ERTM:
		rfc.mode = L2CAP_MODE_ERTM;
		rfc.txwin_size = pi->tx_win;
		rfc.max_transmit = pi->max_tx;
		rfc.retrans_timeout = 0;
		rfc.monitor_timeout = 0;
		rfc.max_pdu_size = cpu_to_le16(L2CAP_DEFAULT_MAX_PDU_SIZE);
		/* cap the PDU size to what fits in the ACL MTU with headers */
		if (L2CAP_DEFAULT_MAX_PDU_SIZE > pi->conn->mtu - 10)
			rfc.max_pdu_size = cpu_to_le16(pi->conn->mtu - 10);

		l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
							(unsigned long) &rfc);

		if (!(pi->conn->feat_mask & L2CAP_FEAT_FCS))
			break;

		if (pi->fcs == L2CAP_FCS_NONE ||
				pi->conf_state & L2CAP_CONF_NO_FCS_RECV) {
			pi->fcs = L2CAP_FCS_NONE;
			l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1, pi->fcs);
		}
		break;

	case L2CAP_MODE_STREAMING:
		rfc.mode = L2CAP_MODE_STREAMING;
		rfc.txwin_size = 0;
		rfc.max_transmit = 0;
		rfc.retrans_timeout = 0;
		rfc.monitor_timeout = 0;
		rfc.max_pdu_size = cpu_to_le16(L2CAP_DEFAULT_MAX_PDU_SIZE);
		if (L2CAP_DEFAULT_MAX_PDU_SIZE > pi->conn->mtu - 10)
			rfc.max_pdu_size = cpu_to_le16(pi->conn->mtu - 10);

		l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
							(unsigned long) &rfc);

		if (!(pi->conn->feat_mask & L2CAP_FEAT_FCS))
			break;

		if (pi->fcs == L2CAP_FCS_NONE ||
				pi->conf_state & L2CAP_CONF_NO_FCS_RECV) {
			pi->fcs = L2CAP_FCS_NONE;
			l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1, pi->fcs);
		}
		break;
	}

	req->dcid = cpu_to_le16(pi->dcid);
	req->flags = cpu_to_le16(0);

	return ptr - data;
}
1712
1713 static int l2cap_parse_conf_req(struct l2cap_chan *chan, void *data)
1714 {
1715 struct l2cap_pinfo *pi = l2cap_pi(chan->sk);
1716 struct l2cap_conf_rsp *rsp = data;
1717 void *ptr = rsp->data;
1718 void *req = chan->conf_req;
1719 int len = chan->conf_len;
1720 int type, hint, olen;
1721 unsigned long val;
1722 struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
1723 u16 mtu = L2CAP_DEFAULT_MTU;
1724 u16 result = L2CAP_CONF_SUCCESS;
1725
1726 BT_DBG("chan %p", chan);
1727
1728 while (len >= L2CAP_CONF_OPT_SIZE) {
1729 len -= l2cap_get_conf_opt(&req, &type, &olen, &val);
1730
1731 hint = type & L2CAP_CONF_HINT;
1732 type &= L2CAP_CONF_MASK;
1733
1734 switch (type) {
1735 case L2CAP_CONF_MTU:
1736 mtu = val;
1737 break;
1738
1739 case L2CAP_CONF_FLUSH_TO:
1740 pi->flush_to = val;
1741 break;
1742
1743 case L2CAP_CONF_QOS:
1744 break;
1745
1746 case L2CAP_CONF_RFC:
1747 if (olen == sizeof(rfc))
1748 memcpy(&rfc, (void *) val, olen);
1749 break;
1750
1751 case L2CAP_CONF_FCS:
1752 if (val == L2CAP_FCS_NONE)
1753 pi->conf_state |= L2CAP_CONF_NO_FCS_RECV;
1754
1755 break;
1756
1757 default:
1758 if (hint)
1759 break;
1760
1761 result = L2CAP_CONF_UNKNOWN;
1762 *((u8 *) ptr++) = type;
1763 break;
1764 }
1765 }
1766
1767 if (chan->num_conf_rsp || chan->num_conf_req > 1)
1768 goto done;
1769
1770 switch (pi->mode) {
1771 case L2CAP_MODE_STREAMING:
1772 case L2CAP_MODE_ERTM:
1773 if (!(pi->conf_state & L2CAP_CONF_STATE2_DEVICE)) {
1774 pi->mode = l2cap_select_mode(rfc.mode,
1775 pi->conn->feat_mask);
1776 break;
1777 }
1778
1779 if (pi->mode != rfc.mode)
1780 return -ECONNREFUSED;
1781
1782 break;
1783 }
1784
1785 done:
1786 if (pi->mode != rfc.mode) {
1787 result = L2CAP_CONF_UNACCEPT;
1788 rfc.mode = pi->mode;
1789
1790 if (chan->num_conf_rsp == 1)
1791 return -ECONNREFUSED;
1792
1793 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
1794 sizeof(rfc), (unsigned long) &rfc);
1795 }
1796
1797
1798 if (result == L2CAP_CONF_SUCCESS) {
1799 /* Configure output options and let the other side know
1800 * which ones we don't like. */
1801
1802 if (mtu < L2CAP_DEFAULT_MIN_MTU)
1803 result = L2CAP_CONF_UNACCEPT;
1804 else {
1805 pi->omtu = mtu;
1806 pi->conf_state |= L2CAP_CONF_MTU_DONE;
1807 }
1808 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, pi->omtu);
1809
1810 switch (rfc.mode) {
1811 case L2CAP_MODE_BASIC:
1812 pi->fcs = L2CAP_FCS_NONE;
1813 pi->conf_state |= L2CAP_CONF_MODE_DONE;
1814 break;
1815
1816 case L2CAP_MODE_ERTM:
1817 chan->remote_tx_win = rfc.txwin_size;
1818 chan->remote_max_tx = rfc.max_transmit;
1819
1820 if (le16_to_cpu(rfc.max_pdu_size) > pi->conn->mtu - 10)
1821 rfc.max_pdu_size = cpu_to_le16(pi->conn->mtu - 10);
1822
1823 chan->remote_mps = le16_to_cpu(rfc.max_pdu_size);
1824
1825 rfc.retrans_timeout =
1826 le16_to_cpu(L2CAP_DEFAULT_RETRANS_TO);
1827 rfc.monitor_timeout =
1828 le16_to_cpu(L2CAP_DEFAULT_MONITOR_TO);
1829
1830 pi->conf_state |= L2CAP_CONF_MODE_DONE;
1831
1832 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
1833 sizeof(rfc), (unsigned long) &rfc);
1834
1835 break;
1836
1837 case L2CAP_MODE_STREAMING:
1838 if (le16_to_cpu(rfc.max_pdu_size) > pi->conn->mtu - 10)
1839 rfc.max_pdu_size = cpu_to_le16(pi->conn->mtu - 10);
1840
1841 chan->remote_mps = le16_to_cpu(rfc.max_pdu_size);
1842
1843 pi->conf_state |= L2CAP_CONF_MODE_DONE;
1844
1845 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
1846 sizeof(rfc), (unsigned long) &rfc);
1847
1848 break;
1849
1850 default:
1851 result = L2CAP_CONF_UNACCEPT;
1852
1853 memset(&rfc, 0, sizeof(rfc));
1854 rfc.mode = pi->mode;
1855 }
1856
1857 if (result == L2CAP_CONF_SUCCESS)
1858 pi->conf_state |= L2CAP_CONF_OUTPUT_DONE;
1859 }
1860 rsp->scid = cpu_to_le16(pi->dcid);
1861 rsp->result = cpu_to_le16(result);
1862 rsp->flags = cpu_to_le16(0x0000);
1863
1864 return ptr - data;
1865 }
1866
1867 static int l2cap_parse_conf_rsp(struct sock *sk, void *rsp, int len, void *data, u16 *result)
1868 {
1869 struct l2cap_pinfo *pi = l2cap_pi(sk);
1870 struct l2cap_conf_req *req = data;
1871 void *ptr = req->data;
1872 int type, olen;
1873 unsigned long val;
1874 struct l2cap_conf_rfc rfc;
1875
1876 BT_DBG("sk %p, rsp %p, len %d, req %p", sk, rsp, len, data);
1877
1878 while (len >= L2CAP_CONF_OPT_SIZE) {
1879 len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);
1880
1881 switch (type) {
1882 case L2CAP_CONF_MTU:
1883 if (val < L2CAP_DEFAULT_MIN_MTU) {
1884 *result = L2CAP_CONF_UNACCEPT;
1885 pi->imtu = L2CAP_DEFAULT_MIN_MTU;
1886 } else
1887 pi->imtu = val;
1888 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, pi->imtu);
1889 break;
1890
1891 case L2CAP_CONF_FLUSH_TO:
1892 pi->flush_to = val;
1893 l2cap_add_conf_opt(&ptr, L2CAP_CONF_FLUSH_TO,
1894 2, pi->flush_to);
1895 break;
1896
1897 case L2CAP_CONF_RFC:
1898 if (olen == sizeof(rfc))
1899 memcpy(&rfc, (void *)val, olen);
1900
1901 if ((pi->conf_state & L2CAP_CONF_STATE2_DEVICE) &&
1902 rfc.mode != pi->mode)
1903 return -ECONNREFUSED;
1904
1905 pi->fcs = 0;
1906
1907 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
1908 sizeof(rfc), (unsigned long) &rfc);
1909 break;
1910 }
1911 }
1912
1913 if (pi->mode == L2CAP_MODE_BASIC && pi->mode != rfc.mode)
1914 return -ECONNREFUSED;
1915
1916 pi->mode = rfc.mode;
1917
1918 if (*result == L2CAP_CONF_SUCCESS) {
1919 switch (rfc.mode) {
1920 case L2CAP_MODE_ERTM:
1921 pi->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
1922 pi->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
1923 pi->mps = le16_to_cpu(rfc.max_pdu_size);
1924 break;
1925 case L2CAP_MODE_STREAMING:
1926 pi->mps = le16_to_cpu(rfc.max_pdu_size);
1927 }
1928 }
1929
1930 req->dcid = cpu_to_le16(pi->dcid);
1931 req->flags = cpu_to_le16(0x0000);
1932
1933 return ptr - data;
1934 }
1935
1936 static int l2cap_build_conf_rsp(struct sock *sk, void *data, u16 result, u16 flags)
1937 {
1938 struct l2cap_conf_rsp *rsp = data;
1939 void *ptr = rsp->data;
1940
1941 BT_DBG("sk %p", sk);
1942
1943 rsp->scid = cpu_to_le16(l2cap_pi(sk)->dcid);
1944 rsp->result = cpu_to_le16(result);
1945 rsp->flags = cpu_to_le16(flags);
1946
1947 return ptr - data;
1948 }
1949
/* Send the deferred connect response for a channel that was accepted
 * with defer_setup, then kick off configuration unless our config
 * request already went out.
 *
 * NOTE(review): state and conf_state are modified without locking here;
 * presumably the caller holds the socket lock -- confirm at call sites.
 */
void __l2cap_connect_rsp_defer(struct sock *sk)
{
	struct l2cap_conn_rsp rsp;
	struct l2cap_conn *conn = l2cap_pi(sk)->conn;
	struct l2cap_chan *chan = l2cap_pi(sk)->chan;
	u8 buf[128];

	sk->sk_state = BT_CONFIG;

	rsp.scid = cpu_to_le16(l2cap_pi(sk)->dcid);
	rsp.dcid = cpu_to_le16(l2cap_pi(sk)->scid);
	rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
	rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
	/* reply with the ident saved from the original connect request */
	l2cap_send_cmd(conn, chan->ident,
				L2CAP_CONN_RSP, sizeof(rsp), &rsp);

	if (l2cap_pi(sk)->conf_state & L2CAP_CONF_REQ_SENT)
		return;

	l2cap_pi(sk)->conf_state |= L2CAP_CONF_REQ_SENT;
	l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
			l2cap_build_conf_req(chan, buf), buf);
	chan->num_conf_req++;
}
1974
1975 static void l2cap_conf_rfc_get(struct sock *sk, void *rsp, int len)
1976 {
1977 struct l2cap_pinfo *pi = l2cap_pi(sk);
1978 int type, olen;
1979 unsigned long val;
1980 struct l2cap_conf_rfc rfc;
1981
1982 BT_DBG("sk %p, rsp %p, len %d", sk, rsp, len);
1983
1984 if ((pi->mode != L2CAP_MODE_ERTM) && (pi->mode != L2CAP_MODE_STREAMING))
1985 return;
1986
1987 while (len >= L2CAP_CONF_OPT_SIZE) {
1988 len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);
1989
1990 switch (type) {
1991 case L2CAP_CONF_RFC:
1992 if (olen == sizeof(rfc))
1993 memcpy(&rfc, (void *)val, olen);
1994 goto done;
1995 }
1996 }
1997
1998 done:
1999 switch (rfc.mode) {
2000 case L2CAP_MODE_ERTM:
2001 pi->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
2002 pi->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
2003 pi->mps = le16_to_cpu(rfc.max_pdu_size);
2004 break;
2005 case L2CAP_MODE_STREAMING:
2006 pi->mps = le16_to_cpu(rfc.max_pdu_size);
2007 }
2008 }
2009
2010 static inline int l2cap_command_rej(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2011 {
2012 struct l2cap_cmd_rej *rej = (struct l2cap_cmd_rej *) data;
2013
2014 if (rej->reason != 0x0000)
2015 return 0;
2016
2017 if ((conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) &&
2018 cmd->ident == conn->info_ident) {
2019 del_timer(&conn->info_timer);
2020
2021 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
2022 conn->info_ident = 0;
2023
2024 l2cap_conn_start(conn);
2025 }
2026
2027 return 0;
2028 }
2029
/* Handle an incoming Connection Request: find a listener for the PSM,
 * allocate a child socket + channel, and answer with success, pending
 * or an error result.  A pending result with "no info" status also
 * triggers the feature-mask information exchange.
 */
static inline int l2cap_connect_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
{
	struct l2cap_conn_req *req = (struct l2cap_conn_req *) data;
	struct l2cap_conn_rsp rsp;
	struct l2cap_chan *chan = NULL;
	struct sock *parent, *sk = NULL;
	int result, status = L2CAP_CS_NO_INFO;

	u16 dcid = 0, scid = __le16_to_cpu(req->scid);
	__le16 psm = req->psm;

	BT_DBG("psm 0x%2.2x scid 0x%4.4x", psm, scid);

	/* Check if we have socket listening on psm */
	parent = l2cap_get_sock_by_psm(BT_LISTEN, psm, conn->src);
	if (!parent) {
		result = L2CAP_CR_BAD_PSM;
		goto sendresp;
	}

	bh_lock_sock(parent);

	/* Check if the ACL is secure enough (if not SDP) */
	if (psm != cpu_to_le16(0x0001) &&
			!hci_conn_check_link_mode(conn->hcon)) {
		conn->disc_reason = 0x05;
		result = L2CAP_CR_SEC_BLOCK;
		goto response;
	}

	result = L2CAP_CR_NO_MEM;

	/* Check for backlog size */
	if (sk_acceptq_is_full(parent)) {
		BT_DBG("backlog full %d", parent->sk_ack_backlog);
		goto response;
	}

	sk = l2cap_sock_alloc(sock_net(parent), NULL, BTPROTO_L2CAP, GFP_ATOMIC);
	if (!sk)
		goto response;

	chan = l2cap_chan_alloc(sk);
	if (!chan) {
		l2cap_sock_kill(sk);
		goto response;
	}

	write_lock_bh(&conn->chan_lock);

	/* Check if we already have channel with that dcid */
	if (__l2cap_get_chan_by_dcid(conn, scid)) {
		write_unlock_bh(&conn->chan_lock);
		sock_set_flag(sk, SOCK_ZAPPED);
		l2cap_sock_kill(sk);
		goto response;
	}

	hci_conn_hold(conn->hcon);

	/* Inherit settings from the listening socket and wire up the
	 * addresses/ids of this channel */
	l2cap_sock_init(sk, parent);
	bacpy(&bt_sk(sk)->src, conn->src);
	bacpy(&bt_sk(sk)->dst, conn->dst);
	l2cap_pi(sk)->psm = psm;
	l2cap_pi(sk)->dcid = scid;

	bt_accept_enqueue(parent, sk);

	__l2cap_chan_add(conn, chan);

	l2cap_pi(sk)->chan = chan;

	/* our source CID becomes the peer's destination CID */
	dcid = l2cap_pi(sk)->scid;

	l2cap_sock_set_timer(sk, sk->sk_sndtimeo);

	chan->ident = cmd->ident;

	if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE) {
		if (l2cap_check_security(sk)) {
			if (bt_sk(sk)->defer_setup) {
				/* userspace must accept() first */
				sk->sk_state = BT_CONNECT2;
				result = L2CAP_CR_PEND;
				status = L2CAP_CS_AUTHOR_PEND;
				parent->sk_data_ready(parent, 0);
			} else {
				sk->sk_state = BT_CONFIG;
				result = L2CAP_CR_SUCCESS;
				status = L2CAP_CS_NO_INFO;
			}
		} else {
			sk->sk_state = BT_CONNECT2;
			result = L2CAP_CR_PEND;
			status = L2CAP_CS_AUTHEN_PEND;
		}
	} else {
		/* feature exchange not finished yet */
		sk->sk_state = BT_CONNECT2;
		result = L2CAP_CR_PEND;
		status = L2CAP_CS_NO_INFO;
	}

	write_unlock_bh(&conn->chan_lock);

response:
	bh_unlock_sock(parent);

sendresp:
	rsp.scid = cpu_to_le16(scid);
	rsp.dcid = cpu_to_le16(dcid);
	rsp.result = cpu_to_le16(result);
	rsp.status = cpu_to_le16(status);
	l2cap_send_cmd(conn, cmd->ident, L2CAP_CONN_RSP, sizeof(rsp), &rsp);

	if (result == L2CAP_CR_PEND && status == L2CAP_CS_NO_INFO) {
		struct l2cap_info_req info;
		info.type = cpu_to_le16(L2CAP_IT_FEAT_MASK);

		conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
		conn->info_ident = l2cap_get_ident(conn);

		mod_timer(&conn->info_timer, jiffies +
					msecs_to_jiffies(L2CAP_INFO_TIMEOUT));

		l2cap_send_cmd(conn, conn->info_ident,
					L2CAP_INFO_REQ, sizeof(info), &info);
	}

	/* success path: immediately start configuration */
	if (chan && !(l2cap_pi(sk)->conf_state & L2CAP_CONF_REQ_SENT) &&
				result == L2CAP_CR_SUCCESS) {
		u8 buf[128];
		l2cap_pi(sk)->conf_state |= L2CAP_CONF_REQ_SENT;
		l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
					l2cap_build_conf_req(chan, buf), buf);
		chan->num_conf_req++;
	}

	return 0;
}
2168
/* Handle an incoming Connection Response for one of our outgoing
 * connection attempts, keyed by source CID (or by command ident while
 * the peer has not yet assigned us a CID).
 *
 * NOTE(review): there is no bh_lock_sock() here but a bh_unlock_sock()
 * at the end -- the l2cap_get_chan_by_* lookup helpers appear to return
 * with chan->sk bh-locked; confirm against their definitions.
 */
static inline int l2cap_connect_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
{
	struct l2cap_conn_rsp *rsp = (struct l2cap_conn_rsp *) data;
	u16 scid, dcid, result, status;
	struct l2cap_chan *chan;
	struct sock *sk;
	u8 req[128];

	scid = __le16_to_cpu(rsp->scid);
	dcid = __le16_to_cpu(rsp->dcid);
	result = __le16_to_cpu(rsp->result);
	status = __le16_to_cpu(rsp->status);

	BT_DBG("dcid 0x%4.4x scid 0x%4.4x result 0x%2.2x status 0x%2.2x", dcid, scid, result, status);

	if (scid) {
		chan = l2cap_get_chan_by_scid(conn, scid);
		if (!chan)
			return -EFAULT;
	} else {
		chan = l2cap_get_chan_by_ident(conn, cmd->ident);
		if (!chan)
			return -EFAULT;
	}

	sk = chan->sk;

	switch (result) {
	case L2CAP_CR_SUCCESS:
		/* connection established: start configuration */
		sk->sk_state = BT_CONFIG;
		chan->ident = 0;
		l2cap_pi(sk)->dcid = dcid;
		l2cap_pi(sk)->conf_state &= ~L2CAP_CONF_CONNECT_PEND;

		if (l2cap_pi(sk)->conf_state & L2CAP_CONF_REQ_SENT)
			break;

		l2cap_pi(sk)->conf_state |= L2CAP_CONF_REQ_SENT;

		l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
					l2cap_build_conf_req(chan, req), req);
		chan->num_conf_req++;
		break;

	case L2CAP_CR_PEND:
		l2cap_pi(sk)->conf_state |= L2CAP_CONF_CONNECT_PEND;
		break;

	default:
		/* don't delete l2cap channel if sk is owned by user */
		if (sock_owned_by_user(sk)) {
			sk->sk_state = BT_DISCONN;
			l2cap_sock_clear_timer(sk);
			/* short timer finishes the teardown later */
			l2cap_sock_set_timer(sk, HZ / 5);
			break;
		}

		l2cap_chan_del(chan, ECONNREFUSED);
		break;
	}

	bh_unlock_sock(sk);
	return 0;
}
2233
2234 static inline void set_default_fcs(struct l2cap_pinfo *pi)
2235 {
2236 /* FCS is enabled only in ERTM or streaming mode, if one or both
2237 * sides request it.
2238 */
2239 if (pi->mode != L2CAP_MODE_ERTM && pi->mode != L2CAP_MODE_STREAMING)
2240 pi->fcs = L2CAP_FCS_NONE;
2241 else if (!(pi->conf_state & L2CAP_CONF_NO_FCS_RECV))
2242 pi->fcs = L2CAP_FCS_CRC16;
2243 }
2244
/* Handle an incoming Configuration Request.  Option data may arrive in
 * several requests (continuation flag set); it is accumulated in
 * chan->conf_req and only parsed once the final fragment arrives.  When
 * both directions are configured the channel goes connected.
 *
 * NOTE(review): l2cap_get_chan_by_scid() appears to return with
 * chan->sk bh-locked, hence the bh_unlock_sock() at "unlock"; confirm
 * against the helper's definition.
 */
static inline int l2cap_config_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u16 cmd_len, u8 *data)
{
	struct l2cap_conf_req *req = (struct l2cap_conf_req *) data;
	u16 dcid, flags;
	u8 rsp[64];
	struct l2cap_chan *chan;
	struct sock *sk;
	int len;

	dcid = __le16_to_cpu(req->dcid);
	flags = __le16_to_cpu(req->flags);

	BT_DBG("dcid 0x%4.4x flags 0x%2.2x", dcid, flags);

	chan = l2cap_get_chan_by_scid(conn, dcid);
	if (!chan)
		return -ENOENT;

	sk = chan->sk;

	/* configuration is only legal in BT_CONFIG state */
	if (sk->sk_state != BT_CONFIG) {
		struct l2cap_cmd_rej rej;

		rej.reason = cpu_to_le16(0x0002);
		l2cap_send_cmd(conn, cmd->ident, L2CAP_COMMAND_REJ,
				sizeof(rej), &rej);
		goto unlock;
	}

	/* Reject if config buffer is too small. */
	len = cmd_len - sizeof(*req);
	if (chan->conf_len + len > sizeof(chan->conf_req)) {
		l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
				l2cap_build_conf_rsp(sk, rsp,
					L2CAP_CONF_REJECT, flags), rsp);
		goto unlock;
	}

	/* Store config. */
	memcpy(chan->conf_req + chan->conf_len, req->data, len);
	chan->conf_len += len;

	if (flags & 0x0001) {
		/* Incomplete config. Send empty response. */
		l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
				l2cap_build_conf_rsp(sk, rsp,
					L2CAP_CONF_SUCCESS, 0x0001), rsp);
		goto unlock;
	}

	/* Complete config. */
	len = l2cap_parse_conf_req(chan, rsp);
	if (len < 0) {
		/* unnegotiable options: tear the channel down */
		l2cap_send_disconn_req(conn, chan, ECONNRESET);
		goto unlock;
	}

	l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP, len, rsp);
	chan->num_conf_rsp++;

	/* Reset config buffer. */
	chan->conf_len = 0;

	if (!(l2cap_pi(sk)->conf_state & L2CAP_CONF_OUTPUT_DONE))
		goto unlock;

	/* both directions configured: bring the channel up */
	if (l2cap_pi(sk)->conf_state & L2CAP_CONF_INPUT_DONE) {
		set_default_fcs(l2cap_pi(sk));

		sk->sk_state = BT_CONNECTED;

		chan->next_tx_seq = 0;
		chan->expected_tx_seq = 0;
		skb_queue_head_init(&chan->tx_q);
		if (l2cap_pi(sk)->mode == L2CAP_MODE_ERTM)
			l2cap_ertm_init(chan);

		l2cap_chan_ready(sk);
		goto unlock;
	}

	/* peer configured us first: now send our own config request */
	if (!(l2cap_pi(sk)->conf_state & L2CAP_CONF_REQ_SENT)) {
		u8 buf[64];
		l2cap_pi(sk)->conf_state |= L2CAP_CONF_REQ_SENT;
		l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
					l2cap_build_conf_req(chan, buf), buf);
		chan->num_conf_req++;
	}

unlock:
	bh_unlock_sock(sk);
	return 0;
}
2338
2339 static inline int l2cap_config_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2340 {
2341 struct l2cap_conf_rsp *rsp = (struct l2cap_conf_rsp *)data;
2342 u16 scid, flags, result;
2343 struct l2cap_chan *chan;
2344 struct sock *sk;
2345 int len = cmd->len - sizeof(*rsp);
2346
2347 scid = __le16_to_cpu(rsp->scid);
2348 flags = __le16_to_cpu(rsp->flags);
2349 result = __le16_to_cpu(rsp->result);
2350
2351 BT_DBG("scid 0x%4.4x flags 0x%2.2x result 0x%2.2x",
2352 scid, flags, result);
2353
2354 chan = l2cap_get_chan_by_scid(conn, scid);
2355 if (!chan)
2356 return 0;
2357
2358 sk = chan->sk;
2359
2360 switch (result) {
2361 case L2CAP_CONF_SUCCESS:
2362 l2cap_conf_rfc_get(sk, rsp->data, len);
2363 break;
2364
2365 case L2CAP_CONF_UNACCEPT:
2366 if (chan->num_conf_rsp <= L2CAP_CONF_MAX_CONF_RSP) {
2367 char req[64];
2368
2369 if (len > sizeof(req) - sizeof(struct l2cap_conf_req)) {
2370 l2cap_send_disconn_req(conn, chan, ECONNRESET);
2371 goto done;
2372 }
2373
2374 /* throw out any old stored conf requests */
2375 result = L2CAP_CONF_SUCCESS;
2376 len = l2cap_parse_conf_rsp(sk, rsp->data,
2377 len, req, &result);
2378 if (len < 0) {
2379 l2cap_send_disconn_req(conn, chan, ECONNRESET);
2380 goto done;
2381 }
2382
2383 l2cap_send_cmd(conn, l2cap_get_ident(conn),
2384 L2CAP_CONF_REQ, len, req);
2385 chan->num_conf_req++;
2386 if (result != L2CAP_CONF_SUCCESS)
2387 goto done;
2388 break;
2389 }
2390
2391 default:
2392 sk->sk_err = ECONNRESET;
2393 l2cap_sock_set_timer(sk, HZ * 5);
2394 l2cap_send_disconn_req(conn, chan, ECONNRESET);
2395 goto done;
2396 }
2397
2398 if (flags & 0x01)
2399 goto done;
2400
2401 l2cap_pi(sk)->conf_state |= L2CAP_CONF_INPUT_DONE;
2402
2403 if (l2cap_pi(sk)->conf_state & L2CAP_CONF_OUTPUT_DONE) {
2404 set_default_fcs(l2cap_pi(sk));
2405
2406 sk->sk_state = BT_CONNECTED;
2407 chan->next_tx_seq = 0;
2408 chan->expected_tx_seq = 0;
2409 skb_queue_head_init(&chan->tx_q);
2410 if (l2cap_pi(sk)->mode == L2CAP_MODE_ERTM)
2411 l2cap_ertm_init(chan);
2412
2413 l2cap_chan_ready(sk);
2414 }
2415
2416 done:
2417 bh_unlock_sock(sk);
2418 return 0;
2419 }
2420
/* Handle an incoming Disconnection Request: acknowledge it and tear the
 * channel down.  If user space currently owns the socket the teardown is
 * deferred via a short timer instead.
 *
 * NOTE(review): l2cap_get_chan_by_scid() appears to return with
 * chan->sk bh-locked, hence the bh_unlock_sock() on every exit path;
 * confirm against the helper's definition.
 */
static inline int l2cap_disconnect_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
{
	struct l2cap_disconn_req *req = (struct l2cap_disconn_req *) data;
	struct l2cap_disconn_rsp rsp;
	u16 dcid, scid;
	struct l2cap_chan *chan;
	struct sock *sk;

	scid = __le16_to_cpu(req->scid);
	dcid = __le16_to_cpu(req->dcid);

	BT_DBG("scid 0x%4.4x dcid 0x%4.4x", scid, dcid);

	/* the peer's dcid is our scid */
	chan = l2cap_get_chan_by_scid(conn, dcid);
	if (!chan)
		return 0;

	sk = chan->sk;

	rsp.dcid = cpu_to_le16(l2cap_pi(sk)->scid);
	rsp.scid = cpu_to_le16(l2cap_pi(sk)->dcid);
	l2cap_send_cmd(conn, cmd->ident, L2CAP_DISCONN_RSP, sizeof(rsp), &rsp);

	sk->sk_shutdown = SHUTDOWN_MASK;

	/* don't delete l2cap channel if sk is owned by user */
	if (sock_owned_by_user(sk)) {
		sk->sk_state = BT_DISCONN;
		l2cap_sock_clear_timer(sk);
		/* short timer completes the teardown later */
		l2cap_sock_set_timer(sk, HZ / 5);
		bh_unlock_sock(sk);
		return 0;
	}

	l2cap_chan_del(chan, ECONNRESET);
	bh_unlock_sock(sk);

	l2cap_sock_kill(sk);
	return 0;
}
2461
/* Handle an incoming L2CAP Disconnection Response: the peer confirmed our
 * disconnect request, so release the channel.
 *
 * NOTE(review): as with l2cap_disconnect_req(), the socket appears to be
 * locked by l2cap_get_chan_by_scid() — every exit path unlocks; confirm.
 */
static inline int l2cap_disconnect_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
{
	struct l2cap_disconn_rsp *rsp = (struct l2cap_disconn_rsp *) data;
	u16 dcid, scid;
	struct l2cap_chan *chan;
	struct sock *sk;

	scid = __le16_to_cpu(rsp->scid);
	dcid = __le16_to_cpu(rsp->dcid);

	BT_DBG("dcid 0x%4.4x scid 0x%4.4x", dcid, scid);

	chan = l2cap_get_chan_by_scid(conn, scid);
	if (!chan)
		return 0;

	sk = chan->sk;

	/* don't delete l2cap channel if sk is owned by user */
	if (sock_owned_by_user(sk)) {
		/* Defer teardown: mark disconnecting and retry via a short timer. */
		sk->sk_state = BT_DISCONN;
		l2cap_sock_clear_timer(sk);
		l2cap_sock_set_timer(sk, HZ / 5);
		bh_unlock_sock(sk);
		return 0;
	}

	/* err == 0: clean, peer-confirmed disconnect. */
	l2cap_chan_del(chan, 0);
	bh_unlock_sock(sk);

	l2cap_sock_kill(sk);
	return 0;
}
2495
2496 static inline int l2cap_information_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2497 {
2498 struct l2cap_info_req *req = (struct l2cap_info_req *) data;
2499 u16 type;
2500
2501 type = __le16_to_cpu(req->type);
2502
2503 BT_DBG("type 0x%4.4x", type);
2504
2505 if (type == L2CAP_IT_FEAT_MASK) {
2506 u8 buf[8];
2507 u32 feat_mask = l2cap_feat_mask;
2508 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
2509 rsp->type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
2510 rsp->result = cpu_to_le16(L2CAP_IR_SUCCESS);
2511 if (!disable_ertm)
2512 feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING
2513 | L2CAP_FEAT_FCS;
2514 put_unaligned_le32(feat_mask, rsp->data);
2515 l2cap_send_cmd(conn, cmd->ident,
2516 L2CAP_INFO_RSP, sizeof(buf), buf);
2517 } else if (type == L2CAP_IT_FIXED_CHAN) {
2518 u8 buf[12];
2519 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
2520 rsp->type = cpu_to_le16(L2CAP_IT_FIXED_CHAN);
2521 rsp->result = cpu_to_le16(L2CAP_IR_SUCCESS);
2522 memcpy(buf + 4, l2cap_fixed_chan, 8);
2523 l2cap_send_cmd(conn, cmd->ident,
2524 L2CAP_INFO_RSP, sizeof(buf), buf);
2525 } else {
2526 struct l2cap_info_rsp rsp;
2527 rsp.type = cpu_to_le16(type);
2528 rsp.result = cpu_to_le16(L2CAP_IR_NOTSUPP);
2529 l2cap_send_cmd(conn, cmd->ident,
2530 L2CAP_INFO_RSP, sizeof(rsp), &rsp);
2531 }
2532
2533 return 0;
2534 }
2535
/* Handle the peer's L2CAP Information Response and continue connection
 * setup once the feature-discovery exchange is finished.
 *
 * Drives a small two-step state machine: first a feature-mask query, then
 * (if the peer supports fixed channels) a fixed-channel query; after the
 * last step, pending channels are started via l2cap_conn_start().
 */
static inline int l2cap_information_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
{
	struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) data;
	u16 type, result;

	type = __le16_to_cpu(rsp->type);
	result = __le16_to_cpu(rsp->result);

	BT_DBG("type 0x%4.4x result 0x%2.2x", type, result);

	/* L2CAP Info req/rsp are unbound to channels, add extra checks */
	if (cmd->ident != conn->info_ident ||
			conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE)
		return 0;

	del_timer(&conn->info_timer);

	/* Peer rejected the query: treat discovery as done and move on. */
	if (result != L2CAP_IR_SUCCESS) {
		conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
		conn->info_ident = 0;

		l2cap_conn_start(conn);

		return 0;
	}

	if (type == L2CAP_IT_FEAT_MASK) {
		conn->feat_mask = get_unaligned_le32(rsp->data);

		if (conn->feat_mask & L2CAP_FEAT_FIXED_CHAN) {
			/* Step two: ask for the peer's fixed-channel map. */
			struct l2cap_info_req req;
			req.type = cpu_to_le16(L2CAP_IT_FIXED_CHAN);

			conn->info_ident = l2cap_get_ident(conn);

			l2cap_send_cmd(conn, conn->info_ident,
					L2CAP_INFO_REQ, sizeof(req), &req);
		} else {
			conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
			conn->info_ident = 0;

			l2cap_conn_start(conn);
		}
	} else if (type == L2CAP_IT_FIXED_CHAN) {
		conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
		conn->info_ident = 0;

		l2cap_conn_start(conn);
	}

	return 0;
}
2588
/* Validate an LE connection-parameter set.
 *
 * Accepts the parameters only when: the interval pair is ordered and inside
 * 6..3200, the supervision timeout multiplier is inside 10..3200 and strictly
 * larger than 8*max, and the slave latency is <= 499 and small enough that
 * the supervision timeout still covers at least one latency window.
 * (Ranges presumably mirror the BT LE spec units — confirm against it.)
 *
 * Returns 0 when acceptable, -EINVAL otherwise.
 */
static inline int l2cap_check_conn_param(u16 min, u16 max, u16 latency,
						u16 to_multiplier)
{
	u16 latency_limit;

	if (min < 6 || max > 3200 || min > max)
		return -EINVAL;

	if (to_multiplier < 10 || to_multiplier > 3200)
		return -EINVAL;

	/* Timeout must strictly exceed the maximum interval (scaled by 8). */
	if (to_multiplier * 8 <= max)
		return -EINVAL;

	latency_limit = (to_multiplier * 8 / max) - 1;
	if (latency > 499 || latency > latency_limit)
		return -EINVAL;

	return 0;
}
2609
/* Handle an LE Connection Parameter Update Request from the slave.
 *
 * Only valid when we are the master of the link. Validates the proposed
 * parameters, always answers with an accept/reject response, and — when
 * accepted — pushes the new parameters down to the controller.
 *
 * Returns 0 on a handled request, -EINVAL on role mismatch, -EPROTO on a
 * malformed (wrong-length) command.
 */
static inline int l2cap_conn_param_update_req(struct l2cap_conn *conn,
					struct l2cap_cmd_hdr *cmd, u8 *data)
{
	struct hci_conn *hcon = conn->hcon;
	struct l2cap_conn_param_update_req *req;
	struct l2cap_conn_param_update_rsp rsp;
	u16 min, max, latency, to_multiplier, cmd_len;
	int err;

	/* Only the master may apply parameter updates. */
	if (!(hcon->link_mode & HCI_LM_MASTER))
		return -EINVAL;

	cmd_len = __le16_to_cpu(cmd->len);
	if (cmd_len != sizeof(struct l2cap_conn_param_update_req))
		return -EPROTO;

	req = (struct l2cap_conn_param_update_req *) data;
	min = __le16_to_cpu(req->min);
	max = __le16_to_cpu(req->max);
	latency = __le16_to_cpu(req->latency);
	to_multiplier = __le16_to_cpu(req->to_multiplier);

	BT_DBG("min 0x%4.4x max 0x%4.4x latency: 0x%4.4x Timeout: 0x%4.4x",
						min, max, latency, to_multiplier);

	memset(&rsp, 0, sizeof(rsp));

	err = l2cap_check_conn_param(min, max, latency, to_multiplier);
	if (err)
		rsp.result = cpu_to_le16(L2CAP_CONN_PARAM_REJECTED);
	else
		rsp.result = cpu_to_le16(L2CAP_CONN_PARAM_ACCEPTED);

	l2cap_send_cmd(conn, cmd->ident, L2CAP_CONN_PARAM_UPDATE_RSP,
							sizeof(rsp), &rsp);

	/* Apply to the controller only after telling the peer we accepted. */
	if (!err)
		hci_le_conn_update(hcon, min, max, latency, to_multiplier);

	return 0;
}
2651
2652 static inline int l2cap_bredr_sig_cmd(struct l2cap_conn *conn,
2653 struct l2cap_cmd_hdr *cmd, u16 cmd_len, u8 *data)
2654 {
2655 int err = 0;
2656
2657 switch (cmd->code) {
2658 case L2CAP_COMMAND_REJ:
2659 l2cap_command_rej(conn, cmd, data);
2660 break;
2661
2662 case L2CAP_CONN_REQ:
2663 err = l2cap_connect_req(conn, cmd, data);
2664 break;
2665
2666 case L2CAP_CONN_RSP:
2667 err = l2cap_connect_rsp(conn, cmd, data);
2668 break;
2669
2670 case L2CAP_CONF_REQ:
2671 err = l2cap_config_req(conn, cmd, cmd_len, data);
2672 break;
2673
2674 case L2CAP_CONF_RSP:
2675 err = l2cap_config_rsp(conn, cmd, data);
2676 break;
2677
2678 case L2CAP_DISCONN_REQ:
2679 err = l2cap_disconnect_req(conn, cmd, data);
2680 break;
2681
2682 case L2CAP_DISCONN_RSP:
2683 err = l2cap_disconnect_rsp(conn, cmd, data);
2684 break;
2685
2686 case L2CAP_ECHO_REQ:
2687 l2cap_send_cmd(conn, cmd->ident, L2CAP_ECHO_RSP, cmd_len, data);
2688 break;
2689
2690 case L2CAP_ECHO_RSP:
2691 break;
2692
2693 case L2CAP_INFO_REQ:
2694 err = l2cap_information_req(conn, cmd, data);
2695 break;
2696
2697 case L2CAP_INFO_RSP:
2698 err = l2cap_information_rsp(conn, cmd, data);
2699 break;
2700
2701 default:
2702 BT_ERR("Unknown BR/EDR signaling command 0x%2.2x", cmd->code);
2703 err = -EINVAL;
2704 break;
2705 }
2706
2707 return err;
2708 }
2709
2710 static inline int l2cap_le_sig_cmd(struct l2cap_conn *conn,
2711 struct l2cap_cmd_hdr *cmd, u8 *data)
2712 {
2713 switch (cmd->code) {
2714 case L2CAP_COMMAND_REJ:
2715 return 0;
2716
2717 case L2CAP_CONN_PARAM_UPDATE_REQ:
2718 return l2cap_conn_param_update_req(conn, cmd, data);
2719
2720 case L2CAP_CONN_PARAM_UPDATE_RSP:
2721 return 0;
2722
2723 default:
2724 BT_ERR("Unknown LE signaling command 0x%2.2x", cmd->code);
2725 return -EINVAL;
2726 }
2727 }
2728
/* Parse and dispatch every signalling command packed into one PDU on the
 * signalling channel, answering failures with Command Reject.
 * Consumes @skb.
 */
static inline void l2cap_sig_channel(struct l2cap_conn *conn,
						struct sk_buff *skb)
{
	u8 *data = skb->data;
	int len = skb->len;
	struct l2cap_cmd_hdr cmd;
	int err;

	/* Give raw sockets a copy before the commands are consumed. */
	l2cap_raw_recv(conn, skb);

	/* A single PDU may carry several commands back to back. */
	while (len >= L2CAP_CMD_HDR_SIZE) {
		u16 cmd_len;
		memcpy(&cmd, data, L2CAP_CMD_HDR_SIZE);
		data += L2CAP_CMD_HDR_SIZE;
		len -= L2CAP_CMD_HDR_SIZE;

		cmd_len = le16_to_cpu(cmd.len);

		BT_DBG("code 0x%2.2x len %d id 0x%2.2x", cmd.code, cmd_len, cmd.ident);

		/* Length overruns the PDU, or reserved ident 0: stop parsing. */
		if (cmd_len > len || !cmd.ident) {
			BT_DBG("corrupted command");
			break;
		}

		if (conn->hcon->type == LE_LINK)
			err = l2cap_le_sig_cmd(conn, &cmd, data);
		else
			err = l2cap_bredr_sig_cmd(conn, &cmd, cmd_len, data);

		if (err) {
			struct l2cap_cmd_rej rej;

			BT_ERR("Wrong link type (%d)", err);

			/* FIXME: Map err to a valid reason */
			rej.reason = cpu_to_le16(0);
			l2cap_send_cmd(conn, cmd.ident, L2CAP_COMMAND_REJ, sizeof(rej), &rej);
		}

		data += cmd_len;
		len -= cmd_len;
	}

	kfree_skb(skb);
}
2775
/* Verify (and strip) the 16-bit FCS trailing an ERTM/streaming PDU.
 *
 * When FCS is disabled this is a no-op. Returns 0 on match or when FCS is
 * off, -EBADMSG on a checksum mismatch.
 */
static int l2cap_check_fcs(struct l2cap_pinfo *pi, struct sk_buff *skb)
{
	u16 our_fcs, rcv_fcs;
	int hdr_size = L2CAP_HDR_SIZE + 2;

	if (pi->fcs == L2CAP_FCS_CRC16) {
		/* Trim the 2 FCS bytes off the tail; the bytes are still in
		 * the buffer just past the new end, so read them from there. */
		skb_trim(skb, skb->len - 2);
		rcv_fcs = get_unaligned_le16(skb->data + skb->len);
		/* The CRC covers the L2CAP header and control field too;
		 * skb->data already points past them, hence the back-offset. */
		our_fcs = crc16(0, skb->data - hdr_size, skb->len + hdr_size);

		if (our_fcs != rcv_fcs)
			return -EBADMSG;
	}
	return 0;
}
2791
/* Answer a poll (P-bit) by sending whatever is appropriate: RNR if we are
 * locally busy, pending I-frames otherwise, or a plain RR if nothing was
 * sent at all — so the peer always gets a frame acknowledging its poll.
 */
static inline void l2cap_send_i_or_rr_or_rnr(struct l2cap_chan *chan)
{
	u16 control = 0;

	chan->frames_sent = 0;

	control |= chan->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT;

	if (chan->conn_state & L2CAP_CONN_LOCAL_BUSY) {
		/* Tell the peer to hold off, and remember we said so. */
		control |= L2CAP_SUPER_RCV_NOT_READY;
		l2cap_send_sframe(chan, control);
		chan->conn_state |= L2CAP_CONN_RNR_SENT;
	}

	if (chan->conn_state & L2CAP_CONN_REMOTE_BUSY)
		l2cap_retransmit_frames(chan);

	l2cap_ertm_send(chan);

	/* Nothing went out and we're not busy: ack the poll with a bare RR. */
	if (!(chan->conn_state & L2CAP_CONN_LOCAL_BUSY) &&
			chan->frames_sent == 0) {
		control |= L2CAP_SUPER_RCV_READY;
		l2cap_send_sframe(chan, control);
	}
}
2817
/* Insert an out-of-order I-frame into the SREJ queue, keeping the queue
 * sorted by tx_seq distance from buffer_seq (modulo-64 arithmetic).
 *
 * Returns 0 on insertion, -EINVAL if a frame with the same tx_seq is
 * already queued (duplicate).
 */
static int l2cap_add_to_srej_queue(struct l2cap_chan *chan, struct sk_buff *skb, u8 tx_seq, u8 sar)
{
	struct sk_buff *next_skb;
	int tx_seq_offset, next_tx_seq_offset;

	/* Stash sequencing info in the skb's control block for reassembly. */
	bt_cb(skb)->tx_seq = tx_seq;
	bt_cb(skb)->sar = sar;

	next_skb = skb_peek(&chan->srej_q);
	if (!next_skb) {
		__skb_queue_tail(&chan->srej_q, skb);
		return 0;
	}

	/* Distance of the new frame from buffer_seq, normalized to 0..63. */
	tx_seq_offset = (tx_seq - chan->buffer_seq) % 64;
	if (tx_seq_offset < 0)
		tx_seq_offset += 64;

	do {
		if (bt_cb(next_skb)->tx_seq == tx_seq)
			return -EINVAL;

		next_tx_seq_offset = (bt_cb(next_skb)->tx_seq -
						chan->buffer_seq) % 64;
		if (next_tx_seq_offset < 0)
			next_tx_seq_offset += 64;

		/* First queued frame that sorts after us: insert before it. */
		if (next_tx_seq_offset > tx_seq_offset) {
			__skb_queue_before(&chan->srej_q, next_skb, skb);
			return 0;
		}

		if (skb_queue_is_last(&chan->srej_q, next_skb))
			break;

	} while ((next_skb = skb_queue_next(&chan->srej_q, next_skb)));

	/* Sorts after everything currently queued. */
	__skb_queue_tail(&chan->srej_q, skb);

	return 0;
}
2859
/* Reassemble ERTM SAR fragments into a complete SDU and deliver it to the
 * owning socket.
 *
 * Unsegmented PDUs are delivered directly; START/CONTINUE/END fragments
 * accumulate into chan->sdu. On delivery failure at SDU_END the SAR_RETRY
 * flag makes a re-attempt skip the copy step and only retry the queueing.
 *
 * Consumes @skb on every return path except the error returns at SDU_END.
 * Returns 0, or a negative errno from allocation/queueing.
 *
 * NOTE(review): the "drop" label falls through into "disconnect", so a
 * dropped SDU also tears the link down — looks deliberate, but confirm.
 */
static int l2cap_ertm_reassembly_sdu(struct l2cap_chan *chan, struct sk_buff *skb, u16 control)
{
	struct l2cap_pinfo *pi = l2cap_pi(chan->sk);
	struct sk_buff *_skb;
	int err;

	switch (control & L2CAP_CTRL_SAR) {
	case L2CAP_SDU_UNSEGMENTED:
		/* Unsegmented frame in the middle of a SAR sequence: error. */
		if (chan->conn_state & L2CAP_CONN_SAR_SDU)
			goto drop;

		err = sock_queue_rcv_skb(chan->sk, skb);
		if (!err)
			return err;

		break;

	case L2CAP_SDU_START:
		if (chan->conn_state & L2CAP_CONN_SAR_SDU)
			goto drop;

		/* First two bytes of a START fragment carry the SDU length. */
		chan->sdu_len = get_unaligned_le16(skb->data);

		if (chan->sdu_len > pi->imtu)
			goto disconnect;

		chan->sdu = bt_skb_alloc(chan->sdu_len, GFP_ATOMIC);
		if (!chan->sdu)
			return -ENOMEM;

		/* pull sdu_len bytes only after alloc, because of Local Busy
		 * condition we have to be sure that this will be executed
		 * only once, i.e., when alloc does not fail */
		skb_pull(skb, 2);

		memcpy(skb_put(chan->sdu, skb->len), skb->data, skb->len);

		chan->conn_state |= L2CAP_CONN_SAR_SDU;
		chan->partial_sdu_len = skb->len;
		break;

	case L2CAP_SDU_CONTINUE:
		if (!(chan->conn_state & L2CAP_CONN_SAR_SDU))
			goto disconnect;

		if (!chan->sdu)
			goto disconnect;

		chan->partial_sdu_len += skb->len;
		if (chan->partial_sdu_len > chan->sdu_len)
			goto drop;

		memcpy(skb_put(chan->sdu, skb->len), skb->data, skb->len);

		break;

	case L2CAP_SDU_END:
		if (!(chan->conn_state & L2CAP_CONN_SAR_SDU))
			goto disconnect;

		if (!chan->sdu)
			goto disconnect;

		/* On a retry the payload was already copied; skip to queueing. */
		if (!(chan->conn_state & L2CAP_CONN_SAR_RETRY)) {
			chan->partial_sdu_len += skb->len;

			if (chan->partial_sdu_len > pi->imtu)
				goto drop;

			if (chan->partial_sdu_len != chan->sdu_len)
				goto drop;

			memcpy(skb_put(chan->sdu, skb->len), skb->data, skb->len);
		}

		_skb = skb_clone(chan->sdu, GFP_ATOMIC);
		if (!_skb) {
			chan->conn_state |= L2CAP_CONN_SAR_RETRY;
			return -ENOMEM;
		}

		err = sock_queue_rcv_skb(chan->sk, _skb);
		if (err < 0) {
			kfree_skb(_skb);
			chan->conn_state |= L2CAP_CONN_SAR_RETRY;
			return err;
		}

		chan->conn_state &= ~L2CAP_CONN_SAR_RETRY;
		chan->conn_state &= ~L2CAP_CONN_SAR_SDU;

		/* NOTE(review): chan->sdu is freed but not NULLed here; it is
		 * only reassigned on the next SDU_START — confirm no path can
		 * reach the free in "drop:" with this stale pointer. */
		kfree_skb(chan->sdu);
		break;
	}

	kfree_skb(skb);
	return 0;

drop:
	kfree_skb(chan->sdu);
	chan->sdu = NULL;

disconnect:
	l2cap_send_disconn_req(pi->conn, chan, ECONNRESET);
	kfree_skb(skb);
	return 0;
}
2967
/* Try to drain the local-busy backlog (busy_q) into the socket and, on
 * success, leave the local-busy condition.
 *
 * Returns 0 once the backlog is empty (busy state cleared), -EBUSY if a
 * frame still cannot be delivered (it is put back at the head).
 */
static int l2cap_try_push_rx_skb(struct l2cap_chan *chan)
{
	struct sk_buff *skb;
	u16 control;
	int err;

	while ((skb = skb_dequeue(&chan->busy_q))) {
		control = bt_cb(skb)->sar << L2CAP_CTRL_SAR_SHIFT;
		err = l2cap_ertm_reassembly_sdu(chan, skb, control);
		if (err < 0) {
			/* Still busy: requeue the frame and give up for now. */
			skb_queue_head(&chan->busy_q, skb);
			return -EBUSY;
		}

		chan->buffer_seq = (chan->buffer_seq + 1) % 64;
	}

	if (!(chan->conn_state & L2CAP_CONN_RNR_SENT))
		goto done;

	/* We told the peer RNR earlier: poll it with RR+P to resume flow. */
	control = chan->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT;
	control |= L2CAP_SUPER_RCV_READY | L2CAP_CTRL_POLL;
	l2cap_send_sframe(chan, control);
	chan->retry_count = 1;

	del_timer(&chan->retrans_timer);
	__mod_monitor_timer();

	chan->conn_state |= L2CAP_CONN_WAIT_F;

done:
	chan->conn_state &= ~L2CAP_CONN_LOCAL_BUSY;
	chan->conn_state &= ~L2CAP_CONN_RNR_SENT;

	BT_DBG("chan %p, Exit local busy", chan);

	return 0;
}
3006
/* Workqueue handler for the local-busy condition: repeatedly sleep and
 * retry pushing the busy_q backlog until it drains, the socket errors,
 * a signal arrives, or the retry budget is exhausted (then disconnect).
 */
static void l2cap_busy_work(struct work_struct *work)
{
	DECLARE_WAITQUEUE(wait, current);
	struct l2cap_chan *chan =
		container_of(work, struct l2cap_chan, busy_work);
	struct sock *sk = chan->sk;
	int n_tries = 0, timeo = HZ/5, err;
	struct sk_buff *skb;

	lock_sock(sk);

	add_wait_queue(sk_sleep(sk), &wait);
	while ((skb = skb_peek(&chan->busy_q))) {
		set_current_state(TASK_INTERRUPTIBLE);

		/* Too many attempts: give up and drop the link. */
		if (n_tries++ > L2CAP_LOCAL_BUSY_TRIES) {
			err = -EBUSY;
			l2cap_send_disconn_req(l2cap_pi(sk)->conn, chan, EBUSY);
			break;
		}

		if (!timeo)
			timeo = HZ/5;

		if (signal_pending(current)) {
			err = sock_intr_errno(timeo);
			break;
		}

		/* Sleep without the socket lock so receivers can drain it. */
		release_sock(sk);
		timeo = schedule_timeout(timeo);
		lock_sock(sk);

		err = sock_error(sk);
		if (err)
			break;

		if (l2cap_try_push_rx_skb(chan) == 0)
			break;
	}

	set_current_state(TASK_RUNNING);
	remove_wait_queue(sk_sleep(sk), &wait);

	release_sock(sk);
}
3053
3054 static int l2cap_push_rx_skb(struct l2cap_chan *chan, struct sk_buff *skb, u16 control)
3055 {
3056 int sctrl, err;
3057
3058 if (chan->conn_state & L2CAP_CONN_LOCAL_BUSY) {
3059 bt_cb(skb)->sar = control >> L2CAP_CTRL_SAR_SHIFT;
3060 __skb_queue_tail(&chan->busy_q, skb);
3061 return l2cap_try_push_rx_skb(chan);
3062
3063
3064 }
3065
3066 err = l2cap_ertm_reassembly_sdu(chan, skb, control);
3067 if (err >= 0) {
3068 chan->buffer_seq = (chan->buffer_seq + 1) % 64;
3069 return err;
3070 }
3071
3072 /* Busy Condition */
3073 BT_DBG("chan %p, Enter local busy", chan);
3074
3075 chan->conn_state |= L2CAP_CONN_LOCAL_BUSY;
3076 bt_cb(skb)->sar = control >> L2CAP_CTRL_SAR_SHIFT;
3077 __skb_queue_tail(&chan->busy_q, skb);
3078
3079 sctrl = chan->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT;
3080 sctrl |= L2CAP_SUPER_RCV_NOT_READY;
3081 l2cap_send_sframe(chan, sctrl);
3082
3083 chan->conn_state |= L2CAP_CONN_RNR_SENT;
3084
3085 del_timer(&chan->ack_timer);
3086
3087 queue_work(_busy_wq, &chan->busy_work);
3088
3089 return err;
3090 }
3091
3092 static int l2cap_streaming_reassembly_sdu(struct l2cap_chan *chan, struct sk_buff *skb, u16 control)
3093 {
3094 struct l2cap_pinfo *pi = l2cap_pi(chan->sk);
3095 struct sk_buff *_skb;
3096 int err = -EINVAL;
3097
3098 /*
3099 * TODO: We have to notify the userland if some data is lost with the
3100 * Streaming Mode.
3101 */
3102
3103 switch (control & L2CAP_CTRL_SAR) {
3104 case L2CAP_SDU_UNSEGMENTED:
3105 if (chan->conn_state & L2CAP_CONN_SAR_SDU) {
3106 kfree_skb(chan->sdu);
3107 break;
3108 }
3109
3110 err = sock_queue_rcv_skb(chan->sk, skb);
3111 if (!err)
3112 return 0;
3113
3114 break;
3115
3116 case L2CAP_SDU_START:
3117 if (chan->conn_state & L2CAP_CONN_SAR_SDU) {
3118 kfree_skb(chan->sdu);
3119 break;
3120 }
3121
3122 chan->sdu_len = get_unaligned_le16(skb->data);
3123 skb_pull(skb, 2);
3124
3125 if (chan->sdu_len > pi->imtu) {
3126 err = -EMSGSIZE;
3127 break;
3128 }
3129
3130 chan->sdu = bt_skb_alloc(chan->sdu_len, GFP_ATOMIC);
3131 if (!chan->sdu) {
3132 err = -ENOMEM;
3133 break;
3134 }
3135
3136 memcpy(skb_put(chan->sdu, skb->len), skb->data, skb->len);
3137
3138 chan->conn_state |= L2CAP_CONN_SAR_SDU;
3139 chan->partial_sdu_len = skb->len;
3140 err = 0;
3141 break;
3142
3143 case L2CAP_SDU_CONTINUE:
3144 if (!(chan->conn_state & L2CAP_CONN_SAR_SDU))
3145 break;
3146
3147 memcpy(skb_put(chan->sdu, skb->len), skb->data, skb->len);
3148
3149 chan->partial_sdu_len += skb->len;
3150 if (chan->partial_sdu_len > chan->sdu_len)
3151 kfree_skb(chan->sdu);
3152 else
3153 err = 0;
3154
3155 break;
3156
3157 case L2CAP_SDU_END:
3158 if (!(chan->conn_state & L2CAP_CONN_SAR_SDU))
3159 break;
3160
3161 memcpy(skb_put(chan->sdu, skb->len), skb->data, skb->len);
3162
3163 chan->conn_state &= ~L2CAP_CONN_SAR_SDU;
3164 chan->partial_sdu_len += skb->len;
3165
3166 if (chan->partial_sdu_len > pi->imtu)
3167 goto drop;
3168
3169 if (chan->partial_sdu_len == chan->sdu_len) {
3170 _skb = skb_clone(chan->sdu, GFP_ATOMIC);
3171 err = sock_queue_rcv_skb(chan->sk, _skb);
3172 if (err < 0)
3173 kfree_skb(_skb);
3174 }
3175 err = 0;
3176
3177 drop:
3178 kfree_skb(chan->sdu);
3179 break;
3180 }
3181
3182 kfree_skb(skb);
3183 return err;
3184 }
3185
/* After a missing frame arrives, deliver the run of now-contiguous frames
 * sitting at the head of the SREJ queue, starting from @tx_seq.
 */
static void l2cap_check_srej_gap(struct l2cap_chan *chan, u8 tx_seq)
{
	struct sk_buff *skb;
	u16 control;

	while ((skb = skb_peek(&chan->srej_q))) {
		/* Stop at the first hole in the sequence. */
		if (bt_cb(skb)->tx_seq != tx_seq)
			break;

		skb = skb_dequeue(&chan->srej_q);
		control = bt_cb(skb)->sar << L2CAP_CTRL_SAR_SHIFT;
		l2cap_ertm_reassembly_sdu(chan, skb, control);
		chan->buffer_seq_srej =
			(chan->buffer_seq_srej + 1) % 64;
		tx_seq = (tx_seq + 1) % 64;
	}
}
3203
/* Re-send SREJ frames for every sequence number still outstanding on the
 * srej_l list up to (and removing) the entry for @tx_seq, rotating the
 * resent entries to the tail so list order keeps matching request order.
 */
static void l2cap_resend_srejframe(struct l2cap_chan *chan, u8 tx_seq)
{
	struct srej_list *l, *tmp;
	u16 control;

	list_for_each_entry_safe(l, tmp, &chan->srej_l, list) {
		/* Found the frame that just arrived: stop resending. */
		if (l->tx_seq == tx_seq) {
			list_del(&l->list);
			kfree(l);
			return;
		}
		control = L2CAP_SUPER_SELECT_REJECT;
		control |= l->tx_seq << L2CAP_CTRL_REQSEQ_SHIFT;
		l2cap_send_sframe(chan, control);
		list_del(&l->list);
		list_add_tail(&l->list, &chan->srej_l);
	}
}
3222
3223 static void l2cap_send_srejframe(struct l2cap_chan *chan, u8 tx_seq)
3224 {
3225 struct srej_list *new;
3226 u16 control;
3227
3228 while (tx_seq != chan->expected_tx_seq) {
3229 control = L2CAP_SUPER_SELECT_REJECT;
3230 control |= chan->expected_tx_seq << L2CAP_CTRL_REQSEQ_SHIFT;
3231 l2cap_send_sframe(chan, control);
3232
3233 new = kzalloc(sizeof(struct srej_list), GFP_ATOMIC);
3234 new->tx_seq = chan->expected_tx_seq;
3235 chan->expected_tx_seq = (chan->expected_tx_seq + 1) % 64;
3236 list_add_tail(&new->list, &chan->srej_l);
3237 }
3238 chan->expected_tx_seq = (chan->expected_tx_seq + 1) % 64;
3239 }
3240
3241 static inline int l2cap_data_channel_iframe(struct l2cap_chan *chan, u16 rx_control, struct sk_buff *skb)
3242 {
3243 struct l2cap_pinfo *pi = l2cap_pi(chan->sk);
3244 u8 tx_seq = __get_txseq(rx_control);
3245 u8 req_seq = __get_reqseq(rx_control);
3246 u8 sar = rx_control >> L2CAP_CTRL_SAR_SHIFT;
3247 int tx_seq_offset, expected_tx_seq_offset;
3248 int num_to_ack = (pi->tx_win/6) + 1;
3249 int err = 0;
3250
3251 BT_DBG("chan %p len %d tx_seq %d rx_control 0x%4.4x", chan, skb->len,
3252 tx_seq, rx_control);
3253
3254 if (L2CAP_CTRL_FINAL & rx_control &&
3255 chan->conn_state & L2CAP_CONN_WAIT_F) {
3256 del_timer(&chan->monitor_timer);
3257 if (chan->unacked_frames > 0)
3258 __mod_retrans_timer();
3259 chan->conn_state &= ~L2CAP_CONN_WAIT_F;
3260 }
3261
3262 chan->expected_ack_seq = req_seq;
3263 l2cap_drop_acked_frames(chan);
3264
3265 if (tx_seq == chan->expected_tx_seq)
3266 goto expected;
3267
3268 tx_seq_offset = (tx_seq - chan->buffer_seq) % 64;
3269 if (tx_seq_offset < 0)
3270 tx_seq_offset += 64;
3271
3272 /* invalid tx_seq */
3273 if (tx_seq_offset >= pi->tx_win) {
3274 l2cap_send_disconn_req(pi->conn, chan, ECONNRESET);
3275 goto drop;
3276 }
3277
3278 if (chan->conn_state == L2CAP_CONN_LOCAL_BUSY)
3279 goto drop;
3280
3281 if (chan->conn_state & L2CAP_CONN_SREJ_SENT) {
3282 struct srej_list *first;
3283
3284 first = list_first_entry(&chan->srej_l,
3285 struct srej_list, list);
3286 if (tx_seq == first->tx_seq) {
3287 l2cap_add_to_srej_queue(chan, skb, tx_seq, sar);
3288 l2cap_check_srej_gap(chan, tx_seq);
3289
3290 list_del(&first->list);
3291 kfree(first);
3292
3293 if (list_empty(&chan->srej_l)) {
3294 chan->buffer_seq = chan->buffer_seq_srej;
3295 chan->conn_state &= ~L2CAP_CONN_SREJ_SENT;
3296 l2cap_send_ack(chan);
3297 BT_DBG("chan %p, Exit SREJ_SENT", chan);
3298 }
3299 } else {
3300 struct srej_list *l;
3301
3302 /* duplicated tx_seq */
3303 if (l2cap_add_to_srej_queue(chan, skb, tx_seq, sar) < 0)
3304 goto drop;
3305
3306 list_for_each_entry(l, &chan->srej_l, list) {
3307 if (l->tx_seq == tx_seq) {
3308 l2cap_resend_srejframe(chan, tx_seq);
3309 return 0;
3310 }
3311 }
3312 l2cap_send_srejframe(chan, tx_seq);
3313 }
3314 } else {
3315 expected_tx_seq_offset =
3316 (chan->expected_tx_seq - chan->buffer_seq) % 64;
3317 if (expected_tx_seq_offset < 0)
3318 expected_tx_seq_offset += 64;
3319
3320 /* duplicated tx_seq */
3321 if (tx_seq_offset < expected_tx_seq_offset)
3322 goto drop;
3323
3324 chan->conn_state |= L2CAP_CONN_SREJ_SENT;
3325
3326 BT_DBG("chan %p, Enter SREJ", chan);
3327
3328 INIT_LIST_HEAD(&chan->srej_l);
3329 chan->buffer_seq_srej = chan->buffer_seq;
3330
3331 __skb_queue_head_init(&chan->srej_q);
3332 __skb_queue_head_init(&chan->busy_q);
3333 l2cap_add_to_srej_queue(chan, skb, tx_seq, sar);
3334
3335 chan->conn_state |= L2CAP_CONN_SEND_PBIT;
3336
3337 l2cap_send_srejframe(chan, tx_seq);
3338
3339 del_timer(&chan->ack_timer);
3340 }
3341 return 0;
3342
3343 expected:
3344 chan->expected_tx_seq = (chan->expected_tx_seq + 1) % 64;
3345
3346 if (chan->conn_state & L2CAP_CONN_SREJ_SENT) {
3347 bt_cb(skb)->tx_seq = tx_seq;
3348 bt_cb(skb)->sar = sar;
3349 __skb_queue_tail(&chan->srej_q, skb);
3350 return 0;
3351 }
3352
3353 err = l2cap_push_rx_skb(chan, skb, rx_control);
3354 if (err < 0)
3355 return 0;
3356
3357 if (rx_control & L2CAP_CTRL_FINAL) {
3358 if (chan->conn_state & L2CAP_CONN_REJ_ACT)
3359 chan->conn_state &= ~L2CAP_CONN_REJ_ACT;
3360 else
3361 l2cap_retransmit_frames(chan);
3362 }
3363
3364 __mod_ack_timer();
3365
3366 chan->num_acked = (chan->num_acked + 1) % num_to_ack;
3367 if (chan->num_acked == num_to_ack - 1)
3368 l2cap_send_ack(chan);
3369
3370 return 0;
3371
3372 drop:
3373 kfree_skb(skb);
3374 return 0;
3375 }
3376
/* Handle a Receiver Ready (RR) S-frame: the peer acknowledges frames up to
 * req_seq and, depending on P/F bits, polls us or clears a remote-busy
 * condition.
 */
static inline void l2cap_data_channel_rrframe(struct l2cap_chan *chan, u16 rx_control)
{
	BT_DBG("chan %p, req_seq %d ctrl 0x%4.4x", chan, __get_reqseq(rx_control),
						rx_control);

	chan->expected_ack_seq = __get_reqseq(rx_control);
	l2cap_drop_acked_frames(chan);

	if (rx_control & L2CAP_CTRL_POLL) {
		/* Peer polled us: we must answer with the F-bit set. */
		chan->conn_state |= L2CAP_CONN_SEND_FBIT;
		if (chan->conn_state & L2CAP_CONN_SREJ_SENT) {
			if ((chan->conn_state & L2CAP_CONN_REMOTE_BUSY) &&
					(chan->unacked_frames > 0))
				__mod_retrans_timer();

			chan->conn_state &= ~L2CAP_CONN_REMOTE_BUSY;
			l2cap_send_srejtail(chan);
		} else {
			l2cap_send_i_or_rr_or_rnr(chan);
		}

	} else if (rx_control & L2CAP_CTRL_FINAL) {
		/* Final bit answers our earlier poll. */
		chan->conn_state &= ~L2CAP_CONN_REMOTE_BUSY;

		if (chan->conn_state & L2CAP_CONN_REJ_ACT)
			chan->conn_state &= ~L2CAP_CONN_REJ_ACT;
		else
			l2cap_retransmit_frames(chan);

	} else {
		if ((chan->conn_state & L2CAP_CONN_REMOTE_BUSY) &&
				(chan->unacked_frames > 0))
			__mod_retrans_timer();

		chan->conn_state &= ~L2CAP_CONN_REMOTE_BUSY;
		if (chan->conn_state & L2CAP_CONN_SREJ_SENT)
			l2cap_send_ack(chan);
		else
			l2cap_ertm_send(chan);
	}
}
3418
/* Handle a Reject (REJ) S-frame: the peer asks for retransmission starting
 * at req_seq after acknowledging everything before it.
 */
static inline void l2cap_data_channel_rejframe(struct l2cap_chan *chan, u16 rx_control)
{
	u8 tx_seq = __get_reqseq(rx_control);

	BT_DBG("chan %p, req_seq %d ctrl 0x%4.4x", chan, tx_seq, rx_control);

	chan->conn_state &= ~L2CAP_CONN_REMOTE_BUSY;

	chan->expected_ack_seq = tx_seq;
	l2cap_drop_acked_frames(chan);

	if (rx_control & L2CAP_CTRL_FINAL) {
		/* F-bit REJ answering our poll: retransmit only if no REJ
		 * action is already pending. */
		if (chan->conn_state & L2CAP_CONN_REJ_ACT)
			chan->conn_state &= ~L2CAP_CONN_REJ_ACT;
		else
			l2cap_retransmit_frames(chan);
	} else {
		l2cap_retransmit_frames(chan);

		/* Remember the pending REJ while a poll answer is awaited. */
		if (chan->conn_state & L2CAP_CONN_WAIT_F)
			chan->conn_state |= L2CAP_CONN_REJ_ACT;
	}
}
/* Handle a Selective Reject (SREJ) S-frame: retransmit the single frame
 * the peer asks for, honouring the P/F bits.
 */
static inline void l2cap_data_channel_srejframe(struct l2cap_chan *chan, u16 rx_control)
{
	u8 tx_seq = __get_reqseq(rx_control);

	BT_DBG("chan %p, req_seq %d ctrl 0x%4.4x", chan, tx_seq, rx_control);

	chan->conn_state &= ~L2CAP_CONN_REMOTE_BUSY;

	if (rx_control & L2CAP_CTRL_POLL) {
		/* SREJ with P-bit also acknowledges frames before tx_seq. */
		chan->expected_ack_seq = tx_seq;
		l2cap_drop_acked_frames(chan);

		chan->conn_state |= L2CAP_CONN_SEND_FBIT;
		l2cap_retransmit_one_frame(chan, tx_seq);

		l2cap_ertm_send(chan);

		if (chan->conn_state & L2CAP_CONN_WAIT_F) {
			chan->srej_save_reqseq = tx_seq;
			chan->conn_state |= L2CAP_CONN_SREJ_ACT;
		}
	} else if (rx_control & L2CAP_CTRL_FINAL) {
		/* F-bit SREJ answering our poll: skip the retransmit if this
		 * very frame was already resent as the saved SREJ action. */
		if ((chan->conn_state & L2CAP_CONN_SREJ_ACT) &&
				chan->srej_save_reqseq == tx_seq)
			chan->conn_state &= ~L2CAP_CONN_SREJ_ACT;
		else
			l2cap_retransmit_one_frame(chan, tx_seq);
	} else {
		l2cap_retransmit_one_frame(chan, tx_seq);
		if (chan->conn_state & L2CAP_CONN_WAIT_F) {
			chan->srej_save_reqseq = tx_seq;
			chan->conn_state |= L2CAP_CONN_SREJ_ACT;
		}
	}
}
3477
/* Handle a Receiver Not Ready (RNR) S-frame: the peer is busy, so stop
 * retransmitting and, if polled, answer with the appropriate frame.
 */
static inline void l2cap_data_channel_rnrframe(struct l2cap_chan *chan, u16 rx_control)
{
	u8 tx_seq = __get_reqseq(rx_control);

	BT_DBG("chan %p, req_seq %d ctrl 0x%4.4x", chan, tx_seq, rx_control);

	chan->conn_state |= L2CAP_CONN_REMOTE_BUSY;
	chan->expected_ack_seq = tx_seq;
	l2cap_drop_acked_frames(chan);

	if (rx_control & L2CAP_CTRL_POLL)
		chan->conn_state |= L2CAP_CONN_SEND_FBIT;

	if (!(chan->conn_state & L2CAP_CONN_SREJ_SENT)) {
		/* Nothing outstanding on SREJ: pause retransmission and ack
		 * the poll (if any) with an F-bit RR/RNR. */
		del_timer(&chan->retrans_timer);
		if (rx_control & L2CAP_CTRL_POLL)
			l2cap_send_rr_or_rnr(chan, L2CAP_CTRL_FINAL);
		return;
	}

	if (rx_control & L2CAP_CTRL_POLL)
		l2cap_send_srejtail(chan);
	else
		l2cap_send_sframe(chan, L2CAP_SUPER_RCV_READY);
}
3503
3504 static inline int l2cap_data_channel_sframe(struct l2cap_chan *chan, u16 rx_control, struct sk_buff *skb)
3505 {
3506 BT_DBG("chan %p rx_control 0x%4.4x len %d", chan, rx_control, skb->len);
3507
3508 if (L2CAP_CTRL_FINAL & rx_control &&
3509 chan->conn_state & L2CAP_CONN_WAIT_F) {
3510 del_timer(&chan->monitor_timer);
3511 if (chan->unacked_frames > 0)
3512 __mod_retrans_timer();
3513 chan->conn_state &= ~L2CAP_CONN_WAIT_F;
3514 }
3515
3516 switch (rx_control & L2CAP_CTRL_SUPERVISE) {
3517 case L2CAP_SUPER_RCV_READY:
3518 l2cap_data_channel_rrframe(chan, rx_control);
3519 break;
3520
3521 case L2CAP_SUPER_REJECT:
3522 l2cap_data_channel_rejframe(chan, rx_control);
3523 break;
3524
3525 case L2CAP_SUPER_SELECT_REJECT:
3526 l2cap_data_channel_srejframe(chan, rx_control);
3527 break;
3528
3529 case L2CAP_SUPER_RCV_NOT_READY:
3530 l2cap_data_channel_rnrframe(chan, rx_control);
3531 break;
3532 }
3533
3534 kfree_skb(skb);
3535 return 0;
3536 }
3537
/* Validate and route one raw ERTM PDU for @sk: check FCS and lengths,
 * sanity-check the req_seq window, then hand off to the I-frame or S-frame
 * handler. Bad frames are dropped (and some force a disconnect).
 * Always returns 0; the skb is consumed on all paths.
 */
static int l2cap_ertm_data_rcv(struct sock *sk, struct sk_buff *skb)
{
	struct l2cap_chan *chan = l2cap_pi(sk)->chan;
	struct l2cap_pinfo *pi = l2cap_pi(sk);
	u16 control;
	u8 req_seq;
	int len, next_tx_seq_offset, req_seq_offset;

	control = get_unaligned_le16(skb->data);
	skb_pull(skb, 2);
	len = skb->len;

	/*
	 * We can just drop the corrupted I-frame here.
	 * Receiver will miss it and start proper recovery
	 * procedures and ask retransmission.
	 */
	if (l2cap_check_fcs(pi, skb))
		goto drop;

	/* Payload length excludes the SAR length field and the FCS. */
	if (__is_sar_start(control) && __is_iframe(control))
		len -= 2;

	if (pi->fcs == L2CAP_FCS_CRC16)
		len -= 2;

	if (len > pi->mps) {
		l2cap_send_disconn_req(pi->conn, chan, ECONNRESET);
		goto drop;
	}

	req_seq = __get_reqseq(control);
	req_seq_offset = (req_seq - chan->expected_ack_seq) % 64;
	if (req_seq_offset < 0)
		req_seq_offset += 64;

	next_tx_seq_offset =
		(chan->next_tx_seq - chan->expected_ack_seq) % 64;
	if (next_tx_seq_offset < 0)
		next_tx_seq_offset += 64;

	/* check for invalid req-seq */
	if (req_seq_offset > next_tx_seq_offset) {
		l2cap_send_disconn_req(pi->conn, chan, ECONNRESET);
		goto drop;
	}

	if (__is_iframe(control)) {
		/* An I-frame must carry payload. */
		if (len < 0) {
			l2cap_send_disconn_req(pi->conn, chan, ECONNRESET);
			goto drop;
		}

		l2cap_data_channel_iframe(chan, control, skb);
	} else {
		/* An S-frame must carry none. */
		if (len != 0) {
			BT_ERR("%d", len);
			l2cap_send_disconn_req(pi->conn, chan, ECONNRESET);
			goto drop;
		}

		l2cap_data_channel_sframe(chan, control, skb);
	}

	return 0;

drop:
	kfree_skb(skb);
	return 0;
}
3608
3609 static inline int l2cap_data_channel(struct l2cap_conn *conn, u16 cid, struct sk_buff *skb)
3610 {
3611 struct l2cap_chan *chan;
3612 struct sock *sk;
3613 struct l2cap_pinfo *pi;
3614 u16 control;
3615 u8 tx_seq;
3616 int len;
3617
3618 chan = l2cap_get_chan_by_scid(conn, cid);
3619 if (!chan) {
3620 BT_DBG("unknown cid 0x%4.4x", cid);
3621 goto drop;
3622 }
3623
3624 sk = chan->sk;
3625 pi = l2cap_pi(sk);
3626
3627 BT_DBG("chan %p, len %d", chan, skb->len);
3628
3629 if (sk->sk_state != BT_CONNECTED)
3630 goto drop;
3631
3632 switch (pi->mode) {
3633 case L2CAP_MODE_BASIC:
3634 /* If socket recv buffers overflows we drop data here
3635 * which is *bad* because L2CAP has to be reliable.
3636 * But we don't have any other choice. L2CAP doesn't
3637 * provide flow control mechanism. */
3638
3639 if (pi->imtu < skb->len)
3640 goto drop;
3641
3642 if (!sock_queue_rcv_skb(sk, skb))
3643 goto done;
3644 break;
3645
3646 case L2CAP_MODE_ERTM:
3647 if (!sock_owned_by_user(sk)) {
3648 l2cap_ertm_data_rcv(sk, skb);
3649 } else {
3650 if (sk_add_backlog(sk, skb))
3651 goto drop;
3652 }
3653
3654 goto done;
3655
3656 case L2CAP_MODE_STREAMING:
3657 control = get_unaligned_le16(skb->data);
3658 skb_pull(skb, 2);
3659 len = skb->len;
3660
3661 if (l2cap_check_fcs(pi, skb))
3662 goto drop;
3663
3664 if (__is_sar_start(control))
3665 len -= 2;
3666
3667 if (pi->fcs == L2CAP_FCS_CRC16)
3668 len -= 2;
3669
3670 if (len > pi->mps || len < 0 || __is_sframe(control))
3671 goto drop;
3672
3673 tx_seq = __get_txseq(control);
3674
3675 if (chan->expected_tx_seq == tx_seq)
3676 chan->expected_tx_seq = (chan->expected_tx_seq + 1) % 64;
3677 else
3678 chan->expected_tx_seq = (tx_seq + 1) % 64;
3679
3680 l2cap_streaming_reassembly_sdu(chan, skb, control);
3681
3682 goto done;
3683
3684 default:
3685 BT_DBG("chan %p: bad mode 0x%2.2x", chan, pi->mode);
3686 break;
3687 }
3688
3689 drop:
3690 kfree_skb(skb);
3691
3692 done:
3693 if (sk)
3694 bh_unlock_sock(sk);
3695
3696 return 0;
3697 }
3698
3699 static inline int l2cap_conless_channel(struct l2cap_conn *conn, __le16 psm, struct sk_buff *skb)
3700 {
3701 struct sock *sk;
3702
3703 sk = l2cap_get_sock_by_psm(0, psm, conn->src);
3704 if (!sk)
3705 goto drop;
3706
3707 bh_lock_sock(sk);
3708
3709 BT_DBG("sk %p, len %d", sk, skb->len);
3710
3711 if (sk->sk_state != BT_BOUND && sk->sk_state != BT_CONNECTED)
3712 goto drop;
3713
3714 if (l2cap_pi(sk)->imtu < skb->len)
3715 goto drop;
3716
3717 if (!sock_queue_rcv_skb(sk, skb))
3718 goto done;
3719
3720 drop:
3721 kfree_skb(skb);
3722
3723 done:
3724 if (sk)
3725 bh_unlock_sock(sk);
3726 return 0;
3727 }
3728
3729 static inline int l2cap_att_channel(struct l2cap_conn *conn, __le16 cid, struct sk_buff *skb)
3730 {
3731 struct sock *sk;
3732
3733 sk = l2cap_get_sock_by_scid(0, cid, conn->src);
3734 if (!sk)
3735 goto drop;
3736
3737 bh_lock_sock(sk);
3738
3739 BT_DBG("sk %p, len %d", sk, skb->len);
3740
3741 if (sk->sk_state != BT_BOUND && sk->sk_state != BT_CONNECTED)
3742 goto drop;
3743
3744 if (l2cap_pi(sk)->imtu < skb->len)
3745 goto drop;
3746
3747 if (!sock_queue_rcv_skb(sk, skb))
3748 goto done;
3749
3750 drop:
3751 kfree_skb(skb);
3752
3753 done:
3754 if (sk)
3755 bh_unlock_sock(sk);
3756 return 0;
3757 }
3758
3759 static void l2cap_recv_frame(struct l2cap_conn *conn, struct sk_buff *skb)
3760 {
3761 struct l2cap_hdr *lh = (void *) skb->data;
3762 u16 cid, len;
3763 __le16 psm;
3764
3765 skb_pull(skb, L2CAP_HDR_SIZE);
3766 cid = __le16_to_cpu(lh->cid);
3767 len = __le16_to_cpu(lh->len);
3768
3769 if (len != skb->len) {
3770 kfree_skb(skb);
3771 return;
3772 }
3773
3774 BT_DBG("len %d, cid 0x%4.4x", len, cid);
3775
3776 switch (cid) {
3777 case L2CAP_CID_LE_SIGNALING:
3778 case L2CAP_CID_SIGNALING:
3779 l2cap_sig_channel(conn, skb);
3780 break;
3781
3782 case L2CAP_CID_CONN_LESS:
3783 psm = get_unaligned_le16(skb->data);
3784 skb_pull(skb, 2);
3785 l2cap_conless_channel(conn, psm, skb);
3786 break;
3787
3788 case L2CAP_CID_LE_DATA:
3789 l2cap_att_channel(conn, cid, skb);
3790 break;
3791
3792 default:
3793 l2cap_data_channel(conn, cid, skb);
3794 break;
3795 }
3796 }
3797
3798 /* ---- L2CAP interface with lower layer (HCI) ---- */
3799
3800 static int l2cap_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
3801 {
3802 int exact = 0, lm1 = 0, lm2 = 0;
3803 register struct sock *sk;
3804 struct hlist_node *node;
3805
3806 if (type != ACL_LINK)
3807 return -EINVAL;
3808
3809 BT_DBG("hdev %s, bdaddr %s", hdev->name, batostr(bdaddr));
3810
3811 /* Find listening sockets and check their link_mode */
3812 read_lock(&l2cap_sk_list.lock);
3813 sk_for_each(sk, node, &l2cap_sk_list.head) {
3814 if (sk->sk_state != BT_LISTEN)
3815 continue;
3816
3817 if (!bacmp(&bt_sk(sk)->src, &hdev->bdaddr)) {
3818 lm1 |= HCI_LM_ACCEPT;
3819 if (l2cap_pi(sk)->role_switch)
3820 lm1 |= HCI_LM_MASTER;
3821 exact++;
3822 } else if (!bacmp(&bt_sk(sk)->src, BDADDR_ANY)) {
3823 lm2 |= HCI_LM_ACCEPT;
3824 if (l2cap_pi(sk)->role_switch)
3825 lm2 |= HCI_LM_MASTER;
3826 }
3827 }
3828 read_unlock(&l2cap_sk_list.lock);
3829
3830 return exact ? lm1 : lm2;
3831 }
3832
3833 static int l2cap_connect_cfm(struct hci_conn *hcon, u8 status)
3834 {
3835 struct l2cap_conn *conn;
3836
3837 BT_DBG("hcon %p bdaddr %s status %d", hcon, batostr(&hcon->dst), status);
3838
3839 if (!(hcon->type == ACL_LINK || hcon->type == LE_LINK))
3840 return -EINVAL;
3841
3842 if (!status) {
3843 conn = l2cap_conn_add(hcon, status);
3844 if (conn)
3845 l2cap_conn_ready(conn);
3846 } else
3847 l2cap_conn_del(hcon, bt_err(status));
3848
3849 return 0;
3850 }
3851
3852 static int l2cap_disconn_ind(struct hci_conn *hcon)
3853 {
3854 struct l2cap_conn *conn = hcon->l2cap_data;
3855
3856 BT_DBG("hcon %p", hcon);
3857
3858 if (hcon->type != ACL_LINK || !conn)
3859 return 0x13;
3860
3861 return conn->disc_reason;
3862 }
3863
3864 static int l2cap_disconn_cfm(struct hci_conn *hcon, u8 reason)
3865 {
3866 BT_DBG("hcon %p reason %d", hcon, reason);
3867
3868 if (!(hcon->type == ACL_LINK || hcon->type == LE_LINK))
3869 return -EINVAL;
3870
3871 l2cap_conn_del(hcon, bt_err(reason));
3872
3873 return 0;
3874 }
3875
3876 static inline void l2cap_check_encryption(struct sock *sk, u8 encrypt)
3877 {
3878 if (sk->sk_type != SOCK_SEQPACKET && sk->sk_type != SOCK_STREAM)
3879 return;
3880
3881 if (encrypt == 0x00) {
3882 if (l2cap_pi(sk)->sec_level == BT_SECURITY_MEDIUM) {
3883 l2cap_sock_clear_timer(sk);
3884 l2cap_sock_set_timer(sk, HZ * 5);
3885 } else if (l2cap_pi(sk)->sec_level == BT_SECURITY_HIGH)
3886 __l2cap_sock_close(sk, ECONNREFUSED);
3887 } else {
3888 if (l2cap_pi(sk)->sec_level == BT_SECURITY_MEDIUM)
3889 l2cap_sock_clear_timer(sk);
3890 }
3891 }
3892
/* HCI security (authentication/encryption) result callback.
 *
 * Walks every channel on the affected connection under chan_lock and,
 * per socket, either refreshes encryption-dependent state, sends the
 * deferred L2CAP connect request, or answers a pending incoming
 * connect with success or a security-block response.
 *
 * Each socket is bh-locked individually; note that every branch that
 * "continue"s must drop the lock itself first.
 */
static int l2cap_security_cfm(struct hci_conn *hcon, u8 status, u8 encrypt)
{
	struct l2cap_conn *conn = hcon->l2cap_data;
	struct l2cap_chan *chan;

	if (!conn)
		return 0;

	BT_DBG("conn %p", conn);

	read_lock(&conn->chan_lock);

	list_for_each_entry(chan, &conn->chan_l, list) {
		struct sock *sk = chan->sk;

		bh_lock_sock(sk);

		/* A connect request is already in flight for this channel;
		 * leave it alone until that completes. */
		if (l2cap_pi(sk)->conf_state & L2CAP_CONF_CONNECT_PEND) {
			bh_unlock_sock(sk);
			continue;
		}

		/* Established or configuring channels only need their
		 * encryption-dependent timers/teardown handled. */
		if (!status && (sk->sk_state == BT_CONNECTED ||
						sk->sk_state == BT_CONFIG)) {
			l2cap_check_encryption(sk, encrypt);
			bh_unlock_sock(sk);
			continue;
		}

		if (sk->sk_state == BT_CONNECT) {
			if (!status) {
				/* Security procedure done: send the L2CAP
				 * connect request that was waiting on it. */
				struct l2cap_conn_req req;
				req.scid = cpu_to_le16(l2cap_pi(sk)->scid);
				req.psm = l2cap_pi(sk)->psm;

				chan->ident = l2cap_get_ident(conn);
				l2cap_pi(sk)->conf_state |= L2CAP_CONF_CONNECT_PEND;

				l2cap_send_cmd(conn, chan->ident,
					L2CAP_CONN_REQ, sizeof(req), &req);
			} else {
				/* Security failed: arm a short timer so the
				 * socket is torn down shortly. */
				l2cap_sock_clear_timer(sk);
				l2cap_sock_set_timer(sk, HZ / 10);
			}
		} else if (sk->sk_state == BT_CONNECT2) {
			/* Incoming connect was waiting on security: answer
			 * it now with success or a security block. */
			struct l2cap_conn_rsp rsp;
			__u16 result;

			if (!status) {
				sk->sk_state = BT_CONFIG;
				result = L2CAP_CR_SUCCESS;
			} else {
				sk->sk_state = BT_DISCONN;
				l2cap_sock_set_timer(sk, HZ / 10);
				result = L2CAP_CR_SEC_BLOCK;
			}

			rsp.scid = cpu_to_le16(l2cap_pi(sk)->dcid);
			rsp.dcid = cpu_to_le16(l2cap_pi(sk)->scid);
			rsp.result = cpu_to_le16(result);
			rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
			l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
							sizeof(rsp), &rsp);
		}

		bh_unlock_sock(sk);
	}

	read_unlock(&conn->chan_lock);

	return 0;
}
3965
/* Receive one ACL data packet from HCI and reassemble L2CAP frames.
 *
 * A start fragment (ACL_CONT clear) carries the basic L2CAP header,
 * from which the total frame length is known; continuation fragments
 * are appended to conn->rx_skb until rx_len reaches zero, at which
 * point the complete frame is handed to l2cap_recv_frame().  The skb
 * passed in is always consumed.  Always returns 0.
 */
static int l2cap_recv_acldata(struct hci_conn *hcon, struct sk_buff *skb, u16 flags)
{
	struct l2cap_conn *conn = hcon->l2cap_data;

	/* Lazily create connection state on first data. */
	if (!conn)
		conn = l2cap_conn_add(hcon, 0);

	if (!conn)
		goto drop;

	BT_DBG("conn %p len %d flags 0x%x", conn, skb->len, flags);

	if (!(flags & ACL_CONT)) {
		struct l2cap_hdr *hdr;
		struct l2cap_chan *chan;
		u16 cid;
		int len;

		/* A new start fragment while reassembly is in progress
		 * means the previous frame was truncated: discard it. */
		if (conn->rx_len) {
			BT_ERR("Unexpected start frame (len %d)", skb->len);
			kfree_skb(conn->rx_skb);
			conn->rx_skb = NULL;
			conn->rx_len = 0;
			l2cap_conn_unreliable(conn, ECOMM);
		}

		/* Start fragment always begin with Basic L2CAP header */
		if (skb->len < L2CAP_HDR_SIZE) {
			BT_ERR("Frame is too short (len %d)", skb->len);
			l2cap_conn_unreliable(conn, ECOMM);
			goto drop;
		}

		hdr = (struct l2cap_hdr *) skb->data;
		len = __le16_to_cpu(hdr->len) + L2CAP_HDR_SIZE;
		cid = __le16_to_cpu(hdr->cid);

		if (len == skb->len) {
			/* Complete frame received */
			l2cap_recv_frame(conn, skb);
			return 0;
		}

		BT_DBG("Start: total len %d, frag len %d", len, skb->len);

		if (skb->len > len) {
			BT_ERR("Frame is too long (len %d, expected len %d)",
				skb->len, len);
			l2cap_conn_unreliable(conn, ECOMM);
			goto drop;
		}

		/* NOTE(review): l2cap_get_chan_by_scid() appears to return
		 * with the channel's socket bh-locked -- both exits of the
		 * block below call bh_unlock_sock().  Confirm against the
		 * helper's definition. */
		chan = l2cap_get_chan_by_scid(conn, cid);

		if (chan && chan->sk) {
			struct sock *sk = chan->sk;

			/* Reject early if the full frame cannot fit the
			 * channel's incoming MTU. */
			if (l2cap_pi(sk)->imtu < len - L2CAP_HDR_SIZE) {
				BT_ERR("Frame exceeding recv MTU (len %d, "
							"MTU %d)", len,
							l2cap_pi(sk)->imtu);
				bh_unlock_sock(sk);
				l2cap_conn_unreliable(conn, ECOMM);
				goto drop;
			}
			bh_unlock_sock(sk);
		}

		/* Allocate skb for the complete frame (with header) */
		conn->rx_skb = bt_skb_alloc(len, GFP_ATOMIC);
		if (!conn->rx_skb)
			goto drop;

		skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
								skb->len);
		conn->rx_len = len - skb->len;
	} else {
		BT_DBG("Cont: frag len %d (expecting %d)", skb->len, conn->rx_len);

		/* Continuation fragment with no reassembly in progress. */
		if (!conn->rx_len) {
			BT_ERR("Unexpected continuation frame (len %d)", skb->len);
			l2cap_conn_unreliable(conn, ECOMM);
			goto drop;
		}

		/* Fragment would overflow the declared frame length:
		 * abandon the whole frame. */
		if (skb->len > conn->rx_len) {
			BT_ERR("Fragment is too long (len %d, expected %d)",
					skb->len, conn->rx_len);
			kfree_skb(conn->rx_skb);
			conn->rx_skb = NULL;
			conn->rx_len = 0;
			l2cap_conn_unreliable(conn, ECOMM);
			goto drop;
		}

		skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
								skb->len);
		conn->rx_len -= skb->len;

		if (!conn->rx_len) {
			/* Complete frame received */
			l2cap_recv_frame(conn, conn->rx_skb);
			conn->rx_skb = NULL;
		}
	}

drop:
	kfree_skb(skb);
	return 0;
}
4076
4077 static int l2cap_debugfs_show(struct seq_file *f, void *p)
4078 {
4079 struct sock *sk;
4080 struct hlist_node *node;
4081
4082 read_lock_bh(&l2cap_sk_list.lock);
4083
4084 sk_for_each(sk, node, &l2cap_sk_list.head) {
4085 struct l2cap_pinfo *pi = l2cap_pi(sk);
4086
4087 seq_printf(f, "%s %s %d %d 0x%4.4x 0x%4.4x %d %d %d %d\n",
4088 batostr(&bt_sk(sk)->src),
4089 batostr(&bt_sk(sk)->dst),
4090 sk->sk_state, __le16_to_cpu(pi->psm),
4091 pi->scid, pi->dcid,
4092 pi->imtu, pi->omtu, pi->sec_level,
4093 pi->mode);
4094 }
4095
4096 read_unlock_bh(&l2cap_sk_list.lock);
4097
4098 return 0;
4099 }
4100
/* debugfs open hook: wire the single-record seq_file helper to
 * l2cap_debugfs_show. */
static int l2cap_debugfs_open(struct inode *inode, struct file *file)
{
	return single_open(file, l2cap_debugfs_show, inode->i_private);
}
4105
/* File operations for the read-only "l2cap" debugfs entry; all reads
 * go through the seq_file single_open/seq_read machinery. */
static const struct file_operations l2cap_debugfs_fops = {
	.open = l2cap_debugfs_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};

/* Dentry of the "l2cap" debugfs file; created in l2cap_init(),
 * removed in l2cap_exit(). */
static struct dentry *l2cap_debugfs;
4114
/* L2CAP's registration with the HCI layer: callbacks for connection
 * accept policy, connect/disconnect completion, security results and
 * inbound ACL data. */
static struct hci_proto l2cap_hci_proto = {
	.name = "L2CAP",
	.id = HCI_PROTO_L2CAP,
	.connect_ind = l2cap_connect_ind,
	.connect_cfm = l2cap_connect_cfm,
	.disconn_ind = l2cap_disconn_ind,
	.disconn_cfm = l2cap_disconn_cfm,
	.security_cfm = l2cap_security_cfm,
	.recv_acldata = l2cap_recv_acldata
};
4125
4126 int __init l2cap_init(void)
4127 {
4128 int err;
4129
4130 err = l2cap_init_sockets();
4131 if (err < 0)
4132 return err;
4133
4134 _busy_wq = create_singlethread_workqueue("l2cap");
4135 if (!_busy_wq) {
4136 err = -ENOMEM;
4137 goto error;
4138 }
4139
4140 err = hci_register_proto(&l2cap_hci_proto);
4141 if (err < 0) {
4142 BT_ERR("L2CAP protocol registration failed");
4143 bt_sock_unregister(BTPROTO_L2CAP);
4144 goto error;
4145 }
4146
4147 if (bt_debugfs) {
4148 l2cap_debugfs = debugfs_create_file("l2cap", 0444,
4149 bt_debugfs, NULL, &l2cap_debugfs_fops);
4150 if (!l2cap_debugfs)
4151 BT_ERR("Failed to create L2CAP debug file");
4152 }
4153
4154 return 0;
4155
4156 error:
4157 destroy_workqueue(_busy_wq);
4158 l2cap_cleanup_sockets();
4159 return err;
4160 }
4161
/* Module teardown: remove the debugfs entry, drain and destroy the
 * busy-work queue, unregister from HCI, then release the socket layer.
 * The statement order mirrors the reverse of l2cap_init(). */
void l2cap_exit(void)
{
	debugfs_remove(l2cap_debugfs);

	/* Finish any queued work before destroying the queue. */
	flush_workqueue(_busy_wq);
	destroy_workqueue(_busy_wq);

	if (hci_unregister_proto(&l2cap_hci_proto) < 0)
		BT_ERR("L2CAP protocol unregistration failed");

	l2cap_cleanup_sockets();
}
4174
/* Writable (0644) module parameter to turn off ERTM support. */
module_param(disable_ertm, bool, 0644);
MODULE_PARM_DESC(disable_ertm, "Disable enhanced retransmission mode");