/* Bluetooth: Rename __l2cap_chan_close() to l2cap_chan_close()
 * [GitHub/mt8127/android_kernel_alcatel_ttab.git] / net / bluetooth / l2cap_core.c */
1 /*
2 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (C) 2000-2001 Qualcomm Incorporated
4 Copyright (C) 2009-2010 Gustavo F. Padovan <gustavo@padovan.org>
5 Copyright (C) 2010 Google Inc.
6
7 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
8
9 This program is free software; you can redistribute it and/or modify
10 it under the terms of the GNU General Public License version 2 as
11 published by the Free Software Foundation;
12
13 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
14 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
15 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
16 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
17 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
18 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
19 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
20 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
21
22 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
23 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
24 SOFTWARE IS DISCLAIMED.
25 */
26
27 /* Bluetooth L2CAP core. */
28
29 #include <linux/module.h>
30
31 #include <linux/types.h>
32 #include <linux/capability.h>
33 #include <linux/errno.h>
34 #include <linux/kernel.h>
35 #include <linux/sched.h>
36 #include <linux/slab.h>
37 #include <linux/poll.h>
38 #include <linux/fcntl.h>
39 #include <linux/init.h>
40 #include <linux/interrupt.h>
41 #include <linux/socket.h>
42 #include <linux/skbuff.h>
43 #include <linux/list.h>
44 #include <linux/device.h>
45 #include <linux/debugfs.h>
46 #include <linux/seq_file.h>
47 #include <linux/uaccess.h>
48 #include <linux/crc16.h>
49 #include <net/sock.h>
50
51 #include <asm/system.h>
52 #include <asm/unaligned.h>
53
54 #include <net/bluetooth/bluetooth.h>
55 #include <net/bluetooth/hci_core.h>
56 #include <net/bluetooth/l2cap.h>
57
/* Module parameter: when non-zero, Enhanced Retransmission and Streaming
 * modes are not offered (see l2cap_mode_supported()). */
int disable_ertm;

/* Locally advertised feature mask and fixed-channel bitmap. */
static u32 l2cap_feat_mask = L2CAP_FEAT_FIXED_CHAN;
static u8 l2cap_fixed_chan[8] = { 0x02, };

/* Workqueue for deferred work — presumably servicing l2cap_busy_work()
 * below when a channel leaves the local-busy state; verify at use site. */
static struct workqueue_struct *_busy_wq;

/* Global list of all L2CAP channels, protected by chan_list_lock. */
LIST_HEAD(chan_list);
DEFINE_RWLOCK(chan_list_lock);

static void l2cap_busy_work(struct work_struct *work);

static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
				u8 code, u8 ident, u16 dlen, void *data);
static void l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len,
								void *data);
static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data);
static void l2cap_send_disconn_req(struct l2cap_conn *conn,
				struct l2cap_chan *chan, int err);

static int l2cap_ertm_data_rcv(struct sock *sk, struct sk_buff *skb);
80 /* ---- L2CAP channels ---- */
81 static struct l2cap_chan *__l2cap_get_chan_by_dcid(struct l2cap_conn *conn, u16 cid)
82 {
83 struct l2cap_chan *c;
84
85 list_for_each_entry(c, &conn->chan_l, list) {
86 if (c->dcid == cid)
87 return c;
88 }
89 return NULL;
90
91 }
92
93 static struct l2cap_chan *__l2cap_get_chan_by_scid(struct l2cap_conn *conn, u16 cid)
94 {
95 struct l2cap_chan *c;
96
97 list_for_each_entry(c, &conn->chan_l, list) {
98 if (c->scid == cid)
99 return c;
100 }
101 return NULL;
102 }
103
104 /* Find channel with given SCID.
105 * Returns locked socket */
106 static struct l2cap_chan *l2cap_get_chan_by_scid(struct l2cap_conn *conn, u16 cid)
107 {
108 struct l2cap_chan *c;
109
110 read_lock(&conn->chan_lock);
111 c = __l2cap_get_chan_by_scid(conn, cid);
112 if (c)
113 bh_lock_sock(c->sk);
114 read_unlock(&conn->chan_lock);
115 return c;
116 }
117
118 static struct l2cap_chan *__l2cap_get_chan_by_ident(struct l2cap_conn *conn, u8 ident)
119 {
120 struct l2cap_chan *c;
121
122 list_for_each_entry(c, &conn->chan_l, list) {
123 if (c->ident == ident)
124 return c;
125 }
126 return NULL;
127 }
128
129 static inline struct l2cap_chan *l2cap_get_chan_by_ident(struct l2cap_conn *conn, u8 ident)
130 {
131 struct l2cap_chan *c;
132
133 read_lock(&conn->chan_lock);
134 c = __l2cap_get_chan_by_ident(conn, ident);
135 if (c)
136 bh_lock_sock(c->sk);
137 read_unlock(&conn->chan_lock);
138 return c;
139 }
140
141 static struct l2cap_chan *__l2cap_global_chan_by_addr(__le16 psm, bdaddr_t *src)
142 {
143 struct l2cap_chan *c;
144
145 list_for_each_entry(c, &chan_list, global_l) {
146 if (c->sport == psm && !bacmp(&bt_sk(c->sk)->src, src))
147 goto found;
148 }
149
150 c = NULL;
151 found:
152 return c;
153 }
154
155 int l2cap_add_psm(struct l2cap_chan *chan, bdaddr_t *src, __le16 psm)
156 {
157 int err;
158
159 write_lock_bh(&chan_list_lock);
160
161 if (psm && __l2cap_global_chan_by_addr(psm, src)) {
162 err = -EADDRINUSE;
163 goto done;
164 }
165
166 if (psm) {
167 chan->psm = psm;
168 chan->sport = psm;
169 err = 0;
170 } else {
171 u16 p;
172
173 err = -EINVAL;
174 for (p = 0x1001; p < 0x1100; p += 2)
175 if (!__l2cap_global_chan_by_addr(cpu_to_le16(p), src)) {
176 chan->psm = cpu_to_le16(p);
177 chan->sport = cpu_to_le16(p);
178 err = 0;
179 break;
180 }
181 }
182
183 done:
184 write_unlock_bh(&chan_list_lock);
185 return err;
186 }
187
188 int l2cap_add_scid(struct l2cap_chan *chan, __u16 scid)
189 {
190 write_lock_bh(&chan_list_lock);
191
192 chan->scid = scid;
193
194 write_unlock_bh(&chan_list_lock);
195
196 return 0;
197 }
198
199 static u16 l2cap_alloc_cid(struct l2cap_conn *conn)
200 {
201 u16 cid = L2CAP_CID_DYN_START;
202
203 for (; cid < L2CAP_CID_DYN_END; cid++) {
204 if (!__l2cap_get_chan_by_scid(conn, cid))
205 return cid;
206 }
207
208 return 0;
209 }
210
211 static void l2cap_chan_set_timer(struct l2cap_chan *chan, long timeout)
212 {
213 BT_DBG("chan %p state %d timeout %ld", chan->sk, chan->sk->sk_state,
214 timeout);
215 if (!mod_timer(&chan->chan_timer, jiffies + timeout))
216 sock_hold(chan->sk);
217 }
218
219 static void l2cap_chan_clear_timer(struct l2cap_chan *chan)
220 {
221 BT_DBG("chan %p state %d", chan, chan->sk->sk_state);
222
223 if (timer_pending(&chan->chan_timer) && del_timer(&chan->chan_timer))
224 __sock_put(chan->sk);
225 }
226
/* Channel timer expiry handler (runs in timer/BH context).
 * Closes the channel with an error derived from its current state. */
static void l2cap_chan_timeout(unsigned long arg)
{
	struct l2cap_chan *chan = (struct l2cap_chan *) arg;
	struct sock *sk = chan->sk;
	int reason;

	BT_DBG("chan %p state %d", chan, sk->sk_state);

	bh_lock_sock(sk);

	if (sock_owned_by_user(sk)) {
		/* sk is owned by user. Try again later */
		l2cap_chan_set_timer(chan, HZ / 5);
		bh_unlock_sock(sk);
		sock_put(sk);
		return;
	}

	/* Connected/config phases report ECONNREFUSED; a plain timed-out
	 * connect (above SDP security level) does too, anything else is
	 * ETIMEDOUT. */
	if (sk->sk_state == BT_CONNECTED || sk->sk_state == BT_CONFIG)
		reason = ECONNREFUSED;
	else if (sk->sk_state == BT_CONNECT &&
				chan->sec_level != BT_SECURITY_SDP)
		reason = ECONNREFUSED;
	else
		reason = ETIMEDOUT;

	l2cap_chan_close(chan, reason);

	bh_unlock_sock(sk);

	l2cap_sock_kill(sk);
	/* drop the reference the armed timer was holding */
	sock_put(sk);
}
260
261 struct l2cap_chan *l2cap_chan_create(struct sock *sk)
262 {
263 struct l2cap_chan *chan;
264
265 chan = kzalloc(sizeof(*chan), GFP_ATOMIC);
266 if (!chan)
267 return NULL;
268
269 chan->sk = sk;
270
271 write_lock_bh(&chan_list_lock);
272 list_add(&chan->global_l, &chan_list);
273 write_unlock_bh(&chan_list_lock);
274
275 setup_timer(&chan->chan_timer, l2cap_chan_timeout, (unsigned long) chan);
276
277 return chan;
278 }
279
280 void l2cap_chan_destroy(struct l2cap_chan *chan)
281 {
282 write_lock_bh(&chan_list_lock);
283 list_del(&chan->global_l);
284 write_unlock_bh(&chan_list_lock);
285
286 kfree(chan);
287 }
288
289 static void __l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan)
290 {
291 struct sock *sk = chan->sk;
292
293 BT_DBG("conn %p, psm 0x%2.2x, dcid 0x%4.4x", conn,
294 chan->psm, chan->dcid);
295
296 conn->disc_reason = 0x13;
297
298 chan->conn = conn;
299
300 if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED) {
301 if (conn->hcon->type == LE_LINK) {
302 /* LE connection */
303 chan->omtu = L2CAP_LE_DEFAULT_MTU;
304 chan->scid = L2CAP_CID_LE_DATA;
305 chan->dcid = L2CAP_CID_LE_DATA;
306 } else {
307 /* Alloc CID for connection-oriented socket */
308 chan->scid = l2cap_alloc_cid(conn);
309 chan->omtu = L2CAP_DEFAULT_MTU;
310 }
311 } else if (chan->chan_type == L2CAP_CHAN_CONN_LESS) {
312 /* Connectionless socket */
313 chan->scid = L2CAP_CID_CONN_LESS;
314 chan->dcid = L2CAP_CID_CONN_LESS;
315 chan->omtu = L2CAP_DEFAULT_MTU;
316 } else {
317 /* Raw socket can send/recv signalling messages only */
318 chan->scid = L2CAP_CID_SIGNALING;
319 chan->dcid = L2CAP_CID_SIGNALING;
320 chan->omtu = L2CAP_DEFAULT_MTU;
321 }
322
323 sock_hold(sk);
324
325 list_add(&chan->list, &conn->chan_l);
326 }
327
/* Delete channel.
 * Must be called on the locked socket. */
static void l2cap_chan_del(struct l2cap_chan *chan, int err)
{
	struct sock *sk = chan->sk;
	struct l2cap_conn *conn = chan->conn;
	struct sock *parent = bt_sk(sk)->parent;

	l2cap_chan_clear_timer(chan);

	BT_DBG("chan %p, conn %p, err %d", chan, conn, err);

	if (conn) {
		/* Delete from channel list */
		write_lock_bh(&conn->chan_lock);
		list_del(&chan->list);
		write_unlock_bh(&conn->chan_lock);
		/* drop the reference taken in __l2cap_chan_add() */
		__sock_put(sk);

		chan->conn = NULL;
		hci_conn_put(conn->hcon);
	}

	sk->sk_state = BT_CLOSED;
	sock_set_flag(sk, SOCK_ZAPPED);

	if (err)
		sk->sk_err = err;

	if (parent) {
		/* Accepted-but-unclaimed child: detach and wake the parent */
		bt_accept_unlink(sk);
		parent->sk_data_ready(parent, 0);
	} else
		sk->sk_state_change(sk);

	/* Queues/timers below only exist once both configuration
	 * directions completed */
	if (!(chan->conf_state & L2CAP_CONF_OUTPUT_DONE &&
			chan->conf_state & L2CAP_CONF_INPUT_DONE))
		return;

	skb_queue_purge(&chan->tx_q);

	if (chan->mode == L2CAP_MODE_ERTM) {
		struct srej_list *l, *tmp;

		/* Stop ERTM timers, free queued frames and the pending
		 * SREJ list */
		del_timer(&chan->retrans_timer);
		del_timer(&chan->monitor_timer);
		del_timer(&chan->ack_timer);

		skb_queue_purge(&chan->srej_q);
		skb_queue_purge(&chan->busy_q);

		list_for_each_entry_safe(l, tmp, &chan->srej_l, list) {
			list_del(&l->list);
			kfree(l);
		}
	}
}
385
386 static void l2cap_chan_cleanup_listen(struct sock *parent)
387 {
388 struct sock *sk;
389
390 BT_DBG("parent %p", parent);
391
392 /* Close not yet accepted channels */
393 while ((sk = bt_accept_dequeue(parent, NULL))) {
394 l2cap_chan_clear_timer(l2cap_pi(sk)->chan);
395 lock_sock(sk);
396 l2cap_chan_close(l2cap_pi(sk)->chan, ECONNRESET);
397 release_sock(sk);
398 l2cap_sock_kill(sk);
399 }
400
401 parent->sk_state = BT_CLOSED;
402 sock_set_flag(parent, SOCK_ZAPPED);
403 }
404
/* Close @chan according to its current state; @reason is the errno
 * reported to the user.  Must be called with the socket locked. */
void l2cap_chan_close(struct l2cap_chan *chan, int reason)
{
	struct l2cap_conn *conn = chan->conn;
	struct sock *sk = chan->sk;

	BT_DBG("chan %p state %d socket %p", chan, sk->sk_state, sk->sk_socket);

	switch (sk->sk_state) {
	case BT_LISTEN:
		l2cap_chan_cleanup_listen(sk);
		break;

	case BT_CONNECTED:
	case BT_CONFIG:
		/* On established ACL channels send a Disconnect Request;
		 * deletion happens on response (or on the timer we arm) */
		if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED &&
					conn->hcon->type == ACL_LINK) {
			l2cap_chan_clear_timer(chan);
			l2cap_chan_set_timer(chan, sk->sk_sndtimeo);
			l2cap_send_disconn_req(conn, chan, reason);
		} else
			l2cap_chan_del(chan, reason);
		break;

	case BT_CONNECT2:
		/* Incoming request pending: send a rejecting Connect
		 * Response before deleting */
		if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED &&
					conn->hcon->type == ACL_LINK) {
			struct l2cap_conn_rsp rsp;
			__u16 result;

			if (bt_sk(sk)->defer_setup)
				result = L2CAP_CR_SEC_BLOCK;
			else
				result = L2CAP_CR_BAD_PSM;

			rsp.scid = cpu_to_le16(chan->dcid);
			rsp.dcid = cpu_to_le16(chan->scid);
			rsp.result = cpu_to_le16(result);
			rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
			l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
							sizeof(rsp), &rsp);
		}

		l2cap_chan_del(chan, reason);
		break;

	case BT_CONNECT:
	case BT_DISCONN:
		l2cap_chan_del(chan, reason);
		break;

	default:
		sock_set_flag(sk, SOCK_ZAPPED);
		break;
	}
}
460
461 static inline u8 l2cap_get_auth_type(struct l2cap_chan *chan)
462 {
463 if (chan->chan_type == L2CAP_CHAN_RAW) {
464 switch (chan->sec_level) {
465 case BT_SECURITY_HIGH:
466 return HCI_AT_DEDICATED_BONDING_MITM;
467 case BT_SECURITY_MEDIUM:
468 return HCI_AT_DEDICATED_BONDING;
469 default:
470 return HCI_AT_NO_BONDING;
471 }
472 } else if (chan->psm == cpu_to_le16(0x0001)) {
473 if (chan->sec_level == BT_SECURITY_LOW)
474 chan->sec_level = BT_SECURITY_SDP;
475
476 if (chan->sec_level == BT_SECURITY_HIGH)
477 return HCI_AT_NO_BONDING_MITM;
478 else
479 return HCI_AT_NO_BONDING;
480 } else {
481 switch (chan->sec_level) {
482 case BT_SECURITY_HIGH:
483 return HCI_AT_GENERAL_BONDING_MITM;
484 case BT_SECURITY_MEDIUM:
485 return HCI_AT_GENERAL_BONDING;
486 default:
487 return HCI_AT_NO_BONDING;
488 }
489 }
490 }
491
492 /* Service level security */
493 static inline int l2cap_check_security(struct l2cap_chan *chan)
494 {
495 struct l2cap_conn *conn = chan->conn;
496 __u8 auth_type;
497
498 auth_type = l2cap_get_auth_type(chan);
499
500 return hci_conn_security(conn->hcon, chan->sec_level, auth_type);
501 }
502
503 u8 l2cap_get_ident(struct l2cap_conn *conn)
504 {
505 u8 id;
506
507 /* Get next available identificator.
508 * 1 - 128 are used by kernel.
509 * 129 - 199 are reserved.
510 * 200 - 254 are used by utilities like l2ping, etc.
511 */
512
513 spin_lock_bh(&conn->lock);
514
515 if (++conn->tx_ident > 128)
516 conn->tx_ident = 1;
517
518 id = conn->tx_ident;
519
520 spin_unlock_bh(&conn->lock);
521
522 return id;
523 }
524
525 static void l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len, void *data)
526 {
527 struct sk_buff *skb = l2cap_build_cmd(conn, code, ident, len, data);
528 u8 flags;
529
530 BT_DBG("code 0x%2.2x", code);
531
532 if (!skb)
533 return;
534
535 if (lmp_no_flush_capable(conn->hcon->hdev))
536 flags = ACL_START_NO_FLUSH;
537 else
538 flags = ACL_START;
539
540 hci_send_acl(conn->hcon, skb, flags);
541 }
542
543 static inline void l2cap_send_sframe(struct l2cap_chan *chan, u16 control)
544 {
545 struct sk_buff *skb;
546 struct l2cap_hdr *lh;
547 struct l2cap_pinfo *pi = l2cap_pi(chan->sk);
548 struct l2cap_conn *conn = chan->conn;
549 struct sock *sk = (struct sock *)pi;
550 int count, hlen = L2CAP_HDR_SIZE + 2;
551 u8 flags;
552
553 if (sk->sk_state != BT_CONNECTED)
554 return;
555
556 if (chan->fcs == L2CAP_FCS_CRC16)
557 hlen += 2;
558
559 BT_DBG("chan %p, control 0x%2.2x", chan, control);
560
561 count = min_t(unsigned int, conn->mtu, hlen);
562 control |= L2CAP_CTRL_FRAME_TYPE;
563
564 if (chan->conn_state & L2CAP_CONN_SEND_FBIT) {
565 control |= L2CAP_CTRL_FINAL;
566 chan->conn_state &= ~L2CAP_CONN_SEND_FBIT;
567 }
568
569 if (chan->conn_state & L2CAP_CONN_SEND_PBIT) {
570 control |= L2CAP_CTRL_POLL;
571 chan->conn_state &= ~L2CAP_CONN_SEND_PBIT;
572 }
573
574 skb = bt_skb_alloc(count, GFP_ATOMIC);
575 if (!skb)
576 return;
577
578 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
579 lh->len = cpu_to_le16(hlen - L2CAP_HDR_SIZE);
580 lh->cid = cpu_to_le16(chan->dcid);
581 put_unaligned_le16(control, skb_put(skb, 2));
582
583 if (chan->fcs == L2CAP_FCS_CRC16) {
584 u16 fcs = crc16(0, (u8 *)lh, count - 2);
585 put_unaligned_le16(fcs, skb_put(skb, 2));
586 }
587
588 if (lmp_no_flush_capable(conn->hcon->hdev))
589 flags = ACL_START_NO_FLUSH;
590 else
591 flags = ACL_START;
592
593 hci_send_acl(chan->conn->hcon, skb, flags);
594 }
595
596 static inline void l2cap_send_rr_or_rnr(struct l2cap_chan *chan, u16 control)
597 {
598 if (chan->conn_state & L2CAP_CONN_LOCAL_BUSY) {
599 control |= L2CAP_SUPER_RCV_NOT_READY;
600 chan->conn_state |= L2CAP_CONN_RNR_SENT;
601 } else
602 control |= L2CAP_SUPER_RCV_READY;
603
604 control |= chan->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT;
605
606 l2cap_send_sframe(chan, control);
607 }
608
609 static inline int __l2cap_no_conn_pending(struct l2cap_chan *chan)
610 {
611 return !(chan->conf_state & L2CAP_CONF_CONNECT_PEND);
612 }
613
614 static void l2cap_do_start(struct l2cap_chan *chan)
615 {
616 struct l2cap_conn *conn = chan->conn;
617
618 if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) {
619 if (!(conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE))
620 return;
621
622 if (l2cap_check_security(chan) &&
623 __l2cap_no_conn_pending(chan)) {
624 struct l2cap_conn_req req;
625 req.scid = cpu_to_le16(chan->scid);
626 req.psm = chan->psm;
627
628 chan->ident = l2cap_get_ident(conn);
629 chan->conf_state |= L2CAP_CONF_CONNECT_PEND;
630
631 l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_REQ,
632 sizeof(req), &req);
633 }
634 } else {
635 struct l2cap_info_req req;
636 req.type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
637
638 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
639 conn->info_ident = l2cap_get_ident(conn);
640
641 mod_timer(&conn->info_timer, jiffies +
642 msecs_to_jiffies(L2CAP_INFO_TIMEOUT));
643
644 l2cap_send_cmd(conn, conn->info_ident,
645 L2CAP_INFO_REQ, sizeof(req), &req);
646 }
647 }
648
649 static inline int l2cap_mode_supported(__u8 mode, __u32 feat_mask)
650 {
651 u32 local_feat_mask = l2cap_feat_mask;
652 if (!disable_ertm)
653 local_feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING;
654
655 switch (mode) {
656 case L2CAP_MODE_ERTM:
657 return L2CAP_FEAT_ERTM & feat_mask & local_feat_mask;
658 case L2CAP_MODE_STREAMING:
659 return L2CAP_FEAT_STREAMING & feat_mask & local_feat_mask;
660 default:
661 return 0x00;
662 }
663 }
664
665 static void l2cap_send_disconn_req(struct l2cap_conn *conn, struct l2cap_chan *chan, int err)
666 {
667 struct sock *sk;
668 struct l2cap_disconn_req req;
669
670 if (!conn)
671 return;
672
673 sk = chan->sk;
674
675 if (chan->mode == L2CAP_MODE_ERTM) {
676 del_timer(&chan->retrans_timer);
677 del_timer(&chan->monitor_timer);
678 del_timer(&chan->ack_timer);
679 }
680
681 req.dcid = cpu_to_le16(chan->dcid);
682 req.scid = cpu_to_le16(chan->scid);
683 l2cap_send_cmd(conn, l2cap_get_ident(conn),
684 L2CAP_DISCONN_REQ, sizeof(req), &req);
685
686 sk->sk_state = BT_DISCONN;
687 sk->sk_err = err;
688 }
689
690 /* ---- L2CAP connections ---- */
691 static void l2cap_conn_start(struct l2cap_conn *conn)
692 {
693 struct l2cap_chan *chan, *tmp;
694
695 BT_DBG("conn %p", conn);
696
697 read_lock(&conn->chan_lock);
698
699 list_for_each_entry_safe(chan, tmp, &conn->chan_l, list) {
700 struct sock *sk = chan->sk;
701
702 bh_lock_sock(sk);
703
704 if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
705 bh_unlock_sock(sk);
706 continue;
707 }
708
709 if (sk->sk_state == BT_CONNECT) {
710 struct l2cap_conn_req req;
711
712 if (!l2cap_check_security(chan) ||
713 !__l2cap_no_conn_pending(chan)) {
714 bh_unlock_sock(sk);
715 continue;
716 }
717
718 if (!l2cap_mode_supported(chan->mode,
719 conn->feat_mask)
720 && chan->conf_state &
721 L2CAP_CONF_STATE2_DEVICE) {
722 /* l2cap_chan_close() calls list_del(chan)
723 * so release the lock */
724 read_unlock_bh(&conn->chan_lock);
725 l2cap_chan_close(chan, ECONNRESET);
726 read_lock_bh(&conn->chan_lock);
727 bh_unlock_sock(sk);
728 continue;
729 }
730
731 req.scid = cpu_to_le16(chan->scid);
732 req.psm = chan->psm;
733
734 chan->ident = l2cap_get_ident(conn);
735 chan->conf_state |= L2CAP_CONF_CONNECT_PEND;
736
737 l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_REQ,
738 sizeof(req), &req);
739
740 } else if (sk->sk_state == BT_CONNECT2) {
741 struct l2cap_conn_rsp rsp;
742 char buf[128];
743 rsp.scid = cpu_to_le16(chan->dcid);
744 rsp.dcid = cpu_to_le16(chan->scid);
745
746 if (l2cap_check_security(chan)) {
747 if (bt_sk(sk)->defer_setup) {
748 struct sock *parent = bt_sk(sk)->parent;
749 rsp.result = cpu_to_le16(L2CAP_CR_PEND);
750 rsp.status = cpu_to_le16(L2CAP_CS_AUTHOR_PEND);
751 parent->sk_data_ready(parent, 0);
752
753 } else {
754 sk->sk_state = BT_CONFIG;
755 rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
756 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
757 }
758 } else {
759 rsp.result = cpu_to_le16(L2CAP_CR_PEND);
760 rsp.status = cpu_to_le16(L2CAP_CS_AUTHEN_PEND);
761 }
762
763 l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
764 sizeof(rsp), &rsp);
765
766 if (chan->conf_state & L2CAP_CONF_REQ_SENT ||
767 rsp.result != L2CAP_CR_SUCCESS) {
768 bh_unlock_sock(sk);
769 continue;
770 }
771
772 chan->conf_state |= L2CAP_CONF_REQ_SENT;
773 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
774 l2cap_build_conf_req(chan, buf), buf);
775 chan->num_conf_req++;
776 }
777
778 bh_unlock_sock(sk);
779 }
780
781 read_unlock(&conn->chan_lock);
782 }
783
784 /* Find socket with cid and source bdaddr.
785 * Returns closest match, locked.
786 */
787 static struct l2cap_chan *l2cap_global_chan_by_scid(int state, __le16 cid, bdaddr_t *src)
788 {
789 struct l2cap_chan *c, *c1 = NULL;
790
791 read_lock(&chan_list_lock);
792
793 list_for_each_entry(c, &chan_list, global_l) {
794 struct sock *sk = c->sk;
795
796 if (state && sk->sk_state != state)
797 continue;
798
799 if (c->scid == cid) {
800 /* Exact match. */
801 if (!bacmp(&bt_sk(sk)->src, src)) {
802 read_unlock(&chan_list_lock);
803 return c;
804 }
805
806 /* Closest match */
807 if (!bacmp(&bt_sk(sk)->src, BDADDR_ANY))
808 c1 = c;
809 }
810 }
811
812 read_unlock(&chan_list_lock);
813
814 return c1;
815 }
816
/* A new incoming LE link came up: if a socket is listening on the LE
 * data CID, create and connect a child channel/socket for it. */
static void l2cap_le_conn_ready(struct l2cap_conn *conn)
{
	struct sock *parent, *sk;
	struct l2cap_chan *chan, *pchan;

	BT_DBG("");

	/* Check if we have socket listening on cid */
	pchan = l2cap_global_chan_by_scid(BT_LISTEN, L2CAP_CID_LE_DATA,
							conn->src);
	if (!pchan)
		return;

	parent = pchan->sk;

	bh_lock_sock(parent);

	/* Check for backlog size */
	if (sk_acceptq_is_full(parent)) {
		BT_DBG("backlog full %d", parent->sk_ack_backlog);
		goto clean;
	}

	sk = l2cap_sock_alloc(sock_net(parent), NULL, BTPROTO_L2CAP, GFP_ATOMIC);
	if (!sk)
		goto clean;

	chan = l2cap_chan_create(sk);
	if (!chan) {
		l2cap_sock_kill(sk);
		goto clean;
	}

	l2cap_pi(sk)->chan = chan;

	write_lock_bh(&conn->chan_lock);

	/* The channel holds a link reference; dropped in l2cap_chan_del() */
	hci_conn_hold(conn->hcon);

	l2cap_sock_init(sk, parent);

	bacpy(&bt_sk(sk)->src, conn->src);
	bacpy(&bt_sk(sk)->dst, conn->dst);

	bt_accept_enqueue(parent, sk);

	__l2cap_chan_add(conn, chan);

	l2cap_chan_set_timer(chan, sk->sk_sndtimeo);

	/* No configuration stage on LE: connected immediately */
	sk->sk_state = BT_CONNECTED;
	parent->sk_data_ready(parent, 0);

	write_unlock_bh(&conn->chan_lock);

clean:
	/* The success path also exits here: only the parent lock remains */
	bh_unlock_sock(parent);
}
875
/* The underlying HCI link is usable: finish bringing up the channels on
 * @conn, or start their connect procedure. */
static void l2cap_conn_ready(struct l2cap_conn *conn)
{
	struct l2cap_chan *chan;

	BT_DBG("conn %p", conn);

	/* An incoming LE link may have a listener waiting for it */
	if (!conn->hcon->out && conn->hcon->type == LE_LINK)
		l2cap_le_conn_ready(conn);

	read_lock(&conn->chan_lock);

	list_for_each_entry(chan, &conn->chan_l, list) {
		struct sock *sk = chan->sk;

		bh_lock_sock(sk);

		/* LE has no configuration stage: connected right away */
		if (conn->hcon->type == LE_LINK) {
			l2cap_chan_clear_timer(chan);
			sk->sk_state = BT_CONNECTED;
			sk->sk_state_change(sk);
		}

		/* Connectionless/raw channels need no setup either */
		if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
			l2cap_chan_clear_timer(chan);
			sk->sk_state = BT_CONNECTED;
			sk->sk_state_change(sk);
		} else if (sk->sk_state == BT_CONNECT)
			l2cap_do_start(chan);

		bh_unlock_sock(sk);
	}

	read_unlock(&conn->chan_lock);
}
910
911 /* Notify sockets that we cannot guaranty reliability anymore */
912 static void l2cap_conn_unreliable(struct l2cap_conn *conn, int err)
913 {
914 struct l2cap_chan *chan;
915
916 BT_DBG("conn %p", conn);
917
918 read_lock(&conn->chan_lock);
919
920 list_for_each_entry(chan, &conn->chan_l, list) {
921 struct sock *sk = chan->sk;
922
923 if (chan->force_reliable)
924 sk->sk_err = err;
925 }
926
927 read_unlock(&conn->chan_lock);
928 }
929
930 static void l2cap_info_timeout(unsigned long arg)
931 {
932 struct l2cap_conn *conn = (void *) arg;
933
934 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
935 conn->info_ident = 0;
936
937 l2cap_conn_start(conn);
938 }
939
940 static struct l2cap_conn *l2cap_conn_add(struct hci_conn *hcon, u8 status)
941 {
942 struct l2cap_conn *conn = hcon->l2cap_data;
943
944 if (conn || status)
945 return conn;
946
947 conn = kzalloc(sizeof(struct l2cap_conn), GFP_ATOMIC);
948 if (!conn)
949 return NULL;
950
951 hcon->l2cap_data = conn;
952 conn->hcon = hcon;
953
954 BT_DBG("hcon %p conn %p", hcon, conn);
955
956 if (hcon->hdev->le_mtu && hcon->type == LE_LINK)
957 conn->mtu = hcon->hdev->le_mtu;
958 else
959 conn->mtu = hcon->hdev->acl_mtu;
960
961 conn->src = &hcon->hdev->bdaddr;
962 conn->dst = &hcon->dst;
963
964 conn->feat_mask = 0;
965
966 spin_lock_init(&conn->lock);
967 rwlock_init(&conn->chan_lock);
968
969 INIT_LIST_HEAD(&conn->chan_l);
970
971 if (hcon->type != LE_LINK)
972 setup_timer(&conn->info_timer, l2cap_info_timeout,
973 (unsigned long) conn);
974
975 conn->disc_reason = 0x13;
976
977 return conn;
978 }
979
/* Tear down all L2CAP state on @hcon: delete every channel with error
 * @err and free the connection object. */
static void l2cap_conn_del(struct hci_conn *hcon, int err)
{
	struct l2cap_conn *conn = hcon->l2cap_data;
	struct l2cap_chan *chan, *l;
	struct sock *sk;

	if (!conn)
		return;

	BT_DBG("hcon %p conn %p, err %d", hcon, conn, err);

	/* Drop a partially reassembled frame, if any */
	kfree_skb(conn->rx_skb);

	/* Kill channels */
	list_for_each_entry_safe(chan, l, &conn->chan_l, list) {
		sk = chan->sk;
		bh_lock_sock(sk);
		l2cap_chan_del(chan, err);
		bh_unlock_sock(sk);
		l2cap_sock_kill(sk);
	}

	/* info_timer is only ever armed after the feature request went out */
	if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT)
		del_timer_sync(&conn->info_timer);

	hcon->l2cap_data = NULL;
	kfree(conn);
}
1008
1009 static inline void l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan)
1010 {
1011 write_lock_bh(&conn->chan_lock);
1012 __l2cap_chan_add(conn, chan);
1013 write_unlock_bh(&conn->chan_lock);
1014 }
1015
1016 /* ---- Socket interface ---- */
1017
1018 /* Find socket with psm and source bdaddr.
1019 * Returns closest match.
1020 */
1021 static struct l2cap_chan *l2cap_global_chan_by_psm(int state, __le16 psm, bdaddr_t *src)
1022 {
1023 struct l2cap_chan *c, *c1 = NULL;
1024
1025 read_lock(&chan_list_lock);
1026
1027 list_for_each_entry(c, &chan_list, global_l) {
1028 struct sock *sk = c->sk;
1029
1030 if (state && sk->sk_state != state)
1031 continue;
1032
1033 if (c->psm == psm) {
1034 /* Exact match. */
1035 if (!bacmp(&bt_sk(sk)->src, src)) {
1036 read_unlock(&chan_list_lock);
1037 return c;
1038 }
1039
1040 /* Closest match */
1041 if (!bacmp(&bt_sk(sk)->src, BDADDR_ANY))
1042 c1 = c;
1043 }
1044 }
1045
1046 read_unlock(&chan_list_lock);
1047
1048 return c1;
1049 }
1050
/* Initiate an outgoing connection for @chan: resolve the local HCI
 * device for the route, create (or reuse) the ACL/LE link and attach the
 * channel.  Returns 0 on success or a negative errno. */
int l2cap_chan_connect(struct l2cap_chan *chan)
{
	struct sock *sk = chan->sk;
	bdaddr_t *src = &bt_sk(sk)->src;
	bdaddr_t *dst = &bt_sk(sk)->dst;
	struct l2cap_conn *conn;
	struct hci_conn *hcon;
	struct hci_dev *hdev;
	__u8 auth_type;
	int err;

	BT_DBG("%s -> %s psm 0x%2.2x", batostr(src), batostr(dst),
							chan->psm);

	/* hci_get_route() takes a device reference; released at done: */
	hdev = hci_get_route(dst, src);
	if (!hdev)
		return -EHOSTUNREACH;

	hci_dev_lock_bh(hdev);

	auth_type = l2cap_get_auth_type(chan);

	/* The LE data CID connects over LE, everything else over ACL */
	if (chan->dcid == L2CAP_CID_LE_DATA)
		hcon = hci_connect(hdev, LE_LINK, dst,
					chan->sec_level, auth_type);
	else
		hcon = hci_connect(hdev, ACL_LINK, dst,
					chan->sec_level, auth_type);

	if (IS_ERR(hcon)) {
		err = PTR_ERR(hcon);
		goto done;
	}

	conn = l2cap_conn_add(hcon, 0);
	if (!conn) {
		hci_conn_put(hcon);
		err = -ENOMEM;
		goto done;
	}

	/* Update source addr of the socket */
	bacpy(src, conn->src);

	l2cap_chan_add(conn, chan);

	sk->sk_state = BT_CONNECT;
	l2cap_chan_set_timer(chan, sk->sk_sndtimeo);

	if (hcon->state == BT_CONNECTED) {
		/* Link already up: go straight to channel setup */
		if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
			l2cap_chan_clear_timer(chan);
			if (l2cap_check_security(chan))
				sk->sk_state = BT_CONNECTED;
		} else
			l2cap_do_start(chan);
	}

	err = 0;

done:
	hci_dev_unlock_bh(hdev);
	hci_dev_put(hdev);
	return err;
}
1116
/* Sleep (interruptibly) until every ERTM frame queued on @sk's channel
 * has been acknowledged, the channel is detached, a signal arrives, or
 * the socket reports an error.  Called with the socket locked; the lock
 * is dropped and re-taken around each sleep. */
int __l2cap_wait_ack(struct sock *sk)
{
	struct l2cap_chan *chan = l2cap_pi(sk)->chan;
	DECLARE_WAITQUEUE(wait, current);
	int err = 0;
	int timeo = HZ/5;

	add_wait_queue(sk_sleep(sk), &wait);
	while ((chan->unacked_frames > 0 && chan->conn)) {
		set_current_state(TASK_INTERRUPTIBLE);

		/* Never sleep with a zero timeout: re-poll in HZ/5 */
		if (!timeo)
			timeo = HZ/5;

		if (signal_pending(current)) {
			err = sock_intr_errno(timeo);
			break;
		}

		release_sock(sk);
		timeo = schedule_timeout(timeo);
		lock_sock(sk);

		err = sock_error(sk);
		if (err)
			break;
	}
	set_current_state(TASK_RUNNING);
	remove_wait_queue(sk_sleep(sk), &wait);
	return err;
}
1148
1149 static void l2cap_monitor_timeout(unsigned long arg)
1150 {
1151 struct l2cap_chan *chan = (void *) arg;
1152 struct sock *sk = chan->sk;
1153
1154 BT_DBG("chan %p", chan);
1155
1156 bh_lock_sock(sk);
1157 if (chan->retry_count >= chan->remote_max_tx) {
1158 l2cap_send_disconn_req(chan->conn, chan, ECONNABORTED);
1159 bh_unlock_sock(sk);
1160 return;
1161 }
1162
1163 chan->retry_count++;
1164 __mod_monitor_timer();
1165
1166 l2cap_send_rr_or_rnr(chan, L2CAP_CTRL_POLL);
1167 bh_unlock_sock(sk);
1168 }
1169
1170 static void l2cap_retrans_timeout(unsigned long arg)
1171 {
1172 struct l2cap_chan *chan = (void *) arg;
1173 struct sock *sk = chan->sk;
1174
1175 BT_DBG("chan %p", chan);
1176
1177 bh_lock_sock(sk);
1178 chan->retry_count = 1;
1179 __mod_monitor_timer();
1180
1181 chan->conn_state |= L2CAP_CONN_WAIT_F;
1182
1183 l2cap_send_rr_or_rnr(chan, L2CAP_CTRL_POLL);
1184 bh_unlock_sock(sk);
1185 }
1186
1187 static void l2cap_drop_acked_frames(struct l2cap_chan *chan)
1188 {
1189 struct sk_buff *skb;
1190
1191 while ((skb = skb_peek(&chan->tx_q)) &&
1192 chan->unacked_frames) {
1193 if (bt_cb(skb)->tx_seq == chan->expected_ack_seq)
1194 break;
1195
1196 skb = skb_dequeue(&chan->tx_q);
1197 kfree_skb(skb);
1198
1199 chan->unacked_frames--;
1200 }
1201
1202 if (!chan->unacked_frames)
1203 del_timer(&chan->retrans_timer);
1204 }
1205
1206 void l2cap_do_send(struct l2cap_chan *chan, struct sk_buff *skb)
1207 {
1208 struct hci_conn *hcon = chan->conn->hcon;
1209 u16 flags;
1210
1211 BT_DBG("chan %p, skb %p len %d", chan, skb, skb->len);
1212
1213 if (!chan->flushable && lmp_no_flush_capable(hcon->hdev))
1214 flags = ACL_START_NO_FLUSH;
1215 else
1216 flags = ACL_START;
1217
1218 hci_send_acl(hcon, skb, flags);
1219 }
1220
1221 void l2cap_streaming_send(struct l2cap_chan *chan)
1222 {
1223 struct sk_buff *skb;
1224 u16 control, fcs;
1225
1226 while ((skb = skb_dequeue(&chan->tx_q))) {
1227 control = get_unaligned_le16(skb->data + L2CAP_HDR_SIZE);
1228 control |= chan->next_tx_seq << L2CAP_CTRL_TXSEQ_SHIFT;
1229 put_unaligned_le16(control, skb->data + L2CAP_HDR_SIZE);
1230
1231 if (chan->fcs == L2CAP_FCS_CRC16) {
1232 fcs = crc16(0, (u8 *)skb->data, skb->len - 2);
1233 put_unaligned_le16(fcs, skb->data + skb->len - 2);
1234 }
1235
1236 l2cap_do_send(chan, skb);
1237
1238 chan->next_tx_seq = (chan->next_tx_seq + 1) % 64;
1239 }
1240 }
1241
/* Retransmit the single I-frame with sequence number @tx_seq from the
 * transmit queue (ERTM selective-reject handling). */
static void l2cap_retransmit_one_frame(struct l2cap_chan *chan, u8 tx_seq)
{
	struct sk_buff *skb, *tx_skb;
	u16 control, fcs;

	skb = skb_peek(&chan->tx_q);
	if (!skb)
		return;

	/* Locate the requested frame; it may no longer be queued */
	do {
		if (bt_cb(skb)->tx_seq == tx_seq)
			break;

		if (skb_queue_is_last(&chan->tx_q, skb))
			return;

	} while ((skb = skb_queue_next(&chan->tx_q, skb)));

	/* Remote's max-transmit budget exhausted: abort the channel */
	if (chan->remote_max_tx &&
			bt_cb(skb)->retries == chan->remote_max_tx) {
		l2cap_send_disconn_req(chan->conn, chan, ECONNABORTED);
		return;
	}

	/* Clone so the original stays queued for further retransmission.
	 * NOTE(review): skb_clone() can return NULL under memory
	 * pressure — unchecked here; the writes below would then oops. */
	tx_skb = skb_clone(skb, GFP_ATOMIC);
	bt_cb(skb)->retries++;
	control = get_unaligned_le16(tx_skb->data + L2CAP_HDR_SIZE);
	/* keep only the SAR bits of the old control field */
	control &= L2CAP_CTRL_SAR;

	if (chan->conn_state & L2CAP_CONN_SEND_FBIT) {
		control |= L2CAP_CTRL_FINAL;
		chan->conn_state &= ~L2CAP_CONN_SEND_FBIT;
	}

	control |= (chan->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT)
			| (tx_seq << L2CAP_CTRL_TXSEQ_SHIFT);

	put_unaligned_le16(control, tx_skb->data + L2CAP_HDR_SIZE);

	if (chan->fcs == L2CAP_FCS_CRC16) {
		fcs = crc16(0, (u8 *)tx_skb->data, tx_skb->len - 2);
		put_unaligned_le16(fcs, tx_skb->data + tx_skb->len - 2);
	}

	l2cap_do_send(chan, tx_skb);
}
1288
1289 int l2cap_ertm_send(struct l2cap_chan *chan)
1290 {
1291 struct sk_buff *skb, *tx_skb;
1292 struct sock *sk = chan->sk;
1293 u16 control, fcs;
1294 int nsent = 0;
1295
1296 if (sk->sk_state != BT_CONNECTED)
1297 return -ENOTCONN;
1298
1299 while ((skb = chan->tx_send_head) && (!l2cap_tx_window_full(chan))) {
1300
1301 if (chan->remote_max_tx &&
1302 bt_cb(skb)->retries == chan->remote_max_tx) {
1303 l2cap_send_disconn_req(chan->conn, chan, ECONNABORTED);
1304 break;
1305 }
1306
1307 tx_skb = skb_clone(skb, GFP_ATOMIC);
1308
1309 bt_cb(skb)->retries++;
1310
1311 control = get_unaligned_le16(tx_skb->data + L2CAP_HDR_SIZE);
1312 control &= L2CAP_CTRL_SAR;
1313
1314 if (chan->conn_state & L2CAP_CONN_SEND_FBIT) {
1315 control |= L2CAP_CTRL_FINAL;
1316 chan->conn_state &= ~L2CAP_CONN_SEND_FBIT;
1317 }
1318 control |= (chan->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT)
1319 | (chan->next_tx_seq << L2CAP_CTRL_TXSEQ_SHIFT);
1320 put_unaligned_le16(control, tx_skb->data + L2CAP_HDR_SIZE);
1321
1322
1323 if (chan->fcs == L2CAP_FCS_CRC16) {
1324 fcs = crc16(0, (u8 *)skb->data, tx_skb->len - 2);
1325 put_unaligned_le16(fcs, skb->data + tx_skb->len - 2);
1326 }
1327
1328 l2cap_do_send(chan, tx_skb);
1329
1330 __mod_retrans_timer();
1331
1332 bt_cb(skb)->tx_seq = chan->next_tx_seq;
1333 chan->next_tx_seq = (chan->next_tx_seq + 1) % 64;
1334
1335 if (bt_cb(skb)->retries == 1)
1336 chan->unacked_frames++;
1337
1338 chan->frames_sent++;
1339
1340 if (skb_queue_is_last(&chan->tx_q, skb))
1341 chan->tx_send_head = NULL;
1342 else
1343 chan->tx_send_head = skb_queue_next(&chan->tx_q, skb);
1344
1345 nsent++;
1346 }
1347
1348 return nsent;
1349 }
1350
1351 static int l2cap_retransmit_frames(struct l2cap_chan *chan)
1352 {
1353 int ret;
1354
1355 if (!skb_queue_empty(&chan->tx_q))
1356 chan->tx_send_head = chan->tx_q.next;
1357
1358 chan->next_tx_seq = chan->expected_ack_seq;
1359 ret = l2cap_ertm_send(chan);
1360 return ret;
1361 }
1362
1363 static void l2cap_send_ack(struct l2cap_chan *chan)
1364 {
1365 u16 control = 0;
1366
1367 control |= chan->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT;
1368
1369 if (chan->conn_state & L2CAP_CONN_LOCAL_BUSY) {
1370 control |= L2CAP_SUPER_RCV_NOT_READY;
1371 chan->conn_state |= L2CAP_CONN_RNR_SENT;
1372 l2cap_send_sframe(chan, control);
1373 return;
1374 }
1375
1376 if (l2cap_ertm_send(chan) > 0)
1377 return;
1378
1379 control |= L2CAP_SUPER_RCV_READY;
1380 l2cap_send_sframe(chan, control);
1381 }
1382
1383 static void l2cap_send_srejtail(struct l2cap_chan *chan)
1384 {
1385 struct srej_list *tail;
1386 u16 control;
1387
1388 control = L2CAP_SUPER_SELECT_REJECT;
1389 control |= L2CAP_CTRL_FINAL;
1390
1391 tail = list_entry((&chan->srej_l)->prev, struct srej_list, list);
1392 control |= tail->tx_seq << L2CAP_CTRL_REQSEQ_SHIFT;
1393
1394 l2cap_send_sframe(chan, control);
1395 }
1396
/* Copy @len bytes of user data from @msg into @skb.
 *
 * The first @count bytes fill the linear part of @skb (the caller
 * sized it accordingly); the remainder is copied into a chain of
 * continuation fragments of at most conn->mtu bytes each, linked on
 * skb's frag_list.  Continuation fragments carry no L2CAP header.
 *
 * Returns the number of bytes copied, or a negative errno.  Fragments
 * already allocated on the error path stay linked to @skb, so the
 * caller's kfree_skb() releases everything.
 */
static inline int l2cap_skbuff_fromiovec(struct sock *sk, struct msghdr *msg, int len, int count, struct sk_buff *skb)
{
	struct l2cap_conn *conn = l2cap_pi(sk)->chan->conn;
	struct sk_buff **frag;
	int err, sent = 0;

	if (memcpy_fromiovec(skb_put(skb, count), msg->msg_iov, count))
		return -EFAULT;

	sent += count;
	len -= count;

	/* Continuation fragments (no L2CAP header) */
	frag = &skb_shinfo(skb)->frag_list;
	while (len) {
		count = min_t(unsigned int, conn->mtu, len);

		*frag = bt_skb_send_alloc(sk, count, msg->msg_flags & MSG_DONTWAIT, &err);
		if (!*frag)
			return err;
		if (memcpy_fromiovec(skb_put(*frag, count), msg->msg_iov, count))
			return -EFAULT;

		sent += count;
		len -= count;

		frag = &(*frag)->next;
	}

	return sent;
}
1428
/* Build a single connectionless PDU from user data.
 *
 * Layout: L2CAP header, 16-bit PSM, then up to conn->mtu worth of
 * payload in the linear buffer; the rest of @len is attached as
 * continuation fragments by l2cap_skbuff_fromiovec().
 *
 * Returns the skb, or an ERR_PTR on allocation/copy failure.
 */
struct sk_buff *l2cap_create_connless_pdu(struct l2cap_chan *chan, struct msghdr *msg, size_t len)
{
	struct sock *sk = chan->sk;
	struct l2cap_conn *conn = chan->conn;
	struct sk_buff *skb;
	int err, count, hlen = L2CAP_HDR_SIZE + 2;	/* header + PSM */
	struct l2cap_hdr *lh;

	BT_DBG("sk %p len %d", sk, (int)len);

	count = min_t(unsigned int, (conn->mtu - hlen), len);
	skb = bt_skb_send_alloc(sk, count + hlen,
			msg->msg_flags & MSG_DONTWAIT, &err);
	if (!skb)
		return ERR_PTR(err);

	/* Create L2CAP header */
	lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
	lh->cid = cpu_to_le16(chan->dcid);
	lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
	put_unaligned_le16(chan->psm, skb_put(skb, 2));

	err = l2cap_skbuff_fromiovec(sk, msg, len, count, skb);
	if (unlikely(err < 0)) {
		kfree_skb(skb);
		return ERR_PTR(err);
	}
	return skb;
}
1458
1459 struct sk_buff *l2cap_create_basic_pdu(struct l2cap_chan *chan, struct msghdr *msg, size_t len)
1460 {
1461 struct sock *sk = chan->sk;
1462 struct l2cap_conn *conn = chan->conn;
1463 struct sk_buff *skb;
1464 int err, count, hlen = L2CAP_HDR_SIZE;
1465 struct l2cap_hdr *lh;
1466
1467 BT_DBG("sk %p len %d", sk, (int)len);
1468
1469 count = min_t(unsigned int, (conn->mtu - hlen), len);
1470 skb = bt_skb_send_alloc(sk, count + hlen,
1471 msg->msg_flags & MSG_DONTWAIT, &err);
1472 if (!skb)
1473 return ERR_PTR(err);
1474
1475 /* Create L2CAP header */
1476 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
1477 lh->cid = cpu_to_le16(chan->dcid);
1478 lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
1479
1480 err = l2cap_skbuff_fromiovec(sk, msg, len, count, skb);
1481 if (unlikely(err < 0)) {
1482 kfree_skb(skb);
1483 return ERR_PTR(err);
1484 }
1485 return skb;
1486 }
1487
/* Build a single ERTM/streaming I-frame PDU.
 *
 * Layout: L2CAP header, 16-bit control field, an optional 16-bit SDU
 * length (only when @sdulen is non-zero, i.e. a SAR start frame),
 * payload, and a 2-byte FCS placeholder when CRC16 is configured.
 * Sequence numbers in the control field and the real FCS are filled
 * in at transmit time.
 *
 * Returns the skb, or an ERR_PTR on failure.
 */
struct sk_buff *l2cap_create_iframe_pdu(struct l2cap_chan *chan, struct msghdr *msg, size_t len, u16 control, u16 sdulen)
{
	struct sock *sk = chan->sk;
	struct l2cap_conn *conn = chan->conn;
	struct sk_buff *skb;
	int err, count, hlen = L2CAP_HDR_SIZE + 2;	/* header + control */
	struct l2cap_hdr *lh;

	BT_DBG("sk %p len %d", sk, (int)len);

	if (!conn)
		return ERR_PTR(-ENOTCONN);

	/* SAR start frames carry an extra 2-byte SDU length field */
	if (sdulen)
		hlen += 2;

	/* Reserve room for the trailing 2-byte FCS */
	if (chan->fcs == L2CAP_FCS_CRC16)
		hlen += 2;

	count = min_t(unsigned int, (conn->mtu - hlen), len);
	skb = bt_skb_send_alloc(sk, count + hlen,
			msg->msg_flags & MSG_DONTWAIT, &err);
	if (!skb)
		return ERR_PTR(err);

	/* Create L2CAP header */
	lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
	lh->cid = cpu_to_le16(chan->dcid);
	lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
	put_unaligned_le16(control, skb_put(skb, 2));
	if (sdulen)
		put_unaligned_le16(sdulen, skb_put(skb, 2));

	err = l2cap_skbuff_fromiovec(sk, msg, len, count, skb);
	if (unlikely(err < 0)) {
		kfree_skb(skb);
		return ERR_PTR(err);
	}

	/* FCS placeholder; the real CRC is computed at transmit time */
	if (chan->fcs == L2CAP_FCS_CRC16)
		put_unaligned_le16(0, skb_put(skb, 2));

	bt_cb(skb)->retries = 0;
	return skb;
}
1533
/* Segment an SDU larger than remote_mps into a sequence of I-frames
 * (SAR start / continue / end) and append them to the channel's tx
 * queue.  Only the start frame carries the total SDU length.
 *
 * Frames are built on a private queue first so that nothing is left
 * half-queued on failure; on success the whole run is spliced onto
 * tx_q and tx_send_head is primed if transmission was idle.
 *
 * Returns the total payload size queued, or a negative errno.
 */
int l2cap_sar_segment_sdu(struct l2cap_chan *chan, struct msghdr *msg, size_t len)
{
	struct sk_buff *skb;
	struct sk_buff_head sar_queue;
	u16 control;
	size_t size = 0;

	skb_queue_head_init(&sar_queue);
	control = L2CAP_SDU_START;
	skb = l2cap_create_iframe_pdu(chan, msg, chan->remote_mps, control, len);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	__skb_queue_tail(&sar_queue, skb);
	len -= chan->remote_mps;
	size += chan->remote_mps;

	while (len > 0) {
		size_t buflen;

		/* Full-size continuation, or a (possibly short) end frame */
		if (len > chan->remote_mps) {
			control = L2CAP_SDU_CONTINUE;
			buflen = chan->remote_mps;
		} else {
			control = L2CAP_SDU_END;
			buflen = len;
		}

		skb = l2cap_create_iframe_pdu(chan, msg, buflen, control, 0);
		if (IS_ERR(skb)) {
			skb_queue_purge(&sar_queue);
			return PTR_ERR(skb);
		}

		__skb_queue_tail(&sar_queue, skb);
		len -= buflen;
		size += buflen;
	}
	skb_queue_splice_tail(&sar_queue, &chan->tx_q);
	if (chan->tx_send_head == NULL)
		chan->tx_send_head = sar_queue.next;

	return size;
}
1578
/* Entry point for sending user data on a channel.
 *
 * Connectionless channels send one connectionless PDU.  Basic mode
 * sends one B-frame after an outgoing-MTU check.  ERTM and streaming
 * mode either queue a single unsegmented I-frame or SAR-segment the
 * SDU, then transmit according to the mode's rules.
 *
 * Returns the number of bytes accepted, or a negative errno.
 */
int l2cap_chan_send(struct l2cap_chan *chan, struct msghdr *msg, size_t len)
{
	struct sk_buff *skb;
	u16 control;
	int err;

	/* Connectionless channel */
	if (chan->chan_type == L2CAP_CHAN_CONN_LESS) {
		skb = l2cap_create_connless_pdu(chan, msg, len);
		if (IS_ERR(skb))
			return PTR_ERR(skb);

		l2cap_do_send(chan, skb);
		return len;
	}

	switch (chan->mode) {
	case L2CAP_MODE_BASIC:
		/* Check outgoing MTU */
		if (len > chan->omtu)
			return -EMSGSIZE;

		/* Create a basic PDU */
		skb = l2cap_create_basic_pdu(chan, msg, len);
		if (IS_ERR(skb))
			return PTR_ERR(skb);

		l2cap_do_send(chan, skb);
		err = len;
		break;

	case L2CAP_MODE_ERTM:
	case L2CAP_MODE_STREAMING:
		/* Entire SDU fits into one PDU */
		if (len <= chan->remote_mps) {
			control = L2CAP_SDU_UNSEGMENTED;
			skb = l2cap_create_iframe_pdu(chan, msg, len, control,
									0);
			if (IS_ERR(skb))
				return PTR_ERR(skb);

			__skb_queue_tail(&chan->tx_q, skb);

			if (chan->tx_send_head == NULL)
				chan->tx_send_head = skb;

		} else {
			/* Segment SDU into multiples PDUs */
			err = l2cap_sar_segment_sdu(chan, msg, len);
			if (err < 0)
				return err;
		}

		/* Streaming mode transmits immediately, never waiting
		 * for acknowledgements.
		 */
		if (chan->mode == L2CAP_MODE_STREAMING) {
			l2cap_streaming_send(chan);
			err = len;
			break;
		}

		/* Remote busy while we wait for an F-bit: leave the
		 * frames queued; they go out once the state clears.
		 */
		if ((chan->conn_state & L2CAP_CONN_REMOTE_BUSY) &&
				(chan->conn_state & L2CAP_CONN_WAIT_F)) {
			err = len;
			break;
		}

		err = l2cap_ertm_send(chan);
		if (err >= 0)
			err = len;

		break;

	default:
		BT_DBG("bad state %1.1x", chan->mode);
		err = -EBADFD;
	}

	return err;
}
1657
1658 static void l2cap_chan_ready(struct sock *sk)
1659 {
1660 struct sock *parent = bt_sk(sk)->parent;
1661 struct l2cap_chan *chan = l2cap_pi(sk)->chan;
1662
1663 BT_DBG("sk %p, parent %p", sk, parent);
1664
1665 chan->conf_state = 0;
1666 l2cap_chan_clear_timer(chan);
1667
1668 if (!parent) {
1669 /* Outgoing channel.
1670 * Wake up socket sleeping on connect.
1671 */
1672 sk->sk_state = BT_CONNECTED;
1673 sk->sk_state_change(sk);
1674 } else {
1675 /* Incoming channel.
1676 * Wake up socket sleeping on accept.
1677 */
1678 parent->sk_data_ready(parent, 0);
1679 }
1680 }
1681
/* Copy frame to all raw sockets on that connection.
 *
 * Walks the connection's channel list under chan_lock and queues a
 * clone of @skb on every raw channel, except the socket the frame
 * originated from.  Clone or queueing failures are silently skipped
 * (best-effort delivery).
 */
static void l2cap_raw_recv(struct l2cap_conn *conn, struct sk_buff *skb)
{
	struct sk_buff *nskb;
	struct l2cap_chan *chan;

	BT_DBG("conn %p", conn);

	read_lock(&conn->chan_lock);
	list_for_each_entry(chan, &conn->chan_l, list) {
		struct sock *sk = chan->sk;
		if (chan->chan_type != L2CAP_CHAN_RAW)
			continue;

		/* Don't send frame to the socket it came from */
		if (skb->sk == sk)
			continue;
		nskb = skb_clone(skb, GFP_ATOMIC);
		if (!nskb)
			continue;

		if (sock_queue_rcv_skb(sk, nskb))
			kfree_skb(nskb);
	}
	read_unlock(&conn->chan_lock);
}
1708
1709 /* ---- L2CAP signalling commands ---- */
/* Build a signalling PDU: L2CAP header + command header + payload.
 *
 * The command goes out on the LE or BR/EDR signalling CID depending
 * on the link type.  When the whole PDU does not fit in a single
 * conn->mtu sized buffer, the remainder of @data is spread over
 * continuation fragments chained on frag_list (fragments carry no
 * L2CAP header).
 *
 * Returns the skb, or NULL on allocation failure.
 */
static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
						u8 code, u8 ident, u16 dlen, void *data)
{
	struct sk_buff *skb, **frag;
	struct l2cap_cmd_hdr *cmd;
	struct l2cap_hdr *lh;
	int len, count;

	BT_DBG("conn %p, code 0x%2.2x, ident 0x%2.2x, len %d",
			conn, code, ident, dlen);

	len = L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE + dlen;
	count = min_t(unsigned int, conn->mtu, len);

	skb = bt_skb_alloc(count, GFP_ATOMIC);
	if (!skb)
		return NULL;

	lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
	lh->len = cpu_to_le16(L2CAP_CMD_HDR_SIZE + dlen);

	if (conn->hcon->type == LE_LINK)
		lh->cid = cpu_to_le16(L2CAP_CID_LE_SIGNALING);
	else
		lh->cid = cpu_to_le16(L2CAP_CID_SIGNALING);

	cmd = (struct l2cap_cmd_hdr *) skb_put(skb, L2CAP_CMD_HDR_SIZE);
	cmd->code = code;
	cmd->ident = ident;
	cmd->len = cpu_to_le16(dlen);

	if (dlen) {
		/* First buffer: whatever payload fits after the headers */
		count -= L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE;
		memcpy(skb_put(skb, count), data, count);
		data += count;
	}

	len -= skb->len;

	/* Continuation fragments (no L2CAP header) */
	frag = &skb_shinfo(skb)->frag_list;
	while (len) {
		count = min_t(unsigned int, conn->mtu, len);

		*frag = bt_skb_alloc(count, GFP_ATOMIC);
		if (!*frag)
			goto fail;

		memcpy(skb_put(*frag, count), data, count);

		len -= count;
		data += count;

		frag = &(*frag)->next;
	}

	return skb;

fail:
	/* Frees the whole chain built so far, fragments included */
	kfree_skb(skb);
	return NULL;
}
1772
1773 static inline int l2cap_get_conf_opt(void **ptr, int *type, int *olen, unsigned long *val)
1774 {
1775 struct l2cap_conf_opt *opt = *ptr;
1776 int len;
1777
1778 len = L2CAP_CONF_OPT_SIZE + opt->len;
1779 *ptr += len;
1780
1781 *type = opt->type;
1782 *olen = opt->len;
1783
1784 switch (opt->len) {
1785 case 1:
1786 *val = *((u8 *) opt->val);
1787 break;
1788
1789 case 2:
1790 *val = get_unaligned_le16(opt->val);
1791 break;
1792
1793 case 4:
1794 *val = get_unaligned_le32(opt->val);
1795 break;
1796
1797 default:
1798 *val = (unsigned long) opt->val;
1799 break;
1800 }
1801
1802 BT_DBG("type 0x%2.2x len %d val 0x%lx", *type, opt->len, *val);
1803 return len;
1804 }
1805
/* Append one configuration option (type, length, value) at *ptr and
 * advance *ptr past it.  1-, 2- and 4-byte values are stored in
 * little-endian wire order; any other length is copied verbatim from
 * the buffer @val points at.
 *
 * NOTE(review): there is no bound checking on the output buffer here —
 * every caller must guarantee its buffer has room for all options it
 * appends; confirm buffer sizes at the call sites.
 */
static void l2cap_add_conf_opt(void **ptr, u8 type, u8 len, unsigned long val)
{
	struct l2cap_conf_opt *opt = *ptr;

	BT_DBG("type 0x%2.2x len %d val 0x%lx", type, len, val);

	opt->type = type;
	opt->len = len;

	switch (len) {
	case 1:
		*((u8 *) opt->val) = val;
		break;

	case 2:
		put_unaligned_le16(val, opt->val);
		break;

	case 4:
		put_unaligned_le32(val, opt->val);
		break;

	default:
		memcpy(opt->val, (void *) val, len);
		break;
	}

	*ptr += L2CAP_CONF_OPT_SIZE + len;
}
1835
1836 static void l2cap_ack_timeout(unsigned long arg)
1837 {
1838 struct l2cap_chan *chan = (void *) arg;
1839
1840 bh_lock_sock(chan->sk);
1841 l2cap_send_ack(chan);
1842 bh_unlock_sock(chan->sk);
1843 }
1844
/* Reset ERTM sequence state and set up the per-channel timers, queues
 * and the backlog receive handler for an ERTM channel.
 */
static inline void l2cap_ertm_init(struct l2cap_chan *chan)
{
	struct sock *sk = chan->sk;

	/* Fresh sequence-number and window bookkeeping */
	chan->expected_ack_seq = 0;
	chan->unacked_frames = 0;
	chan->buffer_seq = 0;
	chan->num_acked = 0;
	chan->frames_sent = 0;

	setup_timer(&chan->retrans_timer, l2cap_retrans_timeout,
							(unsigned long) chan);
	setup_timer(&chan->monitor_timer, l2cap_monitor_timeout,
							(unsigned long) chan);
	setup_timer(&chan->ack_timer, l2cap_ack_timeout, (unsigned long) chan);

	/* srej_q presumably buffers out-of-sequence frames and busy_q
	 * frames deferred while locally busy — confirm against the
	 * receive-path code.
	 */
	skb_queue_head_init(&chan->srej_q);
	skb_queue_head_init(&chan->busy_q);

	INIT_LIST_HEAD(&chan->srej_l);

	INIT_WORK(&chan->busy_work, l2cap_busy_work);

	/* Frames landing on the socket backlog must run through the
	 * ERTM receive handler, not the default path.
	 */
	sk->sk_backlog_rcv = l2cap_ertm_data_rcv;
}
1870
1871 static inline __u8 l2cap_select_mode(__u8 mode, __u16 remote_feat_mask)
1872 {
1873 switch (mode) {
1874 case L2CAP_MODE_STREAMING:
1875 case L2CAP_MODE_ERTM:
1876 if (l2cap_mode_supported(mode, remote_feat_mask))
1877 return mode;
1878 /* fall through */
1879 default:
1880 return L2CAP_MODE_BASIC;
1881 }
1882 }
1883
/* Build our outgoing Configuration Request for @chan into @data.
 *
 * Before any configuration exchange has happened the channel mode may
 * still be downgraded to whatever the remote's feature mask supports.
 * The request then advertises a non-default MTU and the mode-specific
 * RFC and FCS options.
 *
 * Returns the number of bytes written to @data.
 */
static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data)
{
	struct l2cap_conf_req *req = data;
	struct l2cap_conf_rfc rfc = { .mode = chan->mode };
	void *ptr = req->data;

	BT_DBG("chan %p", chan);

	/* Mode selection only happens before the first exchange */
	if (chan->num_conf_req || chan->num_conf_rsp)
		goto done;

	switch (chan->mode) {
	case L2CAP_MODE_STREAMING:
	case L2CAP_MODE_ERTM:
		/* A device-mandated mode is kept as-is */
		if (chan->conf_state & L2CAP_CONF_STATE2_DEVICE)
			break;

		/* fall through */
	default:
		chan->mode = l2cap_select_mode(rfc.mode, chan->conn->feat_mask);
		break;
	}

done:
	if (chan->imtu != L2CAP_DEFAULT_MTU)
		l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->imtu);

	switch (chan->mode) {
	case L2CAP_MODE_BASIC:
		/* Only advertise basic mode explicitly when the remote
		 * supports ERTM or streaming at all.
		 */
		if (!(chan->conn->feat_mask & L2CAP_FEAT_ERTM) &&
				!(chan->conn->feat_mask & L2CAP_FEAT_STREAMING))
			break;

		rfc.mode = L2CAP_MODE_BASIC;
		rfc.txwin_size = 0;
		rfc.max_transmit = 0;
		rfc.retrans_timeout = 0;
		rfc.monitor_timeout = 0;
		rfc.max_pdu_size = 0;

		l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
							(unsigned long) &rfc);
		break;

	case L2CAP_MODE_ERTM:
		rfc.mode = L2CAP_MODE_ERTM;
		rfc.txwin_size = chan->tx_win;
		rfc.max_transmit = chan->max_tx;
		rfc.retrans_timeout = 0;
		rfc.monitor_timeout = 0;
		rfc.max_pdu_size = cpu_to_le16(L2CAP_DEFAULT_MAX_PDU_SIZE);
		/* Clamp the PDU size to the connection MTU minus 10
		 * bytes — presumably the worst-case framing overhead;
		 * confirm against the spec.
		 */
		if (L2CAP_DEFAULT_MAX_PDU_SIZE > chan->conn->mtu - 10)
			rfc.max_pdu_size = cpu_to_le16(chan->conn->mtu - 10);

		l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
							(unsigned long) &rfc);

		if (!(chan->conn->feat_mask & L2CAP_FEAT_FCS))
			break;

		if (chan->fcs == L2CAP_FCS_NONE ||
				chan->conf_state & L2CAP_CONF_NO_FCS_RECV) {
			chan->fcs = L2CAP_FCS_NONE;
			l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1, chan->fcs);
		}
		break;

	case L2CAP_MODE_STREAMING:
		rfc.mode = L2CAP_MODE_STREAMING;
		rfc.txwin_size = 0;
		rfc.max_transmit = 0;
		rfc.retrans_timeout = 0;
		rfc.monitor_timeout = 0;
		rfc.max_pdu_size = cpu_to_le16(L2CAP_DEFAULT_MAX_PDU_SIZE);
		if (L2CAP_DEFAULT_MAX_PDU_SIZE > chan->conn->mtu - 10)
			rfc.max_pdu_size = cpu_to_le16(chan->conn->mtu - 10);

		l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
							(unsigned long) &rfc);

		if (!(chan->conn->feat_mask & L2CAP_FEAT_FCS))
			break;

		if (chan->fcs == L2CAP_FCS_NONE ||
				chan->conf_state & L2CAP_CONF_NO_FCS_RECV) {
			chan->fcs = L2CAP_FCS_NONE;
			l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1, chan->fcs);
		}
		break;
	}

	req->dcid = cpu_to_le16(chan->dcid);
	req->flags = cpu_to_le16(0);

	return ptr - data;
}
1980
1981 static int l2cap_parse_conf_req(struct l2cap_chan *chan, void *data)
1982 {
1983 struct l2cap_conf_rsp *rsp = data;
1984 void *ptr = rsp->data;
1985 void *req = chan->conf_req;
1986 int len = chan->conf_len;
1987 int type, hint, olen;
1988 unsigned long val;
1989 struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
1990 u16 mtu = L2CAP_DEFAULT_MTU;
1991 u16 result = L2CAP_CONF_SUCCESS;
1992
1993 BT_DBG("chan %p", chan);
1994
1995 while (len >= L2CAP_CONF_OPT_SIZE) {
1996 len -= l2cap_get_conf_opt(&req, &type, &olen, &val);
1997
1998 hint = type & L2CAP_CONF_HINT;
1999 type &= L2CAP_CONF_MASK;
2000
2001 switch (type) {
2002 case L2CAP_CONF_MTU:
2003 mtu = val;
2004 break;
2005
2006 case L2CAP_CONF_FLUSH_TO:
2007 chan->flush_to = val;
2008 break;
2009
2010 case L2CAP_CONF_QOS:
2011 break;
2012
2013 case L2CAP_CONF_RFC:
2014 if (olen == sizeof(rfc))
2015 memcpy(&rfc, (void *) val, olen);
2016 break;
2017
2018 case L2CAP_CONF_FCS:
2019 if (val == L2CAP_FCS_NONE)
2020 chan->conf_state |= L2CAP_CONF_NO_FCS_RECV;
2021
2022 break;
2023
2024 default:
2025 if (hint)
2026 break;
2027
2028 result = L2CAP_CONF_UNKNOWN;
2029 *((u8 *) ptr++) = type;
2030 break;
2031 }
2032 }
2033
2034 if (chan->num_conf_rsp || chan->num_conf_req > 1)
2035 goto done;
2036
2037 switch (chan->mode) {
2038 case L2CAP_MODE_STREAMING:
2039 case L2CAP_MODE_ERTM:
2040 if (!(chan->conf_state & L2CAP_CONF_STATE2_DEVICE)) {
2041 chan->mode = l2cap_select_mode(rfc.mode,
2042 chan->conn->feat_mask);
2043 break;
2044 }
2045
2046 if (chan->mode != rfc.mode)
2047 return -ECONNREFUSED;
2048
2049 break;
2050 }
2051
2052 done:
2053 if (chan->mode != rfc.mode) {
2054 result = L2CAP_CONF_UNACCEPT;
2055 rfc.mode = chan->mode;
2056
2057 if (chan->num_conf_rsp == 1)
2058 return -ECONNREFUSED;
2059
2060 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
2061 sizeof(rfc), (unsigned long) &rfc);
2062 }
2063
2064
2065 if (result == L2CAP_CONF_SUCCESS) {
2066 /* Configure output options and let the other side know
2067 * which ones we don't like. */
2068
2069 if (mtu < L2CAP_DEFAULT_MIN_MTU)
2070 result = L2CAP_CONF_UNACCEPT;
2071 else {
2072 chan->omtu = mtu;
2073 chan->conf_state |= L2CAP_CONF_MTU_DONE;
2074 }
2075 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->omtu);
2076
2077 switch (rfc.mode) {
2078 case L2CAP_MODE_BASIC:
2079 chan->fcs = L2CAP_FCS_NONE;
2080 chan->conf_state |= L2CAP_CONF_MODE_DONE;
2081 break;
2082
2083 case L2CAP_MODE_ERTM:
2084 chan->remote_tx_win = rfc.txwin_size;
2085 chan->remote_max_tx = rfc.max_transmit;
2086
2087 if (le16_to_cpu(rfc.max_pdu_size) > chan->conn->mtu - 10)
2088 rfc.max_pdu_size = cpu_to_le16(chan->conn->mtu - 10);
2089
2090 chan->remote_mps = le16_to_cpu(rfc.max_pdu_size);
2091
2092 rfc.retrans_timeout =
2093 le16_to_cpu(L2CAP_DEFAULT_RETRANS_TO);
2094 rfc.monitor_timeout =
2095 le16_to_cpu(L2CAP_DEFAULT_MONITOR_TO);
2096
2097 chan->conf_state |= L2CAP_CONF_MODE_DONE;
2098
2099 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
2100 sizeof(rfc), (unsigned long) &rfc);
2101
2102 break;
2103
2104 case L2CAP_MODE_STREAMING:
2105 if (le16_to_cpu(rfc.max_pdu_size) > chan->conn->mtu - 10)
2106 rfc.max_pdu_size = cpu_to_le16(chan->conn->mtu - 10);
2107
2108 chan->remote_mps = le16_to_cpu(rfc.max_pdu_size);
2109
2110 chan->conf_state |= L2CAP_CONF_MODE_DONE;
2111
2112 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
2113 sizeof(rfc), (unsigned long) &rfc);
2114
2115 break;
2116
2117 default:
2118 result = L2CAP_CONF_UNACCEPT;
2119
2120 memset(&rfc, 0, sizeof(rfc));
2121 rfc.mode = chan->mode;
2122 }
2123
2124 if (result == L2CAP_CONF_SUCCESS)
2125 chan->conf_state |= L2CAP_CONF_OUTPUT_DONE;
2126 }
2127 rsp->scid = cpu_to_le16(chan->dcid);
2128 rsp->result = cpu_to_le16(result);
2129 rsp->flags = cpu_to_le16(0x0000);
2130
2131 return ptr - data;
2132 }
2133
2134 static int l2cap_parse_conf_rsp(struct l2cap_chan *chan, void *rsp, int len, void *data, u16 *result)
2135 {
2136 struct l2cap_conf_req *req = data;
2137 void *ptr = req->data;
2138 int type, olen;
2139 unsigned long val;
2140 struct l2cap_conf_rfc rfc;
2141
2142 BT_DBG("chan %p, rsp %p, len %d, req %p", chan, rsp, len, data);
2143
2144 while (len >= L2CAP_CONF_OPT_SIZE) {
2145 len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);
2146
2147 switch (type) {
2148 case L2CAP_CONF_MTU:
2149 if (val < L2CAP_DEFAULT_MIN_MTU) {
2150 *result = L2CAP_CONF_UNACCEPT;
2151 chan->imtu = L2CAP_DEFAULT_MIN_MTU;
2152 } else
2153 chan->imtu = val;
2154 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->imtu);
2155 break;
2156
2157 case L2CAP_CONF_FLUSH_TO:
2158 chan->flush_to = val;
2159 l2cap_add_conf_opt(&ptr, L2CAP_CONF_FLUSH_TO,
2160 2, chan->flush_to);
2161 break;
2162
2163 case L2CAP_CONF_RFC:
2164 if (olen == sizeof(rfc))
2165 memcpy(&rfc, (void *)val, olen);
2166
2167 if ((chan->conf_state & L2CAP_CONF_STATE2_DEVICE) &&
2168 rfc.mode != chan->mode)
2169 return -ECONNREFUSED;
2170
2171 chan->fcs = 0;
2172
2173 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
2174 sizeof(rfc), (unsigned long) &rfc);
2175 break;
2176 }
2177 }
2178
2179 if (chan->mode == L2CAP_MODE_BASIC && chan->mode != rfc.mode)
2180 return -ECONNREFUSED;
2181
2182 chan->mode = rfc.mode;
2183
2184 if (*result == L2CAP_CONF_SUCCESS) {
2185 switch (rfc.mode) {
2186 case L2CAP_MODE_ERTM:
2187 chan->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
2188 chan->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
2189 chan->mps = le16_to_cpu(rfc.max_pdu_size);
2190 break;
2191 case L2CAP_MODE_STREAMING:
2192 chan->mps = le16_to_cpu(rfc.max_pdu_size);
2193 }
2194 }
2195
2196 req->dcid = cpu_to_le16(chan->dcid);
2197 req->flags = cpu_to_le16(0x0000);
2198
2199 return ptr - data;
2200 }
2201
2202 static int l2cap_build_conf_rsp(struct l2cap_chan *chan, void *data, u16 result, u16 flags)
2203 {
2204 struct l2cap_conf_rsp *rsp = data;
2205 void *ptr = rsp->data;
2206
2207 BT_DBG("chan %p", chan);
2208
2209 rsp->scid = cpu_to_le16(chan->dcid);
2210 rsp->result = cpu_to_le16(result);
2211 rsp->flags = cpu_to_le16(flags);
2212
2213 return ptr - data;
2214 }
2215
2216 void __l2cap_connect_rsp_defer(struct l2cap_chan *chan)
2217 {
2218 struct l2cap_conn_rsp rsp;
2219 struct l2cap_conn *conn = chan->conn;
2220 u8 buf[128];
2221
2222 rsp.scid = cpu_to_le16(chan->dcid);
2223 rsp.dcid = cpu_to_le16(chan->scid);
2224 rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
2225 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
2226 l2cap_send_cmd(conn, chan->ident,
2227 L2CAP_CONN_RSP, sizeof(rsp), &rsp);
2228
2229 if (chan->conf_state & L2CAP_CONF_REQ_SENT)
2230 return;
2231
2232 chan->conf_state |= L2CAP_CONF_REQ_SENT;
2233 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
2234 l2cap_build_conf_req(chan, buf), buf);
2235 chan->num_conf_req++;
2236 }
2237
2238 static void l2cap_conf_rfc_get(struct l2cap_chan *chan, void *rsp, int len)
2239 {
2240 int type, olen;
2241 unsigned long val;
2242 struct l2cap_conf_rfc rfc;
2243
2244 BT_DBG("chan %p, rsp %p, len %d", chan, rsp, len);
2245
2246 if ((chan->mode != L2CAP_MODE_ERTM) && (chan->mode != L2CAP_MODE_STREAMING))
2247 return;
2248
2249 while (len >= L2CAP_CONF_OPT_SIZE) {
2250 len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);
2251
2252 switch (type) {
2253 case L2CAP_CONF_RFC:
2254 if (olen == sizeof(rfc))
2255 memcpy(&rfc, (void *)val, olen);
2256 goto done;
2257 }
2258 }
2259
2260 done:
2261 switch (rfc.mode) {
2262 case L2CAP_MODE_ERTM:
2263 chan->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
2264 chan->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
2265 chan->mps = le16_to_cpu(rfc.max_pdu_size);
2266 break;
2267 case L2CAP_MODE_STREAMING:
2268 chan->mps = le16_to_cpu(rfc.max_pdu_size);
2269 }
2270 }
2271
2272 static inline int l2cap_command_rej(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2273 {
2274 struct l2cap_cmd_rej *rej = (struct l2cap_cmd_rej *) data;
2275
2276 if (rej->reason != 0x0000)
2277 return 0;
2278
2279 if ((conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) &&
2280 cmd->ident == conn->info_ident) {
2281 del_timer(&conn->info_timer);
2282
2283 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
2284 conn->info_ident = 0;
2285
2286 l2cap_conn_start(conn);
2287 }
2288
2289 return 0;
2290 }
2291
/* Handle an incoming Connection Request.
 *
 * Looks up a listening channel for the requested PSM, enforces link
 * security (except for SDP, PSM 0x0001), allocates a child socket and
 * channel, and answers with success, pending (authorization or
 * authentication outstanding, or remote features unknown) or a
 * failure code.  When the remote's feature mask is not yet known an
 * Information Request is sent before configuration may begin.
 *
 * Always returns 0; failures are reported to the peer in the
 * response's result field.
 */
static inline int l2cap_connect_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
{
	struct l2cap_conn_req *req = (struct l2cap_conn_req *) data;
	struct l2cap_conn_rsp rsp;
	struct l2cap_chan *chan = NULL, *pchan;
	struct sock *parent, *sk = NULL;
	int result, status = L2CAP_CS_NO_INFO;

	u16 dcid = 0, scid = __le16_to_cpu(req->scid);
	__le16 psm = req->psm;

	BT_DBG("psm 0x%2.2x scid 0x%4.4x", psm, scid);

	/* Check if we have socket listening on psm */
	pchan = l2cap_global_chan_by_psm(BT_LISTEN, psm, conn->src);
	if (!pchan) {
		result = L2CAP_CR_BAD_PSM;
		goto sendresp;
	}

	parent = pchan->sk;

	/* Held until the "response" label below */
	bh_lock_sock(parent);

	/* Check if the ACL is secure enough (if not SDP) */
	if (psm != cpu_to_le16(0x0001) &&
				!hci_conn_check_link_mode(conn->hcon)) {
		/* 0x05 — presumably HCI "authentication failure";
		 * confirm against the HCI error code table.
		 */
		conn->disc_reason = 0x05;
		result = L2CAP_CR_SEC_BLOCK;
		goto response;
	}

	result = L2CAP_CR_NO_MEM;

	/* Check for backlog size */
	if (sk_acceptq_is_full(parent)) {
		BT_DBG("backlog full %d", parent->sk_ack_backlog);
		goto response;
	}

	sk = l2cap_sock_alloc(sock_net(parent), NULL, BTPROTO_L2CAP, GFP_ATOMIC);
	if (!sk)
		goto response;

	chan = l2cap_chan_create(sk);
	if (!chan) {
		l2cap_sock_kill(sk);
		goto response;
	}

	l2cap_pi(sk)->chan = chan;

	write_lock_bh(&conn->chan_lock);

	/* Check if we already have channel with that dcid */
	if (__l2cap_get_chan_by_dcid(conn, scid)) {
		write_unlock_bh(&conn->chan_lock);
		sock_set_flag(sk, SOCK_ZAPPED);
		l2cap_sock_kill(sk);
		goto response;
	}

	hci_conn_hold(conn->hcon);

	l2cap_sock_init(sk, parent);
	bacpy(&bt_sk(sk)->src, conn->src);
	bacpy(&bt_sk(sk)->dst, conn->dst);
	chan->psm = psm;
	chan->dcid = scid;

	bt_accept_enqueue(parent, sk);

	__l2cap_chan_add(conn, chan);

	dcid = chan->scid;

	l2cap_chan_set_timer(chan, sk->sk_sndtimeo);

	chan->ident = cmd->ident;

	if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE) {
		if (l2cap_check_security(chan)) {
			if (bt_sk(sk)->defer_setup) {
				/* Let user space decide via accept() */
				sk->sk_state = BT_CONNECT2;
				result = L2CAP_CR_PEND;
				status = L2CAP_CS_AUTHOR_PEND;
				parent->sk_data_ready(parent, 0);
			} else {
				sk->sk_state = BT_CONFIG;
				result = L2CAP_CR_SUCCESS;
				status = L2CAP_CS_NO_INFO;
			}
		} else {
			/* Security procedures still pending */
			sk->sk_state = BT_CONNECT2;
			result = L2CAP_CR_PEND;
			status = L2CAP_CS_AUTHEN_PEND;
		}
	} else {
		/* Remote features unknown: answer pending and ask below */
		sk->sk_state = BT_CONNECT2;
		result = L2CAP_CR_PEND;
		status = L2CAP_CS_NO_INFO;
	}

	write_unlock_bh(&conn->chan_lock);

response:
	bh_unlock_sock(parent);

sendresp:
	rsp.scid = cpu_to_le16(scid);
	rsp.dcid = cpu_to_le16(dcid);
	rsp.result = cpu_to_le16(result);
	rsp.status = cpu_to_le16(status);
	l2cap_send_cmd(conn, cmd->ident, L2CAP_CONN_RSP, sizeof(rsp), &rsp);

	/* Feature mask still unknown: start the information exchange */
	if (result == L2CAP_CR_PEND && status == L2CAP_CS_NO_INFO) {
		struct l2cap_info_req info;
		info.type = cpu_to_le16(L2CAP_IT_FEAT_MASK);

		conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
		conn->info_ident = l2cap_get_ident(conn);

		mod_timer(&conn->info_timer, jiffies +
					msecs_to_jiffies(L2CAP_INFO_TIMEOUT));

		l2cap_send_cmd(conn, conn->info_ident,
					L2CAP_INFO_REQ, sizeof(info), &info);
	}

	/* Immediate success: start configuration right away */
	if (chan && !(chan->conf_state & L2CAP_CONF_REQ_SENT) &&
				result == L2CAP_CR_SUCCESS) {
		u8 buf[128];
		chan->conf_state |= L2CAP_CONF_REQ_SENT;
		l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
					l2cap_build_conf_req(chan, buf), buf);
		chan->num_conf_req++;
	}

	return 0;
}
2432
/* Handle the remote's Connection Response.
 *
 * Success moves the channel to BT_CONFIG and (once per channel) sends
 * our Configuration Request; "pending" only records the state; any
 * other result tears the channel down — unless the socket is owned by
 * user space, in which case teardown is deferred via a short timer.
 *
 * NOTE(review): the lookup helpers appear to return with the socket
 * bh-locked — the bh_unlock_sock() at the end balances that; confirm
 * against l2cap_get_chan_by_scid()/_by_ident().
 */
static inline int l2cap_connect_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
{
	struct l2cap_conn_rsp *rsp = (struct l2cap_conn_rsp *) data;
	u16 scid, dcid, result, status;
	struct l2cap_chan *chan;
	struct sock *sk;
	u8 req[128];

	scid = __le16_to_cpu(rsp->scid);
	dcid = __le16_to_cpu(rsp->dcid);
	result = __le16_to_cpu(rsp->result);
	status = __le16_to_cpu(rsp->status);

	BT_DBG("dcid 0x%4.4x scid 0x%4.4x result 0x%2.2x status 0x%2.2x", dcid, scid, result, status);

	/* Match the response to our channel: by source CID when given,
	 * otherwise by the identifier of our outstanding request.
	 */
	if (scid) {
		chan = l2cap_get_chan_by_scid(conn, scid);
		if (!chan)
			return -EFAULT;
	} else {
		chan = l2cap_get_chan_by_ident(conn, cmd->ident);
		if (!chan)
			return -EFAULT;
	}

	sk = chan->sk;

	switch (result) {
	case L2CAP_CR_SUCCESS:
		/* Connected; move on to configuration */
		sk->sk_state = BT_CONFIG;
		chan->ident = 0;
		chan->dcid = dcid;
		chan->conf_state &= ~L2CAP_CONF_CONNECT_PEND;

		if (chan->conf_state & L2CAP_CONF_REQ_SENT)
			break;

		chan->conf_state |= L2CAP_CONF_REQ_SENT;

		l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
					l2cap_build_conf_req(chan, req), req);
		chan->num_conf_req++;
		break;

	case L2CAP_CR_PEND:
		chan->conf_state |= L2CAP_CONF_CONNECT_PEND;
		break;

	default:
		/* don't delete l2cap channel if sk is owned by user */
		if (sock_owned_by_user(sk)) {
			sk->sk_state = BT_DISCONN;
			l2cap_chan_clear_timer(chan);
			/* Retry teardown shortly, once user space lets go */
			l2cap_chan_set_timer(chan, HZ / 5);
			break;
		}

		l2cap_chan_del(chan, ECONNREFUSED);
		break;
	}

	bh_unlock_sock(sk);
	return 0;
}
2497
2498 static inline void set_default_fcs(struct l2cap_chan *chan)
2499 {
2500 struct l2cap_pinfo *pi = l2cap_pi(chan->sk);
2501
2502 /* FCS is enabled only in ERTM or streaming mode, if one or both
2503 * sides request it.
2504 */
2505 if (chan->mode != L2CAP_MODE_ERTM && chan->mode != L2CAP_MODE_STREAMING)
2506 chan->fcs = L2CAP_FCS_NONE;
2507 else if (!(pi->chan->conf_state & L2CAP_CONF_NO_FCS_RECV))
2508 chan->fcs = L2CAP_FCS_CRC16;
2509 }
2510
/* Handle an incoming Configure Request.
 *
 * Configuration may arrive fragmented across several requests (the
 * continuation bit, 0x0001 in flags, marks all but the last fragment).
 * Fragments are accumulated in chan->conf_req and only parsed once the
 * final fragment arrives.  The lookup helper returns with the socket
 * bh-locked; all paths leave through the unlock label.
 */
static inline int l2cap_config_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u16 cmd_len, u8 *data)
{
	struct l2cap_conf_req *req = (struct l2cap_conf_req *) data;
	u16 dcid, flags;
	u8 rsp[64];
	struct l2cap_chan *chan;
	struct sock *sk;
	int len;

	dcid = __le16_to_cpu(req->dcid);
	flags = __le16_to_cpu(req->flags);

	BT_DBG("dcid 0x%4.4x flags 0x%2.2x", dcid, flags);

	chan = l2cap_get_chan_by_scid(conn, dcid);
	if (!chan)
		return -ENOENT;

	sk = chan->sk;

	if (sk->sk_state != BT_CONFIG) {
		struct l2cap_cmd_rej rej;

		/* 0x0002: invalid CID in request (L2CAP command reject) */
		rej.reason = cpu_to_le16(0x0002);
		l2cap_send_cmd(conn, cmd->ident, L2CAP_COMMAND_REJ,
				sizeof(rej), &rej);
		goto unlock;
	}

	/* Reject if config buffer is too small. */
	len = cmd_len - sizeof(*req);
	if (chan->conf_len + len > sizeof(chan->conf_req)) {
		l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
				l2cap_build_conf_rsp(chan, rsp,
					L2CAP_CONF_REJECT, flags), rsp);
		goto unlock;
	}

	/* Store config. */
	memcpy(chan->conf_req + chan->conf_len, req->data, len);
	chan->conf_len += len;

	if (flags & 0x0001) {
		/* Incomplete config. Send empty response. */
		l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
				l2cap_build_conf_rsp(chan, rsp,
					L2CAP_CONF_SUCCESS, 0x0001), rsp);
		goto unlock;
	}

	/* Complete config. */
	len = l2cap_parse_conf_req(chan, rsp);
	if (len < 0) {
		/* Unparseable options — drop the connection. */
		l2cap_send_disconn_req(conn, chan, ECONNRESET);
		goto unlock;
	}

	l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP, len, rsp);
	chan->num_conf_rsp++;

	/* Reset config buffer. */
	chan->conf_len = 0;

	if (!(chan->conf_state & L2CAP_CONF_OUTPUT_DONE))
		goto unlock;

	/* Both directions configured: the channel is now open. */
	if (chan->conf_state & L2CAP_CONF_INPUT_DONE) {
		set_default_fcs(chan);

		sk->sk_state = BT_CONNECTED;

		chan->next_tx_seq = 0;
		chan->expected_tx_seq = 0;
		skb_queue_head_init(&chan->tx_q);
		if (chan->mode == L2CAP_MODE_ERTM)
			l2cap_ertm_init(chan);

		l2cap_chan_ready(sk);
		goto unlock;
	}

	/* Our own Configure Request has not been sent yet — send it now. */
	if (!(chan->conf_state & L2CAP_CONF_REQ_SENT)) {
		u8 buf[64];
		chan->conf_state |= L2CAP_CONF_REQ_SENT;
		l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
					l2cap_build_conf_req(chan, buf), buf);
		chan->num_conf_req++;
	}

unlock:
	bh_unlock_sock(sk);
	return 0;
}
2604
2605 static inline int l2cap_config_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2606 {
2607 struct l2cap_conf_rsp *rsp = (struct l2cap_conf_rsp *)data;
2608 u16 scid, flags, result;
2609 struct l2cap_chan *chan;
2610 struct sock *sk;
2611 int len = cmd->len - sizeof(*rsp);
2612
2613 scid = __le16_to_cpu(rsp->scid);
2614 flags = __le16_to_cpu(rsp->flags);
2615 result = __le16_to_cpu(rsp->result);
2616
2617 BT_DBG("scid 0x%4.4x flags 0x%2.2x result 0x%2.2x",
2618 scid, flags, result);
2619
2620 chan = l2cap_get_chan_by_scid(conn, scid);
2621 if (!chan)
2622 return 0;
2623
2624 sk = chan->sk;
2625
2626 switch (result) {
2627 case L2CAP_CONF_SUCCESS:
2628 l2cap_conf_rfc_get(chan, rsp->data, len);
2629 break;
2630
2631 case L2CAP_CONF_UNACCEPT:
2632 if (chan->num_conf_rsp <= L2CAP_CONF_MAX_CONF_RSP) {
2633 char req[64];
2634
2635 if (len > sizeof(req) - sizeof(struct l2cap_conf_req)) {
2636 l2cap_send_disconn_req(conn, chan, ECONNRESET);
2637 goto done;
2638 }
2639
2640 /* throw out any old stored conf requests */
2641 result = L2CAP_CONF_SUCCESS;
2642 len = l2cap_parse_conf_rsp(chan, rsp->data, len,
2643 req, &result);
2644 if (len < 0) {
2645 l2cap_send_disconn_req(conn, chan, ECONNRESET);
2646 goto done;
2647 }
2648
2649 l2cap_send_cmd(conn, l2cap_get_ident(conn),
2650 L2CAP_CONF_REQ, len, req);
2651 chan->num_conf_req++;
2652 if (result != L2CAP_CONF_SUCCESS)
2653 goto done;
2654 break;
2655 }
2656
2657 default:
2658 sk->sk_err = ECONNRESET;
2659 l2cap_chan_set_timer(chan, HZ * 5);
2660 l2cap_send_disconn_req(conn, chan, ECONNRESET);
2661 goto done;
2662 }
2663
2664 if (flags & 0x01)
2665 goto done;
2666
2667 chan->conf_state |= L2CAP_CONF_INPUT_DONE;
2668
2669 if (chan->conf_state & L2CAP_CONF_OUTPUT_DONE) {
2670 set_default_fcs(chan);
2671
2672 sk->sk_state = BT_CONNECTED;
2673 chan->next_tx_seq = 0;
2674 chan->expected_tx_seq = 0;
2675 skb_queue_head_init(&chan->tx_q);
2676 if (chan->mode == L2CAP_MODE_ERTM)
2677 l2cap_ertm_init(chan);
2678
2679 l2cap_chan_ready(sk);
2680 }
2681
2682 done:
2683 bh_unlock_sock(sk);
2684 return 0;
2685 }
2686
/* Handle an incoming Disconnect Request.
 *
 * Acknowledges with a Disconnect Response (CIDs swapped back into the
 * peer's perspective), then tears the channel down — immediately when
 * possible, or via a short timer when userspace holds the socket lock.
 */
static inline int l2cap_disconnect_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
{
	struct l2cap_disconn_req *req = (struct l2cap_disconn_req *) data;
	struct l2cap_disconn_rsp rsp;
	u16 dcid, scid;
	struct l2cap_chan *chan;
	struct sock *sk;

	scid = __le16_to_cpu(req->scid);
	dcid = __le16_to_cpu(req->dcid);

	BT_DBG("scid 0x%4.4x dcid 0x%4.4x", scid, dcid);

	/* The peer's dcid is our scid; lookup returns with sk bh-locked. */
	chan = l2cap_get_chan_by_scid(conn, dcid);
	if (!chan)
		return 0;

	sk = chan->sk;

	rsp.dcid = cpu_to_le16(chan->scid);
	rsp.scid = cpu_to_le16(chan->dcid);
	l2cap_send_cmd(conn, cmd->ident, L2CAP_DISCONN_RSP, sizeof(rsp), &rsp);

	/* Stop any further send/receive on the socket. */
	sk->sk_shutdown = SHUTDOWN_MASK;

	/* don't delete l2cap channel if sk is owned by user */
	if (sock_owned_by_user(sk)) {
		/* Defer deletion via a short channel timer. */
		sk->sk_state = BT_DISCONN;
		l2cap_chan_clear_timer(chan);
		l2cap_chan_set_timer(chan, HZ / 5);
		bh_unlock_sock(sk);
		return 0;
	}

	l2cap_chan_del(chan, ECONNRESET);
	bh_unlock_sock(sk);

	l2cap_sock_kill(sk);
	return 0;
}
2727
/* Handle an incoming Disconnect Response (peer confirmed our request).
 *
 * Same teardown logic as l2cap_disconnect_req(), but with reason 0
 * since the disconnect was locally initiated.
 */
static inline int l2cap_disconnect_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
{
	struct l2cap_disconn_rsp *rsp = (struct l2cap_disconn_rsp *) data;
	u16 dcid, scid;
	struct l2cap_chan *chan;
	struct sock *sk;

	scid = __le16_to_cpu(rsp->scid);
	dcid = __le16_to_cpu(rsp->dcid);

	BT_DBG("dcid 0x%4.4x scid 0x%4.4x", dcid, scid);

	/* Lookup returns with sk bh-locked. */
	chan = l2cap_get_chan_by_scid(conn, scid);
	if (!chan)
		return 0;

	sk = chan->sk;

	/* don't delete l2cap channel if sk is owned by user */
	if (sock_owned_by_user(sk)) {
		/* Defer deletion via a short channel timer. */
		sk->sk_state = BT_DISCONN;
		l2cap_chan_clear_timer(chan);
		l2cap_chan_set_timer(chan, HZ / 5);
		bh_unlock_sock(sk);
		return 0;
	}

	l2cap_chan_del(chan, 0);
	bh_unlock_sock(sk);

	l2cap_sock_kill(sk);
	return 0;
}
2761
2762 static inline int l2cap_information_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2763 {
2764 struct l2cap_info_req *req = (struct l2cap_info_req *) data;
2765 u16 type;
2766
2767 type = __le16_to_cpu(req->type);
2768
2769 BT_DBG("type 0x%4.4x", type);
2770
2771 if (type == L2CAP_IT_FEAT_MASK) {
2772 u8 buf[8];
2773 u32 feat_mask = l2cap_feat_mask;
2774 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
2775 rsp->type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
2776 rsp->result = cpu_to_le16(L2CAP_IR_SUCCESS);
2777 if (!disable_ertm)
2778 feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING
2779 | L2CAP_FEAT_FCS;
2780 put_unaligned_le32(feat_mask, rsp->data);
2781 l2cap_send_cmd(conn, cmd->ident,
2782 L2CAP_INFO_RSP, sizeof(buf), buf);
2783 } else if (type == L2CAP_IT_FIXED_CHAN) {
2784 u8 buf[12];
2785 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
2786 rsp->type = cpu_to_le16(L2CAP_IT_FIXED_CHAN);
2787 rsp->result = cpu_to_le16(L2CAP_IR_SUCCESS);
2788 memcpy(buf + 4, l2cap_fixed_chan, 8);
2789 l2cap_send_cmd(conn, cmd->ident,
2790 L2CAP_INFO_RSP, sizeof(buf), buf);
2791 } else {
2792 struct l2cap_info_rsp rsp;
2793 rsp.type = cpu_to_le16(type);
2794 rsp.result = cpu_to_le16(L2CAP_IR_NOTSUPP);
2795 l2cap_send_cmd(conn, cmd->ident,
2796 L2CAP_INFO_RSP, sizeof(rsp), &rsp);
2797 }
2798
2799 return 0;
2800 }
2801
/* Handle an incoming Information Response.
 *
 * Drives the two-step discovery sequence: feature mask first, then —
 * if the peer advertises fixed channels — the fixed-channel list.
 * Once discovery is complete (or fails) pending channels are started
 * via l2cap_conn_start().
 */
static inline int l2cap_information_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
{
	struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) data;
	u16 type, result;

	type = __le16_to_cpu(rsp->type);
	result = __le16_to_cpu(rsp->result);

	BT_DBG("type 0x%4.4x result 0x%2.2x", type, result);

	/* L2CAP Info req/rsp are unbound to channels, add extra checks */
	if (cmd->ident != conn->info_ident ||
			conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE)
		return 0;

	del_timer(&conn->info_timer);

	if (result != L2CAP_IR_SUCCESS) {
		/* Peer can't answer — mark discovery done and proceed. */
		conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
		conn->info_ident = 0;

		l2cap_conn_start(conn);

		return 0;
	}

	if (type == L2CAP_IT_FEAT_MASK) {
		conn->feat_mask = get_unaligned_le32(rsp->data);

		if (conn->feat_mask & L2CAP_FEAT_FIXED_CHAN) {
			/* Second discovery step: ask for fixed channels. */
			struct l2cap_info_req req;
			req.type = cpu_to_le16(L2CAP_IT_FIXED_CHAN);

			conn->info_ident = l2cap_get_ident(conn);

			l2cap_send_cmd(conn, conn->info_ident,
					L2CAP_INFO_REQ, sizeof(req), &req);
		} else {
			conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
			conn->info_ident = 0;

			l2cap_conn_start(conn);
		}
	} else if (type == L2CAP_IT_FIXED_CHAN) {
		conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
		conn->info_ident = 0;

		l2cap_conn_start(conn);
	}

	return 0;
}
2854
2855 static inline int l2cap_check_conn_param(u16 min, u16 max, u16 latency,
2856 u16 to_multiplier)
2857 {
2858 u16 max_latency;
2859
2860 if (min > max || min < 6 || max > 3200)
2861 return -EINVAL;
2862
2863 if (to_multiplier < 10 || to_multiplier > 3200)
2864 return -EINVAL;
2865
2866 if (max >= to_multiplier * 8)
2867 return -EINVAL;
2868
2869 max_latency = (to_multiplier * 8 / max) - 1;
2870 if (latency > 499 || latency > max_latency)
2871 return -EINVAL;
2872
2873 return 0;
2874 }
2875
/* Handle an LE Connection Parameter Update Request.
 *
 * Only valid when we are the link master.  Validates the requested
 * parameters, always answers with accept/reject, and applies accepted
 * parameters to the underlying HCI connection.
 */
static inline int l2cap_conn_param_update_req(struct l2cap_conn *conn,
					struct l2cap_cmd_hdr *cmd, u8 *data)
{
	struct hci_conn *hcon = conn->hcon;
	struct l2cap_conn_param_update_req *req;
	struct l2cap_conn_param_update_rsp rsp;
	u16 min, max, latency, to_multiplier, cmd_len;
	int err;

	/* Only the master may change connection parameters. */
	if (!(hcon->link_mode & HCI_LM_MASTER))
		return -EINVAL;

	cmd_len = __le16_to_cpu(cmd->len);
	if (cmd_len != sizeof(struct l2cap_conn_param_update_req))
		return -EPROTO;

	req = (struct l2cap_conn_param_update_req *) data;
	min = __le16_to_cpu(req->min);
	max = __le16_to_cpu(req->max);
	latency = __le16_to_cpu(req->latency);
	to_multiplier = __le16_to_cpu(req->to_multiplier);

	BT_DBG("min 0x%4.4x max 0x%4.4x latency: 0x%4.4x Timeout: 0x%4.4x",
						min, max, latency, to_multiplier);

	memset(&rsp, 0, sizeof(rsp));

	err = l2cap_check_conn_param(min, max, latency, to_multiplier);
	if (err)
		rsp.result = cpu_to_le16(L2CAP_CONN_PARAM_REJECTED);
	else
		rsp.result = cpu_to_le16(L2CAP_CONN_PARAM_ACCEPTED);

	l2cap_send_cmd(conn, cmd->ident, L2CAP_CONN_PARAM_UPDATE_RSP,
							sizeof(rsp), &rsp);

	/* Apply the new parameters only after a positive response. */
	if (!err)
		hci_le_conn_update(hcon, min, max, latency, to_multiplier);

	return 0;
}
2917
2918 static inline int l2cap_bredr_sig_cmd(struct l2cap_conn *conn,
2919 struct l2cap_cmd_hdr *cmd, u16 cmd_len, u8 *data)
2920 {
2921 int err = 0;
2922
2923 switch (cmd->code) {
2924 case L2CAP_COMMAND_REJ:
2925 l2cap_command_rej(conn, cmd, data);
2926 break;
2927
2928 case L2CAP_CONN_REQ:
2929 err = l2cap_connect_req(conn, cmd, data);
2930 break;
2931
2932 case L2CAP_CONN_RSP:
2933 err = l2cap_connect_rsp(conn, cmd, data);
2934 break;
2935
2936 case L2CAP_CONF_REQ:
2937 err = l2cap_config_req(conn, cmd, cmd_len, data);
2938 break;
2939
2940 case L2CAP_CONF_RSP:
2941 err = l2cap_config_rsp(conn, cmd, data);
2942 break;
2943
2944 case L2CAP_DISCONN_REQ:
2945 err = l2cap_disconnect_req(conn, cmd, data);
2946 break;
2947
2948 case L2CAP_DISCONN_RSP:
2949 err = l2cap_disconnect_rsp(conn, cmd, data);
2950 break;
2951
2952 case L2CAP_ECHO_REQ:
2953 l2cap_send_cmd(conn, cmd->ident, L2CAP_ECHO_RSP, cmd_len, data);
2954 break;
2955
2956 case L2CAP_ECHO_RSP:
2957 break;
2958
2959 case L2CAP_INFO_REQ:
2960 err = l2cap_information_req(conn, cmd, data);
2961 break;
2962
2963 case L2CAP_INFO_RSP:
2964 err = l2cap_information_rsp(conn, cmd, data);
2965 break;
2966
2967 default:
2968 BT_ERR("Unknown BR/EDR signaling command 0x%2.2x", cmd->code);
2969 err = -EINVAL;
2970 break;
2971 }
2972
2973 return err;
2974 }
2975
2976 static inline int l2cap_le_sig_cmd(struct l2cap_conn *conn,
2977 struct l2cap_cmd_hdr *cmd, u8 *data)
2978 {
2979 switch (cmd->code) {
2980 case L2CAP_COMMAND_REJ:
2981 return 0;
2982
2983 case L2CAP_CONN_PARAM_UPDATE_REQ:
2984 return l2cap_conn_param_update_req(conn, cmd, data);
2985
2986 case L2CAP_CONN_PARAM_UPDATE_RSP:
2987 return 0;
2988
2989 default:
2990 BT_ERR("Unknown LE signaling command 0x%2.2x", cmd->code);
2991 return -EINVAL;
2992 }
2993 }
2994
/* Process a PDU received on the signalling channel.
 *
 * A signalling PDU may carry several commands back-to-back; each is
 * parsed, sanity-checked against the remaining length, and dispatched
 * to the BR/EDR or LE command handler.  Handler failures are answered
 * with a Command Reject.  Consumes the skb.
 */
static inline void l2cap_sig_channel(struct l2cap_conn *conn,
						struct sk_buff *skb)
{
	u8 *data = skb->data;
	int len = skb->len;
	struct l2cap_cmd_hdr cmd;
	int err;

	/* Give raw sockets a copy of the signalling traffic first. */
	l2cap_raw_recv(conn, skb);

	while (len >= L2CAP_CMD_HDR_SIZE) {
		u16 cmd_len;
		memcpy(&cmd, data, L2CAP_CMD_HDR_SIZE);
		data += L2CAP_CMD_HDR_SIZE;
		len -= L2CAP_CMD_HDR_SIZE;

		cmd_len = le16_to_cpu(cmd.len);

		BT_DBG("code 0x%2.2x len %d id 0x%2.2x", cmd.code, cmd_len, cmd.ident);

		/* Truncated command or invalid (zero) ident: stop parsing. */
		if (cmd_len > len || !cmd.ident) {
			BT_DBG("corrupted command");
			break;
		}

		if (conn->hcon->type == LE_LINK)
			err = l2cap_le_sig_cmd(conn, &cmd, data);
		else
			err = l2cap_bredr_sig_cmd(conn, &cmd, cmd_len, data);

		if (err) {
			struct l2cap_cmd_rej rej;

			BT_ERR("Wrong link type (%d)", err);

			/* FIXME: Map err to a valid reason */
			rej.reason = cpu_to_le16(0);
			l2cap_send_cmd(conn, cmd.ident, L2CAP_COMMAND_REJ, sizeof(rej), &rej);
		}

		/* Advance past this command's payload. */
		data += cmd_len;
		len -= cmd_len;
	}

	kfree_skb(skb);
}
3041
3042 static int l2cap_check_fcs(struct l2cap_chan *chan, struct sk_buff *skb)
3043 {
3044 u16 our_fcs, rcv_fcs;
3045 int hdr_size = L2CAP_HDR_SIZE + 2;
3046
3047 if (chan->fcs == L2CAP_FCS_CRC16) {
3048 skb_trim(skb, skb->len - 2);
3049 rcv_fcs = get_unaligned_le16(skb->data + skb->len);
3050 our_fcs = crc16(0, skb->data - hdr_size, skb->len + hdr_size);
3051
3052 if (our_fcs != rcv_fcs)
3053 return -EBADMSG;
3054 }
3055 return 0;
3056 }
3057
/* Answer a poll (P-bit) from the remote side.
 *
 * Sends RNR when we are locally busy, retransmits if the remote was
 * busy, pushes any pending I-frames, and finally falls back to an RR
 * if nothing else carried the acknowledgement.
 */
static inline void l2cap_send_i_or_rr_or_rnr(struct l2cap_chan *chan)
{
	u16 control = 0;

	chan->frames_sent = 0;

	control |= chan->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT;

	if (chan->conn_state & L2CAP_CONN_LOCAL_BUSY) {
		/* Tell the peer to stop sending: Receiver Not Ready. */
		control |= L2CAP_SUPER_RCV_NOT_READY;
		l2cap_send_sframe(chan, control);
		chan->conn_state |= L2CAP_CONN_RNR_SENT;
	}

	if (chan->conn_state & L2CAP_CONN_REMOTE_BUSY)
		l2cap_retransmit_frames(chan);

	l2cap_ertm_send(chan);

	/* Nothing was sent above — acknowledge with a plain RR. */
	if (!(chan->conn_state & L2CAP_CONN_LOCAL_BUSY) &&
			chan->frames_sent == 0) {
		control |= L2CAP_SUPER_RCV_READY;
		l2cap_send_sframe(chan, control);
	}
}
3083
/* Insert an out-of-order I-frame into the SREJ queue.
 *
 * The queue is kept sorted by each frame's distance (mod 64) from
 * buffer_seq so that reassembly can drain it in sequence order.
 * Returns -EINVAL if a frame with the same tx_seq is already queued
 * (duplicate), 0 otherwise.
 */
static int l2cap_add_to_srej_queue(struct l2cap_chan *chan, struct sk_buff *skb, u8 tx_seq, u8 sar)
{
	struct sk_buff *next_skb;
	int tx_seq_offset, next_tx_seq_offset;

	bt_cb(skb)->tx_seq = tx_seq;
	bt_cb(skb)->sar = sar;

	next_skb = skb_peek(&chan->srej_q);
	if (!next_skb) {
		__skb_queue_tail(&chan->srej_q, skb);
		return 0;
	}

	/* Distance of the new frame from buffer_seq, modulo the 64-frame
	 * sequence space (the subtraction can go negative). */
	tx_seq_offset = (tx_seq - chan->buffer_seq) % 64;
	if (tx_seq_offset < 0)
		tx_seq_offset += 64;

	do {
		if (bt_cb(next_skb)->tx_seq == tx_seq)
			return -EINVAL;

		next_tx_seq_offset = (bt_cb(next_skb)->tx_seq -
						chan->buffer_seq) % 64;
		if (next_tx_seq_offset < 0)
			next_tx_seq_offset += 64;

		/* First queued frame that sorts after us: insert before it. */
		if (next_tx_seq_offset > tx_seq_offset) {
			__skb_queue_before(&chan->srej_q, next_skb, skb);
			return 0;
		}

		if (skb_queue_is_last(&chan->srej_q, next_skb))
			break;

	} while ((next_skb = skb_queue_next(&chan->srej_q, next_skb)));

	/* Sorts after everything currently queued. */
	__skb_queue_tail(&chan->srej_q, skb);

	return 0;
}
3125
/* Reassemble one ERTM I-frame into an SDU and deliver it.
 *
 * Tracks segmentation state through L2CAP_CONN_SAR_SDU; on the END
 * fragment the completed SDU is cloned and queued to the socket.  A
 * failed delivery sets L2CAP_CONN_SAR_RETRY so the END handling can be
 * re-entered without re-copying data.  Protocol violations disconnect
 * the channel (note: the drop label deliberately falls through into
 * disconnect).  Consumes the skb on every path.
 */
static int l2cap_ertm_reassembly_sdu(struct l2cap_chan *chan, struct sk_buff *skb, u16 control)
{
	struct sk_buff *_skb;
	int err;

	switch (control & L2CAP_CTRL_SAR) {
	case L2CAP_SDU_UNSEGMENTED:
		/* Unsegmented frame in the middle of a SAR sequence is a
		 * protocol violation. */
		if (chan->conn_state & L2CAP_CONN_SAR_SDU)
			goto drop;

		return sock_queue_rcv_skb(chan->sk, skb);

	case L2CAP_SDU_START:
		if (chan->conn_state & L2CAP_CONN_SAR_SDU)
			goto drop;

		chan->sdu_len = get_unaligned_le16(skb->data);

		if (chan->sdu_len > chan->imtu)
			goto disconnect;

		chan->sdu = bt_skb_alloc(chan->sdu_len, GFP_ATOMIC);
		if (!chan->sdu)
			return -ENOMEM;

		/* pull sdu_len bytes only after alloc, because of Local Busy
		 * condition we have to be sure that this will be executed
		 * only once, i.e., when alloc does not fail */
		skb_pull(skb, 2);

		memcpy(skb_put(chan->sdu, skb->len), skb->data, skb->len);

		chan->conn_state |= L2CAP_CONN_SAR_SDU;
		chan->partial_sdu_len = skb->len;
		break;

	case L2CAP_SDU_CONTINUE:
		if (!(chan->conn_state & L2CAP_CONN_SAR_SDU))
			goto disconnect;

		if (!chan->sdu)
			goto disconnect;

		chan->partial_sdu_len += skb->len;
		if (chan->partial_sdu_len > chan->sdu_len)
			goto drop;

		memcpy(skb_put(chan->sdu, skb->len), skb->data, skb->len);

		break;

	case L2CAP_SDU_END:
		if (!(chan->conn_state & L2CAP_CONN_SAR_SDU))
			goto disconnect;

		if (!chan->sdu)
			goto disconnect;

		/* On a retry the data was already copied; only verify and
		 * copy on the first pass through END handling. */
		if (!(chan->conn_state & L2CAP_CONN_SAR_RETRY)) {
			chan->partial_sdu_len += skb->len;

			if (chan->partial_sdu_len > chan->imtu)
				goto drop;

			if (chan->partial_sdu_len != chan->sdu_len)
				goto drop;

			memcpy(skb_put(chan->sdu, skb->len), skb->data, skb->len);
		}

		_skb = skb_clone(chan->sdu, GFP_ATOMIC);
		if (!_skb) {
			chan->conn_state |= L2CAP_CONN_SAR_RETRY;
			return -ENOMEM;
		}

		err = sock_queue_rcv_skb(chan->sk, _skb);
		if (err < 0) {
			kfree_skb(_skb);
			chan->conn_state |= L2CAP_CONN_SAR_RETRY;
			return err;
		}

		chan->conn_state &= ~L2CAP_CONN_SAR_RETRY;
		chan->conn_state &= ~L2CAP_CONN_SAR_SDU;

		/* NOTE(review): chan->sdu is freed but not set to NULL here;
		 * safe only while SAR_SDU gating prevents reuse — verify. */
		kfree_skb(chan->sdu);
		break;
	}

	kfree_skb(skb);
	return 0;

drop:
	kfree_skb(chan->sdu);
	chan->sdu = NULL;

	/* fall through: a dropped partial SDU also disconnects */
disconnect:
	l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
	kfree_skb(skb);
	return 0;
}
3228
/* Try to drain the local-busy backlog queue.
 *
 * Returns -EBUSY (leaving the failed skb at the head) if the socket is
 * still congested, 0 once the backlog is empty and the local-busy
 * condition has been cleared.  On success after an RNR was sent, polls
 * the peer with an RR+P so it resumes transmission.
 */
static int l2cap_try_push_rx_skb(struct l2cap_chan *chan)
{
	struct sk_buff *skb;
	u16 control;
	int err;

	while ((skb = skb_dequeue(&chan->busy_q))) {
		control = bt_cb(skb)->sar << L2CAP_CTRL_SAR_SHIFT;
		err = l2cap_ertm_reassembly_sdu(chan, skb, control);
		if (err < 0) {
			/* Still busy: put the frame back for the next try. */
			skb_queue_head(&chan->busy_q, skb);
			return -EBUSY;
		}

		chan->buffer_seq = (chan->buffer_seq + 1) % 64;
	}

	if (!(chan->conn_state & L2CAP_CONN_RNR_SENT))
		goto done;

	/* We told the peer "not ready" earlier — poll it to resume. */
	control = chan->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT;
	control |= L2CAP_SUPER_RCV_READY | L2CAP_CTRL_POLL;
	l2cap_send_sframe(chan, control);
	chan->retry_count = 1;

	del_timer(&chan->retrans_timer);
	__mod_monitor_timer();

	chan->conn_state |= L2CAP_CONN_WAIT_F;

done:
	chan->conn_state &= ~L2CAP_CONN_LOCAL_BUSY;
	chan->conn_state &= ~L2CAP_CONN_RNR_SENT;

	BT_DBG("chan %p, Exit local busy", chan);

	return 0;
}
3267
/* Workqueue handler for the local-busy condition.
 *
 * Repeatedly sleeps and retries pushing the backlog to the socket
 * until it drains, a signal arrives, the socket errors out, or the
 * retry budget (L2CAP_LOCAL_BUSY_TRIES) is exhausted — in which case
 * the channel is disconnected.  Runs in process context, so it can
 * sleep with the socket lock dropped around each wait.
 */
static void l2cap_busy_work(struct work_struct *work)
{
	DECLARE_WAITQUEUE(wait, current);
	struct l2cap_chan *chan =
		container_of(work, struct l2cap_chan, busy_work);
	struct sock *sk = chan->sk;
	int n_tries = 0, timeo = HZ/5, err;
	struct sk_buff *skb;

	lock_sock(sk);

	add_wait_queue(sk_sleep(sk), &wait);
	while ((skb = skb_peek(&chan->busy_q))) {
		set_current_state(TASK_INTERRUPTIBLE);

		if (n_tries++ > L2CAP_LOCAL_BUSY_TRIES) {
			/* Retry budget exhausted: give up on the channel. */
			err = -EBUSY;
			l2cap_send_disconn_req(chan->conn, chan, EBUSY);
			break;
		}

		if (!timeo)
			timeo = HZ/5;

		if (signal_pending(current)) {
			err = sock_intr_errno(timeo);
			break;
		}

		/* Sleep without the socket lock so receivers can drain. */
		release_sock(sk);
		timeo = schedule_timeout(timeo);
		lock_sock(sk);

		err = sock_error(sk);
		if (err)
			break;

		if (l2cap_try_push_rx_skb(chan) == 0)
			break;
	}

	set_current_state(TASK_RUNNING);
	remove_wait_queue(sk_sleep(sk), &wait);

	release_sock(sk);
}
3314
/* Deliver an in-sequence I-frame, entering local-busy mode on failure.
 *
 * If already locally busy, the frame just joins the backlog.  If a
 * fresh delivery fails (socket receive buffer full), the channel sends
 * RNR to throttle the peer and schedules the busy workqueue to retry.
 */
static int l2cap_push_rx_skb(struct l2cap_chan *chan, struct sk_buff *skb, u16 control)
{
	int sctrl, err;

	if (chan->conn_state & L2CAP_CONN_LOCAL_BUSY) {
		/* Already throttled: append to the backlog and retry it. */
		bt_cb(skb)->sar = control >> L2CAP_CTRL_SAR_SHIFT;
		__skb_queue_tail(&chan->busy_q, skb);
		return l2cap_try_push_rx_skb(chan);


	}

	err = l2cap_ertm_reassembly_sdu(chan, skb, control);
	if (err >= 0) {
		chan->buffer_seq = (chan->buffer_seq + 1) % 64;
		return err;
	}

	/* Busy Condition */
	BT_DBG("chan %p, Enter local busy", chan);

	chan->conn_state |= L2CAP_CONN_LOCAL_BUSY;
	bt_cb(skb)->sar = control >> L2CAP_CTRL_SAR_SHIFT;
	__skb_queue_tail(&chan->busy_q, skb);

	/* Throttle the peer with Receiver Not Ready. */
	sctrl = chan->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT;
	sctrl |= L2CAP_SUPER_RCV_NOT_READY;
	l2cap_send_sframe(chan, sctrl);

	chan->conn_state |= L2CAP_CONN_RNR_SENT;

	/* The RNR already acknowledges everything — no separate ack needed. */
	del_timer(&chan->ack_timer);

	queue_work(_busy_wq, &chan->busy_work);

	return err;
}
3352
3353 static int l2cap_streaming_reassembly_sdu(struct l2cap_chan *chan, struct sk_buff *skb, u16 control)
3354 {
3355 struct sk_buff *_skb;
3356 int err = -EINVAL;
3357
3358 /*
3359 * TODO: We have to notify the userland if some data is lost with the
3360 * Streaming Mode.
3361 */
3362
3363 switch (control & L2CAP_CTRL_SAR) {
3364 case L2CAP_SDU_UNSEGMENTED:
3365 if (chan->conn_state & L2CAP_CONN_SAR_SDU) {
3366 kfree_skb(chan->sdu);
3367 break;
3368 }
3369
3370 err = sock_queue_rcv_skb(chan->sk, skb);
3371 if (!err)
3372 return 0;
3373
3374 break;
3375
3376 case L2CAP_SDU_START:
3377 if (chan->conn_state & L2CAP_CONN_SAR_SDU) {
3378 kfree_skb(chan->sdu);
3379 break;
3380 }
3381
3382 chan->sdu_len = get_unaligned_le16(skb->data);
3383 skb_pull(skb, 2);
3384
3385 if (chan->sdu_len > chan->imtu) {
3386 err = -EMSGSIZE;
3387 break;
3388 }
3389
3390 chan->sdu = bt_skb_alloc(chan->sdu_len, GFP_ATOMIC);
3391 if (!chan->sdu) {
3392 err = -ENOMEM;
3393 break;
3394 }
3395
3396 memcpy(skb_put(chan->sdu, skb->len), skb->data, skb->len);
3397
3398 chan->conn_state |= L2CAP_CONN_SAR_SDU;
3399 chan->partial_sdu_len = skb->len;
3400 err = 0;
3401 break;
3402
3403 case L2CAP_SDU_CONTINUE:
3404 if (!(chan->conn_state & L2CAP_CONN_SAR_SDU))
3405 break;
3406
3407 memcpy(skb_put(chan->sdu, skb->len), skb->data, skb->len);
3408
3409 chan->partial_sdu_len += skb->len;
3410 if (chan->partial_sdu_len > chan->sdu_len)
3411 kfree_skb(chan->sdu);
3412 else
3413 err = 0;
3414
3415 break;
3416
3417 case L2CAP_SDU_END:
3418 if (!(chan->conn_state & L2CAP_CONN_SAR_SDU))
3419 break;
3420
3421 memcpy(skb_put(chan->sdu, skb->len), skb->data, skb->len);
3422
3423 chan->conn_state &= ~L2CAP_CONN_SAR_SDU;
3424 chan->partial_sdu_len += skb->len;
3425
3426 if (chan->partial_sdu_len > chan->imtu)
3427 goto drop;
3428
3429 if (chan->partial_sdu_len == chan->sdu_len) {
3430 _skb = skb_clone(chan->sdu, GFP_ATOMIC);
3431 err = sock_queue_rcv_skb(chan->sk, _skb);
3432 if (err < 0)
3433 kfree_skb(_skb);
3434 }
3435 err = 0;
3436
3437 drop:
3438 kfree_skb(chan->sdu);
3439 break;
3440 }
3441
3442 kfree_skb(skb);
3443 return err;
3444 }
3445
3446 static void l2cap_check_srej_gap(struct l2cap_chan *chan, u8 tx_seq)
3447 {
3448 struct sk_buff *skb;
3449 u16 control;
3450
3451 while ((skb = skb_peek(&chan->srej_q))) {
3452 if (bt_cb(skb)->tx_seq != tx_seq)
3453 break;
3454
3455 skb = skb_dequeue(&chan->srej_q);
3456 control = bt_cb(skb)->sar << L2CAP_CTRL_SAR_SHIFT;
3457 l2cap_ertm_reassembly_sdu(chan, skb, control);
3458 chan->buffer_seq_srej =
3459 (chan->buffer_seq_srej + 1) % 64;
3460 tx_seq = (tx_seq + 1) % 64;
3461 }
3462 }
3463
3464 static void l2cap_resend_srejframe(struct l2cap_chan *chan, u8 tx_seq)
3465 {
3466 struct srej_list *l, *tmp;
3467 u16 control;
3468
3469 list_for_each_entry_safe(l, tmp, &chan->srej_l, list) {
3470 if (l->tx_seq == tx_seq) {
3471 list_del(&l->list);
3472 kfree(l);
3473 return;
3474 }
3475 control = L2CAP_SUPER_SELECT_REJECT;
3476 control |= l->tx_seq << L2CAP_CTRL_REQSEQ_SHIFT;
3477 l2cap_send_sframe(chan, control);
3478 list_del(&l->list);
3479 list_add_tail(&l->list, &chan->srej_l);
3480 }
3481 }
3482
3483 static void l2cap_send_srejframe(struct l2cap_chan *chan, u8 tx_seq)
3484 {
3485 struct srej_list *new;
3486 u16 control;
3487
3488 while (tx_seq != chan->expected_tx_seq) {
3489 control = L2CAP_SUPER_SELECT_REJECT;
3490 control |= chan->expected_tx_seq << L2CAP_CTRL_REQSEQ_SHIFT;
3491 l2cap_send_sframe(chan, control);
3492
3493 new = kzalloc(sizeof(struct srej_list), GFP_ATOMIC);
3494 new->tx_seq = chan->expected_tx_seq;
3495 chan->expected_tx_seq = (chan->expected_tx_seq + 1) % 64;
3496 list_add_tail(&new->list, &chan->srej_l);
3497 }
3498 chan->expected_tx_seq = (chan->expected_tx_seq + 1) % 64;
3499 }
3500
3501 static inline int l2cap_data_channel_iframe(struct l2cap_chan *chan, u16 rx_control, struct sk_buff *skb)
3502 {
3503 u8 tx_seq = __get_txseq(rx_control);
3504 u8 req_seq = __get_reqseq(rx_control);
3505 u8 sar = rx_control >> L2CAP_CTRL_SAR_SHIFT;
3506 int tx_seq_offset, expected_tx_seq_offset;
3507 int num_to_ack = (chan->tx_win/6) + 1;
3508 int err = 0;
3509
3510 BT_DBG("chan %p len %d tx_seq %d rx_control 0x%4.4x", chan, skb->len,
3511 tx_seq, rx_control);
3512
3513 if (L2CAP_CTRL_FINAL & rx_control &&
3514 chan->conn_state & L2CAP_CONN_WAIT_F) {
3515 del_timer(&chan->monitor_timer);
3516 if (chan->unacked_frames > 0)
3517 __mod_retrans_timer();
3518 chan->conn_state &= ~L2CAP_CONN_WAIT_F;
3519 }
3520
3521 chan->expected_ack_seq = req_seq;
3522 l2cap_drop_acked_frames(chan);
3523
3524 if (tx_seq == chan->expected_tx_seq)
3525 goto expected;
3526
3527 tx_seq_offset = (tx_seq - chan->buffer_seq) % 64;
3528 if (tx_seq_offset < 0)
3529 tx_seq_offset += 64;
3530
3531 /* invalid tx_seq */
3532 if (tx_seq_offset >= chan->tx_win) {
3533 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
3534 goto drop;
3535 }
3536
3537 if (chan->conn_state == L2CAP_CONN_LOCAL_BUSY)
3538 goto drop;
3539
3540 if (chan->conn_state & L2CAP_CONN_SREJ_SENT) {
3541 struct srej_list *first;
3542
3543 first = list_first_entry(&chan->srej_l,
3544 struct srej_list, list);
3545 if (tx_seq == first->tx_seq) {
3546 l2cap_add_to_srej_queue(chan, skb, tx_seq, sar);
3547 l2cap_check_srej_gap(chan, tx_seq);
3548
3549 list_del(&first->list);
3550 kfree(first);
3551
3552 if (list_empty(&chan->srej_l)) {
3553 chan->buffer_seq = chan->buffer_seq_srej;
3554 chan->conn_state &= ~L2CAP_CONN_SREJ_SENT;
3555 l2cap_send_ack(chan);
3556 BT_DBG("chan %p, Exit SREJ_SENT", chan);
3557 }
3558 } else {
3559 struct srej_list *l;
3560
3561 /* duplicated tx_seq */
3562 if (l2cap_add_to_srej_queue(chan, skb, tx_seq, sar) < 0)
3563 goto drop;
3564
3565 list_for_each_entry(l, &chan->srej_l, list) {
3566 if (l->tx_seq == tx_seq) {
3567 l2cap_resend_srejframe(chan, tx_seq);
3568 return 0;
3569 }
3570 }
3571 l2cap_send_srejframe(chan, tx_seq);
3572 }
3573 } else {
3574 expected_tx_seq_offset =
3575 (chan->expected_tx_seq - chan->buffer_seq) % 64;
3576 if (expected_tx_seq_offset < 0)
3577 expected_tx_seq_offset += 64;
3578
3579 /* duplicated tx_seq */
3580 if (tx_seq_offset < expected_tx_seq_offset)
3581 goto drop;
3582
3583 chan->conn_state |= L2CAP_CONN_SREJ_SENT;
3584
3585 BT_DBG("chan %p, Enter SREJ", chan);
3586
3587 INIT_LIST_HEAD(&chan->srej_l);
3588 chan->buffer_seq_srej = chan->buffer_seq;
3589
3590 __skb_queue_head_init(&chan->srej_q);
3591 __skb_queue_head_init(&chan->busy_q);
3592 l2cap_add_to_srej_queue(chan, skb, tx_seq, sar);
3593
3594 chan->conn_state |= L2CAP_CONN_SEND_PBIT;
3595
3596 l2cap_send_srejframe(chan, tx_seq);
3597
3598 del_timer(&chan->ack_timer);
3599 }
3600 return 0;
3601
3602 expected:
3603 chan->expected_tx_seq = (chan->expected_tx_seq + 1) % 64;
3604
3605 if (chan->conn_state & L2CAP_CONN_SREJ_SENT) {
3606 bt_cb(skb)->tx_seq = tx_seq;
3607 bt_cb(skb)->sar = sar;
3608 __skb_queue_tail(&chan->srej_q, skb);
3609 return 0;
3610 }
3611
3612 err = l2cap_push_rx_skb(chan, skb, rx_control);
3613 if (err < 0)
3614 return 0;
3615
3616 if (rx_control & L2CAP_CTRL_FINAL) {
3617 if (chan->conn_state & L2CAP_CONN_REJ_ACT)
3618 chan->conn_state &= ~L2CAP_CONN_REJ_ACT;
3619 else
3620 l2cap_retransmit_frames(chan);
3621 }
3622
3623 __mod_ack_timer();
3624
3625 chan->num_acked = (chan->num_acked + 1) % num_to_ack;
3626 if (chan->num_acked == num_to_ack - 1)
3627 l2cap_send_ack(chan);
3628
3629 return 0;
3630
3631 drop:
3632 kfree_skb(skb);
3633 return 0;
3634 }
3635
/* Process a received RR (Receiver Ready) S-frame.
 *
 * Updates acknowledgement state, then acts on the P/F bits: a poll
 * demands an immediate answer, a final bit closes a retransmission
 * exchange, and a plain RR clears remote-busy and resumes sending.
 */
static inline void l2cap_data_channel_rrframe(struct l2cap_chan *chan, u16 rx_control)
{
	BT_DBG("chan %p, req_seq %d ctrl 0x%4.4x", chan, __get_reqseq(rx_control),
						rx_control);

	chan->expected_ack_seq = __get_reqseq(rx_control);
	l2cap_drop_acked_frames(chan);

	if (rx_control & L2CAP_CTRL_POLL) {
		/* Peer polls us: we must answer with the F-bit set. */
		chan->conn_state |= L2CAP_CONN_SEND_FBIT;
		if (chan->conn_state & L2CAP_CONN_SREJ_SENT) {
			if ((chan->conn_state & L2CAP_CONN_REMOTE_BUSY) &&
					(chan->unacked_frames > 0))
				__mod_retrans_timer();

			chan->conn_state &= ~L2CAP_CONN_REMOTE_BUSY;
			l2cap_send_srejtail(chan);
		} else {
			l2cap_send_i_or_rr_or_rnr(chan);
		}

	} else if (rx_control & L2CAP_CTRL_FINAL) {
		chan->conn_state &= ~L2CAP_CONN_REMOTE_BUSY;

		/* F-bit answering a REJ we sent: the retransmission was
		 * already triggered, so don't retransmit twice. */
		if (chan->conn_state & L2CAP_CONN_REJ_ACT)
			chan->conn_state &= ~L2CAP_CONN_REJ_ACT;
		else
			l2cap_retransmit_frames(chan);

	} else {
		if ((chan->conn_state & L2CAP_CONN_REMOTE_BUSY) &&
				(chan->unacked_frames > 0))
			__mod_retrans_timer();

		chan->conn_state &= ~L2CAP_CONN_REMOTE_BUSY;
		if (chan->conn_state & L2CAP_CONN_SREJ_SENT)
			l2cap_send_ack(chan);
		else
			l2cap_ertm_send(chan);
	}
}
3677
/* Process a received REJ (Reject) S-frame.
 *
 * The peer asks for retransmission starting at req_seq.  A final bit
 * that matches an already-active REJ exchange is absorbed; otherwise
 * the unacknowledged frames are retransmitted, and while waiting for
 * an F-bit the exchange is marked active to suppress duplicates.
 */
static inline void l2cap_data_channel_rejframe(struct l2cap_chan *chan, u16 rx_control)
{
	u8 tx_seq = __get_reqseq(rx_control);

	BT_DBG("chan %p, req_seq %d ctrl 0x%4.4x", chan, tx_seq, rx_control);

	chan->conn_state &= ~L2CAP_CONN_REMOTE_BUSY;

	chan->expected_ack_seq = tx_seq;
	l2cap_drop_acked_frames(chan);

	if (rx_control & L2CAP_CTRL_FINAL) {
		if (chan->conn_state & L2CAP_CONN_REJ_ACT)
			chan->conn_state &= ~L2CAP_CONN_REJ_ACT;
		else
			l2cap_retransmit_frames(chan);
	} else {
		l2cap_retransmit_frames(chan);

		if (chan->conn_state & L2CAP_CONN_WAIT_F)
			chan->conn_state |= L2CAP_CONN_REJ_ACT;
	}
}
/* Handle an incoming ERTM Selective Reject (SREJ) supervisory frame.
 *
 * The peer asks for retransmission of the single frame ReqSeq.  The P
 * and F bits decide whether we must also answer a poll, or whether the
 * SREJ merely confirms one we already served (SREJ_ACT).
 */
static inline void l2cap_data_channel_srejframe(struct l2cap_chan *chan, u16 rx_control)
{
	u8 tx_seq = __get_reqseq(rx_control);

	BT_DBG("chan %p, req_seq %d ctrl 0x%4.4x", chan, tx_seq, rx_control);

	chan->conn_state &= ~L2CAP_CONN_REMOTE_BUSY;

	if (rx_control & L2CAP_CTRL_POLL) {
		/* SREJ with P-bit also acknowledges up to tx_seq. */
		chan->expected_ack_seq = tx_seq;
		l2cap_drop_acked_frames(chan);

		chan->conn_state |= L2CAP_CONN_SEND_FBIT;
		l2cap_retransmit_one_frame(chan, tx_seq);

		l2cap_ertm_send(chan);

		if (chan->conn_state & L2CAP_CONN_WAIT_F) {
			chan->srej_save_reqseq = tx_seq;
			chan->conn_state |= L2CAP_CONN_SREJ_ACT;
		}
	} else if (rx_control & L2CAP_CTRL_FINAL) {
		/* F-bit: retransmit unless this very frame was already
		 * resent while our poll was outstanding. */
		if ((chan->conn_state & L2CAP_CONN_SREJ_ACT) &&
				chan->srej_save_reqseq == tx_seq)
			chan->conn_state &= ~L2CAP_CONN_SREJ_ACT;
		else
			l2cap_retransmit_one_frame(chan, tx_seq);
	} else {
		l2cap_retransmit_one_frame(chan, tx_seq);
		if (chan->conn_state & L2CAP_CONN_WAIT_F) {
			/* Remember the request so a later F-bit SREJ for
			 * the same frame is not served twice. */
			chan->srej_save_reqseq = tx_seq;
			chan->conn_state |= L2CAP_CONN_SREJ_ACT;
		}
	}
}
3736
/* Handle an incoming ERTM Receiver Not Ready (RNR) supervisory frame.
 *
 * Marks the peer busy, takes the acknowledgement from ReqSeq and, when
 * no SREJ exchange is pending, stops the retransmission timer.  A
 * P-bit requires an F-bit answer (RR/RNR or the SREJ tail).
 */
static inline void l2cap_data_channel_rnrframe(struct l2cap_chan *chan, u16 rx_control)
{
	u8 tx_seq = __get_reqseq(rx_control);

	BT_DBG("chan %p, req_seq %d ctrl 0x%4.4x", chan, tx_seq, rx_control);

	chan->conn_state |= L2CAP_CONN_REMOTE_BUSY;
	chan->expected_ack_seq = tx_seq;
	l2cap_drop_acked_frames(chan);

	if (rx_control & L2CAP_CTRL_POLL)
		chan->conn_state |= L2CAP_CONN_SEND_FBIT;

	if (!(chan->conn_state & L2CAP_CONN_SREJ_SENT)) {
		del_timer(&chan->retrans_timer);
		if (rx_control & L2CAP_CTRL_POLL)
			l2cap_send_rr_or_rnr(chan, L2CAP_CTRL_FINAL);
		return;
	}

	/* SREJ recovery is in progress. */
	if (rx_control & L2CAP_CTRL_POLL)
		l2cap_send_srejtail(chan);
	else
		l2cap_send_sframe(chan, L2CAP_SUPER_RCV_READY);
}
3762
3763 static inline int l2cap_data_channel_sframe(struct l2cap_chan *chan, u16 rx_control, struct sk_buff *skb)
3764 {
3765 BT_DBG("chan %p rx_control 0x%4.4x len %d", chan, rx_control, skb->len);
3766
3767 if (L2CAP_CTRL_FINAL & rx_control &&
3768 chan->conn_state & L2CAP_CONN_WAIT_F) {
3769 del_timer(&chan->monitor_timer);
3770 if (chan->unacked_frames > 0)
3771 __mod_retrans_timer();
3772 chan->conn_state &= ~L2CAP_CONN_WAIT_F;
3773 }
3774
3775 switch (rx_control & L2CAP_CTRL_SUPERVISE) {
3776 case L2CAP_SUPER_RCV_READY:
3777 l2cap_data_channel_rrframe(chan, rx_control);
3778 break;
3779
3780 case L2CAP_SUPER_REJECT:
3781 l2cap_data_channel_rejframe(chan, rx_control);
3782 break;
3783
3784 case L2CAP_SUPER_SELECT_REJECT:
3785 l2cap_data_channel_srejframe(chan, rx_control);
3786 break;
3787
3788 case L2CAP_SUPER_RCV_NOT_READY:
3789 l2cap_data_channel_rnrframe(chan, rx_control);
3790 break;
3791 }
3792
3793 kfree_skb(skb);
3794 return 0;
3795 }
3796
3797 static int l2cap_ertm_data_rcv(struct sock *sk, struct sk_buff *skb)
3798 {
3799 struct l2cap_chan *chan = l2cap_pi(sk)->chan;
3800 u16 control;
3801 u8 req_seq;
3802 int len, next_tx_seq_offset, req_seq_offset;
3803
3804 control = get_unaligned_le16(skb->data);
3805 skb_pull(skb, 2);
3806 len = skb->len;
3807
3808 /*
3809 * We can just drop the corrupted I-frame here.
3810 * Receiver will miss it and start proper recovery
3811 * procedures and ask retransmission.
3812 */
3813 if (l2cap_check_fcs(chan, skb))
3814 goto drop;
3815
3816 if (__is_sar_start(control) && __is_iframe(control))
3817 len -= 2;
3818
3819 if (chan->fcs == L2CAP_FCS_CRC16)
3820 len -= 2;
3821
3822 if (len > chan->mps) {
3823 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
3824 goto drop;
3825 }
3826
3827 req_seq = __get_reqseq(control);
3828 req_seq_offset = (req_seq - chan->expected_ack_seq) % 64;
3829 if (req_seq_offset < 0)
3830 req_seq_offset += 64;
3831
3832 next_tx_seq_offset =
3833 (chan->next_tx_seq - chan->expected_ack_seq) % 64;
3834 if (next_tx_seq_offset < 0)
3835 next_tx_seq_offset += 64;
3836
3837 /* check for invalid req-seq */
3838 if (req_seq_offset > next_tx_seq_offset) {
3839 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
3840 goto drop;
3841 }
3842
3843 if (__is_iframe(control)) {
3844 if (len < 0) {
3845 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
3846 goto drop;
3847 }
3848
3849 l2cap_data_channel_iframe(chan, control, skb);
3850 } else {
3851 if (len != 0) {
3852 BT_ERR("%d", len);
3853 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
3854 goto drop;
3855 }
3856
3857 l2cap_data_channel_sframe(chan, control, skb);
3858 }
3859
3860 return 0;
3861
3862 drop:
3863 kfree_skb(skb);
3864 return 0;
3865 }
3866
/* Dispatch an inbound frame for a connection-oriented channel (@cid).
 *
 * NOTE(review): there is no bh_lock_sock() in this function although
 * "done" unlocks -- l2cap_get_chan_by_scid() presumably returns with
 * the channel's socket bh-locked; confirm against its definition.
 *
 * Consumes @skb (queued, backlogged or freed).  Always returns 0.
 */
static inline int l2cap_data_channel(struct l2cap_conn *conn, u16 cid, struct sk_buff *skb)
{
	struct l2cap_chan *chan;
	struct sock *sk = NULL;
	u16 control;
	u8 tx_seq;
	int len;

	chan = l2cap_get_chan_by_scid(conn, cid);
	if (!chan) {
		BT_DBG("unknown cid 0x%4.4x", cid);
		goto drop;
	}

	sk = chan->sk;

	BT_DBG("chan %p, len %d", chan, skb->len);

	if (sk->sk_state != BT_CONNECTED)
		goto drop;

	switch (chan->mode) {
	case L2CAP_MODE_BASIC:
		/* If socket recv buffers overflows we drop data here
		 * which is *bad* because L2CAP has to be reliable.
		 * But we don't have any other choice. L2CAP doesn't
		 * provide flow control mechanism. */

		if (chan->imtu < skb->len)
			goto drop;

		if (!sock_queue_rcv_skb(sk, skb))
			goto done;
		break;

	case L2CAP_MODE_ERTM:
		/* Process directly unless a user context owns the
		 * socket, in which case defer via the backlog. */
		if (!sock_owned_by_user(sk)) {
			l2cap_ertm_data_rcv(sk, skb);
		} else {
			if (sk_add_backlog(sk, skb))
				goto drop;
		}

		goto done;

	case L2CAP_MODE_STREAMING:
		control = get_unaligned_le16(skb->data);
		skb_pull(skb, 2);
		len = skb->len;

		if (l2cap_check_fcs(chan, skb))
			goto drop;

		/* First SDU segment carries a 2-byte SDU length. */
		if (__is_sar_start(control))
			len -= 2;

		if (chan->fcs == L2CAP_FCS_CRC16)
			len -= 2;

		/* S-frames are not valid in streaming mode. */
		if (len > chan->mps || len < 0 || __is_sframe(control))
			goto drop;

		tx_seq = __get_txseq(control);

		/* Streaming mode never recovers: just resynchronize the
		 * expected sequence number on a mismatch. */
		if (chan->expected_tx_seq == tx_seq)
			chan->expected_tx_seq = (chan->expected_tx_seq + 1) % 64;
		else
			chan->expected_tx_seq = (tx_seq + 1) % 64;

		l2cap_streaming_reassembly_sdu(chan, skb, control);

		goto done;

	default:
		BT_DBG("chan %p: bad mode 0x%2.2x", chan, chan->mode);
		break;
	}

drop:
	kfree_skb(skb);

done:
	if (sk)
		bh_unlock_sock(sk);

	return 0;
}
3954
3955 static inline int l2cap_conless_channel(struct l2cap_conn *conn, __le16 psm, struct sk_buff *skb)
3956 {
3957 struct sock *sk = NULL;
3958 struct l2cap_chan *chan;
3959
3960 chan = l2cap_global_chan_by_psm(0, psm, conn->src);
3961 if (!chan)
3962 goto drop;
3963
3964 sk = chan->sk;
3965
3966 bh_lock_sock(sk);
3967
3968 BT_DBG("sk %p, len %d", sk, skb->len);
3969
3970 if (sk->sk_state != BT_BOUND && sk->sk_state != BT_CONNECTED)
3971 goto drop;
3972
3973 if (l2cap_pi(sk)->chan->imtu < skb->len)
3974 goto drop;
3975
3976 if (!sock_queue_rcv_skb(sk, skb))
3977 goto done;
3978
3979 drop:
3980 kfree_skb(skb);
3981
3982 done:
3983 if (sk)
3984 bh_unlock_sock(sk);
3985 return 0;
3986 }
3987
3988 static inline int l2cap_att_channel(struct l2cap_conn *conn, __le16 cid, struct sk_buff *skb)
3989 {
3990 struct sock *sk = NULL;
3991 struct l2cap_chan *chan;
3992
3993 chan = l2cap_global_chan_by_scid(0, cid, conn->src);
3994 if (!chan)
3995 goto drop;
3996
3997 sk = chan->sk;
3998
3999 bh_lock_sock(sk);
4000
4001 BT_DBG("sk %p, len %d", sk, skb->len);
4002
4003 if (sk->sk_state != BT_BOUND && sk->sk_state != BT_CONNECTED)
4004 goto drop;
4005
4006 if (l2cap_pi(sk)->chan->imtu < skb->len)
4007 goto drop;
4008
4009 if (!sock_queue_rcv_skb(sk, skb))
4010 goto done;
4011
4012 drop:
4013 kfree_skb(skb);
4014
4015 done:
4016 if (sk)
4017 bh_unlock_sock(sk);
4018 return 0;
4019 }
4020
4021 static void l2cap_recv_frame(struct l2cap_conn *conn, struct sk_buff *skb)
4022 {
4023 struct l2cap_hdr *lh = (void *) skb->data;
4024 u16 cid, len;
4025 __le16 psm;
4026
4027 skb_pull(skb, L2CAP_HDR_SIZE);
4028 cid = __le16_to_cpu(lh->cid);
4029 len = __le16_to_cpu(lh->len);
4030
4031 if (len != skb->len) {
4032 kfree_skb(skb);
4033 return;
4034 }
4035
4036 BT_DBG("len %d, cid 0x%4.4x", len, cid);
4037
4038 switch (cid) {
4039 case L2CAP_CID_LE_SIGNALING:
4040 case L2CAP_CID_SIGNALING:
4041 l2cap_sig_channel(conn, skb);
4042 break;
4043
4044 case L2CAP_CID_CONN_LESS:
4045 psm = get_unaligned_le16(skb->data);
4046 skb_pull(skb, 2);
4047 l2cap_conless_channel(conn, psm, skb);
4048 break;
4049
4050 case L2CAP_CID_LE_DATA:
4051 l2cap_att_channel(conn, cid, skb);
4052 break;
4053
4054 default:
4055 l2cap_data_channel(conn, cid, skb);
4056 break;
4057 }
4058 }
4059
4060 /* ---- L2CAP interface with lower layer (HCI) ---- */
4061
4062 static int l2cap_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
4063 {
4064 int exact = 0, lm1 = 0, lm2 = 0;
4065 struct l2cap_chan *c;
4066
4067 if (type != ACL_LINK)
4068 return -EINVAL;
4069
4070 BT_DBG("hdev %s, bdaddr %s", hdev->name, batostr(bdaddr));
4071
4072 /* Find listening sockets and check their link_mode */
4073 read_lock(&chan_list_lock);
4074 list_for_each_entry(c, &chan_list, global_l) {
4075 struct sock *sk = c->sk;
4076
4077 if (sk->sk_state != BT_LISTEN)
4078 continue;
4079
4080 if (!bacmp(&bt_sk(sk)->src, &hdev->bdaddr)) {
4081 lm1 |= HCI_LM_ACCEPT;
4082 if (c->role_switch)
4083 lm1 |= HCI_LM_MASTER;
4084 exact++;
4085 } else if (!bacmp(&bt_sk(sk)->src, BDADDR_ANY)) {
4086 lm2 |= HCI_LM_ACCEPT;
4087 if (c->role_switch)
4088 lm2 |= HCI_LM_MASTER;
4089 }
4090 }
4091 read_unlock(&chan_list_lock);
4092
4093 return exact ? lm1 : lm2;
4094 }
4095
4096 static int l2cap_connect_cfm(struct hci_conn *hcon, u8 status)
4097 {
4098 struct l2cap_conn *conn;
4099
4100 BT_DBG("hcon %p bdaddr %s status %d", hcon, batostr(&hcon->dst), status);
4101
4102 if (!(hcon->type == ACL_LINK || hcon->type == LE_LINK))
4103 return -EINVAL;
4104
4105 if (!status) {
4106 conn = l2cap_conn_add(hcon, status);
4107 if (conn)
4108 l2cap_conn_ready(conn);
4109 } else
4110 l2cap_conn_del(hcon, bt_err(status));
4111
4112 return 0;
4113 }
4114
/* HCI callback: report the reason code to use when @hcon disconnects. */
static int l2cap_disconn_ind(struct hci_conn *hcon)
{
	struct l2cap_conn *conn = hcon->l2cap_data;

	BT_DBG("hcon %p", hcon);

	/* 0x13 is the HCI "Remote User Terminated Connection" code,
	 * used as the default when no L2CAP state exists. */
	if (hcon->type != ACL_LINK || !conn)
		return 0x13;

	return conn->disc_reason;
}
4126
4127 static int l2cap_disconn_cfm(struct hci_conn *hcon, u8 reason)
4128 {
4129 BT_DBG("hcon %p reason %d", hcon, reason);
4130
4131 if (!(hcon->type == ACL_LINK || hcon->type == LE_LINK))
4132 return -EINVAL;
4133
4134 l2cap_conn_del(hcon, bt_err(reason));
4135
4136 return 0;
4137 }
4138
4139 static inline void l2cap_check_encryption(struct l2cap_chan *chan, u8 encrypt)
4140 {
4141 if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED)
4142 return;
4143
4144 if (encrypt == 0x00) {
4145 if (chan->sec_level == BT_SECURITY_MEDIUM) {
4146 l2cap_chan_clear_timer(chan);
4147 l2cap_chan_set_timer(chan, HZ * 5);
4148 } else if (chan->sec_level == BT_SECURITY_HIGH)
4149 l2cap_chan_close(chan, ECONNREFUSED);
4150 } else {
4151 if (chan->sec_level == BT_SECURITY_MEDIUM)
4152 l2cap_chan_clear_timer(chan);
4153 }
4154 }
4155
/*
 * HCI callback: the security (authentication/encryption) procedure on
 * @hcon finished with @status; @encrypt is the current encryption
 * state.
 *
 * Walks every channel on the connection and advances its state
 * machine: established channels re-check their encryption
 * requirements, connecting channels either send the deferred
 * CONN_REQ / CONN_RSP or are shut down after a short timer.
 */
static int l2cap_security_cfm(struct hci_conn *hcon, u8 status, u8 encrypt)
{
	struct l2cap_conn *conn = hcon->l2cap_data;
	struct l2cap_chan *chan;

	if (!conn)
		return 0;

	BT_DBG("conn %p", conn);

	read_lock(&conn->chan_lock);

	list_for_each_entry(chan, &conn->chan_l, list) {
		struct sock *sk = chan->sk;

		bh_lock_sock(sk);

		/* A channel still waiting on its own connect exchange is
		 * not touched here. */
		if (chan->conf_state & L2CAP_CONF_CONNECT_PEND) {
			bh_unlock_sock(sk);
			continue;
		}

		if (!status && (sk->sk_state == BT_CONNECTED ||
						sk->sk_state == BT_CONFIG)) {
			l2cap_check_encryption(chan, encrypt);
			bh_unlock_sock(sk);
			continue;
		}

		if (sk->sk_state == BT_CONNECT) {
			if (!status) {
				/* Security succeeded: send the deferred
				 * connection request now. */
				struct l2cap_conn_req req;
				req.scid = cpu_to_le16(chan->scid);
				req.psm = chan->psm;

				chan->ident = l2cap_get_ident(conn);
				chan->conf_state |= L2CAP_CONF_CONNECT_PEND;

				l2cap_send_cmd(conn, chan->ident,
					L2CAP_CONN_REQ, sizeof(req), &req);
			} else {
				/* Security failed: short grace timer
				 * before teardown. */
				l2cap_chan_clear_timer(chan);
				l2cap_chan_set_timer(chan, HZ / 10);
			}
		} else if (sk->sk_state == BT_CONNECT2) {
			struct l2cap_conn_rsp rsp;
			__u16 result;

			if (!status) {
				sk->sk_state = BT_CONFIG;
				result = L2CAP_CR_SUCCESS;
			} else {
				sk->sk_state = BT_DISCONN;
				l2cap_chan_set_timer(chan, HZ / 10);
				result = L2CAP_CR_SEC_BLOCK;
			}

			/* Answer the peer's pending connect request. */
			rsp.scid = cpu_to_le16(chan->dcid);
			rsp.dcid = cpu_to_le16(chan->scid);
			rsp.result = cpu_to_le16(result);
			rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
			l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
							sizeof(rsp), &rsp);
		}

		bh_unlock_sock(sk);
	}

	read_unlock(&conn->chan_lock);

	return 0;
}
4228
/*
 * HCI callback: an ACL data fragment arrived on @hcon.
 *
 * Reassembles L2CAP PDUs from ACL start/continuation fragments into
 * conn->rx_skb and passes each complete frame to l2cap_recv_frame().
 * Malformed fragment sequences reset the reassembly state and mark
 * the connection unreliable.  Consumes @skb.  Always returns 0.
 */
static int l2cap_recv_acldata(struct hci_conn *hcon, struct sk_buff *skb, u16 flags)
{
	struct l2cap_conn *conn = hcon->l2cap_data;

	if (!conn)
		conn = l2cap_conn_add(hcon, 0);

	if (!conn)
		goto drop;

	BT_DBG("conn %p len %d flags 0x%x", conn, skb->len, flags);

	if (!(flags & ACL_CONT)) {
		struct l2cap_hdr *hdr;
		struct l2cap_chan *chan;
		u16 cid;
		int len;

		/* A start fragment while reassembly is in progress means
		 * the previous PDU was truncated: discard it. */
		if (conn->rx_len) {
			BT_ERR("Unexpected start frame (len %d)", skb->len);
			kfree_skb(conn->rx_skb);
			conn->rx_skb = NULL;
			conn->rx_len = 0;
			l2cap_conn_unreliable(conn, ECOMM);
		}

		/* Start fragment always begin with Basic L2CAP header */
		if (skb->len < L2CAP_HDR_SIZE) {
			BT_ERR("Frame is too short (len %d)", skb->len);
			l2cap_conn_unreliable(conn, ECOMM);
			goto drop;
		}

		hdr = (struct l2cap_hdr *) skb->data;
		len = __le16_to_cpu(hdr->len) + L2CAP_HDR_SIZE;
		cid = __le16_to_cpu(hdr->cid);

		if (len == skb->len) {
			/* Complete frame received */
			l2cap_recv_frame(conn, skb);
			return 0;
		}

		BT_DBG("Start: total len %d, frag len %d", len, skb->len);

		if (skb->len > len) {
			BT_ERR("Frame is too long (len %d, expected len %d)",
				skb->len, len);
			l2cap_conn_unreliable(conn, ECOMM);
			goto drop;
		}

		chan = l2cap_get_chan_by_scid(conn, cid);

		/* NOTE(review): the bh_unlock_sock() calls below suggest
		 * l2cap_get_chan_by_scid() returns with the socket
		 * bh-locked; confirm against its definition. */
		if (chan && chan->sk) {
			struct sock *sk = chan->sk;

			/* Reject PDUs larger than the channel's receive
			 * MTU before allocating the reassembly buffer. */
			if (chan->imtu < len - L2CAP_HDR_SIZE) {
				BT_ERR("Frame exceeding recv MTU (len %d, "
							"MTU %d)", len,
							chan->imtu);
				bh_unlock_sock(sk);
				l2cap_conn_unreliable(conn, ECOMM);
				goto drop;
			}
			bh_unlock_sock(sk);
		}

		/* Allocate skb for the complete frame (with header) */
		conn->rx_skb = bt_skb_alloc(len, GFP_ATOMIC);
		if (!conn->rx_skb)
			goto drop;

		skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
							skb->len);
		conn->rx_len = len - skb->len;
	} else {
		BT_DBG("Cont: frag len %d (expecting %d)", skb->len, conn->rx_len);

		if (!conn->rx_len) {
			BT_ERR("Unexpected continuation frame (len %d)", skb->len);
			l2cap_conn_unreliable(conn, ECOMM);
			goto drop;
		}

		if (skb->len > conn->rx_len) {
			BT_ERR("Fragment is too long (len %d, expected %d)",
				skb->len, conn->rx_len);
			kfree_skb(conn->rx_skb);
			conn->rx_skb = NULL;
			conn->rx_len = 0;
			l2cap_conn_unreliable(conn, ECOMM);
			goto drop;
		}

		skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
							skb->len);
		conn->rx_len -= skb->len;

		if (!conn->rx_len) {
			/* Complete frame received */
			l2cap_recv_frame(conn, conn->rx_skb);
			conn->rx_skb = NULL;
		}
	}

drop:
	kfree_skb(skb);
	return 0;
}
4339
4340 static int l2cap_debugfs_show(struct seq_file *f, void *p)
4341 {
4342 struct l2cap_chan *c;
4343
4344 read_lock_bh(&chan_list_lock);
4345
4346 list_for_each_entry(c, &chan_list, global_l) {
4347 struct sock *sk = c->sk;
4348
4349 seq_printf(f, "%s %s %d %d 0x%4.4x 0x%4.4x %d %d %d %d\n",
4350 batostr(&bt_sk(sk)->src),
4351 batostr(&bt_sk(sk)->dst),
4352 sk->sk_state, __le16_to_cpu(c->psm),
4353 c->scid, c->dcid, c->imtu, c->omtu,
4354 c->sec_level, c->mode);
4355 }
4356
4357 read_unlock_bh(&chan_list_lock);
4358
4359 return 0;
4360 }
4361
/* debugfs open: bind the seq_file show routine to this file. */
static int l2cap_debugfs_open(struct inode *inode, struct file *file)
{
	return single_open(file, l2cap_debugfs_show, inode->i_private);
}
4366
/* seq_file plumbing for the L2CAP debugfs channel list. */
static const struct file_operations l2cap_debugfs_fops = {
	.open = l2cap_debugfs_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};

/* Dentry of the debugfs file, kept so l2cap_exit() can remove it. */
static struct dentry *l2cap_debugfs;

/* Hooks registered with the HCI core: the entry points through which
 * the lower layer drives L2CAP. */
static struct hci_proto l2cap_hci_proto = {
	.name		= "L2CAP",
	.id		= HCI_PROTO_L2CAP,
	.connect_ind	= l2cap_connect_ind,
	.connect_cfm	= l2cap_connect_cfm,
	.disconn_ind	= l2cap_disconn_ind,
	.disconn_cfm	= l2cap_disconn_cfm,
	.security_cfm	= l2cap_security_cfm,
	.recv_acldata	= l2cap_recv_acldata
};
4386
4387 int __init l2cap_init(void)
4388 {
4389 int err;
4390
4391 err = l2cap_init_sockets();
4392 if (err < 0)
4393 return err;
4394
4395 _busy_wq = create_singlethread_workqueue("l2cap");
4396 if (!_busy_wq) {
4397 err = -ENOMEM;
4398 goto error;
4399 }
4400
4401 err = hci_register_proto(&l2cap_hci_proto);
4402 if (err < 0) {
4403 BT_ERR("L2CAP protocol registration failed");
4404 bt_sock_unregister(BTPROTO_L2CAP);
4405 goto error;
4406 }
4407
4408 if (bt_debugfs) {
4409 l2cap_debugfs = debugfs_create_file("l2cap", 0444,
4410 bt_debugfs, NULL, &l2cap_debugfs_fops);
4411 if (!l2cap_debugfs)
4412 BT_ERR("Failed to create L2CAP debug file");
4413 }
4414
4415 return 0;
4416
4417 error:
4418 destroy_workqueue(_busy_wq);
4419 l2cap_cleanup_sockets();
4420 return err;
4421 }
4422
/*
 * Unregister the L2CAP layer: remove the debugfs entry, drain and
 * destroy the workqueue, detach from the HCI core and tear down the
 * sockets.
 */
void l2cap_exit(void)
{
	debugfs_remove(l2cap_debugfs);

	/* Drain pending work before the workqueue goes away. */
	flush_workqueue(_busy_wq);
	destroy_workqueue(_busy_wq);

	if (hci_unregister_proto(&l2cap_hci_proto) < 0)
		BT_ERR("L2CAP protocol unregistration failed");

	l2cap_cleanup_sockets();
}
4435
/* Module parameter (mode 0644): disables Enhanced Retransmission Mode
 * when set. */
module_param(disable_ertm, bool, 0644);
MODULE_PARM_DESC(disable_ertm, "Disable enhanced retransmission mode");