Bluetooth: EWS: rewrite handling POLL (P) bit
net/bluetooth/l2cap_core.c (mt8127/android_kernel_alcatel_ttab.git)
1/*
2 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (C) 2000-2001 Qualcomm Incorporated
4 Copyright (C) 2009-2010 Gustavo F. Padovan <gustavo@padovan.org>
5 Copyright (C) 2010 Google Inc.
6
7 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
8
9 This program is free software; you can redistribute it and/or modify
10 it under the terms of the GNU General Public License version 2 as
11 published by the Free Software Foundation;
12
13 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
14 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
15 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
16 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
17 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
18 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
19 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
20 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
21
22 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
23 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
24 SOFTWARE IS DISCLAIMED.
25*/
26
27/* Bluetooth L2CAP core. */
28
29#include <linux/module.h>
30
31#include <linux/types.h>
32#include <linux/capability.h>
33#include <linux/errno.h>
34#include <linux/kernel.h>
35#include <linux/sched.h>
36#include <linux/slab.h>
37#include <linux/poll.h>
38#include <linux/fcntl.h>
39#include <linux/init.h>
40#include <linux/interrupt.h>
41#include <linux/socket.h>
42#include <linux/skbuff.h>
43#include <linux/list.h>
44#include <linux/device.h>
45#include <linux/debugfs.h>
46#include <linux/seq_file.h>
47#include <linux/uaccess.h>
48#include <linux/crc16.h>
49#include <net/sock.h>
50
51#include <asm/system.h>
52#include <asm/unaligned.h>
53
54#include <net/bluetooth/bluetooth.h>
55#include <net/bluetooth/hci_core.h>
56#include <net/bluetooth/l2cap.h>
57#include <net/bluetooth/smp.h>
58
59int disable_ertm;
60int enable_hs;
61
62static u32 l2cap_feat_mask = L2CAP_FEAT_FIXED_CHAN;
63static u8 l2cap_fixed_chan[8] = { 0x02, };
64
65static LIST_HEAD(chan_list);
66static DEFINE_RWLOCK(chan_list_lock);
67
68static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
69 u8 code, u8 ident, u16 dlen, void *data);
70static void l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len,
71 void *data);
72static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data);
73static void l2cap_send_disconn_req(struct l2cap_conn *conn,
74 struct l2cap_chan *chan, int err);
75
76static int l2cap_ertm_data_rcv(struct sock *sk, struct sk_buff *skb);
77
78/* ---- L2CAP channels ---- */
79
80static inline void chan_hold(struct l2cap_chan *c)
81{
82 atomic_inc(&c->refcnt);
83}
84
85static inline void chan_put(struct l2cap_chan *c)
86{
87 if (atomic_dec_and_test(&c->refcnt))
88 kfree(c);
89}
90
91static struct l2cap_chan *__l2cap_get_chan_by_dcid(struct l2cap_conn *conn, u16 cid)
92{
93 struct l2cap_chan *c;
94
95 list_for_each_entry(c, &conn->chan_l, list) {
96 if (c->dcid == cid)
97 return c;
98 }
99 return NULL;
100
101}
102
103static struct l2cap_chan *__l2cap_get_chan_by_scid(struct l2cap_conn *conn, u16 cid)
104{
105 struct l2cap_chan *c;
106
107 list_for_each_entry(c, &conn->chan_l, list) {
108 if (c->scid == cid)
109 return c;
110 }
111 return NULL;
112}
113
114/* Find channel with given SCID.
115 * Returns the channel with its socket locked */
116static struct l2cap_chan *l2cap_get_chan_by_scid(struct l2cap_conn *conn, u16 cid)
117{
118 struct l2cap_chan *c;
119
120 read_lock(&conn->chan_lock);
121 c = __l2cap_get_chan_by_scid(conn, cid);
122 if (c)
123 bh_lock_sock(c->sk);
124 read_unlock(&conn->chan_lock);
125 return c;
126}
127
128static struct l2cap_chan *__l2cap_get_chan_by_ident(struct l2cap_conn *conn, u8 ident)
129{
130 struct l2cap_chan *c;
131
132 list_for_each_entry(c, &conn->chan_l, list) {
133 if (c->ident == ident)
134 return c;
135 }
136 return NULL;
137}
138
139static inline struct l2cap_chan *l2cap_get_chan_by_ident(struct l2cap_conn *conn, u8 ident)
140{
141 struct l2cap_chan *c;
142
143 read_lock(&conn->chan_lock);
144 c = __l2cap_get_chan_by_ident(conn, ident);
145 if (c)
146 bh_lock_sock(c->sk);
147 read_unlock(&conn->chan_lock);
148 return c;
149}
150
151static struct l2cap_chan *__l2cap_global_chan_by_addr(__le16 psm, bdaddr_t *src)
152{
153 struct l2cap_chan *c;
154
155 list_for_each_entry(c, &chan_list, global_l) {
156 if (c->sport == psm && !bacmp(&bt_sk(c->sk)->src, src))
157 goto found;
158 }
159
160 c = NULL;
161found:
162 return c;
163}
164
165int l2cap_add_psm(struct l2cap_chan *chan, bdaddr_t *src, __le16 psm)
166{
167 int err;
168
169 write_lock_bh(&chan_list_lock);
170
171 if (psm && __l2cap_global_chan_by_addr(psm, src)) {
172 err = -EADDRINUSE;
173 goto done;
174 }
175
176 if (psm) {
177 chan->psm = psm;
178 chan->sport = psm;
179 err = 0;
180 } else {
181 u16 p;
182
183 err = -EINVAL;
184 for (p = 0x1001; p < 0x1100; p += 2)
185 if (!__l2cap_global_chan_by_addr(cpu_to_le16(p), src)) {
186 chan->psm = cpu_to_le16(p);
187 chan->sport = cpu_to_le16(p);
188 err = 0;
189 break;
190 }
191 }
192
193done:
194 write_unlock_bh(&chan_list_lock);
195 return err;
196}
197
198int l2cap_add_scid(struct l2cap_chan *chan, __u16 scid)
199{
200 write_lock_bh(&chan_list_lock);
201
202 chan->scid = scid;
203
204 write_unlock_bh(&chan_list_lock);
205
206 return 0;
207}
208
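/* Allocate the first unused dynamic CID on this connection; returns 0 when
 * the dynamic CID range is exhausted. */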
209static u16 l2cap_alloc_cid(struct l2cap_conn *conn)
210{
211 u16 cid = L2CAP_CID_DYN_START;
212
213 for (; cid < L2CAP_CID_DYN_END; cid++) {
214 if (!__l2cap_get_chan_by_scid(conn, cid))
215 return cid;
216 }
217
218 return 0;
219}
220
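/*
 * Channel timers hold a reference on the channel: l2cap_set_timer() takes it
 * only when arming a timer that was not already pending, and
 * l2cap_clear_timer() drops it when cancelling a pending timer.
 */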
221static void l2cap_set_timer(struct l2cap_chan *chan, struct timer_list *timer, long timeout)
222{
223 BT_DBG("chan %p state %d timeout %ld", chan->sk, chan->state, timeout);
224
225 if (!mod_timer(timer, jiffies + msecs_to_jiffies(timeout)))
226 chan_hold(chan);
227}
228
229static void l2cap_clear_timer(struct l2cap_chan *chan, struct timer_list *timer)
230{
231 BT_DBG("chan %p state %d", chan, chan->state);
232
233 if (timer_pending(timer) && del_timer(timer))
234 chan_put(chan);
235}
236
237static void l2cap_state_change(struct l2cap_chan *chan, int state)
238{
239 chan->state = state;
240 chan->ops->state_change(chan->data, state);
241}
242
243static void l2cap_chan_timeout(unsigned long arg)
244{
245 struct l2cap_chan *chan = (struct l2cap_chan *) arg;
246 struct sock *sk = chan->sk;
247 int reason;
248
249 BT_DBG("chan %p state %d", chan, chan->state);
250
251 bh_lock_sock(sk);
252
253 if (sock_owned_by_user(sk)) {
254 /* sk is owned by user. Try again later */
255 __set_chan_timer(chan, HZ / 5);
256 bh_unlock_sock(sk);
257 chan_put(chan);
258 return;
259 }
260
261 if (chan->state == BT_CONNECTED || chan->state == BT_CONFIG)
262 reason = ECONNREFUSED;
263 else if (chan->state == BT_CONNECT &&
264 chan->sec_level != BT_SECURITY_SDP)
265 reason = ECONNREFUSED;
266 else
267 reason = ETIMEDOUT;
268
269 l2cap_chan_close(chan, reason);
270
271 bh_unlock_sock(sk);
272
273 chan->ops->close(chan->data);
274 chan_put(chan);
275}
276
277struct l2cap_chan *l2cap_chan_create(struct sock *sk)
278{
279 struct l2cap_chan *chan;
280
281 chan = kzalloc(sizeof(*chan), GFP_ATOMIC);
282 if (!chan)
283 return NULL;
284
285 chan->sk = sk;
286
287 write_lock_bh(&chan_list_lock);
288 list_add(&chan->global_l, &chan_list);
289 write_unlock_bh(&chan_list_lock);
290
291 setup_timer(&chan->chan_timer, l2cap_chan_timeout, (unsigned long) chan);
292
293 chan->state = BT_OPEN;
294
295 atomic_set(&chan->refcnt, 1);
296
297 return chan;
298}
299
300void l2cap_chan_destroy(struct l2cap_chan *chan)
301{
302 write_lock_bh(&chan_list_lock);
303 list_del(&chan->global_l);
304 write_unlock_bh(&chan_list_lock);
305
306 chan_put(chan);
307}
308
309static void __l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan)
310{
311 BT_DBG("conn %p, psm 0x%2.2x, dcid 0x%4.4x", conn,
312 chan->psm, chan->dcid);
313
314 conn->disc_reason = 0x13;
315
316 chan->conn = conn;
317
318 if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED) {
319 if (conn->hcon->type == LE_LINK) {
320 /* LE connection */
321 chan->omtu = L2CAP_LE_DEFAULT_MTU;
322 chan->scid = L2CAP_CID_LE_DATA;
323 chan->dcid = L2CAP_CID_LE_DATA;
324 } else {
325 /* Alloc CID for connection-oriented socket */
326 chan->scid = l2cap_alloc_cid(conn);
327 chan->omtu = L2CAP_DEFAULT_MTU;
328 }
329 } else if (chan->chan_type == L2CAP_CHAN_CONN_LESS) {
330 /* Connectionless socket */
331 chan->scid = L2CAP_CID_CONN_LESS;
332 chan->dcid = L2CAP_CID_CONN_LESS;
333 chan->omtu = L2CAP_DEFAULT_MTU;
334 } else {
335 /* Raw socket can send/recv signalling messages only */
336 chan->scid = L2CAP_CID_SIGNALING;
337 chan->dcid = L2CAP_CID_SIGNALING;
338 chan->omtu = L2CAP_DEFAULT_MTU;
339 }
340
341 chan_hold(chan);
342
343 list_add(&chan->list, &conn->chan_l);
344}
345
346/* Delete channel.
347 * Must be called on the locked socket. */
348static void l2cap_chan_del(struct l2cap_chan *chan, int err)
349{
350 struct sock *sk = chan->sk;
351 struct l2cap_conn *conn = chan->conn;
352 struct sock *parent = bt_sk(sk)->parent;
353
354 __clear_chan_timer(chan);
355
356 BT_DBG("chan %p, conn %p, err %d", chan, conn, err);
357
358 if (conn) {
359 /* Delete from channel list */
360 write_lock_bh(&conn->chan_lock);
361 list_del(&chan->list);
362 write_unlock_bh(&conn->chan_lock);
363 chan_put(chan);
364
365 chan->conn = NULL;
366 hci_conn_put(conn->hcon);
367 }
368
369 l2cap_state_change(chan, BT_CLOSED);
370 sock_set_flag(sk, SOCK_ZAPPED);
371
372 if (err)
373 sk->sk_err = err;
374
375 if (parent) {
376 bt_accept_unlink(sk);
377 parent->sk_data_ready(parent, 0);
378 } else
379 sk->sk_state_change(sk);
380
381 if (!(test_bit(CONF_OUTPUT_DONE, &chan->conf_state) &&
382 test_bit(CONF_INPUT_DONE, &chan->conf_state)))
383 return;
384
385 skb_queue_purge(&chan->tx_q);
386
387 if (chan->mode == L2CAP_MODE_ERTM) {
388 struct srej_list *l, *tmp;
389
390 __clear_retrans_timer(chan);
391 __clear_monitor_timer(chan);
392 __clear_ack_timer(chan);
393
394 skb_queue_purge(&chan->srej_q);
395
396 list_for_each_entry_safe(l, tmp, &chan->srej_l, list) {
397 list_del(&l->list);
398 kfree(l);
399 }
400 }
401}
402
403static void l2cap_chan_cleanup_listen(struct sock *parent)
404{
405 struct sock *sk;
406
407 BT_DBG("parent %p", parent);
408
409 /* Close not yet accepted channels */
410 while ((sk = bt_accept_dequeue(parent, NULL))) {
411 struct l2cap_chan *chan = l2cap_pi(sk)->chan;
412 __clear_chan_timer(chan);
413 lock_sock(sk);
414 l2cap_chan_close(chan, ECONNRESET);
415 release_sock(sk);
416 chan->ops->close(chan->data);
417 }
418}
419
420void l2cap_chan_close(struct l2cap_chan *chan, int reason)
421{
422 struct l2cap_conn *conn = chan->conn;
423 struct sock *sk = chan->sk;
424
425 BT_DBG("chan %p state %d socket %p", chan, chan->state, sk->sk_socket);
426
427 switch (chan->state) {
428 case BT_LISTEN:
429 l2cap_chan_cleanup_listen(sk);
430
431 l2cap_state_change(chan, BT_CLOSED);
432 sock_set_flag(sk, SOCK_ZAPPED);
433 break;
434
435 case BT_CONNECTED:
436 case BT_CONFIG:
437 if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED &&
438 conn->hcon->type == ACL_LINK) {
439 __clear_chan_timer(chan);
440 __set_chan_timer(chan, sk->sk_sndtimeo);
441 l2cap_send_disconn_req(conn, chan, reason);
442 } else
443 l2cap_chan_del(chan, reason);
444 break;
445
446 case BT_CONNECT2:
447 if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED &&
448 conn->hcon->type == ACL_LINK) {
449 struct l2cap_conn_rsp rsp;
450 __u16 result;
451
452 if (bt_sk(sk)->defer_setup)
453 result = L2CAP_CR_SEC_BLOCK;
454 else
455 result = L2CAP_CR_BAD_PSM;
456 l2cap_state_change(chan, BT_DISCONN);
457
458 rsp.scid = cpu_to_le16(chan->dcid);
459 rsp.dcid = cpu_to_le16(chan->scid);
460 rsp.result = cpu_to_le16(result);
461 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
462 l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
463 sizeof(rsp), &rsp);
464 }
465
466 l2cap_chan_del(chan, reason);
467 break;
468
469 case BT_CONNECT:
470 case BT_DISCONN:
471 l2cap_chan_del(chan, reason);
472 break;
473
474 default:
475 sock_set_flag(sk, SOCK_ZAPPED);
476 break;
477 }
478}
479
480static inline u8 l2cap_get_auth_type(struct l2cap_chan *chan)
481{
482 if (chan->chan_type == L2CAP_CHAN_RAW) {
483 switch (chan->sec_level) {
484 case BT_SECURITY_HIGH:
485 return HCI_AT_DEDICATED_BONDING_MITM;
486 case BT_SECURITY_MEDIUM:
487 return HCI_AT_DEDICATED_BONDING;
488 default:
489 return HCI_AT_NO_BONDING;
490 }
491 } else if (chan->psm == cpu_to_le16(0x0001)) {
492 if (chan->sec_level == BT_SECURITY_LOW)
493 chan->sec_level = BT_SECURITY_SDP;
494
495 if (chan->sec_level == BT_SECURITY_HIGH)
496 return HCI_AT_NO_BONDING_MITM;
497 else
498 return HCI_AT_NO_BONDING;
499 } else {
500 switch (chan->sec_level) {
501 case BT_SECURITY_HIGH:
502 return HCI_AT_GENERAL_BONDING_MITM;
503 case BT_SECURITY_MEDIUM:
504 return HCI_AT_GENERAL_BONDING;
505 default:
506 return HCI_AT_NO_BONDING;
507 }
508 }
509}
510
511/* Service level security */
512static inline int l2cap_check_security(struct l2cap_chan *chan)
513{
514 struct l2cap_conn *conn = chan->conn;
515 __u8 auth_type;
516
517 auth_type = l2cap_get_auth_type(chan);
518
519 return hci_conn_security(conn->hcon, chan->sec_level, auth_type);
520}
521
522static u8 l2cap_get_ident(struct l2cap_conn *conn)
523{
524 u8 id;
525
 526	/* Get next available identifier.
527 * 1 - 128 are used by kernel.
528 * 129 - 199 are reserved.
529 * 200 - 254 are used by utilities like l2ping, etc.
530 */
531
532 spin_lock_bh(&conn->lock);
533
534 if (++conn->tx_ident > 128)
535 conn->tx_ident = 1;
536
537 id = conn->tx_ident;
538
539 spin_unlock_bh(&conn->lock);
540
541 return id;
542}
543
544static void l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len, void *data)
545{
546 struct sk_buff *skb = l2cap_build_cmd(conn, code, ident, len, data);
547 u8 flags;
548
549 BT_DBG("code 0x%2.2x", code);
550
551 if (!skb)
552 return;
553
554 if (lmp_no_flush_capable(conn->hcon->hdev))
555 flags = ACL_START_NO_FLUSH;
556 else
557 flags = ACL_START;
558
559 bt_cb(skb)->force_active = BT_POWER_FORCE_ACTIVE_ON;
560
561 hci_send_acl(conn->hcon, skb, flags);
562}
563
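/*
 * Build and send a single S-frame on the channel.  The caller supplies the
 * supervisory function and ReqSeq in 'control'; the F and P bits are added
 * here when CONN_SEND_FBIT/CONN_SEND_PBIT are pending, and a CRC16 FCS is
 * appended when the channel uses it.  The __set_ctrl_final()/__set_ctrl_poll()
 * helpers live in l2cap.h (not shown in this file) and are expected to pick
 * the standard or extended control-field bit position based on FLAG_EXT_CTRL.
 */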
564static inline void l2cap_send_sframe(struct l2cap_chan *chan, u16 control)
565{
566 struct sk_buff *skb;
567 struct l2cap_hdr *lh;
568 struct l2cap_conn *conn = chan->conn;
569 int count, hlen = L2CAP_HDR_SIZE + 2;
570 u8 flags;
571
572 if (chan->state != BT_CONNECTED)
573 return;
574
575 if (chan->fcs == L2CAP_FCS_CRC16)
576 hlen += 2;
577
578 BT_DBG("chan %p, control 0x%2.2x", chan, control);
579
580 count = min_t(unsigned int, conn->mtu, hlen);
581
582 control |= __set_sframe(chan);
583
584 if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state))
585 control |= __set_ctrl_final(chan);
586
587 if (test_and_clear_bit(CONN_SEND_PBIT, &chan->conn_state))
588 control |= __set_ctrl_poll(chan);
589
590 skb = bt_skb_alloc(count, GFP_ATOMIC);
591 if (!skb)
592 return;
593
594 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
595 lh->len = cpu_to_le16(hlen - L2CAP_HDR_SIZE);
596 lh->cid = cpu_to_le16(chan->dcid);
597 put_unaligned_le16(control, skb_put(skb, 2));
598
599 if (chan->fcs == L2CAP_FCS_CRC16) {
600 u16 fcs = crc16(0, (u8 *)lh, count - 2);
601 put_unaligned_le16(fcs, skb_put(skb, 2));
602 }
603
604 if (lmp_no_flush_capable(conn->hcon->hdev))
605 flags = ACL_START_NO_FLUSH;
606 else
607 flags = ACL_START;
608
609 bt_cb(skb)->force_active = test_bit(FLAG_FORCE_ACTIVE, &chan->flags);
610
611 hci_send_acl(chan->conn->hcon, skb, flags);
612}
613
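/*
 * Send an RR, or an RNR while the local side is busy (remembering via
 * CONN_RNR_SENT that receiver-not-ready was signalled), acknowledging
 * everything up to buffer_seq.
 */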
614static inline void l2cap_send_rr_or_rnr(struct l2cap_chan *chan, u16 control)
615{
616 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
617 control |= __set_ctrl_super(chan, L2CAP_SUPER_RNR);
618 set_bit(CONN_RNR_SENT, &chan->conn_state);
619 } else
620 control |= __set_ctrl_super(chan, L2CAP_SUPER_RR);
621
622 control |= __set_reqseq(chan, chan->buffer_seq);
623
624 l2cap_send_sframe(chan, control);
625}
626
627static inline int __l2cap_no_conn_pending(struct l2cap_chan *chan)
628{
629 return !test_bit(CONF_CONNECT_PEND, &chan->conf_state);
630}
631
632static void l2cap_do_start(struct l2cap_chan *chan)
633{
634 struct l2cap_conn *conn = chan->conn;
635
636 if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) {
637 if (!(conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE))
638 return;
639
640 if (l2cap_check_security(chan) &&
641 __l2cap_no_conn_pending(chan)) {
642 struct l2cap_conn_req req;
643 req.scid = cpu_to_le16(chan->scid);
644 req.psm = chan->psm;
645
646 chan->ident = l2cap_get_ident(conn);
647 set_bit(CONF_CONNECT_PEND, &chan->conf_state);
648
649 l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_REQ,
650 sizeof(req), &req);
651 }
652 } else {
653 struct l2cap_info_req req;
654 req.type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
655
656 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
657 conn->info_ident = l2cap_get_ident(conn);
658
659 mod_timer(&conn->info_timer, jiffies +
660 msecs_to_jiffies(L2CAP_INFO_TIMEOUT));
661
662 l2cap_send_cmd(conn, conn->info_ident,
663 L2CAP_INFO_REQ, sizeof(req), &req);
664 }
665}
666
667static inline int l2cap_mode_supported(__u8 mode, __u32 feat_mask)
668{
669 u32 local_feat_mask = l2cap_feat_mask;
670 if (!disable_ertm)
671 local_feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING;
672
673 switch (mode) {
674 case L2CAP_MODE_ERTM:
675 return L2CAP_FEAT_ERTM & feat_mask & local_feat_mask;
676 case L2CAP_MODE_STREAMING:
677 return L2CAP_FEAT_STREAMING & feat_mask & local_feat_mask;
678 default:
679 return 0x00;
680 }
681}
682
683static void l2cap_send_disconn_req(struct l2cap_conn *conn, struct l2cap_chan *chan, int err)
684{
685 struct sock *sk;
686 struct l2cap_disconn_req req;
687
688 if (!conn)
689 return;
690
691 sk = chan->sk;
692
693 if (chan->mode == L2CAP_MODE_ERTM) {
694 __clear_retrans_timer(chan);
695 __clear_monitor_timer(chan);
696 __clear_ack_timer(chan);
697 }
698
699 req.dcid = cpu_to_le16(chan->dcid);
700 req.scid = cpu_to_le16(chan->scid);
701 l2cap_send_cmd(conn, l2cap_get_ident(conn),
702 L2CAP_DISCONN_REQ, sizeof(req), &req);
703
704 l2cap_state_change(chan, BT_DISCONN);
705 sk->sk_err = err;
706}
707
708/* ---- L2CAP connections ---- */
709static void l2cap_conn_start(struct l2cap_conn *conn)
710{
711 struct l2cap_chan *chan, *tmp;
712
713 BT_DBG("conn %p", conn);
714
715 read_lock(&conn->chan_lock);
716
717 list_for_each_entry_safe(chan, tmp, &conn->chan_l, list) {
718 struct sock *sk = chan->sk;
719
720 bh_lock_sock(sk);
721
722 if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
723 bh_unlock_sock(sk);
724 continue;
725 }
726
727 if (chan->state == BT_CONNECT) {
728 struct l2cap_conn_req req;
729
730 if (!l2cap_check_security(chan) ||
731 !__l2cap_no_conn_pending(chan)) {
732 bh_unlock_sock(sk);
733 continue;
734 }
735
736 if (!l2cap_mode_supported(chan->mode, conn->feat_mask)
737 && test_bit(CONF_STATE2_DEVICE,
738 &chan->conf_state)) {
739 /* l2cap_chan_close() calls list_del(chan)
740 * so release the lock */
741 read_unlock(&conn->chan_lock);
742 l2cap_chan_close(chan, ECONNRESET);
743 read_lock(&conn->chan_lock);
744 bh_unlock_sock(sk);
745 continue;
746 }
747
748 req.scid = cpu_to_le16(chan->scid);
749 req.psm = chan->psm;
750
751 chan->ident = l2cap_get_ident(conn);
752 set_bit(CONF_CONNECT_PEND, &chan->conf_state);
753
754 l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_REQ,
755 sizeof(req), &req);
756
757 } else if (chan->state == BT_CONNECT2) {
758 struct l2cap_conn_rsp rsp;
759 char buf[128];
760 rsp.scid = cpu_to_le16(chan->dcid);
761 rsp.dcid = cpu_to_le16(chan->scid);
762
763 if (l2cap_check_security(chan)) {
764 if (bt_sk(sk)->defer_setup) {
765 struct sock *parent = bt_sk(sk)->parent;
766 rsp.result = cpu_to_le16(L2CAP_CR_PEND);
767 rsp.status = cpu_to_le16(L2CAP_CS_AUTHOR_PEND);
768 if (parent)
769 parent->sk_data_ready(parent, 0);
770
771 } else {
772 l2cap_state_change(chan, BT_CONFIG);
773 rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
774 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
775 }
776 } else {
777 rsp.result = cpu_to_le16(L2CAP_CR_PEND);
778 rsp.status = cpu_to_le16(L2CAP_CS_AUTHEN_PEND);
779 }
780
781 l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
782 sizeof(rsp), &rsp);
783
784 if (test_bit(CONF_REQ_SENT, &chan->conf_state) ||
785 rsp.result != L2CAP_CR_SUCCESS) {
786 bh_unlock_sock(sk);
787 continue;
788 }
789
790 set_bit(CONF_REQ_SENT, &chan->conf_state);
791 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
792 l2cap_build_conf_req(chan, buf), buf);
793 chan->num_conf_req++;
794 }
795
796 bh_unlock_sock(sk);
797 }
798
799 read_unlock(&conn->chan_lock);
800}
801
802/* Find channel with given CID and source bdaddr.
803 * Returns closest match.
804 */
805static struct l2cap_chan *l2cap_global_chan_by_scid(int state, __le16 cid, bdaddr_t *src)
806{
807 struct l2cap_chan *c, *c1 = NULL;
808
809 read_lock(&chan_list_lock);
810
811 list_for_each_entry(c, &chan_list, global_l) {
812 struct sock *sk = c->sk;
813
814 if (state && c->state != state)
815 continue;
816
817 if (c->scid == cid) {
818 /* Exact match. */
819 if (!bacmp(&bt_sk(sk)->src, src)) {
820 read_unlock(&chan_list_lock);
821 return c;
822 }
823
824 /* Closest match */
825 if (!bacmp(&bt_sk(sk)->src, BDADDR_ANY))
826 c1 = c;
827 }
828 }
829
830 read_unlock(&chan_list_lock);
831
832 return c1;
833}
834
835static void l2cap_le_conn_ready(struct l2cap_conn *conn)
836{
837 struct sock *parent, *sk;
838 struct l2cap_chan *chan, *pchan;
839
840 BT_DBG("");
841
842 /* Check if we have socket listening on cid */
843 pchan = l2cap_global_chan_by_scid(BT_LISTEN, L2CAP_CID_LE_DATA,
844 conn->src);
845 if (!pchan)
846 return;
847
848 parent = pchan->sk;
849
850 bh_lock_sock(parent);
851
852 /* Check for backlog size */
853 if (sk_acceptq_is_full(parent)) {
854 BT_DBG("backlog full %d", parent->sk_ack_backlog);
855 goto clean;
856 }
857
858 chan = pchan->ops->new_connection(pchan->data);
859 if (!chan)
860 goto clean;
861
862 sk = chan->sk;
863
864 write_lock_bh(&conn->chan_lock);
865
866 hci_conn_hold(conn->hcon);
867
868 bacpy(&bt_sk(sk)->src, conn->src);
869 bacpy(&bt_sk(sk)->dst, conn->dst);
870
871 bt_accept_enqueue(parent, sk);
872
873 __l2cap_chan_add(conn, chan);
874
875 __set_chan_timer(chan, sk->sk_sndtimeo);
876
877 l2cap_state_change(chan, BT_CONNECTED);
878 parent->sk_data_ready(parent, 0);
879
880 write_unlock_bh(&conn->chan_lock);
881
882clean:
883 bh_unlock_sock(parent);
884}
885
886static void l2cap_chan_ready(struct sock *sk)
887{
888 struct l2cap_chan *chan = l2cap_pi(sk)->chan;
889 struct sock *parent = bt_sk(sk)->parent;
890
891 BT_DBG("sk %p, parent %p", sk, parent);
892
893 chan->conf_state = 0;
894 __clear_chan_timer(chan);
895
896 l2cap_state_change(chan, BT_CONNECTED);
897 sk->sk_state_change(sk);
898
899 if (parent)
900 parent->sk_data_ready(parent, 0);
901}
902
903static void l2cap_conn_ready(struct l2cap_conn *conn)
904{
905 struct l2cap_chan *chan;
906
907 BT_DBG("conn %p", conn);
908
909 if (!conn->hcon->out && conn->hcon->type == LE_LINK)
910 l2cap_le_conn_ready(conn);
911
912 if (conn->hcon->out && conn->hcon->type == LE_LINK)
913 smp_conn_security(conn, conn->hcon->pending_sec_level);
914
915 read_lock(&conn->chan_lock);
916
917 list_for_each_entry(chan, &conn->chan_l, list) {
918 struct sock *sk = chan->sk;
919
920 bh_lock_sock(sk);
921
922 if (conn->hcon->type == LE_LINK) {
923 if (smp_conn_security(conn, chan->sec_level))
924 l2cap_chan_ready(sk);
925
926 } else if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
927 __clear_chan_timer(chan);
928 l2cap_state_change(chan, BT_CONNECTED);
929 sk->sk_state_change(sk);
930
931 } else if (chan->state == BT_CONNECT)
932 l2cap_do_start(chan);
933
934 bh_unlock_sock(sk);
935 }
936
937 read_unlock(&conn->chan_lock);
938}
939
 940/* Notify sockets that we cannot guarantee reliability anymore */
941static void l2cap_conn_unreliable(struct l2cap_conn *conn, int err)
942{
943 struct l2cap_chan *chan;
944
945 BT_DBG("conn %p", conn);
946
947 read_lock(&conn->chan_lock);
948
949 list_for_each_entry(chan, &conn->chan_l, list) {
950 struct sock *sk = chan->sk;
951
952 if (test_bit(FLAG_FORCE_RELIABLE, &chan->flags))
953 sk->sk_err = err;
954 }
955
956 read_unlock(&conn->chan_lock);
957}
958
959static void l2cap_info_timeout(unsigned long arg)
960{
961 struct l2cap_conn *conn = (void *) arg;
962
963 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
964 conn->info_ident = 0;
965
966 l2cap_conn_start(conn);
967}
968
969static void l2cap_conn_del(struct hci_conn *hcon, int err)
970{
971 struct l2cap_conn *conn = hcon->l2cap_data;
972 struct l2cap_chan *chan, *l;
973 struct sock *sk;
974
975 if (!conn)
976 return;
977
978 BT_DBG("hcon %p conn %p, err %d", hcon, conn, err);
979
980 kfree_skb(conn->rx_skb);
981
982 /* Kill channels */
983 list_for_each_entry_safe(chan, l, &conn->chan_l, list) {
984 sk = chan->sk;
985 bh_lock_sock(sk);
986 l2cap_chan_del(chan, err);
987 bh_unlock_sock(sk);
988 chan->ops->close(chan->data);
989 }
990
991 if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT)
992 del_timer_sync(&conn->info_timer);
993
994 if (test_and_clear_bit(HCI_CONN_LE_SMP_PEND, &hcon->pend)) {
995 del_timer(&conn->security_timer);
996 smp_chan_destroy(conn);
997 }
998
999 hcon->l2cap_data = NULL;
1000 kfree(conn);
1001}
1002
1003static void security_timeout(unsigned long arg)
1004{
1005 struct l2cap_conn *conn = (void *) arg;
1006
1007 l2cap_conn_del(conn->hcon, ETIMEDOUT);
1008}
1009
1010static struct l2cap_conn *l2cap_conn_add(struct hci_conn *hcon, u8 status)
1011{
1012 struct l2cap_conn *conn = hcon->l2cap_data;
1013
1014 if (conn || status)
1015 return conn;
1016
1017 conn = kzalloc(sizeof(struct l2cap_conn), GFP_ATOMIC);
1018 if (!conn)
1019 return NULL;
1020
1021 hcon->l2cap_data = conn;
1022 conn->hcon = hcon;
1023
1024 BT_DBG("hcon %p conn %p", hcon, conn);
1025
1026 if (hcon->hdev->le_mtu && hcon->type == LE_LINK)
1027 conn->mtu = hcon->hdev->le_mtu;
1028 else
1029 conn->mtu = hcon->hdev->acl_mtu;
1030
1031 conn->src = &hcon->hdev->bdaddr;
1032 conn->dst = &hcon->dst;
1033
1034 conn->feat_mask = 0;
1035
1036 spin_lock_init(&conn->lock);
1037 rwlock_init(&conn->chan_lock);
1038
1039 INIT_LIST_HEAD(&conn->chan_l);
1040
1041 if (hcon->type == LE_LINK)
1042 setup_timer(&conn->security_timer, security_timeout,
1043 (unsigned long) conn);
1044 else
1045 setup_timer(&conn->info_timer, l2cap_info_timeout,
1046 (unsigned long) conn);
1047
1048 conn->disc_reason = 0x13;
1049
1050 return conn;
1051}
1052
1053static inline void l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan)
1054{
1055 write_lock_bh(&conn->chan_lock);
1056 __l2cap_chan_add(conn, chan);
1057 write_unlock_bh(&conn->chan_lock);
1058}
1059
1060/* ---- Socket interface ---- */
1061
1062/* Find channel with given PSM and source bdaddr.
1063 * Returns closest match.
1064 */
1065static struct l2cap_chan *l2cap_global_chan_by_psm(int state, __le16 psm, bdaddr_t *src)
1066{
1067 struct l2cap_chan *c, *c1 = NULL;
1068
1069 read_lock(&chan_list_lock);
1070
1071 list_for_each_entry(c, &chan_list, global_l) {
1072 struct sock *sk = c->sk;
1073
1074 if (state && c->state != state)
1075 continue;
1076
1077 if (c->psm == psm) {
1078 /* Exact match. */
1079 if (!bacmp(&bt_sk(sk)->src, src)) {
1080 read_unlock(&chan_list_lock);
1081 return c;
1082 }
1083
1084 /* Closest match */
1085 if (!bacmp(&bt_sk(sk)->src, BDADDR_ANY))
1086 c1 = c;
1087 }
1088 }
1089
1090 read_unlock(&chan_list_lock);
1091
1092 return c1;
1093}
1094
1095int l2cap_chan_connect(struct l2cap_chan *chan)
1096{
1097 struct sock *sk = chan->sk;
1098 bdaddr_t *src = &bt_sk(sk)->src;
1099 bdaddr_t *dst = &bt_sk(sk)->dst;
1100 struct l2cap_conn *conn;
1101 struct hci_conn *hcon;
1102 struct hci_dev *hdev;
1103 __u8 auth_type;
1104 int err;
1105
1106 BT_DBG("%s -> %s psm 0x%2.2x", batostr(src), batostr(dst),
1107 chan->psm);
1108
1109 hdev = hci_get_route(dst, src);
1110 if (!hdev)
1111 return -EHOSTUNREACH;
1112
1113 hci_dev_lock_bh(hdev);
1114
1115 auth_type = l2cap_get_auth_type(chan);
1116
1117 if (chan->dcid == L2CAP_CID_LE_DATA)
1118 hcon = hci_connect(hdev, LE_LINK, dst,
1119 chan->sec_level, auth_type);
1120 else
1121 hcon = hci_connect(hdev, ACL_LINK, dst,
1122 chan->sec_level, auth_type);
1123
1124 if (IS_ERR(hcon)) {
1125 err = PTR_ERR(hcon);
1126 goto done;
1127 }
1128
1129 conn = l2cap_conn_add(hcon, 0);
1130 if (!conn) {
1131 hci_conn_put(hcon);
1132 err = -ENOMEM;
1133 goto done;
1134 }
1135
1136 /* Update source addr of the socket */
1137 bacpy(src, conn->src);
1138
1139 l2cap_chan_add(conn, chan);
1140
1141 l2cap_state_change(chan, BT_CONNECT);
1142 __set_chan_timer(chan, sk->sk_sndtimeo);
1143
1144 if (hcon->state == BT_CONNECTED) {
1145 if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
1146 __clear_chan_timer(chan);
1147 if (l2cap_check_security(chan))
1148 l2cap_state_change(chan, BT_CONNECTED);
1149 } else
1150 l2cap_do_start(chan);
1151 }
1152
1153 err = 0;
1154
1155done:
1156 hci_dev_unlock_bh(hdev);
1157 hci_dev_put(hdev);
1158 return err;
1159}
1160
1161int __l2cap_wait_ack(struct sock *sk)
1162{
1163 struct l2cap_chan *chan = l2cap_pi(sk)->chan;
1164 DECLARE_WAITQUEUE(wait, current);
1165 int err = 0;
1166 int timeo = HZ/5;
1167
1168 add_wait_queue(sk_sleep(sk), &wait);
1169 set_current_state(TASK_INTERRUPTIBLE);
1170 while (chan->unacked_frames > 0 && chan->conn) {
1171 if (!timeo)
1172 timeo = HZ/5;
1173
1174 if (signal_pending(current)) {
1175 err = sock_intr_errno(timeo);
1176 break;
1177 }
1178
1179 release_sock(sk);
1180 timeo = schedule_timeout(timeo);
1181 lock_sock(sk);
1182 set_current_state(TASK_INTERRUPTIBLE);
1183
1184 err = sock_error(sk);
1185 if (err)
1186 break;
1187 }
1188 set_current_state(TASK_RUNNING);
1189 remove_wait_queue(sk_sleep(sk), &wait);
1190 return err;
1191}
1192
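/*
 * Monitor timer expiry: give up and disconnect once the peer has ignored
 * remote_max_tx polls, otherwise re-arm the monitor timer and poll again
 * with an RR/RNR carrying the P bit.
 */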
1193static void l2cap_monitor_timeout(unsigned long arg)
1194{
1195 struct l2cap_chan *chan = (void *) arg;
1196 struct sock *sk = chan->sk;
1197
1198 BT_DBG("chan %p", chan);
1199
1200 bh_lock_sock(sk);
1201 if (chan->retry_count >= chan->remote_max_tx) {
1202 l2cap_send_disconn_req(chan->conn, chan, ECONNABORTED);
1203 bh_unlock_sock(sk);
1204 return;
1205 }
1206
1207 chan->retry_count++;
1208 __set_monitor_timer(chan);
1209
1210 l2cap_send_rr_or_rnr(chan, L2CAP_CTRL_POLL);
1211 bh_unlock_sock(sk);
1212}
1213
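/*
 * Retransmission timer expiry: enter the WAIT_F state, start the monitor
 * timer and poll the peer (P bit set) so it reports its receive state.
 */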
1214static void l2cap_retrans_timeout(unsigned long arg)
1215{
1216 struct l2cap_chan *chan = (void *) arg;
1217 struct sock *sk = chan->sk;
1218
1219 BT_DBG("chan %p", chan);
1220
1221 bh_lock_sock(sk);
1222 chan->retry_count = 1;
1223 __set_monitor_timer(chan);
1224
1225 set_bit(CONN_WAIT_F, &chan->conn_state);
1226
1227 l2cap_send_rr_or_rnr(chan, L2CAP_CTRL_POLL);
1228 bh_unlock_sock(sk);
1229}
1230
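/*
 * Free acknowledged I-frames from the head of the transmit queue, up to but
 * not including expected_ack_seq, and stop the retransmission timer once no
 * unacknowledged frames remain.
 */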
1231static void l2cap_drop_acked_frames(struct l2cap_chan *chan)
1232{
1233 struct sk_buff *skb;
1234
1235 while ((skb = skb_peek(&chan->tx_q)) &&
1236 chan->unacked_frames) {
1237 if (bt_cb(skb)->tx_seq == chan->expected_ack_seq)
1238 break;
1239
1240 skb = skb_dequeue(&chan->tx_q);
1241 kfree_skb(skb);
1242
1243 chan->unacked_frames--;
1244 }
1245
1246 if (!chan->unacked_frames)
1247 __clear_retrans_timer(chan);
1248}
1249
1250static void l2cap_do_send(struct l2cap_chan *chan, struct sk_buff *skb)
1251{
1252 struct hci_conn *hcon = chan->conn->hcon;
1253 u16 flags;
1254
1255 BT_DBG("chan %p, skb %p len %d", chan, skb, skb->len);
1256
1257 if (!test_bit(FLAG_FLUSHABLE, &chan->flags) &&
1258 lmp_no_flush_capable(hcon->hdev))
1259 flags = ACL_START_NO_FLUSH;
1260 else
1261 flags = ACL_START;
1262
1263 bt_cb(skb)->force_active = test_bit(FLAG_FORCE_ACTIVE, &chan->flags);
1264 hci_send_acl(hcon, skb, flags);
1265}
1266
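/*
 * Streaming mode transmit: each queued frame is stamped with the next TxSeq
 * (modulo 64), gets an FCS when enabled and is sent immediately; nothing is
 * kept around for retransmission.
 */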
1267static void l2cap_streaming_send(struct l2cap_chan *chan)
1268{
1269 struct sk_buff *skb;
1270 u16 control, fcs;
1271
1272 while ((skb = skb_dequeue(&chan->tx_q))) {
1273 control = get_unaligned_le16(skb->data + L2CAP_HDR_SIZE);
1274 control |= __set_txseq(chan, chan->next_tx_seq);
1275 put_unaligned_le16(control, skb->data + L2CAP_HDR_SIZE);
1276
1277 if (chan->fcs == L2CAP_FCS_CRC16) {
1278 fcs = crc16(0, (u8 *)skb->data, skb->len - 2);
1279 put_unaligned_le16(fcs, skb->data + skb->len - 2);
1280 }
1281
1282 l2cap_do_send(chan, skb);
1283
1284 chan->next_tx_seq = (chan->next_tx_seq + 1) % 64;
1285 }
1286}
1287
1288static void l2cap_retransmit_one_frame(struct l2cap_chan *chan, u16 tx_seq)
1289{
1290 struct sk_buff *skb, *tx_skb;
1291 u16 control, fcs;
1292
1293 skb = skb_peek(&chan->tx_q);
1294 if (!skb)
1295 return;
1296
1297 do {
1298 if (bt_cb(skb)->tx_seq == tx_seq)
1299 break;
1300
1301 if (skb_queue_is_last(&chan->tx_q, skb))
1302 return;
1303
1304 } while ((skb = skb_queue_next(&chan->tx_q, skb)));
1305
1306 if (chan->remote_max_tx &&
1307 bt_cb(skb)->retries == chan->remote_max_tx) {
1308 l2cap_send_disconn_req(chan->conn, chan, ECONNABORTED);
1309 return;
1310 }
1311
1312 tx_skb = skb_clone(skb, GFP_ATOMIC);
1313 bt_cb(skb)->retries++;
1314 control = get_unaligned_le16(tx_skb->data + L2CAP_HDR_SIZE);
1315 control &= __get_sar_mask(chan);
1316
1317 if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state))
1318 control |= __set_ctrl_final(chan);
1319
1320 control |= __set_reqseq(chan, chan->buffer_seq);
1321 control |= __set_txseq(chan, tx_seq);
1322
1323 put_unaligned_le16(control, tx_skb->data + L2CAP_HDR_SIZE);
1324
1325 if (chan->fcs == L2CAP_FCS_CRC16) {
1326 fcs = crc16(0, (u8 *)tx_skb->data, tx_skb->len - 2);
1327 put_unaligned_le16(fcs, tx_skb->data + tx_skb->len - 2);
1328 }
1329
1330 l2cap_do_send(chan, tx_skb);
1331}
1332
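/*
 * ERTM transmit path: clone and send queued I-frames while the transmit
 * window is open, piggy-backing the current ReqSeq (and a pending F bit),
 * restarting the retransmission timer and counting each first transmission
 * as an unacked frame.
 */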
1333static int l2cap_ertm_send(struct l2cap_chan *chan)
1334{
1335 struct sk_buff *skb, *tx_skb;
1336 u16 control, fcs;
1337 int nsent = 0;
1338
1339 if (chan->state != BT_CONNECTED)
1340 return -ENOTCONN;
1341
1342 while ((skb = chan->tx_send_head) && (!l2cap_tx_window_full(chan))) {
1343
1344 if (chan->remote_max_tx &&
1345 bt_cb(skb)->retries == chan->remote_max_tx) {
1346 l2cap_send_disconn_req(chan->conn, chan, ECONNABORTED);
1347 break;
1348 }
1349
1350 tx_skb = skb_clone(skb, GFP_ATOMIC);
1351
1352 bt_cb(skb)->retries++;
1353
1354 control = get_unaligned_le16(tx_skb->data + L2CAP_HDR_SIZE);
1355 control &= __get_sar_mask(chan);
1356
1357 if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state))
1358 control |= __set_ctrl_final(chan);
1359
1360 control |= __set_reqseq(chan, chan->buffer_seq);
1361 control |= __set_txseq(chan, chan->next_tx_seq);
1362 put_unaligned_le16(control, tx_skb->data + L2CAP_HDR_SIZE);
1363
1364
1365 if (chan->fcs == L2CAP_FCS_CRC16) {
1366 fcs = crc16(0, (u8 *)skb->data, tx_skb->len - 2);
1367 put_unaligned_le16(fcs, skb->data + tx_skb->len - 2);
1368 }
1369
1370 l2cap_do_send(chan, tx_skb);
1371
1372 __set_retrans_timer(chan);
1373
1374 bt_cb(skb)->tx_seq = chan->next_tx_seq;
1375 chan->next_tx_seq = (chan->next_tx_seq + 1) % 64;
1376
1377 if (bt_cb(skb)->retries == 1)
1378 chan->unacked_frames++;
1379
1380 chan->frames_sent++;
1381
1382 if (skb_queue_is_last(&chan->tx_q, skb))
1383 chan->tx_send_head = NULL;
1384 else
1385 chan->tx_send_head = skb_queue_next(&chan->tx_q, skb);
1386
1387 nsent++;
1388 }
1389
1390 return nsent;
1391}
1392
1393static int l2cap_retransmit_frames(struct l2cap_chan *chan)
1394{
1395 int ret;
1396
1397 if (!skb_queue_empty(&chan->tx_q))
1398 chan->tx_send_head = chan->tx_q.next;
1399
1400 chan->next_tx_seq = chan->expected_ack_seq;
1401 ret = l2cap_ertm_send(chan);
1402 return ret;
1403}
1404
1405static void l2cap_send_ack(struct l2cap_chan *chan)
1406{
1407 u16 control = 0;
1408
1409 control |= __set_reqseq(chan, chan->buffer_seq);
1410
1411 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
1412 control |= __set_ctrl_super(chan, L2CAP_SUPER_RNR);
1413 set_bit(CONN_RNR_SENT, &chan->conn_state);
1414 l2cap_send_sframe(chan, control);
1415 return;
1416 }
1417
1418 if (l2cap_ertm_send(chan) > 0)
1419 return;
1420
1421 control |= __set_ctrl_super(chan, L2CAP_SUPER_RR);
1422 l2cap_send_sframe(chan, control);
1423}
1424
1425static void l2cap_send_srejtail(struct l2cap_chan *chan)
1426{
1427 struct srej_list *tail;
1428 u16 control;
1429
1430 control = __set_ctrl_super(chan, L2CAP_SUPER_SREJ);
1431 control |= __set_ctrl_final(chan);
1432
1433 tail = list_entry((&chan->srej_l)->prev, struct srej_list, list);
1434 control |= __set_reqseq(chan, tail->tx_seq);
1435
1436 l2cap_send_sframe(chan, control);
1437}
1438
1439static inline int l2cap_skbuff_fromiovec(struct sock *sk, struct msghdr *msg, int len, int count, struct sk_buff *skb)
1440{
1441 struct l2cap_conn *conn = l2cap_pi(sk)->chan->conn;
1442 struct sk_buff **frag;
1443 int err, sent = 0;
1444
1445 if (memcpy_fromiovec(skb_put(skb, count), msg->msg_iov, count))
1446 return -EFAULT;
1447
1448 sent += count;
1449 len -= count;
1450
1451 /* Continuation fragments (no L2CAP header) */
1452 frag = &skb_shinfo(skb)->frag_list;
1453 while (len) {
1454 count = min_t(unsigned int, conn->mtu, len);
1455
1456 *frag = bt_skb_send_alloc(sk, count, msg->msg_flags & MSG_DONTWAIT, &err);
1457 if (!*frag)
1458 return err;
1459 if (memcpy_fromiovec(skb_put(*frag, count), msg->msg_iov, count))
1460 return -EFAULT;
1461
1462 sent += count;
1463 len -= count;
1464
1465 frag = &(*frag)->next;
1466 }
1467
1468 return sent;
1469}
1470
1471static struct sk_buff *l2cap_create_connless_pdu(struct l2cap_chan *chan, struct msghdr *msg, size_t len)
1472{
1473 struct sock *sk = chan->sk;
1474 struct l2cap_conn *conn = chan->conn;
1475 struct sk_buff *skb;
1476 int err, count, hlen = L2CAP_HDR_SIZE + 2;
1477 struct l2cap_hdr *lh;
1478
1479 BT_DBG("sk %p len %d", sk, (int)len);
1480
1481 count = min_t(unsigned int, (conn->mtu - hlen), len);
1482 skb = bt_skb_send_alloc(sk, count + hlen,
1483 msg->msg_flags & MSG_DONTWAIT, &err);
1484 if (!skb)
1485 return ERR_PTR(err);
1486
1487 /* Create L2CAP header */
1488 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
1489 lh->cid = cpu_to_le16(chan->dcid);
1490 lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
1491 put_unaligned_le16(chan->psm, skb_put(skb, 2));
1492
1493 err = l2cap_skbuff_fromiovec(sk, msg, len, count, skb);
1494 if (unlikely(err < 0)) {
1495 kfree_skb(skb);
1496 return ERR_PTR(err);
1497 }
1498 return skb;
1499}
1500
1501static struct sk_buff *l2cap_create_basic_pdu(struct l2cap_chan *chan, struct msghdr *msg, size_t len)
1502{
1503 struct sock *sk = chan->sk;
1504 struct l2cap_conn *conn = chan->conn;
1505 struct sk_buff *skb;
1506 int err, count, hlen = L2CAP_HDR_SIZE;
1507 struct l2cap_hdr *lh;
1508
1509 BT_DBG("sk %p len %d", sk, (int)len);
1510
1511 count = min_t(unsigned int, (conn->mtu - hlen), len);
1512 skb = bt_skb_send_alloc(sk, count + hlen,
1513 msg->msg_flags & MSG_DONTWAIT, &err);
1514 if (!skb)
1515 return ERR_PTR(err);
1516
1517 /* Create L2CAP header */
1518 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
1519 lh->cid = cpu_to_le16(chan->dcid);
1520 lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
1521
1522 err = l2cap_skbuff_fromiovec(sk, msg, len, count, skb);
1523 if (unlikely(err < 0)) {
1524 kfree_skb(skb);
1525 return ERR_PTR(err);
1526 }
1527 return skb;
1528}
1529
1530static struct sk_buff *l2cap_create_iframe_pdu(struct l2cap_chan *chan,
1531 struct msghdr *msg, size_t len,
1532 u16 control, u16 sdulen)
1533{
1534 struct sock *sk = chan->sk;
1535 struct l2cap_conn *conn = chan->conn;
1536 struct sk_buff *skb;
1537 int err, count, hlen = L2CAP_HDR_SIZE + 2;
1538 struct l2cap_hdr *lh;
1539
1540 BT_DBG("sk %p len %d", sk, (int)len);
1541
1542 if (!conn)
1543 return ERR_PTR(-ENOTCONN);
1544
1545 if (sdulen)
1546 hlen += 2;
1547
1548 if (chan->fcs == L2CAP_FCS_CRC16)
1549 hlen += 2;
1550
1551 count = min_t(unsigned int, (conn->mtu - hlen), len);
1552 skb = bt_skb_send_alloc(sk, count + hlen,
1553 msg->msg_flags & MSG_DONTWAIT, &err);
1554 if (!skb)
1555 return ERR_PTR(err);
1556
1557 /* Create L2CAP header */
1558 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
1559 lh->cid = cpu_to_le16(chan->dcid);
1560 lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
1561 put_unaligned_le16(control, skb_put(skb, 2));
1562 if (sdulen)
1563 put_unaligned_le16(sdulen, skb_put(skb, 2));
1564
1565 err = l2cap_skbuff_fromiovec(sk, msg, len, count, skb);
1566 if (unlikely(err < 0)) {
1567 kfree_skb(skb);
1568 return ERR_PTR(err);
1569 }
1570
1571 if (chan->fcs == L2CAP_FCS_CRC16)
1572 put_unaligned_le16(0, skb_put(skb, 2));
1573
1574 bt_cb(skb)->retries = 0;
1575 return skb;
1576}
1577
1578static int l2cap_sar_segment_sdu(struct l2cap_chan *chan, struct msghdr *msg, size_t len)
1579{
1580 struct sk_buff *skb;
1581 struct sk_buff_head sar_queue;
1582 u16 control;
1583 size_t size = 0;
1584
1585 skb_queue_head_init(&sar_queue);
1586 control = __set_ctrl_sar(chan, L2CAP_SAR_START);
1587 skb = l2cap_create_iframe_pdu(chan, msg, chan->remote_mps, control, len);
1588 if (IS_ERR(skb))
1589 return PTR_ERR(skb);
1590
1591 __skb_queue_tail(&sar_queue, skb);
1592 len -= chan->remote_mps;
1593 size += chan->remote_mps;
1594
1595 while (len > 0) {
1596 size_t buflen;
1597
1598 if (len > chan->remote_mps) {
1599 control = __set_ctrl_sar(chan, L2CAP_SAR_CONTINUE);
1600 buflen = chan->remote_mps;
1601 } else {
1602 control = __set_ctrl_sar(chan, L2CAP_SAR_END);
1603 buflen = len;
1604 }
1605
1606 skb = l2cap_create_iframe_pdu(chan, msg, buflen, control, 0);
1607 if (IS_ERR(skb)) {
1608 skb_queue_purge(&sar_queue);
1609 return PTR_ERR(skb);
1610 }
1611
1612 __skb_queue_tail(&sar_queue, skb);
1613 len -= buflen;
1614 size += buflen;
1615 }
1616 skb_queue_splice_tail(&sar_queue, &chan->tx_q);
1617 if (chan->tx_send_head == NULL)
1618 chan->tx_send_head = sar_queue.next;
1619
1620 return size;
1621}
1622
1623int l2cap_chan_send(struct l2cap_chan *chan, struct msghdr *msg, size_t len)
1624{
1625 struct sk_buff *skb;
1626 u16 control;
1627 int err;
1628
1629 /* Connectionless channel */
1630 if (chan->chan_type == L2CAP_CHAN_CONN_LESS) {
1631 skb = l2cap_create_connless_pdu(chan, msg, len);
1632 if (IS_ERR(skb))
1633 return PTR_ERR(skb);
1634
1635 l2cap_do_send(chan, skb);
1636 return len;
1637 }
1638
1639 switch (chan->mode) {
1640 case L2CAP_MODE_BASIC:
1641 /* Check outgoing MTU */
1642 if (len > chan->omtu)
1643 return -EMSGSIZE;
1644
1645 /* Create a basic PDU */
1646 skb = l2cap_create_basic_pdu(chan, msg, len);
1647 if (IS_ERR(skb))
1648 return PTR_ERR(skb);
1649
1650 l2cap_do_send(chan, skb);
1651 err = len;
1652 break;
1653
1654 case L2CAP_MODE_ERTM:
1655 case L2CAP_MODE_STREAMING:
1656 /* Entire SDU fits into one PDU */
1657 if (len <= chan->remote_mps) {
1658 control = __set_ctrl_sar(chan, L2CAP_SAR_UNSEGMENTED);
1659 skb = l2cap_create_iframe_pdu(chan, msg, len, control,
1660 0);
1661 if (IS_ERR(skb))
1662 return PTR_ERR(skb);
1663
1664 __skb_queue_tail(&chan->tx_q, skb);
1665
1666 if (chan->tx_send_head == NULL)
1667 chan->tx_send_head = skb;
1668
1669 } else {
 1670			/* Segment SDU into multiple PDUs */
1671 err = l2cap_sar_segment_sdu(chan, msg, len);
1672 if (err < 0)
1673 return err;
1674 }
1675
1676 if (chan->mode == L2CAP_MODE_STREAMING) {
1677 l2cap_streaming_send(chan);
1678 err = len;
1679 break;
1680 }
1681
1682 if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state) &&
1683 test_bit(CONN_WAIT_F, &chan->conn_state)) {
1684 err = len;
1685 break;
1686 }
1687
1688 err = l2cap_ertm_send(chan);
1689 if (err >= 0)
1690 err = len;
1691
1692 break;
1693
1694 default:
1695 BT_DBG("bad state %1.1x", chan->mode);
1696 err = -EBADFD;
1697 }
1698
1699 return err;
1700}
1701
1702/* Copy frame to all raw sockets on that connection */
1703static void l2cap_raw_recv(struct l2cap_conn *conn, struct sk_buff *skb)
1704{
1705 struct sk_buff *nskb;
1706 struct l2cap_chan *chan;
1707
1708 BT_DBG("conn %p", conn);
1709
1710 read_lock(&conn->chan_lock);
1711 list_for_each_entry(chan, &conn->chan_l, list) {
1712 struct sock *sk = chan->sk;
1713 if (chan->chan_type != L2CAP_CHAN_RAW)
1714 continue;
1715
1716 /* Don't send frame to the socket it came from */
1717 if (skb->sk == sk)
1718 continue;
1719 nskb = skb_clone(skb, GFP_ATOMIC);
1720 if (!nskb)
1721 continue;
1722
1723 if (chan->ops->recv(chan->data, nskb))
1724 kfree_skb(nskb);
1725 }
1726 read_unlock(&conn->chan_lock);
1727}
1728
1729/* ---- L2CAP signalling commands ---- */
1730static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
1731 u8 code, u8 ident, u16 dlen, void *data)
1732{
1733 struct sk_buff *skb, **frag;
1734 struct l2cap_cmd_hdr *cmd;
1735 struct l2cap_hdr *lh;
1736 int len, count;
1737
1738 BT_DBG("conn %p, code 0x%2.2x, ident 0x%2.2x, len %d",
1739 conn, code, ident, dlen);
1740
1741 len = L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE + dlen;
1742 count = min_t(unsigned int, conn->mtu, len);
1743
1744 skb = bt_skb_alloc(count, GFP_ATOMIC);
1745 if (!skb)
1746 return NULL;
1747
1748 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
1749 lh->len = cpu_to_le16(L2CAP_CMD_HDR_SIZE + dlen);
1750
1751 if (conn->hcon->type == LE_LINK)
1752 lh->cid = cpu_to_le16(L2CAP_CID_LE_SIGNALING);
1753 else
1754 lh->cid = cpu_to_le16(L2CAP_CID_SIGNALING);
1755
1756 cmd = (struct l2cap_cmd_hdr *) skb_put(skb, L2CAP_CMD_HDR_SIZE);
1757 cmd->code = code;
1758 cmd->ident = ident;
1759 cmd->len = cpu_to_le16(dlen);
1760
1761 if (dlen) {
1762 count -= L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE;
1763 memcpy(skb_put(skb, count), data, count);
1764 data += count;
1765 }
1766
1767 len -= skb->len;
1768
1769 /* Continuation fragments (no L2CAP header) */
1770 frag = &skb_shinfo(skb)->frag_list;
1771 while (len) {
1772 count = min_t(unsigned int, conn->mtu, len);
1773
1774 *frag = bt_skb_alloc(count, GFP_ATOMIC);
1775 if (!*frag)
1776 goto fail;
1777
1778 memcpy(skb_put(*frag, count), data, count);
1779
1780 len -= count;
1781 data += count;
1782
1783 frag = &(*frag)->next;
1784 }
1785
1786 return skb;
1787
1788fail:
1789 kfree_skb(skb);
1790 return NULL;
1791}
1792
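/*
 * Configuration options are encoded as type/length/value triplets.  The two
 * helpers below parse and emit one option at a time, advancing the walking
 * pointer past it; values longer than 4 bytes are handled by reference.
 */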
1793static inline int l2cap_get_conf_opt(void **ptr, int *type, int *olen, unsigned long *val)
1794{
1795 struct l2cap_conf_opt *opt = *ptr;
1796 int len;
1797
1798 len = L2CAP_CONF_OPT_SIZE + opt->len;
1799 *ptr += len;
1800
1801 *type = opt->type;
1802 *olen = opt->len;
1803
1804 switch (opt->len) {
1805 case 1:
1806 *val = *((u8 *) opt->val);
1807 break;
1808
1809 case 2:
1810 *val = get_unaligned_le16(opt->val);
1811 break;
1812
1813 case 4:
1814 *val = get_unaligned_le32(opt->val);
1815 break;
1816
1817 default:
1818 *val = (unsigned long) opt->val;
1819 break;
1820 }
1821
1822 BT_DBG("type 0x%2.2x len %d val 0x%lx", *type, opt->len, *val);
1823 return len;
1824}
1825
1826static void l2cap_add_conf_opt(void **ptr, u8 type, u8 len, unsigned long val)
1827{
1828 struct l2cap_conf_opt *opt = *ptr;
1829
1830 BT_DBG("type 0x%2.2x len %d val 0x%lx", type, len, val);
1831
1832 opt->type = type;
1833 opt->len = len;
1834
1835 switch (len) {
1836 case 1:
1837 *((u8 *) opt->val) = val;
1838 break;
1839
1840 case 2:
1841 put_unaligned_le16(val, opt->val);
1842 break;
1843
1844 case 4:
1845 put_unaligned_le32(val, opt->val);
1846 break;
1847
1848 default:
1849 memcpy(opt->val, (void *) val, len);
1850 break;
1851 }
1852
1853 *ptr += L2CAP_CONF_OPT_SIZE + len;
1854}
1855
1856static void l2cap_ack_timeout(unsigned long arg)
1857{
1858 struct l2cap_chan *chan = (void *) arg;
1859
1860 bh_lock_sock(chan->sk);
1861 l2cap_send_ack(chan);
1862 bh_unlock_sock(chan->sk);
1863}
1864
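/*
 * Reset the ERTM sequence-number state, set up the retransmission, monitor
 * and ack timers, initialise the SREJ bookkeeping and route socket backlog
 * processing through l2cap_ertm_data_rcv().
 */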
1865static inline void l2cap_ertm_init(struct l2cap_chan *chan)
1866{
1867 struct sock *sk = chan->sk;
1868
1869 chan->expected_ack_seq = 0;
1870 chan->unacked_frames = 0;
1871 chan->buffer_seq = 0;
1872 chan->num_acked = 0;
1873 chan->frames_sent = 0;
1874
1875 setup_timer(&chan->retrans_timer, l2cap_retrans_timeout,
1876 (unsigned long) chan);
1877 setup_timer(&chan->monitor_timer, l2cap_monitor_timeout,
1878 (unsigned long) chan);
1879 setup_timer(&chan->ack_timer, l2cap_ack_timeout, (unsigned long) chan);
1880
1881 skb_queue_head_init(&chan->srej_q);
1882
1883 INIT_LIST_HEAD(&chan->srej_l);
1884
1885
1886 sk->sk_backlog_rcv = l2cap_ertm_data_rcv;
1887}
1888
1889static inline __u8 l2cap_select_mode(__u8 mode, __u16 remote_feat_mask)
1890{
1891 switch (mode) {
1892 case L2CAP_MODE_STREAMING:
1893 case L2CAP_MODE_ERTM:
1894 if (l2cap_mode_supported(mode, remote_feat_mask))
1895 return mode;
1896 /* fall through */
1897 default:
1898 return L2CAP_MODE_BASIC;
1899 }
1900}
1901
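/*
 * Extended Window Size is only usable when high-speed support is enabled
 * locally (enable_hs) and the remote feature mask advertises it; otherwise
 * l2cap_txwin_setup() clamps the transmit window to the default size.
 */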
1902static inline bool __l2cap_ews_supported(struct l2cap_chan *chan)
1903{
1904 return enable_hs && chan->conn->feat_mask & L2CAP_FEAT_EXT_WINDOW;
1905}
1906
1907static inline void l2cap_txwin_setup(struct l2cap_chan *chan)
1908{
1909 if (chan->tx_win > L2CAP_DEFAULT_TX_WINDOW &&
1910 __l2cap_ews_supported(chan))
1911 /* use extended control field */
1912 set_bit(FLAG_EXT_CTRL, &chan->flags);
1913 else
1914 chan->tx_win = min_t(u16, chan->tx_win,
1915 L2CAP_DEFAULT_TX_WINDOW);
1916}
1917
1918static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data)
1919{
1920 struct l2cap_conf_req *req = data;
1921 struct l2cap_conf_rfc rfc = { .mode = chan->mode };
1922 void *ptr = req->data;
1923
1924 BT_DBG("chan %p", chan);
1925
1926 if (chan->num_conf_req || chan->num_conf_rsp)
1927 goto done;
1928
1929 switch (chan->mode) {
1930 case L2CAP_MODE_STREAMING:
1931 case L2CAP_MODE_ERTM:
1932 if (test_bit(CONF_STATE2_DEVICE, &chan->conf_state))
1933 break;
1934
1935 /* fall through */
1936 default:
1937 chan->mode = l2cap_select_mode(rfc.mode, chan->conn->feat_mask);
1938 break;
1939 }
1940
1941done:
1942 if (chan->imtu != L2CAP_DEFAULT_MTU)
1943 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->imtu);
1944
1945 switch (chan->mode) {
1946 case L2CAP_MODE_BASIC:
1947 if (!(chan->conn->feat_mask & L2CAP_FEAT_ERTM) &&
1948 !(chan->conn->feat_mask & L2CAP_FEAT_STREAMING))
1949 break;
1950
1951 rfc.mode = L2CAP_MODE_BASIC;
1952 rfc.txwin_size = 0;
1953 rfc.max_transmit = 0;
1954 rfc.retrans_timeout = 0;
1955 rfc.monitor_timeout = 0;
1956 rfc.max_pdu_size = 0;
1957
1958 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
1959 (unsigned long) &rfc);
1960 break;
1961
1962 case L2CAP_MODE_ERTM:
1963 rfc.mode = L2CAP_MODE_ERTM;
1964 rfc.max_transmit = chan->max_tx;
1965 rfc.retrans_timeout = 0;
1966 rfc.monitor_timeout = 0;
1967 rfc.max_pdu_size = cpu_to_le16(L2CAP_DEFAULT_MAX_PDU_SIZE);
1968 if (L2CAP_DEFAULT_MAX_PDU_SIZE > chan->conn->mtu - 10)
1969 rfc.max_pdu_size = cpu_to_le16(chan->conn->mtu - 10);
1970
1971 l2cap_txwin_setup(chan);
1972
1973 rfc.txwin_size = min_t(u16, chan->tx_win,
1974 L2CAP_DEFAULT_TX_WINDOW);
1975
1976 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
1977 (unsigned long) &rfc);
1978
1979 if (!(chan->conn->feat_mask & L2CAP_FEAT_FCS))
1980 break;
1981
1982 if (chan->fcs == L2CAP_FCS_NONE ||
1983 test_bit(CONF_NO_FCS_RECV, &chan->conf_state)) {
1984 chan->fcs = L2CAP_FCS_NONE;
1985 l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1, chan->fcs);
1986 }
1987
1988 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
1989 l2cap_add_conf_opt(&ptr, L2CAP_CONF_EWS, 2,
1990 chan->tx_win);
1991 break;
1992
1993 case L2CAP_MODE_STREAMING:
1994 rfc.mode = L2CAP_MODE_STREAMING;
1995 rfc.txwin_size = 0;
1996 rfc.max_transmit = 0;
1997 rfc.retrans_timeout = 0;
1998 rfc.monitor_timeout = 0;
1999 rfc.max_pdu_size = cpu_to_le16(L2CAP_DEFAULT_MAX_PDU_SIZE);
2000 if (L2CAP_DEFAULT_MAX_PDU_SIZE > chan->conn->mtu - 10)
2001 rfc.max_pdu_size = cpu_to_le16(chan->conn->mtu - 10);
2002
2003 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
2004 (unsigned long) &rfc);
2005
2006 if (!(chan->conn->feat_mask & L2CAP_FEAT_FCS))
2007 break;
2008
2009 if (chan->fcs == L2CAP_FCS_NONE ||
2010 test_bit(CONF_NO_FCS_RECV, &chan->conf_state)) {
2011 chan->fcs = L2CAP_FCS_NONE;
2012 l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1, chan->fcs);
2013 }
2014 break;
2015 }
2016
2017 req->dcid = cpu_to_le16(chan->dcid);
2018 req->flags = cpu_to_le16(0);
2019
2020 return ptr - data;
2021}
2022
2023static int l2cap_parse_conf_req(struct l2cap_chan *chan, void *data)
2024{
2025 struct l2cap_conf_rsp *rsp = data;
2026 void *ptr = rsp->data;
2027 void *req = chan->conf_req;
2028 int len = chan->conf_len;
2029 int type, hint, olen;
2030 unsigned long val;
2031 struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
2032 u16 mtu = L2CAP_DEFAULT_MTU;
2033 u16 result = L2CAP_CONF_SUCCESS;
2034
2035 BT_DBG("chan %p", chan);
2036
2037 while (len >= L2CAP_CONF_OPT_SIZE) {
2038 len -= l2cap_get_conf_opt(&req, &type, &olen, &val);
2039
2040 hint = type & L2CAP_CONF_HINT;
2041 type &= L2CAP_CONF_MASK;
2042
2043 switch (type) {
2044 case L2CAP_CONF_MTU:
2045 mtu = val;
2046 break;
2047
2048 case L2CAP_CONF_FLUSH_TO:
2049 chan->flush_to = val;
2050 break;
2051
2052 case L2CAP_CONF_QOS:
2053 break;
2054
2055 case L2CAP_CONF_RFC:
2056 if (olen == sizeof(rfc))
2057 memcpy(&rfc, (void *) val, olen);
2058 break;
2059
2060 case L2CAP_CONF_FCS:
2061 if (val == L2CAP_FCS_NONE)
2062 set_bit(CONF_NO_FCS_RECV, &chan->conf_state);
2063
2064 break;
2065
2066 case L2CAP_CONF_EWS:
2067 if (!enable_hs)
2068 return -ECONNREFUSED;
2069
2070 set_bit(FLAG_EXT_CTRL, &chan->flags);
2071 set_bit(CONF_EWS_RECV, &chan->conf_state);
2072 chan->remote_tx_win = val;
2073 break;
2074
2075 default:
2076 if (hint)
2077 break;
2078
2079 result = L2CAP_CONF_UNKNOWN;
2080 *((u8 *) ptr++) = type;
2081 break;
2082 }
2083 }
2084
2085 if (chan->num_conf_rsp || chan->num_conf_req > 1)
2086 goto done;
2087
2088 switch (chan->mode) {
2089 case L2CAP_MODE_STREAMING:
2090 case L2CAP_MODE_ERTM:
2091 if (!test_bit(CONF_STATE2_DEVICE, &chan->conf_state)) {
2092 chan->mode = l2cap_select_mode(rfc.mode,
2093 chan->conn->feat_mask);
2094 break;
2095 }
2096
2097 if (chan->mode != rfc.mode)
2098 return -ECONNREFUSED;
2099
2100 break;
2101 }
2102
2103done:
2104 if (chan->mode != rfc.mode) {
2105 result = L2CAP_CONF_UNACCEPT;
2106 rfc.mode = chan->mode;
2107
2108 if (chan->num_conf_rsp == 1)
2109 return -ECONNREFUSED;
2110
2111 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
2112 sizeof(rfc), (unsigned long) &rfc);
2113 }
2114
2115
2116 if (result == L2CAP_CONF_SUCCESS) {
2117 /* Configure output options and let the other side know
2118 * which ones we don't like. */
2119
2120 if (mtu < L2CAP_DEFAULT_MIN_MTU)
2121 result = L2CAP_CONF_UNACCEPT;
2122 else {
2123 chan->omtu = mtu;
2124 set_bit(CONF_MTU_DONE, &chan->conf_state);
2125 }
2126 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->omtu);
2127
2128 switch (rfc.mode) {
2129 case L2CAP_MODE_BASIC:
2130 chan->fcs = L2CAP_FCS_NONE;
2131 set_bit(CONF_MODE_DONE, &chan->conf_state);
2132 break;
2133
2134 case L2CAP_MODE_ERTM:
2135 if (!test_bit(CONF_EWS_RECV, &chan->conf_state))
2136 chan->remote_tx_win = rfc.txwin_size;
2137 else
2138 rfc.txwin_size = L2CAP_DEFAULT_TX_WINDOW;
2139
2140 chan->remote_max_tx = rfc.max_transmit;
2141
2142 if (le16_to_cpu(rfc.max_pdu_size) > chan->conn->mtu - 10)
2143 rfc.max_pdu_size = cpu_to_le16(chan->conn->mtu - 10);
2144
2145 chan->remote_mps = le16_to_cpu(rfc.max_pdu_size);
2146
2147 rfc.retrans_timeout =
2148 le16_to_cpu(L2CAP_DEFAULT_RETRANS_TO);
2149 rfc.monitor_timeout =
2150 le16_to_cpu(L2CAP_DEFAULT_MONITOR_TO);
2151
2152 set_bit(CONF_MODE_DONE, &chan->conf_state);
2153
2154 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
2155 sizeof(rfc), (unsigned long) &rfc);
2156
2157 break;
2158
2159 case L2CAP_MODE_STREAMING:
2160 if (le16_to_cpu(rfc.max_pdu_size) > chan->conn->mtu - 10)
2161 rfc.max_pdu_size = cpu_to_le16(chan->conn->mtu - 10);
2162
2163 chan->remote_mps = le16_to_cpu(rfc.max_pdu_size);
2164
2165 set_bit(CONF_MODE_DONE, &chan->conf_state);
2166
2167 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
2168 sizeof(rfc), (unsigned long) &rfc);
2169
2170 break;
2171
2172 default:
2173 result = L2CAP_CONF_UNACCEPT;
2174
2175 memset(&rfc, 0, sizeof(rfc));
2176 rfc.mode = chan->mode;
2177 }
2178
2179 if (result == L2CAP_CONF_SUCCESS)
2180 set_bit(CONF_OUTPUT_DONE, &chan->conf_state);
2181 }
2182 rsp->scid = cpu_to_le16(chan->dcid);
2183 rsp->result = cpu_to_le16(result);
2184 rsp->flags = cpu_to_le16(0x0000);
2185
2186 return ptr - data;
2187}
2188
2189static int l2cap_parse_conf_rsp(struct l2cap_chan *chan, void *rsp, int len, void *data, u16 *result)
2190{
2191 struct l2cap_conf_req *req = data;
2192 void *ptr = req->data;
2193 int type, olen;
2194 unsigned long val;
2195 struct l2cap_conf_rfc rfc;
2196
2197 BT_DBG("chan %p, rsp %p, len %d, req %p", chan, rsp, len, data);
2198
2199 while (len >= L2CAP_CONF_OPT_SIZE) {
2200 len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);
2201
2202 switch (type) {
2203 case L2CAP_CONF_MTU:
2204 if (val < L2CAP_DEFAULT_MIN_MTU) {
2205 *result = L2CAP_CONF_UNACCEPT;
2206 chan->imtu = L2CAP_DEFAULT_MIN_MTU;
2207 } else
2208 chan->imtu = val;
2209 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->imtu);
2210 break;
2211
2212 case L2CAP_CONF_FLUSH_TO:
2213 chan->flush_to = val;
2214 l2cap_add_conf_opt(&ptr, L2CAP_CONF_FLUSH_TO,
2215 2, chan->flush_to);
2216 break;
2217
2218 case L2CAP_CONF_RFC:
2219 if (olen == sizeof(rfc))
2220 memcpy(&rfc, (void *)val, olen);
2221
2222 if (test_bit(CONF_STATE2_DEVICE, &chan->conf_state) &&
2223 rfc.mode != chan->mode)
2224 return -ECONNREFUSED;
2225
2226 chan->fcs = 0;
2227
2228 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
2229 sizeof(rfc), (unsigned long) &rfc);
2230 break;
2231
2232 case L2CAP_CONF_EWS:
2233 chan->tx_win = min_t(u16, val,
2234 L2CAP_DEFAULT_EXT_WINDOW);
2235 l2cap_add_conf_opt(&ptr, L2CAP_CONF_EWS,
2236 2, chan->tx_win);
2237 break;
2238 }
2239 }
2240
2241 if (chan->mode == L2CAP_MODE_BASIC && chan->mode != rfc.mode)
2242 return -ECONNREFUSED;
2243
2244 chan->mode = rfc.mode;
2245
2246 if (*result == L2CAP_CONF_SUCCESS) {
2247 switch (rfc.mode) {
2248 case L2CAP_MODE_ERTM:
2249 chan->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
2250 chan->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
2251 chan->mps = le16_to_cpu(rfc.max_pdu_size);
2252 break;
2253 case L2CAP_MODE_STREAMING:
2254 chan->mps = le16_to_cpu(rfc.max_pdu_size);
2255 }
2256 }
2257
2258 req->dcid = cpu_to_le16(chan->dcid);
2259 req->flags = cpu_to_le16(0x0000);
2260
2261 return ptr - data;
2262}
2263
2264static int l2cap_build_conf_rsp(struct l2cap_chan *chan, void *data, u16 result, u16 flags)
2265{
2266 struct l2cap_conf_rsp *rsp = data;
2267 void *ptr = rsp->data;
2268
2269 BT_DBG("chan %p", chan);
2270
2271 rsp->scid = cpu_to_le16(chan->dcid);
2272 rsp->result = cpu_to_le16(result);
2273 rsp->flags = cpu_to_le16(flags);
2274
2275 return ptr - data;
2276}
2277
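/* Complete a connection whose response was deferred (BT_DEFER_SETUP):
 * send the successful connect response and, if no configuration
 * request has been sent yet, start the configuration phase.
 */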
2278void __l2cap_connect_rsp_defer(struct l2cap_chan *chan)
2279{
2280 struct l2cap_conn_rsp rsp;
2281 struct l2cap_conn *conn = chan->conn;
2282 u8 buf[128];
2283
2284 rsp.scid = cpu_to_le16(chan->dcid);
2285 rsp.dcid = cpu_to_le16(chan->scid);
2286 rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
2287 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
2288 l2cap_send_cmd(conn, chan->ident,
2289 L2CAP_CONN_RSP, sizeof(rsp), &rsp);
2290
2291 if (test_and_set_bit(CONF_REQ_SENT, &chan->conf_state))
2292 return;
2293
2294 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
2295 l2cap_build_conf_req(chan, buf), buf);
2296 chan->num_conf_req++;
2297}
2298
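/* Extract the RFC option from a successful configuration response and
 * cache the resulting ERTM/streaming parameters on the channel.
 */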
2299static void l2cap_conf_rfc_get(struct l2cap_chan *chan, void *rsp, int len)
2300{
2301 int type, olen;
2302 unsigned long val;
2303 struct l2cap_conf_rfc rfc;
2304
2305 BT_DBG("chan %p, rsp %p, len %d", chan, rsp, len);
2306
2307 if ((chan->mode != L2CAP_MODE_ERTM) && (chan->mode != L2CAP_MODE_STREAMING))
2308 return;
2309
2310 while (len >= L2CAP_CONF_OPT_SIZE) {
2311 len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);
2312
2313 switch (type) {
2314 case L2CAP_CONF_RFC:
2315 if (olen == sizeof(rfc))
2316 memcpy(&rfc, (void *)val, olen);
2317 goto done;
2318 }
2319 }
2320
2321done:
2322 switch (rfc.mode) {
2323 case L2CAP_MODE_ERTM:
2324 chan->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
2325 chan->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
2326 chan->mps = le16_to_cpu(rfc.max_pdu_size);
2327 break;
2328 case L2CAP_MODE_STREAMING:
2329 chan->mps = le16_to_cpu(rfc.max_pdu_size);
2330 }
2331}
2332
2333static inline int l2cap_command_rej(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2334{
2335 struct l2cap_cmd_rej_unk *rej = (struct l2cap_cmd_rej_unk *) data;
2336
2337 if (rej->reason != L2CAP_REJ_NOT_UNDERSTOOD)
2338 return 0;
2339
2340 if ((conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) &&
2341 cmd->ident == conn->info_ident) {
2342 del_timer(&conn->info_timer);
2343
2344 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
2345 conn->info_ident = 0;
2346
2347 l2cap_conn_start(conn);
2348 }
2349
2350 return 0;
2351}
2352
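/* Handle an incoming connection request: look up a listening channel
 * for the PSM, check link security and the accept backlog, create the
 * new channel and send a connect response. A feature-mask information
 * exchange is started if one has not been done yet.
 */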
2353static inline int l2cap_connect_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2354{
2355 struct l2cap_conn_req *req = (struct l2cap_conn_req *) data;
2356 struct l2cap_conn_rsp rsp;
2357 struct l2cap_chan *chan = NULL, *pchan;
2358 struct sock *parent, *sk = NULL;
2359 int result, status = L2CAP_CS_NO_INFO;
2360
2361 u16 dcid = 0, scid = __le16_to_cpu(req->scid);
2362 __le16 psm = req->psm;
2363
2364 BT_DBG("psm 0x%2.2x scid 0x%4.4x", psm, scid);
2365
2366	/* Check if we have a socket listening on this psm */
2367 pchan = l2cap_global_chan_by_psm(BT_LISTEN, psm, conn->src);
2368 if (!pchan) {
2369 result = L2CAP_CR_BAD_PSM;
2370 goto sendresp;
2371 }
2372
2373 parent = pchan->sk;
2374
2375 bh_lock_sock(parent);
2376
2377 /* Check if the ACL is secure enough (if not SDP) */
2378 if (psm != cpu_to_le16(0x0001) &&
2379 !hci_conn_check_link_mode(conn->hcon)) {
2380 conn->disc_reason = 0x05;
2381 result = L2CAP_CR_SEC_BLOCK;
2382 goto response;
2383 }
2384
2385 result = L2CAP_CR_NO_MEM;
2386
2387 /* Check for backlog size */
2388 if (sk_acceptq_is_full(parent)) {
2389 BT_DBG("backlog full %d", parent->sk_ack_backlog);
2390 goto response;
2391 }
2392
2393 chan = pchan->ops->new_connection(pchan->data);
2394 if (!chan)
2395 goto response;
2396
2397 sk = chan->sk;
2398
2399 write_lock_bh(&conn->chan_lock);
2400
2401	/* Check if we already have a channel with that dcid */
2402 if (__l2cap_get_chan_by_dcid(conn, scid)) {
2403 write_unlock_bh(&conn->chan_lock);
2404 sock_set_flag(sk, SOCK_ZAPPED);
2405 chan->ops->close(chan->data);
2406 goto response;
2407 }
2408
2409 hci_conn_hold(conn->hcon);
2410
2411 bacpy(&bt_sk(sk)->src, conn->src);
2412 bacpy(&bt_sk(sk)->dst, conn->dst);
2413 chan->psm = psm;
2414 chan->dcid = scid;
2415
2416 bt_accept_enqueue(parent, sk);
2417
2418 __l2cap_chan_add(conn, chan);
2419
2420 dcid = chan->scid;
2421
2422 __set_chan_timer(chan, sk->sk_sndtimeo);
2423
2424 chan->ident = cmd->ident;
2425
2426 if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE) {
2427 if (l2cap_check_security(chan)) {
2428 if (bt_sk(sk)->defer_setup) {
2429 l2cap_state_change(chan, BT_CONNECT2);
2430 result = L2CAP_CR_PEND;
2431 status = L2CAP_CS_AUTHOR_PEND;
2432 parent->sk_data_ready(parent, 0);
2433 } else {
2434 l2cap_state_change(chan, BT_CONFIG);
2435 result = L2CAP_CR_SUCCESS;
2436 status = L2CAP_CS_NO_INFO;
2437 }
2438 } else {
2439 l2cap_state_change(chan, BT_CONNECT2);
2440 result = L2CAP_CR_PEND;
2441 status = L2CAP_CS_AUTHEN_PEND;
2442 }
2443 } else {
2444 l2cap_state_change(chan, BT_CONNECT2);
2445 result = L2CAP_CR_PEND;
2446 status = L2CAP_CS_NO_INFO;
2447 }
2448
2449 write_unlock_bh(&conn->chan_lock);
2450
2451response:
2452 bh_unlock_sock(parent);
2453
2454sendresp:
2455 rsp.scid = cpu_to_le16(scid);
2456 rsp.dcid = cpu_to_le16(dcid);
2457 rsp.result = cpu_to_le16(result);
2458 rsp.status = cpu_to_le16(status);
2459 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONN_RSP, sizeof(rsp), &rsp);
2460
2461 if (result == L2CAP_CR_PEND && status == L2CAP_CS_NO_INFO) {
2462 struct l2cap_info_req info;
2463 info.type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
2464
2465 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
2466 conn->info_ident = l2cap_get_ident(conn);
2467
2468 mod_timer(&conn->info_timer, jiffies +
2469 msecs_to_jiffies(L2CAP_INFO_TIMEOUT));
2470
2471 l2cap_send_cmd(conn, conn->info_ident,
2472 L2CAP_INFO_REQ, sizeof(info), &info);
2473 }
2474
2475 if (chan && !test_bit(CONF_REQ_SENT, &chan->conf_state) &&
2476 result == L2CAP_CR_SUCCESS) {
2477 u8 buf[128];
2478 set_bit(CONF_REQ_SENT, &chan->conf_state);
2479 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
2480 l2cap_build_conf_req(chan, buf), buf);
2481 chan->num_conf_req++;
2482 }
2483
2484 return 0;
2485}
2486
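/* Handle the peer's connect response: on success move to BT_CONFIG and
 * send our configuration request, on a pending result just wait, and on
 * any failure tear the channel down.
 */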
2487static inline int l2cap_connect_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2488{
2489 struct l2cap_conn_rsp *rsp = (struct l2cap_conn_rsp *) data;
2490 u16 scid, dcid, result, status;
2491 struct l2cap_chan *chan;
2492 struct sock *sk;
2493 u8 req[128];
2494
2495 scid = __le16_to_cpu(rsp->scid);
2496 dcid = __le16_to_cpu(rsp->dcid);
2497 result = __le16_to_cpu(rsp->result);
2498 status = __le16_to_cpu(rsp->status);
2499
2500 BT_DBG("dcid 0x%4.4x scid 0x%4.4x result 0x%2.2x status 0x%2.2x", dcid, scid, result, status);
2501
2502 if (scid) {
2503 chan = l2cap_get_chan_by_scid(conn, scid);
2504 if (!chan)
2505 return -EFAULT;
2506 } else {
2507 chan = l2cap_get_chan_by_ident(conn, cmd->ident);
2508 if (!chan)
2509 return -EFAULT;
2510 }
2511
2512 sk = chan->sk;
2513
2514 switch (result) {
2515 case L2CAP_CR_SUCCESS:
2516 l2cap_state_change(chan, BT_CONFIG);
2517 chan->ident = 0;
2518 chan->dcid = dcid;
2519 clear_bit(CONF_CONNECT_PEND, &chan->conf_state);
2520
2521 if (test_and_set_bit(CONF_REQ_SENT, &chan->conf_state))
2522 break;
2523
2524 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
2525 l2cap_build_conf_req(chan, req), req);
2526 chan->num_conf_req++;
2527 break;
2528
2529 case L2CAP_CR_PEND:
2530 set_bit(CONF_CONNECT_PEND, &chan->conf_state);
2531 break;
2532
2533 default:
2534 /* don't delete l2cap channel if sk is owned by user */
2535 if (sock_owned_by_user(sk)) {
2536 l2cap_state_change(chan, BT_DISCONN);
2537 __clear_chan_timer(chan);
2538 __set_chan_timer(chan, HZ / 5);
2539 break;
2540 }
2541
2542 l2cap_chan_del(chan, ECONNREFUSED);
2543 break;
2544 }
2545
2546 bh_unlock_sock(sk);
2547 return 0;
2548}
2549
2550static inline void set_default_fcs(struct l2cap_chan *chan)
2551{
2552 /* FCS is enabled only in ERTM or streaming mode, if one or both
2553 * sides request it.
2554 */
2555 if (chan->mode != L2CAP_MODE_ERTM && chan->mode != L2CAP_MODE_STREAMING)
2556 chan->fcs = L2CAP_FCS_NONE;
2557 else if (!test_bit(CONF_NO_FCS_RECV, &chan->conf_state))
2558 chan->fcs = L2CAP_FCS_CRC16;
2559}
2560
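/* Handle a configuration request. A request may be split across several
 * commands (continuation flag set); the options are accumulated in
 * chan->conf_req and only parsed once the final command arrives.
 */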
2561static inline int l2cap_config_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u16 cmd_len, u8 *data)
2562{
2563 struct l2cap_conf_req *req = (struct l2cap_conf_req *) data;
2564 u16 dcid, flags;
2565 u8 rsp[64];
2566 struct l2cap_chan *chan;
2567 struct sock *sk;
2568 int len;
2569
2570 dcid = __le16_to_cpu(req->dcid);
2571 flags = __le16_to_cpu(req->flags);
2572
2573 BT_DBG("dcid 0x%4.4x flags 0x%2.2x", dcid, flags);
2574
2575 chan = l2cap_get_chan_by_scid(conn, dcid);
2576 if (!chan)
2577 return -ENOENT;
2578
2579 sk = chan->sk;
2580
2581 if (chan->state != BT_CONFIG && chan->state != BT_CONNECT2) {
2582 struct l2cap_cmd_rej_cid rej;
2583
2584 rej.reason = cpu_to_le16(L2CAP_REJ_INVALID_CID);
2585 rej.scid = cpu_to_le16(chan->scid);
2586 rej.dcid = cpu_to_le16(chan->dcid);
2587
2588 l2cap_send_cmd(conn, cmd->ident, L2CAP_COMMAND_REJ,
2589 sizeof(rej), &rej);
2590 goto unlock;
2591 }
2592
2593 /* Reject if config buffer is too small. */
2594 len = cmd_len - sizeof(*req);
2595 if (len < 0 || chan->conf_len + len > sizeof(chan->conf_req)) {
2596 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
2597 l2cap_build_conf_rsp(chan, rsp,
2598 L2CAP_CONF_REJECT, flags), rsp);
2599 goto unlock;
2600 }
2601
2602 /* Store config. */
2603 memcpy(chan->conf_req + chan->conf_len, req->data, len);
2604 chan->conf_len += len;
2605
2606 if (flags & 0x0001) {
2607 /* Incomplete config. Send empty response. */
2608 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
2609 l2cap_build_conf_rsp(chan, rsp,
2610 L2CAP_CONF_SUCCESS, 0x0001), rsp);
2611 goto unlock;
2612 }
2613
2614 /* Complete config. */
2615 len = l2cap_parse_conf_req(chan, rsp);
2616 if (len < 0) {
2617 l2cap_send_disconn_req(conn, chan, ECONNRESET);
2618 goto unlock;
2619 }
2620
2621 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP, len, rsp);
2622 chan->num_conf_rsp++;
2623
2624 /* Reset config buffer. */
2625 chan->conf_len = 0;
2626
2627 if (!test_bit(CONF_OUTPUT_DONE, &chan->conf_state))
2628 goto unlock;
2629
2630 if (test_bit(CONF_INPUT_DONE, &chan->conf_state)) {
2631 set_default_fcs(chan);
2632
2633 l2cap_state_change(chan, BT_CONNECTED);
2634
2635 chan->next_tx_seq = 0;
2636 chan->expected_tx_seq = 0;
2637 skb_queue_head_init(&chan->tx_q);
2638 if (chan->mode == L2CAP_MODE_ERTM)
2639 l2cap_ertm_init(chan);
2640
2641 l2cap_chan_ready(sk);
2642 goto unlock;
2643 }
2644
2645 if (!test_and_set_bit(CONF_REQ_SENT, &chan->conf_state)) {
2646 u8 buf[64];
2647 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
2648 l2cap_build_conf_req(chan, buf), buf);
2649 chan->num_conf_req++;
2650 }
2651
2652unlock:
2653 bh_unlock_sock(sk);
2654 return 0;
2655}
2656
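/* Handle the peer's configuration response: cache the RFC parameters on
 * success, renegotiate with a fresh request if our proposal was not
 * accepted, and disconnect on any other failure. Once both directions
 * are configured the channel becomes connected.
 */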
2657static inline int l2cap_config_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2658{
2659 struct l2cap_conf_rsp *rsp = (struct l2cap_conf_rsp *)data;
2660 u16 scid, flags, result;
2661 struct l2cap_chan *chan;
2662 struct sock *sk;
2663	int len = __le16_to_cpu(cmd->len) - sizeof(*rsp);
2664
2665 scid = __le16_to_cpu(rsp->scid);
2666 flags = __le16_to_cpu(rsp->flags);
2667 result = __le16_to_cpu(rsp->result);
2668
2669 BT_DBG("scid 0x%4.4x flags 0x%2.2x result 0x%2.2x",
2670 scid, flags, result);
2671
2672 chan = l2cap_get_chan_by_scid(conn, scid);
2673 if (!chan)
2674 return 0;
2675
2676 sk = chan->sk;
2677
2678 switch (result) {
2679 case L2CAP_CONF_SUCCESS:
2680 l2cap_conf_rfc_get(chan, rsp->data, len);
2681 break;
2682
2683 case L2CAP_CONF_UNACCEPT:
2684 if (chan->num_conf_rsp <= L2CAP_CONF_MAX_CONF_RSP) {
2685 char req[64];
2686
2687 if (len > sizeof(req) - sizeof(struct l2cap_conf_req)) {
2688 l2cap_send_disconn_req(conn, chan, ECONNRESET);
2689 goto done;
2690 }
2691
2692 /* throw out any old stored conf requests */
2693 result = L2CAP_CONF_SUCCESS;
2694 len = l2cap_parse_conf_rsp(chan, rsp->data, len,
2695 req, &result);
2696 if (len < 0) {
2697 l2cap_send_disconn_req(conn, chan, ECONNRESET);
2698 goto done;
2699 }
2700
2701 l2cap_send_cmd(conn, l2cap_get_ident(conn),
2702 L2CAP_CONF_REQ, len, req);
2703 chan->num_conf_req++;
2704 if (result != L2CAP_CONF_SUCCESS)
2705 goto done;
2706 break;
2707 }
2708
2709 default:
2710 sk->sk_err = ECONNRESET;
2711 __set_chan_timer(chan, HZ * 5);
2712 l2cap_send_disconn_req(conn, chan, ECONNRESET);
2713 goto done;
2714 }
2715
2716 if (flags & 0x01)
2717 goto done;
2718
2719 set_bit(CONF_INPUT_DONE, &chan->conf_state);
2720
2721 if (test_bit(CONF_OUTPUT_DONE, &chan->conf_state)) {
2722 set_default_fcs(chan);
2723
2724 l2cap_state_change(chan, BT_CONNECTED);
2725 chan->next_tx_seq = 0;
2726 chan->expected_tx_seq = 0;
2727 skb_queue_head_init(&chan->tx_q);
2728 if (chan->mode == L2CAP_MODE_ERTM)
2729 l2cap_ertm_init(chan);
2730
2731 l2cap_chan_ready(sk);
2732 }
2733
2734done:
2735 bh_unlock_sock(sk);
2736 return 0;
2737}
2738
2739static inline int l2cap_disconnect_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2740{
2741 struct l2cap_disconn_req *req = (struct l2cap_disconn_req *) data;
2742 struct l2cap_disconn_rsp rsp;
2743 u16 dcid, scid;
2744 struct l2cap_chan *chan;
2745 struct sock *sk;
2746
2747 scid = __le16_to_cpu(req->scid);
2748 dcid = __le16_to_cpu(req->dcid);
2749
2750 BT_DBG("scid 0x%4.4x dcid 0x%4.4x", scid, dcid);
2751
2752 chan = l2cap_get_chan_by_scid(conn, dcid);
2753 if (!chan)
2754 return 0;
2755
2756 sk = chan->sk;
2757
2758 rsp.dcid = cpu_to_le16(chan->scid);
2759 rsp.scid = cpu_to_le16(chan->dcid);
2760 l2cap_send_cmd(conn, cmd->ident, L2CAP_DISCONN_RSP, sizeof(rsp), &rsp);
2761
2762 sk->sk_shutdown = SHUTDOWN_MASK;
2763
2764 /* don't delete l2cap channel if sk is owned by user */
2765 if (sock_owned_by_user(sk)) {
2766 l2cap_state_change(chan, BT_DISCONN);
2767 __clear_chan_timer(chan);
2768 __set_chan_timer(chan, HZ / 5);
2769 bh_unlock_sock(sk);
2770 return 0;
2771 }
2772
2773 l2cap_chan_del(chan, ECONNRESET);
2774 bh_unlock_sock(sk);
2775
2776 chan->ops->close(chan->data);
2777 return 0;
2778}
2779
2780static inline int l2cap_disconnect_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2781{
2782 struct l2cap_disconn_rsp *rsp = (struct l2cap_disconn_rsp *) data;
2783 u16 dcid, scid;
2784 struct l2cap_chan *chan;
2785 struct sock *sk;
2786
2787 scid = __le16_to_cpu(rsp->scid);
2788 dcid = __le16_to_cpu(rsp->dcid);
2789
2790 BT_DBG("dcid 0x%4.4x scid 0x%4.4x", dcid, scid);
2791
2792 chan = l2cap_get_chan_by_scid(conn, scid);
2793 if (!chan)
2794 return 0;
2795
2796 sk = chan->sk;
2797
2798 /* don't delete l2cap channel if sk is owned by user */
2799 if (sock_owned_by_user(sk)) {
2800		l2cap_state_change(chan, BT_DISCONN);
2801 __clear_chan_timer(chan);
2802 __set_chan_timer(chan, HZ / 5);
2803 bh_unlock_sock(sk);
2804 return 0;
2805 }
2806
2807 l2cap_chan_del(chan, 0);
2808 bh_unlock_sock(sk);
2809
2810 chan->ops->close(chan->data);
2811 return 0;
2812}
2813
2814static inline int l2cap_information_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2815{
2816 struct l2cap_info_req *req = (struct l2cap_info_req *) data;
2817 u16 type;
2818
2819 type = __le16_to_cpu(req->type);
2820
2821 BT_DBG("type 0x%4.4x", type);
2822
2823 if (type == L2CAP_IT_FEAT_MASK) {
2824 u8 buf[8];
2825 u32 feat_mask = l2cap_feat_mask;
2826 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
2827 rsp->type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
2828 rsp->result = cpu_to_le16(L2CAP_IR_SUCCESS);
2829 if (!disable_ertm)
2830 feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING
2831 | L2CAP_FEAT_FCS;
2832 if (enable_hs)
2833 feat_mask |= L2CAP_FEAT_EXT_FLOW
2834 | L2CAP_FEAT_EXT_WINDOW;
2835
2836 put_unaligned_le32(feat_mask, rsp->data);
2837 l2cap_send_cmd(conn, cmd->ident,
2838 L2CAP_INFO_RSP, sizeof(buf), buf);
2839 } else if (type == L2CAP_IT_FIXED_CHAN) {
2840 u8 buf[12];
2841 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
2842 rsp->type = cpu_to_le16(L2CAP_IT_FIXED_CHAN);
2843 rsp->result = cpu_to_le16(L2CAP_IR_SUCCESS);
2844 memcpy(buf + 4, l2cap_fixed_chan, 8);
2845 l2cap_send_cmd(conn, cmd->ident,
2846 L2CAP_INFO_RSP, sizeof(buf), buf);
2847 } else {
2848 struct l2cap_info_rsp rsp;
2849 rsp.type = cpu_to_le16(type);
2850 rsp.result = cpu_to_le16(L2CAP_IR_NOTSUPP);
2851 l2cap_send_cmd(conn, cmd->ident,
2852 L2CAP_INFO_RSP, sizeof(rsp), &rsp);
2853 }
2854
2855 return 0;
2856}
2857
2858static inline int l2cap_information_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2859{
2860 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) data;
2861 u16 type, result;
2862
2863 type = __le16_to_cpu(rsp->type);
2864 result = __le16_to_cpu(rsp->result);
2865
2866 BT_DBG("type 0x%4.4x result 0x%2.2x", type, result);
2867
2868	/* L2CAP Info req/rsp are not bound to any channel, so add extra checks */
2869 if (cmd->ident != conn->info_ident ||
2870 conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE)
2871 return 0;
2872
2873 del_timer(&conn->info_timer);
2874
2875 if (result != L2CAP_IR_SUCCESS) {
2876 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
2877 conn->info_ident = 0;
2878
2879 l2cap_conn_start(conn);
2880
2881 return 0;
2882 }
2883
2884 if (type == L2CAP_IT_FEAT_MASK) {
2885 conn->feat_mask = get_unaligned_le32(rsp->data);
2886
2887 if (conn->feat_mask & L2CAP_FEAT_FIXED_CHAN) {
2888 struct l2cap_info_req req;
2889 req.type = cpu_to_le16(L2CAP_IT_FIXED_CHAN);
2890
2891 conn->info_ident = l2cap_get_ident(conn);
2892
2893 l2cap_send_cmd(conn, conn->info_ident,
2894 L2CAP_INFO_REQ, sizeof(req), &req);
2895 } else {
2896 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
2897 conn->info_ident = 0;
2898
2899 l2cap_conn_start(conn);
2900 }
2901 } else if (type == L2CAP_IT_FIXED_CHAN) {
2902 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
2903 conn->info_ident = 0;
2904
2905 l2cap_conn_start(conn);
2906 }
2907
2908 return 0;
2909}
2910
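/* Sanity-check LE connection parameters: connection interval 7.5 ms to
 * 4 s (1.25 ms units), supervision timeout 100 ms to 32 s (10 ms units),
 * and a slave latency low enough that the supervision timeout cannot
 * expire between connection events.
 */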
2911static inline int l2cap_check_conn_param(u16 min, u16 max, u16 latency,
2912 u16 to_multiplier)
2913{
2914 u16 max_latency;
2915
2916 if (min > max || min < 6 || max > 3200)
2917 return -EINVAL;
2918
2919 if (to_multiplier < 10 || to_multiplier > 3200)
2920 return -EINVAL;
2921
2922 if (max >= to_multiplier * 8)
2923 return -EINVAL;
2924
2925 max_latency = (to_multiplier * 8 / max) - 1;
2926 if (latency > 499 || latency > max_latency)
2927 return -EINVAL;
2928
2929 return 0;
2930}
2931
2932static inline int l2cap_conn_param_update_req(struct l2cap_conn *conn,
2933 struct l2cap_cmd_hdr *cmd, u8 *data)
2934{
2935 struct hci_conn *hcon = conn->hcon;
2936 struct l2cap_conn_param_update_req *req;
2937 struct l2cap_conn_param_update_rsp rsp;
2938 u16 min, max, latency, to_multiplier, cmd_len;
2939 int err;
2940
2941 if (!(hcon->link_mode & HCI_LM_MASTER))
2942 return -EINVAL;
2943
2944 cmd_len = __le16_to_cpu(cmd->len);
2945 if (cmd_len != sizeof(struct l2cap_conn_param_update_req))
2946 return -EPROTO;
2947
2948 req = (struct l2cap_conn_param_update_req *) data;
2949 min = __le16_to_cpu(req->min);
2950 max = __le16_to_cpu(req->max);
2951 latency = __le16_to_cpu(req->latency);
2952 to_multiplier = __le16_to_cpu(req->to_multiplier);
2953
2954 BT_DBG("min 0x%4.4x max 0x%4.4x latency: 0x%4.4x Timeout: 0x%4.4x",
2955 min, max, latency, to_multiplier);
2956
2957 memset(&rsp, 0, sizeof(rsp));
2958
2959 err = l2cap_check_conn_param(min, max, latency, to_multiplier);
2960 if (err)
2961 rsp.result = cpu_to_le16(L2CAP_CONN_PARAM_REJECTED);
2962 else
2963 rsp.result = cpu_to_le16(L2CAP_CONN_PARAM_ACCEPTED);
2964
2965 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONN_PARAM_UPDATE_RSP,
2966 sizeof(rsp), &rsp);
2967
2968 if (!err)
2969 hci_le_conn_update(hcon, min, max, latency, to_multiplier);
2970
2971 return 0;
2972}
2973
2974static inline int l2cap_bredr_sig_cmd(struct l2cap_conn *conn,
2975 struct l2cap_cmd_hdr *cmd, u16 cmd_len, u8 *data)
2976{
2977 int err = 0;
2978
2979 switch (cmd->code) {
2980 case L2CAP_COMMAND_REJ:
2981 l2cap_command_rej(conn, cmd, data);
2982 break;
2983
2984 case L2CAP_CONN_REQ:
2985 err = l2cap_connect_req(conn, cmd, data);
2986 break;
2987
2988 case L2CAP_CONN_RSP:
2989 err = l2cap_connect_rsp(conn, cmd, data);
2990 break;
2991
2992 case L2CAP_CONF_REQ:
2993 err = l2cap_config_req(conn, cmd, cmd_len, data);
2994 break;
2995
2996 case L2CAP_CONF_RSP:
2997 err = l2cap_config_rsp(conn, cmd, data);
2998 break;
2999
3000 case L2CAP_DISCONN_REQ:
3001 err = l2cap_disconnect_req(conn, cmd, data);
3002 break;
3003
3004 case L2CAP_DISCONN_RSP:
3005 err = l2cap_disconnect_rsp(conn, cmd, data);
3006 break;
3007
3008 case L2CAP_ECHO_REQ:
3009 l2cap_send_cmd(conn, cmd->ident, L2CAP_ECHO_RSP, cmd_len, data);
3010 break;
3011
3012 case L2CAP_ECHO_RSP:
3013 break;
3014
3015 case L2CAP_INFO_REQ:
3016 err = l2cap_information_req(conn, cmd, data);
3017 break;
3018
3019 case L2CAP_INFO_RSP:
3020 err = l2cap_information_rsp(conn, cmd, data);
3021 break;
3022
3023 default:
3024 BT_ERR("Unknown BR/EDR signaling command 0x%2.2x", cmd->code);
3025 err = -EINVAL;
3026 break;
3027 }
3028
3029 return err;
3030}
3031
3032static inline int l2cap_le_sig_cmd(struct l2cap_conn *conn,
3033 struct l2cap_cmd_hdr *cmd, u8 *data)
3034{
3035 switch (cmd->code) {
3036 case L2CAP_COMMAND_REJ:
3037 return 0;
3038
3039 case L2CAP_CONN_PARAM_UPDATE_REQ:
3040 return l2cap_conn_param_update_req(conn, cmd, data);
3041
3042 case L2CAP_CONN_PARAM_UPDATE_RSP:
3043 return 0;
3044
3045 default:
3046 BT_ERR("Unknown LE signaling command 0x%2.2x", cmd->code);
3047 return -EINVAL;
3048 }
3049}
3050
3051static inline void l2cap_sig_channel(struct l2cap_conn *conn,
3052 struct sk_buff *skb)
3053{
3054 u8 *data = skb->data;
3055 int len = skb->len;
3056 struct l2cap_cmd_hdr cmd;
3057 int err;
3058
3059 l2cap_raw_recv(conn, skb);
3060
3061 while (len >= L2CAP_CMD_HDR_SIZE) {
3062 u16 cmd_len;
3063 memcpy(&cmd, data, L2CAP_CMD_HDR_SIZE);
3064 data += L2CAP_CMD_HDR_SIZE;
3065 len -= L2CAP_CMD_HDR_SIZE;
3066
3067 cmd_len = le16_to_cpu(cmd.len);
3068
3069 BT_DBG("code 0x%2.2x len %d id 0x%2.2x", cmd.code, cmd_len, cmd.ident);
3070
3071 if (cmd_len > len || !cmd.ident) {
3072 BT_DBG("corrupted command");
3073 break;
3074 }
3075
3076 if (conn->hcon->type == LE_LINK)
3077 err = l2cap_le_sig_cmd(conn, &cmd, data);
3078 else
3079 err = l2cap_bredr_sig_cmd(conn, &cmd, cmd_len, data);
3080
3081 if (err) {
3082 struct l2cap_cmd_rej_unk rej;
3083
3084 BT_ERR("Wrong link type (%d)", err);
3085
3086 /* FIXME: Map err to a valid reason */
3087 rej.reason = cpu_to_le16(L2CAP_REJ_NOT_UNDERSTOOD);
3088 l2cap_send_cmd(conn, cmd.ident, L2CAP_COMMAND_REJ, sizeof(rej), &rej);
3089 }
3090
3091 data += cmd_len;
3092 len -= cmd_len;
3093 }
3094
3095 kfree_skb(skb);
3096}
3097
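/* When CRC16 FCS is in use, strip the two-byte trailer and verify it
 * against the checksum computed over the L2CAP header, control field
 * and payload.
 */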
3098static int l2cap_check_fcs(struct l2cap_chan *chan, struct sk_buff *skb)
3099{
3100 u16 our_fcs, rcv_fcs;
3101 int hdr_size = L2CAP_HDR_SIZE + 2;
3102
3103 if (chan->fcs == L2CAP_FCS_CRC16) {
3104 skb_trim(skb, skb->len - 2);
3105 rcv_fcs = get_unaligned_le16(skb->data + skb->len);
3106 our_fcs = crc16(0, skb->data - hdr_size, skb->len + hdr_size);
3107
3108 if (our_fcs != rcv_fcs)
3109 return -EBADMSG;
3110 }
3111 return 0;
3112}
3113
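/* Report our receive state to the peer: send an RNR while locally busy,
 * retransmit if the peer had been busy, push any pending I-frames and
 * fall back to an RR if nothing else carried the acknowledgement.
 */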
3114static inline void l2cap_send_i_or_rr_or_rnr(struct l2cap_chan *chan)
3115{
3116 u16 control = 0;
3117
3118 chan->frames_sent = 0;
3119
3120 control |= __set_reqseq(chan, chan->buffer_seq);
3121
3122 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
3123 control |= __set_ctrl_super(chan, L2CAP_SUPER_RNR);
3124 l2cap_send_sframe(chan, control);
3125 set_bit(CONN_RNR_SENT, &chan->conn_state);
3126 }
3127
3128 if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state))
3129 l2cap_retransmit_frames(chan);
3130
3131 l2cap_ertm_send(chan);
3132
3133 if (!test_bit(CONN_LOCAL_BUSY, &chan->conn_state) &&
3134 chan->frames_sent == 0) {
3135 control |= __set_ctrl_super(chan, L2CAP_SUPER_RR);
3136 l2cap_send_sframe(chan, control);
3137 }
3138}
3139
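/* Queue an out-of-sequence I-frame for SREJ recovery, keeping the queue
 * ordered by tx_seq and rejecting duplicates.
 */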
3140static int l2cap_add_to_srej_queue(struct l2cap_chan *chan, struct sk_buff *skb, u16 tx_seq, u8 sar)
3141{
3142 struct sk_buff *next_skb;
3143 int tx_seq_offset, next_tx_seq_offset;
3144
3145 bt_cb(skb)->tx_seq = tx_seq;
3146 bt_cb(skb)->sar = sar;
3147
3148 next_skb = skb_peek(&chan->srej_q);
3149 if (!next_skb) {
3150 __skb_queue_tail(&chan->srej_q, skb);
3151 return 0;
3152 }
3153
3154 tx_seq_offset = (tx_seq - chan->buffer_seq) % 64;
3155 if (tx_seq_offset < 0)
3156 tx_seq_offset += 64;
3157
3158 do {
3159 if (bt_cb(next_skb)->tx_seq == tx_seq)
3160 return -EINVAL;
3161
3162 next_tx_seq_offset = (bt_cb(next_skb)->tx_seq -
3163 chan->buffer_seq) % 64;
3164 if (next_tx_seq_offset < 0)
3165 next_tx_seq_offset += 64;
3166
3167 if (next_tx_seq_offset > tx_seq_offset) {
3168 __skb_queue_before(&chan->srej_q, next_skb, skb);
3169 return 0;
3170 }
3171
3172 if (skb_queue_is_last(&chan->srej_q, next_skb))
3173 break;
3174
3175 } while ((next_skb = skb_queue_next(&chan->srej_q, next_skb)));
3176
3177 __skb_queue_tail(&chan->srej_q, skb);
3178
3179 return 0;
3180}
3181
3182static void append_skb_frag(struct sk_buff *skb,
3183 struct sk_buff *new_frag, struct sk_buff **last_frag)
3184{
3185 /* skb->len reflects data in skb as well as all fragments
3186 * skb->data_len reflects only data in fragments
3187 */
3188 if (!skb_has_frag_list(skb))
3189 skb_shinfo(skb)->frag_list = new_frag;
3190
3191 new_frag->next = NULL;
3192
3193 (*last_frag)->next = new_frag;
3194 *last_frag = new_frag;
3195
3196 skb->len += new_frag->len;
3197 skb->data_len += new_frag->len;
3198 skb->truesize += new_frag->truesize;
3199}
3200
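/* Reassemble an SDU from its SAR-tagged fragments and hand it to the
 * upper layer once complete; unsegmented frames are delivered directly.
 */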
3201static int l2cap_reassemble_sdu(struct l2cap_chan *chan, struct sk_buff *skb, u16 control)
3202{
3203 int err = -EINVAL;
3204
3205 switch (__get_ctrl_sar(chan, control)) {
3206 case L2CAP_SAR_UNSEGMENTED:
3207 if (chan->sdu)
3208 break;
3209
3210 err = chan->ops->recv(chan->data, skb);
3211 break;
3212
3213 case L2CAP_SAR_START:
3214 if (chan->sdu)
3215 break;
3216
3217 chan->sdu_len = get_unaligned_le16(skb->data);
3218 skb_pull(skb, 2);
3219
3220 if (chan->sdu_len > chan->imtu) {
3221 err = -EMSGSIZE;
3222 break;
3223 }
3224
3225 if (skb->len >= chan->sdu_len)
3226 break;
3227
3228 chan->sdu = skb;
3229 chan->sdu_last_frag = skb;
3230
3231 skb = NULL;
3232 err = 0;
3233 break;
3234
3235 case L2CAP_SAR_CONTINUE:
3236 if (!chan->sdu)
3237 break;
3238
3239 append_skb_frag(chan->sdu, skb,
3240 &chan->sdu_last_frag);
3241 skb = NULL;
3242
3243 if (chan->sdu->len >= chan->sdu_len)
3244 break;
3245
3246 err = 0;
3247 break;
3248
3249 case L2CAP_SAR_END:
3250 if (!chan->sdu)
3251 break;
3252
3253 append_skb_frag(chan->sdu, skb,
3254 &chan->sdu_last_frag);
3255 skb = NULL;
3256
3257 if (chan->sdu->len != chan->sdu_len)
3258 break;
3259
3260 err = chan->ops->recv(chan->data, chan->sdu);
3261
3262 if (!err) {
3263 /* Reassembly complete */
3264 chan->sdu = NULL;
3265 chan->sdu_last_frag = NULL;
3266 chan->sdu_len = 0;
3267 }
3268 break;
3269 }
3270
3271 if (err) {
3272 kfree_skb(skb);
3273 kfree_skb(chan->sdu);
3274 chan->sdu = NULL;
3275 chan->sdu_last_frag = NULL;
3276 chan->sdu_len = 0;
3277 }
3278
3279 return err;
3280}
3281
3282static void l2cap_ertm_enter_local_busy(struct l2cap_chan *chan)
3283{
3284 u16 control;
3285
3286 BT_DBG("chan %p, Enter local busy", chan);
3287
3288 set_bit(CONN_LOCAL_BUSY, &chan->conn_state);
3289
3290 control = __set_reqseq(chan, chan->buffer_seq);
3291 control |= __set_ctrl_super(chan, L2CAP_SUPER_RNR);
3292 l2cap_send_sframe(chan, control);
3293
3294 set_bit(CONN_RNR_SENT, &chan->conn_state);
3295
3296 __clear_ack_timer(chan);
3297}
3298
3299static void l2cap_ertm_exit_local_busy(struct l2cap_chan *chan)
3300{
3301 u16 control;
3302
3303 if (!test_bit(CONN_RNR_SENT, &chan->conn_state))
3304 goto done;
3305
3306 control = __set_reqseq(chan, chan->buffer_seq);
3307 control |= __set_ctrl_poll(chan);
3308 control |= __set_ctrl_super(chan, L2CAP_SUPER_RR);
3309 l2cap_send_sframe(chan, control);
3310 chan->retry_count = 1;
3311
3312 __clear_retrans_timer(chan);
3313 __set_monitor_timer(chan);
3314
3315 set_bit(CONN_WAIT_F, &chan->conn_state);
3316
3317done:
3318 clear_bit(CONN_LOCAL_BUSY, &chan->conn_state);
3319 clear_bit(CONN_RNR_SENT, &chan->conn_state);
3320
3321 BT_DBG("chan %p, Exit local busy", chan);
3322}
3323
3324void l2cap_chan_busy(struct l2cap_chan *chan, int busy)
3325{
3326 if (chan->mode == L2CAP_MODE_ERTM) {
3327 if (busy)
3328 l2cap_ertm_enter_local_busy(chan);
3329 else
3330 l2cap_ertm_exit_local_busy(chan);
3331 }
3332}
3333
3334static void l2cap_check_srej_gap(struct l2cap_chan *chan, u16 tx_seq)
3335{
3336 struct sk_buff *skb;
3337 u16 control;
3338
3339 while ((skb = skb_peek(&chan->srej_q)) &&
3340 !test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
3341 int err;
3342
3343 if (bt_cb(skb)->tx_seq != tx_seq)
3344 break;
3345
3346 skb = skb_dequeue(&chan->srej_q);
3347 control = __set_ctrl_sar(chan, bt_cb(skb)->sar);
3348 err = l2cap_reassemble_sdu(chan, skb, control);
3349
3350 if (err < 0) {
3351 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
3352 break;
3353 }
3354
3355 chan->buffer_seq_srej =
3356 (chan->buffer_seq_srej + 1) % 64;
3357 tx_seq = (tx_seq + 1) % 64;
3358 }
3359}
3360
3361static void l2cap_resend_srejframe(struct l2cap_chan *chan, u16 tx_seq)
3362{
3363 struct srej_list *l, *tmp;
3364 u16 control;
3365
3366 list_for_each_entry_safe(l, tmp, &chan->srej_l, list) {
3367 if (l->tx_seq == tx_seq) {
3368 list_del(&l->list);
3369 kfree(l);
3370 return;
3371 }
3372 control = __set_ctrl_super(chan, L2CAP_SUPER_SREJ);
3373 control |= __set_reqseq(chan, l->tx_seq);
3374 l2cap_send_sframe(chan, control);
3375 list_del(&l->list);
3376 list_add_tail(&l->list, &chan->srej_l);
3377 }
3378}
3379
3380static void l2cap_send_srejframe(struct l2cap_chan *chan, u16 tx_seq)
3381{
3382 struct srej_list *new;
3383 u16 control;
3384
3385 while (tx_seq != chan->expected_tx_seq) {
3386 control = __set_ctrl_super(chan, L2CAP_SUPER_SREJ);
3387 control |= __set_reqseq(chan, chan->expected_tx_seq);
3388 l2cap_send_sframe(chan, control);
3389
3390 new = kzalloc(sizeof(struct srej_list), GFP_ATOMIC);
3391 new->tx_seq = chan->expected_tx_seq;
3392 chan->expected_tx_seq = (chan->expected_tx_seq + 1) % 64;
3393 list_add_tail(&new->list, &chan->srej_l);
3394 }
3395 chan->expected_tx_seq = (chan->expected_tx_seq + 1) % 64;
3396}
3397
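/* Process a received I-frame in ERTM: acknowledge outstanding frames,
 * validate tx_seq against the receive window, queue out-of-sequence
 * frames for SREJ recovery and reassemble in-sequence data.
 */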
3398static inline int l2cap_data_channel_iframe(struct l2cap_chan *chan, u16 rx_control, struct sk_buff *skb)
3399{
3400 u16 tx_seq = __get_txseq(chan, rx_control);
3401 u16 req_seq = __get_reqseq(chan, rx_control);
3402 u8 sar = __get_ctrl_sar(chan, rx_control);
3403 int tx_seq_offset, expected_tx_seq_offset;
3404 int num_to_ack = (chan->tx_win/6) + 1;
3405 int err = 0;
3406
3407 BT_DBG("chan %p len %d tx_seq %d rx_control 0x%4.4x", chan, skb->len,
3408 tx_seq, rx_control);
3409
3410 if (__is_ctrl_final(chan, rx_control) &&
3411 test_bit(CONN_WAIT_F, &chan->conn_state)) {
3412 __clear_monitor_timer(chan);
3413 if (chan->unacked_frames > 0)
3414 __set_retrans_timer(chan);
3415 clear_bit(CONN_WAIT_F, &chan->conn_state);
3416 }
3417
3418 chan->expected_ack_seq = req_seq;
3419 l2cap_drop_acked_frames(chan);
3420
3421 tx_seq_offset = (tx_seq - chan->buffer_seq) % 64;
3422 if (tx_seq_offset < 0)
3423 tx_seq_offset += 64;
3424
3425 /* invalid tx_seq */
3426 if (tx_seq_offset >= chan->tx_win) {
3427 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
3428 goto drop;
3429 }
3430
3431 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state))
3432 goto drop;
3433
3434 if (tx_seq == chan->expected_tx_seq)
3435 goto expected;
3436
3437 if (test_bit(CONN_SREJ_SENT, &chan->conn_state)) {
3438 struct srej_list *first;
3439
3440 first = list_first_entry(&chan->srej_l,
3441 struct srej_list, list);
3442 if (tx_seq == first->tx_seq) {
3443 l2cap_add_to_srej_queue(chan, skb, tx_seq, sar);
3444 l2cap_check_srej_gap(chan, tx_seq);
3445
3446 list_del(&first->list);
3447 kfree(first);
3448
3449 if (list_empty(&chan->srej_l)) {
3450 chan->buffer_seq = chan->buffer_seq_srej;
3451 clear_bit(CONN_SREJ_SENT, &chan->conn_state);
3452 l2cap_send_ack(chan);
3453 BT_DBG("chan %p, Exit SREJ_SENT", chan);
3454 }
3455 } else {
3456 struct srej_list *l;
3457
3458 /* duplicated tx_seq */
3459 if (l2cap_add_to_srej_queue(chan, skb, tx_seq, sar) < 0)
3460 goto drop;
3461
3462 list_for_each_entry(l, &chan->srej_l, list) {
3463 if (l->tx_seq == tx_seq) {
3464 l2cap_resend_srejframe(chan, tx_seq);
3465 return 0;
3466 }
3467 }
3468 l2cap_send_srejframe(chan, tx_seq);
3469 }
3470 } else {
3471 expected_tx_seq_offset =
3472 (chan->expected_tx_seq - chan->buffer_seq) % 64;
3473 if (expected_tx_seq_offset < 0)
3474 expected_tx_seq_offset += 64;
3475
3476 /* duplicated tx_seq */
3477 if (tx_seq_offset < expected_tx_seq_offset)
3478 goto drop;
3479
3480 set_bit(CONN_SREJ_SENT, &chan->conn_state);
3481
3482 BT_DBG("chan %p, Enter SREJ", chan);
3483
3484 INIT_LIST_HEAD(&chan->srej_l);
3485 chan->buffer_seq_srej = chan->buffer_seq;
3486
3487 __skb_queue_head_init(&chan->srej_q);
3488 l2cap_add_to_srej_queue(chan, skb, tx_seq, sar);
3489
3490 set_bit(CONN_SEND_PBIT, &chan->conn_state);
3491
3492 l2cap_send_srejframe(chan, tx_seq);
3493
3494 __clear_ack_timer(chan);
3495 }
3496 return 0;
3497
3498expected:
3499 chan->expected_tx_seq = (chan->expected_tx_seq + 1) % 64;
3500
3501 if (test_bit(CONN_SREJ_SENT, &chan->conn_state)) {
3502 bt_cb(skb)->tx_seq = tx_seq;
3503 bt_cb(skb)->sar = sar;
3504 __skb_queue_tail(&chan->srej_q, skb);
3505 return 0;
3506 }
3507
3508 err = l2cap_reassemble_sdu(chan, skb, rx_control);
3509 chan->buffer_seq = (chan->buffer_seq + 1) % 64;
3510 if (err < 0) {
3511 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
3512 return err;
3513 }
3514
3515 if (__is_ctrl_final(chan, rx_control)) {
3516 if (!test_and_clear_bit(CONN_REJ_ACT, &chan->conn_state))
3517 l2cap_retransmit_frames(chan);
3518 }
3519
3520 __set_ack_timer(chan);
3521
3522 chan->num_acked = (chan->num_acked + 1) % num_to_ack;
3523 if (chan->num_acked == num_to_ack - 1)
3524 l2cap_send_ack(chan);
3525
3526 return 0;
3527
3528drop:
3529 kfree_skb(skb);
3530 return 0;
3531}
3532
3533static inline void l2cap_data_channel_rrframe(struct l2cap_chan *chan, u16 rx_control)
3534{
3535 BT_DBG("chan %p, req_seq %d ctrl 0x%4.4x", chan,
3536 __get_reqseq(chan, rx_control), rx_control);
3537
3538 chan->expected_ack_seq = __get_reqseq(chan, rx_control);
3539 l2cap_drop_acked_frames(chan);
3540
3541 if (__is_ctrl_poll(chan, rx_control)) {
3542 set_bit(CONN_SEND_FBIT, &chan->conn_state);
3543 if (test_bit(CONN_SREJ_SENT, &chan->conn_state)) {
3544 if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state) &&
3545 (chan->unacked_frames > 0))
3546 __set_retrans_timer(chan);
3547
3548 clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
3549 l2cap_send_srejtail(chan);
3550 } else {
3551 l2cap_send_i_or_rr_or_rnr(chan);
3552 }
3553
3554 } else if (__is_ctrl_final(chan, rx_control)) {
3555 clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
3556
3557 if (!test_and_clear_bit(CONN_REJ_ACT, &chan->conn_state))
3558 l2cap_retransmit_frames(chan);
3559
3560 } else {
3561 if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state) &&
3562 (chan->unacked_frames > 0))
3563 __set_retrans_timer(chan);
3564
3565 clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
3566 if (test_bit(CONN_SREJ_SENT, &chan->conn_state))
3567 l2cap_send_ack(chan);
3568 else
3569 l2cap_ertm_send(chan);
3570 }
3571}
3572
3573static inline void l2cap_data_channel_rejframe(struct l2cap_chan *chan, u16 rx_control)
3574{
3575 u16 tx_seq = __get_reqseq(chan, rx_control);
3576
3577 BT_DBG("chan %p, req_seq %d ctrl 0x%4.4x", chan, tx_seq, rx_control);
3578
3579 clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
3580
3581 chan->expected_ack_seq = tx_seq;
3582 l2cap_drop_acked_frames(chan);
3583
3584 if (__is_ctrl_final(chan, rx_control)) {
3585 if (!test_and_clear_bit(CONN_REJ_ACT, &chan->conn_state))
3586 l2cap_retransmit_frames(chan);
3587 } else {
3588 l2cap_retransmit_frames(chan);
3589
3590 if (test_bit(CONN_WAIT_F, &chan->conn_state))
3591 set_bit(CONN_REJ_ACT, &chan->conn_state);
3592 }
3593}
3594static inline void l2cap_data_channel_srejframe(struct l2cap_chan *chan, u16 rx_control)
3595{
3596 u16 tx_seq = __get_reqseq(chan, rx_control);
3597
3598 BT_DBG("chan %p, req_seq %d ctrl 0x%4.4x", chan, tx_seq, rx_control);
3599
3600 clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
3601
3602 if (__is_ctrl_poll(chan, rx_control)) {
3603 chan->expected_ack_seq = tx_seq;
3604 l2cap_drop_acked_frames(chan);
3605
3606 set_bit(CONN_SEND_FBIT, &chan->conn_state);
3607 l2cap_retransmit_one_frame(chan, tx_seq);
3608
3609 l2cap_ertm_send(chan);
3610
3611 if (test_bit(CONN_WAIT_F, &chan->conn_state)) {
3612 chan->srej_save_reqseq = tx_seq;
3613 set_bit(CONN_SREJ_ACT, &chan->conn_state);
3614 }
3615 } else if (__is_ctrl_final(chan, rx_control)) {
3616 if (test_bit(CONN_SREJ_ACT, &chan->conn_state) &&
3617 chan->srej_save_reqseq == tx_seq)
3618 clear_bit(CONN_SREJ_ACT, &chan->conn_state);
3619 else
3620 l2cap_retransmit_one_frame(chan, tx_seq);
3621 } else {
3622 l2cap_retransmit_one_frame(chan, tx_seq);
3623 if (test_bit(CONN_WAIT_F, &chan->conn_state)) {
3624 chan->srej_save_reqseq = tx_seq;
3625 set_bit(CONN_SREJ_ACT, &chan->conn_state);
3626 }
3627 }
3628}
3629
3630static inline void l2cap_data_channel_rnrframe(struct l2cap_chan *chan, u16 rx_control)
3631{
3632 u16 tx_seq = __get_reqseq(chan, rx_control);
3633
3634 BT_DBG("chan %p, req_seq %d ctrl 0x%4.4x", chan, tx_seq, rx_control);
3635
3636 set_bit(CONN_REMOTE_BUSY, &chan->conn_state);
3637 chan->expected_ack_seq = tx_seq;
3638 l2cap_drop_acked_frames(chan);
3639
3640 if (__is_ctrl_poll(chan, rx_control))
3641 set_bit(CONN_SEND_FBIT, &chan->conn_state);
3642
3643 if (!test_bit(CONN_SREJ_SENT, &chan->conn_state)) {
3644 __clear_retrans_timer(chan);
3645 if (__is_ctrl_poll(chan, rx_control))
3646 l2cap_send_rr_or_rnr(chan, L2CAP_CTRL_FINAL);
3647 return;
3648 }
3649
3650 if (__is_ctrl_poll(chan, rx_control)) {
3651 l2cap_send_srejtail(chan);
3652 } else {
3653 rx_control = __set_ctrl_super(chan, L2CAP_SUPER_RR);
3654 l2cap_send_sframe(chan, rx_control);
3655 }
3656}
3657
3658static inline int l2cap_data_channel_sframe(struct l2cap_chan *chan, u16 rx_control, struct sk_buff *skb)
3659{
3660 BT_DBG("chan %p rx_control 0x%4.4x len %d", chan, rx_control, skb->len);
3661
3662 if (__is_ctrl_final(chan, rx_control) &&
3663 test_bit(CONN_WAIT_F, &chan->conn_state)) {
3664 __clear_monitor_timer(chan);
3665 if (chan->unacked_frames > 0)
3666 __set_retrans_timer(chan);
3667 clear_bit(CONN_WAIT_F, &chan->conn_state);
3668 }
3669
3670 switch (__get_ctrl_super(chan, rx_control)) {
3671 case L2CAP_SUPER_RR:
3672 l2cap_data_channel_rrframe(chan, rx_control);
3673 break;
3674
3675 case L2CAP_SUPER_REJ:
3676 l2cap_data_channel_rejframe(chan, rx_control);
3677 break;
3678
3679 case L2CAP_SUPER_SREJ:
3680 l2cap_data_channel_srejframe(chan, rx_control);
3681 break;
3682
3683 case L2CAP_SUPER_RNR:
3684 l2cap_data_channel_rnrframe(chan, rx_control);
3685 break;
3686 }
3687
3688 kfree_skb(skb);
3689 return 0;
3690}
3691
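/* Entry point for ERTM frames: verify the FCS, sanity-check the length
 * and req_seq fields, then dispatch to the I-frame or S-frame handler.
 */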
3692static int l2cap_ertm_data_rcv(struct sock *sk, struct sk_buff *skb)
3693{
3694 struct l2cap_chan *chan = l2cap_pi(sk)->chan;
3695 u16 control;
3696 u16 req_seq;
3697 int len, next_tx_seq_offset, req_seq_offset;
3698
3699 control = get_unaligned_le16(skb->data);
3700 skb_pull(skb, 2);
3701 len = skb->len;
3702
3703	/*
3704	 * We can just drop the corrupted I-frame here.
3705	 * The receiver will notice the missing frame and start the
3706	 * proper recovery procedure by requesting retransmission.
3707	 */
3708 if (l2cap_check_fcs(chan, skb))
3709 goto drop;
3710
3711 if (__is_sar_start(chan, control) && !__is_sframe(chan, control))
3712 len -= 2;
3713
3714 if (chan->fcs == L2CAP_FCS_CRC16)
3715 len -= 2;
3716
3717 if (len > chan->mps) {
3718 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
3719 goto drop;
3720 }
3721
3722 req_seq = __get_reqseq(chan, control);
3723 req_seq_offset = (req_seq - chan->expected_ack_seq) % 64;
3724 if (req_seq_offset < 0)
3725 req_seq_offset += 64;
3726
3727 next_tx_seq_offset =
3728 (chan->next_tx_seq - chan->expected_ack_seq) % 64;
3729 if (next_tx_seq_offset < 0)
3730 next_tx_seq_offset += 64;
3731
3732 /* check for invalid req-seq */
3733 if (req_seq_offset > next_tx_seq_offset) {
3734 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
3735 goto drop;
3736 }
3737
3738 if (!__is_sframe(chan, control)) {
3739 if (len < 0) {
3740 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
3741 goto drop;
3742 }
3743
3744 l2cap_data_channel_iframe(chan, control, skb);
3745 } else {
3746 if (len != 0) {
3747 BT_ERR("%d", len);
3748 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
3749 goto drop;
3750 }
3751
3752 l2cap_data_channel_sframe(chan, control, skb);
3753 }
3754
3755 return 0;
3756
3757drop:
3758 kfree_skb(skb);
3759 return 0;
3760}
3761
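/* Deliver an incoming frame to the channel identified by CID, handling
 * it according to the channel mode (Basic, ERTM or Streaming).
 */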
3762static inline int l2cap_data_channel(struct l2cap_conn *conn, u16 cid, struct sk_buff *skb)
3763{
3764 struct l2cap_chan *chan;
3765 struct sock *sk = NULL;
3766 u16 control;
3767 u16 tx_seq;
3768 int len;
3769
3770 chan = l2cap_get_chan_by_scid(conn, cid);
3771 if (!chan) {
3772 BT_DBG("unknown cid 0x%4.4x", cid);
3773 goto drop;
3774 }
3775
3776 sk = chan->sk;
3777
3778 BT_DBG("chan %p, len %d", chan, skb->len);
3779
3780 if (chan->state != BT_CONNECTED)
3781 goto drop;
3782
3783 switch (chan->mode) {
3784 case L2CAP_MODE_BASIC:
3785		/* If the socket recv buffer overflows we drop data here,
3786		 * which is *bad* because L2CAP has to be reliable.
3787		 * But we don't have any other choice: Basic mode provides
3788		 * no flow control mechanism. */
3789
3790 if (chan->imtu < skb->len)
3791 goto drop;
3792
3793 if (!chan->ops->recv(chan->data, skb))
3794 goto done;
3795 break;
3796
3797 case L2CAP_MODE_ERTM:
3798 if (!sock_owned_by_user(sk)) {
3799 l2cap_ertm_data_rcv(sk, skb);
3800 } else {
3801 if (sk_add_backlog(sk, skb))
3802 goto drop;
3803 }
3804
3805 goto done;
3806
3807 case L2CAP_MODE_STREAMING:
3808 control = get_unaligned_le16(skb->data);
3809 skb_pull(skb, 2);
3810 len = skb->len;
3811
3812 if (l2cap_check_fcs(chan, skb))
3813 goto drop;
3814
3815 if (__is_sar_start(chan, control))
3816 len -= 2;
3817
3818 if (chan->fcs == L2CAP_FCS_CRC16)
3819 len -= 2;
3820
3821 if (len > chan->mps || len < 0 || __is_sframe(chan, control))
3822 goto drop;
3823
3824 tx_seq = __get_txseq(chan, control);
3825
3826 if (chan->expected_tx_seq != tx_seq) {
3827 /* Frame(s) missing - must discard partial SDU */
3828 kfree_skb(chan->sdu);
3829 chan->sdu = NULL;
3830 chan->sdu_last_frag = NULL;
3831 chan->sdu_len = 0;
3832
3833 /* TODO: Notify userland of missing data */
3834 }
3835
3836 chan->expected_tx_seq = (tx_seq + 1) % 64;
3837
3838 if (l2cap_reassemble_sdu(chan, skb, control) == -EMSGSIZE)
3839 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
3840
3841 goto done;
3842
3843 default:
3844 BT_DBG("chan %p: bad mode 0x%2.2x", chan, chan->mode);
3845 break;
3846 }
3847
3848drop:
3849 kfree_skb(skb);
3850
3851done:
3852 if (sk)
3853 bh_unlock_sock(sk);
3854
3855 return 0;
3856}
3857
3858static inline int l2cap_conless_channel(struct l2cap_conn *conn, __le16 psm, struct sk_buff *skb)
3859{
3860 struct sock *sk = NULL;
3861 struct l2cap_chan *chan;
3862
3863 chan = l2cap_global_chan_by_psm(0, psm, conn->src);
3864 if (!chan)
3865 goto drop;
3866
3867 sk = chan->sk;
3868
3869 bh_lock_sock(sk);
3870
3871 BT_DBG("sk %p, len %d", sk, skb->len);
3872
3873 if (chan->state != BT_BOUND && chan->state != BT_CONNECTED)
3874 goto drop;
3875
3876 if (chan->imtu < skb->len)
3877 goto drop;
3878
3879 if (!chan->ops->recv(chan->data, skb))
3880 goto done;
3881
3882drop:
3883 kfree_skb(skb);
3884
3885done:
3886 if (sk)
3887 bh_unlock_sock(sk);
3888 return 0;
3889}
3890
3891static inline int l2cap_att_channel(struct l2cap_conn *conn, __le16 cid, struct sk_buff *skb)
3892{
3893 struct sock *sk = NULL;
3894 struct l2cap_chan *chan;
3895
3896 chan = l2cap_global_chan_by_scid(0, cid, conn->src);
3897 if (!chan)
3898 goto drop;
3899
3900 sk = chan->sk;
3901
3902 bh_lock_sock(sk);
3903
3904 BT_DBG("sk %p, len %d", sk, skb->len);
3905
3906 if (chan->state != BT_BOUND && chan->state != BT_CONNECTED)
3907 goto drop;
3908
3909 if (chan->imtu < skb->len)
3910 goto drop;
3911
3912 if (!chan->ops->recv(chan->data, skb))
3913 goto done;
3914
3915drop:
3916 kfree_skb(skb);
3917
3918done:
3919 if (sk)
3920 bh_unlock_sock(sk);
3921 return 0;
3922}
3923
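/* Demultiplex a complete L2CAP frame by CID: signalling, connectionless,
 * ATT, SMP or connection-oriented data.
 */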
3924static void l2cap_recv_frame(struct l2cap_conn *conn, struct sk_buff *skb)
3925{
3926 struct l2cap_hdr *lh = (void *) skb->data;
3927 u16 cid, len;
3928 __le16 psm;
3929
3930 skb_pull(skb, L2CAP_HDR_SIZE);
3931 cid = __le16_to_cpu(lh->cid);
3932 len = __le16_to_cpu(lh->len);
3933
3934 if (len != skb->len) {
3935 kfree_skb(skb);
3936 return;
3937 }
3938
3939 BT_DBG("len %d, cid 0x%4.4x", len, cid);
3940
3941 switch (cid) {
3942 case L2CAP_CID_LE_SIGNALING:
3943 case L2CAP_CID_SIGNALING:
3944 l2cap_sig_channel(conn, skb);
3945 break;
3946
3947 case L2CAP_CID_CONN_LESS:
3948 psm = get_unaligned_le16(skb->data);
3949 skb_pull(skb, 2);
3950 l2cap_conless_channel(conn, psm, skb);
3951 break;
3952
3953 case L2CAP_CID_LE_DATA:
3954 l2cap_att_channel(conn, cid, skb);
3955 break;
3956
3957 case L2CAP_CID_SMP:
3958 if (smp_sig_channel(conn, skb))
3959 l2cap_conn_del(conn->hcon, EACCES);
3960 break;
3961
3962 default:
3963 l2cap_data_channel(conn, cid, skb);
3964 break;
3965 }
3966}
3967
3968/* ---- L2CAP interface with lower layer (HCI) ---- */
3969
3970static int l2cap_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
3971{
3972 int exact = 0, lm1 = 0, lm2 = 0;
3973 struct l2cap_chan *c;
3974
3975 if (type != ACL_LINK)
3976 return -EINVAL;
3977
3978 BT_DBG("hdev %s, bdaddr %s", hdev->name, batostr(bdaddr));
3979
3980 /* Find listening sockets and check their link_mode */
3981 read_lock(&chan_list_lock);
3982 list_for_each_entry(c, &chan_list, global_l) {
3983 struct sock *sk = c->sk;
3984
3985 if (c->state != BT_LISTEN)
3986 continue;
3987
3988 if (!bacmp(&bt_sk(sk)->src, &hdev->bdaddr)) {
3989 lm1 |= HCI_LM_ACCEPT;
3990 if (test_bit(FLAG_ROLE_SWITCH, &c->flags))
3991 lm1 |= HCI_LM_MASTER;
3992 exact++;
3993 } else if (!bacmp(&bt_sk(sk)->src, BDADDR_ANY)) {
3994 lm2 |= HCI_LM_ACCEPT;
3995 if (test_bit(FLAG_ROLE_SWITCH, &c->flags))
3996 lm2 |= HCI_LM_MASTER;
3997 }
3998 }
3999 read_unlock(&chan_list_lock);
4000
4001 return exact ? lm1 : lm2;
4002}
4003
4004static int l2cap_connect_cfm(struct hci_conn *hcon, u8 status)
4005{
4006 struct l2cap_conn *conn;
4007
4008 BT_DBG("hcon %p bdaddr %s status %d", hcon, batostr(&hcon->dst), status);
4009
4010 if (!(hcon->type == ACL_LINK || hcon->type == LE_LINK))
4011 return -EINVAL;
4012
4013 if (!status) {
4014 conn = l2cap_conn_add(hcon, status);
4015 if (conn)
4016 l2cap_conn_ready(conn);
4017 } else
4018 l2cap_conn_del(hcon, bt_to_errno(status));
4019
4020 return 0;
4021}
4022
4023static int l2cap_disconn_ind(struct hci_conn *hcon)
4024{
4025 struct l2cap_conn *conn = hcon->l2cap_data;
4026
4027 BT_DBG("hcon %p", hcon);
4028
4029 if ((hcon->type != ACL_LINK && hcon->type != LE_LINK) || !conn)
4030 return 0x13;
4031
4032 return conn->disc_reason;
4033}
4034
4035static int l2cap_disconn_cfm(struct hci_conn *hcon, u8 reason)
4036{
4037 BT_DBG("hcon %p reason %d", hcon, reason);
4038
4039 if (!(hcon->type == ACL_LINK || hcon->type == LE_LINK))
4040 return -EINVAL;
4041
4042 l2cap_conn_del(hcon, bt_to_errno(reason));
4043
4044 return 0;
4045}
4046
4047static inline void l2cap_check_encryption(struct l2cap_chan *chan, u8 encrypt)
4048{
4049 if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED)
4050 return;
4051
4052 if (encrypt == 0x00) {
4053 if (chan->sec_level == BT_SECURITY_MEDIUM) {
4054 __clear_chan_timer(chan);
4055 __set_chan_timer(chan, HZ * 5);
4056 } else if (chan->sec_level == BT_SECURITY_HIGH)
4057 l2cap_chan_close(chan, ECONNREFUSED);
4058 } else {
4059 if (chan->sec_level == BT_SECURITY_MEDIUM)
4060 __clear_chan_timer(chan);
4061 }
4062}
4063
4064static int l2cap_security_cfm(struct hci_conn *hcon, u8 status, u8 encrypt)
4065{
4066 struct l2cap_conn *conn = hcon->l2cap_data;
4067 struct l2cap_chan *chan;
4068
4069 if (!conn)
4070 return 0;
4071
4072 BT_DBG("conn %p", conn);
4073
4074 if (hcon->type == LE_LINK) {
4075 smp_distribute_keys(conn, 0);
4076 del_timer(&conn->security_timer);
4077 }
4078
4079 read_lock(&conn->chan_lock);
4080
4081 list_for_each_entry(chan, &conn->chan_l, list) {
4082 struct sock *sk = chan->sk;
4083
4084 bh_lock_sock(sk);
4085
4086 BT_DBG("chan->scid %d", chan->scid);
4087
4088 if (chan->scid == L2CAP_CID_LE_DATA) {
4089 if (!status && encrypt) {
4090 chan->sec_level = hcon->sec_level;
4091 l2cap_chan_ready(sk);
4092 }
4093
4094 bh_unlock_sock(sk);
4095 continue;
4096 }
4097
4098 if (test_bit(CONF_CONNECT_PEND, &chan->conf_state)) {
4099 bh_unlock_sock(sk);
4100 continue;
4101 }
4102
4103 if (!status && (chan->state == BT_CONNECTED ||
4104 chan->state == BT_CONFIG)) {
4105 l2cap_check_encryption(chan, encrypt);
4106 bh_unlock_sock(sk);
4107 continue;
4108 }
4109
4110 if (chan->state == BT_CONNECT) {
4111 if (!status) {
4112 struct l2cap_conn_req req;
4113 req.scid = cpu_to_le16(chan->scid);
4114 req.psm = chan->psm;
4115
4116 chan->ident = l2cap_get_ident(conn);
4117 set_bit(CONF_CONNECT_PEND, &chan->conf_state);
4118
4119 l2cap_send_cmd(conn, chan->ident,
4120 L2CAP_CONN_REQ, sizeof(req), &req);
4121 } else {
4122 __clear_chan_timer(chan);
4123 __set_chan_timer(chan, HZ / 10);
4124 }
4125 } else if (chan->state == BT_CONNECT2) {
4126 struct l2cap_conn_rsp rsp;
4127 __u16 res, stat;
4128
4129 if (!status) {
4130 if (bt_sk(sk)->defer_setup) {
4131 struct sock *parent = bt_sk(sk)->parent;
4132 res = L2CAP_CR_PEND;
4133 stat = L2CAP_CS_AUTHOR_PEND;
4134 if (parent)
4135 parent->sk_data_ready(parent, 0);
4136 } else {
4137 l2cap_state_change(chan, BT_CONFIG);
4138 res = L2CAP_CR_SUCCESS;
4139 stat = L2CAP_CS_NO_INFO;
4140 }
4141 } else {
4142 l2cap_state_change(chan, BT_DISCONN);
4143 __set_chan_timer(chan, HZ / 10);
4144 res = L2CAP_CR_SEC_BLOCK;
4145 stat = L2CAP_CS_NO_INFO;
4146 }
4147
4148 rsp.scid = cpu_to_le16(chan->dcid);
4149 rsp.dcid = cpu_to_le16(chan->scid);
4150 rsp.result = cpu_to_le16(res);
4151 rsp.status = cpu_to_le16(stat);
4152 l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
4153 sizeof(rsp), &rsp);
4154 }
4155
4156 bh_unlock_sock(sk);
4157 }
4158
4159 read_unlock(&conn->chan_lock);
4160
4161 return 0;
4162}
4163
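/* Reassemble L2CAP frames from ACL start/continuation fragments and pass
 * each complete frame to l2cap_recv_frame().
 */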
4164static int l2cap_recv_acldata(struct hci_conn *hcon, struct sk_buff *skb, u16 flags)
4165{
4166 struct l2cap_conn *conn = hcon->l2cap_data;
4167
4168 if (!conn)
4169 conn = l2cap_conn_add(hcon, 0);
4170
4171 if (!conn)
4172 goto drop;
4173
4174 BT_DBG("conn %p len %d flags 0x%x", conn, skb->len, flags);
4175
4176 if (!(flags & ACL_CONT)) {
4177 struct l2cap_hdr *hdr;
4178 struct l2cap_chan *chan;
4179 u16 cid;
4180 int len;
4181
4182 if (conn->rx_len) {
4183 BT_ERR("Unexpected start frame (len %d)", skb->len);
4184 kfree_skb(conn->rx_skb);
4185 conn->rx_skb = NULL;
4186 conn->rx_len = 0;
4187 l2cap_conn_unreliable(conn, ECOMM);
4188 }
4189
4190		/* A start fragment always begins with the Basic L2CAP header */
4191 if (skb->len < L2CAP_HDR_SIZE) {
4192 BT_ERR("Frame is too short (len %d)", skb->len);
4193 l2cap_conn_unreliable(conn, ECOMM);
4194 goto drop;
4195 }
4196
4197 hdr = (struct l2cap_hdr *) skb->data;
4198 len = __le16_to_cpu(hdr->len) + L2CAP_HDR_SIZE;
4199 cid = __le16_to_cpu(hdr->cid);
4200
4201 if (len == skb->len) {
4202 /* Complete frame received */
4203 l2cap_recv_frame(conn, skb);
4204 return 0;
4205 }
4206
4207 BT_DBG("Start: total len %d, frag len %d", len, skb->len);
4208
4209 if (skb->len > len) {
4210 BT_ERR("Frame is too long (len %d, expected len %d)",
4211 skb->len, len);
4212 l2cap_conn_unreliable(conn, ECOMM);
4213 goto drop;
4214 }
4215
4216 chan = l2cap_get_chan_by_scid(conn, cid);
4217
4218 if (chan && chan->sk) {
4219 struct sock *sk = chan->sk;
4220
4221 if (chan->imtu < len - L2CAP_HDR_SIZE) {
4222 BT_ERR("Frame exceeding recv MTU (len %d, "
4223 "MTU %d)", len,
4224 chan->imtu);
4225 bh_unlock_sock(sk);
4226 l2cap_conn_unreliable(conn, ECOMM);
4227 goto drop;
4228 }
4229 bh_unlock_sock(sk);
4230 }
4231
4232 /* Allocate skb for the complete frame (with header) */
4233 conn->rx_skb = bt_skb_alloc(len, GFP_ATOMIC);
4234 if (!conn->rx_skb)
4235 goto drop;
4236
4237 skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
4238 skb->len);
4239 conn->rx_len = len - skb->len;
4240 } else {
4241 BT_DBG("Cont: frag len %d (expecting %d)", skb->len, conn->rx_len);
4242
4243 if (!conn->rx_len) {
4244 BT_ERR("Unexpected continuation frame (len %d)", skb->len);
4245 l2cap_conn_unreliable(conn, ECOMM);
4246 goto drop;
4247 }
4248
4249 if (skb->len > conn->rx_len) {
4250 BT_ERR("Fragment is too long (len %d, expected %d)",
4251 skb->len, conn->rx_len);
4252 kfree_skb(conn->rx_skb);
4253 conn->rx_skb = NULL;
4254 conn->rx_len = 0;
4255 l2cap_conn_unreliable(conn, ECOMM);
4256 goto drop;
4257 }
4258
4259 skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
4260 skb->len);
4261 conn->rx_len -= skb->len;
4262
4263 if (!conn->rx_len) {
4264 /* Complete frame received */
4265 l2cap_recv_frame(conn, conn->rx_skb);
4266 conn->rx_skb = NULL;
4267 }
4268 }
4269
4270drop:
4271 kfree_skb(skb);
4272 return 0;
4273}
4274
4275static int l2cap_debugfs_show(struct seq_file *f, void *p)
4276{
4277 struct l2cap_chan *c;
4278
4279 read_lock_bh(&chan_list_lock);
4280
4281 list_for_each_entry(c, &chan_list, global_l) {
4282 struct sock *sk = c->sk;
4283
4284 seq_printf(f, "%s %s %d %d 0x%4.4x 0x%4.4x %d %d %d %d\n",
4285 batostr(&bt_sk(sk)->src),
4286 batostr(&bt_sk(sk)->dst),
4287 c->state, __le16_to_cpu(c->psm),
4288 c->scid, c->dcid, c->imtu, c->omtu,
4289 c->sec_level, c->mode);
4290	}
4291
4292 read_unlock_bh(&chan_list_lock);
4293
4294 return 0;
4295}
4296
4297static int l2cap_debugfs_open(struct inode *inode, struct file *file)
4298{
4299 return single_open(file, l2cap_debugfs_show, inode->i_private);
4300}
4301
4302static const struct file_operations l2cap_debugfs_fops = {
4303 .open = l2cap_debugfs_open,
4304 .read = seq_read,
4305 .llseek = seq_lseek,
4306 .release = single_release,
4307};
4308
4309static struct dentry *l2cap_debugfs;
4310
4311static struct hci_proto l2cap_hci_proto = {
4312 .name = "L2CAP",
4313 .id = HCI_PROTO_L2CAP,
4314 .connect_ind = l2cap_connect_ind,
4315 .connect_cfm = l2cap_connect_cfm,
4316 .disconn_ind = l2cap_disconn_ind,
4317 .disconn_cfm = l2cap_disconn_cfm,
4318 .security_cfm = l2cap_security_cfm,
4319 .recv_acldata = l2cap_recv_acldata
4320};
4321
4322int __init l2cap_init(void)
4323{
4324 int err;
4325
4326 err = l2cap_init_sockets();
4327 if (err < 0)
4328 return err;
4329
4330 err = hci_register_proto(&l2cap_hci_proto);
4331 if (err < 0) {
4332 BT_ERR("L2CAP protocol registration failed");
4333 bt_sock_unregister(BTPROTO_L2CAP);
4334 goto error;
4335 }
4336
4337 if (bt_debugfs) {
4338 l2cap_debugfs = debugfs_create_file("l2cap", 0444,
4339 bt_debugfs, NULL, &l2cap_debugfs_fops);
4340 if (!l2cap_debugfs)
4341 BT_ERR("Failed to create L2CAP debug file");
4342 }
4343
4344 return 0;
4345
4346error:
4347 l2cap_cleanup_sockets();
4348 return err;
4349}
4350
4351void l2cap_exit(void)
4352{
4353 debugfs_remove(l2cap_debugfs);
4354
4355 if (hci_unregister_proto(&l2cap_hci_proto) < 0)
4356 BT_ERR("L2CAP protocol unregistration failed");
4357
4358 l2cap_cleanup_sockets();
4359}
4360
4361module_param(disable_ertm, bool, 0644);
4362MODULE_PARM_DESC(disable_ertm, "Disable enhanced retransmission mode");
4363
4364module_param(enable_hs, bool, 0644);
4365MODULE_PARM_DESC(enable_hs, "Enable High Speed");