Bluetooth: Add ProFUSION's copyright
net/bluetooth/l2cap_core.c (mt8127/android_kernel_alcatel_ttab.git)
1 /*
2 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (C) 2000-2001 Qualcomm Incorporated
4 Copyright (C) 2009-2010 Gustavo F. Padovan <gustavo@padovan.org>
5 Copyright (C) 2010 Google Inc.
6 Copyright (C) 2011 ProFUSION Embedded Systems
7
8 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
9
10 This program is free software; you can redistribute it and/or modify
11 it under the terms of the GNU General Public License version 2 as
12 published by the Free Software Foundation;
13
14 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
15 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
17 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
18 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
19 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
20 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
21 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
22
23 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
24 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
25 SOFTWARE IS DISCLAIMED.
26 */
27
28 /* Bluetooth L2CAP core. */
29
30 #include <linux/module.h>
31
32 #include <linux/types.h>
33 #include <linux/capability.h>
34 #include <linux/errno.h>
35 #include <linux/kernel.h>
36 #include <linux/sched.h>
37 #include <linux/slab.h>
38 #include <linux/poll.h>
39 #include <linux/fcntl.h>
40 #include <linux/init.h>
41 #include <linux/interrupt.h>
42 #include <linux/socket.h>
43 #include <linux/skbuff.h>
44 #include <linux/list.h>
45 #include <linux/device.h>
46 #include <linux/debugfs.h>
47 #include <linux/seq_file.h>
48 #include <linux/uaccess.h>
49 #include <linux/crc16.h>
50 #include <net/sock.h>
51
52 #include <asm/system.h>
53 #include <asm/unaligned.h>
54
55 #include <net/bluetooth/bluetooth.h>
56 #include <net/bluetooth/hci_core.h>
57 #include <net/bluetooth/l2cap.h>
58 #include <net/bluetooth/smp.h>
59
60 int disable_ertm;
61
62 static u32 l2cap_feat_mask = L2CAP_FEAT_FIXED_CHAN;
63 static u8 l2cap_fixed_chan[8] = { L2CAP_FC_L2CAP, };
64
65 static LIST_HEAD(chan_list);
66 static DEFINE_RWLOCK(chan_list_lock);
67
68 static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
69 u8 code, u8 ident, u16 dlen, void *data);
70 static void l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len,
71 void *data);
72 static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data);
73 static void l2cap_send_disconn_req(struct l2cap_conn *conn,
74 struct l2cap_chan *chan, int err);
75
76 static int l2cap_ertm_data_rcv(struct sock *sk, struct sk_buff *skb);
77
78 /* ---- L2CAP channels ---- */
79
80 static inline void chan_hold(struct l2cap_chan *c)
81 {
82 atomic_inc(&c->refcnt);
83 }
84
85 static inline void chan_put(struct l2cap_chan *c)
86 {
87 if (atomic_dec_and_test(&c->refcnt))
88 kfree(c);
89 }
90
91 static struct l2cap_chan *__l2cap_get_chan_by_dcid(struct l2cap_conn *conn, u16 cid)
92 {
93 struct l2cap_chan *c, *r = NULL;
94
95 rcu_read_lock();
96
97 list_for_each_entry_rcu(c, &conn->chan_l, list) {
98 if (c->dcid == cid) {
99 r = c;
100 break;
101 }
102 }
103
104 rcu_read_unlock();
105 return r;
106 }
107
108 static struct l2cap_chan *__l2cap_get_chan_by_scid(struct l2cap_conn *conn, u16 cid)
109 {
110 struct l2cap_chan *c, *r = NULL;
111
112 rcu_read_lock();
113
114 list_for_each_entry_rcu(c, &conn->chan_l, list) {
115 if (c->scid == cid) {
116 r = c;
117 break;
118 }
119 }
120
121 rcu_read_unlock();
122 return r;
123 }
124
125 /* Find channel with given SCID.
126 * Returns locked socket */
127 static struct l2cap_chan *l2cap_get_chan_by_scid(struct l2cap_conn *conn, u16 cid)
128 {
129 struct l2cap_chan *c;
130
131 c = __l2cap_get_chan_by_scid(conn, cid);
132 if (c)
133 lock_sock(c->sk);
134 return c;
135 }
136
137 static struct l2cap_chan *__l2cap_get_chan_by_ident(struct l2cap_conn *conn, u8 ident)
138 {
139 struct l2cap_chan *c, *r = NULL;
140
141 rcu_read_lock();
142
143 list_for_each_entry_rcu(c, &conn->chan_l, list) {
144 if (c->ident == ident) {
145 r = c;
146 break;
147 }
148 }
149
150 rcu_read_unlock();
151 return r;
152 }
153
154 static inline struct l2cap_chan *l2cap_get_chan_by_ident(struct l2cap_conn *conn, u8 ident)
155 {
156 struct l2cap_chan *c;
157
158 c = __l2cap_get_chan_by_ident(conn, ident);
159 if (c)
160 lock_sock(c->sk);
161 return c;
162 }
163
164 static struct l2cap_chan *__l2cap_global_chan_by_addr(__le16 psm, bdaddr_t *src)
165 {
166 struct l2cap_chan *c;
167
168 list_for_each_entry(c, &chan_list, global_l) {
169 if (c->sport == psm && !bacmp(&bt_sk(c->sk)->src, src))
170 return c;
171 }
172 return NULL;
173 }
174
175 int l2cap_add_psm(struct l2cap_chan *chan, bdaddr_t *src, __le16 psm)
176 {
177 int err;
178
179 write_lock_bh(&chan_list_lock);
180
181 if (psm && __l2cap_global_chan_by_addr(psm, src)) {
182 err = -EADDRINUSE;
183 goto done;
184 }
185
186 if (psm) {
187 chan->psm = psm;
188 chan->sport = psm;
189 err = 0;
190 } else {
191 u16 p;
192
193 err = -EINVAL;
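/* No PSM was given: scan the dynamic range for the first free odd value on this source address. */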
194 for (p = 0x1001; p < 0x1100; p += 2)
195 if (!__l2cap_global_chan_by_addr(cpu_to_le16(p), src)) {
196 chan->psm = cpu_to_le16(p);
197 chan->sport = cpu_to_le16(p);
198 err = 0;
199 break;
200 }
201 }
202
203 done:
204 write_unlock_bh(&chan_list_lock);
205 return err;
206 }
207
208 int l2cap_add_scid(struct l2cap_chan *chan, __u16 scid)
209 {
210 write_lock_bh(&chan_list_lock);
211
212 chan->scid = scid;
213
214 write_unlock_bh(&chan_list_lock);
215
216 return 0;
217 }
218
219 static u16 l2cap_alloc_cid(struct l2cap_conn *conn)
220 {
221 u16 cid = L2CAP_CID_DYN_START;
222
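/* Pick the first source CID in the dynamic range that is not already used by a channel on this connection. */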
223 for (; cid < L2CAP_CID_DYN_END; cid++) {
224 if (!__l2cap_get_chan_by_scid(conn, cid))
225 return cid;
226 }
227
228 return 0;
229 }
230
231 static void l2cap_set_timer(struct l2cap_chan *chan, struct delayed_work *work, long timeout)
232 {
233 BT_DBG("chan %p state %d timeout %ld", chan, chan->state, timeout);
234
235 cancel_delayed_work_sync(work);
236
237 schedule_delayed_work(work, timeout);
238 }
239
240 static void l2cap_clear_timer(struct delayed_work *work)
241 {
242 cancel_delayed_work_sync(work);
243 }
244
245 static char *state_to_string(int state)
246 {
247 switch (state) {
248 case BT_CONNECTED:
249 return "BT_CONNECTED";
250 case BT_OPEN:
251 return "BT_OPEN";
252 case BT_BOUND:
253 return "BT_BOUND";
254 case BT_LISTEN:
255 return "BT_LISTEN";
256 case BT_CONNECT:
257 return "BT_CONNECT";
258 case BT_CONNECT2:
259 return "BT_CONNECT2";
260 case BT_CONFIG:
261 return "BT_CONFIG";
262 case BT_DISCONN:
263 return "BT_DISCONN";
264 case BT_CLOSED:
265 return "BT_CLOSED";
266 }
267
268 return "invalid state";
269 }
270
271 static void l2cap_state_change(struct l2cap_chan *chan, int state)
272 {
273 BT_DBG("%p %s -> %s", chan, state_to_string(chan->state),
274 state_to_string(state));
275
276 chan->state = state;
277 chan->ops->state_change(chan->data, state);
278 }
279
280 static void l2cap_chan_timeout(struct work_struct *work)
281 {
282 struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
283 chan_timer.work);
284 struct sock *sk = chan->sk;
285 int reason;
286
287 BT_DBG("chan %p state %d", chan, chan->state);
288
289 lock_sock(sk);
290
291 if (chan->state == BT_CONNECTED || chan->state == BT_CONFIG)
292 reason = ECONNREFUSED;
293 else if (chan->state == BT_CONNECT &&
294 chan->sec_level != BT_SECURITY_SDP)
295 reason = ECONNREFUSED;
296 else
297 reason = ETIMEDOUT;
298
299 l2cap_chan_close(chan, reason);
300
301 release_sock(sk);
302
303 chan->ops->close(chan->data);
304 chan_put(chan);
305 }
306
307 struct l2cap_chan *l2cap_chan_create(struct sock *sk)
308 {
309 struct l2cap_chan *chan;
310
311 chan = kzalloc(sizeof(*chan), GFP_ATOMIC);
312 if (!chan)
313 return NULL;
314
315 chan->sk = sk;
316
317 write_lock_bh(&chan_list_lock);
318 list_add(&chan->global_l, &chan_list);
319 write_unlock_bh(&chan_list_lock);
320
321 INIT_DELAYED_WORK(&chan->chan_timer, l2cap_chan_timeout);
322
323 chan->state = BT_OPEN;
324
325 atomic_set(&chan->refcnt, 1);
326
327 BT_DBG("sk %p chan %p", sk, chan);
328
329 return chan;
330 }
331
332 void l2cap_chan_destroy(struct l2cap_chan *chan)
333 {
334 write_lock_bh(&chan_list_lock);
335 list_del(&chan->global_l);
336 write_unlock_bh(&chan_list_lock);
337
338 chan_put(chan);
339 }
340
341 static void l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan)
342 {
343 BT_DBG("conn %p, psm 0x%2.2x, dcid 0x%4.4x", conn,
344 chan->psm, chan->dcid);
345
346 conn->disc_reason = HCI_ERROR_REMOTE_USER_TERM;
347
348 chan->conn = conn;
349
350 if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED) {
351 if (conn->hcon->type == LE_LINK) {
352 /* LE connection */
353 chan->omtu = L2CAP_LE_DEFAULT_MTU;
354 chan->scid = L2CAP_CID_LE_DATA;
355 chan->dcid = L2CAP_CID_LE_DATA;
356 } else {
357 /* Alloc CID for connection-oriented socket */
358 chan->scid = l2cap_alloc_cid(conn);
359 chan->omtu = L2CAP_DEFAULT_MTU;
360 }
361 } else if (chan->chan_type == L2CAP_CHAN_CONN_LESS) {
362 /* Connectionless socket */
363 chan->scid = L2CAP_CID_CONN_LESS;
364 chan->dcid = L2CAP_CID_CONN_LESS;
365 chan->omtu = L2CAP_DEFAULT_MTU;
366 } else {
367 /* Raw socket can send/recv signalling messages only */
368 chan->scid = L2CAP_CID_SIGNALING;
369 chan->dcid = L2CAP_CID_SIGNALING;
370 chan->omtu = L2CAP_DEFAULT_MTU;
371 }
372
373 chan->local_id = L2CAP_BESTEFFORT_ID;
374 chan->local_stype = L2CAP_SERV_BESTEFFORT;
375 chan->local_msdu = L2CAP_DEFAULT_MAX_SDU_SIZE;
376 chan->local_sdu_itime = L2CAP_DEFAULT_SDU_ITIME;
377 chan->local_acc_lat = L2CAP_DEFAULT_ACC_LAT;
378 chan->local_flush_to = L2CAP_DEFAULT_FLUSH_TO;
379
380 chan_hold(chan);
381
382 list_add_rcu(&chan->list, &conn->chan_l);
383 }
384
385 /* Delete channel.
386 * Must be called on the locked socket. */
387 static void l2cap_chan_del(struct l2cap_chan *chan, int err)
388 {
389 struct sock *sk = chan->sk;
390 struct l2cap_conn *conn = chan->conn;
391 struct sock *parent = bt_sk(sk)->parent;
392
393 __clear_chan_timer(chan);
394
395 BT_DBG("chan %p, conn %p, err %d", chan, conn, err);
396
397 if (conn) {
398 /* Delete from channel list */
399 list_del_rcu(&chan->list);
400 synchronize_rcu();
401
402 chan_put(chan);
403
404 chan->conn = NULL;
405 hci_conn_put(conn->hcon);
406 }
407
408 l2cap_state_change(chan, BT_CLOSED);
409 sock_set_flag(sk, SOCK_ZAPPED);
410
411 if (err)
412 sk->sk_err = err;
413
414 if (parent) {
415 bt_accept_unlink(sk);
416 parent->sk_data_ready(parent, 0);
417 } else
418 sk->sk_state_change(sk);
419
420 if (!(test_bit(CONF_OUTPUT_DONE, &chan->conf_state) &&
421 test_bit(CONF_INPUT_DONE, &chan->conf_state)))
422 return;
423
424 skb_queue_purge(&chan->tx_q);
425
426 if (chan->mode == L2CAP_MODE_ERTM) {
427 struct srej_list *l, *tmp;
428
429 __clear_retrans_timer(chan);
430 __clear_monitor_timer(chan);
431 __clear_ack_timer(chan);
432
433 skb_queue_purge(&chan->srej_q);
434
435 list_for_each_entry_safe(l, tmp, &chan->srej_l, list) {
436 list_del(&l->list);
437 kfree(l);
438 }
439 }
440 }
441
442 static void l2cap_chan_cleanup_listen(struct sock *parent)
443 {
444 struct sock *sk;
445
446 BT_DBG("parent %p", parent);
447
448 /* Close not yet accepted channels */
449 while ((sk = bt_accept_dequeue(parent, NULL))) {
450 struct l2cap_chan *chan = l2cap_pi(sk)->chan;
451 __clear_chan_timer(chan);
452 lock_sock(sk);
453 l2cap_chan_close(chan, ECONNRESET);
454 release_sock(sk);
455 chan->ops->close(chan->data);
456 }
457 }
458
459 void l2cap_chan_close(struct l2cap_chan *chan, int reason)
460 {
461 struct l2cap_conn *conn = chan->conn;
462 struct sock *sk = chan->sk;
463
464 BT_DBG("chan %p state %d socket %p", chan, chan->state, sk->sk_socket);
465
466 switch (chan->state) {
467 case BT_LISTEN:
468 l2cap_chan_cleanup_listen(sk);
469
470 l2cap_state_change(chan, BT_CLOSED);
471 sock_set_flag(sk, SOCK_ZAPPED);
472 break;
473
474 case BT_CONNECTED:
475 case BT_CONFIG:
476 if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED &&
477 conn->hcon->type == ACL_LINK) {
478 __clear_chan_timer(chan);
479 __set_chan_timer(chan, sk->sk_sndtimeo);
480 l2cap_send_disconn_req(conn, chan, reason);
481 } else
482 l2cap_chan_del(chan, reason);
483 break;
484
485 case BT_CONNECT2:
486 if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED &&
487 conn->hcon->type == ACL_LINK) {
488 struct l2cap_conn_rsp rsp;
489 __u16 result;
490
491 if (bt_sk(sk)->defer_setup)
492 result = L2CAP_CR_SEC_BLOCK;
493 else
494 result = L2CAP_CR_BAD_PSM;
495 l2cap_state_change(chan, BT_DISCONN);
496
497 rsp.scid = cpu_to_le16(chan->dcid);
498 rsp.dcid = cpu_to_le16(chan->scid);
499 rsp.result = cpu_to_le16(result);
500 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
501 l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
502 sizeof(rsp), &rsp);
503 }
504
505 l2cap_chan_del(chan, reason);
506 break;
507
508 case BT_CONNECT:
509 case BT_DISCONN:
510 l2cap_chan_del(chan, reason);
511 break;
512
513 default:
514 sock_set_flag(sk, SOCK_ZAPPED);
515 break;
516 }
517 }
518
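/* Derive the HCI authentication requirement from the channel type, the PSM (SDP gets special treatment) and the requested security level. */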
519 static inline u8 l2cap_get_auth_type(struct l2cap_chan *chan)
520 {
521 if (chan->chan_type == L2CAP_CHAN_RAW) {
522 switch (chan->sec_level) {
523 case BT_SECURITY_HIGH:
524 return HCI_AT_DEDICATED_BONDING_MITM;
525 case BT_SECURITY_MEDIUM:
526 return HCI_AT_DEDICATED_BONDING;
527 default:
528 return HCI_AT_NO_BONDING;
529 }
530 } else if (chan->psm == cpu_to_le16(0x0001)) {
531 if (chan->sec_level == BT_SECURITY_LOW)
532 chan->sec_level = BT_SECURITY_SDP;
533
534 if (chan->sec_level == BT_SECURITY_HIGH)
535 return HCI_AT_NO_BONDING_MITM;
536 else
537 return HCI_AT_NO_BONDING;
538 } else {
539 switch (chan->sec_level) {
540 case BT_SECURITY_HIGH:
541 return HCI_AT_GENERAL_BONDING_MITM;
542 case BT_SECURITY_MEDIUM:
543 return HCI_AT_GENERAL_BONDING;
544 default:
545 return HCI_AT_NO_BONDING;
546 }
547 }
548 }
549
550 /* Service level security */
551 int l2cap_chan_check_security(struct l2cap_chan *chan)
552 {
553 struct l2cap_conn *conn = chan->conn;
554 __u8 auth_type;
555
556 auth_type = l2cap_get_auth_type(chan);
557
558 return hci_conn_security(conn->hcon, chan->sec_level, auth_type);
559 }
560
561 static u8 l2cap_get_ident(struct l2cap_conn *conn)
562 {
563 u8 id;
564
565 /* Get next available identifier.
566 * 1 - 128 are used by the kernel.
567 * 129 - 199 are reserved.
568 * 200 - 254 are used by utilities like l2ping, etc.
569 */
570
571 spin_lock_bh(&conn->lock);
572
573 if (++conn->tx_ident > 128)
574 conn->tx_ident = 1;
575
576 id = conn->tx_ident;
577
578 spin_unlock_bh(&conn->lock);
579
580 return id;
581 }
582
583 static void l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len, void *data)
584 {
585 struct sk_buff *skb = l2cap_build_cmd(conn, code, ident, len, data);
586 u8 flags;
587
588 BT_DBG("code 0x%2.2x", code);
589
590 if (!skb)
591 return;
592
593 if (lmp_no_flush_capable(conn->hcon->hdev))
594 flags = ACL_START_NO_FLUSH;
595 else
596 flags = ACL_START;
597
598 bt_cb(skb)->force_active = BT_POWER_FORCE_ACTIVE_ON;
599 skb->priority = HCI_PRIO_MAX;
600
601 hci_send_acl(conn->hchan, skb, flags);
602 }
603
604 static void l2cap_do_send(struct l2cap_chan *chan, struct sk_buff *skb)
605 {
606 struct hci_conn *hcon = chan->conn->hcon;
607 u16 flags;
608
609 BT_DBG("chan %p, skb %p len %d priority %u", chan, skb, skb->len,
610 skb->priority);
611
612 if (!test_bit(FLAG_FLUSHABLE, &chan->flags) &&
613 lmp_no_flush_capable(hcon->hdev))
614 flags = ACL_START_NO_FLUSH;
615 else
616 flags = ACL_START;
617
618 bt_cb(skb)->force_active = test_bit(FLAG_FORCE_ACTIVE, &chan->flags);
619 hci_send_acl(chan->conn->hchan, skb, flags);
620 }
621
622 static inline void l2cap_send_sframe(struct l2cap_chan *chan, u32 control)
623 {
624 struct sk_buff *skb;
625 struct l2cap_hdr *lh;
626 struct l2cap_conn *conn = chan->conn;
627 int count, hlen;
628
629 if (chan->state != BT_CONNECTED)
630 return;
631
632 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
633 hlen = L2CAP_EXT_HDR_SIZE;
634 else
635 hlen = L2CAP_ENH_HDR_SIZE;
636
637 if (chan->fcs == L2CAP_FCS_CRC16)
638 hlen += L2CAP_FCS_SIZE;
639
640 BT_DBG("chan %p, control 0x%8.8x", chan, control);
641
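/* S-frames carry no payload: the PDU is just the L2CAP header, the control field and an optional FCS. */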
642 count = min_t(unsigned int, conn->mtu, hlen);
643
644 control |= __set_sframe(chan);
645
646 if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state))
647 control |= __set_ctrl_final(chan);
648
649 if (test_and_clear_bit(CONN_SEND_PBIT, &chan->conn_state))
650 control |= __set_ctrl_poll(chan);
651
652 skb = bt_skb_alloc(count, GFP_ATOMIC);
653 if (!skb)
654 return;
655
656 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
657 lh->len = cpu_to_le16(hlen - L2CAP_HDR_SIZE);
658 lh->cid = cpu_to_le16(chan->dcid);
659
660 __put_control(chan, control, skb_put(skb, __ctrl_size(chan)));
661
662 if (chan->fcs == L2CAP_FCS_CRC16) {
663 u16 fcs = crc16(0, (u8 *)lh, count - L2CAP_FCS_SIZE);
664 put_unaligned_le16(fcs, skb_put(skb, L2CAP_FCS_SIZE));
665 }
666
667 skb->priority = HCI_PRIO_MAX;
668 l2cap_do_send(chan, skb);
669 }
670
671 static inline void l2cap_send_rr_or_rnr(struct l2cap_chan *chan, u32 control)
672 {
673 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
674 control |= __set_ctrl_super(chan, L2CAP_SUPER_RNR);
675 set_bit(CONN_RNR_SENT, &chan->conn_state);
676 } else
677 control |= __set_ctrl_super(chan, L2CAP_SUPER_RR);
678
679 control |= __set_reqseq(chan, chan->buffer_seq);
680
681 l2cap_send_sframe(chan, control);
682 }
683
684 static inline int __l2cap_no_conn_pending(struct l2cap_chan *chan)
685 {
686 return !test_bit(CONF_CONNECT_PEND, &chan->conf_state);
687 }
688
689 static void l2cap_do_start(struct l2cap_chan *chan)
690 {
691 struct l2cap_conn *conn = chan->conn;
692
693 if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) {
694 if (!(conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE))
695 return;
696
697 if (l2cap_chan_check_security(chan) &&
698 __l2cap_no_conn_pending(chan)) {
699 struct l2cap_conn_req req;
700 req.scid = cpu_to_le16(chan->scid);
701 req.psm = chan->psm;
702
703 chan->ident = l2cap_get_ident(conn);
704 set_bit(CONF_CONNECT_PEND, &chan->conf_state);
705
706 l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_REQ,
707 sizeof(req), &req);
708 }
709 } else {
710 struct l2cap_info_req req;
711 req.type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
712
713 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
714 conn->info_ident = l2cap_get_ident(conn);
715
716 schedule_delayed_work(&conn->info_work,
717 msecs_to_jiffies(L2CAP_INFO_TIMEOUT));
718
719 l2cap_send_cmd(conn, conn->info_ident,
720 L2CAP_INFO_REQ, sizeof(req), &req);
721 }
722 }
723
724 static inline int l2cap_mode_supported(__u8 mode, __u32 feat_mask)
725 {
726 u32 local_feat_mask = l2cap_feat_mask;
727 if (!disable_ertm)
728 local_feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING;
729
730 switch (mode) {
731 case L2CAP_MODE_ERTM:
732 return L2CAP_FEAT_ERTM & feat_mask & local_feat_mask;
733 case L2CAP_MODE_STREAMING:
734 return L2CAP_FEAT_STREAMING & feat_mask & local_feat_mask;
735 default:
736 return 0x00;
737 }
738 }
739
740 static void l2cap_send_disconn_req(struct l2cap_conn *conn, struct l2cap_chan *chan, int err)
741 {
742 struct sock *sk;
743 struct l2cap_disconn_req req;
744
745 if (!conn)
746 return;
747
748 sk = chan->sk;
749
750 if (chan->mode == L2CAP_MODE_ERTM) {
751 __clear_retrans_timer(chan);
752 __clear_monitor_timer(chan);
753 __clear_ack_timer(chan);
754 }
755
756 req.dcid = cpu_to_le16(chan->dcid);
757 req.scid = cpu_to_le16(chan->scid);
758 l2cap_send_cmd(conn, l2cap_get_ident(conn),
759 L2CAP_DISCONN_REQ, sizeof(req), &req);
760
761 l2cap_state_change(chan, BT_DISCONN);
762 sk->sk_err = err;
763 }
764
765 /* ---- L2CAP connections ---- */
766 static void l2cap_conn_start(struct l2cap_conn *conn)
767 {
768 struct l2cap_chan *chan;
769
770 BT_DBG("conn %p", conn);
771
772 rcu_read_lock();
773
774 list_for_each_entry_rcu(chan, &conn->chan_l, list) {
775 struct sock *sk = chan->sk;
776
777 bh_lock_sock(sk);
778
779 if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
780 bh_unlock_sock(sk);
781 continue;
782 }
783
784 if (chan->state == BT_CONNECT) {
785 struct l2cap_conn_req req;
786
787 if (!l2cap_chan_check_security(chan) ||
788 !__l2cap_no_conn_pending(chan)) {
789 bh_unlock_sock(sk);
790 continue;
791 }
792
793 if (!l2cap_mode_supported(chan->mode, conn->feat_mask)
794 && test_bit(CONF_STATE2_DEVICE,
795 &chan->conf_state)) {
796 /* l2cap_chan_close() calls list_del(chan)
797 * so release the lock */
798 l2cap_chan_close(chan, ECONNRESET);
799 bh_unlock_sock(sk);
800 continue;
801 }
802
803 req.scid = cpu_to_le16(chan->scid);
804 req.psm = chan->psm;
805
806 chan->ident = l2cap_get_ident(conn);
807 set_bit(CONF_CONNECT_PEND, &chan->conf_state);
808
809 l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_REQ,
810 sizeof(req), &req);
811
812 } else if (chan->state == BT_CONNECT2) {
813 struct l2cap_conn_rsp rsp;
814 char buf[128];
815 rsp.scid = cpu_to_le16(chan->dcid);
816 rsp.dcid = cpu_to_le16(chan->scid);
817
818 if (l2cap_chan_check_security(chan)) {
819 if (bt_sk(sk)->defer_setup) {
820 struct sock *parent = bt_sk(sk)->parent;
821 rsp.result = cpu_to_le16(L2CAP_CR_PEND);
822 rsp.status = cpu_to_le16(L2CAP_CS_AUTHOR_PEND);
823 if (parent)
824 parent->sk_data_ready(parent, 0);
825
826 } else {
827 l2cap_state_change(chan, BT_CONFIG);
828 rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
829 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
830 }
831 } else {
832 rsp.result = cpu_to_le16(L2CAP_CR_PEND);
833 rsp.status = cpu_to_le16(L2CAP_CS_AUTHEN_PEND);
834 }
835
836 l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
837 sizeof(rsp), &rsp);
838
839 if (test_bit(CONF_REQ_SENT, &chan->conf_state) ||
840 rsp.result != L2CAP_CR_SUCCESS) {
841 bh_unlock_sock(sk);
842 continue;
843 }
844
845 set_bit(CONF_REQ_SENT, &chan->conf_state);
846 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
847 l2cap_build_conf_req(chan, buf), buf);
848 chan->num_conf_req++;
849 }
850
851 bh_unlock_sock(sk);
852 }
853
854 rcu_read_unlock();
855 }
856
857 /* Find socket with cid and source bdaddr.
858 * Returns closest match.
859 */
860 static struct l2cap_chan *l2cap_global_chan_by_scid(int state, __le16 cid, bdaddr_t *src)
861 {
862 struct l2cap_chan *c, *c1 = NULL;
863
864 read_lock(&chan_list_lock);
865
866 list_for_each_entry(c, &chan_list, global_l) {
867 struct sock *sk = c->sk;
868
869 if (state && c->state != state)
870 continue;
871
872 if (c->scid == cid) {
873 /* Exact match. */
874 if (!bacmp(&bt_sk(sk)->src, src)) {
875 read_unlock(&chan_list_lock);
876 return c;
877 }
878
879 /* Closest match */
880 if (!bacmp(&bt_sk(sk)->src, BDADDR_ANY))
881 c1 = c;
882 }
883 }
884
885 read_unlock(&chan_list_lock);
886
887 return c1;
888 }
889
890 static void l2cap_le_conn_ready(struct l2cap_conn *conn)
891 {
892 struct sock *parent, *sk;
893 struct l2cap_chan *chan, *pchan;
894
895 BT_DBG("");
896
897 /* Check if we have a socket listening on this CID */
898 pchan = l2cap_global_chan_by_scid(BT_LISTEN, L2CAP_CID_LE_DATA,
899 conn->src);
900 if (!pchan)
901 return;
902
903 parent = pchan->sk;
904
905 lock_sock(parent);
906
907 /* Check for backlog size */
908 if (sk_acceptq_is_full(parent)) {
909 BT_DBG("backlog full %d", parent->sk_ack_backlog);
910 goto clean;
911 }
912
913 chan = pchan->ops->new_connection(pchan->data);
914 if (!chan)
915 goto clean;
916
917 sk = chan->sk;
918
919 hci_conn_hold(conn->hcon);
920
921 bacpy(&bt_sk(sk)->src, conn->src);
922 bacpy(&bt_sk(sk)->dst, conn->dst);
923
924 bt_accept_enqueue(parent, sk);
925
926 l2cap_chan_add(conn, chan);
927
928 __set_chan_timer(chan, sk->sk_sndtimeo);
929
930 l2cap_state_change(chan, BT_CONNECTED);
931 parent->sk_data_ready(parent, 0);
932
933 clean:
934 release_sock(parent);
935 }
936
937 static void l2cap_chan_ready(struct sock *sk)
938 {
939 struct l2cap_chan *chan = l2cap_pi(sk)->chan;
940 struct sock *parent = bt_sk(sk)->parent;
941
942 BT_DBG("sk %p, parent %p", sk, parent);
943
944 chan->conf_state = 0;
945 __clear_chan_timer(chan);
946
947 l2cap_state_change(chan, BT_CONNECTED);
948 sk->sk_state_change(sk);
949
950 if (parent)
951 parent->sk_data_ready(parent, 0);
952 }
953
954 static void l2cap_conn_ready(struct l2cap_conn *conn)
955 {
956 struct l2cap_chan *chan;
957
958 BT_DBG("conn %p", conn);
959
960 if (!conn->hcon->out && conn->hcon->type == LE_LINK)
961 l2cap_le_conn_ready(conn);
962
963 if (conn->hcon->out && conn->hcon->type == LE_LINK)
964 smp_conn_security(conn, conn->hcon->pending_sec_level);
965
966 rcu_read_lock();
967
968 list_for_each_entry_rcu(chan, &conn->chan_l, list) {
969 struct sock *sk = chan->sk;
970
971 bh_lock_sock(sk);
972
973 if (conn->hcon->type == LE_LINK) {
974 if (smp_conn_security(conn, chan->sec_level))
975 l2cap_chan_ready(sk);
976
977 } else if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
978 __clear_chan_timer(chan);
979 l2cap_state_change(chan, BT_CONNECTED);
980 sk->sk_state_change(sk);
981
982 } else if (chan->state == BT_CONNECT)
983 l2cap_do_start(chan);
984
985 bh_unlock_sock(sk);
986 }
987
988 rcu_read_unlock();
989 }
990
991 /* Notify sockets that we cannot guarantee reliability anymore */
992 static void l2cap_conn_unreliable(struct l2cap_conn *conn, int err)
993 {
994 struct l2cap_chan *chan;
995
996 BT_DBG("conn %p", conn);
997
998 rcu_read_lock();
999
1000 list_for_each_entry_rcu(chan, &conn->chan_l, list) {
1001 struct sock *sk = chan->sk;
1002
1003 if (test_bit(FLAG_FORCE_RELIABLE, &chan->flags))
1004 sk->sk_err = err;
1005 }
1006
1007 rcu_read_unlock();
1008 }
1009
1010 static void l2cap_info_timeout(struct work_struct *work)
1011 {
1012 struct l2cap_conn *conn = container_of(work, struct l2cap_conn,
1013 info_work.work);
1014
1015 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
1016 conn->info_ident = 0;
1017
1018 l2cap_conn_start(conn);
1019 }
1020
1021 static void l2cap_conn_del(struct hci_conn *hcon, int err)
1022 {
1023 struct l2cap_conn *conn = hcon->l2cap_data;
1024 struct l2cap_chan *chan, *l;
1025 struct sock *sk;
1026
1027 if (!conn)
1028 return;
1029
1030 BT_DBG("hcon %p conn %p, err %d", hcon, conn, err);
1031
1032 kfree_skb(conn->rx_skb);
1033
1034 /* Kill channels */
1035 list_for_each_entry_safe(chan, l, &conn->chan_l, list) {
1036 sk = chan->sk;
1037 lock_sock(sk);
1038 l2cap_chan_del(chan, err);
1039 release_sock(sk);
1040 chan->ops->close(chan->data);
1041 }
1042
1043 hci_chan_del(conn->hchan);
1044
1045 if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT)
1046 cancel_delayed_work_sync(&conn->info_work);
1047
1048 if (test_and_clear_bit(HCI_CONN_LE_SMP_PEND, &hcon->pend)) {
1049 del_timer(&conn->security_timer);
1050 smp_chan_destroy(conn);
1051 }
1052
1053 hcon->l2cap_data = NULL;
1054 kfree(conn);
1055 }
1056
1057 static void security_timeout(unsigned long arg)
1058 {
1059 struct l2cap_conn *conn = (void *) arg;
1060
1061 l2cap_conn_del(conn->hcon, ETIMEDOUT);
1062 }
1063
1064 static struct l2cap_conn *l2cap_conn_add(struct hci_conn *hcon, u8 status)
1065 {
1066 struct l2cap_conn *conn = hcon->l2cap_data;
1067 struct hci_chan *hchan;
1068
1069 if (conn || status)
1070 return conn;
1071
1072 hchan = hci_chan_create(hcon);
1073 if (!hchan)
1074 return NULL;
1075
1076 conn = kzalloc(sizeof(struct l2cap_conn), GFP_ATOMIC);
1077 if (!conn) {
1078 hci_chan_del(hchan);
1079 return NULL;
1080 }
1081
1082 hcon->l2cap_data = conn;
1083 conn->hcon = hcon;
1084 conn->hchan = hchan;
1085
1086 BT_DBG("hcon %p conn %p hchan %p", hcon, conn, hchan);
1087
1088 if (hcon->hdev->le_mtu && hcon->type == LE_LINK)
1089 conn->mtu = hcon->hdev->le_mtu;
1090 else
1091 conn->mtu = hcon->hdev->acl_mtu;
1092
1093 conn->src = &hcon->hdev->bdaddr;
1094 conn->dst = &hcon->dst;
1095
1096 conn->feat_mask = 0;
1097
1098 spin_lock_init(&conn->lock);
1099
1100 INIT_LIST_HEAD(&conn->chan_l);
1101
1102 if (hcon->type == LE_LINK)
1103 setup_timer(&conn->security_timer, security_timeout,
1104 (unsigned long) conn);
1105 else
1106 INIT_DELAYED_WORK(&conn->info_work, l2cap_info_timeout);
1107
1108 conn->disc_reason = HCI_ERROR_REMOTE_USER_TERM;
1109
1110 return conn;
1111 }
1112
1113 /* ---- Socket interface ---- */
1114
1115 /* Find socket with psm and source bdaddr.
1116 * Returns closest match.
1117 */
1118 static struct l2cap_chan *l2cap_global_chan_by_psm(int state, __le16 psm, bdaddr_t *src)
1119 {
1120 struct l2cap_chan *c, *c1 = NULL;
1121
1122 read_lock(&chan_list_lock);
1123
1124 list_for_each_entry(c, &chan_list, global_l) {
1125 struct sock *sk = c->sk;
1126
1127 if (state && c->state != state)
1128 continue;
1129
1130 if (c->psm == psm) {
1131 /* Exact match. */
1132 if (!bacmp(&bt_sk(sk)->src, src)) {
1133 read_unlock(&chan_list_lock);
1134 return c;
1135 }
1136
1137 /* Closest match */
1138 if (!bacmp(&bt_sk(sk)->src, BDADDR_ANY))
1139 c1 = c;
1140 }
1141 }
1142
1143 read_unlock(&chan_list_lock);
1144
1145 return c1;
1146 }
1147
1148 inline int l2cap_chan_connect(struct l2cap_chan *chan, __le16 psm, u16 cid, bdaddr_t *dst)
1149 {
1150 struct sock *sk = chan->sk;
1151 bdaddr_t *src = &bt_sk(sk)->src;
1152 struct l2cap_conn *conn;
1153 struct hci_conn *hcon;
1154 struct hci_dev *hdev;
1155 __u8 auth_type;
1156 int err;
1157
1158 BT_DBG("%s -> %s psm 0x%2.2x", batostr(src), batostr(dst),
1159 chan->psm);
1160
1161 hdev = hci_get_route(dst, src);
1162 if (!hdev)
1163 return -EHOSTUNREACH;
1164
1165 hci_dev_lock(hdev);
1166
1167 lock_sock(sk);
1168
1169 /* PSM must be odd and lsb of upper byte must be 0 */
1170 if ((__le16_to_cpu(psm) & 0x0101) != 0x0001 && !cid &&
1171 chan->chan_type != L2CAP_CHAN_RAW) {
1172 err = -EINVAL;
1173 goto done;
1174 }
1175
1176 if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED && !(psm || cid)) {
1177 err = -EINVAL;
1178 goto done;
1179 }
1180
1181 switch (chan->mode) {
1182 case L2CAP_MODE_BASIC:
1183 break;
1184 case L2CAP_MODE_ERTM:
1185 case L2CAP_MODE_STREAMING:
1186 if (!disable_ertm)
1187 break;
1188 /* fall through */
1189 default:
1190 err = -ENOTSUPP;
1191 goto done;
1192 }
1193
1194 switch (sk->sk_state) {
1195 case BT_CONNECT:
1196 case BT_CONNECT2:
1197 case BT_CONFIG:
1198 /* Already connecting */
1199 err = 0;
1200 goto done;
1201
1202 case BT_CONNECTED:
1203 /* Already connected */
1204 err = -EISCONN;
1205 goto done;
1206
1207 case BT_OPEN:
1208 case BT_BOUND:
1209 /* Can connect */
1210 break;
1211
1212 default:
1213 err = -EBADFD;
1214 goto done;
1215 }
1216
1217 /* Set destination address and psm */
1218 bacpy(&bt_sk(sk)->dst, dst);
1219 chan->psm = psm;
1220 chan->dcid = cid;
1221
1222 auth_type = l2cap_get_auth_type(chan);
1223
1224 if (chan->dcid == L2CAP_CID_LE_DATA)
1225 hcon = hci_connect(hdev, LE_LINK, dst,
1226 chan->sec_level, auth_type);
1227 else
1228 hcon = hci_connect(hdev, ACL_LINK, dst,
1229 chan->sec_level, auth_type);
1230
1231 if (IS_ERR(hcon)) {
1232 err = PTR_ERR(hcon);
1233 goto done;
1234 }
1235
1236 conn = l2cap_conn_add(hcon, 0);
1237 if (!conn) {
1238 hci_conn_put(hcon);
1239 err = -ENOMEM;
1240 goto done;
1241 }
1242
1243 /* Update source addr of the socket */
1244 bacpy(src, conn->src);
1245
1246 l2cap_chan_add(conn, chan);
1247
1248 l2cap_state_change(chan, BT_CONNECT);
1249 __set_chan_timer(chan, sk->sk_sndtimeo);
1250
1251 if (hcon->state == BT_CONNECTED) {
1252 if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
1253 __clear_chan_timer(chan);
1254 if (l2cap_chan_check_security(chan))
1255 l2cap_state_change(chan, BT_CONNECTED);
1256 } else
1257 l2cap_do_start(chan);
1258 }
1259
1260 err = 0;
1261
1262 done:
1263 hci_dev_unlock(hdev);
1264 hci_dev_put(hdev);
1265 return err;
1266 }
1267
1268 int __l2cap_wait_ack(struct sock *sk)
1269 {
1270 struct l2cap_chan *chan = l2cap_pi(sk)->chan;
1271 DECLARE_WAITQUEUE(wait, current);
1272 int err = 0;
1273 int timeo = HZ/5;
1274
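/* Sleep in short intervals until every outstanding frame has been acked, the socket reports an error or a signal is pending. */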
1275 add_wait_queue(sk_sleep(sk), &wait);
1276 set_current_state(TASK_INTERRUPTIBLE);
1277 while (chan->unacked_frames > 0 && chan->conn) {
1278 if (!timeo)
1279 timeo = HZ/5;
1280
1281 if (signal_pending(current)) {
1282 err = sock_intr_errno(timeo);
1283 break;
1284 }
1285
1286 release_sock(sk);
1287 timeo = schedule_timeout(timeo);
1288 lock_sock(sk);
1289 set_current_state(TASK_INTERRUPTIBLE);
1290
1291 err = sock_error(sk);
1292 if (err)
1293 break;
1294 }
1295 set_current_state(TASK_RUNNING);
1296 remove_wait_queue(sk_sleep(sk), &wait);
1297 return err;
1298 }
1299
1300 static void l2cap_monitor_timeout(struct work_struct *work)
1301 {
1302 struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
1303 monitor_timer.work);
1304 struct sock *sk = chan->sk;
1305
1306 BT_DBG("chan %p", chan);
1307
1308 lock_sock(sk);
1309 if (chan->retry_count >= chan->remote_max_tx) {
1310 l2cap_send_disconn_req(chan->conn, chan, ECONNABORTED);
1311 release_sock(sk);
1312 return;
1313 }
1314
1315 chan->retry_count++;
1316 __set_monitor_timer(chan);
1317
1318 l2cap_send_rr_or_rnr(chan, L2CAP_CTRL_POLL);
1319 release_sock(sk);
1320 }
1321
1322 static void l2cap_retrans_timeout(struct work_struct *work)
1323 {
1324 struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
1325 retrans_timer.work);
1326 struct sock *sk = chan->sk;
1327
1328 BT_DBG("chan %p", chan);
1329
1330 lock_sock(sk);
1331 chan->retry_count = 1;
1332 __set_monitor_timer(chan);
1333
1334 set_bit(CONN_WAIT_F, &chan->conn_state);
1335
1336 l2cap_send_rr_or_rnr(chan, L2CAP_CTRL_POLL);
1337 release_sock(sk);
1338 }
1339
1340 static void l2cap_drop_acked_frames(struct l2cap_chan *chan)
1341 {
1342 struct sk_buff *skb;
1343
1344 while ((skb = skb_peek(&chan->tx_q)) &&
1345 chan->unacked_frames) {
1346 if (bt_cb(skb)->tx_seq == chan->expected_ack_seq)
1347 break;
1348
1349 skb = skb_dequeue(&chan->tx_q);
1350 kfree_skb(skb);
1351
1352 chan->unacked_frames--;
1353 }
1354
1355 if (!chan->unacked_frames)
1356 __clear_retrans_timer(chan);
1357 }
1358
1359 static void l2cap_streaming_send(struct l2cap_chan *chan)
1360 {
1361 struct sk_buff *skb;
1362 u32 control;
1363 u16 fcs;
1364
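/* Streaming mode: number each frame, append the FCS if needed and send it immediately; nothing is kept for retransmission. */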
1365 while ((skb = skb_dequeue(&chan->tx_q))) {
1366 control = __get_control(chan, skb->data + L2CAP_HDR_SIZE);
1367 control |= __set_txseq(chan, chan->next_tx_seq);
1368 __put_control(chan, control, skb->data + L2CAP_HDR_SIZE);
1369
1370 if (chan->fcs == L2CAP_FCS_CRC16) {
1371 fcs = crc16(0, (u8 *)skb->data,
1372 skb->len - L2CAP_FCS_SIZE);
1373 put_unaligned_le16(fcs,
1374 skb->data + skb->len - L2CAP_FCS_SIZE);
1375 }
1376
1377 l2cap_do_send(chan, skb);
1378
1379 chan->next_tx_seq = __next_seq(chan, chan->next_tx_seq);
1380 }
1381 }
1382
1383 static void l2cap_retransmit_one_frame(struct l2cap_chan *chan, u16 tx_seq)
1384 {
1385 struct sk_buff *skb, *tx_skb;
1386 u16 fcs;
1387 u32 control;
1388
1389 skb = skb_peek(&chan->tx_q);
1390 if (!skb)
1391 return;
1392
1393 while (bt_cb(skb)->tx_seq != tx_seq) {
1394 if (skb_queue_is_last(&chan->tx_q, skb))
1395 return;
1396
1397 skb = skb_queue_next(&chan->tx_q, skb);
1398 }
1399
1400 if (chan->remote_max_tx &&
1401 bt_cb(skb)->retries == chan->remote_max_tx) {
1402 l2cap_send_disconn_req(chan->conn, chan, ECONNABORTED);
1403 return;
1404 }
1405
1406 tx_skb = skb_clone(skb, GFP_ATOMIC);
1407 bt_cb(skb)->retries++;
1408
1409 control = __get_control(chan, tx_skb->data + L2CAP_HDR_SIZE);
1410 control &= __get_sar_mask(chan);
1411
1412 if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state))
1413 control |= __set_ctrl_final(chan);
1414
1415 control |= __set_reqseq(chan, chan->buffer_seq);
1416 control |= __set_txseq(chan, tx_seq);
1417
1418 __put_control(chan, control, tx_skb->data + L2CAP_HDR_SIZE);
1419
1420 if (chan->fcs == L2CAP_FCS_CRC16) {
1421 fcs = crc16(0, (u8 *)tx_skb->data,
1422 tx_skb->len - L2CAP_FCS_SIZE);
1423 put_unaligned_le16(fcs,
1424 tx_skb->data + tx_skb->len - L2CAP_FCS_SIZE);
1425 }
1426
1427 l2cap_do_send(chan, tx_skb);
1428 }
1429
1430 static int l2cap_ertm_send(struct l2cap_chan *chan)
1431 {
1432 struct sk_buff *skb, *tx_skb;
1433 u16 fcs;
1434 u32 control;
1435 int nsent = 0;
1436
1437 if (chan->state != BT_CONNECTED)
1438 return -ENOTCONN;
1439
1440 while ((skb = chan->tx_send_head) && (!l2cap_tx_window_full(chan))) {
1441
1442 if (chan->remote_max_tx &&
1443 bt_cb(skb)->retries == chan->remote_max_tx) {
1444 l2cap_send_disconn_req(chan->conn, chan, ECONNABORTED);
1445 break;
1446 }
1447
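/* Transmit a clone; the original skb stays on tx_q so it can be retransmitted until it is acknowledged. */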
1448 tx_skb = skb_clone(skb, GFP_ATOMIC);
1449
1450 bt_cb(skb)->retries++;
1451
1452 control = __get_control(chan, tx_skb->data + L2CAP_HDR_SIZE);
1453 control &= __get_sar_mask(chan);
1454
1455 if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state))
1456 control |= __set_ctrl_final(chan);
1457
1458 control |= __set_reqseq(chan, chan->buffer_seq);
1459 control |= __set_txseq(chan, chan->next_tx_seq);
1460
1461 __put_control(chan, control, tx_skb->data + L2CAP_HDR_SIZE);
1462
1463 if (chan->fcs == L2CAP_FCS_CRC16) {
1464 fcs = crc16(0, (u8 *)skb->data,
1465 tx_skb->len - L2CAP_FCS_SIZE);
1466 put_unaligned_le16(fcs, skb->data +
1467 tx_skb->len - L2CAP_FCS_SIZE);
1468 }
1469
1470 l2cap_do_send(chan, tx_skb);
1471
1472 __set_retrans_timer(chan);
1473
1474 bt_cb(skb)->tx_seq = chan->next_tx_seq;
1475
1476 chan->next_tx_seq = __next_seq(chan, chan->next_tx_seq);
1477
1478 if (bt_cb(skb)->retries == 1)
1479 chan->unacked_frames++;
1480
1481 chan->frames_sent++;
1482
1483 if (skb_queue_is_last(&chan->tx_q, skb))
1484 chan->tx_send_head = NULL;
1485 else
1486 chan->tx_send_head = skb_queue_next(&chan->tx_q, skb);
1487
1488 nsent++;
1489 }
1490
1491 return nsent;
1492 }
1493
1494 static int l2cap_retransmit_frames(struct l2cap_chan *chan)
1495 {
1496 int ret;
1497
1498 if (!skb_queue_empty(&chan->tx_q))
1499 chan->tx_send_head = chan->tx_q.next;
1500
1501 chan->next_tx_seq = chan->expected_ack_seq;
1502 ret = l2cap_ertm_send(chan);
1503 return ret;
1504 }
1505
1506 static void l2cap_send_ack(struct l2cap_chan *chan)
1507 {
1508 u32 control = 0;
1509
1510 control |= __set_reqseq(chan, chan->buffer_seq);
1511
1512 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
1513 control |= __set_ctrl_super(chan, L2CAP_SUPER_RNR);
1514 set_bit(CONN_RNR_SENT, &chan->conn_state);
1515 l2cap_send_sframe(chan, control);
1516 return;
1517 }
1518
1519 if (l2cap_ertm_send(chan) > 0)
1520 return;
1521
1522 control |= __set_ctrl_super(chan, L2CAP_SUPER_RR);
1523 l2cap_send_sframe(chan, control);
1524 }
1525
1526 static void l2cap_send_srejtail(struct l2cap_chan *chan)
1527 {
1528 struct srej_list *tail;
1529 u32 control;
1530
1531 control = __set_ctrl_super(chan, L2CAP_SUPER_SREJ);
1532 control |= __set_ctrl_final(chan);
1533
1534 tail = list_entry((&chan->srej_l)->prev, struct srej_list, list);
1535 control |= __set_reqseq(chan, tail->tx_seq);
1536
1537 l2cap_send_sframe(chan, control);
1538 }
1539
1540 static inline int l2cap_skbuff_fromiovec(struct sock *sk, struct msghdr *msg, int len, int count, struct sk_buff *skb)
1541 {
1542 struct l2cap_conn *conn = l2cap_pi(sk)->chan->conn;
1543 struct sk_buff **frag;
1544 int err, sent = 0;
1545
1546 if (memcpy_fromiovec(skb_put(skb, count), msg->msg_iov, count))
1547 return -EFAULT;
1548
1549 sent += count;
1550 len -= count;
1551
1552 /* Continuation fragments (no L2CAP header) */
1553 frag = &skb_shinfo(skb)->frag_list;
1554 while (len) {
1555 count = min_t(unsigned int, conn->mtu, len);
1556
1557 *frag = bt_skb_send_alloc(sk, count, msg->msg_flags & MSG_DONTWAIT, &err);
1558 if (!*frag)
1559 return err;
1560 if (memcpy_fromiovec(skb_put(*frag, count), msg->msg_iov, count))
1561 return -EFAULT;
1562
1563 (*frag)->priority = skb->priority;
1564
1565 sent += count;
1566 len -= count;
1567
1568 frag = &(*frag)->next;
1569 }
1570
1571 return sent;
1572 }
1573
1574 static struct sk_buff *l2cap_create_connless_pdu(struct l2cap_chan *chan,
1575 struct msghdr *msg, size_t len,
1576 u32 priority)
1577 {
1578 struct sock *sk = chan->sk;
1579 struct l2cap_conn *conn = chan->conn;
1580 struct sk_buff *skb;
1581 int err, count, hlen = L2CAP_HDR_SIZE + L2CAP_PSMLEN_SIZE;
1582 struct l2cap_hdr *lh;
1583
1584 BT_DBG("sk %p len %d priority %u", sk, (int)len, priority);
1585
1586 count = min_t(unsigned int, (conn->mtu - hlen), len);
1587 skb = bt_skb_send_alloc(sk, count + hlen,
1588 msg->msg_flags & MSG_DONTWAIT, &err);
1589 if (!skb)
1590 return ERR_PTR(err);
1591
1592 skb->priority = priority;
1593
1594 /* Create L2CAP header */
1595 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
1596 lh->cid = cpu_to_le16(chan->dcid);
1597 lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
1598 put_unaligned_le16(chan->psm, skb_put(skb, 2));
1599
1600 err = l2cap_skbuff_fromiovec(sk, msg, len, count, skb);
1601 if (unlikely(err < 0)) {
1602 kfree_skb(skb);
1603 return ERR_PTR(err);
1604 }
1605 return skb;
1606 }
1607
1608 static struct sk_buff *l2cap_create_basic_pdu(struct l2cap_chan *chan,
1609 struct msghdr *msg, size_t len,
1610 u32 priority)
1611 {
1612 struct sock *sk = chan->sk;
1613 struct l2cap_conn *conn = chan->conn;
1614 struct sk_buff *skb;
1615 int err, count, hlen = L2CAP_HDR_SIZE;
1616 struct l2cap_hdr *lh;
1617
1618 BT_DBG("sk %p len %d", sk, (int)len);
1619
1620 count = min_t(unsigned int, (conn->mtu - hlen), len);
1621 skb = bt_skb_send_alloc(sk, count + hlen,
1622 msg->msg_flags & MSG_DONTWAIT, &err);
1623 if (!skb)
1624 return ERR_PTR(err);
1625
1626 skb->priority = priority;
1627
1628 /* Create L2CAP header */
1629 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
1630 lh->cid = cpu_to_le16(chan->dcid);
1631 lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
1632
1633 err = l2cap_skbuff_fromiovec(sk, msg, len, count, skb);
1634 if (unlikely(err < 0)) {
1635 kfree_skb(skb);
1636 return ERR_PTR(err);
1637 }
1638 return skb;
1639 }
1640
1641 static struct sk_buff *l2cap_create_iframe_pdu(struct l2cap_chan *chan,
1642 struct msghdr *msg, size_t len,
1643 u32 control, u16 sdulen)
1644 {
1645 struct sock *sk = chan->sk;
1646 struct l2cap_conn *conn = chan->conn;
1647 struct sk_buff *skb;
1648 int err, count, hlen;
1649 struct l2cap_hdr *lh;
1650
1651 BT_DBG("sk %p len %d", sk, (int)len);
1652
1653 if (!conn)
1654 return ERR_PTR(-ENOTCONN);
1655
1656 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
1657 hlen = L2CAP_EXT_HDR_SIZE;
1658 else
1659 hlen = L2CAP_ENH_HDR_SIZE;
1660
1661 if (sdulen)
1662 hlen += L2CAP_SDULEN_SIZE;
1663
1664 if (chan->fcs == L2CAP_FCS_CRC16)
1665 hlen += L2CAP_FCS_SIZE;
1666
1667 count = min_t(unsigned int, (conn->mtu - hlen), len);
1668 skb = bt_skb_send_alloc(sk, count + hlen,
1669 msg->msg_flags & MSG_DONTWAIT, &err);
1670 if (!skb)
1671 return ERR_PTR(err);
1672
1673 /* Create L2CAP header */
1674 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
1675 lh->cid = cpu_to_le16(chan->dcid);
1676 lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
1677
1678 __put_control(chan, control, skb_put(skb, __ctrl_size(chan)));
1679
1680 if (sdulen)
1681 put_unaligned_le16(sdulen, skb_put(skb, L2CAP_SDULEN_SIZE));
1682
1683 err = l2cap_skbuff_fromiovec(sk, msg, len, count, skb);
1684 if (unlikely(err < 0)) {
1685 kfree_skb(skb);
1686 return ERR_PTR(err);
1687 }
1688
1689 if (chan->fcs == L2CAP_FCS_CRC16)
1690 put_unaligned_le16(0, skb_put(skb, L2CAP_FCS_SIZE));
1691
1692 bt_cb(skb)->retries = 0;
1693 return skb;
1694 }
1695
1696 static int l2cap_sar_segment_sdu(struct l2cap_chan *chan, struct msghdr *msg, size_t len)
1697 {
1698 struct sk_buff *skb;
1699 struct sk_buff_head sar_queue;
1700 u32 control;
1701 size_t size = 0;
1702
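/* Split the SDU into a START PDU (which carries the total SDU length), zero or more CONTINUE PDUs and a final END PDU, each limited to the remote MPS. */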
1703 skb_queue_head_init(&sar_queue);
1704 control = __set_ctrl_sar(chan, L2CAP_SAR_START);
1705 skb = l2cap_create_iframe_pdu(chan, msg, chan->remote_mps, control, len);
1706 if (IS_ERR(skb))
1707 return PTR_ERR(skb);
1708
1709 __skb_queue_tail(&sar_queue, skb);
1710 len -= chan->remote_mps;
1711 size += chan->remote_mps;
1712
1713 while (len > 0) {
1714 size_t buflen;
1715
1716 if (len > chan->remote_mps) {
1717 control = __set_ctrl_sar(chan, L2CAP_SAR_CONTINUE);
1718 buflen = chan->remote_mps;
1719 } else {
1720 control = __set_ctrl_sar(chan, L2CAP_SAR_END);
1721 buflen = len;
1722 }
1723
1724 skb = l2cap_create_iframe_pdu(chan, msg, buflen, control, 0);
1725 if (IS_ERR(skb)) {
1726 skb_queue_purge(&sar_queue);
1727 return PTR_ERR(skb);
1728 }
1729
1730 __skb_queue_tail(&sar_queue, skb);
1731 len -= buflen;
1732 size += buflen;
1733 }
1734 skb_queue_splice_tail(&sar_queue, &chan->tx_q);
1735 if (chan->tx_send_head == NULL)
1736 chan->tx_send_head = sar_queue.next;
1737
1738 return size;
1739 }
1740
1741 int l2cap_chan_send(struct l2cap_chan *chan, struct msghdr *msg, size_t len,
1742 u32 priority)
1743 {
1744 struct sk_buff *skb;
1745 u32 control;
1746 int err;
1747
1748 /* Connectionless channel */
1749 if (chan->chan_type == L2CAP_CHAN_CONN_LESS) {
1750 skb = l2cap_create_connless_pdu(chan, msg, len, priority);
1751 if (IS_ERR(skb))
1752 return PTR_ERR(skb);
1753
1754 l2cap_do_send(chan, skb);
1755 return len;
1756 }
1757
1758 switch (chan->mode) {
1759 case L2CAP_MODE_BASIC:
1760 /* Check outgoing MTU */
1761 if (len > chan->omtu)
1762 return -EMSGSIZE;
1763
1764 /* Create a basic PDU */
1765 skb = l2cap_create_basic_pdu(chan, msg, len, priority);
1766 if (IS_ERR(skb))
1767 return PTR_ERR(skb);
1768
1769 l2cap_do_send(chan, skb);
1770 err = len;
1771 break;
1772
1773 case L2CAP_MODE_ERTM:
1774 case L2CAP_MODE_STREAMING:
1775 /* Entire SDU fits into one PDU */
1776 if (len <= chan->remote_mps) {
1777 control = __set_ctrl_sar(chan, L2CAP_SAR_UNSEGMENTED);
1778 skb = l2cap_create_iframe_pdu(chan, msg, len, control,
1779 0);
1780 if (IS_ERR(skb))
1781 return PTR_ERR(skb);
1782
1783 __skb_queue_tail(&chan->tx_q, skb);
1784
1785 if (chan->tx_send_head == NULL)
1786 chan->tx_send_head = skb;
1787
1788 } else {
1789 /* Segment SDU into multiple PDUs */
1790 err = l2cap_sar_segment_sdu(chan, msg, len);
1791 if (err < 0)
1792 return err;
1793 }
1794
1795 if (chan->mode == L2CAP_MODE_STREAMING) {
1796 l2cap_streaming_send(chan);
1797 err = len;
1798 break;
1799 }
1800
1801 if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state) &&
1802 test_bit(CONN_WAIT_F, &chan->conn_state)) {
1803 err = len;
1804 break;
1805 }
1806
1807 err = l2cap_ertm_send(chan);
1808 if (err >= 0)
1809 err = len;
1810
1811 break;
1812
1813 default:
1814 BT_DBG("bad state %1.1x", chan->mode);
1815 err = -EBADFD;
1816 }
1817
1818 return err;
1819 }
1820
1821 /* Copy frame to all raw sockets on that connection */
1822 static void l2cap_raw_recv(struct l2cap_conn *conn, struct sk_buff *skb)
1823 {
1824 struct sk_buff *nskb;
1825 struct l2cap_chan *chan;
1826
1827 BT_DBG("conn %p", conn);
1828
1829 rcu_read_lock();
1830
1831 list_for_each_entry_rcu(chan, &conn->chan_l, list) {
1832 struct sock *sk = chan->sk;
1833 if (chan->chan_type != L2CAP_CHAN_RAW)
1834 continue;
1835
1836 /* Don't send frame to the socket it came from */
1837 if (skb->sk == sk)
1838 continue;
1839 nskb = skb_clone(skb, GFP_ATOMIC);
1840 if (!nskb)
1841 continue;
1842
1843 if (chan->ops->recv(chan->data, nskb))
1844 kfree_skb(nskb);
1845 }
1846
1847 rcu_read_unlock();
1848 }
1849
1850 /* ---- L2CAP signalling commands ---- */
1851 static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
1852 u8 code, u8 ident, u16 dlen, void *data)
1853 {
1854 struct sk_buff *skb, **frag;
1855 struct l2cap_cmd_hdr *cmd;
1856 struct l2cap_hdr *lh;
1857 int len, count;
1858
1859 BT_DBG("conn %p, code 0x%2.2x, ident 0x%2.2x, len %d",
1860 conn, code, ident, dlen);
1861
1862 len = L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE + dlen;
1863 count = min_t(unsigned int, conn->mtu, len);
1864
1865 skb = bt_skb_alloc(count, GFP_ATOMIC);
1866 if (!skb)
1867 return NULL;
1868
1869 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
1870 lh->len = cpu_to_le16(L2CAP_CMD_HDR_SIZE + dlen);
1871
1872 if (conn->hcon->type == LE_LINK)
1873 lh->cid = cpu_to_le16(L2CAP_CID_LE_SIGNALING);
1874 else
1875 lh->cid = cpu_to_le16(L2CAP_CID_SIGNALING);
1876
1877 cmd = (struct l2cap_cmd_hdr *) skb_put(skb, L2CAP_CMD_HDR_SIZE);
1878 cmd->code = code;
1879 cmd->ident = ident;
1880 cmd->len = cpu_to_le16(dlen);
1881
1882 if (dlen) {
1883 count -= L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE;
1884 memcpy(skb_put(skb, count), data, count);
1885 data += count;
1886 }
1887
1888 len -= skb->len;
1889
1890 /* Continuation fragments (no L2CAP header) */
1891 frag = &skb_shinfo(skb)->frag_list;
1892 while (len) {
1893 count = min_t(unsigned int, conn->mtu, len);
1894
1895 *frag = bt_skb_alloc(count, GFP_ATOMIC);
1896 if (!*frag)
1897 goto fail;
1898
1899 memcpy(skb_put(*frag, count), data, count);
1900
1901 len -= count;
1902 data += count;
1903
1904 frag = &(*frag)->next;
1905 }
1906
1907 return skb;
1908
1909 fail:
1910 kfree_skb(skb);
1911 return NULL;
1912 }
1913
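/* Configuration options are type/length/value triplets; 1, 2 and 4 byte values are returned by value, anything longer as a pointer into the request. */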
1914 static inline int l2cap_get_conf_opt(void **ptr, int *type, int *olen, unsigned long *val)
1915 {
1916 struct l2cap_conf_opt *opt = *ptr;
1917 int len;
1918
1919 len = L2CAP_CONF_OPT_SIZE + opt->len;
1920 *ptr += len;
1921
1922 *type = opt->type;
1923 *olen = opt->len;
1924
1925 switch (opt->len) {
1926 case 1:
1927 *val = *((u8 *) opt->val);
1928 break;
1929
1930 case 2:
1931 *val = get_unaligned_le16(opt->val);
1932 break;
1933
1934 case 4:
1935 *val = get_unaligned_le32(opt->val);
1936 break;
1937
1938 default:
1939 *val = (unsigned long) opt->val;
1940 break;
1941 }
1942
1943 BT_DBG("type 0x%2.2x len %d val 0x%lx", *type, opt->len, *val);
1944 return len;
1945 }
1946
1947 static void l2cap_add_conf_opt(void **ptr, u8 type, u8 len, unsigned long val)
1948 {
1949 struct l2cap_conf_opt *opt = *ptr;
1950
1951 BT_DBG("type 0x%2.2x len %d val 0x%lx", type, len, val);
1952
1953 opt->type = type;
1954 opt->len = len;
1955
1956 switch (len) {
1957 case 1:
1958 *((u8 *) opt->val) = val;
1959 break;
1960
1961 case 2:
1962 put_unaligned_le16(val, opt->val);
1963 break;
1964
1965 case 4:
1966 put_unaligned_le32(val, opt->val);
1967 break;
1968
1969 default:
1970 memcpy(opt->val, (void *) val, len);
1971 break;
1972 }
1973
1974 *ptr += L2CAP_CONF_OPT_SIZE + len;
1975 }
1976
1977 static void l2cap_add_opt_efs(void **ptr, struct l2cap_chan *chan)
1978 {
1979 struct l2cap_conf_efs efs;
1980
1981 switch (chan->mode) {
1982 case L2CAP_MODE_ERTM:
1983 efs.id = chan->local_id;
1984 efs.stype = chan->local_stype;
1985 efs.msdu = cpu_to_le16(chan->local_msdu);
1986 efs.sdu_itime = cpu_to_le32(chan->local_sdu_itime);
1987 efs.acc_lat = cpu_to_le32(L2CAP_DEFAULT_ACC_LAT);
1988 efs.flush_to = cpu_to_le32(L2CAP_DEFAULT_FLUSH_TO);
1989 break;
1990
1991 case L2CAP_MODE_STREAMING:
1992 efs.id = 1;
1993 efs.stype = L2CAP_SERV_BESTEFFORT;
1994 efs.msdu = cpu_to_le16(chan->local_msdu);
1995 efs.sdu_itime = cpu_to_le32(chan->local_sdu_itime);
1996 efs.acc_lat = 0;
1997 efs.flush_to = 0;
1998 break;
1999
2000 default:
2001 return;
2002 }
2003
2004 l2cap_add_conf_opt(ptr, L2CAP_CONF_EFS, sizeof(efs),
2005 (unsigned long) &efs);
2006 }
2007
2008 static void l2cap_ack_timeout(struct work_struct *work)
2009 {
2010 struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
2011 ack_timer.work);
2012
2013 lock_sock(chan->sk);
2014 l2cap_send_ack(chan);
2015 release_sock(chan->sk);
2016 }
2017
2018 static inline void l2cap_ertm_init(struct l2cap_chan *chan)
2019 {
2020 chan->expected_ack_seq = 0;
2021 chan->unacked_frames = 0;
2022 chan->buffer_seq = 0;
2023 chan->num_acked = 0;
2024 chan->frames_sent = 0;
2025
2026 INIT_DELAYED_WORK(&chan->retrans_timer, l2cap_retrans_timeout);
2027 INIT_DELAYED_WORK(&chan->monitor_timer, l2cap_monitor_timeout);
2028 INIT_DELAYED_WORK(&chan->ack_timer, l2cap_ack_timeout);
2029
2030 skb_queue_head_init(&chan->srej_q);
2031
2032 INIT_LIST_HEAD(&chan->srej_l);
2033 }
2034
2035 static inline __u8 l2cap_select_mode(__u8 mode, __u16 remote_feat_mask)
2036 {
2037 switch (mode) {
2038 case L2CAP_MODE_STREAMING:
2039 case L2CAP_MODE_ERTM:
2040 if (l2cap_mode_supported(mode, remote_feat_mask))
2041 return mode;
2042 /* fall through */
2043 default:
2044 return L2CAP_MODE_BASIC;
2045 }
2046 }
2047
2048 static inline bool __l2cap_ews_supported(struct l2cap_chan *chan)
2049 {
2050 return enable_hs && chan->conn->feat_mask & L2CAP_FEAT_EXT_WINDOW;
2051 }
2052
2053 static inline bool __l2cap_efs_supported(struct l2cap_chan *chan)
2054 {
2055 return enable_hs && chan->conn->feat_mask & L2CAP_FEAT_EXT_FLOW;
2056 }
2057
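/* Use the extended control field when the requested transmit window exceeds the default and the remote supports the extended window size option; otherwise clamp the window to the default. */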
2058 static inline void l2cap_txwin_setup(struct l2cap_chan *chan)
2059 {
2060 if (chan->tx_win > L2CAP_DEFAULT_TX_WINDOW &&
2061 __l2cap_ews_supported(chan)) {
2062 /* use extended control field */
2063 set_bit(FLAG_EXT_CTRL, &chan->flags);
2064 chan->tx_win_max = L2CAP_DEFAULT_EXT_WINDOW;
2065 } else {
2066 chan->tx_win = min_t(u16, chan->tx_win,
2067 L2CAP_DEFAULT_TX_WINDOW);
2068 chan->tx_win_max = L2CAP_DEFAULT_TX_WINDOW;
2069 }
2070 }
2071
2072 static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data)
2073 {
2074 struct l2cap_conf_req *req = data;
2075 struct l2cap_conf_rfc rfc = { .mode = chan->mode };
2076 void *ptr = req->data;
2077 u16 size;
2078
2079 BT_DBG("chan %p", chan);
2080
2081 if (chan->num_conf_req || chan->num_conf_rsp)
2082 goto done;
2083
2084 switch (chan->mode) {
2085 case L2CAP_MODE_STREAMING:
2086 case L2CAP_MODE_ERTM:
2087 if (test_bit(CONF_STATE2_DEVICE, &chan->conf_state))
2088 break;
2089
2090 if (__l2cap_efs_supported(chan))
2091 set_bit(FLAG_EFS_ENABLE, &chan->flags);
2092
2093 /* fall through */
2094 default:
2095 chan->mode = l2cap_select_mode(rfc.mode, chan->conn->feat_mask);
2096 break;
2097 }
2098
2099 done:
2100 if (chan->imtu != L2CAP_DEFAULT_MTU)
2101 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->imtu);
2102
2103 switch (chan->mode) {
2104 case L2CAP_MODE_BASIC:
2105 if (!(chan->conn->feat_mask & L2CAP_FEAT_ERTM) &&
2106 !(chan->conn->feat_mask & L2CAP_FEAT_STREAMING))
2107 break;
2108
2109 rfc.mode = L2CAP_MODE_BASIC;
2110 rfc.txwin_size = 0;
2111 rfc.max_transmit = 0;
2112 rfc.retrans_timeout = 0;
2113 rfc.monitor_timeout = 0;
2114 rfc.max_pdu_size = 0;
2115
2116 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
2117 (unsigned long) &rfc);
2118 break;
2119
2120 case L2CAP_MODE_ERTM:
2121 rfc.mode = L2CAP_MODE_ERTM;
2122 rfc.max_transmit = chan->max_tx;
2123 rfc.retrans_timeout = 0;
2124 rfc.monitor_timeout = 0;
2125
2126 size = min_t(u16, L2CAP_DEFAULT_MAX_PDU_SIZE, chan->conn->mtu -
2127 L2CAP_EXT_HDR_SIZE -
2128 L2CAP_SDULEN_SIZE -
2129 L2CAP_FCS_SIZE);
2130 rfc.max_pdu_size = cpu_to_le16(size);
2131
2132 l2cap_txwin_setup(chan);
2133
2134 rfc.txwin_size = min_t(u16, chan->tx_win,
2135 L2CAP_DEFAULT_TX_WINDOW);
2136
2137 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
2138 (unsigned long) &rfc);
2139
2140 if (test_bit(FLAG_EFS_ENABLE, &chan->flags))
2141 l2cap_add_opt_efs(&ptr, chan);
2142
2143 if (!(chan->conn->feat_mask & L2CAP_FEAT_FCS))
2144 break;
2145
2146 if (chan->fcs == L2CAP_FCS_NONE ||
2147 test_bit(CONF_NO_FCS_RECV, &chan->conf_state)) {
2148 chan->fcs = L2CAP_FCS_NONE;
2149 l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1, chan->fcs);
2150 }
2151
2152 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
2153 l2cap_add_conf_opt(&ptr, L2CAP_CONF_EWS, 2,
2154 chan->tx_win);
2155 break;
2156
2157 case L2CAP_MODE_STREAMING:
2158 rfc.mode = L2CAP_MODE_STREAMING;
2159 rfc.txwin_size = 0;
2160 rfc.max_transmit = 0;
2161 rfc.retrans_timeout = 0;
2162 rfc.monitor_timeout = 0;
2163
2164 size = min_t(u16, L2CAP_DEFAULT_MAX_PDU_SIZE, chan->conn->mtu -
2165 L2CAP_EXT_HDR_SIZE -
2166 L2CAP_SDULEN_SIZE -
2167 L2CAP_FCS_SIZE);
2168 rfc.max_pdu_size = cpu_to_le16(size);
2169
2170 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
2171 (unsigned long) &rfc);
2172
2173 if (test_bit(FLAG_EFS_ENABLE, &chan->flags))
2174 l2cap_add_opt_efs(&ptr, chan);
2175
2176 if (!(chan->conn->feat_mask & L2CAP_FEAT_FCS))
2177 break;
2178
2179 if (chan->fcs == L2CAP_FCS_NONE ||
2180 test_bit(CONF_NO_FCS_RECV, &chan->conf_state)) {
2181 chan->fcs = L2CAP_FCS_NONE;
2182 l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1, chan->fcs);
2183 }
2184 break;
2185 }
2186
2187 req->dcid = cpu_to_le16(chan->dcid);
2188 req->flags = cpu_to_le16(0);
2189
2190 return ptr - data;
2191 }
2192
2193 static int l2cap_parse_conf_req(struct l2cap_chan *chan, void *data)
2194 {
2195 struct l2cap_conf_rsp *rsp = data;
2196 void *ptr = rsp->data;
2197 void *req = chan->conf_req;
2198 int len = chan->conf_len;
2199 int type, hint, olen;
2200 unsigned long val;
2201 struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
2202 struct l2cap_conf_efs efs;
2203 u8 remote_efs = 0;
2204 u16 mtu = L2CAP_DEFAULT_MTU;
2205 u16 result = L2CAP_CONF_SUCCESS;
2206 u16 size;
2207
2208 BT_DBG("chan %p", chan);
2209
2210 while (len >= L2CAP_CONF_OPT_SIZE) {
2211 len -= l2cap_get_conf_opt(&req, &type, &olen, &val);
2212
2213 hint = type & L2CAP_CONF_HINT;
2214 type &= L2CAP_CONF_MASK;
2215
2216 switch (type) {
2217 case L2CAP_CONF_MTU:
2218 mtu = val;
2219 break;
2220
2221 case L2CAP_CONF_FLUSH_TO:
2222 chan->flush_to = val;
2223 break;
2224
2225 case L2CAP_CONF_QOS:
2226 break;
2227
2228 case L2CAP_CONF_RFC:
2229 if (olen == sizeof(rfc))
2230 memcpy(&rfc, (void *) val, olen);
2231 break;
2232
2233 case L2CAP_CONF_FCS:
2234 if (val == L2CAP_FCS_NONE)
2235 set_bit(CONF_NO_FCS_RECV, &chan->conf_state);
2236 break;
2237
2238 case L2CAP_CONF_EFS:
2239 remote_efs = 1;
2240 if (olen == sizeof(efs))
2241 memcpy(&efs, (void *) val, olen);
2242 break;
2243
2244 case L2CAP_CONF_EWS:
2245 if (!enable_hs)
2246 return -ECONNREFUSED;
2247
2248 set_bit(FLAG_EXT_CTRL, &chan->flags);
2249 set_bit(CONF_EWS_RECV, &chan->conf_state);
2250 chan->tx_win_max = L2CAP_DEFAULT_EXT_WINDOW;
2251 chan->remote_tx_win = val;
2252 break;
2253
2254 default:
2255 if (hint)
2256 break;
2257
2258 result = L2CAP_CONF_UNKNOWN;
2259 *((u8 *) ptr++) = type;
2260 break;
2261 }
2262 }
2263
2264 if (chan->num_conf_rsp || chan->num_conf_req > 1)
2265 goto done;
2266
2267 switch (chan->mode) {
2268 case L2CAP_MODE_STREAMING:
2269 case L2CAP_MODE_ERTM:
2270 if (!test_bit(CONF_STATE2_DEVICE, &chan->conf_state)) {
2271 chan->mode = l2cap_select_mode(rfc.mode,
2272 chan->conn->feat_mask);
2273 break;
2274 }
2275
2276 if (remote_efs) {
2277 if (__l2cap_efs_supported(chan))
2278 set_bit(FLAG_EFS_ENABLE, &chan->flags);
2279 else
2280 return -ECONNREFUSED;
2281 }
2282
2283 if (chan->mode != rfc.mode)
2284 return -ECONNREFUSED;
2285
2286 break;
2287 }
2288
2289 done:
2290 if (chan->mode != rfc.mode) {
2291 result = L2CAP_CONF_UNACCEPT;
2292 rfc.mode = chan->mode;
2293
2294 if (chan->num_conf_rsp == 1)
2295 return -ECONNREFUSED;
2296
2297 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
2298 sizeof(rfc), (unsigned long) &rfc);
2299 }
2300
2301 if (result == L2CAP_CONF_SUCCESS) {
2302 /* Configure output options and let the other side know
2303 * which ones we don't like. */
2304
2305 if (mtu < L2CAP_DEFAULT_MIN_MTU)
2306 result = L2CAP_CONF_UNACCEPT;
2307 else {
2308 chan->omtu = mtu;
2309 set_bit(CONF_MTU_DONE, &chan->conf_state);
2310 }
2311 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->omtu);
2312
2313 if (remote_efs) {
2314 if (chan->local_stype != L2CAP_SERV_NOTRAFIC &&
2315 efs.stype != L2CAP_SERV_NOTRAFIC &&
2316 efs.stype != chan->local_stype) {
2317
2318 result = L2CAP_CONF_UNACCEPT;
2319
2320 if (chan->num_conf_req >= 1)
2321 return -ECONNREFUSED;
2322
2323 l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS,
2324 sizeof(efs),
2325 (unsigned long) &efs);
2326 } else {
2327 /* Send PENDING Conf Rsp */
2328 result = L2CAP_CONF_PENDING;
2329 set_bit(CONF_LOC_CONF_PEND, &chan->conf_state);
2330 }
2331 }
2332
2333 switch (rfc.mode) {
2334 case L2CAP_MODE_BASIC:
2335 chan->fcs = L2CAP_FCS_NONE;
2336 set_bit(CONF_MODE_DONE, &chan->conf_state);
2337 break;
2338
2339 case L2CAP_MODE_ERTM:
2340 if (!test_bit(CONF_EWS_RECV, &chan->conf_state))
2341 chan->remote_tx_win = rfc.txwin_size;
2342 else
2343 rfc.txwin_size = L2CAP_DEFAULT_TX_WINDOW;
2344
2345 chan->remote_max_tx = rfc.max_transmit;
2346
2347 size = min_t(u16, le16_to_cpu(rfc.max_pdu_size),
2348 chan->conn->mtu -
2349 L2CAP_EXT_HDR_SIZE -
2350 L2CAP_SDULEN_SIZE -
2351 L2CAP_FCS_SIZE);
2352 rfc.max_pdu_size = cpu_to_le16(size);
2353 chan->remote_mps = size;
2354
2355 rfc.retrans_timeout =
2356 cpu_to_le16(L2CAP_DEFAULT_RETRANS_TO);
2357 rfc.monitor_timeout =
2358 cpu_to_le16(L2CAP_DEFAULT_MONITOR_TO);
2359
2360 set_bit(CONF_MODE_DONE, &chan->conf_state);
2361
2362 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
2363 sizeof(rfc), (unsigned long) &rfc);
2364
2365 if (test_bit(FLAG_EFS_ENABLE, &chan->flags)) {
2366 chan->remote_id = efs.id;
2367 chan->remote_stype = efs.stype;
2368 chan->remote_msdu = le16_to_cpu(efs.msdu);
2369 chan->remote_flush_to =
2370 le32_to_cpu(efs.flush_to);
2371 chan->remote_acc_lat =
2372 le32_to_cpu(efs.acc_lat);
2373 chan->remote_sdu_itime =
2374 le32_to_cpu(efs.sdu_itime);
2375 l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS,
2376 sizeof(efs), (unsigned long) &efs);
2377 }
2378 break;
2379
2380 case L2CAP_MODE_STREAMING:
2381 size = min_t(u16, le16_to_cpu(rfc.max_pdu_size),
2382 chan->conn->mtu -
2383 L2CAP_EXT_HDR_SIZE -
2384 L2CAP_SDULEN_SIZE -
2385 L2CAP_FCS_SIZE);
2386 rfc.max_pdu_size = cpu_to_le16(size);
2387 chan->remote_mps = size;
2388
2389 set_bit(CONF_MODE_DONE, &chan->conf_state);
2390
2391 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
2392 sizeof(rfc), (unsigned long) &rfc);
2393
2394 break;
2395
2396 default:
2397 result = L2CAP_CONF_UNACCEPT;
2398
2399 memset(&rfc, 0, sizeof(rfc));
2400 rfc.mode = chan->mode;
2401 }
2402
2403 if (result == L2CAP_CONF_SUCCESS)
2404 set_bit(CONF_OUTPUT_DONE, &chan->conf_state);
2405 }
2406 rsp->scid = cpu_to_le16(chan->dcid);
2407 rsp->result = cpu_to_le16(result);
2408 rsp->flags = cpu_to_le16(0x0000);
2409
2410 return ptr - data;
2411 }
2412
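/* Process a Configure Response from the peer: adopt the values it proposes
 * (MTU, flush timeout, RFC, EWS, EFS) and build the follow-up Configure
 * Request in 'data'.  Returns the request length or a negative error.
 */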
2413 static int l2cap_parse_conf_rsp(struct l2cap_chan *chan, void *rsp, int len, void *data, u16 *result)
2414 {
2415 struct l2cap_conf_req *req = data;
2416 void *ptr = req->data;
2417 int type, olen;
2418 unsigned long val;
2419 struct l2cap_conf_rfc rfc;
2420 struct l2cap_conf_efs efs;
2421
2422 BT_DBG("chan %p, rsp %p, len %d, req %p", chan, rsp, len, data);
2423
2424 while (len >= L2CAP_CONF_OPT_SIZE) {
2425 len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);
2426
2427 switch (type) {
2428 case L2CAP_CONF_MTU:
2429 if (val < L2CAP_DEFAULT_MIN_MTU) {
2430 *result = L2CAP_CONF_UNACCEPT;
2431 chan->imtu = L2CAP_DEFAULT_MIN_MTU;
2432 } else
2433 chan->imtu = val;
2434 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->imtu);
2435 break;
2436
2437 case L2CAP_CONF_FLUSH_TO:
2438 chan->flush_to = val;
2439 l2cap_add_conf_opt(&ptr, L2CAP_CONF_FLUSH_TO,
2440 2, chan->flush_to);
2441 break;
2442
2443 case L2CAP_CONF_RFC:
2444 if (olen == sizeof(rfc))
2445 memcpy(&rfc, (void *)val, olen);
2446
2447 if (test_bit(CONF_STATE2_DEVICE, &chan->conf_state) &&
2448 rfc.mode != chan->mode)
2449 return -ECONNREFUSED;
2450
2451 chan->fcs = 0;
2452
2453 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
2454 sizeof(rfc), (unsigned long) &rfc);
2455 break;
2456
2457 case L2CAP_CONF_EWS:
2458 chan->tx_win = min_t(u16, val,
2459 L2CAP_DEFAULT_EXT_WINDOW);
2460 l2cap_add_conf_opt(&ptr, L2CAP_CONF_EWS, 2,
2461 chan->tx_win);
2462 break;
2463
2464 case L2CAP_CONF_EFS:
2465 if (olen == sizeof(efs))
2466 memcpy(&efs, (void *)val, olen);
2467
2468 if (chan->local_stype != L2CAP_SERV_NOTRAFIC &&
2469 efs.stype != L2CAP_SERV_NOTRAFIC &&
2470 efs.stype != chan->local_stype)
2471 return -ECONNREFUSED;
2472
2473 l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS,
2474 sizeof(efs), (unsigned long) &efs);
2475 break;
2476 }
2477 }
2478
2479 if (chan->mode == L2CAP_MODE_BASIC && chan->mode != rfc.mode)
2480 return -ECONNREFUSED;
2481
2482 chan->mode = rfc.mode;
2483
2484 if (*result == L2CAP_CONF_SUCCESS || *result == L2CAP_CONF_PENDING) {
2485 switch (rfc.mode) {
2486 case L2CAP_MODE_ERTM:
2487 chan->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
2488 chan->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
2489 chan->mps = le16_to_cpu(rfc.max_pdu_size);
2490
2491 if (test_bit(FLAG_EFS_ENABLE, &chan->flags)) {
2492 chan->local_msdu = le16_to_cpu(efs.msdu);
2493 chan->local_sdu_itime =
2494 le32_to_cpu(efs.sdu_itime);
2495 chan->local_acc_lat = le32_to_cpu(efs.acc_lat);
2496 chan->local_flush_to =
2497 le32_to_cpu(efs.flush_to);
2498 }
2499 break;
2500
2501 case L2CAP_MODE_STREAMING:
2502 chan->mps = le16_to_cpu(rfc.max_pdu_size);
2503 }
2504 }
2505
2506 req->dcid = cpu_to_le16(chan->dcid);
2507 req->flags = cpu_to_le16(0x0000);
2508
2509 return ptr - data;
2510 }
2511
2512 static int l2cap_build_conf_rsp(struct l2cap_chan *chan, void *data, u16 result, u16 flags)
2513 {
2514 struct l2cap_conf_rsp *rsp = data;
2515 void *ptr = rsp->data;
2516
2517 BT_DBG("chan %p", chan);
2518
2519 rsp->scid = cpu_to_le16(chan->dcid);
2520 rsp->result = cpu_to_le16(result);
2521 rsp->flags = cpu_to_le16(flags);
2522
2523 return ptr - data;
2524 }
2525
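/* Send the Connection Response for a channel whose acceptance was deferred
 * (e.g. socket-level deferred setup), then start configuration unless a
 * Configure Request has already been sent.
 */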
2526 void __l2cap_connect_rsp_defer(struct l2cap_chan *chan)
2527 {
2528 struct l2cap_conn_rsp rsp;
2529 struct l2cap_conn *conn = chan->conn;
2530 u8 buf[128];
2531
2532 rsp.scid = cpu_to_le16(chan->dcid);
2533 rsp.dcid = cpu_to_le16(chan->scid);
2534 rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
2535 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
2536 l2cap_send_cmd(conn, chan->ident,
2537 L2CAP_CONN_RSP, sizeof(rsp), &rsp);
2538
2539 if (test_and_set_bit(CONF_REQ_SENT, &chan->conf_state))
2540 return;
2541
2542 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
2543 l2cap_build_conf_req(chan, buf), buf);
2544 chan->num_conf_req++;
2545 }
2546
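/* Extract the RFC option from a successful Configure Response and apply
 * the agreed retransmission/monitor timeouts and MPS for ERTM or
 * streaming mode.
 */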
2547 static void l2cap_conf_rfc_get(struct l2cap_chan *chan, void *rsp, int len)
2548 {
2549 int type, olen;
2550 unsigned long val;
2551 struct l2cap_conf_rfc rfc;
2552
2553 BT_DBG("chan %p, rsp %p, len %d", chan, rsp, len);
2554
2555 if ((chan->mode != L2CAP_MODE_ERTM) && (chan->mode != L2CAP_MODE_STREAMING))
2556 return;
2557
2558 while (len >= L2CAP_CONF_OPT_SIZE) {
2559 len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);
2560
2561 switch (type) {
2562 case L2CAP_CONF_RFC:
2563 if (olen == sizeof(rfc))
2564 memcpy(&rfc, (void *)val, olen);
2565 goto done;
2566 }
2567 }
2568
2569 done:
2570 switch (rfc.mode) {
2571 case L2CAP_MODE_ERTM:
2572 chan->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
2573 chan->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
2574 chan->mps = le16_to_cpu(rfc.max_pdu_size);
2575 break;
2576 case L2CAP_MODE_STREAMING:
2577 chan->mps = le16_to_cpu(rfc.max_pdu_size);
2578 }
2579 }
2580
2581 static inline int l2cap_command_rej(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2582 {
2583 struct l2cap_cmd_rej_unk *rej = (struct l2cap_cmd_rej_unk *) data;
2584
2585 if (rej->reason != L2CAP_REJ_NOT_UNDERSTOOD)
2586 return 0;
2587
2588 if ((conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) &&
2589 cmd->ident == conn->info_ident) {
2590 cancel_delayed_work_sync(&conn->info_work);
2591
2592 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
2593 conn->info_ident = 0;
2594
2595 l2cap_conn_start(conn);
2596 }
2597
2598 return 0;
2599 }
2600
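/* Handle an incoming Connection Request: find a channel listening on the
 * requested PSM, check security and the accept backlog, create the child
 * channel and reply with a Connection Response (success or pending).
 */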
2601 static inline int l2cap_connect_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2602 {
2603 struct l2cap_conn_req *req = (struct l2cap_conn_req *) data;
2604 struct l2cap_conn_rsp rsp;
2605 struct l2cap_chan *chan = NULL, *pchan;
2606 struct sock *parent, *sk = NULL;
2607 int result, status = L2CAP_CS_NO_INFO;
2608
2609 u16 dcid = 0, scid = __le16_to_cpu(req->scid);
2610 __le16 psm = req->psm;
2611
2612 BT_DBG("psm 0x%2.2x scid 0x%4.4x", psm, scid);
2613
2614 /* Check if we have a socket listening on this PSM */
2615 pchan = l2cap_global_chan_by_psm(BT_LISTEN, psm, conn->src);
2616 if (!pchan) {
2617 result = L2CAP_CR_BAD_PSM;
2618 goto sendresp;
2619 }
2620
2621 parent = pchan->sk;
2622
2623 lock_sock(parent);
2624
2625 /* Check if the ACL is secure enough (if not SDP) */
2626 if (psm != cpu_to_le16(0x0001) &&
2627 !hci_conn_check_link_mode(conn->hcon)) {
2628 conn->disc_reason = HCI_ERROR_AUTH_FAILURE;
2629 result = L2CAP_CR_SEC_BLOCK;
2630 goto response;
2631 }
2632
2633 result = L2CAP_CR_NO_MEM;
2634
2635 /* Check for backlog size */
2636 if (sk_acceptq_is_full(parent)) {
2637 BT_DBG("backlog full %d", parent->sk_ack_backlog);
2638 goto response;
2639 }
2640
2641 chan = pchan->ops->new_connection(pchan->data);
2642 if (!chan)
2643 goto response;
2644
2645 sk = chan->sk;
2646
2647 /* Check if we already have a channel with that dcid */
2648 if (__l2cap_get_chan_by_dcid(conn, scid)) {
2649 sock_set_flag(sk, SOCK_ZAPPED);
2650 chan->ops->close(chan->data);
2651 goto response;
2652 }
2653
2654 hci_conn_hold(conn->hcon);
2655
2656 bacpy(&bt_sk(sk)->src, conn->src);
2657 bacpy(&bt_sk(sk)->dst, conn->dst);
2658 chan->psm = psm;
2659 chan->dcid = scid;
2660
2661 bt_accept_enqueue(parent, sk);
2662
2663 l2cap_chan_add(conn, chan);
2664
2665 dcid = chan->scid;
2666
2667 __set_chan_timer(chan, sk->sk_sndtimeo);
2668
2669 chan->ident = cmd->ident;
2670
2671 if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE) {
2672 if (l2cap_chan_check_security(chan)) {
2673 if (bt_sk(sk)->defer_setup) {
2674 l2cap_state_change(chan, BT_CONNECT2);
2675 result = L2CAP_CR_PEND;
2676 status = L2CAP_CS_AUTHOR_PEND;
2677 parent->sk_data_ready(parent, 0);
2678 } else {
2679 l2cap_state_change(chan, BT_CONFIG);
2680 result = L2CAP_CR_SUCCESS;
2681 status = L2CAP_CS_NO_INFO;
2682 }
2683 } else {
2684 l2cap_state_change(chan, BT_CONNECT2);
2685 result = L2CAP_CR_PEND;
2686 status = L2CAP_CS_AUTHEN_PEND;
2687 }
2688 } else {
2689 l2cap_state_change(chan, BT_CONNECT2);
2690 result = L2CAP_CR_PEND;
2691 status = L2CAP_CS_NO_INFO;
2692 }
2693
2694 response:
2695 release_sock(parent);
2696
2697 sendresp:
2698 rsp.scid = cpu_to_le16(scid);
2699 rsp.dcid = cpu_to_le16(dcid);
2700 rsp.result = cpu_to_le16(result);
2701 rsp.status = cpu_to_le16(status);
2702 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONN_RSP, sizeof(rsp), &rsp);
2703
2704 if (result == L2CAP_CR_PEND && status == L2CAP_CS_NO_INFO) {
2705 struct l2cap_info_req info;
2706 info.type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
2707
2708 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
2709 conn->info_ident = l2cap_get_ident(conn);
2710
2711 schedule_delayed_work(&conn->info_work,
2712 msecs_to_jiffies(L2CAP_INFO_TIMEOUT));
2713
2714 l2cap_send_cmd(conn, conn->info_ident,
2715 L2CAP_INFO_REQ, sizeof(info), &info);
2716 }
2717
2718 if (chan && !test_bit(CONF_REQ_SENT, &chan->conf_state) &&
2719 result == L2CAP_CR_SUCCESS) {
2720 u8 buf[128];
2721 set_bit(CONF_REQ_SENT, &chan->conf_state);
2722 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
2723 l2cap_build_conf_req(chan, buf), buf);
2724 chan->num_conf_req++;
2725 }
2726
2727 return 0;
2728 }
2729
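/* Handle a Connection Response: on success move the channel to BT_CONFIG
 * and send the first Configure Request, on pending just mark the channel,
 * otherwise tear it down.
 */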
2730 static inline int l2cap_connect_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2731 {
2732 struct l2cap_conn_rsp *rsp = (struct l2cap_conn_rsp *) data;
2733 u16 scid, dcid, result, status;
2734 struct l2cap_chan *chan;
2735 struct sock *sk;
2736 u8 req[128];
2737
2738 scid = __le16_to_cpu(rsp->scid);
2739 dcid = __le16_to_cpu(rsp->dcid);
2740 result = __le16_to_cpu(rsp->result);
2741 status = __le16_to_cpu(rsp->status);
2742
2743 BT_DBG("dcid 0x%4.4x scid 0x%4.4x result 0x%2.2x status 0x%2.2x", dcid, scid, result, status);
2744
2745 if (scid) {
2746 chan = l2cap_get_chan_by_scid(conn, scid);
2747 if (!chan)
2748 return -EFAULT;
2749 } else {
2750 chan = l2cap_get_chan_by_ident(conn, cmd->ident);
2751 if (!chan)
2752 return -EFAULT;
2753 }
2754
2755 sk = chan->sk;
2756
2757 switch (result) {
2758 case L2CAP_CR_SUCCESS:
2759 l2cap_state_change(chan, BT_CONFIG);
2760 chan->ident = 0;
2761 chan->dcid = dcid;
2762 clear_bit(CONF_CONNECT_PEND, &chan->conf_state);
2763
2764 if (test_and_set_bit(CONF_REQ_SENT, &chan->conf_state))
2765 break;
2766
2767 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
2768 l2cap_build_conf_req(chan, req), req);
2769 chan->num_conf_req++;
2770 break;
2771
2772 case L2CAP_CR_PEND:
2773 set_bit(CONF_CONNECT_PEND, &chan->conf_state);
2774 break;
2775
2776 default:
2777 l2cap_chan_del(chan, ECONNREFUSED);
2778 break;
2779 }
2780
2781 release_sock(sk);
2782 return 0;
2783 }
2784
2785 static inline void set_default_fcs(struct l2cap_chan *chan)
2786 {
2787 /* FCS is enabled only in ERTM or streaming mode, if one or both
2788 * sides request it.
2789 */
2790 if (chan->mode != L2CAP_MODE_ERTM && chan->mode != L2CAP_MODE_STREAMING)
2791 chan->fcs = L2CAP_FCS_NONE;
2792 else if (!test_bit(CONF_NO_FCS_RECV, &chan->conf_state))
2793 chan->fcs = L2CAP_FCS_CRC16;
2794 }
2795
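/* Handle a Configure Request: accumulate option data until the final
 * fragment arrives, parse it, send our Configure Response and bring the
 * channel up once both directions are configured.
 */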
2796 static inline int l2cap_config_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u16 cmd_len, u8 *data)
2797 {
2798 struct l2cap_conf_req *req = (struct l2cap_conf_req *) data;
2799 u16 dcid, flags;
2800 u8 rsp[64];
2801 struct l2cap_chan *chan;
2802 struct sock *sk;
2803 int len;
2804
2805 dcid = __le16_to_cpu(req->dcid);
2806 flags = __le16_to_cpu(req->flags);
2807
2808 BT_DBG("dcid 0x%4.4x flags 0x%2.2x", dcid, flags);
2809
2810 chan = l2cap_get_chan_by_scid(conn, dcid);
2811 if (!chan)
2812 return -ENOENT;
2813
2814 sk = chan->sk;
2815
2816 if (chan->state != BT_CONFIG && chan->state != BT_CONNECT2) {
2817 struct l2cap_cmd_rej_cid rej;
2818
2819 rej.reason = cpu_to_le16(L2CAP_REJ_INVALID_CID);
2820 rej.scid = cpu_to_le16(chan->scid);
2821 rej.dcid = cpu_to_le16(chan->dcid);
2822
2823 l2cap_send_cmd(conn, cmd->ident, L2CAP_COMMAND_REJ,
2824 sizeof(rej), &rej);
2825 goto unlock;
2826 }
2827
2828 /* Reject if config buffer is too small. */
2829 len = cmd_len - sizeof(*req);
2830 if (len < 0 || chan->conf_len + len > sizeof(chan->conf_req)) {
2831 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
2832 l2cap_build_conf_rsp(chan, rsp,
2833 L2CAP_CONF_REJECT, flags), rsp);
2834 goto unlock;
2835 }
2836
2837 /* Store config. */
2838 memcpy(chan->conf_req + chan->conf_len, req->data, len);
2839 chan->conf_len += len;
2840
2841 if (flags & 0x0001) {
2842 /* Incomplete config. Send empty response. */
2843 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
2844 l2cap_build_conf_rsp(chan, rsp,
2845 L2CAP_CONF_SUCCESS, 0x0001), rsp);
2846 goto unlock;
2847 }
2848
2849 /* Complete config. */
2850 len = l2cap_parse_conf_req(chan, rsp);
2851 if (len < 0) {
2852 l2cap_send_disconn_req(conn, chan, ECONNRESET);
2853 goto unlock;
2854 }
2855
2856 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP, len, rsp);
2857 chan->num_conf_rsp++;
2858
2859 /* Reset config buffer. */
2860 chan->conf_len = 0;
2861
2862 if (!test_bit(CONF_OUTPUT_DONE, &chan->conf_state))
2863 goto unlock;
2864
2865 if (test_bit(CONF_INPUT_DONE, &chan->conf_state)) {
2866 set_default_fcs(chan);
2867
2868 l2cap_state_change(chan, BT_CONNECTED);
2869
2870 chan->next_tx_seq = 0;
2871 chan->expected_tx_seq = 0;
2872 skb_queue_head_init(&chan->tx_q);
2873 if (chan->mode == L2CAP_MODE_ERTM)
2874 l2cap_ertm_init(chan);
2875
2876 l2cap_chan_ready(sk);
2877 goto unlock;
2878 }
2879
2880 if (!test_and_set_bit(CONF_REQ_SENT, &chan->conf_state)) {
2881 u8 buf[64];
2882 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
2883 l2cap_build_conf_req(chan, buf), buf);
2884 chan->num_conf_req++;
2885 }
2886
2887 /* Got Conf Rsp PENDING from the remote side and assume we sent
2888    Conf Rsp PENDING in the code above */
2889 if (test_bit(CONF_REM_CONF_PEND, &chan->conf_state) &&
2890 test_bit(CONF_LOC_CONF_PEND, &chan->conf_state)) {
2891
2892 /* check compatibility */
2893
2894 clear_bit(CONF_LOC_CONF_PEND, &chan->conf_state);
2895 set_bit(CONF_OUTPUT_DONE, &chan->conf_state);
2896
2897 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
2898 l2cap_build_conf_rsp(chan, rsp,
2899 L2CAP_CONF_SUCCESS, 0x0000), rsp);
2900 }
2901
2902 unlock:
2903 release_sock(sk);
2904 return 0;
2905 }
2906
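/* Handle a Configure Response.  Success completes our side of the
 * configuration, pending triggers another exchange, unaccept retries with
 * adjusted options, anything else disconnects the channel.
 */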
2907 static inline int l2cap_config_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2908 {
2909 struct l2cap_conf_rsp *rsp = (struct l2cap_conf_rsp *)data;
2910 u16 scid, flags, result;
2911 struct l2cap_chan *chan;
2912 struct sock *sk;
2913 int len = __le16_to_cpu(cmd->len) - sizeof(*rsp);
2914
2915 scid = __le16_to_cpu(rsp->scid);
2916 flags = __le16_to_cpu(rsp->flags);
2917 result = __le16_to_cpu(rsp->result);
2918
2919 BT_DBG("scid 0x%4.4x flags 0x%2.2x result 0x%2.2x",
2920 scid, flags, result);
2921
2922 chan = l2cap_get_chan_by_scid(conn, scid);
2923 if (!chan)
2924 return 0;
2925
2926 sk = chan->sk;
2927
2928 switch (result) {
2929 case L2CAP_CONF_SUCCESS:
2930 l2cap_conf_rfc_get(chan, rsp->data, len);
2931 clear_bit(CONF_REM_CONF_PEND, &chan->conf_state);
2932 break;
2933
2934 case L2CAP_CONF_PENDING:
2935 set_bit(CONF_REM_CONF_PEND, &chan->conf_state);
2936
2937 if (test_bit(CONF_LOC_CONF_PEND, &chan->conf_state)) {
2938 char buf[64];
2939
2940 len = l2cap_parse_conf_rsp(chan, rsp->data, len,
2941 buf, &result);
2942 if (len < 0) {
2943 l2cap_send_disconn_req(conn, chan, ECONNRESET);
2944 goto done;
2945 }
2946
2947 /* check compatibility */
2948
2949 clear_bit(CONF_LOC_CONF_PEND, &chan->conf_state);
2950 set_bit(CONF_OUTPUT_DONE, &chan->conf_state);
2951
2952 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
2953 l2cap_build_conf_rsp(chan, buf,
2954 L2CAP_CONF_SUCCESS, 0x0000), buf);
2955 }
2956 goto done;
2957
2958 case L2CAP_CONF_UNACCEPT:
2959 if (chan->num_conf_rsp <= L2CAP_CONF_MAX_CONF_RSP) {
2960 char req[64];
2961
2962 if (len > sizeof(req) - sizeof(struct l2cap_conf_req)) {
2963 l2cap_send_disconn_req(conn, chan, ECONNRESET);
2964 goto done;
2965 }
2966
2967 /* throw out any old stored conf requests */
2968 result = L2CAP_CONF_SUCCESS;
2969 len = l2cap_parse_conf_rsp(chan, rsp->data, len,
2970 req, &result);
2971 if (len < 0) {
2972 l2cap_send_disconn_req(conn, chan, ECONNRESET);
2973 goto done;
2974 }
2975
2976 l2cap_send_cmd(conn, l2cap_get_ident(conn),
2977 L2CAP_CONF_REQ, len, req);
2978 chan->num_conf_req++;
2979 if (result != L2CAP_CONF_SUCCESS)
2980 goto done;
2981 break;
2982 }
2983
2984 default:
2985 sk->sk_err = ECONNRESET;
2986 __set_chan_timer(chan, L2CAP_DISC_REJ_TIMEOUT);
2987 l2cap_send_disconn_req(conn, chan, ECONNRESET);
2988 goto done;
2989 }
2990
2991 if (flags & 0x01)
2992 goto done;
2993
2994 set_bit(CONF_INPUT_DONE, &chan->conf_state);
2995
2996 if (test_bit(CONF_OUTPUT_DONE, &chan->conf_state)) {
2997 set_default_fcs(chan);
2998
2999 l2cap_state_change(chan, BT_CONNECTED);
3000 chan->next_tx_seq = 0;
3001 chan->expected_tx_seq = 0;
3002 skb_queue_head_init(&chan->tx_q);
3003 if (chan->mode == L2CAP_MODE_ERTM)
3004 l2cap_ertm_init(chan);
3005
3006 l2cap_chan_ready(sk);
3007 }
3008
3009 done:
3010 release_sock(sk);
3011 return 0;
3012 }
3013
3014 static inline int l2cap_disconnect_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
3015 {
3016 struct l2cap_disconn_req *req = (struct l2cap_disconn_req *) data;
3017 struct l2cap_disconn_rsp rsp;
3018 u16 dcid, scid;
3019 struct l2cap_chan *chan;
3020 struct sock *sk;
3021
3022 scid = __le16_to_cpu(req->scid);
3023 dcid = __le16_to_cpu(req->dcid);
3024
3025 BT_DBG("scid 0x%4.4x dcid 0x%4.4x", scid, dcid);
3026
3027 chan = l2cap_get_chan_by_scid(conn, dcid);
3028 if (!chan)
3029 return 0;
3030
3031 sk = chan->sk;
3032
3033 rsp.dcid = cpu_to_le16(chan->scid);
3034 rsp.scid = cpu_to_le16(chan->dcid);
3035 l2cap_send_cmd(conn, cmd->ident, L2CAP_DISCONN_RSP, sizeof(rsp), &rsp);
3036
3037 sk->sk_shutdown = SHUTDOWN_MASK;
3038
3039 l2cap_chan_del(chan, ECONNRESET);
3040 release_sock(sk);
3041
3042 chan->ops->close(chan->data);
3043 return 0;
3044 }
3045
3046 static inline int l2cap_disconnect_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
3047 {
3048 struct l2cap_disconn_rsp *rsp = (struct l2cap_disconn_rsp *) data;
3049 u16 dcid, scid;
3050 struct l2cap_chan *chan;
3051 struct sock *sk;
3052
3053 scid = __le16_to_cpu(rsp->scid);
3054 dcid = __le16_to_cpu(rsp->dcid);
3055
3056 BT_DBG("dcid 0x%4.4x scid 0x%4.4x", dcid, scid);
3057
3058 chan = l2cap_get_chan_by_scid(conn, scid);
3059 if (!chan)
3060 return 0;
3061
3062 sk = chan->sk;
3063
3064 l2cap_chan_del(chan, 0);
3065 release_sock(sk);
3066
3067 chan->ops->close(chan->data);
3068 return 0;
3069 }
3070
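/* Answer an Information Request with our extended feature mask or fixed
 * channel map; unknown types get a "not supported" response.
 */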
3071 static inline int l2cap_information_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
3072 {
3073 struct l2cap_info_req *req = (struct l2cap_info_req *) data;
3074 u16 type;
3075
3076 type = __le16_to_cpu(req->type);
3077
3078 BT_DBG("type 0x%4.4x", type);
3079
3080 if (type == L2CAP_IT_FEAT_MASK) {
3081 u8 buf[8];
3082 u32 feat_mask = l2cap_feat_mask;
3083 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
3084 rsp->type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
3085 rsp->result = cpu_to_le16(L2CAP_IR_SUCCESS);
3086 if (!disable_ertm)
3087 feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING
3088 | L2CAP_FEAT_FCS;
3089 if (enable_hs)
3090 feat_mask |= L2CAP_FEAT_EXT_FLOW
3091 | L2CAP_FEAT_EXT_WINDOW;
3092
3093 put_unaligned_le32(feat_mask, rsp->data);
3094 l2cap_send_cmd(conn, cmd->ident,
3095 L2CAP_INFO_RSP, sizeof(buf), buf);
3096 } else if (type == L2CAP_IT_FIXED_CHAN) {
3097 u8 buf[12];
3098 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
3099
3100 if (enable_hs)
3101 l2cap_fixed_chan[0] |= L2CAP_FC_A2MP;
3102 else
3103 l2cap_fixed_chan[0] &= ~L2CAP_FC_A2MP;
3104
3105 rsp->type = cpu_to_le16(L2CAP_IT_FIXED_CHAN);
3106 rsp->result = cpu_to_le16(L2CAP_IR_SUCCESS);
3107 memcpy(rsp->data, l2cap_fixed_chan, sizeof(l2cap_fixed_chan));
3108 l2cap_send_cmd(conn, cmd->ident,
3109 L2CAP_INFO_RSP, sizeof(buf), buf);
3110 } else {
3111 struct l2cap_info_rsp rsp;
3112 rsp.type = cpu_to_le16(type);
3113 rsp.result = cpu_to_le16(L2CAP_IR_NOTSUPP);
3114 l2cap_send_cmd(conn, cmd->ident,
3115 L2CAP_INFO_RSP, sizeof(rsp), &rsp);
3116 }
3117
3118 return 0;
3119 }
3120
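/* Handle an Information Response: store the remote feature mask, request
 * the fixed channel map if supported, and resume connection setup once
 * the information exchange is done.
 */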
3121 static inline int l2cap_information_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
3122 {
3123 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) data;
3124 u16 type, result;
3125
3126 type = __le16_to_cpu(rsp->type);
3127 result = __le16_to_cpu(rsp->result);
3128
3129 BT_DBG("type 0x%4.4x result 0x%2.2x", type, result);
3130
3131 /* L2CAP Info req/rsp are not bound to any channel, so add extra checks */
3132 if (cmd->ident != conn->info_ident ||
3133 conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE)
3134 return 0;
3135
3136 cancel_delayed_work_sync(&conn->info_work);
3137
3138 if (result != L2CAP_IR_SUCCESS) {
3139 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
3140 conn->info_ident = 0;
3141
3142 l2cap_conn_start(conn);
3143
3144 return 0;
3145 }
3146
3147 if (type == L2CAP_IT_FEAT_MASK) {
3148 conn->feat_mask = get_unaligned_le32(rsp->data);
3149
3150 if (conn->feat_mask & L2CAP_FEAT_FIXED_CHAN) {
3151 struct l2cap_info_req req;
3152 req.type = cpu_to_le16(L2CAP_IT_FIXED_CHAN);
3153
3154 conn->info_ident = l2cap_get_ident(conn);
3155
3156 l2cap_send_cmd(conn, conn->info_ident,
3157 L2CAP_INFO_REQ, sizeof(req), &req);
3158 } else {
3159 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
3160 conn->info_ident = 0;
3161
3162 l2cap_conn_start(conn);
3163 }
3164 } else if (type == L2CAP_IT_FIXED_CHAN) {
3165 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
3166 conn->info_ident = 0;
3167
3168 l2cap_conn_start(conn);
3169 }
3170
3171 return 0;
3172 }
3173
3174 static inline int l2cap_create_channel_req(struct l2cap_conn *conn,
3175 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
3176 void *data)
3177 {
3178 struct l2cap_create_chan_req *req = data;
3179 struct l2cap_create_chan_rsp rsp;
3180 u16 psm, scid;
3181
3182 if (cmd_len != sizeof(*req))
3183 return -EPROTO;
3184
3185 if (!enable_hs)
3186 return -EINVAL;
3187
3188 psm = le16_to_cpu(req->psm);
3189 scid = le16_to_cpu(req->scid);
3190
3191 BT_DBG("psm %d, scid %d, amp_id %d", psm, scid, req->amp_id);
3192
3193 /* Placeholder: Always reject */
3194 rsp.dcid = 0;
3195 rsp.scid = cpu_to_le16(scid);
3196 rsp.result = cpu_to_le16(L2CAP_CR_NO_MEM);
3197 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
3198
3199 l2cap_send_cmd(conn, cmd->ident, L2CAP_CREATE_CHAN_RSP,
3200 sizeof(rsp), &rsp);
3201
3202 return 0;
3203 }
3204
3205 static inline int l2cap_create_channel_rsp(struct l2cap_conn *conn,
3206 struct l2cap_cmd_hdr *cmd, void *data)
3207 {
3208 BT_DBG("conn %p", conn);
3209
3210 return l2cap_connect_rsp(conn, cmd, data);
3211 }
3212
3213 static void l2cap_send_move_chan_rsp(struct l2cap_conn *conn, u8 ident,
3214 u16 icid, u16 result)
3215 {
3216 struct l2cap_move_chan_rsp rsp;
3217
3218 BT_DBG("icid %d, result %d", icid, result);
3219
3220 rsp.icid = cpu_to_le16(icid);
3221 rsp.result = cpu_to_le16(result);
3222
3223 l2cap_send_cmd(conn, ident, L2CAP_MOVE_CHAN_RSP, sizeof(rsp), &rsp);
3224 }
3225
3226 static void l2cap_send_move_chan_cfm(struct l2cap_conn *conn,
3227 struct l2cap_chan *chan, u16 icid, u16 result)
3228 {
3229 struct l2cap_move_chan_cfm cfm;
3230 u8 ident;
3231
3232 BT_DBG("icid %d, result %d", icid, result);
3233
3234 ident = l2cap_get_ident(conn);
3235 if (chan)
3236 chan->ident = ident;
3237
3238 cfm.icid = cpu_to_le16(icid);
3239 cfm.result = cpu_to_le16(result);
3240
3241 l2cap_send_cmd(conn, ident, L2CAP_MOVE_CHAN_CFM, sizeof(cfm), &cfm);
3242 }
3243
3244 static void l2cap_send_move_chan_cfm_rsp(struct l2cap_conn *conn, u8 ident,
3245 u16 icid)
3246 {
3247 struct l2cap_move_chan_cfm_rsp rsp;
3248
3249 BT_DBG("icid %d", icid);
3250
3251 rsp.icid = cpu_to_le16(icid);
3252 l2cap_send_cmd(conn, ident, L2CAP_MOVE_CHAN_CFM_RSP, sizeof(rsp), &rsp);
3253 }
3254
3255 static inline int l2cap_move_channel_req(struct l2cap_conn *conn,
3256 struct l2cap_cmd_hdr *cmd, u16 cmd_len, void *data)
3257 {
3258 struct l2cap_move_chan_req *req = data;
3259 u16 icid = 0;
3260 u16 result = L2CAP_MR_NOT_ALLOWED;
3261
3262 if (cmd_len != sizeof(*req))
3263 return -EPROTO;
3264
3265 icid = le16_to_cpu(req->icid);
3266
3267 BT_DBG("icid %d, dest_amp_id %d", icid, req->dest_amp_id);
3268
3269 if (!enable_hs)
3270 return -EINVAL;
3271
3272 /* Placeholder: Always refuse */
3273 l2cap_send_move_chan_rsp(conn, cmd->ident, icid, result);
3274
3275 return 0;
3276 }
3277
3278 static inline int l2cap_move_channel_rsp(struct l2cap_conn *conn,
3279 struct l2cap_cmd_hdr *cmd, u16 cmd_len, void *data)
3280 {
3281 struct l2cap_move_chan_rsp *rsp = data;
3282 u16 icid, result;
3283
3284 if (cmd_len != sizeof(*rsp))
3285 return -EPROTO;
3286
3287 icid = le16_to_cpu(rsp->icid);
3288 result = le16_to_cpu(rsp->result);
3289
3290 BT_DBG("icid %d, result %d", icid, result);
3291
3292 /* Placeholder: Always unconfirmed */
3293 l2cap_send_move_chan_cfm(conn, NULL, icid, L2CAP_MC_UNCONFIRMED);
3294
3295 return 0;
3296 }
3297
3298 static inline int l2cap_move_channel_confirm(struct l2cap_conn *conn,
3299 struct l2cap_cmd_hdr *cmd, u16 cmd_len, void *data)
3300 {
3301 struct l2cap_move_chan_cfm *cfm = data;
3302 u16 icid, result;
3303
3304 if (cmd_len != sizeof(*cfm))
3305 return -EPROTO;
3306
3307 icid = le16_to_cpu(cfm->icid);
3308 result = le16_to_cpu(cfm->result);
3309
3310 BT_DBG("icid %d, result %d", icid, result);
3311
3312 l2cap_send_move_chan_cfm_rsp(conn, cmd->ident, icid);
3313
3314 return 0;
3315 }
3316
3317 static inline int l2cap_move_channel_confirm_rsp(struct l2cap_conn *conn,
3318 struct l2cap_cmd_hdr *cmd, u16 cmd_len, void *data)
3319 {
3320 struct l2cap_move_chan_cfm_rsp *rsp = data;
3321 u16 icid;
3322
3323 if (cmd_len != sizeof(*rsp))
3324 return -EPROTO;
3325
3326 icid = le16_to_cpu(rsp->icid);
3327
3328 BT_DBG("icid %d", icid);
3329
3330 return 0;
3331 }
3332
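/* Sanity check LE connection parameters: connection interval in 1.25 ms
 * units, supervision timeout in 10 ms units, and a slave latency low
 * enough that the supervision timeout can still be met.
 */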
3333 static inline int l2cap_check_conn_param(u16 min, u16 max, u16 latency,
3334 u16 to_multiplier)
3335 {
3336 u16 max_latency;
3337
3338 if (min > max || min < 6 || max > 3200)
3339 return -EINVAL;
3340
3341 if (to_multiplier < 10 || to_multiplier > 3200)
3342 return -EINVAL;
3343
3344 if (max >= to_multiplier * 8)
3345 return -EINVAL;
3346
3347 max_latency = (to_multiplier * 8 / max) - 1;
3348 if (latency > 499 || latency > max_latency)
3349 return -EINVAL;
3350
3351 return 0;
3352 }
3353
3354 static inline int l2cap_conn_param_update_req(struct l2cap_conn *conn,
3355 struct l2cap_cmd_hdr *cmd, u8 *data)
3356 {
3357 struct hci_conn *hcon = conn->hcon;
3358 struct l2cap_conn_param_update_req *req;
3359 struct l2cap_conn_param_update_rsp rsp;
3360 u16 min, max, latency, to_multiplier, cmd_len;
3361 int err;
3362
3363 if (!(hcon->link_mode & HCI_LM_MASTER))
3364 return -EINVAL;
3365
3366 cmd_len = __le16_to_cpu(cmd->len);
3367 if (cmd_len != sizeof(struct l2cap_conn_param_update_req))
3368 return -EPROTO;
3369
3370 req = (struct l2cap_conn_param_update_req *) data;
3371 min = __le16_to_cpu(req->min);
3372 max = __le16_to_cpu(req->max);
3373 latency = __le16_to_cpu(req->latency);
3374 to_multiplier = __le16_to_cpu(req->to_multiplier);
3375
3376 BT_DBG("min 0x%4.4x max 0x%4.4x latency: 0x%4.4x Timeout: 0x%4.4x",
3377 min, max, latency, to_multiplier);
3378
3379 memset(&rsp, 0, sizeof(rsp));
3380
3381 err = l2cap_check_conn_param(min, max, latency, to_multiplier);
3382 if (err)
3383 rsp.result = cpu_to_le16(L2CAP_CONN_PARAM_REJECTED);
3384 else
3385 rsp.result = cpu_to_le16(L2CAP_CONN_PARAM_ACCEPTED);
3386
3387 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONN_PARAM_UPDATE_RSP,
3388 sizeof(rsp), &rsp);
3389
3390 if (!err)
3391 hci_le_conn_update(hcon, min, max, latency, to_multiplier);
3392
3393 return 0;
3394 }
3395
3396 static inline int l2cap_bredr_sig_cmd(struct l2cap_conn *conn,
3397 struct l2cap_cmd_hdr *cmd, u16 cmd_len, u8 *data)
3398 {
3399 int err = 0;
3400
3401 switch (cmd->code) {
3402 case L2CAP_COMMAND_REJ:
3403 l2cap_command_rej(conn, cmd, data);
3404 break;
3405
3406 case L2CAP_CONN_REQ:
3407 err = l2cap_connect_req(conn, cmd, data);
3408 break;
3409
3410 case L2CAP_CONN_RSP:
3411 err = l2cap_connect_rsp(conn, cmd, data);
3412 break;
3413
3414 case L2CAP_CONF_REQ:
3415 err = l2cap_config_req(conn, cmd, cmd_len, data);
3416 break;
3417
3418 case L2CAP_CONF_RSP:
3419 err = l2cap_config_rsp(conn, cmd, data);
3420 break;
3421
3422 case L2CAP_DISCONN_REQ:
3423 err = l2cap_disconnect_req(conn, cmd, data);
3424 break;
3425
3426 case L2CAP_DISCONN_RSP:
3427 err = l2cap_disconnect_rsp(conn, cmd, data);
3428 break;
3429
3430 case L2CAP_ECHO_REQ:
3431 l2cap_send_cmd(conn, cmd->ident, L2CAP_ECHO_RSP, cmd_len, data);
3432 break;
3433
3434 case L2CAP_ECHO_RSP:
3435 break;
3436
3437 case L2CAP_INFO_REQ:
3438 err = l2cap_information_req(conn, cmd, data);
3439 break;
3440
3441 case L2CAP_INFO_RSP:
3442 err = l2cap_information_rsp(conn, cmd, data);
3443 break;
3444
3445 case L2CAP_CREATE_CHAN_REQ:
3446 err = l2cap_create_channel_req(conn, cmd, cmd_len, data);
3447 break;
3448
3449 case L2CAP_CREATE_CHAN_RSP:
3450 err = l2cap_create_channel_rsp(conn, cmd, data);
3451 break;
3452
3453 case L2CAP_MOVE_CHAN_REQ:
3454 err = l2cap_move_channel_req(conn, cmd, cmd_len, data);
3455 break;
3456
3457 case L2CAP_MOVE_CHAN_RSP:
3458 err = l2cap_move_channel_rsp(conn, cmd, cmd_len, data);
3459 break;
3460
3461 case L2CAP_MOVE_CHAN_CFM:
3462 err = l2cap_move_channel_confirm(conn, cmd, cmd_len, data);
3463 break;
3464
3465 case L2CAP_MOVE_CHAN_CFM_RSP:
3466 err = l2cap_move_channel_confirm_rsp(conn, cmd, cmd_len, data);
3467 break;
3468
3469 default:
3470 BT_ERR("Unknown BR/EDR signaling command 0x%2.2x", cmd->code);
3471 err = -EINVAL;
3472 break;
3473 }
3474
3475 return err;
3476 }
3477
3478 static inline int l2cap_le_sig_cmd(struct l2cap_conn *conn,
3479 struct l2cap_cmd_hdr *cmd, u8 *data)
3480 {
3481 switch (cmd->code) {
3482 case L2CAP_COMMAND_REJ:
3483 return 0;
3484
3485 case L2CAP_CONN_PARAM_UPDATE_REQ:
3486 return l2cap_conn_param_update_req(conn, cmd, data);
3487
3488 case L2CAP_CONN_PARAM_UPDATE_RSP:
3489 return 0;
3490
3491 default:
3492 BT_ERR("Unknown LE signaling command 0x%2.2x", cmd->code);
3493 return -EINVAL;
3494 }
3495 }
3496
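/* Walk all signalling commands in the received PDU and dispatch each one
 * to the BR/EDR or LE handler; commands that fail are answered with a
 * Command Reject.
 */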
3497 static inline void l2cap_sig_channel(struct l2cap_conn *conn,
3498 struct sk_buff *skb)
3499 {
3500 u8 *data = skb->data;
3501 int len = skb->len;
3502 struct l2cap_cmd_hdr cmd;
3503 int err;
3504
3505 l2cap_raw_recv(conn, skb);
3506
3507 while (len >= L2CAP_CMD_HDR_SIZE) {
3508 u16 cmd_len;
3509 memcpy(&cmd, data, L2CAP_CMD_HDR_SIZE);
3510 data += L2CAP_CMD_HDR_SIZE;
3511 len -= L2CAP_CMD_HDR_SIZE;
3512
3513 cmd_len = le16_to_cpu(cmd.len);
3514
3515 BT_DBG("code 0x%2.2x len %d id 0x%2.2x", cmd.code, cmd_len, cmd.ident);
3516
3517 if (cmd_len > len || !cmd.ident) {
3518 BT_DBG("corrupted command");
3519 break;
3520 }
3521
3522 if (conn->hcon->type == LE_LINK)
3523 err = l2cap_le_sig_cmd(conn, &cmd, data);
3524 else
3525 err = l2cap_bredr_sig_cmd(conn, &cmd, cmd_len, data);
3526
3527 if (err) {
3528 struct l2cap_cmd_rej_unk rej;
3529
3530 BT_ERR("Wrong link type (%d)", err);
3531
3532 /* FIXME: Map err to a valid reason */
3533 rej.reason = cpu_to_le16(L2CAP_REJ_NOT_UNDERSTOOD);
3534 l2cap_send_cmd(conn, cmd.ident, L2CAP_COMMAND_REJ, sizeof(rej), &rej);
3535 }
3536
3537 data += cmd_len;
3538 len -= cmd_len;
3539 }
3540
3541 kfree_skb(skb);
3542 }
3543
3544 static int l2cap_check_fcs(struct l2cap_chan *chan, struct sk_buff *skb)
3545 {
3546 u16 our_fcs, rcv_fcs;
3547 int hdr_size;
3548
3549 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
3550 hdr_size = L2CAP_EXT_HDR_SIZE;
3551 else
3552 hdr_size = L2CAP_ENH_HDR_SIZE;
3553
3554 if (chan->fcs == L2CAP_FCS_CRC16) {
3555 skb_trim(skb, skb->len - L2CAP_FCS_SIZE);
3556 rcv_fcs = get_unaligned_le16(skb->data + skb->len);
3557 our_fcs = crc16(0, skb->data - hdr_size, skb->len + hdr_size);
3558
3559 if (our_fcs != rcv_fcs)
3560 return -EBADMSG;
3561 }
3562 return 0;
3563 }
3564
3565 static inline void l2cap_send_i_or_rr_or_rnr(struct l2cap_chan *chan)
3566 {
3567 u32 control = 0;
3568
3569 chan->frames_sent = 0;
3570
3571 control |= __set_reqseq(chan, chan->buffer_seq);
3572
3573 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
3574 control |= __set_ctrl_super(chan, L2CAP_SUPER_RNR);
3575 l2cap_send_sframe(chan, control);
3576 set_bit(CONN_RNR_SENT, &chan->conn_state);
3577 }
3578
3579 if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state))
3580 l2cap_retransmit_frames(chan);
3581
3582 l2cap_ertm_send(chan);
3583
3584 if (!test_bit(CONN_LOCAL_BUSY, &chan->conn_state) &&
3585 chan->frames_sent == 0) {
3586 control |= __set_ctrl_super(chan, L2CAP_SUPER_RR);
3587 l2cap_send_sframe(chan, control);
3588 }
3589 }
3590
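/* Insert an out-of-sequence I-frame into the SREJ queue, keeping the queue
 * ordered by tx_seq offset from buffer_seq.  Duplicate sequence numbers
 * are rejected with -EINVAL.
 */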
3591 static int l2cap_add_to_srej_queue(struct l2cap_chan *chan, struct sk_buff *skb, u16 tx_seq, u8 sar)
3592 {
3593 struct sk_buff *next_skb;
3594 int tx_seq_offset, next_tx_seq_offset;
3595
3596 bt_cb(skb)->tx_seq = tx_seq;
3597 bt_cb(skb)->sar = sar;
3598
3599 next_skb = skb_peek(&chan->srej_q);
3600
3601 tx_seq_offset = __seq_offset(chan, tx_seq, chan->buffer_seq);
3602
3603 while (next_skb) {
3604 if (bt_cb(next_skb)->tx_seq == tx_seq)
3605 return -EINVAL;
3606
3607 next_tx_seq_offset = __seq_offset(chan,
3608 bt_cb(next_skb)->tx_seq, chan->buffer_seq);
3609
3610 if (next_tx_seq_offset > tx_seq_offset) {
3611 __skb_queue_before(&chan->srej_q, next_skb, skb);
3612 return 0;
3613 }
3614
3615 if (skb_queue_is_last(&chan->srej_q, next_skb))
3616 next_skb = NULL;
3617 else
3618 next_skb = skb_queue_next(&chan->srej_q, next_skb);
3619 }
3620
3621 __skb_queue_tail(&chan->srej_q, skb);
3622
3623 return 0;
3624 }
3625
3626 static void append_skb_frag(struct sk_buff *skb,
3627 struct sk_buff *new_frag, struct sk_buff **last_frag)
3628 {
3629 /* skb->len reflects the data in the skb as well as in all fragments;
3630 * skb->data_len reflects only the data in the fragments
3631 */
3632 if (!skb_has_frag_list(skb))
3633 skb_shinfo(skb)->frag_list = new_frag;
3634
3635 new_frag->next = NULL;
3636
3637 (*last_frag)->next = new_frag;
3638 *last_frag = new_frag;
3639
3640 skb->len += new_frag->len;
3641 skb->data_len += new_frag->len;
3642 skb->truesize += new_frag->truesize;
3643 }
3644
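/* Reassemble a segmented SDU according to the SAR bits in the control
 * field and pass complete SDUs to the channel's recv callback.  On error
 * any partially assembled SDU is discarded.
 */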
3645 static int l2cap_reassemble_sdu(struct l2cap_chan *chan, struct sk_buff *skb, u32 control)
3646 {
3647 int err = -EINVAL;
3648
3649 switch (__get_ctrl_sar(chan, control)) {
3650 case L2CAP_SAR_UNSEGMENTED:
3651 if (chan->sdu)
3652 break;
3653
3654 err = chan->ops->recv(chan->data, skb);
3655 break;
3656
3657 case L2CAP_SAR_START:
3658 if (chan->sdu)
3659 break;
3660
3661 chan->sdu_len = get_unaligned_le16(skb->data);
3662 skb_pull(skb, L2CAP_SDULEN_SIZE);
3663
3664 if (chan->sdu_len > chan->imtu) {
3665 err = -EMSGSIZE;
3666 break;
3667 }
3668
3669 if (skb->len >= chan->sdu_len)
3670 break;
3671
3672 chan->sdu = skb;
3673 chan->sdu_last_frag = skb;
3674
3675 skb = NULL;
3676 err = 0;
3677 break;
3678
3679 case L2CAP_SAR_CONTINUE:
3680 if (!chan->sdu)
3681 break;
3682
3683 append_skb_frag(chan->sdu, skb,
3684 &chan->sdu_last_frag);
3685 skb = NULL;
3686
3687 if (chan->sdu->len >= chan->sdu_len)
3688 break;
3689
3690 err = 0;
3691 break;
3692
3693 case L2CAP_SAR_END:
3694 if (!chan->sdu)
3695 break;
3696
3697 append_skb_frag(chan->sdu, skb,
3698 &chan->sdu_last_frag);
3699 skb = NULL;
3700
3701 if (chan->sdu->len != chan->sdu_len)
3702 break;
3703
3704 err = chan->ops->recv(chan->data, chan->sdu);
3705
3706 if (!err) {
3707 /* Reassembly complete */
3708 chan->sdu = NULL;
3709 chan->sdu_last_frag = NULL;
3710 chan->sdu_len = 0;
3711 }
3712 break;
3713 }
3714
3715 if (err) {
3716 kfree_skb(skb);
3717 kfree_skb(chan->sdu);
3718 chan->sdu = NULL;
3719 chan->sdu_last_frag = NULL;
3720 chan->sdu_len = 0;
3721 }
3722
3723 return err;
3724 }
3725
3726 static void l2cap_ertm_enter_local_busy(struct l2cap_chan *chan)
3727 {
3728 u32 control;
3729
3730 BT_DBG("chan %p, Enter local busy", chan);
3731
3732 set_bit(CONN_LOCAL_BUSY, &chan->conn_state);
3733
3734 control = __set_reqseq(chan, chan->buffer_seq);
3735 control |= __set_ctrl_super(chan, L2CAP_SUPER_RNR);
3736 l2cap_send_sframe(chan, control);
3737
3738 set_bit(CONN_RNR_SENT, &chan->conn_state);
3739
3740 __clear_ack_timer(chan);
3741 }
3742
3743 static void l2cap_ertm_exit_local_busy(struct l2cap_chan *chan)
3744 {
3745 u32 control;
3746
3747 if (!test_bit(CONN_RNR_SENT, &chan->conn_state))
3748 goto done;
3749
3750 control = __set_reqseq(chan, chan->buffer_seq);
3751 control |= __set_ctrl_poll(chan);
3752 control |= __set_ctrl_super(chan, L2CAP_SUPER_RR);
3753 l2cap_send_sframe(chan, control);
3754 chan->retry_count = 1;
3755
3756 __clear_retrans_timer(chan);
3757 __set_monitor_timer(chan);
3758
3759 set_bit(CONN_WAIT_F, &chan->conn_state);
3760
3761 done:
3762 clear_bit(CONN_LOCAL_BUSY, &chan->conn_state);
3763 clear_bit(CONN_RNR_SENT, &chan->conn_state);
3764
3765 BT_DBG("chan %p, Exit local busy", chan);
3766 }
3767
3768 void l2cap_chan_busy(struct l2cap_chan *chan, int busy)
3769 {
3770 if (chan->mode == L2CAP_MODE_ERTM) {
3771 if (busy)
3772 l2cap_ertm_enter_local_busy(chan);
3773 else
3774 l2cap_ertm_exit_local_busy(chan);
3775 }
3776 }
3777
3778 static void l2cap_check_srej_gap(struct l2cap_chan *chan, u16 tx_seq)
3779 {
3780 struct sk_buff *skb;
3781 u32 control;
3782
3783 while ((skb = skb_peek(&chan->srej_q)) &&
3784 !test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
3785 int err;
3786
3787 if (bt_cb(skb)->tx_seq != tx_seq)
3788 break;
3789
3790 skb = skb_dequeue(&chan->srej_q);
3791 control = __set_ctrl_sar(chan, bt_cb(skb)->sar);
3792 err = l2cap_reassemble_sdu(chan, skb, control);
3793
3794 if (err < 0) {
3795 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
3796 break;
3797 }
3798
3799 chan->buffer_seq_srej = __next_seq(chan, chan->buffer_seq_srej);
3800 tx_seq = __next_seq(chan, tx_seq);
3801 }
3802 }
3803
3804 static void l2cap_resend_srejframe(struct l2cap_chan *chan, u16 tx_seq)
3805 {
3806 struct srej_list *l, *tmp;
3807 u32 control;
3808
3809 list_for_each_entry_safe(l, tmp, &chan->srej_l, list) {
3810 if (l->tx_seq == tx_seq) {
3811 list_del(&l->list);
3812 kfree(l);
3813 return;
3814 }
3815 control = __set_ctrl_super(chan, L2CAP_SUPER_SREJ);
3816 control |= __set_reqseq(chan, l->tx_seq);
3817 l2cap_send_sframe(chan, control);
3818 list_del(&l->list);
3819 list_add_tail(&l->list, &chan->srej_l);
3820 }
3821 }
3822
3823 static int l2cap_send_srejframe(struct l2cap_chan *chan, u16 tx_seq)
3824 {
3825 struct srej_list *new;
3826 u32 control;
3827
3828 while (tx_seq != chan->expected_tx_seq) {
3829 control = __set_ctrl_super(chan, L2CAP_SUPER_SREJ);
3830 control |= __set_reqseq(chan, chan->expected_tx_seq);
3831 l2cap_send_sframe(chan, control);
3832
3833 new = kzalloc(sizeof(struct srej_list), GFP_ATOMIC);
3834 if (!new)
3835 return -ENOMEM;
3836
3837 new->tx_seq = chan->expected_tx_seq;
3838
3839 chan->expected_tx_seq = __next_seq(chan, chan->expected_tx_seq);
3840
3841 list_add_tail(&new->list, &chan->srej_l);
3842 }
3843
3844 chan->expected_tx_seq = __next_seq(chan, chan->expected_tx_seq);
3845
3846 return 0;
3847 }
3848
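/* Receive path for an ERTM I-frame: acknowledge outstanding frames, check
 * the sequence number against the receive window, run SREJ recovery for
 * missing frames and reassemble in-sequence data.
 */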
3849 static inline int l2cap_data_channel_iframe(struct l2cap_chan *chan, u32 rx_control, struct sk_buff *skb)
3850 {
3851 u16 tx_seq = __get_txseq(chan, rx_control);
3852 u16 req_seq = __get_reqseq(chan, rx_control);
3853 u8 sar = __get_ctrl_sar(chan, rx_control);
3854 int tx_seq_offset, expected_tx_seq_offset;
3855 int num_to_ack = (chan->tx_win/6) + 1;
3856 int err = 0;
3857
3858 BT_DBG("chan %p len %d tx_seq %d rx_control 0x%8.8x", chan, skb->len,
3859 tx_seq, rx_control);
3860
3861 if (__is_ctrl_final(chan, rx_control) &&
3862 test_bit(CONN_WAIT_F, &chan->conn_state)) {
3863 __clear_monitor_timer(chan);
3864 if (chan->unacked_frames > 0)
3865 __set_retrans_timer(chan);
3866 clear_bit(CONN_WAIT_F, &chan->conn_state);
3867 }
3868
3869 chan->expected_ack_seq = req_seq;
3870 l2cap_drop_acked_frames(chan);
3871
3872 tx_seq_offset = __seq_offset(chan, tx_seq, chan->buffer_seq);
3873
3874 /* invalid tx_seq */
3875 if (tx_seq_offset >= chan->tx_win) {
3876 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
3877 goto drop;
3878 }
3879
3880 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state))
3881 goto drop;
3882
3883 if (tx_seq == chan->expected_tx_seq)
3884 goto expected;
3885
3886 if (test_bit(CONN_SREJ_SENT, &chan->conn_state)) {
3887 struct srej_list *first;
3888
3889 first = list_first_entry(&chan->srej_l,
3890 struct srej_list, list);
3891 if (tx_seq == first->tx_seq) {
3892 l2cap_add_to_srej_queue(chan, skb, tx_seq, sar);
3893 l2cap_check_srej_gap(chan, tx_seq);
3894
3895 list_del(&first->list);
3896 kfree(first);
3897
3898 if (list_empty(&chan->srej_l)) {
3899 chan->buffer_seq = chan->buffer_seq_srej;
3900 clear_bit(CONN_SREJ_SENT, &chan->conn_state);
3901 l2cap_send_ack(chan);
3902 BT_DBG("chan %p, Exit SREJ_SENT", chan);
3903 }
3904 } else {
3905 struct srej_list *l;
3906
3907 /* duplicated tx_seq */
3908 if (l2cap_add_to_srej_queue(chan, skb, tx_seq, sar) < 0)
3909 goto drop;
3910
3911 list_for_each_entry(l, &chan->srej_l, list) {
3912 if (l->tx_seq == tx_seq) {
3913 l2cap_resend_srejframe(chan, tx_seq);
3914 return 0;
3915 }
3916 }
3917
3918 err = l2cap_send_srejframe(chan, tx_seq);
3919 if (err < 0) {
3920 l2cap_send_disconn_req(chan->conn, chan, -err);
3921 return err;
3922 }
3923 }
3924 } else {
3925 expected_tx_seq_offset = __seq_offset(chan,
3926 chan->expected_tx_seq, chan->buffer_seq);
3927
3928 /* duplicated tx_seq */
3929 if (tx_seq_offset < expected_tx_seq_offset)
3930 goto drop;
3931
3932 set_bit(CONN_SREJ_SENT, &chan->conn_state);
3933
3934 BT_DBG("chan %p, Enter SREJ", chan);
3935
3936 INIT_LIST_HEAD(&chan->srej_l);
3937 chan->buffer_seq_srej = chan->buffer_seq;
3938
3939 __skb_queue_head_init(&chan->srej_q);
3940 l2cap_add_to_srej_queue(chan, skb, tx_seq, sar);
3941
3942 set_bit(CONN_SEND_PBIT, &chan->conn_state);
3943
3944 err = l2cap_send_srejframe(chan, tx_seq);
3945 if (err < 0) {
3946 l2cap_send_disconn_req(chan->conn, chan, -err);
3947 return err;
3948 }
3949
3950 __clear_ack_timer(chan);
3951 }
3952 return 0;
3953
3954 expected:
3955 chan->expected_tx_seq = __next_seq(chan, chan->expected_tx_seq);
3956
3957 if (test_bit(CONN_SREJ_SENT, &chan->conn_state)) {
3958 bt_cb(skb)->tx_seq = tx_seq;
3959 bt_cb(skb)->sar = sar;
3960 __skb_queue_tail(&chan->srej_q, skb);
3961 return 0;
3962 }
3963
3964 err = l2cap_reassemble_sdu(chan, skb, rx_control);
3965 chan->buffer_seq = __next_seq(chan, chan->buffer_seq);
3966
3967 if (err < 0) {
3968 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
3969 return err;
3970 }
3971
3972 if (__is_ctrl_final(chan, rx_control)) {
3973 if (!test_and_clear_bit(CONN_REJ_ACT, &chan->conn_state))
3974 l2cap_retransmit_frames(chan);
3975 }
3976
3977
3978 chan->num_acked = (chan->num_acked + 1) % num_to_ack;
3979 if (chan->num_acked == num_to_ack - 1)
3980 l2cap_send_ack(chan);
3981 else
3982 __set_ack_timer(chan);
3983
3984 return 0;
3985
3986 drop:
3987 kfree_skb(skb);
3988 return 0;
3989 }
3990
3991 static inline void l2cap_data_channel_rrframe(struct l2cap_chan *chan, u32 rx_control)
3992 {
3993 BT_DBG("chan %p, req_seq %d ctrl 0x%8.8x", chan,
3994 __get_reqseq(chan, rx_control), rx_control);
3995
3996 chan->expected_ack_seq = __get_reqseq(chan, rx_control);
3997 l2cap_drop_acked_frames(chan);
3998
3999 if (__is_ctrl_poll(chan, rx_control)) {
4000 set_bit(CONN_SEND_FBIT, &chan->conn_state);
4001 if (test_bit(CONN_SREJ_SENT, &chan->conn_state)) {
4002 if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state) &&
4003 (chan->unacked_frames > 0))
4004 __set_retrans_timer(chan);
4005
4006 clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
4007 l2cap_send_srejtail(chan);
4008 } else {
4009 l2cap_send_i_or_rr_or_rnr(chan);
4010 }
4011
4012 } else if (__is_ctrl_final(chan, rx_control)) {
4013 clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
4014
4015 if (!test_and_clear_bit(CONN_REJ_ACT, &chan->conn_state))
4016 l2cap_retransmit_frames(chan);
4017
4018 } else {
4019 if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state) &&
4020 (chan->unacked_frames > 0))
4021 __set_retrans_timer(chan);
4022
4023 clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
4024 if (test_bit(CONN_SREJ_SENT, &chan->conn_state))
4025 l2cap_send_ack(chan);
4026 else
4027 l2cap_ertm_send(chan);
4028 }
4029 }
4030
4031 static inline void l2cap_data_channel_rejframe(struct l2cap_chan *chan, u32 rx_control)
4032 {
4033 u16 tx_seq = __get_reqseq(chan, rx_control);
4034
4035 BT_DBG("chan %p, req_seq %d ctrl 0x%8.8x", chan, tx_seq, rx_control);
4036
4037 clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
4038
4039 chan->expected_ack_seq = tx_seq;
4040 l2cap_drop_acked_frames(chan);
4041
4042 if (__is_ctrl_final(chan, rx_control)) {
4043 if (!test_and_clear_bit(CONN_REJ_ACT, &chan->conn_state))
4044 l2cap_retransmit_frames(chan);
4045 } else {
4046 l2cap_retransmit_frames(chan);
4047
4048 if (test_bit(CONN_WAIT_F, &chan->conn_state))
4049 set_bit(CONN_REJ_ACT, &chan->conn_state);
4050 }
4051 }
4052 static inline void l2cap_data_channel_srejframe(struct l2cap_chan *chan, u32 rx_control)
4053 {
4054 u16 tx_seq = __get_reqseq(chan, rx_control);
4055
4056 BT_DBG("chan %p, req_seq %d ctrl 0x%8.8x", chan, tx_seq, rx_control);
4057
4058 clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
4059
4060 if (__is_ctrl_poll(chan, rx_control)) {
4061 chan->expected_ack_seq = tx_seq;
4062 l2cap_drop_acked_frames(chan);
4063
4064 set_bit(CONN_SEND_FBIT, &chan->conn_state);
4065 l2cap_retransmit_one_frame(chan, tx_seq);
4066
4067 l2cap_ertm_send(chan);
4068
4069 if (test_bit(CONN_WAIT_F, &chan->conn_state)) {
4070 chan->srej_save_reqseq = tx_seq;
4071 set_bit(CONN_SREJ_ACT, &chan->conn_state);
4072 }
4073 } else if (__is_ctrl_final(chan, rx_control)) {
4074 if (test_bit(CONN_SREJ_ACT, &chan->conn_state) &&
4075 chan->srej_save_reqseq == tx_seq)
4076 clear_bit(CONN_SREJ_ACT, &chan->conn_state);
4077 else
4078 l2cap_retransmit_one_frame(chan, tx_seq);
4079 } else {
4080 l2cap_retransmit_one_frame(chan, tx_seq);
4081 if (test_bit(CONN_WAIT_F, &chan->conn_state)) {
4082 chan->srej_save_reqseq = tx_seq;
4083 set_bit(CONN_SREJ_ACT, &chan->conn_state);
4084 }
4085 }
4086 }
4087
4088 static inline void l2cap_data_channel_rnrframe(struct l2cap_chan *chan, u32 rx_control)
4089 {
4090 u16 tx_seq = __get_reqseq(chan, rx_control);
4091
4092 BT_DBG("chan %p, req_seq %d ctrl 0x%8.8x", chan, tx_seq, rx_control);
4093
4094 set_bit(CONN_REMOTE_BUSY, &chan->conn_state);
4095 chan->expected_ack_seq = tx_seq;
4096 l2cap_drop_acked_frames(chan);
4097
4098 if (__is_ctrl_poll(chan, rx_control))
4099 set_bit(CONN_SEND_FBIT, &chan->conn_state);
4100
4101 if (!test_bit(CONN_SREJ_SENT, &chan->conn_state)) {
4102 __clear_retrans_timer(chan);
4103 if (__is_ctrl_poll(chan, rx_control))
4104 l2cap_send_rr_or_rnr(chan, L2CAP_CTRL_FINAL);
4105 return;
4106 }
4107
4108 if (__is_ctrl_poll(chan, rx_control)) {
4109 l2cap_send_srejtail(chan);
4110 } else {
4111 rx_control = __set_ctrl_super(chan, L2CAP_SUPER_RR);
4112 l2cap_send_sframe(chan, rx_control);
4113 }
4114 }
4115
4116 static inline int l2cap_data_channel_sframe(struct l2cap_chan *chan, u32 rx_control, struct sk_buff *skb)
4117 {
4118 BT_DBG("chan %p rx_control 0x%8.8x len %d", chan, rx_control, skb->len);
4119
4120 if (__is_ctrl_final(chan, rx_control) &&
4121 test_bit(CONN_WAIT_F, &chan->conn_state)) {
4122 __clear_monitor_timer(chan);
4123 if (chan->unacked_frames > 0)
4124 __set_retrans_timer(chan);
4125 clear_bit(CONN_WAIT_F, &chan->conn_state);
4126 }
4127
4128 switch (__get_ctrl_super(chan, rx_control)) {
4129 case L2CAP_SUPER_RR:
4130 l2cap_data_channel_rrframe(chan, rx_control);
4131 break;
4132
4133 case L2CAP_SUPER_REJ:
4134 l2cap_data_channel_rejframe(chan, rx_control);
4135 break;
4136
4137 case L2CAP_SUPER_SREJ:
4138 l2cap_data_channel_srejframe(chan, rx_control);
4139 break;
4140
4141 case L2CAP_SUPER_RNR:
4142 l2cap_data_channel_rnrframe(chan, rx_control);
4143 break;
4144 }
4145
4146 kfree_skb(skb);
4147 return 0;
4148 }
4149
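/* Entry point for ERTM data: strip the control field, verify FCS and
 * lengths, validate req_seq and hand the frame to the I-frame or S-frame
 * handler.
 */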
4150 static int l2cap_ertm_data_rcv(struct sock *sk, struct sk_buff *skb)
4151 {
4152 struct l2cap_chan *chan = l2cap_pi(sk)->chan;
4153 u32 control;
4154 u16 req_seq;
4155 int len, next_tx_seq_offset, req_seq_offset;
4156
4157 control = __get_control(chan, skb->data);
4158 skb_pull(skb, __ctrl_size(chan));
4159 len = skb->len;
4160
4161 /*
4162 * We can just drop a corrupted I-frame here.
4163 * The receive side will notice the missing frame, start the
4164 * recovery procedure and ask for a retransmission.
4165 */
4166 if (l2cap_check_fcs(chan, skb))
4167 goto drop;
4168
4169 if (__is_sar_start(chan, control) && !__is_sframe(chan, control))
4170 len -= L2CAP_SDULEN_SIZE;
4171
4172 if (chan->fcs == L2CAP_FCS_CRC16)
4173 len -= L2CAP_FCS_SIZE;
4174
4175 if (len > chan->mps) {
4176 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
4177 goto drop;
4178 }
4179
4180 req_seq = __get_reqseq(chan, control);
4181
4182 req_seq_offset = __seq_offset(chan, req_seq, chan->expected_ack_seq);
4183
4184 next_tx_seq_offset = __seq_offset(chan, chan->next_tx_seq,
4185 chan->expected_ack_seq);
4186
4187 /* check for invalid req-seq */
4188 if (req_seq_offset > next_tx_seq_offset) {
4189 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
4190 goto drop;
4191 }
4192
4193 if (!__is_sframe(chan, control)) {
4194 if (len < 0) {
4195 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
4196 goto drop;
4197 }
4198
4199 l2cap_data_channel_iframe(chan, control, skb);
4200 } else {
4201 if (len != 0) {
4202 BT_ERR("%d", len);
4203 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
4204 goto drop;
4205 }
4206
4207 l2cap_data_channel_sframe(chan, control, skb);
4208 }
4209
4210 return 0;
4211
4212 drop:
4213 kfree_skb(skb);
4214 return 0;
4215 }
4216
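/* Deliver a frame received on a connection-oriented CID to its channel,
 * handling it according to the channel mode (basic, ERTM or streaming).
 */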
4217 static inline int l2cap_data_channel(struct l2cap_conn *conn, u16 cid, struct sk_buff *skb)
4218 {
4219 struct l2cap_chan *chan;
4220 struct sock *sk = NULL;
4221 u32 control;
4222 u16 tx_seq;
4223 int len;
4224
4225 chan = l2cap_get_chan_by_scid(conn, cid);
4226 if (!chan) {
4227 BT_DBG("unknown cid 0x%4.4x", cid);
4228 goto drop;
4229 }
4230
4231 sk = chan->sk;
4232
4233 BT_DBG("chan %p, len %d", chan, skb->len);
4234
4235 if (chan->state != BT_CONNECTED)
4236 goto drop;
4237
4238 switch (chan->mode) {
4239 case L2CAP_MODE_BASIC:
4240 /* If the socket receive buffer overflows we drop data here,
4241 * which is *bad* because L2CAP has to be reliable.
4242 * But we don't have any other choice: L2CAP doesn't
4243 * provide a flow control mechanism. */
4244
4245 if (chan->imtu < skb->len)
4246 goto drop;
4247
4248 if (!chan->ops->recv(chan->data, skb))
4249 goto done;
4250 break;
4251
4252 case L2CAP_MODE_ERTM:
4253 l2cap_ertm_data_rcv(sk, skb);
4254
4255 goto done;
4256
4257 case L2CAP_MODE_STREAMING:
4258 control = __get_control(chan, skb->data);
4259 skb_pull(skb, __ctrl_size(chan));
4260 len = skb->len;
4261
4262 if (l2cap_check_fcs(chan, skb))
4263 goto drop;
4264
4265 if (__is_sar_start(chan, control))
4266 len -= L2CAP_SDULEN_SIZE;
4267
4268 if (chan->fcs == L2CAP_FCS_CRC16)
4269 len -= L2CAP_FCS_SIZE;
4270
4271 if (len > chan->mps || len < 0 || __is_sframe(chan, control))
4272 goto drop;
4273
4274 tx_seq = __get_txseq(chan, control);
4275
4276 if (chan->expected_tx_seq != tx_seq) {
4277 /* Frame(s) missing - must discard partial SDU */
4278 kfree_skb(chan->sdu);
4279 chan->sdu = NULL;
4280 chan->sdu_last_frag = NULL;
4281 chan->sdu_len = 0;
4282
4283 /* TODO: Notify userland of missing data */
4284 }
4285
4286 chan->expected_tx_seq = __next_seq(chan, tx_seq);
4287
4288 if (l2cap_reassemble_sdu(chan, skb, control) == -EMSGSIZE)
4289 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
4290
4291 goto done;
4292
4293 default:
4294 BT_DBG("chan %p: bad mode 0x%2.2x", chan, chan->mode);
4295 break;
4296 }
4297
4298 drop:
4299 kfree_skb(skb);
4300
4301 done:
4302 if (sk)
4303 release_sock(sk);
4304
4305 return 0;
4306 }
4307
4308 static inline int l2cap_conless_channel(struct l2cap_conn *conn, __le16 psm, struct sk_buff *skb)
4309 {
4310 struct sock *sk = NULL;
4311 struct l2cap_chan *chan;
4312
4313 chan = l2cap_global_chan_by_psm(0, psm, conn->src);
4314 if (!chan)
4315 goto drop;
4316
4317 sk = chan->sk;
4318
4319 lock_sock(sk);
4320
4321 BT_DBG("sk %p, len %d", sk, skb->len);
4322
4323 if (chan->state != BT_BOUND && chan->state != BT_CONNECTED)
4324 goto drop;
4325
4326 if (chan->imtu < skb->len)
4327 goto drop;
4328
4329 if (!chan->ops->recv(chan->data, skb))
4330 goto done;
4331
4332 drop:
4333 kfree_skb(skb);
4334
4335 done:
4336 if (sk)
4337 release_sock(sk);
4338 return 0;
4339 }
4340
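/*
 * Fixed channel reception for the LE Attribute Protocol: same checks
 * as the connectionless path, but the channel lookup is done by
 * source CID rather than by PSM.
 */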
4341 static inline int l2cap_att_channel(struct l2cap_conn *conn, __le16 cid, struct sk_buff *skb)
4342 {
4343 struct sock *sk = NULL;
4344 struct l2cap_chan *chan;
4345
4346 chan = l2cap_global_chan_by_scid(0, cid, conn->src);
4347 if (!chan)
4348 goto drop;
4349
4350 sk = chan->sk;
4351
4352 lock_sock(sk);
4353
4354 BT_DBG("sk %p, len %d", sk, skb->len);
4355
4356 if (chan->state != BT_BOUND && chan->state != BT_CONNECTED)
4357 goto drop;
4358
4359 if (chan->imtu < skb->len)
4360 goto drop;
4361
4362 if (!chan->ops->recv(chan->data, skb))
4363 goto done;
4364
4365 drop:
4366 kfree_skb(skb);
4367
4368 done:
4369 if (sk)
4370 release_sock(sk);
4371 return 0;
4372 }
4373
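/*
 * Demultiplex a complete, reassembled L2CAP frame by CID: signalling
 * (BR/EDR and LE), connectionless, ATT and SMP traffic go to their
 * dedicated handlers, and everything else is treated as data on a
 * connection-oriented channel.  Frames whose header length disagrees
 * with the payload length are discarded.
 */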
4374 static void l2cap_recv_frame(struct l2cap_conn *conn, struct sk_buff *skb)
4375 {
4376 struct l2cap_hdr *lh = (void *) skb->data;
4377 u16 cid, len;
4378 __le16 psm;
4379
4380 skb_pull(skb, L2CAP_HDR_SIZE);
4381 cid = __le16_to_cpu(lh->cid);
4382 len = __le16_to_cpu(lh->len);
4383
4384 if (len != skb->len) {
4385 kfree_skb(skb);
4386 return;
4387 }
4388
4389 BT_DBG("len %d, cid 0x%4.4x", len, cid);
4390
4391 switch (cid) {
4392 case L2CAP_CID_LE_SIGNALING:
4393 case L2CAP_CID_SIGNALING:
4394 l2cap_sig_channel(conn, skb);
4395 break;
4396
4397 case L2CAP_CID_CONN_LESS:
4398 psm = get_unaligned_le16(skb->data);
4399 skb_pull(skb, 2);
4400 l2cap_conless_channel(conn, psm, skb);
4401 break;
4402
4403 case L2CAP_CID_LE_DATA:
4404 l2cap_att_channel(conn, cid, skb);
4405 break;
4406
4407 case L2CAP_CID_SMP:
4408 if (smp_sig_channel(conn, skb))
4409 l2cap_conn_del(conn->hcon, EACCES);
4410 break;
4411
4412 default:
4413 l2cap_data_channel(conn, cid, skb);
4414 break;
4415 }
4416 }
4417
4418 /* ---- L2CAP interface with lower layer (HCI) ---- */
4419
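/*
 * Incoming ACL connection request: walk the global channel list and
 * report whether any listening channel on this adapter (or bound to
 * BDADDR_ANY) accepts the link, preferring an exact local-address
 * match, together with the master role request derived from the
 * channel's role-switch flag.
 */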
4420 static int l2cap_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
4421 {
4422 int exact = 0, lm1 = 0, lm2 = 0;
4423 struct l2cap_chan *c;
4424
4425 if (type != ACL_LINK)
4426 return -EINVAL;
4427
4428 BT_DBG("hdev %s, bdaddr %s", hdev->name, batostr(bdaddr));
4429
4430 /* Find listening sockets and check their link_mode */
4431 read_lock(&chan_list_lock);
4432 list_for_each_entry(c, &chan_list, global_l) {
4433 struct sock *sk = c->sk;
4434
4435 if (c->state != BT_LISTEN)
4436 continue;
4437
4438 if (!bacmp(&bt_sk(sk)->src, &hdev->bdaddr)) {
4439 lm1 |= HCI_LM_ACCEPT;
4440 if (test_bit(FLAG_ROLE_SWITCH, &c->flags))
4441 lm1 |= HCI_LM_MASTER;
4442 exact++;
4443 } else if (!bacmp(&bt_sk(sk)->src, BDADDR_ANY)) {
4444 lm2 |= HCI_LM_ACCEPT;
4445 if (test_bit(FLAG_ROLE_SWITCH, &c->flags))
4446 lm2 |= HCI_LM_MASTER;
4447 }
4448 }
4449 read_unlock(&chan_list_lock);
4450
4451 return exact ? lm1 : lm2;
4452 }
4453
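/* ACL/LE link establishment completed: set up the L2CAP connection
 * on success, tear it down on failure. */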
4454 static int l2cap_connect_cfm(struct hci_conn *hcon, u8 status)
4455 {
4456 struct l2cap_conn *conn;
4457
4458 BT_DBG("hcon %p bdaddr %s status %d", hcon, batostr(&hcon->dst), status);
4459
4460 if (!(hcon->type == ACL_LINK || hcon->type == LE_LINK))
4461 return -EINVAL;
4462
4463 if (!status) {
4464 conn = l2cap_conn_add(hcon, status);
4465 if (conn)
4466 l2cap_conn_ready(conn);
4467 } else
4468 l2cap_conn_del(hcon, bt_to_errno(status));
4469
4470 return 0;
4471 }
4472
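/* Report the disconnect reason recorded for this connection, falling
 * back to "remote user terminated" when there is no L2CAP state. */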
4473 static int l2cap_disconn_ind(struct hci_conn *hcon)
4474 {
4475 struct l2cap_conn *conn = hcon->l2cap_data;
4476
4477 BT_DBG("hcon %p", hcon);
4478
4479 if ((hcon->type != ACL_LINK && hcon->type != LE_LINK) || !conn)
4480 return HCI_ERROR_REMOTE_USER_TERM;
4481
4482 return conn->disc_reason;
4483 }
4484
4485 static int l2cap_disconn_cfm(struct hci_conn *hcon, u8 reason)
4486 {
4487 BT_DBG("hcon %p reason %d", hcon, reason);
4488
4489 if (!(hcon->type == ACL_LINK || hcon->type == LE_LINK))
4490 return -EINVAL;
4491
4492 l2cap_conn_del(hcon, bt_to_errno(reason));
4493
4494 return 0;
4495 }
4496
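/*
 * React to an encryption change on a connection-oriented channel:
 * when encryption is lost, medium-security channels restart the
 * channel timer with the encryption timeout while high-security
 * channels are closed immediately; when encryption is (re)enabled,
 * the pending timer on medium-security channels is cleared.
 */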
4497 static inline void l2cap_check_encryption(struct l2cap_chan *chan, u8 encrypt)
4498 {
4499 if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED)
4500 return;
4501
4502 if (encrypt == 0x00) {
4503 if (chan->sec_level == BT_SECURITY_MEDIUM) {
4504 __clear_chan_timer(chan);
4505 __set_chan_timer(chan, L2CAP_ENC_TIMEOUT);
4506 } else if (chan->sec_level == BT_SECURITY_HIGH)
4507 l2cap_chan_close(chan, ECONNREFUSED);
4508 } else {
4509 if (chan->sec_level == BT_SECURITY_MEDIUM)
4510 __clear_chan_timer(chan);
4511 }
4512 }
4513
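/*
 * Authentication/encryption completed on the underlying link.  For LE
 * links key distribution is kicked off.  Then, for every channel on
 * the connection, the outcome either finishes a pending connect
 * (sending the Connection Request or Response that was waiting on
 * security) or, on failure, arms the disconnect timer.
 */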
4514 static int l2cap_security_cfm(struct hci_conn *hcon, u8 status, u8 encrypt)
4515 {
4516 struct l2cap_conn *conn = hcon->l2cap_data;
4517 struct l2cap_chan *chan;
4518
4519 if (!conn)
4520 return 0;
4521
4522 BT_DBG("conn %p", conn);
4523
4524 if (hcon->type == LE_LINK) {
4525 smp_distribute_keys(conn, 0);
4526 del_timer(&conn->security_timer);
4527 }
4528
4529 rcu_read_lock();
4530
4531 list_for_each_entry_rcu(chan, &conn->chan_l, list) {
4532 struct sock *sk = chan->sk;
4533
4534 bh_lock_sock(sk);
4535
4536 BT_DBG("chan->scid 0x%4.4x", chan->scid);
4537
4538 if (chan->scid == L2CAP_CID_LE_DATA) {
4539 if (!status && encrypt) {
4540 chan->sec_level = hcon->sec_level;
4541 l2cap_chan_ready(sk);
4542 }
4543
4544 bh_unlock_sock(sk);
4545 continue;
4546 }
4547
4548 if (test_bit(CONF_CONNECT_PEND, &chan->conf_state)) {
4549 bh_unlock_sock(sk);
4550 continue;
4551 }
4552
4553 if (!status && (chan->state == BT_CONNECTED ||
4554 chan->state == BT_CONFIG)) {
4555 l2cap_check_encryption(chan, encrypt);
4556 bh_unlock_sock(sk);
4557 continue;
4558 }
4559
4560 if (chan->state == BT_CONNECT) {
4561 if (!status) {
4562 struct l2cap_conn_req req;
4563 req.scid = cpu_to_le16(chan->scid);
4564 req.psm = chan->psm;
4565
4566 chan->ident = l2cap_get_ident(conn);
4567 set_bit(CONF_CONNECT_PEND, &chan->conf_state);
4568
4569 l2cap_send_cmd(conn, chan->ident,
4570 L2CAP_CONN_REQ, sizeof(req), &req);
4571 } else {
4572 __clear_chan_timer(chan);
4573 __set_chan_timer(chan, L2CAP_DISC_TIMEOUT);
4574 }
4575 } else if (chan->state == BT_CONNECT2) {
4576 struct l2cap_conn_rsp rsp;
4577 __u16 res, stat;
4578
4579 if (!status) {
4580 if (bt_sk(sk)->defer_setup) {
4581 struct sock *parent = bt_sk(sk)->parent;
4582 res = L2CAP_CR_PEND;
4583 stat = L2CAP_CS_AUTHOR_PEND;
4584 if (parent)
4585 parent->sk_data_ready(parent, 0);
4586 } else {
4587 l2cap_state_change(chan, BT_CONFIG);
4588 res = L2CAP_CR_SUCCESS;
4589 stat = L2CAP_CS_NO_INFO;
4590 }
4591 } else {
4592 l2cap_state_change(chan, BT_DISCONN);
4593 __set_chan_timer(chan, L2CAP_DISC_TIMEOUT);
4594 res = L2CAP_CR_SEC_BLOCK;
4595 stat = L2CAP_CS_NO_INFO;
4596 }
4597
4598 rsp.scid = cpu_to_le16(chan->dcid);
4599 rsp.dcid = cpu_to_le16(chan->scid);
4600 rsp.result = cpu_to_le16(res);
4601 rsp.status = cpu_to_le16(stat);
4602 l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
4603 sizeof(rsp), &rsp);
4604 }
4605
4606 bh_unlock_sock(sk);
4607 }
4608
4609 rcu_read_unlock();
4610
4611 return 0;
4612 }
4613
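/*
 * Entry point for ACL data from the HCI layer.  Start fragments are
 * validated against the Basic L2CAP header and either processed
 * immediately (complete frame) or copied into conn->rx_skb;
 * continuation fragments are appended until rx_len reaches zero, at
 * which point the reassembled frame is passed to l2cap_recv_frame().
 */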
4614 static int l2cap_recv_acldata(struct hci_conn *hcon, struct sk_buff *skb, u16 flags)
4615 {
4616 struct l2cap_conn *conn = hcon->l2cap_data;
4617
4618 if (!conn)
4619 conn = l2cap_conn_add(hcon, 0);
4620
4621 if (!conn)
4622 goto drop;
4623
4624 BT_DBG("conn %p len %d flags 0x%x", conn, skb->len, flags);
4625
4626 if (!(flags & ACL_CONT)) {
4627 struct l2cap_hdr *hdr;
4628 struct l2cap_chan *chan;
4629 u16 cid;
4630 int len;
4631
4632 if (conn->rx_len) {
4633 BT_ERR("Unexpected start frame (len %d)", skb->len);
4634 kfree_skb(conn->rx_skb);
4635 conn->rx_skb = NULL;
4636 conn->rx_len = 0;
4637 l2cap_conn_unreliable(conn, ECOMM);
4638 }
4639
4640 /* A start fragment always begins with the Basic L2CAP header */
4641 if (skb->len < L2CAP_HDR_SIZE) {
4642 BT_ERR("Frame is too short (len %d)", skb->len);
4643 l2cap_conn_unreliable(conn, ECOMM);
4644 goto drop;
4645 }
4646
4647 hdr = (struct l2cap_hdr *) skb->data;
4648 len = __le16_to_cpu(hdr->len) + L2CAP_HDR_SIZE;
4649 cid = __le16_to_cpu(hdr->cid);
4650
4651 if (len == skb->len) {
4652 /* Complete frame received */
4653 l2cap_recv_frame(conn, skb);
4654 return 0;
4655 }
4656
4657 BT_DBG("Start: total len %d, frag len %d", len, skb->len);
4658
4659 if (skb->len > len) {
4660 BT_ERR("Frame is too long (len %d, expected len %d)",
4661 skb->len, len);
4662 l2cap_conn_unreliable(conn, ECOMM);
4663 goto drop;
4664 }
4665
4666 chan = l2cap_get_chan_by_scid(conn, cid);
4667
4668 if (chan && chan->sk) {
4669 struct sock *sk = chan->sk;
4670
4671 if (chan->imtu < len - L2CAP_HDR_SIZE) {
4672 BT_ERR("Frame exceeding recv MTU (len %d, MTU %d)",
4673 len,
4674 chan->imtu);
4675 release_sock(sk);
4676 l2cap_conn_unreliable(conn, ECOMM);
4677 goto drop;
4678 }
4679 release_sock(sk);
4680 }
4681
4682 /* Allocate skb for the complete frame (with header) */
4683 conn->rx_skb = bt_skb_alloc(len, GFP_ATOMIC);
4684 if (!conn->rx_skb)
4685 goto drop;
4686
4687 skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
4688 skb->len);
4689 conn->rx_len = len - skb->len;
4690 } else {
4691 BT_DBG("Cont: frag len %d (expecting %d)", skb->len, conn->rx_len);
4692
4693 if (!conn->rx_len) {
4694 BT_ERR("Unexpected continuation frame (len %d)", skb->len);
4695 l2cap_conn_unreliable(conn, ECOMM);
4696 goto drop;
4697 }
4698
4699 if (skb->len > conn->rx_len) {
4700 BT_ERR("Fragment is too long (len %d, expected %d)",
4701 skb->len, conn->rx_len);
4702 kfree_skb(conn->rx_skb);
4703 conn->rx_skb = NULL;
4704 conn->rx_len = 0;
4705 l2cap_conn_unreliable(conn, ECOMM);
4706 goto drop;
4707 }
4708
4709 skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
4710 skb->len);
4711 conn->rx_len -= skb->len;
4712
4713 if (!conn->rx_len) {
4714 /* Complete frame received */
4715 l2cap_recv_frame(conn, conn->rx_skb);
4716 conn->rx_skb = NULL;
4717 }
4718 }
4719
4720 drop:
4721 kfree_skb(skb);
4722 return 0;
4723 }
4724
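/* debugfs: dump one line per known channel (addresses, state, PSM,
 * CIDs, MTUs, security level and mode). */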
4725 static int l2cap_debugfs_show(struct seq_file *f, void *p)
4726 {
4727 struct l2cap_chan *c;
4728
4729 read_lock_bh(&chan_list_lock);
4730
4731 list_for_each_entry(c, &chan_list, global_l) {
4732 struct sock *sk = c->sk;
4733
4734 seq_printf(f, "%s %s %d %d 0x%4.4x 0x%4.4x %d %d %d %d\n",
4735 batostr(&bt_sk(sk)->src),
4736 batostr(&bt_sk(sk)->dst),
4737 c->state, __le16_to_cpu(c->psm),
4738 c->scid, c->dcid, c->imtu, c->omtu,
4739 c->sec_level, c->mode);
4740 }
4741
4742 read_unlock_bh(&chan_list_lock);
4743
4744 return 0;
4745 }
4746
4747 static int l2cap_debugfs_open(struct inode *inode, struct file *file)
4748 {
4749 return single_open(file, l2cap_debugfs_show, inode->i_private);
4750 }
4751
4752 static const struct file_operations l2cap_debugfs_fops = {
4753 .open = l2cap_debugfs_open,
4754 .read = seq_read,
4755 .llseek = seq_lseek,
4756 .release = single_release,
4757 };
4758
4759 static struct dentry *l2cap_debugfs;
4760
4761 static struct hci_proto l2cap_hci_proto = {
4762 .name = "L2CAP",
4763 .id = HCI_PROTO_L2CAP,
4764 .connect_ind = l2cap_connect_ind,
4765 .connect_cfm = l2cap_connect_cfm,
4766 .disconn_ind = l2cap_disconn_ind,
4767 .disconn_cfm = l2cap_disconn_cfm,
4768 .security_cfm = l2cap_security_cfm,
4769 .recv_acldata = l2cap_recv_acldata
4770 };
4771
4772 int __init l2cap_init(void)
4773 {
4774 int err;
4775
4776 err = l2cap_init_sockets();
4777 if (err < 0)
4778 return err;
4779
4780 err = hci_register_proto(&l2cap_hci_proto);
4781 if (err < 0) {
4782 BT_ERR("L2CAP protocol registration failed");
4783 bt_sock_unregister(BTPROTO_L2CAP);
4784 goto error;
4785 }
4786
4787 if (bt_debugfs) {
4788 l2cap_debugfs = debugfs_create_file("l2cap", 0444,
4789 bt_debugfs, NULL, &l2cap_debugfs_fops);
4790 if (!l2cap_debugfs)
4791 BT_ERR("Failed to create L2CAP debug file");
4792 }
4793
4794 return 0;
4795
4796 error:
4797 l2cap_cleanup_sockets();
4798 return err;
4799 }
4800
4801 void l2cap_exit(void)
4802 {
4803 debugfs_remove(l2cap_debugfs);
4804
4805 if (hci_unregister_proto(&l2cap_hci_proto) < 0)
4806 BT_ERR("L2CAP protocol unregistration failed");
4807
4808 l2cap_cleanup_sockets();
4809 }
4810
4811 module_param(disable_ertm, bool, 0644);
4812 MODULE_PARM_DESC(disable_ertm, "Disable enhanced retransmission mode");