net/bluetooth/l2cap_core.c
1 /*
2 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (C) 2000-2001 Qualcomm Incorporated
4 Copyright (C) 2009-2010 Gustavo F. Padovan <gustavo@padovan.org>
5 Copyright (C) 2010 Google Inc.
6 Copyright (C) 2011 ProFUSION Embedded Systems
7
8 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
9
10 This program is free software; you can redistribute it and/or modify
11 it under the terms of the GNU General Public License version 2 as
12 published by the Free Software Foundation;
13
14 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
15 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
17 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
18 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
19 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
20 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
21 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
22
23 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
24 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
25 SOFTWARE IS DISCLAIMED.
26 */
27
28 /* Bluetooth L2CAP core. */
29
30 #include <linux/module.h>
31
32 #include <linux/types.h>
33 #include <linux/capability.h>
34 #include <linux/errno.h>
35 #include <linux/kernel.h>
36 #include <linux/sched.h>
37 #include <linux/slab.h>
38 #include <linux/poll.h>
39 #include <linux/fcntl.h>
40 #include <linux/init.h>
41 #include <linux/interrupt.h>
42 #include <linux/socket.h>
43 #include <linux/skbuff.h>
44 #include <linux/list.h>
45 #include <linux/device.h>
46 #include <linux/debugfs.h>
47 #include <linux/seq_file.h>
48 #include <linux/uaccess.h>
49 #include <linux/crc16.h>
50 #include <net/sock.h>
51
52 #include <asm/system.h>
53 #include <asm/unaligned.h>
54
55 #include <net/bluetooth/bluetooth.h>
56 #include <net/bluetooth/hci_core.h>
57 #include <net/bluetooth/l2cap.h>
58 #include <net/bluetooth/smp.h>
59
60 int disable_ertm;
61
62 static u32 l2cap_feat_mask = L2CAP_FEAT_FIXED_CHAN;
63 static u8 l2cap_fixed_chan[8] = { L2CAP_FC_L2CAP, };
64
65 static LIST_HEAD(chan_list);
66 static DEFINE_RWLOCK(chan_list_lock);
67
68 static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
69 u8 code, u8 ident, u16 dlen, void *data);
70 static void l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len,
71 void *data);
72 static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data);
73 static void l2cap_send_disconn_req(struct l2cap_conn *conn,
74 struct l2cap_chan *chan, int err);
75
76 static int l2cap_ertm_data_rcv(struct sock *sk, struct sk_buff *skb);
77
78 /* ---- L2CAP channels ---- */
79
80 static struct l2cap_chan *__l2cap_get_chan_by_dcid(struct l2cap_conn *conn, u16 cid)
81 {
82 struct l2cap_chan *c, *r = NULL;
83
84 rcu_read_lock();
85
86 list_for_each_entry_rcu(c, &conn->chan_l, list) {
87 if (c->dcid == cid) {
88 r = c;
89 break;
90 }
91 }
92
93 rcu_read_unlock();
94 return r;
95 }
96
97 static struct l2cap_chan *__l2cap_get_chan_by_scid(struct l2cap_conn *conn, u16 cid)
98 {
99 struct l2cap_chan *c, *r = NULL;
100
101 rcu_read_lock();
102
103 list_for_each_entry_rcu(c, &conn->chan_l, list) {
104 if (c->scid == cid) {
105 r = c;
106 break;
107 }
108 }
109
110 rcu_read_unlock();
111 return r;
112 }
113
114 /* Find channel with given SCID.
115 * Returns the channel with its socket locked. */
116 static struct l2cap_chan *l2cap_get_chan_by_scid(struct l2cap_conn *conn, u16 cid)
117 {
118 struct l2cap_chan *c;
119
120 c = __l2cap_get_chan_by_scid(conn, cid);
121 if (c)
122 lock_sock(c->sk);
123 return c;
124 }
125
126 static struct l2cap_chan *__l2cap_get_chan_by_ident(struct l2cap_conn *conn, u8 ident)
127 {
128 struct l2cap_chan *c, *r = NULL;
129
130 rcu_read_lock();
131
132 list_for_each_entry_rcu(c, &conn->chan_l, list) {
133 if (c->ident == ident) {
134 r = c;
135 break;
136 }
137 }
138
139 rcu_read_unlock();
140 return r;
141 }
142
143 static inline struct l2cap_chan *l2cap_get_chan_by_ident(struct l2cap_conn *conn, u8 ident)
144 {
145 struct l2cap_chan *c;
146
147 c = __l2cap_get_chan_by_ident(conn, ident);
148 if (c)
149 lock_sock(c->sk);
150 return c;
151 }
152
153 static struct l2cap_chan *__l2cap_global_chan_by_addr(__le16 psm, bdaddr_t *src)
154 {
155 struct l2cap_chan *c;
156
157 list_for_each_entry(c, &chan_list, global_l) {
158 if (c->sport == psm && !bacmp(&bt_sk(c->sk)->src, src))
159 return c;
160 }
161 return NULL;
162 }
163
164 int l2cap_add_psm(struct l2cap_chan *chan, bdaddr_t *src, __le16 psm)
165 {
166 int err;
167
168 write_lock_bh(&chan_list_lock);
169
170 if (psm && __l2cap_global_chan_by_addr(psm, src)) {
171 err = -EADDRINUSE;
172 goto done;
173 }
174
175 if (psm) {
176 chan->psm = psm;
177 chan->sport = psm;
178 err = 0;
179 } else {
180 u16 p;
181
182 err = -EINVAL;
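/* No PSM was given: probe the dynamic range (odd values 0x1001-0x10ff)
 * and take the first one not already bound to this source address. */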
183 for (p = 0x1001; p < 0x1100; p += 2)
184 if (!__l2cap_global_chan_by_addr(cpu_to_le16(p), src)) {
185 chan->psm = cpu_to_le16(p);
186 chan->sport = cpu_to_le16(p);
187 err = 0;
188 break;
189 }
190 }
191
192 done:
193 write_unlock_bh(&chan_list_lock);
194 return err;
195 }
196
197 int l2cap_add_scid(struct l2cap_chan *chan, __u16 scid)
198 {
199 write_lock_bh(&chan_list_lock);
200
201 chan->scid = scid;
202
203 write_unlock_bh(&chan_list_lock);
204
205 return 0;
206 }
207
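/* Pick the first unused source CID in the dynamic range, from
 * L2CAP_CID_DYN_START up to L2CAP_CID_DYN_END; returns 0 when the
 * range is exhausted. */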
208 static u16 l2cap_alloc_cid(struct l2cap_conn *conn)
209 {
210 u16 cid = L2CAP_CID_DYN_START;
211
212 for (; cid < L2CAP_CID_DYN_END; cid++) {
213 if (!__l2cap_get_chan_by_scid(conn, cid))
214 return cid;
215 }
216
217 return 0;
218 }
219
220 static char *state_to_string(int state)
221 {
222 switch(state) {
223 case BT_CONNECTED:
224 return "BT_CONNECTED";
225 case BT_OPEN:
226 return "BT_OPEN";
227 case BT_BOUND:
228 return "BT_BOUND";
229 case BT_LISTEN:
230 return "BT_LISTEN";
231 case BT_CONNECT:
232 return "BT_CONNECT";
233 case BT_CONNECT2:
234 return "BT_CONNECT2";
235 case BT_CONFIG:
236 return "BT_CONFIG";
237 case BT_DISCONN:
238 return "BT_DISCONN";
239 case BT_CLOSED:
240 return "BT_CLOSED";
241 }
242
243 return "invalid state";
244 }
245
246 static void l2cap_state_change(struct l2cap_chan *chan, int state)
247 {
248 BT_DBG("%p %s -> %s", chan, state_to_string(chan->state),
249 state_to_string(state));
250
251 chan->state = state;
252 chan->ops->state_change(chan->data, state);
253 }
254
255 static void l2cap_chan_timeout(struct work_struct *work)
256 {
257 struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
258 chan_timer.work);
259 struct sock *sk = chan->sk;
260 int reason;
261
262 BT_DBG("chan %p state %d", chan, chan->state);
263
264 lock_sock(sk);
265
266 if (chan->state == BT_CONNECTED || chan->state == BT_CONFIG)
267 reason = ECONNREFUSED;
268 else if (chan->state == BT_CONNECT &&
269 chan->sec_level != BT_SECURITY_SDP)
270 reason = ECONNREFUSED;
271 else
272 reason = ETIMEDOUT;
273
274 l2cap_chan_close(chan, reason);
275
276 release_sock(sk);
277
278 chan->ops->close(chan->data);
279 l2cap_chan_put(chan);
280 }
281
282 struct l2cap_chan *l2cap_chan_create(struct sock *sk)
283 {
284 struct l2cap_chan *chan;
285
286 chan = kzalloc(sizeof(*chan), GFP_ATOMIC);
287 if (!chan)
288 return NULL;
289
290 chan->sk = sk;
291
292 write_lock_bh(&chan_list_lock);
293 list_add(&chan->global_l, &chan_list);
294 write_unlock_bh(&chan_list_lock);
295
296 INIT_DELAYED_WORK(&chan->chan_timer, l2cap_chan_timeout);
297
298 chan->state = BT_OPEN;
299
300 atomic_set(&chan->refcnt, 1);
301
302 BT_DBG("sk %p chan %p", sk, chan);
303
304 return chan;
305 }
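
/*
 * Rough usage sketch (illustrative only, not lifted from any caller):
 * the socket layer creates one channel per socket and then binds or
 * connects it through the helpers exported here. With sk, src, psm,
 * cid and dst standing for the caller's socket, source address, PSM,
 * CID and destination address:
 *
 *	chan = l2cap_chan_create(sk);
 *	if (!chan)
 *		return -ENOMEM;
 *	err = l2cap_add_psm(chan, src, psm);		(bind path)
 *	err = l2cap_chan_connect(chan, psm, cid, dst);	(connect path)
 */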
306
307 void l2cap_chan_destroy(struct l2cap_chan *chan)
308 {
309 write_lock_bh(&chan_list_lock);
310 list_del(&chan->global_l);
311 write_unlock_bh(&chan_list_lock);
312
313 l2cap_chan_put(chan);
314 }
315
316 static void l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan)
317 {
318 BT_DBG("conn %p, psm 0x%2.2x, dcid 0x%4.4x", conn,
319 chan->psm, chan->dcid);
320
321 conn->disc_reason = HCI_ERROR_REMOTE_USER_TERM;
322
323 chan->conn = conn;
324
325 if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED) {
326 if (conn->hcon->type == LE_LINK) {
327 /* LE connection */
328 chan->omtu = L2CAP_LE_DEFAULT_MTU;
329 chan->scid = L2CAP_CID_LE_DATA;
330 chan->dcid = L2CAP_CID_LE_DATA;
331 } else {
332 /* Alloc CID for connection-oriented socket */
333 chan->scid = l2cap_alloc_cid(conn);
334 chan->omtu = L2CAP_DEFAULT_MTU;
335 }
336 } else if (chan->chan_type == L2CAP_CHAN_CONN_LESS) {
337 /* Connectionless socket */
338 chan->scid = L2CAP_CID_CONN_LESS;
339 chan->dcid = L2CAP_CID_CONN_LESS;
340 chan->omtu = L2CAP_DEFAULT_MTU;
341 } else {
342 /* Raw socket can send/recv signalling messages only */
343 chan->scid = L2CAP_CID_SIGNALING;
344 chan->dcid = L2CAP_CID_SIGNALING;
345 chan->omtu = L2CAP_DEFAULT_MTU;
346 }
347
348 chan->local_id = L2CAP_BESTEFFORT_ID;
349 chan->local_stype = L2CAP_SERV_BESTEFFORT;
350 chan->local_msdu = L2CAP_DEFAULT_MAX_SDU_SIZE;
351 chan->local_sdu_itime = L2CAP_DEFAULT_SDU_ITIME;
352 chan->local_acc_lat = L2CAP_DEFAULT_ACC_LAT;
353 chan->local_flush_to = L2CAP_DEFAULT_FLUSH_TO;
354
355 l2cap_chan_hold(chan);
356
357 list_add_rcu(&chan->list, &conn->chan_l);
358 }
359
360 /* Delete channel.
361 * Must be called on the locked socket. */
362 static void l2cap_chan_del(struct l2cap_chan *chan, int err)
363 {
364 struct sock *sk = chan->sk;
365 struct l2cap_conn *conn = chan->conn;
366 struct sock *parent = bt_sk(sk)->parent;
367
368 __clear_chan_timer(chan);
369
370 BT_DBG("chan %p, conn %p, err %d", chan, conn, err);
371
372 if (conn) {
373 /* Delete from channel list */
374 list_del_rcu(&chan->list);
375 synchronize_rcu();
376
377 l2cap_chan_put(chan);
378
379 chan->conn = NULL;
380 hci_conn_put(conn->hcon);
381 }
382
383 l2cap_state_change(chan, BT_CLOSED);
384 sock_set_flag(sk, SOCK_ZAPPED);
385
386 if (err)
387 sk->sk_err = err;
388
389 if (parent) {
390 bt_accept_unlink(sk);
391 parent->sk_data_ready(parent, 0);
392 } else
393 sk->sk_state_change(sk);
394
395 if (!(test_bit(CONF_OUTPUT_DONE, &chan->conf_state) &&
396 test_bit(CONF_INPUT_DONE, &chan->conf_state)))
397 return;
398
399 skb_queue_purge(&chan->tx_q);
400
401 if (chan->mode == L2CAP_MODE_ERTM) {
402 struct srej_list *l, *tmp;
403
404 __clear_retrans_timer(chan);
405 __clear_monitor_timer(chan);
406 __clear_ack_timer(chan);
407
408 skb_queue_purge(&chan->srej_q);
409
410 list_for_each_entry_safe(l, tmp, &chan->srej_l, list) {
411 list_del(&l->list);
412 kfree(l);
413 }
414 }
415 }
416
417 static void l2cap_chan_cleanup_listen(struct sock *parent)
418 {
419 struct sock *sk;
420
421 BT_DBG("parent %p", parent);
422
423 /* Close not yet accepted channels */
424 while ((sk = bt_accept_dequeue(parent, NULL))) {
425 struct l2cap_chan *chan = l2cap_pi(sk)->chan;
426 __clear_chan_timer(chan);
427 lock_sock(sk);
428 l2cap_chan_close(chan, ECONNRESET);
429 release_sock(sk);
430 chan->ops->close(chan->data);
431 }
432 }
433
434 void l2cap_chan_close(struct l2cap_chan *chan, int reason)
435 {
436 struct l2cap_conn *conn = chan->conn;
437 struct sock *sk = chan->sk;
438
439 BT_DBG("chan %p state %d socket %p", chan, chan->state, sk->sk_socket);
440
441 switch (chan->state) {
442 case BT_LISTEN:
443 l2cap_chan_cleanup_listen(sk);
444
445 l2cap_state_change(chan, BT_CLOSED);
446 sock_set_flag(sk, SOCK_ZAPPED);
447 break;
448
449 case BT_CONNECTED:
450 case BT_CONFIG:
451 if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED &&
452 conn->hcon->type == ACL_LINK) {
453 __clear_chan_timer(chan);
454 __set_chan_timer(chan, sk->sk_sndtimeo);
455 l2cap_send_disconn_req(conn, chan, reason);
456 } else
457 l2cap_chan_del(chan, reason);
458 break;
459
460 case BT_CONNECT2:
461 if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED &&
462 conn->hcon->type == ACL_LINK) {
463 struct l2cap_conn_rsp rsp;
464 __u16 result;
465
466 if (bt_sk(sk)->defer_setup)
467 result = L2CAP_CR_SEC_BLOCK;
468 else
469 result = L2CAP_CR_BAD_PSM;
470 l2cap_state_change(chan, BT_DISCONN);
471
472 rsp.scid = cpu_to_le16(chan->dcid);
473 rsp.dcid = cpu_to_le16(chan->scid);
474 rsp.result = cpu_to_le16(result);
475 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
476 l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
477 sizeof(rsp), &rsp);
478 }
479
480 l2cap_chan_del(chan, reason);
481 break;
482
483 case BT_CONNECT:
484 case BT_DISCONN:
485 l2cap_chan_del(chan, reason);
486 break;
487
488 default:
489 sock_set_flag(sk, SOCK_ZAPPED);
490 break;
491 }
492 }
493
494 static inline u8 l2cap_get_auth_type(struct l2cap_chan *chan)
495 {
496 if (chan->chan_type == L2CAP_CHAN_RAW) {
497 switch (chan->sec_level) {
498 case BT_SECURITY_HIGH:
499 return HCI_AT_DEDICATED_BONDING_MITM;
500 case BT_SECURITY_MEDIUM:
501 return HCI_AT_DEDICATED_BONDING;
502 default:
503 return HCI_AT_NO_BONDING;
504 }
505 } else if (chan->psm == cpu_to_le16(0x0001)) {
506 if (chan->sec_level == BT_SECURITY_LOW)
507 chan->sec_level = BT_SECURITY_SDP;
508
509 if (chan->sec_level == BT_SECURITY_HIGH)
510 return HCI_AT_NO_BONDING_MITM;
511 else
512 return HCI_AT_NO_BONDING;
513 } else {
514 switch (chan->sec_level) {
515 case BT_SECURITY_HIGH:
516 return HCI_AT_GENERAL_BONDING_MITM;
517 case BT_SECURITY_MEDIUM:
518 return HCI_AT_GENERAL_BONDING;
519 default:
520 return HCI_AT_NO_BONDING;
521 }
522 }
523 }
524
525 /* Service level security */
526 int l2cap_chan_check_security(struct l2cap_chan *chan)
527 {
528 struct l2cap_conn *conn = chan->conn;
529 __u8 auth_type;
530
531 auth_type = l2cap_get_auth_type(chan);
532
533 return hci_conn_security(conn->hcon, chan->sec_level, auth_type);
534 }
535
536 static u8 l2cap_get_ident(struct l2cap_conn *conn)
537 {
538 u8 id;
539
540 /* Get next available identifier.
541 * 1 - 128 are used by kernel.
542 * 129 - 199 are reserved.
543 * 200 - 254 are used by utilities like l2ping, etc.
544 */
545
546 spin_lock_bh(&conn->lock);
547
548 if (++conn->tx_ident > 128)
549 conn->tx_ident = 1;
550
551 id = conn->tx_ident;
552
553 spin_unlock_bh(&conn->lock);
554
555 return id;
556 }
557
558 static void l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len, void *data)
559 {
560 struct sk_buff *skb = l2cap_build_cmd(conn, code, ident, len, data);
561 u8 flags;
562
563 BT_DBG("code 0x%2.2x", code);
564
565 if (!skb)
566 return;
567
568 if (lmp_no_flush_capable(conn->hcon->hdev))
569 flags = ACL_START_NO_FLUSH;
570 else
571 flags = ACL_START;
572
573 bt_cb(skb)->force_active = BT_POWER_FORCE_ACTIVE_ON;
574 skb->priority = HCI_PRIO_MAX;
575
576 hci_send_acl(conn->hchan, skb, flags);
577 }
578
579 static void l2cap_do_send(struct l2cap_chan *chan, struct sk_buff *skb)
580 {
581 struct hci_conn *hcon = chan->conn->hcon;
582 u16 flags;
583
584 BT_DBG("chan %p, skb %p len %d priority %u", chan, skb, skb->len,
585 skb->priority);
586
587 if (!test_bit(FLAG_FLUSHABLE, &chan->flags) &&
588 lmp_no_flush_capable(hcon->hdev))
589 flags = ACL_START_NO_FLUSH;
590 else
591 flags = ACL_START;
592
593 bt_cb(skb)->force_active = test_bit(FLAG_FORCE_ACTIVE, &chan->flags);
594 hci_send_acl(chan->conn->hchan, skb, flags);
595 }
596
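/* Build and send an S-frame: a control-only PDU (RR, RNR, REJ or SREJ)
 * with an enhanced or extended control field and an optional CRC16 FCS,
 * used for acknowledgement and flow control in ERTM. */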
597 static inline void l2cap_send_sframe(struct l2cap_chan *chan, u32 control)
598 {
599 struct sk_buff *skb;
600 struct l2cap_hdr *lh;
601 struct l2cap_conn *conn = chan->conn;
602 int count, hlen;
603
604 if (chan->state != BT_CONNECTED)
605 return;
606
607 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
608 hlen = L2CAP_EXT_HDR_SIZE;
609 else
610 hlen = L2CAP_ENH_HDR_SIZE;
611
612 if (chan->fcs == L2CAP_FCS_CRC16)
613 hlen += L2CAP_FCS_SIZE;
614
615 BT_DBG("chan %p, control 0x%8.8x", chan, control);
616
617 count = min_t(unsigned int, conn->mtu, hlen);
618
619 control |= __set_sframe(chan);
620
621 if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state))
622 control |= __set_ctrl_final(chan);
623
624 if (test_and_clear_bit(CONN_SEND_PBIT, &chan->conn_state))
625 control |= __set_ctrl_poll(chan);
626
627 skb = bt_skb_alloc(count, GFP_ATOMIC);
628 if (!skb)
629 return;
630
631 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
632 lh->len = cpu_to_le16(hlen - L2CAP_HDR_SIZE);
633 lh->cid = cpu_to_le16(chan->dcid);
634
635 __put_control(chan, control, skb_put(skb, __ctrl_size(chan)));
636
637 if (chan->fcs == L2CAP_FCS_CRC16) {
638 u16 fcs = crc16(0, (u8 *)lh, count - L2CAP_FCS_SIZE);
639 put_unaligned_le16(fcs, skb_put(skb, L2CAP_FCS_SIZE));
640 }
641
642 skb->priority = HCI_PRIO_MAX;
643 l2cap_do_send(chan, skb);
644 }
645
646 static inline void l2cap_send_rr_or_rnr(struct l2cap_chan *chan, u32 control)
647 {
648 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
649 control |= __set_ctrl_super(chan, L2CAP_SUPER_RNR);
650 set_bit(CONN_RNR_SENT, &chan->conn_state);
651 } else
652 control |= __set_ctrl_super(chan, L2CAP_SUPER_RR);
653
654 control |= __set_reqseq(chan, chan->buffer_seq);
655
656 l2cap_send_sframe(chan, control);
657 }
658
659 static inline int __l2cap_no_conn_pending(struct l2cap_chan *chan)
660 {
661 return !test_bit(CONF_CONNECT_PEND, &chan->conf_state);
662 }
663
664 static void l2cap_do_start(struct l2cap_chan *chan)
665 {
666 struct l2cap_conn *conn = chan->conn;
667
668 if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) {
669 if (!(conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE))
670 return;
671
672 if (l2cap_chan_check_security(chan) &&
673 __l2cap_no_conn_pending(chan)) {
674 struct l2cap_conn_req req;
675 req.scid = cpu_to_le16(chan->scid);
676 req.psm = chan->psm;
677
678 chan->ident = l2cap_get_ident(conn);
679 set_bit(CONF_CONNECT_PEND, &chan->conf_state);
680
681 l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_REQ,
682 sizeof(req), &req);
683 }
684 } else {
685 struct l2cap_info_req req;
686 req.type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
687
688 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
689 conn->info_ident = l2cap_get_ident(conn);
690
691 schedule_delayed_work(&conn->info_timer,
692 msecs_to_jiffies(L2CAP_INFO_TIMEOUT));
693
694 l2cap_send_cmd(conn, conn->info_ident,
695 L2CAP_INFO_REQ, sizeof(req), &req);
696 }
697 }
698
699 static inline int l2cap_mode_supported(__u8 mode, __u32 feat_mask)
700 {
701 u32 local_feat_mask = l2cap_feat_mask;
702 if (!disable_ertm)
703 local_feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING;
704
705 switch (mode) {
706 case L2CAP_MODE_ERTM:
707 return L2CAP_FEAT_ERTM & feat_mask & local_feat_mask;
708 case L2CAP_MODE_STREAMING:
709 return L2CAP_FEAT_STREAMING & feat_mask & local_feat_mask;
710 default:
711 return 0x00;
712 }
713 }
714
715 static void l2cap_send_disconn_req(struct l2cap_conn *conn, struct l2cap_chan *chan, int err)
716 {
717 struct sock *sk;
718 struct l2cap_disconn_req req;
719
720 if (!conn)
721 return;
722
723 sk = chan->sk;
724
725 if (chan->mode == L2CAP_MODE_ERTM) {
726 __clear_retrans_timer(chan);
727 __clear_monitor_timer(chan);
728 __clear_ack_timer(chan);
729 }
730
731 req.dcid = cpu_to_le16(chan->dcid);
732 req.scid = cpu_to_le16(chan->scid);
733 l2cap_send_cmd(conn, l2cap_get_ident(conn),
734 L2CAP_DISCONN_REQ, sizeof(req), &req);
735
736 l2cap_state_change(chan, BT_DISCONN);
737 sk->sk_err = err;
738 }
739
740 /* ---- L2CAP connections ---- */
741 static void l2cap_conn_start(struct l2cap_conn *conn)
742 {
743 struct l2cap_chan *chan;
744
745 BT_DBG("conn %p", conn);
746
747 rcu_read_lock();
748
749 list_for_each_entry_rcu(chan, &conn->chan_l, list) {
750 struct sock *sk = chan->sk;
751
752 bh_lock_sock(sk);
753
754 if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
755 bh_unlock_sock(sk);
756 continue;
757 }
758
759 if (chan->state == BT_CONNECT) {
760 struct l2cap_conn_req req;
761
762 if (!l2cap_chan_check_security(chan) ||
763 !__l2cap_no_conn_pending(chan)) {
764 bh_unlock_sock(sk);
765 continue;
766 }
767
768 if (!l2cap_mode_supported(chan->mode, conn->feat_mask)
769 && test_bit(CONF_STATE2_DEVICE,
770 &chan->conf_state)) {
771 /* l2cap_chan_close() calls list_del(chan)
772 * so release the lock */
773 l2cap_chan_close(chan, ECONNRESET);
774 bh_unlock_sock(sk);
775 continue;
776 }
777
778 req.scid = cpu_to_le16(chan->scid);
779 req.psm = chan->psm;
780
781 chan->ident = l2cap_get_ident(conn);
782 set_bit(CONF_CONNECT_PEND, &chan->conf_state);
783
784 l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_REQ,
785 sizeof(req), &req);
786
787 } else if (chan->state == BT_CONNECT2) {
788 struct l2cap_conn_rsp rsp;
789 char buf[128];
790 rsp.scid = cpu_to_le16(chan->dcid);
791 rsp.dcid = cpu_to_le16(chan->scid);
792
793 if (l2cap_chan_check_security(chan)) {
794 if (bt_sk(sk)->defer_setup) {
795 struct sock *parent = bt_sk(sk)->parent;
796 rsp.result = cpu_to_le16(L2CAP_CR_PEND);
797 rsp.status = cpu_to_le16(L2CAP_CS_AUTHOR_PEND);
798 if (parent)
799 parent->sk_data_ready(parent, 0);
800
801 } else {
802 l2cap_state_change(chan, BT_CONFIG);
803 rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
804 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
805 }
806 } else {
807 rsp.result = cpu_to_le16(L2CAP_CR_PEND);
808 rsp.status = cpu_to_le16(L2CAP_CS_AUTHEN_PEND);
809 }
810
811 l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
812 sizeof(rsp), &rsp);
813
814 if (test_bit(CONF_REQ_SENT, &chan->conf_state) ||
815 rsp.result != L2CAP_CR_SUCCESS) {
816 bh_unlock_sock(sk);
817 continue;
818 }
819
820 set_bit(CONF_REQ_SENT, &chan->conf_state);
821 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
822 l2cap_build_conf_req(chan, buf), buf);
823 chan->num_conf_req++;
824 }
825
826 bh_unlock_sock(sk);
827 }
828
829 rcu_read_unlock();
830 }
831
832 /* Find channel with given source CID and source bdaddr.
833 * Returns closest match.
834 */
835 static struct l2cap_chan *l2cap_global_chan_by_scid(int state, __le16 cid, bdaddr_t *src)
836 {
837 struct l2cap_chan *c, *c1 = NULL;
838
839 read_lock(&chan_list_lock);
840
841 list_for_each_entry(c, &chan_list, global_l) {
842 struct sock *sk = c->sk;
843
844 if (state && c->state != state)
845 continue;
846
847 if (c->scid == cid) {
848 /* Exact match. */
849 if (!bacmp(&bt_sk(sk)->src, src)) {
850 read_unlock(&chan_list_lock);
851 return c;
852 }
853
854 /* Closest match */
855 if (!bacmp(&bt_sk(sk)->src, BDADDR_ANY))
856 c1 = c;
857 }
858 }
859
860 read_unlock(&chan_list_lock);
861
862 return c1;
863 }
864
865 static void l2cap_le_conn_ready(struct l2cap_conn *conn)
866 {
867 struct sock *parent, *sk;
868 struct l2cap_chan *chan, *pchan;
869
870 BT_DBG("");
871
872 /* Check if we have socket listening on cid */
873 pchan = l2cap_global_chan_by_scid(BT_LISTEN, L2CAP_CID_LE_DATA,
874 conn->src);
875 if (!pchan)
876 return;
877
878 parent = pchan->sk;
879
880 lock_sock(parent);
881
882 /* Check for backlog size */
883 if (sk_acceptq_is_full(parent)) {
884 BT_DBG("backlog full %d", parent->sk_ack_backlog);
885 goto clean;
886 }
887
888 chan = pchan->ops->new_connection(pchan->data);
889 if (!chan)
890 goto clean;
891
892 sk = chan->sk;
893
894 hci_conn_hold(conn->hcon);
895
896 bacpy(&bt_sk(sk)->src, conn->src);
897 bacpy(&bt_sk(sk)->dst, conn->dst);
898
899 bt_accept_enqueue(parent, sk);
900
901 l2cap_chan_add(conn, chan);
902
903 __set_chan_timer(chan, sk->sk_sndtimeo);
904
905 l2cap_state_change(chan, BT_CONNECTED);
906 parent->sk_data_ready(parent, 0);
907
908 clean:
909 release_sock(parent);
910 }
911
912 static void l2cap_chan_ready(struct sock *sk)
913 {
914 struct l2cap_chan *chan = l2cap_pi(sk)->chan;
915 struct sock *parent = bt_sk(sk)->parent;
916
917 BT_DBG("sk %p, parent %p", sk, parent);
918
919 chan->conf_state = 0;
920 __clear_chan_timer(chan);
921
922 l2cap_state_change(chan, BT_CONNECTED);
923 sk->sk_state_change(sk);
924
925 if (parent)
926 parent->sk_data_ready(parent, 0);
927 }
928
929 static void l2cap_conn_ready(struct l2cap_conn *conn)
930 {
931 struct l2cap_chan *chan;
932
933 BT_DBG("conn %p", conn);
934
935 if (!conn->hcon->out && conn->hcon->type == LE_LINK)
936 l2cap_le_conn_ready(conn);
937
938 if (conn->hcon->out && conn->hcon->type == LE_LINK)
939 smp_conn_security(conn, conn->hcon->pending_sec_level);
940
941 rcu_read_lock();
942
943 list_for_each_entry_rcu(chan, &conn->chan_l, list) {
944 struct sock *sk = chan->sk;
945
946 bh_lock_sock(sk);
947
948 if (conn->hcon->type == LE_LINK) {
949 if (smp_conn_security(conn, chan->sec_level))
950 l2cap_chan_ready(sk);
951
952 } else if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
953 __clear_chan_timer(chan);
954 l2cap_state_change(chan, BT_CONNECTED);
955 sk->sk_state_change(sk);
956
957 } else if (chan->state == BT_CONNECT)
958 l2cap_do_start(chan);
959
960 bh_unlock_sock(sk);
961 }
962
963 rcu_read_unlock();
964 }
965
966 /* Notify sockets that we cannot guarantee reliability anymore */
967 static void l2cap_conn_unreliable(struct l2cap_conn *conn, int err)
968 {
969 struct l2cap_chan *chan;
970
971 BT_DBG("conn %p", conn);
972
973 rcu_read_lock();
974
975 list_for_each_entry_rcu(chan, &conn->chan_l, list) {
976 struct sock *sk = chan->sk;
977
978 if (test_bit(FLAG_FORCE_RELIABLE, &chan->flags))
979 sk->sk_err = err;
980 }
981
982 rcu_read_unlock();
983 }
984
985 static void l2cap_info_timeout(struct work_struct *work)
986 {
987 struct l2cap_conn *conn = container_of(work, struct l2cap_conn,
988 info_timer.work);
989
990 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
991 conn->info_ident = 0;
992
993 l2cap_conn_start(conn);
994 }
995
996 static void l2cap_conn_del(struct hci_conn *hcon, int err)
997 {
998 struct l2cap_conn *conn = hcon->l2cap_data;
999 struct l2cap_chan *chan, *l;
1000 struct sock *sk;
1001
1002 if (!conn)
1003 return;
1004
1005 BT_DBG("hcon %p conn %p, err %d", hcon, conn, err);
1006
1007 kfree_skb(conn->rx_skb);
1008
1009 /* Kill channels */
1010 list_for_each_entry_safe(chan, l, &conn->chan_l, list) {
1011 sk = chan->sk;
1012 lock_sock(sk);
1013 l2cap_chan_del(chan, err);
1014 release_sock(sk);
1015 chan->ops->close(chan->data);
1016 }
1017
1018 hci_chan_del(conn->hchan);
1019
1020 if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT)
1021 __cancel_delayed_work(&conn->info_timer);
1022
1023 if (test_and_clear_bit(HCI_CONN_LE_SMP_PEND, &hcon->pend)) {
1024 __cancel_delayed_work(&conn->security_timer);
1025 smp_chan_destroy(conn);
1026 }
1027
1028 hcon->l2cap_data = NULL;
1029 kfree(conn);
1030 }
1031
1032 static void security_timeout(struct work_struct *work)
1033 {
1034 struct l2cap_conn *conn = container_of(work, struct l2cap_conn,
1035 security_timer.work);
1036
1037 l2cap_conn_del(conn->hcon, ETIMEDOUT);
1038 }
1039
1040 static struct l2cap_conn *l2cap_conn_add(struct hci_conn *hcon, u8 status)
1041 {
1042 struct l2cap_conn *conn = hcon->l2cap_data;
1043 struct hci_chan *hchan;
1044
1045 if (conn || status)
1046 return conn;
1047
1048 hchan = hci_chan_create(hcon);
1049 if (!hchan)
1050 return NULL;
1051
1052 conn = kzalloc(sizeof(struct l2cap_conn), GFP_ATOMIC);
1053 if (!conn) {
1054 hci_chan_del(hchan);
1055 return NULL;
1056 }
1057
1058 hcon->l2cap_data = conn;
1059 conn->hcon = hcon;
1060 conn->hchan = hchan;
1061
1062 BT_DBG("hcon %p conn %p hchan %p", hcon, conn, hchan);
1063
1064 if (hcon->hdev->le_mtu && hcon->type == LE_LINK)
1065 conn->mtu = hcon->hdev->le_mtu;
1066 else
1067 conn->mtu = hcon->hdev->acl_mtu;
1068
1069 conn->src = &hcon->hdev->bdaddr;
1070 conn->dst = &hcon->dst;
1071
1072 conn->feat_mask = 0;
1073
1074 spin_lock_init(&conn->lock);
1075
1076 INIT_LIST_HEAD(&conn->chan_l);
1077
1078 if (hcon->type == LE_LINK)
1079 INIT_DELAYED_WORK(&conn->security_timer, security_timeout);
1080 else
1081 INIT_DELAYED_WORK(&conn->info_timer, l2cap_info_timeout);
1082
1083 conn->disc_reason = HCI_ERROR_REMOTE_USER_TERM;
1084
1085 return conn;
1086 }
1087
1088 /* ---- Socket interface ---- */
1089
1090 /* Find channel with given PSM and source bdaddr.
1091 * Returns closest match.
1092 */
1093 static struct l2cap_chan *l2cap_global_chan_by_psm(int state, __le16 psm, bdaddr_t *src)
1094 {
1095 struct l2cap_chan *c, *c1 = NULL;
1096
1097 read_lock(&chan_list_lock);
1098
1099 list_for_each_entry(c, &chan_list, global_l) {
1100 struct sock *sk = c->sk;
1101
1102 if (state && c->state != state)
1103 continue;
1104
1105 if (c->psm == psm) {
1106 /* Exact match. */
1107 if (!bacmp(&bt_sk(sk)->src, src)) {
1108 read_unlock(&chan_list_lock);
1109 return c;
1110 }
1111
1112 /* Closest match */
1113 if (!bacmp(&bt_sk(sk)->src, BDADDR_ANY))
1114 c1 = c;
1115 }
1116 }
1117
1118 read_unlock(&chan_list_lock);
1119
1120 return c1;
1121 }
1122
1123 inline int l2cap_chan_connect(struct l2cap_chan *chan, __le16 psm, u16 cid, bdaddr_t *dst)
1124 {
1125 struct sock *sk = chan->sk;
1126 bdaddr_t *src = &bt_sk(sk)->src;
1127 struct l2cap_conn *conn;
1128 struct hci_conn *hcon;
1129 struct hci_dev *hdev;
1130 __u8 auth_type;
1131 int err;
1132
1133 BT_DBG("%s -> %s psm 0x%2.2x", batostr(src), batostr(dst),
1134 chan->psm);
1135
1136 hdev = hci_get_route(dst, src);
1137 if (!hdev)
1138 return -EHOSTUNREACH;
1139
1140 hci_dev_lock(hdev);
1141
1142 lock_sock(sk);
1143
1144 /* PSM must be odd and lsb of upper byte must be 0 */
1145 if ((__le16_to_cpu(psm) & 0x0101) != 0x0001 && !cid &&
1146 chan->chan_type != L2CAP_CHAN_RAW) {
1147 err = -EINVAL;
1148 goto done;
1149 }
1150
1151 if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED && !(psm || cid)) {
1152 err = -EINVAL;
1153 goto done;
1154 }
1155
1156 switch (chan->mode) {
1157 case L2CAP_MODE_BASIC:
1158 break;
1159 case L2CAP_MODE_ERTM:
1160 case L2CAP_MODE_STREAMING:
1161 if (!disable_ertm)
1162 break;
1163 /* fall through */
1164 default:
1165 err = -ENOTSUPP;
1166 goto done;
1167 }
1168
1169 switch (sk->sk_state) {
1170 case BT_CONNECT:
1171 case BT_CONNECT2:
1172 case BT_CONFIG:
1173 /* Already connecting */
1174 err = 0;
1175 goto done;
1176
1177 case BT_CONNECTED:
1178 /* Already connected */
1179 err = -EISCONN;
1180 goto done;
1181
1182 case BT_OPEN:
1183 case BT_BOUND:
1184 /* Can connect */
1185 break;
1186
1187 default:
1188 err = -EBADFD;
1189 goto done;
1190 }
1191
1192 /* Set destination address and psm */
1193 bacpy(&bt_sk(sk)->dst, dst);
1194 chan->psm = psm;
1195 chan->dcid = cid;
1196
1197 auth_type = l2cap_get_auth_type(chan);
1198
1199 if (chan->dcid == L2CAP_CID_LE_DATA)
1200 hcon = hci_connect(hdev, LE_LINK, dst,
1201 chan->sec_level, auth_type);
1202 else
1203 hcon = hci_connect(hdev, ACL_LINK, dst,
1204 chan->sec_level, auth_type);
1205
1206 if (IS_ERR(hcon)) {
1207 err = PTR_ERR(hcon);
1208 goto done;
1209 }
1210
1211 conn = l2cap_conn_add(hcon, 0);
1212 if (!conn) {
1213 hci_conn_put(hcon);
1214 err = -ENOMEM;
1215 goto done;
1216 }
1217
1218 /* Update source addr of the socket */
1219 bacpy(src, conn->src);
1220
1221 l2cap_chan_add(conn, chan);
1222
1223 l2cap_state_change(chan, BT_CONNECT);
1224 __set_chan_timer(chan, sk->sk_sndtimeo);
1225
1226 if (hcon->state == BT_CONNECTED) {
1227 if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
1228 __clear_chan_timer(chan);
1229 if (l2cap_chan_check_security(chan))
1230 l2cap_state_change(chan, BT_CONNECTED);
1231 } else
1232 l2cap_do_start(chan);
1233 }
1234
1235 err = 0;
1236
1237 done:
1238 hci_dev_unlock(hdev);
1239 hci_dev_put(hdev);
1240 return err;
1241 }
1242
1243 int __l2cap_wait_ack(struct sock *sk)
1244 {
1245 struct l2cap_chan *chan = l2cap_pi(sk)->chan;
1246 DECLARE_WAITQUEUE(wait, current);
1247 int err = 0;
1248 int timeo = HZ/5;
1249
1250 add_wait_queue(sk_sleep(sk), &wait);
1251 set_current_state(TASK_INTERRUPTIBLE);
1252 while (chan->unacked_frames > 0 && chan->conn) {
1253 if (!timeo)
1254 timeo = HZ/5;
1255
1256 if (signal_pending(current)) {
1257 err = sock_intr_errno(timeo);
1258 break;
1259 }
1260
1261 release_sock(sk);
1262 timeo = schedule_timeout(timeo);
1263 lock_sock(sk);
1264 set_current_state(TASK_INTERRUPTIBLE);
1265
1266 err = sock_error(sk);
1267 if (err)
1268 break;
1269 }
1270 set_current_state(TASK_RUNNING);
1271 remove_wait_queue(sk_sleep(sk), &wait);
1272 return err;
1273 }
1274
1275 static void l2cap_monitor_timeout(struct work_struct *work)
1276 {
1277 struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
1278 monitor_timer.work);
1279 struct sock *sk = chan->sk;
1280
1281 BT_DBG("chan %p", chan);
1282
1283 lock_sock(sk);
1284 if (chan->retry_count >= chan->remote_max_tx) {
1285 l2cap_send_disconn_req(chan->conn, chan, ECONNABORTED);
1286 release_sock(sk);
1287 return;
1288 }
1289
1290 chan->retry_count++;
1291 __set_monitor_timer(chan);
1292
1293 l2cap_send_rr_or_rnr(chan, L2CAP_CTRL_POLL);
1294 release_sock(sk);
1295 }
1296
1297 static void l2cap_retrans_timeout(struct work_struct *work)
1298 {
1299 struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
1300 retrans_timer.work);
1301 struct sock *sk = chan->sk;
1302
1303 BT_DBG("chan %p", chan);
1304
1305 lock_sock(sk);
1306 chan->retry_count = 1;
1307 __set_monitor_timer(chan);
1308
1309 set_bit(CONN_WAIT_F, &chan->conn_state);
1310
1311 l2cap_send_rr_or_rnr(chan, L2CAP_CTRL_POLL);
1312 release_sock(sk);
1313 }
1314
1315 static void l2cap_drop_acked_frames(struct l2cap_chan *chan)
1316 {
1317 struct sk_buff *skb;
1318
1319 while ((skb = skb_peek(&chan->tx_q)) &&
1320 chan->unacked_frames) {
1321 if (bt_cb(skb)->tx_seq == chan->expected_ack_seq)
1322 break;
1323
1324 skb = skb_dequeue(&chan->tx_q);
1325 kfree_skb(skb);
1326
1327 chan->unacked_frames--;
1328 }
1329
1330 if (!chan->unacked_frames)
1331 __clear_retrans_timer(chan);
1332 }
1333
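/* Streaming mode transmit: each queued frame is stamped with the next
 * TxSeq and sent immediately; nothing is kept for retransmission. */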
1334 static void l2cap_streaming_send(struct l2cap_chan *chan)
1335 {
1336 struct sk_buff *skb;
1337 u32 control;
1338 u16 fcs;
1339
1340 while ((skb = skb_dequeue(&chan->tx_q))) {
1341 control = __get_control(chan, skb->data + L2CAP_HDR_SIZE);
1342 control |= __set_txseq(chan, chan->next_tx_seq);
1343 __put_control(chan, control, skb->data + L2CAP_HDR_SIZE);
1344
1345 if (chan->fcs == L2CAP_FCS_CRC16) {
1346 fcs = crc16(0, (u8 *)skb->data,
1347 skb->len - L2CAP_FCS_SIZE);
1348 put_unaligned_le16(fcs,
1349 skb->data + skb->len - L2CAP_FCS_SIZE);
1350 }
1351
1352 l2cap_do_send(chan, skb);
1353
1354 chan->next_tx_seq = __next_seq(chan, chan->next_tx_seq);
1355 }
1356 }
1357
1358 static void l2cap_retransmit_one_frame(struct l2cap_chan *chan, u16 tx_seq)
1359 {
1360 struct sk_buff *skb, *tx_skb;
1361 u16 fcs;
1362 u32 control;
1363
1364 skb = skb_peek(&chan->tx_q);
1365 if (!skb)
1366 return;
1367
1368 while (bt_cb(skb)->tx_seq != tx_seq) {
1369 if (skb_queue_is_last(&chan->tx_q, skb))
1370 return;
1371
1372 skb = skb_queue_next(&chan->tx_q, skb);
1373 }
1374
1375 if (chan->remote_max_tx &&
1376 bt_cb(skb)->retries == chan->remote_max_tx) {
1377 l2cap_send_disconn_req(chan->conn, chan, ECONNABORTED);
1378 return;
1379 }
1380
1381 tx_skb = skb_clone(skb, GFP_ATOMIC);
1382 bt_cb(skb)->retries++;
1383
1384 control = __get_control(chan, tx_skb->data + L2CAP_HDR_SIZE);
1385 control &= __get_sar_mask(chan);
1386
1387 if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state))
1388 control |= __set_ctrl_final(chan);
1389
1390 control |= __set_reqseq(chan, chan->buffer_seq);
1391 control |= __set_txseq(chan, tx_seq);
1392
1393 __put_control(chan, control, tx_skb->data + L2CAP_HDR_SIZE);
1394
1395 if (chan->fcs == L2CAP_FCS_CRC16) {
1396 fcs = crc16(0, (u8 *)tx_skb->data,
1397 tx_skb->len - L2CAP_FCS_SIZE);
1398 put_unaligned_le16(fcs,
1399 tx_skb->data + tx_skb->len - L2CAP_FCS_SIZE);
1400 }
1401
1402 l2cap_do_send(chan, tx_skb);
1403 }
1404
1405 static int l2cap_ertm_send(struct l2cap_chan *chan)
1406 {
1407 struct sk_buff *skb, *tx_skb;
1408 u16 fcs;
1409 u32 control;
1410 int nsent = 0;
1411
1412 if (chan->state != BT_CONNECTED)
1413 return -ENOTCONN;
1414
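/* Send every frame queued behind tx_send_head while the transmit window
 * allows. Each frame goes out as a clone so the original stays on tx_q
 * until it is acknowledged (see l2cap_drop_acked_frames). */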
1415 while ((skb = chan->tx_send_head) && (!l2cap_tx_window_full(chan))) {
1416
1417 if (chan->remote_max_tx &&
1418 bt_cb(skb)->retries == chan->remote_max_tx) {
1419 l2cap_send_disconn_req(chan->conn, chan, ECONNABORTED);
1420 break;
1421 }
1422
1423 tx_skb = skb_clone(skb, GFP_ATOMIC);
1424
1425 bt_cb(skb)->retries++;
1426
1427 control = __get_control(chan, tx_skb->data + L2CAP_HDR_SIZE);
1428 control &= __get_sar_mask(chan);
1429
1430 if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state))
1431 control |= __set_ctrl_final(chan);
1432
1433 control |= __set_reqseq(chan, chan->buffer_seq);
1434 control |= __set_txseq(chan, chan->next_tx_seq);
1435
1436 __put_control(chan, control, tx_skb->data + L2CAP_HDR_SIZE);
1437
1438 if (chan->fcs == L2CAP_FCS_CRC16) {
1439 fcs = crc16(0, (u8 *)skb->data,
1440 tx_skb->len - L2CAP_FCS_SIZE);
1441 put_unaligned_le16(fcs, skb->data +
1442 tx_skb->len - L2CAP_FCS_SIZE);
1443 }
1444
1445 l2cap_do_send(chan, tx_skb);
1446
1447 __set_retrans_timer(chan);
1448
1449 bt_cb(skb)->tx_seq = chan->next_tx_seq;
1450
1451 chan->next_tx_seq = __next_seq(chan, chan->next_tx_seq);
1452
1453 if (bt_cb(skb)->retries == 1)
1454 chan->unacked_frames++;
1455
1456 chan->frames_sent++;
1457
1458 if (skb_queue_is_last(&chan->tx_q, skb))
1459 chan->tx_send_head = NULL;
1460 else
1461 chan->tx_send_head = skb_queue_next(&chan->tx_q, skb);
1462
1463 nsent++;
1464 }
1465
1466 return nsent;
1467 }
1468
1469 static int l2cap_retransmit_frames(struct l2cap_chan *chan)
1470 {
1471 int ret;
1472
1473 if (!skb_queue_empty(&chan->tx_q))
1474 chan->tx_send_head = chan->tx_q.next;
1475
1476 chan->next_tx_seq = chan->expected_ack_seq;
1477 ret = l2cap_ertm_send(chan);
1478 return ret;
1479 }
1480
1481 static void l2cap_send_ack(struct l2cap_chan *chan)
1482 {
1483 u32 control = 0;
1484
1485 control |= __set_reqseq(chan, chan->buffer_seq);
1486
1487 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
1488 control |= __set_ctrl_super(chan, L2CAP_SUPER_RNR);
1489 set_bit(CONN_RNR_SENT, &chan->conn_state);
1490 l2cap_send_sframe(chan, control);
1491 return;
1492 }
1493
1494 if (l2cap_ertm_send(chan) > 0)
1495 return;
1496
1497 control |= __set_ctrl_super(chan, L2CAP_SUPER_RR);
1498 l2cap_send_sframe(chan, control);
1499 }
1500
1501 static void l2cap_send_srejtail(struct l2cap_chan *chan)
1502 {
1503 struct srej_list *tail;
1504 u32 control;
1505
1506 control = __set_ctrl_super(chan, L2CAP_SUPER_SREJ);
1507 control |= __set_ctrl_final(chan);
1508
1509 tail = list_entry((&chan->srej_l)->prev, struct srej_list, list);
1510 control |= __set_reqseq(chan, tail->tx_seq);
1511
1512 l2cap_send_sframe(chan, control);
1513 }
1514
1515 static inline int l2cap_skbuff_fromiovec(struct sock *sk, struct msghdr *msg, int len, int count, struct sk_buff *skb)
1516 {
1517 struct l2cap_conn *conn = l2cap_pi(sk)->chan->conn;
1518 struct sk_buff **frag;
1519 int err, sent = 0;
1520
1521 if (memcpy_fromiovec(skb_put(skb, count), msg->msg_iov, count))
1522 return -EFAULT;
1523
1524 sent += count;
1525 len -= count;
1526
1527 /* Continuation fragments (no L2CAP header) */
1528 frag = &skb_shinfo(skb)->frag_list;
1529 while (len) {
1530 count = min_t(unsigned int, conn->mtu, len);
1531
1532 *frag = bt_skb_send_alloc(sk, count, msg->msg_flags & MSG_DONTWAIT, &err);
1533 if (!*frag)
1534 return err;
1535 if (memcpy_fromiovec(skb_put(*frag, count), msg->msg_iov, count))
1536 return -EFAULT;
1537
1538 (*frag)->priority = skb->priority;
1539
1540 sent += count;
1541 len -= count;
1542
1543 frag = &(*frag)->next;
1544 }
1545
1546 return sent;
1547 }
1548
1549 static struct sk_buff *l2cap_create_connless_pdu(struct l2cap_chan *chan,
1550 struct msghdr *msg, size_t len,
1551 u32 priority)
1552 {
1553 struct sock *sk = chan->sk;
1554 struct l2cap_conn *conn = chan->conn;
1555 struct sk_buff *skb;
1556 int err, count, hlen = L2CAP_HDR_SIZE + L2CAP_PSMLEN_SIZE;
1557 struct l2cap_hdr *lh;
1558
1559 BT_DBG("sk %p len %d priority %u", sk, (int)len, priority);
1560
1561 count = min_t(unsigned int, (conn->mtu - hlen), len);
1562 skb = bt_skb_send_alloc(sk, count + hlen,
1563 msg->msg_flags & MSG_DONTWAIT, &err);
1564 if (!skb)
1565 return ERR_PTR(err);
1566
1567 skb->priority = priority;
1568
1569 /* Create L2CAP header */
1570 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
1571 lh->cid = cpu_to_le16(chan->dcid);
1572 lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
1573 put_unaligned_le16(chan->psm, skb_put(skb, 2));
1574
1575 err = l2cap_skbuff_fromiovec(sk, msg, len, count, skb);
1576 if (unlikely(err < 0)) {
1577 kfree_skb(skb);
1578 return ERR_PTR(err);
1579 }
1580 return skb;
1581 }
1582
1583 static struct sk_buff *l2cap_create_basic_pdu(struct l2cap_chan *chan,
1584 struct msghdr *msg, size_t len,
1585 u32 priority)
1586 {
1587 struct sock *sk = chan->sk;
1588 struct l2cap_conn *conn = chan->conn;
1589 struct sk_buff *skb;
1590 int err, count, hlen = L2CAP_HDR_SIZE;
1591 struct l2cap_hdr *lh;
1592
1593 BT_DBG("sk %p len %d", sk, (int)len);
1594
1595 count = min_t(unsigned int, (conn->mtu - hlen), len);
1596 skb = bt_skb_send_alloc(sk, count + hlen,
1597 msg->msg_flags & MSG_DONTWAIT, &err);
1598 if (!skb)
1599 return ERR_PTR(err);
1600
1601 skb->priority = priority;
1602
1603 /* Create L2CAP header */
1604 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
1605 lh->cid = cpu_to_le16(chan->dcid);
1606 lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
1607
1608 err = l2cap_skbuff_fromiovec(sk, msg, len, count, skb);
1609 if (unlikely(err < 0)) {
1610 kfree_skb(skb);
1611 return ERR_PTR(err);
1612 }
1613 return skb;
1614 }
1615
1616 static struct sk_buff *l2cap_create_iframe_pdu(struct l2cap_chan *chan,
1617 struct msghdr *msg, size_t len,
1618 u32 control, u16 sdulen)
1619 {
1620 struct sock *sk = chan->sk;
1621 struct l2cap_conn *conn = chan->conn;
1622 struct sk_buff *skb;
1623 int err, count, hlen;
1624 struct l2cap_hdr *lh;
1625
1626 BT_DBG("sk %p len %d", sk, (int)len);
1627
1628 if (!conn)
1629 return ERR_PTR(-ENOTCONN);
1630
1631 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
1632 hlen = L2CAP_EXT_HDR_SIZE;
1633 else
1634 hlen = L2CAP_ENH_HDR_SIZE;
1635
1636 if (sdulen)
1637 hlen += L2CAP_SDULEN_SIZE;
1638
1639 if (chan->fcs == L2CAP_FCS_CRC16)
1640 hlen += L2CAP_FCS_SIZE;
1641
1642 count = min_t(unsigned int, (conn->mtu - hlen), len);
1643 skb = bt_skb_send_alloc(sk, count + hlen,
1644 msg->msg_flags & MSG_DONTWAIT, &err);
1645 if (!skb)
1646 return ERR_PTR(err);
1647
1648 /* Create L2CAP header */
1649 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
1650 lh->cid = cpu_to_le16(chan->dcid);
1651 lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
1652
1653 __put_control(chan, control, skb_put(skb, __ctrl_size(chan)));
1654
1655 if (sdulen)
1656 put_unaligned_le16(sdulen, skb_put(skb, L2CAP_SDULEN_SIZE));
1657
1658 err = l2cap_skbuff_fromiovec(sk, msg, len, count, skb);
1659 if (unlikely(err < 0)) {
1660 kfree_skb(skb);
1661 return ERR_PTR(err);
1662 }
1663
1664 if (chan->fcs == L2CAP_FCS_CRC16)
1665 put_unaligned_le16(0, skb_put(skb, L2CAP_FCS_SIZE));
1666
1667 bt_cb(skb)->retries = 0;
1668 return skb;
1669 }
1670
1671 static int l2cap_sar_segment_sdu(struct l2cap_chan *chan, struct msghdr *msg, size_t len)
1672 {
1673 struct sk_buff *skb;
1674 struct sk_buff_head sar_queue;
1675 u32 control;
1676 size_t size = 0;
1677
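/* Segmented transfer: the first PDU carries the total SDU length and is
 * marked L2CAP_SAR_START, middle PDUs are L2CAP_SAR_CONTINUE and the
 * last one L2CAP_SAR_END; all of them end up on tx_q for transmission. */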
1678 skb_queue_head_init(&sar_queue);
1679 control = __set_ctrl_sar(chan, L2CAP_SAR_START);
1680 skb = l2cap_create_iframe_pdu(chan, msg, chan->remote_mps, control, len);
1681 if (IS_ERR(skb))
1682 return PTR_ERR(skb);
1683
1684 __skb_queue_tail(&sar_queue, skb);
1685 len -= chan->remote_mps;
1686 size += chan->remote_mps;
1687
1688 while (len > 0) {
1689 size_t buflen;
1690
1691 if (len > chan->remote_mps) {
1692 control = __set_ctrl_sar(chan, L2CAP_SAR_CONTINUE);
1693 buflen = chan->remote_mps;
1694 } else {
1695 control = __set_ctrl_sar(chan, L2CAP_SAR_END);
1696 buflen = len;
1697 }
1698
1699 skb = l2cap_create_iframe_pdu(chan, msg, buflen, control, 0);
1700 if (IS_ERR(skb)) {
1701 skb_queue_purge(&sar_queue);
1702 return PTR_ERR(skb);
1703 }
1704
1705 __skb_queue_tail(&sar_queue, skb);
1706 len -= buflen;
1707 size += buflen;
1708 }
1709 skb_queue_splice_tail(&sar_queue, &chan->tx_q);
1710 if (chan->tx_send_head == NULL)
1711 chan->tx_send_head = sar_queue.next;
1712
1713 return size;
1714 }
1715
1716 int l2cap_chan_send(struct l2cap_chan *chan, struct msghdr *msg, size_t len,
1717 u32 priority)
1718 {
1719 struct sk_buff *skb;
1720 u32 control;
1721 int err;
1722
1723 /* Connectionless channel */
1724 if (chan->chan_type == L2CAP_CHAN_CONN_LESS) {
1725 skb = l2cap_create_connless_pdu(chan, msg, len, priority);
1726 if (IS_ERR(skb))
1727 return PTR_ERR(skb);
1728
1729 l2cap_do_send(chan, skb);
1730 return len;
1731 }
1732
1733 switch (chan->mode) {
1734 case L2CAP_MODE_BASIC:
1735 /* Check outgoing MTU */
1736 if (len > chan->omtu)
1737 return -EMSGSIZE;
1738
1739 /* Create a basic PDU */
1740 skb = l2cap_create_basic_pdu(chan, msg, len, priority);
1741 if (IS_ERR(skb))
1742 return PTR_ERR(skb);
1743
1744 l2cap_do_send(chan, skb);
1745 err = len;
1746 break;
1747
1748 case L2CAP_MODE_ERTM:
1749 case L2CAP_MODE_STREAMING:
1750 /* Entire SDU fits into one PDU */
1751 if (len <= chan->remote_mps) {
1752 control = __set_ctrl_sar(chan, L2CAP_SAR_UNSEGMENTED);
1753 skb = l2cap_create_iframe_pdu(chan, msg, len, control,
1754 0);
1755 if (IS_ERR(skb))
1756 return PTR_ERR(skb);
1757
1758 __skb_queue_tail(&chan->tx_q, skb);
1759
1760 if (chan->tx_send_head == NULL)
1761 chan->tx_send_head = skb;
1762
1763 } else {
1764 /* Segment the SDU into multiple PDUs */
1765 err = l2cap_sar_segment_sdu(chan, msg, len);
1766 if (err < 0)
1767 return err;
1768 }
1769
1770 if (chan->mode == L2CAP_MODE_STREAMING) {
1771 l2cap_streaming_send(chan);
1772 err = len;
1773 break;
1774 }
1775
1776 if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state) &&
1777 test_bit(CONN_WAIT_F, &chan->conn_state)) {
1778 err = len;
1779 break;
1780 }
1781
1782 err = l2cap_ertm_send(chan);
1783 if (err >= 0)
1784 err = len;
1785
1786 break;
1787
1788 default:
1789 BT_DBG("bad state %1.1x", chan->mode);
1790 err = -EBADFD;
1791 }
1792
1793 return err;
1794 }
1795
1796 /* Copy frame to all raw sockets on that connection */
1797 static void l2cap_raw_recv(struct l2cap_conn *conn, struct sk_buff *skb)
1798 {
1799 struct sk_buff *nskb;
1800 struct l2cap_chan *chan;
1801
1802 BT_DBG("conn %p", conn);
1803
1804 rcu_read_lock();
1805
1806 list_for_each_entry_rcu(chan, &conn->chan_l, list) {
1807 struct sock *sk = chan->sk;
1808 if (chan->chan_type != L2CAP_CHAN_RAW)
1809 continue;
1810
1811 /* Don't send frame to the socket it came from */
1812 if (skb->sk == sk)
1813 continue;
1814 nskb = skb_clone(skb, GFP_ATOMIC);
1815 if (!nskb)
1816 continue;
1817
1818 if (chan->ops->recv(chan->data, nskb))
1819 kfree_skb(nskb);
1820 }
1821
1822 rcu_read_unlock();
1823 }
1824
1825 /* ---- L2CAP signalling commands ---- */
1826 static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
1827 u8 code, u8 ident, u16 dlen, void *data)
1828 {
1829 struct sk_buff *skb, **frag;
1830 struct l2cap_cmd_hdr *cmd;
1831 struct l2cap_hdr *lh;
1832 int len, count;
1833
1834 BT_DBG("conn %p, code 0x%2.2x, ident 0x%2.2x, len %d",
1835 conn, code, ident, dlen);
1836
1837 len = L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE + dlen;
1838 count = min_t(unsigned int, conn->mtu, len);
1839
1840 skb = bt_skb_alloc(count, GFP_ATOMIC);
1841 if (!skb)
1842 return NULL;
1843
1844 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
1845 lh->len = cpu_to_le16(L2CAP_CMD_HDR_SIZE + dlen);
1846
1847 if (conn->hcon->type == LE_LINK)
1848 lh->cid = cpu_to_le16(L2CAP_CID_LE_SIGNALING);
1849 else
1850 lh->cid = cpu_to_le16(L2CAP_CID_SIGNALING);
1851
1852 cmd = (struct l2cap_cmd_hdr *) skb_put(skb, L2CAP_CMD_HDR_SIZE);
1853 cmd->code = code;
1854 cmd->ident = ident;
1855 cmd->len = cpu_to_le16(dlen);
1856
1857 if (dlen) {
1858 count -= L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE;
1859 memcpy(skb_put(skb, count), data, count);
1860 data += count;
1861 }
1862
1863 len -= skb->len;
1864
1865 /* Continuation fragments (no L2CAP header) */
1866 frag = &skb_shinfo(skb)->frag_list;
1867 while (len) {
1868 count = min_t(unsigned int, conn->mtu, len);
1869
1870 *frag = bt_skb_alloc(count, GFP_ATOMIC);
1871 if (!*frag)
1872 goto fail;
1873
1874 memcpy(skb_put(*frag, count), data, count);
1875
1876 len -= count;
1877 data += count;
1878
1879 frag = &(*frag)->next;
1880 }
1881
1882 return skb;
1883
1884 fail:
1885 kfree_skb(skb);
1886 return NULL;
1887 }
1888
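/* Configuration options travel as type/length/value triples; the helpers
 * below read and append such options, treating 1-, 2- and 4-byte values
 * as integers and anything else as a pointer to an opaque buffer. */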
1889 static inline int l2cap_get_conf_opt(void **ptr, int *type, int *olen, unsigned long *val)
1890 {
1891 struct l2cap_conf_opt *opt = *ptr;
1892 int len;
1893
1894 len = L2CAP_CONF_OPT_SIZE + opt->len;
1895 *ptr += len;
1896
1897 *type = opt->type;
1898 *olen = opt->len;
1899
1900 switch (opt->len) {
1901 case 1:
1902 *val = *((u8 *) opt->val);
1903 break;
1904
1905 case 2:
1906 *val = get_unaligned_le16(opt->val);
1907 break;
1908
1909 case 4:
1910 *val = get_unaligned_le32(opt->val);
1911 break;
1912
1913 default:
1914 *val = (unsigned long) opt->val;
1915 break;
1916 }
1917
1918 BT_DBG("type 0x%2.2x len %d val 0x%lx", *type, opt->len, *val);
1919 return len;
1920 }
1921
1922 static void l2cap_add_conf_opt(void **ptr, u8 type, u8 len, unsigned long val)
1923 {
1924 struct l2cap_conf_opt *opt = *ptr;
1925
1926 BT_DBG("type 0x%2.2x len %d val 0x%lx", type, len, val);
1927
1928 opt->type = type;
1929 opt->len = len;
1930
1931 switch (len) {
1932 case 1:
1933 *((u8 *) opt->val) = val;
1934 break;
1935
1936 case 2:
1937 put_unaligned_le16(val, opt->val);
1938 break;
1939
1940 case 4:
1941 put_unaligned_le32(val, opt->val);
1942 break;
1943
1944 default:
1945 memcpy(opt->val, (void *) val, len);
1946 break;
1947 }
1948
1949 *ptr += L2CAP_CONF_OPT_SIZE + len;
1950 }
1951
1952 static void l2cap_add_opt_efs(void **ptr, struct l2cap_chan *chan)
1953 {
1954 struct l2cap_conf_efs efs;
1955
1956 switch (chan->mode) {
1957 case L2CAP_MODE_ERTM:
1958 efs.id = chan->local_id;
1959 efs.stype = chan->local_stype;
1960 efs.msdu = cpu_to_le16(chan->local_msdu);
1961 efs.sdu_itime = cpu_to_le32(chan->local_sdu_itime);
1962 efs.acc_lat = cpu_to_le32(L2CAP_DEFAULT_ACC_LAT);
1963 efs.flush_to = cpu_to_le32(L2CAP_DEFAULT_FLUSH_TO);
1964 break;
1965
1966 case L2CAP_MODE_STREAMING:
1967 efs.id = 1;
1968 efs.stype = L2CAP_SERV_BESTEFFORT;
1969 efs.msdu = cpu_to_le16(chan->local_msdu);
1970 efs.sdu_itime = cpu_to_le32(chan->local_sdu_itime);
1971 efs.acc_lat = 0;
1972 efs.flush_to = 0;
1973 break;
1974
1975 default:
1976 return;
1977 }
1978
1979 l2cap_add_conf_opt(ptr, L2CAP_CONF_EFS, sizeof(efs),
1980 (unsigned long) &efs);
1981 }
1982
1983 static void l2cap_ack_timeout(struct work_struct *work)
1984 {
1985 struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
1986 ack_timer.work);
1987
1988 BT_DBG("chan %p", chan);
1989
1990 lock_sock(chan->sk);
1991 l2cap_send_ack(chan);
1992 release_sock(chan->sk);
1993 }
1994
1995 static inline void l2cap_ertm_init(struct l2cap_chan *chan)
1996 {
1997 chan->expected_ack_seq = 0;
1998 chan->unacked_frames = 0;
1999 chan->buffer_seq = 0;
2000 chan->num_acked = 0;
2001 chan->frames_sent = 0;
2002
2003 INIT_DELAYED_WORK(&chan->retrans_timer, l2cap_retrans_timeout);
2004 INIT_DELAYED_WORK(&chan->monitor_timer, l2cap_monitor_timeout);
2005 INIT_DELAYED_WORK(&chan->ack_timer, l2cap_ack_timeout);
2006
2007 skb_queue_head_init(&chan->srej_q);
2008
2009 INIT_LIST_HEAD(&chan->srej_l);
2010 }
2011
2012 static inline __u8 l2cap_select_mode(__u8 mode, __u16 remote_feat_mask)
2013 {
2014 switch (mode) {
2015 case L2CAP_MODE_STREAMING:
2016 case L2CAP_MODE_ERTM:
2017 if (l2cap_mode_supported(mode, remote_feat_mask))
2018 return mode;
2019 /* fall through */
2020 default:
2021 return L2CAP_MODE_BASIC;
2022 }
2023 }
2024
2025 static inline bool __l2cap_ews_supported(struct l2cap_chan *chan)
2026 {
2027 return enable_hs && chan->conn->feat_mask & L2CAP_FEAT_EXT_WINDOW;
2028 }
2029
2030 static inline bool __l2cap_efs_supported(struct l2cap_chan *chan)
2031 {
2032 return enable_hs && chan->conn->feat_mask & L2CAP_FEAT_EXT_FLOW;
2033 }
2034
2035 static inline void l2cap_txwin_setup(struct l2cap_chan *chan)
2036 {
2037 if (chan->tx_win > L2CAP_DEFAULT_TX_WINDOW &&
2038 __l2cap_ews_supported(chan)) {
2039 /* use extended control field */
2040 set_bit(FLAG_EXT_CTRL, &chan->flags);
2041 chan->tx_win_max = L2CAP_DEFAULT_EXT_WINDOW;
2042 } else {
2043 chan->tx_win = min_t(u16, chan->tx_win,
2044 L2CAP_DEFAULT_TX_WINDOW);
2045 chan->tx_win_max = L2CAP_DEFAULT_TX_WINDOW;
2046 }
2047 }
2048
2049 static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data)
2050 {
2051 struct l2cap_conf_req *req = data;
2052 struct l2cap_conf_rfc rfc = { .mode = chan->mode };
2053 void *ptr = req->data;
2054 u16 size;
2055
2056 BT_DBG("chan %p", chan);
2057
2058 if (chan->num_conf_req || chan->num_conf_rsp)
2059 goto done;
2060
2061 switch (chan->mode) {
2062 case L2CAP_MODE_STREAMING:
2063 case L2CAP_MODE_ERTM:
2064 if (test_bit(CONF_STATE2_DEVICE, &chan->conf_state))
2065 break;
2066
2067 if (__l2cap_efs_supported(chan))
2068 set_bit(FLAG_EFS_ENABLE, &chan->flags);
2069
2070 /* fall through */
2071 default:
2072 chan->mode = l2cap_select_mode(rfc.mode, chan->conn->feat_mask);
2073 break;
2074 }
2075
2076 done:
2077 if (chan->imtu != L2CAP_DEFAULT_MTU)
2078 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->imtu);
2079
2080 switch (chan->mode) {
2081 case L2CAP_MODE_BASIC:
2082 if (!(chan->conn->feat_mask & L2CAP_FEAT_ERTM) &&
2083 !(chan->conn->feat_mask & L2CAP_FEAT_STREAMING))
2084 break;
2085
2086 rfc.mode = L2CAP_MODE_BASIC;
2087 rfc.txwin_size = 0;
2088 rfc.max_transmit = 0;
2089 rfc.retrans_timeout = 0;
2090 rfc.monitor_timeout = 0;
2091 rfc.max_pdu_size = 0;
2092
2093 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
2094 (unsigned long) &rfc);
2095 break;
2096
2097 case L2CAP_MODE_ERTM:
2098 rfc.mode = L2CAP_MODE_ERTM;
2099 rfc.max_transmit = chan->max_tx;
2100 rfc.retrans_timeout = 0;
2101 rfc.monitor_timeout = 0;
2102
2103 size = min_t(u16, L2CAP_DEFAULT_MAX_PDU_SIZE, chan->conn->mtu -
2104 L2CAP_EXT_HDR_SIZE -
2105 L2CAP_SDULEN_SIZE -
2106 L2CAP_FCS_SIZE);
2107 rfc.max_pdu_size = cpu_to_le16(size);
2108
2109 l2cap_txwin_setup(chan);
2110
2111 rfc.txwin_size = min_t(u16, chan->tx_win,
2112 L2CAP_DEFAULT_TX_WINDOW);
2113
2114 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
2115 (unsigned long) &rfc);
2116
2117 if (test_bit(FLAG_EFS_ENABLE, &chan->flags))
2118 l2cap_add_opt_efs(&ptr, chan);
2119
2120 if (!(chan->conn->feat_mask & L2CAP_FEAT_FCS))
2121 break;
2122
2123 if (chan->fcs == L2CAP_FCS_NONE ||
2124 test_bit(CONF_NO_FCS_RECV, &chan->conf_state)) {
2125 chan->fcs = L2CAP_FCS_NONE;
2126 l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1, chan->fcs);
2127 }
2128
2129 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
2130 l2cap_add_conf_opt(&ptr, L2CAP_CONF_EWS, 2,
2131 chan->tx_win);
2132 break;
2133
2134 case L2CAP_MODE_STREAMING:
2135 rfc.mode = L2CAP_MODE_STREAMING;
2136 rfc.txwin_size = 0;
2137 rfc.max_transmit = 0;
2138 rfc.retrans_timeout = 0;
2139 rfc.monitor_timeout = 0;
2140
2141 size = min_t(u16, L2CAP_DEFAULT_MAX_PDU_SIZE, chan->conn->mtu -
2142 L2CAP_EXT_HDR_SIZE -
2143 L2CAP_SDULEN_SIZE -
2144 L2CAP_FCS_SIZE);
2145 rfc.max_pdu_size = cpu_to_le16(size);
2146
2147 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
2148 (unsigned long) &rfc);
2149
2150 if (test_bit(FLAG_EFS_ENABLE, &chan->flags))
2151 l2cap_add_opt_efs(&ptr, chan);
2152
2153 if (!(chan->conn->feat_mask & L2CAP_FEAT_FCS))
2154 break;
2155
2156 if (chan->fcs == L2CAP_FCS_NONE ||
2157 test_bit(CONF_NO_FCS_RECV, &chan->conf_state)) {
2158 chan->fcs = L2CAP_FCS_NONE;
2159 l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1, chan->fcs);
2160 }
2161 break;
2162 }
2163
2164 req->dcid = cpu_to_le16(chan->dcid);
2165 req->flags = cpu_to_le16(0);
2166
2167 return ptr - data;
2168 }
2169
2170 static int l2cap_parse_conf_req(struct l2cap_chan *chan, void *data)
2171 {
2172 struct l2cap_conf_rsp *rsp = data;
2173 void *ptr = rsp->data;
2174 void *req = chan->conf_req;
2175 int len = chan->conf_len;
2176 int type, hint, olen;
2177 unsigned long val;
2178 struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
2179 struct l2cap_conf_efs efs;
2180 u8 remote_efs = 0;
2181 u16 mtu = L2CAP_DEFAULT_MTU;
2182 u16 result = L2CAP_CONF_SUCCESS;
2183 u16 size;
2184
2185 BT_DBG("chan %p", chan);
2186
2187 while (len >= L2CAP_CONF_OPT_SIZE) {
2188 len -= l2cap_get_conf_opt(&req, &type, &olen, &val);
2189
2190 hint = type & L2CAP_CONF_HINT;
2191 type &= L2CAP_CONF_MASK;
2192
2193 switch (type) {
2194 case L2CAP_CONF_MTU:
2195 mtu = val;
2196 break;
2197
2198 case L2CAP_CONF_FLUSH_TO:
2199 chan->flush_to = val;
2200 break;
2201
2202 case L2CAP_CONF_QOS:
2203 break;
2204
2205 case L2CAP_CONF_RFC:
2206 if (olen == sizeof(rfc))
2207 memcpy(&rfc, (void *) val, olen);
2208 break;
2209
2210 case L2CAP_CONF_FCS:
2211 if (val == L2CAP_FCS_NONE)
2212 set_bit(CONF_NO_FCS_RECV, &chan->conf_state);
2213 break;
2214
2215 case L2CAP_CONF_EFS:
2216 remote_efs = 1;
2217 if (olen == sizeof(efs))
2218 memcpy(&efs, (void *) val, olen);
2219 break;
2220
2221 case L2CAP_CONF_EWS:
2222 if (!enable_hs)
2223 return -ECONNREFUSED;
2224
2225 set_bit(FLAG_EXT_CTRL, &chan->flags);
2226 set_bit(CONF_EWS_RECV, &chan->conf_state);
2227 chan->tx_win_max = L2CAP_DEFAULT_EXT_WINDOW;
2228 chan->remote_tx_win = val;
2229 break;
2230
2231 default:
2232 if (hint)
2233 break;
2234
2235 result = L2CAP_CONF_UNKNOWN;
2236 *((u8 *) ptr++) = type;
2237 break;
2238 }
2239 }
2240
2241 if (chan->num_conf_rsp || chan->num_conf_req > 1)
2242 goto done;
2243
2244 switch (chan->mode) {
2245 case L2CAP_MODE_STREAMING:
2246 case L2CAP_MODE_ERTM:
2247 if (!test_bit(CONF_STATE2_DEVICE, &chan->conf_state)) {
2248 chan->mode = l2cap_select_mode(rfc.mode,
2249 chan->conn->feat_mask);
2250 break;
2251 }
2252
2253 if (remote_efs) {
2254 if (__l2cap_efs_supported(chan))
2255 set_bit(FLAG_EFS_ENABLE, &chan->flags);
2256 else
2257 return -ECONNREFUSED;
2258 }
2259
2260 if (chan->mode != rfc.mode)
2261 return -ECONNREFUSED;
2262
2263 break;
2264 }
2265
2266 done:
2267 if (chan->mode != rfc.mode) {
2268 result = L2CAP_CONF_UNACCEPT;
2269 rfc.mode = chan->mode;
2270
2271 if (chan->num_conf_rsp == 1)
2272 return -ECONNREFUSED;
2273
2274 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
2275 sizeof(rfc), (unsigned long) &rfc);
2276 }
2277
2278 if (result == L2CAP_CONF_SUCCESS) {
2279 /* Configure output options and let the other side know
2280 * which ones we don't like. */
2281
2282 if (mtu < L2CAP_DEFAULT_MIN_MTU)
2283 result = L2CAP_CONF_UNACCEPT;
2284 else {
2285 chan->omtu = mtu;
2286 set_bit(CONF_MTU_DONE, &chan->conf_state);
2287 }
2288 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->omtu);
2289
2290 if (remote_efs) {
2291 if (chan->local_stype != L2CAP_SERV_NOTRAFIC &&
2292 efs.stype != L2CAP_SERV_NOTRAFIC &&
2293 efs.stype != chan->local_stype) {
2294
2295 result = L2CAP_CONF_UNACCEPT;
2296
2297 if (chan->num_conf_req >= 1)
2298 return -ECONNREFUSED;
2299
2300 l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS,
2301 sizeof(efs),
2302 (unsigned long) &efs);
2303 } else {
2304 /* Send PENDING Conf Rsp */
2305 result = L2CAP_CONF_PENDING;
2306 set_bit(CONF_LOC_CONF_PEND, &chan->conf_state);
2307 }
2308 }
2309
2310 switch (rfc.mode) {
2311 case L2CAP_MODE_BASIC:
2312 chan->fcs = L2CAP_FCS_NONE;
2313 set_bit(CONF_MODE_DONE, &chan->conf_state);
2314 break;
2315
2316 case L2CAP_MODE_ERTM:
2317 if (!test_bit(CONF_EWS_RECV, &chan->conf_state))
2318 chan->remote_tx_win = rfc.txwin_size;
2319 else
2320 rfc.txwin_size = L2CAP_DEFAULT_TX_WINDOW;
2321
2322 chan->remote_max_tx = rfc.max_transmit;
2323
2324 size = min_t(u16, le16_to_cpu(rfc.max_pdu_size),
2325 chan->conn->mtu -
2326 L2CAP_EXT_HDR_SIZE -
2327 L2CAP_SDULEN_SIZE -
2328 L2CAP_FCS_SIZE);
2329 rfc.max_pdu_size = cpu_to_le16(size);
2330 chan->remote_mps = size;
2331
2332 rfc.retrans_timeout =
2333 le16_to_cpu(L2CAP_DEFAULT_RETRANS_TO);
2334 rfc.monitor_timeout =
2335 le16_to_cpu(L2CAP_DEFAULT_MONITOR_TO);
2336
2337 set_bit(CONF_MODE_DONE, &chan->conf_state);
2338
2339 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
2340 sizeof(rfc), (unsigned long) &rfc);
2341
2342 if (test_bit(FLAG_EFS_ENABLE, &chan->flags)) {
2343 chan->remote_id = efs.id;
2344 chan->remote_stype = efs.stype;
2345 chan->remote_msdu = le16_to_cpu(efs.msdu);
2346 chan->remote_flush_to =
2347 le32_to_cpu(efs.flush_to);
2348 chan->remote_acc_lat =
2349 le32_to_cpu(efs.acc_lat);
2350 chan->remote_sdu_itime =
2351 le32_to_cpu(efs.sdu_itime);
2352 l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS,
2353 sizeof(efs), (unsigned long) &efs);
2354 }
2355 break;
2356
2357 case L2CAP_MODE_STREAMING:
2358 size = min_t(u16, le16_to_cpu(rfc.max_pdu_size),
2359 chan->conn->mtu -
2360 L2CAP_EXT_HDR_SIZE -
2361 L2CAP_SDULEN_SIZE -
2362 L2CAP_FCS_SIZE);
2363 rfc.max_pdu_size = cpu_to_le16(size);
2364 chan->remote_mps = size;
2365
2366 set_bit(CONF_MODE_DONE, &chan->conf_state);
2367
2368 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
2369 sizeof(rfc), (unsigned long) &rfc);
2370
2371 break;
2372
2373 default:
2374 result = L2CAP_CONF_UNACCEPT;
2375
2376 memset(&rfc, 0, sizeof(rfc));
2377 rfc.mode = chan->mode;
2378 }
2379
2380 if (result == L2CAP_CONF_SUCCESS)
2381 set_bit(CONF_OUTPUT_DONE, &chan->conf_state);
2382 }
2383 rsp->scid = cpu_to_le16(chan->dcid);
2384 rsp->result = cpu_to_le16(result);
2385 rsp->flags = cpu_to_le16(0x0000);
2386
2387 return ptr - data;
2388 }
2389
2390 static int l2cap_parse_conf_rsp(struct l2cap_chan *chan, void *rsp, int len, void *data, u16 *result)
2391 {
2392 struct l2cap_conf_req *req = data;
2393 void *ptr = req->data;
2394 int type, olen;
2395 unsigned long val;
2396 struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
2397 struct l2cap_conf_efs efs;
2398
2399 BT_DBG("chan %p, rsp %p, len %d, req %p", chan, rsp, len, data);
2400
2401 while (len >= L2CAP_CONF_OPT_SIZE) {
2402 len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);
2403
2404 switch (type) {
2405 case L2CAP_CONF_MTU:
2406 if (val < L2CAP_DEFAULT_MIN_MTU) {
2407 *result = L2CAP_CONF_UNACCEPT;
2408 chan->imtu = L2CAP_DEFAULT_MIN_MTU;
2409 } else
2410 chan->imtu = val;
2411 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->imtu);
2412 break;
2413
2414 case L2CAP_CONF_FLUSH_TO:
2415 chan->flush_to = val;
2416 l2cap_add_conf_opt(&ptr, L2CAP_CONF_FLUSH_TO,
2417 2, chan->flush_to);
2418 break;
2419
2420 case L2CAP_CONF_RFC:
2421 if (olen == sizeof(rfc))
2422 memcpy(&rfc, (void *)val, olen);
2423
2424 if (test_bit(CONF_STATE2_DEVICE, &chan->conf_state) &&
2425 rfc.mode != chan->mode)
2426 return -ECONNREFUSED;
2427
2428 chan->fcs = 0;
2429
2430 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
2431 sizeof(rfc), (unsigned long) &rfc);
2432 break;
2433
2434 case L2CAP_CONF_EWS:
2435 chan->tx_win = min_t(u16, val,
2436 L2CAP_DEFAULT_EXT_WINDOW);
2437 l2cap_add_conf_opt(&ptr, L2CAP_CONF_EWS, 2,
2438 chan->tx_win);
2439 break;
2440
2441 case L2CAP_CONF_EFS:
2442 if (olen == sizeof(efs))
2443 memcpy(&efs, (void *)val, olen);
2444
2445 if (chan->local_stype != L2CAP_SERV_NOTRAFIC &&
2446 efs.stype != L2CAP_SERV_NOTRAFIC &&
2447 efs.stype != chan->local_stype)
2448 return -ECONNREFUSED;
2449
2450 l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS,
2451 sizeof(efs), (unsigned long) &efs);
2452 break;
2453 }
2454 }
2455
2456 if (chan->mode == L2CAP_MODE_BASIC && chan->mode != rfc.mode)
2457 return -ECONNREFUSED;
2458
2459 chan->mode = rfc.mode;
2460
2461 if (*result == L2CAP_CONF_SUCCESS || *result == L2CAP_CONF_PENDING) {
2462 switch (rfc.mode) {
2463 case L2CAP_MODE_ERTM:
2464 chan->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
2465 chan->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
2466 chan->mps = le16_to_cpu(rfc.max_pdu_size);
2467
2468 if (test_bit(FLAG_EFS_ENABLE, &chan->flags)) {
2469 chan->local_msdu = le16_to_cpu(efs.msdu);
2470 chan->local_sdu_itime =
2471 le32_to_cpu(efs.sdu_itime);
2472 chan->local_acc_lat = le32_to_cpu(efs.acc_lat);
2473 chan->local_flush_to =
2474 le32_to_cpu(efs.flush_to);
2475 }
2476 break;
2477
2478 case L2CAP_MODE_STREAMING:
2479 chan->mps = le16_to_cpu(rfc.max_pdu_size);
2480 }
2481 }
2482
2483 req->dcid = cpu_to_le16(chan->dcid);
2484 req->flags = cpu_to_le16(0x0000);
2485
2486 return ptr - data;
2487 }
2488
2489 static int l2cap_build_conf_rsp(struct l2cap_chan *chan, void *data, u16 result, u16 flags)
2490 {
2491 struct l2cap_conf_rsp *rsp = data;
2492 void *ptr = rsp->data;
2493
2494 BT_DBG("chan %p", chan);
2495
2496 rsp->scid = cpu_to_le16(chan->dcid);
2497 rsp->result = cpu_to_le16(result);
2498 rsp->flags = cpu_to_le16(flags);
2499
2500 return ptr - data;
2501 }
2502
2503 void __l2cap_connect_rsp_defer(struct l2cap_chan *chan)
2504 {
2505 struct l2cap_conn_rsp rsp;
2506 struct l2cap_conn *conn = chan->conn;
2507 u8 buf[128];
2508
2509 rsp.scid = cpu_to_le16(chan->dcid);
2510 rsp.dcid = cpu_to_le16(chan->scid);
2511 rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
2512 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
2513 l2cap_send_cmd(conn, chan->ident,
2514 L2CAP_CONN_RSP, sizeof(rsp), &rsp);
2515
2516 if (test_and_set_bit(CONF_REQ_SENT, &chan->conf_state))
2517 return;
2518
2519 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
2520 l2cap_build_conf_req(chan, buf), buf);
2521 chan->num_conf_req++;
2522 }
2523
2524 static void l2cap_conf_rfc_get(struct l2cap_chan *chan, void *rsp, int len)
2525 {
2526 int type, olen;
2527 unsigned long val;
2528 struct l2cap_conf_rfc rfc;
2529
2530 BT_DBG("chan %p, rsp %p, len %d", chan, rsp, len);
2531
2532 if ((chan->mode != L2CAP_MODE_ERTM) && (chan->mode != L2CAP_MODE_STREAMING))
2533 return;
2534
2535 while (len >= L2CAP_CONF_OPT_SIZE) {
2536 len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);
2537
2538 switch (type) {
2539 case L2CAP_CONF_RFC:
2540 if (olen == sizeof(rfc))
2541 memcpy(&rfc, (void *)val, olen);
2542 goto done;
2543 }
2544 }
2545
2546 /* Use sane default values in case a misbehaving remote device
2547 * did not send an RFC option.
2548 */
2549 rfc.mode = chan->mode;
2550 rfc.retrans_timeout = cpu_to_le16(L2CAP_DEFAULT_RETRANS_TO);
2551 rfc.monitor_timeout = cpu_to_le16(L2CAP_DEFAULT_MONITOR_TO);
2552 rfc.max_pdu_size = cpu_to_le16(chan->imtu);
2553
2554 BT_ERR("Expected RFC option was not found, using defaults");
2555
2556 done:
2557 switch (rfc.mode) {
2558 case L2CAP_MODE_ERTM:
2559 chan->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
2560 chan->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
2561 chan->mps = le16_to_cpu(rfc.max_pdu_size);
2562 break;
2563 case L2CAP_MODE_STREAMING:
2564 chan->mps = le16_to_cpu(rfc.max_pdu_size);
2565 }
2566 }
2567
2568 static inline int l2cap_command_rej(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2569 {
2570 struct l2cap_cmd_rej_unk *rej = (struct l2cap_cmd_rej_unk *) data;
2571
2572 if (rej->reason != L2CAP_REJ_NOT_UNDERSTOOD)
2573 return 0;
2574
2575 if ((conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) &&
2576 cmd->ident == conn->info_ident) {
2577 __cancel_delayed_work(&conn->info_timer);
2578
2579 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
2580 conn->info_ident = 0;
2581
2582 l2cap_conn_start(conn);
2583 }
2584
2585 return 0;
2586 }
2587
2588 static inline int l2cap_connect_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2589 {
2590 struct l2cap_conn_req *req = (struct l2cap_conn_req *) data;
2591 struct l2cap_conn_rsp rsp;
2592 struct l2cap_chan *chan = NULL, *pchan;
2593 struct sock *parent, *sk = NULL;
2594 int result, status = L2CAP_CS_NO_INFO;
2595
2596 u16 dcid = 0, scid = __le16_to_cpu(req->scid);
2597 __le16 psm = req->psm;
2598
2599 BT_DBG("psm 0x%2.2x scid 0x%4.4x", psm, scid);
2600
2601 	/* Check if we have a socket listening on this PSM */
2602 pchan = l2cap_global_chan_by_psm(BT_LISTEN, psm, conn->src);
2603 if (!pchan) {
2604 result = L2CAP_CR_BAD_PSM;
2605 goto sendresp;
2606 }
2607
2608 parent = pchan->sk;
2609
2610 lock_sock(parent);
2611
2612 /* Check if the ACL is secure enough (if not SDP) */
2613 if (psm != cpu_to_le16(0x0001) &&
2614 !hci_conn_check_link_mode(conn->hcon)) {
2615 conn->disc_reason = HCI_ERROR_AUTH_FAILURE;
2616 result = L2CAP_CR_SEC_BLOCK;
2617 goto response;
2618 }
2619
2620 result = L2CAP_CR_NO_MEM;
2621
2622 /* Check for backlog size */
2623 if (sk_acceptq_is_full(parent)) {
2624 BT_DBG("backlog full %d", parent->sk_ack_backlog);
2625 goto response;
2626 }
2627
2628 chan = pchan->ops->new_connection(pchan->data);
2629 if (!chan)
2630 goto response;
2631
2632 sk = chan->sk;
2633
2634 	/* Check if we already have a channel with that dcid */
2635 if (__l2cap_get_chan_by_dcid(conn, scid)) {
2636 sock_set_flag(sk, SOCK_ZAPPED);
2637 chan->ops->close(chan->data);
2638 goto response;
2639 }
2640
2641 hci_conn_hold(conn->hcon);
2642
2643 bacpy(&bt_sk(sk)->src, conn->src);
2644 bacpy(&bt_sk(sk)->dst, conn->dst);
2645 chan->psm = psm;
2646 chan->dcid = scid;
2647
2648 bt_accept_enqueue(parent, sk);
2649
2650 l2cap_chan_add(conn, chan);
2651
2652 dcid = chan->scid;
2653
2654 __set_chan_timer(chan, sk->sk_sndtimeo);
2655
2656 chan->ident = cmd->ident;
2657
2658 if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE) {
2659 if (l2cap_chan_check_security(chan)) {
2660 if (bt_sk(sk)->defer_setup) {
2661 l2cap_state_change(chan, BT_CONNECT2);
2662 result = L2CAP_CR_PEND;
2663 status = L2CAP_CS_AUTHOR_PEND;
2664 parent->sk_data_ready(parent, 0);
2665 } else {
2666 l2cap_state_change(chan, BT_CONFIG);
2667 result = L2CAP_CR_SUCCESS;
2668 status = L2CAP_CS_NO_INFO;
2669 }
2670 } else {
2671 l2cap_state_change(chan, BT_CONNECT2);
2672 result = L2CAP_CR_PEND;
2673 status = L2CAP_CS_AUTHEN_PEND;
2674 }
2675 } else {
2676 l2cap_state_change(chan, BT_CONNECT2);
2677 result = L2CAP_CR_PEND;
2678 status = L2CAP_CS_NO_INFO;
2679 }
2680
2681 response:
2682 release_sock(parent);
2683
2684 sendresp:
2685 rsp.scid = cpu_to_le16(scid);
2686 rsp.dcid = cpu_to_le16(dcid);
2687 rsp.result = cpu_to_le16(result);
2688 rsp.status = cpu_to_le16(status);
2689 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONN_RSP, sizeof(rsp), &rsp);
2690
2691 if (result == L2CAP_CR_PEND && status == L2CAP_CS_NO_INFO) {
2692 struct l2cap_info_req info;
2693 info.type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
2694
2695 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
2696 conn->info_ident = l2cap_get_ident(conn);
2697
2698 schedule_delayed_work(&conn->info_timer,
2699 msecs_to_jiffies(L2CAP_INFO_TIMEOUT));
2700
2701 l2cap_send_cmd(conn, conn->info_ident,
2702 L2CAP_INFO_REQ, sizeof(info), &info);
2703 }
2704
2705 if (chan && !test_bit(CONF_REQ_SENT, &chan->conf_state) &&
2706 result == L2CAP_CR_SUCCESS) {
2707 u8 buf[128];
2708 set_bit(CONF_REQ_SENT, &chan->conf_state);
2709 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
2710 l2cap_build_conf_req(chan, buf), buf);
2711 chan->num_conf_req++;
2712 }
2713
2714 return 0;
2715 }
2716
2717 static inline int l2cap_connect_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2718 {
2719 struct l2cap_conn_rsp *rsp = (struct l2cap_conn_rsp *) data;
2720 u16 scid, dcid, result, status;
2721 struct l2cap_chan *chan;
2722 struct sock *sk;
2723 u8 req[128];
2724
2725 scid = __le16_to_cpu(rsp->scid);
2726 dcid = __le16_to_cpu(rsp->dcid);
2727 result = __le16_to_cpu(rsp->result);
2728 status = __le16_to_cpu(rsp->status);
2729
2730 BT_DBG("dcid 0x%4.4x scid 0x%4.4x result 0x%2.2x status 0x%2.2x", dcid, scid, result, status);
2731
2732 if (scid) {
2733 chan = l2cap_get_chan_by_scid(conn, scid);
2734 if (!chan)
2735 return -EFAULT;
2736 } else {
2737 chan = l2cap_get_chan_by_ident(conn, cmd->ident);
2738 if (!chan)
2739 return -EFAULT;
2740 }
2741
2742 sk = chan->sk;
2743
2744 switch (result) {
2745 case L2CAP_CR_SUCCESS:
2746 l2cap_state_change(chan, BT_CONFIG);
2747 chan->ident = 0;
2748 chan->dcid = dcid;
2749 clear_bit(CONF_CONNECT_PEND, &chan->conf_state);
2750
2751 if (test_and_set_bit(CONF_REQ_SENT, &chan->conf_state))
2752 break;
2753
2754 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
2755 l2cap_build_conf_req(chan, req), req);
2756 chan->num_conf_req++;
2757 break;
2758
2759 case L2CAP_CR_PEND:
2760 set_bit(CONF_CONNECT_PEND, &chan->conf_state);
2761 break;
2762
2763 default:
2764 l2cap_chan_del(chan, ECONNREFUSED);
2765 break;
2766 }
2767
2768 release_sock(sk);
2769 return 0;
2770 }
2771
2772 static inline void set_default_fcs(struct l2cap_chan *chan)
2773 {
2774 /* FCS is enabled only in ERTM or streaming mode, if one or both
2775 * sides request it.
2776 */
2777 if (chan->mode != L2CAP_MODE_ERTM && chan->mode != L2CAP_MODE_STREAMING)
2778 chan->fcs = L2CAP_FCS_NONE;
2779 else if (!test_bit(CONF_NO_FCS_RECV, &chan->conf_state))
2780 chan->fcs = L2CAP_FCS_CRC16;
2781 }
2782
2783 static inline int l2cap_config_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u16 cmd_len, u8 *data)
2784 {
2785 struct l2cap_conf_req *req = (struct l2cap_conf_req *) data;
2786 u16 dcid, flags;
2787 u8 rsp[64];
2788 struct l2cap_chan *chan;
2789 struct sock *sk;
2790 int len;
2791
2792 dcid = __le16_to_cpu(req->dcid);
2793 flags = __le16_to_cpu(req->flags);
2794
2795 BT_DBG("dcid 0x%4.4x flags 0x%2.2x", dcid, flags);
2796
2797 chan = l2cap_get_chan_by_scid(conn, dcid);
2798 if (!chan)
2799 return -ENOENT;
2800
2801 sk = chan->sk;
2802
2803 if (chan->state != BT_CONFIG && chan->state != BT_CONNECT2) {
2804 struct l2cap_cmd_rej_cid rej;
2805
2806 rej.reason = cpu_to_le16(L2CAP_REJ_INVALID_CID);
2807 rej.scid = cpu_to_le16(chan->scid);
2808 rej.dcid = cpu_to_le16(chan->dcid);
2809
2810 l2cap_send_cmd(conn, cmd->ident, L2CAP_COMMAND_REJ,
2811 sizeof(rej), &rej);
2812 goto unlock;
2813 }
2814
2815 /* Reject if config buffer is too small. */
2816 len = cmd_len - sizeof(*req);
2817 if (len < 0 || chan->conf_len + len > sizeof(chan->conf_req)) {
2818 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
2819 l2cap_build_conf_rsp(chan, rsp,
2820 L2CAP_CONF_REJECT, flags), rsp);
2821 goto unlock;
2822 }
2823
2824 /* Store config. */
2825 memcpy(chan->conf_req + chan->conf_len, req->data, len);
2826 chan->conf_len += len;
2827
2828 if (flags & 0x0001) {
2829 /* Incomplete config. Send empty response. */
2830 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
2831 l2cap_build_conf_rsp(chan, rsp,
2832 L2CAP_CONF_SUCCESS, 0x0001), rsp);
2833 goto unlock;
2834 }
2835
2836 /* Complete config. */
2837 len = l2cap_parse_conf_req(chan, rsp);
2838 if (len < 0) {
2839 l2cap_send_disconn_req(conn, chan, ECONNRESET);
2840 goto unlock;
2841 }
2842
2843 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP, len, rsp);
2844 chan->num_conf_rsp++;
2845
2846 /* Reset config buffer. */
2847 chan->conf_len = 0;
2848
2849 if (!test_bit(CONF_OUTPUT_DONE, &chan->conf_state))
2850 goto unlock;
2851
2852 if (test_bit(CONF_INPUT_DONE, &chan->conf_state)) {
2853 set_default_fcs(chan);
2854
2855 l2cap_state_change(chan, BT_CONNECTED);
2856
2857 chan->next_tx_seq = 0;
2858 chan->expected_tx_seq = 0;
2859 skb_queue_head_init(&chan->tx_q);
2860 if (chan->mode == L2CAP_MODE_ERTM)
2861 l2cap_ertm_init(chan);
2862
2863 l2cap_chan_ready(sk);
2864 goto unlock;
2865 }
2866
2867 if (!test_and_set_bit(CONF_REQ_SENT, &chan->conf_state)) {
2868 u8 buf[64];
2869 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
2870 l2cap_build_conf_req(chan, buf), buf);
2871 chan->num_conf_req++;
2872 }
2873
2874 	/* Got Conf Rsp PENDING from remote side and assume we sent
2875 	   Conf Rsp PENDING in the code above */
2876 if (test_bit(CONF_REM_CONF_PEND, &chan->conf_state) &&
2877 test_bit(CONF_LOC_CONF_PEND, &chan->conf_state)) {
2878
2879 /* check compatibility */
2880
2881 clear_bit(CONF_LOC_CONF_PEND, &chan->conf_state);
2882 set_bit(CONF_OUTPUT_DONE, &chan->conf_state);
2883
2884 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
2885 l2cap_build_conf_rsp(chan, rsp,
2886 L2CAP_CONF_SUCCESS, 0x0000), rsp);
2887 }
2888
2889 unlock:
2890 release_sock(sk);
2891 return 0;
2892 }
2893
2894 static inline int l2cap_config_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2895 {
2896 struct l2cap_conf_rsp *rsp = (struct l2cap_conf_rsp *)data;
2897 u16 scid, flags, result;
2898 struct l2cap_chan *chan;
2899 struct sock *sk;
2900 int len = cmd->len - sizeof(*rsp);
2901
2902 scid = __le16_to_cpu(rsp->scid);
2903 flags = __le16_to_cpu(rsp->flags);
2904 result = __le16_to_cpu(rsp->result);
2905
2906 BT_DBG("scid 0x%4.4x flags 0x%2.2x result 0x%2.2x",
2907 scid, flags, result);
2908
2909 chan = l2cap_get_chan_by_scid(conn, scid);
2910 if (!chan)
2911 return 0;
2912
2913 sk = chan->sk;
2914
2915 switch (result) {
2916 case L2CAP_CONF_SUCCESS:
2917 l2cap_conf_rfc_get(chan, rsp->data, len);
2918 clear_bit(CONF_REM_CONF_PEND, &chan->conf_state);
2919 break;
2920
2921 case L2CAP_CONF_PENDING:
2922 set_bit(CONF_REM_CONF_PEND, &chan->conf_state);
2923
2924 if (test_bit(CONF_LOC_CONF_PEND, &chan->conf_state)) {
2925 char buf[64];
2926
2927 len = l2cap_parse_conf_rsp(chan, rsp->data, len,
2928 buf, &result);
2929 if (len < 0) {
2930 l2cap_send_disconn_req(conn, chan, ECONNRESET);
2931 goto done;
2932 }
2933
2934 /* check compatibility */
2935
2936 clear_bit(CONF_LOC_CONF_PEND, &chan->conf_state);
2937 set_bit(CONF_OUTPUT_DONE, &chan->conf_state);
2938
2939 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
2940 l2cap_build_conf_rsp(chan, buf,
2941 L2CAP_CONF_SUCCESS, 0x0000), buf);
2942 }
2943 goto done;
2944
2945 case L2CAP_CONF_UNACCEPT:
2946 if (chan->num_conf_rsp <= L2CAP_CONF_MAX_CONF_RSP) {
2947 char req[64];
2948
2949 if (len > sizeof(req) - sizeof(struct l2cap_conf_req)) {
2950 l2cap_send_disconn_req(conn, chan, ECONNRESET);
2951 goto done;
2952 }
2953
2954 /* throw out any old stored conf requests */
2955 result = L2CAP_CONF_SUCCESS;
2956 len = l2cap_parse_conf_rsp(chan, rsp->data, len,
2957 req, &result);
2958 if (len < 0) {
2959 l2cap_send_disconn_req(conn, chan, ECONNRESET);
2960 goto done;
2961 }
2962
2963 l2cap_send_cmd(conn, l2cap_get_ident(conn),
2964 L2CAP_CONF_REQ, len, req);
2965 chan->num_conf_req++;
2966 if (result != L2CAP_CONF_SUCCESS)
2967 goto done;
2968 break;
2969 }
2970
2971 default:
2972 sk->sk_err = ECONNRESET;
2973 __set_chan_timer(chan, L2CAP_DISC_REJ_TIMEOUT);
2974 l2cap_send_disconn_req(conn, chan, ECONNRESET);
2975 goto done;
2976 }
2977
2978 if (flags & 0x01)
2979 goto done;
2980
2981 set_bit(CONF_INPUT_DONE, &chan->conf_state);
2982
2983 if (test_bit(CONF_OUTPUT_DONE, &chan->conf_state)) {
2984 set_default_fcs(chan);
2985
2986 l2cap_state_change(chan, BT_CONNECTED);
2987 chan->next_tx_seq = 0;
2988 chan->expected_tx_seq = 0;
2989 skb_queue_head_init(&chan->tx_q);
2990 if (chan->mode == L2CAP_MODE_ERTM)
2991 l2cap_ertm_init(chan);
2992
2993 l2cap_chan_ready(sk);
2994 }
2995
2996 done:
2997 release_sock(sk);
2998 return 0;
2999 }
3000
3001 static inline int l2cap_disconnect_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
3002 {
3003 struct l2cap_disconn_req *req = (struct l2cap_disconn_req *) data;
3004 struct l2cap_disconn_rsp rsp;
3005 u16 dcid, scid;
3006 struct l2cap_chan *chan;
3007 struct sock *sk;
3008
3009 scid = __le16_to_cpu(req->scid);
3010 dcid = __le16_to_cpu(req->dcid);
3011
3012 BT_DBG("scid 0x%4.4x dcid 0x%4.4x", scid, dcid);
3013
3014 chan = l2cap_get_chan_by_scid(conn, dcid);
3015 if (!chan)
3016 return 0;
3017
3018 sk = chan->sk;
3019
3020 rsp.dcid = cpu_to_le16(chan->scid);
3021 rsp.scid = cpu_to_le16(chan->dcid);
3022 l2cap_send_cmd(conn, cmd->ident, L2CAP_DISCONN_RSP, sizeof(rsp), &rsp);
3023
3024 sk->sk_shutdown = SHUTDOWN_MASK;
3025
3026 l2cap_chan_del(chan, ECONNRESET);
3027 release_sock(sk);
3028
3029 chan->ops->close(chan->data);
3030 return 0;
3031 }
3032
3033 static inline int l2cap_disconnect_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
3034 {
3035 struct l2cap_disconn_rsp *rsp = (struct l2cap_disconn_rsp *) data;
3036 u16 dcid, scid;
3037 struct l2cap_chan *chan;
3038 struct sock *sk;
3039
3040 scid = __le16_to_cpu(rsp->scid);
3041 dcid = __le16_to_cpu(rsp->dcid);
3042
3043 BT_DBG("dcid 0x%4.4x scid 0x%4.4x", dcid, scid);
3044
3045 chan = l2cap_get_chan_by_scid(conn, scid);
3046 if (!chan)
3047 return 0;
3048
3049 sk = chan->sk;
3050
3051 l2cap_chan_del(chan, 0);
3052 release_sock(sk);
3053
3054 chan->ops->close(chan->data);
3055 return 0;
3056 }
3057
3058 static inline int l2cap_information_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
3059 {
3060 struct l2cap_info_req *req = (struct l2cap_info_req *) data;
3061 u16 type;
3062
3063 type = __le16_to_cpu(req->type);
3064
3065 BT_DBG("type 0x%4.4x", type);
3066
3067 if (type == L2CAP_IT_FEAT_MASK) {
3068 u8 buf[8];
3069 u32 feat_mask = l2cap_feat_mask;
3070 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
3071 rsp->type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
3072 rsp->result = cpu_to_le16(L2CAP_IR_SUCCESS);
3073 if (!disable_ertm)
3074 feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING
3075 | L2CAP_FEAT_FCS;
3076 if (enable_hs)
3077 feat_mask |= L2CAP_FEAT_EXT_FLOW
3078 | L2CAP_FEAT_EXT_WINDOW;
3079
3080 put_unaligned_le32(feat_mask, rsp->data);
3081 l2cap_send_cmd(conn, cmd->ident,
3082 L2CAP_INFO_RSP, sizeof(buf), buf);
3083 } else if (type == L2CAP_IT_FIXED_CHAN) {
3084 u8 buf[12];
3085 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
3086
3087 if (enable_hs)
3088 l2cap_fixed_chan[0] |= L2CAP_FC_A2MP;
3089 else
3090 l2cap_fixed_chan[0] &= ~L2CAP_FC_A2MP;
3091
3092 rsp->type = cpu_to_le16(L2CAP_IT_FIXED_CHAN);
3093 rsp->result = cpu_to_le16(L2CAP_IR_SUCCESS);
3094 memcpy(rsp->data, l2cap_fixed_chan, sizeof(l2cap_fixed_chan));
3095 l2cap_send_cmd(conn, cmd->ident,
3096 L2CAP_INFO_RSP, sizeof(buf), buf);
3097 } else {
3098 struct l2cap_info_rsp rsp;
3099 rsp.type = cpu_to_le16(type);
3100 rsp.result = cpu_to_le16(L2CAP_IR_NOTSUPP);
3101 l2cap_send_cmd(conn, cmd->ident,
3102 L2CAP_INFO_RSP, sizeof(rsp), &rsp);
3103 }
3104
3105 return 0;
3106 }
3107
3108 static inline int l2cap_information_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
3109 {
3110 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) data;
3111 u16 type, result;
3112
3113 type = __le16_to_cpu(rsp->type);
3114 result = __le16_to_cpu(rsp->result);
3115
3116 BT_DBG("type 0x%4.4x result 0x%2.2x", type, result);
3117
3118 	/* L2CAP Info req/rsp are not bound to channels, so add extra checks */
3119 if (cmd->ident != conn->info_ident ||
3120 conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE)
3121 return 0;
3122
3123 __cancel_delayed_work(&conn->info_timer);
3124
3125 if (result != L2CAP_IR_SUCCESS) {
3126 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
3127 conn->info_ident = 0;
3128
3129 l2cap_conn_start(conn);
3130
3131 return 0;
3132 }
3133
3134 if (type == L2CAP_IT_FEAT_MASK) {
3135 conn->feat_mask = get_unaligned_le32(rsp->data);
3136
3137 if (conn->feat_mask & L2CAP_FEAT_FIXED_CHAN) {
3138 struct l2cap_info_req req;
3139 req.type = cpu_to_le16(L2CAP_IT_FIXED_CHAN);
3140
3141 conn->info_ident = l2cap_get_ident(conn);
3142
3143 l2cap_send_cmd(conn, conn->info_ident,
3144 L2CAP_INFO_REQ, sizeof(req), &req);
3145 } else {
3146 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
3147 conn->info_ident = 0;
3148
3149 l2cap_conn_start(conn);
3150 }
3151 } else if (type == L2CAP_IT_FIXED_CHAN) {
3152 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
3153 conn->info_ident = 0;
3154
3155 l2cap_conn_start(conn);
3156 }
3157
3158 return 0;
3159 }
3160
3161 static inline int l2cap_create_channel_req(struct l2cap_conn *conn,
3162 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
3163 void *data)
3164 {
3165 struct l2cap_create_chan_req *req = data;
3166 struct l2cap_create_chan_rsp rsp;
3167 u16 psm, scid;
3168
3169 if (cmd_len != sizeof(*req))
3170 return -EPROTO;
3171
3172 if (!enable_hs)
3173 return -EINVAL;
3174
3175 psm = le16_to_cpu(req->psm);
3176 scid = le16_to_cpu(req->scid);
3177
3178 BT_DBG("psm %d, scid %d, amp_id %d", psm, scid, req->amp_id);
3179
3180 /* Placeholder: Always reject */
3181 rsp.dcid = 0;
3182 rsp.scid = cpu_to_le16(scid);
3183 rsp.result = L2CAP_CR_NO_MEM;
3184 rsp.status = L2CAP_CS_NO_INFO;
3185
3186 l2cap_send_cmd(conn, cmd->ident, L2CAP_CREATE_CHAN_RSP,
3187 sizeof(rsp), &rsp);
3188
3189 return 0;
3190 }
3191
3192 static inline int l2cap_create_channel_rsp(struct l2cap_conn *conn,
3193 struct l2cap_cmd_hdr *cmd, void *data)
3194 {
3195 BT_DBG("conn %p", conn);
3196
3197 return l2cap_connect_rsp(conn, cmd, data);
3198 }
3199
3200 static void l2cap_send_move_chan_rsp(struct l2cap_conn *conn, u8 ident,
3201 u16 icid, u16 result)
3202 {
3203 struct l2cap_move_chan_rsp rsp;
3204
3205 BT_DBG("icid %d, result %d", icid, result);
3206
3207 rsp.icid = cpu_to_le16(icid);
3208 rsp.result = cpu_to_le16(result);
3209
3210 l2cap_send_cmd(conn, ident, L2CAP_MOVE_CHAN_RSP, sizeof(rsp), &rsp);
3211 }
3212
3213 static void l2cap_send_move_chan_cfm(struct l2cap_conn *conn,
3214 struct l2cap_chan *chan, u16 icid, u16 result)
3215 {
3216 struct l2cap_move_chan_cfm cfm;
3217 u8 ident;
3218
3219 BT_DBG("icid %d, result %d", icid, result);
3220
3221 ident = l2cap_get_ident(conn);
3222 if (chan)
3223 chan->ident = ident;
3224
3225 cfm.icid = cpu_to_le16(icid);
3226 cfm.result = cpu_to_le16(result);
3227
3228 l2cap_send_cmd(conn, ident, L2CAP_MOVE_CHAN_CFM, sizeof(cfm), &cfm);
3229 }
3230
3231 static void l2cap_send_move_chan_cfm_rsp(struct l2cap_conn *conn, u8 ident,
3232 u16 icid)
3233 {
3234 struct l2cap_move_chan_cfm_rsp rsp;
3235
3236 BT_DBG("icid %d", icid);
3237
3238 rsp.icid = cpu_to_le16(icid);
3239 l2cap_send_cmd(conn, ident, L2CAP_MOVE_CHAN_CFM_RSP, sizeof(rsp), &rsp);
3240 }
3241
3242 static inline int l2cap_move_channel_req(struct l2cap_conn *conn,
3243 struct l2cap_cmd_hdr *cmd, u16 cmd_len, void *data)
3244 {
3245 struct l2cap_move_chan_req *req = data;
3246 u16 icid = 0;
3247 u16 result = L2CAP_MR_NOT_ALLOWED;
3248
3249 if (cmd_len != sizeof(*req))
3250 return -EPROTO;
3251
3252 icid = le16_to_cpu(req->icid);
3253
3254 BT_DBG("icid %d, dest_amp_id %d", icid, req->dest_amp_id);
3255
3256 if (!enable_hs)
3257 return -EINVAL;
3258
3259 /* Placeholder: Always refuse */
3260 l2cap_send_move_chan_rsp(conn, cmd->ident, icid, result);
3261
3262 return 0;
3263 }
3264
3265 static inline int l2cap_move_channel_rsp(struct l2cap_conn *conn,
3266 struct l2cap_cmd_hdr *cmd, u16 cmd_len, void *data)
3267 {
3268 struct l2cap_move_chan_rsp *rsp = data;
3269 u16 icid, result;
3270
3271 if (cmd_len != sizeof(*rsp))
3272 return -EPROTO;
3273
3274 icid = le16_to_cpu(rsp->icid);
3275 result = le16_to_cpu(rsp->result);
3276
3277 BT_DBG("icid %d, result %d", icid, result);
3278
3279 /* Placeholder: Always unconfirmed */
3280 l2cap_send_move_chan_cfm(conn, NULL, icid, L2CAP_MC_UNCONFIRMED);
3281
3282 return 0;
3283 }
3284
3285 static inline int l2cap_move_channel_confirm(struct l2cap_conn *conn,
3286 struct l2cap_cmd_hdr *cmd, u16 cmd_len, void *data)
3287 {
3288 struct l2cap_move_chan_cfm *cfm = data;
3289 u16 icid, result;
3290
3291 if (cmd_len != sizeof(*cfm))
3292 return -EPROTO;
3293
3294 icid = le16_to_cpu(cfm->icid);
3295 result = le16_to_cpu(cfm->result);
3296
3297 BT_DBG("icid %d, result %d", icid, result);
3298
3299 l2cap_send_move_chan_cfm_rsp(conn, cmd->ident, icid);
3300
3301 return 0;
3302 }
3303
3304 static inline int l2cap_move_channel_confirm_rsp(struct l2cap_conn *conn,
3305 struct l2cap_cmd_hdr *cmd, u16 cmd_len, void *data)
3306 {
3307 struct l2cap_move_chan_cfm_rsp *rsp = data;
3308 u16 icid;
3309
3310 if (cmd_len != sizeof(*rsp))
3311 return -EPROTO;
3312
3313 icid = le16_to_cpu(rsp->icid);
3314
3315 BT_DBG("icid %d", icid);
3316
3317 return 0;
3318 }
3319
3320 static inline int l2cap_check_conn_param(u16 min, u16 max, u16 latency,
3321 u16 to_multiplier)
3322 {
3323 u16 max_latency;
3324
3325 if (min > max || min < 6 || max > 3200)
3326 return -EINVAL;
3327
3328 if (to_multiplier < 10 || to_multiplier > 3200)
3329 return -EINVAL;
3330
3331 if (max >= to_multiplier * 8)
3332 return -EINVAL;
3333
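	/* to_multiplier is in 10 ms units and max (the connection interval)
	 * in 1.25 ms units; derive the largest slave latency the requested
	 * supervision timeout allows and additionally cap latency at 499. */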
3334 max_latency = (to_multiplier * 8 / max) - 1;
3335 if (latency > 499 || latency > max_latency)
3336 return -EINVAL;
3337
3338 return 0;
3339 }
3340
3341 static inline int l2cap_conn_param_update_req(struct l2cap_conn *conn,
3342 struct l2cap_cmd_hdr *cmd, u8 *data)
3343 {
3344 struct hci_conn *hcon = conn->hcon;
3345 struct l2cap_conn_param_update_req *req;
3346 struct l2cap_conn_param_update_rsp rsp;
3347 u16 min, max, latency, to_multiplier, cmd_len;
3348 int err;
3349
3350 if (!(hcon->link_mode & HCI_LM_MASTER))
3351 return -EINVAL;
3352
3353 cmd_len = __le16_to_cpu(cmd->len);
3354 if (cmd_len != sizeof(struct l2cap_conn_param_update_req))
3355 return -EPROTO;
3356
3357 req = (struct l2cap_conn_param_update_req *) data;
3358 min = __le16_to_cpu(req->min);
3359 max = __le16_to_cpu(req->max);
3360 latency = __le16_to_cpu(req->latency);
3361 to_multiplier = __le16_to_cpu(req->to_multiplier);
3362
3363 BT_DBG("min 0x%4.4x max 0x%4.4x latency: 0x%4.4x Timeout: 0x%4.4x",
3364 min, max, latency, to_multiplier);
3365
3366 memset(&rsp, 0, sizeof(rsp));
3367
3368 err = l2cap_check_conn_param(min, max, latency, to_multiplier);
3369 if (err)
3370 rsp.result = cpu_to_le16(L2CAP_CONN_PARAM_REJECTED);
3371 else
3372 rsp.result = cpu_to_le16(L2CAP_CONN_PARAM_ACCEPTED);
3373
3374 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONN_PARAM_UPDATE_RSP,
3375 sizeof(rsp), &rsp);
3376
3377 if (!err)
3378 hci_le_conn_update(hcon, min, max, latency, to_multiplier);
3379
3380 return 0;
3381 }
3382
3383 static inline int l2cap_bredr_sig_cmd(struct l2cap_conn *conn,
3384 struct l2cap_cmd_hdr *cmd, u16 cmd_len, u8 *data)
3385 {
3386 int err = 0;
3387
3388 switch (cmd->code) {
3389 case L2CAP_COMMAND_REJ:
3390 l2cap_command_rej(conn, cmd, data);
3391 break;
3392
3393 case L2CAP_CONN_REQ:
3394 err = l2cap_connect_req(conn, cmd, data);
3395 break;
3396
3397 case L2CAP_CONN_RSP:
3398 err = l2cap_connect_rsp(conn, cmd, data);
3399 break;
3400
3401 case L2CAP_CONF_REQ:
3402 err = l2cap_config_req(conn, cmd, cmd_len, data);
3403 break;
3404
3405 case L2CAP_CONF_RSP:
3406 err = l2cap_config_rsp(conn, cmd, data);
3407 break;
3408
3409 case L2CAP_DISCONN_REQ:
3410 err = l2cap_disconnect_req(conn, cmd, data);
3411 break;
3412
3413 case L2CAP_DISCONN_RSP:
3414 err = l2cap_disconnect_rsp(conn, cmd, data);
3415 break;
3416
3417 case L2CAP_ECHO_REQ:
3418 l2cap_send_cmd(conn, cmd->ident, L2CAP_ECHO_RSP, cmd_len, data);
3419 break;
3420
3421 case L2CAP_ECHO_RSP:
3422 break;
3423
3424 case L2CAP_INFO_REQ:
3425 err = l2cap_information_req(conn, cmd, data);
3426 break;
3427
3428 case L2CAP_INFO_RSP:
3429 err = l2cap_information_rsp(conn, cmd, data);
3430 break;
3431
3432 case L2CAP_CREATE_CHAN_REQ:
3433 err = l2cap_create_channel_req(conn, cmd, cmd_len, data);
3434 break;
3435
3436 case L2CAP_CREATE_CHAN_RSP:
3437 err = l2cap_create_channel_rsp(conn, cmd, data);
3438 break;
3439
3440 case L2CAP_MOVE_CHAN_REQ:
3441 err = l2cap_move_channel_req(conn, cmd, cmd_len, data);
3442 break;
3443
3444 case L2CAP_MOVE_CHAN_RSP:
3445 err = l2cap_move_channel_rsp(conn, cmd, cmd_len, data);
3446 break;
3447
3448 case L2CAP_MOVE_CHAN_CFM:
3449 err = l2cap_move_channel_confirm(conn, cmd, cmd_len, data);
3450 break;
3451
3452 case L2CAP_MOVE_CHAN_CFM_RSP:
3453 err = l2cap_move_channel_confirm_rsp(conn, cmd, cmd_len, data);
3454 break;
3455
3456 default:
3457 BT_ERR("Unknown BR/EDR signaling command 0x%2.2x", cmd->code);
3458 err = -EINVAL;
3459 break;
3460 }
3461
3462 return err;
3463 }
3464
3465 static inline int l2cap_le_sig_cmd(struct l2cap_conn *conn,
3466 struct l2cap_cmd_hdr *cmd, u8 *data)
3467 {
3468 switch (cmd->code) {
3469 case L2CAP_COMMAND_REJ:
3470 return 0;
3471
3472 case L2CAP_CONN_PARAM_UPDATE_REQ:
3473 return l2cap_conn_param_update_req(conn, cmd, data);
3474
3475 case L2CAP_CONN_PARAM_UPDATE_RSP:
3476 return 0;
3477
3478 default:
3479 BT_ERR("Unknown LE signaling command 0x%2.2x", cmd->code);
3480 return -EINVAL;
3481 }
3482 }
3483
3484 static inline void l2cap_sig_channel(struct l2cap_conn *conn,
3485 struct sk_buff *skb)
3486 {
3487 u8 *data = skb->data;
3488 int len = skb->len;
3489 struct l2cap_cmd_hdr cmd;
3490 int err;
3491
3492 l2cap_raw_recv(conn, skb);
3493
3494 while (len >= L2CAP_CMD_HDR_SIZE) {
3495 u16 cmd_len;
3496 memcpy(&cmd, data, L2CAP_CMD_HDR_SIZE);
3497 data += L2CAP_CMD_HDR_SIZE;
3498 len -= L2CAP_CMD_HDR_SIZE;
3499
3500 cmd_len = le16_to_cpu(cmd.len);
3501
3502 BT_DBG("code 0x%2.2x len %d id 0x%2.2x", cmd.code, cmd_len, cmd.ident);
3503
3504 if (cmd_len > len || !cmd.ident) {
3505 BT_DBG("corrupted command");
3506 break;
3507 }
3508
3509 if (conn->hcon->type == LE_LINK)
3510 err = l2cap_le_sig_cmd(conn, &cmd, data);
3511 else
3512 err = l2cap_bredr_sig_cmd(conn, &cmd, cmd_len, data);
3513
3514 if (err) {
3515 struct l2cap_cmd_rej_unk rej;
3516
3517 BT_ERR("Wrong link type (%d)", err);
3518
3519 /* FIXME: Map err to a valid reason */
3520 rej.reason = cpu_to_le16(L2CAP_REJ_NOT_UNDERSTOOD);
3521 l2cap_send_cmd(conn, cmd.ident, L2CAP_COMMAND_REJ, sizeof(rej), &rej);
3522 }
3523
3524 data += cmd_len;
3525 len -= cmd_len;
3526 }
3527
3528 kfree_skb(skb);
3529 }
3530
3531 static int l2cap_check_fcs(struct l2cap_chan *chan, struct sk_buff *skb)
3532 {
3533 u16 our_fcs, rcv_fcs;
3534 int hdr_size;
3535
3536 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
3537 hdr_size = L2CAP_EXT_HDR_SIZE;
3538 else
3539 hdr_size = L2CAP_ENH_HDR_SIZE;
3540
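	/* The received CRC16 trails the payload: trim it off the skb, read it
	 * back from just past the new end, and compare it against a CRC
	 * computed over the L2CAP header plus the remaining payload. */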
3541 if (chan->fcs == L2CAP_FCS_CRC16) {
3542 skb_trim(skb, skb->len - L2CAP_FCS_SIZE);
3543 rcv_fcs = get_unaligned_le16(skb->data + skb->len);
3544 our_fcs = crc16(0, skb->data - hdr_size, skb->len + hdr_size);
3545
3546 if (our_fcs != rcv_fcs)
3547 return -EBADMSG;
3548 }
3549 return 0;
3550 }
3551
3552 static inline void l2cap_send_i_or_rr_or_rnr(struct l2cap_chan *chan)
3553 {
3554 u32 control = 0;
3555
3556 chan->frames_sent = 0;
3557
3558 control |= __set_reqseq(chan, chan->buffer_seq);
3559
3560 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
3561 control |= __set_ctrl_super(chan, L2CAP_SUPER_RNR);
3562 l2cap_send_sframe(chan, control);
3563 set_bit(CONN_RNR_SENT, &chan->conn_state);
3564 }
3565
3566 if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state))
3567 l2cap_retransmit_frames(chan);
3568
3569 l2cap_ertm_send(chan);
3570
3571 if (!test_bit(CONN_LOCAL_BUSY, &chan->conn_state) &&
3572 chan->frames_sent == 0) {
3573 control |= __set_ctrl_super(chan, L2CAP_SUPER_RR);
3574 l2cap_send_sframe(chan, control);
3575 }
3576 }
3577
3578 static int l2cap_add_to_srej_queue(struct l2cap_chan *chan, struct sk_buff *skb, u16 tx_seq, u8 sar)
3579 {
3580 struct sk_buff *next_skb;
3581 int tx_seq_offset, next_tx_seq_offset;
3582
3583 bt_cb(skb)->tx_seq = tx_seq;
3584 bt_cb(skb)->sar = sar;
3585
3586 next_skb = skb_peek(&chan->srej_q);
3587
3588 tx_seq_offset = __seq_offset(chan, tx_seq, chan->buffer_seq);
3589
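	/* Keep the SREJ queue ordered by distance from buffer_seq: reject
	 * duplicates, insert before the first frame with a larger offset,
	 * or append at the tail. */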
3590 while (next_skb) {
3591 if (bt_cb(next_skb)->tx_seq == tx_seq)
3592 return -EINVAL;
3593
3594 next_tx_seq_offset = __seq_offset(chan,
3595 bt_cb(next_skb)->tx_seq, chan->buffer_seq);
3596
3597 if (next_tx_seq_offset > tx_seq_offset) {
3598 __skb_queue_before(&chan->srej_q, next_skb, skb);
3599 return 0;
3600 }
3601
3602 if (skb_queue_is_last(&chan->srej_q, next_skb))
3603 next_skb = NULL;
3604 else
3605 next_skb = skb_queue_next(&chan->srej_q, next_skb);
3606 }
3607
3608 __skb_queue_tail(&chan->srej_q, skb);
3609
3610 return 0;
3611 }
3612
3613 static void append_skb_frag(struct sk_buff *skb,
3614 struct sk_buff *new_frag, struct sk_buff **last_frag)
3615 {
3616 /* skb->len reflects data in skb as well as all fragments
3617 * skb->data_len reflects only data in fragments
3618 */
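	/* Chain the new fragment onto the frag_list and keep the head skb's
	 * length accounting in sync with the data just added. */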
3619 if (!skb_has_frag_list(skb))
3620 skb_shinfo(skb)->frag_list = new_frag;
3621
3622 new_frag->next = NULL;
3623
3624 (*last_frag)->next = new_frag;
3625 *last_frag = new_frag;
3626
3627 skb->len += new_frag->len;
3628 skb->data_len += new_frag->len;
3629 skb->truesize += new_frag->truesize;
3630 }
3631
3632 static int l2cap_reassemble_sdu(struct l2cap_chan *chan, struct sk_buff *skb, u32 control)
3633 {
3634 int err = -EINVAL;
3635
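	/* Reassembly is driven by the SAR bits: unsegmented SDUs are passed up
	 * directly, a start fragment records the expected SDU length, and
	 * continue/end fragments are appended until that length is reached. */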
3636 switch (__get_ctrl_sar(chan, control)) {
3637 case L2CAP_SAR_UNSEGMENTED:
3638 if (chan->sdu)
3639 break;
3640
3641 err = chan->ops->recv(chan->data, skb);
3642 break;
3643
3644 case L2CAP_SAR_START:
3645 if (chan->sdu)
3646 break;
3647
3648 chan->sdu_len = get_unaligned_le16(skb->data);
3649 skb_pull(skb, L2CAP_SDULEN_SIZE);
3650
3651 if (chan->sdu_len > chan->imtu) {
3652 err = -EMSGSIZE;
3653 break;
3654 }
3655
3656 if (skb->len >= chan->sdu_len)
3657 break;
3658
3659 chan->sdu = skb;
3660 chan->sdu_last_frag = skb;
3661
3662 skb = NULL;
3663 err = 0;
3664 break;
3665
3666 case L2CAP_SAR_CONTINUE:
3667 if (!chan->sdu)
3668 break;
3669
3670 append_skb_frag(chan->sdu, skb,
3671 &chan->sdu_last_frag);
3672 skb = NULL;
3673
3674 if (chan->sdu->len >= chan->sdu_len)
3675 break;
3676
3677 err = 0;
3678 break;
3679
3680 case L2CAP_SAR_END:
3681 if (!chan->sdu)
3682 break;
3683
3684 append_skb_frag(chan->sdu, skb,
3685 &chan->sdu_last_frag);
3686 skb = NULL;
3687
3688 if (chan->sdu->len != chan->sdu_len)
3689 break;
3690
3691 err = chan->ops->recv(chan->data, chan->sdu);
3692
3693 if (!err) {
3694 /* Reassembly complete */
3695 chan->sdu = NULL;
3696 chan->sdu_last_frag = NULL;
3697 chan->sdu_len = 0;
3698 }
3699 break;
3700 }
3701
3702 if (err) {
3703 kfree_skb(skb);
3704 kfree_skb(chan->sdu);
3705 chan->sdu = NULL;
3706 chan->sdu_last_frag = NULL;
3707 chan->sdu_len = 0;
3708 }
3709
3710 return err;
3711 }
3712
3713 static void l2cap_ertm_enter_local_busy(struct l2cap_chan *chan)
3714 {
3715 u32 control;
3716
3717 BT_DBG("chan %p, Enter local busy", chan);
3718
3719 set_bit(CONN_LOCAL_BUSY, &chan->conn_state);
3720
3721 control = __set_reqseq(chan, chan->buffer_seq);
3722 control |= __set_ctrl_super(chan, L2CAP_SUPER_RNR);
3723 l2cap_send_sframe(chan, control);
3724
3725 set_bit(CONN_RNR_SENT, &chan->conn_state);
3726
3727 __clear_ack_timer(chan);
3728 }
3729
3730 static void l2cap_ertm_exit_local_busy(struct l2cap_chan *chan)
3731 {
3732 u32 control;
3733
3734 if (!test_bit(CONN_RNR_SENT, &chan->conn_state))
3735 goto done;
3736
3737 control = __set_reqseq(chan, chan->buffer_seq);
3738 control |= __set_ctrl_poll(chan);
3739 control |= __set_ctrl_super(chan, L2CAP_SUPER_RR);
3740 l2cap_send_sframe(chan, control);
3741 chan->retry_count = 1;
3742
3743 __clear_retrans_timer(chan);
3744 __set_monitor_timer(chan);
3745
3746 set_bit(CONN_WAIT_F, &chan->conn_state);
3747
3748 done:
3749 clear_bit(CONN_LOCAL_BUSY, &chan->conn_state);
3750 clear_bit(CONN_RNR_SENT, &chan->conn_state);
3751
3752 BT_DBG("chan %p, Exit local busy", chan);
3753 }
3754
3755 void l2cap_chan_busy(struct l2cap_chan *chan, int busy)
3756 {
3757 if (chan->mode == L2CAP_MODE_ERTM) {
3758 if (busy)
3759 l2cap_ertm_enter_local_busy(chan);
3760 else
3761 l2cap_ertm_exit_local_busy(chan);
3762 }
3763 }
3764
3765 static void l2cap_check_srej_gap(struct l2cap_chan *chan, u16 tx_seq)
3766 {
3767 struct sk_buff *skb;
3768 u32 control;
3769
3770 while ((skb = skb_peek(&chan->srej_q)) &&
3771 !test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
3772 int err;
3773
3774 if (bt_cb(skb)->tx_seq != tx_seq)
3775 break;
3776
3777 skb = skb_dequeue(&chan->srej_q);
3778 control = __set_ctrl_sar(chan, bt_cb(skb)->sar);
3779 err = l2cap_reassemble_sdu(chan, skb, control);
3780
3781 if (err < 0) {
3782 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
3783 break;
3784 }
3785
3786 chan->buffer_seq_srej = __next_seq(chan, chan->buffer_seq_srej);
3787 tx_seq = __next_seq(chan, tx_seq);
3788 }
3789 }
3790
3791 static void l2cap_resend_srejframe(struct l2cap_chan *chan, u16 tx_seq)
3792 {
3793 struct srej_list *l, *tmp;
3794 u32 control;
3795
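	/* Walk the SREJ list up to tx_seq: drop the entry once its frame has
	 * arrived, and re-send an SREJ for every sequence number still
	 * outstanding ahead of it. */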
3796 list_for_each_entry_safe(l, tmp, &chan->srej_l, list) {
3797 if (l->tx_seq == tx_seq) {
3798 list_del(&l->list);
3799 kfree(l);
3800 return;
3801 }
3802 control = __set_ctrl_super(chan, L2CAP_SUPER_SREJ);
3803 control |= __set_reqseq(chan, l->tx_seq);
3804 l2cap_send_sframe(chan, control);
3805 list_del(&l->list);
3806 list_add_tail(&l->list, &chan->srej_l);
3807 }
3808 }
3809
3810 static int l2cap_send_srejframe(struct l2cap_chan *chan, u16 tx_seq)
3811 {
3812 struct srej_list *new;
3813 u32 control;
3814
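	/* Send an SREJ for every sequence number missing before tx_seq and
	 * remember each one so the retransmitted frames can be matched later. */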
3815 while (tx_seq != chan->expected_tx_seq) {
3816 control = __set_ctrl_super(chan, L2CAP_SUPER_SREJ);
3817 control |= __set_reqseq(chan, chan->expected_tx_seq);
3818 l2cap_send_sframe(chan, control);
3819
3820 new = kzalloc(sizeof(struct srej_list), GFP_ATOMIC);
3821 if (!new)
3822 return -ENOMEM;
3823
3824 new->tx_seq = chan->expected_tx_seq;
3825
3826 chan->expected_tx_seq = __next_seq(chan, chan->expected_tx_seq);
3827
3828 list_add_tail(&new->list, &chan->srej_l);
3829 }
3830
3831 chan->expected_tx_seq = __next_seq(chan, chan->expected_tx_seq);
3832
3833 return 0;
3834 }
3835
3836 static inline int l2cap_data_channel_iframe(struct l2cap_chan *chan, u32 rx_control, struct sk_buff *skb)
3837 {
3838 u16 tx_seq = __get_txseq(chan, rx_control);
3839 u16 req_seq = __get_reqseq(chan, rx_control);
3840 u8 sar = __get_ctrl_sar(chan, rx_control);
3841 int tx_seq_offset, expected_tx_seq_offset;
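	/* Acknowledge after roughly a sixth of the transmit window has been
	 * received rather than after every I-frame. */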
3842 int num_to_ack = (chan->tx_win/6) + 1;
3843 int err = 0;
3844
3845 BT_DBG("chan %p len %d tx_seq %d rx_control 0x%8.8x", chan, skb->len,
3846 tx_seq, rx_control);
3847
3848 if (__is_ctrl_final(chan, rx_control) &&
3849 test_bit(CONN_WAIT_F, &chan->conn_state)) {
3850 __clear_monitor_timer(chan);
3851 if (chan->unacked_frames > 0)
3852 __set_retrans_timer(chan);
3853 clear_bit(CONN_WAIT_F, &chan->conn_state);
3854 }
3855
3856 chan->expected_ack_seq = req_seq;
3857 l2cap_drop_acked_frames(chan);
3858
3859 tx_seq_offset = __seq_offset(chan, tx_seq, chan->buffer_seq);
3860
3861 /* invalid tx_seq */
3862 if (tx_seq_offset >= chan->tx_win) {
3863 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
3864 goto drop;
3865 }
3866
3867 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state))
3868 goto drop;
3869
3870 if (tx_seq == chan->expected_tx_seq)
3871 goto expected;
3872
3873 if (test_bit(CONN_SREJ_SENT, &chan->conn_state)) {
3874 struct srej_list *first;
3875
3876 first = list_first_entry(&chan->srej_l,
3877 struct srej_list, list);
3878 if (tx_seq == first->tx_seq) {
3879 l2cap_add_to_srej_queue(chan, skb, tx_seq, sar);
3880 l2cap_check_srej_gap(chan, tx_seq);
3881
3882 list_del(&first->list);
3883 kfree(first);
3884
3885 if (list_empty(&chan->srej_l)) {
3886 chan->buffer_seq = chan->buffer_seq_srej;
3887 clear_bit(CONN_SREJ_SENT, &chan->conn_state);
3888 l2cap_send_ack(chan);
3889 BT_DBG("chan %p, Exit SREJ_SENT", chan);
3890 }
3891 } else {
3892 struct srej_list *l;
3893
3894 /* duplicated tx_seq */
3895 if (l2cap_add_to_srej_queue(chan, skb, tx_seq, sar) < 0)
3896 goto drop;
3897
3898 list_for_each_entry(l, &chan->srej_l, list) {
3899 if (l->tx_seq == tx_seq) {
3900 l2cap_resend_srejframe(chan, tx_seq);
3901 return 0;
3902 }
3903 }
3904
3905 err = l2cap_send_srejframe(chan, tx_seq);
3906 if (err < 0) {
3907 l2cap_send_disconn_req(chan->conn, chan, -err);
3908 return err;
3909 }
3910 }
3911 } else {
3912 expected_tx_seq_offset = __seq_offset(chan,
3913 chan->expected_tx_seq, chan->buffer_seq);
3914
3915 /* duplicated tx_seq */
3916 if (tx_seq_offset < expected_tx_seq_offset)
3917 goto drop;
3918
3919 set_bit(CONN_SREJ_SENT, &chan->conn_state);
3920
3921 BT_DBG("chan %p, Enter SREJ", chan);
3922
3923 INIT_LIST_HEAD(&chan->srej_l);
3924 chan->buffer_seq_srej = chan->buffer_seq;
3925
3926 __skb_queue_head_init(&chan->srej_q);
3927 l2cap_add_to_srej_queue(chan, skb, tx_seq, sar);
3928
3929 set_bit(CONN_SEND_PBIT, &chan->conn_state);
3930
3931 err = l2cap_send_srejframe(chan, tx_seq);
3932 if (err < 0) {
3933 l2cap_send_disconn_req(chan->conn, chan, -err);
3934 return err;
3935 }
3936
3937 __clear_ack_timer(chan);
3938 }
3939 return 0;
3940
3941 expected:
3942 chan->expected_tx_seq = __next_seq(chan, chan->expected_tx_seq);
3943
3944 if (test_bit(CONN_SREJ_SENT, &chan->conn_state)) {
3945 bt_cb(skb)->tx_seq = tx_seq;
3946 bt_cb(skb)->sar = sar;
3947 __skb_queue_tail(&chan->srej_q, skb);
3948 return 0;
3949 }
3950
3951 err = l2cap_reassemble_sdu(chan, skb, rx_control);
3952 chan->buffer_seq = __next_seq(chan, chan->buffer_seq);
3953
3954 if (err < 0) {
3955 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
3956 return err;
3957 }
3958
3959 if (__is_ctrl_final(chan, rx_control)) {
3960 if (!test_and_clear_bit(CONN_REJ_ACT, &chan->conn_state))
3961 l2cap_retransmit_frames(chan);
3962 }
3963
3964
3965 chan->num_acked = (chan->num_acked + 1) % num_to_ack;
3966 if (chan->num_acked == num_to_ack - 1)
3967 l2cap_send_ack(chan);
3968 else
3969 __set_ack_timer(chan);
3970
3971 return 0;
3972
3973 drop:
3974 kfree_skb(skb);
3975 return 0;
3976 }
3977
3978 static inline void l2cap_data_channel_rrframe(struct l2cap_chan *chan, u32 rx_control)
3979 {
3980 BT_DBG("chan %p, req_seq %d ctrl 0x%8.8x", chan,
3981 __get_reqseq(chan, rx_control), rx_control);
3982
3983 chan->expected_ack_seq = __get_reqseq(chan, rx_control);
3984 l2cap_drop_acked_frames(chan);
3985
3986 if (__is_ctrl_poll(chan, rx_control)) {
3987 set_bit(CONN_SEND_FBIT, &chan->conn_state);
3988 if (test_bit(CONN_SREJ_SENT, &chan->conn_state)) {
3989 if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state) &&
3990 (chan->unacked_frames > 0))
3991 __set_retrans_timer(chan);
3992
3993 clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
3994 l2cap_send_srejtail(chan);
3995 } else {
3996 l2cap_send_i_or_rr_or_rnr(chan);
3997 }
3998
3999 } else if (__is_ctrl_final(chan, rx_control)) {
4000 clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
4001
4002 if (!test_and_clear_bit(CONN_REJ_ACT, &chan->conn_state))
4003 l2cap_retransmit_frames(chan);
4004
4005 } else {
4006 if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state) &&
4007 (chan->unacked_frames > 0))
4008 __set_retrans_timer(chan);
4009
4010 clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
4011 if (test_bit(CONN_SREJ_SENT, &chan->conn_state))
4012 l2cap_send_ack(chan);
4013 else
4014 l2cap_ertm_send(chan);
4015 }
4016 }
4017
4018 static inline void l2cap_data_channel_rejframe(struct l2cap_chan *chan, u32 rx_control)
4019 {
4020 u16 tx_seq = __get_reqseq(chan, rx_control);
4021
4022 BT_DBG("chan %p, req_seq %d ctrl 0x%8.8x", chan, tx_seq, rx_control);
4023
4024 clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
4025
4026 chan->expected_ack_seq = tx_seq;
4027 l2cap_drop_acked_frames(chan);
4028
4029 if (__is_ctrl_final(chan, rx_control)) {
4030 if (!test_and_clear_bit(CONN_REJ_ACT, &chan->conn_state))
4031 l2cap_retransmit_frames(chan);
4032 } else {
4033 l2cap_retransmit_frames(chan);
4034
4035 if (test_bit(CONN_WAIT_F, &chan->conn_state))
4036 set_bit(CONN_REJ_ACT, &chan->conn_state);
4037 }
4038 }
4039 static inline void l2cap_data_channel_srejframe(struct l2cap_chan *chan, u32 rx_control)
4040 {
4041 u16 tx_seq = __get_reqseq(chan, rx_control);
4042
4043 BT_DBG("chan %p, req_seq %d ctrl 0x%8.8x", chan, tx_seq, rx_control);
4044
4045 clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
4046
4047 if (__is_ctrl_poll(chan, rx_control)) {
4048 chan->expected_ack_seq = tx_seq;
4049 l2cap_drop_acked_frames(chan);
4050
4051 set_bit(CONN_SEND_FBIT, &chan->conn_state);
4052 l2cap_retransmit_one_frame(chan, tx_seq);
4053
4054 l2cap_ertm_send(chan);
4055
4056 if (test_bit(CONN_WAIT_F, &chan->conn_state)) {
4057 chan->srej_save_reqseq = tx_seq;
4058 set_bit(CONN_SREJ_ACT, &chan->conn_state);
4059 }
4060 } else if (__is_ctrl_final(chan, rx_control)) {
4061 if (test_bit(CONN_SREJ_ACT, &chan->conn_state) &&
4062 chan->srej_save_reqseq == tx_seq)
4063 clear_bit(CONN_SREJ_ACT, &chan->conn_state);
4064 else
4065 l2cap_retransmit_one_frame(chan, tx_seq);
4066 } else {
4067 l2cap_retransmit_one_frame(chan, tx_seq);
4068 if (test_bit(CONN_WAIT_F, &chan->conn_state)) {
4069 chan->srej_save_reqseq = tx_seq;
4070 set_bit(CONN_SREJ_ACT, &chan->conn_state);
4071 }
4072 }
4073 }
4074
4075 static inline void l2cap_data_channel_rnrframe(struct l2cap_chan *chan, u32 rx_control)
4076 {
4077 u16 tx_seq = __get_reqseq(chan, rx_control);
4078
4079 BT_DBG("chan %p, req_seq %d ctrl 0x%8.8x", chan, tx_seq, rx_control);
4080
4081 set_bit(CONN_REMOTE_BUSY, &chan->conn_state);
4082 chan->expected_ack_seq = tx_seq;
4083 l2cap_drop_acked_frames(chan);
4084
4085 if (__is_ctrl_poll(chan, rx_control))
4086 set_bit(CONN_SEND_FBIT, &chan->conn_state);
4087
4088 if (!test_bit(CONN_SREJ_SENT, &chan->conn_state)) {
4089 __clear_retrans_timer(chan);
4090 if (__is_ctrl_poll(chan, rx_control))
4091 l2cap_send_rr_or_rnr(chan, L2CAP_CTRL_FINAL);
4092 return;
4093 }
4094
4095 if (__is_ctrl_poll(chan, rx_control)) {
4096 l2cap_send_srejtail(chan);
4097 } else {
4098 rx_control = __set_ctrl_super(chan, L2CAP_SUPER_RR);
4099 l2cap_send_sframe(chan, rx_control);
4100 }
4101 }
4102
4103 static inline int l2cap_data_channel_sframe(struct l2cap_chan *chan, u32 rx_control, struct sk_buff *skb)
4104 {
4105 BT_DBG("chan %p rx_control 0x%8.8x len %d", chan, rx_control, skb->len);
4106
4107 if (__is_ctrl_final(chan, rx_control) &&
4108 test_bit(CONN_WAIT_F, &chan->conn_state)) {
4109 __clear_monitor_timer(chan);
4110 if (chan->unacked_frames > 0)
4111 __set_retrans_timer(chan);
4112 clear_bit(CONN_WAIT_F, &chan->conn_state);
4113 }
4114
4115 switch (__get_ctrl_super(chan, rx_control)) {
4116 case L2CAP_SUPER_RR:
4117 l2cap_data_channel_rrframe(chan, rx_control);
4118 break;
4119
4120 case L2CAP_SUPER_REJ:
4121 l2cap_data_channel_rejframe(chan, rx_control);
4122 break;
4123
4124 case L2CAP_SUPER_SREJ:
4125 l2cap_data_channel_srejframe(chan, rx_control);
4126 break;
4127
4128 case L2CAP_SUPER_RNR:
4129 l2cap_data_channel_rnrframe(chan, rx_control);
4130 break;
4131 }
4132
4133 kfree_skb(skb);
4134 return 0;
4135 }
4136
4137 static int l2cap_ertm_data_rcv(struct sock *sk, struct sk_buff *skb)
4138 {
4139 struct l2cap_chan *chan = l2cap_pi(sk)->chan;
4140 u32 control;
4141 u16 req_seq;
4142 int len, next_tx_seq_offset, req_seq_offset;
4143
4144 control = __get_control(chan, skb->data);
4145 skb_pull(skb, __ctrl_size(chan));
4146 len = skb->len;
4147
4148 /*
4149 * We can just drop the corrupted I-frame here.
4150 	 * The receiver will notice it is missing, start the proper recovery
4151 	 * procedure and ask for retransmission.
4152 */
4153 if (l2cap_check_fcs(chan, skb))
4154 goto drop;
4155
4156 if (__is_sar_start(chan, control) && !__is_sframe(chan, control))
4157 len -= L2CAP_SDULEN_SIZE;
4158
4159 if (chan->fcs == L2CAP_FCS_CRC16)
4160 len -= L2CAP_FCS_SIZE;
4161
4162 if (len > chan->mps) {
4163 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
4164 goto drop;
4165 }
4166
4167 req_seq = __get_reqseq(chan, control);
4168
4169 req_seq_offset = __seq_offset(chan, req_seq, chan->expected_ack_seq);
4170
4171 next_tx_seq_offset = __seq_offset(chan, chan->next_tx_seq,
4172 chan->expected_ack_seq);
4173
4174 /* check for invalid req-seq */
4175 if (req_seq_offset > next_tx_seq_offset) {
4176 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
4177 goto drop;
4178 }
4179
4180 if (!__is_sframe(chan, control)) {
4181 if (len < 0) {
4182 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
4183 goto drop;
4184 }
4185
4186 l2cap_data_channel_iframe(chan, control, skb);
4187 } else {
4188 if (len != 0) {
4189 BT_ERR("%d", len);
4190 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
4191 goto drop;
4192 }
4193
4194 l2cap_data_channel_sframe(chan, control, skb);
4195 }
4196
4197 return 0;
4198
4199 drop:
4200 kfree_skb(skb);
4201 return 0;
4202 }
4203
4204 static inline int l2cap_data_channel(struct l2cap_conn *conn, u16 cid, struct sk_buff *skb)
4205 {
4206 struct l2cap_chan *chan;
4207 struct sock *sk = NULL;
4208 u32 control;
4209 u16 tx_seq;
4210 int len;
4211
4212 chan = l2cap_get_chan_by_scid(conn, cid);
4213 if (!chan) {
4214 BT_DBG("unknown cid 0x%4.4x", cid);
4215 goto drop;
4216 }
4217
4218 sk = chan->sk;
4219
4220 BT_DBG("chan %p, len %d", chan, skb->len);
4221
4222 if (chan->state != BT_CONNECTED)
4223 goto drop;
4224
4225 switch (chan->mode) {
4226 case L2CAP_MODE_BASIC:
4227 		/* If the socket receive buffer overflows we drop data here,
4228 		 * which is *bad* because L2CAP has to be reliable.
4229 		 * But we don't have any other choice: L2CAP doesn't
4230 		 * provide a flow control mechanism. */
4231
4232 if (chan->imtu < skb->len)
4233 goto drop;
4234
4235 if (!chan->ops->recv(chan->data, skb))
4236 goto done;
4237 break;
4238
4239 case L2CAP_MODE_ERTM:
4240 l2cap_ertm_data_rcv(sk, skb);
4241
4242 goto done;
4243
4244 case L2CAP_MODE_STREAMING:
4245 control = __get_control(chan, skb->data);
4246 skb_pull(skb, __ctrl_size(chan));
4247 len = skb->len;
4248
4249 if (l2cap_check_fcs(chan, skb))
4250 goto drop;
4251
4252 if (__is_sar_start(chan, control))
4253 len -= L2CAP_SDULEN_SIZE;
4254
4255 if (chan->fcs == L2CAP_FCS_CRC16)
4256 len -= L2CAP_FCS_SIZE;
4257
4258 if (len > chan->mps || len < 0 || __is_sframe(chan, control))
4259 goto drop;
4260
4261 tx_seq = __get_txseq(chan, control);
4262
4263 if (chan->expected_tx_seq != tx_seq) {
4264 /* Frame(s) missing - must discard partial SDU */
4265 kfree_skb(chan->sdu);
4266 chan->sdu = NULL;
4267 chan->sdu_last_frag = NULL;
4268 chan->sdu_len = 0;
4269
4270 /* TODO: Notify userland of missing data */
4271 }
4272
4273 chan->expected_tx_seq = __next_seq(chan, tx_seq);
4274
4275 if (l2cap_reassemble_sdu(chan, skb, control) == -EMSGSIZE)
4276 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
4277
4278 goto done;
4279
4280 default:
4281 BT_DBG("chan %p: bad mode 0x%2.2x", chan, chan->mode);
4282 break;
4283 }
4284
4285 drop:
4286 kfree_skb(skb);
4287
4288 done:
4289 if (sk)
4290 release_sock(sk);
4291
4292 return 0;
4293 }
4294
4295 static inline int l2cap_conless_channel(struct l2cap_conn *conn, __le16 psm, struct sk_buff *skb)
4296 {
4297 struct sock *sk = NULL;
4298 struct l2cap_chan *chan;
4299
4300 chan = l2cap_global_chan_by_psm(0, psm, conn->src);
4301 if (!chan)
4302 goto drop;
4303
4304 sk = chan->sk;
4305
4306 lock_sock(sk);
4307
4308 BT_DBG("sk %p, len %d", sk, skb->len);
4309
4310 if (chan->state != BT_BOUND && chan->state != BT_CONNECTED)
4311 goto drop;
4312
4313 if (chan->imtu < skb->len)
4314 goto drop;
4315
4316 if (!chan->ops->recv(chan->data, skb))
4317 goto done;
4318
4319 drop:
4320 kfree_skb(skb);
4321
4322 done:
4323 if (sk)
4324 release_sock(sk);
4325 return 0;
4326 }
4327
4328 static inline int l2cap_att_channel(struct l2cap_conn *conn, __le16 cid, struct sk_buff *skb)
4329 {
4330 struct sock *sk = NULL;
4331 struct l2cap_chan *chan;
4332
4333 chan = l2cap_global_chan_by_scid(0, cid, conn->src);
4334 if (!chan)
4335 goto drop;
4336
4337 sk = chan->sk;
4338
4339 lock_sock(sk);
4340
4341 BT_DBG("sk %p, len %d", sk, skb->len);
4342
4343 if (chan->state != BT_BOUND && chan->state != BT_CONNECTED)
4344 goto drop;
4345
4346 if (chan->imtu < skb->len)
4347 goto drop;
4348
4349 if (!chan->ops->recv(chan->data, skb))
4350 goto done;
4351
4352 drop:
4353 kfree_skb(skb);
4354
4355 done:
4356 if (sk)
4357 release_sock(sk);
4358 return 0;
4359 }
4360
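/* Demultiplex a complete L2CAP frame: strip the basic header and
 * route the payload by CID to the signalling, connectionless, ATT,
 * SMP or connection-oriented data handlers. */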
4361 static void l2cap_recv_frame(struct l2cap_conn *conn, struct sk_buff *skb)
4362 {
4363 struct l2cap_hdr *lh = (void *) skb->data;
4364 u16 cid, len;
4365 __le16 psm;
4366
4367 skb_pull(skb, L2CAP_HDR_SIZE);
4368 cid = __le16_to_cpu(lh->cid);
4369 len = __le16_to_cpu(lh->len);
4370
4371 if (len != skb->len) {
4372 kfree_skb(skb);
4373 return;
4374 }
4375
4376 BT_DBG("len %d, cid 0x%4.4x", len, cid);
4377
4378 switch (cid) {
4379 case L2CAP_CID_LE_SIGNALING:
4380 case L2CAP_CID_SIGNALING:
4381 l2cap_sig_channel(conn, skb);
4382 break;
4383
4384 case L2CAP_CID_CONN_LESS:
4385 psm = get_unaligned_le16(skb->data);
4386 skb_pull(skb, 2);
4387 l2cap_conless_channel(conn, psm, skb);
4388 break;
4389
4390 case L2CAP_CID_LE_DATA:
4391 l2cap_att_channel(conn, cid, skb);
4392 break;
4393
4394 case L2CAP_CID_SMP:
4395 if (smp_sig_channel(conn, skb))
4396 l2cap_conn_del(conn->hcon, EACCES);
4397 break;
4398
4399 default:
4400 l2cap_data_channel(conn, cid, skb);
4401 break;
4402 }
4403 }
4404
4405 /* ---- L2CAP interface with lower layer (HCI) ---- */
4406
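/* Incoming ACL connection request from HCI: scan listening channels
 * for a matching local address (an exact match takes precedence over
 * a BDADDR_ANY wildcard) and report the accepted link modes. */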
4407 int l2cap_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr)
4408 {
4409 int exact = 0, lm1 = 0, lm2 = 0;
4410 struct l2cap_chan *c;
4411
4412 BT_DBG("hdev %s, bdaddr %s", hdev->name, batostr(bdaddr));
4413
4414 /* Find listening sockets and check their link_mode */
4415 read_lock(&chan_list_lock);
4416 list_for_each_entry(c, &chan_list, global_l) {
4417 struct sock *sk = c->sk;
4418
4419 if (c->state != BT_LISTEN)
4420 continue;
4421
4422 if (!bacmp(&bt_sk(sk)->src, &hdev->bdaddr)) {
4423 lm1 |= HCI_LM_ACCEPT;
4424 if (test_bit(FLAG_ROLE_SWITCH, &c->flags))
4425 lm1 |= HCI_LM_MASTER;
4426 exact++;
4427 } else if (!bacmp(&bt_sk(sk)->src, BDADDR_ANY)) {
4428 lm2 |= HCI_LM_ACCEPT;
4429 if (test_bit(FLAG_ROLE_SWITCH, &c->flags))
4430 lm2 |= HCI_LM_MASTER;
4431 }
4432 }
4433 read_unlock(&chan_list_lock);
4434
4435 return exact ? lm1 : lm2;
4436 }
4437
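/* ACL connection attempt completed: create the L2CAP connection and
 * bring it up on success, or tear it down with the mapped error on
 * failure. */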
4438 int l2cap_connect_cfm(struct hci_conn *hcon, u8 status)
4439 {
4440 struct l2cap_conn *conn;
4441
4442 BT_DBG("hcon %p bdaddr %s status %d", hcon, batostr(&hcon->dst), status);
4443
4444 if (!status) {
4445 conn = l2cap_conn_add(hcon, status);
4446 if (conn)
4447 l2cap_conn_ready(conn);
4448 } else
4449 l2cap_conn_del(hcon, bt_to_errno(status));
4450
4451 return 0;
4452 }
4453
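/* HCI asks which disconnect reason to use for this link. */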
4454 int l2cap_disconn_ind(struct hci_conn *hcon)
4455 {
4456 struct l2cap_conn *conn = hcon->l2cap_data;
4457
4458 BT_DBG("hcon %p", hcon);
4459
4460 if (!conn)
4461 return HCI_ERROR_REMOTE_USER_TERM;
4462 return conn->disc_reason;
4463 }
4464
4465 int l2cap_disconn_cfm(struct hci_conn *hcon, u8 reason)
4466 {
4467 BT_DBG("hcon %p reason %d", hcon, reason);
4468
4469 l2cap_conn_del(hcon, bt_to_errno(reason));
4470 return 0;
4471 }
4472
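/* Handle an encryption change on a connection-oriented channel:
 * losing encryption arms a timer for medium security channels and
 * closes high security channels immediately; regaining it clears the
 * timer on medium security channels. */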
4473 static inline void l2cap_check_encryption(struct l2cap_chan *chan, u8 encrypt)
4474 {
4475 if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED)
4476 return;
4477
4478 if (encrypt == 0x00) {
4479 if (chan->sec_level == BT_SECURITY_MEDIUM) {
4480 __clear_chan_timer(chan);
4481 __set_chan_timer(chan, L2CAP_ENC_TIMEOUT);
4482 } else if (chan->sec_level == BT_SECURITY_HIGH)
4483 l2cap_chan_close(chan, ECONNREFUSED);
4484 } else {
4485 if (chan->sec_level == BT_SECURITY_MEDIUM)
4486 __clear_chan_timer(chan);
4487 }
4488 }
4489
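/* Security (authentication/encryption) result from HCI. Walk every
 * channel on the connection: LE channels become ready, channels in
 * BT_CONNECT send their deferred connection request and channels in
 * BT_CONNECT2 answer the pending request from the remote side. */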
4490 int l2cap_security_cfm(struct hci_conn *hcon, u8 status, u8 encrypt)
4491 {
4492 struct l2cap_conn *conn = hcon->l2cap_data;
4493 struct l2cap_chan *chan;
4494
4495 if (!conn)
4496 return 0;
4497
4498 BT_DBG("conn %p", conn);
4499
4500 if (hcon->type == LE_LINK) {
4501 smp_distribute_keys(conn, 0);
4502 __cancel_delayed_work(&conn->security_timer);
4503 }
4504
4505 rcu_read_lock();
4506
4507 list_for_each_entry_rcu(chan, &conn->chan_l, list) {
4508 struct sock *sk = chan->sk;
4509
4510 bh_lock_sock(sk);
4511
4512 BT_DBG("chan->scid %d", chan->scid);
4513
4514 if (chan->scid == L2CAP_CID_LE_DATA) {
4515 if (!status && encrypt) {
4516 chan->sec_level = hcon->sec_level;
4517 l2cap_chan_ready(sk);
4518 }
4519
4520 bh_unlock_sock(sk);
4521 continue;
4522 }
4523
4524 if (test_bit(CONF_CONNECT_PEND, &chan->conf_state)) {
4525 bh_unlock_sock(sk);
4526 continue;
4527 }
4528
4529 if (!status && (chan->state == BT_CONNECTED ||
4530 chan->state == BT_CONFIG)) {
4531 l2cap_check_encryption(chan, encrypt);
4532 bh_unlock_sock(sk);
4533 continue;
4534 }
4535
4536 if (chan->state == BT_CONNECT) {
4537 if (!status) {
4538 struct l2cap_conn_req req;
4539 req.scid = cpu_to_le16(chan->scid);
4540 req.psm = chan->psm;
4541
4542 chan->ident = l2cap_get_ident(conn);
4543 set_bit(CONF_CONNECT_PEND, &chan->conf_state);
4544
4545 l2cap_send_cmd(conn, chan->ident,
4546 L2CAP_CONN_REQ, sizeof(req), &req);
4547 } else {
4548 __clear_chan_timer(chan);
4549 __set_chan_timer(chan, L2CAP_DISC_TIMEOUT);
4550 }
4551 } else if (chan->state == BT_CONNECT2) {
4552 struct l2cap_conn_rsp rsp;
4553 __u16 res, stat;
4554
4555 if (!status) {
4556 if (bt_sk(sk)->defer_setup) {
4557 struct sock *parent = bt_sk(sk)->parent;
4558 res = L2CAP_CR_PEND;
4559 stat = L2CAP_CS_AUTHOR_PEND;
4560 if (parent)
4561 parent->sk_data_ready(parent, 0);
4562 } else {
4563 l2cap_state_change(chan, BT_CONFIG);
4564 res = L2CAP_CR_SUCCESS;
4565 stat = L2CAP_CS_NO_INFO;
4566 }
4567 } else {
4568 l2cap_state_change(chan, BT_DISCONN);
4569 __set_chan_timer(chan, L2CAP_DISC_TIMEOUT);
4570 res = L2CAP_CR_SEC_BLOCK;
4571 stat = L2CAP_CS_NO_INFO;
4572 }
4573
4574 rsp.scid = cpu_to_le16(chan->dcid);
4575 rsp.dcid = cpu_to_le16(chan->scid);
4576 rsp.result = cpu_to_le16(res);
4577 rsp.status = cpu_to_le16(stat);
4578 l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
4579 sizeof(rsp), &rsp);
4580 }
4581
4582 bh_unlock_sock(sk);
4583 }
4584
4585 rcu_read_unlock();
4586
4587 return 0;
4588 }
4589
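/* Receive path for ACL data from HCI. An L2CAP frame may be split
 * across several ACL packets: a start fragment allocates a reassembly
 * buffer sized from the basic header length, continuation fragments
 * are appended, and the complete frame is passed to
 * l2cap_recv_frame(). */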
4590 int l2cap_recv_acldata(struct hci_conn *hcon, struct sk_buff *skb, u16 flags)
4591 {
4592 struct l2cap_conn *conn = hcon->l2cap_data;
4593
4594 if (!conn)
4595 conn = l2cap_conn_add(hcon, 0);
4596
4597 if (!conn)
4598 goto drop;
4599
4600 BT_DBG("conn %p len %d flags 0x%x", conn, skb->len, flags);
4601
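/* ACL_CONT distinguishes a continuation fragment from the start of a
 * new L2CAP frame. */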
4602 if (!(flags & ACL_CONT)) {
4603 struct l2cap_hdr *hdr;
4604 struct l2cap_chan *chan;
4605 u16 cid;
4606 int len;
4607
4608 if (conn->rx_len) {
4609 BT_ERR("Unexpected start frame (len %d)", skb->len);
4610 kfree_skb(conn->rx_skb);
4611 conn->rx_skb = NULL;
4612 conn->rx_len = 0;
4613 l2cap_conn_unreliable(conn, ECOMM);
4614 }
4615
4616 /* A start fragment always begins with the Basic L2CAP header */
4617 if (skb->len < L2CAP_HDR_SIZE) {
4618 BT_ERR("Frame is too short (len %d)", skb->len);
4619 l2cap_conn_unreliable(conn, ECOMM);
4620 goto drop;
4621 }
4622
4623 hdr = (struct l2cap_hdr *) skb->data;
4624 len = __le16_to_cpu(hdr->len) + L2CAP_HDR_SIZE;
4625 cid = __le16_to_cpu(hdr->cid);
4626
4627 if (len == skb->len) {
4628 /* Complete frame received */
4629 l2cap_recv_frame(conn, skb);
4630 return 0;
4631 }
4632
4633 BT_DBG("Start: total len %d, frag len %d", len, skb->len);
4634
4635 if (skb->len > len) {
4636 BT_ERR("Frame is too long (len %d, expected len %d)",
4637 skb->len, len);
4638 l2cap_conn_unreliable(conn, ECOMM);
4639 goto drop;
4640 }
4641
4642 chan = l2cap_get_chan_by_scid(conn, cid);
4643
4644 if (chan && chan->sk) {
4645 struct sock *sk = chan->sk;
4646
4647 if (chan->imtu < len - L2CAP_HDR_SIZE) {
4648 BT_ERR("Frame exceeding recv MTU (len %d, "
4649 "MTU %d)", len,
4650 chan->imtu);
4651 release_sock(sk);
4652 l2cap_conn_unreliable(conn, ECOMM);
4653 goto drop;
4654 }
4655 release_sock(sk);
4656 }
4657
4658 /* Allocate skb for the complete frame (with header) */
4659 conn->rx_skb = bt_skb_alloc(len, GFP_ATOMIC);
4660 if (!conn->rx_skb)
4661 goto drop;
4662
4663 skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
4664 skb->len);
4665 conn->rx_len = len - skb->len;
4666 } else {
4667 BT_DBG("Cont: frag len %d (expecting %d)", skb->len, conn->rx_len);
4668
4669 if (!conn->rx_len) {
4670 BT_ERR("Unexpected continuation frame (len %d)", skb->len);
4671 l2cap_conn_unreliable(conn, ECOMM);
4672 goto drop;
4673 }
4674
4675 if (skb->len > conn->rx_len) {
4676 BT_ERR("Fragment is too long (len %d, expected %d)",
4677 skb->len, conn->rx_len);
4678 kfree_skb(conn->rx_skb);
4679 conn->rx_skb = NULL;
4680 conn->rx_len = 0;
4681 l2cap_conn_unreliable(conn, ECOMM);
4682 goto drop;
4683 }
4684
4685 skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
4686 skb->len);
4687 conn->rx_len -= skb->len;
4688
4689 if (!conn->rx_len) {
4690 /* Complete frame received */
4691 l2cap_recv_frame(conn, conn->rx_skb);
4692 conn->rx_skb = NULL;
4693 }
4694 }
4695
4696 drop:
4697 kfree_skb(skb);
4698 return 0;
4699 }
4700
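/* debugfs: print one line per known channel with its addresses,
 * state, PSM, CIDs, MTUs, security level and mode. */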
4701 static int l2cap_debugfs_show(struct seq_file *f, void *p)
4702 {
4703 struct l2cap_chan *c;
4704
4705 read_lock_bh(&chan_list_lock);
4706
4707 list_for_each_entry(c, &chan_list, global_l) {
4708 struct sock *sk = c->sk;
4709
4710 seq_printf(f, "%s %s %d %d 0x%4.4x 0x%4.4x %d %d %d %d\n",
4711 batostr(&bt_sk(sk)->src),
4712 batostr(&bt_sk(sk)->dst),
4713 c->state, __le16_to_cpu(c->psm),
4714 c->scid, c->dcid, c->imtu, c->omtu,
4715 c->sec_level, c->mode);
4716 }
4717
4718 read_unlock_bh(&chan_list_lock);
4719
4720 return 0;
4721 }
4722
4723 static int l2cap_debugfs_open(struct inode *inode, struct file *file)
4724 {
4725 return single_open(file, l2cap_debugfs_show, inode->i_private);
4726 }
4727
4728 static const struct file_operations l2cap_debugfs_fops = {
4729 .open = l2cap_debugfs_open,
4730 .read = seq_read,
4731 .llseek = seq_lseek,
4732 .release = single_release,
4733 };
4734
4735 static struct dentry *l2cap_debugfs;
4736
4737 int __init l2cap_init(void)
4738 {
4739 int err;
4740
4741 err = l2cap_init_sockets();
4742 if (err < 0)
4743 return err;
4744
4745 if (bt_debugfs) {
4746 l2cap_debugfs = debugfs_create_file("l2cap", 0444,
4747 bt_debugfs, NULL, &l2cap_debugfs_fops);
4748 if (!l2cap_debugfs)
4749 BT_ERR("Failed to create L2CAP debug file");
4750 }
4751
4752 return 0;
4753 }
4754
4755 void l2cap_exit(void)
4756 {
4757 debugfs_remove(l2cap_debugfs);
4758 l2cap_cleanup_sockets();
4759 }
4760
4761 module_param(disable_ertm, bool, 0644);
4762 MODULE_PARM_DESC(disable_ertm, "Disable enhanced retransmission mode");