Bluetooth: Fix extra conversion to __le32
net/bluetooth/l2cap_core.c
1 /*
2 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (C) 2000-2001 Qualcomm Incorporated
4 Copyright (C) 2009-2010 Gustavo F. Padovan <gustavo@padovan.org>
5 Copyright (C) 2010 Google Inc.
6 Copyright (C) 2011 ProFUSION Embedded Systems
7
8 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
9
10 This program is free software; you can redistribute it and/or modify
11 it under the terms of the GNU General Public License version 2 as
12 published by the Free Software Foundation;
13
14 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
15 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
17 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
18 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
19 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
20 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
21 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
22
23 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
24 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
25 SOFTWARE IS DISCLAIMED.
26 */
27
28 /* Bluetooth L2CAP core. */
29
30 #include <linux/module.h>
31
32 #include <linux/types.h>
33 #include <linux/capability.h>
34 #include <linux/errno.h>
35 #include <linux/kernel.h>
36 #include <linux/sched.h>
37 #include <linux/slab.h>
38 #include <linux/poll.h>
39 #include <linux/fcntl.h>
40 #include <linux/init.h>
41 #include <linux/interrupt.h>
42 #include <linux/socket.h>
43 #include <linux/skbuff.h>
44 #include <linux/list.h>
45 #include <linux/device.h>
46 #include <linux/debugfs.h>
47 #include <linux/seq_file.h>
48 #include <linux/uaccess.h>
49 #include <linux/crc16.h>
50 #include <net/sock.h>
51
52 #include <asm/unaligned.h>
53
54 #include <net/bluetooth/bluetooth.h>
55 #include <net/bluetooth/hci_core.h>
56 #include <net/bluetooth/l2cap.h>
57 #include <net/bluetooth/smp.h>
58
59 bool disable_ertm;
60
61 static u32 l2cap_feat_mask = L2CAP_FEAT_FIXED_CHAN;
62 static u8 l2cap_fixed_chan[8] = { L2CAP_FC_L2CAP, };
63
64 static LIST_HEAD(chan_list);
65 static DEFINE_RWLOCK(chan_list_lock);
66
67 static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
68 u8 code, u8 ident, u16 dlen, void *data);
69 static void l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len,
70 void *data);
71 static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data);
72 static void l2cap_send_disconn_req(struct l2cap_conn *conn,
73 struct l2cap_chan *chan, int err);
74
75 /* ---- L2CAP channels ---- */
76
77 static struct l2cap_chan *__l2cap_get_chan_by_dcid(struct l2cap_conn *conn, u16 cid)
78 {
79 struct l2cap_chan *c;
80
81 list_for_each_entry(c, &conn->chan_l, list) {
82 if (c->dcid == cid)
83 return c;
84 }
85 return NULL;
86 }
87
88 static struct l2cap_chan *__l2cap_get_chan_by_scid(struct l2cap_conn *conn, u16 cid)
89 {
90 struct l2cap_chan *c;
91
92 list_for_each_entry(c, &conn->chan_l, list) {
93 if (c->scid == cid)
94 return c;
95 }
96 return NULL;
97 }
98
  99 /* Find channel with given SCID.
 100  * Returns the matching channel or NULL; conn->chan_lock is dropped before returning. */
101 static struct l2cap_chan *l2cap_get_chan_by_scid(struct l2cap_conn *conn, u16 cid)
102 {
103 struct l2cap_chan *c;
104
105 mutex_lock(&conn->chan_lock);
106 c = __l2cap_get_chan_by_scid(conn, cid);
107 mutex_unlock(&conn->chan_lock);
108
109 return c;
110 }
111
112 static struct l2cap_chan *__l2cap_get_chan_by_ident(struct l2cap_conn *conn, u8 ident)
113 {
114 struct l2cap_chan *c;
115
116 list_for_each_entry(c, &conn->chan_l, list) {
117 if (c->ident == ident)
118 return c;
119 }
120 return NULL;
121 }
122
123 static inline struct l2cap_chan *l2cap_get_chan_by_ident(struct l2cap_conn *conn, u8 ident)
124 {
125 struct l2cap_chan *c;
126
127 mutex_lock(&conn->chan_lock);
128 c = __l2cap_get_chan_by_ident(conn, ident);
129 mutex_unlock(&conn->chan_lock);
130
131 return c;
132 }
133
134 static struct l2cap_chan *__l2cap_global_chan_by_addr(__le16 psm, bdaddr_t *src)
135 {
136 struct l2cap_chan *c;
137
138 list_for_each_entry(c, &chan_list, global_l) {
139 if (c->sport == psm && !bacmp(&bt_sk(c->sk)->src, src))
140 return c;
141 }
142 return NULL;
143 }
144
145 int l2cap_add_psm(struct l2cap_chan *chan, bdaddr_t *src, __le16 psm)
146 {
147 int err;
148
149 write_lock(&chan_list_lock);
150
151 if (psm && __l2cap_global_chan_by_addr(psm, src)) {
152 err = -EADDRINUSE;
153 goto done;
154 }
155
156 if (psm) {
157 chan->psm = psm;
158 chan->sport = psm;
159 err = 0;
160 } else {
161 u16 p;
162
163 err = -EINVAL;
164 for (p = 0x1001; p < 0x1100; p += 2)
165 if (!__l2cap_global_chan_by_addr(cpu_to_le16(p), src)) {
166 chan->psm = cpu_to_le16(p);
167 chan->sport = cpu_to_le16(p);
168 err = 0;
169 break;
170 }
171 }
172
173 done:
174 write_unlock(&chan_list_lock);
175 return err;
176 }
177
178 int l2cap_add_scid(struct l2cap_chan *chan, __u16 scid)
179 {
180 write_lock(&chan_list_lock);
181
182 chan->scid = scid;
183
184 write_unlock(&chan_list_lock);
185
186 return 0;
187 }
188
189 static u16 l2cap_alloc_cid(struct l2cap_conn *conn)
190 {
191 u16 cid = L2CAP_CID_DYN_START;
192
193 for (; cid < L2CAP_CID_DYN_END; cid++) {
194 if (!__l2cap_get_chan_by_scid(conn, cid))
195 return cid;
196 }
197
198 return 0;
199 }
200
201 static void __l2cap_state_change(struct l2cap_chan *chan, int state)
202 {
203 BT_DBG("chan %p %s -> %s", chan, state_to_string(chan->state),
204 state_to_string(state));
205
206 chan->state = state;
207 chan->ops->state_change(chan->data, state);
208 }
209
210 static void l2cap_state_change(struct l2cap_chan *chan, int state)
211 {
212 struct sock *sk = chan->sk;
213
214 lock_sock(sk);
215 __l2cap_state_change(chan, state);
216 release_sock(sk);
217 }
218
219 static inline void __l2cap_chan_set_err(struct l2cap_chan *chan, int err)
220 {
221 struct sock *sk = chan->sk;
222
223 sk->sk_err = err;
224 }
225
226 static inline void l2cap_chan_set_err(struct l2cap_chan *chan, int err)
227 {
228 struct sock *sk = chan->sk;
229
230 lock_sock(sk);
231 __l2cap_chan_set_err(chan, err);
232 release_sock(sk);
233 }
234
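/* Runs when the channel timer expires: use ECONNREFUSED for channels that
 * were connected/configuring (or still connecting with a security level
 * other than BT_SECURITY_SDP), ETIMEDOUT otherwise, then close the channel
 * and drop the timer's reference.
 */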
235 static void l2cap_chan_timeout(struct work_struct *work)
236 {
237 struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
238 chan_timer.work);
239 struct l2cap_conn *conn = chan->conn;
240 int reason;
241
242 BT_DBG("chan %p state %s", chan, state_to_string(chan->state));
243
244 mutex_lock(&conn->chan_lock);
245 l2cap_chan_lock(chan);
246
247 if (chan->state == BT_CONNECTED || chan->state == BT_CONFIG)
248 reason = ECONNREFUSED;
249 else if (chan->state == BT_CONNECT &&
250 chan->sec_level != BT_SECURITY_SDP)
251 reason = ECONNREFUSED;
252 else
253 reason = ETIMEDOUT;
254
255 l2cap_chan_close(chan, reason);
256
257 l2cap_chan_unlock(chan);
258
259 chan->ops->close(chan->data);
260 mutex_unlock(&conn->chan_lock);
261
262 l2cap_chan_put(chan);
263 }
264
265 struct l2cap_chan *l2cap_chan_create(struct sock *sk)
266 {
267 struct l2cap_chan *chan;
268
269 chan = kzalloc(sizeof(*chan), GFP_ATOMIC);
270 if (!chan)
271 return NULL;
272
273 mutex_init(&chan->lock);
274
275 chan->sk = sk;
276
277 write_lock(&chan_list_lock);
278 list_add(&chan->global_l, &chan_list);
279 write_unlock(&chan_list_lock);
280
281 INIT_DELAYED_WORK(&chan->chan_timer, l2cap_chan_timeout);
282
283 chan->state = BT_OPEN;
284
285 atomic_set(&chan->refcnt, 1);
286
287 BT_DBG("sk %p chan %p", sk, chan);
288
289 return chan;
290 }
291
292 void l2cap_chan_destroy(struct l2cap_chan *chan)
293 {
294 write_lock(&chan_list_lock);
295 list_del(&chan->global_l);
296 write_unlock(&chan_list_lock);
297
298 l2cap_chan_put(chan);
299 }
300
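/* Attach a channel to a connection and assign its CIDs and MTU: LE data
 * channels use the fixed LE CID, connection-oriented ACL channels get a
 * dynamic SCID, connectionless and raw channels use their fixed CIDs.
 * A channel reference is taken before linking it into conn->chan_l.
 */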
301 void __l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan)
302 {
303 BT_DBG("conn %p, psm 0x%2.2x, dcid 0x%4.4x", conn,
304 chan->psm, chan->dcid);
305
306 conn->disc_reason = HCI_ERROR_REMOTE_USER_TERM;
307
308 chan->conn = conn;
309
310 switch (chan->chan_type) {
311 case L2CAP_CHAN_CONN_ORIENTED:
312 if (conn->hcon->type == LE_LINK) {
313 /* LE connection */
314 chan->omtu = L2CAP_LE_DEFAULT_MTU;
315 chan->scid = L2CAP_CID_LE_DATA;
316 chan->dcid = L2CAP_CID_LE_DATA;
317 } else {
318 /* Alloc CID for connection-oriented socket */
319 chan->scid = l2cap_alloc_cid(conn);
320 chan->omtu = L2CAP_DEFAULT_MTU;
321 }
322 break;
323
324 case L2CAP_CHAN_CONN_LESS:
325 /* Connectionless socket */
326 chan->scid = L2CAP_CID_CONN_LESS;
327 chan->dcid = L2CAP_CID_CONN_LESS;
328 chan->omtu = L2CAP_DEFAULT_MTU;
329 break;
330
331 default:
332 /* Raw socket can send/recv signalling messages only */
333 chan->scid = L2CAP_CID_SIGNALING;
334 chan->dcid = L2CAP_CID_SIGNALING;
335 chan->omtu = L2CAP_DEFAULT_MTU;
336 }
337
338 chan->local_id = L2CAP_BESTEFFORT_ID;
339 chan->local_stype = L2CAP_SERV_BESTEFFORT;
340 chan->local_msdu = L2CAP_DEFAULT_MAX_SDU_SIZE;
341 chan->local_sdu_itime = L2CAP_DEFAULT_SDU_ITIME;
342 chan->local_acc_lat = L2CAP_DEFAULT_ACC_LAT;
343 chan->local_flush_to = L2CAP_DEFAULT_FLUSH_TO;
344
345 l2cap_chan_hold(chan);
346
347 list_add(&chan->list, &conn->chan_l);
348 }
349
350 void l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan)
351 {
352 mutex_lock(&conn->chan_lock);
353 __l2cap_chan_add(conn, chan);
354 mutex_unlock(&conn->chan_lock);
355 }
356
357 static void l2cap_chan_del(struct l2cap_chan *chan, int err)
358 {
359 struct sock *sk = chan->sk;
360 struct l2cap_conn *conn = chan->conn;
361 struct sock *parent = bt_sk(sk)->parent;
362
363 __clear_chan_timer(chan);
364
365 BT_DBG("chan %p, conn %p, err %d", chan, conn, err);
366
367 if (conn) {
368 /* Delete from channel list */
369 list_del(&chan->list);
370
371 l2cap_chan_put(chan);
372
373 chan->conn = NULL;
374 hci_conn_put(conn->hcon);
375 }
376
377 lock_sock(sk);
378
379 __l2cap_state_change(chan, BT_CLOSED);
380 sock_set_flag(sk, SOCK_ZAPPED);
381
382 if (err)
383 __l2cap_chan_set_err(chan, err);
384
385 if (parent) {
386 bt_accept_unlink(sk);
387 parent->sk_data_ready(parent, 0);
388 } else
389 sk->sk_state_change(sk);
390
391 release_sock(sk);
392
393 if (!(test_bit(CONF_OUTPUT_DONE, &chan->conf_state) &&
394 test_bit(CONF_INPUT_DONE, &chan->conf_state)))
395 return;
396
397 skb_queue_purge(&chan->tx_q);
398
399 if (chan->mode == L2CAP_MODE_ERTM) {
400 struct srej_list *l, *tmp;
401
402 __clear_retrans_timer(chan);
403 __clear_monitor_timer(chan);
404 __clear_ack_timer(chan);
405
406 skb_queue_purge(&chan->srej_q);
407
408 list_for_each_entry_safe(l, tmp, &chan->srej_l, list) {
409 list_del(&l->list);
410 kfree(l);
411 }
412 }
413 }
414
415 static void l2cap_chan_cleanup_listen(struct sock *parent)
416 {
417 struct sock *sk;
418
419 BT_DBG("parent %p", parent);
420
 421 	/* Close channels that have not yet been accepted */
422 while ((sk = bt_accept_dequeue(parent, NULL))) {
423 struct l2cap_chan *chan = l2cap_pi(sk)->chan;
424
425 l2cap_chan_lock(chan);
426 __clear_chan_timer(chan);
427 l2cap_chan_close(chan, ECONNRESET);
428 l2cap_chan_unlock(chan);
429
430 chan->ops->close(chan->data);
431 }
432 }
433
434 void l2cap_chan_close(struct l2cap_chan *chan, int reason)
435 {
436 struct l2cap_conn *conn = chan->conn;
437 struct sock *sk = chan->sk;
438
439 BT_DBG("chan %p state %s sk %p", chan,
440 state_to_string(chan->state), sk);
441
442 switch (chan->state) {
443 case BT_LISTEN:
444 lock_sock(sk);
445 l2cap_chan_cleanup_listen(sk);
446
447 __l2cap_state_change(chan, BT_CLOSED);
448 sock_set_flag(sk, SOCK_ZAPPED);
449 release_sock(sk);
450 break;
451
452 case BT_CONNECTED:
453 case BT_CONFIG:
454 if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED &&
455 conn->hcon->type == ACL_LINK) {
456 __clear_chan_timer(chan);
457 __set_chan_timer(chan, sk->sk_sndtimeo);
458 l2cap_send_disconn_req(conn, chan, reason);
459 } else
460 l2cap_chan_del(chan, reason);
461 break;
462
463 case BT_CONNECT2:
464 if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED &&
465 conn->hcon->type == ACL_LINK) {
466 struct l2cap_conn_rsp rsp;
467 __u16 result;
468
469 if (bt_sk(sk)->defer_setup)
470 result = L2CAP_CR_SEC_BLOCK;
471 else
472 result = L2CAP_CR_BAD_PSM;
473 l2cap_state_change(chan, BT_DISCONN);
474
475 rsp.scid = cpu_to_le16(chan->dcid);
476 rsp.dcid = cpu_to_le16(chan->scid);
477 rsp.result = cpu_to_le16(result);
478 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
479 l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
480 sizeof(rsp), &rsp);
481 }
482
483 l2cap_chan_del(chan, reason);
484 break;
485
486 case BT_CONNECT:
487 case BT_DISCONN:
488 l2cap_chan_del(chan, reason);
489 break;
490
491 default:
492 lock_sock(sk);
493 sock_set_flag(sk, SOCK_ZAPPED);
494 release_sock(sk);
495 break;
496 }
497 }
498
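/* Map channel type, PSM and security level to the HCI authentication
 * requirement: raw channels request dedicated bonding for medium/high
 * security, PSM 0x0001 (SDP) never requires bonding, and all other
 * channels request general bonding for medium/high security.
 */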
499 static inline u8 l2cap_get_auth_type(struct l2cap_chan *chan)
500 {
501 if (chan->chan_type == L2CAP_CHAN_RAW) {
502 switch (chan->sec_level) {
503 case BT_SECURITY_HIGH:
504 return HCI_AT_DEDICATED_BONDING_MITM;
505 case BT_SECURITY_MEDIUM:
506 return HCI_AT_DEDICATED_BONDING;
507 default:
508 return HCI_AT_NO_BONDING;
509 }
510 } else if (chan->psm == cpu_to_le16(0x0001)) {
511 if (chan->sec_level == BT_SECURITY_LOW)
512 chan->sec_level = BT_SECURITY_SDP;
513
514 if (chan->sec_level == BT_SECURITY_HIGH)
515 return HCI_AT_NO_BONDING_MITM;
516 else
517 return HCI_AT_NO_BONDING;
518 } else {
519 switch (chan->sec_level) {
520 case BT_SECURITY_HIGH:
521 return HCI_AT_GENERAL_BONDING_MITM;
522 case BT_SECURITY_MEDIUM:
523 return HCI_AT_GENERAL_BONDING;
524 default:
525 return HCI_AT_NO_BONDING;
526 }
527 }
528 }
529
530 /* Service level security */
531 int l2cap_chan_check_security(struct l2cap_chan *chan)
532 {
533 struct l2cap_conn *conn = chan->conn;
534 __u8 auth_type;
535
536 auth_type = l2cap_get_auth_type(chan);
537
538 return hci_conn_security(conn->hcon, chan->sec_level, auth_type);
539 }
540
541 static u8 l2cap_get_ident(struct l2cap_conn *conn)
542 {
543 u8 id;
544
 545 	/* Get next available identifier.
 546 	 * 1 - 128 are used by the kernel.
547 * 129 - 199 are reserved.
548 * 200 - 254 are used by utilities like l2ping, etc.
549 */
550
551 spin_lock(&conn->lock);
552
553 if (++conn->tx_ident > 128)
554 conn->tx_ident = 1;
555
556 id = conn->tx_ident;
557
558 spin_unlock(&conn->lock);
559
560 return id;
561 }
562
563 static void l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len, void *data)
564 {
565 struct sk_buff *skb = l2cap_build_cmd(conn, code, ident, len, data);
566 u8 flags;
567
568 BT_DBG("code 0x%2.2x", code);
569
570 if (!skb)
571 return;
572
573 if (lmp_no_flush_capable(conn->hcon->hdev))
574 flags = ACL_START_NO_FLUSH;
575 else
576 flags = ACL_START;
577
578 bt_cb(skb)->force_active = BT_POWER_FORCE_ACTIVE_ON;
579 skb->priority = HCI_PRIO_MAX;
580
581 hci_send_acl(conn->hchan, skb, flags);
582 }
583
584 static void l2cap_do_send(struct l2cap_chan *chan, struct sk_buff *skb)
585 {
586 struct hci_conn *hcon = chan->conn->hcon;
587 u16 flags;
588
589 BT_DBG("chan %p, skb %p len %d priority %u", chan, skb, skb->len,
590 skb->priority);
591
592 if (!test_bit(FLAG_FLUSHABLE, &chan->flags) &&
593 lmp_no_flush_capable(hcon->hdev))
594 flags = ACL_START_NO_FLUSH;
595 else
596 flags = ACL_START;
597
598 bt_cb(skb)->force_active = test_bit(FLAG_FORCE_ACTIVE, &chan->flags);
599 hci_send_acl(chan->conn->hchan, skb, flags);
600 }
601
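/* Build and send a single S-frame: choose the enhanced or extended header
 * size, reserve room for the CRC16 FCS when enabled, and fold any pending
 * F-bit/P-bit into the control field before transmitting.
 */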
602 static inline void l2cap_send_sframe(struct l2cap_chan *chan, u32 control)
603 {
604 struct sk_buff *skb;
605 struct l2cap_hdr *lh;
606 struct l2cap_conn *conn = chan->conn;
607 int count, hlen;
608
609 if (chan->state != BT_CONNECTED)
610 return;
611
612 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
613 hlen = L2CAP_EXT_HDR_SIZE;
614 else
615 hlen = L2CAP_ENH_HDR_SIZE;
616
617 if (chan->fcs == L2CAP_FCS_CRC16)
618 hlen += L2CAP_FCS_SIZE;
619
620 BT_DBG("chan %p, control 0x%8.8x", chan, control);
621
622 count = min_t(unsigned int, conn->mtu, hlen);
623
624 control |= __set_sframe(chan);
625
626 if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state))
627 control |= __set_ctrl_final(chan);
628
629 if (test_and_clear_bit(CONN_SEND_PBIT, &chan->conn_state))
630 control |= __set_ctrl_poll(chan);
631
632 skb = bt_skb_alloc(count, GFP_ATOMIC);
633 if (!skb)
634 return;
635
636 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
637 lh->len = cpu_to_le16(hlen - L2CAP_HDR_SIZE);
638 lh->cid = cpu_to_le16(chan->dcid);
639
640 __put_control(chan, control, skb_put(skb, __ctrl_size(chan)));
641
642 if (chan->fcs == L2CAP_FCS_CRC16) {
643 u16 fcs = crc16(0, (u8 *)lh, count - L2CAP_FCS_SIZE);
644 put_unaligned_le16(fcs, skb_put(skb, L2CAP_FCS_SIZE));
645 }
646
647 skb->priority = HCI_PRIO_MAX;
648 l2cap_do_send(chan, skb);
649 }
650
651 static inline void l2cap_send_rr_or_rnr(struct l2cap_chan *chan, u32 control)
652 {
653 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
654 control |= __set_ctrl_super(chan, L2CAP_SUPER_RNR);
655 set_bit(CONN_RNR_SENT, &chan->conn_state);
656 } else
657 control |= __set_ctrl_super(chan, L2CAP_SUPER_RR);
658
659 control |= __set_reqseq(chan, chan->buffer_seq);
660
661 l2cap_send_sframe(chan, control);
662 }
663
664 static inline int __l2cap_no_conn_pending(struct l2cap_chan *chan)
665 {
666 return !test_bit(CONF_CONNECT_PEND, &chan->conf_state);
667 }
668
669 static void l2cap_send_conn_req(struct l2cap_chan *chan)
670 {
671 struct l2cap_conn *conn = chan->conn;
672 struct l2cap_conn_req req;
673
674 req.scid = cpu_to_le16(chan->scid);
675 req.psm = chan->psm;
676
677 chan->ident = l2cap_get_ident(conn);
678
679 set_bit(CONF_CONNECT_PEND, &chan->conf_state);
680
681 l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_REQ, sizeof(req), &req);
682 }
683
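/* Kick off channel establishment: once the remote feature mask is known,
 * send a Connection Request if security allows and none is pending;
 * otherwise issue an Information Request for the feature mask first.
 */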
684 static void l2cap_do_start(struct l2cap_chan *chan)
685 {
686 struct l2cap_conn *conn = chan->conn;
687
688 if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) {
689 if (!(conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE))
690 return;
691
692 if (l2cap_chan_check_security(chan) &&
693 __l2cap_no_conn_pending(chan))
694 l2cap_send_conn_req(chan);
695 } else {
696 struct l2cap_info_req req;
697 req.type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
698
699 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
700 conn->info_ident = l2cap_get_ident(conn);
701
702 schedule_delayed_work(&conn->info_timer, L2CAP_INFO_TIMEOUT);
703
704 l2cap_send_cmd(conn, conn->info_ident,
705 L2CAP_INFO_REQ, sizeof(req), &req);
706 }
707 }
708
709 static inline int l2cap_mode_supported(__u8 mode, __u32 feat_mask)
710 {
711 u32 local_feat_mask = l2cap_feat_mask;
712 if (!disable_ertm)
713 local_feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING;
714
715 switch (mode) {
716 case L2CAP_MODE_ERTM:
717 return L2CAP_FEAT_ERTM & feat_mask & local_feat_mask;
718 case L2CAP_MODE_STREAMING:
719 return L2CAP_FEAT_STREAMING & feat_mask & local_feat_mask;
720 default:
721 return 0x00;
722 }
723 }
724
725 static void l2cap_send_disconn_req(struct l2cap_conn *conn, struct l2cap_chan *chan, int err)
726 {
727 struct sock *sk = chan->sk;
728 struct l2cap_disconn_req req;
729
730 if (!conn)
731 return;
732
733 if (chan->mode == L2CAP_MODE_ERTM) {
734 __clear_retrans_timer(chan);
735 __clear_monitor_timer(chan);
736 __clear_ack_timer(chan);
737 }
738
739 req.dcid = cpu_to_le16(chan->dcid);
740 req.scid = cpu_to_le16(chan->scid);
741 l2cap_send_cmd(conn, l2cap_get_ident(conn),
742 L2CAP_DISCONN_REQ, sizeof(req), &req);
743
744 lock_sock(sk);
745 __l2cap_state_change(chan, BT_DISCONN);
746 __l2cap_chan_set_err(chan, err);
747 release_sock(sk);
748 }
749
750 /* ---- L2CAP connections ---- */
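/* Walk all channels on the connection: send Connection Requests for
 * channels in BT_CONNECT and answer pending incoming connections in
 * BT_CONNECT2, sending the first Configure Request once accepted.
 */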
751 static void l2cap_conn_start(struct l2cap_conn *conn)
752 {
753 struct l2cap_chan *chan, *tmp;
754
755 BT_DBG("conn %p", conn);
756
757 mutex_lock(&conn->chan_lock);
758
759 list_for_each_entry_safe(chan, tmp, &conn->chan_l, list) {
760 struct sock *sk = chan->sk;
761
762 l2cap_chan_lock(chan);
763
764 if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
765 l2cap_chan_unlock(chan);
766 continue;
767 }
768
769 if (chan->state == BT_CONNECT) {
770 if (!l2cap_chan_check_security(chan) ||
771 !__l2cap_no_conn_pending(chan)) {
772 l2cap_chan_unlock(chan);
773 continue;
774 }
775
776 if (!l2cap_mode_supported(chan->mode, conn->feat_mask)
777 && test_bit(CONF_STATE2_DEVICE,
778 &chan->conf_state)) {
779 l2cap_chan_close(chan, ECONNRESET);
780 l2cap_chan_unlock(chan);
781 continue;
782 }
783
784 l2cap_send_conn_req(chan);
785
786 } else if (chan->state == BT_CONNECT2) {
787 struct l2cap_conn_rsp rsp;
788 char buf[128];
789 rsp.scid = cpu_to_le16(chan->dcid);
790 rsp.dcid = cpu_to_le16(chan->scid);
791
792 if (l2cap_chan_check_security(chan)) {
793 lock_sock(sk);
794 if (bt_sk(sk)->defer_setup) {
795 struct sock *parent = bt_sk(sk)->parent;
796 rsp.result = cpu_to_le16(L2CAP_CR_PEND);
797 rsp.status = cpu_to_le16(L2CAP_CS_AUTHOR_PEND);
798 if (parent)
799 parent->sk_data_ready(parent, 0);
800
801 } else {
802 __l2cap_state_change(chan, BT_CONFIG);
803 rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
804 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
805 }
806 release_sock(sk);
807 } else {
808 rsp.result = cpu_to_le16(L2CAP_CR_PEND);
809 rsp.status = cpu_to_le16(L2CAP_CS_AUTHEN_PEND);
810 }
811
812 l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
813 sizeof(rsp), &rsp);
814
815 if (test_bit(CONF_REQ_SENT, &chan->conf_state) ||
816 rsp.result != L2CAP_CR_SUCCESS) {
817 l2cap_chan_unlock(chan);
818 continue;
819 }
820
821 set_bit(CONF_REQ_SENT, &chan->conf_state);
822 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
823 l2cap_build_conf_req(chan, buf), buf);
824 chan->num_conf_req++;
825 }
826
827 l2cap_chan_unlock(chan);
828 }
829
830 mutex_unlock(&conn->chan_lock);
831 }
832
 833 /* Find channel with given CID and source bdaddr.
 834  * Returns closest match; chan_list_lock is released before returning.
835 */
836 static struct l2cap_chan *l2cap_global_chan_by_scid(int state, __le16 cid, bdaddr_t *src)
837 {
838 struct l2cap_chan *c, *c1 = NULL;
839
840 read_lock(&chan_list_lock);
841
842 list_for_each_entry(c, &chan_list, global_l) {
843 struct sock *sk = c->sk;
844
845 if (state && c->state != state)
846 continue;
847
848 if (c->scid == cid) {
849 /* Exact match. */
850 if (!bacmp(&bt_sk(sk)->src, src)) {
851 read_unlock(&chan_list_lock);
852 return c;
853 }
854
855 /* Closest match */
856 if (!bacmp(&bt_sk(sk)->src, BDADDR_ANY))
857 c1 = c;
858 }
859 }
860
861 read_unlock(&chan_list_lock);
862
863 return c1;
864 }
865
866 static void l2cap_le_conn_ready(struct l2cap_conn *conn)
867 {
868 struct sock *parent, *sk;
869 struct l2cap_chan *chan, *pchan;
870
871 BT_DBG("");
872
 873 	/* Check if we have a socket listening on this CID */
874 pchan = l2cap_global_chan_by_scid(BT_LISTEN, L2CAP_CID_LE_DATA,
875 conn->src);
876 if (!pchan)
877 return;
878
879 parent = pchan->sk;
880
881 lock_sock(parent);
882
883 /* Check for backlog size */
884 if (sk_acceptq_is_full(parent)) {
885 BT_DBG("backlog full %d", parent->sk_ack_backlog);
886 goto clean;
887 }
888
889 chan = pchan->ops->new_connection(pchan->data);
890 if (!chan)
891 goto clean;
892
893 sk = chan->sk;
894
895 hci_conn_hold(conn->hcon);
896
897 bacpy(&bt_sk(sk)->src, conn->src);
898 bacpy(&bt_sk(sk)->dst, conn->dst);
899
900 bt_accept_enqueue(parent, sk);
901
902 l2cap_chan_add(conn, chan);
903
904 __set_chan_timer(chan, sk->sk_sndtimeo);
905
906 __l2cap_state_change(chan, BT_CONNECTED);
907 parent->sk_data_ready(parent, 0);
908
909 clean:
910 release_sock(parent);
911 }
912
913 static void l2cap_chan_ready(struct l2cap_chan *chan)
914 {
915 struct sock *sk = chan->sk;
916 struct sock *parent;
917
918 lock_sock(sk);
919
920 parent = bt_sk(sk)->parent;
921
922 BT_DBG("sk %p, parent %p", sk, parent);
923
924 chan->conf_state = 0;
925 __clear_chan_timer(chan);
926
927 __l2cap_state_change(chan, BT_CONNECTED);
928 sk->sk_state_change(sk);
929
930 if (parent)
931 parent->sk_data_ready(parent, 0);
932
933 release_sock(sk);
934 }
935
936 static void l2cap_conn_ready(struct l2cap_conn *conn)
937 {
938 struct l2cap_chan *chan;
939
940 BT_DBG("conn %p", conn);
941
942 if (!conn->hcon->out && conn->hcon->type == LE_LINK)
943 l2cap_le_conn_ready(conn);
944
945 if (conn->hcon->out && conn->hcon->type == LE_LINK)
946 smp_conn_security(conn, conn->hcon->pending_sec_level);
947
948 mutex_lock(&conn->chan_lock);
949
950 list_for_each_entry(chan, &conn->chan_l, list) {
951
952 l2cap_chan_lock(chan);
953
954 if (conn->hcon->type == LE_LINK) {
955 if (smp_conn_security(conn, chan->sec_level))
956 l2cap_chan_ready(chan);
957
958 } else if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
959 struct sock *sk = chan->sk;
960 __clear_chan_timer(chan);
961 lock_sock(sk);
962 __l2cap_state_change(chan, BT_CONNECTED);
963 sk->sk_state_change(sk);
964 release_sock(sk);
965
966 } else if (chan->state == BT_CONNECT)
967 l2cap_do_start(chan);
968
969 l2cap_chan_unlock(chan);
970 }
971
972 mutex_unlock(&conn->chan_lock);
973 }
974
 975 /* Notify sockets that we cannot guarantee reliability anymore */
976 static void l2cap_conn_unreliable(struct l2cap_conn *conn, int err)
977 {
978 struct l2cap_chan *chan;
979
980 BT_DBG("conn %p", conn);
981
982 mutex_lock(&conn->chan_lock);
983
984 list_for_each_entry(chan, &conn->chan_l, list) {
985 if (test_bit(FLAG_FORCE_RELIABLE, &chan->flags))
986 __l2cap_chan_set_err(chan, err);
987 }
988
989 mutex_unlock(&conn->chan_lock);
990 }
991
992 static void l2cap_info_timeout(struct work_struct *work)
993 {
994 struct l2cap_conn *conn = container_of(work, struct l2cap_conn,
995 info_timer.work);
996
997 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
998 conn->info_ident = 0;
999
1000 l2cap_conn_start(conn);
1001 }
1002
1003 static void l2cap_conn_del(struct hci_conn *hcon, int err)
1004 {
1005 struct l2cap_conn *conn = hcon->l2cap_data;
1006 struct l2cap_chan *chan, *l;
1007
1008 if (!conn)
1009 return;
1010
1011 BT_DBG("hcon %p conn %p, err %d", hcon, conn, err);
1012
1013 kfree_skb(conn->rx_skb);
1014
1015 mutex_lock(&conn->chan_lock);
1016
1017 /* Kill channels */
1018 list_for_each_entry_safe(chan, l, &conn->chan_l, list) {
1019 l2cap_chan_lock(chan);
1020
1021 l2cap_chan_del(chan, err);
1022
1023 l2cap_chan_unlock(chan);
1024
1025 chan->ops->close(chan->data);
1026 }
1027
1028 mutex_unlock(&conn->chan_lock);
1029
1030 hci_chan_del(conn->hchan);
1031
1032 if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT)
1033 cancel_delayed_work_sync(&conn->info_timer);
1034
1035 if (test_and_clear_bit(HCI_CONN_LE_SMP_PEND, &hcon->flags)) {
1036 cancel_delayed_work_sync(&conn->security_timer);
1037 smp_chan_destroy(conn);
1038 }
1039
1040 hcon->l2cap_data = NULL;
1041 kfree(conn);
1042 }
1043
1044 static void security_timeout(struct work_struct *work)
1045 {
1046 struct l2cap_conn *conn = container_of(work, struct l2cap_conn,
1047 security_timer.work);
1048
1049 l2cap_conn_del(conn->hcon, ETIMEDOUT);
1050 }
1051
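/* Allocate the l2cap_conn for an hci_conn: create the HCI channel, pick
 * the MTU from the controller's LE or ACL limits, and initialise either
 * the SMP security timer (LE) or the info-request timer (BR/EDR).
 */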
1052 static struct l2cap_conn *l2cap_conn_add(struct hci_conn *hcon, u8 status)
1053 {
1054 struct l2cap_conn *conn = hcon->l2cap_data;
1055 struct hci_chan *hchan;
1056
1057 if (conn || status)
1058 return conn;
1059
1060 hchan = hci_chan_create(hcon);
1061 if (!hchan)
1062 return NULL;
1063
1064 conn = kzalloc(sizeof(struct l2cap_conn), GFP_ATOMIC);
1065 if (!conn) {
1066 hci_chan_del(hchan);
1067 return NULL;
1068 }
1069
1070 hcon->l2cap_data = conn;
1071 conn->hcon = hcon;
1072 conn->hchan = hchan;
1073
1074 BT_DBG("hcon %p conn %p hchan %p", hcon, conn, hchan);
1075
1076 if (hcon->hdev->le_mtu && hcon->type == LE_LINK)
1077 conn->mtu = hcon->hdev->le_mtu;
1078 else
1079 conn->mtu = hcon->hdev->acl_mtu;
1080
1081 conn->src = &hcon->hdev->bdaddr;
1082 conn->dst = &hcon->dst;
1083
1084 conn->feat_mask = 0;
1085
1086 spin_lock_init(&conn->lock);
1087 mutex_init(&conn->chan_lock);
1088
1089 INIT_LIST_HEAD(&conn->chan_l);
1090
1091 if (hcon->type == LE_LINK)
1092 INIT_DELAYED_WORK(&conn->security_timer, security_timeout);
1093 else
1094 INIT_DELAYED_WORK(&conn->info_timer, l2cap_info_timeout);
1095
1096 conn->disc_reason = HCI_ERROR_REMOTE_USER_TERM;
1097
1098 return conn;
1099 }
1100
1101 /* ---- Socket interface ---- */
1102
1103 /* Find channel with given PSM and source bdaddr.
1104 * Returns closest match.
1105 */
1106 static struct l2cap_chan *l2cap_global_chan_by_psm(int state, __le16 psm, bdaddr_t *src)
1107 {
1108 struct l2cap_chan *c, *c1 = NULL;
1109
1110 read_lock(&chan_list_lock);
1111
1112 list_for_each_entry(c, &chan_list, global_l) {
1113 struct sock *sk = c->sk;
1114
1115 if (state && c->state != state)
1116 continue;
1117
1118 if (c->psm == psm) {
1119 /* Exact match. */
1120 if (!bacmp(&bt_sk(sk)->src, src)) {
1121 read_unlock(&chan_list_lock);
1122 return c;
1123 }
1124
1125 /* Closest match */
1126 if (!bacmp(&bt_sk(sk)->src, BDADDR_ANY))
1127 c1 = c;
1128 }
1129 }
1130
1131 read_unlock(&chan_list_lock);
1132
1133 return c1;
1134 }
1135
1136 int l2cap_chan_connect(struct l2cap_chan *chan, __le16 psm, u16 cid, bdaddr_t *dst)
1137 {
1138 struct sock *sk = chan->sk;
1139 bdaddr_t *src = &bt_sk(sk)->src;
1140 struct l2cap_conn *conn;
1141 struct hci_conn *hcon;
1142 struct hci_dev *hdev;
1143 __u8 auth_type;
1144 int err;
1145
1146 BT_DBG("%s -> %s psm 0x%2.2x", batostr(src), batostr(dst),
1147 chan->psm);
1148
1149 hdev = hci_get_route(dst, src);
1150 if (!hdev)
1151 return -EHOSTUNREACH;
1152
1153 hci_dev_lock(hdev);
1154
1155 l2cap_chan_lock(chan);
1156
1157 /* PSM must be odd and lsb of upper byte must be 0 */
1158 if ((__le16_to_cpu(psm) & 0x0101) != 0x0001 && !cid &&
1159 chan->chan_type != L2CAP_CHAN_RAW) {
1160 err = -EINVAL;
1161 goto done;
1162 }
1163
1164 if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED && !(psm || cid)) {
1165 err = -EINVAL;
1166 goto done;
1167 }
1168
1169 switch (chan->mode) {
1170 case L2CAP_MODE_BASIC:
1171 break;
1172 case L2CAP_MODE_ERTM:
1173 case L2CAP_MODE_STREAMING:
1174 if (!disable_ertm)
1175 break;
1176 /* fall through */
1177 default:
1178 err = -ENOTSUPP;
1179 goto done;
1180 }
1181
1182 lock_sock(sk);
1183
1184 switch (sk->sk_state) {
1185 case BT_CONNECT:
1186 case BT_CONNECT2:
1187 case BT_CONFIG:
1188 /* Already connecting */
1189 err = 0;
1190 release_sock(sk);
1191 goto done;
1192
1193 case BT_CONNECTED:
1194 /* Already connected */
1195 err = -EISCONN;
1196 release_sock(sk);
1197 goto done;
1198
1199 case BT_OPEN:
1200 case BT_BOUND:
1201 /* Can connect */
1202 break;
1203
1204 default:
1205 err = -EBADFD;
1206 release_sock(sk);
1207 goto done;
1208 }
1209
1210 /* Set destination address and psm */
1211 bacpy(&bt_sk(sk)->dst, dst);
1212
1213 release_sock(sk);
1214
1215 chan->psm = psm;
1216 chan->dcid = cid;
1217
1218 auth_type = l2cap_get_auth_type(chan);
1219
1220 if (chan->dcid == L2CAP_CID_LE_DATA)
1221 hcon = hci_connect(hdev, LE_LINK, dst,
1222 chan->sec_level, auth_type);
1223 else
1224 hcon = hci_connect(hdev, ACL_LINK, dst,
1225 chan->sec_level, auth_type);
1226
1227 if (IS_ERR(hcon)) {
1228 err = PTR_ERR(hcon);
1229 goto done;
1230 }
1231
1232 conn = l2cap_conn_add(hcon, 0);
1233 if (!conn) {
1234 hci_conn_put(hcon);
1235 err = -ENOMEM;
1236 goto done;
1237 }
1238
1239 /* Update source addr of the socket */
1240 bacpy(src, conn->src);
1241
1242 l2cap_chan_unlock(chan);
1243 l2cap_chan_add(conn, chan);
1244 l2cap_chan_lock(chan);
1245
1246 l2cap_state_change(chan, BT_CONNECT);
1247 __set_chan_timer(chan, sk->sk_sndtimeo);
1248
1249 if (hcon->state == BT_CONNECTED) {
1250 if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
1251 __clear_chan_timer(chan);
1252 if (l2cap_chan_check_security(chan))
1253 l2cap_state_change(chan, BT_CONNECTED);
1254 } else
1255 l2cap_do_start(chan);
1256 }
1257
1258 err = 0;
1259
1260 done:
1261 l2cap_chan_unlock(chan);
1262 hci_dev_unlock(hdev);
1263 hci_dev_put(hdev);
1264 return err;
1265 }
1266
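/* Wait (interruptibly, in HZ/5 slices) until all outstanding I-frames have
 * been acknowledged or the channel loses its connection.
 */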
1267 int __l2cap_wait_ack(struct sock *sk)
1268 {
1269 struct l2cap_chan *chan = l2cap_pi(sk)->chan;
1270 DECLARE_WAITQUEUE(wait, current);
1271 int err = 0;
1272 int timeo = HZ/5;
1273
1274 add_wait_queue(sk_sleep(sk), &wait);
1275 set_current_state(TASK_INTERRUPTIBLE);
1276 while (chan->unacked_frames > 0 && chan->conn) {
1277 if (!timeo)
1278 timeo = HZ/5;
1279
1280 if (signal_pending(current)) {
1281 err = sock_intr_errno(timeo);
1282 break;
1283 }
1284
1285 release_sock(sk);
1286 timeo = schedule_timeout(timeo);
1287 lock_sock(sk);
1288 set_current_state(TASK_INTERRUPTIBLE);
1289
1290 err = sock_error(sk);
1291 if (err)
1292 break;
1293 }
1294 set_current_state(TASK_RUNNING);
1295 remove_wait_queue(sk_sleep(sk), &wait);
1296 return err;
1297 }
1298
1299 static void l2cap_monitor_timeout(struct work_struct *work)
1300 {
1301 struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
1302 monitor_timer.work);
1303
1304 BT_DBG("chan %p", chan);
1305
1306 l2cap_chan_lock(chan);
1307
1308 if (chan->retry_count >= chan->remote_max_tx) {
1309 l2cap_send_disconn_req(chan->conn, chan, ECONNABORTED);
1310 l2cap_chan_unlock(chan);
1311 l2cap_chan_put(chan);
1312 return;
1313 }
1314
1315 chan->retry_count++;
1316 __set_monitor_timer(chan);
1317
1318 l2cap_send_rr_or_rnr(chan, L2CAP_CTRL_POLL);
1319 l2cap_chan_unlock(chan);
1320 l2cap_chan_put(chan);
1321 }
1322
1323 static void l2cap_retrans_timeout(struct work_struct *work)
1324 {
1325 struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
1326 retrans_timer.work);
1327
1328 BT_DBG("chan %p", chan);
1329
1330 l2cap_chan_lock(chan);
1331
1332 chan->retry_count = 1;
1333 __set_monitor_timer(chan);
1334
1335 set_bit(CONN_WAIT_F, &chan->conn_state);
1336
1337 l2cap_send_rr_or_rnr(chan, L2CAP_CTRL_POLL);
1338
1339 l2cap_chan_unlock(chan);
1340 l2cap_chan_put(chan);
1341 }
1342
1343 static void l2cap_drop_acked_frames(struct l2cap_chan *chan)
1344 {
1345 struct sk_buff *skb;
1346
1347 while ((skb = skb_peek(&chan->tx_q)) &&
1348 chan->unacked_frames) {
1349 if (bt_cb(skb)->tx_seq == chan->expected_ack_seq)
1350 break;
1351
1352 skb = skb_dequeue(&chan->tx_q);
1353 kfree_skb(skb);
1354
1355 chan->unacked_frames--;
1356 }
1357
1358 if (!chan->unacked_frames)
1359 __clear_retrans_timer(chan);
1360 }
1361
1362 static void l2cap_streaming_send(struct l2cap_chan *chan)
1363 {
1364 struct sk_buff *skb;
1365 u32 control;
1366 u16 fcs;
1367
1368 while ((skb = skb_dequeue(&chan->tx_q))) {
1369 control = __get_control(chan, skb->data + L2CAP_HDR_SIZE);
1370 control |= __set_txseq(chan, chan->next_tx_seq);
1371 __put_control(chan, control, skb->data + L2CAP_HDR_SIZE);
1372
1373 if (chan->fcs == L2CAP_FCS_CRC16) {
1374 fcs = crc16(0, (u8 *)skb->data,
1375 skb->len - L2CAP_FCS_SIZE);
1376 put_unaligned_le16(fcs,
1377 skb->data + skb->len - L2CAP_FCS_SIZE);
1378 }
1379
1380 l2cap_do_send(chan, skb);
1381
1382 chan->next_tx_seq = __next_seq(chan, chan->next_tx_seq);
1383 }
1384 }
1385
1386 static void l2cap_retransmit_one_frame(struct l2cap_chan *chan, u16 tx_seq)
1387 {
1388 struct sk_buff *skb, *tx_skb;
1389 u16 fcs;
1390 u32 control;
1391
1392 skb = skb_peek(&chan->tx_q);
1393 if (!skb)
1394 return;
1395
1396 while (bt_cb(skb)->tx_seq != tx_seq) {
1397 if (skb_queue_is_last(&chan->tx_q, skb))
1398 return;
1399
1400 skb = skb_queue_next(&chan->tx_q, skb);
1401 }
1402
1403 if (chan->remote_max_tx &&
1404 bt_cb(skb)->retries == chan->remote_max_tx) {
1405 l2cap_send_disconn_req(chan->conn, chan, ECONNABORTED);
1406 return;
1407 }
1408
1409 tx_skb = skb_clone(skb, GFP_ATOMIC);
1410 bt_cb(skb)->retries++;
1411
1412 control = __get_control(chan, tx_skb->data + L2CAP_HDR_SIZE);
1413 control &= __get_sar_mask(chan);
1414
1415 if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state))
1416 control |= __set_ctrl_final(chan);
1417
1418 control |= __set_reqseq(chan, chan->buffer_seq);
1419 control |= __set_txseq(chan, tx_seq);
1420
1421 __put_control(chan, control, tx_skb->data + L2CAP_HDR_SIZE);
1422
1423 if (chan->fcs == L2CAP_FCS_CRC16) {
1424 fcs = crc16(0, (u8 *)tx_skb->data,
1425 tx_skb->len - L2CAP_FCS_SIZE);
1426 put_unaligned_le16(fcs,
1427 tx_skb->data + tx_skb->len - L2CAP_FCS_SIZE);
1428 }
1429
1430 l2cap_do_send(chan, tx_skb);
1431 }
1432
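/* Transmit queued I-frames while the tx window allows: each frame is
 * cloned, gets the current reqseq/txseq (and the F-bit if pending), has
 * its FCS recomputed when CRC16 is enabled, and bumps unacked_frames on
 * its first transmission.
 */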
1433 static int l2cap_ertm_send(struct l2cap_chan *chan)
1434 {
1435 struct sk_buff *skb, *tx_skb;
1436 u16 fcs;
1437 u32 control;
1438 int nsent = 0;
1439
1440 if (chan->state != BT_CONNECTED)
1441 return -ENOTCONN;
1442
1443 while ((skb = chan->tx_send_head) && (!l2cap_tx_window_full(chan))) {
1444
1445 if (chan->remote_max_tx &&
1446 bt_cb(skb)->retries == chan->remote_max_tx) {
1447 l2cap_send_disconn_req(chan->conn, chan, ECONNABORTED);
1448 break;
1449 }
1450
1451 tx_skb = skb_clone(skb, GFP_ATOMIC);
1452
1453 bt_cb(skb)->retries++;
1454
1455 control = __get_control(chan, tx_skb->data + L2CAP_HDR_SIZE);
1456 control &= __get_sar_mask(chan);
1457
1458 if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state))
1459 control |= __set_ctrl_final(chan);
1460
1461 control |= __set_reqseq(chan, chan->buffer_seq);
1462 control |= __set_txseq(chan, chan->next_tx_seq);
1463
1464 __put_control(chan, control, tx_skb->data + L2CAP_HDR_SIZE);
1465
1466 if (chan->fcs == L2CAP_FCS_CRC16) {
1467 fcs = crc16(0, (u8 *)skb->data,
1468 tx_skb->len - L2CAP_FCS_SIZE);
1469 put_unaligned_le16(fcs, skb->data +
1470 tx_skb->len - L2CAP_FCS_SIZE);
1471 }
1472
1473 l2cap_do_send(chan, tx_skb);
1474
1475 __set_retrans_timer(chan);
1476
1477 bt_cb(skb)->tx_seq = chan->next_tx_seq;
1478
1479 chan->next_tx_seq = __next_seq(chan, chan->next_tx_seq);
1480
1481 if (bt_cb(skb)->retries == 1) {
1482 chan->unacked_frames++;
1483
1484 if (!nsent++)
1485 __clear_ack_timer(chan);
1486 }
1487
1488 chan->frames_sent++;
1489
1490 if (skb_queue_is_last(&chan->tx_q, skb))
1491 chan->tx_send_head = NULL;
1492 else
1493 chan->tx_send_head = skb_queue_next(&chan->tx_q, skb);
1494 }
1495
1496 return nsent;
1497 }
1498
1499 static int l2cap_retransmit_frames(struct l2cap_chan *chan)
1500 {
1501 int ret;
1502
1503 if (!skb_queue_empty(&chan->tx_q))
1504 chan->tx_send_head = chan->tx_q.next;
1505
1506 chan->next_tx_seq = chan->expected_ack_seq;
1507 ret = l2cap_ertm_send(chan);
1508 return ret;
1509 }
1510
1511 static void __l2cap_send_ack(struct l2cap_chan *chan)
1512 {
1513 u32 control = 0;
1514
1515 control |= __set_reqseq(chan, chan->buffer_seq);
1516
1517 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
1518 control |= __set_ctrl_super(chan, L2CAP_SUPER_RNR);
1519 set_bit(CONN_RNR_SENT, &chan->conn_state);
1520 l2cap_send_sframe(chan, control);
1521 return;
1522 }
1523
1524 if (l2cap_ertm_send(chan) > 0)
1525 return;
1526
1527 control |= __set_ctrl_super(chan, L2CAP_SUPER_RR);
1528 l2cap_send_sframe(chan, control);
1529 }
1530
1531 static void l2cap_send_ack(struct l2cap_chan *chan)
1532 {
1533 __clear_ack_timer(chan);
1534 __l2cap_send_ack(chan);
1535 }
1536
1537 static void l2cap_send_srejtail(struct l2cap_chan *chan)
1538 {
1539 struct srej_list *tail;
1540 u32 control;
1541
1542 control = __set_ctrl_super(chan, L2CAP_SUPER_SREJ);
1543 control |= __set_ctrl_final(chan);
1544
1545 tail = list_entry((&chan->srej_l)->prev, struct srej_list, list);
1546 control |= __set_reqseq(chan, tail->tx_seq);
1547
1548 l2cap_send_sframe(chan, control);
1549 }
1550
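/* Copy user data from the iovec into the skb: the first 'count' bytes go
 * into the main buffer, the remainder is split into frag_list skbs no
 * larger than the connection MTU. Returns bytes copied or a negative error.
 */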
1551 static inline int l2cap_skbuff_fromiovec(struct l2cap_chan *chan,
1552 struct msghdr *msg, int len,
1553 int count, struct sk_buff *skb)
1554 {
1555 struct l2cap_conn *conn = chan->conn;
1556 struct sk_buff **frag;
1557 int err, sent = 0;
1558
1559 if (memcpy_fromiovec(skb_put(skb, count), msg->msg_iov, count))
1560 return -EFAULT;
1561
1562 sent += count;
1563 len -= count;
1564
1565 /* Continuation fragments (no L2CAP header) */
1566 frag = &skb_shinfo(skb)->frag_list;
1567 while (len) {
1568 count = min_t(unsigned int, conn->mtu, len);
1569
1570 *frag = chan->ops->alloc_skb(chan, count,
1571 msg->msg_flags & MSG_DONTWAIT,
1572 &err);
1573
1574 if (!*frag)
1575 return err;
1576 if (memcpy_fromiovec(skb_put(*frag, count), msg->msg_iov, count))
1577 return -EFAULT;
1578
1579 (*frag)->priority = skb->priority;
1580
1581 sent += count;
1582 len -= count;
1583
1584 frag = &(*frag)->next;
1585 }
1586
1587 return sent;
1588 }
1589
1590 static struct sk_buff *l2cap_create_connless_pdu(struct l2cap_chan *chan,
1591 struct msghdr *msg, size_t len,
1592 u32 priority)
1593 {
1594 struct l2cap_conn *conn = chan->conn;
1595 struct sk_buff *skb;
1596 int err, count, hlen = L2CAP_HDR_SIZE + L2CAP_PSMLEN_SIZE;
1597 struct l2cap_hdr *lh;
1598
1599 BT_DBG("chan %p len %d priority %u", chan, (int)len, priority);
1600
1601 count = min_t(unsigned int, (conn->mtu - hlen), len);
1602
1603 skb = chan->ops->alloc_skb(chan, count + hlen,
1604 msg->msg_flags & MSG_DONTWAIT, &err);
1605
1606 if (!skb)
1607 return ERR_PTR(err);
1608
1609 skb->priority = priority;
1610
1611 /* Create L2CAP header */
1612 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
1613 lh->cid = cpu_to_le16(chan->dcid);
1614 lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
1615 put_unaligned_le16(chan->psm, skb_put(skb, 2));
1616
1617 err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
1618 if (unlikely(err < 0)) {
1619 kfree_skb(skb);
1620 return ERR_PTR(err);
1621 }
1622 return skb;
1623 }
1624
1625 static struct sk_buff *l2cap_create_basic_pdu(struct l2cap_chan *chan,
1626 struct msghdr *msg, size_t len,
1627 u32 priority)
1628 {
1629 struct l2cap_conn *conn = chan->conn;
1630 struct sk_buff *skb;
1631 int err, count, hlen = L2CAP_HDR_SIZE;
1632 struct l2cap_hdr *lh;
1633
1634 BT_DBG("chan %p len %d", chan, (int)len);
1635
1636 count = min_t(unsigned int, (conn->mtu - hlen), len);
1637
1638 skb = chan->ops->alloc_skb(chan, count + hlen,
1639 msg->msg_flags & MSG_DONTWAIT, &err);
1640
1641 if (!skb)
1642 return ERR_PTR(err);
1643
1644 skb->priority = priority;
1645
1646 /* Create L2CAP header */
1647 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
1648 lh->cid = cpu_to_le16(chan->dcid);
1649 lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
1650
1651 err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
1652 if (unlikely(err < 0)) {
1653 kfree_skb(skb);
1654 return ERR_PTR(err);
1655 }
1656 return skb;
1657 }
1658
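/* Build an ERTM/streaming I-frame: basic L2CAP header, enhanced or
 * extended control field, optional SDU length for the first segment,
 * payload, and a zeroed FCS placeholder that is filled in at send time.
 */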
1659 static struct sk_buff *l2cap_create_iframe_pdu(struct l2cap_chan *chan,
1660 struct msghdr *msg, size_t len,
1661 u32 control, u16 sdulen)
1662 {
1663 struct l2cap_conn *conn = chan->conn;
1664 struct sk_buff *skb;
1665 int err, count, hlen;
1666 struct l2cap_hdr *lh;
1667
1668 BT_DBG("chan %p len %d", chan, (int)len);
1669
1670 if (!conn)
1671 return ERR_PTR(-ENOTCONN);
1672
1673 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
1674 hlen = L2CAP_EXT_HDR_SIZE;
1675 else
1676 hlen = L2CAP_ENH_HDR_SIZE;
1677
1678 if (sdulen)
1679 hlen += L2CAP_SDULEN_SIZE;
1680
1681 if (chan->fcs == L2CAP_FCS_CRC16)
1682 hlen += L2CAP_FCS_SIZE;
1683
1684 count = min_t(unsigned int, (conn->mtu - hlen), len);
1685
1686 skb = chan->ops->alloc_skb(chan, count + hlen,
1687 msg->msg_flags & MSG_DONTWAIT, &err);
1688
1689 if (!skb)
1690 return ERR_PTR(err);
1691
1692 /* Create L2CAP header */
1693 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
1694 lh->cid = cpu_to_le16(chan->dcid);
1695 lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
1696
1697 __put_control(chan, control, skb_put(skb, __ctrl_size(chan)));
1698
1699 if (sdulen)
1700 put_unaligned_le16(sdulen, skb_put(skb, L2CAP_SDULEN_SIZE));
1701
1702 err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
1703 if (unlikely(err < 0)) {
1704 kfree_skb(skb);
1705 return ERR_PTR(err);
1706 }
1707
1708 if (chan->fcs == L2CAP_FCS_CRC16)
1709 put_unaligned_le16(0, skb_put(skb, L2CAP_FCS_SIZE));
1710
1711 bt_cb(skb)->retries = 0;
1712 return skb;
1713 }
1714
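/* Segment an SDU larger than the remote MPS: the first PDU is marked
 * SAR_START and carries the total SDU length, middle PDUs are CONTINUE,
 * the last one is END. The finished queue is spliced onto tx_q.
 */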
1715 static int l2cap_sar_segment_sdu(struct l2cap_chan *chan, struct msghdr *msg, size_t len)
1716 {
1717 struct sk_buff *skb;
1718 struct sk_buff_head sar_queue;
1719 u32 control;
1720 size_t size = 0;
1721
1722 skb_queue_head_init(&sar_queue);
1723 control = __set_ctrl_sar(chan, L2CAP_SAR_START);
1724 skb = l2cap_create_iframe_pdu(chan, msg, chan->remote_mps, control, len);
1725 if (IS_ERR(skb))
1726 return PTR_ERR(skb);
1727
1728 __skb_queue_tail(&sar_queue, skb);
1729 len -= chan->remote_mps;
1730 size += chan->remote_mps;
1731
1732 while (len > 0) {
1733 size_t buflen;
1734
1735 if (len > chan->remote_mps) {
1736 control = __set_ctrl_sar(chan, L2CAP_SAR_CONTINUE);
1737 buflen = chan->remote_mps;
1738 } else {
1739 control = __set_ctrl_sar(chan, L2CAP_SAR_END);
1740 buflen = len;
1741 }
1742
1743 skb = l2cap_create_iframe_pdu(chan, msg, buflen, control, 0);
1744 if (IS_ERR(skb)) {
1745 skb_queue_purge(&sar_queue);
1746 return PTR_ERR(skb);
1747 }
1748
1749 __skb_queue_tail(&sar_queue, skb);
1750 len -= buflen;
1751 size += buflen;
1752 }
1753 skb_queue_splice_tail(&sar_queue, &chan->tx_q);
1754 if (chan->tx_send_head == NULL)
1755 chan->tx_send_head = sar_queue.next;
1756
1757 return size;
1758 }
1759
1760 int l2cap_chan_send(struct l2cap_chan *chan, struct msghdr *msg, size_t len,
1761 u32 priority)
1762 {
1763 struct sk_buff *skb;
1764 u32 control;
1765 int err;
1766
1767 /* Connectionless channel */
1768 if (chan->chan_type == L2CAP_CHAN_CONN_LESS) {
1769 skb = l2cap_create_connless_pdu(chan, msg, len, priority);
1770 if (IS_ERR(skb))
1771 return PTR_ERR(skb);
1772
1773 l2cap_do_send(chan, skb);
1774 return len;
1775 }
1776
1777 switch (chan->mode) {
1778 case L2CAP_MODE_BASIC:
1779 /* Check outgoing MTU */
1780 if (len > chan->omtu)
1781 return -EMSGSIZE;
1782
1783 /* Create a basic PDU */
1784 skb = l2cap_create_basic_pdu(chan, msg, len, priority);
1785 if (IS_ERR(skb))
1786 return PTR_ERR(skb);
1787
1788 l2cap_do_send(chan, skb);
1789 err = len;
1790 break;
1791
1792 case L2CAP_MODE_ERTM:
1793 case L2CAP_MODE_STREAMING:
1794 /* Entire SDU fits into one PDU */
1795 if (len <= chan->remote_mps) {
1796 control = __set_ctrl_sar(chan, L2CAP_SAR_UNSEGMENTED);
1797 skb = l2cap_create_iframe_pdu(chan, msg, len, control,
1798 0);
1799 if (IS_ERR(skb))
1800 return PTR_ERR(skb);
1801
1802 __skb_queue_tail(&chan->tx_q, skb);
1803
1804 if (chan->tx_send_head == NULL)
1805 chan->tx_send_head = skb;
1806
1807 } else {
1808 			/* Segment SDU into multiple PDUs */
1809 err = l2cap_sar_segment_sdu(chan, msg, len);
1810 if (err < 0)
1811 return err;
1812 }
1813
1814 if (chan->mode == L2CAP_MODE_STREAMING) {
1815 l2cap_streaming_send(chan);
1816 err = len;
1817 break;
1818 }
1819
1820 if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state) &&
1821 test_bit(CONN_WAIT_F, &chan->conn_state)) {
1822 err = len;
1823 break;
1824 }
1825
1826 err = l2cap_ertm_send(chan);
1827 if (err >= 0)
1828 err = len;
1829
1830 break;
1831
1832 default:
1833 BT_DBG("bad state %1.1x", chan->mode);
1834 err = -EBADFD;
1835 }
1836
1837 return err;
1838 }
1839
1840 /* Copy frame to all raw sockets on that connection */
1841 static void l2cap_raw_recv(struct l2cap_conn *conn, struct sk_buff *skb)
1842 {
1843 struct sk_buff *nskb;
1844 struct l2cap_chan *chan;
1845
1846 BT_DBG("conn %p", conn);
1847
1848 mutex_lock(&conn->chan_lock);
1849
1850 list_for_each_entry(chan, &conn->chan_l, list) {
1851 struct sock *sk = chan->sk;
1852 if (chan->chan_type != L2CAP_CHAN_RAW)
1853 continue;
1854
1855 /* Don't send frame to the socket it came from */
1856 if (skb->sk == sk)
1857 continue;
1858 nskb = skb_clone(skb, GFP_ATOMIC);
1859 if (!nskb)
1860 continue;
1861
1862 if (chan->ops->recv(chan->data, nskb))
1863 kfree_skb(nskb);
1864 }
1865
1866 mutex_unlock(&conn->chan_lock);
1867 }
1868
1869 /* ---- L2CAP signalling commands ---- */
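/* Build a signalling PDU: L2CAP header (LE or BR/EDR signalling CID),
 * command header (code, ident, length) and payload, fragmented into
 * continuation skbs when it exceeds the connection MTU.
 */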
1870 static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
1871 u8 code, u8 ident, u16 dlen, void *data)
1872 {
1873 struct sk_buff *skb, **frag;
1874 struct l2cap_cmd_hdr *cmd;
1875 struct l2cap_hdr *lh;
1876 int len, count;
1877
1878 BT_DBG("conn %p, code 0x%2.2x, ident 0x%2.2x, len %d",
1879 conn, code, ident, dlen);
1880
1881 len = L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE + dlen;
1882 count = min_t(unsigned int, conn->mtu, len);
1883
1884 skb = bt_skb_alloc(count, GFP_ATOMIC);
1885 if (!skb)
1886 return NULL;
1887
1888 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
1889 lh->len = cpu_to_le16(L2CAP_CMD_HDR_SIZE + dlen);
1890
1891 if (conn->hcon->type == LE_LINK)
1892 lh->cid = cpu_to_le16(L2CAP_CID_LE_SIGNALING);
1893 else
1894 lh->cid = cpu_to_le16(L2CAP_CID_SIGNALING);
1895
1896 cmd = (struct l2cap_cmd_hdr *) skb_put(skb, L2CAP_CMD_HDR_SIZE);
1897 cmd->code = code;
1898 cmd->ident = ident;
1899 cmd->len = cpu_to_le16(dlen);
1900
1901 if (dlen) {
1902 count -= L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE;
1903 memcpy(skb_put(skb, count), data, count);
1904 data += count;
1905 }
1906
1907 len -= skb->len;
1908
1909 /* Continuation fragments (no L2CAP header) */
1910 frag = &skb_shinfo(skb)->frag_list;
1911 while (len) {
1912 count = min_t(unsigned int, conn->mtu, len);
1913
1914 *frag = bt_skb_alloc(count, GFP_ATOMIC);
1915 if (!*frag)
1916 goto fail;
1917
1918 memcpy(skb_put(*frag, count), data, count);
1919
1920 len -= count;
1921 data += count;
1922
1923 frag = &(*frag)->next;
1924 }
1925
1926 return skb;
1927
1928 fail:
1929 kfree_skb(skb);
1930 return NULL;
1931 }
1932
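/* Parse one configuration option (type, length, value TLV): 1-, 2- and
 * 4-byte values are decoded from little endian, longer options are
 * returned as a pointer. Advances *ptr and returns the option size.
 */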
1933 static inline int l2cap_get_conf_opt(void **ptr, int *type, int *olen, unsigned long *val)
1934 {
1935 struct l2cap_conf_opt *opt = *ptr;
1936 int len;
1937
1938 len = L2CAP_CONF_OPT_SIZE + opt->len;
1939 *ptr += len;
1940
1941 *type = opt->type;
1942 *olen = opt->len;
1943
1944 switch (opt->len) {
1945 case 1:
1946 *val = *((u8 *) opt->val);
1947 break;
1948
1949 case 2:
1950 *val = get_unaligned_le16(opt->val);
1951 break;
1952
1953 case 4:
1954 *val = get_unaligned_le32(opt->val);
1955 break;
1956
1957 default:
1958 *val = (unsigned long) opt->val;
1959 break;
1960 }
1961
1962 BT_DBG("type 0x%2.2x len %d val 0x%lx", *type, opt->len, *val);
1963 return len;
1964 }
1965
1966 static void l2cap_add_conf_opt(void **ptr, u8 type, u8 len, unsigned long val)
1967 {
1968 struct l2cap_conf_opt *opt = *ptr;
1969
1970 BT_DBG("type 0x%2.2x len %d val 0x%lx", type, len, val);
1971
1972 opt->type = type;
1973 opt->len = len;
1974
1975 switch (len) {
1976 case 1:
1977 *((u8 *) opt->val) = val;
1978 break;
1979
1980 case 2:
1981 put_unaligned_le16(val, opt->val);
1982 break;
1983
1984 case 4:
1985 put_unaligned_le32(val, opt->val);
1986 break;
1987
1988 default:
1989 memcpy(opt->val, (void *) val, len);
1990 break;
1991 }
1992
1993 *ptr += L2CAP_CONF_OPT_SIZE + len;
1994 }
1995
1996 static void l2cap_add_opt_efs(void **ptr, struct l2cap_chan *chan)
1997 {
1998 struct l2cap_conf_efs efs;
1999
2000 switch (chan->mode) {
2001 case L2CAP_MODE_ERTM:
2002 efs.id = chan->local_id;
2003 efs.stype = chan->local_stype;
2004 efs.msdu = cpu_to_le16(chan->local_msdu);
2005 efs.sdu_itime = cpu_to_le32(chan->local_sdu_itime);
2006 efs.acc_lat = cpu_to_le32(L2CAP_DEFAULT_ACC_LAT);
2007 efs.flush_to = cpu_to_le32(L2CAP_DEFAULT_FLUSH_TO);
2008 break;
2009
2010 case L2CAP_MODE_STREAMING:
2011 efs.id = 1;
2012 efs.stype = L2CAP_SERV_BESTEFFORT;
2013 efs.msdu = cpu_to_le16(chan->local_msdu);
2014 efs.sdu_itime = cpu_to_le32(chan->local_sdu_itime);
2015 efs.acc_lat = 0;
2016 efs.flush_to = 0;
2017 break;
2018
2019 default:
2020 return;
2021 }
2022
2023 l2cap_add_conf_opt(ptr, L2CAP_CONF_EFS, sizeof(efs),
2024 (unsigned long) &efs);
2025 }
2026
2027 static void l2cap_ack_timeout(struct work_struct *work)
2028 {
2029 struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
2030 ack_timer.work);
2031
2032 BT_DBG("chan %p", chan);
2033
2034 l2cap_chan_lock(chan);
2035
2036 __l2cap_send_ack(chan);
2037
2038 l2cap_chan_unlock(chan);
2039
2040 l2cap_chan_put(chan);
2041 }
2042
2043 static inline void l2cap_ertm_init(struct l2cap_chan *chan)
2044 {
2045 chan->expected_ack_seq = 0;
2046 chan->unacked_frames = 0;
2047 chan->buffer_seq = 0;
2048 chan->num_acked = 0;
2049 chan->frames_sent = 0;
2050
2051 INIT_DELAYED_WORK(&chan->retrans_timer, l2cap_retrans_timeout);
2052 INIT_DELAYED_WORK(&chan->monitor_timer, l2cap_monitor_timeout);
2053 INIT_DELAYED_WORK(&chan->ack_timer, l2cap_ack_timeout);
2054
2055 skb_queue_head_init(&chan->srej_q);
2056
2057 INIT_LIST_HEAD(&chan->srej_l);
2058 }
2059
2060 static inline __u8 l2cap_select_mode(__u8 mode, __u16 remote_feat_mask)
2061 {
2062 switch (mode) {
2063 case L2CAP_MODE_STREAMING:
2064 case L2CAP_MODE_ERTM:
2065 if (l2cap_mode_supported(mode, remote_feat_mask))
2066 return mode;
2067 /* fall through */
2068 default:
2069 return L2CAP_MODE_BASIC;
2070 }
2071 }
2072
2073 static inline bool __l2cap_ews_supported(struct l2cap_chan *chan)
2074 {
2075 return enable_hs && chan->conn->feat_mask & L2CAP_FEAT_EXT_WINDOW;
2076 }
2077
2078 static inline bool __l2cap_efs_supported(struct l2cap_chan *chan)
2079 {
2080 return enable_hs && chan->conn->feat_mask & L2CAP_FEAT_EXT_FLOW;
2081 }
2082
2083 static inline void l2cap_txwin_setup(struct l2cap_chan *chan)
2084 {
2085 if (chan->tx_win > L2CAP_DEFAULT_TX_WINDOW &&
2086 __l2cap_ews_supported(chan)) {
2087 /* use extended control field */
2088 set_bit(FLAG_EXT_CTRL, &chan->flags);
2089 chan->tx_win_max = L2CAP_DEFAULT_EXT_WINDOW;
2090 } else {
2091 chan->tx_win = min_t(u16, chan->tx_win,
2092 L2CAP_DEFAULT_TX_WINDOW);
2093 chan->tx_win_max = L2CAP_DEFAULT_TX_WINDOW;
2094 }
2095 }
2096
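/* Build our Configure Request: include the MTU option when it differs
 * from the default, an RFC option describing the selected mode, and, for
 * ERTM/streaming, optional EFS, FCS and extended-window options.
 */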
2097 static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data)
2098 {
2099 struct l2cap_conf_req *req = data;
2100 struct l2cap_conf_rfc rfc = { .mode = chan->mode };
2101 void *ptr = req->data;
2102 u16 size;
2103
2104 BT_DBG("chan %p", chan);
2105
2106 if (chan->num_conf_req || chan->num_conf_rsp)
2107 goto done;
2108
2109 switch (chan->mode) {
2110 case L2CAP_MODE_STREAMING:
2111 case L2CAP_MODE_ERTM:
2112 if (test_bit(CONF_STATE2_DEVICE, &chan->conf_state))
2113 break;
2114
2115 if (__l2cap_efs_supported(chan))
2116 set_bit(FLAG_EFS_ENABLE, &chan->flags);
2117
2118 /* fall through */
2119 default:
2120 chan->mode = l2cap_select_mode(rfc.mode, chan->conn->feat_mask);
2121 break;
2122 }
2123
2124 done:
2125 if (chan->imtu != L2CAP_DEFAULT_MTU)
2126 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->imtu);
2127
2128 switch (chan->mode) {
2129 case L2CAP_MODE_BASIC:
2130 if (!(chan->conn->feat_mask & L2CAP_FEAT_ERTM) &&
2131 !(chan->conn->feat_mask & L2CAP_FEAT_STREAMING))
2132 break;
2133
2134 rfc.mode = L2CAP_MODE_BASIC;
2135 rfc.txwin_size = 0;
2136 rfc.max_transmit = 0;
2137 rfc.retrans_timeout = 0;
2138 rfc.monitor_timeout = 0;
2139 rfc.max_pdu_size = 0;
2140
2141 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
2142 (unsigned long) &rfc);
2143 break;
2144
2145 case L2CAP_MODE_ERTM:
2146 rfc.mode = L2CAP_MODE_ERTM;
2147 rfc.max_transmit = chan->max_tx;
2148 rfc.retrans_timeout = 0;
2149 rfc.monitor_timeout = 0;
2150
2151 size = min_t(u16, L2CAP_DEFAULT_MAX_PDU_SIZE, chan->conn->mtu -
2152 L2CAP_EXT_HDR_SIZE -
2153 L2CAP_SDULEN_SIZE -
2154 L2CAP_FCS_SIZE);
2155 rfc.max_pdu_size = cpu_to_le16(size);
2156
2157 l2cap_txwin_setup(chan);
2158
2159 rfc.txwin_size = min_t(u16, chan->tx_win,
2160 L2CAP_DEFAULT_TX_WINDOW);
2161
2162 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
2163 (unsigned long) &rfc);
2164
2165 if (test_bit(FLAG_EFS_ENABLE, &chan->flags))
2166 l2cap_add_opt_efs(&ptr, chan);
2167
2168 if (!(chan->conn->feat_mask & L2CAP_FEAT_FCS))
2169 break;
2170
2171 if (chan->fcs == L2CAP_FCS_NONE ||
2172 test_bit(CONF_NO_FCS_RECV, &chan->conf_state)) {
2173 chan->fcs = L2CAP_FCS_NONE;
2174 l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1, chan->fcs);
2175 }
2176
2177 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
2178 l2cap_add_conf_opt(&ptr, L2CAP_CONF_EWS, 2,
2179 chan->tx_win);
2180 break;
2181
2182 case L2CAP_MODE_STREAMING:
2183 rfc.mode = L2CAP_MODE_STREAMING;
2184 rfc.txwin_size = 0;
2185 rfc.max_transmit = 0;
2186 rfc.retrans_timeout = 0;
2187 rfc.monitor_timeout = 0;
2188
2189 size = min_t(u16, L2CAP_DEFAULT_MAX_PDU_SIZE, chan->conn->mtu -
2190 L2CAP_EXT_HDR_SIZE -
2191 L2CAP_SDULEN_SIZE -
2192 L2CAP_FCS_SIZE);
2193 rfc.max_pdu_size = cpu_to_le16(size);
2194
2195 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
2196 (unsigned long) &rfc);
2197
2198 if (test_bit(FLAG_EFS_ENABLE, &chan->flags))
2199 l2cap_add_opt_efs(&ptr, chan);
2200
2201 if (!(chan->conn->feat_mask & L2CAP_FEAT_FCS))
2202 break;
2203
2204 if (chan->fcs == L2CAP_FCS_NONE ||
2205 test_bit(CONF_NO_FCS_RECV, &chan->conf_state)) {
2206 chan->fcs = L2CAP_FCS_NONE;
2207 l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1, chan->fcs);
2208 }
2209 break;
2210 }
2211
2212 req->dcid = cpu_to_le16(chan->dcid);
2213 req->flags = cpu_to_le16(0);
2214
2215 return ptr - data;
2216 }
2217
2218 static int l2cap_parse_conf_req(struct l2cap_chan *chan, void *data)
2219 {
2220 struct l2cap_conf_rsp *rsp = data;
2221 void *ptr = rsp->data;
2222 void *req = chan->conf_req;
2223 int len = chan->conf_len;
2224 int type, hint, olen;
2225 unsigned long val;
2226 struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
2227 struct l2cap_conf_efs efs;
2228 u8 remote_efs = 0;
2229 u16 mtu = L2CAP_DEFAULT_MTU;
2230 u16 result = L2CAP_CONF_SUCCESS;
2231 u16 size;
2232
2233 BT_DBG("chan %p", chan);
2234
2235 while (len >= L2CAP_CONF_OPT_SIZE) {
2236 len -= l2cap_get_conf_opt(&req, &type, &olen, &val);
2237
2238 hint = type & L2CAP_CONF_HINT;
2239 type &= L2CAP_CONF_MASK;
2240
2241 switch (type) {
2242 case L2CAP_CONF_MTU:
2243 mtu = val;
2244 break;
2245
2246 case L2CAP_CONF_FLUSH_TO:
2247 chan->flush_to = val;
2248 break;
2249
2250 case L2CAP_CONF_QOS:
2251 break;
2252
2253 case L2CAP_CONF_RFC:
2254 if (olen == sizeof(rfc))
2255 memcpy(&rfc, (void *) val, olen);
2256 break;
2257
2258 case L2CAP_CONF_FCS:
2259 if (val == L2CAP_FCS_NONE)
2260 set_bit(CONF_NO_FCS_RECV, &chan->conf_state);
2261 break;
2262
2263 case L2CAP_CONF_EFS:
2264 remote_efs = 1;
2265 if (olen == sizeof(efs))
2266 memcpy(&efs, (void *) val, olen);
2267 break;
2268
2269 case L2CAP_CONF_EWS:
2270 if (!enable_hs)
2271 return -ECONNREFUSED;
2272
2273 set_bit(FLAG_EXT_CTRL, &chan->flags);
2274 set_bit(CONF_EWS_RECV, &chan->conf_state);
2275 chan->tx_win_max = L2CAP_DEFAULT_EXT_WINDOW;
2276 chan->remote_tx_win = val;
2277 break;
2278
2279 default:
2280 if (hint)
2281 break;
2282
2283 result = L2CAP_CONF_UNKNOWN;
2284 *((u8 *) ptr++) = type;
2285 break;
2286 }
2287 }
2288
2289 if (chan->num_conf_rsp || chan->num_conf_req > 1)
2290 goto done;
2291
2292 switch (chan->mode) {
2293 case L2CAP_MODE_STREAMING:
2294 case L2CAP_MODE_ERTM:
2295 if (!test_bit(CONF_STATE2_DEVICE, &chan->conf_state)) {
2296 chan->mode = l2cap_select_mode(rfc.mode,
2297 chan->conn->feat_mask);
2298 break;
2299 }
2300
2301 if (remote_efs) {
2302 if (__l2cap_efs_supported(chan))
2303 set_bit(FLAG_EFS_ENABLE, &chan->flags);
2304 else
2305 return -ECONNREFUSED;
2306 }
2307
2308 if (chan->mode != rfc.mode)
2309 return -ECONNREFUSED;
2310
2311 break;
2312 }
2313
2314 done:
2315 if (chan->mode != rfc.mode) {
2316 result = L2CAP_CONF_UNACCEPT;
2317 rfc.mode = chan->mode;
2318
2319 if (chan->num_conf_rsp == 1)
2320 return -ECONNREFUSED;
2321
2322 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
2323 sizeof(rfc), (unsigned long) &rfc);
2324 }
2325
2326 if (result == L2CAP_CONF_SUCCESS) {
2327 /* Configure output options and let the other side know
2328 * which ones we don't like. */
2329
2330 if (mtu < L2CAP_DEFAULT_MIN_MTU)
2331 result = L2CAP_CONF_UNACCEPT;
2332 else {
2333 chan->omtu = mtu;
2334 set_bit(CONF_MTU_DONE, &chan->conf_state);
2335 }
2336 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->omtu);
2337
2338 if (remote_efs) {
2339 if (chan->local_stype != L2CAP_SERV_NOTRAFIC &&
2340 efs.stype != L2CAP_SERV_NOTRAFIC &&
2341 efs.stype != chan->local_stype) {
2342
2343 result = L2CAP_CONF_UNACCEPT;
2344
2345 if (chan->num_conf_req >= 1)
2346 return -ECONNREFUSED;
2347
2348 l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS,
2349 sizeof(efs),
2350 (unsigned long) &efs);
2351 } else {
2352 /* Send PENDING Conf Rsp */
2353 result = L2CAP_CONF_PENDING;
2354 set_bit(CONF_LOC_CONF_PEND, &chan->conf_state);
2355 }
2356 }
2357
2358 switch (rfc.mode) {
2359 case L2CAP_MODE_BASIC:
2360 chan->fcs = L2CAP_FCS_NONE;
2361 set_bit(CONF_MODE_DONE, &chan->conf_state);
2362 break;
2363
2364 case L2CAP_MODE_ERTM:
2365 if (!test_bit(CONF_EWS_RECV, &chan->conf_state))
2366 chan->remote_tx_win = rfc.txwin_size;
2367 else
2368 rfc.txwin_size = L2CAP_DEFAULT_TX_WINDOW;
2369
2370 chan->remote_max_tx = rfc.max_transmit;
2371
2372 size = min_t(u16, le16_to_cpu(rfc.max_pdu_size),
2373 chan->conn->mtu -
2374 L2CAP_EXT_HDR_SIZE -
2375 L2CAP_SDULEN_SIZE -
2376 L2CAP_FCS_SIZE);
2377 rfc.max_pdu_size = cpu_to_le16(size);
2378 chan->remote_mps = size;
2379
2380 rfc.retrans_timeout =
2381 cpu_to_le16(L2CAP_DEFAULT_RETRANS_TO);
2382 rfc.monitor_timeout =
2383 cpu_to_le16(L2CAP_DEFAULT_MONITOR_TO);
2384
2385 set_bit(CONF_MODE_DONE, &chan->conf_state);
2386
2387 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
2388 sizeof(rfc), (unsigned long) &rfc);
2389
2390 if (test_bit(FLAG_EFS_ENABLE, &chan->flags)) {
2391 chan->remote_id = efs.id;
2392 chan->remote_stype = efs.stype;
2393 chan->remote_msdu = le16_to_cpu(efs.msdu);
2394 chan->remote_flush_to =
2395 le32_to_cpu(efs.flush_to);
2396 chan->remote_acc_lat =
2397 le32_to_cpu(efs.acc_lat);
2398 chan->remote_sdu_itime =
2399 le32_to_cpu(efs.sdu_itime);
2400 l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS,
2401 sizeof(efs), (unsigned long) &efs);
2402 }
2403 break;
2404
2405 case L2CAP_MODE_STREAMING:
2406 size = min_t(u16, le16_to_cpu(rfc.max_pdu_size),
2407 chan->conn->mtu -
2408 L2CAP_EXT_HDR_SIZE -
2409 L2CAP_SDULEN_SIZE -
2410 L2CAP_FCS_SIZE);
2411 rfc.max_pdu_size = cpu_to_le16(size);
2412 chan->remote_mps = size;
2413
2414 set_bit(CONF_MODE_DONE, &chan->conf_state);
2415
2416 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
2417 sizeof(rfc), (unsigned long) &rfc);
2418
2419 break;
2420
2421 default:
2422 result = L2CAP_CONF_UNACCEPT;
2423
2424 memset(&rfc, 0, sizeof(rfc));
2425 rfc.mode = chan->mode;
2426 }
2427
2428 if (result == L2CAP_CONF_SUCCESS)
2429 set_bit(CONF_OUTPUT_DONE, &chan->conf_state);
2430 }
2431 rsp->scid = cpu_to_le16(chan->dcid);
2432 rsp->result = cpu_to_le16(result);
2433 rsp->flags = cpu_to_le16(0x0000);
2434
2435 return ptr - data;
2436 }
2437
2438 static int l2cap_parse_conf_rsp(struct l2cap_chan *chan, void *rsp, int len, void *data, u16 *result)
2439 {
2440 struct l2cap_conf_req *req = data;
2441 void *ptr = req->data;
2442 int type, olen;
2443 unsigned long val;
2444 struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
2445 struct l2cap_conf_efs efs;
2446
2447 BT_DBG("chan %p, rsp %p, len %d, req %p", chan, rsp, len, data);
2448
2449 while (len >= L2CAP_CONF_OPT_SIZE) {
2450 len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);
2451
2452 switch (type) {
2453 case L2CAP_CONF_MTU:
2454 if (val < L2CAP_DEFAULT_MIN_MTU) {
2455 *result = L2CAP_CONF_UNACCEPT;
2456 chan->imtu = L2CAP_DEFAULT_MIN_MTU;
2457 } else
2458 chan->imtu = val;
2459 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->imtu);
2460 break;
2461
2462 case L2CAP_CONF_FLUSH_TO:
2463 chan->flush_to = val;
2464 l2cap_add_conf_opt(&ptr, L2CAP_CONF_FLUSH_TO,
2465 2, chan->flush_to);
2466 break;
2467
2468 case L2CAP_CONF_RFC:
2469 if (olen == sizeof(rfc))
2470 memcpy(&rfc, (void *)val, olen);
2471
2472 if (test_bit(CONF_STATE2_DEVICE, &chan->conf_state) &&
2473 rfc.mode != chan->mode)
2474 return -ECONNREFUSED;
2475
2476 chan->fcs = 0;
2477
2478 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
2479 sizeof(rfc), (unsigned long) &rfc);
2480 break;
2481
2482 case L2CAP_CONF_EWS:
2483 chan->tx_win = min_t(u16, val,
2484 L2CAP_DEFAULT_EXT_WINDOW);
2485 l2cap_add_conf_opt(&ptr, L2CAP_CONF_EWS, 2,
2486 chan->tx_win);
2487 break;
2488
2489 case L2CAP_CONF_EFS:
2490 if (olen == sizeof(efs))
2491 memcpy(&efs, (void *)val, olen);
2492
2493 if (chan->local_stype != L2CAP_SERV_NOTRAFIC &&
2494 efs.stype != L2CAP_SERV_NOTRAFIC &&
2495 efs.stype != chan->local_stype)
2496 return -ECONNREFUSED;
2497
2498 l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS,
2499 sizeof(efs), (unsigned long) &efs);
2500 break;
2501 }
2502 }
2503
2504 if (chan->mode == L2CAP_MODE_BASIC && chan->mode != rfc.mode)
2505 return -ECONNREFUSED;
2506
2507 chan->mode = rfc.mode;
2508
2509 if (*result == L2CAP_CONF_SUCCESS || *result == L2CAP_CONF_PENDING) {
2510 switch (rfc.mode) {
2511 case L2CAP_MODE_ERTM:
2512 chan->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
2513 chan->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
2514 chan->mps = le16_to_cpu(rfc.max_pdu_size);
2515
2516 if (test_bit(FLAG_EFS_ENABLE, &chan->flags)) {
2517 chan->local_msdu = le16_to_cpu(efs.msdu);
2518 chan->local_sdu_itime =
2519 le32_to_cpu(efs.sdu_itime);
2520 chan->local_acc_lat = le32_to_cpu(efs.acc_lat);
2521 chan->local_flush_to =
2522 le32_to_cpu(efs.flush_to);
2523 }
2524 break;
2525
2526 case L2CAP_MODE_STREAMING:
2527 chan->mps = le16_to_cpu(rfc.max_pdu_size);
2528 }
2529 }
2530
2531 req->dcid = cpu_to_le16(chan->dcid);
2532 req->flags = cpu_to_le16(0x0000);
2533
2534 return ptr - data;
2535 }
2536
2537 static int l2cap_build_conf_rsp(struct l2cap_chan *chan, void *data, u16 result, u16 flags)
2538 {
2539 struct l2cap_conf_rsp *rsp = data;
2540 void *ptr = rsp->data;
2541
2542 BT_DBG("chan %p", chan);
2543
2544 rsp->scid = cpu_to_le16(chan->dcid);
2545 rsp->result = cpu_to_le16(result);
2546 rsp->flags = cpu_to_le16(flags);
2547
2548 return ptr - data;
2549 }
2550
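/* Send the Connect Response that was deferred (e.g. while waiting for
 * authorization) and, unless a Configure Request has already been sent
 * for this channel, start configuration.
 */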
2551 void __l2cap_connect_rsp_defer(struct l2cap_chan *chan)
2552 {
2553 struct l2cap_conn_rsp rsp;
2554 struct l2cap_conn *conn = chan->conn;
2555 u8 buf[128];
2556
2557 rsp.scid = cpu_to_le16(chan->dcid);
2558 rsp.dcid = cpu_to_le16(chan->scid);
2559 rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
2560 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
2561 l2cap_send_cmd(conn, chan->ident,
2562 L2CAP_CONN_RSP, sizeof(rsp), &rsp);
2563
2564 if (test_and_set_bit(CONF_REQ_SENT, &chan->conf_state))
2565 return;
2566
2567 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
2568 l2cap_build_conf_req(chan, buf), buf);
2569 chan->num_conf_req++;
2570 }
2571
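/* Pull the RFC option out of a final Configure Response so the locally
 * cached retransmission/monitor timeouts and MPS match what was actually
 * negotiated; if the remote omitted the option, fall back to the
 * defaults below.
 */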
2572 static void l2cap_conf_rfc_get(struct l2cap_chan *chan, void *rsp, int len)
2573 {
2574 int type, olen;
2575 unsigned long val;
2576 struct l2cap_conf_rfc rfc;
2577
2578 BT_DBG("chan %p, rsp %p, len %d", chan, rsp, len);
2579
2580 if ((chan->mode != L2CAP_MODE_ERTM) && (chan->mode != L2CAP_MODE_STREAMING))
2581 return;
2582
2583 while (len >= L2CAP_CONF_OPT_SIZE) {
2584 len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);
2585
2586 switch (type) {
2587 case L2CAP_CONF_RFC:
2588 if (olen == sizeof(rfc))
2589 memcpy(&rfc, (void *)val, olen);
2590 goto done;
2591 }
2592 }
2593
2594 /* Use sane default values in case a misbehaving remote device
2595 * did not send an RFC option.
2596 */
2597 rfc.mode = chan->mode;
2598 rfc.retrans_timeout = cpu_to_le16(L2CAP_DEFAULT_RETRANS_TO);
2599 rfc.monitor_timeout = cpu_to_le16(L2CAP_DEFAULT_MONITOR_TO);
2600 rfc.max_pdu_size = cpu_to_le16(chan->imtu);
2601
2602 BT_ERR("Expected RFC option was not found, using defaults");
2603
2604 done:
2605 switch (rfc.mode) {
2606 case L2CAP_MODE_ERTM:
2607 chan->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
2608 chan->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
2609 chan->mps = le16_to_cpu(rfc.max_pdu_size);
2610 break;
2611 case L2CAP_MODE_STREAMING:
2612 chan->mps = le16_to_cpu(rfc.max_pdu_size);
2613 }
2614 }
2615
2616 static inline int l2cap_command_rej(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2617 {
2618 struct l2cap_cmd_rej_unk *rej = (struct l2cap_cmd_rej_unk *) data;
2619
2620 if (rej->reason != L2CAP_REJ_NOT_UNDERSTOOD)
2621 return 0;
2622
2623 if ((conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) &&
2624 cmd->ident == conn->info_ident) {
2625 cancel_delayed_work(&conn->info_timer);
2626
2627 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
2628 conn->info_ident = 0;
2629
2630 l2cap_conn_start(conn);
2631 }
2632
2633 return 0;
2634 }
2635
2636 static inline int l2cap_connect_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2637 {
2638 struct l2cap_conn_req *req = (struct l2cap_conn_req *) data;
2639 struct l2cap_conn_rsp rsp;
2640 struct l2cap_chan *chan = NULL, *pchan;
2641 struct sock *parent, *sk = NULL;
2642 int result, status = L2CAP_CS_NO_INFO;
2643
2644 u16 dcid = 0, scid = __le16_to_cpu(req->scid);
2645 __le16 psm = req->psm;
2646
2647 BT_DBG("psm 0x%2.2x scid 0x%4.4x", psm, scid);
2648
2649 /* Check if we have a socket listening on this PSM */
2650 pchan = l2cap_global_chan_by_psm(BT_LISTEN, psm, conn->src);
2651 if (!pchan) {
2652 result = L2CAP_CR_BAD_PSM;
2653 goto sendresp;
2654 }
2655
2656 parent = pchan->sk;
2657
2658 mutex_lock(&conn->chan_lock);
2659 lock_sock(parent);
2660
2661 /* Check if the ACL is secure enough (if not SDP) */
2662 if (psm != cpu_to_le16(0x0001) &&
2663 !hci_conn_check_link_mode(conn->hcon)) {
2664 conn->disc_reason = HCI_ERROR_AUTH_FAILURE;
2665 result = L2CAP_CR_SEC_BLOCK;
2666 goto response;
2667 }
2668
2669 result = L2CAP_CR_NO_MEM;
2670
2671 /* Check for backlog size */
2672 if (sk_acceptq_is_full(parent)) {
2673 BT_DBG("backlog full %d", parent->sk_ack_backlog);
2674 goto response;
2675 }
2676
2677 chan = pchan->ops->new_connection(pchan->data);
2678 if (!chan)
2679 goto response;
2680
2681 sk = chan->sk;
2682
2683 /* Check if we already have a channel with that DCID */
2684 if (__l2cap_get_chan_by_dcid(conn, scid)) {
2685 sock_set_flag(sk, SOCK_ZAPPED);
2686 chan->ops->close(chan->data);
2687 goto response;
2688 }
2689
2690 hci_conn_hold(conn->hcon);
2691
2692 bacpy(&bt_sk(sk)->src, conn->src);
2693 bacpy(&bt_sk(sk)->dst, conn->dst);
2694 chan->psm = psm;
2695 chan->dcid = scid;
2696
2697 bt_accept_enqueue(parent, sk);
2698
2699 __l2cap_chan_add(conn, chan);
2700
2701 dcid = chan->scid;
2702
2703 __set_chan_timer(chan, sk->sk_sndtimeo);
2704
2705 chan->ident = cmd->ident;
2706
2707 if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE) {
2708 if (l2cap_chan_check_security(chan)) {
2709 if (bt_sk(sk)->defer_setup) {
2710 __l2cap_state_change(chan, BT_CONNECT2);
2711 result = L2CAP_CR_PEND;
2712 status = L2CAP_CS_AUTHOR_PEND;
2713 parent->sk_data_ready(parent, 0);
2714 } else {
2715 __l2cap_state_change(chan, BT_CONFIG);
2716 result = L2CAP_CR_SUCCESS;
2717 status = L2CAP_CS_NO_INFO;
2718 }
2719 } else {
2720 __l2cap_state_change(chan, BT_CONNECT2);
2721 result = L2CAP_CR_PEND;
2722 status = L2CAP_CS_AUTHEN_PEND;
2723 }
2724 } else {
2725 __l2cap_state_change(chan, BT_CONNECT2);
2726 result = L2CAP_CR_PEND;
2727 status = L2CAP_CS_NO_INFO;
2728 }
2729
2730 response:
2731 release_sock(parent);
2732 mutex_unlock(&conn->chan_lock);
2733
2734 sendresp:
2735 rsp.scid = cpu_to_le16(scid);
2736 rsp.dcid = cpu_to_le16(dcid);
2737 rsp.result = cpu_to_le16(result);
2738 rsp.status = cpu_to_le16(status);
2739 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONN_RSP, sizeof(rsp), &rsp);
2740
2741 if (result == L2CAP_CR_PEND && status == L2CAP_CS_NO_INFO) {
2742 struct l2cap_info_req info;
2743 info.type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
2744
2745 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
2746 conn->info_ident = l2cap_get_ident(conn);
2747
2748 schedule_delayed_work(&conn->info_timer, L2CAP_INFO_TIMEOUT);
2749
2750 l2cap_send_cmd(conn, conn->info_ident,
2751 L2CAP_INFO_REQ, sizeof(info), &info);
2752 }
2753
2754 if (chan && !test_bit(CONF_REQ_SENT, &chan->conf_state) &&
2755 result == L2CAP_CR_SUCCESS) {
2756 u8 buf[128];
2757 set_bit(CONF_REQ_SENT, &chan->conf_state);
2758 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
2759 l2cap_build_conf_req(chan, buf), buf);
2760 chan->num_conf_req++;
2761 }
2762
2763 return 0;
2764 }
2765
2766 static inline int l2cap_connect_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2767 {
2768 struct l2cap_conn_rsp *rsp = (struct l2cap_conn_rsp *) data;
2769 u16 scid, dcid, result, status;
2770 struct l2cap_chan *chan;
2771 u8 req[128];
2772 int err;
2773
2774 scid = __le16_to_cpu(rsp->scid);
2775 dcid = __le16_to_cpu(rsp->dcid);
2776 result = __le16_to_cpu(rsp->result);
2777 status = __le16_to_cpu(rsp->status);
2778
2779 BT_DBG("dcid 0x%4.4x scid 0x%4.4x result 0x%2.2x status 0x%2.2x",
2780 dcid, scid, result, status);
2781
2782 mutex_lock(&conn->chan_lock);
2783
2784 if (scid) {
2785 chan = __l2cap_get_chan_by_scid(conn, scid);
2786 if (!chan) {
2787 err = -EFAULT;
2788 goto unlock;
2789 }
2790 } else {
2791 chan = __l2cap_get_chan_by_ident(conn, cmd->ident);
2792 if (!chan) {
2793 err = -EFAULT;
2794 goto unlock;
2795 }
2796 }
2797
2798 err = 0;
2799
2800 l2cap_chan_lock(chan);
2801
2802 switch (result) {
2803 case L2CAP_CR_SUCCESS:
2804 l2cap_state_change(chan, BT_CONFIG);
2805 chan->ident = 0;
2806 chan->dcid = dcid;
2807 clear_bit(CONF_CONNECT_PEND, &chan->conf_state);
2808
2809 if (test_and_set_bit(CONF_REQ_SENT, &chan->conf_state))
2810 break;
2811
2812 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
2813 l2cap_build_conf_req(chan, req), req);
2814 chan->num_conf_req++;
2815 break;
2816
2817 case L2CAP_CR_PEND:
2818 set_bit(CONF_CONNECT_PEND, &chan->conf_state);
2819 break;
2820
2821 default:
2822 l2cap_chan_del(chan, ECONNREFUSED);
2823 break;
2824 }
2825
2826 l2cap_chan_unlock(chan);
2827
2828 unlock:
2829 mutex_unlock(&conn->chan_lock);
2830
2831 return err;
2832 }
2833
2834 static inline void set_default_fcs(struct l2cap_chan *chan)
2835 {
2836 /* FCS is enabled only in ERTM or streaming mode, if one or both
2837 * sides request it.
2838 */
2839 if (chan->mode != L2CAP_MODE_ERTM && chan->mode != L2CAP_MODE_STREAMING)
2840 chan->fcs = L2CAP_FCS_NONE;
2841 else if (!test_bit(CONF_NO_FCS_RECV, &chan->conf_state))
2842 chan->fcs = L2CAP_FCS_CRC16;
2843 }
2844
2845 static inline int l2cap_config_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u16 cmd_len, u8 *data)
2846 {
2847 struct l2cap_conf_req *req = (struct l2cap_conf_req *) data;
2848 u16 dcid, flags;
2849 u8 rsp[64];
2850 struct l2cap_chan *chan;
2851 int len;
2852
2853 dcid = __le16_to_cpu(req->dcid);
2854 flags = __le16_to_cpu(req->flags);
2855
2856 BT_DBG("dcid 0x%4.4x flags 0x%2.2x", dcid, flags);
2857
2858 chan = l2cap_get_chan_by_scid(conn, dcid);
2859 if (!chan)
2860 return -ENOENT;
2861
2862 l2cap_chan_lock(chan);
2863
2864 if (chan->state != BT_CONFIG && chan->state != BT_CONNECT2) {
2865 struct l2cap_cmd_rej_cid rej;
2866
2867 rej.reason = cpu_to_le16(L2CAP_REJ_INVALID_CID);
2868 rej.scid = cpu_to_le16(chan->scid);
2869 rej.dcid = cpu_to_le16(chan->dcid);
2870
2871 l2cap_send_cmd(conn, cmd->ident, L2CAP_COMMAND_REJ,
2872 sizeof(rej), &rej);
2873 goto unlock;
2874 }
2875
2876 /* Reject if config buffer is too small. */
2877 len = cmd_len - sizeof(*req);
2878 if (len < 0 || chan->conf_len + len > sizeof(chan->conf_req)) {
2879 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
2880 l2cap_build_conf_rsp(chan, rsp,
2881 L2CAP_CONF_REJECT, flags), rsp);
2882 goto unlock;
2883 }
2884
2885 /* Store config. */
2886 memcpy(chan->conf_req + chan->conf_len, req->data, len);
2887 chan->conf_len += len;
2888
2889 if (flags & 0x0001) {
2890 /* Incomplete config. Send empty response. */
2891 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
2892 l2cap_build_conf_rsp(chan, rsp,
2893 L2CAP_CONF_SUCCESS, 0x0001), rsp);
2894 goto unlock;
2895 }
2896
2897 /* Complete config. */
2898 len = l2cap_parse_conf_req(chan, rsp);
2899 if (len < 0) {
2900 l2cap_send_disconn_req(conn, chan, ECONNRESET);
2901 goto unlock;
2902 }
2903
2904 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP, len, rsp);
2905 chan->num_conf_rsp++;
2906
2907 /* Reset config buffer. */
2908 chan->conf_len = 0;
2909
2910 if (!test_bit(CONF_OUTPUT_DONE, &chan->conf_state))
2911 goto unlock;
2912
2913 if (test_bit(CONF_INPUT_DONE, &chan->conf_state)) {
2914 set_default_fcs(chan);
2915
2916 l2cap_state_change(chan, BT_CONNECTED);
2917
2918 chan->next_tx_seq = 0;
2919 chan->expected_tx_seq = 0;
2920 skb_queue_head_init(&chan->tx_q);
2921 if (chan->mode == L2CAP_MODE_ERTM)
2922 l2cap_ertm_init(chan);
2923
2924 l2cap_chan_ready(chan);
2925 goto unlock;
2926 }
2927
2928 if (!test_and_set_bit(CONF_REQ_SENT, &chan->conf_state)) {
2929 u8 buf[64];
2930 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
2931 l2cap_build_conf_req(chan, buf), buf);
2932 chan->num_conf_req++;
2933 }
2934
2935 /* Got Conf Rsp PENDING from remote side and assume we sent
2936 Conf Rsp PENDING in the code above */
2937 if (test_bit(CONF_REM_CONF_PEND, &chan->conf_state) &&
2938 test_bit(CONF_LOC_CONF_PEND, &chan->conf_state)) {
2939
2940 /* check compatibility */
2941
2942 clear_bit(CONF_LOC_CONF_PEND, &chan->conf_state);
2943 set_bit(CONF_OUTPUT_DONE, &chan->conf_state);
2944
2945 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
2946 l2cap_build_conf_rsp(chan, rsp,
2947 L2CAP_CONF_SUCCESS, 0x0000), rsp);
2948 }
2949
2950 unlock:
2951 l2cap_chan_unlock(chan);
2952 return 0;
2953 }
2954
2955 static inline int l2cap_config_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2956 {
2957 struct l2cap_conf_rsp *rsp = (struct l2cap_conf_rsp *)data;
2958 u16 scid, flags, result;
2959 struct l2cap_chan *chan;
2960 int len = __le16_to_cpu(cmd->len) - sizeof(*rsp);
2961
2962 scid = __le16_to_cpu(rsp->scid);
2963 flags = __le16_to_cpu(rsp->flags);
2964 result = __le16_to_cpu(rsp->result);
2965
2966 BT_DBG("scid 0x%4.4x flags 0x%2.2x result 0x%2.2x",
2967 scid, flags, result);
2968
2969 chan = l2cap_get_chan_by_scid(conn, scid);
2970 if (!chan)
2971 return 0;
2972
2973 l2cap_chan_lock(chan);
2974
2975 switch (result) {
2976 case L2CAP_CONF_SUCCESS:
2977 l2cap_conf_rfc_get(chan, rsp->data, len);
2978 clear_bit(CONF_REM_CONF_PEND, &chan->conf_state);
2979 break;
2980
2981 case L2CAP_CONF_PENDING:
2982 set_bit(CONF_REM_CONF_PEND, &chan->conf_state);
2983
2984 if (test_bit(CONF_LOC_CONF_PEND, &chan->conf_state)) {
2985 char buf[64];
2986
2987 len = l2cap_parse_conf_rsp(chan, rsp->data, len,
2988 buf, &result);
2989 if (len < 0) {
2990 l2cap_send_disconn_req(conn, chan, ECONNRESET);
2991 goto done;
2992 }
2993
2994 /* check compatibility */
2995
2996 clear_bit(CONF_LOC_CONF_PEND, &chan->conf_state);
2997 set_bit(CONF_OUTPUT_DONE, &chan->conf_state);
2998
2999 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
3000 l2cap_build_conf_rsp(chan, buf,
3001 L2CAP_CONF_SUCCESS, 0x0000), buf);
3002 }
3003 goto done;
3004
3005 case L2CAP_CONF_UNACCEPT:
3006 if (chan->num_conf_rsp <= L2CAP_CONF_MAX_CONF_RSP) {
3007 char req[64];
3008
3009 if (len > sizeof(req) - sizeof(struct l2cap_conf_req)) {
3010 l2cap_send_disconn_req(conn, chan, ECONNRESET);
3011 goto done;
3012 }
3013
3014 /* throw out any old stored conf requests */
3015 result = L2CAP_CONF_SUCCESS;
3016 len = l2cap_parse_conf_rsp(chan, rsp->data, len,
3017 req, &result);
3018 if (len < 0) {
3019 l2cap_send_disconn_req(conn, chan, ECONNRESET);
3020 goto done;
3021 }
3022
3023 l2cap_send_cmd(conn, l2cap_get_ident(conn),
3024 L2CAP_CONF_REQ, len, req);
3025 chan->num_conf_req++;
3026 if (result != L2CAP_CONF_SUCCESS)
3027 goto done;
3028 break;
3029 }
3030
3031 default:
3032 l2cap_chan_set_err(chan, ECONNRESET);
3033
3034 __set_chan_timer(chan, L2CAP_DISC_REJ_TIMEOUT);
3035 l2cap_send_disconn_req(conn, chan, ECONNRESET);
3036 goto done;
3037 }
3038
3039 if (flags & 0x01)
3040 goto done;
3041
3042 set_bit(CONF_INPUT_DONE, &chan->conf_state);
3043
3044 if (test_bit(CONF_OUTPUT_DONE, &chan->conf_state)) {
3045 set_default_fcs(chan);
3046
3047 l2cap_state_change(chan, BT_CONNECTED);
3048 chan->next_tx_seq = 0;
3049 chan->expected_tx_seq = 0;
3050 skb_queue_head_init(&chan->tx_q);
3051 if (chan->mode == L2CAP_MODE_ERTM)
3052 l2cap_ertm_init(chan);
3053
3054 l2cap_chan_ready(chan);
3055 }
3056
3057 done:
3058 l2cap_chan_unlock(chan);
3059 return 0;
3060 }
3061
3062 static inline int l2cap_disconnect_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
3063 {
3064 struct l2cap_disconn_req *req = (struct l2cap_disconn_req *) data;
3065 struct l2cap_disconn_rsp rsp;
3066 u16 dcid, scid;
3067 struct l2cap_chan *chan;
3068 struct sock *sk;
3069
3070 scid = __le16_to_cpu(req->scid);
3071 dcid = __le16_to_cpu(req->dcid);
3072
3073 BT_DBG("scid 0x%4.4x dcid 0x%4.4x", scid, dcid);
3074
3075 mutex_lock(&conn->chan_lock);
3076
3077 chan = __l2cap_get_chan_by_scid(conn, dcid);
3078 if (!chan) {
3079 mutex_unlock(&conn->chan_lock);
3080 return 0;
3081 }
3082
3083 l2cap_chan_lock(chan);
3084
3085 sk = chan->sk;
3086
3087 rsp.dcid = cpu_to_le16(chan->scid);
3088 rsp.scid = cpu_to_le16(chan->dcid);
3089 l2cap_send_cmd(conn, cmd->ident, L2CAP_DISCONN_RSP, sizeof(rsp), &rsp);
3090
3091 lock_sock(sk);
3092 sk->sk_shutdown = SHUTDOWN_MASK;
3093 release_sock(sk);
3094
3095 l2cap_chan_del(chan, ECONNRESET);
3096
3097 l2cap_chan_unlock(chan);
3098
3099 chan->ops->close(chan->data);
3100
3101 mutex_unlock(&conn->chan_lock);
3102
3103 return 0;
3104 }
3105
3106 static inline int l2cap_disconnect_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
3107 {
3108 struct l2cap_disconn_rsp *rsp = (struct l2cap_disconn_rsp *) data;
3109 u16 dcid, scid;
3110 struct l2cap_chan *chan;
3111
3112 scid = __le16_to_cpu(rsp->scid);
3113 dcid = __le16_to_cpu(rsp->dcid);
3114
3115 BT_DBG("dcid 0x%4.4x scid 0x%4.4x", dcid, scid);
3116
3117 mutex_lock(&conn->chan_lock);
3118
3119 chan = __l2cap_get_chan_by_scid(conn, scid);
3120 if (!chan) {
3121 mutex_unlock(&conn->chan_lock);
3122 return 0;
3123 }
3124
3125 l2cap_chan_lock(chan);
3126
3127 l2cap_chan_del(chan, 0);
3128
3129 l2cap_chan_unlock(chan);
3130
3131 chan->ops->close(chan->data);
3132
3133 mutex_unlock(&conn->chan_lock);
3134
3135 return 0;
3136 }
3137
3138 static inline int l2cap_information_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
3139 {
3140 struct l2cap_info_req *req = (struct l2cap_info_req *) data;
3141 u16 type;
3142
3143 type = __le16_to_cpu(req->type);
3144
3145 BT_DBG("type 0x%4.4x", type);
3146
3147 if (type == L2CAP_IT_FEAT_MASK) {
3148 u8 buf[8];
3149 u32 feat_mask = l2cap_feat_mask;
3150 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
3151 rsp->type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
3152 rsp->result = cpu_to_le16(L2CAP_IR_SUCCESS);
3153 if (!disable_ertm)
3154 feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING
3155 | L2CAP_FEAT_FCS;
3156 if (enable_hs)
3157 feat_mask |= L2CAP_FEAT_EXT_FLOW
3158 | L2CAP_FEAT_EXT_WINDOW;
3159
3160 put_unaligned_le32(feat_mask, rsp->data);
3161 l2cap_send_cmd(conn, cmd->ident,
3162 L2CAP_INFO_RSP, sizeof(buf), buf);
3163 } else if (type == L2CAP_IT_FIXED_CHAN) {
3164 u8 buf[12];
3165 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
3166
3167 if (enable_hs)
3168 l2cap_fixed_chan[0] |= L2CAP_FC_A2MP;
3169 else
3170 l2cap_fixed_chan[0] &= ~L2CAP_FC_A2MP;
3171
3172 rsp->type = cpu_to_le16(L2CAP_IT_FIXED_CHAN);
3173 rsp->result = cpu_to_le16(L2CAP_IR_SUCCESS);
3174 memcpy(rsp->data, l2cap_fixed_chan, sizeof(l2cap_fixed_chan));
3175 l2cap_send_cmd(conn, cmd->ident,
3176 L2CAP_INFO_RSP, sizeof(buf), buf);
3177 } else {
3178 struct l2cap_info_rsp rsp;
3179 rsp.type = cpu_to_le16(type);
3180 rsp.result = cpu_to_le16(L2CAP_IR_NOTSUPP);
3181 l2cap_send_cmd(conn, cmd->ident,
3182 L2CAP_INFO_RSP, sizeof(rsp), &rsp);
3183 }
3184
3185 return 0;
3186 }
3187
3188 static inline int l2cap_information_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
3189 {
3190 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) data;
3191 u16 type, result;
3192
3193 type = __le16_to_cpu(rsp->type);
3194 result = __le16_to_cpu(rsp->result);
3195
3196 BT_DBG("type 0x%4.4x result 0x%2.2x", type, result);
3197
3198 /* L2CAP Info req/rsp are not bound to any channel, so add extra checks */
3199 if (cmd->ident != conn->info_ident ||
3200 conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE)
3201 return 0;
3202
3203 cancel_delayed_work(&conn->info_timer);
3204
3205 if (result != L2CAP_IR_SUCCESS) {
3206 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
3207 conn->info_ident = 0;
3208
3209 l2cap_conn_start(conn);
3210
3211 return 0;
3212 }
3213
3214 switch (type) {
3215 case L2CAP_IT_FEAT_MASK:
3216 conn->feat_mask = get_unaligned_le32(rsp->data);
3217
3218 if (conn->feat_mask & L2CAP_FEAT_FIXED_CHAN) {
3219 struct l2cap_info_req req;
3220 req.type = cpu_to_le16(L2CAP_IT_FIXED_CHAN);
3221
3222 conn->info_ident = l2cap_get_ident(conn);
3223
3224 l2cap_send_cmd(conn, conn->info_ident,
3225 L2CAP_INFO_REQ, sizeof(req), &req);
3226 } else {
3227 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
3228 conn->info_ident = 0;
3229
3230 l2cap_conn_start(conn);
3231 }
3232 break;
3233
3234 case L2CAP_IT_FIXED_CHAN:
3235 conn->fixed_chan_mask = rsp->data[0];
3236 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
3237 conn->info_ident = 0;
3238
3239 l2cap_conn_start(conn);
3240 break;
3241 }
3242
3243 return 0;
3244 }
3245
3246 static inline int l2cap_create_channel_req(struct l2cap_conn *conn,
3247 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
3248 void *data)
3249 {
3250 struct l2cap_create_chan_req *req = data;
3251 struct l2cap_create_chan_rsp rsp;
3252 u16 psm, scid;
3253
3254 if (cmd_len != sizeof(*req))
3255 return -EPROTO;
3256
3257 if (!enable_hs)
3258 return -EINVAL;
3259
3260 psm = le16_to_cpu(req->psm);
3261 scid = le16_to_cpu(req->scid);
3262
3263 BT_DBG("psm %d, scid %d, amp_id %d", psm, scid, req->amp_id);
3264
3265 /* Placeholder: Always reject */
3266 rsp.dcid = 0;
3267 rsp.scid = cpu_to_le16(scid);
3268 rsp.result = L2CAP_CR_NO_MEM;
3269 rsp.status = L2CAP_CS_NO_INFO;
3270
3271 l2cap_send_cmd(conn, cmd->ident, L2CAP_CREATE_CHAN_RSP,
3272 sizeof(rsp), &rsp);
3273
3274 return 0;
3275 }
3276
3277 static inline int l2cap_create_channel_rsp(struct l2cap_conn *conn,
3278 struct l2cap_cmd_hdr *cmd, void *data)
3279 {
3280 BT_DBG("conn %p", conn);
3281
3282 return l2cap_connect_rsp(conn, cmd, data);
3283 }
3284
3285 static void l2cap_send_move_chan_rsp(struct l2cap_conn *conn, u8 ident,
3286 u16 icid, u16 result)
3287 {
3288 struct l2cap_move_chan_rsp rsp;
3289
3290 BT_DBG("icid %d, result %d", icid, result);
3291
3292 rsp.icid = cpu_to_le16(icid);
3293 rsp.result = cpu_to_le16(result);
3294
3295 l2cap_send_cmd(conn, ident, L2CAP_MOVE_CHAN_RSP, sizeof(rsp), &rsp);
3296 }
3297
3298 static void l2cap_send_move_chan_cfm(struct l2cap_conn *conn,
3299 struct l2cap_chan *chan, u16 icid, u16 result)
3300 {
3301 struct l2cap_move_chan_cfm cfm;
3302 u8 ident;
3303
3304 BT_DBG("icid %d, result %d", icid, result);
3305
3306 ident = l2cap_get_ident(conn);
3307 if (chan)
3308 chan->ident = ident;
3309
3310 cfm.icid = cpu_to_le16(icid);
3311 cfm.result = cpu_to_le16(result);
3312
3313 l2cap_send_cmd(conn, ident, L2CAP_MOVE_CHAN_CFM, sizeof(cfm), &cfm);
3314 }
3315
3316 static void l2cap_send_move_chan_cfm_rsp(struct l2cap_conn *conn, u8 ident,
3317 u16 icid)
3318 {
3319 struct l2cap_move_chan_cfm_rsp rsp;
3320
3321 BT_DBG("icid %d", icid);
3322
3323 rsp.icid = cpu_to_le16(icid);
3324 l2cap_send_cmd(conn, ident, L2CAP_MOVE_CHAN_CFM_RSP, sizeof(rsp), &rsp);
3325 }
3326
3327 static inline int l2cap_move_channel_req(struct l2cap_conn *conn,
3328 struct l2cap_cmd_hdr *cmd, u16 cmd_len, void *data)
3329 {
3330 struct l2cap_move_chan_req *req = data;
3331 u16 icid = 0;
3332 u16 result = L2CAP_MR_NOT_ALLOWED;
3333
3334 if (cmd_len != sizeof(*req))
3335 return -EPROTO;
3336
3337 icid = le16_to_cpu(req->icid);
3338
3339 BT_DBG("icid %d, dest_amp_id %d", icid, req->dest_amp_id);
3340
3341 if (!enable_hs)
3342 return -EINVAL;
3343
3344 /* Placeholder: Always refuse */
3345 l2cap_send_move_chan_rsp(conn, cmd->ident, icid, result);
3346
3347 return 0;
3348 }
3349
3350 static inline int l2cap_move_channel_rsp(struct l2cap_conn *conn,
3351 struct l2cap_cmd_hdr *cmd, u16 cmd_len, void *data)
3352 {
3353 struct l2cap_move_chan_rsp *rsp = data;
3354 u16 icid, result;
3355
3356 if (cmd_len != sizeof(*rsp))
3357 return -EPROTO;
3358
3359 icid = le16_to_cpu(rsp->icid);
3360 result = le16_to_cpu(rsp->result);
3361
3362 BT_DBG("icid %d, result %d", icid, result);
3363
3364 /* Placeholder: Always unconfirmed */
3365 l2cap_send_move_chan_cfm(conn, NULL, icid, L2CAP_MC_UNCONFIRMED);
3366
3367 return 0;
3368 }
3369
3370 static inline int l2cap_move_channel_confirm(struct l2cap_conn *conn,
3371 struct l2cap_cmd_hdr *cmd, u16 cmd_len, void *data)
3372 {
3373 struct l2cap_move_chan_cfm *cfm = data;
3374 u16 icid, result;
3375
3376 if (cmd_len != sizeof(*cfm))
3377 return -EPROTO;
3378
3379 icid = le16_to_cpu(cfm->icid);
3380 result = le16_to_cpu(cfm->result);
3381
3382 BT_DBG("icid %d, result %d", icid, result);
3383
3384 l2cap_send_move_chan_cfm_rsp(conn, cmd->ident, icid);
3385
3386 return 0;
3387 }
3388
3389 static inline int l2cap_move_channel_confirm_rsp(struct l2cap_conn *conn,
3390 struct l2cap_cmd_hdr *cmd, u16 cmd_len, void *data)
3391 {
3392 struct l2cap_move_chan_cfm_rsp *rsp = data;
3393 u16 icid;
3394
3395 if (cmd_len != sizeof(*rsp))
3396 return -EPROTO;
3397
3398 icid = le16_to_cpu(rsp->icid);
3399
3400 BT_DBG("icid %d", icid);
3401
3402 return 0;
3403 }
3404
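/* Sanity check LE connection parameters: the connection interval is in
 * units of 1.25 ms (7.5 ms to 4 s), the supervision timeout multiplier
 * is in units of 10 ms (100 ms to 32 s), and the slave latency must be
 * low enough that the supervision timeout can still be met.
 */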
3405 static inline int l2cap_check_conn_param(u16 min, u16 max, u16 latency,
3406 u16 to_multiplier)
3407 {
3408 u16 max_latency;
3409
3410 if (min > max || min < 6 || max > 3200)
3411 return -EINVAL;
3412
3413 if (to_multiplier < 10 || to_multiplier > 3200)
3414 return -EINVAL;
3415
3416 if (max >= to_multiplier * 8)
3417 return -EINVAL;
3418
3419 max_latency = (to_multiplier * 8 / max) - 1;
3420 if (latency > 499 || latency > max_latency)
3421 return -EINVAL;
3422
3423 return 0;
3424 }
3425
3426 static inline int l2cap_conn_param_update_req(struct l2cap_conn *conn,
3427 struct l2cap_cmd_hdr *cmd, u8 *data)
3428 {
3429 struct hci_conn *hcon = conn->hcon;
3430 struct l2cap_conn_param_update_req *req;
3431 struct l2cap_conn_param_update_rsp rsp;
3432 u16 min, max, latency, to_multiplier, cmd_len;
3433 int err;
3434
3435 if (!(hcon->link_mode & HCI_LM_MASTER))
3436 return -EINVAL;
3437
3438 cmd_len = __le16_to_cpu(cmd->len);
3439 if (cmd_len != sizeof(struct l2cap_conn_param_update_req))
3440 return -EPROTO;
3441
3442 req = (struct l2cap_conn_param_update_req *) data;
3443 min = __le16_to_cpu(req->min);
3444 max = __le16_to_cpu(req->max);
3445 latency = __le16_to_cpu(req->latency);
3446 to_multiplier = __le16_to_cpu(req->to_multiplier);
3447
3448 BT_DBG("min 0x%4.4x max 0x%4.4x latency: 0x%4.4x Timeout: 0x%4.4x",
3449 min, max, latency, to_multiplier);
3450
3451 memset(&rsp, 0, sizeof(rsp));
3452
3453 err = l2cap_check_conn_param(min, max, latency, to_multiplier);
3454 if (err)
3455 rsp.result = cpu_to_le16(L2CAP_CONN_PARAM_REJECTED);
3456 else
3457 rsp.result = cpu_to_le16(L2CAP_CONN_PARAM_ACCEPTED);
3458
3459 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONN_PARAM_UPDATE_RSP,
3460 sizeof(rsp), &rsp);
3461
3462 if (!err)
3463 hci_le_conn_update(hcon, min, max, latency, to_multiplier);
3464
3465 return 0;
3466 }
3467
3468 static inline int l2cap_bredr_sig_cmd(struct l2cap_conn *conn,
3469 struct l2cap_cmd_hdr *cmd, u16 cmd_len, u8 *data)
3470 {
3471 int err = 0;
3472
3473 switch (cmd->code) {
3474 case L2CAP_COMMAND_REJ:
3475 l2cap_command_rej(conn, cmd, data);
3476 break;
3477
3478 case L2CAP_CONN_REQ:
3479 err = l2cap_connect_req(conn, cmd, data);
3480 break;
3481
3482 case L2CAP_CONN_RSP:
3483 err = l2cap_connect_rsp(conn, cmd, data);
3484 break;
3485
3486 case L2CAP_CONF_REQ:
3487 err = l2cap_config_req(conn, cmd, cmd_len, data);
3488 break;
3489
3490 case L2CAP_CONF_RSP:
3491 err = l2cap_config_rsp(conn, cmd, data);
3492 break;
3493
3494 case L2CAP_DISCONN_REQ:
3495 err = l2cap_disconnect_req(conn, cmd, data);
3496 break;
3497
3498 case L2CAP_DISCONN_RSP:
3499 err = l2cap_disconnect_rsp(conn, cmd, data);
3500 break;
3501
3502 case L2CAP_ECHO_REQ:
3503 l2cap_send_cmd(conn, cmd->ident, L2CAP_ECHO_RSP, cmd_len, data);
3504 break;
3505
3506 case L2CAP_ECHO_RSP:
3507 break;
3508
3509 case L2CAP_INFO_REQ:
3510 err = l2cap_information_req(conn, cmd, data);
3511 break;
3512
3513 case L2CAP_INFO_RSP:
3514 err = l2cap_information_rsp(conn, cmd, data);
3515 break;
3516
3517 case L2CAP_CREATE_CHAN_REQ:
3518 err = l2cap_create_channel_req(conn, cmd, cmd_len, data);
3519 break;
3520
3521 case L2CAP_CREATE_CHAN_RSP:
3522 err = l2cap_create_channel_rsp(conn, cmd, data);
3523 break;
3524
3525 case L2CAP_MOVE_CHAN_REQ:
3526 err = l2cap_move_channel_req(conn, cmd, cmd_len, data);
3527 break;
3528
3529 case L2CAP_MOVE_CHAN_RSP:
3530 err = l2cap_move_channel_rsp(conn, cmd, cmd_len, data);
3531 break;
3532
3533 case L2CAP_MOVE_CHAN_CFM:
3534 err = l2cap_move_channel_confirm(conn, cmd, cmd_len, data);
3535 break;
3536
3537 case L2CAP_MOVE_CHAN_CFM_RSP:
3538 err = l2cap_move_channel_confirm_rsp(conn, cmd, cmd_len, data);
3539 break;
3540
3541 default:
3542 BT_ERR("Unknown BR/EDR signaling command 0x%2.2x", cmd->code);
3543 err = -EINVAL;
3544 break;
3545 }
3546
3547 return err;
3548 }
3549
3550 static inline int l2cap_le_sig_cmd(struct l2cap_conn *conn,
3551 struct l2cap_cmd_hdr *cmd, u8 *data)
3552 {
3553 switch (cmd->code) {
3554 case L2CAP_COMMAND_REJ:
3555 return 0;
3556
3557 case L2CAP_CONN_PARAM_UPDATE_REQ:
3558 return l2cap_conn_param_update_req(conn, cmd, data);
3559
3560 case L2CAP_CONN_PARAM_UPDATE_RSP:
3561 return 0;
3562
3563 default:
3564 BT_ERR("Unknown LE signaling command 0x%2.2x", cmd->code);
3565 return -EINVAL;
3566 }
3567 }
3568
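/* A signaling C-frame may carry several commands back to back. Walk the
 * payload one command at a time, dispatch to the LE or BR/EDR handler
 * according to the link type, and answer anything that fails with a
 * Command Reject.
 */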
3569 static inline void l2cap_sig_channel(struct l2cap_conn *conn,
3570 struct sk_buff *skb)
3571 {
3572 u8 *data = skb->data;
3573 int len = skb->len;
3574 struct l2cap_cmd_hdr cmd;
3575 int err;
3576
3577 l2cap_raw_recv(conn, skb);
3578
3579 while (len >= L2CAP_CMD_HDR_SIZE) {
3580 u16 cmd_len;
3581 memcpy(&cmd, data, L2CAP_CMD_HDR_SIZE);
3582 data += L2CAP_CMD_HDR_SIZE;
3583 len -= L2CAP_CMD_HDR_SIZE;
3584
3585 cmd_len = le16_to_cpu(cmd.len);
3586
3587 BT_DBG("code 0x%2.2x len %d id 0x%2.2x", cmd.code, cmd_len, cmd.ident);
3588
3589 if (cmd_len > len || !cmd.ident) {
3590 BT_DBG("corrupted command");
3591 break;
3592 }
3593
3594 if (conn->hcon->type == LE_LINK)
3595 err = l2cap_le_sig_cmd(conn, &cmd, data);
3596 else
3597 err = l2cap_bredr_sig_cmd(conn, &cmd, cmd_len, data);
3598
3599 if (err) {
3600 struct l2cap_cmd_rej_unk rej;
3601
3602 BT_ERR("Wrong link type (%d)", err);
3603
3604 /* FIXME: Map err to a valid reason */
3605 rej.reason = cpu_to_le16(L2CAP_REJ_NOT_UNDERSTOOD);
3606 l2cap_send_cmd(conn, cmd.ident, L2CAP_COMMAND_REJ, sizeof(rej), &rej);
3607 }
3608
3609 data += cmd_len;
3610 len -= cmd_len;
3611 }
3612
3613 kfree_skb(skb);
3614 }
3615
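/* Verify the CRC16 FCS on an ERTM or streaming frame. The FCS covers the
 * basic L2CAP header and the control field, both of which have already
 * been pulled from the skb, so hdr_size bytes are added back in front of
 * the payload before recomputing the checksum. The received FCS is
 * trimmed from the tail first and then read from just beyond the
 * shortened skb.
 */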
3616 static int l2cap_check_fcs(struct l2cap_chan *chan, struct sk_buff *skb)
3617 {
3618 u16 our_fcs, rcv_fcs;
3619 int hdr_size;
3620
3621 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
3622 hdr_size = L2CAP_EXT_HDR_SIZE;
3623 else
3624 hdr_size = L2CAP_ENH_HDR_SIZE;
3625
3626 if (chan->fcs == L2CAP_FCS_CRC16) {
3627 skb_trim(skb, skb->len - L2CAP_FCS_SIZE);
3628 rcv_fcs = get_unaligned_le16(skb->data + skb->len);
3629 our_fcs = crc16(0, skb->data - hdr_size, skb->len + hdr_size);
3630
3631 if (our_fcs != rcv_fcs)
3632 return -EBADMSG;
3633 }
3634 return 0;
3635 }
3636
3637 static inline void l2cap_send_i_or_rr_or_rnr(struct l2cap_chan *chan)
3638 {
3639 u32 control = 0;
3640
3641 chan->frames_sent = 0;
3642
3643 control |= __set_reqseq(chan, chan->buffer_seq);
3644
3645 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
3646 control |= __set_ctrl_super(chan, L2CAP_SUPER_RNR);
3647 l2cap_send_sframe(chan, control);
3648 set_bit(CONN_RNR_SENT, &chan->conn_state);
3649 }
3650
3651 if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state))
3652 l2cap_retransmit_frames(chan);
3653
3654 l2cap_ertm_send(chan);
3655
3656 if (!test_bit(CONN_LOCAL_BUSY, &chan->conn_state) &&
3657 chan->frames_sent == 0) {
3658 control |= __set_ctrl_super(chan, L2CAP_SUPER_RR);
3659 l2cap_send_sframe(chan, control);
3660 }
3661 }
3662
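/* Queue an out-of-sequence I-frame for later reassembly. The SREJ queue
 * is kept ordered by each frame's tx_seq offset from buffer_seq, and a
 * frame that duplicates a queued tx_seq is rejected with -EINVAL.
 */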
3663 static int l2cap_add_to_srej_queue(struct l2cap_chan *chan, struct sk_buff *skb, u16 tx_seq, u8 sar)
3664 {
3665 struct sk_buff *next_skb;
3666 int tx_seq_offset, next_tx_seq_offset;
3667
3668 bt_cb(skb)->tx_seq = tx_seq;
3669 bt_cb(skb)->sar = sar;
3670
3671 next_skb = skb_peek(&chan->srej_q);
3672
3673 tx_seq_offset = __seq_offset(chan, tx_seq, chan->buffer_seq);
3674
3675 while (next_skb) {
3676 if (bt_cb(next_skb)->tx_seq == tx_seq)
3677 return -EINVAL;
3678
3679 next_tx_seq_offset = __seq_offset(chan,
3680 bt_cb(next_skb)->tx_seq, chan->buffer_seq);
3681
3682 if (next_tx_seq_offset > tx_seq_offset) {
3683 __skb_queue_before(&chan->srej_q, next_skb, skb);
3684 return 0;
3685 }
3686
3687 if (skb_queue_is_last(&chan->srej_q, next_skb))
3688 next_skb = NULL;
3689 else
3690 next_skb = skb_queue_next(&chan->srej_q, next_skb);
3691 }
3692
3693 __skb_queue_tail(&chan->srej_q, skb);
3694
3695 return 0;
3696 }
3697
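/* Chain a continuation fragment onto the SDU being reassembled using the
 * skb frag_list, keeping a cached pointer to the last fragment so the
 * list never has to be walked.
 */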
3698 static void append_skb_frag(struct sk_buff *skb,
3699 struct sk_buff *new_frag, struct sk_buff **last_frag)
3700 {
3701 /* skb->len reflects data in skb as well as all fragments
3702 * skb->data_len reflects only data in fragments
3703 */
3704 if (!skb_has_frag_list(skb))
3705 skb_shinfo(skb)->frag_list = new_frag;
3706
3707 new_frag->next = NULL;
3708
3709 (*last_frag)->next = new_frag;
3710 *last_frag = new_frag;
3711
3712 skb->len += new_frag->len;
3713 skb->data_len += new_frag->len;
3714 skb->truesize += new_frag->truesize;
3715 }
3716
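/* Reassemble an SDU from its SAR fragments: an unsegmented frame is
 * delivered as-is, a start fragment records the announced SDU length,
 * and continuation/end fragments are chained onto chan->sdu until that
 * length has been received, at which point the whole SDU is passed up.
 * Any error discards the partially assembled SDU.
 */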
3717 static int l2cap_reassemble_sdu(struct l2cap_chan *chan, struct sk_buff *skb, u32 control)
3718 {
3719 int err = -EINVAL;
3720
3721 switch (__get_ctrl_sar(chan, control)) {
3722 case L2CAP_SAR_UNSEGMENTED:
3723 if (chan->sdu)
3724 break;
3725
3726 err = chan->ops->recv(chan->data, skb);
3727 break;
3728
3729 case L2CAP_SAR_START:
3730 if (chan->sdu)
3731 break;
3732
3733 chan->sdu_len = get_unaligned_le16(skb->data);
3734 skb_pull(skb, L2CAP_SDULEN_SIZE);
3735
3736 if (chan->sdu_len > chan->imtu) {
3737 err = -EMSGSIZE;
3738 break;
3739 }
3740
3741 if (skb->len >= chan->sdu_len)
3742 break;
3743
3744 chan->sdu = skb;
3745 chan->sdu_last_frag = skb;
3746
3747 skb = NULL;
3748 err = 0;
3749 break;
3750
3751 case L2CAP_SAR_CONTINUE:
3752 if (!chan->sdu)
3753 break;
3754
3755 append_skb_frag(chan->sdu, skb,
3756 &chan->sdu_last_frag);
3757 skb = NULL;
3758
3759 if (chan->sdu->len >= chan->sdu_len)
3760 break;
3761
3762 err = 0;
3763 break;
3764
3765 case L2CAP_SAR_END:
3766 if (!chan->sdu)
3767 break;
3768
3769 append_skb_frag(chan->sdu, skb,
3770 &chan->sdu_last_frag);
3771 skb = NULL;
3772
3773 if (chan->sdu->len != chan->sdu_len)
3774 break;
3775
3776 err = chan->ops->recv(chan->data, chan->sdu);
3777
3778 if (!err) {
3779 /* Reassembly complete */
3780 chan->sdu = NULL;
3781 chan->sdu_last_frag = NULL;
3782 chan->sdu_len = 0;
3783 }
3784 break;
3785 }
3786
3787 if (err) {
3788 kfree_skb(skb);
3789 kfree_skb(chan->sdu);
3790 chan->sdu = NULL;
3791 chan->sdu_last_frag = NULL;
3792 chan->sdu_len = 0;
3793 }
3794
3795 return err;
3796 }
3797
3798 static void l2cap_ertm_enter_local_busy(struct l2cap_chan *chan)
3799 {
3800 BT_DBG("chan %p, Enter local busy", chan);
3801
3802 set_bit(CONN_LOCAL_BUSY, &chan->conn_state);
3803
3804 __set_ack_timer(chan);
3805 }
3806
3807 static void l2cap_ertm_exit_local_busy(struct l2cap_chan *chan)
3808 {
3809 u32 control;
3810
3811 if (!test_bit(CONN_RNR_SENT, &chan->conn_state))
3812 goto done;
3813
3814 control = __set_reqseq(chan, chan->buffer_seq);
3815 control |= __set_ctrl_poll(chan);
3816 control |= __set_ctrl_super(chan, L2CAP_SUPER_RR);
3817 l2cap_send_sframe(chan, control);
3818 chan->retry_count = 1;
3819
3820 __clear_retrans_timer(chan);
3821 __set_monitor_timer(chan);
3822
3823 set_bit(CONN_WAIT_F, &chan->conn_state);
3824
3825 done:
3826 clear_bit(CONN_LOCAL_BUSY, &chan->conn_state);
3827 clear_bit(CONN_RNR_SENT, &chan->conn_state);
3828
3829 BT_DBG("chan %p, Exit local busy", chan);
3830 }
3831
3832 void l2cap_chan_busy(struct l2cap_chan *chan, int busy)
3833 {
3834 if (chan->mode == L2CAP_MODE_ERTM) {
3835 if (busy)
3836 l2cap_ertm_enter_local_busy(chan);
3837 else
3838 l2cap_ertm_exit_local_busy(chan);
3839 }
3840 }
3841
3842 static void l2cap_check_srej_gap(struct l2cap_chan *chan, u16 tx_seq)
3843 {
3844 struct sk_buff *skb;
3845 u32 control;
3846
3847 while ((skb = skb_peek(&chan->srej_q)) &&
3848 !test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
3849 int err;
3850
3851 if (bt_cb(skb)->tx_seq != tx_seq)
3852 break;
3853
3854 skb = skb_dequeue(&chan->srej_q);
3855 control = __set_ctrl_sar(chan, bt_cb(skb)->sar);
3856 err = l2cap_reassemble_sdu(chan, skb, control);
3857
3858 if (err < 0) {
3859 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
3860 break;
3861 }
3862
3863 chan->buffer_seq_srej = __next_seq(chan, chan->buffer_seq_srej);
3864 tx_seq = __next_seq(chan, tx_seq);
3865 }
3866 }
3867
3868 static void l2cap_resend_srejframe(struct l2cap_chan *chan, u16 tx_seq)
3869 {
3870 struct srej_list *l, *tmp;
3871 u32 control;
3872
3873 list_for_each_entry_safe(l, tmp, &chan->srej_l, list) {
3874 if (l->tx_seq == tx_seq) {
3875 list_del(&l->list);
3876 kfree(l);
3877 return;
3878 }
3879 control = __set_ctrl_super(chan, L2CAP_SUPER_SREJ);
3880 control |= __set_reqseq(chan, l->tx_seq);
3881 l2cap_send_sframe(chan, control);
3882 list_del(&l->list);
3883 list_add_tail(&l->list, &chan->srej_l);
3884 }
3885 }
3886
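/* Send an SREJ S-frame for every sequence number missing between
 * expected_tx_seq and the newly received tx_seq, remembering each one on
 * the srej_l list so the gap can be tracked until it is filled.
 */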
3887 static int l2cap_send_srejframe(struct l2cap_chan *chan, u16 tx_seq)
3888 {
3889 struct srej_list *new;
3890 u32 control;
3891
3892 while (tx_seq != chan->expected_tx_seq) {
3893 control = __set_ctrl_super(chan, L2CAP_SUPER_SREJ);
3894 control |= __set_reqseq(chan, chan->expected_tx_seq);
3895 l2cap_send_sframe(chan, control);
3896
3897 new = kzalloc(sizeof(struct srej_list), GFP_ATOMIC);
3898 if (!new)
3899 return -ENOMEM;
3900
3901 new->tx_seq = chan->expected_tx_seq;
3902
3903 chan->expected_tx_seq = __next_seq(chan, chan->expected_tx_seq);
3904
3905 list_add_tail(&new->list, &chan->srej_l);
3906 }
3907
3908 chan->expected_tx_seq = __next_seq(chan, chan->expected_tx_seq);
3909
3910 return 0;
3911 }
3912
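/* Handle a received I-frame: acknowledge everything up to its req_seq,
 * drop frames that fall outside the TX window or arrive while we are
 * locally busy, queue out-of-order frames for SREJ recovery, and push
 * in-sequence frames into SDU reassembly.
 */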
3913 static inline int l2cap_data_channel_iframe(struct l2cap_chan *chan, u32 rx_control, struct sk_buff *skb)
3914 {
3915 u16 tx_seq = __get_txseq(chan, rx_control);
3916 u16 req_seq = __get_reqseq(chan, rx_control);
3917 u8 sar = __get_ctrl_sar(chan, rx_control);
3918 int tx_seq_offset, expected_tx_seq_offset;
3919 int num_to_ack = (chan->tx_win/6) + 1;
3920 int err = 0;
3921
3922 BT_DBG("chan %p len %d tx_seq %d rx_control 0x%8.8x", chan, skb->len,
3923 tx_seq, rx_control);
3924
3925 if (__is_ctrl_final(chan, rx_control) &&
3926 test_bit(CONN_WAIT_F, &chan->conn_state)) {
3927 __clear_monitor_timer(chan);
3928 if (chan->unacked_frames > 0)
3929 __set_retrans_timer(chan);
3930 clear_bit(CONN_WAIT_F, &chan->conn_state);
3931 }
3932
3933 chan->expected_ack_seq = req_seq;
3934 l2cap_drop_acked_frames(chan);
3935
3936 tx_seq_offset = __seq_offset(chan, tx_seq, chan->buffer_seq);
3937
3938 /* invalid tx_seq */
3939 if (tx_seq_offset >= chan->tx_win) {
3940 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
3941 goto drop;
3942 }
3943
3944 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
3945 if (!test_bit(CONN_RNR_SENT, &chan->conn_state))
3946 l2cap_send_ack(chan);
3947 goto drop;
3948 }
3949
3950 if (tx_seq == chan->expected_tx_seq)
3951 goto expected;
3952
3953 if (test_bit(CONN_SREJ_SENT, &chan->conn_state)) {
3954 struct srej_list *first;
3955
3956 first = list_first_entry(&chan->srej_l,
3957 struct srej_list, list);
3958 if (tx_seq == first->tx_seq) {
3959 l2cap_add_to_srej_queue(chan, skb, tx_seq, sar);
3960 l2cap_check_srej_gap(chan, tx_seq);
3961
3962 list_del(&first->list);
3963 kfree(first);
3964
3965 if (list_empty(&chan->srej_l)) {
3966 chan->buffer_seq = chan->buffer_seq_srej;
3967 clear_bit(CONN_SREJ_SENT, &chan->conn_state);
3968 l2cap_send_ack(chan);
3969 BT_DBG("chan %p, Exit SREJ_SENT", chan);
3970 }
3971 } else {
3972 struct srej_list *l;
3973
3974 /* duplicated tx_seq */
3975 if (l2cap_add_to_srej_queue(chan, skb, tx_seq, sar) < 0)
3976 goto drop;
3977
3978 list_for_each_entry(l, &chan->srej_l, list) {
3979 if (l->tx_seq == tx_seq) {
3980 l2cap_resend_srejframe(chan, tx_seq);
3981 return 0;
3982 }
3983 }
3984
3985 err = l2cap_send_srejframe(chan, tx_seq);
3986 if (err < 0) {
3987 l2cap_send_disconn_req(chan->conn, chan, -err);
3988 return err;
3989 }
3990 }
3991 } else {
3992 expected_tx_seq_offset = __seq_offset(chan,
3993 chan->expected_tx_seq, chan->buffer_seq);
3994
3995 /* duplicated tx_seq */
3996 if (tx_seq_offset < expected_tx_seq_offset)
3997 goto drop;
3998
3999 set_bit(CONN_SREJ_SENT, &chan->conn_state);
4000
4001 BT_DBG("chan %p, Enter SREJ", chan);
4002
4003 INIT_LIST_HEAD(&chan->srej_l);
4004 chan->buffer_seq_srej = chan->buffer_seq;
4005
4006 __skb_queue_head_init(&chan->srej_q);
4007 l2cap_add_to_srej_queue(chan, skb, tx_seq, sar);
4008
4009 /* Set P-bit only if there are some I-frames to ack. */
4010 if (__clear_ack_timer(chan))
4011 set_bit(CONN_SEND_PBIT, &chan->conn_state);
4012
4013 err = l2cap_send_srejframe(chan, tx_seq);
4014 if (err < 0) {
4015 l2cap_send_disconn_req(chan->conn, chan, -err);
4016 return err;
4017 }
4018 }
4019 return 0;
4020
4021 expected:
4022 chan->expected_tx_seq = __next_seq(chan, chan->expected_tx_seq);
4023
4024 if (test_bit(CONN_SREJ_SENT, &chan->conn_state)) {
4025 bt_cb(skb)->tx_seq = tx_seq;
4026 bt_cb(skb)->sar = sar;
4027 __skb_queue_tail(&chan->srej_q, skb);
4028 return 0;
4029 }
4030
4031 err = l2cap_reassemble_sdu(chan, skb, rx_control);
4032 chan->buffer_seq = __next_seq(chan, chan->buffer_seq);
4033
4034 if (err < 0) {
4035 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
4036 return err;
4037 }
4038
4039 if (__is_ctrl_final(chan, rx_control)) {
4040 if (!test_and_clear_bit(CONN_REJ_ACT, &chan->conn_state))
4041 l2cap_retransmit_frames(chan);
4042 }
4043
4044
4045 chan->num_acked = (chan->num_acked + 1) % num_to_ack;
4046 if (chan->num_acked == num_to_ack - 1)
4047 l2cap_send_ack(chan);
4048 else
4049 __set_ack_timer(chan);
4050
4051 return 0;
4052
4053 drop:
4054 kfree_skb(skb);
4055 return 0;
4056 }
4057
4058 static inline void l2cap_data_channel_rrframe(struct l2cap_chan *chan, u32 rx_control)
4059 {
4060 BT_DBG("chan %p, req_seq %d ctrl 0x%8.8x", chan,
4061 __get_reqseq(chan, rx_control), rx_control);
4062
4063 chan->expected_ack_seq = __get_reqseq(chan, rx_control);
4064 l2cap_drop_acked_frames(chan);
4065
4066 if (__is_ctrl_poll(chan, rx_control)) {
4067 set_bit(CONN_SEND_FBIT, &chan->conn_state);
4068 if (test_bit(CONN_SREJ_SENT, &chan->conn_state)) {
4069 if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state) &&
4070 (chan->unacked_frames > 0))
4071 __set_retrans_timer(chan);
4072
4073 clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
4074 l2cap_send_srejtail(chan);
4075 } else {
4076 l2cap_send_i_or_rr_or_rnr(chan);
4077 }
4078
4079 } else if (__is_ctrl_final(chan, rx_control)) {
4080 clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
4081
4082 if (!test_and_clear_bit(CONN_REJ_ACT, &chan->conn_state))
4083 l2cap_retransmit_frames(chan);
4084
4085 } else {
4086 if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state) &&
4087 (chan->unacked_frames > 0))
4088 __set_retrans_timer(chan);
4089
4090 clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
4091 if (test_bit(CONN_SREJ_SENT, &chan->conn_state))
4092 l2cap_send_ack(chan);
4093 else
4094 l2cap_ertm_send(chan);
4095 }
4096 }
4097
4098 static inline void l2cap_data_channel_rejframe(struct l2cap_chan *chan, u32 rx_control)
4099 {
4100 u16 tx_seq = __get_reqseq(chan, rx_control);
4101
4102 BT_DBG("chan %p, req_seq %d ctrl 0x%8.8x", chan, tx_seq, rx_control);
4103
4104 clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
4105
4106 chan->expected_ack_seq = tx_seq;
4107 l2cap_drop_acked_frames(chan);
4108
4109 if (__is_ctrl_final(chan, rx_control)) {
4110 if (!test_and_clear_bit(CONN_REJ_ACT, &chan->conn_state))
4111 l2cap_retransmit_frames(chan);
4112 } else {
4113 l2cap_retransmit_frames(chan);
4114
4115 if (test_bit(CONN_WAIT_F, &chan->conn_state))
4116 set_bit(CONN_REJ_ACT, &chan->conn_state);
4117 }
4118 }
4119 static inline void l2cap_data_channel_srejframe(struct l2cap_chan *chan, u32 rx_control)
4120 {
4121 u16 tx_seq = __get_reqseq(chan, rx_control);
4122
4123 BT_DBG("chan %p, req_seq %d ctrl 0x%8.8x", chan, tx_seq, rx_control);
4124
4125 clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
4126
4127 if (__is_ctrl_poll(chan, rx_control)) {
4128 chan->expected_ack_seq = tx_seq;
4129 l2cap_drop_acked_frames(chan);
4130
4131 set_bit(CONN_SEND_FBIT, &chan->conn_state);
4132 l2cap_retransmit_one_frame(chan, tx_seq);
4133
4134 l2cap_ertm_send(chan);
4135
4136 if (test_bit(CONN_WAIT_F, &chan->conn_state)) {
4137 chan->srej_save_reqseq = tx_seq;
4138 set_bit(CONN_SREJ_ACT, &chan->conn_state);
4139 }
4140 } else if (__is_ctrl_final(chan, rx_control)) {
4141 if (test_bit(CONN_SREJ_ACT, &chan->conn_state) &&
4142 chan->srej_save_reqseq == tx_seq)
4143 clear_bit(CONN_SREJ_ACT, &chan->conn_state);
4144 else
4145 l2cap_retransmit_one_frame(chan, tx_seq);
4146 } else {
4147 l2cap_retransmit_one_frame(chan, tx_seq);
4148 if (test_bit(CONN_WAIT_F, &chan->conn_state)) {
4149 chan->srej_save_reqseq = tx_seq;
4150 set_bit(CONN_SREJ_ACT, &chan->conn_state);
4151 }
4152 }
4153 }
4154
4155 static inline void l2cap_data_channel_rnrframe(struct l2cap_chan *chan, u32 rx_control)
4156 {
4157 u16 tx_seq = __get_reqseq(chan, rx_control);
4158
4159 BT_DBG("chan %p, req_seq %d ctrl 0x%8.8x", chan, tx_seq, rx_control);
4160
4161 set_bit(CONN_REMOTE_BUSY, &chan->conn_state);
4162 chan->expected_ack_seq = tx_seq;
4163 l2cap_drop_acked_frames(chan);
4164
4165 if (__is_ctrl_poll(chan, rx_control))
4166 set_bit(CONN_SEND_FBIT, &chan->conn_state);
4167
4168 if (!test_bit(CONN_SREJ_SENT, &chan->conn_state)) {
4169 __clear_retrans_timer(chan);
4170 if (__is_ctrl_poll(chan, rx_control))
4171 l2cap_send_rr_or_rnr(chan, L2CAP_CTRL_FINAL);
4172 return;
4173 }
4174
4175 if (__is_ctrl_poll(chan, rx_control)) {
4176 l2cap_send_srejtail(chan);
4177 } else {
4178 rx_control = __set_ctrl_super(chan, L2CAP_SUPER_RR);
4179 l2cap_send_sframe(chan, rx_control);
4180 }
4181 }
4182
4183 static inline int l2cap_data_channel_sframe(struct l2cap_chan *chan, u32 rx_control, struct sk_buff *skb)
4184 {
4185 BT_DBG("chan %p rx_control 0x%8.8x len %d", chan, rx_control, skb->len);
4186
4187 if (__is_ctrl_final(chan, rx_control) &&
4188 test_bit(CONN_WAIT_F, &chan->conn_state)) {
4189 __clear_monitor_timer(chan);
4190 if (chan->unacked_frames > 0)
4191 __set_retrans_timer(chan);
4192 clear_bit(CONN_WAIT_F, &chan->conn_state);
4193 }
4194
4195 switch (__get_ctrl_super(chan, rx_control)) {
4196 case L2CAP_SUPER_RR:
4197 l2cap_data_channel_rrframe(chan, rx_control);
4198 break;
4199
4200 case L2CAP_SUPER_REJ:
4201 l2cap_data_channel_rejframe(chan, rx_control);
4202 break;
4203
4204 case L2CAP_SUPER_SREJ:
4205 l2cap_data_channel_srejframe(chan, rx_control);
4206 break;
4207
4208 case L2CAP_SUPER_RNR:
4209 l2cap_data_channel_rnrframe(chan, rx_control);
4210 break;
4211 }
4212
4213 kfree_skb(skb);
4214 return 0;
4215 }
4216
4217 static int l2cap_ertm_data_rcv(struct l2cap_chan *chan, struct sk_buff *skb)
4218 {
4219 u32 control;
4220 u16 req_seq;
4221 int len, next_tx_seq_offset, req_seq_offset;
4222
4223 control = __get_control(chan, skb->data);
4224 skb_pull(skb, __ctrl_size(chan));
4225 len = skb->len;
4226
4227 /*
4228 * We can simply drop a corrupted I-frame here: the receiver will
4229 * treat it as missing, start the normal recovery procedure and
4230 * request retransmission.
4231 */
4232 if (l2cap_check_fcs(chan, skb))
4233 goto drop;
4234
4235 if (__is_sar_start(chan, control) && !__is_sframe(chan, control))
4236 len -= L2CAP_SDULEN_SIZE;
4237
4238 if (chan->fcs == L2CAP_FCS_CRC16)
4239 len -= L2CAP_FCS_SIZE;
4240
4241 if (len > chan->mps) {
4242 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
4243 goto drop;
4244 }
4245
4246 req_seq = __get_reqseq(chan, control);
4247
4248 req_seq_offset = __seq_offset(chan, req_seq, chan->expected_ack_seq);
4249
4250 next_tx_seq_offset = __seq_offset(chan, chan->next_tx_seq,
4251 chan->expected_ack_seq);
4252
4253 /* check for invalid req-seq */
4254 if (req_seq_offset > next_tx_seq_offset) {
4255 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
4256 goto drop;
4257 }
4258
4259 if (!__is_sframe(chan, control)) {
4260 if (len < 0) {
4261 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
4262 goto drop;
4263 }
4264
4265 l2cap_data_channel_iframe(chan, control, skb);
4266 } else {
4267 if (len != 0) {
4268 BT_ERR("S-frame with nonzero data length %d", len);
4269 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
4270 goto drop;
4271 }
4272
4273 l2cap_data_channel_sframe(chan, control, skb);
4274 }
4275
4276 return 0;
4277
4278 drop:
4279 kfree_skb(skb);
4280 return 0;
4281 }
4282
4283 static inline int l2cap_data_channel(struct l2cap_conn *conn, u16 cid, struct sk_buff *skb)
4284 {
4285 struct l2cap_chan *chan;
4286 u32 control;
4287 u16 tx_seq;
4288 int len;
4289
4290 chan = l2cap_get_chan_by_scid(conn, cid);
4291 if (!chan) {
4292 BT_DBG("unknown cid 0x%4.4x", cid);
4293 /* Drop packet and return */
4294 kfree_skb(skb);
4295 return 0;
4296 }
4297
4298 l2cap_chan_lock(chan);
4299
4300 BT_DBG("chan %p, len %d", chan, skb->len);
4301
4302 if (chan->state != BT_CONNECTED)
4303 goto drop;
4304
4305 switch (chan->mode) {
4306 case L2CAP_MODE_BASIC:
4307 /* If the socket receive buffer overflows we drop data here,
4308 * which is *bad* because L2CAP has to be reliable.
4309 * But we don't have any other choice: L2CAP basic mode doesn't
4310 * provide a flow control mechanism. */
4311
4312 if (chan->imtu < skb->len)
4313 goto drop;
4314
4315 if (!chan->ops->recv(chan->data, skb))
4316 goto done;
4317 break;
4318
4319 case L2CAP_MODE_ERTM:
4320 l2cap_ertm_data_rcv(chan, skb);
4321
4322 goto done;
4323
4324 case L2CAP_MODE_STREAMING:
4325 control = __get_control(chan, skb->data);
4326 skb_pull(skb, __ctrl_size(chan));
4327 len = skb->len;
4328
4329 if (l2cap_check_fcs(chan, skb))
4330 goto drop;
4331
4332 if (__is_sar_start(chan, control))
4333 len -= L2CAP_SDULEN_SIZE;
4334
4335 if (chan->fcs == L2CAP_FCS_CRC16)
4336 len -= L2CAP_FCS_SIZE;
4337
4338 if (len > chan->mps || len < 0 || __is_sframe(chan, control))
4339 goto drop;
4340
4341 tx_seq = __get_txseq(chan, control);
4342
4343 if (chan->expected_tx_seq != tx_seq) {
4344 /* Frame(s) missing - must discard partial SDU */
4345 kfree_skb(chan->sdu);
4346 chan->sdu = NULL;
4347 chan->sdu_last_frag = NULL;
4348 chan->sdu_len = 0;
4349
4350 /* TODO: Notify userland of missing data */
4351 }
4352
4353 chan->expected_tx_seq = __next_seq(chan, tx_seq);
4354
4355 if (l2cap_reassemble_sdu(chan, skb, control) == -EMSGSIZE)
4356 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
4357
4358 goto done;
4359
4360 default:
4361 BT_DBG("chan %p: bad mode 0x%2.2x", chan, chan->mode);
4362 break;
4363 }
4364
4365 drop:
4366 kfree_skb(skb);
4367
4368 done:
4369 l2cap_chan_unlock(chan);
4370
4371 return 0;
4372 }
4373
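/* Connectionless reception: route the payload to the channel bound to the
 * given PSM, dropping it if no such channel exists, the channel is not
 * bound or connected, or the payload exceeds its incoming MTU.
 */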
4374 static inline int l2cap_conless_channel(struct l2cap_conn *conn, __le16 psm, struct sk_buff *skb)
4375 {
4376 struct l2cap_chan *chan;
4377
4378 chan = l2cap_global_chan_by_psm(0, psm, conn->src);
4379 if (!chan)
4380 goto drop;
4381
4382 BT_DBG("chan %p, len %d", chan, skb->len);
4383
4384 if (chan->state != BT_BOUND && chan->state != BT_CONNECTED)
4385 goto drop;
4386
4387 if (chan->imtu < skb->len)
4388 goto drop;
4389
4390 if (!chan->ops->recv(chan->data, skb))
4391 return 0;
4392
4393 drop:
4394 kfree_skb(skb);
4395
4396 return 0;
4397 }
4398
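/* ATT fixed channel reception on LE links: the same checks as the
 * connectionless case above, but the channel is looked up by CID.
 */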
4399 static inline int l2cap_att_channel(struct l2cap_conn *conn, __le16 cid, struct sk_buff *skb)
4400 {
4401 struct l2cap_chan *chan;
4402
4403 chan = l2cap_global_chan_by_scid(0, cid, conn->src);
4404 if (!chan)
4405 goto drop;
4406
4407 BT_DBG("chan %p, len %d", chan, skb->len);
4408
4409 if (chan->state != BT_BOUND && chan->state != BT_CONNECTED)
4410 goto drop;
4411
4412 if (chan->imtu < skb->len)
4413 goto drop;
4414
4415 if (!chan->ops->recv(chan->data, skb))
4416 return 0;
4417
4418 drop:
4419 kfree_skb(skb);
4420
4421 return 0;
4422 }
4423
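/* Demultiplex a complete L2CAP frame by CID: signalling, connectionless,
 * ATT and SMP traffic go to dedicated handlers, everything else is treated
 * as connection-oriented channel data.
 */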
4424 static void l2cap_recv_frame(struct l2cap_conn *conn, struct sk_buff *skb)
4425 {
4426 struct l2cap_hdr *lh = (void *) skb->data;
4427 u16 cid, len;
4428 __le16 psm;
4429
4430 skb_pull(skb, L2CAP_HDR_SIZE);
4431 cid = __le16_to_cpu(lh->cid);
4432 len = __le16_to_cpu(lh->len);
4433
4434 if (len != skb->len) {
4435 kfree_skb(skb);
4436 return;
4437 }
4438
4439 BT_DBG("len %d, cid 0x%4.4x", len, cid);
4440
4441 switch (cid) {
4442 case L2CAP_CID_LE_SIGNALING:
4443 case L2CAP_CID_SIGNALING:
4444 l2cap_sig_channel(conn, skb);
4445 break;
4446
4447 case L2CAP_CID_CONN_LESS:
4448 psm = get_unaligned_le16(skb->data);
4449 skb_pull(skb, 2);
4450 l2cap_conless_channel(conn, psm, skb);
4451 break;
4452
4453 case L2CAP_CID_LE_DATA:
4454 l2cap_att_channel(conn, cid, skb);
4455 break;
4456
4457 case L2CAP_CID_SMP:
4458 if (smp_sig_channel(conn, skb))
4459 l2cap_conn_del(conn->hcon, EACCES);
4460 break;
4461
4462 default:
4463 l2cap_data_channel(conn, cid, skb);
4464 break;
4465 }
4466 }
4467
4468 /* ---- L2CAP interface with lower layer (HCI) ---- */
4469
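/* Incoming ACL connection request: scan the listening channels bound to
 * this adapter (or to BDADDR_ANY) and report whether the connection should
 * be accepted and whether a role switch to master is desired.
 */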
4470 int l2cap_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr)
4471 {
4472 int exact = 0, lm1 = 0, lm2 = 0;
4473 struct l2cap_chan *c;
4474
4475 BT_DBG("hdev %s, bdaddr %s", hdev->name, batostr(bdaddr));
4476
4477 /* Find listening sockets and check their link_mode */
4478 read_lock(&chan_list_lock);
4479 list_for_each_entry(c, &chan_list, global_l) {
4480 struct sock *sk = c->sk;
4481
4482 if (c->state != BT_LISTEN)
4483 continue;
4484
4485 if (!bacmp(&bt_sk(sk)->src, &hdev->bdaddr)) {
4486 lm1 |= HCI_LM_ACCEPT;
4487 if (test_bit(FLAG_ROLE_SWITCH, &c->flags))
4488 lm1 |= HCI_LM_MASTER;
4489 exact++;
4490 } else if (!bacmp(&bt_sk(sk)->src, BDADDR_ANY)) {
4491 lm2 |= HCI_LM_ACCEPT;
4492 if (test_bit(FLAG_ROLE_SWITCH, &c->flags))
4493 lm2 |= HCI_LM_MASTER;
4494 }
4495 }
4496 read_unlock(&chan_list_lock);
4497
4498 return exact ? lm1 : lm2;
4499 }
4500
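/* ACL connection complete: on success set up the L2CAP connection and start
 * bringing its channels up, otherwise tear down any pending state.
 */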
4501 int l2cap_connect_cfm(struct hci_conn *hcon, u8 status)
4502 {
4503 struct l2cap_conn *conn;
4504
4505 BT_DBG("hcon %p bdaddr %s status %d", hcon, batostr(&hcon->dst), status);
4506
4507 if (!status) {
4508 conn = l2cap_conn_add(hcon, status);
4509 if (conn)
4510 l2cap_conn_ready(conn);
4511 } else
4512 l2cap_conn_del(hcon, bt_to_errno(status));
4513
4514 return 0;
4515 }
4516
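/* HCI asks for the reason to use when disconnecting: return the reason
 * recorded for this connection, defaulting to remote-user-terminated.
 */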
4517 int l2cap_disconn_ind(struct hci_conn *hcon)
4518 {
4519 struct l2cap_conn *conn = hcon->l2cap_data;
4520
4521 BT_DBG("hcon %p", hcon);
4522
4523 if (!conn)
4524 return HCI_ERROR_REMOTE_USER_TERM;
4525 return conn->disc_reason;
4526 }
4527
4528 int l2cap_disconn_cfm(struct hci_conn *hcon, u8 reason)
4529 {
4530 BT_DBG("hcon %p reason %d", hcon, reason);
4531
4532 l2cap_conn_del(hcon, bt_to_errno(reason));
4533 return 0;
4534 }
4535
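/* Handle an encryption change on a connection-oriented channel: when
 * encryption is lost, arm the encryption timeout for medium security and
 * close the channel for high security; when it is restored, clear the
 * pending timeout for medium security.
 */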
4536 static inline void l2cap_check_encryption(struct l2cap_chan *chan, u8 encrypt)
4537 {
4538 if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED)
4539 return;
4540
4541 if (encrypt == 0x00) {
4542 if (chan->sec_level == BT_SECURITY_MEDIUM) {
4543 __clear_chan_timer(chan);
4544 __set_chan_timer(chan, L2CAP_ENC_TIMEOUT);
4545 } else if (chan->sec_level == BT_SECURITY_HIGH)
4546 l2cap_chan_close(chan, ECONNREFUSED);
4547 } else {
4548 if (chan->sec_level == BT_SECURITY_MEDIUM)
4549 __clear_chan_timer(chan);
4550 }
4551 }
4552
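/* Security procedure (authentication/encryption) completed on the link:
 * walk all channels on the connection and, depending on their state and
 * the result, mark them ready, continue connection setup, answer a pending
 * connect request, or schedule their disconnection.
 */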
4553 int l2cap_security_cfm(struct hci_conn *hcon, u8 status, u8 encrypt)
4554 {
4555 struct l2cap_conn *conn = hcon->l2cap_data;
4556 struct l2cap_chan *chan;
4557
4558 if (!conn)
4559 return 0;
4560
4561 BT_DBG("conn %p", conn);
4562
4563 if (hcon->type == LE_LINK) {
4564 smp_distribute_keys(conn, 0);
4565 cancel_delayed_work(&conn->security_timer);
4566 }
4567
4568 mutex_lock(&conn->chan_lock);
4569
4570 list_for_each_entry(chan, &conn->chan_l, list) {
4571 l2cap_chan_lock(chan);
4572
4573 BT_DBG("chan->scid %d", chan->scid);
4574
4575 if (chan->scid == L2CAP_CID_LE_DATA) {
4576 if (!status && encrypt) {
4577 chan->sec_level = hcon->sec_level;
4578 l2cap_chan_ready(chan);
4579 }
4580
4581 l2cap_chan_unlock(chan);
4582 continue;
4583 }
4584
4585 if (test_bit(CONF_CONNECT_PEND, &chan->conf_state)) {
4586 l2cap_chan_unlock(chan);
4587 continue;
4588 }
4589
4590 if (!status && (chan->state == BT_CONNECTED ||
4591 chan->state == BT_CONFIG)) {
4592 l2cap_check_encryption(chan, encrypt);
4593 l2cap_chan_unlock(chan);
4594 continue;
4595 }
4596
4597 if (chan->state == BT_CONNECT) {
4598 if (!status) {
4599 l2cap_send_conn_req(chan);
4600 } else {
4601 __clear_chan_timer(chan);
4602 __set_chan_timer(chan, L2CAP_DISC_TIMEOUT);
4603 }
4604 } else if (chan->state == BT_CONNECT2) {
4605 struct sock *sk = chan->sk;
4606 struct l2cap_conn_rsp rsp;
4607 __u16 res, stat;
4608
4609 lock_sock(sk);
4610
4611 if (!status) {
4612 if (bt_sk(sk)->defer_setup) {
4613 struct sock *parent = bt_sk(sk)->parent;
4614 res = L2CAP_CR_PEND;
4615 stat = L2CAP_CS_AUTHOR_PEND;
4616 if (parent)
4617 parent->sk_data_ready(parent, 0);
4618 } else {
4619 __l2cap_state_change(chan, BT_CONFIG);
4620 res = L2CAP_CR_SUCCESS;
4621 stat = L2CAP_CS_NO_INFO;
4622 }
4623 } else {
4624 __l2cap_state_change(chan, BT_DISCONN);
4625 __set_chan_timer(chan, L2CAP_DISC_TIMEOUT);
4626 res = L2CAP_CR_SEC_BLOCK;
4627 stat = L2CAP_CS_NO_INFO;
4628 }
4629
4630 release_sock(sk);
4631
4632 rsp.scid = cpu_to_le16(chan->dcid);
4633 rsp.dcid = cpu_to_le16(chan->scid);
4634 rsp.result = cpu_to_le16(res);
4635 rsp.status = cpu_to_le16(stat);
4636 l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
4637 sizeof(rsp), &rsp);
4638 }
4639
4640 l2cap_chan_unlock(chan);
4641 }
4642
4643 mutex_unlock(&conn->chan_lock);
4644
4645 return 0;
4646 }
4647
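/* Entry point for ACL data from HCI: reassemble fragmented L2CAP frames
 * using the length from the Basic L2CAP header, sanity-check start and
 * continuation fragments, and pass complete frames to l2cap_recv_frame().
 */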
4648 int l2cap_recv_acldata(struct hci_conn *hcon, struct sk_buff *skb, u16 flags)
4649 {
4650 struct l2cap_conn *conn = hcon->l2cap_data;
4651
4652 if (!conn)
4653 conn = l2cap_conn_add(hcon, 0);
4654
4655 if (!conn)
4656 goto drop;
4657
4658 BT_DBG("conn %p len %d flags 0x%x", conn, skb->len, flags);
4659
4660 if (!(flags & ACL_CONT)) {
4661 struct l2cap_hdr *hdr;
4662 struct l2cap_chan *chan;
4663 u16 cid;
4664 int len;
4665
4666 if (conn->rx_len) {
4667 BT_ERR("Unexpected start frame (len %d)", skb->len);
4668 kfree_skb(conn->rx_skb);
4669 conn->rx_skb = NULL;
4670 conn->rx_len = 0;
4671 l2cap_conn_unreliable(conn, ECOMM);
4672 }
4673
4674 		/* A start fragment always begins with the Basic L2CAP header */
4675 if (skb->len < L2CAP_HDR_SIZE) {
4676 BT_ERR("Frame is too short (len %d)", skb->len);
4677 l2cap_conn_unreliable(conn, ECOMM);
4678 goto drop;
4679 }
4680
4681 hdr = (struct l2cap_hdr *) skb->data;
4682 len = __le16_to_cpu(hdr->len) + L2CAP_HDR_SIZE;
4683 cid = __le16_to_cpu(hdr->cid);
4684
4685 if (len == skb->len) {
4686 /* Complete frame received */
4687 l2cap_recv_frame(conn, skb);
4688 return 0;
4689 }
4690
4691 BT_DBG("Start: total len %d, frag len %d", len, skb->len);
4692
4693 if (skb->len > len) {
4694 BT_ERR("Frame is too long (len %d, expected len %d)",
4695 skb->len, len);
4696 l2cap_conn_unreliable(conn, ECOMM);
4697 goto drop;
4698 }
4699
4700 chan = l2cap_get_chan_by_scid(conn, cid);
4701
4702 if (chan && chan->sk) {
4703 struct sock *sk = chan->sk;
4704 lock_sock(sk);
4705
4706 if (chan->imtu < len - L2CAP_HDR_SIZE) {
4707 BT_ERR("Frame exceeding recv MTU (len %d, "
4708 "MTU %d)", len,
4709 chan->imtu);
4710 release_sock(sk);
4711 l2cap_conn_unreliable(conn, ECOMM);
4712 goto drop;
4713 }
4714 release_sock(sk);
4715 }
4716
4717 /* Allocate skb for the complete frame (with header) */
4718 conn->rx_skb = bt_skb_alloc(len, GFP_ATOMIC);
4719 if (!conn->rx_skb)
4720 goto drop;
4721
4722 skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
4723 skb->len);
4724 conn->rx_len = len - skb->len;
4725 } else {
4726 BT_DBG("Cont: frag len %d (expecting %d)", skb->len, conn->rx_len);
4727
4728 if (!conn->rx_len) {
4729 BT_ERR("Unexpected continuation frame (len %d)", skb->len);
4730 l2cap_conn_unreliable(conn, ECOMM);
4731 goto drop;
4732 }
4733
4734 if (skb->len > conn->rx_len) {
4735 BT_ERR("Fragment is too long (len %d, expected %d)",
4736 skb->len, conn->rx_len);
4737 kfree_skb(conn->rx_skb);
4738 conn->rx_skb = NULL;
4739 conn->rx_len = 0;
4740 l2cap_conn_unreliable(conn, ECOMM);
4741 goto drop;
4742 }
4743
4744 skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
4745 skb->len);
4746 conn->rx_len -= skb->len;
4747
4748 if (!conn->rx_len) {
4749 /* Complete frame received */
4750 l2cap_recv_frame(conn, conn->rx_skb);
4751 conn->rx_skb = NULL;
4752 }
4753 }
4754
4755 drop:
4756 kfree_skb(skb);
4757 return 0;
4758 }
4759
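/* debugfs: print one line per known channel with its addresses, state,
 * PSM, CIDs, MTUs, security level and mode.
 */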
4760 static int l2cap_debugfs_show(struct seq_file *f, void *p)
4761 {
4762 struct l2cap_chan *c;
4763
4764 read_lock(&chan_list_lock);
4765
4766 list_for_each_entry(c, &chan_list, global_l) {
4767 struct sock *sk = c->sk;
4768
4769 seq_printf(f, "%s %s %d %d 0x%4.4x 0x%4.4x %d %d %d %d\n",
4770 batostr(&bt_sk(sk)->src),
4771 batostr(&bt_sk(sk)->dst),
4772 c->state, __le16_to_cpu(c->psm),
4773 c->scid, c->dcid, c->imtu, c->omtu,
4774 c->sec_level, c->mode);
4775 }
4776
4777 read_unlock(&chan_list_lock);
4778
4779 return 0;
4780 }
4781
4782 static int l2cap_debugfs_open(struct inode *inode, struct file *file)
4783 {
4784 return single_open(file, l2cap_debugfs_show, inode->i_private);
4785 }
4786
4787 static const struct file_operations l2cap_debugfs_fops = {
4788 .open = l2cap_debugfs_open,
4789 .read = seq_read,
4790 .llseek = seq_lseek,
4791 .release = single_release,
4792 };
4793
4794 static struct dentry *l2cap_debugfs;
4795
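/* Module init/exit: register the L2CAP socket interface and, when debugfs
 * is available, expose the channel list under the bluetooth directory.
 */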
4796 int __init l2cap_init(void)
4797 {
4798 int err;
4799
4800 err = l2cap_init_sockets();
4801 if (err < 0)
4802 return err;
4803
4804 if (bt_debugfs) {
4805 l2cap_debugfs = debugfs_create_file("l2cap", 0444,
4806 bt_debugfs, NULL, &l2cap_debugfs_fops);
4807 if (!l2cap_debugfs)
4808 BT_ERR("Failed to create L2CAP debug file");
4809 }
4810
4811 return 0;
4812 }
4813
4814 void l2cap_exit(void)
4815 {
4816 debugfs_remove(l2cap_debugfs);
4817 l2cap_cleanup_sockets();
4818 }
4819
4820 module_param(disable_ertm, bool, 0644);
4821 MODULE_PARM_DESC(disable_ertm, "Disable enhanced retransmission mode");
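/* Usage sketch (not part of the original file): assuming the usual build
 * where l2cap_core.c is linked into bluetooth.ko, the disable_ertm flag
 * above can be set at load time or toggled later through sysfs, e.g.
 *
 *   modprobe bluetooth disable_ertm=1
 *   echo 1 > /sys/module/bluetooth/parameters/disable_ertm
 */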