1 /*
2 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (C) 2000-2001 Qualcomm Incorporated
4 Copyright (C) 2009-2010 Gustavo F. Padovan <gustavo@padovan.org>
5 Copyright (C) 2010 Google Inc.
6
7 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
8
9 This program is free software; you can redistribute it and/or modify
10 it under the terms of the GNU General Public License version 2 as
11 published by the Free Software Foundation;
12
13 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
14 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
15 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
16 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
17 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
18 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
19 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
20 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
21
22 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
23 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
24 SOFTWARE IS DISCLAIMED.
25 */
26
27 /* Bluetooth L2CAP core. */
28
29 #include <linux/module.h>
30
31 #include <linux/types.h>
32 #include <linux/capability.h>
33 #include <linux/errno.h>
34 #include <linux/kernel.h>
35 #include <linux/sched.h>
36 #include <linux/slab.h>
37 #include <linux/poll.h>
38 #include <linux/fcntl.h>
39 #include <linux/init.h>
40 #include <linux/interrupt.h>
41 #include <linux/socket.h>
42 #include <linux/skbuff.h>
43 #include <linux/list.h>
44 #include <linux/device.h>
45 #include <linux/debugfs.h>
46 #include <linux/seq_file.h>
47 #include <linux/uaccess.h>
48 #include <linux/crc16.h>
49 #include <net/sock.h>
50
51 #include <asm/system.h>
52 #include <asm/unaligned.h>
53
54 #include <net/bluetooth/bluetooth.h>
55 #include <net/bluetooth/hci_core.h>
56 #include <net/bluetooth/l2cap.h>
57 #include <net/bluetooth/smp.h>
58
59 int disable_ertm;
60 int enable_hs;
61
62 static u32 l2cap_feat_mask = L2CAP_FEAT_FIXED_CHAN;
63 static u8 l2cap_fixed_chan[8] = { L2CAP_FC_L2CAP, };
64
65 static LIST_HEAD(chan_list);
66 static DEFINE_RWLOCK(chan_list_lock);
67
68 static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
69 u8 code, u8 ident, u16 dlen, void *data);
70 static void l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len,
71 void *data);
72 static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data);
73 static void l2cap_send_disconn_req(struct l2cap_conn *conn,
74 struct l2cap_chan *chan, int err);
75
76 static int l2cap_ertm_data_rcv(struct sock *sk, struct sk_buff *skb);
77
78 /* ---- L2CAP channels ---- */
79
80 static inline void chan_hold(struct l2cap_chan *c)
81 {
82 atomic_inc(&c->refcnt);
83 }
84
85 static inline void chan_put(struct l2cap_chan *c)
86 {
87 if (atomic_dec_and_test(&c->refcnt))
88 kfree(c);
89 }
90
91 static struct l2cap_chan *__l2cap_get_chan_by_dcid(struct l2cap_conn *conn, u16 cid)
92 {
93 struct l2cap_chan *c;
94
95 list_for_each_entry(c, &conn->chan_l, list) {
96 if (c->dcid == cid)
97 return c;
98 }
99 return NULL;
100
101 }
102
103 static struct l2cap_chan *__l2cap_get_chan_by_scid(struct l2cap_conn *conn, u16 cid)
104 {
105 struct l2cap_chan *c;
106
107 list_for_each_entry(c, &conn->chan_l, list) {
108 if (c->scid == cid)
109 return c;
110 }
111 return NULL;
112 }
113
114 /* Find channel with given SCID.
115 * Returns the channel with its socket locked. */
116 static struct l2cap_chan *l2cap_get_chan_by_scid(struct l2cap_conn *conn, u16 cid)
117 {
118 struct l2cap_chan *c;
119
120 read_lock(&conn->chan_lock);
121 c = __l2cap_get_chan_by_scid(conn, cid);
122 if (c)
123 bh_lock_sock(c->sk);
124 read_unlock(&conn->chan_lock);
125 return c;
126 }
127
128 static struct l2cap_chan *__l2cap_get_chan_by_ident(struct l2cap_conn *conn, u8 ident)
129 {
130 struct l2cap_chan *c;
131
132 list_for_each_entry(c, &conn->chan_l, list) {
133 if (c->ident == ident)
134 return c;
135 }
136 return NULL;
137 }
138
139 static inline struct l2cap_chan *l2cap_get_chan_by_ident(struct l2cap_conn *conn, u8 ident)
140 {
141 struct l2cap_chan *c;
142
143 read_lock(&conn->chan_lock);
144 c = __l2cap_get_chan_by_ident(conn, ident);
145 if (c)
146 bh_lock_sock(c->sk);
147 read_unlock(&conn->chan_lock);
148 return c;
149 }
150
151 static struct l2cap_chan *__l2cap_global_chan_by_addr(__le16 psm, bdaddr_t *src)
152 {
153 struct l2cap_chan *c;
154
155 list_for_each_entry(c, &chan_list, global_l) {
156 if (c->sport == psm && !bacmp(&bt_sk(c->sk)->src, src))
157 goto found;
158 }
159
160 c = NULL;
161 found:
162 return c;
163 }
164
165 int l2cap_add_psm(struct l2cap_chan *chan, bdaddr_t *src, __le16 psm)
166 {
167 int err;
168
169 write_lock_bh(&chan_list_lock);
170
171 if (psm && __l2cap_global_chan_by_addr(psm, src)) {
172 err = -EADDRINUSE;
173 goto done;
174 }
175
176 if (psm) {
177 chan->psm = psm;
178 chan->sport = psm;
179 err = 0;
180 } else {
181 u16 p;
182
183 err = -EINVAL;
184 for (p = 0x1001; p < 0x1100; p += 2)
185 if (!__l2cap_global_chan_by_addr(cpu_to_le16(p), src)) {
186 chan->psm = cpu_to_le16(p);
187 chan->sport = cpu_to_le16(p);
188 err = 0;
189 break;
190 }
191 }
192
193 done:
194 write_unlock_bh(&chan_list_lock);
195 return err;
196 }
197
198 int l2cap_add_scid(struct l2cap_chan *chan, __u16 scid)
199 {
200 write_lock_bh(&chan_list_lock);
201
202 chan->scid = scid;
203
204 write_unlock_bh(&chan_list_lock);
205
206 return 0;
207 }
208
209 static u16 l2cap_alloc_cid(struct l2cap_conn *conn)
210 {
211 u16 cid = L2CAP_CID_DYN_START;
212
213 for (; cid < L2CAP_CID_DYN_END; cid++) {
214 if (!__l2cap_get_chan_by_scid(conn, cid))
215 return cid;
216 }
217
218 return 0;
219 }
220
221 static void l2cap_set_timer(struct l2cap_chan *chan, struct timer_list *timer, long timeout)
222 {
223 BT_DBG("chan %p state %d timeout %ld", chan, chan->state, timeout);
224
225 if (!mod_timer(timer, jiffies + msecs_to_jiffies(timeout)))
226 chan_hold(chan);
227 }
228
229 static void l2cap_clear_timer(struct l2cap_chan *chan, struct timer_list *timer)
230 {
231 BT_DBG("chan %p state %d", chan, chan->state);
232
233 if (timer_pending(timer) && del_timer(timer))
234 chan_put(chan);
235 }
236
237 static void l2cap_state_change(struct l2cap_chan *chan, int state)
238 {
239 chan->state = state;
240 chan->ops->state_change(chan->data, state);
241 }
242
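/* Channel timer expiry: if the socket is currently owned by user space,
 * re-arm the timer and try again later; otherwise close the channel,
 * reporting ECONNREFUSED or ETIMEDOUT depending on its state. */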
243 static void l2cap_chan_timeout(unsigned long arg)
244 {
245 struct l2cap_chan *chan = (struct l2cap_chan *) arg;
246 struct sock *sk = chan->sk;
247 int reason;
248
249 BT_DBG("chan %p state %d", chan, chan->state);
250
251 bh_lock_sock(sk);
252
253 if (sock_owned_by_user(sk)) {
254 /* sk is owned by user. Try again later */
255 __set_chan_timer(chan, L2CAP_DISC_TIMEOUT);
256 bh_unlock_sock(sk);
257 chan_put(chan);
258 return;
259 }
260
261 if (chan->state == BT_CONNECTED || chan->state == BT_CONFIG)
262 reason = ECONNREFUSED;
263 else if (chan->state == BT_CONNECT &&
264 chan->sec_level != BT_SECURITY_SDP)
265 reason = ECONNREFUSED;
266 else
267 reason = ETIMEDOUT;
268
269 l2cap_chan_close(chan, reason);
270
271 bh_unlock_sock(sk);
272
273 chan->ops->close(chan->data);
274 chan_put(chan);
275 }
276
277 struct l2cap_chan *l2cap_chan_create(struct sock *sk)
278 {
279 struct l2cap_chan *chan;
280
281 chan = kzalloc(sizeof(*chan), GFP_ATOMIC);
282 if (!chan)
283 return NULL;
284
285 chan->sk = sk;
286
287 write_lock_bh(&chan_list_lock);
288 list_add(&chan->global_l, &chan_list);
289 write_unlock_bh(&chan_list_lock);
290
291 setup_timer(&chan->chan_timer, l2cap_chan_timeout, (unsigned long) chan);
292
293 chan->state = BT_OPEN;
294
295 atomic_set(&chan->refcnt, 1);
296
297 BT_DBG("sk %p chan %p", sk, chan);
298
299 return chan;
300 }
301
302 void l2cap_chan_destroy(struct l2cap_chan *chan)
303 {
304 write_lock_bh(&chan_list_lock);
305 list_del(&chan->global_l);
306 write_unlock_bh(&chan_list_lock);
307
308 chan_put(chan);
309 }
310
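/* Attach a channel to a connection: assign source/destination CIDs
 * according to the channel type (LE data, dynamically allocated,
 * connectionless or signalling), seed the default EFS parameters and
 * hold a channel reference while it sits on conn->chan_l. */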
311 static void __l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan)
312 {
313 BT_DBG("conn %p, psm 0x%2.2x, dcid 0x%4.4x", conn,
314 chan->psm, chan->dcid);
315
316 conn->disc_reason = HCI_ERROR_REMOTE_USER_TERM;
317
318 chan->conn = conn;
319
320 if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED) {
321 if (conn->hcon->type == LE_LINK) {
322 /* LE connection */
323 chan->omtu = L2CAP_LE_DEFAULT_MTU;
324 chan->scid = L2CAP_CID_LE_DATA;
325 chan->dcid = L2CAP_CID_LE_DATA;
326 } else {
327 /* Alloc CID for connection-oriented socket */
328 chan->scid = l2cap_alloc_cid(conn);
329 chan->omtu = L2CAP_DEFAULT_MTU;
330 }
331 } else if (chan->chan_type == L2CAP_CHAN_CONN_LESS) {
332 /* Connectionless socket */
333 chan->scid = L2CAP_CID_CONN_LESS;
334 chan->dcid = L2CAP_CID_CONN_LESS;
335 chan->omtu = L2CAP_DEFAULT_MTU;
336 } else {
337 /* Raw socket can send/recv signalling messages only */
338 chan->scid = L2CAP_CID_SIGNALING;
339 chan->dcid = L2CAP_CID_SIGNALING;
340 chan->omtu = L2CAP_DEFAULT_MTU;
341 }
342
343 chan->local_id = L2CAP_BESTEFFORT_ID;
344 chan->local_stype = L2CAP_SERV_BESTEFFORT;
345 chan->local_msdu = L2CAP_DEFAULT_MAX_SDU_SIZE;
346 chan->local_sdu_itime = L2CAP_DEFAULT_SDU_ITIME;
347 chan->local_acc_lat = L2CAP_DEFAULT_ACC_LAT;
348 chan->local_flush_to = L2CAP_DEFAULT_FLUSH_TO;
349
350 chan_hold(chan);
351
352 list_add(&chan->list, &conn->chan_l);
353 }
354
355 /* Delete channel.
356 * Must be called on the locked socket. */
357 static void l2cap_chan_del(struct l2cap_chan *chan, int err)
358 {
359 struct sock *sk = chan->sk;
360 struct l2cap_conn *conn = chan->conn;
361 struct sock *parent = bt_sk(sk)->parent;
362
363 __clear_chan_timer(chan);
364
365 BT_DBG("chan %p, conn %p, err %d", chan, conn, err);
366
367 if (conn) {
368 /* Delete from channel list */
369 write_lock_bh(&conn->chan_lock);
370 list_del(&chan->list);
371 write_unlock_bh(&conn->chan_lock);
372 chan_put(chan);
373
374 chan->conn = NULL;
375 hci_conn_put(conn->hcon);
376 }
377
378 l2cap_state_change(chan, BT_CLOSED);
379 sock_set_flag(sk, SOCK_ZAPPED);
380
381 if (err)
382 sk->sk_err = err;
383
384 if (parent) {
385 bt_accept_unlink(sk);
386 parent->sk_data_ready(parent, 0);
387 } else
388 sk->sk_state_change(sk);
389
390 if (!(test_bit(CONF_OUTPUT_DONE, &chan->conf_state) &&
391 test_bit(CONF_INPUT_DONE, &chan->conf_state)))
392 return;
393
394 skb_queue_purge(&chan->tx_q);
395
396 if (chan->mode == L2CAP_MODE_ERTM) {
397 struct srej_list *l, *tmp;
398
399 __clear_retrans_timer(chan);
400 __clear_monitor_timer(chan);
401 __clear_ack_timer(chan);
402
403 skb_queue_purge(&chan->srej_q);
404
405 list_for_each_entry_safe(l, tmp, &chan->srej_l, list) {
406 list_del(&l->list);
407 kfree(l);
408 }
409 }
410 }
411
412 static void l2cap_chan_cleanup_listen(struct sock *parent)
413 {
414 struct sock *sk;
415
416 BT_DBG("parent %p", parent);
417
418 /* Close not yet accepted channels */
419 while ((sk = bt_accept_dequeue(parent, NULL))) {
420 struct l2cap_chan *chan = l2cap_pi(sk)->chan;
421 __clear_chan_timer(chan);
422 lock_sock(sk);
423 l2cap_chan_close(chan, ECONNRESET);
424 release_sock(sk);
425 chan->ops->close(chan->data);
426 }
427 }
428
429 void l2cap_chan_close(struct l2cap_chan *chan, int reason)
430 {
431 struct l2cap_conn *conn = chan->conn;
432 struct sock *sk = chan->sk;
433
434 BT_DBG("chan %p state %d socket %p", chan, chan->state, sk->sk_socket);
435
436 switch (chan->state) {
437 case BT_LISTEN:
438 l2cap_chan_cleanup_listen(sk);
439
440 l2cap_state_change(chan, BT_CLOSED);
441 sock_set_flag(sk, SOCK_ZAPPED);
442 break;
443
444 case BT_CONNECTED:
445 case BT_CONFIG:
446 if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED &&
447 conn->hcon->type == ACL_LINK) {
448 __clear_chan_timer(chan);
449 __set_chan_timer(chan, sk->sk_sndtimeo);
450 l2cap_send_disconn_req(conn, chan, reason);
451 } else
452 l2cap_chan_del(chan, reason);
453 break;
454
455 case BT_CONNECT2:
456 if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED &&
457 conn->hcon->type == ACL_LINK) {
458 struct l2cap_conn_rsp rsp;
459 __u16 result;
460
461 if (bt_sk(sk)->defer_setup)
462 result = L2CAP_CR_SEC_BLOCK;
463 else
464 result = L2CAP_CR_BAD_PSM;
465 l2cap_state_change(chan, BT_DISCONN);
466
467 rsp.scid = cpu_to_le16(chan->dcid);
468 rsp.dcid = cpu_to_le16(chan->scid);
469 rsp.result = cpu_to_le16(result);
470 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
471 l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
472 sizeof(rsp), &rsp);
473 }
474
475 l2cap_chan_del(chan, reason);
476 break;
477
478 case BT_CONNECT:
479 case BT_DISCONN:
480 l2cap_chan_del(chan, reason);
481 break;
482
483 default:
484 sock_set_flag(sk, SOCK_ZAPPED);
485 break;
486 }
487 }
488
489 static inline u8 l2cap_get_auth_type(struct l2cap_chan *chan)
490 {
491 if (chan->chan_type == L2CAP_CHAN_RAW) {
492 switch (chan->sec_level) {
493 case BT_SECURITY_HIGH:
494 return HCI_AT_DEDICATED_BONDING_MITM;
495 case BT_SECURITY_MEDIUM:
496 return HCI_AT_DEDICATED_BONDING;
497 default:
498 return HCI_AT_NO_BONDING;
499 }
500 } else if (chan->psm == cpu_to_le16(0x0001)) {
501 if (chan->sec_level == BT_SECURITY_LOW)
502 chan->sec_level = BT_SECURITY_SDP;
503
504 if (chan->sec_level == BT_SECURITY_HIGH)
505 return HCI_AT_NO_BONDING_MITM;
506 else
507 return HCI_AT_NO_BONDING;
508 } else {
509 switch (chan->sec_level) {
510 case BT_SECURITY_HIGH:
511 return HCI_AT_GENERAL_BONDING_MITM;
512 case BT_SECURITY_MEDIUM:
513 return HCI_AT_GENERAL_BONDING;
514 default:
515 return HCI_AT_NO_BONDING;
516 }
517 }
518 }
519
520 /* Service level security */
521 static inline int l2cap_check_security(struct l2cap_chan *chan)
522 {
523 struct l2cap_conn *conn = chan->conn;
524 __u8 auth_type;
525
526 auth_type = l2cap_get_auth_type(chan);
527
528 return hci_conn_security(conn->hcon, chan->sec_level, auth_type);
529 }
530
531 static u8 l2cap_get_ident(struct l2cap_conn *conn)
532 {
533 u8 id;
534
535 /* Get next available identifier.
536 * 1 - 128 are used by kernel.
537 * 129 - 199 are reserved.
538 * 200 - 254 are used by utilities like l2ping, etc.
539 */
540
541 spin_lock_bh(&conn->lock);
542
543 if (++conn->tx_ident > 128)
544 conn->tx_ident = 1;
545
546 id = conn->tx_ident;
547
548 spin_unlock_bh(&conn->lock);
549
550 return id;
551 }
552
553 static void l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len, void *data)
554 {
555 struct sk_buff *skb = l2cap_build_cmd(conn, code, ident, len, data);
556 u8 flags;
557
558 BT_DBG("code 0x%2.2x", code);
559
560 if (!skb)
561 return;
562
563 if (lmp_no_flush_capable(conn->hcon->hdev))
564 flags = ACL_START_NO_FLUSH;
565 else
566 flags = ACL_START;
567
568 bt_cb(skb)->force_active = BT_POWER_FORCE_ACTIVE_ON;
569 skb->priority = HCI_PRIO_MAX;
570
571 hci_send_acl(conn->hchan, skb, flags);
572 }
573
574 static void l2cap_do_send(struct l2cap_chan *chan, struct sk_buff *skb)
575 {
576 struct hci_conn *hcon = chan->conn->hcon;
577 u16 flags;
578
579 BT_DBG("chan %p, skb %p len %d priority %u", chan, skb, skb->len,
580 skb->priority);
581
582 if (!test_bit(FLAG_FLUSHABLE, &chan->flags) &&
583 lmp_no_flush_capable(hcon->hdev))
584 flags = ACL_START_NO_FLUSH;
585 else
586 flags = ACL_START;
587
588 bt_cb(skb)->force_active = test_bit(FLAG_FORCE_ACTIVE, &chan->flags);
589 hci_send_acl(chan->conn->hchan, skb, flags);
590 }
591
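/* Build and send a supervisory frame: basic L2CAP header, enhanced or
 * extended control field depending on FLAG_EXT_CTRL, optional CRC16
 * FCS, with the Final and Poll bits folded in from the connection
 * state before transmission. */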
592 static inline void l2cap_send_sframe(struct l2cap_chan *chan, u32 control)
593 {
594 struct sk_buff *skb;
595 struct l2cap_hdr *lh;
596 struct l2cap_conn *conn = chan->conn;
597 int count, hlen;
598
599 if (chan->state != BT_CONNECTED)
600 return;
601
602 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
603 hlen = L2CAP_EXT_HDR_SIZE;
604 else
605 hlen = L2CAP_ENH_HDR_SIZE;
606
607 if (chan->fcs == L2CAP_FCS_CRC16)
608 hlen += L2CAP_FCS_SIZE;
609
610 BT_DBG("chan %p, control 0x%8.8x", chan, control);
611
612 count = min_t(unsigned int, conn->mtu, hlen);
613
614 control |= __set_sframe(chan);
615
616 if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state))
617 control |= __set_ctrl_final(chan);
618
619 if (test_and_clear_bit(CONN_SEND_PBIT, &chan->conn_state))
620 control |= __set_ctrl_poll(chan);
621
622 skb = bt_skb_alloc(count, GFP_ATOMIC);
623 if (!skb)
624 return;
625
626 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
627 lh->len = cpu_to_le16(hlen - L2CAP_HDR_SIZE);
628 lh->cid = cpu_to_le16(chan->dcid);
629
630 __put_control(chan, control, skb_put(skb, __ctrl_size(chan)));
631
632 if (chan->fcs == L2CAP_FCS_CRC16) {
633 u16 fcs = crc16(0, (u8 *)lh, count - L2CAP_FCS_SIZE);
634 put_unaligned_le16(fcs, skb_put(skb, L2CAP_FCS_SIZE));
635 }
636
637 skb->priority = HCI_PRIO_MAX;
638 l2cap_do_send(chan, skb);
639 }
640
641 static inline void l2cap_send_rr_or_rnr(struct l2cap_chan *chan, u32 control)
642 {
643 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
644 control |= __set_ctrl_super(chan, L2CAP_SUPER_RNR);
645 set_bit(CONN_RNR_SENT, &chan->conn_state);
646 } else
647 control |= __set_ctrl_super(chan, L2CAP_SUPER_RR);
648
649 control |= __set_reqseq(chan, chan->buffer_seq);
650
651 l2cap_send_sframe(chan, control);
652 }
653
654 static inline int __l2cap_no_conn_pending(struct l2cap_chan *chan)
655 {
656 return !test_bit(CONF_CONNECT_PEND, &chan->conf_state);
657 }
658
659 static void l2cap_do_start(struct l2cap_chan *chan)
660 {
661 struct l2cap_conn *conn = chan->conn;
662
663 if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) {
664 if (!(conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE))
665 return;
666
667 if (l2cap_check_security(chan) &&
668 __l2cap_no_conn_pending(chan)) {
669 struct l2cap_conn_req req;
670 req.scid = cpu_to_le16(chan->scid);
671 req.psm = chan->psm;
672
673 chan->ident = l2cap_get_ident(conn);
674 set_bit(CONF_CONNECT_PEND, &chan->conf_state);
675
676 l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_REQ,
677 sizeof(req), &req);
678 }
679 } else {
680 struct l2cap_info_req req;
681 req.type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
682
683 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
684 conn->info_ident = l2cap_get_ident(conn);
685
686 mod_timer(&conn->info_timer, jiffies +
687 msecs_to_jiffies(L2CAP_INFO_TIMEOUT));
688
689 l2cap_send_cmd(conn, conn->info_ident,
690 L2CAP_INFO_REQ, sizeof(req), &req);
691 }
692 }
693
694 static inline int l2cap_mode_supported(__u8 mode, __u32 feat_mask)
695 {
696 u32 local_feat_mask = l2cap_feat_mask;
697 if (!disable_ertm)
698 local_feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING;
699
700 switch (mode) {
701 case L2CAP_MODE_ERTM:
702 return L2CAP_FEAT_ERTM & feat_mask & local_feat_mask;
703 case L2CAP_MODE_STREAMING:
704 return L2CAP_FEAT_STREAMING & feat_mask & local_feat_mask;
705 default:
706 return 0x00;
707 }
708 }
709
710 static void l2cap_send_disconn_req(struct l2cap_conn *conn, struct l2cap_chan *chan, int err)
711 {
712 struct sock *sk;
713 struct l2cap_disconn_req req;
714
715 if (!conn)
716 return;
717
718 sk = chan->sk;
719
720 if (chan->mode == L2CAP_MODE_ERTM) {
721 __clear_retrans_timer(chan);
722 __clear_monitor_timer(chan);
723 __clear_ack_timer(chan);
724 }
725
726 req.dcid = cpu_to_le16(chan->dcid);
727 req.scid = cpu_to_le16(chan->scid);
728 l2cap_send_cmd(conn, l2cap_get_ident(conn),
729 L2CAP_DISCONN_REQ, sizeof(req), &req);
730
731 l2cap_state_change(chan, BT_DISCONN);
732 sk->sk_err = err;
733 }
734
735 /* ---- L2CAP connections ---- */
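/* Kick every connection-oriented channel on this link: send a
 * Connection Request for channels in BT_CONNECT and answer pending
 * incoming requests for channels in BT_CONNECT2. */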
736 static void l2cap_conn_start(struct l2cap_conn *conn)
737 {
738 struct l2cap_chan *chan, *tmp;
739
740 BT_DBG("conn %p", conn);
741
742 read_lock(&conn->chan_lock);
743
744 list_for_each_entry_safe(chan, tmp, &conn->chan_l, list) {
745 struct sock *sk = chan->sk;
746
747 bh_lock_sock(sk);
748
749 if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
750 bh_unlock_sock(sk);
751 continue;
752 }
753
754 if (chan->state == BT_CONNECT) {
755 struct l2cap_conn_req req;
756
757 if (!l2cap_check_security(chan) ||
758 !__l2cap_no_conn_pending(chan)) {
759 bh_unlock_sock(sk);
760 continue;
761 }
762
763 if (!l2cap_mode_supported(chan->mode, conn->feat_mask)
764 && test_bit(CONF_STATE2_DEVICE,
765 &chan->conf_state)) {
766 /* l2cap_chan_close() calls list_del(chan)
767 * so release the lock */
768 read_unlock(&conn->chan_lock);
769 l2cap_chan_close(chan, ECONNRESET);
770 read_lock(&conn->chan_lock);
771 bh_unlock_sock(sk);
772 continue;
773 }
774
775 req.scid = cpu_to_le16(chan->scid);
776 req.psm = chan->psm;
777
778 chan->ident = l2cap_get_ident(conn);
779 set_bit(CONF_CONNECT_PEND, &chan->conf_state);
780
781 l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_REQ,
782 sizeof(req), &req);
783
784 } else if (chan->state == BT_CONNECT2) {
785 struct l2cap_conn_rsp rsp;
786 char buf[128];
787 rsp.scid = cpu_to_le16(chan->dcid);
788 rsp.dcid = cpu_to_le16(chan->scid);
789
790 if (l2cap_check_security(chan)) {
791 if (bt_sk(sk)->defer_setup) {
792 struct sock *parent = bt_sk(sk)->parent;
793 rsp.result = cpu_to_le16(L2CAP_CR_PEND);
794 rsp.status = cpu_to_le16(L2CAP_CS_AUTHOR_PEND);
795 if (parent)
796 parent->sk_data_ready(parent, 0);
797
798 } else {
799 l2cap_state_change(chan, BT_CONFIG);
800 rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
801 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
802 }
803 } else {
804 rsp.result = cpu_to_le16(L2CAP_CR_PEND);
805 rsp.status = cpu_to_le16(L2CAP_CS_AUTHEN_PEND);
806 }
807
808 l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
809 sizeof(rsp), &rsp);
810
811 if (test_bit(CONF_REQ_SENT, &chan->conf_state) ||
812 rsp.result != L2CAP_CR_SUCCESS) {
813 bh_unlock_sock(sk);
814 continue;
815 }
816
817 set_bit(CONF_REQ_SENT, &chan->conf_state);
818 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
819 l2cap_build_conf_req(chan, buf), buf);
820 chan->num_conf_req++;
821 }
822
823 bh_unlock_sock(sk);
824 }
825
826 read_unlock(&conn->chan_lock);
827 }
828
829 /* Find channel with given CID and source bdaddr.
830 * Returns closest match.
831 */
832 static struct l2cap_chan *l2cap_global_chan_by_scid(int state, __le16 cid, bdaddr_t *src)
833 {
834 struct l2cap_chan *c, *c1 = NULL;
835
836 read_lock(&chan_list_lock);
837
838 list_for_each_entry(c, &chan_list, global_l) {
839 struct sock *sk = c->sk;
840
841 if (state && c->state != state)
842 continue;
843
844 if (c->scid == cid) {
845 /* Exact match. */
846 if (!bacmp(&bt_sk(sk)->src, src)) {
847 read_unlock(&chan_list_lock);
848 return c;
849 }
850
851 /* Closest match */
852 if (!bacmp(&bt_sk(sk)->src, BDADDR_ANY))
853 c1 = c;
854 }
855 }
856
857 read_unlock(&chan_list_lock);
858
859 return c1;
860 }
861
862 static void l2cap_le_conn_ready(struct l2cap_conn *conn)
863 {
864 struct sock *parent, *sk;
865 struct l2cap_chan *chan, *pchan;
866
867 BT_DBG("");
868
869 /* Check if we have a socket listening on this CID */
870 pchan = l2cap_global_chan_by_scid(BT_LISTEN, L2CAP_CID_LE_DATA,
871 conn->src);
872 if (!pchan)
873 return;
874
875 parent = pchan->sk;
876
877 bh_lock_sock(parent);
878
879 /* Check for backlog size */
880 if (sk_acceptq_is_full(parent)) {
881 BT_DBG("backlog full %d", parent->sk_ack_backlog);
882 goto clean;
883 }
884
885 chan = pchan->ops->new_connection(pchan->data);
886 if (!chan)
887 goto clean;
888
889 sk = chan->sk;
890
891 write_lock_bh(&conn->chan_lock);
892
893 hci_conn_hold(conn->hcon);
894
895 bacpy(&bt_sk(sk)->src, conn->src);
896 bacpy(&bt_sk(sk)->dst, conn->dst);
897
898 bt_accept_enqueue(parent, sk);
899
900 __l2cap_chan_add(conn, chan);
901
902 __set_chan_timer(chan, sk->sk_sndtimeo);
903
904 l2cap_state_change(chan, BT_CONNECTED);
905 parent->sk_data_ready(parent, 0);
906
907 write_unlock_bh(&conn->chan_lock);
908
909 clean:
910 bh_unlock_sock(parent);
911 }
912
913 static void l2cap_chan_ready(struct sock *sk)
914 {
915 struct l2cap_chan *chan = l2cap_pi(sk)->chan;
916 struct sock *parent = bt_sk(sk)->parent;
917
918 BT_DBG("sk %p, parent %p", sk, parent);
919
920 chan->conf_state = 0;
921 __clear_chan_timer(chan);
922
923 l2cap_state_change(chan, BT_CONNECTED);
924 sk->sk_state_change(sk);
925
926 if (parent)
927 parent->sk_data_ready(parent, 0);
928 }
929
930 static void l2cap_conn_ready(struct l2cap_conn *conn)
931 {
932 struct l2cap_chan *chan;
933
934 BT_DBG("conn %p", conn);
935
936 if (!conn->hcon->out && conn->hcon->type == LE_LINK)
937 l2cap_le_conn_ready(conn);
938
939 if (conn->hcon->out && conn->hcon->type == LE_LINK)
940 smp_conn_security(conn, conn->hcon->pending_sec_level);
941
942 read_lock(&conn->chan_lock);
943
944 list_for_each_entry(chan, &conn->chan_l, list) {
945 struct sock *sk = chan->sk;
946
947 bh_lock_sock(sk);
948
949 if (conn->hcon->type == LE_LINK) {
950 if (smp_conn_security(conn, chan->sec_level))
951 l2cap_chan_ready(sk);
952
953 } else if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
954 __clear_chan_timer(chan);
955 l2cap_state_change(chan, BT_CONNECTED);
956 sk->sk_state_change(sk);
957
958 } else if (chan->state == BT_CONNECT)
959 l2cap_do_start(chan);
960
961 bh_unlock_sock(sk);
962 }
963
964 read_unlock(&conn->chan_lock);
965 }
966
967 /* Notify sockets that we cannot guarantee reliability anymore */
968 static void l2cap_conn_unreliable(struct l2cap_conn *conn, int err)
969 {
970 struct l2cap_chan *chan;
971
972 BT_DBG("conn %p", conn);
973
974 read_lock(&conn->chan_lock);
975
976 list_for_each_entry(chan, &conn->chan_l, list) {
977 struct sock *sk = chan->sk;
978
979 if (test_bit(FLAG_FORCE_RELIABLE, &chan->flags))
980 sk->sk_err = err;
981 }
982
983 read_unlock(&conn->chan_lock);
984 }
985
986 static void l2cap_info_timeout(unsigned long arg)
987 {
988 struct l2cap_conn *conn = (void *) arg;
989
990 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
991 conn->info_ident = 0;
992
993 l2cap_conn_start(conn);
994 }
995
996 static void l2cap_conn_del(struct hci_conn *hcon, int err)
997 {
998 struct l2cap_conn *conn = hcon->l2cap_data;
999 struct l2cap_chan *chan, *l;
1000 struct sock *sk;
1001
1002 if (!conn)
1003 return;
1004
1005 BT_DBG("hcon %p conn %p, err %d", hcon, conn, err);
1006
1007 kfree_skb(conn->rx_skb);
1008
1009 /* Kill channels */
1010 list_for_each_entry_safe(chan, l, &conn->chan_l, list) {
1011 sk = chan->sk;
1012 bh_lock_sock(sk);
1013 l2cap_chan_del(chan, err);
1014 bh_unlock_sock(sk);
1015 chan->ops->close(chan->data);
1016 }
1017
1018 hci_chan_del(conn->hchan);
1019
1020 if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT)
1021 del_timer_sync(&conn->info_timer);
1022
1023 if (test_and_clear_bit(HCI_CONN_LE_SMP_PEND, &hcon->pend)) {
1024 del_timer(&conn->security_timer);
1025 smp_chan_destroy(conn);
1026 }
1027
1028 hcon->l2cap_data = NULL;
1029 kfree(conn);
1030 }
1031
1032 static void security_timeout(unsigned long arg)
1033 {
1034 struct l2cap_conn *conn = (void *) arg;
1035
1036 l2cap_conn_del(conn->hcon, ETIMEDOUT);
1037 }
1038
1039 static struct l2cap_conn *l2cap_conn_add(struct hci_conn *hcon, u8 status)
1040 {
1041 struct l2cap_conn *conn = hcon->l2cap_data;
1042 struct hci_chan *hchan;
1043
1044 if (conn || status)
1045 return conn;
1046
1047 hchan = hci_chan_create(hcon);
1048 if (!hchan)
1049 return NULL;
1050
1051 conn = kzalloc(sizeof(struct l2cap_conn), GFP_ATOMIC);
1052 if (!conn) {
1053 hci_chan_del(hchan);
1054 return NULL;
1055 }
1056
1057 hcon->l2cap_data = conn;
1058 conn->hcon = hcon;
1059 conn->hchan = hchan;
1060
1061 BT_DBG("hcon %p conn %p hchan %p", hcon, conn, hchan);
1062
1063 if (hcon->hdev->le_mtu && hcon->type == LE_LINK)
1064 conn->mtu = hcon->hdev->le_mtu;
1065 else
1066 conn->mtu = hcon->hdev->acl_mtu;
1067
1068 conn->src = &hcon->hdev->bdaddr;
1069 conn->dst = &hcon->dst;
1070
1071 conn->feat_mask = 0;
1072
1073 spin_lock_init(&conn->lock);
1074 rwlock_init(&conn->chan_lock);
1075
1076 INIT_LIST_HEAD(&conn->chan_l);
1077
1078 if (hcon->type == LE_LINK)
1079 setup_timer(&conn->security_timer, security_timeout,
1080 (unsigned long) conn);
1081 else
1082 setup_timer(&conn->info_timer, l2cap_info_timeout,
1083 (unsigned long) conn);
1084
1085 conn->disc_reason = HCI_ERROR_REMOTE_USER_TERM;
1086
1087 return conn;
1088 }
1089
1090 static inline void l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan)
1091 {
1092 write_lock_bh(&conn->chan_lock);
1093 __l2cap_chan_add(conn, chan);
1094 write_unlock_bh(&conn->chan_lock);
1095 }
1096
1097 /* ---- Socket interface ---- */
1098
1099 /* Find channel with given PSM and source bdaddr.
1100 * Returns closest match.
1101 */
1102 static struct l2cap_chan *l2cap_global_chan_by_psm(int state, __le16 psm, bdaddr_t *src)
1103 {
1104 struct l2cap_chan *c, *c1 = NULL;
1105
1106 read_lock(&chan_list_lock);
1107
1108 list_for_each_entry(c, &chan_list, global_l) {
1109 struct sock *sk = c->sk;
1110
1111 if (state && c->state != state)
1112 continue;
1113
1114 if (c->psm == psm) {
1115 /* Exact match. */
1116 if (!bacmp(&bt_sk(sk)->src, src)) {
1117 read_unlock(&chan_list_lock);
1118 return c;
1119 }
1120
1121 /* Closest match */
1122 if (!bacmp(&bt_sk(sk)->src, BDADDR_ANY))
1123 c1 = c;
1124 }
1125 }
1126
1127 read_unlock(&chan_list_lock);
1128
1129 return c1;
1130 }
1131
1132 int l2cap_chan_connect(struct l2cap_chan *chan)
1133 {
1134 struct sock *sk = chan->sk;
1135 bdaddr_t *src = &bt_sk(sk)->src;
1136 bdaddr_t *dst = &bt_sk(sk)->dst;
1137 struct l2cap_conn *conn;
1138 struct hci_conn *hcon;
1139 struct hci_dev *hdev;
1140 __u8 auth_type;
1141 int err;
1142
1143 BT_DBG("%s -> %s psm 0x%2.2x", batostr(src), batostr(dst),
1144 chan->psm);
1145
1146 hdev = hci_get_route(dst, src);
1147 if (!hdev)
1148 return -EHOSTUNREACH;
1149
1150 hci_dev_lock_bh(hdev);
1151
1152 auth_type = l2cap_get_auth_type(chan);
1153
1154 if (chan->dcid == L2CAP_CID_LE_DATA)
1155 hcon = hci_connect(hdev, LE_LINK, dst,
1156 chan->sec_level, auth_type);
1157 else
1158 hcon = hci_connect(hdev, ACL_LINK, dst,
1159 chan->sec_level, auth_type);
1160
1161 if (IS_ERR(hcon)) {
1162 err = PTR_ERR(hcon);
1163 goto done;
1164 }
1165
1166 conn = l2cap_conn_add(hcon, 0);
1167 if (!conn) {
1168 hci_conn_put(hcon);
1169 err = -ENOMEM;
1170 goto done;
1171 }
1172
1173 /* Update source addr of the socket */
1174 bacpy(src, conn->src);
1175
1176 l2cap_chan_add(conn, chan);
1177
1178 l2cap_state_change(chan, BT_CONNECT);
1179 __set_chan_timer(chan, sk->sk_sndtimeo);
1180
1181 if (hcon->state == BT_CONNECTED) {
1182 if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
1183 __clear_chan_timer(chan);
1184 if (l2cap_check_security(chan))
1185 l2cap_state_change(chan, BT_CONNECTED);
1186 } else
1187 l2cap_do_start(chan);
1188 }
1189
1190 err = 0;
1191
1192 done:
1193 hci_dev_unlock_bh(hdev);
1194 hci_dev_put(hdev);
1195 return err;
1196 }
1197
1198 int __l2cap_wait_ack(struct sock *sk)
1199 {
1200 struct l2cap_chan *chan = l2cap_pi(sk)->chan;
1201 DECLARE_WAITQUEUE(wait, current);
1202 int err = 0;
1203 int timeo = HZ/5;
1204
1205 add_wait_queue(sk_sleep(sk), &wait);
1206 set_current_state(TASK_INTERRUPTIBLE);
1207 while (chan->unacked_frames > 0 && chan->conn) {
1208 if (!timeo)
1209 timeo = HZ/5;
1210
1211 if (signal_pending(current)) {
1212 err = sock_intr_errno(timeo);
1213 break;
1214 }
1215
1216 release_sock(sk);
1217 timeo = schedule_timeout(timeo);
1218 lock_sock(sk);
1219 set_current_state(TASK_INTERRUPTIBLE);
1220
1221 err = sock_error(sk);
1222 if (err)
1223 break;
1224 }
1225 set_current_state(TASK_RUNNING);
1226 remove_wait_queue(sk_sleep(sk), &wait);
1227 return err;
1228 }
1229
1230 static void l2cap_monitor_timeout(unsigned long arg)
1231 {
1232 struct l2cap_chan *chan = (void *) arg;
1233 struct sock *sk = chan->sk;
1234
1235 BT_DBG("chan %p", chan);
1236
1237 bh_lock_sock(sk);
1238 if (chan->retry_count >= chan->remote_max_tx) {
1239 l2cap_send_disconn_req(chan->conn, chan, ECONNABORTED);
1240 bh_unlock_sock(sk);
1241 return;
1242 }
1243
1244 chan->retry_count++;
1245 __set_monitor_timer(chan);
1246
1247 l2cap_send_rr_or_rnr(chan, L2CAP_CTRL_POLL);
1248 bh_unlock_sock(sk);
1249 }
1250
1251 static void l2cap_retrans_timeout(unsigned long arg)
1252 {
1253 struct l2cap_chan *chan = (void *) arg;
1254 struct sock *sk = chan->sk;
1255
1256 BT_DBG("chan %p", chan);
1257
1258 bh_lock_sock(sk);
1259 chan->retry_count = 1;
1260 __set_monitor_timer(chan);
1261
1262 set_bit(CONN_WAIT_F, &chan->conn_state);
1263
1264 l2cap_send_rr_or_rnr(chan, L2CAP_CTRL_POLL);
1265 bh_unlock_sock(sk);
1266 }
1267
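/* Drop frames from the head of tx_q that the peer has acknowledged,
 * i.e. everything before expected_ack_seq, and stop the retransmission
 * timer once no unacked frames remain. */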
1268 static void l2cap_drop_acked_frames(struct l2cap_chan *chan)
1269 {
1270 struct sk_buff *skb;
1271
1272 while ((skb = skb_peek(&chan->tx_q)) &&
1273 chan->unacked_frames) {
1274 if (bt_cb(skb)->tx_seq == chan->expected_ack_seq)
1275 break;
1276
1277 skb = skb_dequeue(&chan->tx_q);
1278 kfree_skb(skb);
1279
1280 chan->unacked_frames--;
1281 }
1282
1283 if (!chan->unacked_frames)
1284 __clear_retrans_timer(chan);
1285 }
1286
1287 static void l2cap_streaming_send(struct l2cap_chan *chan)
1288 {
1289 struct sk_buff *skb;
1290 u32 control;
1291 u16 fcs;
1292
1293 while ((skb = skb_dequeue(&chan->tx_q))) {
1294 control = __get_control(chan, skb->data + L2CAP_HDR_SIZE);
1295 control |= __set_txseq(chan, chan->next_tx_seq);
1296 __put_control(chan, control, skb->data + L2CAP_HDR_SIZE);
1297
1298 if (chan->fcs == L2CAP_FCS_CRC16) {
1299 fcs = crc16(0, (u8 *)skb->data,
1300 skb->len - L2CAP_FCS_SIZE);
1301 put_unaligned_le16(fcs,
1302 skb->data + skb->len - L2CAP_FCS_SIZE);
1303 }
1304
1305 l2cap_do_send(chan, skb);
1306
1307 chan->next_tx_seq = __next_seq(chan, chan->next_tx_seq);
1308 }
1309 }
1310
1311 static void l2cap_retransmit_one_frame(struct l2cap_chan *chan, u16 tx_seq)
1312 {
1313 struct sk_buff *skb, *tx_skb;
1314 u16 fcs;
1315 u32 control;
1316
1317 skb = skb_peek(&chan->tx_q);
1318 if (!skb)
1319 return;
1320
1321 do {
1322 if (bt_cb(skb)->tx_seq == tx_seq)
1323 break;
1324
1325 if (skb_queue_is_last(&chan->tx_q, skb))
1326 return;
1327
1328 } while ((skb = skb_queue_next(&chan->tx_q, skb)));
1329
1330 if (chan->remote_max_tx &&
1331 bt_cb(skb)->retries == chan->remote_max_tx) {
1332 l2cap_send_disconn_req(chan->conn, chan, ECONNABORTED);
1333 return;
1334 }
1335
1336 tx_skb = skb_clone(skb, GFP_ATOMIC);
1337 bt_cb(skb)->retries++;
1338
1339 control = __get_control(chan, tx_skb->data + L2CAP_HDR_SIZE);
1340 control &= __get_sar_mask(chan);
1341
1342 if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state))
1343 control |= __set_ctrl_final(chan);
1344
1345 control |= __set_reqseq(chan, chan->buffer_seq);
1346 control |= __set_txseq(chan, tx_seq);
1347
1348 __put_control(chan, control, tx_skb->data + L2CAP_HDR_SIZE);
1349
1350 if (chan->fcs == L2CAP_FCS_CRC16) {
1351 fcs = crc16(0, (u8 *)tx_skb->data,
1352 tx_skb->len - L2CAP_FCS_SIZE);
1353 put_unaligned_le16(fcs,
1354 tx_skb->data + tx_skb->len - L2CAP_FCS_SIZE);
1355 }
1356
1357 l2cap_do_send(chan, tx_skb);
1358 }
1359
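/* ERTM transmit path: clone and send queued I-frames until the tx
 * window fills, stamping each with the current reqseq/txseq and an
 * optional FCS, arming the retransmission timer and keeping the
 * original skb on tx_q for possible retransmission. */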
1360 static int l2cap_ertm_send(struct l2cap_chan *chan)
1361 {
1362 struct sk_buff *skb, *tx_skb;
1363 u16 fcs;
1364 u32 control;
1365 int nsent = 0;
1366
1367 if (chan->state != BT_CONNECTED)
1368 return -ENOTCONN;
1369
1370 while ((skb = chan->tx_send_head) && (!l2cap_tx_window_full(chan))) {
1371
1372 if (chan->remote_max_tx &&
1373 bt_cb(skb)->retries == chan->remote_max_tx) {
1374 l2cap_send_disconn_req(chan->conn, chan, ECONNABORTED);
1375 break;
1376 }
1377
1378 tx_skb = skb_clone(skb, GFP_ATOMIC);
1379
1380 bt_cb(skb)->retries++;
1381
1382 control = __get_control(chan, tx_skb->data + L2CAP_HDR_SIZE);
1383 control &= __get_sar_mask(chan);
1384
1385 if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state))
1386 control |= __set_ctrl_final(chan);
1387
1388 control |= __set_reqseq(chan, chan->buffer_seq);
1389 control |= __set_txseq(chan, chan->next_tx_seq);
1390
1391 __put_control(chan, control, tx_skb->data + L2CAP_HDR_SIZE);
1392
1393 if (chan->fcs == L2CAP_FCS_CRC16) {
1394 fcs = crc16(0, (u8 *)skb->data,
1395 tx_skb->len - L2CAP_FCS_SIZE);
1396 put_unaligned_le16(fcs, skb->data +
1397 tx_skb->len - L2CAP_FCS_SIZE);
1398 }
1399
1400 l2cap_do_send(chan, tx_skb);
1401
1402 __set_retrans_timer(chan);
1403
1404 bt_cb(skb)->tx_seq = chan->next_tx_seq;
1405
1406 chan->next_tx_seq = __next_seq(chan, chan->next_tx_seq);
1407
1408 if (bt_cb(skb)->retries == 1)
1409 chan->unacked_frames++;
1410
1411 chan->frames_sent++;
1412
1413 if (skb_queue_is_last(&chan->tx_q, skb))
1414 chan->tx_send_head = NULL;
1415 else
1416 chan->tx_send_head = skb_queue_next(&chan->tx_q, skb);
1417
1418 nsent++;
1419 }
1420
1421 return nsent;
1422 }
1423
1424 static int l2cap_retransmit_frames(struct l2cap_chan *chan)
1425 {
1426 int ret;
1427
1428 if (!skb_queue_empty(&chan->tx_q))
1429 chan->tx_send_head = chan->tx_q.next;
1430
1431 chan->next_tx_seq = chan->expected_ack_seq;
1432 ret = l2cap_ertm_send(chan);
1433 return ret;
1434 }
1435
1436 static void l2cap_send_ack(struct l2cap_chan *chan)
1437 {
1438 u32 control = 0;
1439
1440 control |= __set_reqseq(chan, chan->buffer_seq);
1441
1442 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
1443 control |= __set_ctrl_super(chan, L2CAP_SUPER_RNR);
1444 set_bit(CONN_RNR_SENT, &chan->conn_state);
1445 l2cap_send_sframe(chan, control);
1446 return;
1447 }
1448
1449 if (l2cap_ertm_send(chan) > 0)
1450 return;
1451
1452 control |= __set_ctrl_super(chan, L2CAP_SUPER_RR);
1453 l2cap_send_sframe(chan, control);
1454 }
1455
1456 static void l2cap_send_srejtail(struct l2cap_chan *chan)
1457 {
1458 struct srej_list *tail;
1459 u32 control;
1460
1461 control = __set_ctrl_super(chan, L2CAP_SUPER_SREJ);
1462 control |= __set_ctrl_final(chan);
1463
1464 tail = list_entry((&chan->srej_l)->prev, struct srej_list, list);
1465 control |= __set_reqseq(chan, tail->tx_seq);
1466
1467 l2cap_send_sframe(chan, control);
1468 }
1469
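/* Copy user data from the iovec into skb, spilling anything beyond
 * 'count' into continuation fragments (no L2CAP header, at most
 * conn->mtu bytes each) chained on skb's frag_list. */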
1470 static inline int l2cap_skbuff_fromiovec(struct sock *sk, struct msghdr *msg, int len, int count, struct sk_buff *skb)
1471 {
1472 struct l2cap_conn *conn = l2cap_pi(sk)->chan->conn;
1473 struct sk_buff **frag;
1474 int err, sent = 0;
1475
1476 if (memcpy_fromiovec(skb_put(skb, count), msg->msg_iov, count))
1477 return -EFAULT;
1478
1479 sent += count;
1480 len -= count;
1481
1482 /* Continuation fragments (no L2CAP header) */
1483 frag = &skb_shinfo(skb)->frag_list;
1484 while (len) {
1485 count = min_t(unsigned int, conn->mtu, len);
1486
1487 *frag = bt_skb_send_alloc(sk, count, msg->msg_flags & MSG_DONTWAIT, &err);
1488 if (!*frag)
1489 return err;
1490 if (memcpy_fromiovec(skb_put(*frag, count), msg->msg_iov, count))
1491 return -EFAULT;
1492
1493 (*frag)->priority = skb->priority;
1494
1495 sent += count;
1496 len -= count;
1497
1498 frag = &(*frag)->next;
1499 }
1500
1501 return sent;
1502 }
1503
1504 static struct sk_buff *l2cap_create_connless_pdu(struct l2cap_chan *chan,
1505 struct msghdr *msg, size_t len,
1506 u32 priority)
1507 {
1508 struct sock *sk = chan->sk;
1509 struct l2cap_conn *conn = chan->conn;
1510 struct sk_buff *skb;
1511 int err, count, hlen = L2CAP_HDR_SIZE + L2CAP_PSMLEN_SIZE;
1512 struct l2cap_hdr *lh;
1513
1514 BT_DBG("sk %p len %d priority %u", sk, (int)len, priority);
1515
1516 count = min_t(unsigned int, (conn->mtu - hlen), len);
1517 skb = bt_skb_send_alloc(sk, count + hlen,
1518 msg->msg_flags & MSG_DONTWAIT, &err);
1519 if (!skb)
1520 return ERR_PTR(err);
1521
1522 skb->priority = priority;
1523
1524 /* Create L2CAP header */
1525 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
1526 lh->cid = cpu_to_le16(chan->dcid);
1527 lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
1528 put_unaligned_le16(chan->psm, skb_put(skb, 2));
1529
1530 err = l2cap_skbuff_fromiovec(sk, msg, len, count, skb);
1531 if (unlikely(err < 0)) {
1532 kfree_skb(skb);
1533 return ERR_PTR(err);
1534 }
1535 return skb;
1536 }
1537
1538 static struct sk_buff *l2cap_create_basic_pdu(struct l2cap_chan *chan,
1539 struct msghdr *msg, size_t len,
1540 u32 priority)
1541 {
1542 struct sock *sk = chan->sk;
1543 struct l2cap_conn *conn = chan->conn;
1544 struct sk_buff *skb;
1545 int err, count, hlen = L2CAP_HDR_SIZE;
1546 struct l2cap_hdr *lh;
1547
1548 BT_DBG("sk %p len %d", sk, (int)len);
1549
1550 count = min_t(unsigned int, (conn->mtu - hlen), len);
1551 skb = bt_skb_send_alloc(sk, count + hlen,
1552 msg->msg_flags & MSG_DONTWAIT, &err);
1553 if (!skb)
1554 return ERR_PTR(err);
1555
1556 skb->priority = priority;
1557
1558 /* Create L2CAP header */
1559 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
1560 lh->cid = cpu_to_le16(chan->dcid);
1561 lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
1562
1563 err = l2cap_skbuff_fromiovec(sk, msg, len, count, skb);
1564 if (unlikely(err < 0)) {
1565 kfree_skb(skb);
1566 return ERR_PTR(err);
1567 }
1568 return skb;
1569 }
1570
1571 static struct sk_buff *l2cap_create_iframe_pdu(struct l2cap_chan *chan,
1572 struct msghdr *msg, size_t len,
1573 u32 control, u16 sdulen)
1574 {
1575 struct sock *sk = chan->sk;
1576 struct l2cap_conn *conn = chan->conn;
1577 struct sk_buff *skb;
1578 int err, count, hlen;
1579 struct l2cap_hdr *lh;
1580
1581 BT_DBG("sk %p len %d", sk, (int)len);
1582
1583 if (!conn)
1584 return ERR_PTR(-ENOTCONN);
1585
1586 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
1587 hlen = L2CAP_EXT_HDR_SIZE;
1588 else
1589 hlen = L2CAP_ENH_HDR_SIZE;
1590
1591 if (sdulen)
1592 hlen += L2CAP_SDULEN_SIZE;
1593
1594 if (chan->fcs == L2CAP_FCS_CRC16)
1595 hlen += L2CAP_FCS_SIZE;
1596
1597 count = min_t(unsigned int, (conn->mtu - hlen), len);
1598 skb = bt_skb_send_alloc(sk, count + hlen,
1599 msg->msg_flags & MSG_DONTWAIT, &err);
1600 if (!skb)
1601 return ERR_PTR(err);
1602
1603 /* Create L2CAP header */
1604 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
1605 lh->cid = cpu_to_le16(chan->dcid);
1606 lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
1607
1608 __put_control(chan, control, skb_put(skb, __ctrl_size(chan)));
1609
1610 if (sdulen)
1611 put_unaligned_le16(sdulen, skb_put(skb, L2CAP_SDULEN_SIZE));
1612
1613 err = l2cap_skbuff_fromiovec(sk, msg, len, count, skb);
1614 if (unlikely(err < 0)) {
1615 kfree_skb(skb);
1616 return ERR_PTR(err);
1617 }
1618
1619 if (chan->fcs == L2CAP_FCS_CRC16)
1620 put_unaligned_le16(0, skb_put(skb, L2CAP_FCS_SIZE));
1621
1622 bt_cb(skb)->retries = 0;
1623 return skb;
1624 }
1625
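/* Segment an SDU larger than the remote MPS into a START / CONTINUE /
 * END sequence of I-frames and splice them onto the transmit queue;
 * returns the number of bytes queued. */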
1626 static int l2cap_sar_segment_sdu(struct l2cap_chan *chan, struct msghdr *msg, size_t len)
1627 {
1628 struct sk_buff *skb;
1629 struct sk_buff_head sar_queue;
1630 u32 control;
1631 size_t size = 0;
1632
1633 skb_queue_head_init(&sar_queue);
1634 control = __set_ctrl_sar(chan, L2CAP_SAR_START);
1635 skb = l2cap_create_iframe_pdu(chan, msg, chan->remote_mps, control, len);
1636 if (IS_ERR(skb))
1637 return PTR_ERR(skb);
1638
1639 __skb_queue_tail(&sar_queue, skb);
1640 len -= chan->remote_mps;
1641 size += chan->remote_mps;
1642
1643 while (len > 0) {
1644 size_t buflen;
1645
1646 if (len > chan->remote_mps) {
1647 control = __set_ctrl_sar(chan, L2CAP_SAR_CONTINUE);
1648 buflen = chan->remote_mps;
1649 } else {
1650 control = __set_ctrl_sar(chan, L2CAP_SAR_END);
1651 buflen = len;
1652 }
1653
1654 skb = l2cap_create_iframe_pdu(chan, msg, buflen, control, 0);
1655 if (IS_ERR(skb)) {
1656 skb_queue_purge(&sar_queue);
1657 return PTR_ERR(skb);
1658 }
1659
1660 __skb_queue_tail(&sar_queue, skb);
1661 len -= buflen;
1662 size += buflen;
1663 }
1664 skb_queue_splice_tail(&sar_queue, &chan->tx_q);
1665 if (chan->tx_send_head == NULL)
1666 chan->tx_send_head = sar_queue.next;
1667
1668 return size;
1669 }
1670
1671 int l2cap_chan_send(struct l2cap_chan *chan, struct msghdr *msg, size_t len,
1672 u32 priority)
1673 {
1674 struct sk_buff *skb;
1675 u32 control;
1676 int err;
1677
1678 /* Connectionless channel */
1679 if (chan->chan_type == L2CAP_CHAN_CONN_LESS) {
1680 skb = l2cap_create_connless_pdu(chan, msg, len, priority);
1681 if (IS_ERR(skb))
1682 return PTR_ERR(skb);
1683
1684 l2cap_do_send(chan, skb);
1685 return len;
1686 }
1687
1688 switch (chan->mode) {
1689 case L2CAP_MODE_BASIC:
1690 /* Check outgoing MTU */
1691 if (len > chan->omtu)
1692 return -EMSGSIZE;
1693
1694 /* Create a basic PDU */
1695 skb = l2cap_create_basic_pdu(chan, msg, len, priority);
1696 if (IS_ERR(skb))
1697 return PTR_ERR(skb);
1698
1699 l2cap_do_send(chan, skb);
1700 err = len;
1701 break;
1702
1703 case L2CAP_MODE_ERTM:
1704 case L2CAP_MODE_STREAMING:
1705 /* Entire SDU fits into one PDU */
1706 if (len <= chan->remote_mps) {
1707 control = __set_ctrl_sar(chan, L2CAP_SAR_UNSEGMENTED);
1708 skb = l2cap_create_iframe_pdu(chan, msg, len, control,
1709 0);
1710 if (IS_ERR(skb))
1711 return PTR_ERR(skb);
1712
1713 __skb_queue_tail(&chan->tx_q, skb);
1714
1715 if (chan->tx_send_head == NULL)
1716 chan->tx_send_head = skb;
1717
1718 } else {
1719 /* Segment SDU into multiple PDUs */
1720 err = l2cap_sar_segment_sdu(chan, msg, len);
1721 if (err < 0)
1722 return err;
1723 }
1724
1725 if (chan->mode == L2CAP_MODE_STREAMING) {
1726 l2cap_streaming_send(chan);
1727 err = len;
1728 break;
1729 }
1730
1731 if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state) &&
1732 test_bit(CONN_WAIT_F, &chan->conn_state)) {
1733 err = len;
1734 break;
1735 }
1736
1737 err = l2cap_ertm_send(chan);
1738 if (err >= 0)
1739 err = len;
1740
1741 break;
1742
1743 default:
1744 BT_DBG("bad state %1.1x", chan->mode);
1745 err = -EBADFD;
1746 }
1747
1748 return err;
1749 }
1750
1751 /* Copy frame to all raw sockets on that connection */
1752 static void l2cap_raw_recv(struct l2cap_conn *conn, struct sk_buff *skb)
1753 {
1754 struct sk_buff *nskb;
1755 struct l2cap_chan *chan;
1756
1757 BT_DBG("conn %p", conn);
1758
1759 read_lock(&conn->chan_lock);
1760 list_for_each_entry(chan, &conn->chan_l, list) {
1761 struct sock *sk = chan->sk;
1762 if (chan->chan_type != L2CAP_CHAN_RAW)
1763 continue;
1764
1765 /* Don't send frame to the socket it came from */
1766 if (skb->sk == sk)
1767 continue;
1768 nskb = skb_clone(skb, GFP_ATOMIC);
1769 if (!nskb)
1770 continue;
1771
1772 if (chan->ops->recv(chan->data, nskb))
1773 kfree_skb(nskb);
1774 }
1775 read_unlock(&conn->chan_lock);
1776 }
1777
1778 /* ---- L2CAP signalling commands ---- */
1779 static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
1780 u8 code, u8 ident, u16 dlen, void *data)
1781 {
1782 struct sk_buff *skb, **frag;
1783 struct l2cap_cmd_hdr *cmd;
1784 struct l2cap_hdr *lh;
1785 int len, count;
1786
1787 BT_DBG("conn %p, code 0x%2.2x, ident 0x%2.2x, len %d",
1788 conn, code, ident, dlen);
1789
1790 len = L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE + dlen;
1791 count = min_t(unsigned int, conn->mtu, len);
1792
1793 skb = bt_skb_alloc(count, GFP_ATOMIC);
1794 if (!skb)
1795 return NULL;
1796
1797 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
1798 lh->len = cpu_to_le16(L2CAP_CMD_HDR_SIZE + dlen);
1799
1800 if (conn->hcon->type == LE_LINK)
1801 lh->cid = cpu_to_le16(L2CAP_CID_LE_SIGNALING);
1802 else
1803 lh->cid = cpu_to_le16(L2CAP_CID_SIGNALING);
1804
1805 cmd = (struct l2cap_cmd_hdr *) skb_put(skb, L2CAP_CMD_HDR_SIZE);
1806 cmd->code = code;
1807 cmd->ident = ident;
1808 cmd->len = cpu_to_le16(dlen);
1809
1810 if (dlen) {
1811 count -= L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE;
1812 memcpy(skb_put(skb, count), data, count);
1813 data += count;
1814 }
1815
1816 len -= skb->len;
1817
1818 /* Continuation fragments (no L2CAP header) */
1819 frag = &skb_shinfo(skb)->frag_list;
1820 while (len) {
1821 count = min_t(unsigned int, conn->mtu, len);
1822
1823 *frag = bt_skb_alloc(count, GFP_ATOMIC);
1824 if (!*frag)
1825 goto fail;
1826
1827 memcpy(skb_put(*frag, count), data, count);
1828
1829 len -= count;
1830 data += count;
1831
1832 frag = &(*frag)->next;
1833 }
1834
1835 return skb;
1836
1837 fail:
1838 kfree_skb(skb);
1839 return NULL;
1840 }
1841
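/* Configuration options are type/length/value triplets; for example an
 * MTU option carrying 672 bytes is encoded as 01 02 a0 02 (type,
 * length, little-endian value). Values of 1, 2 or 4 bytes are returned
 * inline, anything larger as a pointer into the request buffer. */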
1842 static inline int l2cap_get_conf_opt(void **ptr, int *type, int *olen, unsigned long *val)
1843 {
1844 struct l2cap_conf_opt *opt = *ptr;
1845 int len;
1846
1847 len = L2CAP_CONF_OPT_SIZE + opt->len;
1848 *ptr += len;
1849
1850 *type = opt->type;
1851 *olen = opt->len;
1852
1853 switch (opt->len) {
1854 case 1:
1855 *val = *((u8 *) opt->val);
1856 break;
1857
1858 case 2:
1859 *val = get_unaligned_le16(opt->val);
1860 break;
1861
1862 case 4:
1863 *val = get_unaligned_le32(opt->val);
1864 break;
1865
1866 default:
1867 *val = (unsigned long) opt->val;
1868 break;
1869 }
1870
1871 BT_DBG("type 0x%2.2x len %d val 0x%lx", *type, opt->len, *val);
1872 return len;
1873 }
1874
1875 static void l2cap_add_conf_opt(void **ptr, u8 type, u8 len, unsigned long val)
1876 {
1877 struct l2cap_conf_opt *opt = *ptr;
1878
1879 BT_DBG("type 0x%2.2x len %d val 0x%lx", type, len, val);
1880
1881 opt->type = type;
1882 opt->len = len;
1883
1884 switch (len) {
1885 case 1:
1886 *((u8 *) opt->val) = val;
1887 break;
1888
1889 case 2:
1890 put_unaligned_le16(val, opt->val);
1891 break;
1892
1893 case 4:
1894 put_unaligned_le32(val, opt->val);
1895 break;
1896
1897 default:
1898 memcpy(opt->val, (void *) val, len);
1899 break;
1900 }
1901
1902 *ptr += L2CAP_CONF_OPT_SIZE + len;
1903 }
1904
1905 static void l2cap_add_opt_efs(void **ptr, struct l2cap_chan *chan)
1906 {
1907 struct l2cap_conf_efs efs;
1908
1909 switch(chan->mode) {
1910 case L2CAP_MODE_ERTM:
1911 efs.id = chan->local_id;
1912 efs.stype = chan->local_stype;
1913 efs.msdu = cpu_to_le16(chan->local_msdu);
1914 efs.sdu_itime = cpu_to_le32(chan->local_sdu_itime);
1915 efs.acc_lat = cpu_to_le32(L2CAP_DEFAULT_ACC_LAT);
1916 efs.flush_to = cpu_to_le32(L2CAP_DEFAULT_FLUSH_TO);
1917 break;
1918
1919 case L2CAP_MODE_STREAMING:
1920 efs.id = 1;
1921 efs.stype = L2CAP_SERV_BESTEFFORT;
1922 efs.msdu = cpu_to_le16(chan->local_msdu);
1923 efs.sdu_itime = cpu_to_le32(chan->local_sdu_itime);
1924 efs.acc_lat = 0;
1925 efs.flush_to = 0;
1926 break;
1927
1928 default:
1929 return;
1930 }
1931
1932 l2cap_add_conf_opt(ptr, L2CAP_CONF_EFS, sizeof(efs),
1933 (unsigned long) &efs);
1934 }
1935
1936 static void l2cap_ack_timeout(unsigned long arg)
1937 {
1938 struct l2cap_chan *chan = (void *) arg;
1939
1940 bh_lock_sock(chan->sk);
1941 l2cap_send_ack(chan);
1942 bh_unlock_sock(chan->sk);
1943 }
1944
1945 static inline void l2cap_ertm_init(struct l2cap_chan *chan)
1946 {
1947 struct sock *sk = chan->sk;
1948
1949 chan->expected_ack_seq = 0;
1950 chan->unacked_frames = 0;
1951 chan->buffer_seq = 0;
1952 chan->num_acked = 0;
1953 chan->frames_sent = 0;
1954
1955 setup_timer(&chan->retrans_timer, l2cap_retrans_timeout,
1956 (unsigned long) chan);
1957 setup_timer(&chan->monitor_timer, l2cap_monitor_timeout,
1958 (unsigned long) chan);
1959 setup_timer(&chan->ack_timer, l2cap_ack_timeout, (unsigned long) chan);
1960
1961 skb_queue_head_init(&chan->srej_q);
1962
1963 INIT_LIST_HEAD(&chan->srej_l);
1964
1965
1966 sk->sk_backlog_rcv = l2cap_ertm_data_rcv;
1967 }
1968
1969 static inline __u8 l2cap_select_mode(__u8 mode, __u16 remote_feat_mask)
1970 {
1971 switch (mode) {
1972 case L2CAP_MODE_STREAMING:
1973 case L2CAP_MODE_ERTM:
1974 if (l2cap_mode_supported(mode, remote_feat_mask))
1975 return mode;
1976 /* fall through */
1977 default:
1978 return L2CAP_MODE_BASIC;
1979 }
1980 }
1981
1982 static inline bool __l2cap_ews_supported(struct l2cap_chan *chan)
1983 {
1984 return enable_hs && chan->conn->feat_mask & L2CAP_FEAT_EXT_WINDOW;
1985 }
1986
1987 static inline bool __l2cap_efs_supported(struct l2cap_chan *chan)
1988 {
1989 return enable_hs && chan->conn->feat_mask & L2CAP_FEAT_EXT_FLOW;
1990 }
1991
1992 static inline void l2cap_txwin_setup(struct l2cap_chan *chan)
1993 {
1994 if (chan->tx_win > L2CAP_DEFAULT_TX_WINDOW &&
1995 __l2cap_ews_supported(chan)) {
1996 /* use extended control field */
1997 set_bit(FLAG_EXT_CTRL, &chan->flags);
1998 chan->tx_win_max = L2CAP_DEFAULT_EXT_WINDOW;
1999 } else {
2000 chan->tx_win = min_t(u16, chan->tx_win,
2001 L2CAP_DEFAULT_TX_WINDOW);
2002 chan->tx_win_max = L2CAP_DEFAULT_TX_WINDOW;
2003 }
2004 }
2005
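/* Build the local configuration request: an MTU option when the MTU is
 * not the default, an RFC option describing the selected mode, and
 * optional EFS, FCS and extended window size options. */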
2006 static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data)
2007 {
2008 struct l2cap_conf_req *req = data;
2009 struct l2cap_conf_rfc rfc = { .mode = chan->mode };
2010 void *ptr = req->data;
2011 u16 size;
2012
2013 BT_DBG("chan %p", chan);
2014
2015 if (chan->num_conf_req || chan->num_conf_rsp)
2016 goto done;
2017
2018 switch (chan->mode) {
2019 case L2CAP_MODE_STREAMING:
2020 case L2CAP_MODE_ERTM:
2021 if (test_bit(CONF_STATE2_DEVICE, &chan->conf_state))
2022 break;
2023
2024 if (__l2cap_efs_supported(chan))
2025 set_bit(FLAG_EFS_ENABLE, &chan->flags);
2026
2027 /* fall through */
2028 default:
2029 chan->mode = l2cap_select_mode(rfc.mode, chan->conn->feat_mask);
2030 break;
2031 }
2032
2033 done:
2034 if (chan->imtu != L2CAP_DEFAULT_MTU)
2035 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->imtu);
2036
2037 switch (chan->mode) {
2038 case L2CAP_MODE_BASIC:
2039 if (!(chan->conn->feat_mask & L2CAP_FEAT_ERTM) &&
2040 !(chan->conn->feat_mask & L2CAP_FEAT_STREAMING))
2041 break;
2042
2043 rfc.mode = L2CAP_MODE_BASIC;
2044 rfc.txwin_size = 0;
2045 rfc.max_transmit = 0;
2046 rfc.retrans_timeout = 0;
2047 rfc.monitor_timeout = 0;
2048 rfc.max_pdu_size = 0;
2049
2050 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
2051 (unsigned long) &rfc);
2052 break;
2053
2054 case L2CAP_MODE_ERTM:
2055 rfc.mode = L2CAP_MODE_ERTM;
2056 rfc.max_transmit = chan->max_tx;
2057 rfc.retrans_timeout = 0;
2058 rfc.monitor_timeout = 0;
2059
2060 size = min_t(u16, L2CAP_DEFAULT_MAX_PDU_SIZE, chan->conn->mtu -
2061 L2CAP_EXT_HDR_SIZE -
2062 L2CAP_SDULEN_SIZE -
2063 L2CAP_FCS_SIZE);
2064 rfc.max_pdu_size = cpu_to_le16(size);
2065
2066 l2cap_txwin_setup(chan);
2067
2068 rfc.txwin_size = min_t(u16, chan->tx_win,
2069 L2CAP_DEFAULT_TX_WINDOW);
2070
2071 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
2072 (unsigned long) &rfc);
2073
2074 if (test_bit(FLAG_EFS_ENABLE, &chan->flags))
2075 l2cap_add_opt_efs(&ptr, chan);
2076
2077 if (!(chan->conn->feat_mask & L2CAP_FEAT_FCS))
2078 break;
2079
2080 if (chan->fcs == L2CAP_FCS_NONE ||
2081 test_bit(CONF_NO_FCS_RECV, &chan->conf_state)) {
2082 chan->fcs = L2CAP_FCS_NONE;
2083 l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1, chan->fcs);
2084 }
2085
2086 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
2087 l2cap_add_conf_opt(&ptr, L2CAP_CONF_EWS, 2,
2088 chan->tx_win);
2089 break;
2090
2091 case L2CAP_MODE_STREAMING:
2092 rfc.mode = L2CAP_MODE_STREAMING;
2093 rfc.txwin_size = 0;
2094 rfc.max_transmit = 0;
2095 rfc.retrans_timeout = 0;
2096 rfc.monitor_timeout = 0;
2097
2098 size = min_t(u16, L2CAP_DEFAULT_MAX_PDU_SIZE, chan->conn->mtu -
2099 L2CAP_EXT_HDR_SIZE -
2100 L2CAP_SDULEN_SIZE -
2101 L2CAP_FCS_SIZE);
2102 rfc.max_pdu_size = cpu_to_le16(size);
2103
2104 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
2105 (unsigned long) &rfc);
2106
2107 if (test_bit(FLAG_EFS_ENABLE, &chan->flags))
2108 l2cap_add_opt_efs(&ptr, chan);
2109
2110 if (!(chan->conn->feat_mask & L2CAP_FEAT_FCS))
2111 break;
2112
2113 if (chan->fcs == L2CAP_FCS_NONE ||
2114 test_bit(CONF_NO_FCS_RECV, &chan->conf_state)) {
2115 chan->fcs = L2CAP_FCS_NONE;
2116 l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1, chan->fcs);
2117 }
2118 break;
2119 }
2120
2121 req->dcid = cpu_to_le16(chan->dcid);
2122 req->flags = cpu_to_le16(0);
2123
2124 return ptr - data;
2125 }
2126
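/* Parse the peer's configuration request buffered in chan->conf_req
 * and build the response in 'data': unknown non-hint options yield
 * L2CAP_CONF_UNKNOWN, mode or EFS mismatches refuse the connection,
 * and accepted values are echoed back in the response. */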
2127 static int l2cap_parse_conf_req(struct l2cap_chan *chan, void *data)
2128 {
2129 struct l2cap_conf_rsp *rsp = data;
2130 void *ptr = rsp->data;
2131 void *req = chan->conf_req;
2132 int len = chan->conf_len;
2133 int type, hint, olen;
2134 unsigned long val;
2135 struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
2136 struct l2cap_conf_efs efs;
2137 u8 remote_efs = 0;
2138 u16 mtu = L2CAP_DEFAULT_MTU;
2139 u16 result = L2CAP_CONF_SUCCESS;
2140 u16 size;
2141
2142 BT_DBG("chan %p", chan);
2143
2144 while (len >= L2CAP_CONF_OPT_SIZE) {
2145 len -= l2cap_get_conf_opt(&req, &type, &olen, &val);
2146
2147 hint = type & L2CAP_CONF_HINT;
2148 type &= L2CAP_CONF_MASK;
2149
2150 switch (type) {
2151 case L2CAP_CONF_MTU:
2152 mtu = val;
2153 break;
2154
2155 case L2CAP_CONF_FLUSH_TO:
2156 chan->flush_to = val;
2157 break;
2158
2159 case L2CAP_CONF_QOS:
2160 break;
2161
2162 case L2CAP_CONF_RFC:
2163 if (olen == sizeof(rfc))
2164 memcpy(&rfc, (void *) val, olen);
2165 break;
2166
2167 case L2CAP_CONF_FCS:
2168 if (val == L2CAP_FCS_NONE)
2169 set_bit(CONF_NO_FCS_RECV, &chan->conf_state);
2170 break;
2171
2172 case L2CAP_CONF_EFS:
2173 remote_efs = 1;
2174 if (olen == sizeof(efs))
2175 memcpy(&efs, (void *) val, olen);
2176 break;
2177
2178 case L2CAP_CONF_EWS:
2179 if (!enable_hs)
2180 return -ECONNREFUSED;
2181
2182 set_bit(FLAG_EXT_CTRL, &chan->flags);
2183 set_bit(CONF_EWS_RECV, &chan->conf_state);
2184 chan->tx_win_max = L2CAP_DEFAULT_EXT_WINDOW;
2185 chan->remote_tx_win = val;
2186 break;
2187
2188 default:
2189 if (hint)
2190 break;
2191
2192 result = L2CAP_CONF_UNKNOWN;
2193 *((u8 *) ptr++) = type;
2194 break;
2195 }
2196 }
2197
2198 if (chan->num_conf_rsp || chan->num_conf_req > 1)
2199 goto done;
2200
2201 switch (chan->mode) {
2202 case L2CAP_MODE_STREAMING:
2203 case L2CAP_MODE_ERTM:
2204 if (!test_bit(CONF_STATE2_DEVICE, &chan->conf_state)) {
2205 chan->mode = l2cap_select_mode(rfc.mode,
2206 chan->conn->feat_mask);
2207 break;
2208 }
2209
2210 if (remote_efs) {
2211 if (__l2cap_efs_supported(chan))
2212 set_bit(FLAG_EFS_ENABLE, &chan->flags);
2213 else
2214 return -ECONNREFUSED;
2215 }
2216
2217 if (chan->mode != rfc.mode)
2218 return -ECONNREFUSED;
2219
2220 break;
2221 }
2222
2223 done:
2224 if (chan->mode != rfc.mode) {
2225 result = L2CAP_CONF_UNACCEPT;
2226 rfc.mode = chan->mode;
2227
2228 if (chan->num_conf_rsp == 1)
2229 return -ECONNREFUSED;
2230
2231 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
2232 sizeof(rfc), (unsigned long) &rfc);
2233 }
2234
2235 if (result == L2CAP_CONF_SUCCESS) {
2236 /* Configure output options and let the other side know
2237 * which ones we don't like. */
2238
2239 if (mtu < L2CAP_DEFAULT_MIN_MTU)
2240 result = L2CAP_CONF_UNACCEPT;
2241 else {
2242 chan->omtu = mtu;
2243 set_bit(CONF_MTU_DONE, &chan->conf_state);
2244 }
2245 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->omtu);
2246
2247 if (remote_efs) {
2248 if (chan->local_stype != L2CAP_SERV_NOTRAFIC &&
2249 efs.stype != L2CAP_SERV_NOTRAFIC &&
2250 efs.stype != chan->local_stype) {
2251
2252 result = L2CAP_CONF_UNACCEPT;
2253
2254 if (chan->num_conf_req >= 1)
2255 return -ECONNREFUSED;
2256
2257 l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS,
2258 sizeof(efs),
2259 (unsigned long) &efs);
2260 } else {
2261 /* Send PENDING Conf Rsp */
2262 result = L2CAP_CONF_PENDING;
2263 set_bit(CONF_LOC_CONF_PEND, &chan->conf_state);
2264 }
2265 }
2266
2267 switch (rfc.mode) {
2268 case L2CAP_MODE_BASIC:
2269 chan->fcs = L2CAP_FCS_NONE;
2270 set_bit(CONF_MODE_DONE, &chan->conf_state);
2271 break;
2272
2273 case L2CAP_MODE_ERTM:
2274 if (!test_bit(CONF_EWS_RECV, &chan->conf_state))
2275 chan->remote_tx_win = rfc.txwin_size;
2276 else
2277 rfc.txwin_size = L2CAP_DEFAULT_TX_WINDOW;
2278
2279 chan->remote_max_tx = rfc.max_transmit;
2280
2281 size = min_t(u16, le16_to_cpu(rfc.max_pdu_size),
2282 chan->conn->mtu -
2283 L2CAP_EXT_HDR_SIZE -
2284 L2CAP_SDULEN_SIZE -
2285 L2CAP_FCS_SIZE);
2286 rfc.max_pdu_size = cpu_to_le16(size);
2287 chan->remote_mps = size;
2288
2289 			rfc.retrans_timeout =
2290 				cpu_to_le16(L2CAP_DEFAULT_RETRANS_TO);
2291 			rfc.monitor_timeout =
2292 				cpu_to_le16(L2CAP_DEFAULT_MONITOR_TO);
2293
2294 set_bit(CONF_MODE_DONE, &chan->conf_state);
2295
2296 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
2297 sizeof(rfc), (unsigned long) &rfc);
2298
2299 if (test_bit(FLAG_EFS_ENABLE, &chan->flags)) {
2300 chan->remote_id = efs.id;
2301 chan->remote_stype = efs.stype;
2302 chan->remote_msdu = le16_to_cpu(efs.msdu);
2303 chan->remote_flush_to =
2304 le32_to_cpu(efs.flush_to);
2305 chan->remote_acc_lat =
2306 le32_to_cpu(efs.acc_lat);
2307 chan->remote_sdu_itime =
2308 le32_to_cpu(efs.sdu_itime);
2309 l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS,
2310 sizeof(efs), (unsigned long) &efs);
2311 }
2312 break;
2313
2314 case L2CAP_MODE_STREAMING:
2315 size = min_t(u16, le16_to_cpu(rfc.max_pdu_size),
2316 chan->conn->mtu -
2317 L2CAP_EXT_HDR_SIZE -
2318 L2CAP_SDULEN_SIZE -
2319 L2CAP_FCS_SIZE);
2320 rfc.max_pdu_size = cpu_to_le16(size);
2321 chan->remote_mps = size;
2322
2323 set_bit(CONF_MODE_DONE, &chan->conf_state);
2324
2325 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
2326 sizeof(rfc), (unsigned long) &rfc);
2327
2328 break;
2329
2330 default:
2331 result = L2CAP_CONF_UNACCEPT;
2332
2333 memset(&rfc, 0, sizeof(rfc));
2334 rfc.mode = chan->mode;
2335 }
2336
2337 if (result == L2CAP_CONF_SUCCESS)
2338 set_bit(CONF_OUTPUT_DONE, &chan->conf_state);
2339 }
2340 rsp->scid = cpu_to_le16(chan->dcid);
2341 rsp->result = cpu_to_le16(result);
2342 rsp->flags = cpu_to_le16(0x0000);
2343
2344 return ptr - data;
2345 }
2346
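/* Process the options in a Configuration Response from the remote,
 * apply any accepted parameters to the channel and build a follow-up
 * Configuration Request in @data, updating *result as needed.
 * Returns the request length or a negative error.
 */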
2347 static int l2cap_parse_conf_rsp(struct l2cap_chan *chan, void *rsp, int len, void *data, u16 *result)
2348 {
2349 struct l2cap_conf_req *req = data;
2350 void *ptr = req->data;
2351 int type, olen;
2352 unsigned long val;
2353 	/* Use sane defaults in case the remote omits the RFC option */
 	struct l2cap_conf_rfc rfc = {
 		.mode = chan->mode,
 		.retrans_timeout = cpu_to_le16(L2CAP_DEFAULT_RETRANS_TO),
 		.monitor_timeout = cpu_to_le16(L2CAP_DEFAULT_MONITOR_TO),
 		.max_pdu_size = cpu_to_le16(chan->mps),
 	};
2354 struct l2cap_conf_efs efs;
2355
2356 BT_DBG("chan %p, rsp %p, len %d, req %p", chan, rsp, len, data);
2357
2358 while (len >= L2CAP_CONF_OPT_SIZE) {
2359 len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);
2360
2361 switch (type) {
2362 case L2CAP_CONF_MTU:
2363 if (val < L2CAP_DEFAULT_MIN_MTU) {
2364 *result = L2CAP_CONF_UNACCEPT;
2365 chan->imtu = L2CAP_DEFAULT_MIN_MTU;
2366 } else
2367 chan->imtu = val;
2368 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->imtu);
2369 break;
2370
2371 case L2CAP_CONF_FLUSH_TO:
2372 chan->flush_to = val;
2373 l2cap_add_conf_opt(&ptr, L2CAP_CONF_FLUSH_TO,
2374 2, chan->flush_to);
2375 break;
2376
2377 case L2CAP_CONF_RFC:
2378 if (olen == sizeof(rfc))
2379 memcpy(&rfc, (void *)val, olen);
2380
2381 if (test_bit(CONF_STATE2_DEVICE, &chan->conf_state) &&
2382 rfc.mode != chan->mode)
2383 return -ECONNREFUSED;
2384
2385 chan->fcs = 0;
2386
2387 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
2388 sizeof(rfc), (unsigned long) &rfc);
2389 break;
2390
2391 case L2CAP_CONF_EWS:
2392 chan->tx_win = min_t(u16, val,
2393 L2CAP_DEFAULT_EXT_WINDOW);
2394 l2cap_add_conf_opt(&ptr, L2CAP_CONF_EWS, 2,
2395 chan->tx_win);
2396 break;
2397
2398 		case L2CAP_CONF_EFS:
2399 			if (olen == sizeof(efs)) {
2400 				memcpy(&efs, (void *)val, olen);
2401 
2402 				if (chan->local_stype != L2CAP_SERV_NOTRAFIC &&
2403 				    efs.stype != L2CAP_SERV_NOTRAFIC &&
2404 				    efs.stype != chan->local_stype)
2405 					return -ECONNREFUSED;
2406 
2407 				l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS,
2408 					sizeof(efs), (unsigned long) &efs);
2409 			}
 			break;
2410 }
2411 }
2412
2413 if (chan->mode == L2CAP_MODE_BASIC && chan->mode != rfc.mode)
2414 return -ECONNREFUSED;
2415
2416 chan->mode = rfc.mode;
2417
2418 if (*result == L2CAP_CONF_SUCCESS || *result == L2CAP_CONF_PENDING) {
2419 switch (rfc.mode) {
2420 case L2CAP_MODE_ERTM:
2421 chan->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
2422 chan->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
2423 chan->mps = le16_to_cpu(rfc.max_pdu_size);
2424
2425 if (test_bit(FLAG_EFS_ENABLE, &chan->flags)) {
2426 chan->local_msdu = le16_to_cpu(efs.msdu);
2427 chan->local_sdu_itime =
2428 le32_to_cpu(efs.sdu_itime);
2429 chan->local_acc_lat = le32_to_cpu(efs.acc_lat);
2430 chan->local_flush_to =
2431 le32_to_cpu(efs.flush_to);
2432 }
2433 break;
2434
2435 case L2CAP_MODE_STREAMING:
2436 chan->mps = le16_to_cpu(rfc.max_pdu_size);
2437 }
2438 }
2439
2440 req->dcid = cpu_to_le16(chan->dcid);
2441 req->flags = cpu_to_le16(0x0000);
2442
2443 return ptr - data;
2444 }
2445
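/* Build a Configuration Response carrying only the given result and
 * flags (no options).  Returns the number of bytes to send.
 */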
2446 static int l2cap_build_conf_rsp(struct l2cap_chan *chan, void *data, u16 result, u16 flags)
2447 {
2448 struct l2cap_conf_rsp *rsp = data;
2449 void *ptr = rsp->data;
2450
2451 BT_DBG("chan %p", chan);
2452
2453 rsp->scid = cpu_to_le16(chan->dcid);
2454 rsp->result = cpu_to_le16(result);
2455 rsp->flags = cpu_to_le16(flags);
2456
2457 return ptr - data;
2458 }
2459
2460 void __l2cap_connect_rsp_defer(struct l2cap_chan *chan)
2461 {
2462 struct l2cap_conn_rsp rsp;
2463 struct l2cap_conn *conn = chan->conn;
2464 u8 buf[128];
2465
2466 rsp.scid = cpu_to_le16(chan->dcid);
2467 rsp.dcid = cpu_to_le16(chan->scid);
2468 rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
2469 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
2470 l2cap_send_cmd(conn, chan->ident,
2471 L2CAP_CONN_RSP, sizeof(rsp), &rsp);
2472
2473 if (test_and_set_bit(CONF_REQ_SENT, &chan->conf_state))
2474 return;
2475
2476 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
2477 l2cap_build_conf_req(chan, buf), buf);
2478 chan->num_conf_req++;
2479 }
2480
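/* Extract the RFC option from a successful Configuration Response and
 * cache the negotiated ERTM/streaming parameters on the channel.
 */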
2481 static void l2cap_conf_rfc_get(struct l2cap_chan *chan, void *rsp, int len)
2482 {
2483 int type, olen;
2484 unsigned long val;
2485 	/* Use sane defaults in case the remote omits the RFC option */
 	struct l2cap_conf_rfc rfc = {
 		.mode = chan->mode,
 		.retrans_timeout = cpu_to_le16(L2CAP_DEFAULT_RETRANS_TO),
 		.monitor_timeout = cpu_to_le16(L2CAP_DEFAULT_MONITOR_TO),
 		.max_pdu_size = cpu_to_le16(chan->mps),
 	};
2486
2487 BT_DBG("chan %p, rsp %p, len %d", chan, rsp, len);
2488
2489 if ((chan->mode != L2CAP_MODE_ERTM) && (chan->mode != L2CAP_MODE_STREAMING))
2490 return;
2491
2492 while (len >= L2CAP_CONF_OPT_SIZE) {
2493 len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);
2494
2495 switch (type) {
2496 case L2CAP_CONF_RFC:
2497 if (olen == sizeof(rfc))
2498 memcpy(&rfc, (void *)val, olen);
2499 goto done;
2500 }
2501 }
2502
2503 done:
2504 switch (rfc.mode) {
2505 case L2CAP_MODE_ERTM:
2506 chan->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
2507 chan->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
2508 chan->mps = le16_to_cpu(rfc.max_pdu_size);
2509 break;
2510 case L2CAP_MODE_STREAMING:
2511 chan->mps = le16_to_cpu(rfc.max_pdu_size);
2512 }
2513 }
2514
2515 static inline int l2cap_command_rej(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2516 {
2517 struct l2cap_cmd_rej_unk *rej = (struct l2cap_cmd_rej_unk *) data;
2518
2519 if (rej->reason != L2CAP_REJ_NOT_UNDERSTOOD)
2520 return 0;
2521
2522 if ((conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) &&
2523 cmd->ident == conn->info_ident) {
2524 del_timer(&conn->info_timer);
2525
2526 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
2527 conn->info_ident = 0;
2528
2529 l2cap_conn_start(conn);
2530 }
2531
2532 return 0;
2533 }
2534
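/* Handle an incoming Connection Request: locate a listening channel
 * for the PSM, perform security and backlog checks, create the new
 * channel and send a Connection Response, kicking off configuration
 * when possible.
 */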
2535 static inline int l2cap_connect_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2536 {
2537 struct l2cap_conn_req *req = (struct l2cap_conn_req *) data;
2538 struct l2cap_conn_rsp rsp;
2539 struct l2cap_chan *chan = NULL, *pchan;
2540 struct sock *parent, *sk = NULL;
2541 int result, status = L2CAP_CS_NO_INFO;
2542
2543 u16 dcid = 0, scid = __le16_to_cpu(req->scid);
2544 __le16 psm = req->psm;
2545
2546 BT_DBG("psm 0x%2.2x scid 0x%4.4x", psm, scid);
2547
2548 /* Check if we have socket listening on psm */
2549 pchan = l2cap_global_chan_by_psm(BT_LISTEN, psm, conn->src);
2550 if (!pchan) {
2551 result = L2CAP_CR_BAD_PSM;
2552 goto sendresp;
2553 }
2554
2555 parent = pchan->sk;
2556
2557 bh_lock_sock(parent);
2558
2559 /* Check if the ACL is secure enough (if not SDP) */
2560 if (psm != cpu_to_le16(0x0001) &&
2561 !hci_conn_check_link_mode(conn->hcon)) {
2562 conn->disc_reason = HCI_ERROR_AUTH_FAILURE;
2563 result = L2CAP_CR_SEC_BLOCK;
2564 goto response;
2565 }
2566
2567 result = L2CAP_CR_NO_MEM;
2568
2569 /* Check for backlog size */
2570 if (sk_acceptq_is_full(parent)) {
2571 BT_DBG("backlog full %d", parent->sk_ack_backlog);
2572 goto response;
2573 }
2574
2575 chan = pchan->ops->new_connection(pchan->data);
2576 if (!chan)
2577 goto response;
2578
2579 sk = chan->sk;
2580
2581 write_lock_bh(&conn->chan_lock);
2582
2583 /* Check if we already have channel with that dcid */
2584 if (__l2cap_get_chan_by_dcid(conn, scid)) {
2585 write_unlock_bh(&conn->chan_lock);
2586 sock_set_flag(sk, SOCK_ZAPPED);
2587 chan->ops->close(chan->data);
2588 goto response;
2589 }
2590
2591 hci_conn_hold(conn->hcon);
2592
2593 bacpy(&bt_sk(sk)->src, conn->src);
2594 bacpy(&bt_sk(sk)->dst, conn->dst);
2595 chan->psm = psm;
2596 chan->dcid = scid;
2597
2598 bt_accept_enqueue(parent, sk);
2599
2600 __l2cap_chan_add(conn, chan);
2601
2602 dcid = chan->scid;
2603
2604 __set_chan_timer(chan, sk->sk_sndtimeo);
2605
2606 chan->ident = cmd->ident;
2607
2608 if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE) {
2609 if (l2cap_check_security(chan)) {
2610 if (bt_sk(sk)->defer_setup) {
2611 l2cap_state_change(chan, BT_CONNECT2);
2612 result = L2CAP_CR_PEND;
2613 status = L2CAP_CS_AUTHOR_PEND;
2614 parent->sk_data_ready(parent, 0);
2615 } else {
2616 l2cap_state_change(chan, BT_CONFIG);
2617 result = L2CAP_CR_SUCCESS;
2618 status = L2CAP_CS_NO_INFO;
2619 }
2620 } else {
2621 l2cap_state_change(chan, BT_CONNECT2);
2622 result = L2CAP_CR_PEND;
2623 status = L2CAP_CS_AUTHEN_PEND;
2624 }
2625 } else {
2626 l2cap_state_change(chan, BT_CONNECT2);
2627 result = L2CAP_CR_PEND;
2628 status = L2CAP_CS_NO_INFO;
2629 }
2630
2631 write_unlock_bh(&conn->chan_lock);
2632
2633 response:
2634 bh_unlock_sock(parent);
2635
2636 sendresp:
2637 rsp.scid = cpu_to_le16(scid);
2638 rsp.dcid = cpu_to_le16(dcid);
2639 rsp.result = cpu_to_le16(result);
2640 rsp.status = cpu_to_le16(status);
2641 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONN_RSP, sizeof(rsp), &rsp);
2642
2643 if (result == L2CAP_CR_PEND && status == L2CAP_CS_NO_INFO) {
2644 struct l2cap_info_req info;
2645 info.type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
2646
2647 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
2648 conn->info_ident = l2cap_get_ident(conn);
2649
2650 mod_timer(&conn->info_timer, jiffies +
2651 msecs_to_jiffies(L2CAP_INFO_TIMEOUT));
2652
2653 l2cap_send_cmd(conn, conn->info_ident,
2654 L2CAP_INFO_REQ, sizeof(info), &info);
2655 }
2656
2657 if (chan && !test_bit(CONF_REQ_SENT, &chan->conf_state) &&
2658 result == L2CAP_CR_SUCCESS) {
2659 u8 buf[128];
2660 set_bit(CONF_REQ_SENT, &chan->conf_state);
2661 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
2662 l2cap_build_conf_req(chan, buf), buf);
2663 chan->num_conf_req++;
2664 }
2665
2666 return 0;
2667 }
2668
2669 static inline int l2cap_connect_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2670 {
2671 struct l2cap_conn_rsp *rsp = (struct l2cap_conn_rsp *) data;
2672 u16 scid, dcid, result, status;
2673 struct l2cap_chan *chan;
2674 struct sock *sk;
2675 u8 req[128];
2676
2677 scid = __le16_to_cpu(rsp->scid);
2678 dcid = __le16_to_cpu(rsp->dcid);
2679 result = __le16_to_cpu(rsp->result);
2680 status = __le16_to_cpu(rsp->status);
2681
2682 BT_DBG("dcid 0x%4.4x scid 0x%4.4x result 0x%2.2x status 0x%2.2x", dcid, scid, result, status);
2683
2684 if (scid) {
2685 chan = l2cap_get_chan_by_scid(conn, scid);
2686 if (!chan)
2687 return -EFAULT;
2688 } else {
2689 chan = l2cap_get_chan_by_ident(conn, cmd->ident);
2690 if (!chan)
2691 return -EFAULT;
2692 }
2693
2694 sk = chan->sk;
2695
2696 switch (result) {
2697 case L2CAP_CR_SUCCESS:
2698 l2cap_state_change(chan, BT_CONFIG);
2699 chan->ident = 0;
2700 chan->dcid = dcid;
2701 clear_bit(CONF_CONNECT_PEND, &chan->conf_state);
2702
2703 if (test_and_set_bit(CONF_REQ_SENT, &chan->conf_state))
2704 break;
2705
2706 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
2707 l2cap_build_conf_req(chan, req), req);
2708 chan->num_conf_req++;
2709 break;
2710
2711 case L2CAP_CR_PEND:
2712 set_bit(CONF_CONNECT_PEND, &chan->conf_state);
2713 break;
2714
2715 default:
2716 /* don't delete l2cap channel if sk is owned by user */
2717 if (sock_owned_by_user(sk)) {
2718 l2cap_state_change(chan, BT_DISCONN);
2719 __clear_chan_timer(chan);
2720 __set_chan_timer(chan, L2CAP_DISC_TIMEOUT);
2721 break;
2722 }
2723
2724 l2cap_chan_del(chan, ECONNREFUSED);
2725 break;
2726 }
2727
2728 bh_unlock_sock(sk);
2729 return 0;
2730 }
2731
2732 static inline void set_default_fcs(struct l2cap_chan *chan)
2733 {
2734 /* FCS is enabled only in ERTM or streaming mode, if one or both
2735 * sides request it.
2736 */
2737 if (chan->mode != L2CAP_MODE_ERTM && chan->mode != L2CAP_MODE_STREAMING)
2738 chan->fcs = L2CAP_FCS_NONE;
2739 else if (!test_bit(CONF_NO_FCS_RECV, &chan->conf_state))
2740 chan->fcs = L2CAP_FCS_CRC16;
2741 }
2742
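/* Handle a Configuration Request: accumulate option data across
 * continuation packets, parse it once complete, answer with a
 * Configuration Response and move the channel towards BT_CONNECTED
 * once both directions are configured.
 */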
2743 static inline int l2cap_config_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u16 cmd_len, u8 *data)
2744 {
2745 struct l2cap_conf_req *req = (struct l2cap_conf_req *) data;
2746 u16 dcid, flags;
2747 u8 rsp[64];
2748 struct l2cap_chan *chan;
2749 struct sock *sk;
2750 int len;
2751
2752 dcid = __le16_to_cpu(req->dcid);
2753 flags = __le16_to_cpu(req->flags);
2754
2755 BT_DBG("dcid 0x%4.4x flags 0x%2.2x", dcid, flags);
2756
2757 chan = l2cap_get_chan_by_scid(conn, dcid);
2758 if (!chan)
2759 return -ENOENT;
2760
2761 sk = chan->sk;
2762
2763 if (chan->state != BT_CONFIG && chan->state != BT_CONNECT2) {
2764 struct l2cap_cmd_rej_cid rej;
2765
2766 rej.reason = cpu_to_le16(L2CAP_REJ_INVALID_CID);
2767 rej.scid = cpu_to_le16(chan->scid);
2768 rej.dcid = cpu_to_le16(chan->dcid);
2769
2770 l2cap_send_cmd(conn, cmd->ident, L2CAP_COMMAND_REJ,
2771 sizeof(rej), &rej);
2772 goto unlock;
2773 }
2774
2775 /* Reject if config buffer is too small. */
2776 len = cmd_len - sizeof(*req);
2777 if (len < 0 || chan->conf_len + len > sizeof(chan->conf_req)) {
2778 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
2779 l2cap_build_conf_rsp(chan, rsp,
2780 L2CAP_CONF_REJECT, flags), rsp);
2781 goto unlock;
2782 }
2783
2784 /* Store config. */
2785 memcpy(chan->conf_req + chan->conf_len, req->data, len);
2786 chan->conf_len += len;
2787
2788 if (flags & 0x0001) {
2789 /* Incomplete config. Send empty response. */
2790 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
2791 l2cap_build_conf_rsp(chan, rsp,
2792 L2CAP_CONF_SUCCESS, 0x0001), rsp);
2793 goto unlock;
2794 }
2795
2796 /* Complete config. */
2797 len = l2cap_parse_conf_req(chan, rsp);
2798 if (len < 0) {
2799 l2cap_send_disconn_req(conn, chan, ECONNRESET);
2800 goto unlock;
2801 }
2802
2803 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP, len, rsp);
2804 chan->num_conf_rsp++;
2805
2806 /* Reset config buffer. */
2807 chan->conf_len = 0;
2808
2809 if (!test_bit(CONF_OUTPUT_DONE, &chan->conf_state))
2810 goto unlock;
2811
2812 if (test_bit(CONF_INPUT_DONE, &chan->conf_state)) {
2813 set_default_fcs(chan);
2814
2815 l2cap_state_change(chan, BT_CONNECTED);
2816
2817 chan->next_tx_seq = 0;
2818 chan->expected_tx_seq = 0;
2819 skb_queue_head_init(&chan->tx_q);
2820 if (chan->mode == L2CAP_MODE_ERTM)
2821 l2cap_ertm_init(chan);
2822
2823 l2cap_chan_ready(sk);
2824 goto unlock;
2825 }
2826
2827 if (!test_and_set_bit(CONF_REQ_SENT, &chan->conf_state)) {
2828 u8 buf[64];
2829 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
2830 l2cap_build_conf_req(chan, buf), buf);
2831 chan->num_conf_req++;
2832 }
2833
2834 	/* Got Conf Rsp PENDING from remote side and assume we sent
2835 	 * Conf Rsp PENDING in the code above */
2836 if (test_bit(CONF_REM_CONF_PEND, &chan->conf_state) &&
2837 test_bit(CONF_LOC_CONF_PEND, &chan->conf_state)) {
2838
2839 /* check compatibility */
2840
2841 clear_bit(CONF_LOC_CONF_PEND, &chan->conf_state);
2842 set_bit(CONF_OUTPUT_DONE, &chan->conf_state);
2843
2844 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
2845 l2cap_build_conf_rsp(chan, rsp,
2846 L2CAP_CONF_SUCCESS, 0x0000), rsp);
2847 }
2848
2849 unlock:
2850 bh_unlock_sock(sk);
2851 return 0;
2852 }
2853
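/* Handle a Configuration Response: on success cache the negotiated
 * parameters, on PENDING or UNACCEPT renegotiate, otherwise tear the
 * channel down.
 */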
2854 static inline int l2cap_config_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2855 {
2856 struct l2cap_conf_rsp *rsp = (struct l2cap_conf_rsp *)data;
2857 u16 scid, flags, result;
2858 struct l2cap_chan *chan;
2859 struct sock *sk;
2860 	int len = __le16_to_cpu(cmd->len) - sizeof(*rsp);
2861
2862 scid = __le16_to_cpu(rsp->scid);
2863 flags = __le16_to_cpu(rsp->flags);
2864 result = __le16_to_cpu(rsp->result);
2865
2866 BT_DBG("scid 0x%4.4x flags 0x%2.2x result 0x%2.2x",
2867 scid, flags, result);
2868
2869 chan = l2cap_get_chan_by_scid(conn, scid);
2870 if (!chan)
2871 return 0;
2872
2873 sk = chan->sk;
2874
2875 switch (result) {
2876 case L2CAP_CONF_SUCCESS:
2877 l2cap_conf_rfc_get(chan, rsp->data, len);
2878 clear_bit(CONF_REM_CONF_PEND, &chan->conf_state);
2879 break;
2880
2881 case L2CAP_CONF_PENDING:
2882 set_bit(CONF_REM_CONF_PEND, &chan->conf_state);
2883
2884 if (test_bit(CONF_LOC_CONF_PEND, &chan->conf_state)) {
2885 char buf[64];
2886
2887 len = l2cap_parse_conf_rsp(chan, rsp->data, len,
2888 buf, &result);
2889 if (len < 0) {
2890 l2cap_send_disconn_req(conn, chan, ECONNRESET);
2891 goto done;
2892 }
2893
2894 /* check compatibility */
2895
2896 clear_bit(CONF_LOC_CONF_PEND, &chan->conf_state);
2897 set_bit(CONF_OUTPUT_DONE, &chan->conf_state);
2898
2899 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
2900 l2cap_build_conf_rsp(chan, buf,
2901 L2CAP_CONF_SUCCESS, 0x0000), buf);
2902 }
2903 goto done;
2904
2905 case L2CAP_CONF_UNACCEPT:
2906 if (chan->num_conf_rsp <= L2CAP_CONF_MAX_CONF_RSP) {
2907 char req[64];
2908
2909 if (len > sizeof(req) - sizeof(struct l2cap_conf_req)) {
2910 l2cap_send_disconn_req(conn, chan, ECONNRESET);
2911 goto done;
2912 }
2913
2914 /* throw out any old stored conf requests */
2915 result = L2CAP_CONF_SUCCESS;
2916 len = l2cap_parse_conf_rsp(chan, rsp->data, len,
2917 req, &result);
2918 if (len < 0) {
2919 l2cap_send_disconn_req(conn, chan, ECONNRESET);
2920 goto done;
2921 }
2922
2923 l2cap_send_cmd(conn, l2cap_get_ident(conn),
2924 L2CAP_CONF_REQ, len, req);
2925 chan->num_conf_req++;
2926 if (result != L2CAP_CONF_SUCCESS)
2927 goto done;
2928 break;
2929 }
2930
2931 default:
2932 sk->sk_err = ECONNRESET;
2933 __set_chan_timer(chan, L2CAP_DISC_REJ_TIMEOUT);
2934 l2cap_send_disconn_req(conn, chan, ECONNRESET);
2935 goto done;
2936 }
2937
2938 if (flags & 0x01)
2939 goto done;
2940
2941 set_bit(CONF_INPUT_DONE, &chan->conf_state);
2942
2943 if (test_bit(CONF_OUTPUT_DONE, &chan->conf_state)) {
2944 set_default_fcs(chan);
2945
2946 l2cap_state_change(chan, BT_CONNECTED);
2947 chan->next_tx_seq = 0;
2948 chan->expected_tx_seq = 0;
2949 skb_queue_head_init(&chan->tx_q);
2950 if (chan->mode == L2CAP_MODE_ERTM)
2951 l2cap_ertm_init(chan);
2952
2953 l2cap_chan_ready(sk);
2954 }
2955
2956 done:
2957 bh_unlock_sock(sk);
2958 return 0;
2959 }
2960
2961 static inline int l2cap_disconnect_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2962 {
2963 struct l2cap_disconn_req *req = (struct l2cap_disconn_req *) data;
2964 struct l2cap_disconn_rsp rsp;
2965 u16 dcid, scid;
2966 struct l2cap_chan *chan;
2967 struct sock *sk;
2968
2969 scid = __le16_to_cpu(req->scid);
2970 dcid = __le16_to_cpu(req->dcid);
2971
2972 BT_DBG("scid 0x%4.4x dcid 0x%4.4x", scid, dcid);
2973
2974 chan = l2cap_get_chan_by_scid(conn, dcid);
2975 if (!chan)
2976 return 0;
2977
2978 sk = chan->sk;
2979
2980 rsp.dcid = cpu_to_le16(chan->scid);
2981 rsp.scid = cpu_to_le16(chan->dcid);
2982 l2cap_send_cmd(conn, cmd->ident, L2CAP_DISCONN_RSP, sizeof(rsp), &rsp);
2983
2984 sk->sk_shutdown = SHUTDOWN_MASK;
2985
2986 /* don't delete l2cap channel if sk is owned by user */
2987 if (sock_owned_by_user(sk)) {
2988 l2cap_state_change(chan, BT_DISCONN);
2989 __clear_chan_timer(chan);
2990 __set_chan_timer(chan, L2CAP_DISC_TIMEOUT);
2991 bh_unlock_sock(sk);
2992 return 0;
2993 }
2994
2995 l2cap_chan_del(chan, ECONNRESET);
2996 bh_unlock_sock(sk);
2997
2998 chan->ops->close(chan->data);
2999 return 0;
3000 }
3001
3002 static inline int l2cap_disconnect_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
3003 {
3004 struct l2cap_disconn_rsp *rsp = (struct l2cap_disconn_rsp *) data;
3005 u16 dcid, scid;
3006 struct l2cap_chan *chan;
3007 struct sock *sk;
3008
3009 scid = __le16_to_cpu(rsp->scid);
3010 dcid = __le16_to_cpu(rsp->dcid);
3011
3012 BT_DBG("dcid 0x%4.4x scid 0x%4.4x", dcid, scid);
3013
3014 chan = l2cap_get_chan_by_scid(conn, scid);
3015 if (!chan)
3016 return 0;
3017
3018 sk = chan->sk;
3019
3020 /* don't delete l2cap channel if sk is owned by user */
3021 if (sock_owned_by_user(sk)) {
3022 		l2cap_state_change(chan, BT_DISCONN);
3023 __clear_chan_timer(chan);
3024 __set_chan_timer(chan, L2CAP_DISC_TIMEOUT);
3025 bh_unlock_sock(sk);
3026 return 0;
3027 }
3028
3029 l2cap_chan_del(chan, 0);
3030 bh_unlock_sock(sk);
3031
3032 chan->ops->close(chan->data);
3033 return 0;
3034 }
3035
3036 static inline int l2cap_information_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
3037 {
3038 struct l2cap_info_req *req = (struct l2cap_info_req *) data;
3039 u16 type;
3040
3041 type = __le16_to_cpu(req->type);
3042
3043 BT_DBG("type 0x%4.4x", type);
3044
3045 if (type == L2CAP_IT_FEAT_MASK) {
3046 u8 buf[8];
3047 u32 feat_mask = l2cap_feat_mask;
3048 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
3049 rsp->type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
3050 rsp->result = cpu_to_le16(L2CAP_IR_SUCCESS);
3051 if (!disable_ertm)
3052 feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING
3053 | L2CAP_FEAT_FCS;
3054 if (enable_hs)
3055 feat_mask |= L2CAP_FEAT_EXT_FLOW
3056 | L2CAP_FEAT_EXT_WINDOW;
3057
3058 put_unaligned_le32(feat_mask, rsp->data);
3059 l2cap_send_cmd(conn, cmd->ident,
3060 L2CAP_INFO_RSP, sizeof(buf), buf);
3061 } else if (type == L2CAP_IT_FIXED_CHAN) {
3062 u8 buf[12];
3063 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
3064
3065 if (enable_hs)
3066 l2cap_fixed_chan[0] |= L2CAP_FC_A2MP;
3067 else
3068 l2cap_fixed_chan[0] &= ~L2CAP_FC_A2MP;
3069
3070 rsp->type = cpu_to_le16(L2CAP_IT_FIXED_CHAN);
3071 rsp->result = cpu_to_le16(L2CAP_IR_SUCCESS);
3072 memcpy(rsp->data, l2cap_fixed_chan, sizeof(l2cap_fixed_chan));
3073 l2cap_send_cmd(conn, cmd->ident,
3074 L2CAP_INFO_RSP, sizeof(buf), buf);
3075 } else {
3076 struct l2cap_info_rsp rsp;
3077 rsp.type = cpu_to_le16(type);
3078 rsp.result = cpu_to_le16(L2CAP_IR_NOTSUPP);
3079 l2cap_send_cmd(conn, cmd->ident,
3080 L2CAP_INFO_RSP, sizeof(rsp), &rsp);
3081 }
3082
3083 return 0;
3084 }
3085
3086 static inline int l2cap_information_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
3087 {
3088 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) data;
3089 u16 type, result;
3090
3091 type = __le16_to_cpu(rsp->type);
3092 result = __le16_to_cpu(rsp->result);
3093
3094 BT_DBG("type 0x%4.4x result 0x%2.2x", type, result);
3095
3096 /* L2CAP Info req/rsp are unbound to channels, add extra checks */
3097 if (cmd->ident != conn->info_ident ||
3098 conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE)
3099 return 0;
3100
3101 del_timer(&conn->info_timer);
3102
3103 if (result != L2CAP_IR_SUCCESS) {
3104 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
3105 conn->info_ident = 0;
3106
3107 l2cap_conn_start(conn);
3108
3109 return 0;
3110 }
3111
3112 if (type == L2CAP_IT_FEAT_MASK) {
3113 conn->feat_mask = get_unaligned_le32(rsp->data);
3114
3115 if (conn->feat_mask & L2CAP_FEAT_FIXED_CHAN) {
3116 struct l2cap_info_req req;
3117 req.type = cpu_to_le16(L2CAP_IT_FIXED_CHAN);
3118
3119 conn->info_ident = l2cap_get_ident(conn);
3120
3121 l2cap_send_cmd(conn, conn->info_ident,
3122 L2CAP_INFO_REQ, sizeof(req), &req);
3123 } else {
3124 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
3125 conn->info_ident = 0;
3126
3127 l2cap_conn_start(conn);
3128 }
3129 } else if (type == L2CAP_IT_FIXED_CHAN) {
3130 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
3131 conn->info_ident = 0;
3132
3133 l2cap_conn_start(conn);
3134 }
3135
3136 return 0;
3137 }
3138
3139 static inline int l2cap_create_channel_req(struct l2cap_conn *conn,
3140 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
3141 void *data)
3142 {
3143 struct l2cap_create_chan_req *req = data;
3144 struct l2cap_create_chan_rsp rsp;
3145 u16 psm, scid;
3146
3147 if (cmd_len != sizeof(*req))
3148 return -EPROTO;
3149
3150 if (!enable_hs)
3151 return -EINVAL;
3152
3153 psm = le16_to_cpu(req->psm);
3154 scid = le16_to_cpu(req->scid);
3155
3156 BT_DBG("psm %d, scid %d, amp_id %d", psm, scid, req->amp_id);
3157
3158 /* Placeholder: Always reject */
3159 rsp.dcid = 0;
3160 rsp.scid = cpu_to_le16(scid);
3161 rsp.result = L2CAP_CR_NO_MEM;
3162 rsp.status = L2CAP_CS_NO_INFO;
3163
3164 l2cap_send_cmd(conn, cmd->ident, L2CAP_CREATE_CHAN_RSP,
3165 sizeof(rsp), &rsp);
3166
3167 return 0;
3168 }
3169
3170 static inline int l2cap_create_channel_rsp(struct l2cap_conn *conn,
3171 struct l2cap_cmd_hdr *cmd, void *data)
3172 {
3173 BT_DBG("conn %p", conn);
3174
3175 return l2cap_connect_rsp(conn, cmd, data);
3176 }
3177
3178 static void l2cap_send_move_chan_rsp(struct l2cap_conn *conn, u8 ident,
3179 u16 icid, u16 result)
3180 {
3181 struct l2cap_move_chan_rsp rsp;
3182
3183 BT_DBG("icid %d, result %d", icid, result);
3184
3185 rsp.icid = cpu_to_le16(icid);
3186 rsp.result = cpu_to_le16(result);
3187
3188 l2cap_send_cmd(conn, ident, L2CAP_MOVE_CHAN_RSP, sizeof(rsp), &rsp);
3189 }
3190
3191 static void l2cap_send_move_chan_cfm(struct l2cap_conn *conn,
3192 struct l2cap_chan *chan, u16 icid, u16 result)
3193 {
3194 struct l2cap_move_chan_cfm cfm;
3195 u8 ident;
3196
3197 BT_DBG("icid %d, result %d", icid, result);
3198
3199 ident = l2cap_get_ident(conn);
3200 if (chan)
3201 chan->ident = ident;
3202
3203 cfm.icid = cpu_to_le16(icid);
3204 cfm.result = cpu_to_le16(result);
3205
3206 l2cap_send_cmd(conn, ident, L2CAP_MOVE_CHAN_CFM, sizeof(cfm), &cfm);
3207 }
3208
3209 static void l2cap_send_move_chan_cfm_rsp(struct l2cap_conn *conn, u8 ident,
3210 u16 icid)
3211 {
3212 struct l2cap_move_chan_cfm_rsp rsp;
3213
3214 BT_DBG("icid %d", icid);
3215
3216 rsp.icid = cpu_to_le16(icid);
3217 l2cap_send_cmd(conn, ident, L2CAP_MOVE_CHAN_CFM_RSP, sizeof(rsp), &rsp);
3218 }
3219
3220 static inline int l2cap_move_channel_req(struct l2cap_conn *conn,
3221 struct l2cap_cmd_hdr *cmd, u16 cmd_len, void *data)
3222 {
3223 struct l2cap_move_chan_req *req = data;
3224 u16 icid = 0;
3225 u16 result = L2CAP_MR_NOT_ALLOWED;
3226
3227 if (cmd_len != sizeof(*req))
3228 return -EPROTO;
3229
3230 icid = le16_to_cpu(req->icid);
3231
3232 BT_DBG("icid %d, dest_amp_id %d", icid, req->dest_amp_id);
3233
3234 if (!enable_hs)
3235 return -EINVAL;
3236
3237 /* Placeholder: Always refuse */
3238 l2cap_send_move_chan_rsp(conn, cmd->ident, icid, result);
3239
3240 return 0;
3241 }
3242
3243 static inline int l2cap_move_channel_rsp(struct l2cap_conn *conn,
3244 struct l2cap_cmd_hdr *cmd, u16 cmd_len, void *data)
3245 {
3246 struct l2cap_move_chan_rsp *rsp = data;
3247 u16 icid, result;
3248
3249 if (cmd_len != sizeof(*rsp))
3250 return -EPROTO;
3251
3252 icid = le16_to_cpu(rsp->icid);
3253 result = le16_to_cpu(rsp->result);
3254
3255 BT_DBG("icid %d, result %d", icid, result);
3256
3257 /* Placeholder: Always unconfirmed */
3258 l2cap_send_move_chan_cfm(conn, NULL, icid, L2CAP_MC_UNCONFIRMED);
3259
3260 return 0;
3261 }
3262
3263 static inline int l2cap_move_channel_confirm(struct l2cap_conn *conn,
3264 struct l2cap_cmd_hdr *cmd, u16 cmd_len, void *data)
3265 {
3266 struct l2cap_move_chan_cfm *cfm = data;
3267 u16 icid, result;
3268
3269 if (cmd_len != sizeof(*cfm))
3270 return -EPROTO;
3271
3272 icid = le16_to_cpu(cfm->icid);
3273 result = le16_to_cpu(cfm->result);
3274
3275 BT_DBG("icid %d, result %d", icid, result);
3276
3277 l2cap_send_move_chan_cfm_rsp(conn, cmd->ident, icid);
3278
3279 return 0;
3280 }
3281
3282 static inline int l2cap_move_channel_confirm_rsp(struct l2cap_conn *conn,
3283 struct l2cap_cmd_hdr *cmd, u16 cmd_len, void *data)
3284 {
3285 struct l2cap_move_chan_cfm_rsp *rsp = data;
3286 u16 icid;
3287
3288 if (cmd_len != sizeof(*rsp))
3289 return -EPROTO;
3290
3291 icid = le16_to_cpu(rsp->icid);
3292
3293 BT_DBG("icid %d", icid);
3294
3295 return 0;
3296 }
3297
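/* Sanity check the parameters of an LE Connection Parameter Update
 * Request: connection interval range, supervision timeout and slave
 * latency.  Returns 0 if they are acceptable, -EINVAL otherwise.
 */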
3298 static inline int l2cap_check_conn_param(u16 min, u16 max, u16 latency,
3299 u16 to_multiplier)
3300 {
3301 u16 max_latency;
3302
3303 if (min > max || min < 6 || max > 3200)
3304 return -EINVAL;
3305
3306 if (to_multiplier < 10 || to_multiplier > 3200)
3307 return -EINVAL;
3308
3309 if (max >= to_multiplier * 8)
3310 return -EINVAL;
3311
3312 max_latency = (to_multiplier * 8 / max) - 1;
3313 if (latency > 499 || latency > max_latency)
3314 return -EINVAL;
3315
3316 return 0;
3317 }
3318
3319 static inline int l2cap_conn_param_update_req(struct l2cap_conn *conn,
3320 struct l2cap_cmd_hdr *cmd, u8 *data)
3321 {
3322 struct hci_conn *hcon = conn->hcon;
3323 struct l2cap_conn_param_update_req *req;
3324 struct l2cap_conn_param_update_rsp rsp;
3325 u16 min, max, latency, to_multiplier, cmd_len;
3326 int err;
3327
3328 if (!(hcon->link_mode & HCI_LM_MASTER))
3329 return -EINVAL;
3330
3331 cmd_len = __le16_to_cpu(cmd->len);
3332 if (cmd_len != sizeof(struct l2cap_conn_param_update_req))
3333 return -EPROTO;
3334
3335 req = (struct l2cap_conn_param_update_req *) data;
3336 min = __le16_to_cpu(req->min);
3337 max = __le16_to_cpu(req->max);
3338 latency = __le16_to_cpu(req->latency);
3339 to_multiplier = __le16_to_cpu(req->to_multiplier);
3340
3341 BT_DBG("min 0x%4.4x max 0x%4.4x latency: 0x%4.4x Timeout: 0x%4.4x",
3342 min, max, latency, to_multiplier);
3343
3344 memset(&rsp, 0, sizeof(rsp));
3345
3346 err = l2cap_check_conn_param(min, max, latency, to_multiplier);
3347 if (err)
3348 rsp.result = cpu_to_le16(L2CAP_CONN_PARAM_REJECTED);
3349 else
3350 rsp.result = cpu_to_le16(L2CAP_CONN_PARAM_ACCEPTED);
3351
3352 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONN_PARAM_UPDATE_RSP,
3353 sizeof(rsp), &rsp);
3354
3355 if (!err)
3356 hci_le_conn_update(hcon, min, max, latency, to_multiplier);
3357
3358 return 0;
3359 }
3360
3361 static inline int l2cap_bredr_sig_cmd(struct l2cap_conn *conn,
3362 struct l2cap_cmd_hdr *cmd, u16 cmd_len, u8 *data)
3363 {
3364 int err = 0;
3365
3366 switch (cmd->code) {
3367 case L2CAP_COMMAND_REJ:
3368 l2cap_command_rej(conn, cmd, data);
3369 break;
3370
3371 case L2CAP_CONN_REQ:
3372 err = l2cap_connect_req(conn, cmd, data);
3373 break;
3374
3375 case L2CAP_CONN_RSP:
3376 err = l2cap_connect_rsp(conn, cmd, data);
3377 break;
3378
3379 case L2CAP_CONF_REQ:
3380 err = l2cap_config_req(conn, cmd, cmd_len, data);
3381 break;
3382
3383 case L2CAP_CONF_RSP:
3384 err = l2cap_config_rsp(conn, cmd, data);
3385 break;
3386
3387 case L2CAP_DISCONN_REQ:
3388 err = l2cap_disconnect_req(conn, cmd, data);
3389 break;
3390
3391 case L2CAP_DISCONN_RSP:
3392 err = l2cap_disconnect_rsp(conn, cmd, data);
3393 break;
3394
3395 case L2CAP_ECHO_REQ:
3396 l2cap_send_cmd(conn, cmd->ident, L2CAP_ECHO_RSP, cmd_len, data);
3397 break;
3398
3399 case L2CAP_ECHO_RSP:
3400 break;
3401
3402 case L2CAP_INFO_REQ:
3403 err = l2cap_information_req(conn, cmd, data);
3404 break;
3405
3406 case L2CAP_INFO_RSP:
3407 err = l2cap_information_rsp(conn, cmd, data);
3408 break;
3409
3410 case L2CAP_CREATE_CHAN_REQ:
3411 err = l2cap_create_channel_req(conn, cmd, cmd_len, data);
3412 break;
3413
3414 case L2CAP_CREATE_CHAN_RSP:
3415 err = l2cap_create_channel_rsp(conn, cmd, data);
3416 break;
3417
3418 case L2CAP_MOVE_CHAN_REQ:
3419 err = l2cap_move_channel_req(conn, cmd, cmd_len, data);
3420 break;
3421
3422 case L2CAP_MOVE_CHAN_RSP:
3423 err = l2cap_move_channel_rsp(conn, cmd, cmd_len, data);
3424 break;
3425
3426 case L2CAP_MOVE_CHAN_CFM:
3427 err = l2cap_move_channel_confirm(conn, cmd, cmd_len, data);
3428 break;
3429
3430 case L2CAP_MOVE_CHAN_CFM_RSP:
3431 err = l2cap_move_channel_confirm_rsp(conn, cmd, cmd_len, data);
3432 break;
3433
3434 default:
3435 BT_ERR("Unknown BR/EDR signaling command 0x%2.2x", cmd->code);
3436 err = -EINVAL;
3437 break;
3438 }
3439
3440 return err;
3441 }
3442
3443 static inline int l2cap_le_sig_cmd(struct l2cap_conn *conn,
3444 struct l2cap_cmd_hdr *cmd, u8 *data)
3445 {
3446 switch (cmd->code) {
3447 case L2CAP_COMMAND_REJ:
3448 return 0;
3449
3450 case L2CAP_CONN_PARAM_UPDATE_REQ:
3451 return l2cap_conn_param_update_req(conn, cmd, data);
3452
3453 case L2CAP_CONN_PARAM_UPDATE_RSP:
3454 return 0;
3455
3456 default:
3457 BT_ERR("Unknown LE signaling command 0x%2.2x", cmd->code);
3458 return -EINVAL;
3459 }
3460 }
3461
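/* Walk all signalling commands in the received PDU, dispatch each one
 * to the BR/EDR or LE handler and send a Command Reject when a handler
 * fails.
 */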
3462 static inline void l2cap_sig_channel(struct l2cap_conn *conn,
3463 struct sk_buff *skb)
3464 {
3465 u8 *data = skb->data;
3466 int len = skb->len;
3467 struct l2cap_cmd_hdr cmd;
3468 int err;
3469
3470 l2cap_raw_recv(conn, skb);
3471
3472 while (len >= L2CAP_CMD_HDR_SIZE) {
3473 u16 cmd_len;
3474 memcpy(&cmd, data, L2CAP_CMD_HDR_SIZE);
3475 data += L2CAP_CMD_HDR_SIZE;
3476 len -= L2CAP_CMD_HDR_SIZE;
3477
3478 cmd_len = le16_to_cpu(cmd.len);
3479
3480 BT_DBG("code 0x%2.2x len %d id 0x%2.2x", cmd.code, cmd_len, cmd.ident);
3481
3482 if (cmd_len > len || !cmd.ident) {
3483 BT_DBG("corrupted command");
3484 break;
3485 }
3486
3487 if (conn->hcon->type == LE_LINK)
3488 err = l2cap_le_sig_cmd(conn, &cmd, data);
3489 else
3490 err = l2cap_bredr_sig_cmd(conn, &cmd, cmd_len, data);
3491
3492 if (err) {
3493 struct l2cap_cmd_rej_unk rej;
3494
3495 BT_ERR("Wrong link type (%d)", err);
3496
3497 /* FIXME: Map err to a valid reason */
3498 rej.reason = cpu_to_le16(L2CAP_REJ_NOT_UNDERSTOOD);
3499 l2cap_send_cmd(conn, cmd.ident, L2CAP_COMMAND_REJ, sizeof(rej), &rej);
3500 }
3501
3502 data += cmd_len;
3503 len -= cmd_len;
3504 }
3505
3506 kfree_skb(skb);
3507 }
3508
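/* Verify and strip the CRC16 FCS of a received frame when FCS is in
 * use.  Returns 0 on success or -EBADMSG on a checksum mismatch.
 */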
3509 static int l2cap_check_fcs(struct l2cap_chan *chan, struct sk_buff *skb)
3510 {
3511 u16 our_fcs, rcv_fcs;
3512 int hdr_size;
3513
3514 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
3515 hdr_size = L2CAP_EXT_HDR_SIZE;
3516 else
3517 hdr_size = L2CAP_ENH_HDR_SIZE;
3518
3519 if (chan->fcs == L2CAP_FCS_CRC16) {
3520 skb_trim(skb, skb->len - L2CAP_FCS_SIZE);
3521 rcv_fcs = get_unaligned_le16(skb->data + skb->len);
3522 our_fcs = crc16(0, skb->data - hdr_size, skb->len + hdr_size);
3523
3524 if (our_fcs != rcv_fcs)
3525 return -EBADMSG;
3526 }
3527 return 0;
3528 }
3529
3530 static inline void l2cap_send_i_or_rr_or_rnr(struct l2cap_chan *chan)
3531 {
3532 u32 control = 0;
3533
3534 chan->frames_sent = 0;
3535
3536 control |= __set_reqseq(chan, chan->buffer_seq);
3537
3538 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
3539 control |= __set_ctrl_super(chan, L2CAP_SUPER_RNR);
3540 l2cap_send_sframe(chan, control);
3541 set_bit(CONN_RNR_SENT, &chan->conn_state);
3542 }
3543
3544 if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state))
3545 l2cap_retransmit_frames(chan);
3546
3547 l2cap_ertm_send(chan);
3548
3549 if (!test_bit(CONN_LOCAL_BUSY, &chan->conn_state) &&
3550 chan->frames_sent == 0) {
3551 control |= __set_ctrl_super(chan, L2CAP_SUPER_RR);
3552 l2cap_send_sframe(chan, control);
3553 }
3554 }
3555
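/* Insert an out-of-sequence I-frame into the SREJ queue, keeping the
 * queue ordered by tx_seq.  Returns -EINVAL for a duplicate frame.
 */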
3556 static int l2cap_add_to_srej_queue(struct l2cap_chan *chan, struct sk_buff *skb, u16 tx_seq, u8 sar)
3557 {
3558 struct sk_buff *next_skb;
3559 int tx_seq_offset, next_tx_seq_offset;
3560
3561 bt_cb(skb)->tx_seq = tx_seq;
3562 bt_cb(skb)->sar = sar;
3563
3564 next_skb = skb_peek(&chan->srej_q);
3565 if (!next_skb) {
3566 __skb_queue_tail(&chan->srej_q, skb);
3567 return 0;
3568 }
3569
3570 tx_seq_offset = __seq_offset(chan, tx_seq, chan->buffer_seq);
3571
3572 do {
3573 if (bt_cb(next_skb)->tx_seq == tx_seq)
3574 return -EINVAL;
3575
3576 next_tx_seq_offset = __seq_offset(chan,
3577 bt_cb(next_skb)->tx_seq, chan->buffer_seq);
3578
3579 if (next_tx_seq_offset > tx_seq_offset) {
3580 __skb_queue_before(&chan->srej_q, next_skb, skb);
3581 return 0;
3582 }
3583
3584 if (skb_queue_is_last(&chan->srej_q, next_skb))
3585 break;
3586
3587 } while ((next_skb = skb_queue_next(&chan->srej_q, next_skb)));
3588
3589 __skb_queue_tail(&chan->srej_q, skb);
3590
3591 return 0;
3592 }
3593
3594 static void append_skb_frag(struct sk_buff *skb,
3595 struct sk_buff *new_frag, struct sk_buff **last_frag)
3596 {
3597 /* skb->len reflects data in skb as well as all fragments
3598 * skb->data_len reflects only data in fragments
3599 */
3600 if (!skb_has_frag_list(skb))
3601 skb_shinfo(skb)->frag_list = new_frag;
3602
3603 new_frag->next = NULL;
3604
3605 (*last_frag)->next = new_frag;
3606 *last_frag = new_frag;
3607
3608 skb->len += new_frag->len;
3609 skb->data_len += new_frag->len;
3610 skb->truesize += new_frag->truesize;
3611 }
3612
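/* Reassemble a (possibly segmented) SDU from an I-frame according to
 * its SAR bits and hand the complete SDU to the channel's recv
 * callback.  On error the partial SDU is discarded.
 */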
3613 static int l2cap_reassemble_sdu(struct l2cap_chan *chan, struct sk_buff *skb, u32 control)
3614 {
3615 int err = -EINVAL;
3616
3617 switch (__get_ctrl_sar(chan, control)) {
3618 case L2CAP_SAR_UNSEGMENTED:
3619 if (chan->sdu)
3620 break;
3621
3622 err = chan->ops->recv(chan->data, skb);
3623 break;
3624
3625 case L2CAP_SAR_START:
3626 if (chan->sdu)
3627 break;
3628
3629 chan->sdu_len = get_unaligned_le16(skb->data);
3630 skb_pull(skb, L2CAP_SDULEN_SIZE);
3631
3632 if (chan->sdu_len > chan->imtu) {
3633 err = -EMSGSIZE;
3634 break;
3635 }
3636
3637 if (skb->len >= chan->sdu_len)
3638 break;
3639
3640 chan->sdu = skb;
3641 chan->sdu_last_frag = skb;
3642
3643 skb = NULL;
3644 err = 0;
3645 break;
3646
3647 case L2CAP_SAR_CONTINUE:
3648 if (!chan->sdu)
3649 break;
3650
3651 append_skb_frag(chan->sdu, skb,
3652 &chan->sdu_last_frag);
3653 skb = NULL;
3654
3655 if (chan->sdu->len >= chan->sdu_len)
3656 break;
3657
3658 err = 0;
3659 break;
3660
3661 case L2CAP_SAR_END:
3662 if (!chan->sdu)
3663 break;
3664
3665 append_skb_frag(chan->sdu, skb,
3666 &chan->sdu_last_frag);
3667 skb = NULL;
3668
3669 if (chan->sdu->len != chan->sdu_len)
3670 break;
3671
3672 err = chan->ops->recv(chan->data, chan->sdu);
3673
3674 if (!err) {
3675 /* Reassembly complete */
3676 chan->sdu = NULL;
3677 chan->sdu_last_frag = NULL;
3678 chan->sdu_len = 0;
3679 }
3680 break;
3681 }
3682
3683 if (err) {
3684 kfree_skb(skb);
3685 kfree_skb(chan->sdu);
3686 chan->sdu = NULL;
3687 chan->sdu_last_frag = NULL;
3688 chan->sdu_len = 0;
3689 }
3690
3691 return err;
3692 }
3693
3694 static void l2cap_ertm_enter_local_busy(struct l2cap_chan *chan)
3695 {
3696 u32 control;
3697
3698 BT_DBG("chan %p, Enter local busy", chan);
3699
3700 set_bit(CONN_LOCAL_BUSY, &chan->conn_state);
3701
3702 control = __set_reqseq(chan, chan->buffer_seq);
3703 control |= __set_ctrl_super(chan, L2CAP_SUPER_RNR);
3704 l2cap_send_sframe(chan, control);
3705
3706 set_bit(CONN_RNR_SENT, &chan->conn_state);
3707
3708 __clear_ack_timer(chan);
3709 }
3710
3711 static void l2cap_ertm_exit_local_busy(struct l2cap_chan *chan)
3712 {
3713 u32 control;
3714
3715 if (!test_bit(CONN_RNR_SENT, &chan->conn_state))
3716 goto done;
3717
3718 control = __set_reqseq(chan, chan->buffer_seq);
3719 control |= __set_ctrl_poll(chan);
3720 control |= __set_ctrl_super(chan, L2CAP_SUPER_RR);
3721 l2cap_send_sframe(chan, control);
3722 chan->retry_count = 1;
3723
3724 __clear_retrans_timer(chan);
3725 __set_monitor_timer(chan);
3726
3727 set_bit(CONN_WAIT_F, &chan->conn_state);
3728
3729 done:
3730 clear_bit(CONN_LOCAL_BUSY, &chan->conn_state);
3731 clear_bit(CONN_RNR_SENT, &chan->conn_state);
3732
3733 BT_DBG("chan %p, Exit local busy", chan);
3734 }
3735
3736 void l2cap_chan_busy(struct l2cap_chan *chan, int busy)
3737 {
3738 if (chan->mode == L2CAP_MODE_ERTM) {
3739 if (busy)
3740 l2cap_ertm_enter_local_busy(chan);
3741 else
3742 l2cap_ertm_exit_local_busy(chan);
3743 }
3744 }
3745
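/* Flush frames from the SREJ queue that are now in sequence, passing
 * them to reassembly and advancing buffer_seq_srej.
 */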
3746 static void l2cap_check_srej_gap(struct l2cap_chan *chan, u16 tx_seq)
3747 {
3748 struct sk_buff *skb;
3749 u32 control;
3750
3751 while ((skb = skb_peek(&chan->srej_q)) &&
3752 !test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
3753 int err;
3754
3755 if (bt_cb(skb)->tx_seq != tx_seq)
3756 break;
3757
3758 skb = skb_dequeue(&chan->srej_q);
3759 control = __set_ctrl_sar(chan, bt_cb(skb)->sar);
3760 err = l2cap_reassemble_sdu(chan, skb, control);
3761
3762 if (err < 0) {
3763 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
3764 break;
3765 }
3766
3767 chan->buffer_seq_srej = __next_seq(chan, chan->buffer_seq_srej);
3768 tx_seq = __next_seq(chan, tx_seq);
3769 }
3770 }
3771
3772 static void l2cap_resend_srejframe(struct l2cap_chan *chan, u16 tx_seq)
3773 {
3774 struct srej_list *l, *tmp;
3775 u32 control;
3776
3777 list_for_each_entry_safe(l, tmp, &chan->srej_l, list) {
3778 if (l->tx_seq == tx_seq) {
3779 list_del(&l->list);
3780 kfree(l);
3781 return;
3782 }
3783 control = __set_ctrl_super(chan, L2CAP_SUPER_SREJ);
3784 control |= __set_reqseq(chan, l->tx_seq);
3785 l2cap_send_sframe(chan, control);
3786 list_del(&l->list);
3787 list_add_tail(&l->list, &chan->srej_l);
3788 }
3789 }
3790
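/* Send an SREJ S-frame for every sequence number missing between the
 * expected tx_seq and the one just received, remembering each
 * outstanding request on the srej_l list.
 */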
3791 static void l2cap_send_srejframe(struct l2cap_chan *chan, u16 tx_seq)
3792 {
3793 struct srej_list *new;
3794 u32 control;
3795
3796 while (tx_seq != chan->expected_tx_seq) {
3797 control = __set_ctrl_super(chan, L2CAP_SUPER_SREJ);
3798 control |= __set_reqseq(chan, chan->expected_tx_seq);
3799 l2cap_send_sframe(chan, control);
3800
3801 		new = kzalloc(sizeof(struct srej_list), GFP_ATOMIC);
 		if (!new)
 			return;
3802 		new->tx_seq = chan->expected_tx_seq;
3803
3804 chan->expected_tx_seq = __next_seq(chan, chan->expected_tx_seq);
3805
3806 list_add_tail(&new->list, &chan->srej_l);
3807 }
3808
3809 chan->expected_tx_seq = __next_seq(chan, chan->expected_tx_seq);
3810 }
3811
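/* ERTM receive path for I-frames: acknowledge the peer's req_seq,
 * detect missing or duplicated tx_seq values, drive the SREJ recovery
 * state and pass in-sequence frames to reassembly.
 */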
3812 static inline int l2cap_data_channel_iframe(struct l2cap_chan *chan, u32 rx_control, struct sk_buff *skb)
3813 {
3814 u16 tx_seq = __get_txseq(chan, rx_control);
3815 u16 req_seq = __get_reqseq(chan, rx_control);
3816 u8 sar = __get_ctrl_sar(chan, rx_control);
3817 int tx_seq_offset, expected_tx_seq_offset;
3818 int num_to_ack = (chan->tx_win/6) + 1;
3819 int err = 0;
3820
3821 BT_DBG("chan %p len %d tx_seq %d rx_control 0x%8.8x", chan, skb->len,
3822 tx_seq, rx_control);
3823
3824 if (__is_ctrl_final(chan, rx_control) &&
3825 test_bit(CONN_WAIT_F, &chan->conn_state)) {
3826 __clear_monitor_timer(chan);
3827 if (chan->unacked_frames > 0)
3828 __set_retrans_timer(chan);
3829 clear_bit(CONN_WAIT_F, &chan->conn_state);
3830 }
3831
3832 chan->expected_ack_seq = req_seq;
3833 l2cap_drop_acked_frames(chan);
3834
3835 tx_seq_offset = __seq_offset(chan, tx_seq, chan->buffer_seq);
3836
3837 /* invalid tx_seq */
3838 if (tx_seq_offset >= chan->tx_win) {
3839 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
3840 goto drop;
3841 }
3842
3843 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state))
3844 goto drop;
3845
3846 if (tx_seq == chan->expected_tx_seq)
3847 goto expected;
3848
3849 if (test_bit(CONN_SREJ_SENT, &chan->conn_state)) {
3850 struct srej_list *first;
3851
3852 first = list_first_entry(&chan->srej_l,
3853 struct srej_list, list);
3854 if (tx_seq == first->tx_seq) {
3855 l2cap_add_to_srej_queue(chan, skb, tx_seq, sar);
3856 l2cap_check_srej_gap(chan, tx_seq);
3857
3858 list_del(&first->list);
3859 kfree(first);
3860
3861 if (list_empty(&chan->srej_l)) {
3862 chan->buffer_seq = chan->buffer_seq_srej;
3863 clear_bit(CONN_SREJ_SENT, &chan->conn_state);
3864 l2cap_send_ack(chan);
3865 BT_DBG("chan %p, Exit SREJ_SENT", chan);
3866 }
3867 } else {
3868 struct srej_list *l;
3869
3870 /* duplicated tx_seq */
3871 if (l2cap_add_to_srej_queue(chan, skb, tx_seq, sar) < 0)
3872 goto drop;
3873
3874 list_for_each_entry(l, &chan->srej_l, list) {
3875 if (l->tx_seq == tx_seq) {
3876 l2cap_resend_srejframe(chan, tx_seq);
3877 return 0;
3878 }
3879 }
3880 l2cap_send_srejframe(chan, tx_seq);
3881 }
3882 } else {
3883 expected_tx_seq_offset = __seq_offset(chan,
3884 chan->expected_tx_seq, chan->buffer_seq);
3885
3886 /* duplicated tx_seq */
3887 if (tx_seq_offset < expected_tx_seq_offset)
3888 goto drop;
3889
3890 set_bit(CONN_SREJ_SENT, &chan->conn_state);
3891
3892 BT_DBG("chan %p, Enter SREJ", chan);
3893
3894 INIT_LIST_HEAD(&chan->srej_l);
3895 chan->buffer_seq_srej = chan->buffer_seq;
3896
3897 __skb_queue_head_init(&chan->srej_q);
3898 l2cap_add_to_srej_queue(chan, skb, tx_seq, sar);
3899
3900 set_bit(CONN_SEND_PBIT, &chan->conn_state);
3901
3902 l2cap_send_srejframe(chan, tx_seq);
3903
3904 __clear_ack_timer(chan);
3905 }
3906 return 0;
3907
3908 expected:
3909 chan->expected_tx_seq = __next_seq(chan, chan->expected_tx_seq);
3910
3911 if (test_bit(CONN_SREJ_SENT, &chan->conn_state)) {
3912 bt_cb(skb)->tx_seq = tx_seq;
3913 bt_cb(skb)->sar = sar;
3914 __skb_queue_tail(&chan->srej_q, skb);
3915 return 0;
3916 }
3917
3918 err = l2cap_reassemble_sdu(chan, skb, rx_control);
3919 chan->buffer_seq = __next_seq(chan, chan->buffer_seq);
3920
3921 if (err < 0) {
3922 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
3923 return err;
3924 }
3925
3926 if (__is_ctrl_final(chan, rx_control)) {
3927 if (!test_and_clear_bit(CONN_REJ_ACT, &chan->conn_state))
3928 l2cap_retransmit_frames(chan);
3929 }
3930
3931 __set_ack_timer(chan);
3932
3933 chan->num_acked = (chan->num_acked + 1) % num_to_ack;
3934 if (chan->num_acked == num_to_ack - 1)
3935 l2cap_send_ack(chan);
3936
3937 return 0;
3938
3939 drop:
3940 kfree_skb(skb);
3941 return 0;
3942 }
3943
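/* Handle a Receiver Ready S-frame: acknowledge the peer's req_seq and
 * react to the poll or final bit by resuming or retransmitting
 * traffic.
 */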
3944 static inline void l2cap_data_channel_rrframe(struct l2cap_chan *chan, u32 rx_control)
3945 {
3946 BT_DBG("chan %p, req_seq %d ctrl 0x%8.8x", chan,
3947 __get_reqseq(chan, rx_control), rx_control);
3948
3949 chan->expected_ack_seq = __get_reqseq(chan, rx_control);
3950 l2cap_drop_acked_frames(chan);
3951
3952 if (__is_ctrl_poll(chan, rx_control)) {
3953 set_bit(CONN_SEND_FBIT, &chan->conn_state);
3954 if (test_bit(CONN_SREJ_SENT, &chan->conn_state)) {
3955 if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state) &&
3956 (chan->unacked_frames > 0))
3957 __set_retrans_timer(chan);
3958
3959 clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
3960 l2cap_send_srejtail(chan);
3961 } else {
3962 l2cap_send_i_or_rr_or_rnr(chan);
3963 }
3964
3965 } else if (__is_ctrl_final(chan, rx_control)) {
3966 clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
3967
3968 if (!test_and_clear_bit(CONN_REJ_ACT, &chan->conn_state))
3969 l2cap_retransmit_frames(chan);
3970
3971 } else {
3972 if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state) &&
3973 (chan->unacked_frames > 0))
3974 __set_retrans_timer(chan);
3975
3976 clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
3977 if (test_bit(CONN_SREJ_SENT, &chan->conn_state))
3978 l2cap_send_ack(chan);
3979 else
3980 l2cap_ertm_send(chan);
3981 }
3982 }
3983
3984 static inline void l2cap_data_channel_rejframe(struct l2cap_chan *chan, u32 rx_control)
3985 {
3986 u16 tx_seq = __get_reqseq(chan, rx_control);
3987
3988 BT_DBG("chan %p, req_seq %d ctrl 0x%8.8x", chan, tx_seq, rx_control);
3989
3990 clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
3991
3992 chan->expected_ack_seq = tx_seq;
3993 l2cap_drop_acked_frames(chan);
3994
3995 if (__is_ctrl_final(chan, rx_control)) {
3996 if (!test_and_clear_bit(CONN_REJ_ACT, &chan->conn_state))
3997 l2cap_retransmit_frames(chan);
3998 } else {
3999 l2cap_retransmit_frames(chan);
4000
4001 if (test_bit(CONN_WAIT_F, &chan->conn_state))
4002 set_bit(CONN_REJ_ACT, &chan->conn_state);
4003 }
4004 }
4005 static inline void l2cap_data_channel_srejframe(struct l2cap_chan *chan, u32 rx_control)
4006 {
4007 u16 tx_seq = __get_reqseq(chan, rx_control);
4008
4009 BT_DBG("chan %p, req_seq %d ctrl 0x%8.8x", chan, tx_seq, rx_control);
4010
4011 clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
4012
4013 if (__is_ctrl_poll(chan, rx_control)) {
4014 chan->expected_ack_seq = tx_seq;
4015 l2cap_drop_acked_frames(chan);
4016
4017 set_bit(CONN_SEND_FBIT, &chan->conn_state);
4018 l2cap_retransmit_one_frame(chan, tx_seq);
4019
4020 l2cap_ertm_send(chan);
4021
4022 if (test_bit(CONN_WAIT_F, &chan->conn_state)) {
4023 chan->srej_save_reqseq = tx_seq;
4024 set_bit(CONN_SREJ_ACT, &chan->conn_state);
4025 }
4026 } else if (__is_ctrl_final(chan, rx_control)) {
4027 if (test_bit(CONN_SREJ_ACT, &chan->conn_state) &&
4028 chan->srej_save_reqseq == tx_seq)
4029 clear_bit(CONN_SREJ_ACT, &chan->conn_state);
4030 else
4031 l2cap_retransmit_one_frame(chan, tx_seq);
4032 } else {
4033 l2cap_retransmit_one_frame(chan, tx_seq);
4034 if (test_bit(CONN_WAIT_F, &chan->conn_state)) {
4035 chan->srej_save_reqseq = tx_seq;
4036 set_bit(CONN_SREJ_ACT, &chan->conn_state);
4037 }
4038 }
4039 }
4040
4041 static inline void l2cap_data_channel_rnrframe(struct l2cap_chan *chan, u32 rx_control)
4042 {
4043 u16 tx_seq = __get_reqseq(chan, rx_control);
4044
4045 BT_DBG("chan %p, req_seq %d ctrl 0x%8.8x", chan, tx_seq, rx_control);
4046
4047 set_bit(CONN_REMOTE_BUSY, &chan->conn_state);
4048 chan->expected_ack_seq = tx_seq;
4049 l2cap_drop_acked_frames(chan);
4050
4051 if (__is_ctrl_poll(chan, rx_control))
4052 set_bit(CONN_SEND_FBIT, &chan->conn_state);
4053
4054 if (!test_bit(CONN_SREJ_SENT, &chan->conn_state)) {
4055 __clear_retrans_timer(chan);
4056 if (__is_ctrl_poll(chan, rx_control))
4057 l2cap_send_rr_or_rnr(chan, L2CAP_CTRL_FINAL);
4058 return;
4059 }
4060
4061 if (__is_ctrl_poll(chan, rx_control)) {
4062 l2cap_send_srejtail(chan);
4063 } else {
4064 rx_control = __set_ctrl_super(chan, L2CAP_SUPER_RR);
4065 l2cap_send_sframe(chan, rx_control);
4066 }
4067 }
4068
4069 static inline int l2cap_data_channel_sframe(struct l2cap_chan *chan, u32 rx_control, struct sk_buff *skb)
4070 {
4071 BT_DBG("chan %p rx_control 0x%8.8x len %d", chan, rx_control, skb->len);
4072
4073 if (__is_ctrl_final(chan, rx_control) &&
4074 test_bit(CONN_WAIT_F, &chan->conn_state)) {
4075 __clear_monitor_timer(chan);
4076 if (chan->unacked_frames > 0)
4077 __set_retrans_timer(chan);
4078 clear_bit(CONN_WAIT_F, &chan->conn_state);
4079 }
4080
4081 switch (__get_ctrl_super(chan, rx_control)) {
4082 case L2CAP_SUPER_RR:
4083 l2cap_data_channel_rrframe(chan, rx_control);
4084 break;
4085
4086 case L2CAP_SUPER_REJ:
4087 l2cap_data_channel_rejframe(chan, rx_control);
4088 break;
4089
4090 case L2CAP_SUPER_SREJ:
4091 l2cap_data_channel_srejframe(chan, rx_control);
4092 break;
4093
4094 case L2CAP_SUPER_RNR:
4095 l2cap_data_channel_rnrframe(chan, rx_control);
4096 break;
4097 }
4098
4099 kfree_skb(skb);
4100 return 0;
4101 }
4102
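/* Entry point for ERTM data: validate FCS, length and req_seq, then
 * dispatch the frame to the I-frame or S-frame handler.
 */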
4103 static int l2cap_ertm_data_rcv(struct sock *sk, struct sk_buff *skb)
4104 {
4105 struct l2cap_chan *chan = l2cap_pi(sk)->chan;
4106 u32 control;
4107 u16 req_seq;
4108 int len, next_tx_seq_offset, req_seq_offset;
4109
4110 control = __get_control(chan, skb->data);
4111 skb_pull(skb, __ctrl_size(chan));
4112 len = skb->len;
4113
4114 	/*
4115 	 * We can just drop the corrupted I-frame here.
4116 	 * The receive state machine will detect the missing frame and
4117 	 * request retransmission through the normal recovery procedure.
4118 	 */
4119 if (l2cap_check_fcs(chan, skb))
4120 goto drop;
4121
4122 if (__is_sar_start(chan, control) && !__is_sframe(chan, control))
4123 len -= L2CAP_SDULEN_SIZE;
4124
4125 if (chan->fcs == L2CAP_FCS_CRC16)
4126 len -= L2CAP_FCS_SIZE;
4127
4128 if (len > chan->mps) {
4129 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
4130 goto drop;
4131 }
4132
4133 req_seq = __get_reqseq(chan, control);
4134
4135 req_seq_offset = __seq_offset(chan, req_seq, chan->expected_ack_seq);
4136
4137 next_tx_seq_offset = __seq_offset(chan, chan->next_tx_seq,
4138 chan->expected_ack_seq);
4139
4140 /* check for invalid req-seq */
4141 if (req_seq_offset > next_tx_seq_offset) {
4142 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
4143 goto drop;
4144 }
4145
4146 if (!__is_sframe(chan, control)) {
4147 if (len < 0) {
4148 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
4149 goto drop;
4150 }
4151
4152 l2cap_data_channel_iframe(chan, control, skb);
4153 } else {
4154 if (len != 0) {
4155 			BT_ERR("Trailing bytes: %d in sframe", len);
4156 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
4157 goto drop;
4158 }
4159
4160 l2cap_data_channel_sframe(chan, control, skb);
4161 }
4162
4163 return 0;
4164
4165 drop:
4166 kfree_skb(skb);
4167 return 0;
4168 }
4169
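/* Deliver a data frame on a connection-oriented channel according to
 * the channel mode (basic, ERTM or streaming).
 */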
4170 static inline int l2cap_data_channel(struct l2cap_conn *conn, u16 cid, struct sk_buff *skb)
4171 {
4172 struct l2cap_chan *chan;
4173 struct sock *sk = NULL;
4174 u32 control;
4175 u16 tx_seq;
4176 int len;
4177
4178 chan = l2cap_get_chan_by_scid(conn, cid);
4179 if (!chan) {
4180 BT_DBG("unknown cid 0x%4.4x", cid);
4181 goto drop;
4182 }
4183
4184 sk = chan->sk;
4185
4186 BT_DBG("chan %p, len %d", chan, skb->len);
4187
4188 if (chan->state != BT_CONNECTED)
4189 goto drop;
4190
4191 switch (chan->mode) {
4192 case L2CAP_MODE_BASIC:
4193 		/* If the socket recv buffer overflows we drop data here,
4194 		 * which is *bad* because L2CAP has to be reliable.
4195 		 * But we don't have any other choice: basic mode L2CAP
4196 		 * doesn't provide a flow control mechanism. */
4197
4198 if (chan->imtu < skb->len)
4199 goto drop;
4200
4201 if (!chan->ops->recv(chan->data, skb))
4202 goto done;
4203 break;
4204
4205 case L2CAP_MODE_ERTM:
4206 if (!sock_owned_by_user(sk)) {
4207 l2cap_ertm_data_rcv(sk, skb);
4208 } else {
4209 if (sk_add_backlog(sk, skb))
4210 goto drop;
4211 }
4212
4213 goto done;
4214
4215 case L2CAP_MODE_STREAMING:
4216 control = __get_control(chan, skb->data);
4217 skb_pull(skb, __ctrl_size(chan));
4218 len = skb->len;
4219
4220 if (l2cap_check_fcs(chan, skb))
4221 goto drop;
4222
4223 if (__is_sar_start(chan, control))
4224 len -= L2CAP_SDULEN_SIZE;
4225
4226 if (chan->fcs == L2CAP_FCS_CRC16)
4227 len -= L2CAP_FCS_SIZE;
4228
4229 if (len > chan->mps || len < 0 || __is_sframe(chan, control))
4230 goto drop;
4231
4232 tx_seq = __get_txseq(chan, control);
4233
4234 if (chan->expected_tx_seq != tx_seq) {
4235 /* Frame(s) missing - must discard partial SDU */
4236 kfree_skb(chan->sdu);
4237 chan->sdu = NULL;
4238 chan->sdu_last_frag = NULL;
4239 chan->sdu_len = 0;
4240
4241 /* TODO: Notify userland of missing data */
4242 }
4243
4244 chan->expected_tx_seq = __next_seq(chan, tx_seq);
4245
4246 if (l2cap_reassemble_sdu(chan, skb, control) == -EMSGSIZE)
4247 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
4248
4249 goto done;
4250
4251 default:
4252 BT_DBG("chan %p: bad mode 0x%2.2x", chan, chan->mode);
4253 break;
4254 }
4255
4256 drop:
4257 kfree_skb(skb);
4258
4259 done:
4260 if (sk)
4261 bh_unlock_sock(sk);
4262
4263 return 0;
4264 }
4265
4266 static inline int l2cap_conless_channel(struct l2cap_conn *conn, __le16 psm, struct sk_buff *skb)
4267 {
4268 struct sock *sk = NULL;
4269 struct l2cap_chan *chan;
4270
4271 chan = l2cap_global_chan_by_psm(0, psm, conn->src);
4272 if (!chan)
4273 goto drop;
4274
4275 sk = chan->sk;
4276
4277 bh_lock_sock(sk);
4278
4279 BT_DBG("sk %p, len %d", sk, skb->len);
4280
4281 if (chan->state != BT_BOUND && chan->state != BT_CONNECTED)
4282 goto drop;
4283
4284 if (chan->imtu < skb->len)
4285 goto drop;
4286
4287 if (!chan->ops->recv(chan->data, skb))
4288 goto done;
4289
4290 drop:
4291 kfree_skb(skb);
4292
4293 done:
4294 if (sk)
4295 bh_unlock_sock(sk);
4296 return 0;
4297 }
4298
4299 static inline int l2cap_att_channel(struct l2cap_conn *conn, __le16 cid, struct sk_buff *skb)
4300 {
4301 struct sock *sk = NULL;
4302 struct l2cap_chan *chan;
4303
4304 chan = l2cap_global_chan_by_scid(0, cid, conn->src);
4305 if (!chan)
4306 goto drop;
4307
4308 sk = chan->sk;
4309
4310 bh_lock_sock(sk);
4311
4312 BT_DBG("sk %p, len %d", sk, skb->len);
4313
4314 if (chan->state != BT_BOUND && chan->state != BT_CONNECTED)
4315 goto drop;
4316
4317 if (chan->imtu < skb->len)
4318 goto drop;
4319
4320 if (!chan->ops->recv(chan->data, skb))
4321 goto done;
4322
4323 drop:
4324 kfree_skb(skb);
4325
4326 done:
4327 if (sk)
4328 bh_unlock_sock(sk);
4329 return 0;
4330 }
4331
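/* Demultiplex a complete L2CAP frame: strip the basic header and route
 * the payload by CID to the signalling, connectionless, ATT or SMP
 * handlers, or to a connection-oriented data channel by default.
 * Frames whose header length does not match the actual payload length
 * are dropped.
 */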
4332 static void l2cap_recv_frame(struct l2cap_conn *conn, struct sk_buff *skb)
4333 {
4334 struct l2cap_hdr *lh = (void *) skb->data;
4335 u16 cid, len;
4336 __le16 psm;
4337
4338 skb_pull(skb, L2CAP_HDR_SIZE);
4339 cid = __le16_to_cpu(lh->cid);
4340 len = __le16_to_cpu(lh->len);
4341
4342 if (len != skb->len) {
4343 kfree_skb(skb);
4344 return;
4345 }
4346
4347 BT_DBG("len %d, cid 0x%4.4x", len, cid);
4348
4349 switch (cid) {
4350 case L2CAP_CID_LE_SIGNALING:
4351 case L2CAP_CID_SIGNALING:
4352 l2cap_sig_channel(conn, skb);
4353 break;
4354
4355 case L2CAP_CID_CONN_LESS:
4356 psm = get_unaligned_le16(skb->data);
4357 skb_pull(skb, 2);
4358 l2cap_conless_channel(conn, psm, skb);
4359 break;
4360
4361 case L2CAP_CID_LE_DATA:
4362 l2cap_att_channel(conn, cid, skb);
4363 break;
4364
4365 case L2CAP_CID_SMP:
4366 if (smp_sig_channel(conn, skb))
4367 l2cap_conn_del(conn->hcon, EACCES);
4368 break;
4369
4370 default:
4371 l2cap_data_channel(conn, cid, skb);
4372 break;
4373 }
4374 }
4375
4376 /* ---- L2CAP interface with lower layer (HCI) ---- */
4377
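/* Incoming ACL connection indication from the HCI core.  Scan the
 * listening channels for an exact local address match (falling back to
 * BDADDR_ANY listeners) and report the accumulated link mode back to
 * HCI.
 */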
4378 static int l2cap_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
4379 {
4380 int exact = 0, lm1 = 0, lm2 = 0;
4381 struct l2cap_chan *c;
4382
4383 if (type != ACL_LINK)
4384 return -EINVAL;
4385
4386 BT_DBG("hdev %s, bdaddr %s", hdev->name, batostr(bdaddr));
4387
4388 /* Find listening sockets and check their link_mode */
4389 read_lock(&chan_list_lock);
4390 list_for_each_entry(c, &chan_list, global_l) {
4391 struct sock *sk = c->sk;
4392
4393 if (c->state != BT_LISTEN)
4394 continue;
4395
4396 if (!bacmp(&bt_sk(sk)->src, &hdev->bdaddr)) {
4397 lm1 |= HCI_LM_ACCEPT;
4398 if (test_bit(FLAG_ROLE_SWITCH, &c->flags))
4399 lm1 |= HCI_LM_MASTER;
4400 exact++;
4401 } else if (!bacmp(&bt_sk(sk)->src, BDADDR_ANY)) {
4402 lm2 |= HCI_LM_ACCEPT;
4403 if (test_bit(FLAG_ROLE_SWITCH, &c->flags))
4404 lm2 |= HCI_LM_MASTER;
4405 }
4406 }
4407 read_unlock(&chan_list_lock);
4408
4409 return exact ? lm1 : lm2;
4410 }
4411
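/* HCI connection complete confirmation: on success create the L2CAP
 * connection and start channel setup, otherwise tear the connection
 * down with the mapped error status.
 */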
4412 static int l2cap_connect_cfm(struct hci_conn *hcon, u8 status)
4413 {
4414 struct l2cap_conn *conn;
4415
4416 BT_DBG("hcon %p bdaddr %s status %d", hcon, batostr(&hcon->dst), status);
4417
4418 if (!(hcon->type == ACL_LINK || hcon->type == LE_LINK))
4419 return -EINVAL;
4420
4421 if (!status) {
4422 conn = l2cap_conn_add(hcon, status);
4423 if (conn)
4424 l2cap_conn_ready(conn);
4425 } else
4426 l2cap_conn_del(hcon, bt_to_errno(status));
4427
4428 return 0;
4429 }
4430
4431 static int l2cap_disconn_ind(struct hci_conn *hcon)
4432 {
4433 struct l2cap_conn *conn = hcon->l2cap_data;
4434
4435 BT_DBG("hcon %p", hcon);
4436
4437 if ((hcon->type != ACL_LINK && hcon->type != LE_LINK) || !conn)
4438 return HCI_ERROR_REMOTE_USER_TERM;
4439
4440 return conn->disc_reason;
4441 }
4442
4443 static int l2cap_disconn_cfm(struct hci_conn *hcon, u8 reason)
4444 {
4445 BT_DBG("hcon %p reason %d", hcon, reason);
4446
4447 if (!(hcon->type == ACL_LINK || hcon->type == LE_LINK))
4448 return -EINVAL;
4449
4450 l2cap_conn_del(hcon, bt_to_errno(reason));
4451
4452 return 0;
4453 }
4454
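/* React to a change of the link encryption state on a connection
 * oriented channel: when encryption is lost, give BT_SECURITY_MEDIUM
 * channels a grace timer and close BT_SECURITY_HIGH channels
 * immediately; when encryption is (re)enabled, clear any pending
 * timer on medium security channels.
 */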
4455 static inline void l2cap_check_encryption(struct l2cap_chan *chan, u8 encrypt)
4456 {
4457 if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED)
4458 return;
4459
4460 if (encrypt == 0x00) {
4461 if (chan->sec_level == BT_SECURITY_MEDIUM) {
4462 __clear_chan_timer(chan);
4463 __set_chan_timer(chan, L2CAP_ENC_TIMEOUT);
4464 } else if (chan->sec_level == BT_SECURITY_HIGH)
4465 l2cap_chan_close(chan, ECONNREFUSED);
4466 } else {
4467 if (chan->sec_level == BT_SECURITY_MEDIUM)
4468 __clear_chan_timer(chan);
4469 }
4470 }
4471
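/* Security (authentication/encryption) status change from the HCI
 * core.  Walk every channel on the connection: mark LE channels ready,
 * send the deferred Connection Request for channels in BT_CONNECT, and
 * answer with a Connection Response (success, pending or security
 * block) for channels in BT_CONNECT2.
 */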
4472 static int l2cap_security_cfm(struct hci_conn *hcon, u8 status, u8 encrypt)
4473 {
4474 struct l2cap_conn *conn = hcon->l2cap_data;
4475 struct l2cap_chan *chan;
4476
4477 if (!conn)
4478 return 0;
4479
4480 BT_DBG("conn %p", conn);
4481
4482 if (hcon->type == LE_LINK) {
4483 smp_distribute_keys(conn, 0);
4484 del_timer(&conn->security_timer);
4485 }
4486
4487 read_lock(&conn->chan_lock);
4488
4489 list_for_each_entry(chan, &conn->chan_l, list) {
4490 struct sock *sk = chan->sk;
4491
4492 bh_lock_sock(sk);
4493
4494 BT_DBG("chan->scid 0x%4.4x", chan->scid);
4495
4496 if (chan->scid == L2CAP_CID_LE_DATA) {
4497 if (!status && encrypt) {
4498 chan->sec_level = hcon->sec_level;
4499 l2cap_chan_ready(sk);
4500 }
4501
4502 bh_unlock_sock(sk);
4503 continue;
4504 }
4505
4506 if (test_bit(CONF_CONNECT_PEND, &chan->conf_state)) {
4507 bh_unlock_sock(sk);
4508 continue;
4509 }
4510
4511 if (!status && (chan->state == BT_CONNECTED ||
4512 chan->state == BT_CONFIG)) {
4513 l2cap_check_encryption(chan, encrypt);
4514 bh_unlock_sock(sk);
4515 continue;
4516 }
4517
4518 if (chan->state == BT_CONNECT) {
4519 if (!status) {
4520 struct l2cap_conn_req req;
4521 req.scid = cpu_to_le16(chan->scid);
4522 req.psm = chan->psm;
4523
4524 chan->ident = l2cap_get_ident(conn);
4525 set_bit(CONF_CONNECT_PEND, &chan->conf_state);
4526
4527 l2cap_send_cmd(conn, chan->ident,
4528 L2CAP_CONN_REQ, sizeof(req), &req);
4529 } else {
4530 __clear_chan_timer(chan);
4531 __set_chan_timer(chan, L2CAP_DISC_TIMEOUT);
4532 }
4533 } else if (chan->state == BT_CONNECT2) {
4534 struct l2cap_conn_rsp rsp;
4535 __u16 res, stat;
4536
4537 if (!status) {
4538 if (bt_sk(sk)->defer_setup) {
4539 struct sock *parent = bt_sk(sk)->parent;
4540 res = L2CAP_CR_PEND;
4541 stat = L2CAP_CS_AUTHOR_PEND;
4542 if (parent)
4543 parent->sk_data_ready(parent, 0);
4544 } else {
4545 l2cap_state_change(chan, BT_CONFIG);
4546 res = L2CAP_CR_SUCCESS;
4547 stat = L2CAP_CS_NO_INFO;
4548 }
4549 } else {
4550 l2cap_state_change(chan, BT_DISCONN);
4551 __set_chan_timer(chan, L2CAP_DISC_TIMEOUT);
4552 res = L2CAP_CR_SEC_BLOCK;
4553 stat = L2CAP_CS_NO_INFO;
4554 }
4555
4556 rsp.scid = cpu_to_le16(chan->dcid);
4557 rsp.dcid = cpu_to_le16(chan->scid);
4558 rsp.result = cpu_to_le16(res);
4559 rsp.status = cpu_to_le16(stat);
4560 l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
4561 sizeof(rsp), &rsp);
4562 }
4563
4564 bh_unlock_sock(sk);
4565 }
4566
4567 read_unlock(&conn->chan_lock);
4568
4569 return 0;
4570 }
4571
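/* Receive ACL data from the HCI core and reassemble it into complete
 * L2CAP frames.  A start fragment carries the basic header with the
 * total frame length; continuation fragments are appended to
 * conn->rx_skb until rx_len reaches zero, at which point the frame is
 * handed to l2cap_recv_frame().
 */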
4572 static int l2cap_recv_acldata(struct hci_conn *hcon, struct sk_buff *skb, u16 flags)
4573 {
4574 struct l2cap_conn *conn = hcon->l2cap_data;
4575
4576 if (!conn)
4577 conn = l2cap_conn_add(hcon, 0);
4578
4579 if (!conn)
4580 goto drop;
4581
4582 BT_DBG("conn %p len %d flags 0x%x", conn, skb->len, flags);
4583
4584 if (!(flags & ACL_CONT)) {
4585 struct l2cap_hdr *hdr;
4586 struct l2cap_chan *chan;
4587 u16 cid;
4588 int len;
4589
4590 if (conn->rx_len) {
4591 BT_ERR("Unexpected start frame (len %d)", skb->len);
4592 kfree_skb(conn->rx_skb);
4593 conn->rx_skb = NULL;
4594 conn->rx_len = 0;
4595 l2cap_conn_unreliable(conn, ECOMM);
4596 }
4597
4598 /* A start fragment always begins with the Basic L2CAP header */
4599 if (skb->len < L2CAP_HDR_SIZE) {
4600 BT_ERR("Frame is too short (len %d)", skb->len);
4601 l2cap_conn_unreliable(conn, ECOMM);
4602 goto drop;
4603 }
4604
4605 hdr = (struct l2cap_hdr *) skb->data;
4606 len = __le16_to_cpu(hdr->len) + L2CAP_HDR_SIZE;
4607 cid = __le16_to_cpu(hdr->cid);
4608
4609 if (len == skb->len) {
4610 /* Complete frame received */
4611 l2cap_recv_frame(conn, skb);
4612 return 0;
4613 }
4614
4615 BT_DBG("Start: total len %d, frag len %d", len, skb->len);
4616
4617 if (skb->len > len) {
4618 BT_ERR("Frame is too long (len %d, expected len %d)",
4619 skb->len, len);
4620 l2cap_conn_unreliable(conn, ECOMM);
4621 goto drop;
4622 }
4623
4624 chan = l2cap_get_chan_by_scid(conn, cid);
4625
4626 if (chan && chan->sk) {
4627 struct sock *sk = chan->sk;
4628
4629 if (chan->imtu < len - L2CAP_HDR_SIZE) {
4630 BT_ERR("Frame exceeding recv MTU (len %d, "
4631 "MTU %d)", len,
4632 chan->imtu);
4633 bh_unlock_sock(sk);
4634 l2cap_conn_unreliable(conn, ECOMM);
4635 goto drop;
4636 }
4637 bh_unlock_sock(sk);
4638 }
4639
4640 /* Allocate skb for the complete frame (with header) */
4641 conn->rx_skb = bt_skb_alloc(len, GFP_ATOMIC);
4642 if (!conn->rx_skb)
4643 goto drop;
4644
4645 skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
4646 skb->len);
4647 conn->rx_len = len - skb->len;
4648 } else {
4649 BT_DBG("Cont: frag len %d (expecting %d)", skb->len, conn->rx_len);
4650
4651 if (!conn->rx_len) {
4652 BT_ERR("Unexpected continuation frame (len %d)", skb->len);
4653 l2cap_conn_unreliable(conn, ECOMM);
4654 goto drop;
4655 }
4656
4657 if (skb->len > conn->rx_len) {
4658 BT_ERR("Fragment is too long (len %d, expected %d)",
4659 skb->len, conn->rx_len);
4660 kfree_skb(conn->rx_skb);
4661 conn->rx_skb = NULL;
4662 conn->rx_len = 0;
4663 l2cap_conn_unreliable(conn, ECOMM);
4664 goto drop;
4665 }
4666
4667 skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
4668 skb->len);
4669 conn->rx_len -= skb->len;
4670
4671 if (!conn->rx_len) {
4672 /* Complete frame received */
4673 l2cap_recv_frame(conn, conn->rx_skb);
4674 conn->rx_skb = NULL;
4675 }
4676 }
4677
4678 drop:
4679 kfree_skb(skb);
4680 return 0;
4681 }
4682
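/* debugfs: dump one line per registered channel (addresses, state,
 * PSM, CIDs, MTUs, security level and mode).
 */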
4683 static int l2cap_debugfs_show(struct seq_file *f, void *p)
4684 {
4685 struct l2cap_chan *c;
4686
4687 read_lock_bh(&chan_list_lock);
4688
4689 list_for_each_entry(c, &chan_list, global_l) {
4690 struct sock *sk = c->sk;
4691
4692 seq_printf(f, "%s %s %d %d 0x%4.4x 0x%4.4x %d %d %d %d\n",
4693 batostr(&bt_sk(sk)->src),
4694 batostr(&bt_sk(sk)->dst),
4695 c->state, __le16_to_cpu(c->psm),
4696 c->scid, c->dcid, c->imtu, c->omtu,
4697 c->sec_level, c->mode);
4698 }
4699
4700 read_unlock_bh(&chan_list_lock);
4701
4702 return 0;
4703 }
4704
4705 static int l2cap_debugfs_open(struct inode *inode, struct file *file)
4706 {
4707 return single_open(file, l2cap_debugfs_show, inode->i_private);
4708 }
4709
4710 static const struct file_operations l2cap_debugfs_fops = {
4711 .open = l2cap_debugfs_open,
4712 .read = seq_read,
4713 .llseek = seq_lseek,
4714 .release = single_release,
4715 };
4716
4717 static struct dentry *l2cap_debugfs;
4718
4719 static struct hci_proto l2cap_hci_proto = {
4720 .name = "L2CAP",
4721 .id = HCI_PROTO_L2CAP,
4722 .connect_ind = l2cap_connect_ind,
4723 .connect_cfm = l2cap_connect_cfm,
4724 .disconn_ind = l2cap_disconn_ind,
4725 .disconn_cfm = l2cap_disconn_cfm,
4726 .security_cfm = l2cap_security_cfm,
4727 .recv_acldata = l2cap_recv_acldata
4728 };
4729
4730 int __init l2cap_init(void)
4731 {
4732 int err;
4733
4734 err = l2cap_init_sockets();
4735 if (err < 0)
4736 return err;
4737
4738 err = hci_register_proto(&l2cap_hci_proto);
4739 if (err < 0) {
4740 BT_ERR("L2CAP protocol registration failed");
4741 bt_sock_unregister(BTPROTO_L2CAP);
4742 goto error;
4743 }
4744
4745 if (bt_debugfs) {
4746 l2cap_debugfs = debugfs_create_file("l2cap", 0444,
4747 bt_debugfs, NULL, &l2cap_debugfs_fops);
4748 if (!l2cap_debugfs)
4749 BT_ERR("Failed to create L2CAP debug file");
4750 }
4751
4752 return 0;
4753
4754 error:
4755 l2cap_cleanup_sockets();
4756 return err;
4757 }
4758
4759 void l2cap_exit(void)
4760 {
4761 debugfs_remove(l2cap_debugfs);
4762
4763 if (hci_unregister_proto(&l2cap_hci_proto) < 0)
4764 BT_ERR("L2CAP protocol unregistration failed");
4765
4766 l2cap_cleanup_sockets();
4767 }
4768
4769 module_param(disable_ertm, bool, 0644);
4770 MODULE_PARM_DESC(disable_ertm, "Disable enhanced retransmission mode");
4771
4772 module_param(enable_hs, bool, 0644);
4773 MODULE_PARM_DESC(enable_hs, "Enable High Speed");