Bluetooth: Define HCI reasons instead of magic number
net/bluetooth/l2cap_core.c (mt8127/android_kernel_alcatel_ttab.git)
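This listing shows net/bluetooth/l2cap_core.c as of the commit above. The commit title refers to replacing a raw HCI disconnect-reason value with the named constant from hci.h; in the code below the disconnect reason is already set via HCI_ERROR_REMOTE_USER_TERM (lines 316 and 1085). A minimal sketch of the kind of change the title describes, assuming HCI_ERROR_REMOTE_USER_TERM is defined as 0x13 ("Remote User Terminated Connection") in include/net/bluetooth/hci.h:

	/* before: bare HCI error code */
	conn->disc_reason = 0x13;

	/* after: named HCI reason */
	conn->disc_reason = HCI_ERROR_REMOTE_USER_TERM;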
1 /*
2 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (C) 2000-2001 Qualcomm Incorporated
4 Copyright (C) 2009-2010 Gustavo F. Padovan <gustavo@padovan.org>
5 Copyright (C) 2010 Google Inc.
6
7 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
8
9 This program is free software; you can redistribute it and/or modify
10 it under the terms of the GNU General Public License version 2 as
11 published by the Free Software Foundation;
12
13 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
14 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
15 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
16 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
17 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
18 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
19 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
20 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
21
22 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
23 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
24 SOFTWARE IS DISCLAIMED.
25 */
26
27 /* Bluetooth L2CAP core. */
28
29 #include <linux/module.h>
30
31 #include <linux/types.h>
32 #include <linux/capability.h>
33 #include <linux/errno.h>
34 #include <linux/kernel.h>
35 #include <linux/sched.h>
36 #include <linux/slab.h>
37 #include <linux/poll.h>
38 #include <linux/fcntl.h>
39 #include <linux/init.h>
40 #include <linux/interrupt.h>
41 #include <linux/socket.h>
42 #include <linux/skbuff.h>
43 #include <linux/list.h>
44 #include <linux/device.h>
45 #include <linux/debugfs.h>
46 #include <linux/seq_file.h>
47 #include <linux/uaccess.h>
48 #include <linux/crc16.h>
49 #include <net/sock.h>
50
51 #include <asm/system.h>
52 #include <asm/unaligned.h>
53
54 #include <net/bluetooth/bluetooth.h>
55 #include <net/bluetooth/hci_core.h>
56 #include <net/bluetooth/l2cap.h>
57 #include <net/bluetooth/smp.h>
58
59 int disable_ertm;
60 int enable_hs;
61
62 static u32 l2cap_feat_mask = L2CAP_FEAT_FIXED_CHAN;
63 static u8 l2cap_fixed_chan[8] = { L2CAP_FC_L2CAP, };
64
65 static LIST_HEAD(chan_list);
66 static DEFINE_RWLOCK(chan_list_lock);
67
68 static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
69 u8 code, u8 ident, u16 dlen, void *data);
70 static void l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len,
71 void *data);
72 static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data);
73 static void l2cap_send_disconn_req(struct l2cap_conn *conn,
74 struct l2cap_chan *chan, int err);
75
76 static int l2cap_ertm_data_rcv(struct sock *sk, struct sk_buff *skb);
77
78 /* ---- L2CAP channels ---- */
79
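/*
 * Channel reference counting: chan_hold() takes a reference on a
 * struct l2cap_chan and chan_put() drops it, freeing the channel when
 * the count reaches zero. Timers and the per-connection channel list
 * each hold their own reference (see l2cap_set_timer() and
 * __l2cap_chan_add() below).
 */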
80 static inline void chan_hold(struct l2cap_chan *c)
81 {
82 atomic_inc(&c->refcnt);
83 }
84
85 static inline void chan_put(struct l2cap_chan *c)
86 {
87 if (atomic_dec_and_test(&c->refcnt))
88 kfree(c);
89 }
90
91 static struct l2cap_chan *__l2cap_get_chan_by_dcid(struct l2cap_conn *conn, u16 cid)
92 {
93 struct l2cap_chan *c;
94
95 list_for_each_entry(c, &conn->chan_l, list) {
96 if (c->dcid == cid)
97 return c;
98 }
99 return NULL;
100
101 }
102
103 static struct l2cap_chan *__l2cap_get_chan_by_scid(struct l2cap_conn *conn, u16 cid)
104 {
105 struct l2cap_chan *c;
106
107 list_for_each_entry(c, &conn->chan_l, list) {
108 if (c->scid == cid)
109 return c;
110 }
111 return NULL;
112 }
113
114 /* Find channel with given SCID.
115 * Returns locked socket */
116 static struct l2cap_chan *l2cap_get_chan_by_scid(struct l2cap_conn *conn, u16 cid)
117 {
118 struct l2cap_chan *c;
119
120 read_lock(&conn->chan_lock);
121 c = __l2cap_get_chan_by_scid(conn, cid);
122 if (c)
123 bh_lock_sock(c->sk);
124 read_unlock(&conn->chan_lock);
125 return c;
126 }
127
128 static struct l2cap_chan *__l2cap_get_chan_by_ident(struct l2cap_conn *conn, u8 ident)
129 {
130 struct l2cap_chan *c;
131
132 list_for_each_entry(c, &conn->chan_l, list) {
133 if (c->ident == ident)
134 return c;
135 }
136 return NULL;
137 }
138
139 static inline struct l2cap_chan *l2cap_get_chan_by_ident(struct l2cap_conn *conn, u8 ident)
140 {
141 struct l2cap_chan *c;
142
143 read_lock(&conn->chan_lock);
144 c = __l2cap_get_chan_by_ident(conn, ident);
145 if (c)
146 bh_lock_sock(c->sk);
147 read_unlock(&conn->chan_lock);
148 return c;
149 }
150
151 static struct l2cap_chan *__l2cap_global_chan_by_addr(__le16 psm, bdaddr_t *src)
152 {
153 struct l2cap_chan *c;
154
155 list_for_each_entry(c, &chan_list, global_l) {
156 if (c->sport == psm && !bacmp(&bt_sk(c->sk)->src, src))
157 goto found;
158 }
159
160 c = NULL;
161 found:
162 return c;
163 }
164
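/*
 * Bind a channel to a PSM. A specific PSM is used as-is after checking
 * that it is not already bound on this source address; otherwise the
 * first free odd PSM in the dynamic range 0x1001-0x10ff is picked, as
 * the loop below shows.
 */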
165 int l2cap_add_psm(struct l2cap_chan *chan, bdaddr_t *src, __le16 psm)
166 {
167 int err;
168
169 write_lock_bh(&chan_list_lock);
170
171 if (psm && __l2cap_global_chan_by_addr(psm, src)) {
172 err = -EADDRINUSE;
173 goto done;
174 }
175
176 if (psm) {
177 chan->psm = psm;
178 chan->sport = psm;
179 err = 0;
180 } else {
181 u16 p;
182
183 err = -EINVAL;
184 for (p = 0x1001; p < 0x1100; p += 2)
185 if (!__l2cap_global_chan_by_addr(cpu_to_le16(p), src)) {
186 chan->psm = cpu_to_le16(p);
187 chan->sport = cpu_to_le16(p);
188 err = 0;
189 break;
190 }
191 }
192
193 done:
194 write_unlock_bh(&chan_list_lock);
195 return err;
196 }
197
198 int l2cap_add_scid(struct l2cap_chan *chan, __u16 scid)
199 {
200 write_lock_bh(&chan_list_lock);
201
202 chan->scid = scid;
203
204 write_unlock_bh(&chan_list_lock);
205
206 return 0;
207 }
208
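/*
 * Allocate the first unused source CID in the dynamic range
 * [L2CAP_CID_DYN_START, L2CAP_CID_DYN_END) for this connection.
 * Returns 0 if the range is exhausted.
 */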
209 static u16 l2cap_alloc_cid(struct l2cap_conn *conn)
210 {
211 u16 cid = L2CAP_CID_DYN_START;
212
213 for (; cid < L2CAP_CID_DYN_END; cid++) {
214 if (!__l2cap_get_chan_by_scid(conn, cid))
215 return cid;
216 }
217
218 return 0;
219 }
220
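/*
 * Channel timer helpers: mod_timer() returns 0 when the timer was not
 * already pending, so a channel reference is taken only when a timer is
 * armed from the idle state; l2cap_clear_timer() drops that reference
 * when it actually deletes a pending timer.
 */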
221 static void l2cap_set_timer(struct l2cap_chan *chan, struct timer_list *timer, long timeout)
222 {
223 BT_DBG("chan %p state %d timeout %ld", chan, chan->state, timeout);
224
225 if (!mod_timer(timer, jiffies + msecs_to_jiffies(timeout)))
226 chan_hold(chan);
227 }
228
229 static void l2cap_clear_timer(struct l2cap_chan *chan, struct timer_list *timer)
230 {
231 BT_DBG("chan %p state %d", chan, chan->state);
232
233 if (timer_pending(timer) && del_timer(timer))
234 chan_put(chan);
235 }
236
237 static void l2cap_state_change(struct l2cap_chan *chan, int state)
238 {
239 chan->state = state;
240 chan->ops->state_change(chan->data, state);
241 }
242
243 static void l2cap_chan_timeout(unsigned long arg)
244 {
245 struct l2cap_chan *chan = (struct l2cap_chan *) arg;
246 struct sock *sk = chan->sk;
247 int reason;
248
249 BT_DBG("chan %p state %d", chan, chan->state);
250
251 bh_lock_sock(sk);
252
253 if (sock_owned_by_user(sk)) {
254 /* sk is owned by user. Try again later */
255 __set_chan_timer(chan, HZ / 5);
256 bh_unlock_sock(sk);
257 chan_put(chan);
258 return;
259 }
260
261 if (chan->state == BT_CONNECTED || chan->state == BT_CONFIG)
262 reason = ECONNREFUSED;
263 else if (chan->state == BT_CONNECT &&
264 chan->sec_level != BT_SECURITY_SDP)
265 reason = ECONNREFUSED;
266 else
267 reason = ETIMEDOUT;
268
269 l2cap_chan_close(chan, reason);
270
271 bh_unlock_sock(sk);
272
273 chan->ops->close(chan->data);
274 chan_put(chan);
275 }
276
277 struct l2cap_chan *l2cap_chan_create(struct sock *sk)
278 {
279 struct l2cap_chan *chan;
280
281 chan = kzalloc(sizeof(*chan), GFP_ATOMIC);
282 if (!chan)
283 return NULL;
284
285 chan->sk = sk;
286
287 write_lock_bh(&chan_list_lock);
288 list_add(&chan->global_l, &chan_list);
289 write_unlock_bh(&chan_list_lock);
290
291 setup_timer(&chan->chan_timer, l2cap_chan_timeout, (unsigned long) chan);
292
293 chan->state = BT_OPEN;
294
295 atomic_set(&chan->refcnt, 1);
296
297 BT_DBG("sk %p chan %p", sk, chan);
298
299 return chan;
300 }
301
302 void l2cap_chan_destroy(struct l2cap_chan *chan)
303 {
304 write_lock_bh(&chan_list_lock);
305 list_del(&chan->global_l);
306 write_unlock_bh(&chan_list_lock);
307
308 chan_put(chan);
309 }
310
311 static void __l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan)
312 {
313 BT_DBG("conn %p, psm 0x%2.2x, dcid 0x%4.4x", conn,
314 chan->psm, chan->dcid);
315
316 conn->disc_reason = HCI_ERROR_REMOTE_USER_TERM;
317
318 chan->conn = conn;
319
320 if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED) {
321 if (conn->hcon->type == LE_LINK) {
322 /* LE connection */
323 chan->omtu = L2CAP_LE_DEFAULT_MTU;
324 chan->scid = L2CAP_CID_LE_DATA;
325 chan->dcid = L2CAP_CID_LE_DATA;
326 } else {
327 /* Alloc CID for connection-oriented socket */
328 chan->scid = l2cap_alloc_cid(conn);
329 chan->omtu = L2CAP_DEFAULT_MTU;
330 }
331 } else if (chan->chan_type == L2CAP_CHAN_CONN_LESS) {
332 /* Connectionless socket */
333 chan->scid = L2CAP_CID_CONN_LESS;
334 chan->dcid = L2CAP_CID_CONN_LESS;
335 chan->omtu = L2CAP_DEFAULT_MTU;
336 } else {
337 /* Raw socket can send/recv signalling messages only */
338 chan->scid = L2CAP_CID_SIGNALING;
339 chan->dcid = L2CAP_CID_SIGNALING;
340 chan->omtu = L2CAP_DEFAULT_MTU;
341 }
342
343 chan->local_id = L2CAP_BESTEFFORT_ID;
344 chan->local_stype = L2CAP_SERV_BESTEFFORT;
345 chan->local_msdu = L2CAP_DEFAULT_MAX_SDU_SIZE;
346 chan->local_sdu_itime = L2CAP_DEFAULT_SDU_ITIME;
347 chan->local_acc_lat = L2CAP_DEFAULT_ACC_LAT;
348 chan->local_flush_to = L2CAP_DEFAULT_FLUSH_TO;
349
350 chan_hold(chan);
351
352 list_add(&chan->list, &conn->chan_l);
353 }
354
355 /* Delete channel.
356 * Must be called on the locked socket. */
357 static void l2cap_chan_del(struct l2cap_chan *chan, int err)
358 {
359 struct sock *sk = chan->sk;
360 struct l2cap_conn *conn = chan->conn;
361 struct sock *parent = bt_sk(sk)->parent;
362
363 __clear_chan_timer(chan);
364
365 BT_DBG("chan %p, conn %p, err %d", chan, conn, err);
366
367 if (conn) {
368 /* Delete from channel list */
369 write_lock_bh(&conn->chan_lock);
370 list_del(&chan->list);
371 write_unlock_bh(&conn->chan_lock);
372 chan_put(chan);
373
374 chan->conn = NULL;
375 hci_conn_put(conn->hcon);
376 }
377
378 l2cap_state_change(chan, BT_CLOSED);
379 sock_set_flag(sk, SOCK_ZAPPED);
380
381 if (err)
382 sk->sk_err = err;
383
384 if (parent) {
385 bt_accept_unlink(sk);
386 parent->sk_data_ready(parent, 0);
387 } else
388 sk->sk_state_change(sk);
389
390 if (!(test_bit(CONF_OUTPUT_DONE, &chan->conf_state) &&
391 test_bit(CONF_INPUT_DONE, &chan->conf_state)))
392 return;
393
394 skb_queue_purge(&chan->tx_q);
395
396 if (chan->mode == L2CAP_MODE_ERTM) {
397 struct srej_list *l, *tmp;
398
399 __clear_retrans_timer(chan);
400 __clear_monitor_timer(chan);
401 __clear_ack_timer(chan);
402
403 skb_queue_purge(&chan->srej_q);
404
405 list_for_each_entry_safe(l, tmp, &chan->srej_l, list) {
406 list_del(&l->list);
407 kfree(l);
408 }
409 }
410 }
411
412 static void l2cap_chan_cleanup_listen(struct sock *parent)
413 {
414 struct sock *sk;
415
416 BT_DBG("parent %p", parent);
417
418 /* Close not yet accepted channels */
419 while ((sk = bt_accept_dequeue(parent, NULL))) {
420 struct l2cap_chan *chan = l2cap_pi(sk)->chan;
421 __clear_chan_timer(chan);
422 lock_sock(sk);
423 l2cap_chan_close(chan, ECONNRESET);
424 release_sock(sk);
425 chan->ops->close(chan->data);
426 }
427 }
428
429 void l2cap_chan_close(struct l2cap_chan *chan, int reason)
430 {
431 struct l2cap_conn *conn = chan->conn;
432 struct sock *sk = chan->sk;
433
434 BT_DBG("chan %p state %d socket %p", chan, chan->state, sk->sk_socket);
435
436 switch (chan->state) {
437 case BT_LISTEN:
438 l2cap_chan_cleanup_listen(sk);
439
440 l2cap_state_change(chan, BT_CLOSED);
441 sock_set_flag(sk, SOCK_ZAPPED);
442 break;
443
444 case BT_CONNECTED:
445 case BT_CONFIG:
446 if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED &&
447 conn->hcon->type == ACL_LINK) {
448 __clear_chan_timer(chan);
449 __set_chan_timer(chan, sk->sk_sndtimeo);
450 l2cap_send_disconn_req(conn, chan, reason);
451 } else
452 l2cap_chan_del(chan, reason);
453 break;
454
455 case BT_CONNECT2:
456 if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED &&
457 conn->hcon->type == ACL_LINK) {
458 struct l2cap_conn_rsp rsp;
459 __u16 result;
460
461 if (bt_sk(sk)->defer_setup)
462 result = L2CAP_CR_SEC_BLOCK;
463 else
464 result = L2CAP_CR_BAD_PSM;
465 l2cap_state_change(chan, BT_DISCONN);
466
467 rsp.scid = cpu_to_le16(chan->dcid);
468 rsp.dcid = cpu_to_le16(chan->scid);
469 rsp.result = cpu_to_le16(result);
470 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
471 l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
472 sizeof(rsp), &rsp);
473 }
474
475 l2cap_chan_del(chan, reason);
476 break;
477
478 case BT_CONNECT:
479 case BT_DISCONN:
480 l2cap_chan_del(chan, reason);
481 break;
482
483 default:
484 sock_set_flag(sk, SOCK_ZAPPED);
485 break;
486 }
487 }
488
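/*
 * Map the channel type and requested security level onto an HCI
 * authentication requirement: raw (signalling-only) channels map
 * high/medium security to dedicated bonding, the SDP PSM (0x0001)
 * never requires bonding, and other channels map high/medium security
 * to general bonding; MITM protection is added for BT_SECURITY_HIGH.
 */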
489 static inline u8 l2cap_get_auth_type(struct l2cap_chan *chan)
490 {
491 if (chan->chan_type == L2CAP_CHAN_RAW) {
492 switch (chan->sec_level) {
493 case BT_SECURITY_HIGH:
494 return HCI_AT_DEDICATED_BONDING_MITM;
495 case BT_SECURITY_MEDIUM:
496 return HCI_AT_DEDICATED_BONDING;
497 default:
498 return HCI_AT_NO_BONDING;
499 }
500 } else if (chan->psm == cpu_to_le16(0x0001)) {
501 if (chan->sec_level == BT_SECURITY_LOW)
502 chan->sec_level = BT_SECURITY_SDP;
503
504 if (chan->sec_level == BT_SECURITY_HIGH)
505 return HCI_AT_NO_BONDING_MITM;
506 else
507 return HCI_AT_NO_BONDING;
508 } else {
509 switch (chan->sec_level) {
510 case BT_SECURITY_HIGH:
511 return HCI_AT_GENERAL_BONDING_MITM;
512 case BT_SECURITY_MEDIUM:
513 return HCI_AT_GENERAL_BONDING;
514 default:
515 return HCI_AT_NO_BONDING;
516 }
517 }
518 }
519
520 /* Service level security */
521 static inline int l2cap_check_security(struct l2cap_chan *chan)
522 {
523 struct l2cap_conn *conn = chan->conn;
524 __u8 auth_type;
525
526 auth_type = l2cap_get_auth_type(chan);
527
528 return hci_conn_security(conn->hcon, chan->sec_level, auth_type);
529 }
530
531 static u8 l2cap_get_ident(struct l2cap_conn *conn)
532 {
533 u8 id;
534
535 /* Get next available identifier.
536 * 1 - 128 are used by kernel.
537 * 129 - 199 are reserved.
538 * 200 - 254 are used by utilities like l2ping, etc.
539 */
540
541 spin_lock_bh(&conn->lock);
542
543 if (++conn->tx_ident > 128)
544 conn->tx_ident = 1;
545
546 id = conn->tx_ident;
547
548 spin_unlock_bh(&conn->lock);
549
550 return id;
551 }
552
553 static void l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len, void *data)
554 {
555 struct sk_buff *skb = l2cap_build_cmd(conn, code, ident, len, data);
556 u8 flags;
557
558 BT_DBG("code 0x%2.2x", code);
559
560 if (!skb)
561 return;
562
563 if (lmp_no_flush_capable(conn->hcon->hdev))
564 flags = ACL_START_NO_FLUSH;
565 else
566 flags = ACL_START;
567
568 bt_cb(skb)->force_active = BT_POWER_FORCE_ACTIVE_ON;
569 skb->priority = HCI_PRIO_MAX;
570
571 hci_send_acl(conn->hchan, skb, flags);
572 }
573
574 static void l2cap_do_send(struct l2cap_chan *chan, struct sk_buff *skb)
575 {
576 struct hci_conn *hcon = chan->conn->hcon;
577 u16 flags;
578
579 BT_DBG("chan %p, skb %p len %d priority %u", chan, skb, skb->len,
580 skb->priority);
581
582 if (!test_bit(FLAG_FLUSHABLE, &chan->flags) &&
583 lmp_no_flush_capable(hcon->hdev))
584 flags = ACL_START_NO_FLUSH;
585 else
586 flags = ACL_START;
587
588 bt_cb(skb)->force_active = test_bit(FLAG_FORCE_ACTIVE, &chan->flags);
589 hci_send_acl(chan->conn->hchan, skb, flags);
590 }
591
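/*
 * Build and send an ERTM/streaming S-frame (supervisory frame) on a
 * connected channel: the control field (enhanced or extended, depending
 * on FLAG_EXT_CTRL) carries the F- and P-bits and the supervisory
 * function, and a CRC16 FCS is appended when L2CAP_FCS_CRC16 is in use.
 */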
592 static inline void l2cap_send_sframe(struct l2cap_chan *chan, u32 control)
593 {
594 struct sk_buff *skb;
595 struct l2cap_hdr *lh;
596 struct l2cap_conn *conn = chan->conn;
597 int count, hlen;
598
599 if (chan->state != BT_CONNECTED)
600 return;
601
602 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
603 hlen = L2CAP_EXT_HDR_SIZE;
604 else
605 hlen = L2CAP_ENH_HDR_SIZE;
606
607 if (chan->fcs == L2CAP_FCS_CRC16)
608 hlen += L2CAP_FCS_SIZE;
609
610 BT_DBG("chan %p, control 0x%8.8x", chan, control);
611
612 count = min_t(unsigned int, conn->mtu, hlen);
613
614 control |= __set_sframe(chan);
615
616 if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state))
617 control |= __set_ctrl_final(chan);
618
619 if (test_and_clear_bit(CONN_SEND_PBIT, &chan->conn_state))
620 control |= __set_ctrl_poll(chan);
621
622 skb = bt_skb_alloc(count, GFP_ATOMIC);
623 if (!skb)
624 return;
625
626 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
627 lh->len = cpu_to_le16(hlen - L2CAP_HDR_SIZE);
628 lh->cid = cpu_to_le16(chan->dcid);
629
630 __put_control(chan, control, skb_put(skb, __ctrl_size(chan)));
631
632 if (chan->fcs == L2CAP_FCS_CRC16) {
633 u16 fcs = crc16(0, (u8 *)lh, count - L2CAP_FCS_SIZE);
634 put_unaligned_le16(fcs, skb_put(skb, L2CAP_FCS_SIZE));
635 }
636
637 skb->priority = HCI_PRIO_MAX;
638 l2cap_do_send(chan, skb);
639 }
640
641 static inline void l2cap_send_rr_or_rnr(struct l2cap_chan *chan, u32 control)
642 {
643 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
644 control |= __set_ctrl_super(chan, L2CAP_SUPER_RNR);
645 set_bit(CONN_RNR_SENT, &chan->conn_state);
646 } else
647 control |= __set_ctrl_super(chan, L2CAP_SUPER_RR);
648
649 control |= __set_reqseq(chan, chan->buffer_seq);
650
651 l2cap_send_sframe(chan, control);
652 }
653
654 static inline int __l2cap_no_conn_pending(struct l2cap_chan *chan)
655 {
656 return !test_bit(CONF_CONNECT_PEND, &chan->conf_state);
657 }
658
659 static void l2cap_do_start(struct l2cap_chan *chan)
660 {
661 struct l2cap_conn *conn = chan->conn;
662
663 if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) {
664 if (!(conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE))
665 return;
666
667 if (l2cap_check_security(chan) &&
668 __l2cap_no_conn_pending(chan)) {
669 struct l2cap_conn_req req;
670 req.scid = cpu_to_le16(chan->scid);
671 req.psm = chan->psm;
672
673 chan->ident = l2cap_get_ident(conn);
674 set_bit(CONF_CONNECT_PEND, &chan->conf_state);
675
676 l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_REQ,
677 sizeof(req), &req);
678 }
679 } else {
680 struct l2cap_info_req req;
681 req.type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
682
683 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
684 conn->info_ident = l2cap_get_ident(conn);
685
686 mod_timer(&conn->info_timer, jiffies +
687 msecs_to_jiffies(L2CAP_INFO_TIMEOUT));
688
689 l2cap_send_cmd(conn, conn->info_ident,
690 L2CAP_INFO_REQ, sizeof(req), &req);
691 }
692 }
693
694 static inline int l2cap_mode_supported(__u8 mode, __u32 feat_mask)
695 {
696 u32 local_feat_mask = l2cap_feat_mask;
697 if (!disable_ertm)
698 local_feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING;
699
700 switch (mode) {
701 case L2CAP_MODE_ERTM:
702 return L2CAP_FEAT_ERTM & feat_mask & local_feat_mask;
703 case L2CAP_MODE_STREAMING:
704 return L2CAP_FEAT_STREAMING & feat_mask & local_feat_mask;
705 default:
706 return 0x00;
707 }
708 }
709
710 static void l2cap_send_disconn_req(struct l2cap_conn *conn, struct l2cap_chan *chan, int err)
711 {
712 struct sock *sk;
713 struct l2cap_disconn_req req;
714
715 if (!conn)
716 return;
717
718 sk = chan->sk;
719
720 if (chan->mode == L2CAP_MODE_ERTM) {
721 __clear_retrans_timer(chan);
722 __clear_monitor_timer(chan);
723 __clear_ack_timer(chan);
724 }
725
726 req.dcid = cpu_to_le16(chan->dcid);
727 req.scid = cpu_to_le16(chan->scid);
728 l2cap_send_cmd(conn, l2cap_get_ident(conn),
729 L2CAP_DISCONN_REQ, sizeof(req), &req);
730
731 l2cap_state_change(chan, BT_DISCONN);
732 sk->sk_err = err;
733 }
734
735 /* ---- L2CAP connections ---- */
736 static void l2cap_conn_start(struct l2cap_conn *conn)
737 {
738 struct l2cap_chan *chan, *tmp;
739
740 BT_DBG("conn %p", conn);
741
742 read_lock(&conn->chan_lock);
743
744 list_for_each_entry_safe(chan, tmp, &conn->chan_l, list) {
745 struct sock *sk = chan->sk;
746
747 bh_lock_sock(sk);
748
749 if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
750 bh_unlock_sock(sk);
751 continue;
752 }
753
754 if (chan->state == BT_CONNECT) {
755 struct l2cap_conn_req req;
756
757 if (!l2cap_check_security(chan) ||
758 !__l2cap_no_conn_pending(chan)) {
759 bh_unlock_sock(sk);
760 continue;
761 }
762
763 if (!l2cap_mode_supported(chan->mode, conn->feat_mask)
764 && test_bit(CONF_STATE2_DEVICE,
765 &chan->conf_state)) {
766 /* l2cap_chan_close() calls list_del(chan)
767 * so release the lock */
768 read_unlock(&conn->chan_lock);
769 l2cap_chan_close(chan, ECONNRESET);
770 read_lock(&conn->chan_lock);
771 bh_unlock_sock(sk);
772 continue;
773 }
774
775 req.scid = cpu_to_le16(chan->scid);
776 req.psm = chan->psm;
777
778 chan->ident = l2cap_get_ident(conn);
779 set_bit(CONF_CONNECT_PEND, &chan->conf_state);
780
781 l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_REQ,
782 sizeof(req), &req);
783
784 } else if (chan->state == BT_CONNECT2) {
785 struct l2cap_conn_rsp rsp;
786 char buf[128];
787 rsp.scid = cpu_to_le16(chan->dcid);
788 rsp.dcid = cpu_to_le16(chan->scid);
789
790 if (l2cap_check_security(chan)) {
791 if (bt_sk(sk)->defer_setup) {
792 struct sock *parent = bt_sk(sk)->parent;
793 rsp.result = cpu_to_le16(L2CAP_CR_PEND);
794 rsp.status = cpu_to_le16(L2CAP_CS_AUTHOR_PEND);
795 if (parent)
796 parent->sk_data_ready(parent, 0);
797
798 } else {
799 l2cap_state_change(chan, BT_CONFIG);
800 rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
801 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
802 }
803 } else {
804 rsp.result = cpu_to_le16(L2CAP_CR_PEND);
805 rsp.status = cpu_to_le16(L2CAP_CS_AUTHEN_PEND);
806 }
807
808 l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
809 sizeof(rsp), &rsp);
810
811 if (test_bit(CONF_REQ_SENT, &chan->conf_state) ||
812 rsp.result != L2CAP_CR_SUCCESS) {
813 bh_unlock_sock(sk);
814 continue;
815 }
816
817 set_bit(CONF_REQ_SENT, &chan->conf_state);
818 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
819 l2cap_build_conf_req(chan, buf), buf);
820 chan->num_conf_req++;
821 }
822
823 bh_unlock_sock(sk);
824 }
825
826 read_unlock(&conn->chan_lock);
827 }
828
829 /* Find socket with cid and source bdaddr.
830 * Returns closest match, locked.
831 */
832 static struct l2cap_chan *l2cap_global_chan_by_scid(int state, __le16 cid, bdaddr_t *src)
833 {
834 struct l2cap_chan *c, *c1 = NULL;
835
836 read_lock(&chan_list_lock);
837
838 list_for_each_entry(c, &chan_list, global_l) {
839 struct sock *sk = c->sk;
840
841 if (state && c->state != state)
842 continue;
843
844 if (c->scid == cid) {
845 /* Exact match. */
846 if (!bacmp(&bt_sk(sk)->src, src)) {
847 read_unlock(&chan_list_lock);
848 return c;
849 }
850
851 /* Closest match */
852 if (!bacmp(&bt_sk(sk)->src, BDADDR_ANY))
853 c1 = c;
854 }
855 }
856
857 read_unlock(&chan_list_lock);
858
859 return c1;
860 }
861
862 static void l2cap_le_conn_ready(struct l2cap_conn *conn)
863 {
864 struct sock *parent, *sk;
865 struct l2cap_chan *chan, *pchan;
866
867 BT_DBG("");
868
869 /* Check if we have socket listening on cid */
870 pchan = l2cap_global_chan_by_scid(BT_LISTEN, L2CAP_CID_LE_DATA,
871 conn->src);
872 if (!pchan)
873 return;
874
875 parent = pchan->sk;
876
877 bh_lock_sock(parent);
878
879 /* Check for backlog size */
880 if (sk_acceptq_is_full(parent)) {
881 BT_DBG("backlog full %d", parent->sk_ack_backlog);
882 goto clean;
883 }
884
885 chan = pchan->ops->new_connection(pchan->data);
886 if (!chan)
887 goto clean;
888
889 sk = chan->sk;
890
891 write_lock_bh(&conn->chan_lock);
892
893 hci_conn_hold(conn->hcon);
894
895 bacpy(&bt_sk(sk)->src, conn->src);
896 bacpy(&bt_sk(sk)->dst, conn->dst);
897
898 bt_accept_enqueue(parent, sk);
899
900 __l2cap_chan_add(conn, chan);
901
902 __set_chan_timer(chan, sk->sk_sndtimeo);
903
904 l2cap_state_change(chan, BT_CONNECTED);
905 parent->sk_data_ready(parent, 0);
906
907 write_unlock_bh(&conn->chan_lock);
908
909 clean:
910 bh_unlock_sock(parent);
911 }
912
913 static void l2cap_chan_ready(struct sock *sk)
914 {
915 struct l2cap_chan *chan = l2cap_pi(sk)->chan;
916 struct sock *parent = bt_sk(sk)->parent;
917
918 BT_DBG("sk %p, parent %p", sk, parent);
919
920 chan->conf_state = 0;
921 __clear_chan_timer(chan);
922
923 l2cap_state_change(chan, BT_CONNECTED);
924 sk->sk_state_change(sk);
925
926 if (parent)
927 parent->sk_data_ready(parent, 0);
928 }
929
930 static void l2cap_conn_ready(struct l2cap_conn *conn)
931 {
932 struct l2cap_chan *chan;
933
934 BT_DBG("conn %p", conn);
935
936 if (!conn->hcon->out && conn->hcon->type == LE_LINK)
937 l2cap_le_conn_ready(conn);
938
939 if (conn->hcon->out && conn->hcon->type == LE_LINK)
940 smp_conn_security(conn, conn->hcon->pending_sec_level);
941
942 read_lock(&conn->chan_lock);
943
944 list_for_each_entry(chan, &conn->chan_l, list) {
945 struct sock *sk = chan->sk;
946
947 bh_lock_sock(sk);
948
949 if (conn->hcon->type == LE_LINK) {
950 if (smp_conn_security(conn, chan->sec_level))
951 l2cap_chan_ready(sk);
952
953 } else if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
954 __clear_chan_timer(chan);
955 l2cap_state_change(chan, BT_CONNECTED);
956 sk->sk_state_change(sk);
957
958 } else if (chan->state == BT_CONNECT)
959 l2cap_do_start(chan);
960
961 bh_unlock_sock(sk);
962 }
963
964 read_unlock(&conn->chan_lock);
965 }
966
967 /* Notify sockets that we cannot guarantee reliability anymore */
968 static void l2cap_conn_unreliable(struct l2cap_conn *conn, int err)
969 {
970 struct l2cap_chan *chan;
971
972 BT_DBG("conn %p", conn);
973
974 read_lock(&conn->chan_lock);
975
976 list_for_each_entry(chan, &conn->chan_l, list) {
977 struct sock *sk = chan->sk;
978
979 if (test_bit(FLAG_FORCE_RELIABLE, &chan->flags))
980 sk->sk_err = err;
981 }
982
983 read_unlock(&conn->chan_lock);
984 }
985
986 static void l2cap_info_timeout(unsigned long arg)
987 {
988 struct l2cap_conn *conn = (void *) arg;
989
990 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
991 conn->info_ident = 0;
992
993 l2cap_conn_start(conn);
994 }
995
996 static void l2cap_conn_del(struct hci_conn *hcon, int err)
997 {
998 struct l2cap_conn *conn = hcon->l2cap_data;
999 struct l2cap_chan *chan, *l;
1000 struct sock *sk;
1001
1002 if (!conn)
1003 return;
1004
1005 BT_DBG("hcon %p conn %p, err %d", hcon, conn, err);
1006
1007 kfree_skb(conn->rx_skb);
1008
1009 /* Kill channels */
1010 list_for_each_entry_safe(chan, l, &conn->chan_l, list) {
1011 sk = chan->sk;
1012 bh_lock_sock(sk);
1013 l2cap_chan_del(chan, err);
1014 bh_unlock_sock(sk);
1015 chan->ops->close(chan->data);
1016 }
1017
1018 hci_chan_del(conn->hchan);
1019
1020 if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT)
1021 del_timer_sync(&conn->info_timer);
1022
1023 if (test_and_clear_bit(HCI_CONN_LE_SMP_PEND, &hcon->pend)) {
1024 del_timer(&conn->security_timer);
1025 smp_chan_destroy(conn);
1026 }
1027
1028 hcon->l2cap_data = NULL;
1029 kfree(conn);
1030 }
1031
1032 static void security_timeout(unsigned long arg)
1033 {
1034 struct l2cap_conn *conn = (void *) arg;
1035
1036 l2cap_conn_del(conn->hcon, ETIMEDOUT);
1037 }
1038
1039 static struct l2cap_conn *l2cap_conn_add(struct hci_conn *hcon, u8 status)
1040 {
1041 struct l2cap_conn *conn = hcon->l2cap_data;
1042 struct hci_chan *hchan;
1043
1044 if (conn || status)
1045 return conn;
1046
1047 hchan = hci_chan_create(hcon);
1048 if (!hchan)
1049 return NULL;
1050
1051 conn = kzalloc(sizeof(struct l2cap_conn), GFP_ATOMIC);
1052 if (!conn) {
1053 hci_chan_del(hchan);
1054 return NULL;
1055 }
1056
1057 hcon->l2cap_data = conn;
1058 conn->hcon = hcon;
1059 conn->hchan = hchan;
1060
1061 BT_DBG("hcon %p conn %p hchan %p", hcon, conn, hchan);
1062
1063 if (hcon->hdev->le_mtu && hcon->type == LE_LINK)
1064 conn->mtu = hcon->hdev->le_mtu;
1065 else
1066 conn->mtu = hcon->hdev->acl_mtu;
1067
1068 conn->src = &hcon->hdev->bdaddr;
1069 conn->dst = &hcon->dst;
1070
1071 conn->feat_mask = 0;
1072
1073 spin_lock_init(&conn->lock);
1074 rwlock_init(&conn->chan_lock);
1075
1076 INIT_LIST_HEAD(&conn->chan_l);
1077
1078 if (hcon->type == LE_LINK)
1079 setup_timer(&conn->security_timer, security_timeout,
1080 (unsigned long) conn);
1081 else
1082 setup_timer(&conn->info_timer, l2cap_info_timeout,
1083 (unsigned long) conn);
1084
1085 conn->disc_reason = HCI_ERROR_REMOTE_USER_TERM;
1086
1087 return conn;
1088 }
1089
1090 static inline void l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan)
1091 {
1092 write_lock_bh(&conn->chan_lock);
1093 __l2cap_chan_add(conn, chan);
1094 write_unlock_bh(&conn->chan_lock);
1095 }
1096
1097 /* ---- Socket interface ---- */
1098
1099 /* Find socket with psm and source bdaddr.
1100 * Returns closest match.
1101 */
1102 static struct l2cap_chan *l2cap_global_chan_by_psm(int state, __le16 psm, bdaddr_t *src)
1103 {
1104 struct l2cap_chan *c, *c1 = NULL;
1105
1106 read_lock(&chan_list_lock);
1107
1108 list_for_each_entry(c, &chan_list, global_l) {
1109 struct sock *sk = c->sk;
1110
1111 if (state && c->state != state)
1112 continue;
1113
1114 if (c->psm == psm) {
1115 /* Exact match. */
1116 if (!bacmp(&bt_sk(sk)->src, src)) {
1117 read_unlock(&chan_list_lock);
1118 return c;
1119 }
1120
1121 /* Closest match */
1122 if (!bacmp(&bt_sk(sk)->src, BDADDR_ANY))
1123 c1 = c;
1124 }
1125 }
1126
1127 read_unlock(&chan_list_lock);
1128
1129 return c1;
1130 }
1131
1132 int l2cap_chan_connect(struct l2cap_chan *chan)
1133 {
1134 struct sock *sk = chan->sk;
1135 bdaddr_t *src = &bt_sk(sk)->src;
1136 bdaddr_t *dst = &bt_sk(sk)->dst;
1137 struct l2cap_conn *conn;
1138 struct hci_conn *hcon;
1139 struct hci_dev *hdev;
1140 __u8 auth_type;
1141 int err;
1142
1143 BT_DBG("%s -> %s psm 0x%2.2x", batostr(src), batostr(dst),
1144 chan->psm);
1145
1146 hdev = hci_get_route(dst, src);
1147 if (!hdev)
1148 return -EHOSTUNREACH;
1149
1150 hci_dev_lock_bh(hdev);
1151
1152 auth_type = l2cap_get_auth_type(chan);
1153
1154 if (chan->dcid == L2CAP_CID_LE_DATA)
1155 hcon = hci_connect(hdev, LE_LINK, dst,
1156 chan->sec_level, auth_type);
1157 else
1158 hcon = hci_connect(hdev, ACL_LINK, dst,
1159 chan->sec_level, auth_type);
1160
1161 if (IS_ERR(hcon)) {
1162 err = PTR_ERR(hcon);
1163 goto done;
1164 }
1165
1166 conn = l2cap_conn_add(hcon, 0);
1167 if (!conn) {
1168 hci_conn_put(hcon);
1169 err = -ENOMEM;
1170 goto done;
1171 }
1172
1173 /* Update source addr of the socket */
1174 bacpy(src, conn->src);
1175
1176 l2cap_chan_add(conn, chan);
1177
1178 l2cap_state_change(chan, BT_CONNECT);
1179 __set_chan_timer(chan, sk->sk_sndtimeo);
1180
1181 if (hcon->state == BT_CONNECTED) {
1182 if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
1183 __clear_chan_timer(chan);
1184 if (l2cap_check_security(chan))
1185 l2cap_state_change(chan, BT_CONNECTED);
1186 } else
1187 l2cap_do_start(chan);
1188 }
1189
1190 err = 0;
1191
1192 done:
1193 hci_dev_unlock_bh(hdev);
1194 hci_dev_put(hdev);
1195 return err;
1196 }
1197
1198 int __l2cap_wait_ack(struct sock *sk)
1199 {
1200 struct l2cap_chan *chan = l2cap_pi(sk)->chan;
1201 DECLARE_WAITQUEUE(wait, current);
1202 int err = 0;
1203 int timeo = HZ/5;
1204
1205 add_wait_queue(sk_sleep(sk), &wait);
1206 set_current_state(TASK_INTERRUPTIBLE);
1207 while (chan->unacked_frames > 0 && chan->conn) {
1208 if (!timeo)
1209 timeo = HZ/5;
1210
1211 if (signal_pending(current)) {
1212 err = sock_intr_errno(timeo);
1213 break;
1214 }
1215
1216 release_sock(sk);
1217 timeo = schedule_timeout(timeo);
1218 lock_sock(sk);
1219 set_current_state(TASK_INTERRUPTIBLE);
1220
1221 err = sock_error(sk);
1222 if (err)
1223 break;
1224 }
1225 set_current_state(TASK_RUNNING);
1226 remove_wait_queue(sk_sleep(sk), &wait);
1227 return err;
1228 }
1229
1230 static void l2cap_monitor_timeout(unsigned long arg)
1231 {
1232 struct l2cap_chan *chan = (void *) arg;
1233 struct sock *sk = chan->sk;
1234
1235 BT_DBG("chan %p", chan);
1236
1237 bh_lock_sock(sk);
1238 if (chan->retry_count >= chan->remote_max_tx) {
1239 l2cap_send_disconn_req(chan->conn, chan, ECONNABORTED);
1240 bh_unlock_sock(sk);
1241 return;
1242 }
1243
1244 chan->retry_count++;
1245 __set_monitor_timer(chan);
1246
1247 l2cap_send_rr_or_rnr(chan, L2CAP_CTRL_POLL);
1248 bh_unlock_sock(sk);
1249 }
1250
1251 static void l2cap_retrans_timeout(unsigned long arg)
1252 {
1253 struct l2cap_chan *chan = (void *) arg;
1254 struct sock *sk = chan->sk;
1255
1256 BT_DBG("chan %p", chan);
1257
1258 bh_lock_sock(sk);
1259 chan->retry_count = 1;
1260 __set_monitor_timer(chan);
1261
1262 set_bit(CONN_WAIT_F, &chan->conn_state);
1263
1264 l2cap_send_rr_or_rnr(chan, L2CAP_CTRL_POLL);
1265 bh_unlock_sock(sk);
1266 }
1267
1268 static void l2cap_drop_acked_frames(struct l2cap_chan *chan)
1269 {
1270 struct sk_buff *skb;
1271
1272 while ((skb = skb_peek(&chan->tx_q)) &&
1273 chan->unacked_frames) {
1274 if (bt_cb(skb)->tx_seq == chan->expected_ack_seq)
1275 break;
1276
1277 skb = skb_dequeue(&chan->tx_q);
1278 kfree_skb(skb);
1279
1280 chan->unacked_frames--;
1281 }
1282
1283 if (!chan->unacked_frames)
1284 __clear_retrans_timer(chan);
1285 }
1286
1287 static void l2cap_streaming_send(struct l2cap_chan *chan)
1288 {
1289 struct sk_buff *skb;
1290 u32 control;
1291 u16 fcs;
1292
1293 while ((skb = skb_dequeue(&chan->tx_q))) {
1294 control = __get_control(chan, skb->data + L2CAP_HDR_SIZE);
1295 control |= __set_txseq(chan, chan->next_tx_seq);
1296 __put_control(chan, control, skb->data + L2CAP_HDR_SIZE);
1297
1298 if (chan->fcs == L2CAP_FCS_CRC16) {
1299 fcs = crc16(0, (u8 *)skb->data,
1300 skb->len - L2CAP_FCS_SIZE);
1301 put_unaligned_le16(fcs,
1302 skb->data + skb->len - L2CAP_FCS_SIZE);
1303 }
1304
1305 l2cap_do_send(chan, skb);
1306
1307 chan->next_tx_seq = __next_seq(chan, chan->next_tx_seq);
1308 }
1309 }
1310
1311 static void l2cap_retransmit_one_frame(struct l2cap_chan *chan, u16 tx_seq)
1312 {
1313 struct sk_buff *skb, *tx_skb;
1314 u16 fcs;
1315 u32 control;
1316
1317 skb = skb_peek(&chan->tx_q);
1318 if (!skb)
1319 return;
1320
1321 do {
1322 if (bt_cb(skb)->tx_seq == tx_seq)
1323 break;
1324
1325 if (skb_queue_is_last(&chan->tx_q, skb))
1326 return;
1327
1328 } while ((skb = skb_queue_next(&chan->tx_q, skb)));
1329
1330 if (chan->remote_max_tx &&
1331 bt_cb(skb)->retries == chan->remote_max_tx) {
1332 l2cap_send_disconn_req(chan->conn, chan, ECONNABORTED);
1333 return;
1334 }
1335
1336 tx_skb = skb_clone(skb, GFP_ATOMIC);
1337 bt_cb(skb)->retries++;
1338
1339 control = __get_control(chan, tx_skb->data + L2CAP_HDR_SIZE);
1340 control &= __get_sar_mask(chan);
1341
1342 if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state))
1343 control |= __set_ctrl_final(chan);
1344
1345 control |= __set_reqseq(chan, chan->buffer_seq);
1346 control |= __set_txseq(chan, tx_seq);
1347
1348 __put_control(chan, control, tx_skb->data + L2CAP_HDR_SIZE);
1349
1350 if (chan->fcs == L2CAP_FCS_CRC16) {
1351 fcs = crc16(0, (u8 *)tx_skb->data,
1352 tx_skb->len - L2CAP_FCS_SIZE);
1353 put_unaligned_le16(fcs,
1354 tx_skb->data + tx_skb->len - L2CAP_FCS_SIZE);
1355 }
1356
1357 l2cap_do_send(chan, tx_skb);
1358 }
1359
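/*
 * ERTM transmit path: walk tx_q from tx_send_head while the transmit
 * window is open, sending a clone of each I-frame so the original stays
 * queued for retransmission. Each frame is stamped with the current
 * ReqSeq/TxSeq (and an F-bit if one is owed), gets its FCS recomputed,
 * and arms the retransmission timer. If a frame has already been sent
 * remote_max_tx times, a disconnect request is issued instead
 * (ECONNABORTED).
 */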
1360 static int l2cap_ertm_send(struct l2cap_chan *chan)
1361 {
1362 struct sk_buff *skb, *tx_skb;
1363 u16 fcs;
1364 u32 control;
1365 int nsent = 0;
1366
1367 if (chan->state != BT_CONNECTED)
1368 return -ENOTCONN;
1369
1370 while ((skb = chan->tx_send_head) && (!l2cap_tx_window_full(chan))) {
1371
1372 if (chan->remote_max_tx &&
1373 bt_cb(skb)->retries == chan->remote_max_tx) {
1374 l2cap_send_disconn_req(chan->conn, chan, ECONNABORTED);
1375 break;
1376 }
1377
1378 tx_skb = skb_clone(skb, GFP_ATOMIC);
1379
1380 bt_cb(skb)->retries++;
1381
1382 control = __get_control(chan, tx_skb->data + L2CAP_HDR_SIZE);
1383 control &= __get_sar_mask(chan);
1384
1385 if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state))
1386 control |= __set_ctrl_final(chan);
1387
1388 control |= __set_reqseq(chan, chan->buffer_seq);
1389 control |= __set_txseq(chan, chan->next_tx_seq);
1390
1391 __put_control(chan, control, tx_skb->data + L2CAP_HDR_SIZE);
1392
1393 if (chan->fcs == L2CAP_FCS_CRC16) {
1394 fcs = crc16(0, (u8 *)skb->data,
1395 tx_skb->len - L2CAP_FCS_SIZE);
1396 put_unaligned_le16(fcs, skb->data +
1397 tx_skb->len - L2CAP_FCS_SIZE);
1398 }
1399
1400 l2cap_do_send(chan, tx_skb);
1401
1402 __set_retrans_timer(chan);
1403
1404 bt_cb(skb)->tx_seq = chan->next_tx_seq;
1405
1406 chan->next_tx_seq = __next_seq(chan, chan->next_tx_seq);
1407
1408 if (bt_cb(skb)->retries == 1)
1409 chan->unacked_frames++;
1410
1411 chan->frames_sent++;
1412
1413 if (skb_queue_is_last(&chan->tx_q, skb))
1414 chan->tx_send_head = NULL;
1415 else
1416 chan->tx_send_head = skb_queue_next(&chan->tx_q, skb);
1417
1418 nsent++;
1419 }
1420
1421 return nsent;
1422 }
1423
1424 static int l2cap_retransmit_frames(struct l2cap_chan *chan)
1425 {
1426 int ret;
1427
1428 if (!skb_queue_empty(&chan->tx_q))
1429 chan->tx_send_head = chan->tx_q.next;
1430
1431 chan->next_tx_seq = chan->expected_ack_seq;
1432 ret = l2cap_ertm_send(chan);
1433 return ret;
1434 }
1435
1436 static void l2cap_send_ack(struct l2cap_chan *chan)
1437 {
1438 u32 control = 0;
1439
1440 control |= __set_reqseq(chan, chan->buffer_seq);
1441
1442 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
1443 control |= __set_ctrl_super(chan, L2CAP_SUPER_RNR);
1444 set_bit(CONN_RNR_SENT, &chan->conn_state);
1445 l2cap_send_sframe(chan, control);
1446 return;
1447 }
1448
1449 if (l2cap_ertm_send(chan) > 0)
1450 return;
1451
1452 control |= __set_ctrl_super(chan, L2CAP_SUPER_RR);
1453 l2cap_send_sframe(chan, control);
1454 }
1455
1456 static void l2cap_send_srejtail(struct l2cap_chan *chan)
1457 {
1458 struct srej_list *tail;
1459 u32 control;
1460
1461 control = __set_ctrl_super(chan, L2CAP_SUPER_SREJ);
1462 control |= __set_ctrl_final(chan);
1463
1464 tail = list_entry((&chan->srej_l)->prev, struct srej_list, list);
1465 control |= __set_reqseq(chan, tail->tx_seq);
1466
1467 l2cap_send_sframe(chan, control);
1468 }
1469
1470 static inline int l2cap_skbuff_fromiovec(struct sock *sk, struct msghdr *msg, int len, int count, struct sk_buff *skb)
1471 {
1472 struct l2cap_conn *conn = l2cap_pi(sk)->chan->conn;
1473 struct sk_buff **frag;
1474 int err, sent = 0;
1475
1476 if (memcpy_fromiovec(skb_put(skb, count), msg->msg_iov, count))
1477 return -EFAULT;
1478
1479 sent += count;
1480 len -= count;
1481
1482 /* Continuation fragments (no L2CAP header) */
1483 frag = &skb_shinfo(skb)->frag_list;
1484 while (len) {
1485 count = min_t(unsigned int, conn->mtu, len);
1486
1487 *frag = bt_skb_send_alloc(sk, count, msg->msg_flags & MSG_DONTWAIT, &err);
1488 if (!*frag)
1489 return err;
1490 if (memcpy_fromiovec(skb_put(*frag, count), msg->msg_iov, count))
1491 return -EFAULT;
1492
1493 (*frag)->priority = skb->priority;
1494
1495 sent += count;
1496 len -= count;
1497
1498 frag = &(*frag)->next;
1499 }
1500
1501 return sent;
1502 }
1503
1504 static struct sk_buff *l2cap_create_connless_pdu(struct l2cap_chan *chan,
1505 struct msghdr *msg, size_t len,
1506 u32 priority)
1507 {
1508 struct sock *sk = chan->sk;
1509 struct l2cap_conn *conn = chan->conn;
1510 struct sk_buff *skb;
1511 int err, count, hlen = L2CAP_HDR_SIZE + L2CAP_PSMLEN_SIZE;
1512 struct l2cap_hdr *lh;
1513
1514 BT_DBG("sk %p len %d priority %u", sk, (int)len, priority);
1515
1516 count = min_t(unsigned int, (conn->mtu - hlen), len);
1517 skb = bt_skb_send_alloc(sk, count + hlen,
1518 msg->msg_flags & MSG_DONTWAIT, &err);
1519 if (!skb)
1520 return ERR_PTR(err);
1521
1522 skb->priority = priority;
1523
1524 /* Create L2CAP header */
1525 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
1526 lh->cid = cpu_to_le16(chan->dcid);
1527 lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
1528 put_unaligned_le16(chan->psm, skb_put(skb, 2));
1529
1530 err = l2cap_skbuff_fromiovec(sk, msg, len, count, skb);
1531 if (unlikely(err < 0)) {
1532 kfree_skb(skb);
1533 return ERR_PTR(err);
1534 }
1535 return skb;
1536 }
1537
1538 static struct sk_buff *l2cap_create_basic_pdu(struct l2cap_chan *chan,
1539 struct msghdr *msg, size_t len,
1540 u32 priority)
1541 {
1542 struct sock *sk = chan->sk;
1543 struct l2cap_conn *conn = chan->conn;
1544 struct sk_buff *skb;
1545 int err, count, hlen = L2CAP_HDR_SIZE;
1546 struct l2cap_hdr *lh;
1547
1548 BT_DBG("sk %p len %d", sk, (int)len);
1549
1550 count = min_t(unsigned int, (conn->mtu - hlen), len);
1551 skb = bt_skb_send_alloc(sk, count + hlen,
1552 msg->msg_flags & MSG_DONTWAIT, &err);
1553 if (!skb)
1554 return ERR_PTR(err);
1555
1556 skb->priority = priority;
1557
1558 /* Create L2CAP header */
1559 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
1560 lh->cid = cpu_to_le16(chan->dcid);
1561 lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
1562
1563 err = l2cap_skbuff_fromiovec(sk, msg, len, count, skb);
1564 if (unlikely(err < 0)) {
1565 kfree_skb(skb);
1566 return ERR_PTR(err);
1567 }
1568 return skb;
1569 }
1570
1571 static struct sk_buff *l2cap_create_iframe_pdu(struct l2cap_chan *chan,
1572 struct msghdr *msg, size_t len,
1573 u32 control, u16 sdulen)
1574 {
1575 struct sock *sk = chan->sk;
1576 struct l2cap_conn *conn = chan->conn;
1577 struct sk_buff *skb;
1578 int err, count, hlen;
1579 struct l2cap_hdr *lh;
1580
1581 BT_DBG("sk %p len %d", sk, (int)len);
1582
1583 if (!conn)
1584 return ERR_PTR(-ENOTCONN);
1585
1586 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
1587 hlen = L2CAP_EXT_HDR_SIZE;
1588 else
1589 hlen = L2CAP_ENH_HDR_SIZE;
1590
1591 if (sdulen)
1592 hlen += L2CAP_SDULEN_SIZE;
1593
1594 if (chan->fcs == L2CAP_FCS_CRC16)
1595 hlen += L2CAP_FCS_SIZE;
1596
1597 count = min_t(unsigned int, (conn->mtu - hlen), len);
1598 skb = bt_skb_send_alloc(sk, count + hlen,
1599 msg->msg_flags & MSG_DONTWAIT, &err);
1600 if (!skb)
1601 return ERR_PTR(err);
1602
1603 /* Create L2CAP header */
1604 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
1605 lh->cid = cpu_to_le16(chan->dcid);
1606 lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
1607
1608 __put_control(chan, control, skb_put(skb, __ctrl_size(chan)));
1609
1610 if (sdulen)
1611 put_unaligned_le16(sdulen, skb_put(skb, L2CAP_SDULEN_SIZE));
1612
1613 err = l2cap_skbuff_fromiovec(sk, msg, len, count, skb);
1614 if (unlikely(err < 0)) {
1615 kfree_skb(skb);
1616 return ERR_PTR(err);
1617 }
1618
1619 if (chan->fcs == L2CAP_FCS_CRC16)
1620 put_unaligned_le16(0, skb_put(skb, L2CAP_FCS_SIZE));
1621
1622 bt_cb(skb)->retries = 0;
1623 return skb;
1624 }
1625
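/*
 * Segment an SDU that does not fit in a single PDU: the first fragment
 * is a SAR "start" frame carrying the total SDU length, followed by
 * "continue" frames of remote_mps bytes and a final "end" frame. The
 * fragments are built on a temporary queue and spliced onto tx_q only
 * once the whole SDU has been segmented, so a mid-SDU allocation
 * failure leaves the transmit queue untouched.
 */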
1626 static int l2cap_sar_segment_sdu(struct l2cap_chan *chan, struct msghdr *msg, size_t len)
1627 {
1628 struct sk_buff *skb;
1629 struct sk_buff_head sar_queue;
1630 u32 control;
1631 size_t size = 0;
1632
1633 skb_queue_head_init(&sar_queue);
1634 control = __set_ctrl_sar(chan, L2CAP_SAR_START);
1635 skb = l2cap_create_iframe_pdu(chan, msg, chan->remote_mps, control, len);
1636 if (IS_ERR(skb))
1637 return PTR_ERR(skb);
1638
1639 __skb_queue_tail(&sar_queue, skb);
1640 len -= chan->remote_mps;
1641 size += chan->remote_mps;
1642
1643 while (len > 0) {
1644 size_t buflen;
1645
1646 if (len > chan->remote_mps) {
1647 control = __set_ctrl_sar(chan, L2CAP_SAR_CONTINUE);
1648 buflen = chan->remote_mps;
1649 } else {
1650 control = __set_ctrl_sar(chan, L2CAP_SAR_END);
1651 buflen = len;
1652 }
1653
1654 skb = l2cap_create_iframe_pdu(chan, msg, buflen, control, 0);
1655 if (IS_ERR(skb)) {
1656 skb_queue_purge(&sar_queue);
1657 return PTR_ERR(skb);
1658 }
1659
1660 __skb_queue_tail(&sar_queue, skb);
1661 len -= buflen;
1662 size += buflen;
1663 }
1664 skb_queue_splice_tail(&sar_queue, &chan->tx_q);
1665 if (chan->tx_send_head == NULL)
1666 chan->tx_send_head = sar_queue.next;
1667
1668 return size;
1669 }
1670
1671 int l2cap_chan_send(struct l2cap_chan *chan, struct msghdr *msg, size_t len,
1672 u32 priority)
1673 {
1674 struct sk_buff *skb;
1675 u32 control;
1676 int err;
1677
1678 /* Connectionless channel */
1679 if (chan->chan_type == L2CAP_CHAN_CONN_LESS) {
1680 skb = l2cap_create_connless_pdu(chan, msg, len, priority);
1681 if (IS_ERR(skb))
1682 return PTR_ERR(skb);
1683
1684 l2cap_do_send(chan, skb);
1685 return len;
1686 }
1687
1688 switch (chan->mode) {
1689 case L2CAP_MODE_BASIC:
1690 /* Check outgoing MTU */
1691 if (len > chan->omtu)
1692 return -EMSGSIZE;
1693
1694 /* Create a basic PDU */
1695 skb = l2cap_create_basic_pdu(chan, msg, len, priority);
1696 if (IS_ERR(skb))
1697 return PTR_ERR(skb);
1698
1699 l2cap_do_send(chan, skb);
1700 err = len;
1701 break;
1702
1703 case L2CAP_MODE_ERTM:
1704 case L2CAP_MODE_STREAMING:
1705 /* Entire SDU fits into one PDU */
1706 if (len <= chan->remote_mps) {
1707 control = __set_ctrl_sar(chan, L2CAP_SAR_UNSEGMENTED);
1708 skb = l2cap_create_iframe_pdu(chan, msg, len, control,
1709 0);
1710 if (IS_ERR(skb))
1711 return PTR_ERR(skb);
1712
1713 __skb_queue_tail(&chan->tx_q, skb);
1714
1715 if (chan->tx_send_head == NULL)
1716 chan->tx_send_head = skb;
1717
1718 } else {
1719 /* Segment SDU into multiple PDUs */
1720 err = l2cap_sar_segment_sdu(chan, msg, len);
1721 if (err < 0)
1722 return err;
1723 }
1724
1725 if (chan->mode == L2CAP_MODE_STREAMING) {
1726 l2cap_streaming_send(chan);
1727 err = len;
1728 break;
1729 }
1730
1731 if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state) &&
1732 test_bit(CONN_WAIT_F, &chan->conn_state)) {
1733 err = len;
1734 break;
1735 }
1736
1737 err = l2cap_ertm_send(chan);
1738 if (err >= 0)
1739 err = len;
1740
1741 break;
1742
1743 default:
1744 BT_DBG("bad state %1.1x", chan->mode);
1745 err = -EBADFD;
1746 }
1747
1748 return err;
1749 }
1750
1751 /* Copy frame to all raw sockets on that connection */
1752 static void l2cap_raw_recv(struct l2cap_conn *conn, struct sk_buff *skb)
1753 {
1754 struct sk_buff *nskb;
1755 struct l2cap_chan *chan;
1756
1757 BT_DBG("conn %p", conn);
1758
1759 read_lock(&conn->chan_lock);
1760 list_for_each_entry(chan, &conn->chan_l, list) {
1761 struct sock *sk = chan->sk;
1762 if (chan->chan_type != L2CAP_CHAN_RAW)
1763 continue;
1764
1765 /* Don't send frame to the socket it came from */
1766 if (skb->sk == sk)
1767 continue;
1768 nskb = skb_clone(skb, GFP_ATOMIC);
1769 if (!nskb)
1770 continue;
1771
1772 if (chan->ops->recv(chan->data, nskb))
1773 kfree_skb(nskb);
1774 }
1775 read_unlock(&conn->chan_lock);
1776 }
1777
1778 /* ---- L2CAP signalling commands ---- */
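/*
 * Build a signalling command PDU: an L2CAP header addressed to the
 * (LE) signalling CID, a command header (code/ident/length) and the
 * payload. Payloads larger than the connection MTU are split into
 * continuation fragments chained through frag_list.
 */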
1779 static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
1780 u8 code, u8 ident, u16 dlen, void *data)
1781 {
1782 struct sk_buff *skb, **frag;
1783 struct l2cap_cmd_hdr *cmd;
1784 struct l2cap_hdr *lh;
1785 int len, count;
1786
1787 BT_DBG("conn %p, code 0x%2.2x, ident 0x%2.2x, len %d",
1788 conn, code, ident, dlen);
1789
1790 len = L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE + dlen;
1791 count = min_t(unsigned int, conn->mtu, len);
1792
1793 skb = bt_skb_alloc(count, GFP_ATOMIC);
1794 if (!skb)
1795 return NULL;
1796
1797 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
1798 lh->len = cpu_to_le16(L2CAP_CMD_HDR_SIZE + dlen);
1799
1800 if (conn->hcon->type == LE_LINK)
1801 lh->cid = cpu_to_le16(L2CAP_CID_LE_SIGNALING);
1802 else
1803 lh->cid = cpu_to_le16(L2CAP_CID_SIGNALING);
1804
1805 cmd = (struct l2cap_cmd_hdr *) skb_put(skb, L2CAP_CMD_HDR_SIZE);
1806 cmd->code = code;
1807 cmd->ident = ident;
1808 cmd->len = cpu_to_le16(dlen);
1809
1810 if (dlen) {
1811 count -= L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE;
1812 memcpy(skb_put(skb, count), data, count);
1813 data += count;
1814 }
1815
1816 len -= skb->len;
1817
1818 /* Continuation fragments (no L2CAP header) */
1819 frag = &skb_shinfo(skb)->frag_list;
1820 while (len) {
1821 count = min_t(unsigned int, conn->mtu, len);
1822
1823 *frag = bt_skb_alloc(count, GFP_ATOMIC);
1824 if (!*frag)
1825 goto fail;
1826
1827 memcpy(skb_put(*frag, count), data, count);
1828
1829 len -= count;
1830 data += count;
1831
1832 frag = &(*frag)->next;
1833 }
1834
1835 return skb;
1836
1837 fail:
1838 kfree_skb(skb);
1839 return NULL;
1840 }
1841
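/*
 * Configuration options are encoded as type/length/value triplets.
 * l2cap_get_conf_opt() decodes the next option (returning 1-, 2- and
 * 4-byte values directly and longer ones by pointer) and advances the
 * cursor; l2cap_add_conf_opt() is its encoding counterpart.
 */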
1842 static inline int l2cap_get_conf_opt(void **ptr, int *type, int *olen, unsigned long *val)
1843 {
1844 struct l2cap_conf_opt *opt = *ptr;
1845 int len;
1846
1847 len = L2CAP_CONF_OPT_SIZE + opt->len;
1848 *ptr += len;
1849
1850 *type = opt->type;
1851 *olen = opt->len;
1852
1853 switch (opt->len) {
1854 case 1:
1855 *val = *((u8 *) opt->val);
1856 break;
1857
1858 case 2:
1859 *val = get_unaligned_le16(opt->val);
1860 break;
1861
1862 case 4:
1863 *val = get_unaligned_le32(opt->val);
1864 break;
1865
1866 default:
1867 *val = (unsigned long) opt->val;
1868 break;
1869 }
1870
1871 BT_DBG("type 0x%2.2x len %d val 0x%lx", *type, opt->len, *val);
1872 return len;
1873 }
1874
1875 static void l2cap_add_conf_opt(void **ptr, u8 type, u8 len, unsigned long val)
1876 {
1877 struct l2cap_conf_opt *opt = *ptr;
1878
1879 BT_DBG("type 0x%2.2x len %d val 0x%lx", type, len, val);
1880
1881 opt->type = type;
1882 opt->len = len;
1883
1884 switch (len) {
1885 case 1:
1886 *((u8 *) opt->val) = val;
1887 break;
1888
1889 case 2:
1890 put_unaligned_le16(val, opt->val);
1891 break;
1892
1893 case 4:
1894 put_unaligned_le32(val, opt->val);
1895 break;
1896
1897 default:
1898 memcpy(opt->val, (void *) val, len);
1899 break;
1900 }
1901
1902 *ptr += L2CAP_CONF_OPT_SIZE + len;
1903 }
1904
1905 static void l2cap_add_opt_efs(void **ptr, struct l2cap_chan *chan)
1906 {
1907 struct l2cap_conf_efs efs;
1908
1909 switch(chan->mode) {
1910 case L2CAP_MODE_ERTM:
1911 efs.id = chan->local_id;
1912 efs.stype = chan->local_stype;
1913 efs.msdu = cpu_to_le16(chan->local_msdu);
1914 efs.sdu_itime = cpu_to_le32(chan->local_sdu_itime);
1915 efs.acc_lat = cpu_to_le32(L2CAP_DEFAULT_ACC_LAT);
1916 efs.flush_to = cpu_to_le32(L2CAP_DEFAULT_FLUSH_TO);
1917 break;
1918
1919 case L2CAP_MODE_STREAMING:
1920 efs.id = 1;
1921 efs.stype = L2CAP_SERV_BESTEFFORT;
1922 efs.msdu = cpu_to_le16(chan->local_msdu);
1923 efs.sdu_itime = cpu_to_le32(chan->local_sdu_itime);
1924 efs.acc_lat = 0;
1925 efs.flush_to = 0;
1926 break;
1927
1928 default:
1929 return;
1930 }
1931
1932 l2cap_add_conf_opt(ptr, L2CAP_CONF_EFS, sizeof(efs),
1933 (unsigned long) &efs);
1934 }
1935
1936 static void l2cap_ack_timeout(unsigned long arg)
1937 {
1938 struct l2cap_chan *chan = (void *) arg;
1939
1940 bh_lock_sock(chan->sk);
1941 l2cap_send_ack(chan);
1942 bh_unlock_sock(chan->sk);
1943 }
1944
1945 static inline void l2cap_ertm_init(struct l2cap_chan *chan)
1946 {
1947 struct sock *sk = chan->sk;
1948
1949 chan->expected_ack_seq = 0;
1950 chan->unacked_frames = 0;
1951 chan->buffer_seq = 0;
1952 chan->num_acked = 0;
1953 chan->frames_sent = 0;
1954
1955 setup_timer(&chan->retrans_timer, l2cap_retrans_timeout,
1956 (unsigned long) chan);
1957 setup_timer(&chan->monitor_timer, l2cap_monitor_timeout,
1958 (unsigned long) chan);
1959 setup_timer(&chan->ack_timer, l2cap_ack_timeout, (unsigned long) chan);
1960
1961 skb_queue_head_init(&chan->srej_q);
1962
1963 INIT_LIST_HEAD(&chan->srej_l);
1964
1965
1966 sk->sk_backlog_rcv = l2cap_ertm_data_rcv;
1967 }
1968
1969 static inline __u8 l2cap_select_mode(__u8 mode, __u16 remote_feat_mask)
1970 {
1971 switch (mode) {
1972 case L2CAP_MODE_STREAMING:
1973 case L2CAP_MODE_ERTM:
1974 if (l2cap_mode_supported(mode, remote_feat_mask))
1975 return mode;
1976 /* fall through */
1977 default:
1978 return L2CAP_MODE_BASIC;
1979 }
1980 }
1981
1982 static inline bool __l2cap_ews_supported(struct l2cap_chan *chan)
1983 {
1984 return enable_hs && chan->conn->feat_mask & L2CAP_FEAT_EXT_WINDOW;
1985 }
1986
1987 static inline bool __l2cap_efs_supported(struct l2cap_chan *chan)
1988 {
1989 return enable_hs && chan->conn->feat_mask & L2CAP_FEAT_EXT_FLOW;
1990 }
1991
1992 static inline void l2cap_txwin_setup(struct l2cap_chan *chan)
1993 {
1994 if (chan->tx_win > L2CAP_DEFAULT_TX_WINDOW &&
1995 __l2cap_ews_supported(chan)) {
1996 /* use extended control field */
1997 set_bit(FLAG_EXT_CTRL, &chan->flags);
1998 chan->tx_win_max = L2CAP_DEFAULT_EXT_WINDOW;
1999 } else {
2000 chan->tx_win = min_t(u16, chan->tx_win,
2001 L2CAP_DEFAULT_TX_WINDOW);
2002 chan->tx_win_max = L2CAP_DEFAULT_TX_WINDOW;
2003 }
2004 }
2005
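/*
 * Build our configuration request for a channel: advertise a
 * non-default MTU, and for ERTM/streaming modes an RFC option sized
 * against the connection MTU, plus optional FCS, EFS and
 * extended-window options when those features are enabled. Falls back
 * to basic mode when the remote's feature mask does not support the
 * requested mode.
 */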
2006 static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data)
2007 {
2008 struct l2cap_conf_req *req = data;
2009 struct l2cap_conf_rfc rfc = { .mode = chan->mode };
2010 void *ptr = req->data;
2011 u16 size;
2012
2013 BT_DBG("chan %p", chan);
2014
2015 if (chan->num_conf_req || chan->num_conf_rsp)
2016 goto done;
2017
2018 switch (chan->mode) {
2019 case L2CAP_MODE_STREAMING:
2020 case L2CAP_MODE_ERTM:
2021 if (test_bit(CONF_STATE2_DEVICE, &chan->conf_state))
2022 break;
2023
2024 if (__l2cap_efs_supported(chan))
2025 set_bit(FLAG_EFS_ENABLE, &chan->flags);
2026
2027 /* fall through */
2028 default:
2029 chan->mode = l2cap_select_mode(rfc.mode, chan->conn->feat_mask);
2030 break;
2031 }
2032
2033 done:
2034 if (chan->imtu != L2CAP_DEFAULT_MTU)
2035 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->imtu);
2036
2037 switch (chan->mode) {
2038 case L2CAP_MODE_BASIC:
2039 if (!(chan->conn->feat_mask & L2CAP_FEAT_ERTM) &&
2040 !(chan->conn->feat_mask & L2CAP_FEAT_STREAMING))
2041 break;
2042
2043 rfc.mode = L2CAP_MODE_BASIC;
2044 rfc.txwin_size = 0;
2045 rfc.max_transmit = 0;
2046 rfc.retrans_timeout = 0;
2047 rfc.monitor_timeout = 0;
2048 rfc.max_pdu_size = 0;
2049
2050 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
2051 (unsigned long) &rfc);
2052 break;
2053
2054 case L2CAP_MODE_ERTM:
2055 rfc.mode = L2CAP_MODE_ERTM;
2056 rfc.max_transmit = chan->max_tx;
2057 rfc.retrans_timeout = 0;
2058 rfc.monitor_timeout = 0;
2059
2060 size = min_t(u16, L2CAP_DEFAULT_MAX_PDU_SIZE, chan->conn->mtu -
2061 L2CAP_EXT_HDR_SIZE -
2062 L2CAP_SDULEN_SIZE -
2063 L2CAP_FCS_SIZE);
2064 rfc.max_pdu_size = cpu_to_le16(size);
2065
2066 l2cap_txwin_setup(chan);
2067
2068 rfc.txwin_size = min_t(u16, chan->tx_win,
2069 L2CAP_DEFAULT_TX_WINDOW);
2070
2071 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
2072 (unsigned long) &rfc);
2073
2074 if (test_bit(FLAG_EFS_ENABLE, &chan->flags))
2075 l2cap_add_opt_efs(&ptr, chan);
2076
2077 if (!(chan->conn->feat_mask & L2CAP_FEAT_FCS))
2078 break;
2079
2080 if (chan->fcs == L2CAP_FCS_NONE ||
2081 test_bit(CONF_NO_FCS_RECV, &chan->conf_state)) {
2082 chan->fcs = L2CAP_FCS_NONE;
2083 l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1, chan->fcs);
2084 }
2085
2086 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
2087 l2cap_add_conf_opt(&ptr, L2CAP_CONF_EWS, 2,
2088 chan->tx_win);
2089 break;
2090
2091 case L2CAP_MODE_STREAMING:
2092 rfc.mode = L2CAP_MODE_STREAMING;
2093 rfc.txwin_size = 0;
2094 rfc.max_transmit = 0;
2095 rfc.retrans_timeout = 0;
2096 rfc.monitor_timeout = 0;
2097
2098 size = min_t(u16, L2CAP_DEFAULT_MAX_PDU_SIZE, chan->conn->mtu -
2099 L2CAP_EXT_HDR_SIZE -
2100 L2CAP_SDULEN_SIZE -
2101 L2CAP_FCS_SIZE);
2102 rfc.max_pdu_size = cpu_to_le16(size);
2103
2104 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
2105 (unsigned long) &rfc);
2106
2107 if (test_bit(FLAG_EFS_ENABLE, &chan->flags))
2108 l2cap_add_opt_efs(&ptr, chan);
2109
2110 if (!(chan->conn->feat_mask & L2CAP_FEAT_FCS))
2111 break;
2112
2113 if (chan->fcs == L2CAP_FCS_NONE ||
2114 test_bit(CONF_NO_FCS_RECV, &chan->conf_state)) {
2115 chan->fcs = L2CAP_FCS_NONE;
2116 l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1, chan->fcs);
2117 }
2118 break;
2119 }
2120
2121 req->dcid = cpu_to_le16(chan->dcid);
2122 req->flags = cpu_to_le16(0);
2123
2124 return ptr - data;
2125 }
2126
2127 static int l2cap_parse_conf_req(struct l2cap_chan *chan, void *data)
2128 {
2129 struct l2cap_conf_rsp *rsp = data;
2130 void *ptr = rsp->data;
2131 void *req = chan->conf_req;
2132 int len = chan->conf_len;
2133 int type, hint, olen;
2134 unsigned long val;
2135 struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
2136 struct l2cap_conf_efs efs;
2137 u8 remote_efs = 0;
2138 u16 mtu = L2CAP_DEFAULT_MTU;
2139 u16 result = L2CAP_CONF_SUCCESS;
2140 u16 size;
2141
2142 BT_DBG("chan %p", chan);
2143
2144 while (len >= L2CAP_CONF_OPT_SIZE) {
2145 len -= l2cap_get_conf_opt(&req, &type, &olen, &val);
2146
2147 hint = type & L2CAP_CONF_HINT;
2148 type &= L2CAP_CONF_MASK;
2149
2150 switch (type) {
2151 case L2CAP_CONF_MTU:
2152 mtu = val;
2153 break;
2154
2155 case L2CAP_CONF_FLUSH_TO:
2156 chan->flush_to = val;
2157 break;
2158
2159 case L2CAP_CONF_QOS:
2160 break;
2161
2162 case L2CAP_CONF_RFC:
2163 if (olen == sizeof(rfc))
2164 memcpy(&rfc, (void *) val, olen);
2165 break;
2166
2167 case L2CAP_CONF_FCS:
2168 if (val == L2CAP_FCS_NONE)
2169 set_bit(CONF_NO_FCS_RECV, &chan->conf_state);
2170 break;
2171
2172 case L2CAP_CONF_EFS:
2173 remote_efs = 1;
2174 if (olen == sizeof(efs))
2175 memcpy(&efs, (void *) val, olen);
2176 break;
2177
2178 case L2CAP_CONF_EWS:
2179 if (!enable_hs)
2180 return -ECONNREFUSED;
2181
2182 set_bit(FLAG_EXT_CTRL, &chan->flags);
2183 set_bit(CONF_EWS_RECV, &chan->conf_state);
2184 chan->tx_win_max = L2CAP_DEFAULT_EXT_WINDOW;
2185 chan->remote_tx_win = val;
2186 break;
2187
2188 default:
2189 if (hint)
2190 break;
2191
2192 result = L2CAP_CONF_UNKNOWN;
2193 *((u8 *) ptr++) = type;
2194 break;
2195 }
2196 }
2197
2198 if (chan->num_conf_rsp || chan->num_conf_req > 1)
2199 goto done;
2200
2201 switch (chan->mode) {
2202 case L2CAP_MODE_STREAMING:
2203 case L2CAP_MODE_ERTM:
2204 if (!test_bit(CONF_STATE2_DEVICE, &chan->conf_state)) {
2205 chan->mode = l2cap_select_mode(rfc.mode,
2206 chan->conn->feat_mask);
2207 break;
2208 }
2209
2210 if (remote_efs) {
2211 if (__l2cap_efs_supported(chan))
2212 set_bit(FLAG_EFS_ENABLE, &chan->flags);
2213 else
2214 return -ECONNREFUSED;
2215 }
2216
2217 if (chan->mode != rfc.mode)
2218 return -ECONNREFUSED;
2219
2220 break;
2221 }
2222
2223 done:
2224 if (chan->mode != rfc.mode) {
2225 result = L2CAP_CONF_UNACCEPT;
2226 rfc.mode = chan->mode;
2227
2228 if (chan->num_conf_rsp == 1)
2229 return -ECONNREFUSED;
2230
2231 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
2232 sizeof(rfc), (unsigned long) &rfc);
2233 }
2234
2235 if (result == L2CAP_CONF_SUCCESS) {
2236 /* Configure output options and let the other side know
2237 * which ones we don't like. */
2238
2239 if (mtu < L2CAP_DEFAULT_MIN_MTU)
2240 result = L2CAP_CONF_UNACCEPT;
2241 else {
2242 chan->omtu = mtu;
2243 set_bit(CONF_MTU_DONE, &chan->conf_state);
2244 }
2245 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->omtu);
2246
2247 if (remote_efs) {
2248 if (chan->local_stype != L2CAP_SERV_NOTRAFIC &&
2249 efs.stype != L2CAP_SERV_NOTRAFIC &&
2250 efs.stype != chan->local_stype) {
2251
2252 result = L2CAP_CONF_UNACCEPT;
2253
2254 if (chan->num_conf_req >= 1)
2255 return -ECONNREFUSED;
2256
2257 l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS,
2258 sizeof(efs),
2259 (unsigned long) &efs);
2260 } else {
2261 /* Send PENDING Conf Rsp */
2262 result = L2CAP_CONF_PENDING;
2263 set_bit(CONF_LOC_CONF_PEND, &chan->conf_state);
2264 }
2265 }
2266
2267 switch (rfc.mode) {
2268 case L2CAP_MODE_BASIC:
2269 chan->fcs = L2CAP_FCS_NONE;
2270 set_bit(CONF_MODE_DONE, &chan->conf_state);
2271 break;
2272
2273 case L2CAP_MODE_ERTM:
2274 if (!test_bit(CONF_EWS_RECV, &chan->conf_state))
2275 chan->remote_tx_win = rfc.txwin_size;
2276 else
2277 rfc.txwin_size = L2CAP_DEFAULT_TX_WINDOW;
2278
2279 chan->remote_max_tx = rfc.max_transmit;
2280
2281 size = min_t(u16, le16_to_cpu(rfc.max_pdu_size),
2282 chan->conn->mtu -
2283 L2CAP_EXT_HDR_SIZE -
2284 L2CAP_SDULEN_SIZE -
2285 L2CAP_FCS_SIZE);
2286 rfc.max_pdu_size = cpu_to_le16(size);
2287 chan->remote_mps = size;
2288
2289 rfc.retrans_timeout =
2290 cpu_to_le16(L2CAP_DEFAULT_RETRANS_TO);
2291 rfc.monitor_timeout =
2292 cpu_to_le16(L2CAP_DEFAULT_MONITOR_TO);
2293
2294 set_bit(CONF_MODE_DONE, &chan->conf_state);
2295
2296 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
2297 sizeof(rfc), (unsigned long) &rfc);
2298
2299 if (test_bit(FLAG_EFS_ENABLE, &chan->flags)) {
2300 chan->remote_id = efs.id;
2301 chan->remote_stype = efs.stype;
2302 chan->remote_msdu = le16_to_cpu(efs.msdu);
2303 chan->remote_flush_to =
2304 le32_to_cpu(efs.flush_to);
2305 chan->remote_acc_lat =
2306 le32_to_cpu(efs.acc_lat);
2307 chan->remote_sdu_itime =
2308 le32_to_cpu(efs.sdu_itime);
2309 l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS,
2310 sizeof(efs), (unsigned long) &efs);
2311 }
2312 break;
2313
2314 case L2CAP_MODE_STREAMING:
2315 size = min_t(u16, le16_to_cpu(rfc.max_pdu_size),
2316 chan->conn->mtu -
2317 L2CAP_EXT_HDR_SIZE -
2318 L2CAP_SDULEN_SIZE -
2319 L2CAP_FCS_SIZE);
2320 rfc.max_pdu_size = cpu_to_le16(size);
2321 chan->remote_mps = size;
2322
2323 set_bit(CONF_MODE_DONE, &chan->conf_state);
2324
2325 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
2326 sizeof(rfc), (unsigned long) &rfc);
2327
2328 break;
2329
2330 default:
2331 result = L2CAP_CONF_UNACCEPT;
2332
2333 memset(&rfc, 0, sizeof(rfc));
2334 rfc.mode = chan->mode;
2335 }
2336
2337 if (result == L2CAP_CONF_SUCCESS)
2338 set_bit(CONF_OUTPUT_DONE, &chan->conf_state);
2339 }
2340 rsp->scid = cpu_to_le16(chan->dcid);
2341 rsp->result = cpu_to_le16(result);
2342 rsp->flags = cpu_to_le16(0x0000);
2343
2344 return ptr - data;
2345 }
2346
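/* Walk the options the peer attached to its Configure Response and
 * adjust our settings (imtu, flush timeout, RFC, extended window)
 * accordingly, while rebuilding a new Configure Request in 'data'.
 * '*result' is updated when an option is unacceptable.  Returns the
 * length of the new request or -ECONNREFUSED.
 */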
2347 static int l2cap_parse_conf_rsp(struct l2cap_chan *chan, void *rsp, int len, void *data, u16 *result)
2348 {
2349 struct l2cap_conf_req *req = data;
2350 void *ptr = req->data;
2351 int type, olen;
2352 unsigned long val;
2353 struct l2cap_conf_rfc rfc = {
 .mode = chan->mode,
 .retrans_timeout = cpu_to_le16(L2CAP_DEFAULT_RETRANS_TO),
 .monitor_timeout = cpu_to_le16(L2CAP_DEFAULT_MONITOR_TO),
 .max_pdu_size = cpu_to_le16(L2CAP_DEFAULT_MAX_PDU_SIZE),
 };
2354
2355 BT_DBG("chan %p, rsp %p, len %d, req %p", chan, rsp, len, data);
2356
2357 while (len >= L2CAP_CONF_OPT_SIZE) {
2358 len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);
2359
2360 switch (type) {
2361 case L2CAP_CONF_MTU:
2362 if (val < L2CAP_DEFAULT_MIN_MTU) {
2363 *result = L2CAP_CONF_UNACCEPT;
2364 chan->imtu = L2CAP_DEFAULT_MIN_MTU;
2365 } else
2366 chan->imtu = val;
2367 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->imtu);
2368 break;
2369
2370 case L2CAP_CONF_FLUSH_TO:
2371 chan->flush_to = val;
2372 l2cap_add_conf_opt(&ptr, L2CAP_CONF_FLUSH_TO,
2373 2, chan->flush_to);
2374 break;
2375
2376 case L2CAP_CONF_RFC:
2377 if (olen == sizeof(rfc))
2378 memcpy(&rfc, (void *)val, olen);
2379
2380 if (test_bit(CONF_STATE2_DEVICE, &chan->conf_state) &&
2381 rfc.mode != chan->mode)
2382 return -ECONNREFUSED;
2383
2384 chan->fcs = 0;
2385
2386 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
2387 sizeof(rfc), (unsigned long) &rfc);
2388 break;
2389
2390 case L2CAP_CONF_EWS:
2391 chan->tx_win = min_t(u16, val,
2392 L2CAP_DEFAULT_EXT_WINDOW);
2393 l2cap_add_conf_opt(&ptr, L2CAP_CONF_EWS, 2,
2394 chan->tx_win);
2395 break;
2396 }
2397 }
2398
2399 if (chan->mode == L2CAP_MODE_BASIC && chan->mode != rfc.mode)
2400 return -ECONNREFUSED;
2401
2402 chan->mode = rfc.mode;
2403
2404 if (*result == L2CAP_CONF_SUCCESS || *result == L2CAP_CONF_PENDING) {
2405 switch (rfc.mode) {
2406 case L2CAP_MODE_ERTM:
2407 chan->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
2408 chan->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
2409 chan->mps = le16_to_cpu(rfc.max_pdu_size);
2410 break;
2411 case L2CAP_MODE_STREAMING:
2412 chan->mps = le16_to_cpu(rfc.max_pdu_size);
2413 }
2414 }
2415
2416 req->dcid = cpu_to_le16(chan->dcid);
2417 req->flags = cpu_to_le16(0x0000);
2418
2419 return ptr - data;
2420 }
2421
2422 static int l2cap_build_conf_rsp(struct l2cap_chan *chan, void *data, u16 result, u16 flags)
2423 {
2424 struct l2cap_conf_rsp *rsp = data;
2425 void *ptr = rsp->data;
2426
2427 BT_DBG("chan %p", chan);
2428
2429 rsp->scid = cpu_to_le16(chan->dcid);
2430 rsp->result = cpu_to_le16(result);
2431 rsp->flags = cpu_to_le16(flags);
2432
2433 return ptr - data;
2434 }
2435
2436 void __l2cap_connect_rsp_defer(struct l2cap_chan *chan)
2437 {
2438 struct l2cap_conn_rsp rsp;
2439 struct l2cap_conn *conn = chan->conn;
2440 u8 buf[128];
2441
2442 rsp.scid = cpu_to_le16(chan->dcid);
2443 rsp.dcid = cpu_to_le16(chan->scid);
2444 rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
2445 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
2446 l2cap_send_cmd(conn, chan->ident,
2447 L2CAP_CONN_RSP, sizeof(rsp), &rsp);
2448
2449 if (test_and_set_bit(CONF_REQ_SENT, &chan->conf_state))
2450 return;
2451
2452 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
2453 l2cap_build_conf_req(chan, buf), buf);
2454 chan->num_conf_req++;
2455 }
2456
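/* Extract the RFC option from a successful Configure Response and
 * cache the retransmission/monitor timeouts and MPS for ERTM and
 * streaming channels.
 */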
2457 static void l2cap_conf_rfc_get(struct l2cap_chan *chan, void *rsp, int len)
2458 {
2459 int type, olen;
2460 unsigned long val;
2461 struct l2cap_conf_rfc rfc = {
 .mode = chan->mode,
 .retrans_timeout = cpu_to_le16(L2CAP_DEFAULT_RETRANS_TO),
 .monitor_timeout = cpu_to_le16(L2CAP_DEFAULT_MONITOR_TO),
 .max_pdu_size = cpu_to_le16(L2CAP_DEFAULT_MAX_PDU_SIZE),
 };
2462
2463 BT_DBG("chan %p, rsp %p, len %d", chan, rsp, len);
2464
2465 if ((chan->mode != L2CAP_MODE_ERTM) && (chan->mode != L2CAP_MODE_STREAMING))
2466 return;
2467
2468 while (len >= L2CAP_CONF_OPT_SIZE) {
2469 len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);
2470
2471 switch (type) {
2472 case L2CAP_CONF_RFC:
2473 if (olen == sizeof(rfc))
2474 memcpy(&rfc, (void *)val, olen);
2475 goto done;
2476 }
2477 }
2478
2479 done:
2480 switch (rfc.mode) {
2481 case L2CAP_MODE_ERTM:
2482 chan->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
2483 chan->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
2484 chan->mps = le16_to_cpu(rfc.max_pdu_size);
2485 break;
2486 case L2CAP_MODE_STREAMING:
2487 chan->mps = le16_to_cpu(rfc.max_pdu_size);
2488 }
2489 }
2490
2491 static inline int l2cap_command_rej(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2492 {
2493 struct l2cap_cmd_rej_unk *rej = (struct l2cap_cmd_rej_unk *) data;
2494
2495 if (rej->reason != L2CAP_REJ_NOT_UNDERSTOOD)
2496 return 0;
2497
2498 if ((conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) &&
2499 cmd->ident == conn->info_ident) {
2500 del_timer(&conn->info_timer);
2501
2502 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
2503 conn->info_ident = 0;
2504
2505 l2cap_conn_start(conn);
2506 }
2507
2508 return 0;
2509 }
2510
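/* Handle an incoming Connection Request: look up a channel listening
 * on the requested PSM, enforce the link security requirements (SDP
 * is exempt), allocate a new channel and reply with success, pending
 * or an error result.  If the remote feature mask is not known yet an
 * Information Request is sent and the connection is left pending.
 */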
2511 static inline int l2cap_connect_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2512 {
2513 struct l2cap_conn_req *req = (struct l2cap_conn_req *) data;
2514 struct l2cap_conn_rsp rsp;
2515 struct l2cap_chan *chan = NULL, *pchan;
2516 struct sock *parent, *sk = NULL;
2517 int result, status = L2CAP_CS_NO_INFO;
2518
2519 u16 dcid = 0, scid = __le16_to_cpu(req->scid);
2520 __le16 psm = req->psm;
2521
2522 BT_DBG("psm 0x%2.2x scid 0x%4.4x", psm, scid);
2523
2524 /* Check if we have a socket listening on this PSM */
2525 pchan = l2cap_global_chan_by_psm(BT_LISTEN, psm, conn->src);
2526 if (!pchan) {
2527 result = L2CAP_CR_BAD_PSM;
2528 goto sendresp;
2529 }
2530
2531 parent = pchan->sk;
2532
2533 bh_lock_sock(parent);
2534
2535 /* Check if the ACL is secure enough (if not SDP) */
2536 if (psm != cpu_to_le16(0x0001) &&
2537 !hci_conn_check_link_mode(conn->hcon)) {
2538 conn->disc_reason = HCI_ERROR_AUTH_FAILURE;
2539 result = L2CAP_CR_SEC_BLOCK;
2540 goto response;
2541 }
2542
2543 result = L2CAP_CR_NO_MEM;
2544
2545 /* Check for backlog size */
2546 if (sk_acceptq_is_full(parent)) {
2547 BT_DBG("backlog full %d", parent->sk_ack_backlog);
2548 goto response;
2549 }
2550
2551 chan = pchan->ops->new_connection(pchan->data);
2552 if (!chan)
2553 goto response;
2554
2555 sk = chan->sk;
2556
2557 write_lock_bh(&conn->chan_lock);
2558
2559 /* Check if we already have a channel with that dcid */
2560 if (__l2cap_get_chan_by_dcid(conn, scid)) {
2561 write_unlock_bh(&conn->chan_lock);
2562 sock_set_flag(sk, SOCK_ZAPPED);
2563 chan->ops->close(chan->data);
2564 goto response;
2565 }
2566
2567 hci_conn_hold(conn->hcon);
2568
2569 bacpy(&bt_sk(sk)->src, conn->src);
2570 bacpy(&bt_sk(sk)->dst, conn->dst);
2571 chan->psm = psm;
2572 chan->dcid = scid;
2573
2574 bt_accept_enqueue(parent, sk);
2575
2576 __l2cap_chan_add(conn, chan);
2577
2578 dcid = chan->scid;
2579
2580 __set_chan_timer(chan, sk->sk_sndtimeo);
2581
2582 chan->ident = cmd->ident;
2583
2584 if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE) {
2585 if (l2cap_check_security(chan)) {
2586 if (bt_sk(sk)->defer_setup) {
2587 l2cap_state_change(chan, BT_CONNECT2);
2588 result = L2CAP_CR_PEND;
2589 status = L2CAP_CS_AUTHOR_PEND;
2590 parent->sk_data_ready(parent, 0);
2591 } else {
2592 l2cap_state_change(chan, BT_CONFIG);
2593 result = L2CAP_CR_SUCCESS;
2594 status = L2CAP_CS_NO_INFO;
2595 }
2596 } else {
2597 l2cap_state_change(chan, BT_CONNECT2);
2598 result = L2CAP_CR_PEND;
2599 status = L2CAP_CS_AUTHEN_PEND;
2600 }
2601 } else {
2602 l2cap_state_change(chan, BT_CONNECT2);
2603 result = L2CAP_CR_PEND;
2604 status = L2CAP_CS_NO_INFO;
2605 }
2606
2607 write_unlock_bh(&conn->chan_lock);
2608
2609 response:
2610 bh_unlock_sock(parent);
2611
2612 sendresp:
2613 rsp.scid = cpu_to_le16(scid);
2614 rsp.dcid = cpu_to_le16(dcid);
2615 rsp.result = cpu_to_le16(result);
2616 rsp.status = cpu_to_le16(status);
2617 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONN_RSP, sizeof(rsp), &rsp);
2618
2619 if (result == L2CAP_CR_PEND && status == L2CAP_CS_NO_INFO) {
2620 struct l2cap_info_req info;
2621 info.type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
2622
2623 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
2624 conn->info_ident = l2cap_get_ident(conn);
2625
2626 mod_timer(&conn->info_timer, jiffies +
2627 msecs_to_jiffies(L2CAP_INFO_TIMEOUT));
2628
2629 l2cap_send_cmd(conn, conn->info_ident,
2630 L2CAP_INFO_REQ, sizeof(info), &info);
2631 }
2632
2633 if (chan && !test_bit(CONF_REQ_SENT, &chan->conf_state) &&
2634 result == L2CAP_CR_SUCCESS) {
2635 u8 buf[128];
2636 set_bit(CONF_REQ_SENT, &chan->conf_state);
2637 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
2638 l2cap_build_conf_req(chan, buf), buf);
2639 chan->num_conf_req++;
2640 }
2641
2642 return 0;
2643 }
2644
2645 static inline int l2cap_connect_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2646 {
2647 struct l2cap_conn_rsp *rsp = (struct l2cap_conn_rsp *) data;
2648 u16 scid, dcid, result, status;
2649 struct l2cap_chan *chan;
2650 struct sock *sk;
2651 u8 req[128];
2652
2653 scid = __le16_to_cpu(rsp->scid);
2654 dcid = __le16_to_cpu(rsp->dcid);
2655 result = __le16_to_cpu(rsp->result);
2656 status = __le16_to_cpu(rsp->status);
2657
2658 BT_DBG("dcid 0x%4.4x scid 0x%4.4x result 0x%2.2x status 0x%2.2x", dcid, scid, result, status);
2659
2660 if (scid) {
2661 chan = l2cap_get_chan_by_scid(conn, scid);
2662 if (!chan)
2663 return -EFAULT;
2664 } else {
2665 chan = l2cap_get_chan_by_ident(conn, cmd->ident);
2666 if (!chan)
2667 return -EFAULT;
2668 }
2669
2670 sk = chan->sk;
2671
2672 switch (result) {
2673 case L2CAP_CR_SUCCESS:
2674 l2cap_state_change(chan, BT_CONFIG);
2675 chan->ident = 0;
2676 chan->dcid = dcid;
2677 clear_bit(CONF_CONNECT_PEND, &chan->conf_state);
2678
2679 if (test_and_set_bit(CONF_REQ_SENT, &chan->conf_state))
2680 break;
2681
2682 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
2683 l2cap_build_conf_req(chan, req), req);
2684 chan->num_conf_req++;
2685 break;
2686
2687 case L2CAP_CR_PEND:
2688 set_bit(CONF_CONNECT_PEND, &chan->conf_state);
2689 break;
2690
2691 default:
2692 /* don't delete l2cap channel if sk is owned by user */
2693 if (sock_owned_by_user(sk)) {
2694 l2cap_state_change(chan, BT_DISCONN);
2695 __clear_chan_timer(chan);
2696 __set_chan_timer(chan, HZ / 5);
2697 break;
2698 }
2699
2700 l2cap_chan_del(chan, ECONNREFUSED);
2701 break;
2702 }
2703
2704 bh_unlock_sock(sk);
2705 return 0;
2706 }
2707
2708 static inline void set_default_fcs(struct l2cap_chan *chan)
2709 {
2710 /* FCS is enabled only in ERTM or streaming mode, if one or both
2711 * sides request it.
2712 */
2713 if (chan->mode != L2CAP_MODE_ERTM && chan->mode != L2CAP_MODE_STREAMING)
2714 chan->fcs = L2CAP_FCS_NONE;
2715 else if (!test_bit(CONF_NO_FCS_RECV, &chan->conf_state))
2716 chan->fcs = L2CAP_FCS_CRC16;
2717 }
2718
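/* Handle a Configure Request.  Option data may be split over several
 * requests (continuation flag set), so fragments are accumulated in
 * chan->conf_req and only parsed once the final fragment arrives.
 * When both directions are configured the channel moves to
 * BT_CONNECTED and ERTM state is initialised if needed.
 */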
2719 static inline int l2cap_config_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u16 cmd_len, u8 *data)
2720 {
2721 struct l2cap_conf_req *req = (struct l2cap_conf_req *) data;
2722 u16 dcid, flags;
2723 u8 rsp[64];
2724 struct l2cap_chan *chan;
2725 struct sock *sk;
2726 int len;
2727
2728 dcid = __le16_to_cpu(req->dcid);
2729 flags = __le16_to_cpu(req->flags);
2730
2731 BT_DBG("dcid 0x%4.4x flags 0x%2.2x", dcid, flags);
2732
2733 chan = l2cap_get_chan_by_scid(conn, dcid);
2734 if (!chan)
2735 return -ENOENT;
2736
2737 sk = chan->sk;
2738
2739 if (chan->state != BT_CONFIG && chan->state != BT_CONNECT2) {
2740 struct l2cap_cmd_rej_cid rej;
2741
2742 rej.reason = cpu_to_le16(L2CAP_REJ_INVALID_CID);
2743 rej.scid = cpu_to_le16(chan->scid);
2744 rej.dcid = cpu_to_le16(chan->dcid);
2745
2746 l2cap_send_cmd(conn, cmd->ident, L2CAP_COMMAND_REJ,
2747 sizeof(rej), &rej);
2748 goto unlock;
2749 }
2750
2751 /* Reject if config buffer is too small. */
2752 len = cmd_len - sizeof(*req);
2753 if (len < 0 || chan->conf_len + len > sizeof(chan->conf_req)) {
2754 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
2755 l2cap_build_conf_rsp(chan, rsp,
2756 L2CAP_CONF_REJECT, flags), rsp);
2757 goto unlock;
2758 }
2759
2760 /* Store config. */
2761 memcpy(chan->conf_req + chan->conf_len, req->data, len);
2762 chan->conf_len += len;
2763
2764 if (flags & 0x0001) {
2765 /* Incomplete config. Send empty response. */
2766 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
2767 l2cap_build_conf_rsp(chan, rsp,
2768 L2CAP_CONF_SUCCESS, 0x0001), rsp);
2769 goto unlock;
2770 }
2771
2772 /* Complete config. */
2773 len = l2cap_parse_conf_req(chan, rsp);
2774 if (len < 0) {
2775 l2cap_send_disconn_req(conn, chan, ECONNRESET);
2776 goto unlock;
2777 }
2778
2779 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP, len, rsp);
2780 chan->num_conf_rsp++;
2781
2782 /* Reset config buffer. */
2783 chan->conf_len = 0;
2784
2785 if (!test_bit(CONF_OUTPUT_DONE, &chan->conf_state))
2786 goto unlock;
2787
2788 if (test_bit(CONF_INPUT_DONE, &chan->conf_state)) {
2789 set_default_fcs(chan);
2790
2791 l2cap_state_change(chan, BT_CONNECTED);
2792
2793 chan->next_tx_seq = 0;
2794 chan->expected_tx_seq = 0;
2795 skb_queue_head_init(&chan->tx_q);
2796 if (chan->mode == L2CAP_MODE_ERTM)
2797 l2cap_ertm_init(chan);
2798
2799 l2cap_chan_ready(sk);
2800 goto unlock;
2801 }
2802
2803 if (!test_and_set_bit(CONF_REQ_SENT, &chan->conf_state)) {
2804 u8 buf[64];
2805 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
2806 l2cap_build_conf_req(chan, buf), buf);
2807 chan->num_conf_req++;
2808 }
2809
2810 /* Got Conf Rsp PENDING from remote side and assume we sent
2811 Conf Rsp PENDING in the code above */
2812 if (test_bit(CONF_REM_CONF_PEND, &chan->conf_state) &&
2813 test_bit(CONF_LOC_CONF_PEND, &chan->conf_state)) {
2814
2815 /* check compatibility */
2816
2817 clear_bit(CONF_LOC_CONF_PEND, &chan->conf_state);
2818 set_bit(CONF_OUTPUT_DONE, &chan->conf_state);
2819
2820 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
2821 l2cap_build_conf_rsp(chan, rsp,
2822 L2CAP_CONF_SUCCESS, 0x0000), rsp);
2823 }
2824
2825 unlock:
2826 bh_unlock_sock(sk);
2827 return 0;
2828 }
2829
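/* Handle a Configure Response.  A PENDING result keeps the
 * negotiation open, UNACCEPT triggers a re-negotiation with adjusted
 * options, and anything else tears the channel down.  As in the
 * request path, the channel becomes BT_CONNECTED once both input and
 * output configuration are done.
 */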
2830 static inline int l2cap_config_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2831 {
2832 struct l2cap_conf_rsp *rsp = (struct l2cap_conf_rsp *)data;
2833 u16 scid, flags, result;
2834 struct l2cap_chan *chan;
2835 struct sock *sk;
2836 int len = __le16_to_cpu(cmd->len) - sizeof(*rsp);
2837
2838 scid = __le16_to_cpu(rsp->scid);
2839 flags = __le16_to_cpu(rsp->flags);
2840 result = __le16_to_cpu(rsp->result);
2841
2842 BT_DBG("scid 0x%4.4x flags 0x%2.2x result 0x%2.2x",
2843 scid, flags, result);
2844
2845 chan = l2cap_get_chan_by_scid(conn, scid);
2846 if (!chan)
2847 return 0;
2848
2849 sk = chan->sk;
2850
2851 switch (result) {
2852 case L2CAP_CONF_SUCCESS:
2853 l2cap_conf_rfc_get(chan, rsp->data, len);
2854 clear_bit(CONF_REM_CONF_PEND, &chan->conf_state);
2855 break;
2856
2857 case L2CAP_CONF_PENDING:
2858 set_bit(CONF_REM_CONF_PEND, &chan->conf_state);
2859
2860 if (test_bit(CONF_LOC_CONF_PEND, &chan->conf_state)) {
2861 char buf[64];
2862
2863 len = l2cap_parse_conf_rsp(chan, rsp->data, len,
2864 buf, &result);
2865 if (len < 0) {
2866 l2cap_send_disconn_req(conn, chan, ECONNRESET);
2867 goto done;
2868 }
2869
2870 /* check compatibility */
2871
2872 clear_bit(CONF_LOC_CONF_PEND, &chan->conf_state);
2873 set_bit(CONF_OUTPUT_DONE, &chan->conf_state);
2874
2875 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
2876 l2cap_build_conf_rsp(chan, buf,
2877 L2CAP_CONF_SUCCESS, 0x0000), buf);
2878 }
2879 goto done;
2880
2881 case L2CAP_CONF_UNACCEPT:
2882 if (chan->num_conf_rsp <= L2CAP_CONF_MAX_CONF_RSP) {
2883 char req[64];
2884
2885 if (len > sizeof(req) - sizeof(struct l2cap_conf_req)) {
2886 l2cap_send_disconn_req(conn, chan, ECONNRESET);
2887 goto done;
2888 }
2889
2890 /* throw out any old stored conf requests */
2891 result = L2CAP_CONF_SUCCESS;
2892 len = l2cap_parse_conf_rsp(chan, rsp->data, len,
2893 req, &result);
2894 if (len < 0) {
2895 l2cap_send_disconn_req(conn, chan, ECONNRESET);
2896 goto done;
2897 }
2898
2899 l2cap_send_cmd(conn, l2cap_get_ident(conn),
2900 L2CAP_CONF_REQ, len, req);
2901 chan->num_conf_req++;
2902 if (result != L2CAP_CONF_SUCCESS)
2903 goto done;
2904 break;
2905 }
2906
2907 default:
2908 sk->sk_err = ECONNRESET;
2909 __set_chan_timer(chan, HZ * 5);
2910 l2cap_send_disconn_req(conn, chan, ECONNRESET);
2911 goto done;
2912 }
2913
2914 if (flags & 0x01)
2915 goto done;
2916
2917 set_bit(CONF_INPUT_DONE, &chan->conf_state);
2918
2919 if (test_bit(CONF_OUTPUT_DONE, &chan->conf_state)) {
2920 set_default_fcs(chan);
2921
2922 l2cap_state_change(chan, BT_CONNECTED);
2923 chan->next_tx_seq = 0;
2924 chan->expected_tx_seq = 0;
2925 skb_queue_head_init(&chan->tx_q);
2926 if (chan->mode == L2CAP_MODE_ERTM)
2927 l2cap_ertm_init(chan);
2928
2929 l2cap_chan_ready(sk);
2930 }
2931
2932 done:
2933 bh_unlock_sock(sk);
2934 return 0;
2935 }
2936
2937 static inline int l2cap_disconnect_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2938 {
2939 struct l2cap_disconn_req *req = (struct l2cap_disconn_req *) data;
2940 struct l2cap_disconn_rsp rsp;
2941 u16 dcid, scid;
2942 struct l2cap_chan *chan;
2943 struct sock *sk;
2944
2945 scid = __le16_to_cpu(req->scid);
2946 dcid = __le16_to_cpu(req->dcid);
2947
2948 BT_DBG("scid 0x%4.4x dcid 0x%4.4x", scid, dcid);
2949
2950 chan = l2cap_get_chan_by_scid(conn, dcid);
2951 if (!chan)
2952 return 0;
2953
2954 sk = chan->sk;
2955
2956 rsp.dcid = cpu_to_le16(chan->scid);
2957 rsp.scid = cpu_to_le16(chan->dcid);
2958 l2cap_send_cmd(conn, cmd->ident, L2CAP_DISCONN_RSP, sizeof(rsp), &rsp);
2959
2960 sk->sk_shutdown = SHUTDOWN_MASK;
2961
2962 /* don't delete l2cap channel if sk is owned by user */
2963 if (sock_owned_by_user(sk)) {
2964 l2cap_state_change(chan, BT_DISCONN);
2965 __clear_chan_timer(chan);
2966 __set_chan_timer(chan, HZ / 5);
2967 bh_unlock_sock(sk);
2968 return 0;
2969 }
2970
2971 l2cap_chan_del(chan, ECONNRESET);
2972 bh_unlock_sock(sk);
2973
2974 chan->ops->close(chan->data);
2975 return 0;
2976 }
2977
2978 static inline int l2cap_disconnect_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2979 {
2980 struct l2cap_disconn_rsp *rsp = (struct l2cap_disconn_rsp *) data;
2981 u16 dcid, scid;
2982 struct l2cap_chan *chan;
2983 struct sock *sk;
2984
2985 scid = __le16_to_cpu(rsp->scid);
2986 dcid = __le16_to_cpu(rsp->dcid);
2987
2988 BT_DBG("dcid 0x%4.4x scid 0x%4.4x", dcid, scid);
2989
2990 chan = l2cap_get_chan_by_scid(conn, scid);
2991 if (!chan)
2992 return 0;
2993
2994 sk = chan->sk;
2995
2996 /* don't delete l2cap channel if sk is owned by user */
2997 if (sock_owned_by_user(sk)) {
2998 l2cap_state_change(chan, BT_DISCONN);
2999 __clear_chan_timer(chan);
3000 __set_chan_timer(chan, HZ / 5);
3001 bh_unlock_sock(sk);
3002 return 0;
3003 }
3004
3005 l2cap_chan_del(chan, 0);
3006 bh_unlock_sock(sk);
3007
3008 chan->ops->close(chan->data);
3009 return 0;
3010 }
3011
3012 static inline int l2cap_information_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
3013 {
3014 struct l2cap_info_req *req = (struct l2cap_info_req *) data;
3015 u16 type;
3016
3017 type = __le16_to_cpu(req->type);
3018
3019 BT_DBG("type 0x%4.4x", type);
3020
3021 if (type == L2CAP_IT_FEAT_MASK) {
3022 u8 buf[8];
3023 u32 feat_mask = l2cap_feat_mask;
3024 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
3025 rsp->type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
3026 rsp->result = cpu_to_le16(L2CAP_IR_SUCCESS);
3027 if (!disable_ertm)
3028 feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING
3029 | L2CAP_FEAT_FCS;
3030 if (enable_hs)
3031 feat_mask |= L2CAP_FEAT_EXT_FLOW
3032 | L2CAP_FEAT_EXT_WINDOW;
3033
3034 put_unaligned_le32(feat_mask, rsp->data);
3035 l2cap_send_cmd(conn, cmd->ident,
3036 L2CAP_INFO_RSP, sizeof(buf), buf);
3037 } else if (type == L2CAP_IT_FIXED_CHAN) {
3038 u8 buf[12];
3039 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
3040
3041 if (enable_hs)
3042 l2cap_fixed_chan[0] |= L2CAP_FC_A2MP;
3043 else
3044 l2cap_fixed_chan[0] &= ~L2CAP_FC_A2MP;
3045
3046 rsp->type = cpu_to_le16(L2CAP_IT_FIXED_CHAN);
3047 rsp->result = cpu_to_le16(L2CAP_IR_SUCCESS);
3048 memcpy(rsp->data, l2cap_fixed_chan, sizeof(l2cap_fixed_chan));
3049 l2cap_send_cmd(conn, cmd->ident,
3050 L2CAP_INFO_RSP, sizeof(buf), buf);
3051 } else {
3052 struct l2cap_info_rsp rsp;
3053 rsp.type = cpu_to_le16(type);
3054 rsp.result = cpu_to_le16(L2CAP_IR_NOTSUPP);
3055 l2cap_send_cmd(conn, cmd->ident,
3056 L2CAP_INFO_RSP, sizeof(rsp), &rsp);
3057 }
3058
3059 return 0;
3060 }
3061
3062 static inline int l2cap_information_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
3063 {
3064 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) data;
3065 u16 type, result;
3066
3067 type = __le16_to_cpu(rsp->type);
3068 result = __le16_to_cpu(rsp->result);
3069
3070 BT_DBG("type 0x%4.4x result 0x%2.2x", type, result);
3071
3072 /* L2CAP Info req/rsp are not bound to a channel, so add extra checks */
3073 if (cmd->ident != conn->info_ident ||
3074 conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE)
3075 return 0;
3076
3077 del_timer(&conn->info_timer);
3078
3079 if (result != L2CAP_IR_SUCCESS) {
3080 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
3081 conn->info_ident = 0;
3082
3083 l2cap_conn_start(conn);
3084
3085 return 0;
3086 }
3087
3088 if (type == L2CAP_IT_FEAT_MASK) {
3089 conn->feat_mask = get_unaligned_le32(rsp->data);
3090
3091 if (conn->feat_mask & L2CAP_FEAT_FIXED_CHAN) {
3092 struct l2cap_info_req req;
3093 req.type = cpu_to_le16(L2CAP_IT_FIXED_CHAN);
3094
3095 conn->info_ident = l2cap_get_ident(conn);
3096
3097 l2cap_send_cmd(conn, conn->info_ident,
3098 L2CAP_INFO_REQ, sizeof(req), &req);
3099 } else {
3100 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
3101 conn->info_ident = 0;
3102
3103 l2cap_conn_start(conn);
3104 }
3105 } else if (type == L2CAP_IT_FIXED_CHAN) {
3106 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
3107 conn->info_ident = 0;
3108
3109 l2cap_conn_start(conn);
3110 }
3111
3112 return 0;
3113 }
3114
3115 static inline int l2cap_create_channel_req(struct l2cap_conn *conn,
3116 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
3117 void *data)
3118 {
3119 struct l2cap_create_chan_req *req = data;
3120 struct l2cap_create_chan_rsp rsp;
3121 u16 psm, scid;
3122
3123 if (cmd_len != sizeof(*req))
3124 return -EPROTO;
3125
3126 if (!enable_hs)
3127 return -EINVAL;
3128
3129 psm = le16_to_cpu(req->psm);
3130 scid = le16_to_cpu(req->scid);
3131
3132 BT_DBG("psm %d, scid %d, amp_id %d", psm, scid, req->amp_id);
3133
3134 /* Placeholder: Always reject */
3135 rsp.dcid = 0;
3136 rsp.scid = cpu_to_le16(scid);
3137 rsp.result = cpu_to_le16(L2CAP_CR_NO_MEM);
3138 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
3139
3140 l2cap_send_cmd(conn, cmd->ident, L2CAP_CREATE_CHAN_RSP,
3141 sizeof(rsp), &rsp);
3142
3143 return 0;
3144 }
3145
3146 static inline int l2cap_create_channel_rsp(struct l2cap_conn *conn,
3147 struct l2cap_cmd_hdr *cmd, void *data)
3148 {
3149 BT_DBG("conn %p", conn);
3150
3151 return l2cap_connect_rsp(conn, cmd, data);
3152 }
3153
3154 static void l2cap_send_move_chan_rsp(struct l2cap_conn *conn, u8 ident,
3155 u16 icid, u16 result)
3156 {
3157 struct l2cap_move_chan_rsp rsp;
3158
3159 BT_DBG("icid %d, result %d", icid, result);
3160
3161 rsp.icid = cpu_to_le16(icid);
3162 rsp.result = cpu_to_le16(result);
3163
3164 l2cap_send_cmd(conn, ident, L2CAP_MOVE_CHAN_RSP, sizeof(rsp), &rsp);
3165 }
3166
3167 static void l2cap_send_move_chan_cfm(struct l2cap_conn *conn,
3168 struct l2cap_chan *chan, u16 icid, u16 result)
3169 {
3170 struct l2cap_move_chan_cfm cfm;
3171 u8 ident;
3172
3173 BT_DBG("icid %d, result %d", icid, result);
3174
3175 ident = l2cap_get_ident(conn);
3176 if (chan)
3177 chan->ident = ident;
3178
3179 cfm.icid = cpu_to_le16(icid);
3180 cfm.result = cpu_to_le16(result);
3181
3182 l2cap_send_cmd(conn, ident, L2CAP_MOVE_CHAN_CFM, sizeof(cfm), &cfm);
3183 }
3184
3185 static void l2cap_send_move_chan_cfm_rsp(struct l2cap_conn *conn, u8 ident,
3186 u16 icid)
3187 {
3188 struct l2cap_move_chan_cfm_rsp rsp;
3189
3190 BT_DBG("icid %d", icid);
3191
3192 rsp.icid = cpu_to_le16(icid);
3193 l2cap_send_cmd(conn, ident, L2CAP_MOVE_CHAN_CFM_RSP, sizeof(rsp), &rsp);
3194 }
3195
3196 static inline int l2cap_move_channel_req(struct l2cap_conn *conn,
3197 struct l2cap_cmd_hdr *cmd, u16 cmd_len, void *data)
3198 {
3199 struct l2cap_move_chan_req *req = data;
3200 u16 icid = 0;
3201 u16 result = L2CAP_MR_NOT_ALLOWED;
3202
3203 if (cmd_len != sizeof(*req))
3204 return -EPROTO;
3205
3206 icid = le16_to_cpu(req->icid);
3207
3208 BT_DBG("icid %d, dest_amp_id %d", icid, req->dest_amp_id);
3209
3210 if (!enable_hs)
3211 return -EINVAL;
3212
3213 /* Placeholder: Always refuse */
3214 l2cap_send_move_chan_rsp(conn, cmd->ident, icid, result);
3215
3216 return 0;
3217 }
3218
3219 static inline int l2cap_move_channel_rsp(struct l2cap_conn *conn,
3220 struct l2cap_cmd_hdr *cmd, u16 cmd_len, void *data)
3221 {
3222 struct l2cap_move_chan_rsp *rsp = data;
3223 u16 icid, result;
3224
3225 if (cmd_len != sizeof(*rsp))
3226 return -EPROTO;
3227
3228 icid = le16_to_cpu(rsp->icid);
3229 result = le16_to_cpu(rsp->result);
3230
3231 BT_DBG("icid %d, result %d", icid, result);
3232
3233 /* Placeholder: Always unconfirmed */
3234 l2cap_send_move_chan_cfm(conn, NULL, icid, L2CAP_MC_UNCONFIRMED);
3235
3236 return 0;
3237 }
3238
3239 static inline int l2cap_move_channel_confirm(struct l2cap_conn *conn,
3240 struct l2cap_cmd_hdr *cmd, u16 cmd_len, void *data)
3241 {
3242 struct l2cap_move_chan_cfm *cfm = data;
3243 u16 icid, result;
3244
3245 if (cmd_len != sizeof(*cfm))
3246 return -EPROTO;
3247
3248 icid = le16_to_cpu(cfm->icid);
3249 result = le16_to_cpu(cfm->result);
3250
3251 BT_DBG("icid %d, result %d", icid, result);
3252
3253 l2cap_send_move_chan_cfm_rsp(conn, cmd->ident, icid);
3254
3255 return 0;
3256 }
3257
3258 static inline int l2cap_move_channel_confirm_rsp(struct l2cap_conn *conn,
3259 struct l2cap_cmd_hdr *cmd, u16 cmd_len, void *data)
3260 {
3261 struct l2cap_move_chan_cfm_rsp *rsp = data;
3262 u16 icid;
3263
3264 if (cmd_len != sizeof(*rsp))
3265 return -EPROTO;
3266
3267 icid = le16_to_cpu(rsp->icid);
3268
3269 BT_DBG("icid %d", icid);
3270
3271 return 0;
3272 }
3273
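/* Sanity-check the LE connection parameters requested by the peer.
 * For example min = 8, max = 16, latency = 0, to_multiplier = 100
 * passes every check below: 16 < 100 * 8, and with
 * max_latency = (100 * 8 / 16) - 1 = 49 a latency of 0 is within both
 * the 499 cap and the derived maximum, so 0 (accepted) is returned.
 */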
3274 static inline int l2cap_check_conn_param(u16 min, u16 max, u16 latency,
3275 u16 to_multiplier)
3276 {
3277 u16 max_latency;
3278
3279 if (min > max || min < 6 || max > 3200)
3280 return -EINVAL;
3281
3282 if (to_multiplier < 10 || to_multiplier > 3200)
3283 return -EINVAL;
3284
3285 if (max >= to_multiplier * 8)
3286 return -EINVAL;
3287
3288 max_latency = (to_multiplier * 8 / max) - 1;
3289 if (latency > 499 || latency > max_latency)
3290 return -EINVAL;
3291
3292 return 0;
3293 }
3294
3295 static inline int l2cap_conn_param_update_req(struct l2cap_conn *conn,
3296 struct l2cap_cmd_hdr *cmd, u8 *data)
3297 {
3298 struct hci_conn *hcon = conn->hcon;
3299 struct l2cap_conn_param_update_req *req;
3300 struct l2cap_conn_param_update_rsp rsp;
3301 u16 min, max, latency, to_multiplier, cmd_len;
3302 int err;
3303
3304 if (!(hcon->link_mode & HCI_LM_MASTER))
3305 return -EINVAL;
3306
3307 cmd_len = __le16_to_cpu(cmd->len);
3308 if (cmd_len != sizeof(struct l2cap_conn_param_update_req))
3309 return -EPROTO;
3310
3311 req = (struct l2cap_conn_param_update_req *) data;
3312 min = __le16_to_cpu(req->min);
3313 max = __le16_to_cpu(req->max);
3314 latency = __le16_to_cpu(req->latency);
3315 to_multiplier = __le16_to_cpu(req->to_multiplier);
3316
3317 BT_DBG("min 0x%4.4x max 0x%4.4x latency: 0x%4.4x Timeout: 0x%4.4x",
3318 min, max, latency, to_multiplier);
3319
3320 memset(&rsp, 0, sizeof(rsp));
3321
3322 err = l2cap_check_conn_param(min, max, latency, to_multiplier);
3323 if (err)
3324 rsp.result = cpu_to_le16(L2CAP_CONN_PARAM_REJECTED);
3325 else
3326 rsp.result = cpu_to_le16(L2CAP_CONN_PARAM_ACCEPTED);
3327
3328 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONN_PARAM_UPDATE_RSP,
3329 sizeof(rsp), &rsp);
3330
3331 if (!err)
3332 hci_le_conn_update(hcon, min, max, latency, to_multiplier);
3333
3334 return 0;
3335 }
3336
3337 static inline int l2cap_bredr_sig_cmd(struct l2cap_conn *conn,
3338 struct l2cap_cmd_hdr *cmd, u16 cmd_len, u8 *data)
3339 {
3340 int err = 0;
3341
3342 switch (cmd->code) {
3343 case L2CAP_COMMAND_REJ:
3344 l2cap_command_rej(conn, cmd, data);
3345 break;
3346
3347 case L2CAP_CONN_REQ:
3348 err = l2cap_connect_req(conn, cmd, data);
3349 break;
3350
3351 case L2CAP_CONN_RSP:
3352 err = l2cap_connect_rsp(conn, cmd, data);
3353 break;
3354
3355 case L2CAP_CONF_REQ:
3356 err = l2cap_config_req(conn, cmd, cmd_len, data);
3357 break;
3358
3359 case L2CAP_CONF_RSP:
3360 err = l2cap_config_rsp(conn, cmd, data);
3361 break;
3362
3363 case L2CAP_DISCONN_REQ:
3364 err = l2cap_disconnect_req(conn, cmd, data);
3365 break;
3366
3367 case L2CAP_DISCONN_RSP:
3368 err = l2cap_disconnect_rsp(conn, cmd, data);
3369 break;
3370
3371 case L2CAP_ECHO_REQ:
3372 l2cap_send_cmd(conn, cmd->ident, L2CAP_ECHO_RSP, cmd_len, data);
3373 break;
3374
3375 case L2CAP_ECHO_RSP:
3376 break;
3377
3378 case L2CAP_INFO_REQ:
3379 err = l2cap_information_req(conn, cmd, data);
3380 break;
3381
3382 case L2CAP_INFO_RSP:
3383 err = l2cap_information_rsp(conn, cmd, data);
3384 break;
3385
3386 case L2CAP_CREATE_CHAN_REQ:
3387 err = l2cap_create_channel_req(conn, cmd, cmd_len, data);
3388 break;
3389
3390 case L2CAP_CREATE_CHAN_RSP:
3391 err = l2cap_create_channel_rsp(conn, cmd, data);
3392 break;
3393
3394 case L2CAP_MOVE_CHAN_REQ:
3395 err = l2cap_move_channel_req(conn, cmd, cmd_len, data);
3396 break;
3397
3398 case L2CAP_MOVE_CHAN_RSP:
3399 err = l2cap_move_channel_rsp(conn, cmd, cmd_len, data);
3400 break;
3401
3402 case L2CAP_MOVE_CHAN_CFM:
3403 err = l2cap_move_channel_confirm(conn, cmd, cmd_len, data);
3404 break;
3405
3406 case L2CAP_MOVE_CHAN_CFM_RSP:
3407 err = l2cap_move_channel_confirm_rsp(conn, cmd, cmd_len, data);
3408 break;
3409
3410 default:
3411 BT_ERR("Unknown BR/EDR signaling command 0x%2.2x", cmd->code);
3412 err = -EINVAL;
3413 break;
3414 }
3415
3416 return err;
3417 }
3418
3419 static inline int l2cap_le_sig_cmd(struct l2cap_conn *conn,
3420 struct l2cap_cmd_hdr *cmd, u8 *data)
3421 {
3422 switch (cmd->code) {
3423 case L2CAP_COMMAND_REJ:
3424 return 0;
3425
3426 case L2CAP_CONN_PARAM_UPDATE_REQ:
3427 return l2cap_conn_param_update_req(conn, cmd, data);
3428
3429 case L2CAP_CONN_PARAM_UPDATE_RSP:
3430 return 0;
3431
3432 default:
3433 BT_ERR("Unknown LE signaling command 0x%2.2x", cmd->code);
3434 return -EINVAL;
3435 }
3436 }
3437
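/* Process all signalling commands packed into one C-frame, dispatching
 * to the BR/EDR or LE handler depending on the link type.  Commands
 * that fail or are unknown are answered with a Command Reject.
 */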
3438 static inline void l2cap_sig_channel(struct l2cap_conn *conn,
3439 struct sk_buff *skb)
3440 {
3441 u8 *data = skb->data;
3442 int len = skb->len;
3443 struct l2cap_cmd_hdr cmd;
3444 int err;
3445
3446 l2cap_raw_recv(conn, skb);
3447
3448 while (len >= L2CAP_CMD_HDR_SIZE) {
3449 u16 cmd_len;
3450 memcpy(&cmd, data, L2CAP_CMD_HDR_SIZE);
3451 data += L2CAP_CMD_HDR_SIZE;
3452 len -= L2CAP_CMD_HDR_SIZE;
3453
3454 cmd_len = le16_to_cpu(cmd.len);
3455
3456 BT_DBG("code 0x%2.2x len %d id 0x%2.2x", cmd.code, cmd_len, cmd.ident);
3457
3458 if (cmd_len > len || !cmd.ident) {
3459 BT_DBG("corrupted command");
3460 break;
3461 }
3462
3463 if (conn->hcon->type == LE_LINK)
3464 err = l2cap_le_sig_cmd(conn, &cmd, data);
3465 else
3466 err = l2cap_bredr_sig_cmd(conn, &cmd, cmd_len, data);
3467
3468 if (err) {
3469 struct l2cap_cmd_rej_unk rej;
3470
3471 BT_ERR("Wrong link type (%d)", err);
3472
3473 /* FIXME: Map err to a valid reason */
3474 rej.reason = cpu_to_le16(L2CAP_REJ_NOT_UNDERSTOOD);
3475 l2cap_send_cmd(conn, cmd.ident, L2CAP_COMMAND_REJ, sizeof(rej), &rej);
3476 }
3477
3478 data += cmd_len;
3479 len -= cmd_len;
3480 }
3481
3482 kfree_skb(skb);
3483 }
3484
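/* Verify and strip the CRC-16 FCS of a received frame.  The checksum
 * covers the basic L2CAP header as well, which has already been
 * pulled from the skb, hence the hdr_size offset below.
 */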
3485 static int l2cap_check_fcs(struct l2cap_chan *chan, struct sk_buff *skb)
3486 {
3487 u16 our_fcs, rcv_fcs;
3488 int hdr_size;
3489
3490 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
3491 hdr_size = L2CAP_EXT_HDR_SIZE;
3492 else
3493 hdr_size = L2CAP_ENH_HDR_SIZE;
3494
3495 if (chan->fcs == L2CAP_FCS_CRC16) {
3496 skb_trim(skb, skb->len - L2CAP_FCS_SIZE);
3497 rcv_fcs = get_unaligned_le16(skb->data + skb->len);
3498 our_fcs = crc16(0, skb->data - hdr_size, skb->len + hdr_size);
3499
3500 if (our_fcs != rcv_fcs)
3501 return -EBADMSG;
3502 }
3503 return 0;
3504 }
3505
3506 static inline void l2cap_send_i_or_rr_or_rnr(struct l2cap_chan *chan)
3507 {
3508 u32 control = 0;
3509
3510 chan->frames_sent = 0;
3511
3512 control |= __set_reqseq(chan, chan->buffer_seq);
3513
3514 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
3515 control |= __set_ctrl_super(chan, L2CAP_SUPER_RNR);
3516 l2cap_send_sframe(chan, control);
3517 set_bit(CONN_RNR_SENT, &chan->conn_state);
3518 }
3519
3520 if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state))
3521 l2cap_retransmit_frames(chan);
3522
3523 l2cap_ertm_send(chan);
3524
3525 if (!test_bit(CONN_LOCAL_BUSY, &chan->conn_state) &&
3526 chan->frames_sent == 0) {
3527 control |= __set_ctrl_super(chan, L2CAP_SUPER_RR);
3528 l2cap_send_sframe(chan, control);
3529 }
3530 }
3531
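/* Queue an out-of-sequence I-frame on the SREJ queue, keeping the
 * queue ordered by each frame's offset from buffer_seq.  A duplicate
 * tx_seq is rejected with -EINVAL.
 */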
3532 static int l2cap_add_to_srej_queue(struct l2cap_chan *chan, struct sk_buff *skb, u16 tx_seq, u8 sar)
3533 {
3534 struct sk_buff *next_skb;
3535 int tx_seq_offset, next_tx_seq_offset;
3536
3537 bt_cb(skb)->tx_seq = tx_seq;
3538 bt_cb(skb)->sar = sar;
3539
3540 next_skb = skb_peek(&chan->srej_q);
3541 if (!next_skb) {
3542 __skb_queue_tail(&chan->srej_q, skb);
3543 return 0;
3544 }
3545
3546 tx_seq_offset = __seq_offset(chan, tx_seq, chan->buffer_seq);
3547
3548 do {
3549 if (bt_cb(next_skb)->tx_seq == tx_seq)
3550 return -EINVAL;
3551
3552 next_tx_seq_offset = __seq_offset(chan,
3553 bt_cb(next_skb)->tx_seq, chan->buffer_seq);
3554
3555 if (next_tx_seq_offset > tx_seq_offset) {
3556 __skb_queue_before(&chan->srej_q, next_skb, skb);
3557 return 0;
3558 }
3559
3560 if (skb_queue_is_last(&chan->srej_q, next_skb))
3561 break;
3562
3563 } while ((next_skb = skb_queue_next(&chan->srej_q, next_skb)));
3564
3565 __skb_queue_tail(&chan->srej_q, skb);
3566
3567 return 0;
3568 }
3569
3570 static void append_skb_frag(struct sk_buff *skb,
3571 struct sk_buff *new_frag, struct sk_buff **last_frag)
3572 {
3573 /* skb->len reflects data in skb as well as all fragments
3574 * skb->data_len reflects only data in fragments
3575 */
3576 if (!skb_has_frag_list(skb))
3577 skb_shinfo(skb)->frag_list = new_frag;
3578
3579 new_frag->next = NULL;
3580
3581 (*last_frag)->next = new_frag;
3582 *last_frag = new_frag;
3583
3584 skb->len += new_frag->len;
3585 skb->data_len += new_frag->len;
3586 skb->truesize += new_frag->truesize;
3587 }
3588
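/* Reassemble segmented SDUs according to the SAR bits of the control
 * field.  Unsegmented frames go straight to the recv callback; start,
 * continuation and end fragments are collected on chan->sdu until the
 * announced SDU length is reached.  On any error both the fragment
 * and the partial SDU are freed.
 */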
3589 static int l2cap_reassemble_sdu(struct l2cap_chan *chan, struct sk_buff *skb, u32 control)
3590 {
3591 int err = -EINVAL;
3592
3593 switch (__get_ctrl_sar(chan, control)) {
3594 case L2CAP_SAR_UNSEGMENTED:
3595 if (chan->sdu)
3596 break;
3597
3598 err = chan->ops->recv(chan->data, skb);
3599 break;
3600
3601 case L2CAP_SAR_START:
3602 if (chan->sdu)
3603 break;
3604
3605 chan->sdu_len = get_unaligned_le16(skb->data);
3606 skb_pull(skb, L2CAP_SDULEN_SIZE);
3607
3608 if (chan->sdu_len > chan->imtu) {
3609 err = -EMSGSIZE;
3610 break;
3611 }
3612
3613 if (skb->len >= chan->sdu_len)
3614 break;
3615
3616 chan->sdu = skb;
3617 chan->sdu_last_frag = skb;
3618
3619 skb = NULL;
3620 err = 0;
3621 break;
3622
3623 case L2CAP_SAR_CONTINUE:
3624 if (!chan->sdu)
3625 break;
3626
3627 append_skb_frag(chan->sdu, skb,
3628 &chan->sdu_last_frag);
3629 skb = NULL;
3630
3631 if (chan->sdu->len >= chan->sdu_len)
3632 break;
3633
3634 err = 0;
3635 break;
3636
3637 case L2CAP_SAR_END:
3638 if (!chan->sdu)
3639 break;
3640
3641 append_skb_frag(chan->sdu, skb,
3642 &chan->sdu_last_frag);
3643 skb = NULL;
3644
3645 if (chan->sdu->len != chan->sdu_len)
3646 break;
3647
3648 err = chan->ops->recv(chan->data, chan->sdu);
3649
3650 if (!err) {
3651 /* Reassembly complete */
3652 chan->sdu = NULL;
3653 chan->sdu_last_frag = NULL;
3654 chan->sdu_len = 0;
3655 }
3656 break;
3657 }
3658
3659 if (err) {
3660 kfree_skb(skb);
3661 kfree_skb(chan->sdu);
3662 chan->sdu = NULL;
3663 chan->sdu_last_frag = NULL;
3664 chan->sdu_len = 0;
3665 }
3666
3667 return err;
3668 }
3669
3670 static void l2cap_ertm_enter_local_busy(struct l2cap_chan *chan)
3671 {
3672 u32 control;
3673
3674 BT_DBG("chan %p, Enter local busy", chan);
3675
3676 set_bit(CONN_LOCAL_BUSY, &chan->conn_state);
3677
3678 control = __set_reqseq(chan, chan->buffer_seq);
3679 control |= __set_ctrl_super(chan, L2CAP_SUPER_RNR);
3680 l2cap_send_sframe(chan, control);
3681
3682 set_bit(CONN_RNR_SENT, &chan->conn_state);
3683
3684 __clear_ack_timer(chan);
3685 }
3686
3687 static void l2cap_ertm_exit_local_busy(struct l2cap_chan *chan)
3688 {
3689 u32 control;
3690
3691 if (!test_bit(CONN_RNR_SENT, &chan->conn_state))
3692 goto done;
3693
3694 control = __set_reqseq(chan, chan->buffer_seq);
3695 control |= __set_ctrl_poll(chan);
3696 control |= __set_ctrl_super(chan, L2CAP_SUPER_RR);
3697 l2cap_send_sframe(chan, control);
3698 chan->retry_count = 1;
3699
3700 __clear_retrans_timer(chan);
3701 __set_monitor_timer(chan);
3702
3703 set_bit(CONN_WAIT_F, &chan->conn_state);
3704
3705 done:
3706 clear_bit(CONN_LOCAL_BUSY, &chan->conn_state);
3707 clear_bit(CONN_RNR_SENT, &chan->conn_state);
3708
3709 BT_DBG("chan %p, Exit local busy", chan);
3710 }
3711
3712 void l2cap_chan_busy(struct l2cap_chan *chan, int busy)
3713 {
3714 if (chan->mode == L2CAP_MODE_ERTM) {
3715 if (busy)
3716 l2cap_ertm_enter_local_busy(chan);
3717 else
3718 l2cap_ertm_exit_local_busy(chan);
3719 }
3720 }
3721
3722 static void l2cap_check_srej_gap(struct l2cap_chan *chan, u16 tx_seq)
3723 {
3724 struct sk_buff *skb;
3725 u32 control;
3726
3727 while ((skb = skb_peek(&chan->srej_q)) &&
3728 !test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
3729 int err;
3730
3731 if (bt_cb(skb)->tx_seq != tx_seq)
3732 break;
3733
3734 skb = skb_dequeue(&chan->srej_q);
3735 control = __set_ctrl_sar(chan, bt_cb(skb)->sar);
3736 err = l2cap_reassemble_sdu(chan, skb, control);
3737
3738 if (err < 0) {
3739 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
3740 break;
3741 }
3742
3743 chan->buffer_seq_srej = __next_seq(chan, chan->buffer_seq_srej);
3744 tx_seq = __next_seq(chan, tx_seq);
3745 }
3746 }
3747
3748 static void l2cap_resend_srejframe(struct l2cap_chan *chan, u16 tx_seq)
3749 {
3750 struct srej_list *l, *tmp;
3751 u32 control;
3752
3753 list_for_each_entry_safe(l, tmp, &chan->srej_l, list) {
3754 if (l->tx_seq == tx_seq) {
3755 list_del(&l->list);
3756 kfree(l);
3757 return;
3758 }
3759 control = __set_ctrl_super(chan, L2CAP_SUPER_SREJ);
3760 control |= __set_reqseq(chan, l->tx_seq);
3761 l2cap_send_sframe(chan, control);
3762 list_del(&l->list);
3763 list_add_tail(&l->list, &chan->srej_l);
3764 }
3765 }
3766
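/* Send an SREJ S-frame for every sequence number between the expected
 * one and the tx_seq that actually arrived, remembering each gap on
 * the srej_l list.  Note that the GFP_ATOMIC allocation below is not
 * checked for failure in this version of the code.
 */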
3767 static void l2cap_send_srejframe(struct l2cap_chan *chan, u16 tx_seq)
3768 {
3769 struct srej_list *new;
3770 u32 control;
3771
3772 while (tx_seq != chan->expected_tx_seq) {
3773 control = __set_ctrl_super(chan, L2CAP_SUPER_SREJ);
3774 control |= __set_reqseq(chan, chan->expected_tx_seq);
3775 l2cap_send_sframe(chan, control);
3776
3777 new = kzalloc(sizeof(struct srej_list), GFP_ATOMIC);
3778 new->tx_seq = chan->expected_tx_seq;
3779
3780 chan->expected_tx_seq = __next_seq(chan, chan->expected_tx_seq);
3781
3782 list_add_tail(&new->list, &chan->srej_l);
3783 }
3784
3785 chan->expected_tx_seq = __next_seq(chan, chan->expected_tx_seq);
3786 }
3787
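/* Core ERTM receive path for I-frames: acknowledge frames covered by
 * req_seq, detect out-of-sequence or duplicate tx_seq values and run
 * the SREJ machinery, and send an acknowledgement after roughly every
 * tx_win/6 + 1 in-sequence frames.
 */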
3788 static inline int l2cap_data_channel_iframe(struct l2cap_chan *chan, u32 rx_control, struct sk_buff *skb)
3789 {
3790 u16 tx_seq = __get_txseq(chan, rx_control);
3791 u16 req_seq = __get_reqseq(chan, rx_control);
3792 u8 sar = __get_ctrl_sar(chan, rx_control);
3793 int tx_seq_offset, expected_tx_seq_offset;
3794 int num_to_ack = (chan->tx_win/6) + 1;
3795 int err = 0;
3796
3797 BT_DBG("chan %p len %d tx_seq %d rx_control 0x%8.8x", chan, skb->len,
3798 tx_seq, rx_control);
3799
3800 if (__is_ctrl_final(chan, rx_control) &&
3801 test_bit(CONN_WAIT_F, &chan->conn_state)) {
3802 __clear_monitor_timer(chan);
3803 if (chan->unacked_frames > 0)
3804 __set_retrans_timer(chan);
3805 clear_bit(CONN_WAIT_F, &chan->conn_state);
3806 }
3807
3808 chan->expected_ack_seq = req_seq;
3809 l2cap_drop_acked_frames(chan);
3810
3811 tx_seq_offset = __seq_offset(chan, tx_seq, chan->buffer_seq);
3812
3813 /* invalid tx_seq */
3814 if (tx_seq_offset >= chan->tx_win) {
3815 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
3816 goto drop;
3817 }
3818
3819 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state))
3820 goto drop;
3821
3822 if (tx_seq == chan->expected_tx_seq)
3823 goto expected;
3824
3825 if (test_bit(CONN_SREJ_SENT, &chan->conn_state)) {
3826 struct srej_list *first;
3827
3828 first = list_first_entry(&chan->srej_l,
3829 struct srej_list, list);
3830 if (tx_seq == first->tx_seq) {
3831 l2cap_add_to_srej_queue(chan, skb, tx_seq, sar);
3832 l2cap_check_srej_gap(chan, tx_seq);
3833
3834 list_del(&first->list);
3835 kfree(first);
3836
3837 if (list_empty(&chan->srej_l)) {
3838 chan->buffer_seq = chan->buffer_seq_srej;
3839 clear_bit(CONN_SREJ_SENT, &chan->conn_state);
3840 l2cap_send_ack(chan);
3841 BT_DBG("chan %p, Exit SREJ_SENT", chan);
3842 }
3843 } else {
3844 struct srej_list *l;
3845
3846 /* duplicated tx_seq */
3847 if (l2cap_add_to_srej_queue(chan, skb, tx_seq, sar) < 0)
3848 goto drop;
3849
3850 list_for_each_entry(l, &chan->srej_l, list) {
3851 if (l->tx_seq == tx_seq) {
3852 l2cap_resend_srejframe(chan, tx_seq);
3853 return 0;
3854 }
3855 }
3856 l2cap_send_srejframe(chan, tx_seq);
3857 }
3858 } else {
3859 expected_tx_seq_offset = __seq_offset(chan,
3860 chan->expected_tx_seq, chan->buffer_seq);
3861
3862 /* duplicated tx_seq */
3863 if (tx_seq_offset < expected_tx_seq_offset)
3864 goto drop;
3865
3866 set_bit(CONN_SREJ_SENT, &chan->conn_state);
3867
3868 BT_DBG("chan %p, Enter SREJ", chan);
3869
3870 INIT_LIST_HEAD(&chan->srej_l);
3871 chan->buffer_seq_srej = chan->buffer_seq;
3872
3873 __skb_queue_head_init(&chan->srej_q);
3874 l2cap_add_to_srej_queue(chan, skb, tx_seq, sar);
3875
3876 set_bit(CONN_SEND_PBIT, &chan->conn_state);
3877
3878 l2cap_send_srejframe(chan, tx_seq);
3879
3880 __clear_ack_timer(chan);
3881 }
3882 return 0;
3883
3884 expected:
3885 chan->expected_tx_seq = __next_seq(chan, chan->expected_tx_seq);
3886
3887 if (test_bit(CONN_SREJ_SENT, &chan->conn_state)) {
3888 bt_cb(skb)->tx_seq = tx_seq;
3889 bt_cb(skb)->sar = sar;
3890 __skb_queue_tail(&chan->srej_q, skb);
3891 return 0;
3892 }
3893
3894 err = l2cap_reassemble_sdu(chan, skb, rx_control);
3895 chan->buffer_seq = __next_seq(chan, chan->buffer_seq);
3896
3897 if (err < 0) {
3898 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
3899 return err;
3900 }
3901
3902 if (__is_ctrl_final(chan, rx_control)) {
3903 if (!test_and_clear_bit(CONN_REJ_ACT, &chan->conn_state))
3904 l2cap_retransmit_frames(chan);
3905 }
3906
3907 __set_ack_timer(chan);
3908
3909 chan->num_acked = (chan->num_acked + 1) % num_to_ack;
3910 if (chan->num_acked == num_to_ack - 1)
3911 l2cap_send_ack(chan);
3912
3913 return 0;
3914
3915 drop:
3916 kfree_skb(skb);
3917 return 0;
3918 }
3919
3920 static inline void l2cap_data_channel_rrframe(struct l2cap_chan *chan, u32 rx_control)
3921 {
3922 BT_DBG("chan %p, req_seq %d ctrl 0x%8.8x", chan,
3923 __get_reqseq(chan, rx_control), rx_control);
3924
3925 chan->expected_ack_seq = __get_reqseq(chan, rx_control);
3926 l2cap_drop_acked_frames(chan);
3927
3928 if (__is_ctrl_poll(chan, rx_control)) {
3929 set_bit(CONN_SEND_FBIT, &chan->conn_state);
3930 if (test_bit(CONN_SREJ_SENT, &chan->conn_state)) {
3931 if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state) &&
3932 (chan->unacked_frames > 0))
3933 __set_retrans_timer(chan);
3934
3935 clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
3936 l2cap_send_srejtail(chan);
3937 } else {
3938 l2cap_send_i_or_rr_or_rnr(chan);
3939 }
3940
3941 } else if (__is_ctrl_final(chan, rx_control)) {
3942 clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
3943
3944 if (!test_and_clear_bit(CONN_REJ_ACT, &chan->conn_state))
3945 l2cap_retransmit_frames(chan);
3946
3947 } else {
3948 if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state) &&
3949 (chan->unacked_frames > 0))
3950 __set_retrans_timer(chan);
3951
3952 clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
3953 if (test_bit(CONN_SREJ_SENT, &chan->conn_state))
3954 l2cap_send_ack(chan);
3955 else
3956 l2cap_ertm_send(chan);
3957 }
3958 }
3959
3960 static inline void l2cap_data_channel_rejframe(struct l2cap_chan *chan, u32 rx_control)
3961 {
3962 u16 tx_seq = __get_reqseq(chan, rx_control);
3963
3964 BT_DBG("chan %p, req_seq %d ctrl 0x%8.8x", chan, tx_seq, rx_control);
3965
3966 clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
3967
3968 chan->expected_ack_seq = tx_seq;
3969 l2cap_drop_acked_frames(chan);
3970
3971 if (__is_ctrl_final(chan, rx_control)) {
3972 if (!test_and_clear_bit(CONN_REJ_ACT, &chan->conn_state))
3973 l2cap_retransmit_frames(chan);
3974 } else {
3975 l2cap_retransmit_frames(chan);
3976
3977 if (test_bit(CONN_WAIT_F, &chan->conn_state))
3978 set_bit(CONN_REJ_ACT, &chan->conn_state);
3979 }
3980 }

3981 static inline void l2cap_data_channel_srejframe(struct l2cap_chan *chan, u32 rx_control)
3982 {
3983 u16 tx_seq = __get_reqseq(chan, rx_control);
3984
3985 BT_DBG("chan %p, req_seq %d ctrl 0x%8.8x", chan, tx_seq, rx_control);
3986
3987 clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
3988
3989 if (__is_ctrl_poll(chan, rx_control)) {
3990 chan->expected_ack_seq = tx_seq;
3991 l2cap_drop_acked_frames(chan);
3992
3993 set_bit(CONN_SEND_FBIT, &chan->conn_state);
3994 l2cap_retransmit_one_frame(chan, tx_seq);
3995
3996 l2cap_ertm_send(chan);
3997
3998 if (test_bit(CONN_WAIT_F, &chan->conn_state)) {
3999 chan->srej_save_reqseq = tx_seq;
4000 set_bit(CONN_SREJ_ACT, &chan->conn_state);
4001 }
4002 } else if (__is_ctrl_final(chan, rx_control)) {
4003 if (test_bit(CONN_SREJ_ACT, &chan->conn_state) &&
4004 chan->srej_save_reqseq == tx_seq)
4005 clear_bit(CONN_SREJ_ACT, &chan->conn_state);
4006 else
4007 l2cap_retransmit_one_frame(chan, tx_seq);
4008 } else {
4009 l2cap_retransmit_one_frame(chan, tx_seq);
4010 if (test_bit(CONN_WAIT_F, &chan->conn_state)) {
4011 chan->srej_save_reqseq = tx_seq;
4012 set_bit(CONN_SREJ_ACT, &chan->conn_state);
4013 }
4014 }
4015 }
4016
4017 static inline void l2cap_data_channel_rnrframe(struct l2cap_chan *chan, u32 rx_control)
4018 {
4019 u16 tx_seq = __get_reqseq(chan, rx_control);
4020
4021 BT_DBG("chan %p, req_seq %d ctrl 0x%8.8x", chan, tx_seq, rx_control);
4022
4023 set_bit(CONN_REMOTE_BUSY, &chan->conn_state);
4024 chan->expected_ack_seq = tx_seq;
4025 l2cap_drop_acked_frames(chan);
4026
4027 if (__is_ctrl_poll(chan, rx_control))
4028 set_bit(CONN_SEND_FBIT, &chan->conn_state);
4029
4030 if (!test_bit(CONN_SREJ_SENT, &chan->conn_state)) {
4031 __clear_retrans_timer(chan);
4032 if (__is_ctrl_poll(chan, rx_control))
4033 l2cap_send_rr_or_rnr(chan, L2CAP_CTRL_FINAL);
4034 return;
4035 }
4036
4037 if (__is_ctrl_poll(chan, rx_control)) {
4038 l2cap_send_srejtail(chan);
4039 } else {
4040 rx_control = __set_ctrl_super(chan, L2CAP_SUPER_RR);
4041 l2cap_send_sframe(chan, rx_control);
4042 }
4043 }
4044
4045 static inline int l2cap_data_channel_sframe(struct l2cap_chan *chan, u32 rx_control, struct sk_buff *skb)
4046 {
4047 BT_DBG("chan %p rx_control 0x%8.8x len %d", chan, rx_control, skb->len);
4048
4049 if (__is_ctrl_final(chan, rx_control) &&
4050 test_bit(CONN_WAIT_F, &chan->conn_state)) {
4051 __clear_monitor_timer(chan);
4052 if (chan->unacked_frames > 0)
4053 __set_retrans_timer(chan);
4054 clear_bit(CONN_WAIT_F, &chan->conn_state);
4055 }
4056
4057 switch (__get_ctrl_super(chan, rx_control)) {
4058 case L2CAP_SUPER_RR:
4059 l2cap_data_channel_rrframe(chan, rx_control);
4060 break;
4061
4062 case L2CAP_SUPER_REJ:
4063 l2cap_data_channel_rejframe(chan, rx_control);
4064 break;
4065
4066 case L2CAP_SUPER_SREJ:
4067 l2cap_data_channel_srejframe(chan, rx_control);
4068 break;
4069
4070 case L2CAP_SUPER_RNR:
4071 l2cap_data_channel_rnrframe(chan, rx_control);
4072 break;
4073 }
4074
4075 kfree_skb(skb);
4076 return 0;
4077 }
4078
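/* Entry point for ERTM traffic on a data channel: validate the FCS,
 * the payload length and the req_seq window, then hand the frame to
 * the I-frame or S-frame handler.
 */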
4079 static int l2cap_ertm_data_rcv(struct sock *sk, struct sk_buff *skb)
4080 {
4081 struct l2cap_chan *chan = l2cap_pi(sk)->chan;
4082 u32 control;
4083 u16 req_seq;
4084 int len, next_tx_seq_offset, req_seq_offset;
4085
4086 control = __get_control(chan, skb->data);
4087 skb_pull(skb, __ctrl_size(chan));
4088 len = skb->len;
4089
4090 /*
4091 * We can just drop a corrupted I-frame here: the gap in the
4092 * sequence numbers will be detected and the normal ERTM recovery
4093 * procedure will ask the sender for a retransmission.
4094 */
4095 if (l2cap_check_fcs(chan, skb))
4096 goto drop;
4097
4098 if (__is_sar_start(chan, control) && !__is_sframe(chan, control))
4099 len -= L2CAP_SDULEN_SIZE;
4100
4101 if (chan->fcs == L2CAP_FCS_CRC16)
4102 len -= L2CAP_FCS_SIZE;
4103
4104 if (len > chan->mps) {
4105 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
4106 goto drop;
4107 }
4108
4109 req_seq = __get_reqseq(chan, control);
4110
4111 req_seq_offset = __seq_offset(chan, req_seq, chan->expected_ack_seq);
4112
4113 next_tx_seq_offset = __seq_offset(chan, chan->next_tx_seq,
4114 chan->expected_ack_seq);
4115
4116 /* check for invalid req-seq */
4117 if (req_seq_offset > next_tx_seq_offset) {
4118 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
4119 goto drop;
4120 }
4121
4122 if (!__is_sframe(chan, control)) {
4123 if (len < 0) {
4124 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
4125 goto drop;
4126 }
4127
4128 l2cap_data_channel_iframe(chan, control, skb);
4129 } else {
4130 if (len != 0) {
4131 BT_ERR("S-frame with non-zero payload length %d", len);
4132 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
4133 goto drop;
4134 }
4135
4136 l2cap_data_channel_sframe(chan, control, skb);
4137 }
4138
4139 return 0;
4140
4141 drop:
4142 kfree_skb(skb);
4143 return 0;
4144 }
4145
4146 static inline int l2cap_data_channel(struct l2cap_conn *conn, u16 cid, struct sk_buff *skb)
4147 {
4148 struct l2cap_chan *chan;
4149 struct sock *sk = NULL;
4150 u32 control;
4151 u16 tx_seq;
4152 int len;
4153
4154 chan = l2cap_get_chan_by_scid(conn, cid);
4155 if (!chan) {
4156 BT_DBG("unknown cid 0x%4.4x", cid);
4157 goto drop;
4158 }
4159
4160 sk = chan->sk;
4161
4162 BT_DBG("chan %p, len %d", chan, skb->len);
4163
4164 if (chan->state != BT_CONNECTED)
4165 goto drop;
4166
4167 switch (chan->mode) {
4168 case L2CAP_MODE_BASIC:
4169 /* If the socket receive buffer overflows we drop data here,
4170 * which is *bad* because L2CAP has to be reliable.
4171 * But we don't have any other choice: basic mode L2CAP
4172 * provides no flow control mechanism. */
4173
4174 if (chan->imtu < skb->len)
4175 goto drop;
4176
4177 if (!chan->ops->recv(chan->data, skb))
4178 goto done;
4179 break;
4180
4181 case L2CAP_MODE_ERTM:
4182 if (!sock_owned_by_user(sk)) {
4183 l2cap_ertm_data_rcv(sk, skb);
4184 } else {
4185 if (sk_add_backlog(sk, skb))
4186 goto drop;
4187 }
4188
4189 goto done;
4190
4191 case L2CAP_MODE_STREAMING:
4192 control = __get_control(chan, skb->data);
4193 skb_pull(skb, __ctrl_size(chan));
4194 len = skb->len;
4195
4196 if (l2cap_check_fcs(chan, skb))
4197 goto drop;
4198
4199 if (__is_sar_start(chan, control))
4200 len -= L2CAP_SDULEN_SIZE;
4201
4202 if (chan->fcs == L2CAP_FCS_CRC16)
4203 len -= L2CAP_FCS_SIZE;
4204
4205 if (len > chan->mps || len < 0 || __is_sframe(chan, control))
4206 goto drop;
4207
4208 tx_seq = __get_txseq(chan, control);
4209
4210 if (chan->expected_tx_seq != tx_seq) {
4211 /* Frame(s) missing - must discard partial SDU */
4212 kfree_skb(chan->sdu);
4213 chan->sdu = NULL;
4214 chan->sdu_last_frag = NULL;
4215 chan->sdu_len = 0;
4216
4217 /* TODO: Notify userland of missing data */
4218 }
4219
4220 chan->expected_tx_seq = __next_seq(chan, tx_seq);
4221
4222 if (l2cap_reassemble_sdu(chan, skb, control) == -EMSGSIZE)
4223 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
4224
4225 goto done;
4226
4227 default:
4228 BT_DBG("chan %p: bad mode 0x%2.2x", chan, chan->mode);
4229 break;
4230 }
4231
4232 drop:
4233 kfree_skb(skb);
4234
4235 done:
4236 if (sk)
4237 bh_unlock_sock(sk);
4238
4239 return 0;
4240 }
4241
4242 static inline int l2cap_conless_channel(struct l2cap_conn *conn, __le16 psm, struct sk_buff *skb)
4243 {
4244 struct sock *sk = NULL;
4245 struct l2cap_chan *chan;
4246
4247 chan = l2cap_global_chan_by_psm(0, psm, conn->src);
4248 if (!chan)
4249 goto drop;
4250
4251 sk = chan->sk;
4252
4253 bh_lock_sock(sk);
4254
4255 BT_DBG("sk %p, len %d", sk, skb->len);
4256
4257 if (chan->state != BT_BOUND && chan->state != BT_CONNECTED)
4258 goto drop;
4259
4260 if (chan->imtu < skb->len)
4261 goto drop;
4262
4263 if (!chan->ops->recv(chan->data, skb))
4264 goto done;
4265
4266 drop:
4267 kfree_skb(skb);
4268
4269 done:
4270 if (sk)
4271 bh_unlock_sock(sk);
4272 return 0;
4273 }
4274
4275 static inline int l2cap_att_channel(struct l2cap_conn *conn, __le16 cid, struct sk_buff *skb)
4276 {
4277 struct sock *sk = NULL;
4278 struct l2cap_chan *chan;
4279
4280 chan = l2cap_global_chan_by_scid(0, cid, conn->src);
4281 if (!chan)
4282 goto drop;
4283
4284 sk = chan->sk;
4285
4286 bh_lock_sock(sk);
4287
4288 BT_DBG("sk %p, len %d", sk, skb->len);
4289
4290 if (chan->state != BT_BOUND && chan->state != BT_CONNECTED)
4291 goto drop;
4292
4293 if (chan->imtu < skb->len)
4294 goto drop;
4295
4296 if (!chan->ops->recv(chan->data, skb))
4297 goto done;
4298
4299 drop:
4300 kfree_skb(skb);
4301
4302 done:
4303 if (sk)
4304 bh_unlock_sock(sk);
4305 return 0;
4306 }
4307
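/* Demultiplex an incoming L2CAP frame by destination CID: signalling
 * (BR/EDR or LE), connectionless data, the LE attribute channel, SMP,
 * or an ordinary connection-oriented data channel.
 */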
4308 static void l2cap_recv_frame(struct l2cap_conn *conn, struct sk_buff *skb)
4309 {
4310 struct l2cap_hdr *lh = (void *) skb->data;
4311 u16 cid, len;
4312 __le16 psm;
4313
4314 skb_pull(skb, L2CAP_HDR_SIZE);
4315 cid = __le16_to_cpu(lh->cid);
4316 len = __le16_to_cpu(lh->len);
4317
4318 if (len != skb->len) {
4319 kfree_skb(skb);
4320 return;
4321 }
4322
4323 BT_DBG("len %d, cid 0x%4.4x", len, cid);
4324
4325 switch (cid) {
4326 case L2CAP_CID_LE_SIGNALING:
4327 case L2CAP_CID_SIGNALING:
4328 l2cap_sig_channel(conn, skb);
4329 break;
4330
4331 case L2CAP_CID_CONN_LESS:
4332 psm = get_unaligned_le16(skb->data);
4333 skb_pull(skb, 2);
4334 l2cap_conless_channel(conn, psm, skb);
4335 break;
4336
4337 case L2CAP_CID_LE_DATA:
4338 l2cap_att_channel(conn, cid, skb);
4339 break;
4340
4341 case L2CAP_CID_SMP:
4342 if (smp_sig_channel(conn, skb))
4343 l2cap_conn_del(conn->hcon, EACCES);
4344 break;
4345
4346 default:
4347 l2cap_data_channel(conn, cid, skb);
4348 break;
4349 }
4350 }
4351
4352 /* ---- L2CAP interface with lower layer (HCI) ---- */
4353
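/* Incoming ACL connection request: scan listening channels and
 * build the HCI link-mode bits (HCI_LM_ACCEPT, plus HCI_LM_MASTER
 * if role switching is allowed). Listeners bound to the exact
 * local address take precedence over BDADDR_ANY wildcards. */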
4354 static int l2cap_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
4355 {
4356 int exact = 0, lm1 = 0, lm2 = 0;
4357 struct l2cap_chan *c;
4358
4359 if (type != ACL_LINK)
4360 return -EINVAL;
4361
4362 BT_DBG("hdev %s, bdaddr %s", hdev->name, batostr(bdaddr));
4363
4364 /* Find listening sockets and check their link_mode */
4365 read_lock(&chan_list_lock);
4366 list_for_each_entry(c, &chan_list, global_l) {
4367 struct sock *sk = c->sk;
4368
4369 if (c->state != BT_LISTEN)
4370 continue;
4371
4372 if (!bacmp(&bt_sk(sk)->src, &hdev->bdaddr)) {
4373 lm1 |= HCI_LM_ACCEPT;
4374 if (test_bit(FLAG_ROLE_SWITCH, &c->flags))
4375 lm1 |= HCI_LM_MASTER;
4376 exact++;
4377 } else if (!bacmp(&bt_sk(sk)->src, BDADDR_ANY)) {
4378 lm2 |= HCI_LM_ACCEPT;
4379 if (test_bit(FLAG_ROLE_SWITCH, &c->flags))
4380 lm2 |= HCI_LM_MASTER;
4381 }
4382 }
4383 read_unlock(&chan_list_lock);
4384
4385 return exact ? lm1 : lm2;
4386 }
4387
4388 static int l2cap_connect_cfm(struct hci_conn *hcon, u8 status)
4389 {
4390 struct l2cap_conn *conn;
4391
4392 BT_DBG("hcon %p bdaddr %s status %d", hcon, batostr(&hcon->dst), status);
4393
4394 if (!(hcon->type == ACL_LINK || hcon->type == LE_LINK))
4395 return -EINVAL;
4396
4397 if (!status) {
4398 conn = l2cap_conn_add(hcon, status);
4399 if (conn)
4400 l2cap_conn_ready(conn);
4401 } else
4402 l2cap_conn_del(hcon, bt_to_errno(status));
4403
4404 return 0;
4405 }
4406
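/* Return the HCI reason code to use when the link is torn down:
 * HCI_ERROR_REMOTE_USER_TERM if no L2CAP state exists, otherwise
 * the reason recorded on the connection. */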
4407 static int l2cap_disconn_ind(struct hci_conn *hcon)
4408 {
4409 struct l2cap_conn *conn = hcon->l2cap_data;
4410
4411 BT_DBG("hcon %p", hcon);
4412
4413 if ((hcon->type != ACL_LINK && hcon->type != LE_LINK) || !conn)
4414 return HCI_ERROR_REMOTE_USER_TERM;
4415
4416 return conn->disc_reason;
4417 }
4418
4419 static int l2cap_disconn_cfm(struct hci_conn *hcon, u8 reason)
4420 {
4421 BT_DBG("hcon %p reason %d", hcon, reason);
4422
4423 if (!(hcon->type == ACL_LINK || hcon->type == LE_LINK))
4424 return -EINVAL;
4425
4426 l2cap_conn_del(hcon, bt_to_errno(reason));
4427
4428 return 0;
4429 }
4430
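/* React to an encryption change on a connection-oriented channel:
 * if encryption was switched off, give BT_SECURITY_MEDIUM channels
 * a short grace timer and close BT_SECURITY_HIGH channels; if it
 * was switched on, clear a medium-security channel's grace timer. */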
4431 static inline void l2cap_check_encryption(struct l2cap_chan *chan, u8 encrypt)
4432 {
4433 if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED)
4434 return;
4435
4436 if (encrypt == 0x00) {
4437 if (chan->sec_level == BT_SECURITY_MEDIUM) {
4438 __clear_chan_timer(chan);
4439 __set_chan_timer(chan, HZ * 5);
4440 } else if (chan->sec_level == BT_SECURITY_HIGH)
4441 l2cap_chan_close(chan, ECONNREFUSED);
4442 } else {
4443 if (chan->sec_level == BT_SECURITY_MEDIUM)
4444 __clear_chan_timer(chan);
4445 }
4446 }
4447
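/* Authentication/encryption has completed for the link. For LE,
 * start SMP key distribution. Then walk every channel on the
 * connection: mark LE data channels ready, resume channels that
 * were waiting to send a connect request, and answer deferred
 * incoming connections with an appropriate connect response. */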
4448 static int l2cap_security_cfm(struct hci_conn *hcon, u8 status, u8 encrypt)
4449 {
4450 struct l2cap_conn *conn = hcon->l2cap_data;
4451 struct l2cap_chan *chan;
4452
4453 if (!conn)
4454 return 0;
4455
4456 BT_DBG("conn %p", conn);
4457
4458 if (hcon->type == LE_LINK) {
4459 smp_distribute_keys(conn, 0);
4460 del_timer(&conn->security_timer);
4461 }
4462
4463 read_lock(&conn->chan_lock);
4464
4465 list_for_each_entry(chan, &conn->chan_l, list) {
4466 struct sock *sk = chan->sk;
4467
4468 bh_lock_sock(sk);
4469
4470 BT_DBG("chan->scid %d", chan->scid);
4471
4472 if (chan->scid == L2CAP_CID_LE_DATA) {
4473 if (!status && encrypt) {
4474 chan->sec_level = hcon->sec_level;
4475 l2cap_chan_ready(sk);
4476 }
4477
4478 bh_unlock_sock(sk);
4479 continue;
4480 }
4481
4482 if (test_bit(CONF_CONNECT_PEND, &chan->conf_state)) {
4483 bh_unlock_sock(sk);
4484 continue;
4485 }
4486
4487 if (!status && (chan->state == BT_CONNECTED ||
4488 chan->state == BT_CONFIG)) {
4489 l2cap_check_encryption(chan, encrypt);
4490 bh_unlock_sock(sk);
4491 continue;
4492 }
4493
4494 if (chan->state == BT_CONNECT) {
4495 if (!status) {
4496 struct l2cap_conn_req req;
4497 req.scid = cpu_to_le16(chan->scid);
4498 req.psm = chan->psm;
4499
4500 chan->ident = l2cap_get_ident(conn);
4501 set_bit(CONF_CONNECT_PEND, &chan->conf_state);
4502
4503 l2cap_send_cmd(conn, chan->ident,
4504 L2CAP_CONN_REQ, sizeof(req), &req);
4505 } else {
4506 __clear_chan_timer(chan);
4507 __set_chan_timer(chan, HZ / 10);
4508 }
4509 } else if (chan->state == BT_CONNECT2) {
4510 struct l2cap_conn_rsp rsp;
4511 __u16 res, stat;
4512
4513 if (!status) {
4514 if (bt_sk(sk)->defer_setup) {
4515 struct sock *parent = bt_sk(sk)->parent;
4516 res = L2CAP_CR_PEND;
4517 stat = L2CAP_CS_AUTHOR_PEND;
4518 if (parent)
4519 parent->sk_data_ready(parent, 0);
4520 } else {
4521 l2cap_state_change(chan, BT_CONFIG);
4522 res = L2CAP_CR_SUCCESS;
4523 stat = L2CAP_CS_NO_INFO;
4524 }
4525 } else {
4526 l2cap_state_change(chan, BT_DISCONN);
4527 __set_chan_timer(chan, HZ / 10);
4528 res = L2CAP_CR_SEC_BLOCK;
4529 stat = L2CAP_CS_NO_INFO;
4530 }
4531
4532 rsp.scid = cpu_to_le16(chan->dcid);
4533 rsp.dcid = cpu_to_le16(chan->scid);
4534 rsp.result = cpu_to_le16(res);
4535 rsp.status = cpu_to_le16(stat);
4536 l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
4537 sizeof(rsp), &rsp);
4538 }
4539
4540 bh_unlock_sock(sk);
4541 }
4542
4543 read_unlock(&conn->chan_lock);
4544
4545 return 0;
4546 }
4547
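/* Reassemble ACL fragments into a complete L2CAP frame. A start
 * fragment carries the basic header, which gives the total frame
 * length; the data is accumulated in conn->rx_skb until rx_len
 * reaches zero and the frame is passed to l2cap_recv_frame().
 * Any inconsistency marks the connection unreliable and drops
 * the fragment. */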
4548 static int l2cap_recv_acldata(struct hci_conn *hcon, struct sk_buff *skb, u16 flags)
4549 {
4550 struct l2cap_conn *conn = hcon->l2cap_data;
4551
4552 if (!conn)
4553 conn = l2cap_conn_add(hcon, 0);
4554
4555 if (!conn)
4556 goto drop;
4557
4558 BT_DBG("conn %p len %d flags 0x%x", conn, skb->len, flags);
4559
4560 if (!(flags & ACL_CONT)) {
4561 struct l2cap_hdr *hdr;
4562 struct l2cap_chan *chan;
4563 u16 cid;
4564 int len;
4565
4566 if (conn->rx_len) {
4567 BT_ERR("Unexpected start frame (len %d)", skb->len);
4568 kfree_skb(conn->rx_skb);
4569 conn->rx_skb = NULL;
4570 conn->rx_len = 0;
4571 l2cap_conn_unreliable(conn, ECOMM);
4572 }
4573
4574 /* A start fragment always begins with the basic L2CAP header */
4575 if (skb->len < L2CAP_HDR_SIZE) {
4576 BT_ERR("Frame is too short (len %d)", skb->len);
4577 l2cap_conn_unreliable(conn, ECOMM);
4578 goto drop;
4579 }
4580
4581 hdr = (struct l2cap_hdr *) skb->data;
4582 len = __le16_to_cpu(hdr->len) + L2CAP_HDR_SIZE;
4583 cid = __le16_to_cpu(hdr->cid);
4584
4585 if (len == skb->len) {
4586 /* Complete frame received */
4587 l2cap_recv_frame(conn, skb);
4588 return 0;
4589 }
4590
4591 BT_DBG("Start: total len %d, frag len %d", len, skb->len);
4592
4593 if (skb->len > len) {
4594 BT_ERR("Frame is too long (len %d, expected len %d)",
4595 skb->len, len);
4596 l2cap_conn_unreliable(conn, ECOMM);
4597 goto drop;
4598 }
4599
4600 chan = l2cap_get_chan_by_scid(conn, cid);
4601
4602 if (chan && chan->sk) {
4603 struct sock *sk = chan->sk;
4604
4605 if (chan->imtu < len - L2CAP_HDR_SIZE) {
4606 BT_ERR("Frame exceeding recv MTU (len %d, "
4607 "MTU %d)", len,
4608 chan->imtu);
4609 bh_unlock_sock(sk);
4610 l2cap_conn_unreliable(conn, ECOMM);
4611 goto drop;
4612 }
4613 bh_unlock_sock(sk);
4614 }
4615
4616 /* Allocate skb for the complete frame (with header) */
4617 conn->rx_skb = bt_skb_alloc(len, GFP_ATOMIC);
4618 if (!conn->rx_skb)
4619 goto drop;
4620
4621 skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
4622 skb->len);
4623 conn->rx_len = len - skb->len;
4624 } else {
4625 BT_DBG("Cont: frag len %d (expecting %d)", skb->len, conn->rx_len);
4626
4627 if (!conn->rx_len) {
4628 BT_ERR("Unexpected continuation frame (len %d)", skb->len);
4629 l2cap_conn_unreliable(conn, ECOMM);
4630 goto drop;
4631 }
4632
4633 if (skb->len > conn->rx_len) {
4634 BT_ERR("Fragment is too long (len %d, expected %d)",
4635 skb->len, conn->rx_len);
4636 kfree_skb(conn->rx_skb);
4637 conn->rx_skb = NULL;
4638 conn->rx_len = 0;
4639 l2cap_conn_unreliable(conn, ECOMM);
4640 goto drop;
4641 }
4642
4643 skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
4644 skb->len);
4645 conn->rx_len -= skb->len;
4646
4647 if (!conn->rx_len) {
4648 /* Complete frame received */
4649 l2cap_recv_frame(conn, conn->rx_skb);
4650 conn->rx_skb = NULL;
4651 }
4652 }
4653
4654 drop:
4655 kfree_skb(skb);
4656 return 0;
4657 }
4658
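/* One line per known channel: src dst state psm scid dcid imtu
 * omtu sec_level mode. */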
4659 static int l2cap_debugfs_show(struct seq_file *f, void *p)
4660 {
4661 struct l2cap_chan *c;
4662
4663 read_lock_bh(&chan_list_lock);
4664
4665 list_for_each_entry(c, &chan_list, global_l) {
4666 struct sock *sk = c->sk;
4667
4668 seq_printf(f, "%s %s %d %d 0x%4.4x 0x%4.4x %d %d %d %d\n",
4669 batostr(&bt_sk(sk)->src),
4670 batostr(&bt_sk(sk)->dst),
4671 c->state, __le16_to_cpu(c->psm),
4672 c->scid, c->dcid, c->imtu, c->omtu,
4673 c->sec_level, c->mode);
4674 }
4675
4676 read_unlock_bh(&chan_list_lock);
4677
4678 return 0;
4679 }
4680
4681 static int l2cap_debugfs_open(struct inode *inode, struct file *file)
4682 {
4683 return single_open(file, l2cap_debugfs_show, inode->i_private);
4684 }
4685
4686 static const struct file_operations l2cap_debugfs_fops = {
4687 .open = l2cap_debugfs_open,
4688 .read = seq_read,
4689 .llseek = seq_lseek,
4690 .release = single_release,
4691 };
4692
4693 static struct dentry *l2cap_debugfs;
4694
4695 static struct hci_proto l2cap_hci_proto = {
4696 .name = "L2CAP",
4697 .id = HCI_PROTO_L2CAP,
4698 .connect_ind = l2cap_connect_ind,
4699 .connect_cfm = l2cap_connect_cfm,
4700 .disconn_ind = l2cap_disconn_ind,
4701 .disconn_cfm = l2cap_disconn_cfm,
4702 .security_cfm = l2cap_security_cfm,
4703 .recv_acldata = l2cap_recv_acldata
4704 };
4705
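/* Register the socket front end first, then hook L2CAP into the
 * HCI core; the debugfs entry is best effort and its absence is
 * only logged. */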
4706 int __init l2cap_init(void)
4707 {
4708 int err;
4709
4710 err = l2cap_init_sockets();
4711 if (err < 0)
4712 return err;
4713
4714 err = hci_register_proto(&l2cap_hci_proto);
4715 if (err < 0) {
4716 BT_ERR("L2CAP protocol registration failed");
4717 bt_sock_unregister(BTPROTO_L2CAP);
4718 goto error;
4719 }
4720
4721 if (bt_debugfs) {
4722 l2cap_debugfs = debugfs_create_file("l2cap", 0444,
4723 bt_debugfs, NULL, &l2cap_debugfs_fops);
4724 if (!l2cap_debugfs)
4725 BT_ERR("Failed to create L2CAP debug file");
4726 }
4727
4728 return 0;
4729
4730 error:
4731 l2cap_cleanup_sockets();
4732 return err;
4733 }
4734
4735 void l2cap_exit(void)
4736 {
4737 debugfs_remove(l2cap_debugfs);
4738
4739 if (hci_unregister_proto(&l2cap_hci_proto) < 0)
4740 BT_ERR("L2CAP protocol unregistration failed");
4741
4742 l2cap_cleanup_sockets();
4743 }
4744
4745 module_param(disable_ertm, bool, 0644);
4746 MODULE_PARM_DESC(disable_ertm, "Disable enhanced retransmission mode");
4747
4748 module_param(enable_hs, bool, 0644);
4749 MODULE_PARM_DESC(enable_hs, "Enable High Speed");