2 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (C) 2000-2001 Qualcomm Incorporated
4 Copyright (C) 2009-2010 Gustavo F. Padovan <gustavo@padovan.org>
5 Copyright (C) 2010 Google Inc.
7 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
9 This program is free software; you can redistribute it and/or modify
10 it under the terms of the GNU General Public License version 2 as
11 published by the Free Software Foundation;
13 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
14 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
15 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
16 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
17 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
18 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
19 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
20 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
22 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
23 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
24 SOFTWARE IS DISCLAIMED.
27 /* Bluetooth L2CAP core. */
29 #include <linux/module.h>
31 #include <linux/types.h>
32 #include <linux/capability.h>
33 #include <linux/errno.h>
34 #include <linux/kernel.h>
35 #include <linux/sched.h>
36 #include <linux/slab.h>
37 #include <linux/poll.h>
38 #include <linux/fcntl.h>
39 #include <linux/init.h>
40 #include <linux/interrupt.h>
41 #include <linux/socket.h>
42 #include <linux/skbuff.h>
43 #include <linux/list.h>
44 #include <linux/device.h>
45 #include <linux/debugfs.h>
46 #include <linux/seq_file.h>
47 #include <linux/uaccess.h>
48 #include <linux/crc16.h>
51 #include <asm/system.h>
52 #include <asm/unaligned.h>
54 #include <net/bluetooth/bluetooth.h>
55 #include <net/bluetooth/hci_core.h>
56 #include <net/bluetooth/l2cap.h>
60 static u32 l2cap_feat_mask
= L2CAP_FEAT_FIXED_CHAN
;
61 static u8 l2cap_fixed_chan
[8] = { 0x02, };
63 static struct workqueue_struct
*_busy_wq
;
66 DEFINE_RWLOCK(chan_list_lock
);
68 static void l2cap_busy_work(struct work_struct
*work
);
70 static struct sk_buff
*l2cap_build_cmd(struct l2cap_conn
*conn
,
71 u8 code
, u8 ident
, u16 dlen
, void *data
);
72 static void l2cap_send_cmd(struct l2cap_conn
*conn
, u8 ident
, u8 code
, u16 len
,
74 static int l2cap_build_conf_req(struct l2cap_chan
*chan
, void *data
);
75 static void l2cap_send_disconn_req(struct l2cap_conn
*conn
,
76 struct l2cap_chan
*chan
, int err
);
78 static int l2cap_ertm_data_rcv(struct sock
*sk
, struct sk_buff
*skb
);
80 /* ---- L2CAP channels ---- */
81 static struct l2cap_chan
*__l2cap_get_chan_by_dcid(struct l2cap_conn
*conn
, u16 cid
)
85 list_for_each_entry(c
, &conn
->chan_l
, list
) {
93 static struct l2cap_chan
*__l2cap_get_chan_by_scid(struct l2cap_conn
*conn
, u16 cid
)
97 list_for_each_entry(c
, &conn
->chan_l
, list
) {
104 /* Find channel with given SCID.
105 * Returns locked socket */
106 static struct l2cap_chan
*l2cap_get_chan_by_scid(struct l2cap_conn
*conn
, u16 cid
)
108 struct l2cap_chan
*c
;
110 read_lock(&conn
->chan_lock
);
111 c
= __l2cap_get_chan_by_scid(conn
, cid
);
114 read_unlock(&conn
->chan_lock
);
118 static struct l2cap_chan
*__l2cap_get_chan_by_ident(struct l2cap_conn
*conn
, u8 ident
)
120 struct l2cap_chan
*c
;
122 list_for_each_entry(c
, &conn
->chan_l
, list
) {
123 if (c
->ident
== ident
)
129 static inline struct l2cap_chan
*l2cap_get_chan_by_ident(struct l2cap_conn
*conn
, u8 ident
)
131 struct l2cap_chan
*c
;
133 read_lock(&conn
->chan_lock
);
134 c
= __l2cap_get_chan_by_ident(conn
, ident
);
137 read_unlock(&conn
->chan_lock
);
141 static struct l2cap_chan
*__l2cap_global_chan_by_addr(__le16 psm
, bdaddr_t
*src
)
143 struct l2cap_chan
*c
;
145 list_for_each_entry(c
, &chan_list
, global_l
) {
146 if (c
->sport
== psm
&& !bacmp(&bt_sk(c
->sk
)->src
, src
))
155 int l2cap_add_psm(struct l2cap_chan
*chan
, bdaddr_t
*src
, __le16 psm
)
159 write_lock_bh(&chan_list_lock
);
161 if (psm
&& __l2cap_global_chan_by_addr(psm
, src
)) {
174 for (p
= 0x1001; p
< 0x1100; p
+= 2)
175 if (!__l2cap_global_chan_by_addr(cpu_to_le16(p
), src
)) {
176 chan
->psm
= cpu_to_le16(p
);
177 chan
->sport
= cpu_to_le16(p
);
184 write_unlock_bh(&chan_list_lock
);
188 int l2cap_add_scid(struct l2cap_chan
*chan
, __u16 scid
)
190 write_lock_bh(&chan_list_lock
);
194 write_unlock_bh(&chan_list_lock
);
/* Allocate the lowest free dynamic source CID on this connection by a
 * linear scan of [L2CAP_CID_DYN_START, L2CAP_CID_DYN_END), probing each
 * candidate with __l2cap_get_chan_by_scid().
 * NOTE(review): this extraction is missing the tail of the function
 * (the return of the free cid and the exhausted-range fallback) --
 * the surviving fragments below are preserved verbatim. */
199 static u16
l2cap_alloc_cid(struct l2cap_conn
*conn
)
201 u16 cid
= L2CAP_CID_DYN_START
;
203 for (; cid
< L2CAP_CID_DYN_END
; cid
++) {
204 if (!__l2cap_get_chan_by_scid(conn
, cid
))
211 static void l2cap_chan_set_timer(struct l2cap_chan
*chan
, long timeout
)
213 BT_DBG("chan %p state %d timeout %ld", chan
->sk
, chan
->sk
->sk_state
,
215 if (!mod_timer(&chan
->chan_timer
, jiffies
+ timeout
))
219 static void l2cap_chan_clear_timer(struct l2cap_chan
*chan
)
221 BT_DBG("chan %p state %d", chan
, chan
->sk
->sk_state
);
223 if (timer_pending(&chan
->chan_timer
) && del_timer(&chan
->chan_timer
))
224 __sock_put(chan
->sk
);
227 static void l2cap_chan_timeout(unsigned long arg
)
229 struct l2cap_chan
*chan
= (struct l2cap_chan
*) arg
;
230 struct sock
*sk
= chan
->sk
;
233 BT_DBG("chan %p state %d", chan
, sk
->sk_state
);
237 if (sock_owned_by_user(sk
)) {
238 /* sk is owned by user. Try again later */
239 l2cap_chan_set_timer(chan
, HZ
/ 5);
245 if (sk
->sk_state
== BT_CONNECTED
|| sk
->sk_state
== BT_CONFIG
)
246 reason
= ECONNREFUSED
;
247 else if (sk
->sk_state
== BT_CONNECT
&&
248 chan
->sec_level
!= BT_SECURITY_SDP
)
249 reason
= ECONNREFUSED
;
253 l2cap_chan_close(chan
, reason
);
261 struct l2cap_chan
*l2cap_chan_create(struct sock
*sk
)
263 struct l2cap_chan
*chan
;
265 chan
= kzalloc(sizeof(*chan
), GFP_ATOMIC
);
271 write_lock_bh(&chan_list_lock
);
272 list_add(&chan
->global_l
, &chan_list
);
273 write_unlock_bh(&chan_list_lock
);
275 setup_timer(&chan
->chan_timer
, l2cap_chan_timeout
, (unsigned long) chan
);
/* Tear down a channel created by l2cap_chan_create(): unlink it from
 * the global channel list under chan_list_lock.
 * NOTE(review): the tail of this function (presumably freeing the
 * channel allocated in l2cap_chan_create() -- confirm against the
 * full source) is missing from this extraction; the surviving
 * fragments below are preserved verbatim. */
280 void l2cap_chan_destroy(struct l2cap_chan
*chan
)
282 write_lock_bh(&chan_list_lock
);
283 list_del(&chan
->global_l
);
284 write_unlock_bh(&chan_list_lock
);
289 static void __l2cap_chan_add(struct l2cap_conn
*conn
, struct l2cap_chan
*chan
)
291 struct sock
*sk
= chan
->sk
;
293 BT_DBG("conn %p, psm 0x%2.2x, dcid 0x%4.4x", conn
,
294 chan
->psm
, chan
->dcid
);
296 conn
->disc_reason
= 0x13;
300 if (chan
->chan_type
== L2CAP_CHAN_CONN_ORIENTED
) {
301 if (conn
->hcon
->type
== LE_LINK
) {
303 chan
->omtu
= L2CAP_LE_DEFAULT_MTU
;
304 chan
->scid
= L2CAP_CID_LE_DATA
;
305 chan
->dcid
= L2CAP_CID_LE_DATA
;
307 /* Alloc CID for connection-oriented socket */
308 chan
->scid
= l2cap_alloc_cid(conn
);
309 chan
->omtu
= L2CAP_DEFAULT_MTU
;
311 } else if (chan
->chan_type
== L2CAP_CHAN_CONN_LESS
) {
312 /* Connectionless socket */
313 chan
->scid
= L2CAP_CID_CONN_LESS
;
314 chan
->dcid
= L2CAP_CID_CONN_LESS
;
315 chan
->omtu
= L2CAP_DEFAULT_MTU
;
317 /* Raw socket can send/recv signalling messages only */
318 chan
->scid
= L2CAP_CID_SIGNALING
;
319 chan
->dcid
= L2CAP_CID_SIGNALING
;
320 chan
->omtu
= L2CAP_DEFAULT_MTU
;
325 list_add(&chan
->list
, &conn
->chan_l
);
329 * Must be called on the locked socket. */
330 static void l2cap_chan_del(struct l2cap_chan
*chan
, int err
)
332 struct sock
*sk
= chan
->sk
;
333 struct l2cap_conn
*conn
= chan
->conn
;
334 struct sock
*parent
= bt_sk(sk
)->parent
;
336 l2cap_chan_clear_timer(chan
);
338 BT_DBG("chan %p, conn %p, err %d", chan
, conn
, err
);
341 /* Delete from channel list */
342 write_lock_bh(&conn
->chan_lock
);
343 list_del(&chan
->list
);
344 write_unlock_bh(&conn
->chan_lock
);
348 hci_conn_put(conn
->hcon
);
351 sk
->sk_state
= BT_CLOSED
;
352 sock_set_flag(sk
, SOCK_ZAPPED
);
358 bt_accept_unlink(sk
);
359 parent
->sk_data_ready(parent
, 0);
361 sk
->sk_state_change(sk
);
363 if (!(chan
->conf_state
& L2CAP_CONF_OUTPUT_DONE
&&
364 chan
->conf_state
& L2CAP_CONF_INPUT_DONE
))
367 skb_queue_purge(&chan
->tx_q
);
369 if (chan
->mode
== L2CAP_MODE_ERTM
) {
370 struct srej_list
*l
, *tmp
;
372 del_timer(&chan
->retrans_timer
);
373 del_timer(&chan
->monitor_timer
);
374 del_timer(&chan
->ack_timer
);
376 skb_queue_purge(&chan
->srej_q
);
377 skb_queue_purge(&chan
->busy_q
);
379 list_for_each_entry_safe(l
, tmp
, &chan
->srej_l
, list
) {
386 static void l2cap_chan_cleanup_listen(struct sock
*parent
)
390 BT_DBG("parent %p", parent
);
392 /* Close not yet accepted channels */
393 while ((sk
= bt_accept_dequeue(parent
, NULL
))) {
394 l2cap_chan_clear_timer(l2cap_pi(sk
)->chan
);
396 l2cap_chan_close(l2cap_pi(sk
)->chan
, ECONNRESET
);
401 parent
->sk_state
= BT_CLOSED
;
402 sock_set_flag(parent
, SOCK_ZAPPED
);
405 void l2cap_chan_close(struct l2cap_chan
*chan
, int reason
)
407 struct l2cap_conn
*conn
= chan
->conn
;
408 struct sock
*sk
= chan
->sk
;
410 BT_DBG("chan %p state %d socket %p", chan
, sk
->sk_state
, sk
->sk_socket
);
412 switch (sk
->sk_state
) {
414 l2cap_chan_cleanup_listen(sk
);
419 if (chan
->chan_type
== L2CAP_CHAN_CONN_ORIENTED
&&
420 conn
->hcon
->type
== ACL_LINK
) {
421 l2cap_chan_clear_timer(chan
);
422 l2cap_chan_set_timer(chan
, sk
->sk_sndtimeo
);
423 l2cap_send_disconn_req(conn
, chan
, reason
);
425 l2cap_chan_del(chan
, reason
);
429 if (chan
->chan_type
== L2CAP_CHAN_CONN_ORIENTED
&&
430 conn
->hcon
->type
== ACL_LINK
) {
431 struct l2cap_conn_rsp rsp
;
434 if (bt_sk(sk
)->defer_setup
)
435 result
= L2CAP_CR_SEC_BLOCK
;
437 result
= L2CAP_CR_BAD_PSM
;
439 rsp
.scid
= cpu_to_le16(chan
->dcid
);
440 rsp
.dcid
= cpu_to_le16(chan
->scid
);
441 rsp
.result
= cpu_to_le16(result
);
442 rsp
.status
= cpu_to_le16(L2CAP_CS_NO_INFO
);
443 l2cap_send_cmd(conn
, chan
->ident
, L2CAP_CONN_RSP
,
447 l2cap_chan_del(chan
, reason
);
452 l2cap_chan_del(chan
, reason
);
456 sock_set_flag(sk
, SOCK_ZAPPED
);
461 static inline u8
l2cap_get_auth_type(struct l2cap_chan
*chan
)
463 if (chan
->chan_type
== L2CAP_CHAN_RAW
) {
464 switch (chan
->sec_level
) {
465 case BT_SECURITY_HIGH
:
466 return HCI_AT_DEDICATED_BONDING_MITM
;
467 case BT_SECURITY_MEDIUM
:
468 return HCI_AT_DEDICATED_BONDING
;
470 return HCI_AT_NO_BONDING
;
472 } else if (chan
->psm
== cpu_to_le16(0x0001)) {
473 if (chan
->sec_level
== BT_SECURITY_LOW
)
474 chan
->sec_level
= BT_SECURITY_SDP
;
476 if (chan
->sec_level
== BT_SECURITY_HIGH
)
477 return HCI_AT_NO_BONDING_MITM
;
479 return HCI_AT_NO_BONDING
;
481 switch (chan
->sec_level
) {
482 case BT_SECURITY_HIGH
:
483 return HCI_AT_GENERAL_BONDING_MITM
;
484 case BT_SECURITY_MEDIUM
:
485 return HCI_AT_GENERAL_BONDING
;
487 return HCI_AT_NO_BONDING
;
492 /* Service level security */
493 static inline int l2cap_check_security(struct l2cap_chan
*chan
)
495 struct l2cap_conn
*conn
= chan
->conn
;
498 auth_type
= l2cap_get_auth_type(chan
);
500 return hci_conn_security(conn
->hcon
, chan
->sec_level
, auth_type
);
503 u8
l2cap_get_ident(struct l2cap_conn
*conn
)
507 /* Get next available identificator.
508 * 1 - 128 are used by kernel.
509 * 129 - 199 are reserved.
510 * 200 - 254 are used by utilities like l2ping, etc.
513 spin_lock_bh(&conn
->lock
);
515 if (++conn
->tx_ident
> 128)
520 spin_unlock_bh(&conn
->lock
);
525 static void l2cap_send_cmd(struct l2cap_conn
*conn
, u8 ident
, u8 code
, u16 len
, void *data
)
527 struct sk_buff
*skb
= l2cap_build_cmd(conn
, code
, ident
, len
, data
);
530 BT_DBG("code 0x%2.2x", code
);
535 if (lmp_no_flush_capable(conn
->hcon
->hdev
))
536 flags
= ACL_START_NO_FLUSH
;
540 hci_send_acl(conn
->hcon
, skb
, flags
);
543 static inline void l2cap_send_sframe(struct l2cap_chan
*chan
, u16 control
)
546 struct l2cap_hdr
*lh
;
547 struct l2cap_pinfo
*pi
= l2cap_pi(chan
->sk
);
548 struct l2cap_conn
*conn
= chan
->conn
;
549 struct sock
*sk
= (struct sock
*)pi
;
550 int count
, hlen
= L2CAP_HDR_SIZE
+ 2;
553 if (sk
->sk_state
!= BT_CONNECTED
)
556 if (chan
->fcs
== L2CAP_FCS_CRC16
)
559 BT_DBG("chan %p, control 0x%2.2x", chan
, control
);
561 count
= min_t(unsigned int, conn
->mtu
, hlen
);
562 control
|= L2CAP_CTRL_FRAME_TYPE
;
564 if (chan
->conn_state
& L2CAP_CONN_SEND_FBIT
) {
565 control
|= L2CAP_CTRL_FINAL
;
566 chan
->conn_state
&= ~L2CAP_CONN_SEND_FBIT
;
569 if (chan
->conn_state
& L2CAP_CONN_SEND_PBIT
) {
570 control
|= L2CAP_CTRL_POLL
;
571 chan
->conn_state
&= ~L2CAP_CONN_SEND_PBIT
;
574 skb
= bt_skb_alloc(count
, GFP_ATOMIC
);
578 lh
= (struct l2cap_hdr
*) skb_put(skb
, L2CAP_HDR_SIZE
);
579 lh
->len
= cpu_to_le16(hlen
- L2CAP_HDR_SIZE
);
580 lh
->cid
= cpu_to_le16(chan
->dcid
);
581 put_unaligned_le16(control
, skb_put(skb
, 2));
583 if (chan
->fcs
== L2CAP_FCS_CRC16
) {
584 u16 fcs
= crc16(0, (u8
*)lh
, count
- 2);
585 put_unaligned_le16(fcs
, skb_put(skb
, 2));
588 if (lmp_no_flush_capable(conn
->hcon
->hdev
))
589 flags
= ACL_START_NO_FLUSH
;
593 hci_send_acl(chan
->conn
->hcon
, skb
, flags
);
596 static inline void l2cap_send_rr_or_rnr(struct l2cap_chan
*chan
, u16 control
)
598 if (chan
->conn_state
& L2CAP_CONN_LOCAL_BUSY
) {
599 control
|= L2CAP_SUPER_RCV_NOT_READY
;
600 chan
->conn_state
|= L2CAP_CONN_RNR_SENT
;
602 control
|= L2CAP_SUPER_RCV_READY
;
604 control
|= chan
->buffer_seq
<< L2CAP_CTRL_REQSEQ_SHIFT
;
606 l2cap_send_sframe(chan
, control
);
609 static inline int __l2cap_no_conn_pending(struct l2cap_chan
*chan
)
611 return !(chan
->conf_state
& L2CAP_CONF_CONNECT_PEND
);
614 static void l2cap_do_start(struct l2cap_chan
*chan
)
616 struct l2cap_conn
*conn
= chan
->conn
;
618 if (conn
->info_state
& L2CAP_INFO_FEAT_MASK_REQ_SENT
) {
619 if (!(conn
->info_state
& L2CAP_INFO_FEAT_MASK_REQ_DONE
))
622 if (l2cap_check_security(chan
) &&
623 __l2cap_no_conn_pending(chan
)) {
624 struct l2cap_conn_req req
;
625 req
.scid
= cpu_to_le16(chan
->scid
);
628 chan
->ident
= l2cap_get_ident(conn
);
629 chan
->conf_state
|= L2CAP_CONF_CONNECT_PEND
;
631 l2cap_send_cmd(conn
, chan
->ident
, L2CAP_CONN_REQ
,
635 struct l2cap_info_req req
;
636 req
.type
= cpu_to_le16(L2CAP_IT_FEAT_MASK
);
638 conn
->info_state
|= L2CAP_INFO_FEAT_MASK_REQ_SENT
;
639 conn
->info_ident
= l2cap_get_ident(conn
);
641 mod_timer(&conn
->info_timer
, jiffies
+
642 msecs_to_jiffies(L2CAP_INFO_TIMEOUT
));
644 l2cap_send_cmd(conn
, conn
->info_ident
,
645 L2CAP_INFO_REQ
, sizeof(req
), &req
);
649 static inline int l2cap_mode_supported(__u8 mode
, __u32 feat_mask
)
651 u32 local_feat_mask
= l2cap_feat_mask
;
653 local_feat_mask
|= L2CAP_FEAT_ERTM
| L2CAP_FEAT_STREAMING
;
656 case L2CAP_MODE_ERTM
:
657 return L2CAP_FEAT_ERTM
& feat_mask
& local_feat_mask
;
658 case L2CAP_MODE_STREAMING
:
659 return L2CAP_FEAT_STREAMING
& feat_mask
& local_feat_mask
;
665 static void l2cap_send_disconn_req(struct l2cap_conn
*conn
, struct l2cap_chan
*chan
, int err
)
668 struct l2cap_disconn_req req
;
675 if (chan
->mode
== L2CAP_MODE_ERTM
) {
676 del_timer(&chan
->retrans_timer
);
677 del_timer(&chan
->monitor_timer
);
678 del_timer(&chan
->ack_timer
);
681 req
.dcid
= cpu_to_le16(chan
->dcid
);
682 req
.scid
= cpu_to_le16(chan
->scid
);
683 l2cap_send_cmd(conn
, l2cap_get_ident(conn
),
684 L2CAP_DISCONN_REQ
, sizeof(req
), &req
);
686 sk
->sk_state
= BT_DISCONN
;
690 /* ---- L2CAP connections ---- */
691 static void l2cap_conn_start(struct l2cap_conn
*conn
)
693 struct l2cap_chan
*chan
, *tmp
;
695 BT_DBG("conn %p", conn
);
697 read_lock(&conn
->chan_lock
);
699 list_for_each_entry_safe(chan
, tmp
, &conn
->chan_l
, list
) {
700 struct sock
*sk
= chan
->sk
;
704 if (chan
->chan_type
!= L2CAP_CHAN_CONN_ORIENTED
) {
709 if (sk
->sk_state
== BT_CONNECT
) {
710 struct l2cap_conn_req req
;
712 if (!l2cap_check_security(chan
) ||
713 !__l2cap_no_conn_pending(chan
)) {
718 if (!l2cap_mode_supported(chan
->mode
,
720 && chan
->conf_state
&
721 L2CAP_CONF_STATE2_DEVICE
) {
722 /* l2cap_chan_close() calls list_del(chan)
723 * so release the lock */
724 read_unlock_bh(&conn
->chan_lock
);
725 l2cap_chan_close(chan
, ECONNRESET
);
726 read_lock_bh(&conn
->chan_lock
);
731 req
.scid
= cpu_to_le16(chan
->scid
);
734 chan
->ident
= l2cap_get_ident(conn
);
735 chan
->conf_state
|= L2CAP_CONF_CONNECT_PEND
;
737 l2cap_send_cmd(conn
, chan
->ident
, L2CAP_CONN_REQ
,
740 } else if (sk
->sk_state
== BT_CONNECT2
) {
741 struct l2cap_conn_rsp rsp
;
743 rsp
.scid
= cpu_to_le16(chan
->dcid
);
744 rsp
.dcid
= cpu_to_le16(chan
->scid
);
746 if (l2cap_check_security(chan
)) {
747 if (bt_sk(sk
)->defer_setup
) {
748 struct sock
*parent
= bt_sk(sk
)->parent
;
749 rsp
.result
= cpu_to_le16(L2CAP_CR_PEND
);
750 rsp
.status
= cpu_to_le16(L2CAP_CS_AUTHOR_PEND
);
751 parent
->sk_data_ready(parent
, 0);
754 sk
->sk_state
= BT_CONFIG
;
755 rsp
.result
= cpu_to_le16(L2CAP_CR_SUCCESS
);
756 rsp
.status
= cpu_to_le16(L2CAP_CS_NO_INFO
);
759 rsp
.result
= cpu_to_le16(L2CAP_CR_PEND
);
760 rsp
.status
= cpu_to_le16(L2CAP_CS_AUTHEN_PEND
);
763 l2cap_send_cmd(conn
, chan
->ident
, L2CAP_CONN_RSP
,
766 if (chan
->conf_state
& L2CAP_CONF_REQ_SENT
||
767 rsp
.result
!= L2CAP_CR_SUCCESS
) {
772 chan
->conf_state
|= L2CAP_CONF_REQ_SENT
;
773 l2cap_send_cmd(conn
, l2cap_get_ident(conn
), L2CAP_CONF_REQ
,
774 l2cap_build_conf_req(chan
, buf
), buf
);
775 chan
->num_conf_req
++;
781 read_unlock(&conn
->chan_lock
);
784 /* Find socket with cid and source bdaddr.
785 * Returns closest match, locked.
787 static struct l2cap_chan
*l2cap_global_chan_by_scid(int state
, __le16 cid
, bdaddr_t
*src
)
789 struct l2cap_chan
*c
, *c1
= NULL
;
791 read_lock(&chan_list_lock
);
793 list_for_each_entry(c
, &chan_list
, global_l
) {
794 struct sock
*sk
= c
->sk
;
796 if (state
&& sk
->sk_state
!= state
)
799 if (c
->scid
== cid
) {
801 if (!bacmp(&bt_sk(sk
)->src
, src
)) {
802 read_unlock(&chan_list_lock
);
807 if (!bacmp(&bt_sk(sk
)->src
, BDADDR_ANY
))
812 read_unlock(&chan_list_lock
);
817 static void l2cap_le_conn_ready(struct l2cap_conn
*conn
)
819 struct sock
*parent
, *sk
;
820 struct l2cap_chan
*chan
, *pchan
;
824 /* Check if we have socket listening on cid */
825 pchan
= l2cap_global_chan_by_scid(BT_LISTEN
, L2CAP_CID_LE_DATA
,
832 bh_lock_sock(parent
);
834 /* Check for backlog size */
835 if (sk_acceptq_is_full(parent
)) {
836 BT_DBG("backlog full %d", parent
->sk_ack_backlog
);
840 sk
= l2cap_sock_alloc(sock_net(parent
), NULL
, BTPROTO_L2CAP
, GFP_ATOMIC
);
844 chan
= l2cap_chan_create(sk
);
850 l2cap_pi(sk
)->chan
= chan
;
852 write_lock_bh(&conn
->chan_lock
);
854 hci_conn_hold(conn
->hcon
);
856 l2cap_sock_init(sk
, parent
);
858 bacpy(&bt_sk(sk
)->src
, conn
->src
);
859 bacpy(&bt_sk(sk
)->dst
, conn
->dst
);
861 bt_accept_enqueue(parent
, sk
);
863 __l2cap_chan_add(conn
, chan
);
865 l2cap_chan_set_timer(chan
, sk
->sk_sndtimeo
);
867 sk
->sk_state
= BT_CONNECTED
;
868 parent
->sk_data_ready(parent
, 0);
870 write_unlock_bh(&conn
->chan_lock
);
873 bh_unlock_sock(parent
);
876 static void l2cap_conn_ready(struct l2cap_conn
*conn
)
878 struct l2cap_chan
*chan
;
880 BT_DBG("conn %p", conn
);
882 if (!conn
->hcon
->out
&& conn
->hcon
->type
== LE_LINK
)
883 l2cap_le_conn_ready(conn
);
885 read_lock(&conn
->chan_lock
);
887 list_for_each_entry(chan
, &conn
->chan_l
, list
) {
888 struct sock
*sk
= chan
->sk
;
892 if (conn
->hcon
->type
== LE_LINK
) {
893 l2cap_chan_clear_timer(chan
);
894 sk
->sk_state
= BT_CONNECTED
;
895 sk
->sk_state_change(sk
);
898 if (chan
->chan_type
!= L2CAP_CHAN_CONN_ORIENTED
) {
899 l2cap_chan_clear_timer(chan
);
900 sk
->sk_state
= BT_CONNECTED
;
901 sk
->sk_state_change(sk
);
902 } else if (sk
->sk_state
== BT_CONNECT
)
903 l2cap_do_start(chan
);
908 read_unlock(&conn
->chan_lock
);
911 /* Notify sockets that we cannot guaranty reliability anymore */
912 static void l2cap_conn_unreliable(struct l2cap_conn
*conn
, int err
)
914 struct l2cap_chan
*chan
;
916 BT_DBG("conn %p", conn
);
918 read_lock(&conn
->chan_lock
);
920 list_for_each_entry(chan
, &conn
->chan_l
, list
) {
921 struct sock
*sk
= chan
->sk
;
923 if (chan
->force_reliable
)
927 read_unlock(&conn
->chan_lock
);
930 static void l2cap_info_timeout(unsigned long arg
)
932 struct l2cap_conn
*conn
= (void *) arg
;
934 conn
->info_state
|= L2CAP_INFO_FEAT_MASK_REQ_DONE
;
935 conn
->info_ident
= 0;
937 l2cap_conn_start(conn
);
940 static struct l2cap_conn
*l2cap_conn_add(struct hci_conn
*hcon
, u8 status
)
942 struct l2cap_conn
*conn
= hcon
->l2cap_data
;
947 conn
= kzalloc(sizeof(struct l2cap_conn
), GFP_ATOMIC
);
951 hcon
->l2cap_data
= conn
;
954 BT_DBG("hcon %p conn %p", hcon
, conn
);
956 if (hcon
->hdev
->le_mtu
&& hcon
->type
== LE_LINK
)
957 conn
->mtu
= hcon
->hdev
->le_mtu
;
959 conn
->mtu
= hcon
->hdev
->acl_mtu
;
961 conn
->src
= &hcon
->hdev
->bdaddr
;
962 conn
->dst
= &hcon
->dst
;
966 spin_lock_init(&conn
->lock
);
967 rwlock_init(&conn
->chan_lock
);
969 INIT_LIST_HEAD(&conn
->chan_l
);
971 if (hcon
->type
!= LE_LINK
)
972 setup_timer(&conn
->info_timer
, l2cap_info_timeout
,
973 (unsigned long) conn
);
975 conn
->disc_reason
= 0x13;
980 static void l2cap_conn_del(struct hci_conn
*hcon
, int err
)
982 struct l2cap_conn
*conn
= hcon
->l2cap_data
;
983 struct l2cap_chan
*chan
, *l
;
989 BT_DBG("hcon %p conn %p, err %d", hcon
, conn
, err
);
991 kfree_skb(conn
->rx_skb
);
994 list_for_each_entry_safe(chan
, l
, &conn
->chan_l
, list
) {
997 l2cap_chan_del(chan
, err
);
1002 if (conn
->info_state
& L2CAP_INFO_FEAT_MASK_REQ_SENT
)
1003 del_timer_sync(&conn
->info_timer
);
1005 hcon
->l2cap_data
= NULL
;
1009 static inline void l2cap_chan_add(struct l2cap_conn
*conn
, struct l2cap_chan
*chan
)
1011 write_lock_bh(&conn
->chan_lock
);
1012 __l2cap_chan_add(conn
, chan
);
1013 write_unlock_bh(&conn
->chan_lock
);
1016 /* ---- Socket interface ---- */
1018 /* Find socket with psm and source bdaddr.
1019 * Returns closest match.
1021 static struct l2cap_chan
*l2cap_global_chan_by_psm(int state
, __le16 psm
, bdaddr_t
*src
)
1023 struct l2cap_chan
*c
, *c1
= NULL
;
1025 read_lock(&chan_list_lock
);
1027 list_for_each_entry(c
, &chan_list
, global_l
) {
1028 struct sock
*sk
= c
->sk
;
1030 if (state
&& sk
->sk_state
!= state
)
1033 if (c
->psm
== psm
) {
1035 if (!bacmp(&bt_sk(sk
)->src
, src
)) {
1036 read_unlock(&chan_list_lock
);
1041 if (!bacmp(&bt_sk(sk
)->src
, BDADDR_ANY
))
1046 read_unlock(&chan_list_lock
);
1051 int l2cap_chan_connect(struct l2cap_chan
*chan
)
1053 struct sock
*sk
= chan
->sk
;
1054 bdaddr_t
*src
= &bt_sk(sk
)->src
;
1055 bdaddr_t
*dst
= &bt_sk(sk
)->dst
;
1056 struct l2cap_conn
*conn
;
1057 struct hci_conn
*hcon
;
1058 struct hci_dev
*hdev
;
1062 BT_DBG("%s -> %s psm 0x%2.2x", batostr(src
), batostr(dst
),
1065 hdev
= hci_get_route(dst
, src
);
1067 return -EHOSTUNREACH
;
1069 hci_dev_lock_bh(hdev
);
1071 auth_type
= l2cap_get_auth_type(chan
);
1073 if (chan
->dcid
== L2CAP_CID_LE_DATA
)
1074 hcon
= hci_connect(hdev
, LE_LINK
, dst
,
1075 chan
->sec_level
, auth_type
);
1077 hcon
= hci_connect(hdev
, ACL_LINK
, dst
,
1078 chan
->sec_level
, auth_type
);
1081 err
= PTR_ERR(hcon
);
1085 conn
= l2cap_conn_add(hcon
, 0);
1092 /* Update source addr of the socket */
1093 bacpy(src
, conn
->src
);
1095 l2cap_chan_add(conn
, chan
);
1097 sk
->sk_state
= BT_CONNECT
;
1098 l2cap_chan_set_timer(chan
, sk
->sk_sndtimeo
);
1100 if (hcon
->state
== BT_CONNECTED
) {
1101 if (chan
->chan_type
!= L2CAP_CHAN_CONN_ORIENTED
) {
1102 l2cap_chan_clear_timer(chan
);
1103 if (l2cap_check_security(chan
))
1104 sk
->sk_state
= BT_CONNECTED
;
1106 l2cap_do_start(chan
);
1112 hci_dev_unlock_bh(hdev
);
1117 int __l2cap_wait_ack(struct sock
*sk
)
1119 struct l2cap_chan
*chan
= l2cap_pi(sk
)->chan
;
1120 DECLARE_WAITQUEUE(wait
, current
);
1124 add_wait_queue(sk_sleep(sk
), &wait
);
1125 while ((chan
->unacked_frames
> 0 && chan
->conn
)) {
1126 set_current_state(TASK_INTERRUPTIBLE
);
1131 if (signal_pending(current
)) {
1132 err
= sock_intr_errno(timeo
);
1137 timeo
= schedule_timeout(timeo
);
1140 err
= sock_error(sk
);
1144 set_current_state(TASK_RUNNING
);
1145 remove_wait_queue(sk_sleep(sk
), &wait
);
1149 static void l2cap_monitor_timeout(unsigned long arg
)
1151 struct l2cap_chan
*chan
= (void *) arg
;
1152 struct sock
*sk
= chan
->sk
;
1154 BT_DBG("chan %p", chan
);
1157 if (chan
->retry_count
>= chan
->remote_max_tx
) {
1158 l2cap_send_disconn_req(chan
->conn
, chan
, ECONNABORTED
);
1163 chan
->retry_count
++;
1164 __mod_monitor_timer();
1166 l2cap_send_rr_or_rnr(chan
, L2CAP_CTRL_POLL
);
1170 static void l2cap_retrans_timeout(unsigned long arg
)
1172 struct l2cap_chan
*chan
= (void *) arg
;
1173 struct sock
*sk
= chan
->sk
;
1175 BT_DBG("chan %p", chan
);
1178 chan
->retry_count
= 1;
1179 __mod_monitor_timer();
1181 chan
->conn_state
|= L2CAP_CONN_WAIT_F
;
1183 l2cap_send_rr_or_rnr(chan
, L2CAP_CTRL_POLL
);
1187 static void l2cap_drop_acked_frames(struct l2cap_chan
*chan
)
1189 struct sk_buff
*skb
;
1191 while ((skb
= skb_peek(&chan
->tx_q
)) &&
1192 chan
->unacked_frames
) {
1193 if (bt_cb(skb
)->tx_seq
== chan
->expected_ack_seq
)
1196 skb
= skb_dequeue(&chan
->tx_q
);
1199 chan
->unacked_frames
--;
1202 if (!chan
->unacked_frames
)
1203 del_timer(&chan
->retrans_timer
);
1206 void l2cap_do_send(struct l2cap_chan
*chan
, struct sk_buff
*skb
)
1208 struct hci_conn
*hcon
= chan
->conn
->hcon
;
1211 BT_DBG("chan %p, skb %p len %d", chan
, skb
, skb
->len
);
1213 if (!chan
->flushable
&& lmp_no_flush_capable(hcon
->hdev
))
1214 flags
= ACL_START_NO_FLUSH
;
1218 hci_send_acl(hcon
, skb
, flags
);
1221 void l2cap_streaming_send(struct l2cap_chan
*chan
)
1223 struct sk_buff
*skb
;
1226 while ((skb
= skb_dequeue(&chan
->tx_q
))) {
1227 control
= get_unaligned_le16(skb
->data
+ L2CAP_HDR_SIZE
);
1228 control
|= chan
->next_tx_seq
<< L2CAP_CTRL_TXSEQ_SHIFT
;
1229 put_unaligned_le16(control
, skb
->data
+ L2CAP_HDR_SIZE
);
1231 if (chan
->fcs
== L2CAP_FCS_CRC16
) {
1232 fcs
= crc16(0, (u8
*)skb
->data
, skb
->len
- 2);
1233 put_unaligned_le16(fcs
, skb
->data
+ skb
->len
- 2);
1236 l2cap_do_send(chan
, skb
);
1238 chan
->next_tx_seq
= (chan
->next_tx_seq
+ 1) % 64;
1242 static void l2cap_retransmit_one_frame(struct l2cap_chan
*chan
, u8 tx_seq
)
1244 struct sk_buff
*skb
, *tx_skb
;
1247 skb
= skb_peek(&chan
->tx_q
);
1252 if (bt_cb(skb
)->tx_seq
== tx_seq
)
1255 if (skb_queue_is_last(&chan
->tx_q
, skb
))
1258 } while ((skb
= skb_queue_next(&chan
->tx_q
, skb
)));
1260 if (chan
->remote_max_tx
&&
1261 bt_cb(skb
)->retries
== chan
->remote_max_tx
) {
1262 l2cap_send_disconn_req(chan
->conn
, chan
, ECONNABORTED
);
1266 tx_skb
= skb_clone(skb
, GFP_ATOMIC
);
1267 bt_cb(skb
)->retries
++;
1268 control
= get_unaligned_le16(tx_skb
->data
+ L2CAP_HDR_SIZE
);
1269 control
&= L2CAP_CTRL_SAR
;
1271 if (chan
->conn_state
& L2CAP_CONN_SEND_FBIT
) {
1272 control
|= L2CAP_CTRL_FINAL
;
1273 chan
->conn_state
&= ~L2CAP_CONN_SEND_FBIT
;
1276 control
|= (chan
->buffer_seq
<< L2CAP_CTRL_REQSEQ_SHIFT
)
1277 | (tx_seq
<< L2CAP_CTRL_TXSEQ_SHIFT
);
1279 put_unaligned_le16(control
, tx_skb
->data
+ L2CAP_HDR_SIZE
);
1281 if (chan
->fcs
== L2CAP_FCS_CRC16
) {
1282 fcs
= crc16(0, (u8
*)tx_skb
->data
, tx_skb
->len
- 2);
1283 put_unaligned_le16(fcs
, tx_skb
->data
+ tx_skb
->len
- 2);
1286 l2cap_do_send(chan
, tx_skb
);
1289 int l2cap_ertm_send(struct l2cap_chan
*chan
)
1291 struct sk_buff
*skb
, *tx_skb
;
1292 struct sock
*sk
= chan
->sk
;
1296 if (sk
->sk_state
!= BT_CONNECTED
)
1299 while ((skb
= chan
->tx_send_head
) && (!l2cap_tx_window_full(chan
))) {
1301 if (chan
->remote_max_tx
&&
1302 bt_cb(skb
)->retries
== chan
->remote_max_tx
) {
1303 l2cap_send_disconn_req(chan
->conn
, chan
, ECONNABORTED
);
1307 tx_skb
= skb_clone(skb
, GFP_ATOMIC
);
1309 bt_cb(skb
)->retries
++;
1311 control
= get_unaligned_le16(tx_skb
->data
+ L2CAP_HDR_SIZE
);
1312 control
&= L2CAP_CTRL_SAR
;
1314 if (chan
->conn_state
& L2CAP_CONN_SEND_FBIT
) {
1315 control
|= L2CAP_CTRL_FINAL
;
1316 chan
->conn_state
&= ~L2CAP_CONN_SEND_FBIT
;
1318 control
|= (chan
->buffer_seq
<< L2CAP_CTRL_REQSEQ_SHIFT
)
1319 | (chan
->next_tx_seq
<< L2CAP_CTRL_TXSEQ_SHIFT
);
1320 put_unaligned_le16(control
, tx_skb
->data
+ L2CAP_HDR_SIZE
);
1323 if (chan
->fcs
== L2CAP_FCS_CRC16
) {
1324 fcs
= crc16(0, (u8
*)skb
->data
, tx_skb
->len
- 2);
1325 put_unaligned_le16(fcs
, skb
->data
+ tx_skb
->len
- 2);
1328 l2cap_do_send(chan
, tx_skb
);
1330 __mod_retrans_timer();
1332 bt_cb(skb
)->tx_seq
= chan
->next_tx_seq
;
1333 chan
->next_tx_seq
= (chan
->next_tx_seq
+ 1) % 64;
1335 if (bt_cb(skb
)->retries
== 1)
1336 chan
->unacked_frames
++;
1338 chan
->frames_sent
++;
1340 if (skb_queue_is_last(&chan
->tx_q
, skb
))
1341 chan
->tx_send_head
= NULL
;
1343 chan
->tx_send_head
= skb_queue_next(&chan
->tx_q
, skb
);
1351 static int l2cap_retransmit_frames(struct l2cap_chan
*chan
)
1355 if (!skb_queue_empty(&chan
->tx_q
))
1356 chan
->tx_send_head
= chan
->tx_q
.next
;
1358 chan
->next_tx_seq
= chan
->expected_ack_seq
;
1359 ret
= l2cap_ertm_send(chan
);
1363 static void l2cap_send_ack(struct l2cap_chan
*chan
)
1367 control
|= chan
->buffer_seq
<< L2CAP_CTRL_REQSEQ_SHIFT
;
1369 if (chan
->conn_state
& L2CAP_CONN_LOCAL_BUSY
) {
1370 control
|= L2CAP_SUPER_RCV_NOT_READY
;
1371 chan
->conn_state
|= L2CAP_CONN_RNR_SENT
;
1372 l2cap_send_sframe(chan
, control
);
1376 if (l2cap_ertm_send(chan
) > 0)
1379 control
|= L2CAP_SUPER_RCV_READY
;
1380 l2cap_send_sframe(chan
, control
);
1383 static void l2cap_send_srejtail(struct l2cap_chan
*chan
)
1385 struct srej_list
*tail
;
1388 control
= L2CAP_SUPER_SELECT_REJECT
;
1389 control
|= L2CAP_CTRL_FINAL
;
1391 tail
= list_entry((&chan
->srej_l
)->prev
, struct srej_list
, list
);
1392 control
|= tail
->tx_seq
<< L2CAP_CTRL_REQSEQ_SHIFT
;
1394 l2cap_send_sframe(chan
, control
);
1397 static inline int l2cap_skbuff_fromiovec(struct sock
*sk
, struct msghdr
*msg
, int len
, int count
, struct sk_buff
*skb
)
1399 struct l2cap_conn
*conn
= l2cap_pi(sk
)->chan
->conn
;
1400 struct sk_buff
**frag
;
1403 if (memcpy_fromiovec(skb_put(skb
, count
), msg
->msg_iov
, count
))
1409 /* Continuation fragments (no L2CAP header) */
1410 frag
= &skb_shinfo(skb
)->frag_list
;
1412 count
= min_t(unsigned int, conn
->mtu
, len
);
1414 *frag
= bt_skb_send_alloc(sk
, count
, msg
->msg_flags
& MSG_DONTWAIT
, &err
);
1417 if (memcpy_fromiovec(skb_put(*frag
, count
), msg
->msg_iov
, count
))
1423 frag
= &(*frag
)->next
;
1429 struct sk_buff
*l2cap_create_connless_pdu(struct l2cap_chan
*chan
, struct msghdr
*msg
, size_t len
)
1431 struct sock
*sk
= chan
->sk
;
1432 struct l2cap_conn
*conn
= chan
->conn
;
1433 struct sk_buff
*skb
;
1434 int err
, count
, hlen
= L2CAP_HDR_SIZE
+ 2;
1435 struct l2cap_hdr
*lh
;
1437 BT_DBG("sk %p len %d", sk
, (int)len
);
1439 count
= min_t(unsigned int, (conn
->mtu
- hlen
), len
);
1440 skb
= bt_skb_send_alloc(sk
, count
+ hlen
,
1441 msg
->msg_flags
& MSG_DONTWAIT
, &err
);
1443 return ERR_PTR(err
);
1445 /* Create L2CAP header */
1446 lh
= (struct l2cap_hdr
*) skb_put(skb
, L2CAP_HDR_SIZE
);
1447 lh
->cid
= cpu_to_le16(chan
->dcid
);
1448 lh
->len
= cpu_to_le16(len
+ (hlen
- L2CAP_HDR_SIZE
));
1449 put_unaligned_le16(chan
->psm
, skb_put(skb
, 2));
1451 err
= l2cap_skbuff_fromiovec(sk
, msg
, len
, count
, skb
);
1452 if (unlikely(err
< 0)) {
1454 return ERR_PTR(err
);
1459 struct sk_buff
*l2cap_create_basic_pdu(struct l2cap_chan
*chan
, struct msghdr
*msg
, size_t len
)
1461 struct sock
*sk
= chan
->sk
;
1462 struct l2cap_conn
*conn
= chan
->conn
;
1463 struct sk_buff
*skb
;
1464 int err
, count
, hlen
= L2CAP_HDR_SIZE
;
1465 struct l2cap_hdr
*lh
;
1467 BT_DBG("sk %p len %d", sk
, (int)len
);
1469 count
= min_t(unsigned int, (conn
->mtu
- hlen
), len
);
1470 skb
= bt_skb_send_alloc(sk
, count
+ hlen
,
1471 msg
->msg_flags
& MSG_DONTWAIT
, &err
);
1473 return ERR_PTR(err
);
1475 /* Create L2CAP header */
1476 lh
= (struct l2cap_hdr
*) skb_put(skb
, L2CAP_HDR_SIZE
);
1477 lh
->cid
= cpu_to_le16(chan
->dcid
);
1478 lh
->len
= cpu_to_le16(len
+ (hlen
- L2CAP_HDR_SIZE
));
1480 err
= l2cap_skbuff_fromiovec(sk
, msg
, len
, count
, skb
);
1481 if (unlikely(err
< 0)) {
1483 return ERR_PTR(err
);
1488 struct sk_buff
*l2cap_create_iframe_pdu(struct l2cap_chan
*chan
, struct msghdr
*msg
, size_t len
, u16 control
, u16 sdulen
)
1490 struct sock
*sk
= chan
->sk
;
1491 struct l2cap_conn
*conn
= chan
->conn
;
1492 struct sk_buff
*skb
;
1493 int err
, count
, hlen
= L2CAP_HDR_SIZE
+ 2;
1494 struct l2cap_hdr
*lh
;
1496 BT_DBG("sk %p len %d", sk
, (int)len
);
1499 return ERR_PTR(-ENOTCONN
);
1504 if (chan
->fcs
== L2CAP_FCS_CRC16
)
1507 count
= min_t(unsigned int, (conn
->mtu
- hlen
), len
);
1508 skb
= bt_skb_send_alloc(sk
, count
+ hlen
,
1509 msg
->msg_flags
& MSG_DONTWAIT
, &err
);
1511 return ERR_PTR(err
);
1513 /* Create L2CAP header */
1514 lh
= (struct l2cap_hdr
*) skb_put(skb
, L2CAP_HDR_SIZE
);
1515 lh
->cid
= cpu_to_le16(chan
->dcid
);
1516 lh
->len
= cpu_to_le16(len
+ (hlen
- L2CAP_HDR_SIZE
));
1517 put_unaligned_le16(control
, skb_put(skb
, 2));
1519 put_unaligned_le16(sdulen
, skb_put(skb
, 2));
1521 err
= l2cap_skbuff_fromiovec(sk
, msg
, len
, count
, skb
);
1522 if (unlikely(err
< 0)) {
1524 return ERR_PTR(err
);
1527 if (chan
->fcs
== L2CAP_FCS_CRC16
)
1528 put_unaligned_le16(0, skb_put(skb
, 2));
1530 bt_cb(skb
)->retries
= 0;
1534 int l2cap_sar_segment_sdu(struct l2cap_chan
*chan
, struct msghdr
*msg
, size_t len
)
1536 struct sk_buff
*skb
;
1537 struct sk_buff_head sar_queue
;
1541 skb_queue_head_init(&sar_queue
);
1542 control
= L2CAP_SDU_START
;
1543 skb
= l2cap_create_iframe_pdu(chan
, msg
, chan
->remote_mps
, control
, len
);
1545 return PTR_ERR(skb
);
1547 __skb_queue_tail(&sar_queue
, skb
);
1548 len
-= chan
->remote_mps
;
1549 size
+= chan
->remote_mps
;
1554 if (len
> chan
->remote_mps
) {
1555 control
= L2CAP_SDU_CONTINUE
;
1556 buflen
= chan
->remote_mps
;
1558 control
= L2CAP_SDU_END
;
1562 skb
= l2cap_create_iframe_pdu(chan
, msg
, buflen
, control
, 0);
1564 skb_queue_purge(&sar_queue
);
1565 return PTR_ERR(skb
);
1568 __skb_queue_tail(&sar_queue
, skb
);
1572 skb_queue_splice_tail(&sar_queue
, &chan
->tx_q
);
1573 if (chan
->tx_send_head
== NULL
)
1574 chan
->tx_send_head
= sar_queue
.next
;
1579 int l2cap_chan_send(struct l2cap_chan
*chan
, struct msghdr
*msg
, size_t len
)
1581 struct sk_buff
*skb
;
1585 /* Connectionless channel */
1586 if (chan
->chan_type
== L2CAP_CHAN_CONN_LESS
) {
1587 skb
= l2cap_create_connless_pdu(chan
, msg
, len
);
1589 return PTR_ERR(skb
);
1591 l2cap_do_send(chan
, skb
);
1595 switch (chan
->mode
) {
1596 case L2CAP_MODE_BASIC
:
1597 /* Check outgoing MTU */
1598 if (len
> chan
->omtu
)
1601 /* Create a basic PDU */
1602 skb
= l2cap_create_basic_pdu(chan
, msg
, len
);
1604 return PTR_ERR(skb
);
1606 l2cap_do_send(chan
, skb
);
1610 case L2CAP_MODE_ERTM
:
1611 case L2CAP_MODE_STREAMING
:
1612 /* Entire SDU fits into one PDU */
1613 if (len
<= chan
->remote_mps
) {
1614 control
= L2CAP_SDU_UNSEGMENTED
;
1615 skb
= l2cap_create_iframe_pdu(chan
, msg
, len
, control
,
1618 return PTR_ERR(skb
);
1620 __skb_queue_tail(&chan
->tx_q
, skb
);
1622 if (chan
->tx_send_head
== NULL
)
1623 chan
->tx_send_head
= skb
;
1626 /* Segment SDU into multiples PDUs */
1627 err
= l2cap_sar_segment_sdu(chan
, msg
, len
);
1632 if (chan
->mode
== L2CAP_MODE_STREAMING
) {
1633 l2cap_streaming_send(chan
);
1638 if ((chan
->conn_state
& L2CAP_CONN_REMOTE_BUSY
) &&
1639 (chan
->conn_state
& L2CAP_CONN_WAIT_F
)) {
1644 err
= l2cap_ertm_send(chan
);
1651 BT_DBG("bad state %1.1x", chan
->mode
);
1658 static void l2cap_chan_ready(struct sock
*sk
)
1660 struct sock
*parent
= bt_sk(sk
)->parent
;
1661 struct l2cap_chan
*chan
= l2cap_pi(sk
)->chan
;
1663 BT_DBG("sk %p, parent %p", sk
, parent
);
1665 chan
->conf_state
= 0;
1666 l2cap_chan_clear_timer(chan
);
1669 /* Outgoing channel.
1670 * Wake up socket sleeping on connect.
1672 sk
->sk_state
= BT_CONNECTED
;
1673 sk
->sk_state_change(sk
);
1675 /* Incoming channel.
1676 * Wake up socket sleeping on accept.
1678 parent
->sk_data_ready(parent
, 0);
1682 /* Copy frame to all raw sockets on that connection */
1683 static void l2cap_raw_recv(struct l2cap_conn
*conn
, struct sk_buff
*skb
)
1685 struct sk_buff
*nskb
;
1686 struct l2cap_chan
*chan
;
1688 BT_DBG("conn %p", conn
);
1690 read_lock(&conn
->chan_lock
);
1691 list_for_each_entry(chan
, &conn
->chan_l
, list
) {
1692 struct sock
*sk
= chan
->sk
;
1693 if (chan
->chan_type
!= L2CAP_CHAN_RAW
)
1696 /* Don't send frame to the socket it came from */
1699 nskb
= skb_clone(skb
, GFP_ATOMIC
);
1703 if (sock_queue_rcv_skb(sk
, nskb
))
1706 read_unlock(&conn
->chan_lock
);
1709 /* ---- L2CAP signalling commands ---- */
1710 static struct sk_buff
*l2cap_build_cmd(struct l2cap_conn
*conn
,
1711 u8 code
, u8 ident
, u16 dlen
, void *data
)
1713 struct sk_buff
*skb
, **frag
;
1714 struct l2cap_cmd_hdr
*cmd
;
1715 struct l2cap_hdr
*lh
;
1718 BT_DBG("conn %p, code 0x%2.2x, ident 0x%2.2x, len %d",
1719 conn
, code
, ident
, dlen
);
1721 len
= L2CAP_HDR_SIZE
+ L2CAP_CMD_HDR_SIZE
+ dlen
;
1722 count
= min_t(unsigned int, conn
->mtu
, len
);
1724 skb
= bt_skb_alloc(count
, GFP_ATOMIC
);
1728 lh
= (struct l2cap_hdr
*) skb_put(skb
, L2CAP_HDR_SIZE
);
1729 lh
->len
= cpu_to_le16(L2CAP_CMD_HDR_SIZE
+ dlen
);
1731 if (conn
->hcon
->type
== LE_LINK
)
1732 lh
->cid
= cpu_to_le16(L2CAP_CID_LE_SIGNALING
);
1734 lh
->cid
= cpu_to_le16(L2CAP_CID_SIGNALING
);
1736 cmd
= (struct l2cap_cmd_hdr
*) skb_put(skb
, L2CAP_CMD_HDR_SIZE
);
1739 cmd
->len
= cpu_to_le16(dlen
);
1742 count
-= L2CAP_HDR_SIZE
+ L2CAP_CMD_HDR_SIZE
;
1743 memcpy(skb_put(skb
, count
), data
, count
);
1749 /* Continuation fragments (no L2CAP header) */
1750 frag
= &skb_shinfo(skb
)->frag_list
;
1752 count
= min_t(unsigned int, conn
->mtu
, len
);
1754 *frag
= bt_skb_alloc(count
, GFP_ATOMIC
);
1758 memcpy(skb_put(*frag
, count
), data
, count
);
1763 frag
= &(*frag
)->next
;
1773 static inline int l2cap_get_conf_opt(void **ptr
, int *type
, int *olen
, unsigned long *val
)
1775 struct l2cap_conf_opt
*opt
= *ptr
;
1778 len
= L2CAP_CONF_OPT_SIZE
+ opt
->len
;
1786 *val
= *((u8
*) opt
->val
);
1790 *val
= get_unaligned_le16(opt
->val
);
1794 *val
= get_unaligned_le32(opt
->val
);
1798 *val
= (unsigned long) opt
->val
;
1802 BT_DBG("type 0x%2.2x len %d val 0x%lx", *type
, opt
->len
, *val
);
1806 static void l2cap_add_conf_opt(void **ptr
, u8 type
, u8 len
, unsigned long val
)
1808 struct l2cap_conf_opt
*opt
= *ptr
;
1810 BT_DBG("type 0x%2.2x len %d val 0x%lx", type
, len
, val
);
1817 *((u8
*) opt
->val
) = val
;
1821 put_unaligned_le16(val
, opt
->val
);
1825 put_unaligned_le32(val
, opt
->val
);
1829 memcpy(opt
->val
, (void *) val
, len
);
1833 *ptr
+= L2CAP_CONF_OPT_SIZE
+ len
;
1836 static void l2cap_ack_timeout(unsigned long arg
)
1838 struct l2cap_chan
*chan
= (void *) arg
;
1840 bh_lock_sock(chan
->sk
);
1841 l2cap_send_ack(chan
);
1842 bh_unlock_sock(chan
->sk
);
1845 static inline void l2cap_ertm_init(struct l2cap_chan
*chan
)
1847 struct sock
*sk
= chan
->sk
;
1849 chan
->expected_ack_seq
= 0;
1850 chan
->unacked_frames
= 0;
1851 chan
->buffer_seq
= 0;
1852 chan
->num_acked
= 0;
1853 chan
->frames_sent
= 0;
1855 setup_timer(&chan
->retrans_timer
, l2cap_retrans_timeout
,
1856 (unsigned long) chan
);
1857 setup_timer(&chan
->monitor_timer
, l2cap_monitor_timeout
,
1858 (unsigned long) chan
);
1859 setup_timer(&chan
->ack_timer
, l2cap_ack_timeout
, (unsigned long) chan
);
1861 skb_queue_head_init(&chan
->srej_q
);
1862 skb_queue_head_init(&chan
->busy_q
);
1864 INIT_LIST_HEAD(&chan
->srej_l
);
1866 INIT_WORK(&chan
->busy_work
, l2cap_busy_work
);
1868 sk
->sk_backlog_rcv
= l2cap_ertm_data_rcv
;
1871 static inline __u8
l2cap_select_mode(__u8 mode
, __u16 remote_feat_mask
)
1874 case L2CAP_MODE_STREAMING
:
1875 case L2CAP_MODE_ERTM
:
1876 if (l2cap_mode_supported(mode
, remote_feat_mask
))
1880 return L2CAP_MODE_BASIC
;
1884 static int l2cap_build_conf_req(struct l2cap_chan
*chan
, void *data
)
1886 struct l2cap_conf_req
*req
= data
;
1887 struct l2cap_conf_rfc rfc
= { .mode
= chan
->mode
};
1888 void *ptr
= req
->data
;
1890 BT_DBG("chan %p", chan
);
1892 if (chan
->num_conf_req
|| chan
->num_conf_rsp
)
1895 switch (chan
->mode
) {
1896 case L2CAP_MODE_STREAMING
:
1897 case L2CAP_MODE_ERTM
:
1898 if (chan
->conf_state
& L2CAP_CONF_STATE2_DEVICE
)
1903 chan
->mode
= l2cap_select_mode(rfc
.mode
, chan
->conn
->feat_mask
);
1908 if (chan
->imtu
!= L2CAP_DEFAULT_MTU
)
1909 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_MTU
, 2, chan
->imtu
);
1911 switch (chan
->mode
) {
1912 case L2CAP_MODE_BASIC
:
1913 if (!(chan
->conn
->feat_mask
& L2CAP_FEAT_ERTM
) &&
1914 !(chan
->conn
->feat_mask
& L2CAP_FEAT_STREAMING
))
1917 rfc
.mode
= L2CAP_MODE_BASIC
;
1919 rfc
.max_transmit
= 0;
1920 rfc
.retrans_timeout
= 0;
1921 rfc
.monitor_timeout
= 0;
1922 rfc
.max_pdu_size
= 0;
1924 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_RFC
, sizeof(rfc
),
1925 (unsigned long) &rfc
);
1928 case L2CAP_MODE_ERTM
:
1929 rfc
.mode
= L2CAP_MODE_ERTM
;
1930 rfc
.txwin_size
= chan
->tx_win
;
1931 rfc
.max_transmit
= chan
->max_tx
;
1932 rfc
.retrans_timeout
= 0;
1933 rfc
.monitor_timeout
= 0;
1934 rfc
.max_pdu_size
= cpu_to_le16(L2CAP_DEFAULT_MAX_PDU_SIZE
);
1935 if (L2CAP_DEFAULT_MAX_PDU_SIZE
> chan
->conn
->mtu
- 10)
1936 rfc
.max_pdu_size
= cpu_to_le16(chan
->conn
->mtu
- 10);
1938 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_RFC
, sizeof(rfc
),
1939 (unsigned long) &rfc
);
1941 if (!(chan
->conn
->feat_mask
& L2CAP_FEAT_FCS
))
1944 if (chan
->fcs
== L2CAP_FCS_NONE
||
1945 chan
->conf_state
& L2CAP_CONF_NO_FCS_RECV
) {
1946 chan
->fcs
= L2CAP_FCS_NONE
;
1947 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_FCS
, 1, chan
->fcs
);
1951 case L2CAP_MODE_STREAMING
:
1952 rfc
.mode
= L2CAP_MODE_STREAMING
;
1954 rfc
.max_transmit
= 0;
1955 rfc
.retrans_timeout
= 0;
1956 rfc
.monitor_timeout
= 0;
1957 rfc
.max_pdu_size
= cpu_to_le16(L2CAP_DEFAULT_MAX_PDU_SIZE
);
1958 if (L2CAP_DEFAULT_MAX_PDU_SIZE
> chan
->conn
->mtu
- 10)
1959 rfc
.max_pdu_size
= cpu_to_le16(chan
->conn
->mtu
- 10);
1961 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_RFC
, sizeof(rfc
),
1962 (unsigned long) &rfc
);
1964 if (!(chan
->conn
->feat_mask
& L2CAP_FEAT_FCS
))
1967 if (chan
->fcs
== L2CAP_FCS_NONE
||
1968 chan
->conf_state
& L2CAP_CONF_NO_FCS_RECV
) {
1969 chan
->fcs
= L2CAP_FCS_NONE
;
1970 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_FCS
, 1, chan
->fcs
);
1975 req
->dcid
= cpu_to_le16(chan
->dcid
);
1976 req
->flags
= cpu_to_le16(0);
1981 static int l2cap_parse_conf_req(struct l2cap_chan
*chan
, void *data
)
1983 struct l2cap_conf_rsp
*rsp
= data
;
1984 void *ptr
= rsp
->data
;
1985 void *req
= chan
->conf_req
;
1986 int len
= chan
->conf_len
;
1987 int type
, hint
, olen
;
1989 struct l2cap_conf_rfc rfc
= { .mode
= L2CAP_MODE_BASIC
};
1990 u16 mtu
= L2CAP_DEFAULT_MTU
;
1991 u16 result
= L2CAP_CONF_SUCCESS
;
1993 BT_DBG("chan %p", chan
);
1995 while (len
>= L2CAP_CONF_OPT_SIZE
) {
1996 len
-= l2cap_get_conf_opt(&req
, &type
, &olen
, &val
);
1998 hint
= type
& L2CAP_CONF_HINT
;
1999 type
&= L2CAP_CONF_MASK
;
2002 case L2CAP_CONF_MTU
:
2006 case L2CAP_CONF_FLUSH_TO
:
2007 chan
->flush_to
= val
;
2010 case L2CAP_CONF_QOS
:
2013 case L2CAP_CONF_RFC
:
2014 if (olen
== sizeof(rfc
))
2015 memcpy(&rfc
, (void *) val
, olen
);
2018 case L2CAP_CONF_FCS
:
2019 if (val
== L2CAP_FCS_NONE
)
2020 chan
->conf_state
|= L2CAP_CONF_NO_FCS_RECV
;
2028 result
= L2CAP_CONF_UNKNOWN
;
2029 *((u8
*) ptr
++) = type
;
2034 if (chan
->num_conf_rsp
|| chan
->num_conf_req
> 1)
2037 switch (chan
->mode
) {
2038 case L2CAP_MODE_STREAMING
:
2039 case L2CAP_MODE_ERTM
:
2040 if (!(chan
->conf_state
& L2CAP_CONF_STATE2_DEVICE
)) {
2041 chan
->mode
= l2cap_select_mode(rfc
.mode
,
2042 chan
->conn
->feat_mask
);
2046 if (chan
->mode
!= rfc
.mode
)
2047 return -ECONNREFUSED
;
2053 if (chan
->mode
!= rfc
.mode
) {
2054 result
= L2CAP_CONF_UNACCEPT
;
2055 rfc
.mode
= chan
->mode
;
2057 if (chan
->num_conf_rsp
== 1)
2058 return -ECONNREFUSED
;
2060 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_RFC
,
2061 sizeof(rfc
), (unsigned long) &rfc
);
2065 if (result
== L2CAP_CONF_SUCCESS
) {
2066 /* Configure output options and let the other side know
2067 * which ones we don't like. */
2069 if (mtu
< L2CAP_DEFAULT_MIN_MTU
)
2070 result
= L2CAP_CONF_UNACCEPT
;
2073 chan
->conf_state
|= L2CAP_CONF_MTU_DONE
;
2075 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_MTU
, 2, chan
->omtu
);
2078 case L2CAP_MODE_BASIC
:
2079 chan
->fcs
= L2CAP_FCS_NONE
;
2080 chan
->conf_state
|= L2CAP_CONF_MODE_DONE
;
2083 case L2CAP_MODE_ERTM
:
2084 chan
->remote_tx_win
= rfc
.txwin_size
;
2085 chan
->remote_max_tx
= rfc
.max_transmit
;
2087 if (le16_to_cpu(rfc
.max_pdu_size
) > chan
->conn
->mtu
- 10)
2088 rfc
.max_pdu_size
= cpu_to_le16(chan
->conn
->mtu
- 10);
2090 chan
->remote_mps
= le16_to_cpu(rfc
.max_pdu_size
);
2092 rfc
.retrans_timeout
=
2093 le16_to_cpu(L2CAP_DEFAULT_RETRANS_TO
);
2094 rfc
.monitor_timeout
=
2095 le16_to_cpu(L2CAP_DEFAULT_MONITOR_TO
);
2097 chan
->conf_state
|= L2CAP_CONF_MODE_DONE
;
2099 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_RFC
,
2100 sizeof(rfc
), (unsigned long) &rfc
);
2104 case L2CAP_MODE_STREAMING
:
2105 if (le16_to_cpu(rfc
.max_pdu_size
) > chan
->conn
->mtu
- 10)
2106 rfc
.max_pdu_size
= cpu_to_le16(chan
->conn
->mtu
- 10);
2108 chan
->remote_mps
= le16_to_cpu(rfc
.max_pdu_size
);
2110 chan
->conf_state
|= L2CAP_CONF_MODE_DONE
;
2112 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_RFC
,
2113 sizeof(rfc
), (unsigned long) &rfc
);
2118 result
= L2CAP_CONF_UNACCEPT
;
2120 memset(&rfc
, 0, sizeof(rfc
));
2121 rfc
.mode
= chan
->mode
;
2124 if (result
== L2CAP_CONF_SUCCESS
)
2125 chan
->conf_state
|= L2CAP_CONF_OUTPUT_DONE
;
2127 rsp
->scid
= cpu_to_le16(chan
->dcid
);
2128 rsp
->result
= cpu_to_le16(result
);
2129 rsp
->flags
= cpu_to_le16(0x0000);
2134 static int l2cap_parse_conf_rsp(struct l2cap_chan
*chan
, void *rsp
, int len
, void *data
, u16
*result
)
2136 struct l2cap_conf_req
*req
= data
;
2137 void *ptr
= req
->data
;
2140 struct l2cap_conf_rfc rfc
;
2142 BT_DBG("chan %p, rsp %p, len %d, req %p", chan
, rsp
, len
, data
);
2144 while (len
>= L2CAP_CONF_OPT_SIZE
) {
2145 len
-= l2cap_get_conf_opt(&rsp
, &type
, &olen
, &val
);
2148 case L2CAP_CONF_MTU
:
2149 if (val
< L2CAP_DEFAULT_MIN_MTU
) {
2150 *result
= L2CAP_CONF_UNACCEPT
;
2151 chan
->imtu
= L2CAP_DEFAULT_MIN_MTU
;
2154 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_MTU
, 2, chan
->imtu
);
2157 case L2CAP_CONF_FLUSH_TO
:
2158 chan
->flush_to
= val
;
2159 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_FLUSH_TO
,
2163 case L2CAP_CONF_RFC
:
2164 if (olen
== sizeof(rfc
))
2165 memcpy(&rfc
, (void *)val
, olen
);
2167 if ((chan
->conf_state
& L2CAP_CONF_STATE2_DEVICE
) &&
2168 rfc
.mode
!= chan
->mode
)
2169 return -ECONNREFUSED
;
2173 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_RFC
,
2174 sizeof(rfc
), (unsigned long) &rfc
);
2179 if (chan
->mode
== L2CAP_MODE_BASIC
&& chan
->mode
!= rfc
.mode
)
2180 return -ECONNREFUSED
;
2182 chan
->mode
= rfc
.mode
;
2184 if (*result
== L2CAP_CONF_SUCCESS
) {
2186 case L2CAP_MODE_ERTM
:
2187 chan
->retrans_timeout
= le16_to_cpu(rfc
.retrans_timeout
);
2188 chan
->monitor_timeout
= le16_to_cpu(rfc
.monitor_timeout
);
2189 chan
->mps
= le16_to_cpu(rfc
.max_pdu_size
);
2191 case L2CAP_MODE_STREAMING
:
2192 chan
->mps
= le16_to_cpu(rfc
.max_pdu_size
);
2196 req
->dcid
= cpu_to_le16(chan
->dcid
);
2197 req
->flags
= cpu_to_le16(0x0000);
2202 static int l2cap_build_conf_rsp(struct l2cap_chan
*chan
, void *data
, u16 result
, u16 flags
)
2204 struct l2cap_conf_rsp
*rsp
= data
;
2205 void *ptr
= rsp
->data
;
2207 BT_DBG("chan %p", chan
);
2209 rsp
->scid
= cpu_to_le16(chan
->dcid
);
2210 rsp
->result
= cpu_to_le16(result
);
2211 rsp
->flags
= cpu_to_le16(flags
);
2216 void __l2cap_connect_rsp_defer(struct l2cap_chan
*chan
)
2218 struct l2cap_conn_rsp rsp
;
2219 struct l2cap_conn
*conn
= chan
->conn
;
2222 rsp
.scid
= cpu_to_le16(chan
->dcid
);
2223 rsp
.dcid
= cpu_to_le16(chan
->scid
);
2224 rsp
.result
= cpu_to_le16(L2CAP_CR_SUCCESS
);
2225 rsp
.status
= cpu_to_le16(L2CAP_CS_NO_INFO
);
2226 l2cap_send_cmd(conn
, chan
->ident
,
2227 L2CAP_CONN_RSP
, sizeof(rsp
), &rsp
);
2229 if (chan
->conf_state
& L2CAP_CONF_REQ_SENT
)
2232 chan
->conf_state
|= L2CAP_CONF_REQ_SENT
;
2233 l2cap_send_cmd(conn
, l2cap_get_ident(conn
), L2CAP_CONF_REQ
,
2234 l2cap_build_conf_req(chan
, buf
), buf
);
2235 chan
->num_conf_req
++;
2238 static void l2cap_conf_rfc_get(struct l2cap_chan
*chan
, void *rsp
, int len
)
2242 struct l2cap_conf_rfc rfc
;
2244 BT_DBG("chan %p, rsp %p, len %d", chan
, rsp
, len
);
2246 if ((chan
->mode
!= L2CAP_MODE_ERTM
) && (chan
->mode
!= L2CAP_MODE_STREAMING
))
2249 while (len
>= L2CAP_CONF_OPT_SIZE
) {
2250 len
-= l2cap_get_conf_opt(&rsp
, &type
, &olen
, &val
);
2253 case L2CAP_CONF_RFC
:
2254 if (olen
== sizeof(rfc
))
2255 memcpy(&rfc
, (void *)val
, olen
);
2262 case L2CAP_MODE_ERTM
:
2263 chan
->retrans_timeout
= le16_to_cpu(rfc
.retrans_timeout
);
2264 chan
->monitor_timeout
= le16_to_cpu(rfc
.monitor_timeout
);
2265 chan
->mps
= le16_to_cpu(rfc
.max_pdu_size
);
2267 case L2CAP_MODE_STREAMING
:
2268 chan
->mps
= le16_to_cpu(rfc
.max_pdu_size
);
2272 static inline int l2cap_command_rej(struct l2cap_conn
*conn
, struct l2cap_cmd_hdr
*cmd
, u8
*data
)
2274 struct l2cap_cmd_rej
*rej
= (struct l2cap_cmd_rej
*) data
;
2276 if (rej
->reason
!= 0x0000)
2279 if ((conn
->info_state
& L2CAP_INFO_FEAT_MASK_REQ_SENT
) &&
2280 cmd
->ident
== conn
->info_ident
) {
2281 del_timer(&conn
->info_timer
);
2283 conn
->info_state
|= L2CAP_INFO_FEAT_MASK_REQ_DONE
;
2284 conn
->info_ident
= 0;
2286 l2cap_conn_start(conn
);
2292 static inline int l2cap_connect_req(struct l2cap_conn
*conn
, struct l2cap_cmd_hdr
*cmd
, u8
*data
)
2294 struct l2cap_conn_req
*req
= (struct l2cap_conn_req
*) data
;
2295 struct l2cap_conn_rsp rsp
;
2296 struct l2cap_chan
*chan
= NULL
, *pchan
;
2297 struct sock
*parent
, *sk
= NULL
;
2298 int result
, status
= L2CAP_CS_NO_INFO
;
2300 u16 dcid
= 0, scid
= __le16_to_cpu(req
->scid
);
2301 __le16 psm
= req
->psm
;
2303 BT_DBG("psm 0x%2.2x scid 0x%4.4x", psm
, scid
);
2305 /* Check if we have socket listening on psm */
2306 pchan
= l2cap_global_chan_by_psm(BT_LISTEN
, psm
, conn
->src
);
2308 result
= L2CAP_CR_BAD_PSM
;
2314 bh_lock_sock(parent
);
2316 /* Check if the ACL is secure enough (if not SDP) */
2317 if (psm
!= cpu_to_le16(0x0001) &&
2318 !hci_conn_check_link_mode(conn
->hcon
)) {
2319 conn
->disc_reason
= 0x05;
2320 result
= L2CAP_CR_SEC_BLOCK
;
2324 result
= L2CAP_CR_NO_MEM
;
2326 /* Check for backlog size */
2327 if (sk_acceptq_is_full(parent
)) {
2328 BT_DBG("backlog full %d", parent
->sk_ack_backlog
);
2332 sk
= l2cap_sock_alloc(sock_net(parent
), NULL
, BTPROTO_L2CAP
, GFP_ATOMIC
);
2336 chan
= l2cap_chan_create(sk
);
2338 l2cap_sock_kill(sk
);
2342 l2cap_pi(sk
)->chan
= chan
;
2344 write_lock_bh(&conn
->chan_lock
);
2346 /* Check if we already have channel with that dcid */
2347 if (__l2cap_get_chan_by_dcid(conn
, scid
)) {
2348 write_unlock_bh(&conn
->chan_lock
);
2349 sock_set_flag(sk
, SOCK_ZAPPED
);
2350 l2cap_sock_kill(sk
);
2354 hci_conn_hold(conn
->hcon
);
2356 l2cap_sock_init(sk
, parent
);
2357 bacpy(&bt_sk(sk
)->src
, conn
->src
);
2358 bacpy(&bt_sk(sk
)->dst
, conn
->dst
);
2362 bt_accept_enqueue(parent
, sk
);
2364 __l2cap_chan_add(conn
, chan
);
2368 l2cap_chan_set_timer(chan
, sk
->sk_sndtimeo
);
2370 chan
->ident
= cmd
->ident
;
2372 if (conn
->info_state
& L2CAP_INFO_FEAT_MASK_REQ_DONE
) {
2373 if (l2cap_check_security(chan
)) {
2374 if (bt_sk(sk
)->defer_setup
) {
2375 sk
->sk_state
= BT_CONNECT2
;
2376 result
= L2CAP_CR_PEND
;
2377 status
= L2CAP_CS_AUTHOR_PEND
;
2378 parent
->sk_data_ready(parent
, 0);
2380 sk
->sk_state
= BT_CONFIG
;
2381 result
= L2CAP_CR_SUCCESS
;
2382 status
= L2CAP_CS_NO_INFO
;
2385 sk
->sk_state
= BT_CONNECT2
;
2386 result
= L2CAP_CR_PEND
;
2387 status
= L2CAP_CS_AUTHEN_PEND
;
2390 sk
->sk_state
= BT_CONNECT2
;
2391 result
= L2CAP_CR_PEND
;
2392 status
= L2CAP_CS_NO_INFO
;
2395 write_unlock_bh(&conn
->chan_lock
);
2398 bh_unlock_sock(parent
);
2401 rsp
.scid
= cpu_to_le16(scid
);
2402 rsp
.dcid
= cpu_to_le16(dcid
);
2403 rsp
.result
= cpu_to_le16(result
);
2404 rsp
.status
= cpu_to_le16(status
);
2405 l2cap_send_cmd(conn
, cmd
->ident
, L2CAP_CONN_RSP
, sizeof(rsp
), &rsp
);
2407 if (result
== L2CAP_CR_PEND
&& status
== L2CAP_CS_NO_INFO
) {
2408 struct l2cap_info_req info
;
2409 info
.type
= cpu_to_le16(L2CAP_IT_FEAT_MASK
);
2411 conn
->info_state
|= L2CAP_INFO_FEAT_MASK_REQ_SENT
;
2412 conn
->info_ident
= l2cap_get_ident(conn
);
2414 mod_timer(&conn
->info_timer
, jiffies
+
2415 msecs_to_jiffies(L2CAP_INFO_TIMEOUT
));
2417 l2cap_send_cmd(conn
, conn
->info_ident
,
2418 L2CAP_INFO_REQ
, sizeof(info
), &info
);
2421 if (chan
&& !(chan
->conf_state
& L2CAP_CONF_REQ_SENT
) &&
2422 result
== L2CAP_CR_SUCCESS
) {
2424 chan
->conf_state
|= L2CAP_CONF_REQ_SENT
;
2425 l2cap_send_cmd(conn
, l2cap_get_ident(conn
), L2CAP_CONF_REQ
,
2426 l2cap_build_conf_req(chan
, buf
), buf
);
2427 chan
->num_conf_req
++;
2433 static inline int l2cap_connect_rsp(struct l2cap_conn
*conn
, struct l2cap_cmd_hdr
*cmd
, u8
*data
)
2435 struct l2cap_conn_rsp
*rsp
= (struct l2cap_conn_rsp
*) data
;
2436 u16 scid
, dcid
, result
, status
;
2437 struct l2cap_chan
*chan
;
2441 scid
= __le16_to_cpu(rsp
->scid
);
2442 dcid
= __le16_to_cpu(rsp
->dcid
);
2443 result
= __le16_to_cpu(rsp
->result
);
2444 status
= __le16_to_cpu(rsp
->status
);
2446 BT_DBG("dcid 0x%4.4x scid 0x%4.4x result 0x%2.2x status 0x%2.2x", dcid
, scid
, result
, status
);
2449 chan
= l2cap_get_chan_by_scid(conn
, scid
);
2453 chan
= l2cap_get_chan_by_ident(conn
, cmd
->ident
);
2461 case L2CAP_CR_SUCCESS
:
2462 sk
->sk_state
= BT_CONFIG
;
2465 chan
->conf_state
&= ~L2CAP_CONF_CONNECT_PEND
;
2467 if (chan
->conf_state
& L2CAP_CONF_REQ_SENT
)
2470 chan
->conf_state
|= L2CAP_CONF_REQ_SENT
;
2472 l2cap_send_cmd(conn
, l2cap_get_ident(conn
), L2CAP_CONF_REQ
,
2473 l2cap_build_conf_req(chan
, req
), req
);
2474 chan
->num_conf_req
++;
2478 chan
->conf_state
|= L2CAP_CONF_CONNECT_PEND
;
2482 /* don't delete l2cap channel if sk is owned by user */
2483 if (sock_owned_by_user(sk
)) {
2484 sk
->sk_state
= BT_DISCONN
;
2485 l2cap_chan_clear_timer(chan
);
2486 l2cap_chan_set_timer(chan
, HZ
/ 5);
2490 l2cap_chan_del(chan
, ECONNREFUSED
);
2498 static inline void set_default_fcs(struct l2cap_chan
*chan
)
2500 struct l2cap_pinfo
*pi
= l2cap_pi(chan
->sk
);
2502 /* FCS is enabled only in ERTM or streaming mode, if one or both
2505 if (chan
->mode
!= L2CAP_MODE_ERTM
&& chan
->mode
!= L2CAP_MODE_STREAMING
)
2506 chan
->fcs
= L2CAP_FCS_NONE
;
2507 else if (!(pi
->chan
->conf_state
& L2CAP_CONF_NO_FCS_RECV
))
2508 chan
->fcs
= L2CAP_FCS_CRC16
;
2511 static inline int l2cap_config_req(struct l2cap_conn
*conn
, struct l2cap_cmd_hdr
*cmd
, u16 cmd_len
, u8
*data
)
2513 struct l2cap_conf_req
*req
= (struct l2cap_conf_req
*) data
;
2516 struct l2cap_chan
*chan
;
2520 dcid
= __le16_to_cpu(req
->dcid
);
2521 flags
= __le16_to_cpu(req
->flags
);
2523 BT_DBG("dcid 0x%4.4x flags 0x%2.2x", dcid
, flags
);
2525 chan
= l2cap_get_chan_by_scid(conn
, dcid
);
2531 if (sk
->sk_state
!= BT_CONFIG
) {
2532 struct l2cap_cmd_rej rej
;
2534 rej
.reason
= cpu_to_le16(0x0002);
2535 l2cap_send_cmd(conn
, cmd
->ident
, L2CAP_COMMAND_REJ
,
2540 /* Reject if config buffer is too small. */
2541 len
= cmd_len
- sizeof(*req
);
2542 if (chan
->conf_len
+ len
> sizeof(chan
->conf_req
)) {
2543 l2cap_send_cmd(conn
, cmd
->ident
, L2CAP_CONF_RSP
,
2544 l2cap_build_conf_rsp(chan
, rsp
,
2545 L2CAP_CONF_REJECT
, flags
), rsp
);
2550 memcpy(chan
->conf_req
+ chan
->conf_len
, req
->data
, len
);
2551 chan
->conf_len
+= len
;
2553 if (flags
& 0x0001) {
2554 /* Incomplete config. Send empty response. */
2555 l2cap_send_cmd(conn
, cmd
->ident
, L2CAP_CONF_RSP
,
2556 l2cap_build_conf_rsp(chan
, rsp
,
2557 L2CAP_CONF_SUCCESS
, 0x0001), rsp
);
2561 /* Complete config. */
2562 len
= l2cap_parse_conf_req(chan
, rsp
);
2564 l2cap_send_disconn_req(conn
, chan
, ECONNRESET
);
2568 l2cap_send_cmd(conn
, cmd
->ident
, L2CAP_CONF_RSP
, len
, rsp
);
2569 chan
->num_conf_rsp
++;
2571 /* Reset config buffer. */
2574 if (!(chan
->conf_state
& L2CAP_CONF_OUTPUT_DONE
))
2577 if (chan
->conf_state
& L2CAP_CONF_INPUT_DONE
) {
2578 set_default_fcs(chan
);
2580 sk
->sk_state
= BT_CONNECTED
;
2582 chan
->next_tx_seq
= 0;
2583 chan
->expected_tx_seq
= 0;
2584 skb_queue_head_init(&chan
->tx_q
);
2585 if (chan
->mode
== L2CAP_MODE_ERTM
)
2586 l2cap_ertm_init(chan
);
2588 l2cap_chan_ready(sk
);
2592 if (!(chan
->conf_state
& L2CAP_CONF_REQ_SENT
)) {
2594 chan
->conf_state
|= L2CAP_CONF_REQ_SENT
;
2595 l2cap_send_cmd(conn
, l2cap_get_ident(conn
), L2CAP_CONF_REQ
,
2596 l2cap_build_conf_req(chan
, buf
), buf
);
2597 chan
->num_conf_req
++;
2605 static inline int l2cap_config_rsp(struct l2cap_conn
*conn
, struct l2cap_cmd_hdr
*cmd
, u8
*data
)
2607 struct l2cap_conf_rsp
*rsp
= (struct l2cap_conf_rsp
*)data
;
2608 u16 scid
, flags
, result
;
2609 struct l2cap_chan
*chan
;
2611 int len
= cmd
->len
- sizeof(*rsp
);
2613 scid
= __le16_to_cpu(rsp
->scid
);
2614 flags
= __le16_to_cpu(rsp
->flags
);
2615 result
= __le16_to_cpu(rsp
->result
);
2617 BT_DBG("scid 0x%4.4x flags 0x%2.2x result 0x%2.2x",
2618 scid
, flags
, result
);
2620 chan
= l2cap_get_chan_by_scid(conn
, scid
);
2627 case L2CAP_CONF_SUCCESS
:
2628 l2cap_conf_rfc_get(chan
, rsp
->data
, len
);
2631 case L2CAP_CONF_UNACCEPT
:
2632 if (chan
->num_conf_rsp
<= L2CAP_CONF_MAX_CONF_RSP
) {
2635 if (len
> sizeof(req
) - sizeof(struct l2cap_conf_req
)) {
2636 l2cap_send_disconn_req(conn
, chan
, ECONNRESET
);
2640 /* throw out any old stored conf requests */
2641 result
= L2CAP_CONF_SUCCESS
;
2642 len
= l2cap_parse_conf_rsp(chan
, rsp
->data
, len
,
2645 l2cap_send_disconn_req(conn
, chan
, ECONNRESET
);
2649 l2cap_send_cmd(conn
, l2cap_get_ident(conn
),
2650 L2CAP_CONF_REQ
, len
, req
);
2651 chan
->num_conf_req
++;
2652 if (result
!= L2CAP_CONF_SUCCESS
)
2658 sk
->sk_err
= ECONNRESET
;
2659 l2cap_chan_set_timer(chan
, HZ
* 5);
2660 l2cap_send_disconn_req(conn
, chan
, ECONNRESET
);
2667 chan
->conf_state
|= L2CAP_CONF_INPUT_DONE
;
2669 if (chan
->conf_state
& L2CAP_CONF_OUTPUT_DONE
) {
2670 set_default_fcs(chan
);
2672 sk
->sk_state
= BT_CONNECTED
;
2673 chan
->next_tx_seq
= 0;
2674 chan
->expected_tx_seq
= 0;
2675 skb_queue_head_init(&chan
->tx_q
);
2676 if (chan
->mode
== L2CAP_MODE_ERTM
)
2677 l2cap_ertm_init(chan
);
2679 l2cap_chan_ready(sk
);
2687 static inline int l2cap_disconnect_req(struct l2cap_conn
*conn
, struct l2cap_cmd_hdr
*cmd
, u8
*data
)
2689 struct l2cap_disconn_req
*req
= (struct l2cap_disconn_req
*) data
;
2690 struct l2cap_disconn_rsp rsp
;
2692 struct l2cap_chan
*chan
;
2695 scid
= __le16_to_cpu(req
->scid
);
2696 dcid
= __le16_to_cpu(req
->dcid
);
2698 BT_DBG("scid 0x%4.4x dcid 0x%4.4x", scid
, dcid
);
2700 chan
= l2cap_get_chan_by_scid(conn
, dcid
);
2706 rsp
.dcid
= cpu_to_le16(chan
->scid
);
2707 rsp
.scid
= cpu_to_le16(chan
->dcid
);
2708 l2cap_send_cmd(conn
, cmd
->ident
, L2CAP_DISCONN_RSP
, sizeof(rsp
), &rsp
);
2710 sk
->sk_shutdown
= SHUTDOWN_MASK
;
2712 /* don't delete l2cap channel if sk is owned by user */
2713 if (sock_owned_by_user(sk
)) {
2714 sk
->sk_state
= BT_DISCONN
;
2715 l2cap_chan_clear_timer(chan
);
2716 l2cap_chan_set_timer(chan
, HZ
/ 5);
2721 l2cap_chan_del(chan
, ECONNRESET
);
2724 l2cap_sock_kill(sk
);
2728 static inline int l2cap_disconnect_rsp(struct l2cap_conn
*conn
, struct l2cap_cmd_hdr
*cmd
, u8
*data
)
2730 struct l2cap_disconn_rsp
*rsp
= (struct l2cap_disconn_rsp
*) data
;
2732 struct l2cap_chan
*chan
;
2735 scid
= __le16_to_cpu(rsp
->scid
);
2736 dcid
= __le16_to_cpu(rsp
->dcid
);
2738 BT_DBG("dcid 0x%4.4x scid 0x%4.4x", dcid
, scid
);
2740 chan
= l2cap_get_chan_by_scid(conn
, scid
);
2746 /* don't delete l2cap channel if sk is owned by user */
2747 if (sock_owned_by_user(sk
)) {
2748 sk
->sk_state
= BT_DISCONN
;
2749 l2cap_chan_clear_timer(chan
);
2750 l2cap_chan_set_timer(chan
, HZ
/ 5);
2755 l2cap_chan_del(chan
, 0);
2758 l2cap_sock_kill(sk
);
2762 static inline int l2cap_information_req(struct l2cap_conn
*conn
, struct l2cap_cmd_hdr
*cmd
, u8
*data
)
2764 struct l2cap_info_req
*req
= (struct l2cap_info_req
*) data
;
2767 type
= __le16_to_cpu(req
->type
);
2769 BT_DBG("type 0x%4.4x", type
);
2771 if (type
== L2CAP_IT_FEAT_MASK
) {
2773 u32 feat_mask
= l2cap_feat_mask
;
2774 struct l2cap_info_rsp
*rsp
= (struct l2cap_info_rsp
*) buf
;
2775 rsp
->type
= cpu_to_le16(L2CAP_IT_FEAT_MASK
);
2776 rsp
->result
= cpu_to_le16(L2CAP_IR_SUCCESS
);
2778 feat_mask
|= L2CAP_FEAT_ERTM
| L2CAP_FEAT_STREAMING
2780 put_unaligned_le32(feat_mask
, rsp
->data
);
2781 l2cap_send_cmd(conn
, cmd
->ident
,
2782 L2CAP_INFO_RSP
, sizeof(buf
), buf
);
2783 } else if (type
== L2CAP_IT_FIXED_CHAN
) {
2785 struct l2cap_info_rsp
*rsp
= (struct l2cap_info_rsp
*) buf
;
2786 rsp
->type
= cpu_to_le16(L2CAP_IT_FIXED_CHAN
);
2787 rsp
->result
= cpu_to_le16(L2CAP_IR_SUCCESS
);
2788 memcpy(buf
+ 4, l2cap_fixed_chan
, 8);
2789 l2cap_send_cmd(conn
, cmd
->ident
,
2790 L2CAP_INFO_RSP
, sizeof(buf
), buf
);
2792 struct l2cap_info_rsp rsp
;
2793 rsp
.type
= cpu_to_le16(type
);
2794 rsp
.result
= cpu_to_le16(L2CAP_IR_NOTSUPP
);
2795 l2cap_send_cmd(conn
, cmd
->ident
,
2796 L2CAP_INFO_RSP
, sizeof(rsp
), &rsp
);
2802 static inline int l2cap_information_rsp(struct l2cap_conn
*conn
, struct l2cap_cmd_hdr
*cmd
, u8
*data
)
2804 struct l2cap_info_rsp
*rsp
= (struct l2cap_info_rsp
*) data
;
2807 type
= __le16_to_cpu(rsp
->type
);
2808 result
= __le16_to_cpu(rsp
->result
);
2810 BT_DBG("type 0x%4.4x result 0x%2.2x", type
, result
);
2812 /* L2CAP Info req/rsp are unbound to channels, add extra checks */
2813 if (cmd
->ident
!= conn
->info_ident
||
2814 conn
->info_state
& L2CAP_INFO_FEAT_MASK_REQ_DONE
)
2817 del_timer(&conn
->info_timer
);
2819 if (result
!= L2CAP_IR_SUCCESS
) {
2820 conn
->info_state
|= L2CAP_INFO_FEAT_MASK_REQ_DONE
;
2821 conn
->info_ident
= 0;
2823 l2cap_conn_start(conn
);
2828 if (type
== L2CAP_IT_FEAT_MASK
) {
2829 conn
->feat_mask
= get_unaligned_le32(rsp
->data
);
2831 if (conn
->feat_mask
& L2CAP_FEAT_FIXED_CHAN
) {
2832 struct l2cap_info_req req
;
2833 req
.type
= cpu_to_le16(L2CAP_IT_FIXED_CHAN
);
2835 conn
->info_ident
= l2cap_get_ident(conn
);
2837 l2cap_send_cmd(conn
, conn
->info_ident
,
2838 L2CAP_INFO_REQ
, sizeof(req
), &req
);
2840 conn
->info_state
|= L2CAP_INFO_FEAT_MASK_REQ_DONE
;
2841 conn
->info_ident
= 0;
2843 l2cap_conn_start(conn
);
2845 } else if (type
== L2CAP_IT_FIXED_CHAN
) {
2846 conn
->info_state
|= L2CAP_INFO_FEAT_MASK_REQ_DONE
;
2847 conn
->info_ident
= 0;
2849 l2cap_conn_start(conn
);
2855 static inline int l2cap_check_conn_param(u16 min
, u16 max
, u16 latency
,
2860 if (min
> max
|| min
< 6 || max
> 3200)
2863 if (to_multiplier
< 10 || to_multiplier
> 3200)
2866 if (max
>= to_multiplier
* 8)
2869 max_latency
= (to_multiplier
* 8 / max
) - 1;
2870 if (latency
> 499 || latency
> max_latency
)
2876 static inline int l2cap_conn_param_update_req(struct l2cap_conn
*conn
,
2877 struct l2cap_cmd_hdr
*cmd
, u8
*data
)
2879 struct hci_conn
*hcon
= conn
->hcon
;
2880 struct l2cap_conn_param_update_req
*req
;
2881 struct l2cap_conn_param_update_rsp rsp
;
2882 u16 min
, max
, latency
, to_multiplier
, cmd_len
;
2885 if (!(hcon
->link_mode
& HCI_LM_MASTER
))
2888 cmd_len
= __le16_to_cpu(cmd
->len
);
2889 if (cmd_len
!= sizeof(struct l2cap_conn_param_update_req
))
2892 req
= (struct l2cap_conn_param_update_req
*) data
;
2893 min
= __le16_to_cpu(req
->min
);
2894 max
= __le16_to_cpu(req
->max
);
2895 latency
= __le16_to_cpu(req
->latency
);
2896 to_multiplier
= __le16_to_cpu(req
->to_multiplier
);
2898 BT_DBG("min 0x%4.4x max 0x%4.4x latency: 0x%4.4x Timeout: 0x%4.4x",
2899 min
, max
, latency
, to_multiplier
);
2901 memset(&rsp
, 0, sizeof(rsp
));
2903 err
= l2cap_check_conn_param(min
, max
, latency
, to_multiplier
);
2905 rsp
.result
= cpu_to_le16(L2CAP_CONN_PARAM_REJECTED
);
2907 rsp
.result
= cpu_to_le16(L2CAP_CONN_PARAM_ACCEPTED
);
2909 l2cap_send_cmd(conn
, cmd
->ident
, L2CAP_CONN_PARAM_UPDATE_RSP
,
2913 hci_le_conn_update(hcon
, min
, max
, latency
, to_multiplier
);
2918 static inline int l2cap_bredr_sig_cmd(struct l2cap_conn
*conn
,
2919 struct l2cap_cmd_hdr
*cmd
, u16 cmd_len
, u8
*data
)
2923 switch (cmd
->code
) {
2924 case L2CAP_COMMAND_REJ
:
2925 l2cap_command_rej(conn
, cmd
, data
);
2928 case L2CAP_CONN_REQ
:
2929 err
= l2cap_connect_req(conn
, cmd
, data
);
2932 case L2CAP_CONN_RSP
:
2933 err
= l2cap_connect_rsp(conn
, cmd
, data
);
2936 case L2CAP_CONF_REQ
:
2937 err
= l2cap_config_req(conn
, cmd
, cmd_len
, data
);
2940 case L2CAP_CONF_RSP
:
2941 err
= l2cap_config_rsp(conn
, cmd
, data
);
2944 case L2CAP_DISCONN_REQ
:
2945 err
= l2cap_disconnect_req(conn
, cmd
, data
);
2948 case L2CAP_DISCONN_RSP
:
2949 err
= l2cap_disconnect_rsp(conn
, cmd
, data
);
2952 case L2CAP_ECHO_REQ
:
2953 l2cap_send_cmd(conn
, cmd
->ident
, L2CAP_ECHO_RSP
, cmd_len
, data
);
2956 case L2CAP_ECHO_RSP
:
2959 case L2CAP_INFO_REQ
:
2960 err
= l2cap_information_req(conn
, cmd
, data
);
2963 case L2CAP_INFO_RSP
:
2964 err
= l2cap_information_rsp(conn
, cmd
, data
);
2968 BT_ERR("Unknown BR/EDR signaling command 0x%2.2x", cmd
->code
);
2976 static inline int l2cap_le_sig_cmd(struct l2cap_conn
*conn
,
2977 struct l2cap_cmd_hdr
*cmd
, u8
*data
)
2979 switch (cmd
->code
) {
2980 case L2CAP_COMMAND_REJ
:
2983 case L2CAP_CONN_PARAM_UPDATE_REQ
:
2984 return l2cap_conn_param_update_req(conn
, cmd
, data
);
2986 case L2CAP_CONN_PARAM_UPDATE_RSP
:
2990 BT_ERR("Unknown LE signaling command 0x%2.2x", cmd
->code
);
2995 static inline void l2cap_sig_channel(struct l2cap_conn
*conn
,
2996 struct sk_buff
*skb
)
2998 u8
*data
= skb
->data
;
3000 struct l2cap_cmd_hdr cmd
;
3003 l2cap_raw_recv(conn
, skb
);
3005 while (len
>= L2CAP_CMD_HDR_SIZE
) {
3007 memcpy(&cmd
, data
, L2CAP_CMD_HDR_SIZE
);
3008 data
+= L2CAP_CMD_HDR_SIZE
;
3009 len
-= L2CAP_CMD_HDR_SIZE
;
3011 cmd_len
= le16_to_cpu(cmd
.len
);
3013 BT_DBG("code 0x%2.2x len %d id 0x%2.2x", cmd
.code
, cmd_len
, cmd
.ident
);
3015 if (cmd_len
> len
|| !cmd
.ident
) {
3016 BT_DBG("corrupted command");
3020 if (conn
->hcon
->type
== LE_LINK
)
3021 err
= l2cap_le_sig_cmd(conn
, &cmd
, data
);
3023 err
= l2cap_bredr_sig_cmd(conn
, &cmd
, cmd_len
, data
);
3026 struct l2cap_cmd_rej rej
;
3028 BT_ERR("Wrong link type (%d)", err
);
3030 /* FIXME: Map err to a valid reason */
3031 rej
.reason
= cpu_to_le16(0);
3032 l2cap_send_cmd(conn
, cmd
.ident
, L2CAP_COMMAND_REJ
, sizeof(rej
), &rej
);
3042 static int l2cap_check_fcs(struct l2cap_chan
*chan
, struct sk_buff
*skb
)
3044 u16 our_fcs
, rcv_fcs
;
3045 int hdr_size
= L2CAP_HDR_SIZE
+ 2;
3047 if (chan
->fcs
== L2CAP_FCS_CRC16
) {
3048 skb_trim(skb
, skb
->len
- 2);
3049 rcv_fcs
= get_unaligned_le16(skb
->data
+ skb
->len
);
3050 our_fcs
= crc16(0, skb
->data
- hdr_size
, skb
->len
+ hdr_size
);
3052 if (our_fcs
!= rcv_fcs
)
3058 static inline void l2cap_send_i_or_rr_or_rnr(struct l2cap_chan
*chan
)
3062 chan
->frames_sent
= 0;
3064 control
|= chan
->buffer_seq
<< L2CAP_CTRL_REQSEQ_SHIFT
;
3066 if (chan
->conn_state
& L2CAP_CONN_LOCAL_BUSY
) {
3067 control
|= L2CAP_SUPER_RCV_NOT_READY
;
3068 l2cap_send_sframe(chan
, control
);
3069 chan
->conn_state
|= L2CAP_CONN_RNR_SENT
;
3072 if (chan
->conn_state
& L2CAP_CONN_REMOTE_BUSY
)
3073 l2cap_retransmit_frames(chan
);
3075 l2cap_ertm_send(chan
);
3077 if (!(chan
->conn_state
& L2CAP_CONN_LOCAL_BUSY
) &&
3078 chan
->frames_sent
== 0) {
3079 control
|= L2CAP_SUPER_RCV_READY
;
3080 l2cap_send_sframe(chan
, control
);
3084 static int l2cap_add_to_srej_queue(struct l2cap_chan
*chan
, struct sk_buff
*skb
, u8 tx_seq
, u8 sar
)
3086 struct sk_buff
*next_skb
;
3087 int tx_seq_offset
, next_tx_seq_offset
;
3089 bt_cb(skb
)->tx_seq
= tx_seq
;
3090 bt_cb(skb
)->sar
= sar
;
3092 next_skb
= skb_peek(&chan
->srej_q
);
3094 __skb_queue_tail(&chan
->srej_q
, skb
);
3098 tx_seq_offset
= (tx_seq
- chan
->buffer_seq
) % 64;
3099 if (tx_seq_offset
< 0)
3100 tx_seq_offset
+= 64;
3103 if (bt_cb(next_skb
)->tx_seq
== tx_seq
)
3106 next_tx_seq_offset
= (bt_cb(next_skb
)->tx_seq
-
3107 chan
->buffer_seq
) % 64;
3108 if (next_tx_seq_offset
< 0)
3109 next_tx_seq_offset
+= 64;
3111 if (next_tx_seq_offset
> tx_seq_offset
) {
3112 __skb_queue_before(&chan
->srej_q
, next_skb
, skb
);
3116 if (skb_queue_is_last(&chan
->srej_q
, next_skb
))
3119 } while ((next_skb
= skb_queue_next(&chan
->srej_q
, next_skb
)));
3121 __skb_queue_tail(&chan
->srej_q
, skb
);
3126 static int l2cap_ertm_reassembly_sdu(struct l2cap_chan
*chan
, struct sk_buff
*skb
, u16 control
)
3128 struct sk_buff
*_skb
;
3131 switch (control
& L2CAP_CTRL_SAR
) {
3132 case L2CAP_SDU_UNSEGMENTED
:
3133 if (chan
->conn_state
& L2CAP_CONN_SAR_SDU
)
3136 return sock_queue_rcv_skb(chan
->sk
, skb
);
3138 case L2CAP_SDU_START
:
3139 if (chan
->conn_state
& L2CAP_CONN_SAR_SDU
)
3142 chan
->sdu_len
= get_unaligned_le16(skb
->data
);
3144 if (chan
->sdu_len
> chan
->imtu
)
3147 chan
->sdu
= bt_skb_alloc(chan
->sdu_len
, GFP_ATOMIC
);
3151 /* pull sdu_len bytes only after alloc, because of Local Busy
3152 * condition we have to be sure that this will be executed
3153 * only once, i.e., when alloc does not fail */
3156 memcpy(skb_put(chan
->sdu
, skb
->len
), skb
->data
, skb
->len
);
3158 chan
->conn_state
|= L2CAP_CONN_SAR_SDU
;
3159 chan
->partial_sdu_len
= skb
->len
;
3162 case L2CAP_SDU_CONTINUE
:
3163 if (!(chan
->conn_state
& L2CAP_CONN_SAR_SDU
))
3169 chan
->partial_sdu_len
+= skb
->len
;
3170 if (chan
->partial_sdu_len
> chan
->sdu_len
)
3173 memcpy(skb_put(chan
->sdu
, skb
->len
), skb
->data
, skb
->len
);
3178 if (!(chan
->conn_state
& L2CAP_CONN_SAR_SDU
))
3184 if (!(chan
->conn_state
& L2CAP_CONN_SAR_RETRY
)) {
3185 chan
->partial_sdu_len
+= skb
->len
;
3187 if (chan
->partial_sdu_len
> chan
->imtu
)
3190 if (chan
->partial_sdu_len
!= chan
->sdu_len
)
3193 memcpy(skb_put(chan
->sdu
, skb
->len
), skb
->data
, skb
->len
);
3196 _skb
= skb_clone(chan
->sdu
, GFP_ATOMIC
);
3198 chan
->conn_state
|= L2CAP_CONN_SAR_RETRY
;
3202 err
= sock_queue_rcv_skb(chan
->sk
, _skb
);
3205 chan
->conn_state
|= L2CAP_CONN_SAR_RETRY
;
3209 chan
->conn_state
&= ~L2CAP_CONN_SAR_RETRY
;
3210 chan
->conn_state
&= ~L2CAP_CONN_SAR_SDU
;
3212 kfree_skb(chan
->sdu
);
3220 kfree_skb(chan
->sdu
);
3224 l2cap_send_disconn_req(chan
->conn
, chan
, ECONNRESET
);
3229 static int l2cap_try_push_rx_skb(struct l2cap_chan
*chan
)
3231 struct sk_buff
*skb
;
3235 while ((skb
= skb_dequeue(&chan
->busy_q
))) {
3236 control
= bt_cb(skb
)->sar
<< L2CAP_CTRL_SAR_SHIFT
;
3237 err
= l2cap_ertm_reassembly_sdu(chan
, skb
, control
);
3239 skb_queue_head(&chan
->busy_q
, skb
);
3243 chan
->buffer_seq
= (chan
->buffer_seq
+ 1) % 64;
3246 if (!(chan
->conn_state
& L2CAP_CONN_RNR_SENT
))
3249 control
= chan
->buffer_seq
<< L2CAP_CTRL_REQSEQ_SHIFT
;
3250 control
|= L2CAP_SUPER_RCV_READY
| L2CAP_CTRL_POLL
;
3251 l2cap_send_sframe(chan
, control
);
3252 chan
->retry_count
= 1;
3254 del_timer(&chan
->retrans_timer
);
3255 __mod_monitor_timer();
3257 chan
->conn_state
|= L2CAP_CONN_WAIT_F
;
3260 chan
->conn_state
&= ~L2CAP_CONN_LOCAL_BUSY
;
3261 chan
->conn_state
&= ~L2CAP_CONN_RNR_SENT
;
3263 BT_DBG("chan %p, Exit local busy", chan
);
3268 static void l2cap_busy_work(struct work_struct
*work
)
3270 DECLARE_WAITQUEUE(wait
, current
);
3271 struct l2cap_chan
*chan
=
3272 container_of(work
, struct l2cap_chan
, busy_work
);
3273 struct sock
*sk
= chan
->sk
;
3274 int n_tries
= 0, timeo
= HZ
/5, err
;
3275 struct sk_buff
*skb
;
3279 add_wait_queue(sk_sleep(sk
), &wait
);
3280 while ((skb
= skb_peek(&chan
->busy_q
))) {
3281 set_current_state(TASK_INTERRUPTIBLE
);
3283 if (n_tries
++ > L2CAP_LOCAL_BUSY_TRIES
) {
3285 l2cap_send_disconn_req(chan
->conn
, chan
, EBUSY
);
3292 if (signal_pending(current
)) {
3293 err
= sock_intr_errno(timeo
);
3298 timeo
= schedule_timeout(timeo
);
3301 err
= sock_error(sk
);
3305 if (l2cap_try_push_rx_skb(chan
) == 0)
3309 set_current_state(TASK_RUNNING
);
3310 remove_wait_queue(sk_sleep(sk
), &wait
);
3315 static int l2cap_push_rx_skb(struct l2cap_chan
*chan
, struct sk_buff
*skb
, u16 control
)
3319 if (chan
->conn_state
& L2CAP_CONN_LOCAL_BUSY
) {
3320 bt_cb(skb
)->sar
= control
>> L2CAP_CTRL_SAR_SHIFT
;
3321 __skb_queue_tail(&chan
->busy_q
, skb
);
3322 return l2cap_try_push_rx_skb(chan
);
3327 err
= l2cap_ertm_reassembly_sdu(chan
, skb
, control
);
3329 chan
->buffer_seq
= (chan
->buffer_seq
+ 1) % 64;
3333 /* Busy Condition */
3334 BT_DBG("chan %p, Enter local busy", chan
);
3336 chan
->conn_state
|= L2CAP_CONN_LOCAL_BUSY
;
3337 bt_cb(skb
)->sar
= control
>> L2CAP_CTRL_SAR_SHIFT
;
3338 __skb_queue_tail(&chan
->busy_q
, skb
);
3340 sctrl
= chan
->buffer_seq
<< L2CAP_CTRL_REQSEQ_SHIFT
;
3341 sctrl
|= L2CAP_SUPER_RCV_NOT_READY
;
3342 l2cap_send_sframe(chan
, sctrl
);
3344 chan
->conn_state
|= L2CAP_CONN_RNR_SENT
;
3346 del_timer(&chan
->ack_timer
);
3348 queue_work(_busy_wq
, &chan
->busy_work
);
3353 static int l2cap_streaming_reassembly_sdu(struct l2cap_chan
*chan
, struct sk_buff
*skb
, u16 control
)
3355 struct sk_buff
*_skb
;
3359 * TODO: We have to notify the userland if some data is lost with the
3363 switch (control
& L2CAP_CTRL_SAR
) {
3364 case L2CAP_SDU_UNSEGMENTED
:
3365 if (chan
->conn_state
& L2CAP_CONN_SAR_SDU
) {
3366 kfree_skb(chan
->sdu
);
3370 err
= sock_queue_rcv_skb(chan
->sk
, skb
);
3376 case L2CAP_SDU_START
:
3377 if (chan
->conn_state
& L2CAP_CONN_SAR_SDU
) {
3378 kfree_skb(chan
->sdu
);
3382 chan
->sdu_len
= get_unaligned_le16(skb
->data
);
3385 if (chan
->sdu_len
> chan
->imtu
) {
3390 chan
->sdu
= bt_skb_alloc(chan
->sdu_len
, GFP_ATOMIC
);
3396 memcpy(skb_put(chan
->sdu
, skb
->len
), skb
->data
, skb
->len
);
3398 chan
->conn_state
|= L2CAP_CONN_SAR_SDU
;
3399 chan
->partial_sdu_len
= skb
->len
;
3403 case L2CAP_SDU_CONTINUE
:
3404 if (!(chan
->conn_state
& L2CAP_CONN_SAR_SDU
))
3407 memcpy(skb_put(chan
->sdu
, skb
->len
), skb
->data
, skb
->len
);
3409 chan
->partial_sdu_len
+= skb
->len
;
3410 if (chan
->partial_sdu_len
> chan
->sdu_len
)
3411 kfree_skb(chan
->sdu
);
3418 if (!(chan
->conn_state
& L2CAP_CONN_SAR_SDU
))
3421 memcpy(skb_put(chan
->sdu
, skb
->len
), skb
->data
, skb
->len
);
3423 chan
->conn_state
&= ~L2CAP_CONN_SAR_SDU
;
3424 chan
->partial_sdu_len
+= skb
->len
;
3426 if (chan
->partial_sdu_len
> chan
->imtu
)
3429 if (chan
->partial_sdu_len
== chan
->sdu_len
) {
3430 _skb
= skb_clone(chan
->sdu
, GFP_ATOMIC
);
3431 err
= sock_queue_rcv_skb(chan
->sk
, _skb
);
3438 kfree_skb(chan
->sdu
);
3446 static void l2cap_check_srej_gap(struct l2cap_chan
*chan
, u8 tx_seq
)
3448 struct sk_buff
*skb
;
3451 while ((skb
= skb_peek(&chan
->srej_q
))) {
3452 if (bt_cb(skb
)->tx_seq
!= tx_seq
)
3455 skb
= skb_dequeue(&chan
->srej_q
);
3456 control
= bt_cb(skb
)->sar
<< L2CAP_CTRL_SAR_SHIFT
;
3457 l2cap_ertm_reassembly_sdu(chan
, skb
, control
);
3458 chan
->buffer_seq_srej
=
3459 (chan
->buffer_seq_srej
+ 1) % 64;
3460 tx_seq
= (tx_seq
+ 1) % 64;
3464 static void l2cap_resend_srejframe(struct l2cap_chan
*chan
, u8 tx_seq
)
3466 struct srej_list
*l
, *tmp
;
3469 list_for_each_entry_safe(l
, tmp
, &chan
->srej_l
, list
) {
3470 if (l
->tx_seq
== tx_seq
) {
3475 control
= L2CAP_SUPER_SELECT_REJECT
;
3476 control
|= l
->tx_seq
<< L2CAP_CTRL_REQSEQ_SHIFT
;
3477 l2cap_send_sframe(chan
, control
);
3479 list_add_tail(&l
->list
, &chan
->srej_l
);
3483 static void l2cap_send_srejframe(struct l2cap_chan
*chan
, u8 tx_seq
)
3485 struct srej_list
*new;
3488 while (tx_seq
!= chan
->expected_tx_seq
) {
3489 control
= L2CAP_SUPER_SELECT_REJECT
;
3490 control
|= chan
->expected_tx_seq
<< L2CAP_CTRL_REQSEQ_SHIFT
;
3491 l2cap_send_sframe(chan
, control
);
3493 new = kzalloc(sizeof(struct srej_list
), GFP_ATOMIC
);
3494 new->tx_seq
= chan
->expected_tx_seq
;
3495 chan
->expected_tx_seq
= (chan
->expected_tx_seq
+ 1) % 64;
3496 list_add_tail(&new->list
, &chan
->srej_l
);
3498 chan
->expected_tx_seq
= (chan
->expected_tx_seq
+ 1) % 64;
3501 static inline int l2cap_data_channel_iframe(struct l2cap_chan
*chan
, u16 rx_control
, struct sk_buff
*skb
)
3503 u8 tx_seq
= __get_txseq(rx_control
);
3504 u8 req_seq
= __get_reqseq(rx_control
);
3505 u8 sar
= rx_control
>> L2CAP_CTRL_SAR_SHIFT
;
3506 int tx_seq_offset
, expected_tx_seq_offset
;
3507 int num_to_ack
= (chan
->tx_win
/6) + 1;
3510 BT_DBG("chan %p len %d tx_seq %d rx_control 0x%4.4x", chan
, skb
->len
,
3511 tx_seq
, rx_control
);
3513 if (L2CAP_CTRL_FINAL
& rx_control
&&
3514 chan
->conn_state
& L2CAP_CONN_WAIT_F
) {
3515 del_timer(&chan
->monitor_timer
);
3516 if (chan
->unacked_frames
> 0)
3517 __mod_retrans_timer();
3518 chan
->conn_state
&= ~L2CAP_CONN_WAIT_F
;
3521 chan
->expected_ack_seq
= req_seq
;
3522 l2cap_drop_acked_frames(chan
);
3524 if (tx_seq
== chan
->expected_tx_seq
)
3527 tx_seq_offset
= (tx_seq
- chan
->buffer_seq
) % 64;
3528 if (tx_seq_offset
< 0)
3529 tx_seq_offset
+= 64;
3531 /* invalid tx_seq */
3532 if (tx_seq_offset
>= chan
->tx_win
) {
3533 l2cap_send_disconn_req(chan
->conn
, chan
, ECONNRESET
);
3537 if (chan
->conn_state
== L2CAP_CONN_LOCAL_BUSY
)
3540 if (chan
->conn_state
& L2CAP_CONN_SREJ_SENT
) {
3541 struct srej_list
*first
;
3543 first
= list_first_entry(&chan
->srej_l
,
3544 struct srej_list
, list
);
3545 if (tx_seq
== first
->tx_seq
) {
3546 l2cap_add_to_srej_queue(chan
, skb
, tx_seq
, sar
);
3547 l2cap_check_srej_gap(chan
, tx_seq
);
3549 list_del(&first
->list
);
3552 if (list_empty(&chan
->srej_l
)) {
3553 chan
->buffer_seq
= chan
->buffer_seq_srej
;
3554 chan
->conn_state
&= ~L2CAP_CONN_SREJ_SENT
;
3555 l2cap_send_ack(chan
);
3556 BT_DBG("chan %p, Exit SREJ_SENT", chan
);
3559 struct srej_list
*l
;
3561 /* duplicated tx_seq */
3562 if (l2cap_add_to_srej_queue(chan
, skb
, tx_seq
, sar
) < 0)
3565 list_for_each_entry(l
, &chan
->srej_l
, list
) {
3566 if (l
->tx_seq
== tx_seq
) {
3567 l2cap_resend_srejframe(chan
, tx_seq
);
3571 l2cap_send_srejframe(chan
, tx_seq
);
3574 expected_tx_seq_offset
=
3575 (chan
->expected_tx_seq
- chan
->buffer_seq
) % 64;
3576 if (expected_tx_seq_offset
< 0)
3577 expected_tx_seq_offset
+= 64;
3579 /* duplicated tx_seq */
3580 if (tx_seq_offset
< expected_tx_seq_offset
)
3583 chan
->conn_state
|= L2CAP_CONN_SREJ_SENT
;
3585 BT_DBG("chan %p, Enter SREJ", chan
);
3587 INIT_LIST_HEAD(&chan
->srej_l
);
3588 chan
->buffer_seq_srej
= chan
->buffer_seq
;
3590 __skb_queue_head_init(&chan
->srej_q
);
3591 __skb_queue_head_init(&chan
->busy_q
);
3592 l2cap_add_to_srej_queue(chan
, skb
, tx_seq
, sar
);
3594 chan
->conn_state
|= L2CAP_CONN_SEND_PBIT
;
3596 l2cap_send_srejframe(chan
, tx_seq
);
3598 del_timer(&chan
->ack_timer
);
3603 chan
->expected_tx_seq
= (chan
->expected_tx_seq
+ 1) % 64;
3605 if (chan
->conn_state
& L2CAP_CONN_SREJ_SENT
) {
3606 bt_cb(skb
)->tx_seq
= tx_seq
;
3607 bt_cb(skb
)->sar
= sar
;
3608 __skb_queue_tail(&chan
->srej_q
, skb
);
3612 err
= l2cap_push_rx_skb(chan
, skb
, rx_control
);
3616 if (rx_control
& L2CAP_CTRL_FINAL
) {
3617 if (chan
->conn_state
& L2CAP_CONN_REJ_ACT
)
3618 chan
->conn_state
&= ~L2CAP_CONN_REJ_ACT
;
3620 l2cap_retransmit_frames(chan
);
3625 chan
->num_acked
= (chan
->num_acked
+ 1) % num_to_ack
;
3626 if (chan
->num_acked
== num_to_ack
- 1)
3627 l2cap_send_ack(chan
);
3636 static inline void l2cap_data_channel_rrframe(struct l2cap_chan
*chan
, u16 rx_control
)
3638 BT_DBG("chan %p, req_seq %d ctrl 0x%4.4x", chan
, __get_reqseq(rx_control
),
3641 chan
->expected_ack_seq
= __get_reqseq(rx_control
);
3642 l2cap_drop_acked_frames(chan
);
3644 if (rx_control
& L2CAP_CTRL_POLL
) {
3645 chan
->conn_state
|= L2CAP_CONN_SEND_FBIT
;
3646 if (chan
->conn_state
& L2CAP_CONN_SREJ_SENT
) {
3647 if ((chan
->conn_state
& L2CAP_CONN_REMOTE_BUSY
) &&
3648 (chan
->unacked_frames
> 0))
3649 __mod_retrans_timer();
3651 chan
->conn_state
&= ~L2CAP_CONN_REMOTE_BUSY
;
3652 l2cap_send_srejtail(chan
);
3654 l2cap_send_i_or_rr_or_rnr(chan
);
3657 } else if (rx_control
& L2CAP_CTRL_FINAL
) {
3658 chan
->conn_state
&= ~L2CAP_CONN_REMOTE_BUSY
;
3660 if (chan
->conn_state
& L2CAP_CONN_REJ_ACT
)
3661 chan
->conn_state
&= ~L2CAP_CONN_REJ_ACT
;
3663 l2cap_retransmit_frames(chan
);
3666 if ((chan
->conn_state
& L2CAP_CONN_REMOTE_BUSY
) &&
3667 (chan
->unacked_frames
> 0))
3668 __mod_retrans_timer();
3670 chan
->conn_state
&= ~L2CAP_CONN_REMOTE_BUSY
;
3671 if (chan
->conn_state
& L2CAP_CONN_SREJ_SENT
)
3672 l2cap_send_ack(chan
);
3674 l2cap_ertm_send(chan
);
3678 static inline void l2cap_data_channel_rejframe(struct l2cap_chan
*chan
, u16 rx_control
)
3680 u8 tx_seq
= __get_reqseq(rx_control
);
3682 BT_DBG("chan %p, req_seq %d ctrl 0x%4.4x", chan
, tx_seq
, rx_control
);
3684 chan
->conn_state
&= ~L2CAP_CONN_REMOTE_BUSY
;
3686 chan
->expected_ack_seq
= tx_seq
;
3687 l2cap_drop_acked_frames(chan
);
3689 if (rx_control
& L2CAP_CTRL_FINAL
) {
3690 if (chan
->conn_state
& L2CAP_CONN_REJ_ACT
)
3691 chan
->conn_state
&= ~L2CAP_CONN_REJ_ACT
;
3693 l2cap_retransmit_frames(chan
);
3695 l2cap_retransmit_frames(chan
);
3697 if (chan
->conn_state
& L2CAP_CONN_WAIT_F
)
3698 chan
->conn_state
|= L2CAP_CONN_REJ_ACT
;
3701 static inline void l2cap_data_channel_srejframe(struct l2cap_chan
*chan
, u16 rx_control
)
3703 u8 tx_seq
= __get_reqseq(rx_control
);
3705 BT_DBG("chan %p, req_seq %d ctrl 0x%4.4x", chan
, tx_seq
, rx_control
);
3707 chan
->conn_state
&= ~L2CAP_CONN_REMOTE_BUSY
;
3709 if (rx_control
& L2CAP_CTRL_POLL
) {
3710 chan
->expected_ack_seq
= tx_seq
;
3711 l2cap_drop_acked_frames(chan
);
3713 chan
->conn_state
|= L2CAP_CONN_SEND_FBIT
;
3714 l2cap_retransmit_one_frame(chan
, tx_seq
);
3716 l2cap_ertm_send(chan
);
3718 if (chan
->conn_state
& L2CAP_CONN_WAIT_F
) {
3719 chan
->srej_save_reqseq
= tx_seq
;
3720 chan
->conn_state
|= L2CAP_CONN_SREJ_ACT
;
3722 } else if (rx_control
& L2CAP_CTRL_FINAL
) {
3723 if ((chan
->conn_state
& L2CAP_CONN_SREJ_ACT
) &&
3724 chan
->srej_save_reqseq
== tx_seq
)
3725 chan
->conn_state
&= ~L2CAP_CONN_SREJ_ACT
;
3727 l2cap_retransmit_one_frame(chan
, tx_seq
);
3729 l2cap_retransmit_one_frame(chan
, tx_seq
);
3730 if (chan
->conn_state
& L2CAP_CONN_WAIT_F
) {
3731 chan
->srej_save_reqseq
= tx_seq
;
3732 chan
->conn_state
|= L2CAP_CONN_SREJ_ACT
;
3737 static inline void l2cap_data_channel_rnrframe(struct l2cap_chan
*chan
, u16 rx_control
)
3739 u8 tx_seq
= __get_reqseq(rx_control
);
3741 BT_DBG("chan %p, req_seq %d ctrl 0x%4.4x", chan
, tx_seq
, rx_control
);
3743 chan
->conn_state
|= L2CAP_CONN_REMOTE_BUSY
;
3744 chan
->expected_ack_seq
= tx_seq
;
3745 l2cap_drop_acked_frames(chan
);
3747 if (rx_control
& L2CAP_CTRL_POLL
)
3748 chan
->conn_state
|= L2CAP_CONN_SEND_FBIT
;
3750 if (!(chan
->conn_state
& L2CAP_CONN_SREJ_SENT
)) {
3751 del_timer(&chan
->retrans_timer
);
3752 if (rx_control
& L2CAP_CTRL_POLL
)
3753 l2cap_send_rr_or_rnr(chan
, L2CAP_CTRL_FINAL
);
3757 if (rx_control
& L2CAP_CTRL_POLL
)
3758 l2cap_send_srejtail(chan
);
3760 l2cap_send_sframe(chan
, L2CAP_SUPER_RCV_READY
);
3763 static inline int l2cap_data_channel_sframe(struct l2cap_chan
*chan
, u16 rx_control
, struct sk_buff
*skb
)
3765 BT_DBG("chan %p rx_control 0x%4.4x len %d", chan
, rx_control
, skb
->len
);
3767 if (L2CAP_CTRL_FINAL
& rx_control
&&
3768 chan
->conn_state
& L2CAP_CONN_WAIT_F
) {
3769 del_timer(&chan
->monitor_timer
);
3770 if (chan
->unacked_frames
> 0)
3771 __mod_retrans_timer();
3772 chan
->conn_state
&= ~L2CAP_CONN_WAIT_F
;
3775 switch (rx_control
& L2CAP_CTRL_SUPERVISE
) {
3776 case L2CAP_SUPER_RCV_READY
:
3777 l2cap_data_channel_rrframe(chan
, rx_control
);
3780 case L2CAP_SUPER_REJECT
:
3781 l2cap_data_channel_rejframe(chan
, rx_control
);
3784 case L2CAP_SUPER_SELECT_REJECT
:
3785 l2cap_data_channel_srejframe(chan
, rx_control
);
3788 case L2CAP_SUPER_RCV_NOT_READY
:
3789 l2cap_data_channel_rnrframe(chan
, rx_control
);
3797 static int l2cap_ertm_data_rcv(struct sock
*sk
, struct sk_buff
*skb
)
3799 struct l2cap_chan
*chan
= l2cap_pi(sk
)->chan
;
3802 int len
, next_tx_seq_offset
, req_seq_offset
;
3804 control
= get_unaligned_le16(skb
->data
);
3809 * We can just drop the corrupted I-frame here.
3810 * Receiver will miss it and start proper recovery
3811 * procedures and ask retransmission.
3813 if (l2cap_check_fcs(chan
, skb
))
3816 if (__is_sar_start(control
) && __is_iframe(control
))
3819 if (chan
->fcs
== L2CAP_FCS_CRC16
)
3822 if (len
> chan
->mps
) {
3823 l2cap_send_disconn_req(chan
->conn
, chan
, ECONNRESET
);
3827 req_seq
= __get_reqseq(control
);
3828 req_seq_offset
= (req_seq
- chan
->expected_ack_seq
) % 64;
3829 if (req_seq_offset
< 0)
3830 req_seq_offset
+= 64;
3832 next_tx_seq_offset
=
3833 (chan
->next_tx_seq
- chan
->expected_ack_seq
) % 64;
3834 if (next_tx_seq_offset
< 0)
3835 next_tx_seq_offset
+= 64;
3837 /* check for invalid req-seq */
3838 if (req_seq_offset
> next_tx_seq_offset
) {
3839 l2cap_send_disconn_req(chan
->conn
, chan
, ECONNRESET
);
3843 if (__is_iframe(control
)) {
3845 l2cap_send_disconn_req(chan
->conn
, chan
, ECONNRESET
);
3849 l2cap_data_channel_iframe(chan
, control
, skb
);
3853 l2cap_send_disconn_req(chan
->conn
, chan
, ECONNRESET
);
3857 l2cap_data_channel_sframe(chan
, control
, skb
);
3867 static inline int l2cap_data_channel(struct l2cap_conn
*conn
, u16 cid
, struct sk_buff
*skb
)
3869 struct l2cap_chan
*chan
;
3870 struct sock
*sk
= NULL
;
3875 chan
= l2cap_get_chan_by_scid(conn
, cid
);
3877 BT_DBG("unknown cid 0x%4.4x", cid
);
3883 BT_DBG("chan %p, len %d", chan
, skb
->len
);
3885 if (sk
->sk_state
!= BT_CONNECTED
)
3888 switch (chan
->mode
) {
3889 case L2CAP_MODE_BASIC
:
3890 /* If socket recv buffers overflows we drop data here
3891 * which is *bad* because L2CAP has to be reliable.
3892 * But we don't have any other choice. L2CAP doesn't
3893 * provide flow control mechanism. */
3895 if (chan
->imtu
< skb
->len
)
3898 if (!sock_queue_rcv_skb(sk
, skb
))
3902 case L2CAP_MODE_ERTM
:
3903 if (!sock_owned_by_user(sk
)) {
3904 l2cap_ertm_data_rcv(sk
, skb
);
3906 if (sk_add_backlog(sk
, skb
))
3912 case L2CAP_MODE_STREAMING
:
3913 control
= get_unaligned_le16(skb
->data
);
3917 if (l2cap_check_fcs(chan
, skb
))
3920 if (__is_sar_start(control
))
3923 if (chan
->fcs
== L2CAP_FCS_CRC16
)
3926 if (len
> chan
->mps
|| len
< 0 || __is_sframe(control
))
3929 tx_seq
= __get_txseq(control
);
3931 if (chan
->expected_tx_seq
== tx_seq
)
3932 chan
->expected_tx_seq
= (chan
->expected_tx_seq
+ 1) % 64;
3934 chan
->expected_tx_seq
= (tx_seq
+ 1) % 64;
3936 l2cap_streaming_reassembly_sdu(chan
, skb
, control
);
3941 BT_DBG("chan %p: bad mode 0x%2.2x", chan
, chan
->mode
);
3955 static inline int l2cap_conless_channel(struct l2cap_conn
*conn
, __le16 psm
, struct sk_buff
*skb
)
3957 struct sock
*sk
= NULL
;
3958 struct l2cap_chan
*chan
;
3960 chan
= l2cap_global_chan_by_psm(0, psm
, conn
->src
);
3968 BT_DBG("sk %p, len %d", sk
, skb
->len
);
3970 if (sk
->sk_state
!= BT_BOUND
&& sk
->sk_state
!= BT_CONNECTED
)
3973 if (l2cap_pi(sk
)->chan
->imtu
< skb
->len
)
3976 if (!sock_queue_rcv_skb(sk
, skb
))
3988 static inline int l2cap_att_channel(struct l2cap_conn
*conn
, __le16 cid
, struct sk_buff
*skb
)
3990 struct sock
*sk
= NULL
;
3991 struct l2cap_chan
*chan
;
3993 chan
= l2cap_global_chan_by_scid(0, cid
, conn
->src
);
4001 BT_DBG("sk %p, len %d", sk
, skb
->len
);
4003 if (sk
->sk_state
!= BT_BOUND
&& sk
->sk_state
!= BT_CONNECTED
)
4006 if (l2cap_pi(sk
)->chan
->imtu
< skb
->len
)
4009 if (!sock_queue_rcv_skb(sk
, skb
))
4021 static void l2cap_recv_frame(struct l2cap_conn
*conn
, struct sk_buff
*skb
)
4023 struct l2cap_hdr
*lh
= (void *) skb
->data
;
4027 skb_pull(skb
, L2CAP_HDR_SIZE
);
4028 cid
= __le16_to_cpu(lh
->cid
);
4029 len
= __le16_to_cpu(lh
->len
);
4031 if (len
!= skb
->len
) {
4036 BT_DBG("len %d, cid 0x%4.4x", len
, cid
);
4039 case L2CAP_CID_LE_SIGNALING
:
4040 case L2CAP_CID_SIGNALING
:
4041 l2cap_sig_channel(conn
, skb
);
4044 case L2CAP_CID_CONN_LESS
:
4045 psm
= get_unaligned_le16(skb
->data
);
4047 l2cap_conless_channel(conn
, psm
, skb
);
4050 case L2CAP_CID_LE_DATA
:
4051 l2cap_att_channel(conn
, cid
, skb
);
4055 l2cap_data_channel(conn
, cid
, skb
);
4060 /* ---- L2CAP interface with lower layer (HCI) ---- */
4062 static int l2cap_connect_ind(struct hci_dev
*hdev
, bdaddr_t
*bdaddr
, u8 type
)
4064 int exact
= 0, lm1
= 0, lm2
= 0;
4065 struct l2cap_chan
*c
;
4067 if (type
!= ACL_LINK
)
4070 BT_DBG("hdev %s, bdaddr %s", hdev
->name
, batostr(bdaddr
));
4072 /* Find listening sockets and check their link_mode */
4073 read_lock(&chan_list_lock
);
4074 list_for_each_entry(c
, &chan_list
, global_l
) {
4075 struct sock
*sk
= c
->sk
;
4077 if (sk
->sk_state
!= BT_LISTEN
)
4080 if (!bacmp(&bt_sk(sk
)->src
, &hdev
->bdaddr
)) {
4081 lm1
|= HCI_LM_ACCEPT
;
4083 lm1
|= HCI_LM_MASTER
;
4085 } else if (!bacmp(&bt_sk(sk
)->src
, BDADDR_ANY
)) {
4086 lm2
|= HCI_LM_ACCEPT
;
4088 lm2
|= HCI_LM_MASTER
;
4091 read_unlock(&chan_list_lock
);
4093 return exact
? lm1
: lm2
;
4096 static int l2cap_connect_cfm(struct hci_conn
*hcon
, u8 status
)
4098 struct l2cap_conn
*conn
;
4100 BT_DBG("hcon %p bdaddr %s status %d", hcon
, batostr(&hcon
->dst
), status
);
4102 if (!(hcon
->type
== ACL_LINK
|| hcon
->type
== LE_LINK
))
4106 conn
= l2cap_conn_add(hcon
, status
);
4108 l2cap_conn_ready(conn
);
4110 l2cap_conn_del(hcon
, bt_err(status
));
4115 static int l2cap_disconn_ind(struct hci_conn
*hcon
)
4117 struct l2cap_conn
*conn
= hcon
->l2cap_data
;
4119 BT_DBG("hcon %p", hcon
);
4121 if (hcon
->type
!= ACL_LINK
|| !conn
)
4124 return conn
->disc_reason
;
4127 static int l2cap_disconn_cfm(struct hci_conn
*hcon
, u8 reason
)
4129 BT_DBG("hcon %p reason %d", hcon
, reason
);
4131 if (!(hcon
->type
== ACL_LINK
|| hcon
->type
== LE_LINK
))
4134 l2cap_conn_del(hcon
, bt_err(reason
));
4139 static inline void l2cap_check_encryption(struct l2cap_chan
*chan
, u8 encrypt
)
4141 if (chan
->chan_type
!= L2CAP_CHAN_CONN_ORIENTED
)
4144 if (encrypt
== 0x00) {
4145 if (chan
->sec_level
== BT_SECURITY_MEDIUM
) {
4146 l2cap_chan_clear_timer(chan
);
4147 l2cap_chan_set_timer(chan
, HZ
* 5);
4148 } else if (chan
->sec_level
== BT_SECURITY_HIGH
)
4149 l2cap_chan_close(chan
, ECONNREFUSED
);
4151 if (chan
->sec_level
== BT_SECURITY_MEDIUM
)
4152 l2cap_chan_clear_timer(chan
);
4156 static int l2cap_security_cfm(struct hci_conn
*hcon
, u8 status
, u8 encrypt
)
4158 struct l2cap_conn
*conn
= hcon
->l2cap_data
;
4159 struct l2cap_chan
*chan
;
4164 BT_DBG("conn %p", conn
);
4166 read_lock(&conn
->chan_lock
);
4168 list_for_each_entry(chan
, &conn
->chan_l
, list
) {
4169 struct sock
*sk
= chan
->sk
;
4173 if (chan
->conf_state
& L2CAP_CONF_CONNECT_PEND
) {
4178 if (!status
&& (sk
->sk_state
== BT_CONNECTED
||
4179 sk
->sk_state
== BT_CONFIG
)) {
4180 l2cap_check_encryption(chan
, encrypt
);
4185 if (sk
->sk_state
== BT_CONNECT
) {
4187 struct l2cap_conn_req req
;
4188 req
.scid
= cpu_to_le16(chan
->scid
);
4189 req
.psm
= chan
->psm
;
4191 chan
->ident
= l2cap_get_ident(conn
);
4192 chan
->conf_state
|= L2CAP_CONF_CONNECT_PEND
;
4194 l2cap_send_cmd(conn
, chan
->ident
,
4195 L2CAP_CONN_REQ
, sizeof(req
), &req
);
4197 l2cap_chan_clear_timer(chan
);
4198 l2cap_chan_set_timer(chan
, HZ
/ 10);
4200 } else if (sk
->sk_state
== BT_CONNECT2
) {
4201 struct l2cap_conn_rsp rsp
;
4205 sk
->sk_state
= BT_CONFIG
;
4206 result
= L2CAP_CR_SUCCESS
;
4208 sk
->sk_state
= BT_DISCONN
;
4209 l2cap_chan_set_timer(chan
, HZ
/ 10);
4210 result
= L2CAP_CR_SEC_BLOCK
;
4213 rsp
.scid
= cpu_to_le16(chan
->dcid
);
4214 rsp
.dcid
= cpu_to_le16(chan
->scid
);
4215 rsp
.result
= cpu_to_le16(result
);
4216 rsp
.status
= cpu_to_le16(L2CAP_CS_NO_INFO
);
4217 l2cap_send_cmd(conn
, chan
->ident
, L2CAP_CONN_RSP
,
4224 read_unlock(&conn
->chan_lock
);
4229 static int l2cap_recv_acldata(struct hci_conn
*hcon
, struct sk_buff
*skb
, u16 flags
)
4231 struct l2cap_conn
*conn
= hcon
->l2cap_data
;
4234 conn
= l2cap_conn_add(hcon
, 0);
4239 BT_DBG("conn %p len %d flags 0x%x", conn
, skb
->len
, flags
);
4241 if (!(flags
& ACL_CONT
)) {
4242 struct l2cap_hdr
*hdr
;
4243 struct l2cap_chan
*chan
;
4248 BT_ERR("Unexpected start frame (len %d)", skb
->len
);
4249 kfree_skb(conn
->rx_skb
);
4250 conn
->rx_skb
= NULL
;
4252 l2cap_conn_unreliable(conn
, ECOMM
);
4255 /* Start fragment always begin with Basic L2CAP header */
4256 if (skb
->len
< L2CAP_HDR_SIZE
) {
4257 BT_ERR("Frame is too short (len %d)", skb
->len
);
4258 l2cap_conn_unreliable(conn
, ECOMM
);
4262 hdr
= (struct l2cap_hdr
*) skb
->data
;
4263 len
= __le16_to_cpu(hdr
->len
) + L2CAP_HDR_SIZE
;
4264 cid
= __le16_to_cpu(hdr
->cid
);
4266 if (len
== skb
->len
) {
4267 /* Complete frame received */
4268 l2cap_recv_frame(conn
, skb
);
4272 BT_DBG("Start: total len %d, frag len %d", len
, skb
->len
);
4274 if (skb
->len
> len
) {
4275 BT_ERR("Frame is too long (len %d, expected len %d)",
4277 l2cap_conn_unreliable(conn
, ECOMM
);
4281 chan
= l2cap_get_chan_by_scid(conn
, cid
);
4283 if (chan
&& chan
->sk
) {
4284 struct sock
*sk
= chan
->sk
;
4286 if (chan
->imtu
< len
- L2CAP_HDR_SIZE
) {
4287 BT_ERR("Frame exceeding recv MTU (len %d, "
4291 l2cap_conn_unreliable(conn
, ECOMM
);
4297 /* Allocate skb for the complete frame (with header) */
4298 conn
->rx_skb
= bt_skb_alloc(len
, GFP_ATOMIC
);
4302 skb_copy_from_linear_data(skb
, skb_put(conn
->rx_skb
, skb
->len
),
4304 conn
->rx_len
= len
- skb
->len
;
4306 BT_DBG("Cont: frag len %d (expecting %d)", skb
->len
, conn
->rx_len
);
4308 if (!conn
->rx_len
) {
4309 BT_ERR("Unexpected continuation frame (len %d)", skb
->len
);
4310 l2cap_conn_unreliable(conn
, ECOMM
);
4314 if (skb
->len
> conn
->rx_len
) {
4315 BT_ERR("Fragment is too long (len %d, expected %d)",
4316 skb
->len
, conn
->rx_len
);
4317 kfree_skb(conn
->rx_skb
);
4318 conn
->rx_skb
= NULL
;
4320 l2cap_conn_unreliable(conn
, ECOMM
);
4324 skb_copy_from_linear_data(skb
, skb_put(conn
->rx_skb
, skb
->len
),
4326 conn
->rx_len
-= skb
->len
;
4328 if (!conn
->rx_len
) {
4329 /* Complete frame received */
4330 l2cap_recv_frame(conn
, conn
->rx_skb
);
4331 conn
->rx_skb
= NULL
;
4340 static int l2cap_debugfs_show(struct seq_file
*f
, void *p
)
4342 struct l2cap_chan
*c
;
4344 read_lock_bh(&chan_list_lock
);
4346 list_for_each_entry(c
, &chan_list
, global_l
) {
4347 struct sock
*sk
= c
->sk
;
4349 seq_printf(f
, "%s %s %d %d 0x%4.4x 0x%4.4x %d %d %d %d\n",
4350 batostr(&bt_sk(sk
)->src
),
4351 batostr(&bt_sk(sk
)->dst
),
4352 sk
->sk_state
, __le16_to_cpu(c
->psm
),
4353 c
->scid
, c
->dcid
, c
->imtu
, c
->omtu
,
4354 c
->sec_level
, c
->mode
);
4357 read_unlock_bh(&chan_list_lock
);
4362 static int l2cap_debugfs_open(struct inode
*inode
, struct file
*file
)
4364 return single_open(file
, l2cap_debugfs_show
, inode
->i_private
);
4367 static const struct file_operations l2cap_debugfs_fops
= {
4368 .open
= l2cap_debugfs_open
,
4370 .llseek
= seq_lseek
,
4371 .release
= single_release
,
4374 static struct dentry
*l2cap_debugfs
;
4376 static struct hci_proto l2cap_hci_proto
= {
4378 .id
= HCI_PROTO_L2CAP
,
4379 .connect_ind
= l2cap_connect_ind
,
4380 .connect_cfm
= l2cap_connect_cfm
,
4381 .disconn_ind
= l2cap_disconn_ind
,
4382 .disconn_cfm
= l2cap_disconn_cfm
,
4383 .security_cfm
= l2cap_security_cfm
,
4384 .recv_acldata
= l2cap_recv_acldata
4387 int __init
l2cap_init(void)
4391 err
= l2cap_init_sockets();
4395 _busy_wq
= create_singlethread_workqueue("l2cap");
4401 err
= hci_register_proto(&l2cap_hci_proto
);
4403 BT_ERR("L2CAP protocol registration failed");
4404 bt_sock_unregister(BTPROTO_L2CAP
);
4409 l2cap_debugfs
= debugfs_create_file("l2cap", 0444,
4410 bt_debugfs
, NULL
, &l2cap_debugfs_fops
);
4412 BT_ERR("Failed to create L2CAP debug file");
4418 destroy_workqueue(_busy_wq
);
4419 l2cap_cleanup_sockets();
4423 void l2cap_exit(void)
4425 debugfs_remove(l2cap_debugfs
);
4427 flush_workqueue(_busy_wq
);
4428 destroy_workqueue(_busy_wq
);
4430 if (hci_unregister_proto(&l2cap_hci_proto
) < 0)
4431 BT_ERR("L2CAP protocol unregistration failed");
4433 l2cap_cleanup_sockets();
4436 module_param(disable_ertm
, bool, 0644);
4437 MODULE_PARM_DESC(disable_ertm
, "Disable enhanced retransmission mode");