2 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (C) 2000-2001 Qualcomm Incorporated
4 Copyright (C) 2009-2010 Gustavo F. Padovan <gustavo@padovan.org>
5 Copyright (C) 2010 Google Inc.
7 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
9 This program is free software; you can redistribute it and/or modify
10 it under the terms of the GNU General Public License version 2 as
11 published by the Free Software Foundation;
13 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
14 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
15 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
16 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
17 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
18 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
19 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
20 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
22 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
23 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
24 SOFTWARE IS DISCLAIMED.
27 /* Bluetooth L2CAP core. */
29 #include <linux/module.h>
31 #include <linux/types.h>
32 #include <linux/capability.h>
33 #include <linux/errno.h>
34 #include <linux/kernel.h>
35 #include <linux/sched.h>
36 #include <linux/slab.h>
37 #include <linux/poll.h>
38 #include <linux/fcntl.h>
39 #include <linux/init.h>
40 #include <linux/interrupt.h>
41 #include <linux/socket.h>
42 #include <linux/skbuff.h>
43 #include <linux/list.h>
44 #include <linux/device.h>
45 #include <linux/debugfs.h>
46 #include <linux/seq_file.h>
47 #include <linux/uaccess.h>
48 #include <linux/crc16.h>
51 #include <asm/system.h>
52 #include <asm/unaligned.h>
54 #include <net/bluetooth/bluetooth.h>
55 #include <net/bluetooth/hci_core.h>
56 #include <net/bluetooth/l2cap.h>
60 static u32 l2cap_feat_mask
= L2CAP_FEAT_FIXED_CHAN
;
61 static u8 l2cap_fixed_chan
[8] = { 0x02, };
63 static struct workqueue_struct
*_busy_wq
;
65 struct bt_sock_list l2cap_sk_list
= {
66 .lock
= __RW_LOCK_UNLOCKED(l2cap_sk_list
.lock
)
69 static void l2cap_busy_work(struct work_struct
*work
);
71 static struct sk_buff
*l2cap_build_cmd(struct l2cap_conn
*conn
,
72 u8 code
, u8 ident
, u16 dlen
, void *data
);
73 static int l2cap_build_conf_req(struct l2cap_chan
*chan
, void *data
);
75 static int l2cap_ertm_data_rcv(struct sock
*sk
, struct sk_buff
*skb
);
77 /* ---- L2CAP channels ---- */
78 static struct l2cap_chan
*__l2cap_get_chan_by_dcid(struct l2cap_conn
*conn
, u16 cid
)
82 list_for_each_entry(c
, &conn
->chan_l
, list
) {
83 struct sock
*s
= c
->sk
;
84 if (l2cap_pi(s
)->dcid
== cid
)
91 static struct l2cap_chan
*__l2cap_get_chan_by_scid(struct l2cap_conn
*conn
, u16 cid
)
95 list_for_each_entry(c
, &conn
->chan_l
, list
) {
96 struct sock
*s
= c
->sk
;
97 if (l2cap_pi(s
)->scid
== cid
)
103 /* Find channel with given SCID.
104 * Returns locked socket */
105 static struct l2cap_chan
*l2cap_get_chan_by_scid(struct l2cap_conn
*conn
, u16 cid
)
107 struct l2cap_chan
*c
;
109 read_lock(&conn
->chan_lock
);
110 c
= __l2cap_get_chan_by_scid(conn
, cid
);
113 read_unlock(&conn
->chan_lock
);
117 static struct l2cap_chan
*__l2cap_get_chan_by_ident(struct l2cap_conn
*conn
, u8 ident
)
119 struct l2cap_chan
*c
;
121 list_for_each_entry(c
, &conn
->chan_l
, list
) {
122 if (c
->ident
== ident
)
128 static inline struct l2cap_chan
*l2cap_get_chan_by_ident(struct l2cap_conn
*conn
, u8 ident
)
130 struct l2cap_chan
*c
;
132 read_lock(&conn
->chan_lock
);
133 c
= __l2cap_get_chan_by_ident(conn
, ident
);
136 read_unlock(&conn
->chan_lock
);
140 static u16
l2cap_alloc_cid(struct l2cap_conn
*conn
)
142 u16 cid
= L2CAP_CID_DYN_START
;
144 for (; cid
< L2CAP_CID_DYN_END
; cid
++) {
145 if (!__l2cap_get_chan_by_scid(conn
, cid
))
152 static struct l2cap_chan
*l2cap_chan_alloc(struct sock
*sk
)
154 struct l2cap_chan
*chan
;
156 chan
= kzalloc(sizeof(*chan
), GFP_ATOMIC
);
165 static void __l2cap_chan_add(struct l2cap_conn
*conn
, struct l2cap_chan
*chan
)
167 struct sock
*sk
= chan
->sk
;
169 BT_DBG("conn %p, psm 0x%2.2x, dcid 0x%4.4x", conn
,
170 l2cap_pi(sk
)->psm
, l2cap_pi(sk
)->dcid
);
172 conn
->disc_reason
= 0x13;
174 l2cap_pi(sk
)->conn
= conn
;
176 if (sk
->sk_type
== SOCK_SEQPACKET
|| sk
->sk_type
== SOCK_STREAM
) {
177 if (conn
->hcon
->type
== LE_LINK
) {
179 l2cap_pi(sk
)->omtu
= L2CAP_LE_DEFAULT_MTU
;
180 l2cap_pi(sk
)->scid
= L2CAP_CID_LE_DATA
;
181 l2cap_pi(sk
)->dcid
= L2CAP_CID_LE_DATA
;
183 /* Alloc CID for connection-oriented socket */
184 l2cap_pi(sk
)->scid
= l2cap_alloc_cid(conn
);
185 l2cap_pi(sk
)->omtu
= L2CAP_DEFAULT_MTU
;
187 } else if (sk
->sk_type
== SOCK_DGRAM
) {
188 /* Connectionless socket */
189 l2cap_pi(sk
)->scid
= L2CAP_CID_CONN_LESS
;
190 l2cap_pi(sk
)->dcid
= L2CAP_CID_CONN_LESS
;
191 l2cap_pi(sk
)->omtu
= L2CAP_DEFAULT_MTU
;
193 /* Raw socket can send/recv signalling messages only */
194 l2cap_pi(sk
)->scid
= L2CAP_CID_SIGNALING
;
195 l2cap_pi(sk
)->dcid
= L2CAP_CID_SIGNALING
;
196 l2cap_pi(sk
)->omtu
= L2CAP_DEFAULT_MTU
;
201 list_add(&chan
->list
, &conn
->chan_l
);
205 * Must be called on the locked socket. */
206 void l2cap_chan_del(struct l2cap_chan
*chan
, int err
)
208 struct sock
*sk
= chan
->sk
;
209 struct l2cap_conn
*conn
= l2cap_pi(sk
)->conn
;
210 struct sock
*parent
= bt_sk(sk
)->parent
;
212 l2cap_sock_clear_timer(sk
);
214 BT_DBG("chan %p, conn %p, err %d", chan
, conn
, err
);
217 /* Delete from channel list */
218 write_lock_bh(&conn
->chan_lock
);
219 list_del(&chan
->list
);
220 write_unlock_bh(&conn
->chan_lock
);
223 l2cap_pi(sk
)->conn
= NULL
;
224 hci_conn_put(conn
->hcon
);
227 sk
->sk_state
= BT_CLOSED
;
228 sock_set_flag(sk
, SOCK_ZAPPED
);
234 bt_accept_unlink(sk
);
235 parent
->sk_data_ready(parent
, 0);
237 sk
->sk_state_change(sk
);
239 if (!(l2cap_pi(sk
)->conf_state
& L2CAP_CONF_OUTPUT_DONE
&&
240 l2cap_pi(sk
)->conf_state
& L2CAP_CONF_INPUT_DONE
))
243 skb_queue_purge(&chan
->tx_q
);
245 if (l2cap_pi(sk
)->mode
== L2CAP_MODE_ERTM
) {
246 struct srej_list
*l
, *tmp
;
248 del_timer(&chan
->retrans_timer
);
249 del_timer(&chan
->monitor_timer
);
250 del_timer(&chan
->ack_timer
);
252 skb_queue_purge(&chan
->srej_q
);
253 skb_queue_purge(&chan
->busy_q
);
255 list_for_each_entry_safe(l
, tmp
, &chan
->srej_l
, list
) {
265 static inline u8
l2cap_get_auth_type(struct sock
*sk
)
267 if (sk
->sk_type
== SOCK_RAW
) {
268 switch (l2cap_pi(sk
)->sec_level
) {
269 case BT_SECURITY_HIGH
:
270 return HCI_AT_DEDICATED_BONDING_MITM
;
271 case BT_SECURITY_MEDIUM
:
272 return HCI_AT_DEDICATED_BONDING
;
274 return HCI_AT_NO_BONDING
;
276 } else if (l2cap_pi(sk
)->psm
== cpu_to_le16(0x0001)) {
277 if (l2cap_pi(sk
)->sec_level
== BT_SECURITY_LOW
)
278 l2cap_pi(sk
)->sec_level
= BT_SECURITY_SDP
;
280 if (l2cap_pi(sk
)->sec_level
== BT_SECURITY_HIGH
)
281 return HCI_AT_NO_BONDING_MITM
;
283 return HCI_AT_NO_BONDING
;
285 switch (l2cap_pi(sk
)->sec_level
) {
286 case BT_SECURITY_HIGH
:
287 return HCI_AT_GENERAL_BONDING_MITM
;
288 case BT_SECURITY_MEDIUM
:
289 return HCI_AT_GENERAL_BONDING
;
291 return HCI_AT_NO_BONDING
;
296 /* Service level security */
297 static inline int l2cap_check_security(struct sock
*sk
)
299 struct l2cap_conn
*conn
= l2cap_pi(sk
)->conn
;
302 auth_type
= l2cap_get_auth_type(sk
);
304 return hci_conn_security(conn
->hcon
, l2cap_pi(sk
)->sec_level
,
308 u8
l2cap_get_ident(struct l2cap_conn
*conn
)
312 /* Get next available identificator.
313 * 1 - 128 are used by kernel.
314 * 129 - 199 are reserved.
315 * 200 - 254 are used by utilities like l2ping, etc.
318 spin_lock_bh(&conn
->lock
);
320 if (++conn
->tx_ident
> 128)
325 spin_unlock_bh(&conn
->lock
);
330 void l2cap_send_cmd(struct l2cap_conn
*conn
, u8 ident
, u8 code
, u16 len
, void *data
)
332 struct sk_buff
*skb
= l2cap_build_cmd(conn
, code
, ident
, len
, data
);
335 BT_DBG("code 0x%2.2x", code
);
340 if (lmp_no_flush_capable(conn
->hcon
->hdev
))
341 flags
= ACL_START_NO_FLUSH
;
345 hci_send_acl(conn
->hcon
, skb
, flags
);
348 static inline void l2cap_send_sframe(struct l2cap_chan
*chan
, u16 control
)
351 struct l2cap_hdr
*lh
;
352 struct l2cap_pinfo
*pi
= l2cap_pi(chan
->sk
);
353 struct l2cap_conn
*conn
= pi
->conn
;
354 struct sock
*sk
= (struct sock
*)pi
;
355 int count
, hlen
= L2CAP_HDR_SIZE
+ 2;
358 if (sk
->sk_state
!= BT_CONNECTED
)
361 if (pi
->fcs
== L2CAP_FCS_CRC16
)
364 BT_DBG("chan %p, control 0x%2.2x", chan
, control
);
366 count
= min_t(unsigned int, conn
->mtu
, hlen
);
367 control
|= L2CAP_CTRL_FRAME_TYPE
;
369 if (chan
->conn_state
& L2CAP_CONN_SEND_FBIT
) {
370 control
|= L2CAP_CTRL_FINAL
;
371 chan
->conn_state
&= ~L2CAP_CONN_SEND_FBIT
;
374 if (chan
->conn_state
& L2CAP_CONN_SEND_PBIT
) {
375 control
|= L2CAP_CTRL_POLL
;
376 chan
->conn_state
&= ~L2CAP_CONN_SEND_PBIT
;
379 skb
= bt_skb_alloc(count
, GFP_ATOMIC
);
383 lh
= (struct l2cap_hdr
*) skb_put(skb
, L2CAP_HDR_SIZE
);
384 lh
->len
= cpu_to_le16(hlen
- L2CAP_HDR_SIZE
);
385 lh
->cid
= cpu_to_le16(pi
->dcid
);
386 put_unaligned_le16(control
, skb_put(skb
, 2));
388 if (pi
->fcs
== L2CAP_FCS_CRC16
) {
389 u16 fcs
= crc16(0, (u8
*)lh
, count
- 2);
390 put_unaligned_le16(fcs
, skb_put(skb
, 2));
393 if (lmp_no_flush_capable(conn
->hcon
->hdev
))
394 flags
= ACL_START_NO_FLUSH
;
398 hci_send_acl(pi
->conn
->hcon
, skb
, flags
);
401 static inline void l2cap_send_rr_or_rnr(struct l2cap_chan
*chan
, u16 control
)
403 if (chan
->conn_state
& L2CAP_CONN_LOCAL_BUSY
) {
404 control
|= L2CAP_SUPER_RCV_NOT_READY
;
405 chan
->conn_state
|= L2CAP_CONN_RNR_SENT
;
407 control
|= L2CAP_SUPER_RCV_READY
;
409 control
|= chan
->buffer_seq
<< L2CAP_CTRL_REQSEQ_SHIFT
;
411 l2cap_send_sframe(chan
, control
);
414 static inline int __l2cap_no_conn_pending(struct sock
*sk
)
416 return !(l2cap_pi(sk
)->conf_state
& L2CAP_CONF_CONNECT_PEND
);
419 static void l2cap_do_start(struct l2cap_chan
*chan
)
421 struct sock
*sk
= chan
->sk
;
422 struct l2cap_conn
*conn
= l2cap_pi(sk
)->conn
;
424 if (conn
->info_state
& L2CAP_INFO_FEAT_MASK_REQ_SENT
) {
425 if (!(conn
->info_state
& L2CAP_INFO_FEAT_MASK_REQ_DONE
))
428 if (l2cap_check_security(sk
) && __l2cap_no_conn_pending(sk
)) {
429 struct l2cap_conn_req req
;
430 req
.scid
= cpu_to_le16(l2cap_pi(sk
)->scid
);
431 req
.psm
= l2cap_pi(sk
)->psm
;
433 chan
->ident
= l2cap_get_ident(conn
);
434 l2cap_pi(sk
)->conf_state
|= L2CAP_CONF_CONNECT_PEND
;
436 l2cap_send_cmd(conn
, chan
->ident
, L2CAP_CONN_REQ
,
440 struct l2cap_info_req req
;
441 req
.type
= cpu_to_le16(L2CAP_IT_FEAT_MASK
);
443 conn
->info_state
|= L2CAP_INFO_FEAT_MASK_REQ_SENT
;
444 conn
->info_ident
= l2cap_get_ident(conn
);
446 mod_timer(&conn
->info_timer
, jiffies
+
447 msecs_to_jiffies(L2CAP_INFO_TIMEOUT
));
449 l2cap_send_cmd(conn
, conn
->info_ident
,
450 L2CAP_INFO_REQ
, sizeof(req
), &req
);
454 static inline int l2cap_mode_supported(__u8 mode
, __u32 feat_mask
)
456 u32 local_feat_mask
= l2cap_feat_mask
;
458 local_feat_mask
|= L2CAP_FEAT_ERTM
| L2CAP_FEAT_STREAMING
;
461 case L2CAP_MODE_ERTM
:
462 return L2CAP_FEAT_ERTM
& feat_mask
& local_feat_mask
;
463 case L2CAP_MODE_STREAMING
:
464 return L2CAP_FEAT_STREAMING
& feat_mask
& local_feat_mask
;
470 void l2cap_send_disconn_req(struct l2cap_conn
*conn
, struct l2cap_chan
*chan
, int err
)
473 struct l2cap_disconn_req req
;
480 skb_queue_purge(&chan
->tx_q
);
482 if (l2cap_pi(sk
)->mode
== L2CAP_MODE_ERTM
) {
483 del_timer(&chan
->retrans_timer
);
484 del_timer(&chan
->monitor_timer
);
485 del_timer(&chan
->ack_timer
);
488 req
.dcid
= cpu_to_le16(l2cap_pi(sk
)->dcid
);
489 req
.scid
= cpu_to_le16(l2cap_pi(sk
)->scid
);
490 l2cap_send_cmd(conn
, l2cap_get_ident(conn
),
491 L2CAP_DISCONN_REQ
, sizeof(req
), &req
);
493 sk
->sk_state
= BT_DISCONN
;
497 /* ---- L2CAP connections ---- */
498 static void l2cap_conn_start(struct l2cap_conn
*conn
)
500 struct l2cap_chan
*chan
, *tmp
;
502 BT_DBG("conn %p", conn
);
504 read_lock(&conn
->chan_lock
);
506 list_for_each_entry_safe(chan
, tmp
, &conn
->chan_l
, list
) {
507 struct sock
*sk
= chan
->sk
;
511 if (sk
->sk_type
!= SOCK_SEQPACKET
&&
512 sk
->sk_type
!= SOCK_STREAM
) {
517 if (sk
->sk_state
== BT_CONNECT
) {
518 struct l2cap_conn_req req
;
520 if (!l2cap_check_security(sk
) ||
521 !__l2cap_no_conn_pending(sk
)) {
526 if (!l2cap_mode_supported(l2cap_pi(sk
)->mode
,
528 && l2cap_pi(sk
)->conf_state
&
529 L2CAP_CONF_STATE2_DEVICE
) {
530 /* __l2cap_sock_close() calls list_del(chan)
531 * so release the lock */
532 read_unlock_bh(&conn
->chan_lock
);
533 __l2cap_sock_close(sk
, ECONNRESET
);
534 read_lock_bh(&conn
->chan_lock
);
539 req
.scid
= cpu_to_le16(l2cap_pi(sk
)->scid
);
540 req
.psm
= l2cap_pi(sk
)->psm
;
542 chan
->ident
= l2cap_get_ident(conn
);
543 l2cap_pi(sk
)->conf_state
|= L2CAP_CONF_CONNECT_PEND
;
545 l2cap_send_cmd(conn
, chan
->ident
, L2CAP_CONN_REQ
,
548 } else if (sk
->sk_state
== BT_CONNECT2
) {
549 struct l2cap_conn_rsp rsp
;
551 rsp
.scid
= cpu_to_le16(l2cap_pi(sk
)->dcid
);
552 rsp
.dcid
= cpu_to_le16(l2cap_pi(sk
)->scid
);
554 if (l2cap_check_security(sk
)) {
555 if (bt_sk(sk
)->defer_setup
) {
556 struct sock
*parent
= bt_sk(sk
)->parent
;
557 rsp
.result
= cpu_to_le16(L2CAP_CR_PEND
);
558 rsp
.status
= cpu_to_le16(L2CAP_CS_AUTHOR_PEND
);
559 parent
->sk_data_ready(parent
, 0);
562 sk
->sk_state
= BT_CONFIG
;
563 rsp
.result
= cpu_to_le16(L2CAP_CR_SUCCESS
);
564 rsp
.status
= cpu_to_le16(L2CAP_CS_NO_INFO
);
567 rsp
.result
= cpu_to_le16(L2CAP_CR_PEND
);
568 rsp
.status
= cpu_to_le16(L2CAP_CS_AUTHEN_PEND
);
571 l2cap_send_cmd(conn
, chan
->ident
, L2CAP_CONN_RSP
,
574 if (l2cap_pi(sk
)->conf_state
& L2CAP_CONF_REQ_SENT
||
575 rsp
.result
!= L2CAP_CR_SUCCESS
) {
580 l2cap_pi(sk
)->conf_state
|= L2CAP_CONF_REQ_SENT
;
581 l2cap_send_cmd(conn
, l2cap_get_ident(conn
), L2CAP_CONF_REQ
,
582 l2cap_build_conf_req(chan
, buf
), buf
);
583 chan
->num_conf_req
++;
589 read_unlock(&conn
->chan_lock
);
592 /* Find socket with cid and source bdaddr.
593 * Returns closest match, locked.
595 static struct sock
*l2cap_get_sock_by_scid(int state
, __le16 cid
, bdaddr_t
*src
)
597 struct sock
*sk
= NULL
, *sk1
= NULL
;
598 struct hlist_node
*node
;
600 read_lock(&l2cap_sk_list
.lock
);
602 sk_for_each(sk
, node
, &l2cap_sk_list
.head
) {
603 if (state
&& sk
->sk_state
!= state
)
606 if (l2cap_pi(sk
)->scid
== cid
) {
608 if (!bacmp(&bt_sk(sk
)->src
, src
))
612 if (!bacmp(&bt_sk(sk
)->src
, BDADDR_ANY
))
617 read_unlock(&l2cap_sk_list
.lock
);
619 return node
? sk
: sk1
;
622 static void l2cap_le_conn_ready(struct l2cap_conn
*conn
)
624 struct sock
*parent
, *sk
;
625 struct l2cap_chan
*chan
;
629 /* Check if we have socket listening on cid */
630 parent
= l2cap_get_sock_by_scid(BT_LISTEN
, L2CAP_CID_LE_DATA
,
635 /* Check for backlog size */
636 if (sk_acceptq_is_full(parent
)) {
637 BT_DBG("backlog full %d", parent
->sk_ack_backlog
);
641 sk
= l2cap_sock_alloc(sock_net(parent
), NULL
, BTPROTO_L2CAP
, GFP_ATOMIC
);
645 chan
= l2cap_chan_alloc(sk
);
651 write_lock_bh(&conn
->chan_lock
);
653 hci_conn_hold(conn
->hcon
);
655 l2cap_sock_init(sk
, parent
);
657 bacpy(&bt_sk(sk
)->src
, conn
->src
);
658 bacpy(&bt_sk(sk
)->dst
, conn
->dst
);
660 bt_accept_enqueue(parent
, sk
);
662 __l2cap_chan_add(conn
, chan
);
664 l2cap_pi(sk
)->chan
= chan
;
666 l2cap_sock_set_timer(sk
, sk
->sk_sndtimeo
);
668 sk
->sk_state
= BT_CONNECTED
;
669 parent
->sk_data_ready(parent
, 0);
671 write_unlock_bh(&conn
->chan_lock
);
674 bh_unlock_sock(parent
);
677 static void l2cap_conn_ready(struct l2cap_conn
*conn
)
679 struct l2cap_chan
*chan
;
681 BT_DBG("conn %p", conn
);
683 if (!conn
->hcon
->out
&& conn
->hcon
->type
== LE_LINK
)
684 l2cap_le_conn_ready(conn
);
686 read_lock(&conn
->chan_lock
);
688 list_for_each_entry(chan
, &conn
->chan_l
, list
) {
689 struct sock
*sk
= chan
->sk
;
693 if (conn
->hcon
->type
== LE_LINK
) {
694 l2cap_sock_clear_timer(sk
);
695 sk
->sk_state
= BT_CONNECTED
;
696 sk
->sk_state_change(sk
);
699 if (sk
->sk_type
!= SOCK_SEQPACKET
&&
700 sk
->sk_type
!= SOCK_STREAM
) {
701 l2cap_sock_clear_timer(sk
);
702 sk
->sk_state
= BT_CONNECTED
;
703 sk
->sk_state_change(sk
);
704 } else if (sk
->sk_state
== BT_CONNECT
)
705 l2cap_do_start(chan
);
710 read_unlock(&conn
->chan_lock
);
713 /* Notify sockets that we cannot guaranty reliability anymore */
714 static void l2cap_conn_unreliable(struct l2cap_conn
*conn
, int err
)
716 struct l2cap_chan
*chan
;
718 BT_DBG("conn %p", conn
);
720 read_lock(&conn
->chan_lock
);
722 list_for_each_entry(chan
, &conn
->chan_l
, list
) {
723 struct sock
*sk
= chan
->sk
;
725 if (l2cap_pi(sk
)->force_reliable
)
729 read_unlock(&conn
->chan_lock
);
732 static void l2cap_info_timeout(unsigned long arg
)
734 struct l2cap_conn
*conn
= (void *) arg
;
736 conn
->info_state
|= L2CAP_INFO_FEAT_MASK_REQ_DONE
;
737 conn
->info_ident
= 0;
739 l2cap_conn_start(conn
);
742 static struct l2cap_conn
*l2cap_conn_add(struct hci_conn
*hcon
, u8 status
)
744 struct l2cap_conn
*conn
= hcon
->l2cap_data
;
749 conn
= kzalloc(sizeof(struct l2cap_conn
), GFP_ATOMIC
);
753 hcon
->l2cap_data
= conn
;
756 BT_DBG("hcon %p conn %p", hcon
, conn
);
758 if (hcon
->hdev
->le_mtu
&& hcon
->type
== LE_LINK
)
759 conn
->mtu
= hcon
->hdev
->le_mtu
;
761 conn
->mtu
= hcon
->hdev
->acl_mtu
;
763 conn
->src
= &hcon
->hdev
->bdaddr
;
764 conn
->dst
= &hcon
->dst
;
768 spin_lock_init(&conn
->lock
);
769 rwlock_init(&conn
->chan_lock
);
771 INIT_LIST_HEAD(&conn
->chan_l
);
773 if (hcon
->type
!= LE_LINK
)
774 setup_timer(&conn
->info_timer
, l2cap_info_timeout
,
775 (unsigned long) conn
);
777 conn
->disc_reason
= 0x13;
782 static void l2cap_conn_del(struct hci_conn
*hcon
, int err
)
784 struct l2cap_conn
*conn
= hcon
->l2cap_data
;
785 struct l2cap_chan
*chan
, *l
;
791 BT_DBG("hcon %p conn %p, err %d", hcon
, conn
, err
);
793 kfree_skb(conn
->rx_skb
);
796 list_for_each_entry_safe(chan
, l
, &conn
->chan_l
, list
) {
799 l2cap_chan_del(chan
, err
);
804 if (conn
->info_state
& L2CAP_INFO_FEAT_MASK_REQ_SENT
)
805 del_timer_sync(&conn
->info_timer
);
807 hcon
->l2cap_data
= NULL
;
811 static inline void l2cap_chan_add(struct l2cap_conn
*conn
, struct l2cap_chan
*chan
)
813 write_lock_bh(&conn
->chan_lock
);
814 __l2cap_chan_add(conn
, chan
);
815 write_unlock_bh(&conn
->chan_lock
);
818 /* ---- Socket interface ---- */
820 /* Find socket with psm and source bdaddr.
821 * Returns closest match.
823 static struct sock
*l2cap_get_sock_by_psm(int state
, __le16 psm
, bdaddr_t
*src
)
825 struct sock
*sk
= NULL
, *sk1
= NULL
;
826 struct hlist_node
*node
;
828 read_lock(&l2cap_sk_list
.lock
);
830 sk_for_each(sk
, node
, &l2cap_sk_list
.head
) {
831 if (state
&& sk
->sk_state
!= state
)
834 if (l2cap_pi(sk
)->psm
== psm
) {
836 if (!bacmp(&bt_sk(sk
)->src
, src
))
840 if (!bacmp(&bt_sk(sk
)->src
, BDADDR_ANY
))
845 read_unlock(&l2cap_sk_list
.lock
);
847 return node
? sk
: sk1
;
850 int l2cap_do_connect(struct sock
*sk
)
852 bdaddr_t
*src
= &bt_sk(sk
)->src
;
853 bdaddr_t
*dst
= &bt_sk(sk
)->dst
;
854 struct l2cap_conn
*conn
;
855 struct l2cap_chan
*chan
;
856 struct hci_conn
*hcon
;
857 struct hci_dev
*hdev
;
861 BT_DBG("%s -> %s psm 0x%2.2x", batostr(src
), batostr(dst
),
864 hdev
= hci_get_route(dst
, src
);
866 return -EHOSTUNREACH
;
868 hci_dev_lock_bh(hdev
);
870 auth_type
= l2cap_get_auth_type(sk
);
872 if (l2cap_pi(sk
)->dcid
== L2CAP_CID_LE_DATA
)
873 hcon
= hci_connect(hdev
, LE_LINK
, dst
,
874 l2cap_pi(sk
)->sec_level
, auth_type
);
876 hcon
= hci_connect(hdev
, ACL_LINK
, dst
,
877 l2cap_pi(sk
)->sec_level
, auth_type
);
884 conn
= l2cap_conn_add(hcon
, 0);
891 chan
= l2cap_chan_alloc(sk
);
898 /* Update source addr of the socket */
899 bacpy(src
, conn
->src
);
901 l2cap_chan_add(conn
, chan
);
903 l2cap_pi(sk
)->chan
= chan
;
905 sk
->sk_state
= BT_CONNECT
;
906 l2cap_sock_set_timer(sk
, sk
->sk_sndtimeo
);
908 if (hcon
->state
== BT_CONNECTED
) {
909 if (sk
->sk_type
!= SOCK_SEQPACKET
&&
910 sk
->sk_type
!= SOCK_STREAM
) {
911 l2cap_sock_clear_timer(sk
);
912 if (l2cap_check_security(sk
))
913 sk
->sk_state
= BT_CONNECTED
;
915 l2cap_do_start(chan
);
921 hci_dev_unlock_bh(hdev
);
926 int __l2cap_wait_ack(struct sock
*sk
)
928 DECLARE_WAITQUEUE(wait
, current
);
932 add_wait_queue(sk_sleep(sk
), &wait
);
933 while ((l2cap_pi(sk
)->chan
->unacked_frames
> 0 && l2cap_pi(sk
)->conn
)) {
934 set_current_state(TASK_INTERRUPTIBLE
);
939 if (signal_pending(current
)) {
940 err
= sock_intr_errno(timeo
);
945 timeo
= schedule_timeout(timeo
);
948 err
= sock_error(sk
);
952 set_current_state(TASK_RUNNING
);
953 remove_wait_queue(sk_sleep(sk
), &wait
);
957 static void l2cap_monitor_timeout(unsigned long arg
)
959 struct l2cap_chan
*chan
= (void *) arg
;
960 struct sock
*sk
= chan
->sk
;
962 BT_DBG("chan %p", chan
);
965 if (chan
->retry_count
>= chan
->remote_max_tx
) {
966 l2cap_send_disconn_req(l2cap_pi(sk
)->conn
, chan
, ECONNABORTED
);
972 __mod_monitor_timer();
974 l2cap_send_rr_or_rnr(chan
, L2CAP_CTRL_POLL
);
978 static void l2cap_retrans_timeout(unsigned long arg
)
980 struct l2cap_chan
*chan
= (void *) arg
;
981 struct sock
*sk
= chan
->sk
;
983 BT_DBG("chan %p", chan
);
986 chan
->retry_count
= 1;
987 __mod_monitor_timer();
989 chan
->conn_state
|= L2CAP_CONN_WAIT_F
;
991 l2cap_send_rr_or_rnr(chan
, L2CAP_CTRL_POLL
);
995 static void l2cap_drop_acked_frames(struct l2cap_chan
*chan
)
999 while ((skb
= skb_peek(&chan
->tx_q
)) &&
1000 chan
->unacked_frames
) {
1001 if (bt_cb(skb
)->tx_seq
== chan
->expected_ack_seq
)
1004 skb
= skb_dequeue(&chan
->tx_q
);
1007 chan
->unacked_frames
--;
1010 if (!chan
->unacked_frames
)
1011 del_timer(&chan
->retrans_timer
);
1014 void l2cap_do_send(struct sock
*sk
, struct sk_buff
*skb
)
1016 struct l2cap_pinfo
*pi
= l2cap_pi(sk
);
1017 struct hci_conn
*hcon
= pi
->conn
->hcon
;
1020 BT_DBG("sk %p, skb %p len %d", sk
, skb
, skb
->len
);
1022 if (!pi
->flushable
&& lmp_no_flush_capable(hcon
->hdev
))
1023 flags
= ACL_START_NO_FLUSH
;
1027 hci_send_acl(hcon
, skb
, flags
);
1030 void l2cap_streaming_send(struct l2cap_chan
*chan
)
1032 struct sock
*sk
= chan
->sk
;
1033 struct sk_buff
*skb
;
1034 struct l2cap_pinfo
*pi
= l2cap_pi(sk
);
1037 while ((skb
= skb_dequeue(&chan
->tx_q
))) {
1038 control
= get_unaligned_le16(skb
->data
+ L2CAP_HDR_SIZE
);
1039 control
|= chan
->next_tx_seq
<< L2CAP_CTRL_TXSEQ_SHIFT
;
1040 put_unaligned_le16(control
, skb
->data
+ L2CAP_HDR_SIZE
);
1042 if (pi
->fcs
== L2CAP_FCS_CRC16
) {
1043 fcs
= crc16(0, (u8
*)skb
->data
, skb
->len
- 2);
1044 put_unaligned_le16(fcs
, skb
->data
+ skb
->len
- 2);
1047 l2cap_do_send(sk
, skb
);
1049 chan
->next_tx_seq
= (chan
->next_tx_seq
+ 1) % 64;
1053 static void l2cap_retransmit_one_frame(struct l2cap_chan
*chan
, u8 tx_seq
)
1055 struct sock
*sk
= chan
->sk
;
1056 struct l2cap_pinfo
*pi
= l2cap_pi(sk
);
1057 struct sk_buff
*skb
, *tx_skb
;
1060 skb
= skb_peek(&chan
->tx_q
);
1065 if (bt_cb(skb
)->tx_seq
== tx_seq
)
1068 if (skb_queue_is_last(&chan
->tx_q
, skb
))
1071 } while ((skb
= skb_queue_next(&chan
->tx_q
, skb
)));
1073 if (chan
->remote_max_tx
&&
1074 bt_cb(skb
)->retries
== chan
->remote_max_tx
) {
1075 l2cap_send_disconn_req(pi
->conn
, chan
, ECONNABORTED
);
1079 tx_skb
= skb_clone(skb
, GFP_ATOMIC
);
1080 bt_cb(skb
)->retries
++;
1081 control
= get_unaligned_le16(tx_skb
->data
+ L2CAP_HDR_SIZE
);
1083 if (chan
->conn_state
& L2CAP_CONN_SEND_FBIT
) {
1084 control
|= L2CAP_CTRL_FINAL
;
1085 chan
->conn_state
&= ~L2CAP_CONN_SEND_FBIT
;
1088 control
|= (chan
->buffer_seq
<< L2CAP_CTRL_REQSEQ_SHIFT
)
1089 | (tx_seq
<< L2CAP_CTRL_TXSEQ_SHIFT
);
1091 put_unaligned_le16(control
, tx_skb
->data
+ L2CAP_HDR_SIZE
);
1093 if (pi
->fcs
== L2CAP_FCS_CRC16
) {
1094 fcs
= crc16(0, (u8
*)tx_skb
->data
, tx_skb
->len
- 2);
1095 put_unaligned_le16(fcs
, tx_skb
->data
+ tx_skb
->len
- 2);
1098 l2cap_do_send(sk
, tx_skb
);
1101 int l2cap_ertm_send(struct l2cap_chan
*chan
)
1103 struct sk_buff
*skb
, *tx_skb
;
1104 struct sock
*sk
= chan
->sk
;
1105 struct l2cap_pinfo
*pi
= l2cap_pi(sk
);
1109 if (sk
->sk_state
!= BT_CONNECTED
)
1112 while ((skb
= chan
->tx_send_head
) && (!l2cap_tx_window_full(chan
))) {
1114 if (chan
->remote_max_tx
&&
1115 bt_cb(skb
)->retries
== chan
->remote_max_tx
) {
1116 l2cap_send_disconn_req(pi
->conn
, chan
, ECONNABORTED
);
1120 tx_skb
= skb_clone(skb
, GFP_ATOMIC
);
1122 bt_cb(skb
)->retries
++;
1124 control
= get_unaligned_le16(tx_skb
->data
+ L2CAP_HDR_SIZE
);
1125 control
&= L2CAP_CTRL_SAR
;
1127 if (chan
->conn_state
& L2CAP_CONN_SEND_FBIT
) {
1128 control
|= L2CAP_CTRL_FINAL
;
1129 chan
->conn_state
&= ~L2CAP_CONN_SEND_FBIT
;
1131 control
|= (chan
->buffer_seq
<< L2CAP_CTRL_REQSEQ_SHIFT
)
1132 | (chan
->next_tx_seq
<< L2CAP_CTRL_TXSEQ_SHIFT
);
1133 put_unaligned_le16(control
, tx_skb
->data
+ L2CAP_HDR_SIZE
);
1136 if (pi
->fcs
== L2CAP_FCS_CRC16
) {
1137 fcs
= crc16(0, (u8
*)skb
->data
, tx_skb
->len
- 2);
1138 put_unaligned_le16(fcs
, skb
->data
+ tx_skb
->len
- 2);
1141 l2cap_do_send(sk
, tx_skb
);
1143 __mod_retrans_timer();
1145 bt_cb(skb
)->tx_seq
= chan
->next_tx_seq
;
1146 chan
->next_tx_seq
= (chan
->next_tx_seq
+ 1) % 64;
1148 if (bt_cb(skb
)->retries
== 1)
1149 chan
->unacked_frames
++;
1151 chan
->frames_sent
++;
1153 if (skb_queue_is_last(&chan
->tx_q
, skb
))
1154 chan
->tx_send_head
= NULL
;
1156 chan
->tx_send_head
= skb_queue_next(&chan
->tx_q
, skb
);
1164 static int l2cap_retransmit_frames(struct l2cap_chan
*chan
)
1168 if (!skb_queue_empty(&chan
->tx_q
))
1169 chan
->tx_send_head
= chan
->tx_q
.next
;
1171 chan
->next_tx_seq
= chan
->expected_ack_seq
;
1172 ret
= l2cap_ertm_send(chan
);
1176 static void l2cap_send_ack(struct l2cap_chan
*chan
)
1180 control
|= chan
->buffer_seq
<< L2CAP_CTRL_REQSEQ_SHIFT
;
1182 if (chan
->conn_state
& L2CAP_CONN_LOCAL_BUSY
) {
1183 control
|= L2CAP_SUPER_RCV_NOT_READY
;
1184 chan
->conn_state
|= L2CAP_CONN_RNR_SENT
;
1185 l2cap_send_sframe(chan
, control
);
1189 if (l2cap_ertm_send(chan
) > 0)
1192 control
|= L2CAP_SUPER_RCV_READY
;
1193 l2cap_send_sframe(chan
, control
);
1196 static void l2cap_send_srejtail(struct l2cap_chan
*chan
)
1198 struct srej_list
*tail
;
1201 control
= L2CAP_SUPER_SELECT_REJECT
;
1202 control
|= L2CAP_CTRL_FINAL
;
1204 tail
= list_entry((&chan
->srej_l
)->prev
, struct srej_list
, list
);
1205 control
|= tail
->tx_seq
<< L2CAP_CTRL_REQSEQ_SHIFT
;
1207 l2cap_send_sframe(chan
, control
);
1210 static inline int l2cap_skbuff_fromiovec(struct sock
*sk
, struct msghdr
*msg
, int len
, int count
, struct sk_buff
*skb
)
1212 struct l2cap_conn
*conn
= l2cap_pi(sk
)->conn
;
1213 struct sk_buff
**frag
;
1216 if (memcpy_fromiovec(skb_put(skb
, count
), msg
->msg_iov
, count
))
1222 /* Continuation fragments (no L2CAP header) */
1223 frag
= &skb_shinfo(skb
)->frag_list
;
1225 count
= min_t(unsigned int, conn
->mtu
, len
);
1227 *frag
= bt_skb_send_alloc(sk
, count
, msg
->msg_flags
& MSG_DONTWAIT
, &err
);
1230 if (memcpy_fromiovec(skb_put(*frag
, count
), msg
->msg_iov
, count
))
1236 frag
= &(*frag
)->next
;
1242 struct sk_buff
*l2cap_create_connless_pdu(struct sock
*sk
, struct msghdr
*msg
, size_t len
)
1244 struct l2cap_conn
*conn
= l2cap_pi(sk
)->conn
;
1245 struct sk_buff
*skb
;
1246 int err
, count
, hlen
= L2CAP_HDR_SIZE
+ 2;
1247 struct l2cap_hdr
*lh
;
1249 BT_DBG("sk %p len %d", sk
, (int)len
);
1251 count
= min_t(unsigned int, (conn
->mtu
- hlen
), len
);
1252 skb
= bt_skb_send_alloc(sk
, count
+ hlen
,
1253 msg
->msg_flags
& MSG_DONTWAIT
, &err
);
1255 return ERR_PTR(err
);
1257 /* Create L2CAP header */
1258 lh
= (struct l2cap_hdr
*) skb_put(skb
, L2CAP_HDR_SIZE
);
1259 lh
->cid
= cpu_to_le16(l2cap_pi(sk
)->dcid
);
1260 lh
->len
= cpu_to_le16(len
+ (hlen
- L2CAP_HDR_SIZE
));
1261 put_unaligned_le16(l2cap_pi(sk
)->psm
, skb_put(skb
, 2));
1263 err
= l2cap_skbuff_fromiovec(sk
, msg
, len
, count
, skb
);
1264 if (unlikely(err
< 0)) {
1266 return ERR_PTR(err
);
1271 struct sk_buff
*l2cap_create_basic_pdu(struct sock
*sk
, struct msghdr
*msg
, size_t len
)
1273 struct l2cap_conn
*conn
= l2cap_pi(sk
)->conn
;
1274 struct sk_buff
*skb
;
1275 int err
, count
, hlen
= L2CAP_HDR_SIZE
;
1276 struct l2cap_hdr
*lh
;
1278 BT_DBG("sk %p len %d", sk
, (int)len
);
1280 count
= min_t(unsigned int, (conn
->mtu
- hlen
), len
);
1281 skb
= bt_skb_send_alloc(sk
, count
+ hlen
,
1282 msg
->msg_flags
& MSG_DONTWAIT
, &err
);
1284 return ERR_PTR(err
);
1286 /* Create L2CAP header */
1287 lh
= (struct l2cap_hdr
*) skb_put(skb
, L2CAP_HDR_SIZE
);
1288 lh
->cid
= cpu_to_le16(l2cap_pi(sk
)->dcid
);
1289 lh
->len
= cpu_to_le16(len
+ (hlen
- L2CAP_HDR_SIZE
));
1291 err
= l2cap_skbuff_fromiovec(sk
, msg
, len
, count
, skb
);
1292 if (unlikely(err
< 0)) {
1294 return ERR_PTR(err
);
1299 struct sk_buff
*l2cap_create_iframe_pdu(struct sock
*sk
, struct msghdr
*msg
, size_t len
, u16 control
, u16 sdulen
)
1301 struct l2cap_conn
*conn
= l2cap_pi(sk
)->conn
;
1302 struct sk_buff
*skb
;
1303 int err
, count
, hlen
= L2CAP_HDR_SIZE
+ 2;
1304 struct l2cap_hdr
*lh
;
1306 BT_DBG("sk %p len %d", sk
, (int)len
);
1309 return ERR_PTR(-ENOTCONN
);
1314 if (l2cap_pi(sk
)->fcs
== L2CAP_FCS_CRC16
)
1317 count
= min_t(unsigned int, (conn
->mtu
- hlen
), len
);
1318 skb
= bt_skb_send_alloc(sk
, count
+ hlen
,
1319 msg
->msg_flags
& MSG_DONTWAIT
, &err
);
1321 return ERR_PTR(err
);
1323 /* Create L2CAP header */
1324 lh
= (struct l2cap_hdr
*) skb_put(skb
, L2CAP_HDR_SIZE
);
1325 lh
->cid
= cpu_to_le16(l2cap_pi(sk
)->dcid
);
1326 lh
->len
= cpu_to_le16(len
+ (hlen
- L2CAP_HDR_SIZE
));
1327 put_unaligned_le16(control
, skb_put(skb
, 2));
1329 put_unaligned_le16(sdulen
, skb_put(skb
, 2));
1331 err
= l2cap_skbuff_fromiovec(sk
, msg
, len
, count
, skb
);
1332 if (unlikely(err
< 0)) {
1334 return ERR_PTR(err
);
1337 if (l2cap_pi(sk
)->fcs
== L2CAP_FCS_CRC16
)
1338 put_unaligned_le16(0, skb_put(skb
, 2));
1340 bt_cb(skb
)->retries
= 0;
1344 int l2cap_sar_segment_sdu(struct l2cap_chan
*chan
, struct msghdr
*msg
, size_t len
)
1346 struct sock
*sk
= chan
->sk
;
1347 struct sk_buff
*skb
;
1348 struct sk_buff_head sar_queue
;
1352 skb_queue_head_init(&sar_queue
);
1353 control
= L2CAP_SDU_START
;
1354 skb
= l2cap_create_iframe_pdu(sk
, msg
, chan
->remote_mps
, control
, len
);
1356 return PTR_ERR(skb
);
1358 __skb_queue_tail(&sar_queue
, skb
);
1359 len
-= chan
->remote_mps
;
1360 size
+= chan
->remote_mps
;
1365 if (len
> chan
->remote_mps
) {
1366 control
= L2CAP_SDU_CONTINUE
;
1367 buflen
= chan
->remote_mps
;
1369 control
= L2CAP_SDU_END
;
1373 skb
= l2cap_create_iframe_pdu(sk
, msg
, buflen
, control
, 0);
1375 skb_queue_purge(&sar_queue
);
1376 return PTR_ERR(skb
);
1379 __skb_queue_tail(&sar_queue
, skb
);
1383 skb_queue_splice_tail(&sar_queue
, &chan
->tx_q
);
1384 if (chan
->tx_send_head
== NULL
)
1385 chan
->tx_send_head
= sar_queue
.next
;
1390 static void l2cap_chan_ready(struct sock
*sk
)
1392 struct sock
*parent
= bt_sk(sk
)->parent
;
1394 BT_DBG("sk %p, parent %p", sk
, parent
);
1396 l2cap_pi(sk
)->conf_state
= 0;
1397 l2cap_sock_clear_timer(sk
);
1400 /* Outgoing channel.
1401 * Wake up socket sleeping on connect.
1403 sk
->sk_state
= BT_CONNECTED
;
1404 sk
->sk_state_change(sk
);
1406 /* Incoming channel.
1407 * Wake up socket sleeping on accept.
1409 parent
->sk_data_ready(parent
, 0);
1413 /* Copy frame to all raw sockets on that connection */
1414 static void l2cap_raw_recv(struct l2cap_conn
*conn
, struct sk_buff
*skb
)
1416 struct sk_buff
*nskb
;
1417 struct l2cap_chan
*chan
;
1419 BT_DBG("conn %p", conn
);
1421 read_lock(&conn
->chan_lock
);
1422 list_for_each_entry(chan
, &conn
->chan_l
, list
) {
1423 struct sock
*sk
= chan
->sk
;
1424 if (sk
->sk_type
!= SOCK_RAW
)
1427 /* Don't send frame to the socket it came from */
1430 nskb
= skb_clone(skb
, GFP_ATOMIC
);
1434 if (sock_queue_rcv_skb(sk
, nskb
))
1437 read_unlock(&conn
->chan_lock
);
1440 /* ---- L2CAP signalling commands ---- */
1441 static struct sk_buff
*l2cap_build_cmd(struct l2cap_conn
*conn
,
1442 u8 code
, u8 ident
, u16 dlen
, void *data
)
1444 struct sk_buff
*skb
, **frag
;
1445 struct l2cap_cmd_hdr
*cmd
;
1446 struct l2cap_hdr
*lh
;
1449 BT_DBG("conn %p, code 0x%2.2x, ident 0x%2.2x, len %d",
1450 conn
, code
, ident
, dlen
);
1452 len
= L2CAP_HDR_SIZE
+ L2CAP_CMD_HDR_SIZE
+ dlen
;
1453 count
= min_t(unsigned int, conn
->mtu
, len
);
1455 skb
= bt_skb_alloc(count
, GFP_ATOMIC
);
1459 lh
= (struct l2cap_hdr
*) skb_put(skb
, L2CAP_HDR_SIZE
);
1460 lh
->len
= cpu_to_le16(L2CAP_CMD_HDR_SIZE
+ dlen
);
1462 if (conn
->hcon
->type
== LE_LINK
)
1463 lh
->cid
= cpu_to_le16(L2CAP_CID_LE_SIGNALING
);
1465 lh
->cid
= cpu_to_le16(L2CAP_CID_SIGNALING
);
1467 cmd
= (struct l2cap_cmd_hdr
*) skb_put(skb
, L2CAP_CMD_HDR_SIZE
);
1470 cmd
->len
= cpu_to_le16(dlen
);
1473 count
-= L2CAP_HDR_SIZE
+ L2CAP_CMD_HDR_SIZE
;
1474 memcpy(skb_put(skb
, count
), data
, count
);
1480 /* Continuation fragments (no L2CAP header) */
1481 frag
= &skb_shinfo(skb
)->frag_list
;
1483 count
= min_t(unsigned int, conn
->mtu
, len
);
1485 *frag
= bt_skb_alloc(count
, GFP_ATOMIC
);
1489 memcpy(skb_put(*frag
, count
), data
, count
);
1494 frag
= &(*frag
)->next
;
1504 static inline int l2cap_get_conf_opt(void **ptr
, int *type
, int *olen
, unsigned long *val
)
1506 struct l2cap_conf_opt
*opt
= *ptr
;
1509 len
= L2CAP_CONF_OPT_SIZE
+ opt
->len
;
1517 *val
= *((u8
*) opt
->val
);
1521 *val
= get_unaligned_le16(opt
->val
);
1525 *val
= get_unaligned_le32(opt
->val
);
1529 *val
= (unsigned long) opt
->val
;
1533 BT_DBG("type 0x%2.2x len %d val 0x%lx", *type
, opt
->len
, *val
);
1537 static void l2cap_add_conf_opt(void **ptr
, u8 type
, u8 len
, unsigned long val
)
1539 struct l2cap_conf_opt
*opt
= *ptr
;
1541 BT_DBG("type 0x%2.2x len %d val 0x%lx", type
, len
, val
);
1548 *((u8
*) opt
->val
) = val
;
1552 put_unaligned_le16(val
, opt
->val
);
1556 put_unaligned_le32(val
, opt
->val
);
1560 memcpy(opt
->val
, (void *) val
, len
);
1564 *ptr
+= L2CAP_CONF_OPT_SIZE
+ len
;
1567 static void l2cap_ack_timeout(unsigned long arg
)
1569 struct l2cap_chan
*chan
= (void *) arg
;
1571 bh_lock_sock(chan
->sk
);
1572 l2cap_send_ack(chan
);
1573 bh_unlock_sock(chan
->sk
);
1576 static inline void l2cap_ertm_init(struct l2cap_chan
*chan
)
1578 struct sock
*sk
= chan
->sk
;
1580 chan
->expected_ack_seq
= 0;
1581 chan
->unacked_frames
= 0;
1582 chan
->buffer_seq
= 0;
1583 chan
->num_acked
= 0;
1584 chan
->frames_sent
= 0;
1586 setup_timer(&chan
->retrans_timer
, l2cap_retrans_timeout
,
1587 (unsigned long) chan
);
1588 setup_timer(&chan
->monitor_timer
, l2cap_monitor_timeout
,
1589 (unsigned long) chan
);
1590 setup_timer(&chan
->ack_timer
, l2cap_ack_timeout
, (unsigned long) chan
);
1592 skb_queue_head_init(&chan
->srej_q
);
1593 skb_queue_head_init(&chan
->busy_q
);
1595 INIT_LIST_HEAD(&chan
->srej_l
);
1597 INIT_WORK(&chan
->busy_work
, l2cap_busy_work
);
1599 sk
->sk_backlog_rcv
= l2cap_ertm_data_rcv
;
1602 static inline __u8
l2cap_select_mode(__u8 mode
, __u16 remote_feat_mask
)
1605 case L2CAP_MODE_STREAMING
:
1606 case L2CAP_MODE_ERTM
:
1607 if (l2cap_mode_supported(mode
, remote_feat_mask
))
1611 return L2CAP_MODE_BASIC
;
1615 static int l2cap_build_conf_req(struct l2cap_chan
*chan
, void *data
)
1617 struct l2cap_pinfo
*pi
= l2cap_pi(chan
->sk
);
1618 struct l2cap_conf_req
*req
= data
;
1619 struct l2cap_conf_rfc rfc
= { .mode
= pi
->mode
};
1620 void *ptr
= req
->data
;
1622 BT_DBG("chan %p", chan
);
1624 if (chan
->num_conf_req
|| chan
->num_conf_rsp
)
1628 case L2CAP_MODE_STREAMING
:
1629 case L2CAP_MODE_ERTM
:
1630 if (pi
->conf_state
& L2CAP_CONF_STATE2_DEVICE
)
1635 pi
->mode
= l2cap_select_mode(rfc
.mode
, pi
->conn
->feat_mask
);
1640 if (pi
->imtu
!= L2CAP_DEFAULT_MTU
)
1641 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_MTU
, 2, pi
->imtu
);
1644 case L2CAP_MODE_BASIC
:
1645 if (!(pi
->conn
->feat_mask
& L2CAP_FEAT_ERTM
) &&
1646 !(pi
->conn
->feat_mask
& L2CAP_FEAT_STREAMING
))
1649 rfc
.mode
= L2CAP_MODE_BASIC
;
1651 rfc
.max_transmit
= 0;
1652 rfc
.retrans_timeout
= 0;
1653 rfc
.monitor_timeout
= 0;
1654 rfc
.max_pdu_size
= 0;
1656 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_RFC
, sizeof(rfc
),
1657 (unsigned long) &rfc
);
1660 case L2CAP_MODE_ERTM
:
1661 rfc
.mode
= L2CAP_MODE_ERTM
;
1662 rfc
.txwin_size
= pi
->tx_win
;
1663 rfc
.max_transmit
= pi
->max_tx
;
1664 rfc
.retrans_timeout
= 0;
1665 rfc
.monitor_timeout
= 0;
1666 rfc
.max_pdu_size
= cpu_to_le16(L2CAP_DEFAULT_MAX_PDU_SIZE
);
1667 if (L2CAP_DEFAULT_MAX_PDU_SIZE
> pi
->conn
->mtu
- 10)
1668 rfc
.max_pdu_size
= cpu_to_le16(pi
->conn
->mtu
- 10);
1670 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_RFC
, sizeof(rfc
),
1671 (unsigned long) &rfc
);
1673 if (!(pi
->conn
->feat_mask
& L2CAP_FEAT_FCS
))
1676 if (pi
->fcs
== L2CAP_FCS_NONE
||
1677 pi
->conf_state
& L2CAP_CONF_NO_FCS_RECV
) {
1678 pi
->fcs
= L2CAP_FCS_NONE
;
1679 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_FCS
, 1, pi
->fcs
);
1683 case L2CAP_MODE_STREAMING
:
1684 rfc
.mode
= L2CAP_MODE_STREAMING
;
1686 rfc
.max_transmit
= 0;
1687 rfc
.retrans_timeout
= 0;
1688 rfc
.monitor_timeout
= 0;
1689 rfc
.max_pdu_size
= cpu_to_le16(L2CAP_DEFAULT_MAX_PDU_SIZE
);
1690 if (L2CAP_DEFAULT_MAX_PDU_SIZE
> pi
->conn
->mtu
- 10)
1691 rfc
.max_pdu_size
= cpu_to_le16(pi
->conn
->mtu
- 10);
1693 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_RFC
, sizeof(rfc
),
1694 (unsigned long) &rfc
);
1696 if (!(pi
->conn
->feat_mask
& L2CAP_FEAT_FCS
))
1699 if (pi
->fcs
== L2CAP_FCS_NONE
||
1700 pi
->conf_state
& L2CAP_CONF_NO_FCS_RECV
) {
1701 pi
->fcs
= L2CAP_FCS_NONE
;
1702 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_FCS
, 1, pi
->fcs
);
1707 req
->dcid
= cpu_to_le16(pi
->dcid
);
1708 req
->flags
= cpu_to_le16(0);
1713 static int l2cap_parse_conf_req(struct l2cap_chan
*chan
, void *data
)
1715 struct l2cap_pinfo
*pi
= l2cap_pi(chan
->sk
);
1716 struct l2cap_conf_rsp
*rsp
= data
;
1717 void *ptr
= rsp
->data
;
1718 void *req
= chan
->conf_req
;
1719 int len
= chan
->conf_len
;
1720 int type
, hint
, olen
;
1722 struct l2cap_conf_rfc rfc
= { .mode
= L2CAP_MODE_BASIC
};
1723 u16 mtu
= L2CAP_DEFAULT_MTU
;
1724 u16 result
= L2CAP_CONF_SUCCESS
;
1726 BT_DBG("chan %p", chan
);
1728 while (len
>= L2CAP_CONF_OPT_SIZE
) {
1729 len
-= l2cap_get_conf_opt(&req
, &type
, &olen
, &val
);
1731 hint
= type
& L2CAP_CONF_HINT
;
1732 type
&= L2CAP_CONF_MASK
;
1735 case L2CAP_CONF_MTU
:
1739 case L2CAP_CONF_FLUSH_TO
:
1743 case L2CAP_CONF_QOS
:
1746 case L2CAP_CONF_RFC
:
1747 if (olen
== sizeof(rfc
))
1748 memcpy(&rfc
, (void *) val
, olen
);
1751 case L2CAP_CONF_FCS
:
1752 if (val
== L2CAP_FCS_NONE
)
1753 pi
->conf_state
|= L2CAP_CONF_NO_FCS_RECV
;
1761 result
= L2CAP_CONF_UNKNOWN
;
1762 *((u8
*) ptr
++) = type
;
1767 if (chan
->num_conf_rsp
|| chan
->num_conf_req
> 1)
1771 case L2CAP_MODE_STREAMING
:
1772 case L2CAP_MODE_ERTM
:
1773 if (!(pi
->conf_state
& L2CAP_CONF_STATE2_DEVICE
)) {
1774 pi
->mode
= l2cap_select_mode(rfc
.mode
,
1775 pi
->conn
->feat_mask
);
1779 if (pi
->mode
!= rfc
.mode
)
1780 return -ECONNREFUSED
;
1786 if (pi
->mode
!= rfc
.mode
) {
1787 result
= L2CAP_CONF_UNACCEPT
;
1788 rfc
.mode
= pi
->mode
;
1790 if (chan
->num_conf_rsp
== 1)
1791 return -ECONNREFUSED
;
1793 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_RFC
,
1794 sizeof(rfc
), (unsigned long) &rfc
);
1798 if (result
== L2CAP_CONF_SUCCESS
) {
1799 /* Configure output options and let the other side know
1800 * which ones we don't like. */
1802 if (mtu
< L2CAP_DEFAULT_MIN_MTU
)
1803 result
= L2CAP_CONF_UNACCEPT
;
1806 pi
->conf_state
|= L2CAP_CONF_MTU_DONE
;
1808 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_MTU
, 2, pi
->omtu
);
1811 case L2CAP_MODE_BASIC
:
1812 pi
->fcs
= L2CAP_FCS_NONE
;
1813 pi
->conf_state
|= L2CAP_CONF_MODE_DONE
;
1816 case L2CAP_MODE_ERTM
:
1817 chan
->remote_tx_win
= rfc
.txwin_size
;
1818 chan
->remote_max_tx
= rfc
.max_transmit
;
1820 if (le16_to_cpu(rfc
.max_pdu_size
) > pi
->conn
->mtu
- 10)
1821 rfc
.max_pdu_size
= cpu_to_le16(pi
->conn
->mtu
- 10);
1823 chan
->remote_mps
= le16_to_cpu(rfc
.max_pdu_size
);
1825 rfc
.retrans_timeout
=
1826 le16_to_cpu(L2CAP_DEFAULT_RETRANS_TO
);
1827 rfc
.monitor_timeout
=
1828 le16_to_cpu(L2CAP_DEFAULT_MONITOR_TO
);
1830 pi
->conf_state
|= L2CAP_CONF_MODE_DONE
;
1832 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_RFC
,
1833 sizeof(rfc
), (unsigned long) &rfc
);
1837 case L2CAP_MODE_STREAMING
:
1838 if (le16_to_cpu(rfc
.max_pdu_size
) > pi
->conn
->mtu
- 10)
1839 rfc
.max_pdu_size
= cpu_to_le16(pi
->conn
->mtu
- 10);
1841 chan
->remote_mps
= le16_to_cpu(rfc
.max_pdu_size
);
1843 pi
->conf_state
|= L2CAP_CONF_MODE_DONE
;
1845 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_RFC
,
1846 sizeof(rfc
), (unsigned long) &rfc
);
1851 result
= L2CAP_CONF_UNACCEPT
;
1853 memset(&rfc
, 0, sizeof(rfc
));
1854 rfc
.mode
= pi
->mode
;
1857 if (result
== L2CAP_CONF_SUCCESS
)
1858 pi
->conf_state
|= L2CAP_CONF_OUTPUT_DONE
;
1860 rsp
->scid
= cpu_to_le16(pi
->dcid
);
1861 rsp
->result
= cpu_to_le16(result
);
1862 rsp
->flags
= cpu_to_le16(0x0000);
1867 static int l2cap_parse_conf_rsp(struct sock
*sk
, void *rsp
, int len
, void *data
, u16
*result
)
1869 struct l2cap_pinfo
*pi
= l2cap_pi(sk
);
1870 struct l2cap_conf_req
*req
= data
;
1871 void *ptr
= req
->data
;
1874 struct l2cap_conf_rfc rfc
;
1876 BT_DBG("sk %p, rsp %p, len %d, req %p", sk
, rsp
, len
, data
);
1878 while (len
>= L2CAP_CONF_OPT_SIZE
) {
1879 len
-= l2cap_get_conf_opt(&rsp
, &type
, &olen
, &val
);
1882 case L2CAP_CONF_MTU
:
1883 if (val
< L2CAP_DEFAULT_MIN_MTU
) {
1884 *result
= L2CAP_CONF_UNACCEPT
;
1885 pi
->imtu
= L2CAP_DEFAULT_MIN_MTU
;
1888 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_MTU
, 2, pi
->imtu
);
1891 case L2CAP_CONF_FLUSH_TO
:
1893 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_FLUSH_TO
,
1897 case L2CAP_CONF_RFC
:
1898 if (olen
== sizeof(rfc
))
1899 memcpy(&rfc
, (void *)val
, olen
);
1901 if ((pi
->conf_state
& L2CAP_CONF_STATE2_DEVICE
) &&
1902 rfc
.mode
!= pi
->mode
)
1903 return -ECONNREFUSED
;
1907 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_RFC
,
1908 sizeof(rfc
), (unsigned long) &rfc
);
1913 if (pi
->mode
== L2CAP_MODE_BASIC
&& pi
->mode
!= rfc
.mode
)
1914 return -ECONNREFUSED
;
1916 pi
->mode
= rfc
.mode
;
1918 if (*result
== L2CAP_CONF_SUCCESS
) {
1920 case L2CAP_MODE_ERTM
:
1921 pi
->retrans_timeout
= le16_to_cpu(rfc
.retrans_timeout
);
1922 pi
->monitor_timeout
= le16_to_cpu(rfc
.monitor_timeout
);
1923 pi
->mps
= le16_to_cpu(rfc
.max_pdu_size
);
1925 case L2CAP_MODE_STREAMING
:
1926 pi
->mps
= le16_to_cpu(rfc
.max_pdu_size
);
1930 req
->dcid
= cpu_to_le16(pi
->dcid
);
1931 req
->flags
= cpu_to_le16(0x0000);
1936 static int l2cap_build_conf_rsp(struct sock
*sk
, void *data
, u16 result
, u16 flags
)
1938 struct l2cap_conf_rsp
*rsp
= data
;
1939 void *ptr
= rsp
->data
;
1941 BT_DBG("sk %p", sk
);
1943 rsp
->scid
= cpu_to_le16(l2cap_pi(sk
)->dcid
);
1944 rsp
->result
= cpu_to_le16(result
);
1945 rsp
->flags
= cpu_to_le16(flags
);
1950 void __l2cap_connect_rsp_defer(struct sock
*sk
)
1952 struct l2cap_conn_rsp rsp
;
1953 struct l2cap_conn
*conn
= l2cap_pi(sk
)->conn
;
1954 struct l2cap_chan
*chan
= l2cap_pi(sk
)->chan
;
1957 sk
->sk_state
= BT_CONFIG
;
1959 rsp
.scid
= cpu_to_le16(l2cap_pi(sk
)->dcid
);
1960 rsp
.dcid
= cpu_to_le16(l2cap_pi(sk
)->scid
);
1961 rsp
.result
= cpu_to_le16(L2CAP_CR_SUCCESS
);
1962 rsp
.status
= cpu_to_le16(L2CAP_CS_NO_INFO
);
1963 l2cap_send_cmd(conn
, chan
->ident
,
1964 L2CAP_CONN_RSP
, sizeof(rsp
), &rsp
);
1966 if (l2cap_pi(sk
)->conf_state
& L2CAP_CONF_REQ_SENT
)
1969 l2cap_pi(sk
)->conf_state
|= L2CAP_CONF_REQ_SENT
;
1970 l2cap_send_cmd(conn
, l2cap_get_ident(conn
), L2CAP_CONF_REQ
,
1971 l2cap_build_conf_req(chan
, buf
), buf
);
1972 chan
->num_conf_req
++;
1975 static void l2cap_conf_rfc_get(struct sock
*sk
, void *rsp
, int len
)
1977 struct l2cap_pinfo
*pi
= l2cap_pi(sk
);
1980 struct l2cap_conf_rfc rfc
;
1982 BT_DBG("sk %p, rsp %p, len %d", sk
, rsp
, len
);
1984 if ((pi
->mode
!= L2CAP_MODE_ERTM
) && (pi
->mode
!= L2CAP_MODE_STREAMING
))
1987 while (len
>= L2CAP_CONF_OPT_SIZE
) {
1988 len
-= l2cap_get_conf_opt(&rsp
, &type
, &olen
, &val
);
1991 case L2CAP_CONF_RFC
:
1992 if (olen
== sizeof(rfc
))
1993 memcpy(&rfc
, (void *)val
, olen
);
2000 case L2CAP_MODE_ERTM
:
2001 pi
->retrans_timeout
= le16_to_cpu(rfc
.retrans_timeout
);
2002 pi
->monitor_timeout
= le16_to_cpu(rfc
.monitor_timeout
);
2003 pi
->mps
= le16_to_cpu(rfc
.max_pdu_size
);
2005 case L2CAP_MODE_STREAMING
:
2006 pi
->mps
= le16_to_cpu(rfc
.max_pdu_size
);
2010 static inline int l2cap_command_rej(struct l2cap_conn
*conn
, struct l2cap_cmd_hdr
*cmd
, u8
*data
)
2012 struct l2cap_cmd_rej
*rej
= (struct l2cap_cmd_rej
*) data
;
2014 if (rej
->reason
!= 0x0000)
2017 if ((conn
->info_state
& L2CAP_INFO_FEAT_MASK_REQ_SENT
) &&
2018 cmd
->ident
== conn
->info_ident
) {
2019 del_timer(&conn
->info_timer
);
2021 conn
->info_state
|= L2CAP_INFO_FEAT_MASK_REQ_DONE
;
2022 conn
->info_ident
= 0;
2024 l2cap_conn_start(conn
);
2030 static inline int l2cap_connect_req(struct l2cap_conn
*conn
, struct l2cap_cmd_hdr
*cmd
, u8
*data
)
2032 struct l2cap_conn_req
*req
= (struct l2cap_conn_req
*) data
;
2033 struct l2cap_conn_rsp rsp
;
2034 struct l2cap_chan
*chan
= NULL
;
2035 struct sock
*parent
, *sk
= NULL
;
2036 int result
, status
= L2CAP_CS_NO_INFO
;
2038 u16 dcid
= 0, scid
= __le16_to_cpu(req
->scid
);
2039 __le16 psm
= req
->psm
;
2041 BT_DBG("psm 0x%2.2x scid 0x%4.4x", psm
, scid
);
2043 /* Check if we have socket listening on psm */
2044 parent
= l2cap_get_sock_by_psm(BT_LISTEN
, psm
, conn
->src
);
2046 result
= L2CAP_CR_BAD_PSM
;
2050 bh_lock_sock(parent
);
2052 /* Check if the ACL is secure enough (if not SDP) */
2053 if (psm
!= cpu_to_le16(0x0001) &&
2054 !hci_conn_check_link_mode(conn
->hcon
)) {
2055 conn
->disc_reason
= 0x05;
2056 result
= L2CAP_CR_SEC_BLOCK
;
2060 result
= L2CAP_CR_NO_MEM
;
2062 /* Check for backlog size */
2063 if (sk_acceptq_is_full(parent
)) {
2064 BT_DBG("backlog full %d", parent
->sk_ack_backlog
);
2068 sk
= l2cap_sock_alloc(sock_net(parent
), NULL
, BTPROTO_L2CAP
, GFP_ATOMIC
);
2072 chan
= l2cap_chan_alloc(sk
);
2074 l2cap_sock_kill(sk
);
2078 write_lock_bh(&conn
->chan_lock
);
2080 /* Check if we already have channel with that dcid */
2081 if (__l2cap_get_chan_by_dcid(conn
, scid
)) {
2082 write_unlock_bh(&conn
->chan_lock
);
2083 sock_set_flag(sk
, SOCK_ZAPPED
);
2084 l2cap_sock_kill(sk
);
2088 hci_conn_hold(conn
->hcon
);
2090 l2cap_sock_init(sk
, parent
);
2091 bacpy(&bt_sk(sk
)->src
, conn
->src
);
2092 bacpy(&bt_sk(sk
)->dst
, conn
->dst
);
2093 l2cap_pi(sk
)->psm
= psm
;
2094 l2cap_pi(sk
)->dcid
= scid
;
2096 bt_accept_enqueue(parent
, sk
);
2098 __l2cap_chan_add(conn
, chan
);
2100 l2cap_pi(sk
)->chan
= chan
;
2102 dcid
= l2cap_pi(sk
)->scid
;
2104 l2cap_sock_set_timer(sk
, sk
->sk_sndtimeo
);
2106 chan
->ident
= cmd
->ident
;
2108 if (conn
->info_state
& L2CAP_INFO_FEAT_MASK_REQ_DONE
) {
2109 if (l2cap_check_security(sk
)) {
2110 if (bt_sk(sk
)->defer_setup
) {
2111 sk
->sk_state
= BT_CONNECT2
;
2112 result
= L2CAP_CR_PEND
;
2113 status
= L2CAP_CS_AUTHOR_PEND
;
2114 parent
->sk_data_ready(parent
, 0);
2116 sk
->sk_state
= BT_CONFIG
;
2117 result
= L2CAP_CR_SUCCESS
;
2118 status
= L2CAP_CS_NO_INFO
;
2121 sk
->sk_state
= BT_CONNECT2
;
2122 result
= L2CAP_CR_PEND
;
2123 status
= L2CAP_CS_AUTHEN_PEND
;
2126 sk
->sk_state
= BT_CONNECT2
;
2127 result
= L2CAP_CR_PEND
;
2128 status
= L2CAP_CS_NO_INFO
;
2131 write_unlock_bh(&conn
->chan_lock
);
2134 bh_unlock_sock(parent
);
2137 rsp
.scid
= cpu_to_le16(scid
);
2138 rsp
.dcid
= cpu_to_le16(dcid
);
2139 rsp
.result
= cpu_to_le16(result
);
2140 rsp
.status
= cpu_to_le16(status
);
2141 l2cap_send_cmd(conn
, cmd
->ident
, L2CAP_CONN_RSP
, sizeof(rsp
), &rsp
);
2143 if (result
== L2CAP_CR_PEND
&& status
== L2CAP_CS_NO_INFO
) {
2144 struct l2cap_info_req info
;
2145 info
.type
= cpu_to_le16(L2CAP_IT_FEAT_MASK
);
2147 conn
->info_state
|= L2CAP_INFO_FEAT_MASK_REQ_SENT
;
2148 conn
->info_ident
= l2cap_get_ident(conn
);
2150 mod_timer(&conn
->info_timer
, jiffies
+
2151 msecs_to_jiffies(L2CAP_INFO_TIMEOUT
));
2153 l2cap_send_cmd(conn
, conn
->info_ident
,
2154 L2CAP_INFO_REQ
, sizeof(info
), &info
);
2157 if (chan
&& !(l2cap_pi(sk
)->conf_state
& L2CAP_CONF_REQ_SENT
) &&
2158 result
== L2CAP_CR_SUCCESS
) {
2160 l2cap_pi(sk
)->conf_state
|= L2CAP_CONF_REQ_SENT
;
2161 l2cap_send_cmd(conn
, l2cap_get_ident(conn
), L2CAP_CONF_REQ
,
2162 l2cap_build_conf_req(chan
, buf
), buf
);
2163 chan
->num_conf_req
++;
2169 static inline int l2cap_connect_rsp(struct l2cap_conn
*conn
, struct l2cap_cmd_hdr
*cmd
, u8
*data
)
2171 struct l2cap_conn_rsp
*rsp
= (struct l2cap_conn_rsp
*) data
;
2172 u16 scid
, dcid
, result
, status
;
2173 struct l2cap_chan
*chan
;
2177 scid
= __le16_to_cpu(rsp
->scid
);
2178 dcid
= __le16_to_cpu(rsp
->dcid
);
2179 result
= __le16_to_cpu(rsp
->result
);
2180 status
= __le16_to_cpu(rsp
->status
);
2182 BT_DBG("dcid 0x%4.4x scid 0x%4.4x result 0x%2.2x status 0x%2.2x", dcid
, scid
, result
, status
);
2185 chan
= l2cap_get_chan_by_scid(conn
, scid
);
2189 chan
= l2cap_get_chan_by_ident(conn
, cmd
->ident
);
2197 case L2CAP_CR_SUCCESS
:
2198 sk
->sk_state
= BT_CONFIG
;
2200 l2cap_pi(sk
)->dcid
= dcid
;
2201 l2cap_pi(sk
)->conf_state
&= ~L2CAP_CONF_CONNECT_PEND
;
2203 if (l2cap_pi(sk
)->conf_state
& L2CAP_CONF_REQ_SENT
)
2206 l2cap_pi(sk
)->conf_state
|= L2CAP_CONF_REQ_SENT
;
2208 l2cap_send_cmd(conn
, l2cap_get_ident(conn
), L2CAP_CONF_REQ
,
2209 l2cap_build_conf_req(chan
, req
), req
);
2210 chan
->num_conf_req
++;
2214 l2cap_pi(sk
)->conf_state
|= L2CAP_CONF_CONNECT_PEND
;
2218 /* don't delete l2cap channel if sk is owned by user */
2219 if (sock_owned_by_user(sk
)) {
2220 sk
->sk_state
= BT_DISCONN
;
2221 l2cap_sock_clear_timer(sk
);
2222 l2cap_sock_set_timer(sk
, HZ
/ 5);
2226 l2cap_chan_del(chan
, ECONNREFUSED
);
2234 static inline void set_default_fcs(struct l2cap_pinfo
*pi
)
2236 /* FCS is enabled only in ERTM or streaming mode, if one or both
2239 if (pi
->mode
!= L2CAP_MODE_ERTM
&& pi
->mode
!= L2CAP_MODE_STREAMING
)
2240 pi
->fcs
= L2CAP_FCS_NONE
;
2241 else if (!(pi
->conf_state
& L2CAP_CONF_NO_FCS_RECV
))
2242 pi
->fcs
= L2CAP_FCS_CRC16
;
2245 static inline int l2cap_config_req(struct l2cap_conn
*conn
, struct l2cap_cmd_hdr
*cmd
, u16 cmd_len
, u8
*data
)
2247 struct l2cap_conf_req
*req
= (struct l2cap_conf_req
*) data
;
2250 struct l2cap_chan
*chan
;
2254 dcid
= __le16_to_cpu(req
->dcid
);
2255 flags
= __le16_to_cpu(req
->flags
);
2257 BT_DBG("dcid 0x%4.4x flags 0x%2.2x", dcid
, flags
);
2259 chan
= l2cap_get_chan_by_scid(conn
, dcid
);
2265 if (sk
->sk_state
!= BT_CONFIG
) {
2266 struct l2cap_cmd_rej rej
;
2268 rej
.reason
= cpu_to_le16(0x0002);
2269 l2cap_send_cmd(conn
, cmd
->ident
, L2CAP_COMMAND_REJ
,
2274 /* Reject if config buffer is too small. */
2275 len
= cmd_len
- sizeof(*req
);
2276 if (chan
->conf_len
+ len
> sizeof(chan
->conf_req
)) {
2277 l2cap_send_cmd(conn
, cmd
->ident
, L2CAP_CONF_RSP
,
2278 l2cap_build_conf_rsp(sk
, rsp
,
2279 L2CAP_CONF_REJECT
, flags
), rsp
);
2284 memcpy(chan
->conf_req
+ chan
->conf_len
, req
->data
, len
);
2285 chan
->conf_len
+= len
;
2287 if (flags
& 0x0001) {
2288 /* Incomplete config. Send empty response. */
2289 l2cap_send_cmd(conn
, cmd
->ident
, L2CAP_CONF_RSP
,
2290 l2cap_build_conf_rsp(sk
, rsp
,
2291 L2CAP_CONF_SUCCESS
, 0x0001), rsp
);
2295 /* Complete config. */
2296 len
= l2cap_parse_conf_req(chan
, rsp
);
2298 l2cap_send_disconn_req(conn
, chan
, ECONNRESET
);
2302 l2cap_send_cmd(conn
, cmd
->ident
, L2CAP_CONF_RSP
, len
, rsp
);
2303 chan
->num_conf_rsp
++;
2305 /* Reset config buffer. */
2308 if (!(l2cap_pi(sk
)->conf_state
& L2CAP_CONF_OUTPUT_DONE
))
2311 if (l2cap_pi(sk
)->conf_state
& L2CAP_CONF_INPUT_DONE
) {
2312 set_default_fcs(l2cap_pi(sk
));
2314 sk
->sk_state
= BT_CONNECTED
;
2316 chan
->next_tx_seq
= 0;
2317 chan
->expected_tx_seq
= 0;
2318 skb_queue_head_init(&chan
->tx_q
);
2319 if (l2cap_pi(sk
)->mode
== L2CAP_MODE_ERTM
)
2320 l2cap_ertm_init(chan
);
2322 l2cap_chan_ready(sk
);
2326 if (!(l2cap_pi(sk
)->conf_state
& L2CAP_CONF_REQ_SENT
)) {
2328 l2cap_pi(sk
)->conf_state
|= L2CAP_CONF_REQ_SENT
;
2329 l2cap_send_cmd(conn
, l2cap_get_ident(conn
), L2CAP_CONF_REQ
,
2330 l2cap_build_conf_req(chan
, buf
), buf
);
2331 chan
->num_conf_req
++;
2339 static inline int l2cap_config_rsp(struct l2cap_conn
*conn
, struct l2cap_cmd_hdr
*cmd
, u8
*data
)
2341 struct l2cap_conf_rsp
*rsp
= (struct l2cap_conf_rsp
*)data
;
2342 u16 scid
, flags
, result
;
2343 struct l2cap_chan
*chan
;
2345 int len
= cmd
->len
- sizeof(*rsp
);
2347 scid
= __le16_to_cpu(rsp
->scid
);
2348 flags
= __le16_to_cpu(rsp
->flags
);
2349 result
= __le16_to_cpu(rsp
->result
);
2351 BT_DBG("scid 0x%4.4x flags 0x%2.2x result 0x%2.2x",
2352 scid
, flags
, result
);
2354 chan
= l2cap_get_chan_by_scid(conn
, scid
);
2361 case L2CAP_CONF_SUCCESS
:
2362 l2cap_conf_rfc_get(sk
, rsp
->data
, len
);
2365 case L2CAP_CONF_UNACCEPT
:
2366 if (chan
->num_conf_rsp
<= L2CAP_CONF_MAX_CONF_RSP
) {
2369 if (len
> sizeof(req
) - sizeof(struct l2cap_conf_req
)) {
2370 l2cap_send_disconn_req(conn
, chan
, ECONNRESET
);
2374 /* throw out any old stored conf requests */
2375 result
= L2CAP_CONF_SUCCESS
;
2376 len
= l2cap_parse_conf_rsp(sk
, rsp
->data
,
2379 l2cap_send_disconn_req(conn
, chan
, ECONNRESET
);
2383 l2cap_send_cmd(conn
, l2cap_get_ident(conn
),
2384 L2CAP_CONF_REQ
, len
, req
);
2385 chan
->num_conf_req
++;
2386 if (result
!= L2CAP_CONF_SUCCESS
)
2392 sk
->sk_err
= ECONNRESET
;
2393 l2cap_sock_set_timer(sk
, HZ
* 5);
2394 l2cap_send_disconn_req(conn
, chan
, ECONNRESET
);
2401 l2cap_pi(sk
)->conf_state
|= L2CAP_CONF_INPUT_DONE
;
2403 if (l2cap_pi(sk
)->conf_state
& L2CAP_CONF_OUTPUT_DONE
) {
2404 set_default_fcs(l2cap_pi(sk
));
2406 sk
->sk_state
= BT_CONNECTED
;
2407 chan
->next_tx_seq
= 0;
2408 chan
->expected_tx_seq
= 0;
2409 skb_queue_head_init(&chan
->tx_q
);
2410 if (l2cap_pi(sk
)->mode
== L2CAP_MODE_ERTM
)
2411 l2cap_ertm_init(chan
);
2413 l2cap_chan_ready(sk
);
2421 static inline int l2cap_disconnect_req(struct l2cap_conn
*conn
, struct l2cap_cmd_hdr
*cmd
, u8
*data
)
2423 struct l2cap_disconn_req
*req
= (struct l2cap_disconn_req
*) data
;
2424 struct l2cap_disconn_rsp rsp
;
2426 struct l2cap_chan
*chan
;
2429 scid
= __le16_to_cpu(req
->scid
);
2430 dcid
= __le16_to_cpu(req
->dcid
);
2432 BT_DBG("scid 0x%4.4x dcid 0x%4.4x", scid
, dcid
);
2434 chan
= l2cap_get_chan_by_scid(conn
, dcid
);
2440 rsp
.dcid
= cpu_to_le16(l2cap_pi(sk
)->scid
);
2441 rsp
.scid
= cpu_to_le16(l2cap_pi(sk
)->dcid
);
2442 l2cap_send_cmd(conn
, cmd
->ident
, L2CAP_DISCONN_RSP
, sizeof(rsp
), &rsp
);
2444 sk
->sk_shutdown
= SHUTDOWN_MASK
;
2446 /* don't delete l2cap channel if sk is owned by user */
2447 if (sock_owned_by_user(sk
)) {
2448 sk
->sk_state
= BT_DISCONN
;
2449 l2cap_sock_clear_timer(sk
);
2450 l2cap_sock_set_timer(sk
, HZ
/ 5);
2455 l2cap_chan_del(chan
, ECONNRESET
);
2458 l2cap_sock_kill(sk
);
2462 static inline int l2cap_disconnect_rsp(struct l2cap_conn
*conn
, struct l2cap_cmd_hdr
*cmd
, u8
*data
)
2464 struct l2cap_disconn_rsp
*rsp
= (struct l2cap_disconn_rsp
*) data
;
2466 struct l2cap_chan
*chan
;
2469 scid
= __le16_to_cpu(rsp
->scid
);
2470 dcid
= __le16_to_cpu(rsp
->dcid
);
2472 BT_DBG("dcid 0x%4.4x scid 0x%4.4x", dcid
, scid
);
2474 chan
= l2cap_get_chan_by_scid(conn
, scid
);
2480 /* don't delete l2cap channel if sk is owned by user */
2481 if (sock_owned_by_user(sk
)) {
2482 sk
->sk_state
= BT_DISCONN
;
2483 l2cap_sock_clear_timer(sk
);
2484 l2cap_sock_set_timer(sk
, HZ
/ 5);
2489 l2cap_chan_del(chan
, 0);
2492 l2cap_sock_kill(sk
);
2496 static inline int l2cap_information_req(struct l2cap_conn
*conn
, struct l2cap_cmd_hdr
*cmd
, u8
*data
)
2498 struct l2cap_info_req
*req
= (struct l2cap_info_req
*) data
;
2501 type
= __le16_to_cpu(req
->type
);
2503 BT_DBG("type 0x%4.4x", type
);
2505 if (type
== L2CAP_IT_FEAT_MASK
) {
2507 u32 feat_mask
= l2cap_feat_mask
;
2508 struct l2cap_info_rsp
*rsp
= (struct l2cap_info_rsp
*) buf
;
2509 rsp
->type
= cpu_to_le16(L2CAP_IT_FEAT_MASK
);
2510 rsp
->result
= cpu_to_le16(L2CAP_IR_SUCCESS
);
2512 feat_mask
|= L2CAP_FEAT_ERTM
| L2CAP_FEAT_STREAMING
2514 put_unaligned_le32(feat_mask
, rsp
->data
);
2515 l2cap_send_cmd(conn
, cmd
->ident
,
2516 L2CAP_INFO_RSP
, sizeof(buf
), buf
);
2517 } else if (type
== L2CAP_IT_FIXED_CHAN
) {
2519 struct l2cap_info_rsp
*rsp
= (struct l2cap_info_rsp
*) buf
;
2520 rsp
->type
= cpu_to_le16(L2CAP_IT_FIXED_CHAN
);
2521 rsp
->result
= cpu_to_le16(L2CAP_IR_SUCCESS
);
2522 memcpy(buf
+ 4, l2cap_fixed_chan
, 8);
2523 l2cap_send_cmd(conn
, cmd
->ident
,
2524 L2CAP_INFO_RSP
, sizeof(buf
), buf
);
2526 struct l2cap_info_rsp rsp
;
2527 rsp
.type
= cpu_to_le16(type
);
2528 rsp
.result
= cpu_to_le16(L2CAP_IR_NOTSUPP
);
2529 l2cap_send_cmd(conn
, cmd
->ident
,
2530 L2CAP_INFO_RSP
, sizeof(rsp
), &rsp
);
2536 static inline int l2cap_information_rsp(struct l2cap_conn
*conn
, struct l2cap_cmd_hdr
*cmd
, u8
*data
)
2538 struct l2cap_info_rsp
*rsp
= (struct l2cap_info_rsp
*) data
;
2541 type
= __le16_to_cpu(rsp
->type
);
2542 result
= __le16_to_cpu(rsp
->result
);
2544 BT_DBG("type 0x%4.4x result 0x%2.2x", type
, result
);
2546 /* L2CAP Info req/rsp are unbound to channels, add extra checks */
2547 if (cmd
->ident
!= conn
->info_ident
||
2548 conn
->info_state
& L2CAP_INFO_FEAT_MASK_REQ_DONE
)
2551 del_timer(&conn
->info_timer
);
2553 if (result
!= L2CAP_IR_SUCCESS
) {
2554 conn
->info_state
|= L2CAP_INFO_FEAT_MASK_REQ_DONE
;
2555 conn
->info_ident
= 0;
2557 l2cap_conn_start(conn
);
2562 if (type
== L2CAP_IT_FEAT_MASK
) {
2563 conn
->feat_mask
= get_unaligned_le32(rsp
->data
);
2565 if (conn
->feat_mask
& L2CAP_FEAT_FIXED_CHAN
) {
2566 struct l2cap_info_req req
;
2567 req
.type
= cpu_to_le16(L2CAP_IT_FIXED_CHAN
);
2569 conn
->info_ident
= l2cap_get_ident(conn
);
2571 l2cap_send_cmd(conn
, conn
->info_ident
,
2572 L2CAP_INFO_REQ
, sizeof(req
), &req
);
2574 conn
->info_state
|= L2CAP_INFO_FEAT_MASK_REQ_DONE
;
2575 conn
->info_ident
= 0;
2577 l2cap_conn_start(conn
);
2579 } else if (type
== L2CAP_IT_FIXED_CHAN
) {
2580 conn
->info_state
|= L2CAP_INFO_FEAT_MASK_REQ_DONE
;
2581 conn
->info_ident
= 0;
2583 l2cap_conn_start(conn
);
2589 static inline int l2cap_check_conn_param(u16 min
, u16 max
, u16 latency
,
2594 if (min
> max
|| min
< 6 || max
> 3200)
2597 if (to_multiplier
< 10 || to_multiplier
> 3200)
2600 if (max
>= to_multiplier
* 8)
2603 max_latency
= (to_multiplier
* 8 / max
) - 1;
2604 if (latency
> 499 || latency
> max_latency
)
2610 static inline int l2cap_conn_param_update_req(struct l2cap_conn
*conn
,
2611 struct l2cap_cmd_hdr
*cmd
, u8
*data
)
2613 struct hci_conn
*hcon
= conn
->hcon
;
2614 struct l2cap_conn_param_update_req
*req
;
2615 struct l2cap_conn_param_update_rsp rsp
;
2616 u16 min
, max
, latency
, to_multiplier
, cmd_len
;
2619 if (!(hcon
->link_mode
& HCI_LM_MASTER
))
2622 cmd_len
= __le16_to_cpu(cmd
->len
);
2623 if (cmd_len
!= sizeof(struct l2cap_conn_param_update_req
))
2626 req
= (struct l2cap_conn_param_update_req
*) data
;
2627 min
= __le16_to_cpu(req
->min
);
2628 max
= __le16_to_cpu(req
->max
);
2629 latency
= __le16_to_cpu(req
->latency
);
2630 to_multiplier
= __le16_to_cpu(req
->to_multiplier
);
2632 BT_DBG("min 0x%4.4x max 0x%4.4x latency: 0x%4.4x Timeout: 0x%4.4x",
2633 min
, max
, latency
, to_multiplier
);
2635 memset(&rsp
, 0, sizeof(rsp
));
2637 err
= l2cap_check_conn_param(min
, max
, latency
, to_multiplier
);
2639 rsp
.result
= cpu_to_le16(L2CAP_CONN_PARAM_REJECTED
);
2641 rsp
.result
= cpu_to_le16(L2CAP_CONN_PARAM_ACCEPTED
);
2643 l2cap_send_cmd(conn
, cmd
->ident
, L2CAP_CONN_PARAM_UPDATE_RSP
,
2647 hci_le_conn_update(hcon
, min
, max
, latency
, to_multiplier
);
2652 static inline int l2cap_bredr_sig_cmd(struct l2cap_conn
*conn
,
2653 struct l2cap_cmd_hdr
*cmd
, u16 cmd_len
, u8
*data
)
2657 switch (cmd
->code
) {
2658 case L2CAP_COMMAND_REJ
:
2659 l2cap_command_rej(conn
, cmd
, data
);
2662 case L2CAP_CONN_REQ
:
2663 err
= l2cap_connect_req(conn
, cmd
, data
);
2666 case L2CAP_CONN_RSP
:
2667 err
= l2cap_connect_rsp(conn
, cmd
, data
);
2670 case L2CAP_CONF_REQ
:
2671 err
= l2cap_config_req(conn
, cmd
, cmd_len
, data
);
2674 case L2CAP_CONF_RSP
:
2675 err
= l2cap_config_rsp(conn
, cmd
, data
);
2678 case L2CAP_DISCONN_REQ
:
2679 err
= l2cap_disconnect_req(conn
, cmd
, data
);
2682 case L2CAP_DISCONN_RSP
:
2683 err
= l2cap_disconnect_rsp(conn
, cmd
, data
);
2686 case L2CAP_ECHO_REQ
:
2687 l2cap_send_cmd(conn
, cmd
->ident
, L2CAP_ECHO_RSP
, cmd_len
, data
);
2690 case L2CAP_ECHO_RSP
:
2693 case L2CAP_INFO_REQ
:
2694 err
= l2cap_information_req(conn
, cmd
, data
);
2697 case L2CAP_INFO_RSP
:
2698 err
= l2cap_information_rsp(conn
, cmd
, data
);
2702 BT_ERR("Unknown BR/EDR signaling command 0x%2.2x", cmd
->code
);
2710 static inline int l2cap_le_sig_cmd(struct l2cap_conn
*conn
,
2711 struct l2cap_cmd_hdr
*cmd
, u8
*data
)
2713 switch (cmd
->code
) {
2714 case L2CAP_COMMAND_REJ
:
2717 case L2CAP_CONN_PARAM_UPDATE_REQ
:
2718 return l2cap_conn_param_update_req(conn
, cmd
, data
);
2720 case L2CAP_CONN_PARAM_UPDATE_RSP
:
2724 BT_ERR("Unknown LE signaling command 0x%2.2x", cmd
->code
);
2729 static inline void l2cap_sig_channel(struct l2cap_conn
*conn
,
2730 struct sk_buff
*skb
)
2732 u8
*data
= skb
->data
;
2734 struct l2cap_cmd_hdr cmd
;
2737 l2cap_raw_recv(conn
, skb
);
2739 while (len
>= L2CAP_CMD_HDR_SIZE
) {
2741 memcpy(&cmd
, data
, L2CAP_CMD_HDR_SIZE
);
2742 data
+= L2CAP_CMD_HDR_SIZE
;
2743 len
-= L2CAP_CMD_HDR_SIZE
;
2745 cmd_len
= le16_to_cpu(cmd
.len
);
2747 BT_DBG("code 0x%2.2x len %d id 0x%2.2x", cmd
.code
, cmd_len
, cmd
.ident
);
2749 if (cmd_len
> len
|| !cmd
.ident
) {
2750 BT_DBG("corrupted command");
2754 if (conn
->hcon
->type
== LE_LINK
)
2755 err
= l2cap_le_sig_cmd(conn
, &cmd
, data
);
2757 err
= l2cap_bredr_sig_cmd(conn
, &cmd
, cmd_len
, data
);
2760 struct l2cap_cmd_rej rej
;
2762 BT_ERR("Wrong link type (%d)", err
);
2764 /* FIXME: Map err to a valid reason */
2765 rej
.reason
= cpu_to_le16(0);
2766 l2cap_send_cmd(conn
, cmd
.ident
, L2CAP_COMMAND_REJ
, sizeof(rej
), &rej
);
2776 static int l2cap_check_fcs(struct l2cap_pinfo
*pi
, struct sk_buff
*skb
)
2778 u16 our_fcs
, rcv_fcs
;
2779 int hdr_size
= L2CAP_HDR_SIZE
+ 2;
2781 if (pi
->fcs
== L2CAP_FCS_CRC16
) {
2782 skb_trim(skb
, skb
->len
- 2);
2783 rcv_fcs
= get_unaligned_le16(skb
->data
+ skb
->len
);
2784 our_fcs
= crc16(0, skb
->data
- hdr_size
, skb
->len
+ hdr_size
);
2786 if (our_fcs
!= rcv_fcs
)
2792 static inline void l2cap_send_i_or_rr_or_rnr(struct l2cap_chan
*chan
)
2796 chan
->frames_sent
= 0;
2798 control
|= chan
->buffer_seq
<< L2CAP_CTRL_REQSEQ_SHIFT
;
2800 if (chan
->conn_state
& L2CAP_CONN_LOCAL_BUSY
) {
2801 control
|= L2CAP_SUPER_RCV_NOT_READY
;
2802 l2cap_send_sframe(chan
, control
);
2803 chan
->conn_state
|= L2CAP_CONN_RNR_SENT
;
2806 if (chan
->conn_state
& L2CAP_CONN_REMOTE_BUSY
)
2807 l2cap_retransmit_frames(chan
);
2809 l2cap_ertm_send(chan
);
2811 if (!(chan
->conn_state
& L2CAP_CONN_LOCAL_BUSY
) &&
2812 chan
->frames_sent
== 0) {
2813 control
|= L2CAP_SUPER_RCV_READY
;
2814 l2cap_send_sframe(chan
, control
);
2818 static int l2cap_add_to_srej_queue(struct l2cap_chan
*chan
, struct sk_buff
*skb
, u8 tx_seq
, u8 sar
)
2820 struct sk_buff
*next_skb
;
2821 int tx_seq_offset
, next_tx_seq_offset
;
2823 bt_cb(skb
)->tx_seq
= tx_seq
;
2824 bt_cb(skb
)->sar
= sar
;
2826 next_skb
= skb_peek(&chan
->srej_q
);
2828 __skb_queue_tail(&chan
->srej_q
, skb
);
2832 tx_seq_offset
= (tx_seq
- chan
->buffer_seq
) % 64;
2833 if (tx_seq_offset
< 0)
2834 tx_seq_offset
+= 64;
2837 if (bt_cb(next_skb
)->tx_seq
== tx_seq
)
2840 next_tx_seq_offset
= (bt_cb(next_skb
)->tx_seq
-
2841 chan
->buffer_seq
) % 64;
2842 if (next_tx_seq_offset
< 0)
2843 next_tx_seq_offset
+= 64;
2845 if (next_tx_seq_offset
> tx_seq_offset
) {
2846 __skb_queue_before(&chan
->srej_q
, next_skb
, skb
);
2850 if (skb_queue_is_last(&chan
->srej_q
, next_skb
))
2853 } while ((next_skb
= skb_queue_next(&chan
->srej_q
, next_skb
)));
2855 __skb_queue_tail(&chan
->srej_q
, skb
);
2860 static int l2cap_ertm_reassembly_sdu(struct l2cap_chan
*chan
, struct sk_buff
*skb
, u16 control
)
2862 struct l2cap_pinfo
*pi
= l2cap_pi(chan
->sk
);
2863 struct sk_buff
*_skb
;
2866 switch (control
& L2CAP_CTRL_SAR
) {
2867 case L2CAP_SDU_UNSEGMENTED
:
2868 if (chan
->conn_state
& L2CAP_CONN_SAR_SDU
)
2871 err
= sock_queue_rcv_skb(chan
->sk
, skb
);
2877 case L2CAP_SDU_START
:
2878 if (chan
->conn_state
& L2CAP_CONN_SAR_SDU
)
2881 chan
->sdu_len
= get_unaligned_le16(skb
->data
);
2883 if (chan
->sdu_len
> pi
->imtu
)
2886 chan
->sdu
= bt_skb_alloc(chan
->sdu_len
, GFP_ATOMIC
);
2890 /* pull sdu_len bytes only after alloc, because of Local Busy
2891 * condition we have to be sure that this will be executed
2892 * only once, i.e., when alloc does not fail */
2895 memcpy(skb_put(chan
->sdu
, skb
->len
), skb
->data
, skb
->len
);
2897 chan
->conn_state
|= L2CAP_CONN_SAR_SDU
;
2898 chan
->partial_sdu_len
= skb
->len
;
2901 case L2CAP_SDU_CONTINUE
:
2902 if (!(chan
->conn_state
& L2CAP_CONN_SAR_SDU
))
2908 chan
->partial_sdu_len
+= skb
->len
;
2909 if (chan
->partial_sdu_len
> chan
->sdu_len
)
2912 memcpy(skb_put(chan
->sdu
, skb
->len
), skb
->data
, skb
->len
);
2917 if (!(chan
->conn_state
& L2CAP_CONN_SAR_SDU
))
2923 if (!(chan
->conn_state
& L2CAP_CONN_SAR_RETRY
)) {
2924 chan
->partial_sdu_len
+= skb
->len
;
2926 if (chan
->partial_sdu_len
> pi
->imtu
)
2929 if (chan
->partial_sdu_len
!= chan
->sdu_len
)
2932 memcpy(skb_put(chan
->sdu
, skb
->len
), skb
->data
, skb
->len
);
2935 _skb
= skb_clone(chan
->sdu
, GFP_ATOMIC
);
2937 chan
->conn_state
|= L2CAP_CONN_SAR_RETRY
;
2941 err
= sock_queue_rcv_skb(chan
->sk
, _skb
);
2944 chan
->conn_state
|= L2CAP_CONN_SAR_RETRY
;
2948 chan
->conn_state
&= ~L2CAP_CONN_SAR_RETRY
;
2949 chan
->conn_state
&= ~L2CAP_CONN_SAR_SDU
;
2951 kfree_skb(chan
->sdu
);
2959 kfree_skb(chan
->sdu
);
2963 l2cap_send_disconn_req(pi
->conn
, chan
, ECONNRESET
);
2968 static int l2cap_try_push_rx_skb(struct l2cap_chan
*chan
)
2970 struct sk_buff
*skb
;
2974 while ((skb
= skb_dequeue(&chan
->busy_q
))) {
2975 control
= bt_cb(skb
)->sar
<< L2CAP_CTRL_SAR_SHIFT
;
2976 err
= l2cap_ertm_reassembly_sdu(chan
, skb
, control
);
2978 skb_queue_head(&chan
->busy_q
, skb
);
2982 chan
->buffer_seq
= (chan
->buffer_seq
+ 1) % 64;
2985 if (!(chan
->conn_state
& L2CAP_CONN_RNR_SENT
))
2988 control
= chan
->buffer_seq
<< L2CAP_CTRL_REQSEQ_SHIFT
;
2989 control
|= L2CAP_SUPER_RCV_READY
| L2CAP_CTRL_POLL
;
2990 l2cap_send_sframe(chan
, control
);
2991 chan
->retry_count
= 1;
2993 del_timer(&chan
->retrans_timer
);
2994 __mod_monitor_timer();
2996 chan
->conn_state
|= L2CAP_CONN_WAIT_F
;
2999 chan
->conn_state
&= ~L2CAP_CONN_LOCAL_BUSY
;
3000 chan
->conn_state
&= ~L2CAP_CONN_RNR_SENT
;
3002 BT_DBG("chan %p, Exit local busy", chan
);
3007 static void l2cap_busy_work(struct work_struct
*work
)
3009 DECLARE_WAITQUEUE(wait
, current
);
3010 struct l2cap_chan
*chan
=
3011 container_of(work
, struct l2cap_chan
, busy_work
);
3012 struct sock
*sk
= chan
->sk
;
3013 int n_tries
= 0, timeo
= HZ
/5, err
;
3014 struct sk_buff
*skb
;
3018 add_wait_queue(sk_sleep(sk
), &wait
);
3019 while ((skb
= skb_peek(&chan
->busy_q
))) {
3020 set_current_state(TASK_INTERRUPTIBLE
);
3022 if (n_tries
++ > L2CAP_LOCAL_BUSY_TRIES
) {
3024 l2cap_send_disconn_req(l2cap_pi(sk
)->conn
, chan
, EBUSY
);
3031 if (signal_pending(current
)) {
3032 err
= sock_intr_errno(timeo
);
3037 timeo
= schedule_timeout(timeo
);
3040 err
= sock_error(sk
);
3044 if (l2cap_try_push_rx_skb(chan
) == 0)
3048 set_current_state(TASK_RUNNING
);
3049 remove_wait_queue(sk_sleep(sk
), &wait
);
3054 static int l2cap_push_rx_skb(struct l2cap_chan
*chan
, struct sk_buff
*skb
, u16 control
)
3058 if (chan
->conn_state
& L2CAP_CONN_LOCAL_BUSY
) {
3059 bt_cb(skb
)->sar
= control
>> L2CAP_CTRL_SAR_SHIFT
;
3060 __skb_queue_tail(&chan
->busy_q
, skb
);
3061 return l2cap_try_push_rx_skb(chan
);
3066 err
= l2cap_ertm_reassembly_sdu(chan
, skb
, control
);
3068 chan
->buffer_seq
= (chan
->buffer_seq
+ 1) % 64;
3072 /* Busy Condition */
3073 BT_DBG("chan %p, Enter local busy", chan
);
3075 chan
->conn_state
|= L2CAP_CONN_LOCAL_BUSY
;
3076 bt_cb(skb
)->sar
= control
>> L2CAP_CTRL_SAR_SHIFT
;
3077 __skb_queue_tail(&chan
->busy_q
, skb
);
3079 sctrl
= chan
->buffer_seq
<< L2CAP_CTRL_REQSEQ_SHIFT
;
3080 sctrl
|= L2CAP_SUPER_RCV_NOT_READY
;
3081 l2cap_send_sframe(chan
, sctrl
);
3083 chan
->conn_state
|= L2CAP_CONN_RNR_SENT
;
3085 del_timer(&chan
->ack_timer
);
3087 queue_work(_busy_wq
, &chan
->busy_work
);
3092 static int l2cap_streaming_reassembly_sdu(struct l2cap_chan
*chan
, struct sk_buff
*skb
, u16 control
)
3094 struct l2cap_pinfo
*pi
= l2cap_pi(chan
->sk
);
3095 struct sk_buff
*_skb
;
3099 * TODO: We have to notify the userland if some data is lost with the
3103 switch (control
& L2CAP_CTRL_SAR
) {
3104 case L2CAP_SDU_UNSEGMENTED
:
3105 if (chan
->conn_state
& L2CAP_CONN_SAR_SDU
) {
3106 kfree_skb(chan
->sdu
);
3110 err
= sock_queue_rcv_skb(chan
->sk
, skb
);
3116 case L2CAP_SDU_START
:
3117 if (chan
->conn_state
& L2CAP_CONN_SAR_SDU
) {
3118 kfree_skb(chan
->sdu
);
3122 chan
->sdu_len
= get_unaligned_le16(skb
->data
);
3125 if (chan
->sdu_len
> pi
->imtu
) {
3130 chan
->sdu
= bt_skb_alloc(chan
->sdu_len
, GFP_ATOMIC
);
3136 memcpy(skb_put(chan
->sdu
, skb
->len
), skb
->data
, skb
->len
);
3138 chan
->conn_state
|= L2CAP_CONN_SAR_SDU
;
3139 chan
->partial_sdu_len
= skb
->len
;
3143 case L2CAP_SDU_CONTINUE
:
3144 if (!(chan
->conn_state
& L2CAP_CONN_SAR_SDU
))
3147 memcpy(skb_put(chan
->sdu
, skb
->len
), skb
->data
, skb
->len
);
3149 chan
->partial_sdu_len
+= skb
->len
;
3150 if (chan
->partial_sdu_len
> chan
->sdu_len
)
3151 kfree_skb(chan
->sdu
);
3158 if (!(chan
->conn_state
& L2CAP_CONN_SAR_SDU
))
3161 memcpy(skb_put(chan
->sdu
, skb
->len
), skb
->data
, skb
->len
);
3163 chan
->conn_state
&= ~L2CAP_CONN_SAR_SDU
;
3164 chan
->partial_sdu_len
+= skb
->len
;
3166 if (chan
->partial_sdu_len
> pi
->imtu
)
3169 if (chan
->partial_sdu_len
== chan
->sdu_len
) {
3170 _skb
= skb_clone(chan
->sdu
, GFP_ATOMIC
);
3171 err
= sock_queue_rcv_skb(chan
->sk
, _skb
);
3178 kfree_skb(chan
->sdu
);
3186 static void l2cap_check_srej_gap(struct l2cap_chan
*chan
, u8 tx_seq
)
3188 struct sk_buff
*skb
;
3191 while ((skb
= skb_peek(&chan
->srej_q
))) {
3192 if (bt_cb(skb
)->tx_seq
!= tx_seq
)
3195 skb
= skb_dequeue(&chan
->srej_q
);
3196 control
= bt_cb(skb
)->sar
<< L2CAP_CTRL_SAR_SHIFT
;
3197 l2cap_ertm_reassembly_sdu(chan
, skb
, control
);
3198 chan
->buffer_seq_srej
=
3199 (chan
->buffer_seq_srej
+ 1) % 64;
3200 tx_seq
= (tx_seq
+ 1) % 64;
3204 static void l2cap_resend_srejframe(struct l2cap_chan
*chan
, u8 tx_seq
)
3206 struct srej_list
*l
, *tmp
;
3209 list_for_each_entry_safe(l
, tmp
, &chan
->srej_l
, list
) {
3210 if (l
->tx_seq
== tx_seq
) {
3215 control
= L2CAP_SUPER_SELECT_REJECT
;
3216 control
|= l
->tx_seq
<< L2CAP_CTRL_REQSEQ_SHIFT
;
3217 l2cap_send_sframe(chan
, control
);
3219 list_add_tail(&l
->list
, &chan
->srej_l
);
3223 static void l2cap_send_srejframe(struct l2cap_chan
*chan
, u8 tx_seq
)
3225 struct srej_list
*new;
3228 while (tx_seq
!= chan
->expected_tx_seq
) {
3229 control
= L2CAP_SUPER_SELECT_REJECT
;
3230 control
|= chan
->expected_tx_seq
<< L2CAP_CTRL_REQSEQ_SHIFT
;
3231 l2cap_send_sframe(chan
, control
);
3233 new = kzalloc(sizeof(struct srej_list
), GFP_ATOMIC
);
3234 new->tx_seq
= chan
->expected_tx_seq
;
3235 chan
->expected_tx_seq
= (chan
->expected_tx_seq
+ 1) % 64;
3236 list_add_tail(&new->list
, &chan
->srej_l
);
3238 chan
->expected_tx_seq
= (chan
->expected_tx_seq
+ 1) % 64;
3241 static inline int l2cap_data_channel_iframe(struct l2cap_chan
*chan
, u16 rx_control
, struct sk_buff
*skb
)
3243 struct l2cap_pinfo
*pi
= l2cap_pi(chan
->sk
);
3244 u8 tx_seq
= __get_txseq(rx_control
);
3245 u8 req_seq
= __get_reqseq(rx_control
);
3246 u8 sar
= rx_control
>> L2CAP_CTRL_SAR_SHIFT
;
3247 int tx_seq_offset
, expected_tx_seq_offset
;
3248 int num_to_ack
= (pi
->tx_win
/6) + 1;
3251 BT_DBG("chan %p len %d tx_seq %d rx_control 0x%4.4x", chan
, skb
->len
,
3252 tx_seq
, rx_control
);
3254 if (L2CAP_CTRL_FINAL
& rx_control
&&
3255 chan
->conn_state
& L2CAP_CONN_WAIT_F
) {
3256 del_timer(&chan
->monitor_timer
);
3257 if (chan
->unacked_frames
> 0)
3258 __mod_retrans_timer();
3259 chan
->conn_state
&= ~L2CAP_CONN_WAIT_F
;
3262 chan
->expected_ack_seq
= req_seq
;
3263 l2cap_drop_acked_frames(chan
);
3265 if (tx_seq
== chan
->expected_tx_seq
)
3268 tx_seq_offset
= (tx_seq
- chan
->buffer_seq
) % 64;
3269 if (tx_seq_offset
< 0)
3270 tx_seq_offset
+= 64;
3272 /* invalid tx_seq */
3273 if (tx_seq_offset
>= pi
->tx_win
) {
3274 l2cap_send_disconn_req(pi
->conn
, chan
, ECONNRESET
);
3278 if (chan
->conn_state
== L2CAP_CONN_LOCAL_BUSY
)
3281 if (chan
->conn_state
& L2CAP_CONN_SREJ_SENT
) {
3282 struct srej_list
*first
;
3284 first
= list_first_entry(&chan
->srej_l
,
3285 struct srej_list
, list
);
3286 if (tx_seq
== first
->tx_seq
) {
3287 l2cap_add_to_srej_queue(chan
, skb
, tx_seq
, sar
);
3288 l2cap_check_srej_gap(chan
, tx_seq
);
3290 list_del(&first
->list
);
3293 if (list_empty(&chan
->srej_l
)) {
3294 chan
->buffer_seq
= chan
->buffer_seq_srej
;
3295 chan
->conn_state
&= ~L2CAP_CONN_SREJ_SENT
;
3296 l2cap_send_ack(chan
);
3297 BT_DBG("chan %p, Exit SREJ_SENT", chan
);
3300 struct srej_list
*l
;
3302 /* duplicated tx_seq */
3303 if (l2cap_add_to_srej_queue(chan
, skb
, tx_seq
, sar
) < 0)
3306 list_for_each_entry(l
, &chan
->srej_l
, list
) {
3307 if (l
->tx_seq
== tx_seq
) {
3308 l2cap_resend_srejframe(chan
, tx_seq
);
3312 l2cap_send_srejframe(chan
, tx_seq
);
3315 expected_tx_seq_offset
=
3316 (chan
->expected_tx_seq
- chan
->buffer_seq
) % 64;
3317 if (expected_tx_seq_offset
< 0)
3318 expected_tx_seq_offset
+= 64;
3320 /* duplicated tx_seq */
3321 if (tx_seq_offset
< expected_tx_seq_offset
)
3324 chan
->conn_state
|= L2CAP_CONN_SREJ_SENT
;
3326 BT_DBG("chan %p, Enter SREJ", chan
);
3328 INIT_LIST_HEAD(&chan
->srej_l
);
3329 chan
->buffer_seq_srej
= chan
->buffer_seq
;
3331 __skb_queue_head_init(&chan
->srej_q
);
3332 __skb_queue_head_init(&chan
->busy_q
);
3333 l2cap_add_to_srej_queue(chan
, skb
, tx_seq
, sar
);
3335 chan
->conn_state
|= L2CAP_CONN_SEND_PBIT
;
3337 l2cap_send_srejframe(chan
, tx_seq
);
3339 del_timer(&chan
->ack_timer
);
3344 chan
->expected_tx_seq
= (chan
->expected_tx_seq
+ 1) % 64;
3346 if (chan
->conn_state
& L2CAP_CONN_SREJ_SENT
) {
3347 bt_cb(skb
)->tx_seq
= tx_seq
;
3348 bt_cb(skb
)->sar
= sar
;
3349 __skb_queue_tail(&chan
->srej_q
, skb
);
3353 err
= l2cap_push_rx_skb(chan
, skb
, rx_control
);
3357 if (rx_control
& L2CAP_CTRL_FINAL
) {
3358 if (chan
->conn_state
& L2CAP_CONN_REJ_ACT
)
3359 chan
->conn_state
&= ~L2CAP_CONN_REJ_ACT
;
3361 l2cap_retransmit_frames(chan
);
3366 chan
->num_acked
= (chan
->num_acked
+ 1) % num_to_ack
;
3367 if (chan
->num_acked
== num_to_ack
- 1)
3368 l2cap_send_ack(chan
);
3377 static inline void l2cap_data_channel_rrframe(struct l2cap_chan
*chan
, u16 rx_control
)
3379 BT_DBG("chan %p, req_seq %d ctrl 0x%4.4x", chan
, __get_reqseq(rx_control
),
3382 chan
->expected_ack_seq
= __get_reqseq(rx_control
);
3383 l2cap_drop_acked_frames(chan
);
3385 if (rx_control
& L2CAP_CTRL_POLL
) {
3386 chan
->conn_state
|= L2CAP_CONN_SEND_FBIT
;
3387 if (chan
->conn_state
& L2CAP_CONN_SREJ_SENT
) {
3388 if ((chan
->conn_state
& L2CAP_CONN_REMOTE_BUSY
) &&
3389 (chan
->unacked_frames
> 0))
3390 __mod_retrans_timer();
3392 chan
->conn_state
&= ~L2CAP_CONN_REMOTE_BUSY
;
3393 l2cap_send_srejtail(chan
);
3395 l2cap_send_i_or_rr_or_rnr(chan
);
3398 } else if (rx_control
& L2CAP_CTRL_FINAL
) {
3399 chan
->conn_state
&= ~L2CAP_CONN_REMOTE_BUSY
;
3401 if (chan
->conn_state
& L2CAP_CONN_REJ_ACT
)
3402 chan
->conn_state
&= ~L2CAP_CONN_REJ_ACT
;
3404 l2cap_retransmit_frames(chan
);
3407 if ((chan
->conn_state
& L2CAP_CONN_REMOTE_BUSY
) &&
3408 (chan
->unacked_frames
> 0))
3409 __mod_retrans_timer();
3411 chan
->conn_state
&= ~L2CAP_CONN_REMOTE_BUSY
;
3412 if (chan
->conn_state
& L2CAP_CONN_SREJ_SENT
)
3413 l2cap_send_ack(chan
);
3415 l2cap_ertm_send(chan
);
3419 static inline void l2cap_data_channel_rejframe(struct l2cap_chan
*chan
, u16 rx_control
)
3421 u8 tx_seq
= __get_reqseq(rx_control
);
3423 BT_DBG("chan %p, req_seq %d ctrl 0x%4.4x", chan
, tx_seq
, rx_control
);
3425 chan
->conn_state
&= ~L2CAP_CONN_REMOTE_BUSY
;
3427 chan
->expected_ack_seq
= tx_seq
;
3428 l2cap_drop_acked_frames(chan
);
3430 if (rx_control
& L2CAP_CTRL_FINAL
) {
3431 if (chan
->conn_state
& L2CAP_CONN_REJ_ACT
)
3432 chan
->conn_state
&= ~L2CAP_CONN_REJ_ACT
;
3434 l2cap_retransmit_frames(chan
);
3436 l2cap_retransmit_frames(chan
);
3438 if (chan
->conn_state
& L2CAP_CONN_WAIT_F
)
3439 chan
->conn_state
|= L2CAP_CONN_REJ_ACT
;
3442 static inline void l2cap_data_channel_srejframe(struct l2cap_chan
*chan
, u16 rx_control
)
3444 u8 tx_seq
= __get_reqseq(rx_control
);
3446 BT_DBG("chan %p, req_seq %d ctrl 0x%4.4x", chan
, tx_seq
, rx_control
);
3448 chan
->conn_state
&= ~L2CAP_CONN_REMOTE_BUSY
;
3450 if (rx_control
& L2CAP_CTRL_POLL
) {
3451 chan
->expected_ack_seq
= tx_seq
;
3452 l2cap_drop_acked_frames(chan
);
3454 chan
->conn_state
|= L2CAP_CONN_SEND_FBIT
;
3455 l2cap_retransmit_one_frame(chan
, tx_seq
);
3457 l2cap_ertm_send(chan
);
3459 if (chan
->conn_state
& L2CAP_CONN_WAIT_F
) {
3460 chan
->srej_save_reqseq
= tx_seq
;
3461 chan
->conn_state
|= L2CAP_CONN_SREJ_ACT
;
3463 } else if (rx_control
& L2CAP_CTRL_FINAL
) {
3464 if ((chan
->conn_state
& L2CAP_CONN_SREJ_ACT
) &&
3465 chan
->srej_save_reqseq
== tx_seq
)
3466 chan
->conn_state
&= ~L2CAP_CONN_SREJ_ACT
;
3468 l2cap_retransmit_one_frame(chan
, tx_seq
);
3470 l2cap_retransmit_one_frame(chan
, tx_seq
);
3471 if (chan
->conn_state
& L2CAP_CONN_WAIT_F
) {
3472 chan
->srej_save_reqseq
= tx_seq
;
3473 chan
->conn_state
|= L2CAP_CONN_SREJ_ACT
;
3478 static inline void l2cap_data_channel_rnrframe(struct l2cap_chan
*chan
, u16 rx_control
)
3480 u8 tx_seq
= __get_reqseq(rx_control
);
3482 BT_DBG("chan %p, req_seq %d ctrl 0x%4.4x", chan
, tx_seq
, rx_control
);
3484 chan
->conn_state
|= L2CAP_CONN_REMOTE_BUSY
;
3485 chan
->expected_ack_seq
= tx_seq
;
3486 l2cap_drop_acked_frames(chan
);
3488 if (rx_control
& L2CAP_CTRL_POLL
)
3489 chan
->conn_state
|= L2CAP_CONN_SEND_FBIT
;
3491 if (!(chan
->conn_state
& L2CAP_CONN_SREJ_SENT
)) {
3492 del_timer(&chan
->retrans_timer
);
3493 if (rx_control
& L2CAP_CTRL_POLL
)
3494 l2cap_send_rr_or_rnr(chan
, L2CAP_CTRL_FINAL
);
3498 if (rx_control
& L2CAP_CTRL_POLL
)
3499 l2cap_send_srejtail(chan
);
3501 l2cap_send_sframe(chan
, L2CAP_SUPER_RCV_READY
);
3504 static inline int l2cap_data_channel_sframe(struct l2cap_chan
*chan
, u16 rx_control
, struct sk_buff
*skb
)
3506 BT_DBG("chan %p rx_control 0x%4.4x len %d", chan
, rx_control
, skb
->len
);
3508 if (L2CAP_CTRL_FINAL
& rx_control
&&
3509 chan
->conn_state
& L2CAP_CONN_WAIT_F
) {
3510 del_timer(&chan
->monitor_timer
);
3511 if (chan
->unacked_frames
> 0)
3512 __mod_retrans_timer();
3513 chan
->conn_state
&= ~L2CAP_CONN_WAIT_F
;
3516 switch (rx_control
& L2CAP_CTRL_SUPERVISE
) {
3517 case L2CAP_SUPER_RCV_READY
:
3518 l2cap_data_channel_rrframe(chan
, rx_control
);
3521 case L2CAP_SUPER_REJECT
:
3522 l2cap_data_channel_rejframe(chan
, rx_control
);
3525 case L2CAP_SUPER_SELECT_REJECT
:
3526 l2cap_data_channel_srejframe(chan
, rx_control
);
3529 case L2CAP_SUPER_RCV_NOT_READY
:
3530 l2cap_data_channel_rnrframe(chan
, rx_control
);
3538 static int l2cap_ertm_data_rcv(struct sock
*sk
, struct sk_buff
*skb
)
3540 struct l2cap_chan
*chan
= l2cap_pi(sk
)->chan
;
3541 struct l2cap_pinfo
*pi
= l2cap_pi(sk
);
3544 int len
, next_tx_seq_offset
, req_seq_offset
;
3546 control
= get_unaligned_le16(skb
->data
);
3551 * We can just drop the corrupted I-frame here.
3552 * Receiver will miss it and start proper recovery
3553 * procedures and ask retransmission.
3555 if (l2cap_check_fcs(pi
, skb
))
3558 if (__is_sar_start(control
) && __is_iframe(control
))
3561 if (pi
->fcs
== L2CAP_FCS_CRC16
)
3564 if (len
> pi
->mps
) {
3565 l2cap_send_disconn_req(pi
->conn
, chan
, ECONNRESET
);
3569 req_seq
= __get_reqseq(control
);
3570 req_seq_offset
= (req_seq
- chan
->expected_ack_seq
) % 64;
3571 if (req_seq_offset
< 0)
3572 req_seq_offset
+= 64;
3574 next_tx_seq_offset
=
3575 (chan
->next_tx_seq
- chan
->expected_ack_seq
) % 64;
3576 if (next_tx_seq_offset
< 0)
3577 next_tx_seq_offset
+= 64;
3579 /* check for invalid req-seq */
3580 if (req_seq_offset
> next_tx_seq_offset
) {
3581 l2cap_send_disconn_req(pi
->conn
, chan
, ECONNRESET
);
3585 if (__is_iframe(control
)) {
3587 l2cap_send_disconn_req(pi
->conn
, chan
, ECONNRESET
);
3591 l2cap_data_channel_iframe(chan
, control
, skb
);
3595 l2cap_send_disconn_req(pi
->conn
, chan
, ECONNRESET
);
3599 l2cap_data_channel_sframe(chan
, control
, skb
);
3609 static inline int l2cap_data_channel(struct l2cap_conn
*conn
, u16 cid
, struct sk_buff
*skb
)
3611 struct l2cap_chan
*chan
;
3613 struct l2cap_pinfo
*pi
;
3618 chan
= l2cap_get_chan_by_scid(conn
, cid
);
3620 BT_DBG("unknown cid 0x%4.4x", cid
);
3627 BT_DBG("chan %p, len %d", chan
, skb
->len
);
3629 if (sk
->sk_state
!= BT_CONNECTED
)
3633 case L2CAP_MODE_BASIC
:
3634 /* If socket recv buffers overflows we drop data here
3635 * which is *bad* because L2CAP has to be reliable.
3636 * But we don't have any other choice. L2CAP doesn't
3637 * provide flow control mechanism. */
3639 if (pi
->imtu
< skb
->len
)
3642 if (!sock_queue_rcv_skb(sk
, skb
))
3646 case L2CAP_MODE_ERTM
:
3647 if (!sock_owned_by_user(sk
)) {
3648 l2cap_ertm_data_rcv(sk
, skb
);
3650 if (sk_add_backlog(sk
, skb
))
3656 case L2CAP_MODE_STREAMING
:
3657 control
= get_unaligned_le16(skb
->data
);
3661 if (l2cap_check_fcs(pi
, skb
))
3664 if (__is_sar_start(control
))
3667 if (pi
->fcs
== L2CAP_FCS_CRC16
)
3670 if (len
> pi
->mps
|| len
< 0 || __is_sframe(control
))
3673 tx_seq
= __get_txseq(control
);
3675 if (chan
->expected_tx_seq
== tx_seq
)
3676 chan
->expected_tx_seq
= (chan
->expected_tx_seq
+ 1) % 64;
3678 chan
->expected_tx_seq
= (tx_seq
+ 1) % 64;
3680 l2cap_streaming_reassembly_sdu(chan
, skb
, control
);
3685 BT_DBG("chan %p: bad mode 0x%2.2x", chan
, pi
->mode
);
3699 static inline int l2cap_conless_channel(struct l2cap_conn
*conn
, __le16 psm
, struct sk_buff
*skb
)
3703 sk
= l2cap_get_sock_by_psm(0, psm
, conn
->src
);
3709 BT_DBG("sk %p, len %d", sk
, skb
->len
);
3711 if (sk
->sk_state
!= BT_BOUND
&& sk
->sk_state
!= BT_CONNECTED
)
3714 if (l2cap_pi(sk
)->imtu
< skb
->len
)
3717 if (!sock_queue_rcv_skb(sk
, skb
))
3729 static inline int l2cap_att_channel(struct l2cap_conn
*conn
, __le16 cid
, struct sk_buff
*skb
)
3733 sk
= l2cap_get_sock_by_scid(0, cid
, conn
->src
);
3739 BT_DBG("sk %p, len %d", sk
, skb
->len
);
3741 if (sk
->sk_state
!= BT_BOUND
&& sk
->sk_state
!= BT_CONNECTED
)
3744 if (l2cap_pi(sk
)->imtu
< skb
->len
)
3747 if (!sock_queue_rcv_skb(sk
, skb
))
3759 static void l2cap_recv_frame(struct l2cap_conn
*conn
, struct sk_buff
*skb
)
3761 struct l2cap_hdr
*lh
= (void *) skb
->data
;
3765 skb_pull(skb
, L2CAP_HDR_SIZE
);
3766 cid
= __le16_to_cpu(lh
->cid
);
3767 len
= __le16_to_cpu(lh
->len
);
3769 if (len
!= skb
->len
) {
3774 BT_DBG("len %d, cid 0x%4.4x", len
, cid
);
3777 case L2CAP_CID_LE_SIGNALING
:
3778 case L2CAP_CID_SIGNALING
:
3779 l2cap_sig_channel(conn
, skb
);
3782 case L2CAP_CID_CONN_LESS
:
3783 psm
= get_unaligned_le16(skb
->data
);
3785 l2cap_conless_channel(conn
, psm
, skb
);
3788 case L2CAP_CID_LE_DATA
:
3789 l2cap_att_channel(conn
, cid
, skb
);
3793 l2cap_data_channel(conn
, cid
, skb
);
3798 /* ---- L2CAP interface with lower layer (HCI) ---- */
3800 static int l2cap_connect_ind(struct hci_dev
*hdev
, bdaddr_t
*bdaddr
, u8 type
)
3802 int exact
= 0, lm1
= 0, lm2
= 0;
3803 register struct sock
*sk
;
3804 struct hlist_node
*node
;
3806 if (type
!= ACL_LINK
)
3809 BT_DBG("hdev %s, bdaddr %s", hdev
->name
, batostr(bdaddr
));
3811 /* Find listening sockets and check their link_mode */
3812 read_lock(&l2cap_sk_list
.lock
);
3813 sk_for_each(sk
, node
, &l2cap_sk_list
.head
) {
3814 if (sk
->sk_state
!= BT_LISTEN
)
3817 if (!bacmp(&bt_sk(sk
)->src
, &hdev
->bdaddr
)) {
3818 lm1
|= HCI_LM_ACCEPT
;
3819 if (l2cap_pi(sk
)->role_switch
)
3820 lm1
|= HCI_LM_MASTER
;
3822 } else if (!bacmp(&bt_sk(sk
)->src
, BDADDR_ANY
)) {
3823 lm2
|= HCI_LM_ACCEPT
;
3824 if (l2cap_pi(sk
)->role_switch
)
3825 lm2
|= HCI_LM_MASTER
;
3828 read_unlock(&l2cap_sk_list
.lock
);
3830 return exact
? lm1
: lm2
;
3833 static int l2cap_connect_cfm(struct hci_conn
*hcon
, u8 status
)
3835 struct l2cap_conn
*conn
;
3837 BT_DBG("hcon %p bdaddr %s status %d", hcon
, batostr(&hcon
->dst
), status
);
3839 if (!(hcon
->type
== ACL_LINK
|| hcon
->type
== LE_LINK
))
3843 conn
= l2cap_conn_add(hcon
, status
);
3845 l2cap_conn_ready(conn
);
3847 l2cap_conn_del(hcon
, bt_err(status
));
3852 static int l2cap_disconn_ind(struct hci_conn
*hcon
)
3854 struct l2cap_conn
*conn
= hcon
->l2cap_data
;
3856 BT_DBG("hcon %p", hcon
);
3858 if (hcon
->type
!= ACL_LINK
|| !conn
)
3861 return conn
->disc_reason
;
3864 static int l2cap_disconn_cfm(struct hci_conn
*hcon
, u8 reason
)
3866 BT_DBG("hcon %p reason %d", hcon
, reason
);
3868 if (!(hcon
->type
== ACL_LINK
|| hcon
->type
== LE_LINK
))
3871 l2cap_conn_del(hcon
, bt_err(reason
));
3876 static inline void l2cap_check_encryption(struct sock
*sk
, u8 encrypt
)
3878 if (sk
->sk_type
!= SOCK_SEQPACKET
&& sk
->sk_type
!= SOCK_STREAM
)
3881 if (encrypt
== 0x00) {
3882 if (l2cap_pi(sk
)->sec_level
== BT_SECURITY_MEDIUM
) {
3883 l2cap_sock_clear_timer(sk
);
3884 l2cap_sock_set_timer(sk
, HZ
* 5);
3885 } else if (l2cap_pi(sk
)->sec_level
== BT_SECURITY_HIGH
)
3886 __l2cap_sock_close(sk
, ECONNREFUSED
);
3888 if (l2cap_pi(sk
)->sec_level
== BT_SECURITY_MEDIUM
)
3889 l2cap_sock_clear_timer(sk
);
3893 static int l2cap_security_cfm(struct hci_conn
*hcon
, u8 status
, u8 encrypt
)
3895 struct l2cap_conn
*conn
= hcon
->l2cap_data
;
3896 struct l2cap_chan
*chan
;
3901 BT_DBG("conn %p", conn
);
3903 read_lock(&conn
->chan_lock
);
3905 list_for_each_entry(chan
, &conn
->chan_l
, list
) {
3906 struct sock
*sk
= chan
->sk
;
3910 if (l2cap_pi(sk
)->conf_state
& L2CAP_CONF_CONNECT_PEND
) {
3915 if (!status
&& (sk
->sk_state
== BT_CONNECTED
||
3916 sk
->sk_state
== BT_CONFIG
)) {
3917 l2cap_check_encryption(sk
, encrypt
);
3922 if (sk
->sk_state
== BT_CONNECT
) {
3924 struct l2cap_conn_req req
;
3925 req
.scid
= cpu_to_le16(l2cap_pi(sk
)->scid
);
3926 req
.psm
= l2cap_pi(sk
)->psm
;
3928 chan
->ident
= l2cap_get_ident(conn
);
3929 l2cap_pi(sk
)->conf_state
|= L2CAP_CONF_CONNECT_PEND
;
3931 l2cap_send_cmd(conn
, chan
->ident
,
3932 L2CAP_CONN_REQ
, sizeof(req
), &req
);
3934 l2cap_sock_clear_timer(sk
);
3935 l2cap_sock_set_timer(sk
, HZ
/ 10);
3937 } else if (sk
->sk_state
== BT_CONNECT2
) {
3938 struct l2cap_conn_rsp rsp
;
3942 sk
->sk_state
= BT_CONFIG
;
3943 result
= L2CAP_CR_SUCCESS
;
3945 sk
->sk_state
= BT_DISCONN
;
3946 l2cap_sock_set_timer(sk
, HZ
/ 10);
3947 result
= L2CAP_CR_SEC_BLOCK
;
3950 rsp
.scid
= cpu_to_le16(l2cap_pi(sk
)->dcid
);
3951 rsp
.dcid
= cpu_to_le16(l2cap_pi(sk
)->scid
);
3952 rsp
.result
= cpu_to_le16(result
);
3953 rsp
.status
= cpu_to_le16(L2CAP_CS_NO_INFO
);
3954 l2cap_send_cmd(conn
, chan
->ident
, L2CAP_CONN_RSP
,
3961 read_unlock(&conn
->chan_lock
);
3966 static int l2cap_recv_acldata(struct hci_conn
*hcon
, struct sk_buff
*skb
, u16 flags
)
3968 struct l2cap_conn
*conn
= hcon
->l2cap_data
;
3971 conn
= l2cap_conn_add(hcon
, 0);
3976 BT_DBG("conn %p len %d flags 0x%x", conn
, skb
->len
, flags
);
3978 if (!(flags
& ACL_CONT
)) {
3979 struct l2cap_hdr
*hdr
;
3980 struct l2cap_chan
*chan
;
3985 BT_ERR("Unexpected start frame (len %d)", skb
->len
);
3986 kfree_skb(conn
->rx_skb
);
3987 conn
->rx_skb
= NULL
;
3989 l2cap_conn_unreliable(conn
, ECOMM
);
3992 /* Start fragment always begin with Basic L2CAP header */
3993 if (skb
->len
< L2CAP_HDR_SIZE
) {
3994 BT_ERR("Frame is too short (len %d)", skb
->len
);
3995 l2cap_conn_unreliable(conn
, ECOMM
);
3999 hdr
= (struct l2cap_hdr
*) skb
->data
;
4000 len
= __le16_to_cpu(hdr
->len
) + L2CAP_HDR_SIZE
;
4001 cid
= __le16_to_cpu(hdr
->cid
);
4003 if (len
== skb
->len
) {
4004 /* Complete frame received */
4005 l2cap_recv_frame(conn
, skb
);
4009 BT_DBG("Start: total len %d, frag len %d", len
, skb
->len
);
4011 if (skb
->len
> len
) {
4012 BT_ERR("Frame is too long (len %d, expected len %d)",
4014 l2cap_conn_unreliable(conn
, ECOMM
);
4018 chan
= l2cap_get_chan_by_scid(conn
, cid
);
4020 if (chan
&& chan
->sk
) {
4021 struct sock
*sk
= chan
->sk
;
4023 if (l2cap_pi(sk
)->imtu
< len
- L2CAP_HDR_SIZE
) {
4024 BT_ERR("Frame exceeding recv MTU (len %d, "
4026 l2cap_pi(sk
)->imtu
);
4028 l2cap_conn_unreliable(conn
, ECOMM
);
4034 /* Allocate skb for the complete frame (with header) */
4035 conn
->rx_skb
= bt_skb_alloc(len
, GFP_ATOMIC
);
4039 skb_copy_from_linear_data(skb
, skb_put(conn
->rx_skb
, skb
->len
),
4041 conn
->rx_len
= len
- skb
->len
;
4043 BT_DBG("Cont: frag len %d (expecting %d)", skb
->len
, conn
->rx_len
);
4045 if (!conn
->rx_len
) {
4046 BT_ERR("Unexpected continuation frame (len %d)", skb
->len
);
4047 l2cap_conn_unreliable(conn
, ECOMM
);
4051 if (skb
->len
> conn
->rx_len
) {
4052 BT_ERR("Fragment is too long (len %d, expected %d)",
4053 skb
->len
, conn
->rx_len
);
4054 kfree_skb(conn
->rx_skb
);
4055 conn
->rx_skb
= NULL
;
4057 l2cap_conn_unreliable(conn
, ECOMM
);
4061 skb_copy_from_linear_data(skb
, skb_put(conn
->rx_skb
, skb
->len
),
4063 conn
->rx_len
-= skb
->len
;
4065 if (!conn
->rx_len
) {
4066 /* Complete frame received */
4067 l2cap_recv_frame(conn
, conn
->rx_skb
);
4068 conn
->rx_skb
= NULL
;
4077 static int l2cap_debugfs_show(struct seq_file
*f
, void *p
)
4080 struct hlist_node
*node
;
4082 read_lock_bh(&l2cap_sk_list
.lock
);
4084 sk_for_each(sk
, node
, &l2cap_sk_list
.head
) {
4085 struct l2cap_pinfo
*pi
= l2cap_pi(sk
);
4087 seq_printf(f
, "%s %s %d %d 0x%4.4x 0x%4.4x %d %d %d %d\n",
4088 batostr(&bt_sk(sk
)->src
),
4089 batostr(&bt_sk(sk
)->dst
),
4090 sk
->sk_state
, __le16_to_cpu(pi
->psm
),
4092 pi
->imtu
, pi
->omtu
, pi
->sec_level
,
4096 read_unlock_bh(&l2cap_sk_list
.lock
);
4101 static int l2cap_debugfs_open(struct inode
*inode
, struct file
*file
)
4103 return single_open(file
, l2cap_debugfs_show
, inode
->i_private
);
4106 static const struct file_operations l2cap_debugfs_fops
= {
4107 .open
= l2cap_debugfs_open
,
4109 .llseek
= seq_lseek
,
4110 .release
= single_release
,
/* debugfs dentry created in l2cap_init() and removed in l2cap_exit() */
static struct dentry *l2cap_debugfs;
4115 static struct hci_proto l2cap_hci_proto
= {
4117 .id
= HCI_PROTO_L2CAP
,
4118 .connect_ind
= l2cap_connect_ind
,
4119 .connect_cfm
= l2cap_connect_cfm
,
4120 .disconn_ind
= l2cap_disconn_ind
,
4121 .disconn_cfm
= l2cap_disconn_cfm
,
4122 .security_cfm
= l2cap_security_cfm
,
4123 .recv_acldata
= l2cap_recv_acldata
4126 int __init
l2cap_init(void)
4130 err
= l2cap_init_sockets();
4134 _busy_wq
= create_singlethread_workqueue("l2cap");
4140 err
= hci_register_proto(&l2cap_hci_proto
);
4142 BT_ERR("L2CAP protocol registration failed");
4143 bt_sock_unregister(BTPROTO_L2CAP
);
4148 l2cap_debugfs
= debugfs_create_file("l2cap", 0444,
4149 bt_debugfs
, NULL
, &l2cap_debugfs_fops
);
4151 BT_ERR("Failed to create L2CAP debug file");
4157 destroy_workqueue(_busy_wq
);
4158 l2cap_cleanup_sockets();
4162 void l2cap_exit(void)
4164 debugfs_remove(l2cap_debugfs
);
4166 flush_workqueue(_busy_wq
);
4167 destroy_workqueue(_busy_wq
);
4169 if (hci_unregister_proto(&l2cap_hci_proto
) < 0)
4170 BT_ERR("L2CAP protocol unregistration failed");
4172 l2cap_cleanup_sockets();
4175 module_param(disable_ertm
, bool, 0644);
4176 MODULE_PARM_DESC(disable_ertm
, "Disable enhanced retransmission mode");