2 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (C) 2000-2001 Qualcomm Incorporated
4 Copyright (C) 2009-2010 Gustavo F. Padovan <gustavo@padovan.org>
5 Copyright (C) 2010 Google Inc.
7 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
9 This program is free software; you can redistribute it and/or modify
10 it under the terms of the GNU General Public License version 2 as
11 published by the Free Software Foundation;
13 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
14 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
15 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
16 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
17 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
18 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
19 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
20 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
22 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
23 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
24 SOFTWARE IS DISCLAIMED.
27 /* Bluetooth L2CAP core. */
29 #include <linux/module.h>
31 #include <linux/types.h>
32 #include <linux/capability.h>
33 #include <linux/errno.h>
34 #include <linux/kernel.h>
35 #include <linux/sched.h>
36 #include <linux/slab.h>
37 #include <linux/poll.h>
38 #include <linux/fcntl.h>
39 #include <linux/init.h>
40 #include <linux/interrupt.h>
41 #include <linux/socket.h>
42 #include <linux/skbuff.h>
43 #include <linux/list.h>
44 #include <linux/device.h>
45 #include <linux/debugfs.h>
46 #include <linux/seq_file.h>
47 #include <linux/uaccess.h>
48 #include <linux/crc16.h>
51 #include <asm/system.h>
52 #include <asm/unaligned.h>
54 #include <net/bluetooth/bluetooth.h>
55 #include <net/bluetooth/hci_core.h>
56 #include <net/bluetooth/l2cap.h>
57 #include <net/bluetooth/smp.h>
62 static u32 l2cap_feat_mask
= L2CAP_FEAT_FIXED_CHAN
;
63 static u8 l2cap_fixed_chan
[8] = { 0x02, };
65 static LIST_HEAD(chan_list
);
66 static DEFINE_RWLOCK(chan_list_lock
);
68 static struct sk_buff
*l2cap_build_cmd(struct l2cap_conn
*conn
,
69 u8 code
, u8 ident
, u16 dlen
, void *data
);
70 static void l2cap_send_cmd(struct l2cap_conn
*conn
, u8 ident
, u8 code
, u16 len
,
72 static int l2cap_build_conf_req(struct l2cap_chan
*chan
, void *data
);
73 static void l2cap_send_disconn_req(struct l2cap_conn
*conn
,
74 struct l2cap_chan
*chan
, int err
);
76 static int l2cap_ertm_data_rcv(struct sock
*sk
, struct sk_buff
*skb
);
78 /* ---- L2CAP channels ---- */
80 static inline void chan_hold(struct l2cap_chan
*c
)
82 atomic_inc(&c
->refcnt
);
85 static inline void chan_put(struct l2cap_chan
*c
)
87 if (atomic_dec_and_test(&c
->refcnt
))
91 static struct l2cap_chan
*__l2cap_get_chan_by_dcid(struct l2cap_conn
*conn
, u16 cid
)
95 list_for_each_entry(c
, &conn
->chan_l
, list
) {
103 static struct l2cap_chan
*__l2cap_get_chan_by_scid(struct l2cap_conn
*conn
, u16 cid
)
105 struct l2cap_chan
*c
;
107 list_for_each_entry(c
, &conn
->chan_l
, list
) {
114 /* Find channel with given SCID.
115 * Returns locked socket */
116 static struct l2cap_chan
*l2cap_get_chan_by_scid(struct l2cap_conn
*conn
, u16 cid
)
118 struct l2cap_chan
*c
;
120 read_lock(&conn
->chan_lock
);
121 c
= __l2cap_get_chan_by_scid(conn
, cid
);
124 read_unlock(&conn
->chan_lock
);
128 static struct l2cap_chan
*__l2cap_get_chan_by_ident(struct l2cap_conn
*conn
, u8 ident
)
130 struct l2cap_chan
*c
;
132 list_for_each_entry(c
, &conn
->chan_l
, list
) {
133 if (c
->ident
== ident
)
139 static inline struct l2cap_chan
*l2cap_get_chan_by_ident(struct l2cap_conn
*conn
, u8 ident
)
141 struct l2cap_chan
*c
;
143 read_lock(&conn
->chan_lock
);
144 c
= __l2cap_get_chan_by_ident(conn
, ident
);
147 read_unlock(&conn
->chan_lock
);
151 static struct l2cap_chan
*__l2cap_global_chan_by_addr(__le16 psm
, bdaddr_t
*src
)
153 struct l2cap_chan
*c
;
155 list_for_each_entry(c
, &chan_list
, global_l
) {
156 if (c
->sport
== psm
&& !bacmp(&bt_sk(c
->sk
)->src
, src
))
165 int l2cap_add_psm(struct l2cap_chan
*chan
, bdaddr_t
*src
, __le16 psm
)
169 write_lock_bh(&chan_list_lock
);
171 if (psm
&& __l2cap_global_chan_by_addr(psm
, src
)) {
184 for (p
= 0x1001; p
< 0x1100; p
+= 2)
185 if (!__l2cap_global_chan_by_addr(cpu_to_le16(p
), src
)) {
186 chan
->psm
= cpu_to_le16(p
);
187 chan
->sport
= cpu_to_le16(p
);
194 write_unlock_bh(&chan_list_lock
);
198 int l2cap_add_scid(struct l2cap_chan
*chan
, __u16 scid
)
200 write_lock_bh(&chan_list_lock
);
204 write_unlock_bh(&chan_list_lock
);
209 static u16
l2cap_alloc_cid(struct l2cap_conn
*conn
)
211 u16 cid
= L2CAP_CID_DYN_START
;
213 for (; cid
< L2CAP_CID_DYN_END
; cid
++) {
214 if (!__l2cap_get_chan_by_scid(conn
, cid
))
221 static void l2cap_set_timer(struct l2cap_chan
*chan
, struct timer_list
*timer
, long timeout
)
223 BT_DBG("chan %p state %d timeout %ld", chan
->sk
, chan
->state
, timeout
);
225 if (!mod_timer(timer
, jiffies
+ msecs_to_jiffies(timeout
)))
229 static void l2cap_clear_timer(struct l2cap_chan
*chan
, struct timer_list
*timer
)
231 BT_DBG("chan %p state %d", chan
, chan
->state
);
233 if (timer_pending(timer
) && del_timer(timer
))
237 static void l2cap_state_change(struct l2cap_chan
*chan
, int state
)
240 chan
->ops
->state_change(chan
->data
, state
);
243 static void l2cap_chan_timeout(unsigned long arg
)
245 struct l2cap_chan
*chan
= (struct l2cap_chan
*) arg
;
246 struct sock
*sk
= chan
->sk
;
249 BT_DBG("chan %p state %d", chan
, chan
->state
);
253 if (sock_owned_by_user(sk
)) {
254 /* sk is owned by user. Try again later */
255 __set_chan_timer(chan
, HZ
/ 5);
261 if (chan
->state
== BT_CONNECTED
|| chan
->state
== BT_CONFIG
)
262 reason
= ECONNREFUSED
;
263 else if (chan
->state
== BT_CONNECT
&&
264 chan
->sec_level
!= BT_SECURITY_SDP
)
265 reason
= ECONNREFUSED
;
269 l2cap_chan_close(chan
, reason
);
273 chan
->ops
->close(chan
->data
);
277 struct l2cap_chan
*l2cap_chan_create(struct sock
*sk
)
279 struct l2cap_chan
*chan
;
281 chan
= kzalloc(sizeof(*chan
), GFP_ATOMIC
);
287 write_lock_bh(&chan_list_lock
);
288 list_add(&chan
->global_l
, &chan_list
);
289 write_unlock_bh(&chan_list_lock
);
291 setup_timer(&chan
->chan_timer
, l2cap_chan_timeout
, (unsigned long) chan
);
293 chan
->state
= BT_OPEN
;
295 atomic_set(&chan
->refcnt
, 1);
300 void l2cap_chan_destroy(struct l2cap_chan
*chan
)
302 write_lock_bh(&chan_list_lock
);
303 list_del(&chan
->global_l
);
304 write_unlock_bh(&chan_list_lock
);
309 static void __l2cap_chan_add(struct l2cap_conn
*conn
, struct l2cap_chan
*chan
)
311 BT_DBG("conn %p, psm 0x%2.2x, dcid 0x%4.4x", conn
,
312 chan
->psm
, chan
->dcid
);
314 conn
->disc_reason
= 0x13;
318 if (chan
->chan_type
== L2CAP_CHAN_CONN_ORIENTED
) {
319 if (conn
->hcon
->type
== LE_LINK
) {
321 chan
->omtu
= L2CAP_LE_DEFAULT_MTU
;
322 chan
->scid
= L2CAP_CID_LE_DATA
;
323 chan
->dcid
= L2CAP_CID_LE_DATA
;
325 /* Alloc CID for connection-oriented socket */
326 chan
->scid
= l2cap_alloc_cid(conn
);
327 chan
->omtu
= L2CAP_DEFAULT_MTU
;
329 } else if (chan
->chan_type
== L2CAP_CHAN_CONN_LESS
) {
330 /* Connectionless socket */
331 chan
->scid
= L2CAP_CID_CONN_LESS
;
332 chan
->dcid
= L2CAP_CID_CONN_LESS
;
333 chan
->omtu
= L2CAP_DEFAULT_MTU
;
335 /* Raw socket can send/recv signalling messages only */
336 chan
->scid
= L2CAP_CID_SIGNALING
;
337 chan
->dcid
= L2CAP_CID_SIGNALING
;
338 chan
->omtu
= L2CAP_DEFAULT_MTU
;
341 chan
->local_id
= L2CAP_BESTEFFORT_ID
;
342 chan
->local_stype
= L2CAP_SERV_BESTEFFORT
;
343 chan
->local_msdu
= L2CAP_DEFAULT_MAX_SDU_SIZE
;
344 chan
->local_sdu_itime
= L2CAP_DEFAULT_SDU_ITIME
;
345 chan
->local_acc_lat
= L2CAP_DEFAULT_ACC_LAT
;
346 chan
->local_flush_to
= L2CAP_DEFAULT_FLUSH_TO
;
350 list_add(&chan
->list
, &conn
->chan_l
);
354 * Must be called on the locked socket. */
355 static void l2cap_chan_del(struct l2cap_chan
*chan
, int err
)
357 struct sock
*sk
= chan
->sk
;
358 struct l2cap_conn
*conn
= chan
->conn
;
359 struct sock
*parent
= bt_sk(sk
)->parent
;
361 __clear_chan_timer(chan
);
363 BT_DBG("chan %p, conn %p, err %d", chan
, conn
, err
);
366 /* Delete from channel list */
367 write_lock_bh(&conn
->chan_lock
);
368 list_del(&chan
->list
);
369 write_unlock_bh(&conn
->chan_lock
);
373 hci_conn_put(conn
->hcon
);
376 l2cap_state_change(chan
, BT_CLOSED
);
377 sock_set_flag(sk
, SOCK_ZAPPED
);
383 bt_accept_unlink(sk
);
384 parent
->sk_data_ready(parent
, 0);
386 sk
->sk_state_change(sk
);
388 if (!(test_bit(CONF_OUTPUT_DONE
, &chan
->conf_state
) &&
389 test_bit(CONF_INPUT_DONE
, &chan
->conf_state
)))
392 skb_queue_purge(&chan
->tx_q
);
394 if (chan
->mode
== L2CAP_MODE_ERTM
) {
395 struct srej_list
*l
, *tmp
;
397 __clear_retrans_timer(chan
);
398 __clear_monitor_timer(chan
);
399 __clear_ack_timer(chan
);
401 skb_queue_purge(&chan
->srej_q
);
403 list_for_each_entry_safe(l
, tmp
, &chan
->srej_l
, list
) {
410 static void l2cap_chan_cleanup_listen(struct sock
*parent
)
414 BT_DBG("parent %p", parent
);
416 /* Close not yet accepted channels */
417 while ((sk
= bt_accept_dequeue(parent
, NULL
))) {
418 struct l2cap_chan
*chan
= l2cap_pi(sk
)->chan
;
419 __clear_chan_timer(chan
);
421 l2cap_chan_close(chan
, ECONNRESET
);
423 chan
->ops
->close(chan
->data
);
427 void l2cap_chan_close(struct l2cap_chan
*chan
, int reason
)
429 struct l2cap_conn
*conn
= chan
->conn
;
430 struct sock
*sk
= chan
->sk
;
432 BT_DBG("chan %p state %d socket %p", chan
, chan
->state
, sk
->sk_socket
);
434 switch (chan
->state
) {
436 l2cap_chan_cleanup_listen(sk
);
438 l2cap_state_change(chan
, BT_CLOSED
);
439 sock_set_flag(sk
, SOCK_ZAPPED
);
444 if (chan
->chan_type
== L2CAP_CHAN_CONN_ORIENTED
&&
445 conn
->hcon
->type
== ACL_LINK
) {
446 __clear_chan_timer(chan
);
447 __set_chan_timer(chan
, sk
->sk_sndtimeo
);
448 l2cap_send_disconn_req(conn
, chan
, reason
);
450 l2cap_chan_del(chan
, reason
);
454 if (chan
->chan_type
== L2CAP_CHAN_CONN_ORIENTED
&&
455 conn
->hcon
->type
== ACL_LINK
) {
456 struct l2cap_conn_rsp rsp
;
459 if (bt_sk(sk
)->defer_setup
)
460 result
= L2CAP_CR_SEC_BLOCK
;
462 result
= L2CAP_CR_BAD_PSM
;
463 l2cap_state_change(chan
, BT_DISCONN
);
465 rsp
.scid
= cpu_to_le16(chan
->dcid
);
466 rsp
.dcid
= cpu_to_le16(chan
->scid
);
467 rsp
.result
= cpu_to_le16(result
);
468 rsp
.status
= cpu_to_le16(L2CAP_CS_NO_INFO
);
469 l2cap_send_cmd(conn
, chan
->ident
, L2CAP_CONN_RSP
,
473 l2cap_chan_del(chan
, reason
);
478 l2cap_chan_del(chan
, reason
);
482 sock_set_flag(sk
, SOCK_ZAPPED
);
487 static inline u8
l2cap_get_auth_type(struct l2cap_chan
*chan
)
489 if (chan
->chan_type
== L2CAP_CHAN_RAW
) {
490 switch (chan
->sec_level
) {
491 case BT_SECURITY_HIGH
:
492 return HCI_AT_DEDICATED_BONDING_MITM
;
493 case BT_SECURITY_MEDIUM
:
494 return HCI_AT_DEDICATED_BONDING
;
496 return HCI_AT_NO_BONDING
;
498 } else if (chan
->psm
== cpu_to_le16(0x0001)) {
499 if (chan
->sec_level
== BT_SECURITY_LOW
)
500 chan
->sec_level
= BT_SECURITY_SDP
;
502 if (chan
->sec_level
== BT_SECURITY_HIGH
)
503 return HCI_AT_NO_BONDING_MITM
;
505 return HCI_AT_NO_BONDING
;
507 switch (chan
->sec_level
) {
508 case BT_SECURITY_HIGH
:
509 return HCI_AT_GENERAL_BONDING_MITM
;
510 case BT_SECURITY_MEDIUM
:
511 return HCI_AT_GENERAL_BONDING
;
513 return HCI_AT_NO_BONDING
;
518 /* Service level security */
519 static inline int l2cap_check_security(struct l2cap_chan
*chan
)
521 struct l2cap_conn
*conn
= chan
->conn
;
524 auth_type
= l2cap_get_auth_type(chan
);
526 return hci_conn_security(conn
->hcon
, chan
->sec_level
, auth_type
);
529 static u8
l2cap_get_ident(struct l2cap_conn
*conn
)
533 /* Get next available identificator.
534 * 1 - 128 are used by kernel.
535 * 129 - 199 are reserved.
536 * 200 - 254 are used by utilities like l2ping, etc.
539 spin_lock_bh(&conn
->lock
);
541 if (++conn
->tx_ident
> 128)
546 spin_unlock_bh(&conn
->lock
);
551 static void l2cap_send_cmd(struct l2cap_conn
*conn
, u8 ident
, u8 code
, u16 len
, void *data
)
553 struct sk_buff
*skb
= l2cap_build_cmd(conn
, code
, ident
, len
, data
);
556 BT_DBG("code 0x%2.2x", code
);
561 if (lmp_no_flush_capable(conn
->hcon
->hdev
))
562 flags
= ACL_START_NO_FLUSH
;
566 bt_cb(skb
)->force_active
= BT_POWER_FORCE_ACTIVE_ON
;
568 hci_send_acl(conn
->hcon
, skb
, flags
);
571 static inline void l2cap_send_sframe(struct l2cap_chan
*chan
, u16 control
)
574 struct l2cap_hdr
*lh
;
575 struct l2cap_conn
*conn
= chan
->conn
;
579 if (chan
->state
!= BT_CONNECTED
)
582 if (test_bit(FLAG_EXT_CTRL
, &chan
->flags
))
583 hlen
= L2CAP_EXT_HDR_SIZE
;
585 hlen
= L2CAP_ENH_HDR_SIZE
;
587 if (chan
->fcs
== L2CAP_FCS_CRC16
)
590 BT_DBG("chan %p, control 0x%2.2x", chan
, control
);
592 count
= min_t(unsigned int, conn
->mtu
, hlen
);
594 control
|= __set_sframe(chan
);
596 if (test_and_clear_bit(CONN_SEND_FBIT
, &chan
->conn_state
))
597 control
|= __set_ctrl_final(chan
);
599 if (test_and_clear_bit(CONN_SEND_PBIT
, &chan
->conn_state
))
600 control
|= __set_ctrl_poll(chan
);
602 skb
= bt_skb_alloc(count
, GFP_ATOMIC
);
606 lh
= (struct l2cap_hdr
*) skb_put(skb
, L2CAP_HDR_SIZE
);
607 lh
->len
= cpu_to_le16(hlen
- L2CAP_HDR_SIZE
);
608 lh
->cid
= cpu_to_le16(chan
->dcid
);
609 put_unaligned_le16(control
, skb_put(skb
, 2));
611 if (chan
->fcs
== L2CAP_FCS_CRC16
) {
612 u16 fcs
= crc16(0, (u8
*)lh
, count
- 2);
613 put_unaligned_le16(fcs
, skb_put(skb
, 2));
616 if (lmp_no_flush_capable(conn
->hcon
->hdev
))
617 flags
= ACL_START_NO_FLUSH
;
621 bt_cb(skb
)->force_active
= test_bit(FLAG_FORCE_ACTIVE
, &chan
->flags
);
623 hci_send_acl(chan
->conn
->hcon
, skb
, flags
);
626 static inline void l2cap_send_rr_or_rnr(struct l2cap_chan
*chan
, u16 control
)
628 if (test_bit(CONN_LOCAL_BUSY
, &chan
->conn_state
)) {
629 control
|= __set_ctrl_super(chan
, L2CAP_SUPER_RNR
);
630 set_bit(CONN_RNR_SENT
, &chan
->conn_state
);
632 control
|= __set_ctrl_super(chan
, L2CAP_SUPER_RR
);
634 control
|= __set_reqseq(chan
, chan
->buffer_seq
);
636 l2cap_send_sframe(chan
, control
);
639 static inline int __l2cap_no_conn_pending(struct l2cap_chan
*chan
)
641 return !test_bit(CONF_CONNECT_PEND
, &chan
->conf_state
);
644 static void l2cap_do_start(struct l2cap_chan
*chan
)
646 struct l2cap_conn
*conn
= chan
->conn
;
648 if (conn
->info_state
& L2CAP_INFO_FEAT_MASK_REQ_SENT
) {
649 if (!(conn
->info_state
& L2CAP_INFO_FEAT_MASK_REQ_DONE
))
652 if (l2cap_check_security(chan
) &&
653 __l2cap_no_conn_pending(chan
)) {
654 struct l2cap_conn_req req
;
655 req
.scid
= cpu_to_le16(chan
->scid
);
658 chan
->ident
= l2cap_get_ident(conn
);
659 set_bit(CONF_CONNECT_PEND
, &chan
->conf_state
);
661 l2cap_send_cmd(conn
, chan
->ident
, L2CAP_CONN_REQ
,
665 struct l2cap_info_req req
;
666 req
.type
= cpu_to_le16(L2CAP_IT_FEAT_MASK
);
668 conn
->info_state
|= L2CAP_INFO_FEAT_MASK_REQ_SENT
;
669 conn
->info_ident
= l2cap_get_ident(conn
);
671 mod_timer(&conn
->info_timer
, jiffies
+
672 msecs_to_jiffies(L2CAP_INFO_TIMEOUT
));
674 l2cap_send_cmd(conn
, conn
->info_ident
,
675 L2CAP_INFO_REQ
, sizeof(req
), &req
);
679 static inline int l2cap_mode_supported(__u8 mode
, __u32 feat_mask
)
681 u32 local_feat_mask
= l2cap_feat_mask
;
683 local_feat_mask
|= L2CAP_FEAT_ERTM
| L2CAP_FEAT_STREAMING
;
686 case L2CAP_MODE_ERTM
:
687 return L2CAP_FEAT_ERTM
& feat_mask
& local_feat_mask
;
688 case L2CAP_MODE_STREAMING
:
689 return L2CAP_FEAT_STREAMING
& feat_mask
& local_feat_mask
;
695 static void l2cap_send_disconn_req(struct l2cap_conn
*conn
, struct l2cap_chan
*chan
, int err
)
698 struct l2cap_disconn_req req
;
705 if (chan
->mode
== L2CAP_MODE_ERTM
) {
706 __clear_retrans_timer(chan
);
707 __clear_monitor_timer(chan
);
708 __clear_ack_timer(chan
);
711 req
.dcid
= cpu_to_le16(chan
->dcid
);
712 req
.scid
= cpu_to_le16(chan
->scid
);
713 l2cap_send_cmd(conn
, l2cap_get_ident(conn
),
714 L2CAP_DISCONN_REQ
, sizeof(req
), &req
);
716 l2cap_state_change(chan
, BT_DISCONN
);
720 /* ---- L2CAP connections ---- */
721 static void l2cap_conn_start(struct l2cap_conn
*conn
)
723 struct l2cap_chan
*chan
, *tmp
;
725 BT_DBG("conn %p", conn
);
727 read_lock(&conn
->chan_lock
);
729 list_for_each_entry_safe(chan
, tmp
, &conn
->chan_l
, list
) {
730 struct sock
*sk
= chan
->sk
;
734 if (chan
->chan_type
!= L2CAP_CHAN_CONN_ORIENTED
) {
739 if (chan
->state
== BT_CONNECT
) {
740 struct l2cap_conn_req req
;
742 if (!l2cap_check_security(chan
) ||
743 !__l2cap_no_conn_pending(chan
)) {
748 if (!l2cap_mode_supported(chan
->mode
, conn
->feat_mask
)
749 && test_bit(CONF_STATE2_DEVICE
,
750 &chan
->conf_state
)) {
751 /* l2cap_chan_close() calls list_del(chan)
752 * so release the lock */
753 read_unlock(&conn
->chan_lock
);
754 l2cap_chan_close(chan
, ECONNRESET
);
755 read_lock(&conn
->chan_lock
);
760 req
.scid
= cpu_to_le16(chan
->scid
);
763 chan
->ident
= l2cap_get_ident(conn
);
764 set_bit(CONF_CONNECT_PEND
, &chan
->conf_state
);
766 l2cap_send_cmd(conn
, chan
->ident
, L2CAP_CONN_REQ
,
769 } else if (chan
->state
== BT_CONNECT2
) {
770 struct l2cap_conn_rsp rsp
;
772 rsp
.scid
= cpu_to_le16(chan
->dcid
);
773 rsp
.dcid
= cpu_to_le16(chan
->scid
);
775 if (l2cap_check_security(chan
)) {
776 if (bt_sk(sk
)->defer_setup
) {
777 struct sock
*parent
= bt_sk(sk
)->parent
;
778 rsp
.result
= cpu_to_le16(L2CAP_CR_PEND
);
779 rsp
.status
= cpu_to_le16(L2CAP_CS_AUTHOR_PEND
);
781 parent
->sk_data_ready(parent
, 0);
784 l2cap_state_change(chan
, BT_CONFIG
);
785 rsp
.result
= cpu_to_le16(L2CAP_CR_SUCCESS
);
786 rsp
.status
= cpu_to_le16(L2CAP_CS_NO_INFO
);
789 rsp
.result
= cpu_to_le16(L2CAP_CR_PEND
);
790 rsp
.status
= cpu_to_le16(L2CAP_CS_AUTHEN_PEND
);
793 l2cap_send_cmd(conn
, chan
->ident
, L2CAP_CONN_RSP
,
796 if (test_bit(CONF_REQ_SENT
, &chan
->conf_state
) ||
797 rsp
.result
!= L2CAP_CR_SUCCESS
) {
802 set_bit(CONF_REQ_SENT
, &chan
->conf_state
);
803 l2cap_send_cmd(conn
, l2cap_get_ident(conn
), L2CAP_CONF_REQ
,
804 l2cap_build_conf_req(chan
, buf
), buf
);
805 chan
->num_conf_req
++;
811 read_unlock(&conn
->chan_lock
);
814 /* Find socket with cid and source bdaddr.
815 * Returns closest match, locked.
817 static struct l2cap_chan
*l2cap_global_chan_by_scid(int state
, __le16 cid
, bdaddr_t
*src
)
819 struct l2cap_chan
*c
, *c1
= NULL
;
821 read_lock(&chan_list_lock
);
823 list_for_each_entry(c
, &chan_list
, global_l
) {
824 struct sock
*sk
= c
->sk
;
826 if (state
&& c
->state
!= state
)
829 if (c
->scid
== cid
) {
831 if (!bacmp(&bt_sk(sk
)->src
, src
)) {
832 read_unlock(&chan_list_lock
);
837 if (!bacmp(&bt_sk(sk
)->src
, BDADDR_ANY
))
842 read_unlock(&chan_list_lock
);
847 static void l2cap_le_conn_ready(struct l2cap_conn
*conn
)
849 struct sock
*parent
, *sk
;
850 struct l2cap_chan
*chan
, *pchan
;
854 /* Check if we have socket listening on cid */
855 pchan
= l2cap_global_chan_by_scid(BT_LISTEN
, L2CAP_CID_LE_DATA
,
862 bh_lock_sock(parent
);
864 /* Check for backlog size */
865 if (sk_acceptq_is_full(parent
)) {
866 BT_DBG("backlog full %d", parent
->sk_ack_backlog
);
870 chan
= pchan
->ops
->new_connection(pchan
->data
);
876 write_lock_bh(&conn
->chan_lock
);
878 hci_conn_hold(conn
->hcon
);
880 bacpy(&bt_sk(sk
)->src
, conn
->src
);
881 bacpy(&bt_sk(sk
)->dst
, conn
->dst
);
883 bt_accept_enqueue(parent
, sk
);
885 __l2cap_chan_add(conn
, chan
);
887 __set_chan_timer(chan
, sk
->sk_sndtimeo
);
889 l2cap_state_change(chan
, BT_CONNECTED
);
890 parent
->sk_data_ready(parent
, 0);
892 write_unlock_bh(&conn
->chan_lock
);
895 bh_unlock_sock(parent
);
898 static void l2cap_chan_ready(struct sock
*sk
)
900 struct l2cap_chan
*chan
= l2cap_pi(sk
)->chan
;
901 struct sock
*parent
= bt_sk(sk
)->parent
;
903 BT_DBG("sk %p, parent %p", sk
, parent
);
905 chan
->conf_state
= 0;
906 __clear_chan_timer(chan
);
908 l2cap_state_change(chan
, BT_CONNECTED
);
909 sk
->sk_state_change(sk
);
912 parent
->sk_data_ready(parent
, 0);
915 static void l2cap_conn_ready(struct l2cap_conn
*conn
)
917 struct l2cap_chan
*chan
;
919 BT_DBG("conn %p", conn
);
921 if (!conn
->hcon
->out
&& conn
->hcon
->type
== LE_LINK
)
922 l2cap_le_conn_ready(conn
);
924 if (conn
->hcon
->out
&& conn
->hcon
->type
== LE_LINK
)
925 smp_conn_security(conn
, conn
->hcon
->pending_sec_level
);
927 read_lock(&conn
->chan_lock
);
929 list_for_each_entry(chan
, &conn
->chan_l
, list
) {
930 struct sock
*sk
= chan
->sk
;
934 if (conn
->hcon
->type
== LE_LINK
) {
935 if (smp_conn_security(conn
, chan
->sec_level
))
936 l2cap_chan_ready(sk
);
938 } else if (chan
->chan_type
!= L2CAP_CHAN_CONN_ORIENTED
) {
939 __clear_chan_timer(chan
);
940 l2cap_state_change(chan
, BT_CONNECTED
);
941 sk
->sk_state_change(sk
);
943 } else if (chan
->state
== BT_CONNECT
)
944 l2cap_do_start(chan
);
949 read_unlock(&conn
->chan_lock
);
952 /* Notify sockets that we cannot guaranty reliability anymore */
953 static void l2cap_conn_unreliable(struct l2cap_conn
*conn
, int err
)
955 struct l2cap_chan
*chan
;
957 BT_DBG("conn %p", conn
);
959 read_lock(&conn
->chan_lock
);
961 list_for_each_entry(chan
, &conn
->chan_l
, list
) {
962 struct sock
*sk
= chan
->sk
;
964 if (test_bit(FLAG_FORCE_RELIABLE
, &chan
->flags
))
968 read_unlock(&conn
->chan_lock
);
971 static void l2cap_info_timeout(unsigned long arg
)
973 struct l2cap_conn
*conn
= (void *) arg
;
975 conn
->info_state
|= L2CAP_INFO_FEAT_MASK_REQ_DONE
;
976 conn
->info_ident
= 0;
978 l2cap_conn_start(conn
);
981 static void l2cap_conn_del(struct hci_conn
*hcon
, int err
)
983 struct l2cap_conn
*conn
= hcon
->l2cap_data
;
984 struct l2cap_chan
*chan
, *l
;
990 BT_DBG("hcon %p conn %p, err %d", hcon
, conn
, err
);
992 kfree_skb(conn
->rx_skb
);
995 list_for_each_entry_safe(chan
, l
, &conn
->chan_l
, list
) {
998 l2cap_chan_del(chan
, err
);
1000 chan
->ops
->close(chan
->data
);
1003 if (conn
->info_state
& L2CAP_INFO_FEAT_MASK_REQ_SENT
)
1004 del_timer_sync(&conn
->info_timer
);
1006 if (test_and_clear_bit(HCI_CONN_LE_SMP_PEND
, &hcon
->pend
)) {
1007 del_timer(&conn
->security_timer
);
1008 smp_chan_destroy(conn
);
1011 hcon
->l2cap_data
= NULL
;
1015 static void security_timeout(unsigned long arg
)
1017 struct l2cap_conn
*conn
= (void *) arg
;
1019 l2cap_conn_del(conn
->hcon
, ETIMEDOUT
);
1022 static struct l2cap_conn
*l2cap_conn_add(struct hci_conn
*hcon
, u8 status
)
1024 struct l2cap_conn
*conn
= hcon
->l2cap_data
;
1029 conn
= kzalloc(sizeof(struct l2cap_conn
), GFP_ATOMIC
);
1033 hcon
->l2cap_data
= conn
;
1036 BT_DBG("hcon %p conn %p", hcon
, conn
);
1038 if (hcon
->hdev
->le_mtu
&& hcon
->type
== LE_LINK
)
1039 conn
->mtu
= hcon
->hdev
->le_mtu
;
1041 conn
->mtu
= hcon
->hdev
->acl_mtu
;
1043 conn
->src
= &hcon
->hdev
->bdaddr
;
1044 conn
->dst
= &hcon
->dst
;
1046 conn
->feat_mask
= 0;
1048 spin_lock_init(&conn
->lock
);
1049 rwlock_init(&conn
->chan_lock
);
1051 INIT_LIST_HEAD(&conn
->chan_l
);
1053 if (hcon
->type
== LE_LINK
)
1054 setup_timer(&conn
->security_timer
, security_timeout
,
1055 (unsigned long) conn
);
1057 setup_timer(&conn
->info_timer
, l2cap_info_timeout
,
1058 (unsigned long) conn
);
1060 conn
->disc_reason
= 0x13;
1065 static inline void l2cap_chan_add(struct l2cap_conn
*conn
, struct l2cap_chan
*chan
)
1067 write_lock_bh(&conn
->chan_lock
);
1068 __l2cap_chan_add(conn
, chan
);
1069 write_unlock_bh(&conn
->chan_lock
);
1072 /* ---- Socket interface ---- */
1074 /* Find socket with psm and source bdaddr.
1075 * Returns closest match.
1077 static struct l2cap_chan
*l2cap_global_chan_by_psm(int state
, __le16 psm
, bdaddr_t
*src
)
1079 struct l2cap_chan
*c
, *c1
= NULL
;
1081 read_lock(&chan_list_lock
);
1083 list_for_each_entry(c
, &chan_list
, global_l
) {
1084 struct sock
*sk
= c
->sk
;
1086 if (state
&& c
->state
!= state
)
1089 if (c
->psm
== psm
) {
1091 if (!bacmp(&bt_sk(sk
)->src
, src
)) {
1092 read_unlock(&chan_list_lock
);
1097 if (!bacmp(&bt_sk(sk
)->src
, BDADDR_ANY
))
1102 read_unlock(&chan_list_lock
);
1107 int l2cap_chan_connect(struct l2cap_chan
*chan
)
1109 struct sock
*sk
= chan
->sk
;
1110 bdaddr_t
*src
= &bt_sk(sk
)->src
;
1111 bdaddr_t
*dst
= &bt_sk(sk
)->dst
;
1112 struct l2cap_conn
*conn
;
1113 struct hci_conn
*hcon
;
1114 struct hci_dev
*hdev
;
1118 BT_DBG("%s -> %s psm 0x%2.2x", batostr(src
), batostr(dst
),
1121 hdev
= hci_get_route(dst
, src
);
1123 return -EHOSTUNREACH
;
1125 hci_dev_lock_bh(hdev
);
1127 auth_type
= l2cap_get_auth_type(chan
);
1129 if (chan
->dcid
== L2CAP_CID_LE_DATA
)
1130 hcon
= hci_connect(hdev
, LE_LINK
, dst
,
1131 chan
->sec_level
, auth_type
);
1133 hcon
= hci_connect(hdev
, ACL_LINK
, dst
,
1134 chan
->sec_level
, auth_type
);
1137 err
= PTR_ERR(hcon
);
1141 conn
= l2cap_conn_add(hcon
, 0);
1148 /* Update source addr of the socket */
1149 bacpy(src
, conn
->src
);
1151 l2cap_chan_add(conn
, chan
);
1153 l2cap_state_change(chan
, BT_CONNECT
);
1154 __set_chan_timer(chan
, sk
->sk_sndtimeo
);
1156 if (hcon
->state
== BT_CONNECTED
) {
1157 if (chan
->chan_type
!= L2CAP_CHAN_CONN_ORIENTED
) {
1158 __clear_chan_timer(chan
);
1159 if (l2cap_check_security(chan
))
1160 l2cap_state_change(chan
, BT_CONNECTED
);
1162 l2cap_do_start(chan
);
1168 hci_dev_unlock_bh(hdev
);
1173 int __l2cap_wait_ack(struct sock
*sk
)
1175 struct l2cap_chan
*chan
= l2cap_pi(sk
)->chan
;
1176 DECLARE_WAITQUEUE(wait
, current
);
1180 add_wait_queue(sk_sleep(sk
), &wait
);
1181 set_current_state(TASK_INTERRUPTIBLE
);
1182 while (chan
->unacked_frames
> 0 && chan
->conn
) {
1186 if (signal_pending(current
)) {
1187 err
= sock_intr_errno(timeo
);
1192 timeo
= schedule_timeout(timeo
);
1194 set_current_state(TASK_INTERRUPTIBLE
);
1196 err
= sock_error(sk
);
1200 set_current_state(TASK_RUNNING
);
1201 remove_wait_queue(sk_sleep(sk
), &wait
);
1205 static void l2cap_monitor_timeout(unsigned long arg
)
1207 struct l2cap_chan
*chan
= (void *) arg
;
1208 struct sock
*sk
= chan
->sk
;
1210 BT_DBG("chan %p", chan
);
1213 if (chan
->retry_count
>= chan
->remote_max_tx
) {
1214 l2cap_send_disconn_req(chan
->conn
, chan
, ECONNABORTED
);
1219 chan
->retry_count
++;
1220 __set_monitor_timer(chan
);
1222 l2cap_send_rr_or_rnr(chan
, L2CAP_CTRL_POLL
);
1226 static void l2cap_retrans_timeout(unsigned long arg
)
1228 struct l2cap_chan
*chan
= (void *) arg
;
1229 struct sock
*sk
= chan
->sk
;
1231 BT_DBG("chan %p", chan
);
1234 chan
->retry_count
= 1;
1235 __set_monitor_timer(chan
);
1237 set_bit(CONN_WAIT_F
, &chan
->conn_state
);
1239 l2cap_send_rr_or_rnr(chan
, L2CAP_CTRL_POLL
);
1243 static void l2cap_drop_acked_frames(struct l2cap_chan
*chan
)
1245 struct sk_buff
*skb
;
1247 while ((skb
= skb_peek(&chan
->tx_q
)) &&
1248 chan
->unacked_frames
) {
1249 if (bt_cb(skb
)->tx_seq
== chan
->expected_ack_seq
)
1252 skb
= skb_dequeue(&chan
->tx_q
);
1255 chan
->unacked_frames
--;
1258 if (!chan
->unacked_frames
)
1259 __clear_retrans_timer(chan
);
1262 static void l2cap_do_send(struct l2cap_chan
*chan
, struct sk_buff
*skb
)
1264 struct hci_conn
*hcon
= chan
->conn
->hcon
;
1267 BT_DBG("chan %p, skb %p len %d", chan
, skb
, skb
->len
);
1269 if (!test_bit(FLAG_FLUSHABLE
, &chan
->flags
) &&
1270 lmp_no_flush_capable(hcon
->hdev
))
1271 flags
= ACL_START_NO_FLUSH
;
1275 bt_cb(skb
)->force_active
= test_bit(FLAG_FORCE_ACTIVE
, &chan
->flags
);
1276 hci_send_acl(hcon
, skb
, flags
);
1279 static void l2cap_streaming_send(struct l2cap_chan
*chan
)
1281 struct sk_buff
*skb
;
1284 while ((skb
= skb_dequeue(&chan
->tx_q
))) {
1285 control
= get_unaligned_le16(skb
->data
+ L2CAP_HDR_SIZE
);
1286 control
|= __set_txseq(chan
, chan
->next_tx_seq
);
1287 put_unaligned_le16(control
, skb
->data
+ L2CAP_HDR_SIZE
);
1289 if (chan
->fcs
== L2CAP_FCS_CRC16
) {
1290 fcs
= crc16(0, (u8
*)skb
->data
, skb
->len
- 2);
1291 put_unaligned_le16(fcs
, skb
->data
+ skb
->len
- 2);
1294 l2cap_do_send(chan
, skb
);
1296 chan
->next_tx_seq
= (chan
->next_tx_seq
+ 1) % 64;
1300 static void l2cap_retransmit_one_frame(struct l2cap_chan
*chan
, u16 tx_seq
)
1302 struct sk_buff
*skb
, *tx_skb
;
1305 skb
= skb_peek(&chan
->tx_q
);
1310 if (bt_cb(skb
)->tx_seq
== tx_seq
)
1313 if (skb_queue_is_last(&chan
->tx_q
, skb
))
1316 } while ((skb
= skb_queue_next(&chan
->tx_q
, skb
)));
1318 if (chan
->remote_max_tx
&&
1319 bt_cb(skb
)->retries
== chan
->remote_max_tx
) {
1320 l2cap_send_disconn_req(chan
->conn
, chan
, ECONNABORTED
);
1324 tx_skb
= skb_clone(skb
, GFP_ATOMIC
);
1325 bt_cb(skb
)->retries
++;
1326 control
= get_unaligned_le16(tx_skb
->data
+ L2CAP_HDR_SIZE
);
1327 control
&= __get_sar_mask(chan
);
1329 if (test_and_clear_bit(CONN_SEND_FBIT
, &chan
->conn_state
))
1330 control
|= __set_ctrl_final(chan
);
1332 control
|= __set_reqseq(chan
, chan
->buffer_seq
);
1333 control
|= __set_txseq(chan
, tx_seq
);
1335 put_unaligned_le16(control
, tx_skb
->data
+ L2CAP_HDR_SIZE
);
1337 if (chan
->fcs
== L2CAP_FCS_CRC16
) {
1338 fcs
= crc16(0, (u8
*)tx_skb
->data
, tx_skb
->len
- 2);
1339 put_unaligned_le16(fcs
, tx_skb
->data
+ tx_skb
->len
- 2);
1342 l2cap_do_send(chan
, tx_skb
);
1345 static int l2cap_ertm_send(struct l2cap_chan
*chan
)
1347 struct sk_buff
*skb
, *tx_skb
;
1351 if (chan
->state
!= BT_CONNECTED
)
1354 while ((skb
= chan
->tx_send_head
) && (!l2cap_tx_window_full(chan
))) {
1356 if (chan
->remote_max_tx
&&
1357 bt_cb(skb
)->retries
== chan
->remote_max_tx
) {
1358 l2cap_send_disconn_req(chan
->conn
, chan
, ECONNABORTED
);
1362 tx_skb
= skb_clone(skb
, GFP_ATOMIC
);
1364 bt_cb(skb
)->retries
++;
1366 control
= get_unaligned_le16(tx_skb
->data
+ L2CAP_HDR_SIZE
);
1367 control
&= __get_sar_mask(chan
);
1369 if (test_and_clear_bit(CONN_SEND_FBIT
, &chan
->conn_state
))
1370 control
|= __set_ctrl_final(chan
);
1372 control
|= __set_reqseq(chan
, chan
->buffer_seq
);
1373 control
|= __set_txseq(chan
, chan
->next_tx_seq
);
1374 put_unaligned_le16(control
, tx_skb
->data
+ L2CAP_HDR_SIZE
);
1377 if (chan
->fcs
== L2CAP_FCS_CRC16
) {
1378 fcs
= crc16(0, (u8
*)skb
->data
, tx_skb
->len
- 2);
1379 put_unaligned_le16(fcs
, skb
->data
+ tx_skb
->len
- 2);
1382 l2cap_do_send(chan
, tx_skb
);
1384 __set_retrans_timer(chan
);
1386 bt_cb(skb
)->tx_seq
= chan
->next_tx_seq
;
1387 chan
->next_tx_seq
= (chan
->next_tx_seq
+ 1) % 64;
1389 if (bt_cb(skb
)->retries
== 1)
1390 chan
->unacked_frames
++;
1392 chan
->frames_sent
++;
1394 if (skb_queue_is_last(&chan
->tx_q
, skb
))
1395 chan
->tx_send_head
= NULL
;
1397 chan
->tx_send_head
= skb_queue_next(&chan
->tx_q
, skb
);
1405 static int l2cap_retransmit_frames(struct l2cap_chan
*chan
)
1409 if (!skb_queue_empty(&chan
->tx_q
))
1410 chan
->tx_send_head
= chan
->tx_q
.next
;
1412 chan
->next_tx_seq
= chan
->expected_ack_seq
;
1413 ret
= l2cap_ertm_send(chan
);
1417 static void l2cap_send_ack(struct l2cap_chan
*chan
)
1421 control
|= __set_reqseq(chan
, chan
->buffer_seq
);
1423 if (test_bit(CONN_LOCAL_BUSY
, &chan
->conn_state
)) {
1424 control
|= __set_ctrl_super(chan
, L2CAP_SUPER_RNR
);
1425 set_bit(CONN_RNR_SENT
, &chan
->conn_state
);
1426 l2cap_send_sframe(chan
, control
);
1430 if (l2cap_ertm_send(chan
) > 0)
1433 control
|= __set_ctrl_super(chan
, L2CAP_SUPER_RR
);
1434 l2cap_send_sframe(chan
, control
);
1437 static void l2cap_send_srejtail(struct l2cap_chan
*chan
)
1439 struct srej_list
*tail
;
1442 control
= __set_ctrl_super(chan
, L2CAP_SUPER_SREJ
);
1443 control
|= __set_ctrl_final(chan
);
1445 tail
= list_entry((&chan
->srej_l
)->prev
, struct srej_list
, list
);
1446 control
|= __set_reqseq(chan
, tail
->tx_seq
);
1448 l2cap_send_sframe(chan
, control
);
1451 static inline int l2cap_skbuff_fromiovec(struct sock
*sk
, struct msghdr
*msg
, int len
, int count
, struct sk_buff
*skb
)
1453 struct l2cap_conn
*conn
= l2cap_pi(sk
)->chan
->conn
;
1454 struct sk_buff
**frag
;
1457 if (memcpy_fromiovec(skb_put(skb
, count
), msg
->msg_iov
, count
))
1463 /* Continuation fragments (no L2CAP header) */
1464 frag
= &skb_shinfo(skb
)->frag_list
;
1466 count
= min_t(unsigned int, conn
->mtu
, len
);
1468 *frag
= bt_skb_send_alloc(sk
, count
, msg
->msg_flags
& MSG_DONTWAIT
, &err
);
1471 if (memcpy_fromiovec(skb_put(*frag
, count
), msg
->msg_iov
, count
))
1477 frag
= &(*frag
)->next
;
1483 static struct sk_buff
*l2cap_create_connless_pdu(struct l2cap_chan
*chan
, struct msghdr
*msg
, size_t len
)
1485 struct sock
*sk
= chan
->sk
;
1486 struct l2cap_conn
*conn
= chan
->conn
;
1487 struct sk_buff
*skb
;
1488 int err
, count
, hlen
= L2CAP_HDR_SIZE
+ 2;
1489 struct l2cap_hdr
*lh
;
1491 BT_DBG("sk %p len %d", sk
, (int)len
);
1493 count
= min_t(unsigned int, (conn
->mtu
- hlen
), len
);
1494 skb
= bt_skb_send_alloc(sk
, count
+ hlen
,
1495 msg
->msg_flags
& MSG_DONTWAIT
, &err
);
1497 return ERR_PTR(err
);
1499 /* Create L2CAP header */
1500 lh
= (struct l2cap_hdr
*) skb_put(skb
, L2CAP_HDR_SIZE
);
1501 lh
->cid
= cpu_to_le16(chan
->dcid
);
1502 lh
->len
= cpu_to_le16(len
+ (hlen
- L2CAP_HDR_SIZE
));
1503 put_unaligned_le16(chan
->psm
, skb_put(skb
, 2));
1505 err
= l2cap_skbuff_fromiovec(sk
, msg
, len
, count
, skb
);
1506 if (unlikely(err
< 0)) {
1508 return ERR_PTR(err
);
1513 static struct sk_buff
*l2cap_create_basic_pdu(struct l2cap_chan
*chan
, struct msghdr
*msg
, size_t len
)
1515 struct sock
*sk
= chan
->sk
;
1516 struct l2cap_conn
*conn
= chan
->conn
;
1517 struct sk_buff
*skb
;
1518 int err
, count
, hlen
= L2CAP_HDR_SIZE
;
1519 struct l2cap_hdr
*lh
;
1521 BT_DBG("sk %p len %d", sk
, (int)len
);
1523 count
= min_t(unsigned int, (conn
->mtu
- hlen
), len
);
1524 skb
= bt_skb_send_alloc(sk
, count
+ hlen
,
1525 msg
->msg_flags
& MSG_DONTWAIT
, &err
);
1527 return ERR_PTR(err
);
1529 /* Create L2CAP header */
1530 lh
= (struct l2cap_hdr
*) skb_put(skb
, L2CAP_HDR_SIZE
);
1531 lh
->cid
= cpu_to_le16(chan
->dcid
);
1532 lh
->len
= cpu_to_le16(len
+ (hlen
- L2CAP_HDR_SIZE
));
1534 err
= l2cap_skbuff_fromiovec(sk
, msg
, len
, count
, skb
);
1535 if (unlikely(err
< 0)) {
1537 return ERR_PTR(err
);
1542 static struct sk_buff
*l2cap_create_iframe_pdu(struct l2cap_chan
*chan
,
1543 struct msghdr
*msg
, size_t len
,
1544 u16 control
, u16 sdulen
)
1546 struct sock
*sk
= chan
->sk
;
1547 struct l2cap_conn
*conn
= chan
->conn
;
1548 struct sk_buff
*skb
;
1549 int err
, count
, hlen
;
1550 struct l2cap_hdr
*lh
;
1552 BT_DBG("sk %p len %d", sk
, (int)len
);
1555 return ERR_PTR(-ENOTCONN
);
1557 if (test_bit(FLAG_EXT_CTRL
, &chan
->flags
))
1558 hlen
= L2CAP_EXT_HDR_SIZE
;
1560 hlen
= L2CAP_ENH_HDR_SIZE
;
1565 if (chan
->fcs
== L2CAP_FCS_CRC16
)
1568 count
= min_t(unsigned int, (conn
->mtu
- hlen
), len
);
1569 skb
= bt_skb_send_alloc(sk
, count
+ hlen
,
1570 msg
->msg_flags
& MSG_DONTWAIT
, &err
);
1572 return ERR_PTR(err
);
1574 /* Create L2CAP header */
1575 lh
= (struct l2cap_hdr
*) skb_put(skb
, L2CAP_HDR_SIZE
);
1576 lh
->cid
= cpu_to_le16(chan
->dcid
);
1577 lh
->len
= cpu_to_le16(len
+ (hlen
- L2CAP_HDR_SIZE
));
1578 put_unaligned_le16(control
, skb_put(skb
, 2));
1580 put_unaligned_le16(sdulen
, skb_put(skb
, 2));
1582 err
= l2cap_skbuff_fromiovec(sk
, msg
, len
, count
, skb
);
1583 if (unlikely(err
< 0)) {
1585 return ERR_PTR(err
);
1588 if (chan
->fcs
== L2CAP_FCS_CRC16
)
1589 put_unaligned_le16(0, skb_put(skb
, 2));
1591 bt_cb(skb
)->retries
= 0;
1595 static int l2cap_sar_segment_sdu(struct l2cap_chan
*chan
, struct msghdr
*msg
, size_t len
)
1597 struct sk_buff
*skb
;
1598 struct sk_buff_head sar_queue
;
1602 skb_queue_head_init(&sar_queue
);
1603 control
= __set_ctrl_sar(chan
, L2CAP_SAR_START
);
1604 skb
= l2cap_create_iframe_pdu(chan
, msg
, chan
->remote_mps
, control
, len
);
1606 return PTR_ERR(skb
);
1608 __skb_queue_tail(&sar_queue
, skb
);
1609 len
-= chan
->remote_mps
;
1610 size
+= chan
->remote_mps
;
1615 if (len
> chan
->remote_mps
) {
1616 control
= __set_ctrl_sar(chan
, L2CAP_SAR_CONTINUE
);
1617 buflen
= chan
->remote_mps
;
1619 control
= __set_ctrl_sar(chan
, L2CAP_SAR_END
);
1623 skb
= l2cap_create_iframe_pdu(chan
, msg
, buflen
, control
, 0);
1625 skb_queue_purge(&sar_queue
);
1626 return PTR_ERR(skb
);
1629 __skb_queue_tail(&sar_queue
, skb
);
1633 skb_queue_splice_tail(&sar_queue
, &chan
->tx_q
);
1634 if (chan
->tx_send_head
== NULL
)
1635 chan
->tx_send_head
= sar_queue
.next
;
1640 int l2cap_chan_send(struct l2cap_chan
*chan
, struct msghdr
*msg
, size_t len
)
1642 struct sk_buff
*skb
;
1646 /* Connectionless channel */
1647 if (chan
->chan_type
== L2CAP_CHAN_CONN_LESS
) {
1648 skb
= l2cap_create_connless_pdu(chan
, msg
, len
);
1650 return PTR_ERR(skb
);
1652 l2cap_do_send(chan
, skb
);
1656 switch (chan
->mode
) {
1657 case L2CAP_MODE_BASIC
:
1658 /* Check outgoing MTU */
1659 if (len
> chan
->omtu
)
1662 /* Create a basic PDU */
1663 skb
= l2cap_create_basic_pdu(chan
, msg
, len
);
1665 return PTR_ERR(skb
);
1667 l2cap_do_send(chan
, skb
);
1671 case L2CAP_MODE_ERTM
:
1672 case L2CAP_MODE_STREAMING
:
1673 /* Entire SDU fits into one PDU */
1674 if (len
<= chan
->remote_mps
) {
1675 control
= __set_ctrl_sar(chan
, L2CAP_SAR_UNSEGMENTED
);
1676 skb
= l2cap_create_iframe_pdu(chan
, msg
, len
, control
,
1679 return PTR_ERR(skb
);
1681 __skb_queue_tail(&chan
->tx_q
, skb
);
1683 if (chan
->tx_send_head
== NULL
)
1684 chan
->tx_send_head
= skb
;
1687 /* Segment SDU into multiples PDUs */
1688 err
= l2cap_sar_segment_sdu(chan
, msg
, len
);
1693 if (chan
->mode
== L2CAP_MODE_STREAMING
) {
1694 l2cap_streaming_send(chan
);
1699 if (test_bit(CONN_REMOTE_BUSY
, &chan
->conn_state
) &&
1700 test_bit(CONN_WAIT_F
, &chan
->conn_state
)) {
1705 err
= l2cap_ertm_send(chan
);
1712 BT_DBG("bad state %1.1x", chan
->mode
);
1719 /* Copy frame to all raw sockets on that connection */
1720 static void l2cap_raw_recv(struct l2cap_conn
*conn
, struct sk_buff
*skb
)
1722 struct sk_buff
*nskb
;
1723 struct l2cap_chan
*chan
;
1725 BT_DBG("conn %p", conn
);
1727 read_lock(&conn
->chan_lock
);
1728 list_for_each_entry(chan
, &conn
->chan_l
, list
) {
1729 struct sock
*sk
= chan
->sk
;
1730 if (chan
->chan_type
!= L2CAP_CHAN_RAW
)
1733 /* Don't send frame to the socket it came from */
1736 nskb
= skb_clone(skb
, GFP_ATOMIC
);
1740 if (chan
->ops
->recv(chan
->data
, nskb
))
1743 read_unlock(&conn
->chan_lock
);
1746 /* ---- L2CAP signalling commands ---- */
1747 static struct sk_buff
*l2cap_build_cmd(struct l2cap_conn
*conn
,
1748 u8 code
, u8 ident
, u16 dlen
, void *data
)
1750 struct sk_buff
*skb
, **frag
;
1751 struct l2cap_cmd_hdr
*cmd
;
1752 struct l2cap_hdr
*lh
;
1755 BT_DBG("conn %p, code 0x%2.2x, ident 0x%2.2x, len %d",
1756 conn
, code
, ident
, dlen
);
1758 len
= L2CAP_HDR_SIZE
+ L2CAP_CMD_HDR_SIZE
+ dlen
;
1759 count
= min_t(unsigned int, conn
->mtu
, len
);
1761 skb
= bt_skb_alloc(count
, GFP_ATOMIC
);
1765 lh
= (struct l2cap_hdr
*) skb_put(skb
, L2CAP_HDR_SIZE
);
1766 lh
->len
= cpu_to_le16(L2CAP_CMD_HDR_SIZE
+ dlen
);
1768 if (conn
->hcon
->type
== LE_LINK
)
1769 lh
->cid
= cpu_to_le16(L2CAP_CID_LE_SIGNALING
);
1771 lh
->cid
= cpu_to_le16(L2CAP_CID_SIGNALING
);
1773 cmd
= (struct l2cap_cmd_hdr
*) skb_put(skb
, L2CAP_CMD_HDR_SIZE
);
1776 cmd
->len
= cpu_to_le16(dlen
);
1779 count
-= L2CAP_HDR_SIZE
+ L2CAP_CMD_HDR_SIZE
;
1780 memcpy(skb_put(skb
, count
), data
, count
);
1786 /* Continuation fragments (no L2CAP header) */
1787 frag
= &skb_shinfo(skb
)->frag_list
;
1789 count
= min_t(unsigned int, conn
->mtu
, len
);
1791 *frag
= bt_skb_alloc(count
, GFP_ATOMIC
);
1795 memcpy(skb_put(*frag
, count
), data
, count
);
1800 frag
= &(*frag
)->next
;
1810 static inline int l2cap_get_conf_opt(void **ptr
, int *type
, int *olen
, unsigned long *val
)
1812 struct l2cap_conf_opt
*opt
= *ptr
;
1815 len
= L2CAP_CONF_OPT_SIZE
+ opt
->len
;
1823 *val
= *((u8
*) opt
->val
);
1827 *val
= get_unaligned_le16(opt
->val
);
1831 *val
= get_unaligned_le32(opt
->val
);
1835 *val
= (unsigned long) opt
->val
;
1839 BT_DBG("type 0x%2.2x len %d val 0x%lx", *type
, opt
->len
, *val
);
1843 static void l2cap_add_conf_opt(void **ptr
, u8 type
, u8 len
, unsigned long val
)
1845 struct l2cap_conf_opt
*opt
= *ptr
;
1847 BT_DBG("type 0x%2.2x len %d val 0x%lx", type
, len
, val
);
1854 *((u8
*) opt
->val
) = val
;
1858 put_unaligned_le16(val
, opt
->val
);
1862 put_unaligned_le32(val
, opt
->val
);
1866 memcpy(opt
->val
, (void *) val
, len
);
1870 *ptr
+= L2CAP_CONF_OPT_SIZE
+ len
;
1873 static void l2cap_add_opt_efs(void **ptr
, struct l2cap_chan
*chan
)
1875 struct l2cap_conf_efs efs
;
1877 switch(chan
->mode
) {
1878 case L2CAP_MODE_ERTM
:
1879 efs
.id
= chan
->local_id
;
1880 efs
.stype
= chan
->local_stype
;
1881 efs
.msdu
= cpu_to_le16(chan
->local_msdu
);
1882 efs
.sdu_itime
= cpu_to_le32(chan
->local_sdu_itime
);
1883 efs
.acc_lat
= cpu_to_le32(L2CAP_DEFAULT_ACC_LAT
);
1884 efs
.flush_to
= cpu_to_le32(L2CAP_DEFAULT_FLUSH_TO
);
1887 case L2CAP_MODE_STREAMING
:
1889 efs
.stype
= L2CAP_SERV_BESTEFFORT
;
1890 efs
.msdu
= cpu_to_le16(chan
->local_msdu
);
1891 efs
.sdu_itime
= cpu_to_le32(chan
->local_sdu_itime
);
1900 l2cap_add_conf_opt(ptr
, L2CAP_CONF_EFS
, sizeof(efs
),
1901 (unsigned long) &efs
);
1904 static void l2cap_ack_timeout(unsigned long arg
)
1906 struct l2cap_chan
*chan
= (void *) arg
;
1908 bh_lock_sock(chan
->sk
);
1909 l2cap_send_ack(chan
);
1910 bh_unlock_sock(chan
->sk
);
1913 static inline void l2cap_ertm_init(struct l2cap_chan
*chan
)
1915 struct sock
*sk
= chan
->sk
;
1917 chan
->expected_ack_seq
= 0;
1918 chan
->unacked_frames
= 0;
1919 chan
->buffer_seq
= 0;
1920 chan
->num_acked
= 0;
1921 chan
->frames_sent
= 0;
1923 setup_timer(&chan
->retrans_timer
, l2cap_retrans_timeout
,
1924 (unsigned long) chan
);
1925 setup_timer(&chan
->monitor_timer
, l2cap_monitor_timeout
,
1926 (unsigned long) chan
);
1927 setup_timer(&chan
->ack_timer
, l2cap_ack_timeout
, (unsigned long) chan
);
1929 skb_queue_head_init(&chan
->srej_q
);
1931 INIT_LIST_HEAD(&chan
->srej_l
);
1934 sk
->sk_backlog_rcv
= l2cap_ertm_data_rcv
;
1937 static inline __u8
l2cap_select_mode(__u8 mode
, __u16 remote_feat_mask
)
1940 case L2CAP_MODE_STREAMING
:
1941 case L2CAP_MODE_ERTM
:
1942 if (l2cap_mode_supported(mode
, remote_feat_mask
))
1946 return L2CAP_MODE_BASIC
;
1950 static inline bool __l2cap_ews_supported(struct l2cap_chan
*chan
)
1952 return enable_hs
&& chan
->conn
->feat_mask
& L2CAP_FEAT_EXT_WINDOW
;
1955 static inline bool __l2cap_efs_supported(struct l2cap_chan
*chan
)
1957 return enable_hs
&& chan
->conn
->feat_mask
& L2CAP_FEAT_EXT_FLOW
;
1960 static inline void l2cap_txwin_setup(struct l2cap_chan
*chan
)
1962 if (chan
->tx_win
> L2CAP_DEFAULT_TX_WINDOW
&&
1963 __l2cap_ews_supported(chan
))
1964 /* use extended control field */
1965 set_bit(FLAG_EXT_CTRL
, &chan
->flags
);
1967 chan
->tx_win
= min_t(u16
, chan
->tx_win
,
1968 L2CAP_DEFAULT_TX_WINDOW
);
1971 static int l2cap_build_conf_req(struct l2cap_chan
*chan
, void *data
)
1973 struct l2cap_conf_req
*req
= data
;
1974 struct l2cap_conf_rfc rfc
= { .mode
= chan
->mode
};
1975 void *ptr
= req
->data
;
1977 BT_DBG("chan %p", chan
);
1979 if (chan
->num_conf_req
|| chan
->num_conf_rsp
)
1982 switch (chan
->mode
) {
1983 case L2CAP_MODE_STREAMING
:
1984 case L2CAP_MODE_ERTM
:
1985 if (test_bit(CONF_STATE2_DEVICE
, &chan
->conf_state
))
1988 if (__l2cap_efs_supported(chan
))
1989 set_bit(FLAG_EFS_ENABLE
, &chan
->flags
);
1993 chan
->mode
= l2cap_select_mode(rfc
.mode
, chan
->conn
->feat_mask
);
1998 if (chan
->imtu
!= L2CAP_DEFAULT_MTU
)
1999 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_MTU
, 2, chan
->imtu
);
2001 switch (chan
->mode
) {
2002 case L2CAP_MODE_BASIC
:
2003 if (!(chan
->conn
->feat_mask
& L2CAP_FEAT_ERTM
) &&
2004 !(chan
->conn
->feat_mask
& L2CAP_FEAT_STREAMING
))
2007 rfc
.mode
= L2CAP_MODE_BASIC
;
2009 rfc
.max_transmit
= 0;
2010 rfc
.retrans_timeout
= 0;
2011 rfc
.monitor_timeout
= 0;
2012 rfc
.max_pdu_size
= 0;
2014 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_RFC
, sizeof(rfc
),
2015 (unsigned long) &rfc
);
2018 case L2CAP_MODE_ERTM
:
2019 rfc
.mode
= L2CAP_MODE_ERTM
;
2020 rfc
.max_transmit
= chan
->max_tx
;
2021 rfc
.retrans_timeout
= 0;
2022 rfc
.monitor_timeout
= 0;
2023 rfc
.max_pdu_size
= cpu_to_le16(L2CAP_DEFAULT_MAX_PDU_SIZE
);
2024 if (L2CAP_DEFAULT_MAX_PDU_SIZE
> chan
->conn
->mtu
- 10)
2025 rfc
.max_pdu_size
= cpu_to_le16(chan
->conn
->mtu
- 10);
2027 l2cap_txwin_setup(chan
);
2029 rfc
.txwin_size
= min_t(u16
, chan
->tx_win
,
2030 L2CAP_DEFAULT_TX_WINDOW
);
2032 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_RFC
, sizeof(rfc
),
2033 (unsigned long) &rfc
);
2035 if (test_bit(FLAG_EFS_ENABLE
, &chan
->flags
))
2036 l2cap_add_opt_efs(&ptr
, chan
);
2038 if (!(chan
->conn
->feat_mask
& L2CAP_FEAT_FCS
))
2041 if (chan
->fcs
== L2CAP_FCS_NONE
||
2042 test_bit(CONF_NO_FCS_RECV
, &chan
->conf_state
)) {
2043 chan
->fcs
= L2CAP_FCS_NONE
;
2044 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_FCS
, 1, chan
->fcs
);
2047 if (test_bit(FLAG_EXT_CTRL
, &chan
->flags
))
2048 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_EWS
, 2,
2052 case L2CAP_MODE_STREAMING
:
2053 rfc
.mode
= L2CAP_MODE_STREAMING
;
2055 rfc
.max_transmit
= 0;
2056 rfc
.retrans_timeout
= 0;
2057 rfc
.monitor_timeout
= 0;
2058 rfc
.max_pdu_size
= cpu_to_le16(L2CAP_DEFAULT_MAX_PDU_SIZE
);
2059 if (L2CAP_DEFAULT_MAX_PDU_SIZE
> chan
->conn
->mtu
- 10)
2060 rfc
.max_pdu_size
= cpu_to_le16(chan
->conn
->mtu
- 10);
2062 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_RFC
, sizeof(rfc
),
2063 (unsigned long) &rfc
);
2065 if (test_bit(FLAG_EFS_ENABLE
, &chan
->flags
))
2066 l2cap_add_opt_efs(&ptr
, chan
);
2068 if (!(chan
->conn
->feat_mask
& L2CAP_FEAT_FCS
))
2071 if (chan
->fcs
== L2CAP_FCS_NONE
||
2072 test_bit(CONF_NO_FCS_RECV
, &chan
->conf_state
)) {
2073 chan
->fcs
= L2CAP_FCS_NONE
;
2074 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_FCS
, 1, chan
->fcs
);
2079 req
->dcid
= cpu_to_le16(chan
->dcid
);
2080 req
->flags
= cpu_to_le16(0);
/* Parse the peer's accumulated Configuration Request (chan->conf_req)
 * and build our Configuration Response into @data. Unknown non-hint
 * options are echoed back with L2CAP_CONF_UNKNOWN; a mode mismatch is
 * answered with UNACCEPT or refused outright. Returns the response
 * length, or -ECONNREFUSED when the request cannot be honoured.
 *
 * Fix: the default retrans/monitor timeouts are host-order constants
 * being stored into the little-endian wire fields of the RFC option,
 * so the conversion must be cpu_to_le16(), not le16_to_cpu() as the
 * original had (numerically identical on LE hosts, wrong direction
 * semantically and flagged by sparse).
 */
static int l2cap_parse_conf_req(struct l2cap_chan *chan, void *data)
{
	struct l2cap_conf_rsp *rsp = data;
	void *ptr = rsp->data;
	void *req = chan->conf_req;
	int len = chan->conf_len;
	int type, hint, olen;
	unsigned long val;
	struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
	u16 mtu = L2CAP_DEFAULT_MTU;
	u16 result = L2CAP_CONF_SUCCESS;

	BT_DBG("chan %p", chan);

	while (len >= L2CAP_CONF_OPT_SIZE) {
		len -= l2cap_get_conf_opt(&req, &type, &olen, &val);

		hint  = type & L2CAP_CONF_HINT;
		type &= L2CAP_CONF_MASK;

		switch (type) {
		case L2CAP_CONF_MTU:
			mtu = val;
			break;

		case L2CAP_CONF_FLUSH_TO:
			chan->flush_to = val;
			break;

		case L2CAP_CONF_QOS:
			break;

		case L2CAP_CONF_RFC:
			if (olen == sizeof(rfc))
				memcpy(&rfc, (void *) val, olen);
			break;

		case L2CAP_CONF_FCS:
			if (val == L2CAP_FCS_NONE)
				set_bit(CONF_NO_FCS_RECV, &chan->conf_state);
			break;

		case L2CAP_CONF_EWS:
			if (!enable_hs)
				return -ECONNREFUSED;

			set_bit(FLAG_EXT_CTRL, &chan->flags);
			set_bit(CONF_EWS_RECV, &chan->conf_state);
			chan->remote_tx_win = val;
			break;

		default:
			if (hint)
				break;

			result = L2CAP_CONF_UNKNOWN;
			*((u8 *) ptr++) = type;
			break;
		}
	}

	if (chan->num_conf_rsp || chan->num_conf_req > 1)
		goto done;

	switch (chan->mode) {
	case L2CAP_MODE_STREAMING:
	case L2CAP_MODE_ERTM:
		if (!test_bit(CONF_STATE2_DEVICE, &chan->conf_state)) {
			chan->mode = l2cap_select_mode(rfc.mode,
					chan->conn->feat_mask);
			break;
		}

		if (chan->mode != rfc.mode)
			return -ECONNREFUSED;

		break;
	}

done:
	if (chan->mode != rfc.mode) {
		result = L2CAP_CONF_UNACCEPT;
		rfc.mode = chan->mode;

		if (chan->num_conf_rsp == 1)
			return -ECONNREFUSED;

		l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
					sizeof(rfc), (unsigned long) &rfc);
	}

	if (result == L2CAP_CONF_SUCCESS) {
		/* Configure output options and let the other side know
		 * which ones we don't like. */

		if (mtu < L2CAP_DEFAULT_MIN_MTU)
			result = L2CAP_CONF_UNACCEPT;
		else {
			chan->omtu = mtu;
			set_bit(CONF_MTU_DONE, &chan->conf_state);
		}
		l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->omtu);

		switch (rfc.mode) {
		case L2CAP_MODE_BASIC:
			chan->fcs = L2CAP_FCS_NONE;
			set_bit(CONF_MODE_DONE, &chan->conf_state);
			break;

		case L2CAP_MODE_ERTM:
			if (!test_bit(CONF_EWS_RECV, &chan->conf_state))
				chan->remote_tx_win = rfc.txwin_size;
			else
				rfc.txwin_size = L2CAP_DEFAULT_TX_WINDOW;

			chan->remote_max_tx = rfc.max_transmit;

			if (le16_to_cpu(rfc.max_pdu_size) > chan->conn->mtu - 10)
				rfc.max_pdu_size = cpu_to_le16(chan->conn->mtu - 10);

			chan->remote_mps = le16_to_cpu(rfc.max_pdu_size);

			rfc.retrans_timeout =
				cpu_to_le16(L2CAP_DEFAULT_RETRANS_TO);
			rfc.monitor_timeout =
				cpu_to_le16(L2CAP_DEFAULT_MONITOR_TO);

			set_bit(CONF_MODE_DONE, &chan->conf_state);

			l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
					sizeof(rfc), (unsigned long) &rfc);

			break;

		case L2CAP_MODE_STREAMING:
			if (le16_to_cpu(rfc.max_pdu_size) > chan->conn->mtu - 10)
				rfc.max_pdu_size = cpu_to_le16(chan->conn->mtu - 10);

			chan->remote_mps = le16_to_cpu(rfc.max_pdu_size);

			set_bit(CONF_MODE_DONE, &chan->conf_state);

			l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
					sizeof(rfc), (unsigned long) &rfc);

			break;

		default:
			result = L2CAP_CONF_UNACCEPT;

			memset(&rfc, 0, sizeof(rfc));
			rfc.mode = chan->mode;
		}

		if (result == L2CAP_CONF_SUCCESS)
			set_bit(CONF_OUTPUT_DONE, &chan->conf_state);
	}
	rsp->scid   = cpu_to_le16(chan->dcid);
	rsp->result = cpu_to_le16(result);
	rsp->flags  = cpu_to_le16(0x0000);

	return ptr - data;
}
2251 static int l2cap_parse_conf_rsp(struct l2cap_chan
*chan
, void *rsp
, int len
, void *data
, u16
*result
)
2253 struct l2cap_conf_req
*req
= data
;
2254 void *ptr
= req
->data
;
2257 struct l2cap_conf_rfc rfc
;
2259 BT_DBG("chan %p, rsp %p, len %d, req %p", chan
, rsp
, len
, data
);
2261 while (len
>= L2CAP_CONF_OPT_SIZE
) {
2262 len
-= l2cap_get_conf_opt(&rsp
, &type
, &olen
, &val
);
2265 case L2CAP_CONF_MTU
:
2266 if (val
< L2CAP_DEFAULT_MIN_MTU
) {
2267 *result
= L2CAP_CONF_UNACCEPT
;
2268 chan
->imtu
= L2CAP_DEFAULT_MIN_MTU
;
2271 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_MTU
, 2, chan
->imtu
);
2274 case L2CAP_CONF_FLUSH_TO
:
2275 chan
->flush_to
= val
;
2276 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_FLUSH_TO
,
2280 case L2CAP_CONF_RFC
:
2281 if (olen
== sizeof(rfc
))
2282 memcpy(&rfc
, (void *)val
, olen
);
2284 if (test_bit(CONF_STATE2_DEVICE
, &chan
->conf_state
) &&
2285 rfc
.mode
!= chan
->mode
)
2286 return -ECONNREFUSED
;
2290 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_RFC
,
2291 sizeof(rfc
), (unsigned long) &rfc
);
2294 case L2CAP_CONF_EWS
:
2295 chan
->tx_win
= min_t(u16
, val
,
2296 L2CAP_DEFAULT_EXT_WINDOW
);
2297 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_EWS
,
2303 if (chan
->mode
== L2CAP_MODE_BASIC
&& chan
->mode
!= rfc
.mode
)
2304 return -ECONNREFUSED
;
2306 chan
->mode
= rfc
.mode
;
2308 if (*result
== L2CAP_CONF_SUCCESS
) {
2310 case L2CAP_MODE_ERTM
:
2311 chan
->retrans_timeout
= le16_to_cpu(rfc
.retrans_timeout
);
2312 chan
->monitor_timeout
= le16_to_cpu(rfc
.monitor_timeout
);
2313 chan
->mps
= le16_to_cpu(rfc
.max_pdu_size
);
2315 case L2CAP_MODE_STREAMING
:
2316 chan
->mps
= le16_to_cpu(rfc
.max_pdu_size
);
2320 req
->dcid
= cpu_to_le16(chan
->dcid
);
2321 req
->flags
= cpu_to_le16(0x0000);
2326 static int l2cap_build_conf_rsp(struct l2cap_chan
*chan
, void *data
, u16 result
, u16 flags
)
2328 struct l2cap_conf_rsp
*rsp
= data
;
2329 void *ptr
= rsp
->data
;
2331 BT_DBG("chan %p", chan
);
2333 rsp
->scid
= cpu_to_le16(chan
->dcid
);
2334 rsp
->result
= cpu_to_le16(result
);
2335 rsp
->flags
= cpu_to_le16(flags
);
2340 void __l2cap_connect_rsp_defer(struct l2cap_chan
*chan
)
2342 struct l2cap_conn_rsp rsp
;
2343 struct l2cap_conn
*conn
= chan
->conn
;
2346 rsp
.scid
= cpu_to_le16(chan
->dcid
);
2347 rsp
.dcid
= cpu_to_le16(chan
->scid
);
2348 rsp
.result
= cpu_to_le16(L2CAP_CR_SUCCESS
);
2349 rsp
.status
= cpu_to_le16(L2CAP_CS_NO_INFO
);
2350 l2cap_send_cmd(conn
, chan
->ident
,
2351 L2CAP_CONN_RSP
, sizeof(rsp
), &rsp
);
2353 if (test_and_set_bit(CONF_REQ_SENT
, &chan
->conf_state
))
2356 l2cap_send_cmd(conn
, l2cap_get_ident(conn
), L2CAP_CONF_REQ
,
2357 l2cap_build_conf_req(chan
, buf
), buf
);
2358 chan
->num_conf_req
++;
2361 static void l2cap_conf_rfc_get(struct l2cap_chan
*chan
, void *rsp
, int len
)
2365 struct l2cap_conf_rfc rfc
;
2367 BT_DBG("chan %p, rsp %p, len %d", chan
, rsp
, len
);
2369 if ((chan
->mode
!= L2CAP_MODE_ERTM
) && (chan
->mode
!= L2CAP_MODE_STREAMING
))
2372 while (len
>= L2CAP_CONF_OPT_SIZE
) {
2373 len
-= l2cap_get_conf_opt(&rsp
, &type
, &olen
, &val
);
2376 case L2CAP_CONF_RFC
:
2377 if (olen
== sizeof(rfc
))
2378 memcpy(&rfc
, (void *)val
, olen
);
2385 case L2CAP_MODE_ERTM
:
2386 chan
->retrans_timeout
= le16_to_cpu(rfc
.retrans_timeout
);
2387 chan
->monitor_timeout
= le16_to_cpu(rfc
.monitor_timeout
);
2388 chan
->mps
= le16_to_cpu(rfc
.max_pdu_size
);
2390 case L2CAP_MODE_STREAMING
:
2391 chan
->mps
= le16_to_cpu(rfc
.max_pdu_size
);
2395 static inline int l2cap_command_rej(struct l2cap_conn
*conn
, struct l2cap_cmd_hdr
*cmd
, u8
*data
)
2397 struct l2cap_cmd_rej_unk
*rej
= (struct l2cap_cmd_rej_unk
*) data
;
2399 if (rej
->reason
!= L2CAP_REJ_NOT_UNDERSTOOD
)
2402 if ((conn
->info_state
& L2CAP_INFO_FEAT_MASK_REQ_SENT
) &&
2403 cmd
->ident
== conn
->info_ident
) {
2404 del_timer(&conn
->info_timer
);
2406 conn
->info_state
|= L2CAP_INFO_FEAT_MASK_REQ_DONE
;
2407 conn
->info_ident
= 0;
2409 l2cap_conn_start(conn
);
2415 static inline int l2cap_connect_req(struct l2cap_conn
*conn
, struct l2cap_cmd_hdr
*cmd
, u8
*data
)
2417 struct l2cap_conn_req
*req
= (struct l2cap_conn_req
*) data
;
2418 struct l2cap_conn_rsp rsp
;
2419 struct l2cap_chan
*chan
= NULL
, *pchan
;
2420 struct sock
*parent
, *sk
= NULL
;
2421 int result
, status
= L2CAP_CS_NO_INFO
;
2423 u16 dcid
= 0, scid
= __le16_to_cpu(req
->scid
);
2424 __le16 psm
= req
->psm
;
2426 BT_DBG("psm 0x%2.2x scid 0x%4.4x", psm
, scid
);
2428 /* Check if we have socket listening on psm */
2429 pchan
= l2cap_global_chan_by_psm(BT_LISTEN
, psm
, conn
->src
);
2431 result
= L2CAP_CR_BAD_PSM
;
2437 bh_lock_sock(parent
);
2439 /* Check if the ACL is secure enough (if not SDP) */
2440 if (psm
!= cpu_to_le16(0x0001) &&
2441 !hci_conn_check_link_mode(conn
->hcon
)) {
2442 conn
->disc_reason
= 0x05;
2443 result
= L2CAP_CR_SEC_BLOCK
;
2447 result
= L2CAP_CR_NO_MEM
;
2449 /* Check for backlog size */
2450 if (sk_acceptq_is_full(parent
)) {
2451 BT_DBG("backlog full %d", parent
->sk_ack_backlog
);
2455 chan
= pchan
->ops
->new_connection(pchan
->data
);
2461 write_lock_bh(&conn
->chan_lock
);
2463 /* Check if we already have channel with that dcid */
2464 if (__l2cap_get_chan_by_dcid(conn
, scid
)) {
2465 write_unlock_bh(&conn
->chan_lock
);
2466 sock_set_flag(sk
, SOCK_ZAPPED
);
2467 chan
->ops
->close(chan
->data
);
2471 hci_conn_hold(conn
->hcon
);
2473 bacpy(&bt_sk(sk
)->src
, conn
->src
);
2474 bacpy(&bt_sk(sk
)->dst
, conn
->dst
);
2478 bt_accept_enqueue(parent
, sk
);
2480 __l2cap_chan_add(conn
, chan
);
2484 __set_chan_timer(chan
, sk
->sk_sndtimeo
);
2486 chan
->ident
= cmd
->ident
;
2488 if (conn
->info_state
& L2CAP_INFO_FEAT_MASK_REQ_DONE
) {
2489 if (l2cap_check_security(chan
)) {
2490 if (bt_sk(sk
)->defer_setup
) {
2491 l2cap_state_change(chan
, BT_CONNECT2
);
2492 result
= L2CAP_CR_PEND
;
2493 status
= L2CAP_CS_AUTHOR_PEND
;
2494 parent
->sk_data_ready(parent
, 0);
2496 l2cap_state_change(chan
, BT_CONFIG
);
2497 result
= L2CAP_CR_SUCCESS
;
2498 status
= L2CAP_CS_NO_INFO
;
2501 l2cap_state_change(chan
, BT_CONNECT2
);
2502 result
= L2CAP_CR_PEND
;
2503 status
= L2CAP_CS_AUTHEN_PEND
;
2506 l2cap_state_change(chan
, BT_CONNECT2
);
2507 result
= L2CAP_CR_PEND
;
2508 status
= L2CAP_CS_NO_INFO
;
2511 write_unlock_bh(&conn
->chan_lock
);
2514 bh_unlock_sock(parent
);
2517 rsp
.scid
= cpu_to_le16(scid
);
2518 rsp
.dcid
= cpu_to_le16(dcid
);
2519 rsp
.result
= cpu_to_le16(result
);
2520 rsp
.status
= cpu_to_le16(status
);
2521 l2cap_send_cmd(conn
, cmd
->ident
, L2CAP_CONN_RSP
, sizeof(rsp
), &rsp
);
2523 if (result
== L2CAP_CR_PEND
&& status
== L2CAP_CS_NO_INFO
) {
2524 struct l2cap_info_req info
;
2525 info
.type
= cpu_to_le16(L2CAP_IT_FEAT_MASK
);
2527 conn
->info_state
|= L2CAP_INFO_FEAT_MASK_REQ_SENT
;
2528 conn
->info_ident
= l2cap_get_ident(conn
);
2530 mod_timer(&conn
->info_timer
, jiffies
+
2531 msecs_to_jiffies(L2CAP_INFO_TIMEOUT
));
2533 l2cap_send_cmd(conn
, conn
->info_ident
,
2534 L2CAP_INFO_REQ
, sizeof(info
), &info
);
2537 if (chan
&& !test_bit(CONF_REQ_SENT
, &chan
->conf_state
) &&
2538 result
== L2CAP_CR_SUCCESS
) {
2540 set_bit(CONF_REQ_SENT
, &chan
->conf_state
);
2541 l2cap_send_cmd(conn
, l2cap_get_ident(conn
), L2CAP_CONF_REQ
,
2542 l2cap_build_conf_req(chan
, buf
), buf
);
2543 chan
->num_conf_req
++;
2549 static inline int l2cap_connect_rsp(struct l2cap_conn
*conn
, struct l2cap_cmd_hdr
*cmd
, u8
*data
)
2551 struct l2cap_conn_rsp
*rsp
= (struct l2cap_conn_rsp
*) data
;
2552 u16 scid
, dcid
, result
, status
;
2553 struct l2cap_chan
*chan
;
2557 scid
= __le16_to_cpu(rsp
->scid
);
2558 dcid
= __le16_to_cpu(rsp
->dcid
);
2559 result
= __le16_to_cpu(rsp
->result
);
2560 status
= __le16_to_cpu(rsp
->status
);
2562 BT_DBG("dcid 0x%4.4x scid 0x%4.4x result 0x%2.2x status 0x%2.2x", dcid
, scid
, result
, status
);
2565 chan
= l2cap_get_chan_by_scid(conn
, scid
);
2569 chan
= l2cap_get_chan_by_ident(conn
, cmd
->ident
);
2577 case L2CAP_CR_SUCCESS
:
2578 l2cap_state_change(chan
, BT_CONFIG
);
2581 clear_bit(CONF_CONNECT_PEND
, &chan
->conf_state
);
2583 if (test_and_set_bit(CONF_REQ_SENT
, &chan
->conf_state
))
2586 l2cap_send_cmd(conn
, l2cap_get_ident(conn
), L2CAP_CONF_REQ
,
2587 l2cap_build_conf_req(chan
, req
), req
);
2588 chan
->num_conf_req
++;
2592 set_bit(CONF_CONNECT_PEND
, &chan
->conf_state
);
2596 /* don't delete l2cap channel if sk is owned by user */
2597 if (sock_owned_by_user(sk
)) {
2598 l2cap_state_change(chan
, BT_DISCONN
);
2599 __clear_chan_timer(chan
);
2600 __set_chan_timer(chan
, HZ
/ 5);
2604 l2cap_chan_del(chan
, ECONNREFUSED
);
2612 static inline void set_default_fcs(struct l2cap_chan
*chan
)
2614 /* FCS is enabled only in ERTM or streaming mode, if one or both
2617 if (chan
->mode
!= L2CAP_MODE_ERTM
&& chan
->mode
!= L2CAP_MODE_STREAMING
)
2618 chan
->fcs
= L2CAP_FCS_NONE
;
2619 else if (!test_bit(CONF_NO_FCS_RECV
, &chan
->conf_state
))
2620 chan
->fcs
= L2CAP_FCS_CRC16
;
2623 static inline int l2cap_config_req(struct l2cap_conn
*conn
, struct l2cap_cmd_hdr
*cmd
, u16 cmd_len
, u8
*data
)
2625 struct l2cap_conf_req
*req
= (struct l2cap_conf_req
*) data
;
2628 struct l2cap_chan
*chan
;
2632 dcid
= __le16_to_cpu(req
->dcid
);
2633 flags
= __le16_to_cpu(req
->flags
);
2635 BT_DBG("dcid 0x%4.4x flags 0x%2.2x", dcid
, flags
);
2637 chan
= l2cap_get_chan_by_scid(conn
, dcid
);
2643 if (chan
->state
!= BT_CONFIG
&& chan
->state
!= BT_CONNECT2
) {
2644 struct l2cap_cmd_rej_cid rej
;
2646 rej
.reason
= cpu_to_le16(L2CAP_REJ_INVALID_CID
);
2647 rej
.scid
= cpu_to_le16(chan
->scid
);
2648 rej
.dcid
= cpu_to_le16(chan
->dcid
);
2650 l2cap_send_cmd(conn
, cmd
->ident
, L2CAP_COMMAND_REJ
,
2655 /* Reject if config buffer is too small. */
2656 len
= cmd_len
- sizeof(*req
);
2657 if (len
< 0 || chan
->conf_len
+ len
> sizeof(chan
->conf_req
)) {
2658 l2cap_send_cmd(conn
, cmd
->ident
, L2CAP_CONF_RSP
,
2659 l2cap_build_conf_rsp(chan
, rsp
,
2660 L2CAP_CONF_REJECT
, flags
), rsp
);
2665 memcpy(chan
->conf_req
+ chan
->conf_len
, req
->data
, len
);
2666 chan
->conf_len
+= len
;
2668 if (flags
& 0x0001) {
2669 /* Incomplete config. Send empty response. */
2670 l2cap_send_cmd(conn
, cmd
->ident
, L2CAP_CONF_RSP
,
2671 l2cap_build_conf_rsp(chan
, rsp
,
2672 L2CAP_CONF_SUCCESS
, 0x0001), rsp
);
2676 /* Complete config. */
2677 len
= l2cap_parse_conf_req(chan
, rsp
);
2679 l2cap_send_disconn_req(conn
, chan
, ECONNRESET
);
2683 l2cap_send_cmd(conn
, cmd
->ident
, L2CAP_CONF_RSP
, len
, rsp
);
2684 chan
->num_conf_rsp
++;
2686 /* Reset config buffer. */
2689 if (!test_bit(CONF_OUTPUT_DONE
, &chan
->conf_state
))
2692 if (test_bit(CONF_INPUT_DONE
, &chan
->conf_state
)) {
2693 set_default_fcs(chan
);
2695 l2cap_state_change(chan
, BT_CONNECTED
);
2697 chan
->next_tx_seq
= 0;
2698 chan
->expected_tx_seq
= 0;
2699 skb_queue_head_init(&chan
->tx_q
);
2700 if (chan
->mode
== L2CAP_MODE_ERTM
)
2701 l2cap_ertm_init(chan
);
2703 l2cap_chan_ready(sk
);
2707 if (!test_and_set_bit(CONF_REQ_SENT
, &chan
->conf_state
)) {
2709 l2cap_send_cmd(conn
, l2cap_get_ident(conn
), L2CAP_CONF_REQ
,
2710 l2cap_build_conf_req(chan
, buf
), buf
);
2711 chan
->num_conf_req
++;
2719 static inline int l2cap_config_rsp(struct l2cap_conn
*conn
, struct l2cap_cmd_hdr
*cmd
, u8
*data
)
2721 struct l2cap_conf_rsp
*rsp
= (struct l2cap_conf_rsp
*)data
;
2722 u16 scid
, flags
, result
;
2723 struct l2cap_chan
*chan
;
2725 int len
= cmd
->len
- sizeof(*rsp
);
2727 scid
= __le16_to_cpu(rsp
->scid
);
2728 flags
= __le16_to_cpu(rsp
->flags
);
2729 result
= __le16_to_cpu(rsp
->result
);
2731 BT_DBG("scid 0x%4.4x flags 0x%2.2x result 0x%2.2x",
2732 scid
, flags
, result
);
2734 chan
= l2cap_get_chan_by_scid(conn
, scid
);
2741 case L2CAP_CONF_SUCCESS
:
2742 l2cap_conf_rfc_get(chan
, rsp
->data
, len
);
2745 case L2CAP_CONF_UNACCEPT
:
2746 if (chan
->num_conf_rsp
<= L2CAP_CONF_MAX_CONF_RSP
) {
2749 if (len
> sizeof(req
) - sizeof(struct l2cap_conf_req
)) {
2750 l2cap_send_disconn_req(conn
, chan
, ECONNRESET
);
2754 /* throw out any old stored conf requests */
2755 result
= L2CAP_CONF_SUCCESS
;
2756 len
= l2cap_parse_conf_rsp(chan
, rsp
->data
, len
,
2759 l2cap_send_disconn_req(conn
, chan
, ECONNRESET
);
2763 l2cap_send_cmd(conn
, l2cap_get_ident(conn
),
2764 L2CAP_CONF_REQ
, len
, req
);
2765 chan
->num_conf_req
++;
2766 if (result
!= L2CAP_CONF_SUCCESS
)
2772 sk
->sk_err
= ECONNRESET
;
2773 __set_chan_timer(chan
, HZ
* 5);
2774 l2cap_send_disconn_req(conn
, chan
, ECONNRESET
);
2781 set_bit(CONF_INPUT_DONE
, &chan
->conf_state
);
2783 if (test_bit(CONF_OUTPUT_DONE
, &chan
->conf_state
)) {
2784 set_default_fcs(chan
);
2786 l2cap_state_change(chan
, BT_CONNECTED
);
2787 chan
->next_tx_seq
= 0;
2788 chan
->expected_tx_seq
= 0;
2789 skb_queue_head_init(&chan
->tx_q
);
2790 if (chan
->mode
== L2CAP_MODE_ERTM
)
2791 l2cap_ertm_init(chan
);
2793 l2cap_chan_ready(sk
);
2801 static inline int l2cap_disconnect_req(struct l2cap_conn
*conn
, struct l2cap_cmd_hdr
*cmd
, u8
*data
)
2803 struct l2cap_disconn_req
*req
= (struct l2cap_disconn_req
*) data
;
2804 struct l2cap_disconn_rsp rsp
;
2806 struct l2cap_chan
*chan
;
2809 scid
= __le16_to_cpu(req
->scid
);
2810 dcid
= __le16_to_cpu(req
->dcid
);
2812 BT_DBG("scid 0x%4.4x dcid 0x%4.4x", scid
, dcid
);
2814 chan
= l2cap_get_chan_by_scid(conn
, dcid
);
2820 rsp
.dcid
= cpu_to_le16(chan
->scid
);
2821 rsp
.scid
= cpu_to_le16(chan
->dcid
);
2822 l2cap_send_cmd(conn
, cmd
->ident
, L2CAP_DISCONN_RSP
, sizeof(rsp
), &rsp
);
2824 sk
->sk_shutdown
= SHUTDOWN_MASK
;
2826 /* don't delete l2cap channel if sk is owned by user */
2827 if (sock_owned_by_user(sk
)) {
2828 l2cap_state_change(chan
, BT_DISCONN
);
2829 __clear_chan_timer(chan
);
2830 __set_chan_timer(chan
, HZ
/ 5);
2835 l2cap_chan_del(chan
, ECONNRESET
);
2838 chan
->ops
->close(chan
->data
);
2842 static inline int l2cap_disconnect_rsp(struct l2cap_conn
*conn
, struct l2cap_cmd_hdr
*cmd
, u8
*data
)
2844 struct l2cap_disconn_rsp
*rsp
= (struct l2cap_disconn_rsp
*) data
;
2846 struct l2cap_chan
*chan
;
2849 scid
= __le16_to_cpu(rsp
->scid
);
2850 dcid
= __le16_to_cpu(rsp
->dcid
);
2852 BT_DBG("dcid 0x%4.4x scid 0x%4.4x", dcid
, scid
);
2854 chan
= l2cap_get_chan_by_scid(conn
, scid
);
2860 /* don't delete l2cap channel if sk is owned by user */
2861 if (sock_owned_by_user(sk
)) {
2862 l2cap_state_change(chan
,BT_DISCONN
);
2863 __clear_chan_timer(chan
);
2864 __set_chan_timer(chan
, HZ
/ 5);
2869 l2cap_chan_del(chan
, 0);
2872 chan
->ops
->close(chan
->data
);
2876 static inline int l2cap_information_req(struct l2cap_conn
*conn
, struct l2cap_cmd_hdr
*cmd
, u8
*data
)
2878 struct l2cap_info_req
*req
= (struct l2cap_info_req
*) data
;
2881 type
= __le16_to_cpu(req
->type
);
2883 BT_DBG("type 0x%4.4x", type
);
2885 if (type
== L2CAP_IT_FEAT_MASK
) {
2887 u32 feat_mask
= l2cap_feat_mask
;
2888 struct l2cap_info_rsp
*rsp
= (struct l2cap_info_rsp
*) buf
;
2889 rsp
->type
= cpu_to_le16(L2CAP_IT_FEAT_MASK
);
2890 rsp
->result
= cpu_to_le16(L2CAP_IR_SUCCESS
);
2892 feat_mask
|= L2CAP_FEAT_ERTM
| L2CAP_FEAT_STREAMING
2895 feat_mask
|= L2CAP_FEAT_EXT_FLOW
2896 | L2CAP_FEAT_EXT_WINDOW
;
2898 put_unaligned_le32(feat_mask
, rsp
->data
);
2899 l2cap_send_cmd(conn
, cmd
->ident
,
2900 L2CAP_INFO_RSP
, sizeof(buf
), buf
);
2901 } else if (type
== L2CAP_IT_FIXED_CHAN
) {
2903 struct l2cap_info_rsp
*rsp
= (struct l2cap_info_rsp
*) buf
;
2904 rsp
->type
= cpu_to_le16(L2CAP_IT_FIXED_CHAN
);
2905 rsp
->result
= cpu_to_le16(L2CAP_IR_SUCCESS
);
2906 memcpy(buf
+ 4, l2cap_fixed_chan
, 8);
2907 l2cap_send_cmd(conn
, cmd
->ident
,
2908 L2CAP_INFO_RSP
, sizeof(buf
), buf
);
2910 struct l2cap_info_rsp rsp
;
2911 rsp
.type
= cpu_to_le16(type
);
2912 rsp
.result
= cpu_to_le16(L2CAP_IR_NOTSUPP
);
2913 l2cap_send_cmd(conn
, cmd
->ident
,
2914 L2CAP_INFO_RSP
, sizeof(rsp
), &rsp
);
2920 static inline int l2cap_information_rsp(struct l2cap_conn
*conn
, struct l2cap_cmd_hdr
*cmd
, u8
*data
)
2922 struct l2cap_info_rsp
*rsp
= (struct l2cap_info_rsp
*) data
;
2925 type
= __le16_to_cpu(rsp
->type
);
2926 result
= __le16_to_cpu(rsp
->result
);
2928 BT_DBG("type 0x%4.4x result 0x%2.2x", type
, result
);
2930 /* L2CAP Info req/rsp are unbound to channels, add extra checks */
2931 if (cmd
->ident
!= conn
->info_ident
||
2932 conn
->info_state
& L2CAP_INFO_FEAT_MASK_REQ_DONE
)
2935 del_timer(&conn
->info_timer
);
2937 if (result
!= L2CAP_IR_SUCCESS
) {
2938 conn
->info_state
|= L2CAP_INFO_FEAT_MASK_REQ_DONE
;
2939 conn
->info_ident
= 0;
2941 l2cap_conn_start(conn
);
2946 if (type
== L2CAP_IT_FEAT_MASK
) {
2947 conn
->feat_mask
= get_unaligned_le32(rsp
->data
);
2949 if (conn
->feat_mask
& L2CAP_FEAT_FIXED_CHAN
) {
2950 struct l2cap_info_req req
;
2951 req
.type
= cpu_to_le16(L2CAP_IT_FIXED_CHAN
);
2953 conn
->info_ident
= l2cap_get_ident(conn
);
2955 l2cap_send_cmd(conn
, conn
->info_ident
,
2956 L2CAP_INFO_REQ
, sizeof(req
), &req
);
2958 conn
->info_state
|= L2CAP_INFO_FEAT_MASK_REQ_DONE
;
2959 conn
->info_ident
= 0;
2961 l2cap_conn_start(conn
);
2963 } else if (type
== L2CAP_IT_FIXED_CHAN
) {
2964 conn
->info_state
|= L2CAP_INFO_FEAT_MASK_REQ_DONE
;
2965 conn
->info_ident
= 0;
2967 l2cap_conn_start(conn
);
2973 static inline int l2cap_check_conn_param(u16 min
, u16 max
, u16 latency
,
2978 if (min
> max
|| min
< 6 || max
> 3200)
2981 if (to_multiplier
< 10 || to_multiplier
> 3200)
2984 if (max
>= to_multiplier
* 8)
2987 max_latency
= (to_multiplier
* 8 / max
) - 1;
2988 if (latency
> 499 || latency
> max_latency
)
2994 static inline int l2cap_conn_param_update_req(struct l2cap_conn
*conn
,
2995 struct l2cap_cmd_hdr
*cmd
, u8
*data
)
2997 struct hci_conn
*hcon
= conn
->hcon
;
2998 struct l2cap_conn_param_update_req
*req
;
2999 struct l2cap_conn_param_update_rsp rsp
;
3000 u16 min
, max
, latency
, to_multiplier
, cmd_len
;
3003 if (!(hcon
->link_mode
& HCI_LM_MASTER
))
3006 cmd_len
= __le16_to_cpu(cmd
->len
);
3007 if (cmd_len
!= sizeof(struct l2cap_conn_param_update_req
))
3010 req
= (struct l2cap_conn_param_update_req
*) data
;
3011 min
= __le16_to_cpu(req
->min
);
3012 max
= __le16_to_cpu(req
->max
);
3013 latency
= __le16_to_cpu(req
->latency
);
3014 to_multiplier
= __le16_to_cpu(req
->to_multiplier
);
3016 BT_DBG("min 0x%4.4x max 0x%4.4x latency: 0x%4.4x Timeout: 0x%4.4x",
3017 min
, max
, latency
, to_multiplier
);
3019 memset(&rsp
, 0, sizeof(rsp
));
3021 err
= l2cap_check_conn_param(min
, max
, latency
, to_multiplier
);
3023 rsp
.result
= cpu_to_le16(L2CAP_CONN_PARAM_REJECTED
);
3025 rsp
.result
= cpu_to_le16(L2CAP_CONN_PARAM_ACCEPTED
);
3027 l2cap_send_cmd(conn
, cmd
->ident
, L2CAP_CONN_PARAM_UPDATE_RSP
,
3031 hci_le_conn_update(hcon
, min
, max
, latency
, to_multiplier
);
3036 static inline int l2cap_bredr_sig_cmd(struct l2cap_conn
*conn
,
3037 struct l2cap_cmd_hdr
*cmd
, u16 cmd_len
, u8
*data
)
3041 switch (cmd
->code
) {
3042 case L2CAP_COMMAND_REJ
:
3043 l2cap_command_rej(conn
, cmd
, data
);
3046 case L2CAP_CONN_REQ
:
3047 err
= l2cap_connect_req(conn
, cmd
, data
);
3050 case L2CAP_CONN_RSP
:
3051 err
= l2cap_connect_rsp(conn
, cmd
, data
);
3054 case L2CAP_CONF_REQ
:
3055 err
= l2cap_config_req(conn
, cmd
, cmd_len
, data
);
3058 case L2CAP_CONF_RSP
:
3059 err
= l2cap_config_rsp(conn
, cmd
, data
);
3062 case L2CAP_DISCONN_REQ
:
3063 err
= l2cap_disconnect_req(conn
, cmd
, data
);
3066 case L2CAP_DISCONN_RSP
:
3067 err
= l2cap_disconnect_rsp(conn
, cmd
, data
);
3070 case L2CAP_ECHO_REQ
:
3071 l2cap_send_cmd(conn
, cmd
->ident
, L2CAP_ECHO_RSP
, cmd_len
, data
);
3074 case L2CAP_ECHO_RSP
:
3077 case L2CAP_INFO_REQ
:
3078 err
= l2cap_information_req(conn
, cmd
, data
);
3081 case L2CAP_INFO_RSP
:
3082 err
= l2cap_information_rsp(conn
, cmd
, data
);
3086 BT_ERR("Unknown BR/EDR signaling command 0x%2.2x", cmd
->code
);
3094 static inline int l2cap_le_sig_cmd(struct l2cap_conn
*conn
,
3095 struct l2cap_cmd_hdr
*cmd
, u8
*data
)
3097 switch (cmd
->code
) {
3098 case L2CAP_COMMAND_REJ
:
3101 case L2CAP_CONN_PARAM_UPDATE_REQ
:
3102 return l2cap_conn_param_update_req(conn
, cmd
, data
);
3104 case L2CAP_CONN_PARAM_UPDATE_RSP
:
3108 BT_ERR("Unknown LE signaling command 0x%2.2x", cmd
->code
);
3113 static inline void l2cap_sig_channel(struct l2cap_conn
*conn
,
3114 struct sk_buff
*skb
)
3116 u8
*data
= skb
->data
;
3118 struct l2cap_cmd_hdr cmd
;
3121 l2cap_raw_recv(conn
, skb
);
3123 while (len
>= L2CAP_CMD_HDR_SIZE
) {
3125 memcpy(&cmd
, data
, L2CAP_CMD_HDR_SIZE
);
3126 data
+= L2CAP_CMD_HDR_SIZE
;
3127 len
-= L2CAP_CMD_HDR_SIZE
;
3129 cmd_len
= le16_to_cpu(cmd
.len
);
3131 BT_DBG("code 0x%2.2x len %d id 0x%2.2x", cmd
.code
, cmd_len
, cmd
.ident
);
3133 if (cmd_len
> len
|| !cmd
.ident
) {
3134 BT_DBG("corrupted command");
3138 if (conn
->hcon
->type
== LE_LINK
)
3139 err
= l2cap_le_sig_cmd(conn
, &cmd
, data
);
3141 err
= l2cap_bredr_sig_cmd(conn
, &cmd
, cmd_len
, data
);
3144 struct l2cap_cmd_rej_unk rej
;
3146 BT_ERR("Wrong link type (%d)", err
);
3148 /* FIXME: Map err to a valid reason */
3149 rej
.reason
= cpu_to_le16(L2CAP_REJ_NOT_UNDERSTOOD
);
3150 l2cap_send_cmd(conn
, cmd
.ident
, L2CAP_COMMAND_REJ
, sizeof(rej
), &rej
);
3160 static int l2cap_check_fcs(struct l2cap_chan
*chan
, struct sk_buff
*skb
)
3162 u16 our_fcs
, rcv_fcs
;
3165 if (test_bit(FLAG_EXT_CTRL
, &chan
->flags
))
3166 hdr_size
= L2CAP_EXT_HDR_SIZE
;
3168 hdr_size
= L2CAP_ENH_HDR_SIZE
;
3170 if (chan
->fcs
== L2CAP_FCS_CRC16
) {
3171 skb_trim(skb
, skb
->len
- 2);
3172 rcv_fcs
= get_unaligned_le16(skb
->data
+ skb
->len
);
3173 our_fcs
= crc16(0, skb
->data
- hdr_size
, skb
->len
+ hdr_size
);
3175 if (our_fcs
!= rcv_fcs
)
3181 static inline void l2cap_send_i_or_rr_or_rnr(struct l2cap_chan
*chan
)
3185 chan
->frames_sent
= 0;
3187 control
|= __set_reqseq(chan
, chan
->buffer_seq
);
3189 if (test_bit(CONN_LOCAL_BUSY
, &chan
->conn_state
)) {
3190 control
|= __set_ctrl_super(chan
, L2CAP_SUPER_RNR
);
3191 l2cap_send_sframe(chan
, control
);
3192 set_bit(CONN_RNR_SENT
, &chan
->conn_state
);
3195 if (test_bit(CONN_REMOTE_BUSY
, &chan
->conn_state
))
3196 l2cap_retransmit_frames(chan
);
3198 l2cap_ertm_send(chan
);
3200 if (!test_bit(CONN_LOCAL_BUSY
, &chan
->conn_state
) &&
3201 chan
->frames_sent
== 0) {
3202 control
|= __set_ctrl_super(chan
, L2CAP_SUPER_RR
);
3203 l2cap_send_sframe(chan
, control
);
3207 static int l2cap_add_to_srej_queue(struct l2cap_chan
*chan
, struct sk_buff
*skb
, u16 tx_seq
, u8 sar
)
3209 struct sk_buff
*next_skb
;
3210 int tx_seq_offset
, next_tx_seq_offset
;
3212 bt_cb(skb
)->tx_seq
= tx_seq
;
3213 bt_cb(skb
)->sar
= sar
;
3215 next_skb
= skb_peek(&chan
->srej_q
);
3217 __skb_queue_tail(&chan
->srej_q
, skb
);
3221 tx_seq_offset
= (tx_seq
- chan
->buffer_seq
) % 64;
3222 if (tx_seq_offset
< 0)
3223 tx_seq_offset
+= 64;
3226 if (bt_cb(next_skb
)->tx_seq
== tx_seq
)
3229 next_tx_seq_offset
= (bt_cb(next_skb
)->tx_seq
-
3230 chan
->buffer_seq
) % 64;
3231 if (next_tx_seq_offset
< 0)
3232 next_tx_seq_offset
+= 64;
3234 if (next_tx_seq_offset
> tx_seq_offset
) {
3235 __skb_queue_before(&chan
->srej_q
, next_skb
, skb
);
3239 if (skb_queue_is_last(&chan
->srej_q
, next_skb
))
3242 } while ((next_skb
= skb_queue_next(&chan
->srej_q
, next_skb
)));
3244 __skb_queue_tail(&chan
->srej_q
, skb
);
3249 static void append_skb_frag(struct sk_buff
*skb
,
3250 struct sk_buff
*new_frag
, struct sk_buff
**last_frag
)
3252 /* skb->len reflects data in skb as well as all fragments
3253 * skb->data_len reflects only data in fragments
3255 if (!skb_has_frag_list(skb
))
3256 skb_shinfo(skb
)->frag_list
= new_frag
;
3258 new_frag
->next
= NULL
;
3260 (*last_frag
)->next
= new_frag
;
3261 *last_frag
= new_frag
;
3263 skb
->len
+= new_frag
->len
;
3264 skb
->data_len
+= new_frag
->len
;
3265 skb
->truesize
+= new_frag
->truesize
;
3268 static int l2cap_reassemble_sdu(struct l2cap_chan
*chan
, struct sk_buff
*skb
, u16 control
)
3272 switch (__get_ctrl_sar(chan
, control
)) {
3273 case L2CAP_SAR_UNSEGMENTED
:
3277 err
= chan
->ops
->recv(chan
->data
, skb
);
3280 case L2CAP_SAR_START
:
3284 chan
->sdu_len
= get_unaligned_le16(skb
->data
);
3287 if (chan
->sdu_len
> chan
->imtu
) {
3292 if (skb
->len
>= chan
->sdu_len
)
3296 chan
->sdu_last_frag
= skb
;
3302 case L2CAP_SAR_CONTINUE
:
3306 append_skb_frag(chan
->sdu
, skb
,
3307 &chan
->sdu_last_frag
);
3310 if (chan
->sdu
->len
>= chan
->sdu_len
)
3320 append_skb_frag(chan
->sdu
, skb
,
3321 &chan
->sdu_last_frag
);
3324 if (chan
->sdu
->len
!= chan
->sdu_len
)
3327 err
= chan
->ops
->recv(chan
->data
, chan
->sdu
);
3330 /* Reassembly complete */
3332 chan
->sdu_last_frag
= NULL
;
3340 kfree_skb(chan
->sdu
);
3342 chan
->sdu_last_frag
= NULL
;
3349 static void l2cap_ertm_enter_local_busy(struct l2cap_chan
*chan
)
3353 BT_DBG("chan %p, Enter local busy", chan
);
3355 set_bit(CONN_LOCAL_BUSY
, &chan
->conn_state
);
3357 control
= __set_reqseq(chan
, chan
->buffer_seq
);
3358 control
|= __set_ctrl_super(chan
, L2CAP_SUPER_RNR
);
3359 l2cap_send_sframe(chan
, control
);
3361 set_bit(CONN_RNR_SENT
, &chan
->conn_state
);
3363 __clear_ack_timer(chan
);
3366 static void l2cap_ertm_exit_local_busy(struct l2cap_chan
*chan
)
3370 if (!test_bit(CONN_RNR_SENT
, &chan
->conn_state
))
3373 control
= __set_reqseq(chan
, chan
->buffer_seq
);
3374 control
|= __set_ctrl_poll(chan
);
3375 control
|= __set_ctrl_super(chan
, L2CAP_SUPER_RR
);
3376 l2cap_send_sframe(chan
, control
);
3377 chan
->retry_count
= 1;
3379 __clear_retrans_timer(chan
);
3380 __set_monitor_timer(chan
);
3382 set_bit(CONN_WAIT_F
, &chan
->conn_state
);
3385 clear_bit(CONN_LOCAL_BUSY
, &chan
->conn_state
);
3386 clear_bit(CONN_RNR_SENT
, &chan
->conn_state
);
3388 BT_DBG("chan %p, Exit local busy", chan
);
3391 void l2cap_chan_busy(struct l2cap_chan
*chan
, int busy
)
3393 if (chan
->mode
== L2CAP_MODE_ERTM
) {
3395 l2cap_ertm_enter_local_busy(chan
);
3397 l2cap_ertm_exit_local_busy(chan
);
3401 static void l2cap_check_srej_gap(struct l2cap_chan
*chan
, u16 tx_seq
)
3403 struct sk_buff
*skb
;
3406 while ((skb
= skb_peek(&chan
->srej_q
)) &&
3407 !test_bit(CONN_LOCAL_BUSY
, &chan
->conn_state
)) {
3410 if (bt_cb(skb
)->tx_seq
!= tx_seq
)
3413 skb
= skb_dequeue(&chan
->srej_q
);
3414 control
= __set_ctrl_sar(chan
, bt_cb(skb
)->sar
);
3415 err
= l2cap_reassemble_sdu(chan
, skb
, control
);
3418 l2cap_send_disconn_req(chan
->conn
, chan
, ECONNRESET
);
3422 chan
->buffer_seq_srej
=
3423 (chan
->buffer_seq_srej
+ 1) % 64;
3424 tx_seq
= (tx_seq
+ 1) % 64;
3428 static void l2cap_resend_srejframe(struct l2cap_chan
*chan
, u16 tx_seq
)
3430 struct srej_list
*l
, *tmp
;
3433 list_for_each_entry_safe(l
, tmp
, &chan
->srej_l
, list
) {
3434 if (l
->tx_seq
== tx_seq
) {
3439 control
= __set_ctrl_super(chan
, L2CAP_SUPER_SREJ
);
3440 control
|= __set_reqseq(chan
, l
->tx_seq
);
3441 l2cap_send_sframe(chan
, control
);
3443 list_add_tail(&l
->list
, &chan
->srej_l
);
3447 static void l2cap_send_srejframe(struct l2cap_chan
*chan
, u16 tx_seq
)
3449 struct srej_list
*new;
3452 while (tx_seq
!= chan
->expected_tx_seq
) {
3453 control
= __set_ctrl_super(chan
, L2CAP_SUPER_SREJ
);
3454 control
|= __set_reqseq(chan
, chan
->expected_tx_seq
);
3455 l2cap_send_sframe(chan
, control
);
3457 new = kzalloc(sizeof(struct srej_list
), GFP_ATOMIC
);
3458 new->tx_seq
= chan
->expected_tx_seq
;
3459 chan
->expected_tx_seq
= (chan
->expected_tx_seq
+ 1) % 64;
3460 list_add_tail(&new->list
, &chan
->srej_l
);
3462 chan
->expected_tx_seq
= (chan
->expected_tx_seq
+ 1) % 64;
3465 static inline int l2cap_data_channel_iframe(struct l2cap_chan
*chan
, u16 rx_control
, struct sk_buff
*skb
)
3467 u16 tx_seq
= __get_txseq(chan
, rx_control
);
3468 u16 req_seq
= __get_reqseq(chan
, rx_control
);
3469 u8 sar
= __get_ctrl_sar(chan
, rx_control
);
3470 int tx_seq_offset
, expected_tx_seq_offset
;
3471 int num_to_ack
= (chan
->tx_win
/6) + 1;
3474 BT_DBG("chan %p len %d tx_seq %d rx_control 0x%4.4x", chan
, skb
->len
,
3475 tx_seq
, rx_control
);
3477 if (__is_ctrl_final(chan
, rx_control
) &&
3478 test_bit(CONN_WAIT_F
, &chan
->conn_state
)) {
3479 __clear_monitor_timer(chan
);
3480 if (chan
->unacked_frames
> 0)
3481 __set_retrans_timer(chan
);
3482 clear_bit(CONN_WAIT_F
, &chan
->conn_state
);
3485 chan
->expected_ack_seq
= req_seq
;
3486 l2cap_drop_acked_frames(chan
);
3488 tx_seq_offset
= (tx_seq
- chan
->buffer_seq
) % 64;
3489 if (tx_seq_offset
< 0)
3490 tx_seq_offset
+= 64;
3492 /* invalid tx_seq */
3493 if (tx_seq_offset
>= chan
->tx_win
) {
3494 l2cap_send_disconn_req(chan
->conn
, chan
, ECONNRESET
);
3498 if (test_bit(CONN_LOCAL_BUSY
, &chan
->conn_state
))
3501 if (tx_seq
== chan
->expected_tx_seq
)
3504 if (test_bit(CONN_SREJ_SENT
, &chan
->conn_state
)) {
3505 struct srej_list
*first
;
3507 first
= list_first_entry(&chan
->srej_l
,
3508 struct srej_list
, list
);
3509 if (tx_seq
== first
->tx_seq
) {
3510 l2cap_add_to_srej_queue(chan
, skb
, tx_seq
, sar
);
3511 l2cap_check_srej_gap(chan
, tx_seq
);
3513 list_del(&first
->list
);
3516 if (list_empty(&chan
->srej_l
)) {
3517 chan
->buffer_seq
= chan
->buffer_seq_srej
;
3518 clear_bit(CONN_SREJ_SENT
, &chan
->conn_state
);
3519 l2cap_send_ack(chan
);
3520 BT_DBG("chan %p, Exit SREJ_SENT", chan
);
3523 struct srej_list
*l
;
3525 /* duplicated tx_seq */
3526 if (l2cap_add_to_srej_queue(chan
, skb
, tx_seq
, sar
) < 0)
3529 list_for_each_entry(l
, &chan
->srej_l
, list
) {
3530 if (l
->tx_seq
== tx_seq
) {
3531 l2cap_resend_srejframe(chan
, tx_seq
);
3535 l2cap_send_srejframe(chan
, tx_seq
);
3538 expected_tx_seq_offset
=
3539 (chan
->expected_tx_seq
- chan
->buffer_seq
) % 64;
3540 if (expected_tx_seq_offset
< 0)
3541 expected_tx_seq_offset
+= 64;
3543 /* duplicated tx_seq */
3544 if (tx_seq_offset
< expected_tx_seq_offset
)
3547 set_bit(CONN_SREJ_SENT
, &chan
->conn_state
);
3549 BT_DBG("chan %p, Enter SREJ", chan
);
3551 INIT_LIST_HEAD(&chan
->srej_l
);
3552 chan
->buffer_seq_srej
= chan
->buffer_seq
;
3554 __skb_queue_head_init(&chan
->srej_q
);
3555 l2cap_add_to_srej_queue(chan
, skb
, tx_seq
, sar
);
3557 set_bit(CONN_SEND_PBIT
, &chan
->conn_state
);
3559 l2cap_send_srejframe(chan
, tx_seq
);
3561 __clear_ack_timer(chan
);
3566 chan
->expected_tx_seq
= (chan
->expected_tx_seq
+ 1) % 64;
3568 if (test_bit(CONN_SREJ_SENT
, &chan
->conn_state
)) {
3569 bt_cb(skb
)->tx_seq
= tx_seq
;
3570 bt_cb(skb
)->sar
= sar
;
3571 __skb_queue_tail(&chan
->srej_q
, skb
);
3575 err
= l2cap_reassemble_sdu(chan
, skb
, rx_control
);
3576 chan
->buffer_seq
= (chan
->buffer_seq
+ 1) % 64;
3578 l2cap_send_disconn_req(chan
->conn
, chan
, ECONNRESET
);
3582 if (__is_ctrl_final(chan
, rx_control
)) {
3583 if (!test_and_clear_bit(CONN_REJ_ACT
, &chan
->conn_state
))
3584 l2cap_retransmit_frames(chan
);
3587 __set_ack_timer(chan
);
3589 chan
->num_acked
= (chan
->num_acked
+ 1) % num_to_ack
;
3590 if (chan
->num_acked
== num_to_ack
- 1)
3591 l2cap_send_ack(chan
);
3600 static inline void l2cap_data_channel_rrframe(struct l2cap_chan
*chan
, u16 rx_control
)
3602 BT_DBG("chan %p, req_seq %d ctrl 0x%4.4x", chan
,
3603 __get_reqseq(chan
, rx_control
), rx_control
);
3605 chan
->expected_ack_seq
= __get_reqseq(chan
, rx_control
);
3606 l2cap_drop_acked_frames(chan
);
3608 if (__is_ctrl_poll(chan
, rx_control
)) {
3609 set_bit(CONN_SEND_FBIT
, &chan
->conn_state
);
3610 if (test_bit(CONN_SREJ_SENT
, &chan
->conn_state
)) {
3611 if (test_bit(CONN_REMOTE_BUSY
, &chan
->conn_state
) &&
3612 (chan
->unacked_frames
> 0))
3613 __set_retrans_timer(chan
);
3615 clear_bit(CONN_REMOTE_BUSY
, &chan
->conn_state
);
3616 l2cap_send_srejtail(chan
);
3618 l2cap_send_i_or_rr_or_rnr(chan
);
3621 } else if (__is_ctrl_final(chan
, rx_control
)) {
3622 clear_bit(CONN_REMOTE_BUSY
, &chan
->conn_state
);
3624 if (!test_and_clear_bit(CONN_REJ_ACT
, &chan
->conn_state
))
3625 l2cap_retransmit_frames(chan
);
3628 if (test_bit(CONN_REMOTE_BUSY
, &chan
->conn_state
) &&
3629 (chan
->unacked_frames
> 0))
3630 __set_retrans_timer(chan
);
3632 clear_bit(CONN_REMOTE_BUSY
, &chan
->conn_state
);
3633 if (test_bit(CONN_SREJ_SENT
, &chan
->conn_state
))
3634 l2cap_send_ack(chan
);
3636 l2cap_ertm_send(chan
);
3640 static inline void l2cap_data_channel_rejframe(struct l2cap_chan
*chan
, u16 rx_control
)
3642 u16 tx_seq
= __get_reqseq(chan
, rx_control
);
3644 BT_DBG("chan %p, req_seq %d ctrl 0x%4.4x", chan
, tx_seq
, rx_control
);
3646 clear_bit(CONN_REMOTE_BUSY
, &chan
->conn_state
);
3648 chan
->expected_ack_seq
= tx_seq
;
3649 l2cap_drop_acked_frames(chan
);
3651 if (__is_ctrl_final(chan
, rx_control
)) {
3652 if (!test_and_clear_bit(CONN_REJ_ACT
, &chan
->conn_state
))
3653 l2cap_retransmit_frames(chan
);
3655 l2cap_retransmit_frames(chan
);
3657 if (test_bit(CONN_WAIT_F
, &chan
->conn_state
))
3658 set_bit(CONN_REJ_ACT
, &chan
->conn_state
);
3661 static inline void l2cap_data_channel_srejframe(struct l2cap_chan
*chan
, u16 rx_control
)
3663 u16 tx_seq
= __get_reqseq(chan
, rx_control
);
3665 BT_DBG("chan %p, req_seq %d ctrl 0x%4.4x", chan
, tx_seq
, rx_control
);
3667 clear_bit(CONN_REMOTE_BUSY
, &chan
->conn_state
);
3669 if (__is_ctrl_poll(chan
, rx_control
)) {
3670 chan
->expected_ack_seq
= tx_seq
;
3671 l2cap_drop_acked_frames(chan
);
3673 set_bit(CONN_SEND_FBIT
, &chan
->conn_state
);
3674 l2cap_retransmit_one_frame(chan
, tx_seq
);
3676 l2cap_ertm_send(chan
);
3678 if (test_bit(CONN_WAIT_F
, &chan
->conn_state
)) {
3679 chan
->srej_save_reqseq
= tx_seq
;
3680 set_bit(CONN_SREJ_ACT
, &chan
->conn_state
);
3682 } else if (__is_ctrl_final(chan
, rx_control
)) {
3683 if (test_bit(CONN_SREJ_ACT
, &chan
->conn_state
) &&
3684 chan
->srej_save_reqseq
== tx_seq
)
3685 clear_bit(CONN_SREJ_ACT
, &chan
->conn_state
);
3687 l2cap_retransmit_one_frame(chan
, tx_seq
);
3689 l2cap_retransmit_one_frame(chan
, tx_seq
);
3690 if (test_bit(CONN_WAIT_F
, &chan
->conn_state
)) {
3691 chan
->srej_save_reqseq
= tx_seq
;
3692 set_bit(CONN_SREJ_ACT
, &chan
->conn_state
);
3697 static inline void l2cap_data_channel_rnrframe(struct l2cap_chan
*chan
, u16 rx_control
)
3699 u16 tx_seq
= __get_reqseq(chan
, rx_control
);
3701 BT_DBG("chan %p, req_seq %d ctrl 0x%4.4x", chan
, tx_seq
, rx_control
);
3703 set_bit(CONN_REMOTE_BUSY
, &chan
->conn_state
);
3704 chan
->expected_ack_seq
= tx_seq
;
3705 l2cap_drop_acked_frames(chan
);
3707 if (__is_ctrl_poll(chan
, rx_control
))
3708 set_bit(CONN_SEND_FBIT
, &chan
->conn_state
);
3710 if (!test_bit(CONN_SREJ_SENT
, &chan
->conn_state
)) {
3711 __clear_retrans_timer(chan
);
3712 if (__is_ctrl_poll(chan
, rx_control
))
3713 l2cap_send_rr_or_rnr(chan
, L2CAP_CTRL_FINAL
);
3717 if (__is_ctrl_poll(chan
, rx_control
)) {
3718 l2cap_send_srejtail(chan
);
3720 rx_control
= __set_ctrl_super(chan
, L2CAP_SUPER_RR
);
3721 l2cap_send_sframe(chan
, rx_control
);
3725 static inline int l2cap_data_channel_sframe(struct l2cap_chan
*chan
, u16 rx_control
, struct sk_buff
*skb
)
3727 BT_DBG("chan %p rx_control 0x%4.4x len %d", chan
, rx_control
, skb
->len
);
3729 if (__is_ctrl_final(chan
, rx_control
) &&
3730 test_bit(CONN_WAIT_F
, &chan
->conn_state
)) {
3731 __clear_monitor_timer(chan
);
3732 if (chan
->unacked_frames
> 0)
3733 __set_retrans_timer(chan
);
3734 clear_bit(CONN_WAIT_F
, &chan
->conn_state
);
3737 switch (__get_ctrl_super(chan
, rx_control
)) {
3738 case L2CAP_SUPER_RR
:
3739 l2cap_data_channel_rrframe(chan
, rx_control
);
3742 case L2CAP_SUPER_REJ
:
3743 l2cap_data_channel_rejframe(chan
, rx_control
);
3746 case L2CAP_SUPER_SREJ
:
3747 l2cap_data_channel_srejframe(chan
, rx_control
);
3750 case L2CAP_SUPER_RNR
:
3751 l2cap_data_channel_rnrframe(chan
, rx_control
);
3759 static int l2cap_ertm_data_rcv(struct sock
*sk
, struct sk_buff
*skb
)
3761 struct l2cap_chan
*chan
= l2cap_pi(sk
)->chan
;
3764 int len
, next_tx_seq_offset
, req_seq_offset
;
3766 control
= get_unaligned_le16(skb
->data
);
3771 * We can just drop the corrupted I-frame here.
3772 * Receiver will miss it and start proper recovery
3773 * procedures and ask retransmission.
3775 if (l2cap_check_fcs(chan
, skb
))
3778 if (__is_sar_start(chan
, control
) && !__is_sframe(chan
, control
))
3781 if (chan
->fcs
== L2CAP_FCS_CRC16
)
3784 if (len
> chan
->mps
) {
3785 l2cap_send_disconn_req(chan
->conn
, chan
, ECONNRESET
);
3789 req_seq
= __get_reqseq(chan
, control
);
3790 req_seq_offset
= (req_seq
- chan
->expected_ack_seq
) % 64;
3791 if (req_seq_offset
< 0)
3792 req_seq_offset
+= 64;
3794 next_tx_seq_offset
=
3795 (chan
->next_tx_seq
- chan
->expected_ack_seq
) % 64;
3796 if (next_tx_seq_offset
< 0)
3797 next_tx_seq_offset
+= 64;
3799 /* check for invalid req-seq */
3800 if (req_seq_offset
> next_tx_seq_offset
) {
3801 l2cap_send_disconn_req(chan
->conn
, chan
, ECONNRESET
);
3805 if (!__is_sframe(chan
, control
)) {
3807 l2cap_send_disconn_req(chan
->conn
, chan
, ECONNRESET
);
3811 l2cap_data_channel_iframe(chan
, control
, skb
);
3815 l2cap_send_disconn_req(chan
->conn
, chan
, ECONNRESET
);
3819 l2cap_data_channel_sframe(chan
, control
, skb
);
3829 static inline int l2cap_data_channel(struct l2cap_conn
*conn
, u16 cid
, struct sk_buff
*skb
)
3831 struct l2cap_chan
*chan
;
3832 struct sock
*sk
= NULL
;
3837 chan
= l2cap_get_chan_by_scid(conn
, cid
);
3839 BT_DBG("unknown cid 0x%4.4x", cid
);
3845 BT_DBG("chan %p, len %d", chan
, skb
->len
);
3847 if (chan
->state
!= BT_CONNECTED
)
3850 switch (chan
->mode
) {
3851 case L2CAP_MODE_BASIC
:
3852 /* If socket recv buffers overflows we drop data here
3853 * which is *bad* because L2CAP has to be reliable.
3854 * But we don't have any other choice. L2CAP doesn't
3855 * provide flow control mechanism. */
3857 if (chan
->imtu
< skb
->len
)
3860 if (!chan
->ops
->recv(chan
->data
, skb
))
3864 case L2CAP_MODE_ERTM
:
3865 if (!sock_owned_by_user(sk
)) {
3866 l2cap_ertm_data_rcv(sk
, skb
);
3868 if (sk_add_backlog(sk
, skb
))
3874 case L2CAP_MODE_STREAMING
:
3875 control
= get_unaligned_le16(skb
->data
);
3879 if (l2cap_check_fcs(chan
, skb
))
3882 if (__is_sar_start(chan
, control
))
3885 if (chan
->fcs
== L2CAP_FCS_CRC16
)
3888 if (len
> chan
->mps
|| len
< 0 || __is_sframe(chan
, control
))
3891 tx_seq
= __get_txseq(chan
, control
);
3893 if (chan
->expected_tx_seq
!= tx_seq
) {
3894 /* Frame(s) missing - must discard partial SDU */
3895 kfree_skb(chan
->sdu
);
3897 chan
->sdu_last_frag
= NULL
;
3900 /* TODO: Notify userland of missing data */
3903 chan
->expected_tx_seq
= (tx_seq
+ 1) % 64;
3905 if (l2cap_reassemble_sdu(chan
, skb
, control
) == -EMSGSIZE
)
3906 l2cap_send_disconn_req(chan
->conn
, chan
, ECONNRESET
);
3911 BT_DBG("chan %p: bad mode 0x%2.2x", chan
, chan
->mode
);
3925 static inline int l2cap_conless_channel(struct l2cap_conn
*conn
, __le16 psm
, struct sk_buff
*skb
)
3927 struct sock
*sk
= NULL
;
3928 struct l2cap_chan
*chan
;
3930 chan
= l2cap_global_chan_by_psm(0, psm
, conn
->src
);
3938 BT_DBG("sk %p, len %d", sk
, skb
->len
);
3940 if (chan
->state
!= BT_BOUND
&& chan
->state
!= BT_CONNECTED
)
3943 if (chan
->imtu
< skb
->len
)
3946 if (!chan
->ops
->recv(chan
->data
, skb
))
3958 static inline int l2cap_att_channel(struct l2cap_conn
*conn
, __le16 cid
, struct sk_buff
*skb
)
3960 struct sock
*sk
= NULL
;
3961 struct l2cap_chan
*chan
;
3963 chan
= l2cap_global_chan_by_scid(0, cid
, conn
->src
);
3971 BT_DBG("sk %p, len %d", sk
, skb
->len
);
3973 if (chan
->state
!= BT_BOUND
&& chan
->state
!= BT_CONNECTED
)
3976 if (chan
->imtu
< skb
->len
)
3979 if (!chan
->ops
->recv(chan
->data
, skb
))
3991 static void l2cap_recv_frame(struct l2cap_conn
*conn
, struct sk_buff
*skb
)
3993 struct l2cap_hdr
*lh
= (void *) skb
->data
;
3997 skb_pull(skb
, L2CAP_HDR_SIZE
);
3998 cid
= __le16_to_cpu(lh
->cid
);
3999 len
= __le16_to_cpu(lh
->len
);
4001 if (len
!= skb
->len
) {
4006 BT_DBG("len %d, cid 0x%4.4x", len
, cid
);
4009 case L2CAP_CID_LE_SIGNALING
:
4010 case L2CAP_CID_SIGNALING
:
4011 l2cap_sig_channel(conn
, skb
);
4014 case L2CAP_CID_CONN_LESS
:
4015 psm
= get_unaligned_le16(skb
->data
);
4017 l2cap_conless_channel(conn
, psm
, skb
);
4020 case L2CAP_CID_LE_DATA
:
4021 l2cap_att_channel(conn
, cid
, skb
);
4025 if (smp_sig_channel(conn
, skb
))
4026 l2cap_conn_del(conn
->hcon
, EACCES
);
4030 l2cap_data_channel(conn
, cid
, skb
);
4035 /* ---- L2CAP interface with lower layer (HCI) ---- */
4037 static int l2cap_connect_ind(struct hci_dev
*hdev
, bdaddr_t
*bdaddr
, u8 type
)
4039 int exact
= 0, lm1
= 0, lm2
= 0;
4040 struct l2cap_chan
*c
;
4042 if (type
!= ACL_LINK
)
4045 BT_DBG("hdev %s, bdaddr %s", hdev
->name
, batostr(bdaddr
));
4047 /* Find listening sockets and check their link_mode */
4048 read_lock(&chan_list_lock
);
4049 list_for_each_entry(c
, &chan_list
, global_l
) {
4050 struct sock
*sk
= c
->sk
;
4052 if (c
->state
!= BT_LISTEN
)
4055 if (!bacmp(&bt_sk(sk
)->src
, &hdev
->bdaddr
)) {
4056 lm1
|= HCI_LM_ACCEPT
;
4057 if (test_bit(FLAG_ROLE_SWITCH
, &c
->flags
))
4058 lm1
|= HCI_LM_MASTER
;
4060 } else if (!bacmp(&bt_sk(sk
)->src
, BDADDR_ANY
)) {
4061 lm2
|= HCI_LM_ACCEPT
;
4062 if (test_bit(FLAG_ROLE_SWITCH
, &c
->flags
))
4063 lm2
|= HCI_LM_MASTER
;
4066 read_unlock(&chan_list_lock
);
4068 return exact
? lm1
: lm2
;
4071 static int l2cap_connect_cfm(struct hci_conn
*hcon
, u8 status
)
4073 struct l2cap_conn
*conn
;
4075 BT_DBG("hcon %p bdaddr %s status %d", hcon
, batostr(&hcon
->dst
), status
);
4077 if (!(hcon
->type
== ACL_LINK
|| hcon
->type
== LE_LINK
))
4081 conn
= l2cap_conn_add(hcon
, status
);
4083 l2cap_conn_ready(conn
);
4085 l2cap_conn_del(hcon
, bt_to_errno(status
));
4090 static int l2cap_disconn_ind(struct hci_conn
*hcon
)
4092 struct l2cap_conn
*conn
= hcon
->l2cap_data
;
4094 BT_DBG("hcon %p", hcon
);
4096 if ((hcon
->type
!= ACL_LINK
&& hcon
->type
!= LE_LINK
) || !conn
)
4099 return conn
->disc_reason
;
4102 static int l2cap_disconn_cfm(struct hci_conn
*hcon
, u8 reason
)
4104 BT_DBG("hcon %p reason %d", hcon
, reason
);
4106 if (!(hcon
->type
== ACL_LINK
|| hcon
->type
== LE_LINK
))
4109 l2cap_conn_del(hcon
, bt_to_errno(reason
));
4114 static inline void l2cap_check_encryption(struct l2cap_chan
*chan
, u8 encrypt
)
4116 if (chan
->chan_type
!= L2CAP_CHAN_CONN_ORIENTED
)
4119 if (encrypt
== 0x00) {
4120 if (chan
->sec_level
== BT_SECURITY_MEDIUM
) {
4121 __clear_chan_timer(chan
);
4122 __set_chan_timer(chan
, HZ
* 5);
4123 } else if (chan
->sec_level
== BT_SECURITY_HIGH
)
4124 l2cap_chan_close(chan
, ECONNREFUSED
);
4126 if (chan
->sec_level
== BT_SECURITY_MEDIUM
)
4127 __clear_chan_timer(chan
);
4131 static int l2cap_security_cfm(struct hci_conn
*hcon
, u8 status
, u8 encrypt
)
4133 struct l2cap_conn
*conn
= hcon
->l2cap_data
;
4134 struct l2cap_chan
*chan
;
4139 BT_DBG("conn %p", conn
);
4141 if (hcon
->type
== LE_LINK
) {
4142 smp_distribute_keys(conn
, 0);
4143 del_timer(&conn
->security_timer
);
4146 read_lock(&conn
->chan_lock
);
4148 list_for_each_entry(chan
, &conn
->chan_l
, list
) {
4149 struct sock
*sk
= chan
->sk
;
4153 BT_DBG("chan->scid %d", chan
->scid
);
4155 if (chan
->scid
== L2CAP_CID_LE_DATA
) {
4156 if (!status
&& encrypt
) {
4157 chan
->sec_level
= hcon
->sec_level
;
4158 l2cap_chan_ready(sk
);
4165 if (test_bit(CONF_CONNECT_PEND
, &chan
->conf_state
)) {
4170 if (!status
&& (chan
->state
== BT_CONNECTED
||
4171 chan
->state
== BT_CONFIG
)) {
4172 l2cap_check_encryption(chan
, encrypt
);
4177 if (chan
->state
== BT_CONNECT
) {
4179 struct l2cap_conn_req req
;
4180 req
.scid
= cpu_to_le16(chan
->scid
);
4181 req
.psm
= chan
->psm
;
4183 chan
->ident
= l2cap_get_ident(conn
);
4184 set_bit(CONF_CONNECT_PEND
, &chan
->conf_state
);
4186 l2cap_send_cmd(conn
, chan
->ident
,
4187 L2CAP_CONN_REQ
, sizeof(req
), &req
);
4189 __clear_chan_timer(chan
);
4190 __set_chan_timer(chan
, HZ
/ 10);
4192 } else if (chan
->state
== BT_CONNECT2
) {
4193 struct l2cap_conn_rsp rsp
;
4197 if (bt_sk(sk
)->defer_setup
) {
4198 struct sock
*parent
= bt_sk(sk
)->parent
;
4199 res
= L2CAP_CR_PEND
;
4200 stat
= L2CAP_CS_AUTHOR_PEND
;
4202 parent
->sk_data_ready(parent
, 0);
4204 l2cap_state_change(chan
, BT_CONFIG
);
4205 res
= L2CAP_CR_SUCCESS
;
4206 stat
= L2CAP_CS_NO_INFO
;
4209 l2cap_state_change(chan
, BT_DISCONN
);
4210 __set_chan_timer(chan
, HZ
/ 10);
4211 res
= L2CAP_CR_SEC_BLOCK
;
4212 stat
= L2CAP_CS_NO_INFO
;
4215 rsp
.scid
= cpu_to_le16(chan
->dcid
);
4216 rsp
.dcid
= cpu_to_le16(chan
->scid
);
4217 rsp
.result
= cpu_to_le16(res
);
4218 rsp
.status
= cpu_to_le16(stat
);
4219 l2cap_send_cmd(conn
, chan
->ident
, L2CAP_CONN_RSP
,
4226 read_unlock(&conn
->chan_lock
);
4231 static int l2cap_recv_acldata(struct hci_conn
*hcon
, struct sk_buff
*skb
, u16 flags
)
4233 struct l2cap_conn
*conn
= hcon
->l2cap_data
;
4236 conn
= l2cap_conn_add(hcon
, 0);
4241 BT_DBG("conn %p len %d flags 0x%x", conn
, skb
->len
, flags
);
4243 if (!(flags
& ACL_CONT
)) {
4244 struct l2cap_hdr
*hdr
;
4245 struct l2cap_chan
*chan
;
4250 BT_ERR("Unexpected start frame (len %d)", skb
->len
);
4251 kfree_skb(conn
->rx_skb
);
4252 conn
->rx_skb
= NULL
;
4254 l2cap_conn_unreliable(conn
, ECOMM
);
4257 /* Start fragment always begin with Basic L2CAP header */
4258 if (skb
->len
< L2CAP_HDR_SIZE
) {
4259 BT_ERR("Frame is too short (len %d)", skb
->len
);
4260 l2cap_conn_unreliable(conn
, ECOMM
);
4264 hdr
= (struct l2cap_hdr
*) skb
->data
;
4265 len
= __le16_to_cpu(hdr
->len
) + L2CAP_HDR_SIZE
;
4266 cid
= __le16_to_cpu(hdr
->cid
);
4268 if (len
== skb
->len
) {
4269 /* Complete frame received */
4270 l2cap_recv_frame(conn
, skb
);
4274 BT_DBG("Start: total len %d, frag len %d", len
, skb
->len
);
4276 if (skb
->len
> len
) {
4277 BT_ERR("Frame is too long (len %d, expected len %d)",
4279 l2cap_conn_unreliable(conn
, ECOMM
);
4283 chan
= l2cap_get_chan_by_scid(conn
, cid
);
4285 if (chan
&& chan
->sk
) {
4286 struct sock
*sk
= chan
->sk
;
4288 if (chan
->imtu
< len
- L2CAP_HDR_SIZE
) {
4289 BT_ERR("Frame exceeding recv MTU (len %d, "
4293 l2cap_conn_unreliable(conn
, ECOMM
);
4299 /* Allocate skb for the complete frame (with header) */
4300 conn
->rx_skb
= bt_skb_alloc(len
, GFP_ATOMIC
);
4304 skb_copy_from_linear_data(skb
, skb_put(conn
->rx_skb
, skb
->len
),
4306 conn
->rx_len
= len
- skb
->len
;
4308 BT_DBG("Cont: frag len %d (expecting %d)", skb
->len
, conn
->rx_len
);
4310 if (!conn
->rx_len
) {
4311 BT_ERR("Unexpected continuation frame (len %d)", skb
->len
);
4312 l2cap_conn_unreliable(conn
, ECOMM
);
4316 if (skb
->len
> conn
->rx_len
) {
4317 BT_ERR("Fragment is too long (len %d, expected %d)",
4318 skb
->len
, conn
->rx_len
);
4319 kfree_skb(conn
->rx_skb
);
4320 conn
->rx_skb
= NULL
;
4322 l2cap_conn_unreliable(conn
, ECOMM
);
4326 skb_copy_from_linear_data(skb
, skb_put(conn
->rx_skb
, skb
->len
),
4328 conn
->rx_len
-= skb
->len
;
4330 if (!conn
->rx_len
) {
4331 /* Complete frame received */
4332 l2cap_recv_frame(conn
, conn
->rx_skb
);
4333 conn
->rx_skb
= NULL
;
4342 static int l2cap_debugfs_show(struct seq_file
*f
, void *p
)
4344 struct l2cap_chan
*c
;
4346 read_lock_bh(&chan_list_lock
);
4348 list_for_each_entry(c
, &chan_list
, global_l
) {
4349 struct sock
*sk
= c
->sk
;
4351 seq_printf(f
, "%s %s %d %d 0x%4.4x 0x%4.4x %d %d %d %d\n",
4352 batostr(&bt_sk(sk
)->src
),
4353 batostr(&bt_sk(sk
)->dst
),
4354 c
->state
, __le16_to_cpu(c
->psm
),
4355 c
->scid
, c
->dcid
, c
->imtu
, c
->omtu
,
4356 c
->sec_level
, c
->mode
);
4359 read_unlock_bh(&chan_list_lock
);
4364 static int l2cap_debugfs_open(struct inode
*inode
, struct file
*file
)
4366 return single_open(file
, l2cap_debugfs_show
, inode
->i_private
);
4369 static const struct file_operations l2cap_debugfs_fops
= {
4370 .open
= l2cap_debugfs_open
,
4372 .llseek
= seq_lseek
,
4373 .release
= single_release
,
4376 static struct dentry
*l2cap_debugfs
;
4378 static struct hci_proto l2cap_hci_proto
= {
4380 .id
= HCI_PROTO_L2CAP
,
4381 .connect_ind
= l2cap_connect_ind
,
4382 .connect_cfm
= l2cap_connect_cfm
,
4383 .disconn_ind
= l2cap_disconn_ind
,
4384 .disconn_cfm
= l2cap_disconn_cfm
,
4385 .security_cfm
= l2cap_security_cfm
,
4386 .recv_acldata
= l2cap_recv_acldata
4389 int __init
l2cap_init(void)
4393 err
= l2cap_init_sockets();
4397 err
= hci_register_proto(&l2cap_hci_proto
);
4399 BT_ERR("L2CAP protocol registration failed");
4400 bt_sock_unregister(BTPROTO_L2CAP
);
4405 l2cap_debugfs
= debugfs_create_file("l2cap", 0444,
4406 bt_debugfs
, NULL
, &l2cap_debugfs_fops
);
4408 BT_ERR("Failed to create L2CAP debug file");
4414 l2cap_cleanup_sockets();
4418 void l2cap_exit(void)
4420 debugfs_remove(l2cap_debugfs
);
4422 if (hci_unregister_proto(&l2cap_hci_proto
) < 0)
4423 BT_ERR("L2CAP protocol unregistration failed");
4425 l2cap_cleanup_sockets();
4428 module_param(disable_ertm
, bool, 0644);
4429 MODULE_PARM_DESC(disable_ertm
, "Disable enhanced retransmission mode");
4431 module_param(enable_hs
, bool, 0644);
4432 MODULE_PARM_DESC(enable_hs
, "Enable High Speed");