/*
   BlueZ - Bluetooth protocol stack for Linux
   Copyright (C) 2000-2001 Qualcomm Incorporated
   Copyright (C) 2009-2010 Gustavo F. Padovan <gustavo@padovan.org>
   Copyright (C) 2010 Google Inc.
   Copyright (C) 2011 ProFUSION Embedded Systems

   Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/
28 /* Bluetooth L2CAP core. */
30 #include <linux/module.h>
32 #include <linux/types.h>
33 #include <linux/capability.h>
34 #include <linux/errno.h>
35 #include <linux/kernel.h>
36 #include <linux/sched.h>
37 #include <linux/slab.h>
38 #include <linux/poll.h>
39 #include <linux/fcntl.h>
40 #include <linux/init.h>
41 #include <linux/interrupt.h>
42 #include <linux/socket.h>
43 #include <linux/skbuff.h>
44 #include <linux/list.h>
45 #include <linux/device.h>
46 #include <linux/debugfs.h>
47 #include <linux/seq_file.h>
48 #include <linux/uaccess.h>
49 #include <linux/crc16.h>
52 #include <asm/system.h>
53 #include <asm/unaligned.h>
55 #include <net/bluetooth/bluetooth.h>
56 #include <net/bluetooth/hci_core.h>
57 #include <net/bluetooth/l2cap.h>
58 #include <net/bluetooth/smp.h>
62 static u32 l2cap_feat_mask
= L2CAP_FEAT_FIXED_CHAN
;
63 static u8 l2cap_fixed_chan
[8] = { L2CAP_FC_L2CAP
, };
65 static LIST_HEAD(chan_list
);
66 static DEFINE_RWLOCK(chan_list_lock
);
68 static struct sk_buff
*l2cap_build_cmd(struct l2cap_conn
*conn
,
69 u8 code
, u8 ident
, u16 dlen
, void *data
);
70 static void l2cap_send_cmd(struct l2cap_conn
*conn
, u8 ident
, u8 code
, u16 len
,
72 static int l2cap_build_conf_req(struct l2cap_chan
*chan
, void *data
);
73 static void l2cap_send_disconn_req(struct l2cap_conn
*conn
,
74 struct l2cap_chan
*chan
, int err
);
76 static int l2cap_ertm_data_rcv(struct sock
*sk
, struct sk_buff
*skb
);
78 /* ---- L2CAP channels ---- */
80 static inline void chan_hold(struct l2cap_chan
*c
)
82 atomic_inc(&c
->refcnt
);
85 static inline void chan_put(struct l2cap_chan
*c
)
87 if (atomic_dec_and_test(&c
->refcnt
))
91 static struct l2cap_chan
*__l2cap_get_chan_by_dcid(struct l2cap_conn
*conn
, u16 cid
)
93 struct l2cap_chan
*c
, *r
= NULL
;
97 list_for_each_entry_rcu(c
, &conn
->chan_l
, list
) {
108 static struct l2cap_chan
*__l2cap_get_chan_by_scid(struct l2cap_conn
*conn
, u16 cid
)
110 struct l2cap_chan
*c
, *r
= NULL
;
114 list_for_each_entry_rcu(c
, &conn
->chan_l
, list
) {
115 if (c
->scid
== cid
) {
125 /* Find channel with given SCID.
126 * Returns locked socket */
127 static struct l2cap_chan
*l2cap_get_chan_by_scid(struct l2cap_conn
*conn
, u16 cid
)
129 struct l2cap_chan
*c
;
131 c
= __l2cap_get_chan_by_scid(conn
, cid
);
137 static struct l2cap_chan
*__l2cap_get_chan_by_ident(struct l2cap_conn
*conn
, u8 ident
)
139 struct l2cap_chan
*c
, *r
= NULL
;
143 list_for_each_entry_rcu(c
, &conn
->chan_l
, list
) {
144 if (c
->ident
== ident
) {
154 static inline struct l2cap_chan
*l2cap_get_chan_by_ident(struct l2cap_conn
*conn
, u8 ident
)
156 struct l2cap_chan
*c
;
158 c
= __l2cap_get_chan_by_ident(conn
, ident
);
164 static struct l2cap_chan
*__l2cap_global_chan_by_addr(__le16 psm
, bdaddr_t
*src
)
166 struct l2cap_chan
*c
;
168 list_for_each_entry(c
, &chan_list
, global_l
) {
169 if (c
->sport
== psm
&& !bacmp(&bt_sk(c
->sk
)->src
, src
))
175 int l2cap_add_psm(struct l2cap_chan
*chan
, bdaddr_t
*src
, __le16 psm
)
179 write_lock_bh(&chan_list_lock
);
181 if (psm
&& __l2cap_global_chan_by_addr(psm
, src
)) {
194 for (p
= 0x1001; p
< 0x1100; p
+= 2)
195 if (!__l2cap_global_chan_by_addr(cpu_to_le16(p
), src
)) {
196 chan
->psm
= cpu_to_le16(p
);
197 chan
->sport
= cpu_to_le16(p
);
204 write_unlock_bh(&chan_list_lock
);
208 int l2cap_add_scid(struct l2cap_chan
*chan
, __u16 scid
)
210 write_lock_bh(&chan_list_lock
);
214 write_unlock_bh(&chan_list_lock
);
219 static u16
l2cap_alloc_cid(struct l2cap_conn
*conn
)
221 u16 cid
= L2CAP_CID_DYN_START
;
223 for (; cid
< L2CAP_CID_DYN_END
; cid
++) {
224 if (!__l2cap_get_chan_by_scid(conn
, cid
))
231 static void l2cap_set_timer(struct l2cap_chan
*chan
, struct delayed_work
*work
, long timeout
)
233 BT_DBG("chan %p state %d timeout %ld", chan
, chan
->state
, timeout
);
235 cancel_delayed_work_sync(work
);
237 schedule_delayed_work(work
, timeout
);
/* Cancel a per-channel delayed work item, waiting for any instance that
 * is already executing to finish. */
static void l2cap_clear_timer(struct delayed_work *work)
{
	cancel_delayed_work_sync(work);
}
245 static char *state_to_string(int state
)
249 return "BT_CONNECTED";
259 return "BT_CONNECT2";
268 return "invalid state";
271 static void l2cap_state_change(struct l2cap_chan
*chan
, int state
)
273 BT_DBG("%p %s -> %s", chan
, state_to_string(chan
->state
),
274 state_to_string(state
));
277 chan
->ops
->state_change(chan
->data
, state
);
280 static void l2cap_chan_timeout(struct work_struct
*work
)
282 struct l2cap_chan
*chan
= container_of(work
, struct l2cap_chan
,
284 struct sock
*sk
= chan
->sk
;
287 BT_DBG("chan %p state %d", chan
, chan
->state
);
291 if (chan
->state
== BT_CONNECTED
|| chan
->state
== BT_CONFIG
)
292 reason
= ECONNREFUSED
;
293 else if (chan
->state
== BT_CONNECT
&&
294 chan
->sec_level
!= BT_SECURITY_SDP
)
295 reason
= ECONNREFUSED
;
299 l2cap_chan_close(chan
, reason
);
303 chan
->ops
->close(chan
->data
);
307 struct l2cap_chan
*l2cap_chan_create(struct sock
*sk
)
309 struct l2cap_chan
*chan
;
311 chan
= kzalloc(sizeof(*chan
), GFP_ATOMIC
);
317 write_lock_bh(&chan_list_lock
);
318 list_add(&chan
->global_l
, &chan_list
);
319 write_unlock_bh(&chan_list_lock
);
321 INIT_DELAYED_WORK(&chan
->chan_timer
, l2cap_chan_timeout
);
323 chan
->state
= BT_OPEN
;
325 atomic_set(&chan
->refcnt
, 1);
327 BT_DBG("sk %p chan %p", sk
, chan
);
332 void l2cap_chan_destroy(struct l2cap_chan
*chan
)
334 write_lock_bh(&chan_list_lock
);
335 list_del(&chan
->global_l
);
336 write_unlock_bh(&chan_list_lock
);
341 static void l2cap_chan_add(struct l2cap_conn
*conn
, struct l2cap_chan
*chan
)
343 BT_DBG("conn %p, psm 0x%2.2x, dcid 0x%4.4x", conn
,
344 chan
->psm
, chan
->dcid
);
346 conn
->disc_reason
= HCI_ERROR_REMOTE_USER_TERM
;
350 if (chan
->chan_type
== L2CAP_CHAN_CONN_ORIENTED
) {
351 if (conn
->hcon
->type
== LE_LINK
) {
353 chan
->omtu
= L2CAP_LE_DEFAULT_MTU
;
354 chan
->scid
= L2CAP_CID_LE_DATA
;
355 chan
->dcid
= L2CAP_CID_LE_DATA
;
357 /* Alloc CID for connection-oriented socket */
358 chan
->scid
= l2cap_alloc_cid(conn
);
359 chan
->omtu
= L2CAP_DEFAULT_MTU
;
361 } else if (chan
->chan_type
== L2CAP_CHAN_CONN_LESS
) {
362 /* Connectionless socket */
363 chan
->scid
= L2CAP_CID_CONN_LESS
;
364 chan
->dcid
= L2CAP_CID_CONN_LESS
;
365 chan
->omtu
= L2CAP_DEFAULT_MTU
;
367 /* Raw socket can send/recv signalling messages only */
368 chan
->scid
= L2CAP_CID_SIGNALING
;
369 chan
->dcid
= L2CAP_CID_SIGNALING
;
370 chan
->omtu
= L2CAP_DEFAULT_MTU
;
373 chan
->local_id
= L2CAP_BESTEFFORT_ID
;
374 chan
->local_stype
= L2CAP_SERV_BESTEFFORT
;
375 chan
->local_msdu
= L2CAP_DEFAULT_MAX_SDU_SIZE
;
376 chan
->local_sdu_itime
= L2CAP_DEFAULT_SDU_ITIME
;
377 chan
->local_acc_lat
= L2CAP_DEFAULT_ACC_LAT
;
378 chan
->local_flush_to
= L2CAP_DEFAULT_FLUSH_TO
;
382 list_add_rcu(&chan
->list
, &conn
->chan_l
);
386 * Must be called on the locked socket. */
387 static void l2cap_chan_del(struct l2cap_chan
*chan
, int err
)
389 struct sock
*sk
= chan
->sk
;
390 struct l2cap_conn
*conn
= chan
->conn
;
391 struct sock
*parent
= bt_sk(sk
)->parent
;
393 __clear_chan_timer(chan
);
395 BT_DBG("chan %p, conn %p, err %d", chan
, conn
, err
);
398 /* Delete from channel list */
399 list_del_rcu(&chan
->list
);
405 hci_conn_put(conn
->hcon
);
408 l2cap_state_change(chan
, BT_CLOSED
);
409 sock_set_flag(sk
, SOCK_ZAPPED
);
415 bt_accept_unlink(sk
);
416 parent
->sk_data_ready(parent
, 0);
418 sk
->sk_state_change(sk
);
420 if (!(test_bit(CONF_OUTPUT_DONE
, &chan
->conf_state
) &&
421 test_bit(CONF_INPUT_DONE
, &chan
->conf_state
)))
424 skb_queue_purge(&chan
->tx_q
);
426 if (chan
->mode
== L2CAP_MODE_ERTM
) {
427 struct srej_list
*l
, *tmp
;
429 __clear_retrans_timer(chan
);
430 __clear_monitor_timer(chan
);
431 __clear_ack_timer(chan
);
433 skb_queue_purge(&chan
->srej_q
);
435 list_for_each_entry_safe(l
, tmp
, &chan
->srej_l
, list
) {
442 static void l2cap_chan_cleanup_listen(struct sock
*parent
)
446 BT_DBG("parent %p", parent
);
448 /* Close not yet accepted channels */
449 while ((sk
= bt_accept_dequeue(parent
, NULL
))) {
450 struct l2cap_chan
*chan
= l2cap_pi(sk
)->chan
;
451 __clear_chan_timer(chan
);
453 l2cap_chan_close(chan
, ECONNRESET
);
455 chan
->ops
->close(chan
->data
);
459 void l2cap_chan_close(struct l2cap_chan
*chan
, int reason
)
461 struct l2cap_conn
*conn
= chan
->conn
;
462 struct sock
*sk
= chan
->sk
;
464 BT_DBG("chan %p state %d socket %p", chan
, chan
->state
, sk
->sk_socket
);
466 switch (chan
->state
) {
468 l2cap_chan_cleanup_listen(sk
);
470 l2cap_state_change(chan
, BT_CLOSED
);
471 sock_set_flag(sk
, SOCK_ZAPPED
);
476 if (chan
->chan_type
== L2CAP_CHAN_CONN_ORIENTED
&&
477 conn
->hcon
->type
== ACL_LINK
) {
478 __clear_chan_timer(chan
);
479 __set_chan_timer(chan
, sk
->sk_sndtimeo
);
480 l2cap_send_disconn_req(conn
, chan
, reason
);
482 l2cap_chan_del(chan
, reason
);
486 if (chan
->chan_type
== L2CAP_CHAN_CONN_ORIENTED
&&
487 conn
->hcon
->type
== ACL_LINK
) {
488 struct l2cap_conn_rsp rsp
;
491 if (bt_sk(sk
)->defer_setup
)
492 result
= L2CAP_CR_SEC_BLOCK
;
494 result
= L2CAP_CR_BAD_PSM
;
495 l2cap_state_change(chan
, BT_DISCONN
);
497 rsp
.scid
= cpu_to_le16(chan
->dcid
);
498 rsp
.dcid
= cpu_to_le16(chan
->scid
);
499 rsp
.result
= cpu_to_le16(result
);
500 rsp
.status
= cpu_to_le16(L2CAP_CS_NO_INFO
);
501 l2cap_send_cmd(conn
, chan
->ident
, L2CAP_CONN_RSP
,
505 l2cap_chan_del(chan
, reason
);
510 l2cap_chan_del(chan
, reason
);
514 sock_set_flag(sk
, SOCK_ZAPPED
);
519 static inline u8
l2cap_get_auth_type(struct l2cap_chan
*chan
)
521 if (chan
->chan_type
== L2CAP_CHAN_RAW
) {
522 switch (chan
->sec_level
) {
523 case BT_SECURITY_HIGH
:
524 return HCI_AT_DEDICATED_BONDING_MITM
;
525 case BT_SECURITY_MEDIUM
:
526 return HCI_AT_DEDICATED_BONDING
;
528 return HCI_AT_NO_BONDING
;
530 } else if (chan
->psm
== cpu_to_le16(0x0001)) {
531 if (chan
->sec_level
== BT_SECURITY_LOW
)
532 chan
->sec_level
= BT_SECURITY_SDP
;
534 if (chan
->sec_level
== BT_SECURITY_HIGH
)
535 return HCI_AT_NO_BONDING_MITM
;
537 return HCI_AT_NO_BONDING
;
539 switch (chan
->sec_level
) {
540 case BT_SECURITY_HIGH
:
541 return HCI_AT_GENERAL_BONDING_MITM
;
542 case BT_SECURITY_MEDIUM
:
543 return HCI_AT_GENERAL_BONDING
;
545 return HCI_AT_NO_BONDING
;
550 /* Service level security */
551 int l2cap_chan_check_security(struct l2cap_chan
*chan
)
553 struct l2cap_conn
*conn
= chan
->conn
;
556 auth_type
= l2cap_get_auth_type(chan
);
558 return hci_conn_security(conn
->hcon
, chan
->sec_level
, auth_type
);
561 static u8
l2cap_get_ident(struct l2cap_conn
*conn
)
565 /* Get next available identificator.
566 * 1 - 128 are used by kernel.
567 * 129 - 199 are reserved.
568 * 200 - 254 are used by utilities like l2ping, etc.
571 spin_lock_bh(&conn
->lock
);
573 if (++conn
->tx_ident
> 128)
578 spin_unlock_bh(&conn
->lock
);
583 static void l2cap_send_cmd(struct l2cap_conn
*conn
, u8 ident
, u8 code
, u16 len
, void *data
)
585 struct sk_buff
*skb
= l2cap_build_cmd(conn
, code
, ident
, len
, data
);
588 BT_DBG("code 0x%2.2x", code
);
593 if (lmp_no_flush_capable(conn
->hcon
->hdev
))
594 flags
= ACL_START_NO_FLUSH
;
598 bt_cb(skb
)->force_active
= BT_POWER_FORCE_ACTIVE_ON
;
599 skb
->priority
= HCI_PRIO_MAX
;
601 hci_send_acl(conn
->hchan
, skb
, flags
);
604 static void l2cap_do_send(struct l2cap_chan
*chan
, struct sk_buff
*skb
)
606 struct hci_conn
*hcon
= chan
->conn
->hcon
;
609 BT_DBG("chan %p, skb %p len %d priority %u", chan
, skb
, skb
->len
,
612 if (!test_bit(FLAG_FLUSHABLE
, &chan
->flags
) &&
613 lmp_no_flush_capable(hcon
->hdev
))
614 flags
= ACL_START_NO_FLUSH
;
618 bt_cb(skb
)->force_active
= test_bit(FLAG_FORCE_ACTIVE
, &chan
->flags
);
619 hci_send_acl(chan
->conn
->hchan
, skb
, flags
);
622 static inline void l2cap_send_sframe(struct l2cap_chan
*chan
, u32 control
)
625 struct l2cap_hdr
*lh
;
626 struct l2cap_conn
*conn
= chan
->conn
;
629 if (chan
->state
!= BT_CONNECTED
)
632 if (test_bit(FLAG_EXT_CTRL
, &chan
->flags
))
633 hlen
= L2CAP_EXT_HDR_SIZE
;
635 hlen
= L2CAP_ENH_HDR_SIZE
;
637 if (chan
->fcs
== L2CAP_FCS_CRC16
)
638 hlen
+= L2CAP_FCS_SIZE
;
640 BT_DBG("chan %p, control 0x%8.8x", chan
, control
);
642 count
= min_t(unsigned int, conn
->mtu
, hlen
);
644 control
|= __set_sframe(chan
);
646 if (test_and_clear_bit(CONN_SEND_FBIT
, &chan
->conn_state
))
647 control
|= __set_ctrl_final(chan
);
649 if (test_and_clear_bit(CONN_SEND_PBIT
, &chan
->conn_state
))
650 control
|= __set_ctrl_poll(chan
);
652 skb
= bt_skb_alloc(count
, GFP_ATOMIC
);
656 lh
= (struct l2cap_hdr
*) skb_put(skb
, L2CAP_HDR_SIZE
);
657 lh
->len
= cpu_to_le16(hlen
- L2CAP_HDR_SIZE
);
658 lh
->cid
= cpu_to_le16(chan
->dcid
);
660 __put_control(chan
, control
, skb_put(skb
, __ctrl_size(chan
)));
662 if (chan
->fcs
== L2CAP_FCS_CRC16
) {
663 u16 fcs
= crc16(0, (u8
*)lh
, count
- L2CAP_FCS_SIZE
);
664 put_unaligned_le16(fcs
, skb_put(skb
, L2CAP_FCS_SIZE
));
667 skb
->priority
= HCI_PRIO_MAX
;
668 l2cap_do_send(chan
, skb
);
671 static inline void l2cap_send_rr_or_rnr(struct l2cap_chan
*chan
, u32 control
)
673 if (test_bit(CONN_LOCAL_BUSY
, &chan
->conn_state
)) {
674 control
|= __set_ctrl_super(chan
, L2CAP_SUPER_RNR
);
675 set_bit(CONN_RNR_SENT
, &chan
->conn_state
);
677 control
|= __set_ctrl_super(chan
, L2CAP_SUPER_RR
);
679 control
|= __set_reqseq(chan
, chan
->buffer_seq
);
681 l2cap_send_sframe(chan
, control
);
684 static inline int __l2cap_no_conn_pending(struct l2cap_chan
*chan
)
686 return !test_bit(CONF_CONNECT_PEND
, &chan
->conf_state
);
689 static void l2cap_do_start(struct l2cap_chan
*chan
)
691 struct l2cap_conn
*conn
= chan
->conn
;
693 if (conn
->info_state
& L2CAP_INFO_FEAT_MASK_REQ_SENT
) {
694 if (!(conn
->info_state
& L2CAP_INFO_FEAT_MASK_REQ_DONE
))
697 if (l2cap_chan_check_security(chan
) &&
698 __l2cap_no_conn_pending(chan
)) {
699 struct l2cap_conn_req req
;
700 req
.scid
= cpu_to_le16(chan
->scid
);
703 chan
->ident
= l2cap_get_ident(conn
);
704 set_bit(CONF_CONNECT_PEND
, &chan
->conf_state
);
706 l2cap_send_cmd(conn
, chan
->ident
, L2CAP_CONN_REQ
,
710 struct l2cap_info_req req
;
711 req
.type
= cpu_to_le16(L2CAP_IT_FEAT_MASK
);
713 conn
->info_state
|= L2CAP_INFO_FEAT_MASK_REQ_SENT
;
714 conn
->info_ident
= l2cap_get_ident(conn
);
716 schedule_delayed_work(&conn
->info_work
,
717 msecs_to_jiffies(L2CAP_INFO_TIMEOUT
));
719 l2cap_send_cmd(conn
, conn
->info_ident
,
720 L2CAP_INFO_REQ
, sizeof(req
), &req
);
724 static inline int l2cap_mode_supported(__u8 mode
, __u32 feat_mask
)
726 u32 local_feat_mask
= l2cap_feat_mask
;
728 local_feat_mask
|= L2CAP_FEAT_ERTM
| L2CAP_FEAT_STREAMING
;
731 case L2CAP_MODE_ERTM
:
732 return L2CAP_FEAT_ERTM
& feat_mask
& local_feat_mask
;
733 case L2CAP_MODE_STREAMING
:
734 return L2CAP_FEAT_STREAMING
& feat_mask
& local_feat_mask
;
740 static void l2cap_send_disconn_req(struct l2cap_conn
*conn
, struct l2cap_chan
*chan
, int err
)
743 struct l2cap_disconn_req req
;
750 if (chan
->mode
== L2CAP_MODE_ERTM
) {
751 __clear_retrans_timer(chan
);
752 __clear_monitor_timer(chan
);
753 __clear_ack_timer(chan
);
756 req
.dcid
= cpu_to_le16(chan
->dcid
);
757 req
.scid
= cpu_to_le16(chan
->scid
);
758 l2cap_send_cmd(conn
, l2cap_get_ident(conn
),
759 L2CAP_DISCONN_REQ
, sizeof(req
), &req
);
761 l2cap_state_change(chan
, BT_DISCONN
);
765 /* ---- L2CAP connections ---- */
766 static void l2cap_conn_start(struct l2cap_conn
*conn
)
768 struct l2cap_chan
*chan
;
770 BT_DBG("conn %p", conn
);
774 list_for_each_entry_rcu(chan
, &conn
->chan_l
, list
) {
775 struct sock
*sk
= chan
->sk
;
779 if (chan
->chan_type
!= L2CAP_CHAN_CONN_ORIENTED
) {
784 if (chan
->state
== BT_CONNECT
) {
785 struct l2cap_conn_req req
;
787 if (!l2cap_chan_check_security(chan
) ||
788 !__l2cap_no_conn_pending(chan
)) {
793 if (!l2cap_mode_supported(chan
->mode
, conn
->feat_mask
)
794 && test_bit(CONF_STATE2_DEVICE
,
795 &chan
->conf_state
)) {
796 /* l2cap_chan_close() calls list_del(chan)
797 * so release the lock */
798 l2cap_chan_close(chan
, ECONNRESET
);
803 req
.scid
= cpu_to_le16(chan
->scid
);
806 chan
->ident
= l2cap_get_ident(conn
);
807 set_bit(CONF_CONNECT_PEND
, &chan
->conf_state
);
809 l2cap_send_cmd(conn
, chan
->ident
, L2CAP_CONN_REQ
,
812 } else if (chan
->state
== BT_CONNECT2
) {
813 struct l2cap_conn_rsp rsp
;
815 rsp
.scid
= cpu_to_le16(chan
->dcid
);
816 rsp
.dcid
= cpu_to_le16(chan
->scid
);
818 if (l2cap_chan_check_security(chan
)) {
819 if (bt_sk(sk
)->defer_setup
) {
820 struct sock
*parent
= bt_sk(sk
)->parent
;
821 rsp
.result
= cpu_to_le16(L2CAP_CR_PEND
);
822 rsp
.status
= cpu_to_le16(L2CAP_CS_AUTHOR_PEND
);
824 parent
->sk_data_ready(parent
, 0);
827 l2cap_state_change(chan
, BT_CONFIG
);
828 rsp
.result
= cpu_to_le16(L2CAP_CR_SUCCESS
);
829 rsp
.status
= cpu_to_le16(L2CAP_CS_NO_INFO
);
832 rsp
.result
= cpu_to_le16(L2CAP_CR_PEND
);
833 rsp
.status
= cpu_to_le16(L2CAP_CS_AUTHEN_PEND
);
836 l2cap_send_cmd(conn
, chan
->ident
, L2CAP_CONN_RSP
,
839 if (test_bit(CONF_REQ_SENT
, &chan
->conf_state
) ||
840 rsp
.result
!= L2CAP_CR_SUCCESS
) {
845 set_bit(CONF_REQ_SENT
, &chan
->conf_state
);
846 l2cap_send_cmd(conn
, l2cap_get_ident(conn
), L2CAP_CONF_REQ
,
847 l2cap_build_conf_req(chan
, buf
), buf
);
848 chan
->num_conf_req
++;
857 /* Find socket with cid and source bdaddr.
858 * Returns closest match, locked.
860 static struct l2cap_chan
*l2cap_global_chan_by_scid(int state
, __le16 cid
, bdaddr_t
*src
)
862 struct l2cap_chan
*c
, *c1
= NULL
;
864 read_lock(&chan_list_lock
);
866 list_for_each_entry(c
, &chan_list
, global_l
) {
867 struct sock
*sk
= c
->sk
;
869 if (state
&& c
->state
!= state
)
872 if (c
->scid
== cid
) {
874 if (!bacmp(&bt_sk(sk
)->src
, src
)) {
875 read_unlock(&chan_list_lock
);
880 if (!bacmp(&bt_sk(sk
)->src
, BDADDR_ANY
))
885 read_unlock(&chan_list_lock
);
890 static void l2cap_le_conn_ready(struct l2cap_conn
*conn
)
892 struct sock
*parent
, *sk
;
893 struct l2cap_chan
*chan
, *pchan
;
897 /* Check if we have socket listening on cid */
898 pchan
= l2cap_global_chan_by_scid(BT_LISTEN
, L2CAP_CID_LE_DATA
,
907 /* Check for backlog size */
908 if (sk_acceptq_is_full(parent
)) {
909 BT_DBG("backlog full %d", parent
->sk_ack_backlog
);
913 chan
= pchan
->ops
->new_connection(pchan
->data
);
919 hci_conn_hold(conn
->hcon
);
921 bacpy(&bt_sk(sk
)->src
, conn
->src
);
922 bacpy(&bt_sk(sk
)->dst
, conn
->dst
);
924 bt_accept_enqueue(parent
, sk
);
926 l2cap_chan_add(conn
, chan
);
928 __set_chan_timer(chan
, sk
->sk_sndtimeo
);
930 l2cap_state_change(chan
, BT_CONNECTED
);
931 parent
->sk_data_ready(parent
, 0);
934 release_sock(parent
);
937 static void l2cap_chan_ready(struct sock
*sk
)
939 struct l2cap_chan
*chan
= l2cap_pi(sk
)->chan
;
940 struct sock
*parent
= bt_sk(sk
)->parent
;
942 BT_DBG("sk %p, parent %p", sk
, parent
);
944 chan
->conf_state
= 0;
945 __clear_chan_timer(chan
);
947 l2cap_state_change(chan
, BT_CONNECTED
);
948 sk
->sk_state_change(sk
);
951 parent
->sk_data_ready(parent
, 0);
954 static void l2cap_conn_ready(struct l2cap_conn
*conn
)
956 struct l2cap_chan
*chan
;
958 BT_DBG("conn %p", conn
);
960 if (!conn
->hcon
->out
&& conn
->hcon
->type
== LE_LINK
)
961 l2cap_le_conn_ready(conn
);
963 if (conn
->hcon
->out
&& conn
->hcon
->type
== LE_LINK
)
964 smp_conn_security(conn
, conn
->hcon
->pending_sec_level
);
968 list_for_each_entry_rcu(chan
, &conn
->chan_l
, list
) {
969 struct sock
*sk
= chan
->sk
;
973 if (conn
->hcon
->type
== LE_LINK
) {
974 if (smp_conn_security(conn
, chan
->sec_level
))
975 l2cap_chan_ready(sk
);
977 } else if (chan
->chan_type
!= L2CAP_CHAN_CONN_ORIENTED
) {
978 __clear_chan_timer(chan
);
979 l2cap_state_change(chan
, BT_CONNECTED
);
980 sk
->sk_state_change(sk
);
982 } else if (chan
->state
== BT_CONNECT
)
983 l2cap_do_start(chan
);
991 /* Notify sockets that we cannot guaranty reliability anymore */
992 static void l2cap_conn_unreliable(struct l2cap_conn
*conn
, int err
)
994 struct l2cap_chan
*chan
;
996 BT_DBG("conn %p", conn
);
1000 list_for_each_entry_rcu(chan
, &conn
->chan_l
, list
) {
1001 struct sock
*sk
= chan
->sk
;
1003 if (test_bit(FLAG_FORCE_RELIABLE
, &chan
->flags
))
1010 static void l2cap_info_timeout(struct work_struct
*work
)
1012 struct l2cap_conn
*conn
= container_of(work
, struct l2cap_conn
,
1015 conn
->info_state
|= L2CAP_INFO_FEAT_MASK_REQ_DONE
;
1016 conn
->info_ident
= 0;
1018 l2cap_conn_start(conn
);
1021 static void l2cap_conn_del(struct hci_conn
*hcon
, int err
)
1023 struct l2cap_conn
*conn
= hcon
->l2cap_data
;
1024 struct l2cap_chan
*chan
, *l
;
1030 BT_DBG("hcon %p conn %p, err %d", hcon
, conn
, err
);
1032 kfree_skb(conn
->rx_skb
);
1035 list_for_each_entry_safe(chan
, l
, &conn
->chan_l
, list
) {
1038 l2cap_chan_del(chan
, err
);
1040 chan
->ops
->close(chan
->data
);
1043 hci_chan_del(conn
->hchan
);
1045 if (conn
->info_state
& L2CAP_INFO_FEAT_MASK_REQ_SENT
)
1046 cancel_delayed_work_sync(&conn
->info_work
);
1048 if (test_and_clear_bit(HCI_CONN_LE_SMP_PEND
, &hcon
->pend
)) {
1049 del_timer(&conn
->security_timer
);
1050 smp_chan_destroy(conn
);
1053 hcon
->l2cap_data
= NULL
;
1057 static void security_timeout(unsigned long arg
)
1059 struct l2cap_conn
*conn
= (void *) arg
;
1061 l2cap_conn_del(conn
->hcon
, ETIMEDOUT
);
1064 static struct l2cap_conn
*l2cap_conn_add(struct hci_conn
*hcon
, u8 status
)
1066 struct l2cap_conn
*conn
= hcon
->l2cap_data
;
1067 struct hci_chan
*hchan
;
1072 hchan
= hci_chan_create(hcon
);
1076 conn
= kzalloc(sizeof(struct l2cap_conn
), GFP_ATOMIC
);
1078 hci_chan_del(hchan
);
1082 hcon
->l2cap_data
= conn
;
1084 conn
->hchan
= hchan
;
1086 BT_DBG("hcon %p conn %p hchan %p", hcon
, conn
, hchan
);
1088 if (hcon
->hdev
->le_mtu
&& hcon
->type
== LE_LINK
)
1089 conn
->mtu
= hcon
->hdev
->le_mtu
;
1091 conn
->mtu
= hcon
->hdev
->acl_mtu
;
1093 conn
->src
= &hcon
->hdev
->bdaddr
;
1094 conn
->dst
= &hcon
->dst
;
1096 conn
->feat_mask
= 0;
1098 spin_lock_init(&conn
->lock
);
1100 INIT_LIST_HEAD(&conn
->chan_l
);
1102 if (hcon
->type
== LE_LINK
)
1103 setup_timer(&conn
->security_timer
, security_timeout
,
1104 (unsigned long) conn
);
1106 INIT_DELAYED_WORK(&conn
->info_work
, l2cap_info_timeout
);
1108 conn
->disc_reason
= HCI_ERROR_REMOTE_USER_TERM
;
1113 /* ---- Socket interface ---- */
1115 /* Find socket with psm and source bdaddr.
1116 * Returns closest match.
1118 static struct l2cap_chan
*l2cap_global_chan_by_psm(int state
, __le16 psm
, bdaddr_t
*src
)
1120 struct l2cap_chan
*c
, *c1
= NULL
;
1122 read_lock(&chan_list_lock
);
1124 list_for_each_entry(c
, &chan_list
, global_l
) {
1125 struct sock
*sk
= c
->sk
;
1127 if (state
&& c
->state
!= state
)
1130 if (c
->psm
== psm
) {
1132 if (!bacmp(&bt_sk(sk
)->src
, src
)) {
1133 read_unlock(&chan_list_lock
);
1138 if (!bacmp(&bt_sk(sk
)->src
, BDADDR_ANY
))
1143 read_unlock(&chan_list_lock
);
1148 inline int l2cap_chan_connect(struct l2cap_chan
*chan
, __le16 psm
, u16 cid
, bdaddr_t
*dst
)
1150 struct sock
*sk
= chan
->sk
;
1151 bdaddr_t
*src
= &bt_sk(sk
)->src
;
1152 struct l2cap_conn
*conn
;
1153 struct hci_conn
*hcon
;
1154 struct hci_dev
*hdev
;
1158 BT_DBG("%s -> %s psm 0x%2.2x", batostr(src
), batostr(dst
),
1161 hdev
= hci_get_route(dst
, src
);
1163 return -EHOSTUNREACH
;
1169 /* PSM must be odd and lsb of upper byte must be 0 */
1170 if ((__le16_to_cpu(psm
) & 0x0101) != 0x0001 && !cid
&&
1171 chan
->chan_type
!= L2CAP_CHAN_RAW
) {
1176 if (chan
->chan_type
== L2CAP_CHAN_CONN_ORIENTED
&& !(psm
|| cid
)) {
1181 switch (chan
->mode
) {
1182 case L2CAP_MODE_BASIC
:
1184 case L2CAP_MODE_ERTM
:
1185 case L2CAP_MODE_STREAMING
:
1194 switch (sk
->sk_state
) {
1198 /* Already connecting */
1203 /* Already connected */
1217 /* Set destination address and psm */
1218 bacpy(&bt_sk(sk
)->dst
, src
);
1222 auth_type
= l2cap_get_auth_type(chan
);
1224 if (chan
->dcid
== L2CAP_CID_LE_DATA
)
1225 hcon
= hci_connect(hdev
, LE_LINK
, dst
,
1226 chan
->sec_level
, auth_type
);
1228 hcon
= hci_connect(hdev
, ACL_LINK
, dst
,
1229 chan
->sec_level
, auth_type
);
1232 err
= PTR_ERR(hcon
);
1236 conn
= l2cap_conn_add(hcon
, 0);
1243 /* Update source addr of the socket */
1244 bacpy(src
, conn
->src
);
1246 l2cap_chan_add(conn
, chan
);
1248 l2cap_state_change(chan
, BT_CONNECT
);
1249 __set_chan_timer(chan
, sk
->sk_sndtimeo
);
1251 if (hcon
->state
== BT_CONNECTED
) {
1252 if (chan
->chan_type
!= L2CAP_CHAN_CONN_ORIENTED
) {
1253 __clear_chan_timer(chan
);
1254 if (l2cap_chan_check_security(chan
))
1255 l2cap_state_change(chan
, BT_CONNECTED
);
1257 l2cap_do_start(chan
);
1263 hci_dev_unlock(hdev
);
1268 int __l2cap_wait_ack(struct sock
*sk
)
1270 struct l2cap_chan
*chan
= l2cap_pi(sk
)->chan
;
1271 DECLARE_WAITQUEUE(wait
, current
);
1275 add_wait_queue(sk_sleep(sk
), &wait
);
1276 set_current_state(TASK_INTERRUPTIBLE
);
1277 while (chan
->unacked_frames
> 0 && chan
->conn
) {
1281 if (signal_pending(current
)) {
1282 err
= sock_intr_errno(timeo
);
1287 timeo
= schedule_timeout(timeo
);
1289 set_current_state(TASK_INTERRUPTIBLE
);
1291 err
= sock_error(sk
);
1295 set_current_state(TASK_RUNNING
);
1296 remove_wait_queue(sk_sleep(sk
), &wait
);
1300 static void l2cap_monitor_timeout(struct work_struct
*work
)
1302 struct l2cap_chan
*chan
= container_of(work
, struct l2cap_chan
,
1303 monitor_timer
.work
);
1304 struct sock
*sk
= chan
->sk
;
1306 BT_DBG("chan %p", chan
);
1309 if (chan
->retry_count
>= chan
->remote_max_tx
) {
1310 l2cap_send_disconn_req(chan
->conn
, chan
, ECONNABORTED
);
1315 chan
->retry_count
++;
1316 __set_monitor_timer(chan
);
1318 l2cap_send_rr_or_rnr(chan
, L2CAP_CTRL_POLL
);
1322 static void l2cap_retrans_timeout(struct work_struct
*work
)
1324 struct l2cap_chan
*chan
= container_of(work
, struct l2cap_chan
,
1325 retrans_timer
.work
);
1326 struct sock
*sk
= chan
->sk
;
1328 BT_DBG("chan %p", chan
);
1331 chan
->retry_count
= 1;
1332 __set_monitor_timer(chan
);
1334 set_bit(CONN_WAIT_F
, &chan
->conn_state
);
1336 l2cap_send_rr_or_rnr(chan
, L2CAP_CTRL_POLL
);
1340 static void l2cap_drop_acked_frames(struct l2cap_chan
*chan
)
1342 struct sk_buff
*skb
;
1344 while ((skb
= skb_peek(&chan
->tx_q
)) &&
1345 chan
->unacked_frames
) {
1346 if (bt_cb(skb
)->tx_seq
== chan
->expected_ack_seq
)
1349 skb
= skb_dequeue(&chan
->tx_q
);
1352 chan
->unacked_frames
--;
1355 if (!chan
->unacked_frames
)
1356 __clear_retrans_timer(chan
);
1359 static void l2cap_streaming_send(struct l2cap_chan
*chan
)
1361 struct sk_buff
*skb
;
1365 while ((skb
= skb_dequeue(&chan
->tx_q
))) {
1366 control
= __get_control(chan
, skb
->data
+ L2CAP_HDR_SIZE
);
1367 control
|= __set_txseq(chan
, chan
->next_tx_seq
);
1368 __put_control(chan
, control
, skb
->data
+ L2CAP_HDR_SIZE
);
1370 if (chan
->fcs
== L2CAP_FCS_CRC16
) {
1371 fcs
= crc16(0, (u8
*)skb
->data
,
1372 skb
->len
- L2CAP_FCS_SIZE
);
1373 put_unaligned_le16(fcs
,
1374 skb
->data
+ skb
->len
- L2CAP_FCS_SIZE
);
1377 l2cap_do_send(chan
, skb
);
1379 chan
->next_tx_seq
= __next_seq(chan
, chan
->next_tx_seq
);
1383 static void l2cap_retransmit_one_frame(struct l2cap_chan
*chan
, u16 tx_seq
)
1385 struct sk_buff
*skb
, *tx_skb
;
1389 skb
= skb_peek(&chan
->tx_q
);
1393 while (bt_cb(skb
)->tx_seq
!= tx_seq
) {
1394 if (skb_queue_is_last(&chan
->tx_q
, skb
))
1397 skb
= skb_queue_next(&chan
->tx_q
, skb
);
1400 if (chan
->remote_max_tx
&&
1401 bt_cb(skb
)->retries
== chan
->remote_max_tx
) {
1402 l2cap_send_disconn_req(chan
->conn
, chan
, ECONNABORTED
);
1406 tx_skb
= skb_clone(skb
, GFP_ATOMIC
);
1407 bt_cb(skb
)->retries
++;
1409 control
= __get_control(chan
, tx_skb
->data
+ L2CAP_HDR_SIZE
);
1410 control
&= __get_sar_mask(chan
);
1412 if (test_and_clear_bit(CONN_SEND_FBIT
, &chan
->conn_state
))
1413 control
|= __set_ctrl_final(chan
);
1415 control
|= __set_reqseq(chan
, chan
->buffer_seq
);
1416 control
|= __set_txseq(chan
, tx_seq
);
1418 __put_control(chan
, control
, tx_skb
->data
+ L2CAP_HDR_SIZE
);
1420 if (chan
->fcs
== L2CAP_FCS_CRC16
) {
1421 fcs
= crc16(0, (u8
*)tx_skb
->data
,
1422 tx_skb
->len
- L2CAP_FCS_SIZE
);
1423 put_unaligned_le16(fcs
,
1424 tx_skb
->data
+ tx_skb
->len
- L2CAP_FCS_SIZE
);
1427 l2cap_do_send(chan
, tx_skb
);
1430 static int l2cap_ertm_send(struct l2cap_chan
*chan
)
1432 struct sk_buff
*skb
, *tx_skb
;
1437 if (chan
->state
!= BT_CONNECTED
)
1440 while ((skb
= chan
->tx_send_head
) && (!l2cap_tx_window_full(chan
))) {
1442 if (chan
->remote_max_tx
&&
1443 bt_cb(skb
)->retries
== chan
->remote_max_tx
) {
1444 l2cap_send_disconn_req(chan
->conn
, chan
, ECONNABORTED
);
1448 tx_skb
= skb_clone(skb
, GFP_ATOMIC
);
1450 bt_cb(skb
)->retries
++;
1452 control
= __get_control(chan
, tx_skb
->data
+ L2CAP_HDR_SIZE
);
1453 control
&= __get_sar_mask(chan
);
1455 if (test_and_clear_bit(CONN_SEND_FBIT
, &chan
->conn_state
))
1456 control
|= __set_ctrl_final(chan
);
1458 control
|= __set_reqseq(chan
, chan
->buffer_seq
);
1459 control
|= __set_txseq(chan
, chan
->next_tx_seq
);
1461 __put_control(chan
, control
, tx_skb
->data
+ L2CAP_HDR_SIZE
);
1463 if (chan
->fcs
== L2CAP_FCS_CRC16
) {
1464 fcs
= crc16(0, (u8
*)skb
->data
,
1465 tx_skb
->len
- L2CAP_FCS_SIZE
);
1466 put_unaligned_le16(fcs
, skb
->data
+
1467 tx_skb
->len
- L2CAP_FCS_SIZE
);
1470 l2cap_do_send(chan
, tx_skb
);
1472 __set_retrans_timer(chan
);
1474 bt_cb(skb
)->tx_seq
= chan
->next_tx_seq
;
1476 chan
->next_tx_seq
= __next_seq(chan
, chan
->next_tx_seq
);
1478 if (bt_cb(skb
)->retries
== 1)
1479 chan
->unacked_frames
++;
1481 chan
->frames_sent
++;
1483 if (skb_queue_is_last(&chan
->tx_q
, skb
))
1484 chan
->tx_send_head
= NULL
;
1486 chan
->tx_send_head
= skb_queue_next(&chan
->tx_q
, skb
);
1494 static int l2cap_retransmit_frames(struct l2cap_chan
*chan
)
1498 if (!skb_queue_empty(&chan
->tx_q
))
1499 chan
->tx_send_head
= chan
->tx_q
.next
;
1501 chan
->next_tx_seq
= chan
->expected_ack_seq
;
1502 ret
= l2cap_ertm_send(chan
);
1506 static void l2cap_send_ack(struct l2cap_chan
*chan
)
1510 control
|= __set_reqseq(chan
, chan
->buffer_seq
);
1512 if (test_bit(CONN_LOCAL_BUSY
, &chan
->conn_state
)) {
1513 control
|= __set_ctrl_super(chan
, L2CAP_SUPER_RNR
);
1514 set_bit(CONN_RNR_SENT
, &chan
->conn_state
);
1515 l2cap_send_sframe(chan
, control
);
1519 if (l2cap_ertm_send(chan
) > 0)
1522 control
|= __set_ctrl_super(chan
, L2CAP_SUPER_RR
);
1523 l2cap_send_sframe(chan
, control
);
1526 static void l2cap_send_srejtail(struct l2cap_chan
*chan
)
1528 struct srej_list
*tail
;
1531 control
= __set_ctrl_super(chan
, L2CAP_SUPER_SREJ
);
1532 control
|= __set_ctrl_final(chan
);
1534 tail
= list_entry((&chan
->srej_l
)->prev
, struct srej_list
, list
);
1535 control
|= __set_reqseq(chan
, tail
->tx_seq
);
1537 l2cap_send_sframe(chan
, control
);
1540 static inline int l2cap_skbuff_fromiovec(struct sock
*sk
, struct msghdr
*msg
, int len
, int count
, struct sk_buff
*skb
)
1542 struct l2cap_conn
*conn
= l2cap_pi(sk
)->chan
->conn
;
1543 struct sk_buff
**frag
;
1546 if (memcpy_fromiovec(skb_put(skb
, count
), msg
->msg_iov
, count
))
1552 /* Continuation fragments (no L2CAP header) */
1553 frag
= &skb_shinfo(skb
)->frag_list
;
1555 count
= min_t(unsigned int, conn
->mtu
, len
);
1557 *frag
= bt_skb_send_alloc(sk
, count
, msg
->msg_flags
& MSG_DONTWAIT
, &err
);
1560 if (memcpy_fromiovec(skb_put(*frag
, count
), msg
->msg_iov
, count
))
1563 (*frag
)->priority
= skb
->priority
;
1568 frag
= &(*frag
)->next
;
1574 static struct sk_buff
*l2cap_create_connless_pdu(struct l2cap_chan
*chan
,
1575 struct msghdr
*msg
, size_t len
,
1578 struct sock
*sk
= chan
->sk
;
1579 struct l2cap_conn
*conn
= chan
->conn
;
1580 struct sk_buff
*skb
;
1581 int err
, count
, hlen
= L2CAP_HDR_SIZE
+ L2CAP_PSMLEN_SIZE
;
1582 struct l2cap_hdr
*lh
;
1584 BT_DBG("sk %p len %d priority %u", sk
, (int)len
, priority
);
1586 count
= min_t(unsigned int, (conn
->mtu
- hlen
), len
);
1587 skb
= bt_skb_send_alloc(sk
, count
+ hlen
,
1588 msg
->msg_flags
& MSG_DONTWAIT
, &err
);
1590 return ERR_PTR(err
);
1592 skb
->priority
= priority
;
1594 /* Create L2CAP header */
1595 lh
= (struct l2cap_hdr
*) skb_put(skb
, L2CAP_HDR_SIZE
);
1596 lh
->cid
= cpu_to_le16(chan
->dcid
);
1597 lh
->len
= cpu_to_le16(len
+ (hlen
- L2CAP_HDR_SIZE
));
1598 put_unaligned_le16(chan
->psm
, skb_put(skb
, 2));
1600 err
= l2cap_skbuff_fromiovec(sk
, msg
, len
, count
, skb
);
1601 if (unlikely(err
< 0)) {
1603 return ERR_PTR(err
);
1608 static struct sk_buff
*l2cap_create_basic_pdu(struct l2cap_chan
*chan
,
1609 struct msghdr
*msg
, size_t len
,
1612 struct sock
*sk
= chan
->sk
;
1613 struct l2cap_conn
*conn
= chan
->conn
;
1614 struct sk_buff
*skb
;
1615 int err
, count
, hlen
= L2CAP_HDR_SIZE
;
1616 struct l2cap_hdr
*lh
;
1618 BT_DBG("sk %p len %d", sk
, (int)len
);
1620 count
= min_t(unsigned int, (conn
->mtu
- hlen
), len
);
1621 skb
= bt_skb_send_alloc(sk
, count
+ hlen
,
1622 msg
->msg_flags
& MSG_DONTWAIT
, &err
);
1624 return ERR_PTR(err
);
1626 skb
->priority
= priority
;
1628 /* Create L2CAP header */
1629 lh
= (struct l2cap_hdr
*) skb_put(skb
, L2CAP_HDR_SIZE
);
1630 lh
->cid
= cpu_to_le16(chan
->dcid
);
1631 lh
->len
= cpu_to_le16(len
+ (hlen
- L2CAP_HDR_SIZE
));
1633 err
= l2cap_skbuff_fromiovec(sk
, msg
, len
, count
, skb
);
1634 if (unlikely(err
< 0)) {
1636 return ERR_PTR(err
);
1641 static struct sk_buff
*l2cap_create_iframe_pdu(struct l2cap_chan
*chan
,
1642 struct msghdr
*msg
, size_t len
,
1643 u32 control
, u16 sdulen
)
1645 struct sock
*sk
= chan
->sk
;
1646 struct l2cap_conn
*conn
= chan
->conn
;
1647 struct sk_buff
*skb
;
1648 int err
, count
, hlen
;
1649 struct l2cap_hdr
*lh
;
1651 BT_DBG("sk %p len %d", sk
, (int)len
);
1654 return ERR_PTR(-ENOTCONN
);
1656 if (test_bit(FLAG_EXT_CTRL
, &chan
->flags
))
1657 hlen
= L2CAP_EXT_HDR_SIZE
;
1659 hlen
= L2CAP_ENH_HDR_SIZE
;
1662 hlen
+= L2CAP_SDULEN_SIZE
;
1664 if (chan
->fcs
== L2CAP_FCS_CRC16
)
1665 hlen
+= L2CAP_FCS_SIZE
;
1667 count
= min_t(unsigned int, (conn
->mtu
- hlen
), len
);
1668 skb
= bt_skb_send_alloc(sk
, count
+ hlen
,
1669 msg
->msg_flags
& MSG_DONTWAIT
, &err
);
1671 return ERR_PTR(err
);
1673 /* Create L2CAP header */
1674 lh
= (struct l2cap_hdr
*) skb_put(skb
, L2CAP_HDR_SIZE
);
1675 lh
->cid
= cpu_to_le16(chan
->dcid
);
1676 lh
->len
= cpu_to_le16(len
+ (hlen
- L2CAP_HDR_SIZE
));
1678 __put_control(chan
, control
, skb_put(skb
, __ctrl_size(chan
)));
1681 put_unaligned_le16(sdulen
, skb_put(skb
, L2CAP_SDULEN_SIZE
));
1683 err
= l2cap_skbuff_fromiovec(sk
, msg
, len
, count
, skb
);
1684 if (unlikely(err
< 0)) {
1686 return ERR_PTR(err
);
1689 if (chan
->fcs
== L2CAP_FCS_CRC16
)
1690 put_unaligned_le16(0, skb_put(skb
, L2CAP_FCS_SIZE
));
1692 bt_cb(skb
)->retries
= 0;
1696 static int l2cap_sar_segment_sdu(struct l2cap_chan
*chan
, struct msghdr
*msg
, size_t len
)
1698 struct sk_buff
*skb
;
1699 struct sk_buff_head sar_queue
;
1703 skb_queue_head_init(&sar_queue
);
1704 control
= __set_ctrl_sar(chan
, L2CAP_SAR_START
);
1705 skb
= l2cap_create_iframe_pdu(chan
, msg
, chan
->remote_mps
, control
, len
);
1707 return PTR_ERR(skb
);
1709 __skb_queue_tail(&sar_queue
, skb
);
1710 len
-= chan
->remote_mps
;
1711 size
+= chan
->remote_mps
;
1716 if (len
> chan
->remote_mps
) {
1717 control
= __set_ctrl_sar(chan
, L2CAP_SAR_CONTINUE
);
1718 buflen
= chan
->remote_mps
;
1720 control
= __set_ctrl_sar(chan
, L2CAP_SAR_END
);
1724 skb
= l2cap_create_iframe_pdu(chan
, msg
, buflen
, control
, 0);
1726 skb_queue_purge(&sar_queue
);
1727 return PTR_ERR(skb
);
1730 __skb_queue_tail(&sar_queue
, skb
);
1734 skb_queue_splice_tail(&sar_queue
, &chan
->tx_q
);
1735 if (chan
->tx_send_head
== NULL
)
1736 chan
->tx_send_head
= sar_queue
.next
;
1741 int l2cap_chan_send(struct l2cap_chan
*chan
, struct msghdr
*msg
, size_t len
,
1744 struct sk_buff
*skb
;
1748 /* Connectionless channel */
1749 if (chan
->chan_type
== L2CAP_CHAN_CONN_LESS
) {
1750 skb
= l2cap_create_connless_pdu(chan
, msg
, len
, priority
);
1752 return PTR_ERR(skb
);
1754 l2cap_do_send(chan
, skb
);
1758 switch (chan
->mode
) {
1759 case L2CAP_MODE_BASIC
:
1760 /* Check outgoing MTU */
1761 if (len
> chan
->omtu
)
1764 /* Create a basic PDU */
1765 skb
= l2cap_create_basic_pdu(chan
, msg
, len
, priority
);
1767 return PTR_ERR(skb
);
1769 l2cap_do_send(chan
, skb
);
1773 case L2CAP_MODE_ERTM
:
1774 case L2CAP_MODE_STREAMING
:
1775 /* Entire SDU fits into one PDU */
1776 if (len
<= chan
->remote_mps
) {
1777 control
= __set_ctrl_sar(chan
, L2CAP_SAR_UNSEGMENTED
);
1778 skb
= l2cap_create_iframe_pdu(chan
, msg
, len
, control
,
1781 return PTR_ERR(skb
);
1783 __skb_queue_tail(&chan
->tx_q
, skb
);
1785 if (chan
->tx_send_head
== NULL
)
1786 chan
->tx_send_head
= skb
;
1789 /* Segment SDU into multiples PDUs */
1790 err
= l2cap_sar_segment_sdu(chan
, msg
, len
);
1795 if (chan
->mode
== L2CAP_MODE_STREAMING
) {
1796 l2cap_streaming_send(chan
);
1801 if (test_bit(CONN_REMOTE_BUSY
, &chan
->conn_state
) &&
1802 test_bit(CONN_WAIT_F
, &chan
->conn_state
)) {
1807 err
= l2cap_ertm_send(chan
);
1814 BT_DBG("bad state %1.1x", chan
->mode
);
1821 /* Copy frame to all raw sockets on that connection */
1822 static void l2cap_raw_recv(struct l2cap_conn
*conn
, struct sk_buff
*skb
)
1824 struct sk_buff
*nskb
;
1825 struct l2cap_chan
*chan
;
1827 BT_DBG("conn %p", conn
);
1831 list_for_each_entry_rcu(chan
, &conn
->chan_l
, list
) {
1832 struct sock
*sk
= chan
->sk
;
1833 if (chan
->chan_type
!= L2CAP_CHAN_RAW
)
1836 /* Don't send frame to the socket it came from */
1839 nskb
= skb_clone(skb
, GFP_ATOMIC
);
1843 if (chan
->ops
->recv(chan
->data
, nskb
))
1850 /* ---- L2CAP signalling commands ---- */
1851 static struct sk_buff
*l2cap_build_cmd(struct l2cap_conn
*conn
,
1852 u8 code
, u8 ident
, u16 dlen
, void *data
)
1854 struct sk_buff
*skb
, **frag
;
1855 struct l2cap_cmd_hdr
*cmd
;
1856 struct l2cap_hdr
*lh
;
1859 BT_DBG("conn %p, code 0x%2.2x, ident 0x%2.2x, len %d",
1860 conn
, code
, ident
, dlen
);
1862 len
= L2CAP_HDR_SIZE
+ L2CAP_CMD_HDR_SIZE
+ dlen
;
1863 count
= min_t(unsigned int, conn
->mtu
, len
);
1865 skb
= bt_skb_alloc(count
, GFP_ATOMIC
);
1869 lh
= (struct l2cap_hdr
*) skb_put(skb
, L2CAP_HDR_SIZE
);
1870 lh
->len
= cpu_to_le16(L2CAP_CMD_HDR_SIZE
+ dlen
);
1872 if (conn
->hcon
->type
== LE_LINK
)
1873 lh
->cid
= cpu_to_le16(L2CAP_CID_LE_SIGNALING
);
1875 lh
->cid
= cpu_to_le16(L2CAP_CID_SIGNALING
);
1877 cmd
= (struct l2cap_cmd_hdr
*) skb_put(skb
, L2CAP_CMD_HDR_SIZE
);
1880 cmd
->len
= cpu_to_le16(dlen
);
1883 count
-= L2CAP_HDR_SIZE
+ L2CAP_CMD_HDR_SIZE
;
1884 memcpy(skb_put(skb
, count
), data
, count
);
1890 /* Continuation fragments (no L2CAP header) */
1891 frag
= &skb_shinfo(skb
)->frag_list
;
1893 count
= min_t(unsigned int, conn
->mtu
, len
);
1895 *frag
= bt_skb_alloc(count
, GFP_ATOMIC
);
1899 memcpy(skb_put(*frag
, count
), data
, count
);
1904 frag
= &(*frag
)->next
;
1914 static inline int l2cap_get_conf_opt(void **ptr
, int *type
, int *olen
, unsigned long *val
)
1916 struct l2cap_conf_opt
*opt
= *ptr
;
1919 len
= L2CAP_CONF_OPT_SIZE
+ opt
->len
;
1927 *val
= *((u8
*) opt
->val
);
1931 *val
= get_unaligned_le16(opt
->val
);
1935 *val
= get_unaligned_le32(opt
->val
);
1939 *val
= (unsigned long) opt
->val
;
1943 BT_DBG("type 0x%2.2x len %d val 0x%lx", *type
, opt
->len
, *val
);
1947 static void l2cap_add_conf_opt(void **ptr
, u8 type
, u8 len
, unsigned long val
)
1949 struct l2cap_conf_opt
*opt
= *ptr
;
1951 BT_DBG("type 0x%2.2x len %d val 0x%lx", type
, len
, val
);
1958 *((u8
*) opt
->val
) = val
;
1962 put_unaligned_le16(val
, opt
->val
);
1966 put_unaligned_le32(val
, opt
->val
);
1970 memcpy(opt
->val
, (void *) val
, len
);
1974 *ptr
+= L2CAP_CONF_OPT_SIZE
+ len
;
1977 static void l2cap_add_opt_efs(void **ptr
, struct l2cap_chan
*chan
)
1979 struct l2cap_conf_efs efs
;
1981 switch (chan
->mode
) {
1982 case L2CAP_MODE_ERTM
:
1983 efs
.id
= chan
->local_id
;
1984 efs
.stype
= chan
->local_stype
;
1985 efs
.msdu
= cpu_to_le16(chan
->local_msdu
);
1986 efs
.sdu_itime
= cpu_to_le32(chan
->local_sdu_itime
);
1987 efs
.acc_lat
= cpu_to_le32(L2CAP_DEFAULT_ACC_LAT
);
1988 efs
.flush_to
= cpu_to_le32(L2CAP_DEFAULT_FLUSH_TO
);
1991 case L2CAP_MODE_STREAMING
:
1993 efs
.stype
= L2CAP_SERV_BESTEFFORT
;
1994 efs
.msdu
= cpu_to_le16(chan
->local_msdu
);
1995 efs
.sdu_itime
= cpu_to_le32(chan
->local_sdu_itime
);
2004 l2cap_add_conf_opt(ptr
, L2CAP_CONF_EFS
, sizeof(efs
),
2005 (unsigned long) &efs
);
2008 static void l2cap_ack_timeout(struct work_struct
*work
)
2010 struct l2cap_chan
*chan
= container_of(work
, struct l2cap_chan
,
2013 lock_sock(chan
->sk
);
2014 l2cap_send_ack(chan
);
2015 release_sock(chan
->sk
);
2018 static inline void l2cap_ertm_init(struct l2cap_chan
*chan
)
2020 chan
->expected_ack_seq
= 0;
2021 chan
->unacked_frames
= 0;
2022 chan
->buffer_seq
= 0;
2023 chan
->num_acked
= 0;
2024 chan
->frames_sent
= 0;
2026 INIT_DELAYED_WORK(&chan
->retrans_timer
, l2cap_retrans_timeout
);
2027 INIT_DELAYED_WORK(&chan
->monitor_timer
, l2cap_monitor_timeout
);
2028 INIT_DELAYED_WORK(&chan
->ack_timer
, l2cap_ack_timeout
);
2030 skb_queue_head_init(&chan
->srej_q
);
2032 INIT_LIST_HEAD(&chan
->srej_l
);
2035 static inline __u8
l2cap_select_mode(__u8 mode
, __u16 remote_feat_mask
)
2038 case L2CAP_MODE_STREAMING
:
2039 case L2CAP_MODE_ERTM
:
2040 if (l2cap_mode_supported(mode
, remote_feat_mask
))
2044 return L2CAP_MODE_BASIC
;
2048 static inline bool __l2cap_ews_supported(struct l2cap_chan
*chan
)
2050 return enable_hs
&& chan
->conn
->feat_mask
& L2CAP_FEAT_EXT_WINDOW
;
2053 static inline bool __l2cap_efs_supported(struct l2cap_chan
*chan
)
2055 return enable_hs
&& chan
->conn
->feat_mask
& L2CAP_FEAT_EXT_FLOW
;
2058 static inline void l2cap_txwin_setup(struct l2cap_chan
*chan
)
2060 if (chan
->tx_win
> L2CAP_DEFAULT_TX_WINDOW
&&
2061 __l2cap_ews_supported(chan
)) {
2062 /* use extended control field */
2063 set_bit(FLAG_EXT_CTRL
, &chan
->flags
);
2064 chan
->tx_win_max
= L2CAP_DEFAULT_EXT_WINDOW
;
2066 chan
->tx_win
= min_t(u16
, chan
->tx_win
,
2067 L2CAP_DEFAULT_TX_WINDOW
);
2068 chan
->tx_win_max
= L2CAP_DEFAULT_TX_WINDOW
;
2072 static int l2cap_build_conf_req(struct l2cap_chan
*chan
, void *data
)
2074 struct l2cap_conf_req
*req
= data
;
2075 struct l2cap_conf_rfc rfc
= { .mode
= chan
->mode
};
2076 void *ptr
= req
->data
;
2079 BT_DBG("chan %p", chan
);
2081 if (chan
->num_conf_req
|| chan
->num_conf_rsp
)
2084 switch (chan
->mode
) {
2085 case L2CAP_MODE_STREAMING
:
2086 case L2CAP_MODE_ERTM
:
2087 if (test_bit(CONF_STATE2_DEVICE
, &chan
->conf_state
))
2090 if (__l2cap_efs_supported(chan
))
2091 set_bit(FLAG_EFS_ENABLE
, &chan
->flags
);
2095 chan
->mode
= l2cap_select_mode(rfc
.mode
, chan
->conn
->feat_mask
);
2100 if (chan
->imtu
!= L2CAP_DEFAULT_MTU
)
2101 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_MTU
, 2, chan
->imtu
);
2103 switch (chan
->mode
) {
2104 case L2CAP_MODE_BASIC
:
2105 if (!(chan
->conn
->feat_mask
& L2CAP_FEAT_ERTM
) &&
2106 !(chan
->conn
->feat_mask
& L2CAP_FEAT_STREAMING
))
2109 rfc
.mode
= L2CAP_MODE_BASIC
;
2111 rfc
.max_transmit
= 0;
2112 rfc
.retrans_timeout
= 0;
2113 rfc
.monitor_timeout
= 0;
2114 rfc
.max_pdu_size
= 0;
2116 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_RFC
, sizeof(rfc
),
2117 (unsigned long) &rfc
);
2120 case L2CAP_MODE_ERTM
:
2121 rfc
.mode
= L2CAP_MODE_ERTM
;
2122 rfc
.max_transmit
= chan
->max_tx
;
2123 rfc
.retrans_timeout
= 0;
2124 rfc
.monitor_timeout
= 0;
2126 size
= min_t(u16
, L2CAP_DEFAULT_MAX_PDU_SIZE
, chan
->conn
->mtu
-
2127 L2CAP_EXT_HDR_SIZE
-
2130 rfc
.max_pdu_size
= cpu_to_le16(size
);
2132 l2cap_txwin_setup(chan
);
2134 rfc
.txwin_size
= min_t(u16
, chan
->tx_win
,
2135 L2CAP_DEFAULT_TX_WINDOW
);
2137 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_RFC
, sizeof(rfc
),
2138 (unsigned long) &rfc
);
2140 if (test_bit(FLAG_EFS_ENABLE
, &chan
->flags
))
2141 l2cap_add_opt_efs(&ptr
, chan
);
2143 if (!(chan
->conn
->feat_mask
& L2CAP_FEAT_FCS
))
2146 if (chan
->fcs
== L2CAP_FCS_NONE
||
2147 test_bit(CONF_NO_FCS_RECV
, &chan
->conf_state
)) {
2148 chan
->fcs
= L2CAP_FCS_NONE
;
2149 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_FCS
, 1, chan
->fcs
);
2152 if (test_bit(FLAG_EXT_CTRL
, &chan
->flags
))
2153 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_EWS
, 2,
2157 case L2CAP_MODE_STREAMING
:
2158 rfc
.mode
= L2CAP_MODE_STREAMING
;
2160 rfc
.max_transmit
= 0;
2161 rfc
.retrans_timeout
= 0;
2162 rfc
.monitor_timeout
= 0;
2164 size
= min_t(u16
, L2CAP_DEFAULT_MAX_PDU_SIZE
, chan
->conn
->mtu
-
2165 L2CAP_EXT_HDR_SIZE
-
2168 rfc
.max_pdu_size
= cpu_to_le16(size
);
2170 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_RFC
, sizeof(rfc
),
2171 (unsigned long) &rfc
);
2173 if (test_bit(FLAG_EFS_ENABLE
, &chan
->flags
))
2174 l2cap_add_opt_efs(&ptr
, chan
);
2176 if (!(chan
->conn
->feat_mask
& L2CAP_FEAT_FCS
))
2179 if (chan
->fcs
== L2CAP_FCS_NONE
||
2180 test_bit(CONF_NO_FCS_RECV
, &chan
->conf_state
)) {
2181 chan
->fcs
= L2CAP_FCS_NONE
;
2182 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_FCS
, 1, chan
->fcs
);
2187 req
->dcid
= cpu_to_le16(chan
->dcid
);
2188 req
->flags
= cpu_to_le16(0);
2193 static int l2cap_parse_conf_req(struct l2cap_chan
*chan
, void *data
)
2195 struct l2cap_conf_rsp
*rsp
= data
;
2196 void *ptr
= rsp
->data
;
2197 void *req
= chan
->conf_req
;
2198 int len
= chan
->conf_len
;
2199 int type
, hint
, olen
;
2201 struct l2cap_conf_rfc rfc
= { .mode
= L2CAP_MODE_BASIC
};
2202 struct l2cap_conf_efs efs
;
2204 u16 mtu
= L2CAP_DEFAULT_MTU
;
2205 u16 result
= L2CAP_CONF_SUCCESS
;
2208 BT_DBG("chan %p", chan
);
2210 while (len
>= L2CAP_CONF_OPT_SIZE
) {
2211 len
-= l2cap_get_conf_opt(&req
, &type
, &olen
, &val
);
2213 hint
= type
& L2CAP_CONF_HINT
;
2214 type
&= L2CAP_CONF_MASK
;
2217 case L2CAP_CONF_MTU
:
2221 case L2CAP_CONF_FLUSH_TO
:
2222 chan
->flush_to
= val
;
2225 case L2CAP_CONF_QOS
:
2228 case L2CAP_CONF_RFC
:
2229 if (olen
== sizeof(rfc
))
2230 memcpy(&rfc
, (void *) val
, olen
);
2233 case L2CAP_CONF_FCS
:
2234 if (val
== L2CAP_FCS_NONE
)
2235 set_bit(CONF_NO_FCS_RECV
, &chan
->conf_state
);
2238 case L2CAP_CONF_EFS
:
2240 if (olen
== sizeof(efs
))
2241 memcpy(&efs
, (void *) val
, olen
);
2244 case L2CAP_CONF_EWS
:
2246 return -ECONNREFUSED
;
2248 set_bit(FLAG_EXT_CTRL
, &chan
->flags
);
2249 set_bit(CONF_EWS_RECV
, &chan
->conf_state
);
2250 chan
->tx_win_max
= L2CAP_DEFAULT_EXT_WINDOW
;
2251 chan
->remote_tx_win
= val
;
2258 result
= L2CAP_CONF_UNKNOWN
;
2259 *((u8
*) ptr
++) = type
;
2264 if (chan
->num_conf_rsp
|| chan
->num_conf_req
> 1)
2267 switch (chan
->mode
) {
2268 case L2CAP_MODE_STREAMING
:
2269 case L2CAP_MODE_ERTM
:
2270 if (!test_bit(CONF_STATE2_DEVICE
, &chan
->conf_state
)) {
2271 chan
->mode
= l2cap_select_mode(rfc
.mode
,
2272 chan
->conn
->feat_mask
);
2277 if (__l2cap_efs_supported(chan
))
2278 set_bit(FLAG_EFS_ENABLE
, &chan
->flags
);
2280 return -ECONNREFUSED
;
2283 if (chan
->mode
!= rfc
.mode
)
2284 return -ECONNREFUSED
;
2290 if (chan
->mode
!= rfc
.mode
) {
2291 result
= L2CAP_CONF_UNACCEPT
;
2292 rfc
.mode
= chan
->mode
;
2294 if (chan
->num_conf_rsp
== 1)
2295 return -ECONNREFUSED
;
2297 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_RFC
,
2298 sizeof(rfc
), (unsigned long) &rfc
);
2301 if (result
== L2CAP_CONF_SUCCESS
) {
2302 /* Configure output options and let the other side know
2303 * which ones we don't like. */
2305 if (mtu
< L2CAP_DEFAULT_MIN_MTU
)
2306 result
= L2CAP_CONF_UNACCEPT
;
2309 set_bit(CONF_MTU_DONE
, &chan
->conf_state
);
2311 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_MTU
, 2, chan
->omtu
);
2314 if (chan
->local_stype
!= L2CAP_SERV_NOTRAFIC
&&
2315 efs
.stype
!= L2CAP_SERV_NOTRAFIC
&&
2316 efs
.stype
!= chan
->local_stype
) {
2318 result
= L2CAP_CONF_UNACCEPT
;
2320 if (chan
->num_conf_req
>= 1)
2321 return -ECONNREFUSED
;
2323 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_EFS
,
2325 (unsigned long) &efs
);
2327 /* Send PENDING Conf Rsp */
2328 result
= L2CAP_CONF_PENDING
;
2329 set_bit(CONF_LOC_CONF_PEND
, &chan
->conf_state
);
2334 case L2CAP_MODE_BASIC
:
2335 chan
->fcs
= L2CAP_FCS_NONE
;
2336 set_bit(CONF_MODE_DONE
, &chan
->conf_state
);
2339 case L2CAP_MODE_ERTM
:
2340 if (!test_bit(CONF_EWS_RECV
, &chan
->conf_state
))
2341 chan
->remote_tx_win
= rfc
.txwin_size
;
2343 rfc
.txwin_size
= L2CAP_DEFAULT_TX_WINDOW
;
2345 chan
->remote_max_tx
= rfc
.max_transmit
;
2347 size
= min_t(u16
, le16_to_cpu(rfc
.max_pdu_size
),
2349 L2CAP_EXT_HDR_SIZE
-
2352 rfc
.max_pdu_size
= cpu_to_le16(size
);
2353 chan
->remote_mps
= size
;
2355 rfc
.retrans_timeout
=
2356 le16_to_cpu(L2CAP_DEFAULT_RETRANS_TO
);
2357 rfc
.monitor_timeout
=
2358 le16_to_cpu(L2CAP_DEFAULT_MONITOR_TO
);
2360 set_bit(CONF_MODE_DONE
, &chan
->conf_state
);
2362 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_RFC
,
2363 sizeof(rfc
), (unsigned long) &rfc
);
2365 if (test_bit(FLAG_EFS_ENABLE
, &chan
->flags
)) {
2366 chan
->remote_id
= efs
.id
;
2367 chan
->remote_stype
= efs
.stype
;
2368 chan
->remote_msdu
= le16_to_cpu(efs
.msdu
);
2369 chan
->remote_flush_to
=
2370 le32_to_cpu(efs
.flush_to
);
2371 chan
->remote_acc_lat
=
2372 le32_to_cpu(efs
.acc_lat
);
2373 chan
->remote_sdu_itime
=
2374 le32_to_cpu(efs
.sdu_itime
);
2375 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_EFS
,
2376 sizeof(efs
), (unsigned long) &efs
);
2380 case L2CAP_MODE_STREAMING
:
2381 size
= min_t(u16
, le16_to_cpu(rfc
.max_pdu_size
),
2383 L2CAP_EXT_HDR_SIZE
-
2386 rfc
.max_pdu_size
= cpu_to_le16(size
);
2387 chan
->remote_mps
= size
;
2389 set_bit(CONF_MODE_DONE
, &chan
->conf_state
);
2391 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_RFC
,
2392 sizeof(rfc
), (unsigned long) &rfc
);
2397 result
= L2CAP_CONF_UNACCEPT
;
2399 memset(&rfc
, 0, sizeof(rfc
));
2400 rfc
.mode
= chan
->mode
;
2403 if (result
== L2CAP_CONF_SUCCESS
)
2404 set_bit(CONF_OUTPUT_DONE
, &chan
->conf_state
);
2406 rsp
->scid
= cpu_to_le16(chan
->dcid
);
2407 rsp
->result
= cpu_to_le16(result
);
2408 rsp
->flags
= cpu_to_le16(0x0000);
2413 static int l2cap_parse_conf_rsp(struct l2cap_chan
*chan
, void *rsp
, int len
, void *data
, u16
*result
)
2415 struct l2cap_conf_req
*req
= data
;
2416 void *ptr
= req
->data
;
2419 struct l2cap_conf_rfc rfc
;
2420 struct l2cap_conf_efs efs
;
2422 BT_DBG("chan %p, rsp %p, len %d, req %p", chan
, rsp
, len
, data
);
2424 while (len
>= L2CAP_CONF_OPT_SIZE
) {
2425 len
-= l2cap_get_conf_opt(&rsp
, &type
, &olen
, &val
);
2428 case L2CAP_CONF_MTU
:
2429 if (val
< L2CAP_DEFAULT_MIN_MTU
) {
2430 *result
= L2CAP_CONF_UNACCEPT
;
2431 chan
->imtu
= L2CAP_DEFAULT_MIN_MTU
;
2434 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_MTU
, 2, chan
->imtu
);
2437 case L2CAP_CONF_FLUSH_TO
:
2438 chan
->flush_to
= val
;
2439 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_FLUSH_TO
,
2443 case L2CAP_CONF_RFC
:
2444 if (olen
== sizeof(rfc
))
2445 memcpy(&rfc
, (void *)val
, olen
);
2447 if (test_bit(CONF_STATE2_DEVICE
, &chan
->conf_state
) &&
2448 rfc
.mode
!= chan
->mode
)
2449 return -ECONNREFUSED
;
2453 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_RFC
,
2454 sizeof(rfc
), (unsigned long) &rfc
);
2457 case L2CAP_CONF_EWS
:
2458 chan
->tx_win
= min_t(u16
, val
,
2459 L2CAP_DEFAULT_EXT_WINDOW
);
2460 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_EWS
, 2,
2464 case L2CAP_CONF_EFS
:
2465 if (olen
== sizeof(efs
))
2466 memcpy(&efs
, (void *)val
, olen
);
2468 if (chan
->local_stype
!= L2CAP_SERV_NOTRAFIC
&&
2469 efs
.stype
!= L2CAP_SERV_NOTRAFIC
&&
2470 efs
.stype
!= chan
->local_stype
)
2471 return -ECONNREFUSED
;
2473 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_EFS
,
2474 sizeof(efs
), (unsigned long) &efs
);
2479 if (chan
->mode
== L2CAP_MODE_BASIC
&& chan
->mode
!= rfc
.mode
)
2480 return -ECONNREFUSED
;
2482 chan
->mode
= rfc
.mode
;
2484 if (*result
== L2CAP_CONF_SUCCESS
|| *result
== L2CAP_CONF_PENDING
) {
2486 case L2CAP_MODE_ERTM
:
2487 chan
->retrans_timeout
= le16_to_cpu(rfc
.retrans_timeout
);
2488 chan
->monitor_timeout
= le16_to_cpu(rfc
.monitor_timeout
);
2489 chan
->mps
= le16_to_cpu(rfc
.max_pdu_size
);
2491 if (test_bit(FLAG_EFS_ENABLE
, &chan
->flags
)) {
2492 chan
->local_msdu
= le16_to_cpu(efs
.msdu
);
2493 chan
->local_sdu_itime
=
2494 le32_to_cpu(efs
.sdu_itime
);
2495 chan
->local_acc_lat
= le32_to_cpu(efs
.acc_lat
);
2496 chan
->local_flush_to
=
2497 le32_to_cpu(efs
.flush_to
);
2501 case L2CAP_MODE_STREAMING
:
2502 chan
->mps
= le16_to_cpu(rfc
.max_pdu_size
);
2506 req
->dcid
= cpu_to_le16(chan
->dcid
);
2507 req
->flags
= cpu_to_le16(0x0000);
2512 static int l2cap_build_conf_rsp(struct l2cap_chan
*chan
, void *data
, u16 result
, u16 flags
)
2514 struct l2cap_conf_rsp
*rsp
= data
;
2515 void *ptr
= rsp
->data
;
2517 BT_DBG("chan %p", chan
);
2519 rsp
->scid
= cpu_to_le16(chan
->dcid
);
2520 rsp
->result
= cpu_to_le16(result
);
2521 rsp
->flags
= cpu_to_le16(flags
);
2526 void __l2cap_connect_rsp_defer(struct l2cap_chan
*chan
)
2528 struct l2cap_conn_rsp rsp
;
2529 struct l2cap_conn
*conn
= chan
->conn
;
2532 rsp
.scid
= cpu_to_le16(chan
->dcid
);
2533 rsp
.dcid
= cpu_to_le16(chan
->scid
);
2534 rsp
.result
= cpu_to_le16(L2CAP_CR_SUCCESS
);
2535 rsp
.status
= cpu_to_le16(L2CAP_CS_NO_INFO
);
2536 l2cap_send_cmd(conn
, chan
->ident
,
2537 L2CAP_CONN_RSP
, sizeof(rsp
), &rsp
);
2539 if (test_and_set_bit(CONF_REQ_SENT
, &chan
->conf_state
))
2542 l2cap_send_cmd(conn
, l2cap_get_ident(conn
), L2CAP_CONF_REQ
,
2543 l2cap_build_conf_req(chan
, buf
), buf
);
2544 chan
->num_conf_req
++;
2547 static void l2cap_conf_rfc_get(struct l2cap_chan
*chan
, void *rsp
, int len
)
2551 struct l2cap_conf_rfc rfc
;
2553 BT_DBG("chan %p, rsp %p, len %d", chan
, rsp
, len
);
2555 if ((chan
->mode
!= L2CAP_MODE_ERTM
) && (chan
->mode
!= L2CAP_MODE_STREAMING
))
2558 while (len
>= L2CAP_CONF_OPT_SIZE
) {
2559 len
-= l2cap_get_conf_opt(&rsp
, &type
, &olen
, &val
);
2562 case L2CAP_CONF_RFC
:
2563 if (olen
== sizeof(rfc
))
2564 memcpy(&rfc
, (void *)val
, olen
);
2571 case L2CAP_MODE_ERTM
:
2572 chan
->retrans_timeout
= le16_to_cpu(rfc
.retrans_timeout
);
2573 chan
->monitor_timeout
= le16_to_cpu(rfc
.monitor_timeout
);
2574 chan
->mps
= le16_to_cpu(rfc
.max_pdu_size
);
2576 case L2CAP_MODE_STREAMING
:
2577 chan
->mps
= le16_to_cpu(rfc
.max_pdu_size
);
2581 static inline int l2cap_command_rej(struct l2cap_conn
*conn
, struct l2cap_cmd_hdr
*cmd
, u8
*data
)
2583 struct l2cap_cmd_rej_unk
*rej
= (struct l2cap_cmd_rej_unk
*) data
;
2585 if (rej
->reason
!= L2CAP_REJ_NOT_UNDERSTOOD
)
2588 if ((conn
->info_state
& L2CAP_INFO_FEAT_MASK_REQ_SENT
) &&
2589 cmd
->ident
== conn
->info_ident
) {
2590 cancel_delayed_work_sync(&conn
->info_work
);
2592 conn
->info_state
|= L2CAP_INFO_FEAT_MASK_REQ_DONE
;
2593 conn
->info_ident
= 0;
2595 l2cap_conn_start(conn
);
2601 static inline int l2cap_connect_req(struct l2cap_conn
*conn
, struct l2cap_cmd_hdr
*cmd
, u8
*data
)
2603 struct l2cap_conn_req
*req
= (struct l2cap_conn_req
*) data
;
2604 struct l2cap_conn_rsp rsp
;
2605 struct l2cap_chan
*chan
= NULL
, *pchan
;
2606 struct sock
*parent
, *sk
= NULL
;
2607 int result
, status
= L2CAP_CS_NO_INFO
;
2609 u16 dcid
= 0, scid
= __le16_to_cpu(req
->scid
);
2610 __le16 psm
= req
->psm
;
2612 BT_DBG("psm 0x%2.2x scid 0x%4.4x", psm
, scid
);
2614 /* Check if we have socket listening on psm */
2615 pchan
= l2cap_global_chan_by_psm(BT_LISTEN
, psm
, conn
->src
);
2617 result
= L2CAP_CR_BAD_PSM
;
2625 /* Check if the ACL is secure enough (if not SDP) */
2626 if (psm
!= cpu_to_le16(0x0001) &&
2627 !hci_conn_check_link_mode(conn
->hcon
)) {
2628 conn
->disc_reason
= HCI_ERROR_AUTH_FAILURE
;
2629 result
= L2CAP_CR_SEC_BLOCK
;
2633 result
= L2CAP_CR_NO_MEM
;
2635 /* Check for backlog size */
2636 if (sk_acceptq_is_full(parent
)) {
2637 BT_DBG("backlog full %d", parent
->sk_ack_backlog
);
2641 chan
= pchan
->ops
->new_connection(pchan
->data
);
2647 /* Check if we already have channel with that dcid */
2648 if (__l2cap_get_chan_by_dcid(conn
, scid
)) {
2649 sock_set_flag(sk
, SOCK_ZAPPED
);
2650 chan
->ops
->close(chan
->data
);
2654 hci_conn_hold(conn
->hcon
);
2656 bacpy(&bt_sk(sk
)->src
, conn
->src
);
2657 bacpy(&bt_sk(sk
)->dst
, conn
->dst
);
2661 bt_accept_enqueue(parent
, sk
);
2663 l2cap_chan_add(conn
, chan
);
2667 __set_chan_timer(chan
, sk
->sk_sndtimeo
);
2669 chan
->ident
= cmd
->ident
;
2671 if (conn
->info_state
& L2CAP_INFO_FEAT_MASK_REQ_DONE
) {
2672 if (l2cap_chan_check_security(chan
)) {
2673 if (bt_sk(sk
)->defer_setup
) {
2674 l2cap_state_change(chan
, BT_CONNECT2
);
2675 result
= L2CAP_CR_PEND
;
2676 status
= L2CAP_CS_AUTHOR_PEND
;
2677 parent
->sk_data_ready(parent
, 0);
2679 l2cap_state_change(chan
, BT_CONFIG
);
2680 result
= L2CAP_CR_SUCCESS
;
2681 status
= L2CAP_CS_NO_INFO
;
2684 l2cap_state_change(chan
, BT_CONNECT2
);
2685 result
= L2CAP_CR_PEND
;
2686 status
= L2CAP_CS_AUTHEN_PEND
;
2689 l2cap_state_change(chan
, BT_CONNECT2
);
2690 result
= L2CAP_CR_PEND
;
2691 status
= L2CAP_CS_NO_INFO
;
2695 release_sock(parent
);
2698 rsp
.scid
= cpu_to_le16(scid
);
2699 rsp
.dcid
= cpu_to_le16(dcid
);
2700 rsp
.result
= cpu_to_le16(result
);
2701 rsp
.status
= cpu_to_le16(status
);
2702 l2cap_send_cmd(conn
, cmd
->ident
, L2CAP_CONN_RSP
, sizeof(rsp
), &rsp
);
2704 if (result
== L2CAP_CR_PEND
&& status
== L2CAP_CS_NO_INFO
) {
2705 struct l2cap_info_req info
;
2706 info
.type
= cpu_to_le16(L2CAP_IT_FEAT_MASK
);
2708 conn
->info_state
|= L2CAP_INFO_FEAT_MASK_REQ_SENT
;
2709 conn
->info_ident
= l2cap_get_ident(conn
);
2711 schedule_delayed_work(&conn
->info_work
,
2712 msecs_to_jiffies(L2CAP_INFO_TIMEOUT
));
2714 l2cap_send_cmd(conn
, conn
->info_ident
,
2715 L2CAP_INFO_REQ
, sizeof(info
), &info
);
2718 if (chan
&& !test_bit(CONF_REQ_SENT
, &chan
->conf_state
) &&
2719 result
== L2CAP_CR_SUCCESS
) {
2721 set_bit(CONF_REQ_SENT
, &chan
->conf_state
);
2722 l2cap_send_cmd(conn
, l2cap_get_ident(conn
), L2CAP_CONF_REQ
,
2723 l2cap_build_conf_req(chan
, buf
), buf
);
2724 chan
->num_conf_req
++;
2730 static inline int l2cap_connect_rsp(struct l2cap_conn
*conn
, struct l2cap_cmd_hdr
*cmd
, u8
*data
)
2732 struct l2cap_conn_rsp
*rsp
= (struct l2cap_conn_rsp
*) data
;
2733 u16 scid
, dcid
, result
, status
;
2734 struct l2cap_chan
*chan
;
2738 scid
= __le16_to_cpu(rsp
->scid
);
2739 dcid
= __le16_to_cpu(rsp
->dcid
);
2740 result
= __le16_to_cpu(rsp
->result
);
2741 status
= __le16_to_cpu(rsp
->status
);
2743 BT_DBG("dcid 0x%4.4x scid 0x%4.4x result 0x%2.2x status 0x%2.2x", dcid
, scid
, result
, status
);
2746 chan
= l2cap_get_chan_by_scid(conn
, scid
);
2750 chan
= l2cap_get_chan_by_ident(conn
, cmd
->ident
);
2758 case L2CAP_CR_SUCCESS
:
2759 l2cap_state_change(chan
, BT_CONFIG
);
2762 clear_bit(CONF_CONNECT_PEND
, &chan
->conf_state
);
2764 if (test_and_set_bit(CONF_REQ_SENT
, &chan
->conf_state
))
2767 l2cap_send_cmd(conn
, l2cap_get_ident(conn
), L2CAP_CONF_REQ
,
2768 l2cap_build_conf_req(chan
, req
), req
);
2769 chan
->num_conf_req
++;
2773 set_bit(CONF_CONNECT_PEND
, &chan
->conf_state
);
2777 l2cap_chan_del(chan
, ECONNREFUSED
);
2785 static inline void set_default_fcs(struct l2cap_chan
*chan
)
2787 /* FCS is enabled only in ERTM or streaming mode, if one or both
2790 if (chan
->mode
!= L2CAP_MODE_ERTM
&& chan
->mode
!= L2CAP_MODE_STREAMING
)
2791 chan
->fcs
= L2CAP_FCS_NONE
;
2792 else if (!test_bit(CONF_NO_FCS_RECV
, &chan
->conf_state
))
2793 chan
->fcs
= L2CAP_FCS_CRC16
;
2796 static inline int l2cap_config_req(struct l2cap_conn
*conn
, struct l2cap_cmd_hdr
*cmd
, u16 cmd_len
, u8
*data
)
2798 struct l2cap_conf_req
*req
= (struct l2cap_conf_req
*) data
;
2801 struct l2cap_chan
*chan
;
2805 dcid
= __le16_to_cpu(req
->dcid
);
2806 flags
= __le16_to_cpu(req
->flags
);
2808 BT_DBG("dcid 0x%4.4x flags 0x%2.2x", dcid
, flags
);
2810 chan
= l2cap_get_chan_by_scid(conn
, dcid
);
2816 if (chan
->state
!= BT_CONFIG
&& chan
->state
!= BT_CONNECT2
) {
2817 struct l2cap_cmd_rej_cid rej
;
2819 rej
.reason
= cpu_to_le16(L2CAP_REJ_INVALID_CID
);
2820 rej
.scid
= cpu_to_le16(chan
->scid
);
2821 rej
.dcid
= cpu_to_le16(chan
->dcid
);
2823 l2cap_send_cmd(conn
, cmd
->ident
, L2CAP_COMMAND_REJ
,
2828 /* Reject if config buffer is too small. */
2829 len
= cmd_len
- sizeof(*req
);
2830 if (len
< 0 || chan
->conf_len
+ len
> sizeof(chan
->conf_req
)) {
2831 l2cap_send_cmd(conn
, cmd
->ident
, L2CAP_CONF_RSP
,
2832 l2cap_build_conf_rsp(chan
, rsp
,
2833 L2CAP_CONF_REJECT
, flags
), rsp
);
2838 memcpy(chan
->conf_req
+ chan
->conf_len
, req
->data
, len
);
2839 chan
->conf_len
+= len
;
2841 if (flags
& 0x0001) {
2842 /* Incomplete config. Send empty response. */
2843 l2cap_send_cmd(conn
, cmd
->ident
, L2CAP_CONF_RSP
,
2844 l2cap_build_conf_rsp(chan
, rsp
,
2845 L2CAP_CONF_SUCCESS
, 0x0001), rsp
);
2849 /* Complete config. */
2850 len
= l2cap_parse_conf_req(chan
, rsp
);
2852 l2cap_send_disconn_req(conn
, chan
, ECONNRESET
);
2856 l2cap_send_cmd(conn
, cmd
->ident
, L2CAP_CONF_RSP
, len
, rsp
);
2857 chan
->num_conf_rsp
++;
2859 /* Reset config buffer. */
2862 if (!test_bit(CONF_OUTPUT_DONE
, &chan
->conf_state
))
2865 if (test_bit(CONF_INPUT_DONE
, &chan
->conf_state
)) {
2866 set_default_fcs(chan
);
2868 l2cap_state_change(chan
, BT_CONNECTED
);
2870 chan
->next_tx_seq
= 0;
2871 chan
->expected_tx_seq
= 0;
2872 skb_queue_head_init(&chan
->tx_q
);
2873 if (chan
->mode
== L2CAP_MODE_ERTM
)
2874 l2cap_ertm_init(chan
);
2876 l2cap_chan_ready(sk
);
2880 if (!test_and_set_bit(CONF_REQ_SENT
, &chan
->conf_state
)) {
2882 l2cap_send_cmd(conn
, l2cap_get_ident(conn
), L2CAP_CONF_REQ
,
2883 l2cap_build_conf_req(chan
, buf
), buf
);
2884 chan
->num_conf_req
++;
2887 /* Got Conf Rsp PENDING from remote side and asume we sent
2888 Conf Rsp PENDING in the code above */
2889 if (test_bit(CONF_REM_CONF_PEND
, &chan
->conf_state
) &&
2890 test_bit(CONF_LOC_CONF_PEND
, &chan
->conf_state
)) {
2892 /* check compatibility */
2894 clear_bit(CONF_LOC_CONF_PEND
, &chan
->conf_state
);
2895 set_bit(CONF_OUTPUT_DONE
, &chan
->conf_state
);
2897 l2cap_send_cmd(conn
, cmd
->ident
, L2CAP_CONF_RSP
,
2898 l2cap_build_conf_rsp(chan
, rsp
,
2899 L2CAP_CONF_SUCCESS
, 0x0000), rsp
);
2907 static inline int l2cap_config_rsp(struct l2cap_conn
*conn
, struct l2cap_cmd_hdr
*cmd
, u8
*data
)
2909 struct l2cap_conf_rsp
*rsp
= (struct l2cap_conf_rsp
*)data
;
2910 u16 scid
, flags
, result
;
2911 struct l2cap_chan
*chan
;
2913 int len
= cmd
->len
- sizeof(*rsp
);
2915 scid
= __le16_to_cpu(rsp
->scid
);
2916 flags
= __le16_to_cpu(rsp
->flags
);
2917 result
= __le16_to_cpu(rsp
->result
);
2919 BT_DBG("scid 0x%4.4x flags 0x%2.2x result 0x%2.2x",
2920 scid
, flags
, result
);
2922 chan
= l2cap_get_chan_by_scid(conn
, scid
);
2929 case L2CAP_CONF_SUCCESS
:
2930 l2cap_conf_rfc_get(chan
, rsp
->data
, len
);
2931 clear_bit(CONF_REM_CONF_PEND
, &chan
->conf_state
);
2934 case L2CAP_CONF_PENDING
:
2935 set_bit(CONF_REM_CONF_PEND
, &chan
->conf_state
);
2937 if (test_bit(CONF_LOC_CONF_PEND
, &chan
->conf_state
)) {
2940 len
= l2cap_parse_conf_rsp(chan
, rsp
->data
, len
,
2943 l2cap_send_disconn_req(conn
, chan
, ECONNRESET
);
2947 /* check compatibility */
2949 clear_bit(CONF_LOC_CONF_PEND
, &chan
->conf_state
);
2950 set_bit(CONF_OUTPUT_DONE
, &chan
->conf_state
);
2952 l2cap_send_cmd(conn
, cmd
->ident
, L2CAP_CONF_RSP
,
2953 l2cap_build_conf_rsp(chan
, buf
,
2954 L2CAP_CONF_SUCCESS
, 0x0000), buf
);
2958 case L2CAP_CONF_UNACCEPT
:
2959 if (chan
->num_conf_rsp
<= L2CAP_CONF_MAX_CONF_RSP
) {
2962 if (len
> sizeof(req
) - sizeof(struct l2cap_conf_req
)) {
2963 l2cap_send_disconn_req(conn
, chan
, ECONNRESET
);
2967 /* throw out any old stored conf requests */
2968 result
= L2CAP_CONF_SUCCESS
;
2969 len
= l2cap_parse_conf_rsp(chan
, rsp
->data
, len
,
2972 l2cap_send_disconn_req(conn
, chan
, ECONNRESET
);
2976 l2cap_send_cmd(conn
, l2cap_get_ident(conn
),
2977 L2CAP_CONF_REQ
, len
, req
);
2978 chan
->num_conf_req
++;
2979 if (result
!= L2CAP_CONF_SUCCESS
)
2985 sk
->sk_err
= ECONNRESET
;
2986 __set_chan_timer(chan
, L2CAP_DISC_REJ_TIMEOUT
);
2987 l2cap_send_disconn_req(conn
, chan
, ECONNRESET
);
2994 set_bit(CONF_INPUT_DONE
, &chan
->conf_state
);
2996 if (test_bit(CONF_OUTPUT_DONE
, &chan
->conf_state
)) {
2997 set_default_fcs(chan
);
2999 l2cap_state_change(chan
, BT_CONNECTED
);
3000 chan
->next_tx_seq
= 0;
3001 chan
->expected_tx_seq
= 0;
3002 skb_queue_head_init(&chan
->tx_q
);
3003 if (chan
->mode
== L2CAP_MODE_ERTM
)
3004 l2cap_ertm_init(chan
);
3006 l2cap_chan_ready(sk
);
3014 static inline int l2cap_disconnect_req(struct l2cap_conn
*conn
, struct l2cap_cmd_hdr
*cmd
, u8
*data
)
3016 struct l2cap_disconn_req
*req
= (struct l2cap_disconn_req
*) data
;
3017 struct l2cap_disconn_rsp rsp
;
3019 struct l2cap_chan
*chan
;
3022 scid
= __le16_to_cpu(req
->scid
);
3023 dcid
= __le16_to_cpu(req
->dcid
);
3025 BT_DBG("scid 0x%4.4x dcid 0x%4.4x", scid
, dcid
);
3027 chan
= l2cap_get_chan_by_scid(conn
, dcid
);
3033 rsp
.dcid
= cpu_to_le16(chan
->scid
);
3034 rsp
.scid
= cpu_to_le16(chan
->dcid
);
3035 l2cap_send_cmd(conn
, cmd
->ident
, L2CAP_DISCONN_RSP
, sizeof(rsp
), &rsp
);
3037 sk
->sk_shutdown
= SHUTDOWN_MASK
;
3039 l2cap_chan_del(chan
, ECONNRESET
);
3042 chan
->ops
->close(chan
->data
);
3046 static inline int l2cap_disconnect_rsp(struct l2cap_conn
*conn
, struct l2cap_cmd_hdr
*cmd
, u8
*data
)
3048 struct l2cap_disconn_rsp
*rsp
= (struct l2cap_disconn_rsp
*) data
;
3050 struct l2cap_chan
*chan
;
3053 scid
= __le16_to_cpu(rsp
->scid
);
3054 dcid
= __le16_to_cpu(rsp
->dcid
);
3056 BT_DBG("dcid 0x%4.4x scid 0x%4.4x", dcid
, scid
);
3058 chan
= l2cap_get_chan_by_scid(conn
, scid
);
3064 l2cap_chan_del(chan
, 0);
3067 chan
->ops
->close(chan
->data
);
3071 static inline int l2cap_information_req(struct l2cap_conn
*conn
, struct l2cap_cmd_hdr
*cmd
, u8
*data
)
3073 struct l2cap_info_req
*req
= (struct l2cap_info_req
*) data
;
3076 type
= __le16_to_cpu(req
->type
);
3078 BT_DBG("type 0x%4.4x", type
);
3080 if (type
== L2CAP_IT_FEAT_MASK
) {
3082 u32 feat_mask
= l2cap_feat_mask
;
3083 struct l2cap_info_rsp
*rsp
= (struct l2cap_info_rsp
*) buf
;
3084 rsp
->type
= cpu_to_le16(L2CAP_IT_FEAT_MASK
);
3085 rsp
->result
= cpu_to_le16(L2CAP_IR_SUCCESS
);
3087 feat_mask
|= L2CAP_FEAT_ERTM
| L2CAP_FEAT_STREAMING
3090 feat_mask
|= L2CAP_FEAT_EXT_FLOW
3091 | L2CAP_FEAT_EXT_WINDOW
;
3093 put_unaligned_le32(feat_mask
, rsp
->data
);
3094 l2cap_send_cmd(conn
, cmd
->ident
,
3095 L2CAP_INFO_RSP
, sizeof(buf
), buf
);
3096 } else if (type
== L2CAP_IT_FIXED_CHAN
) {
3098 struct l2cap_info_rsp
*rsp
= (struct l2cap_info_rsp
*) buf
;
3101 l2cap_fixed_chan
[0] |= L2CAP_FC_A2MP
;
3103 l2cap_fixed_chan
[0] &= ~L2CAP_FC_A2MP
;
3105 rsp
->type
= cpu_to_le16(L2CAP_IT_FIXED_CHAN
);
3106 rsp
->result
= cpu_to_le16(L2CAP_IR_SUCCESS
);
3107 memcpy(rsp
->data
, l2cap_fixed_chan
, sizeof(l2cap_fixed_chan
));
3108 l2cap_send_cmd(conn
, cmd
->ident
,
3109 L2CAP_INFO_RSP
, sizeof(buf
), buf
);
3111 struct l2cap_info_rsp rsp
;
3112 rsp
.type
= cpu_to_le16(type
);
3113 rsp
.result
= cpu_to_le16(L2CAP_IR_NOTSUPP
);
3114 l2cap_send_cmd(conn
, cmd
->ident
,
3115 L2CAP_INFO_RSP
, sizeof(rsp
), &rsp
);
3121 static inline int l2cap_information_rsp(struct l2cap_conn
*conn
, struct l2cap_cmd_hdr
*cmd
, u8
*data
)
3123 struct l2cap_info_rsp
*rsp
= (struct l2cap_info_rsp
*) data
;
3126 type
= __le16_to_cpu(rsp
->type
);
3127 result
= __le16_to_cpu(rsp
->result
);
3129 BT_DBG("type 0x%4.4x result 0x%2.2x", type
, result
);
3131 /* L2CAP Info req/rsp are unbound to channels, add extra checks */
3132 if (cmd
->ident
!= conn
->info_ident
||
3133 conn
->info_state
& L2CAP_INFO_FEAT_MASK_REQ_DONE
)
3136 cancel_delayed_work_sync(&conn
->info_work
);
3138 if (result
!= L2CAP_IR_SUCCESS
) {
3139 conn
->info_state
|= L2CAP_INFO_FEAT_MASK_REQ_DONE
;
3140 conn
->info_ident
= 0;
3142 l2cap_conn_start(conn
);
3147 if (type
== L2CAP_IT_FEAT_MASK
) {
3148 conn
->feat_mask
= get_unaligned_le32(rsp
->data
);
3150 if (conn
->feat_mask
& L2CAP_FEAT_FIXED_CHAN
) {
3151 struct l2cap_info_req req
;
3152 req
.type
= cpu_to_le16(L2CAP_IT_FIXED_CHAN
);
3154 conn
->info_ident
= l2cap_get_ident(conn
);
3156 l2cap_send_cmd(conn
, conn
->info_ident
,
3157 L2CAP_INFO_REQ
, sizeof(req
), &req
);
3159 conn
->info_state
|= L2CAP_INFO_FEAT_MASK_REQ_DONE
;
3160 conn
->info_ident
= 0;
3162 l2cap_conn_start(conn
);
3164 } else if (type
== L2CAP_IT_FIXED_CHAN
) {
3165 conn
->info_state
|= L2CAP_INFO_FEAT_MASK_REQ_DONE
;
3166 conn
->info_ident
= 0;
3168 l2cap_conn_start(conn
);
3174 static inline int l2cap_create_channel_req(struct l2cap_conn
*conn
,
3175 struct l2cap_cmd_hdr
*cmd
, u16 cmd_len
,
3178 struct l2cap_create_chan_req
*req
= data
;
3179 struct l2cap_create_chan_rsp rsp
;
3182 if (cmd_len
!= sizeof(*req
))
3188 psm
= le16_to_cpu(req
->psm
);
3189 scid
= le16_to_cpu(req
->scid
);
3191 BT_DBG("psm %d, scid %d, amp_id %d", psm
, scid
, req
->amp_id
);
3193 /* Placeholder: Always reject */
3195 rsp
.scid
= cpu_to_le16(scid
);
3196 rsp
.result
= L2CAP_CR_NO_MEM
;
3197 rsp
.status
= L2CAP_CS_NO_INFO
;
3199 l2cap_send_cmd(conn
, cmd
->ident
, L2CAP_CREATE_CHAN_RSP
,
/*
 * Handle an incoming L2CAP Create Channel Response (AMP).
 *
 * A Create Channel Response carries the same payload as a Connection
 * Response, so it is processed by the same handler.
 */
static inline int l2cap_create_channel_rsp(struct l2cap_conn *conn,
					struct l2cap_cmd_hdr *cmd, void *data)
{
	BT_DBG("conn %p", conn);

	return l2cap_connect_rsp(conn, cmd, data);
}
3213 static void l2cap_send_move_chan_rsp(struct l2cap_conn
*conn
, u8 ident
,
3214 u16 icid
, u16 result
)
3216 struct l2cap_move_chan_rsp rsp
;
3218 BT_DBG("icid %d, result %d", icid
, result
);
3220 rsp
.icid
= cpu_to_le16(icid
);
3221 rsp
.result
= cpu_to_le16(result
);
3223 l2cap_send_cmd(conn
, ident
, L2CAP_MOVE_CHAN_RSP
, sizeof(rsp
), &rsp
);
3226 static void l2cap_send_move_chan_cfm(struct l2cap_conn
*conn
,
3227 struct l2cap_chan
*chan
, u16 icid
, u16 result
)
3229 struct l2cap_move_chan_cfm cfm
;
3232 BT_DBG("icid %d, result %d", icid
, result
);
3234 ident
= l2cap_get_ident(conn
);
3236 chan
->ident
= ident
;
3238 cfm
.icid
= cpu_to_le16(icid
);
3239 cfm
.result
= cpu_to_le16(result
);
3241 l2cap_send_cmd(conn
, ident
, L2CAP_MOVE_CHAN_CFM
, sizeof(cfm
), &cfm
);
3244 static void l2cap_send_move_chan_cfm_rsp(struct l2cap_conn
*conn
, u8 ident
,
3247 struct l2cap_move_chan_cfm_rsp rsp
;
3249 BT_DBG("icid %d", icid
);
3251 rsp
.icid
= cpu_to_le16(icid
);
3252 l2cap_send_cmd(conn
, ident
, L2CAP_MOVE_CHAN_CFM_RSP
, sizeof(rsp
), &rsp
);
3255 static inline int l2cap_move_channel_req(struct l2cap_conn
*conn
,
3256 struct l2cap_cmd_hdr
*cmd
, u16 cmd_len
, void *data
)
3258 struct l2cap_move_chan_req
*req
= data
;
3260 u16 result
= L2CAP_MR_NOT_ALLOWED
;
3262 if (cmd_len
!= sizeof(*req
))
3265 icid
= le16_to_cpu(req
->icid
);
3267 BT_DBG("icid %d, dest_amp_id %d", icid
, req
->dest_amp_id
);
3272 /* Placeholder: Always refuse */
3273 l2cap_send_move_chan_rsp(conn
, cmd
->ident
, icid
, result
);
3278 static inline int l2cap_move_channel_rsp(struct l2cap_conn
*conn
,
3279 struct l2cap_cmd_hdr
*cmd
, u16 cmd_len
, void *data
)
3281 struct l2cap_move_chan_rsp
*rsp
= data
;
3284 if (cmd_len
!= sizeof(*rsp
))
3287 icid
= le16_to_cpu(rsp
->icid
);
3288 result
= le16_to_cpu(rsp
->result
);
3290 BT_DBG("icid %d, result %d", icid
, result
);
3292 /* Placeholder: Always unconfirmed */
3293 l2cap_send_move_chan_cfm(conn
, NULL
, icid
, L2CAP_MC_UNCONFIRMED
);
3298 static inline int l2cap_move_channel_confirm(struct l2cap_conn
*conn
,
3299 struct l2cap_cmd_hdr
*cmd
, u16 cmd_len
, void *data
)
3301 struct l2cap_move_chan_cfm
*cfm
= data
;
3304 if (cmd_len
!= sizeof(*cfm
))
3307 icid
= le16_to_cpu(cfm
->icid
);
3308 result
= le16_to_cpu(cfm
->result
);
3310 BT_DBG("icid %d, result %d", icid
, result
);
3312 l2cap_send_move_chan_cfm_rsp(conn
, cmd
->ident
, icid
);
3317 static inline int l2cap_move_channel_confirm_rsp(struct l2cap_conn
*conn
,
3318 struct l2cap_cmd_hdr
*cmd
, u16 cmd_len
, void *data
)
3320 struct l2cap_move_chan_cfm_rsp
*rsp
= data
;
3323 if (cmd_len
!= sizeof(*rsp
))
3326 icid
= le16_to_cpu(rsp
->icid
);
3328 BT_DBG("icid %d", icid
);
/*
 * Validate LE connection parameters from a Connection Parameter Update
 * Request (values already converted to host order, in protocol units):
 *  - min/max connection interval: min <= max, min >= 6, max <= 3200
 *  - supervision timeout multiplier: 10..3200
 *  - the supervision timeout (to_multiplier * 8, in interval-comparable
 *    units) must exceed the max interval, and the slave latency must
 *    leave at least one connection event inside the timeout window
 *    (latency <= (to_multiplier * 8 / max) - 1) and be <= 499.
 *
 * Returns 0 when the parameters are acceptable, -EINVAL otherwise.
 */
static inline int l2cap_check_conn_param(u16 min, u16 max, u16 latency,
							u16 to_multiplier)
{
	u16 max_latency;

	if (min > max || min < 6 || max > 3200)
		return -EINVAL;

	if (to_multiplier < 10 || to_multiplier > 3200)
		return -EINVAL;

	if (max >= to_multiplier * 8)
		return -EINVAL;

	max_latency = (to_multiplier * 8 / max) - 1;
	if (latency > 499 || latency > max_latency)
		return -EINVAL;

	return 0;
}
3354 static inline int l2cap_conn_param_update_req(struct l2cap_conn
*conn
,
3355 struct l2cap_cmd_hdr
*cmd
, u8
*data
)
3357 struct hci_conn
*hcon
= conn
->hcon
;
3358 struct l2cap_conn_param_update_req
*req
;
3359 struct l2cap_conn_param_update_rsp rsp
;
3360 u16 min
, max
, latency
, to_multiplier
, cmd_len
;
3363 if (!(hcon
->link_mode
& HCI_LM_MASTER
))
3366 cmd_len
= __le16_to_cpu(cmd
->len
);
3367 if (cmd_len
!= sizeof(struct l2cap_conn_param_update_req
))
3370 req
= (struct l2cap_conn_param_update_req
*) data
;
3371 min
= __le16_to_cpu(req
->min
);
3372 max
= __le16_to_cpu(req
->max
);
3373 latency
= __le16_to_cpu(req
->latency
);
3374 to_multiplier
= __le16_to_cpu(req
->to_multiplier
);
3376 BT_DBG("min 0x%4.4x max 0x%4.4x latency: 0x%4.4x Timeout: 0x%4.4x",
3377 min
, max
, latency
, to_multiplier
);
3379 memset(&rsp
, 0, sizeof(rsp
));
3381 err
= l2cap_check_conn_param(min
, max
, latency
, to_multiplier
);
3383 rsp
.result
= cpu_to_le16(L2CAP_CONN_PARAM_REJECTED
);
3385 rsp
.result
= cpu_to_le16(L2CAP_CONN_PARAM_ACCEPTED
);
3387 l2cap_send_cmd(conn
, cmd
->ident
, L2CAP_CONN_PARAM_UPDATE_RSP
,
3391 hci_le_conn_update(hcon
, min
, max
, latency
, to_multiplier
);
3396 static inline int l2cap_bredr_sig_cmd(struct l2cap_conn
*conn
,
3397 struct l2cap_cmd_hdr
*cmd
, u16 cmd_len
, u8
*data
)
3401 switch (cmd
->code
) {
3402 case L2CAP_COMMAND_REJ
:
3403 l2cap_command_rej(conn
, cmd
, data
);
3406 case L2CAP_CONN_REQ
:
3407 err
= l2cap_connect_req(conn
, cmd
, data
);
3410 case L2CAP_CONN_RSP
:
3411 err
= l2cap_connect_rsp(conn
, cmd
, data
);
3414 case L2CAP_CONF_REQ
:
3415 err
= l2cap_config_req(conn
, cmd
, cmd_len
, data
);
3418 case L2CAP_CONF_RSP
:
3419 err
= l2cap_config_rsp(conn
, cmd
, data
);
3422 case L2CAP_DISCONN_REQ
:
3423 err
= l2cap_disconnect_req(conn
, cmd
, data
);
3426 case L2CAP_DISCONN_RSP
:
3427 err
= l2cap_disconnect_rsp(conn
, cmd
, data
);
3430 case L2CAP_ECHO_REQ
:
3431 l2cap_send_cmd(conn
, cmd
->ident
, L2CAP_ECHO_RSP
, cmd_len
, data
);
3434 case L2CAP_ECHO_RSP
:
3437 case L2CAP_INFO_REQ
:
3438 err
= l2cap_information_req(conn
, cmd
, data
);
3441 case L2CAP_INFO_RSP
:
3442 err
= l2cap_information_rsp(conn
, cmd
, data
);
3445 case L2CAP_CREATE_CHAN_REQ
:
3446 err
= l2cap_create_channel_req(conn
, cmd
, cmd_len
, data
);
3449 case L2CAP_CREATE_CHAN_RSP
:
3450 err
= l2cap_create_channel_rsp(conn
, cmd
, data
);
3453 case L2CAP_MOVE_CHAN_REQ
:
3454 err
= l2cap_move_channel_req(conn
, cmd
, cmd_len
, data
);
3457 case L2CAP_MOVE_CHAN_RSP
:
3458 err
= l2cap_move_channel_rsp(conn
, cmd
, cmd_len
, data
);
3461 case L2CAP_MOVE_CHAN_CFM
:
3462 err
= l2cap_move_channel_confirm(conn
, cmd
, cmd_len
, data
);
3465 case L2CAP_MOVE_CHAN_CFM_RSP
:
3466 err
= l2cap_move_channel_confirm_rsp(conn
, cmd
, cmd_len
, data
);
3470 BT_ERR("Unknown BR/EDR signaling command 0x%2.2x", cmd
->code
);
3478 static inline int l2cap_le_sig_cmd(struct l2cap_conn
*conn
,
3479 struct l2cap_cmd_hdr
*cmd
, u8
*data
)
3481 switch (cmd
->code
) {
3482 case L2CAP_COMMAND_REJ
:
3485 case L2CAP_CONN_PARAM_UPDATE_REQ
:
3486 return l2cap_conn_param_update_req(conn
, cmd
, data
);
3488 case L2CAP_CONN_PARAM_UPDATE_RSP
:
3492 BT_ERR("Unknown LE signaling command 0x%2.2x", cmd
->code
);
3497 static inline void l2cap_sig_channel(struct l2cap_conn
*conn
,
3498 struct sk_buff
*skb
)
3500 u8
*data
= skb
->data
;
3502 struct l2cap_cmd_hdr cmd
;
3505 l2cap_raw_recv(conn
, skb
);
3507 while (len
>= L2CAP_CMD_HDR_SIZE
) {
3509 memcpy(&cmd
, data
, L2CAP_CMD_HDR_SIZE
);
3510 data
+= L2CAP_CMD_HDR_SIZE
;
3511 len
-= L2CAP_CMD_HDR_SIZE
;
3513 cmd_len
= le16_to_cpu(cmd
.len
);
3515 BT_DBG("code 0x%2.2x len %d id 0x%2.2x", cmd
.code
, cmd_len
, cmd
.ident
);
3517 if (cmd_len
> len
|| !cmd
.ident
) {
3518 BT_DBG("corrupted command");
3522 if (conn
->hcon
->type
== LE_LINK
)
3523 err
= l2cap_le_sig_cmd(conn
, &cmd
, data
);
3525 err
= l2cap_bredr_sig_cmd(conn
, &cmd
, cmd_len
, data
);
3528 struct l2cap_cmd_rej_unk rej
;
3530 BT_ERR("Wrong link type (%d)", err
);
3532 /* FIXME: Map err to a valid reason */
3533 rej
.reason
= cpu_to_le16(L2CAP_REJ_NOT_UNDERSTOOD
);
3534 l2cap_send_cmd(conn
, cmd
.ident
, L2CAP_COMMAND_REJ
, sizeof(rej
), &rej
);
3544 static int l2cap_check_fcs(struct l2cap_chan
*chan
, struct sk_buff
*skb
)
3546 u16 our_fcs
, rcv_fcs
;
3549 if (test_bit(FLAG_EXT_CTRL
, &chan
->flags
))
3550 hdr_size
= L2CAP_EXT_HDR_SIZE
;
3552 hdr_size
= L2CAP_ENH_HDR_SIZE
;
3554 if (chan
->fcs
== L2CAP_FCS_CRC16
) {
3555 skb_trim(skb
, skb
->len
- L2CAP_FCS_SIZE
);
3556 rcv_fcs
= get_unaligned_le16(skb
->data
+ skb
->len
);
3557 our_fcs
= crc16(0, skb
->data
- hdr_size
, skb
->len
+ hdr_size
);
3559 if (our_fcs
!= rcv_fcs
)
3565 static inline void l2cap_send_i_or_rr_or_rnr(struct l2cap_chan
*chan
)
3569 chan
->frames_sent
= 0;
3571 control
|= __set_reqseq(chan
, chan
->buffer_seq
);
3573 if (test_bit(CONN_LOCAL_BUSY
, &chan
->conn_state
)) {
3574 control
|= __set_ctrl_super(chan
, L2CAP_SUPER_RNR
);
3575 l2cap_send_sframe(chan
, control
);
3576 set_bit(CONN_RNR_SENT
, &chan
->conn_state
);
3579 if (test_bit(CONN_REMOTE_BUSY
, &chan
->conn_state
))
3580 l2cap_retransmit_frames(chan
);
3582 l2cap_ertm_send(chan
);
3584 if (!test_bit(CONN_LOCAL_BUSY
, &chan
->conn_state
) &&
3585 chan
->frames_sent
== 0) {
3586 control
|= __set_ctrl_super(chan
, L2CAP_SUPER_RR
);
3587 l2cap_send_sframe(chan
, control
);
3591 static int l2cap_add_to_srej_queue(struct l2cap_chan
*chan
, struct sk_buff
*skb
, u16 tx_seq
, u8 sar
)
3593 struct sk_buff
*next_skb
;
3594 int tx_seq_offset
, next_tx_seq_offset
;
3596 bt_cb(skb
)->tx_seq
= tx_seq
;
3597 bt_cb(skb
)->sar
= sar
;
3599 next_skb
= skb_peek(&chan
->srej_q
);
3601 tx_seq_offset
= __seq_offset(chan
, tx_seq
, chan
->buffer_seq
);
3604 if (bt_cb(next_skb
)->tx_seq
== tx_seq
)
3607 next_tx_seq_offset
= __seq_offset(chan
,
3608 bt_cb(next_skb
)->tx_seq
, chan
->buffer_seq
);
3610 if (next_tx_seq_offset
> tx_seq_offset
) {
3611 __skb_queue_before(&chan
->srej_q
, next_skb
, skb
);
3615 if (skb_queue_is_last(&chan
->srej_q
, next_skb
))
3618 next_skb
= skb_queue_next(&chan
->srej_q
, next_skb
);
3621 __skb_queue_tail(&chan
->srej_q
, skb
);
3626 static void append_skb_frag(struct sk_buff
*skb
,
3627 struct sk_buff
*new_frag
, struct sk_buff
**last_frag
)
3629 /* skb->len reflects data in skb as well as all fragments
3630 * skb->data_len reflects only data in fragments
3632 if (!skb_has_frag_list(skb
))
3633 skb_shinfo(skb
)->frag_list
= new_frag
;
3635 new_frag
->next
= NULL
;
3637 (*last_frag
)->next
= new_frag
;
3638 *last_frag
= new_frag
;
3640 skb
->len
+= new_frag
->len
;
3641 skb
->data_len
+= new_frag
->len
;
3642 skb
->truesize
+= new_frag
->truesize
;
3645 static int l2cap_reassemble_sdu(struct l2cap_chan
*chan
, struct sk_buff
*skb
, u32 control
)
3649 switch (__get_ctrl_sar(chan
, control
)) {
3650 case L2CAP_SAR_UNSEGMENTED
:
3654 err
= chan
->ops
->recv(chan
->data
, skb
);
3657 case L2CAP_SAR_START
:
3661 chan
->sdu_len
= get_unaligned_le16(skb
->data
);
3662 skb_pull(skb
, L2CAP_SDULEN_SIZE
);
3664 if (chan
->sdu_len
> chan
->imtu
) {
3669 if (skb
->len
>= chan
->sdu_len
)
3673 chan
->sdu_last_frag
= skb
;
3679 case L2CAP_SAR_CONTINUE
:
3683 append_skb_frag(chan
->sdu
, skb
,
3684 &chan
->sdu_last_frag
);
3687 if (chan
->sdu
->len
>= chan
->sdu_len
)
3697 append_skb_frag(chan
->sdu
, skb
,
3698 &chan
->sdu_last_frag
);
3701 if (chan
->sdu
->len
!= chan
->sdu_len
)
3704 err
= chan
->ops
->recv(chan
->data
, chan
->sdu
);
3707 /* Reassembly complete */
3709 chan
->sdu_last_frag
= NULL
;
3717 kfree_skb(chan
->sdu
);
3719 chan
->sdu_last_frag
= NULL
;
3726 static void l2cap_ertm_enter_local_busy(struct l2cap_chan
*chan
)
3730 BT_DBG("chan %p, Enter local busy", chan
);
3732 set_bit(CONN_LOCAL_BUSY
, &chan
->conn_state
);
3734 control
= __set_reqseq(chan
, chan
->buffer_seq
);
3735 control
|= __set_ctrl_super(chan
, L2CAP_SUPER_RNR
);
3736 l2cap_send_sframe(chan
, control
);
3738 set_bit(CONN_RNR_SENT
, &chan
->conn_state
);
3740 __clear_ack_timer(chan
);
3743 static void l2cap_ertm_exit_local_busy(struct l2cap_chan
*chan
)
3747 if (!test_bit(CONN_RNR_SENT
, &chan
->conn_state
))
3750 control
= __set_reqseq(chan
, chan
->buffer_seq
);
3751 control
|= __set_ctrl_poll(chan
);
3752 control
|= __set_ctrl_super(chan
, L2CAP_SUPER_RR
);
3753 l2cap_send_sframe(chan
, control
);
3754 chan
->retry_count
= 1;
3756 __clear_retrans_timer(chan
);
3757 __set_monitor_timer(chan
);
3759 set_bit(CONN_WAIT_F
, &chan
->conn_state
);
3762 clear_bit(CONN_LOCAL_BUSY
, &chan
->conn_state
);
3763 clear_bit(CONN_RNR_SENT
, &chan
->conn_state
);
3765 BT_DBG("chan %p, Exit local busy", chan
);
3768 void l2cap_chan_busy(struct l2cap_chan
*chan
, int busy
)
3770 if (chan
->mode
== L2CAP_MODE_ERTM
) {
3772 l2cap_ertm_enter_local_busy(chan
);
3774 l2cap_ertm_exit_local_busy(chan
);
3778 static void l2cap_check_srej_gap(struct l2cap_chan
*chan
, u16 tx_seq
)
3780 struct sk_buff
*skb
;
3783 while ((skb
= skb_peek(&chan
->srej_q
)) &&
3784 !test_bit(CONN_LOCAL_BUSY
, &chan
->conn_state
)) {
3787 if (bt_cb(skb
)->tx_seq
!= tx_seq
)
3790 skb
= skb_dequeue(&chan
->srej_q
);
3791 control
= __set_ctrl_sar(chan
, bt_cb(skb
)->sar
);
3792 err
= l2cap_reassemble_sdu(chan
, skb
, control
);
3795 l2cap_send_disconn_req(chan
->conn
, chan
, ECONNRESET
);
3799 chan
->buffer_seq_srej
= __next_seq(chan
, chan
->buffer_seq_srej
);
3800 tx_seq
= __next_seq(chan
, tx_seq
);
3804 static void l2cap_resend_srejframe(struct l2cap_chan
*chan
, u16 tx_seq
)
3806 struct srej_list
*l
, *tmp
;
3809 list_for_each_entry_safe(l
, tmp
, &chan
->srej_l
, list
) {
3810 if (l
->tx_seq
== tx_seq
) {
3815 control
= __set_ctrl_super(chan
, L2CAP_SUPER_SREJ
);
3816 control
|= __set_reqseq(chan
, l
->tx_seq
);
3817 l2cap_send_sframe(chan
, control
);
3819 list_add_tail(&l
->list
, &chan
->srej_l
);
3823 static int l2cap_send_srejframe(struct l2cap_chan
*chan
, u16 tx_seq
)
3825 struct srej_list
*new;
3828 while (tx_seq
!= chan
->expected_tx_seq
) {
3829 control
= __set_ctrl_super(chan
, L2CAP_SUPER_SREJ
);
3830 control
|= __set_reqseq(chan
, chan
->expected_tx_seq
);
3831 l2cap_send_sframe(chan
, control
);
3833 new = kzalloc(sizeof(struct srej_list
), GFP_ATOMIC
);
3837 new->tx_seq
= chan
->expected_tx_seq
;
3839 chan
->expected_tx_seq
= __next_seq(chan
, chan
->expected_tx_seq
);
3841 list_add_tail(&new->list
, &chan
->srej_l
);
3844 chan
->expected_tx_seq
= __next_seq(chan
, chan
->expected_tx_seq
);
3849 static inline int l2cap_data_channel_iframe(struct l2cap_chan
*chan
, u32 rx_control
, struct sk_buff
*skb
)
3851 u16 tx_seq
= __get_txseq(chan
, rx_control
);
3852 u16 req_seq
= __get_reqseq(chan
, rx_control
);
3853 u8 sar
= __get_ctrl_sar(chan
, rx_control
);
3854 int tx_seq_offset
, expected_tx_seq_offset
;
3855 int num_to_ack
= (chan
->tx_win
/6) + 1;
3858 BT_DBG("chan %p len %d tx_seq %d rx_control 0x%8.8x", chan
, skb
->len
,
3859 tx_seq
, rx_control
);
3861 if (__is_ctrl_final(chan
, rx_control
) &&
3862 test_bit(CONN_WAIT_F
, &chan
->conn_state
)) {
3863 __clear_monitor_timer(chan
);
3864 if (chan
->unacked_frames
> 0)
3865 __set_retrans_timer(chan
);
3866 clear_bit(CONN_WAIT_F
, &chan
->conn_state
);
3869 chan
->expected_ack_seq
= req_seq
;
3870 l2cap_drop_acked_frames(chan
);
3872 tx_seq_offset
= __seq_offset(chan
, tx_seq
, chan
->buffer_seq
);
3874 /* invalid tx_seq */
3875 if (tx_seq_offset
>= chan
->tx_win
) {
3876 l2cap_send_disconn_req(chan
->conn
, chan
, ECONNRESET
);
3880 if (test_bit(CONN_LOCAL_BUSY
, &chan
->conn_state
))
3883 if (tx_seq
== chan
->expected_tx_seq
)
3886 if (test_bit(CONN_SREJ_SENT
, &chan
->conn_state
)) {
3887 struct srej_list
*first
;
3889 first
= list_first_entry(&chan
->srej_l
,
3890 struct srej_list
, list
);
3891 if (tx_seq
== first
->tx_seq
) {
3892 l2cap_add_to_srej_queue(chan
, skb
, tx_seq
, sar
);
3893 l2cap_check_srej_gap(chan
, tx_seq
);
3895 list_del(&first
->list
);
3898 if (list_empty(&chan
->srej_l
)) {
3899 chan
->buffer_seq
= chan
->buffer_seq_srej
;
3900 clear_bit(CONN_SREJ_SENT
, &chan
->conn_state
);
3901 l2cap_send_ack(chan
);
3902 BT_DBG("chan %p, Exit SREJ_SENT", chan
);
3905 struct srej_list
*l
;
3907 /* duplicated tx_seq */
3908 if (l2cap_add_to_srej_queue(chan
, skb
, tx_seq
, sar
) < 0)
3911 list_for_each_entry(l
, &chan
->srej_l
, list
) {
3912 if (l
->tx_seq
== tx_seq
) {
3913 l2cap_resend_srejframe(chan
, tx_seq
);
3918 err
= l2cap_send_srejframe(chan
, tx_seq
);
3920 l2cap_send_disconn_req(chan
->conn
, chan
, -err
);
3925 expected_tx_seq_offset
= __seq_offset(chan
,
3926 chan
->expected_tx_seq
, chan
->buffer_seq
);
3928 /* duplicated tx_seq */
3929 if (tx_seq_offset
< expected_tx_seq_offset
)
3932 set_bit(CONN_SREJ_SENT
, &chan
->conn_state
);
3934 BT_DBG("chan %p, Enter SREJ", chan
);
3936 INIT_LIST_HEAD(&chan
->srej_l
);
3937 chan
->buffer_seq_srej
= chan
->buffer_seq
;
3939 __skb_queue_head_init(&chan
->srej_q
);
3940 l2cap_add_to_srej_queue(chan
, skb
, tx_seq
, sar
);
3942 set_bit(CONN_SEND_PBIT
, &chan
->conn_state
);
3944 err
= l2cap_send_srejframe(chan
, tx_seq
);
3946 l2cap_send_disconn_req(chan
->conn
, chan
, -err
);
3950 __clear_ack_timer(chan
);
3955 chan
->expected_tx_seq
= __next_seq(chan
, chan
->expected_tx_seq
);
3957 if (test_bit(CONN_SREJ_SENT
, &chan
->conn_state
)) {
3958 bt_cb(skb
)->tx_seq
= tx_seq
;
3959 bt_cb(skb
)->sar
= sar
;
3960 __skb_queue_tail(&chan
->srej_q
, skb
);
3964 err
= l2cap_reassemble_sdu(chan
, skb
, rx_control
);
3965 chan
->buffer_seq
= __next_seq(chan
, chan
->buffer_seq
);
3968 l2cap_send_disconn_req(chan
->conn
, chan
, ECONNRESET
);
3972 if (__is_ctrl_final(chan
, rx_control
)) {
3973 if (!test_and_clear_bit(CONN_REJ_ACT
, &chan
->conn_state
))
3974 l2cap_retransmit_frames(chan
);
3978 chan
->num_acked
= (chan
->num_acked
+ 1) % num_to_ack
;
3979 if (chan
->num_acked
== num_to_ack
- 1)
3980 l2cap_send_ack(chan
);
3982 __set_ack_timer(chan
);
3991 static inline void l2cap_data_channel_rrframe(struct l2cap_chan
*chan
, u32 rx_control
)
3993 BT_DBG("chan %p, req_seq %d ctrl 0x%8.8x", chan
,
3994 __get_reqseq(chan
, rx_control
), rx_control
);
3996 chan
->expected_ack_seq
= __get_reqseq(chan
, rx_control
);
3997 l2cap_drop_acked_frames(chan
);
3999 if (__is_ctrl_poll(chan
, rx_control
)) {
4000 set_bit(CONN_SEND_FBIT
, &chan
->conn_state
);
4001 if (test_bit(CONN_SREJ_SENT
, &chan
->conn_state
)) {
4002 if (test_bit(CONN_REMOTE_BUSY
, &chan
->conn_state
) &&
4003 (chan
->unacked_frames
> 0))
4004 __set_retrans_timer(chan
);
4006 clear_bit(CONN_REMOTE_BUSY
, &chan
->conn_state
);
4007 l2cap_send_srejtail(chan
);
4009 l2cap_send_i_or_rr_or_rnr(chan
);
4012 } else if (__is_ctrl_final(chan
, rx_control
)) {
4013 clear_bit(CONN_REMOTE_BUSY
, &chan
->conn_state
);
4015 if (!test_and_clear_bit(CONN_REJ_ACT
, &chan
->conn_state
))
4016 l2cap_retransmit_frames(chan
);
4019 if (test_bit(CONN_REMOTE_BUSY
, &chan
->conn_state
) &&
4020 (chan
->unacked_frames
> 0))
4021 __set_retrans_timer(chan
);
4023 clear_bit(CONN_REMOTE_BUSY
, &chan
->conn_state
);
4024 if (test_bit(CONN_SREJ_SENT
, &chan
->conn_state
))
4025 l2cap_send_ack(chan
);
4027 l2cap_ertm_send(chan
);
4031 static inline void l2cap_data_channel_rejframe(struct l2cap_chan
*chan
, u32 rx_control
)
4033 u16 tx_seq
= __get_reqseq(chan
, rx_control
);
4035 BT_DBG("chan %p, req_seq %d ctrl 0x%8.8x", chan
, tx_seq
, rx_control
);
4037 clear_bit(CONN_REMOTE_BUSY
, &chan
->conn_state
);
4039 chan
->expected_ack_seq
= tx_seq
;
4040 l2cap_drop_acked_frames(chan
);
4042 if (__is_ctrl_final(chan
, rx_control
)) {
4043 if (!test_and_clear_bit(CONN_REJ_ACT
, &chan
->conn_state
))
4044 l2cap_retransmit_frames(chan
);
4046 l2cap_retransmit_frames(chan
);
4048 if (test_bit(CONN_WAIT_F
, &chan
->conn_state
))
4049 set_bit(CONN_REJ_ACT
, &chan
->conn_state
);
4052 static inline void l2cap_data_channel_srejframe(struct l2cap_chan
*chan
, u32 rx_control
)
4054 u16 tx_seq
= __get_reqseq(chan
, rx_control
);
4056 BT_DBG("chan %p, req_seq %d ctrl 0x%8.8x", chan
, tx_seq
, rx_control
);
4058 clear_bit(CONN_REMOTE_BUSY
, &chan
->conn_state
);
4060 if (__is_ctrl_poll(chan
, rx_control
)) {
4061 chan
->expected_ack_seq
= tx_seq
;
4062 l2cap_drop_acked_frames(chan
);
4064 set_bit(CONN_SEND_FBIT
, &chan
->conn_state
);
4065 l2cap_retransmit_one_frame(chan
, tx_seq
);
4067 l2cap_ertm_send(chan
);
4069 if (test_bit(CONN_WAIT_F
, &chan
->conn_state
)) {
4070 chan
->srej_save_reqseq
= tx_seq
;
4071 set_bit(CONN_SREJ_ACT
, &chan
->conn_state
);
4073 } else if (__is_ctrl_final(chan
, rx_control
)) {
4074 if (test_bit(CONN_SREJ_ACT
, &chan
->conn_state
) &&
4075 chan
->srej_save_reqseq
== tx_seq
)
4076 clear_bit(CONN_SREJ_ACT
, &chan
->conn_state
);
4078 l2cap_retransmit_one_frame(chan
, tx_seq
);
4080 l2cap_retransmit_one_frame(chan
, tx_seq
);
4081 if (test_bit(CONN_WAIT_F
, &chan
->conn_state
)) {
4082 chan
->srej_save_reqseq
= tx_seq
;
4083 set_bit(CONN_SREJ_ACT
, &chan
->conn_state
);
4088 static inline void l2cap_data_channel_rnrframe(struct l2cap_chan
*chan
, u32 rx_control
)
4090 u16 tx_seq
= __get_reqseq(chan
, rx_control
);
4092 BT_DBG("chan %p, req_seq %d ctrl 0x%8.8x", chan
, tx_seq
, rx_control
);
4094 set_bit(CONN_REMOTE_BUSY
, &chan
->conn_state
);
4095 chan
->expected_ack_seq
= tx_seq
;
4096 l2cap_drop_acked_frames(chan
);
4098 if (__is_ctrl_poll(chan
, rx_control
))
4099 set_bit(CONN_SEND_FBIT
, &chan
->conn_state
);
4101 if (!test_bit(CONN_SREJ_SENT
, &chan
->conn_state
)) {
4102 __clear_retrans_timer(chan
);
4103 if (__is_ctrl_poll(chan
, rx_control
))
4104 l2cap_send_rr_or_rnr(chan
, L2CAP_CTRL_FINAL
);
4108 if (__is_ctrl_poll(chan
, rx_control
)) {
4109 l2cap_send_srejtail(chan
);
4111 rx_control
= __set_ctrl_super(chan
, L2CAP_SUPER_RR
);
4112 l2cap_send_sframe(chan
, rx_control
);
4116 static inline int l2cap_data_channel_sframe(struct l2cap_chan
*chan
, u32 rx_control
, struct sk_buff
*skb
)
4118 BT_DBG("chan %p rx_control 0x%8.8x len %d", chan
, rx_control
, skb
->len
);
4120 if (__is_ctrl_final(chan
, rx_control
) &&
4121 test_bit(CONN_WAIT_F
, &chan
->conn_state
)) {
4122 __clear_monitor_timer(chan
);
4123 if (chan
->unacked_frames
> 0)
4124 __set_retrans_timer(chan
);
4125 clear_bit(CONN_WAIT_F
, &chan
->conn_state
);
4128 switch (__get_ctrl_super(chan
, rx_control
)) {
4129 case L2CAP_SUPER_RR
:
4130 l2cap_data_channel_rrframe(chan
, rx_control
);
4133 case L2CAP_SUPER_REJ
:
4134 l2cap_data_channel_rejframe(chan
, rx_control
);
4137 case L2CAP_SUPER_SREJ
:
4138 l2cap_data_channel_srejframe(chan
, rx_control
);
4141 case L2CAP_SUPER_RNR
:
4142 l2cap_data_channel_rnrframe(chan
, rx_control
);
4150 static int l2cap_ertm_data_rcv(struct sock
*sk
, struct sk_buff
*skb
)
4152 struct l2cap_chan
*chan
= l2cap_pi(sk
)->chan
;
4155 int len
, next_tx_seq_offset
, req_seq_offset
;
4157 control
= __get_control(chan
, skb
->data
);
4158 skb_pull(skb
, __ctrl_size(chan
));
4162 * We can just drop the corrupted I-frame here.
4163 * Receiver will miss it and start proper recovery
4164 * procedures and ask retransmission.
4166 if (l2cap_check_fcs(chan
, skb
))
4169 if (__is_sar_start(chan
, control
) && !__is_sframe(chan
, control
))
4170 len
-= L2CAP_SDULEN_SIZE
;
4172 if (chan
->fcs
== L2CAP_FCS_CRC16
)
4173 len
-= L2CAP_FCS_SIZE
;
4175 if (len
> chan
->mps
) {
4176 l2cap_send_disconn_req(chan
->conn
, chan
, ECONNRESET
);
4180 req_seq
= __get_reqseq(chan
, control
);
4182 req_seq_offset
= __seq_offset(chan
, req_seq
, chan
->expected_ack_seq
);
4184 next_tx_seq_offset
= __seq_offset(chan
, chan
->next_tx_seq
,
4185 chan
->expected_ack_seq
);
4187 /* check for invalid req-seq */
4188 if (req_seq_offset
> next_tx_seq_offset
) {
4189 l2cap_send_disconn_req(chan
->conn
, chan
, ECONNRESET
);
4193 if (!__is_sframe(chan
, control
)) {
4195 l2cap_send_disconn_req(chan
->conn
, chan
, ECONNRESET
);
4199 l2cap_data_channel_iframe(chan
, control
, skb
);
4203 l2cap_send_disconn_req(chan
->conn
, chan
, ECONNRESET
);
4207 l2cap_data_channel_sframe(chan
, control
, skb
);
4217 static inline int l2cap_data_channel(struct l2cap_conn
*conn
, u16 cid
, struct sk_buff
*skb
)
4219 struct l2cap_chan
*chan
;
4220 struct sock
*sk
= NULL
;
4225 chan
= l2cap_get_chan_by_scid(conn
, cid
);
4227 BT_DBG("unknown cid 0x%4.4x", cid
);
4233 BT_DBG("chan %p, len %d", chan
, skb
->len
);
4235 if (chan
->state
!= BT_CONNECTED
)
4238 switch (chan
->mode
) {
4239 case L2CAP_MODE_BASIC
:
4240 /* If socket recv buffers overflows we drop data here
4241 * which is *bad* because L2CAP has to be reliable.
4242 * But we don't have any other choice. L2CAP doesn't
4243 * provide flow control mechanism. */
4245 if (chan
->imtu
< skb
->len
)
4248 if (!chan
->ops
->recv(chan
->data
, skb
))
4252 case L2CAP_MODE_ERTM
:
4253 l2cap_ertm_data_rcv(sk
, skb
);
4257 case L2CAP_MODE_STREAMING
:
4258 control
= __get_control(chan
, skb
->data
);
4259 skb_pull(skb
, __ctrl_size(chan
));
4262 if (l2cap_check_fcs(chan
, skb
))
4265 if (__is_sar_start(chan
, control
))
4266 len
-= L2CAP_SDULEN_SIZE
;
4268 if (chan
->fcs
== L2CAP_FCS_CRC16
)
4269 len
-= L2CAP_FCS_SIZE
;
4271 if (len
> chan
->mps
|| len
< 0 || __is_sframe(chan
, control
))
4274 tx_seq
= __get_txseq(chan
, control
);
4276 if (chan
->expected_tx_seq
!= tx_seq
) {
4277 /* Frame(s) missing - must discard partial SDU */
4278 kfree_skb(chan
->sdu
);
4280 chan
->sdu_last_frag
= NULL
;
4283 /* TODO: Notify userland of missing data */
4286 chan
->expected_tx_seq
= __next_seq(chan
, tx_seq
);
4288 if (l2cap_reassemble_sdu(chan
, skb
, control
) == -EMSGSIZE
)
4289 l2cap_send_disconn_req(chan
->conn
, chan
, ECONNRESET
);
4294 BT_DBG("chan %p: bad mode 0x%2.2x", chan
, chan
->mode
);
4308 static inline int l2cap_conless_channel(struct l2cap_conn
*conn
, __le16 psm
, struct sk_buff
*skb
)
4310 struct sock
*sk
= NULL
;
4311 struct l2cap_chan
*chan
;
4313 chan
= l2cap_global_chan_by_psm(0, psm
, conn
->src
);
4321 BT_DBG("sk %p, len %d", sk
, skb
->len
);
4323 if (chan
->state
!= BT_BOUND
&& chan
->state
!= BT_CONNECTED
)
4326 if (chan
->imtu
< skb
->len
)
4329 if (!chan
->ops
->recv(chan
->data
, skb
))
4341 static inline int l2cap_att_channel(struct l2cap_conn
*conn
, __le16 cid
, struct sk_buff
*skb
)
4343 struct sock
*sk
= NULL
;
4344 struct l2cap_chan
*chan
;
4346 chan
= l2cap_global_chan_by_scid(0, cid
, conn
->src
);
4354 BT_DBG("sk %p, len %d", sk
, skb
->len
);
4356 if (chan
->state
!= BT_BOUND
&& chan
->state
!= BT_CONNECTED
)
4359 if (chan
->imtu
< skb
->len
)
4362 if (!chan
->ops
->recv(chan
->data
, skb
))
4374 static void l2cap_recv_frame(struct l2cap_conn
*conn
, struct sk_buff
*skb
)
4376 struct l2cap_hdr
*lh
= (void *) skb
->data
;
4380 skb_pull(skb
, L2CAP_HDR_SIZE
);
4381 cid
= __le16_to_cpu(lh
->cid
);
4382 len
= __le16_to_cpu(lh
->len
);
4384 if (len
!= skb
->len
) {
4389 BT_DBG("len %d, cid 0x%4.4x", len
, cid
);
4392 case L2CAP_CID_LE_SIGNALING
:
4393 case L2CAP_CID_SIGNALING
:
4394 l2cap_sig_channel(conn
, skb
);
4397 case L2CAP_CID_CONN_LESS
:
4398 psm
= get_unaligned_le16(skb
->data
);
4400 l2cap_conless_channel(conn
, psm
, skb
);
4403 case L2CAP_CID_LE_DATA
:
4404 l2cap_att_channel(conn
, cid
, skb
);
4408 if (smp_sig_channel(conn
, skb
))
4409 l2cap_conn_del(conn
->hcon
, EACCES
);
4413 l2cap_data_channel(conn
, cid
, skb
);
4418 /* ---- L2CAP interface with lower layer (HCI) ---- */
/*
 * HCI callback: an incoming ACL connection from @bdaddr is being
 * indicated. Scan all listening L2CAP channels and build a link-mode
 * mask (HCI_LM_ACCEPT, optionally HCI_LM_MASTER for channels with
 * FLAG_ROLE_SWITCH). lm1 collects modes from channels bound exactly to
 * this adapter's address, lm2 from wildcard (BDADDR_ANY) listeners;
 * the exact-match set takes precedence when present.
 */
4420 static int l2cap_connect_ind(struct hci_dev
*hdev
, bdaddr_t
*bdaddr
, u8 type
)
4422 int exact
= 0, lm1
= 0, lm2
= 0;
4423 struct l2cap_chan
*c
;
/* Only ACL links can carry BR/EDR L2CAP connections. */
4425 if (type
!= ACL_LINK
)
4428 BT_DBG("hdev %s, bdaddr %s", hdev
->name
, batostr(bdaddr
));
4430 /* Find listening sockets and check their link_mode */
4431 read_lock(&chan_list_lock
);
4432 list_for_each_entry(c
, &chan_list
, global_l
) {
4433 struct sock
*sk
= c
->sk
;
4435 if (c
->state
!= BT_LISTEN
)
/* Channel bound exactly to this adapter's address. */
4438 if (!bacmp(&bt_sk(sk
)->src
, &hdev
->bdaddr
)) {
4439 lm1
|= HCI_LM_ACCEPT
;
4440 if (test_bit(FLAG_ROLE_SWITCH
, &c
->flags
))
4441 lm1
|= HCI_LM_MASTER
;
/* Wildcard listener: accepts connections on any adapter. */
4443 } else if (!bacmp(&bt_sk(sk
)->src
, BDADDR_ANY
)) {
4444 lm2
|= HCI_LM_ACCEPT
;
4445 if (test_bit(FLAG_ROLE_SWITCH
, &c
->flags
))
4446 lm2
|= HCI_LM_MASTER
;
4449 read_unlock(&chan_list_lock
);
4451 return exact
? lm1
: lm2
;
/*
 * HCI callback: connection establishment for @hcon has completed with
 * @status. On success an l2cap_conn is attached to the hci_conn and
 * brought up; on failure the connection state is torn down with the
 * status translated to an errno.
 */
4454 static int l2cap_connect_cfm(struct hci_conn
*hcon
, u8 status
)
4456 struct l2cap_conn
*conn
;
4458 BT_DBG("hcon %p bdaddr %s status %d", hcon
, batostr(&hcon
->dst
), status
);
/* L2CAP only runs over ACL and LE links. */
4460 if (!(hcon
->type
== ACL_LINK
|| hcon
->type
== LE_LINK
))
4464 conn
= l2cap_conn_add(hcon
, status
);
4466 l2cap_conn_ready(conn
);
/* Failure path: propagate the HCI status as an errno. */
4468 l2cap_conn_del(hcon
, bt_to_errno(status
));
/*
 * HCI callback: report the reason code to use when disconnecting @hcon.
 * Falls back to HCI_ERROR_REMOTE_USER_TERM when the link type is not
 * ACL/LE or no L2CAP state is attached; otherwise returns the reason
 * recorded on the l2cap_conn.
 */
4473 static int l2cap_disconn_ind(struct hci_conn
*hcon
)
4475 struct l2cap_conn
*conn
= hcon
->l2cap_data
;
4477 BT_DBG("hcon %p", hcon
);
4479 if ((hcon
->type
!= ACL_LINK
&& hcon
->type
!= LE_LINK
) || !conn
)
4480 return HCI_ERROR_REMOTE_USER_TERM
;
4482 return conn
->disc_reason
;
/*
 * HCI callback: the link @hcon has been disconnected with @reason.
 * Tear down the associated L2CAP connection state, translating the HCI
 * reason code to an errno for the channels being closed.
 */
4485 static int l2cap_disconn_cfm(struct hci_conn
*hcon
, u8 reason
)
4487 BT_DBG("hcon %p reason %d", hcon
, reason
);
/* L2CAP only runs over ACL and LE links. */
4489 if (!(hcon
->type
== ACL_LINK
|| hcon
->type
== LE_LINK
))
4492 l2cap_conn_del(hcon
, bt_to_errno(reason
));
/*
 * React to a change of link encryption for one connection-oriented
 * channel. When encryption is dropped: a BT_SECURITY_MEDIUM channel is
 * given a grace period (L2CAP_ENC_TIMEOUT) to re-encrypt, while a
 * BT_SECURITY_HIGH channel is closed immediately. When encryption is
 * (re)enabled, a pending MEDIUM-security timer is cleared.
 */
4497 static inline void l2cap_check_encryption(struct l2cap_chan
*chan
, u8 encrypt
)
/* Connectionless/raw channels have no per-channel security to enforce. */
4499 if (chan
->chan_type
!= L2CAP_CHAN_CONN_ORIENTED
)
4502 if (encrypt
== 0x00) {
4503 if (chan
->sec_level
== BT_SECURITY_MEDIUM
) {
/* Give the link a chance to re-encrypt before giving up. */
4504 __clear_chan_timer(chan
);
4505 __set_chan_timer(chan
, L2CAP_ENC_TIMEOUT
);
4506 } else if (chan
->sec_level
== BT_SECURITY_HIGH
)
4507 l2cap_chan_close(chan
, ECONNREFUSED
);
/* Encryption enabled: cancel a pending MEDIUM-security timeout. */
4509 if (chan
->sec_level
== BT_SECURITY_MEDIUM
)
4510 __clear_chan_timer(chan
);
/*
 * HCI callback: authentication/encryption for @hcon finished with
 * @status (0 = success) and the new @encrypt state. For LE links,
 * key distribution is kicked and the SMP security timer stopped. For
 * each channel on the connection, the pending action that was waiting
 * on security is resumed: send the deferred connect request
 * (BT_CONNECT), answer the deferred connect response (BT_CONNECT2),
 * or just re-check per-channel encryption policy.
 *
 * NOTE(review): locking around the rcu list walk and several branch
 * bodies are not visible in this fragment.
 */
4514 static int l2cap_security_cfm(struct hci_conn
*hcon
, u8 status
, u8 encrypt
)
4516 struct l2cap_conn
*conn
= hcon
->l2cap_data
;
4517 struct l2cap_chan
*chan
;
4522 BT_DBG("conn %p", conn
);
/* LE: security is handled by SMP; distribute keys and stop its timer. */
4524 if (hcon
->type
== LE_LINK
) {
4525 smp_distribute_keys(conn
, 0);
4526 del_timer(&conn
->security_timer
);
4531 list_for_each_entry_rcu(chan
, &conn
->chan_l
, list
) {
4532 struct sock
*sk
= chan
->sk
;
4536 BT_DBG("chan->scid %d", chan
->scid
);
/* LE data channel: on success adopt the link's security level. */
4538 if (chan
->scid
== L2CAP_CID_LE_DATA
) {
4539 if (!status
&& encrypt
) {
4540 chan
->sec_level
= hcon
->sec_level
;
4541 l2cap_chan_ready(sk
);
/* Channels not waiting on this security procedure: just re-check
 * encryption policy if they are already up. */
4548 if (test_bit(CONF_CONNECT_PEND
, &chan
->conf_state
)) {
4553 if (!status
&& (chan
->state
== BT_CONNECTED
||
4554 chan
->state
== BT_CONFIG
)) {
4555 l2cap_check_encryption(chan
, encrypt
);
/* Outgoing connect was deferred until security completed:
 * send the Connection Request now. */
4560 if (chan
->state
== BT_CONNECT
) {
4562 struct l2cap_conn_req req
;
4563 req
.scid
= cpu_to_le16(chan
->scid
);
4564 req
.psm
= chan
->psm
;
4566 chan
->ident
= l2cap_get_ident(conn
);
4567 set_bit(CONF_CONNECT_PEND
, &chan
->conf_state
);
4569 l2cap_send_cmd(conn
, chan
->ident
,
4570 L2CAP_CONN_REQ
, sizeof(req
), &req
);
/* Security failed: arm the disconnect timer instead. */
4572 __clear_chan_timer(chan
);
4573 __set_chan_timer(chan
, L2CAP_DISC_TIMEOUT
);
/* Incoming connect was deferred: answer with the appropriate
 * Connection Response result/status. */
4575 } else if (chan
->state
== BT_CONNECT2
) {
4576 struct l2cap_conn_rsp rsp
;
/* Userspace wants to authorize first: report "pending". */
4580 if (bt_sk(sk
)->defer_setup
) {
4581 struct sock
*parent
= bt_sk(sk
)->parent
;
4582 res
= L2CAP_CR_PEND
;
4583 stat
= L2CAP_CS_AUTHOR_PEND
;
/* Wake the listening socket so it can accept(). */
4585 parent
->sk_data_ready(parent
, 0);
4587 l2cap_state_change(chan
, BT_CONFIG
);
4588 res
= L2CAP_CR_SUCCESS
;
4589 stat
= L2CAP_CS_NO_INFO
;
/* Security failure: refuse the connection and schedule teardown. */
4592 l2cap_state_change(chan
, BT_DISCONN
);
4593 __set_chan_timer(chan
, L2CAP_DISC_TIMEOUT
);
4594 res
= L2CAP_CR_SEC_BLOCK
;
4595 stat
= L2CAP_CS_NO_INFO
;
4598 rsp
.scid
= cpu_to_le16(chan
->dcid
);
4599 rsp
.dcid
= cpu_to_le16(chan
->scid
);
4600 rsp
.result
= cpu_to_le16(res
);
4601 rsp
.status
= cpu_to_le16(stat
);
4602 l2cap_send_cmd(conn
, chan
->ident
, L2CAP_CONN_RSP
,
/*
 * HCI callback: a chunk of ACL data arrived for @hcon. ACL packets may
 * fragment one L2CAP frame; @flags distinguishes a start fragment from
 * a continuation (ACL_CONT). Complete frames go straight to
 * l2cap_recv_frame(); partial frames are accumulated in conn->rx_skb
 * with conn->rx_len tracking the bytes still expected. Any framing
 * violation marks the connection unreliable via
 * l2cap_conn_unreliable(ECOMM).
 *
 * NOTE(review): several error-path returns/frees are not visible in
 * this fragment.
 */
4614 static int l2cap_recv_acldata(struct hci_conn
*hcon
, struct sk_buff
*skb
, u16 flags
)
4616 struct l2cap_conn
*conn
= hcon
->l2cap_data
;
/* Lazily create the L2CAP connection state on first data. */
4619 conn
= l2cap_conn_add(hcon
, 0);
4624 BT_DBG("conn %p len %d flags 0x%x", conn
, skb
->len
, flags
);
4626 if (!(flags
& ACL_CONT
)) {
4627 struct l2cap_hdr
*hdr
;
4628 struct l2cap_chan
*chan
;
/* A new start fragment while reassembly is in progress: drop the
 * stale partial frame. */
4633 BT_ERR("Unexpected start frame (len %d)", skb
->len
);
4634 kfree_skb(conn
->rx_skb
);
4635 conn
->rx_skb
= NULL
;
4637 l2cap_conn_unreliable(conn
, ECOMM
);
4640 /* Start fragment always begin with Basic L2CAP header */
4641 if (skb
->len
< L2CAP_HDR_SIZE
) {
4642 BT_ERR("Frame is too short (len %d)", skb
->len
);
4643 l2cap_conn_unreliable(conn
, ECOMM
);
4647 hdr
= (struct l2cap_hdr
*) skb
->data
;
/* Total frame size = advertised payload length + basic header. */
4648 len
= __le16_to_cpu(hdr
->len
) + L2CAP_HDR_SIZE
;
4649 cid
= __le16_to_cpu(hdr
->cid
);
4651 if (len
== skb
->len
) {
4652 /* Complete frame received */
4653 l2cap_recv_frame(conn
, skb
);
4657 BT_DBG("Start: total len %d, frag len %d", len
, skb
->len
);
/* A start fragment cannot be longer than the whole frame. */
4659 if (skb
->len
> len
) {
4660 BT_ERR("Frame is too long (len %d, expected len %d)",
4662 l2cap_conn_unreliable(conn
, ECOMM
);
4666 chan
= l2cap_get_chan_by_scid(conn
, cid
);
4668 if (chan
&& chan
->sk
) {
4669 struct sock
*sk
= chan
->sk
;
/* Reject frames that would exceed the channel's incoming MTU. */
4671 if (chan
->imtu
< len
- L2CAP_HDR_SIZE
) {
4672 BT_ERR("Frame exceeding recv MTU (len %d, "
4676 l2cap_conn_unreliable(conn
, ECOMM
);
4682 /* Allocate skb for the complete frame (with header) */
4683 conn
->rx_skb
= bt_skb_alloc(len
, GFP_ATOMIC
);
4687 skb_copy_from_linear_data(skb
, skb_put(conn
->rx_skb
, skb
->len
),
/* Remember how many more bytes are needed to finish the frame. */
4689 conn
->rx_len
= len
- skb
->len
;
4691 BT_DBG("Cont: frag len %d (expecting %d)", skb
->len
, conn
->rx_len
);
/* Continuation fragment with no reassembly in progress. */
4693 if (!conn
->rx_len
) {
4694 BT_ERR("Unexpected continuation frame (len %d)", skb
->len
);
4695 l2cap_conn_unreliable(conn
, ECOMM
);
/* Continuation fragment larger than what is still expected. */
4699 if (skb
->len
> conn
->rx_len
) {
4700 BT_ERR("Fragment is too long (len %d, expected %d)",
4701 skb
->len
, conn
->rx_len
);
4702 kfree_skb(conn
->rx_skb
);
4703 conn
->rx_skb
= NULL
;
4705 l2cap_conn_unreliable(conn
, ECOMM
);
/* Append this fragment to the frame being reassembled. */
4709 skb_copy_from_linear_data(skb
, skb_put(conn
->rx_skb
, skb
->len
),
4711 conn
->rx_len
-= skb
->len
;
4713 if (!conn
->rx_len
) {
4714 /* Complete frame received */
4715 l2cap_recv_frame(conn
, conn
->rx_skb
);
4716 conn
->rx_skb
= NULL
;
/*
 * debugfs seq_file printer: dump one line per registered L2CAP channel
 * (addresses, state, PSM, CIDs, MTUs, security level, mode) while
 * holding chan_list_lock for reading.
 */
4725 static int l2cap_debugfs_show(struct seq_file
*f
, void *p
)
4727 struct l2cap_chan
*c
;
4729 read_lock_bh(&chan_list_lock
);
4731 list_for_each_entry(c
, &chan_list
, global_l
) {
4732 struct sock
*sk
= c
->sk
;
4734 seq_printf(f
, "%s %s %d %d 0x%4.4x 0x%4.4x %d %d %d %d\n",
4735 batostr(&bt_sk(sk
)->src
),
4736 batostr(&bt_sk(sk
)->dst
),
4737 c
->state
, __le16_to_cpu(c
->psm
),
4738 c
->scid
, c
->dcid
, c
->imtu
, c
->omtu
,
4739 c
->sec_level
, c
->mode
);
4742 read_unlock_bh(&chan_list_lock
);
/* debugfs open hook: bind the seq_file single-shot show callback. */
4747 static int l2cap_debugfs_open(struct inode
*inode
, struct file
*file
)
4749 return single_open(file
, l2cap_debugfs_show
, inode
->i_private
);
/* File operations for the "l2cap" debugfs entry (seq_file based). */
4752 static const struct file_operations l2cap_debugfs_fops
= {
4753 .open
= l2cap_debugfs_open
,
4755 .llseek
= seq_lseek
,
4756 .release
= single_release
,
/* Handle of the "l2cap" debugfs file, created in l2cap_init(). */
4759 static struct dentry
*l2cap_debugfs
;
/*
 * Registration record hooking L2CAP into the HCI layer: connection
 * indication/confirmation, disconnection, security events and inbound
 * ACL data are routed to the handlers above.
 */
4761 static struct hci_proto l2cap_hci_proto
= {
4763 .id
= HCI_PROTO_L2CAP
,
4764 .connect_ind
= l2cap_connect_ind
,
4765 .connect_cfm
= l2cap_connect_cfm
,
4766 .disconn_ind
= l2cap_disconn_ind
,
4767 .disconn_cfm
= l2cap_disconn_cfm
,
4768 .security_cfm
= l2cap_security_cfm
,
4769 .recv_acldata
= l2cap_recv_acldata
/*
 * Module init: register the L2CAP socket family, hook into HCI, and
 * create the debugfs entry. On HCI registration failure the socket
 * family is unregistered again (error-path cleanup); the debugfs file
 * is optional and only logged on failure.
 */
4772 int __init
l2cap_init(void)
4776 err
= l2cap_init_sockets();
4780 err
= hci_register_proto(&l2cap_hci_proto
);
4782 BT_ERR("L2CAP protocol registration failed");
4783 bt_sock_unregister(BTPROTO_L2CAP
);
4788 l2cap_debugfs
= debugfs_create_file("l2cap", 0444,
4789 bt_debugfs
, NULL
, &l2cap_debugfs_fops
);
4791 BT_ERR("Failed to create L2CAP debug file");
4797 l2cap_cleanup_sockets();
/*
 * Module exit: undo l2cap_init() in reverse order — remove the debugfs
 * entry, unregister from HCI, then tear down the socket family.
 */
4801 void l2cap_exit(void)
4803 debugfs_remove(l2cap_debugfs
);
4805 if (hci_unregister_proto(&l2cap_hci_proto
) < 0)
4806 BT_ERR("L2CAP protocol unregistration failed");
4808 l2cap_cleanup_sockets();
/* Module parameter: allow disabling Enhanced Retransmission Mode at
 * load time (writable at runtime via sysfs, mode 0644). */
4811 module_param(disable_ertm
, bool, 0644);
4812 MODULE_PARM_DESC(disable_ertm
, "Disable enhanced retransmission mode");