2 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (C) 2000-2001 Qualcomm Incorporated
4 Copyright (C) 2009-2010 Gustavo F. Padovan <gustavo@padovan.org>
5 Copyright (C) 2010 Google Inc.
7 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
9 This program is free software; you can redistribute it and/or modify
10 it under the terms of the GNU General Public License version 2 as
11 published by the Free Software Foundation;
13 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
14 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
15 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
16 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
17 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
18 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
19 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
20 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
22 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
23 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
24 SOFTWARE IS DISCLAIMED.
27 /* Bluetooth L2CAP core. */
29 #include <linux/module.h>
31 #include <linux/types.h>
32 #include <linux/capability.h>
33 #include <linux/errno.h>
34 #include <linux/kernel.h>
35 #include <linux/sched.h>
36 #include <linux/slab.h>
37 #include <linux/poll.h>
38 #include <linux/fcntl.h>
39 #include <linux/init.h>
40 #include <linux/interrupt.h>
41 #include <linux/socket.h>
42 #include <linux/skbuff.h>
43 #include <linux/list.h>
44 #include <linux/device.h>
45 #include <linux/debugfs.h>
46 #include <linux/seq_file.h>
47 #include <linux/uaccess.h>
48 #include <linux/crc16.h>
51 #include <asm/system.h>
52 #include <asm/unaligned.h>
54 #include <net/bluetooth/bluetooth.h>
55 #include <net/bluetooth/hci_core.h>
56 #include <net/bluetooth/l2cap.h>
57 #include <net/bluetooth/smp.h>
62 static u32 l2cap_feat_mask
= L2CAP_FEAT_FIXED_CHAN
;
63 static u8 l2cap_fixed_chan
[8] = { L2CAP_FC_L2CAP
, };
65 static LIST_HEAD(chan_list
);
66 static DEFINE_RWLOCK(chan_list_lock
);
68 static struct sk_buff
*l2cap_build_cmd(struct l2cap_conn
*conn
,
69 u8 code
, u8 ident
, u16 dlen
, void *data
);
70 static void l2cap_send_cmd(struct l2cap_conn
*conn
, u8 ident
, u8 code
, u16 len
,
72 static int l2cap_build_conf_req(struct l2cap_chan
*chan
, void *data
);
73 static void l2cap_send_disconn_req(struct l2cap_conn
*conn
,
74 struct l2cap_chan
*chan
, int err
);
76 static int l2cap_ertm_data_rcv(struct sock
*sk
, struct sk_buff
*skb
);
78 /* ---- L2CAP channels ---- */
80 static inline void chan_hold(struct l2cap_chan
*c
)
82 atomic_inc(&c
->refcnt
);
85 static inline void chan_put(struct l2cap_chan
*c
)
87 if (atomic_dec_and_test(&c
->refcnt
))
91 static struct l2cap_chan
*__l2cap_get_chan_by_dcid(struct l2cap_conn
*conn
, u16 cid
)
95 list_for_each_entry(c
, &conn
->chan_l
, list
) {
103 static struct l2cap_chan
*__l2cap_get_chan_by_scid(struct l2cap_conn
*conn
, u16 cid
)
105 struct l2cap_chan
*c
;
107 list_for_each_entry(c
, &conn
->chan_l
, list
) {
114 /* Find channel with given SCID.
115 * Returns locked socket */
116 static struct l2cap_chan
*l2cap_get_chan_by_scid(struct l2cap_conn
*conn
, u16 cid
)
118 struct l2cap_chan
*c
;
120 read_lock(&conn
->chan_lock
);
121 c
= __l2cap_get_chan_by_scid(conn
, cid
);
124 read_unlock(&conn
->chan_lock
);
128 static struct l2cap_chan
*__l2cap_get_chan_by_ident(struct l2cap_conn
*conn
, u8 ident
)
130 struct l2cap_chan
*c
;
132 list_for_each_entry(c
, &conn
->chan_l
, list
) {
133 if (c
->ident
== ident
)
139 static inline struct l2cap_chan
*l2cap_get_chan_by_ident(struct l2cap_conn
*conn
, u8 ident
)
141 struct l2cap_chan
*c
;
143 read_lock(&conn
->chan_lock
);
144 c
= __l2cap_get_chan_by_ident(conn
, ident
);
147 read_unlock(&conn
->chan_lock
);
151 static struct l2cap_chan
*__l2cap_global_chan_by_addr(__le16 psm
, bdaddr_t
*src
)
153 struct l2cap_chan
*c
;
155 list_for_each_entry(c
, &chan_list
, global_l
) {
156 if (c
->sport
== psm
&& !bacmp(&bt_sk(c
->sk
)->src
, src
))
162 int l2cap_add_psm(struct l2cap_chan
*chan
, bdaddr_t
*src
, __le16 psm
)
166 write_lock_bh(&chan_list_lock
);
168 if (psm
&& __l2cap_global_chan_by_addr(psm
, src
)) {
181 for (p
= 0x1001; p
< 0x1100; p
+= 2)
182 if (!__l2cap_global_chan_by_addr(cpu_to_le16(p
), src
)) {
183 chan
->psm
= cpu_to_le16(p
);
184 chan
->sport
= cpu_to_le16(p
);
191 write_unlock_bh(&chan_list_lock
);
195 int l2cap_add_scid(struct l2cap_chan
*chan
, __u16 scid
)
197 write_lock_bh(&chan_list_lock
);
201 write_unlock_bh(&chan_list_lock
);
206 static u16
l2cap_alloc_cid(struct l2cap_conn
*conn
)
208 u16 cid
= L2CAP_CID_DYN_START
;
210 for (; cid
< L2CAP_CID_DYN_END
; cid
++) {
211 if (!__l2cap_get_chan_by_scid(conn
, cid
))
218 static void l2cap_set_timer(struct l2cap_chan
*chan
, struct timer_list
*timer
, long timeout
)
220 BT_DBG("chan %p state %d timeout %ld", chan
, chan
->state
, timeout
);
222 if (!mod_timer(timer
, jiffies
+ msecs_to_jiffies(timeout
)))
226 static void l2cap_clear_timer(struct l2cap_chan
*chan
, struct timer_list
*timer
)
228 BT_DBG("chan %p state %d", chan
, chan
->state
);
230 if (timer_pending(timer
) && del_timer(timer
))
234 static void l2cap_state_change(struct l2cap_chan
*chan
, int state
)
237 chan
->ops
->state_change(chan
->data
, state
);
240 static void l2cap_chan_timeout(unsigned long arg
)
242 struct l2cap_chan
*chan
= (struct l2cap_chan
*) arg
;
243 struct sock
*sk
= chan
->sk
;
246 BT_DBG("chan %p state %d", chan
, chan
->state
);
250 if (sock_owned_by_user(sk
)) {
251 /* sk is owned by user. Try again later */
252 __set_chan_timer(chan
, L2CAP_DISC_TIMEOUT
);
258 if (chan
->state
== BT_CONNECTED
|| chan
->state
== BT_CONFIG
)
259 reason
= ECONNREFUSED
;
260 else if (chan
->state
== BT_CONNECT
&&
261 chan
->sec_level
!= BT_SECURITY_SDP
)
262 reason
= ECONNREFUSED
;
266 l2cap_chan_close(chan
, reason
);
270 chan
->ops
->close(chan
->data
);
274 struct l2cap_chan
*l2cap_chan_create(struct sock
*sk
)
276 struct l2cap_chan
*chan
;
278 chan
= kzalloc(sizeof(*chan
), GFP_ATOMIC
);
284 write_lock_bh(&chan_list_lock
);
285 list_add(&chan
->global_l
, &chan_list
);
286 write_unlock_bh(&chan_list_lock
);
288 setup_timer(&chan
->chan_timer
, l2cap_chan_timeout
, (unsigned long) chan
);
290 chan
->state
= BT_OPEN
;
292 atomic_set(&chan
->refcnt
, 1);
294 BT_DBG("sk %p chan %p", sk
, chan
);
299 void l2cap_chan_destroy(struct l2cap_chan
*chan
)
301 write_lock_bh(&chan_list_lock
);
302 list_del(&chan
->global_l
);
303 write_unlock_bh(&chan_list_lock
);
308 static void __l2cap_chan_add(struct l2cap_conn
*conn
, struct l2cap_chan
*chan
)
310 BT_DBG("conn %p, psm 0x%2.2x, dcid 0x%4.4x", conn
,
311 chan
->psm
, chan
->dcid
);
313 conn
->disc_reason
= HCI_ERROR_REMOTE_USER_TERM
;
317 if (chan
->chan_type
== L2CAP_CHAN_CONN_ORIENTED
) {
318 if (conn
->hcon
->type
== LE_LINK
) {
320 chan
->omtu
= L2CAP_LE_DEFAULT_MTU
;
321 chan
->scid
= L2CAP_CID_LE_DATA
;
322 chan
->dcid
= L2CAP_CID_LE_DATA
;
324 /* Alloc CID for connection-oriented socket */
325 chan
->scid
= l2cap_alloc_cid(conn
);
326 chan
->omtu
= L2CAP_DEFAULT_MTU
;
328 } else if (chan
->chan_type
== L2CAP_CHAN_CONN_LESS
) {
329 /* Connectionless socket */
330 chan
->scid
= L2CAP_CID_CONN_LESS
;
331 chan
->dcid
= L2CAP_CID_CONN_LESS
;
332 chan
->omtu
= L2CAP_DEFAULT_MTU
;
334 /* Raw socket can send/recv signalling messages only */
335 chan
->scid
= L2CAP_CID_SIGNALING
;
336 chan
->dcid
= L2CAP_CID_SIGNALING
;
337 chan
->omtu
= L2CAP_DEFAULT_MTU
;
340 chan
->local_id
= L2CAP_BESTEFFORT_ID
;
341 chan
->local_stype
= L2CAP_SERV_BESTEFFORT
;
342 chan
->local_msdu
= L2CAP_DEFAULT_MAX_SDU_SIZE
;
343 chan
->local_sdu_itime
= L2CAP_DEFAULT_SDU_ITIME
;
344 chan
->local_acc_lat
= L2CAP_DEFAULT_ACC_LAT
;
345 chan
->local_flush_to
= L2CAP_DEFAULT_FLUSH_TO
;
349 list_add(&chan
->list
, &conn
->chan_l
);
353 * Must be called on the locked socket. */
354 static void l2cap_chan_del(struct l2cap_chan
*chan
, int err
)
356 struct sock
*sk
= chan
->sk
;
357 struct l2cap_conn
*conn
= chan
->conn
;
358 struct sock
*parent
= bt_sk(sk
)->parent
;
360 __clear_chan_timer(chan
);
362 BT_DBG("chan %p, conn %p, err %d", chan
, conn
, err
);
365 /* Delete from channel list */
366 write_lock_bh(&conn
->chan_lock
);
367 list_del(&chan
->list
);
368 write_unlock_bh(&conn
->chan_lock
);
372 hci_conn_put(conn
->hcon
);
375 l2cap_state_change(chan
, BT_CLOSED
);
376 sock_set_flag(sk
, SOCK_ZAPPED
);
382 bt_accept_unlink(sk
);
383 parent
->sk_data_ready(parent
, 0);
385 sk
->sk_state_change(sk
);
387 if (!(test_bit(CONF_OUTPUT_DONE
, &chan
->conf_state
) &&
388 test_bit(CONF_INPUT_DONE
, &chan
->conf_state
)))
391 skb_queue_purge(&chan
->tx_q
);
393 if (chan
->mode
== L2CAP_MODE_ERTM
) {
394 struct srej_list
*l
, *tmp
;
396 __clear_retrans_timer(chan
);
397 __clear_monitor_timer(chan
);
398 __clear_ack_timer(chan
);
400 skb_queue_purge(&chan
->srej_q
);
402 list_for_each_entry_safe(l
, tmp
, &chan
->srej_l
, list
) {
409 static void l2cap_chan_cleanup_listen(struct sock
*parent
)
413 BT_DBG("parent %p", parent
);
415 /* Close not yet accepted channels */
416 while ((sk
= bt_accept_dequeue(parent
, NULL
))) {
417 struct l2cap_chan
*chan
= l2cap_pi(sk
)->chan
;
418 __clear_chan_timer(chan
);
420 l2cap_chan_close(chan
, ECONNRESET
);
422 chan
->ops
->close(chan
->data
);
426 void l2cap_chan_close(struct l2cap_chan
*chan
, int reason
)
428 struct l2cap_conn
*conn
= chan
->conn
;
429 struct sock
*sk
= chan
->sk
;
431 BT_DBG("chan %p state %d socket %p", chan
, chan
->state
, sk
->sk_socket
);
433 switch (chan
->state
) {
435 l2cap_chan_cleanup_listen(sk
);
437 l2cap_state_change(chan
, BT_CLOSED
);
438 sock_set_flag(sk
, SOCK_ZAPPED
);
443 if (chan
->chan_type
== L2CAP_CHAN_CONN_ORIENTED
&&
444 conn
->hcon
->type
== ACL_LINK
) {
445 __clear_chan_timer(chan
);
446 __set_chan_timer(chan
, sk
->sk_sndtimeo
);
447 l2cap_send_disconn_req(conn
, chan
, reason
);
449 l2cap_chan_del(chan
, reason
);
453 if (chan
->chan_type
== L2CAP_CHAN_CONN_ORIENTED
&&
454 conn
->hcon
->type
== ACL_LINK
) {
455 struct l2cap_conn_rsp rsp
;
458 if (bt_sk(sk
)->defer_setup
)
459 result
= L2CAP_CR_SEC_BLOCK
;
461 result
= L2CAP_CR_BAD_PSM
;
462 l2cap_state_change(chan
, BT_DISCONN
);
464 rsp
.scid
= cpu_to_le16(chan
->dcid
);
465 rsp
.dcid
= cpu_to_le16(chan
->scid
);
466 rsp
.result
= cpu_to_le16(result
);
467 rsp
.status
= cpu_to_le16(L2CAP_CS_NO_INFO
);
468 l2cap_send_cmd(conn
, chan
->ident
, L2CAP_CONN_RSP
,
472 l2cap_chan_del(chan
, reason
);
477 l2cap_chan_del(chan
, reason
);
481 sock_set_flag(sk
, SOCK_ZAPPED
);
486 static inline u8
l2cap_get_auth_type(struct l2cap_chan
*chan
)
488 if (chan
->chan_type
== L2CAP_CHAN_RAW
) {
489 switch (chan
->sec_level
) {
490 case BT_SECURITY_HIGH
:
491 return HCI_AT_DEDICATED_BONDING_MITM
;
492 case BT_SECURITY_MEDIUM
:
493 return HCI_AT_DEDICATED_BONDING
;
495 return HCI_AT_NO_BONDING
;
497 } else if (chan
->psm
== cpu_to_le16(0x0001)) {
498 if (chan
->sec_level
== BT_SECURITY_LOW
)
499 chan
->sec_level
= BT_SECURITY_SDP
;
501 if (chan
->sec_level
== BT_SECURITY_HIGH
)
502 return HCI_AT_NO_BONDING_MITM
;
504 return HCI_AT_NO_BONDING
;
506 switch (chan
->sec_level
) {
507 case BT_SECURITY_HIGH
:
508 return HCI_AT_GENERAL_BONDING_MITM
;
509 case BT_SECURITY_MEDIUM
:
510 return HCI_AT_GENERAL_BONDING
;
512 return HCI_AT_NO_BONDING
;
517 /* Service level security */
518 int l2cap_chan_check_security(struct l2cap_chan
*chan
)
520 struct l2cap_conn
*conn
= chan
->conn
;
523 auth_type
= l2cap_get_auth_type(chan
);
525 return hci_conn_security(conn
->hcon
, chan
->sec_level
, auth_type
);
528 static u8
l2cap_get_ident(struct l2cap_conn
*conn
)
532 /* Get next available identificator.
533 * 1 - 128 are used by kernel.
534 * 129 - 199 are reserved.
535 * 200 - 254 are used by utilities like l2ping, etc.
538 spin_lock_bh(&conn
->lock
);
540 if (++conn
->tx_ident
> 128)
545 spin_unlock_bh(&conn
->lock
);
550 static void l2cap_send_cmd(struct l2cap_conn
*conn
, u8 ident
, u8 code
, u16 len
, void *data
)
552 struct sk_buff
*skb
= l2cap_build_cmd(conn
, code
, ident
, len
, data
);
555 BT_DBG("code 0x%2.2x", code
);
560 if (lmp_no_flush_capable(conn
->hcon
->hdev
))
561 flags
= ACL_START_NO_FLUSH
;
565 bt_cb(skb
)->force_active
= BT_POWER_FORCE_ACTIVE_ON
;
566 skb
->priority
= HCI_PRIO_MAX
;
568 hci_send_acl(conn
->hchan
, skb
, flags
);
571 static void l2cap_do_send(struct l2cap_chan
*chan
, struct sk_buff
*skb
)
573 struct hci_conn
*hcon
= chan
->conn
->hcon
;
576 BT_DBG("chan %p, skb %p len %d priority %u", chan
, skb
, skb
->len
,
579 if (!test_bit(FLAG_FLUSHABLE
, &chan
->flags
) &&
580 lmp_no_flush_capable(hcon
->hdev
))
581 flags
= ACL_START_NO_FLUSH
;
585 bt_cb(skb
)->force_active
= test_bit(FLAG_FORCE_ACTIVE
, &chan
->flags
);
586 hci_send_acl(chan
->conn
->hchan
, skb
, flags
);
589 static inline void l2cap_send_sframe(struct l2cap_chan
*chan
, u32 control
)
592 struct l2cap_hdr
*lh
;
593 struct l2cap_conn
*conn
= chan
->conn
;
596 if (chan
->state
!= BT_CONNECTED
)
599 if (test_bit(FLAG_EXT_CTRL
, &chan
->flags
))
600 hlen
= L2CAP_EXT_HDR_SIZE
;
602 hlen
= L2CAP_ENH_HDR_SIZE
;
604 if (chan
->fcs
== L2CAP_FCS_CRC16
)
605 hlen
+= L2CAP_FCS_SIZE
;
607 BT_DBG("chan %p, control 0x%8.8x", chan
, control
);
609 count
= min_t(unsigned int, conn
->mtu
, hlen
);
611 control
|= __set_sframe(chan
);
613 if (test_and_clear_bit(CONN_SEND_FBIT
, &chan
->conn_state
))
614 control
|= __set_ctrl_final(chan
);
616 if (test_and_clear_bit(CONN_SEND_PBIT
, &chan
->conn_state
))
617 control
|= __set_ctrl_poll(chan
);
619 skb
= bt_skb_alloc(count
, GFP_ATOMIC
);
623 lh
= (struct l2cap_hdr
*) skb_put(skb
, L2CAP_HDR_SIZE
);
624 lh
->len
= cpu_to_le16(hlen
- L2CAP_HDR_SIZE
);
625 lh
->cid
= cpu_to_le16(chan
->dcid
);
627 __put_control(chan
, control
, skb_put(skb
, __ctrl_size(chan
)));
629 if (chan
->fcs
== L2CAP_FCS_CRC16
) {
630 u16 fcs
= crc16(0, (u8
*)lh
, count
- L2CAP_FCS_SIZE
);
631 put_unaligned_le16(fcs
, skb_put(skb
, L2CAP_FCS_SIZE
));
634 skb
->priority
= HCI_PRIO_MAX
;
635 l2cap_do_send(chan
, skb
);
638 static inline void l2cap_send_rr_or_rnr(struct l2cap_chan
*chan
, u32 control
)
640 if (test_bit(CONN_LOCAL_BUSY
, &chan
->conn_state
)) {
641 control
|= __set_ctrl_super(chan
, L2CAP_SUPER_RNR
);
642 set_bit(CONN_RNR_SENT
, &chan
->conn_state
);
644 control
|= __set_ctrl_super(chan
, L2CAP_SUPER_RR
);
646 control
|= __set_reqseq(chan
, chan
->buffer_seq
);
648 l2cap_send_sframe(chan
, control
);
651 static inline int __l2cap_no_conn_pending(struct l2cap_chan
*chan
)
653 return !test_bit(CONF_CONNECT_PEND
, &chan
->conf_state
);
656 static void l2cap_do_start(struct l2cap_chan
*chan
)
658 struct l2cap_conn
*conn
= chan
->conn
;
660 if (conn
->info_state
& L2CAP_INFO_FEAT_MASK_REQ_SENT
) {
661 if (!(conn
->info_state
& L2CAP_INFO_FEAT_MASK_REQ_DONE
))
664 if (l2cap_chan_check_security(chan
) &&
665 __l2cap_no_conn_pending(chan
)) {
666 struct l2cap_conn_req req
;
667 req
.scid
= cpu_to_le16(chan
->scid
);
670 chan
->ident
= l2cap_get_ident(conn
);
671 set_bit(CONF_CONNECT_PEND
, &chan
->conf_state
);
673 l2cap_send_cmd(conn
, chan
->ident
, L2CAP_CONN_REQ
,
677 struct l2cap_info_req req
;
678 req
.type
= cpu_to_le16(L2CAP_IT_FEAT_MASK
);
680 conn
->info_state
|= L2CAP_INFO_FEAT_MASK_REQ_SENT
;
681 conn
->info_ident
= l2cap_get_ident(conn
);
683 mod_timer(&conn
->info_timer
, jiffies
+
684 msecs_to_jiffies(L2CAP_INFO_TIMEOUT
));
686 l2cap_send_cmd(conn
, conn
->info_ident
,
687 L2CAP_INFO_REQ
, sizeof(req
), &req
);
691 static inline int l2cap_mode_supported(__u8 mode
, __u32 feat_mask
)
693 u32 local_feat_mask
= l2cap_feat_mask
;
695 local_feat_mask
|= L2CAP_FEAT_ERTM
| L2CAP_FEAT_STREAMING
;
698 case L2CAP_MODE_ERTM
:
699 return L2CAP_FEAT_ERTM
& feat_mask
& local_feat_mask
;
700 case L2CAP_MODE_STREAMING
:
701 return L2CAP_FEAT_STREAMING
& feat_mask
& local_feat_mask
;
707 static void l2cap_send_disconn_req(struct l2cap_conn
*conn
, struct l2cap_chan
*chan
, int err
)
710 struct l2cap_disconn_req req
;
717 if (chan
->mode
== L2CAP_MODE_ERTM
) {
718 __clear_retrans_timer(chan
);
719 __clear_monitor_timer(chan
);
720 __clear_ack_timer(chan
);
723 req
.dcid
= cpu_to_le16(chan
->dcid
);
724 req
.scid
= cpu_to_le16(chan
->scid
);
725 l2cap_send_cmd(conn
, l2cap_get_ident(conn
),
726 L2CAP_DISCONN_REQ
, sizeof(req
), &req
);
728 l2cap_state_change(chan
, BT_DISCONN
);
732 /* ---- L2CAP connections ---- */
733 static void l2cap_conn_start(struct l2cap_conn
*conn
)
735 struct l2cap_chan
*chan
, *tmp
;
737 BT_DBG("conn %p", conn
);
739 read_lock(&conn
->chan_lock
);
741 list_for_each_entry_safe(chan
, tmp
, &conn
->chan_l
, list
) {
742 struct sock
*sk
= chan
->sk
;
746 if (chan
->chan_type
!= L2CAP_CHAN_CONN_ORIENTED
) {
751 if (chan
->state
== BT_CONNECT
) {
752 struct l2cap_conn_req req
;
754 if (!l2cap_chan_check_security(chan
) ||
755 !__l2cap_no_conn_pending(chan
)) {
760 if (!l2cap_mode_supported(chan
->mode
, conn
->feat_mask
)
761 && test_bit(CONF_STATE2_DEVICE
,
762 &chan
->conf_state
)) {
763 /* l2cap_chan_close() calls list_del(chan)
764 * so release the lock */
765 read_unlock(&conn
->chan_lock
);
766 l2cap_chan_close(chan
, ECONNRESET
);
767 read_lock(&conn
->chan_lock
);
772 req
.scid
= cpu_to_le16(chan
->scid
);
775 chan
->ident
= l2cap_get_ident(conn
);
776 set_bit(CONF_CONNECT_PEND
, &chan
->conf_state
);
778 l2cap_send_cmd(conn
, chan
->ident
, L2CAP_CONN_REQ
,
781 } else if (chan
->state
== BT_CONNECT2
) {
782 struct l2cap_conn_rsp rsp
;
784 rsp
.scid
= cpu_to_le16(chan
->dcid
);
785 rsp
.dcid
= cpu_to_le16(chan
->scid
);
787 if (l2cap_chan_check_security(chan
)) {
788 if (bt_sk(sk
)->defer_setup
) {
789 struct sock
*parent
= bt_sk(sk
)->parent
;
790 rsp
.result
= cpu_to_le16(L2CAP_CR_PEND
);
791 rsp
.status
= cpu_to_le16(L2CAP_CS_AUTHOR_PEND
);
793 parent
->sk_data_ready(parent
, 0);
796 l2cap_state_change(chan
, BT_CONFIG
);
797 rsp
.result
= cpu_to_le16(L2CAP_CR_SUCCESS
);
798 rsp
.status
= cpu_to_le16(L2CAP_CS_NO_INFO
);
801 rsp
.result
= cpu_to_le16(L2CAP_CR_PEND
);
802 rsp
.status
= cpu_to_le16(L2CAP_CS_AUTHEN_PEND
);
805 l2cap_send_cmd(conn
, chan
->ident
, L2CAP_CONN_RSP
,
808 if (test_bit(CONF_REQ_SENT
, &chan
->conf_state
) ||
809 rsp
.result
!= L2CAP_CR_SUCCESS
) {
814 set_bit(CONF_REQ_SENT
, &chan
->conf_state
);
815 l2cap_send_cmd(conn
, l2cap_get_ident(conn
), L2CAP_CONF_REQ
,
816 l2cap_build_conf_req(chan
, buf
), buf
);
817 chan
->num_conf_req
++;
823 read_unlock(&conn
->chan_lock
);
826 /* Find socket with cid and source bdaddr.
827 * Returns closest match, locked.
829 static struct l2cap_chan
*l2cap_global_chan_by_scid(int state
, __le16 cid
, bdaddr_t
*src
)
831 struct l2cap_chan
*c
, *c1
= NULL
;
833 read_lock(&chan_list_lock
);
835 list_for_each_entry(c
, &chan_list
, global_l
) {
836 struct sock
*sk
= c
->sk
;
838 if (state
&& c
->state
!= state
)
841 if (c
->scid
== cid
) {
843 if (!bacmp(&bt_sk(sk
)->src
, src
)) {
844 read_unlock(&chan_list_lock
);
849 if (!bacmp(&bt_sk(sk
)->src
, BDADDR_ANY
))
854 read_unlock(&chan_list_lock
);
859 static void l2cap_le_conn_ready(struct l2cap_conn
*conn
)
861 struct sock
*parent
, *sk
;
862 struct l2cap_chan
*chan
, *pchan
;
866 /* Check if we have socket listening on cid */
867 pchan
= l2cap_global_chan_by_scid(BT_LISTEN
, L2CAP_CID_LE_DATA
,
874 bh_lock_sock(parent
);
876 /* Check for backlog size */
877 if (sk_acceptq_is_full(parent
)) {
878 BT_DBG("backlog full %d", parent
->sk_ack_backlog
);
882 chan
= pchan
->ops
->new_connection(pchan
->data
);
888 write_lock_bh(&conn
->chan_lock
);
890 hci_conn_hold(conn
->hcon
);
892 bacpy(&bt_sk(sk
)->src
, conn
->src
);
893 bacpy(&bt_sk(sk
)->dst
, conn
->dst
);
895 bt_accept_enqueue(parent
, sk
);
897 __l2cap_chan_add(conn
, chan
);
899 __set_chan_timer(chan
, sk
->sk_sndtimeo
);
901 l2cap_state_change(chan
, BT_CONNECTED
);
902 parent
->sk_data_ready(parent
, 0);
904 write_unlock_bh(&conn
->chan_lock
);
907 bh_unlock_sock(parent
);
910 static void l2cap_chan_ready(struct sock
*sk
)
912 struct l2cap_chan
*chan
= l2cap_pi(sk
)->chan
;
913 struct sock
*parent
= bt_sk(sk
)->parent
;
915 BT_DBG("sk %p, parent %p", sk
, parent
);
917 chan
->conf_state
= 0;
918 __clear_chan_timer(chan
);
920 l2cap_state_change(chan
, BT_CONNECTED
);
921 sk
->sk_state_change(sk
);
924 parent
->sk_data_ready(parent
, 0);
927 static void l2cap_conn_ready(struct l2cap_conn
*conn
)
929 struct l2cap_chan
*chan
;
931 BT_DBG("conn %p", conn
);
933 if (!conn
->hcon
->out
&& conn
->hcon
->type
== LE_LINK
)
934 l2cap_le_conn_ready(conn
);
936 if (conn
->hcon
->out
&& conn
->hcon
->type
== LE_LINK
)
937 smp_conn_security(conn
, conn
->hcon
->pending_sec_level
);
939 read_lock(&conn
->chan_lock
);
941 list_for_each_entry(chan
, &conn
->chan_l
, list
) {
942 struct sock
*sk
= chan
->sk
;
946 if (conn
->hcon
->type
== LE_LINK
) {
947 if (smp_conn_security(conn
, chan
->sec_level
))
948 l2cap_chan_ready(sk
);
950 } else if (chan
->chan_type
!= L2CAP_CHAN_CONN_ORIENTED
) {
951 __clear_chan_timer(chan
);
952 l2cap_state_change(chan
, BT_CONNECTED
);
953 sk
->sk_state_change(sk
);
955 } else if (chan
->state
== BT_CONNECT
)
956 l2cap_do_start(chan
);
961 read_unlock(&conn
->chan_lock
);
964 /* Notify sockets that we cannot guaranty reliability anymore */
965 static void l2cap_conn_unreliable(struct l2cap_conn
*conn
, int err
)
967 struct l2cap_chan
*chan
;
969 BT_DBG("conn %p", conn
);
971 read_lock(&conn
->chan_lock
);
973 list_for_each_entry(chan
, &conn
->chan_l
, list
) {
974 struct sock
*sk
= chan
->sk
;
976 if (test_bit(FLAG_FORCE_RELIABLE
, &chan
->flags
))
980 read_unlock(&conn
->chan_lock
);
983 static void l2cap_info_timeout(unsigned long arg
)
985 struct l2cap_conn
*conn
= (void *) arg
;
987 conn
->info_state
|= L2CAP_INFO_FEAT_MASK_REQ_DONE
;
988 conn
->info_ident
= 0;
990 l2cap_conn_start(conn
);
993 static void l2cap_conn_del(struct hci_conn
*hcon
, int err
)
995 struct l2cap_conn
*conn
= hcon
->l2cap_data
;
996 struct l2cap_chan
*chan
, *l
;
1002 BT_DBG("hcon %p conn %p, err %d", hcon
, conn
, err
);
1004 kfree_skb(conn
->rx_skb
);
1007 list_for_each_entry_safe(chan
, l
, &conn
->chan_l
, list
) {
1010 l2cap_chan_del(chan
, err
);
1012 chan
->ops
->close(chan
->data
);
1015 hci_chan_del(conn
->hchan
);
1017 if (conn
->info_state
& L2CAP_INFO_FEAT_MASK_REQ_SENT
)
1018 del_timer_sync(&conn
->info_timer
);
1020 if (test_and_clear_bit(HCI_CONN_LE_SMP_PEND
, &hcon
->pend
)) {
1021 del_timer(&conn
->security_timer
);
1022 smp_chan_destroy(conn
);
1025 hcon
->l2cap_data
= NULL
;
1029 static void security_timeout(unsigned long arg
)
1031 struct l2cap_conn
*conn
= (void *) arg
;
1033 l2cap_conn_del(conn
->hcon
, ETIMEDOUT
);
1036 static struct l2cap_conn
*l2cap_conn_add(struct hci_conn
*hcon
, u8 status
)
1038 struct l2cap_conn
*conn
= hcon
->l2cap_data
;
1039 struct hci_chan
*hchan
;
1044 hchan
= hci_chan_create(hcon
);
1048 conn
= kzalloc(sizeof(struct l2cap_conn
), GFP_ATOMIC
);
1050 hci_chan_del(hchan
);
1054 hcon
->l2cap_data
= conn
;
1056 conn
->hchan
= hchan
;
1058 BT_DBG("hcon %p conn %p hchan %p", hcon
, conn
, hchan
);
1060 if (hcon
->hdev
->le_mtu
&& hcon
->type
== LE_LINK
)
1061 conn
->mtu
= hcon
->hdev
->le_mtu
;
1063 conn
->mtu
= hcon
->hdev
->acl_mtu
;
1065 conn
->src
= &hcon
->hdev
->bdaddr
;
1066 conn
->dst
= &hcon
->dst
;
1068 conn
->feat_mask
= 0;
1070 spin_lock_init(&conn
->lock
);
1071 rwlock_init(&conn
->chan_lock
);
1073 INIT_LIST_HEAD(&conn
->chan_l
);
1075 if (hcon
->type
== LE_LINK
)
1076 setup_timer(&conn
->security_timer
, security_timeout
,
1077 (unsigned long) conn
);
1079 setup_timer(&conn
->info_timer
, l2cap_info_timeout
,
1080 (unsigned long) conn
);
1082 conn
->disc_reason
= HCI_ERROR_REMOTE_USER_TERM
;
1087 static inline void l2cap_chan_add(struct l2cap_conn
*conn
, struct l2cap_chan
*chan
)
1089 write_lock_bh(&conn
->chan_lock
);
1090 __l2cap_chan_add(conn
, chan
);
1091 write_unlock_bh(&conn
->chan_lock
);
1094 /* ---- Socket interface ---- */
1096 /* Find socket with psm and source bdaddr.
1097 * Returns closest match.
1099 static struct l2cap_chan
*l2cap_global_chan_by_psm(int state
, __le16 psm
, bdaddr_t
*src
)
1101 struct l2cap_chan
*c
, *c1
= NULL
;
1103 read_lock(&chan_list_lock
);
1105 list_for_each_entry(c
, &chan_list
, global_l
) {
1106 struct sock
*sk
= c
->sk
;
1108 if (state
&& c
->state
!= state
)
1111 if (c
->psm
== psm
) {
1113 if (!bacmp(&bt_sk(sk
)->src
, src
)) {
1114 read_unlock(&chan_list_lock
);
1119 if (!bacmp(&bt_sk(sk
)->src
, BDADDR_ANY
))
1124 read_unlock(&chan_list_lock
);
1129 int l2cap_chan_connect(struct l2cap_chan
*chan
)
1131 struct sock
*sk
= chan
->sk
;
1132 bdaddr_t
*src
= &bt_sk(sk
)->src
;
1133 bdaddr_t
*dst
= &bt_sk(sk
)->dst
;
1134 struct l2cap_conn
*conn
;
1135 struct hci_conn
*hcon
;
1136 struct hci_dev
*hdev
;
1140 BT_DBG("%s -> %s psm 0x%2.2x", batostr(src
), batostr(dst
),
1143 hdev
= hci_get_route(dst
, src
);
1145 return -EHOSTUNREACH
;
1147 hci_dev_lock_bh(hdev
);
1149 auth_type
= l2cap_get_auth_type(chan
);
1151 if (chan
->dcid
== L2CAP_CID_LE_DATA
)
1152 hcon
= hci_connect(hdev
, LE_LINK
, dst
,
1153 chan
->sec_level
, auth_type
);
1155 hcon
= hci_connect(hdev
, ACL_LINK
, dst
,
1156 chan
->sec_level
, auth_type
);
1159 err
= PTR_ERR(hcon
);
1163 conn
= l2cap_conn_add(hcon
, 0);
1170 /* Update source addr of the socket */
1171 bacpy(src
, conn
->src
);
1173 l2cap_chan_add(conn
, chan
);
1175 l2cap_state_change(chan
, BT_CONNECT
);
1176 __set_chan_timer(chan
, sk
->sk_sndtimeo
);
1178 if (hcon
->state
== BT_CONNECTED
) {
1179 if (chan
->chan_type
!= L2CAP_CHAN_CONN_ORIENTED
) {
1180 __clear_chan_timer(chan
);
1181 if (l2cap_chan_check_security(chan
))
1182 l2cap_state_change(chan
, BT_CONNECTED
);
1184 l2cap_do_start(chan
);
1190 hci_dev_unlock_bh(hdev
);
1195 int __l2cap_wait_ack(struct sock
*sk
)
1197 struct l2cap_chan
*chan
= l2cap_pi(sk
)->chan
;
1198 DECLARE_WAITQUEUE(wait
, current
);
1202 add_wait_queue(sk_sleep(sk
), &wait
);
1203 set_current_state(TASK_INTERRUPTIBLE
);
1204 while (chan
->unacked_frames
> 0 && chan
->conn
) {
1208 if (signal_pending(current
)) {
1209 err
= sock_intr_errno(timeo
);
1214 timeo
= schedule_timeout(timeo
);
1216 set_current_state(TASK_INTERRUPTIBLE
);
1218 err
= sock_error(sk
);
1222 set_current_state(TASK_RUNNING
);
1223 remove_wait_queue(sk_sleep(sk
), &wait
);
1227 static void l2cap_monitor_timeout(unsigned long arg
)
1229 struct l2cap_chan
*chan
= (void *) arg
;
1230 struct sock
*sk
= chan
->sk
;
1232 BT_DBG("chan %p", chan
);
1235 if (chan
->retry_count
>= chan
->remote_max_tx
) {
1236 l2cap_send_disconn_req(chan
->conn
, chan
, ECONNABORTED
);
1241 chan
->retry_count
++;
1242 __set_monitor_timer(chan
);
1244 l2cap_send_rr_or_rnr(chan
, L2CAP_CTRL_POLL
);
1248 static void l2cap_retrans_timeout(unsigned long arg
)
1250 struct l2cap_chan
*chan
= (void *) arg
;
1251 struct sock
*sk
= chan
->sk
;
1253 BT_DBG("chan %p", chan
);
1256 chan
->retry_count
= 1;
1257 __set_monitor_timer(chan
);
1259 set_bit(CONN_WAIT_F
, &chan
->conn_state
);
1261 l2cap_send_rr_or_rnr(chan
, L2CAP_CTRL_POLL
);
1265 static void l2cap_drop_acked_frames(struct l2cap_chan
*chan
)
1267 struct sk_buff
*skb
;
1269 while ((skb
= skb_peek(&chan
->tx_q
)) &&
1270 chan
->unacked_frames
) {
1271 if (bt_cb(skb
)->tx_seq
== chan
->expected_ack_seq
)
1274 skb
= skb_dequeue(&chan
->tx_q
);
1277 chan
->unacked_frames
--;
1280 if (!chan
->unacked_frames
)
1281 __clear_retrans_timer(chan
);
1284 static void l2cap_streaming_send(struct l2cap_chan
*chan
)
1286 struct sk_buff
*skb
;
1290 while ((skb
= skb_dequeue(&chan
->tx_q
))) {
1291 control
= __get_control(chan
, skb
->data
+ L2CAP_HDR_SIZE
);
1292 control
|= __set_txseq(chan
, chan
->next_tx_seq
);
1293 __put_control(chan
, control
, skb
->data
+ L2CAP_HDR_SIZE
);
1295 if (chan
->fcs
== L2CAP_FCS_CRC16
) {
1296 fcs
= crc16(0, (u8
*)skb
->data
,
1297 skb
->len
- L2CAP_FCS_SIZE
);
1298 put_unaligned_le16(fcs
,
1299 skb
->data
+ skb
->len
- L2CAP_FCS_SIZE
);
1302 l2cap_do_send(chan
, skb
);
1304 chan
->next_tx_seq
= __next_seq(chan
, chan
->next_tx_seq
);
1308 static void l2cap_retransmit_one_frame(struct l2cap_chan
*chan
, u16 tx_seq
)
1310 struct sk_buff
*skb
, *tx_skb
;
1314 skb
= skb_peek(&chan
->tx_q
);
1318 while (bt_cb(skb
)->tx_seq
!= tx_seq
) {
1319 if (skb_queue_is_last(&chan
->tx_q
, skb
))
1322 skb
= skb_queue_next(&chan
->tx_q
, skb
);
1325 if (chan
->remote_max_tx
&&
1326 bt_cb(skb
)->retries
== chan
->remote_max_tx
) {
1327 l2cap_send_disconn_req(chan
->conn
, chan
, ECONNABORTED
);
1331 tx_skb
= skb_clone(skb
, GFP_ATOMIC
);
1332 bt_cb(skb
)->retries
++;
1334 control
= __get_control(chan
, tx_skb
->data
+ L2CAP_HDR_SIZE
);
1335 control
&= __get_sar_mask(chan
);
1337 if (test_and_clear_bit(CONN_SEND_FBIT
, &chan
->conn_state
))
1338 control
|= __set_ctrl_final(chan
);
1340 control
|= __set_reqseq(chan
, chan
->buffer_seq
);
1341 control
|= __set_txseq(chan
, tx_seq
);
1343 __put_control(chan
, control
, tx_skb
->data
+ L2CAP_HDR_SIZE
);
1345 if (chan
->fcs
== L2CAP_FCS_CRC16
) {
1346 fcs
= crc16(0, (u8
*)tx_skb
->data
,
1347 tx_skb
->len
- L2CAP_FCS_SIZE
);
1348 put_unaligned_le16(fcs
,
1349 tx_skb
->data
+ tx_skb
->len
- L2CAP_FCS_SIZE
);
1352 l2cap_do_send(chan
, tx_skb
);
1355 static int l2cap_ertm_send(struct l2cap_chan
*chan
)
1357 struct sk_buff
*skb
, *tx_skb
;
1362 if (chan
->state
!= BT_CONNECTED
)
1365 while ((skb
= chan
->tx_send_head
) && (!l2cap_tx_window_full(chan
))) {
1367 if (chan
->remote_max_tx
&&
1368 bt_cb(skb
)->retries
== chan
->remote_max_tx
) {
1369 l2cap_send_disconn_req(chan
->conn
, chan
, ECONNABORTED
);
1373 tx_skb
= skb_clone(skb
, GFP_ATOMIC
);
1375 bt_cb(skb
)->retries
++;
1377 control
= __get_control(chan
, tx_skb
->data
+ L2CAP_HDR_SIZE
);
1378 control
&= __get_sar_mask(chan
);
1380 if (test_and_clear_bit(CONN_SEND_FBIT
, &chan
->conn_state
))
1381 control
|= __set_ctrl_final(chan
);
1383 control
|= __set_reqseq(chan
, chan
->buffer_seq
);
1384 control
|= __set_txseq(chan
, chan
->next_tx_seq
);
1386 __put_control(chan
, control
, tx_skb
->data
+ L2CAP_HDR_SIZE
);
1388 if (chan
->fcs
== L2CAP_FCS_CRC16
) {
1389 fcs
= crc16(0, (u8
*)skb
->data
,
1390 tx_skb
->len
- L2CAP_FCS_SIZE
);
1391 put_unaligned_le16(fcs
, skb
->data
+
1392 tx_skb
->len
- L2CAP_FCS_SIZE
);
1395 l2cap_do_send(chan
, tx_skb
);
1397 __set_retrans_timer(chan
);
1399 bt_cb(skb
)->tx_seq
= chan
->next_tx_seq
;
1401 chan
->next_tx_seq
= __next_seq(chan
, chan
->next_tx_seq
);
1403 if (bt_cb(skb
)->retries
== 1)
1404 chan
->unacked_frames
++;
1406 chan
->frames_sent
++;
1408 if (skb_queue_is_last(&chan
->tx_q
, skb
))
1409 chan
->tx_send_head
= NULL
;
1411 chan
->tx_send_head
= skb_queue_next(&chan
->tx_q
, skb
);
1419 static int l2cap_retransmit_frames(struct l2cap_chan
*chan
)
1423 if (!skb_queue_empty(&chan
->tx_q
))
1424 chan
->tx_send_head
= chan
->tx_q
.next
;
1426 chan
->next_tx_seq
= chan
->expected_ack_seq
;
1427 ret
= l2cap_ertm_send(chan
);
1431 static void l2cap_send_ack(struct l2cap_chan
*chan
)
1435 control
|= __set_reqseq(chan
, chan
->buffer_seq
);
1437 if (test_bit(CONN_LOCAL_BUSY
, &chan
->conn_state
)) {
1438 control
|= __set_ctrl_super(chan
, L2CAP_SUPER_RNR
);
1439 set_bit(CONN_RNR_SENT
, &chan
->conn_state
);
1440 l2cap_send_sframe(chan
, control
);
1444 if (l2cap_ertm_send(chan
) > 0)
1447 control
|= __set_ctrl_super(chan
, L2CAP_SUPER_RR
);
1448 l2cap_send_sframe(chan
, control
);
1451 static void l2cap_send_srejtail(struct l2cap_chan
*chan
)
1453 struct srej_list
*tail
;
1456 control
= __set_ctrl_super(chan
, L2CAP_SUPER_SREJ
);
1457 control
|= __set_ctrl_final(chan
);
1459 tail
= list_entry((&chan
->srej_l
)->prev
, struct srej_list
, list
);
1460 control
|= __set_reqseq(chan
, tail
->tx_seq
);
1462 l2cap_send_sframe(chan
, control
);
1465 static inline int l2cap_skbuff_fromiovec(struct sock
*sk
, struct msghdr
*msg
, int len
, int count
, struct sk_buff
*skb
)
1467 struct l2cap_conn
*conn
= l2cap_pi(sk
)->chan
->conn
;
1468 struct sk_buff
**frag
;
1471 if (memcpy_fromiovec(skb_put(skb
, count
), msg
->msg_iov
, count
))
1477 /* Continuation fragments (no L2CAP header) */
1478 frag
= &skb_shinfo(skb
)->frag_list
;
1480 count
= min_t(unsigned int, conn
->mtu
, len
);
1482 *frag
= bt_skb_send_alloc(sk
, count
, msg
->msg_flags
& MSG_DONTWAIT
, &err
);
1485 if (memcpy_fromiovec(skb_put(*frag
, count
), msg
->msg_iov
, count
))
1488 (*frag
)->priority
= skb
->priority
;
1493 frag
= &(*frag
)->next
;
1499 static struct sk_buff
*l2cap_create_connless_pdu(struct l2cap_chan
*chan
,
1500 struct msghdr
*msg
, size_t len
,
1503 struct sock
*sk
= chan
->sk
;
1504 struct l2cap_conn
*conn
= chan
->conn
;
1505 struct sk_buff
*skb
;
1506 int err
, count
, hlen
= L2CAP_HDR_SIZE
+ L2CAP_PSMLEN_SIZE
;
1507 struct l2cap_hdr
*lh
;
1509 BT_DBG("sk %p len %d priority %u", sk
, (int)len
, priority
);
1511 count
= min_t(unsigned int, (conn
->mtu
- hlen
), len
);
1512 skb
= bt_skb_send_alloc(sk
, count
+ hlen
,
1513 msg
->msg_flags
& MSG_DONTWAIT
, &err
);
1515 return ERR_PTR(err
);
1517 skb
->priority
= priority
;
1519 /* Create L2CAP header */
1520 lh
= (struct l2cap_hdr
*) skb_put(skb
, L2CAP_HDR_SIZE
);
1521 lh
->cid
= cpu_to_le16(chan
->dcid
);
1522 lh
->len
= cpu_to_le16(len
+ (hlen
- L2CAP_HDR_SIZE
));
1523 put_unaligned_le16(chan
->psm
, skb_put(skb
, 2));
1525 err
= l2cap_skbuff_fromiovec(sk
, msg
, len
, count
, skb
);
1526 if (unlikely(err
< 0)) {
1528 return ERR_PTR(err
);
1533 static struct sk_buff
*l2cap_create_basic_pdu(struct l2cap_chan
*chan
,
1534 struct msghdr
*msg
, size_t len
,
1537 struct sock
*sk
= chan
->sk
;
1538 struct l2cap_conn
*conn
= chan
->conn
;
1539 struct sk_buff
*skb
;
1540 int err
, count
, hlen
= L2CAP_HDR_SIZE
;
1541 struct l2cap_hdr
*lh
;
1543 BT_DBG("sk %p len %d", sk
, (int)len
);
1545 count
= min_t(unsigned int, (conn
->mtu
- hlen
), len
);
1546 skb
= bt_skb_send_alloc(sk
, count
+ hlen
,
1547 msg
->msg_flags
& MSG_DONTWAIT
, &err
);
1549 return ERR_PTR(err
);
1551 skb
->priority
= priority
;
1553 /* Create L2CAP header */
1554 lh
= (struct l2cap_hdr
*) skb_put(skb
, L2CAP_HDR_SIZE
);
1555 lh
->cid
= cpu_to_le16(chan
->dcid
);
1556 lh
->len
= cpu_to_le16(len
+ (hlen
- L2CAP_HDR_SIZE
));
1558 err
= l2cap_skbuff_fromiovec(sk
, msg
, len
, count
, skb
);
1559 if (unlikely(err
< 0)) {
1561 return ERR_PTR(err
);
1566 static struct sk_buff
*l2cap_create_iframe_pdu(struct l2cap_chan
*chan
,
1567 struct msghdr
*msg
, size_t len
,
1568 u32 control
, u16 sdulen
)
1570 struct sock
*sk
= chan
->sk
;
1571 struct l2cap_conn
*conn
= chan
->conn
;
1572 struct sk_buff
*skb
;
1573 int err
, count
, hlen
;
1574 struct l2cap_hdr
*lh
;
1576 BT_DBG("sk %p len %d", sk
, (int)len
);
1579 return ERR_PTR(-ENOTCONN
);
1581 if (test_bit(FLAG_EXT_CTRL
, &chan
->flags
))
1582 hlen
= L2CAP_EXT_HDR_SIZE
;
1584 hlen
= L2CAP_ENH_HDR_SIZE
;
1587 hlen
+= L2CAP_SDULEN_SIZE
;
1589 if (chan
->fcs
== L2CAP_FCS_CRC16
)
1590 hlen
+= L2CAP_FCS_SIZE
;
1592 count
= min_t(unsigned int, (conn
->mtu
- hlen
), len
);
1593 skb
= bt_skb_send_alloc(sk
, count
+ hlen
,
1594 msg
->msg_flags
& MSG_DONTWAIT
, &err
);
1596 return ERR_PTR(err
);
1598 /* Create L2CAP header */
1599 lh
= (struct l2cap_hdr
*) skb_put(skb
, L2CAP_HDR_SIZE
);
1600 lh
->cid
= cpu_to_le16(chan
->dcid
);
1601 lh
->len
= cpu_to_le16(len
+ (hlen
- L2CAP_HDR_SIZE
));
1603 __put_control(chan
, control
, skb_put(skb
, __ctrl_size(chan
)));
1606 put_unaligned_le16(sdulen
, skb_put(skb
, L2CAP_SDULEN_SIZE
));
1608 err
= l2cap_skbuff_fromiovec(sk
, msg
, len
, count
, skb
);
1609 if (unlikely(err
< 0)) {
1611 return ERR_PTR(err
);
1614 if (chan
->fcs
== L2CAP_FCS_CRC16
)
1615 put_unaligned_le16(0, skb_put(skb
, L2CAP_FCS_SIZE
));
1617 bt_cb(skb
)->retries
= 0;
1621 static int l2cap_sar_segment_sdu(struct l2cap_chan
*chan
, struct msghdr
*msg
, size_t len
)
1623 struct sk_buff
*skb
;
1624 struct sk_buff_head sar_queue
;
1628 skb_queue_head_init(&sar_queue
);
1629 control
= __set_ctrl_sar(chan
, L2CAP_SAR_START
);
1630 skb
= l2cap_create_iframe_pdu(chan
, msg
, chan
->remote_mps
, control
, len
);
1632 return PTR_ERR(skb
);
1634 __skb_queue_tail(&sar_queue
, skb
);
1635 len
-= chan
->remote_mps
;
1636 size
+= chan
->remote_mps
;
1641 if (len
> chan
->remote_mps
) {
1642 control
= __set_ctrl_sar(chan
, L2CAP_SAR_CONTINUE
);
1643 buflen
= chan
->remote_mps
;
1645 control
= __set_ctrl_sar(chan
, L2CAP_SAR_END
);
1649 skb
= l2cap_create_iframe_pdu(chan
, msg
, buflen
, control
, 0);
1651 skb_queue_purge(&sar_queue
);
1652 return PTR_ERR(skb
);
1655 __skb_queue_tail(&sar_queue
, skb
);
1659 skb_queue_splice_tail(&sar_queue
, &chan
->tx_q
);
1660 if (chan
->tx_send_head
== NULL
)
1661 chan
->tx_send_head
= sar_queue
.next
;
1666 int l2cap_chan_send(struct l2cap_chan
*chan
, struct msghdr
*msg
, size_t len
,
1669 struct sk_buff
*skb
;
1673 /* Connectionless channel */
1674 if (chan
->chan_type
== L2CAP_CHAN_CONN_LESS
) {
1675 skb
= l2cap_create_connless_pdu(chan
, msg
, len
, priority
);
1677 return PTR_ERR(skb
);
1679 l2cap_do_send(chan
, skb
);
1683 switch (chan
->mode
) {
1684 case L2CAP_MODE_BASIC
:
1685 /* Check outgoing MTU */
1686 if (len
> chan
->omtu
)
1689 /* Create a basic PDU */
1690 skb
= l2cap_create_basic_pdu(chan
, msg
, len
, priority
);
1692 return PTR_ERR(skb
);
1694 l2cap_do_send(chan
, skb
);
1698 case L2CAP_MODE_ERTM
:
1699 case L2CAP_MODE_STREAMING
:
1700 /* Entire SDU fits into one PDU */
1701 if (len
<= chan
->remote_mps
) {
1702 control
= __set_ctrl_sar(chan
, L2CAP_SAR_UNSEGMENTED
);
1703 skb
= l2cap_create_iframe_pdu(chan
, msg
, len
, control
,
1706 return PTR_ERR(skb
);
1708 __skb_queue_tail(&chan
->tx_q
, skb
);
1710 if (chan
->tx_send_head
== NULL
)
1711 chan
->tx_send_head
= skb
;
1714 /* Segment SDU into multiples PDUs */
1715 err
= l2cap_sar_segment_sdu(chan
, msg
, len
);
1720 if (chan
->mode
== L2CAP_MODE_STREAMING
) {
1721 l2cap_streaming_send(chan
);
1726 if (test_bit(CONN_REMOTE_BUSY
, &chan
->conn_state
) &&
1727 test_bit(CONN_WAIT_F
, &chan
->conn_state
)) {
1732 err
= l2cap_ertm_send(chan
);
1739 BT_DBG("bad state %1.1x", chan
->mode
);
1746 /* Copy frame to all raw sockets on that connection */
1747 static void l2cap_raw_recv(struct l2cap_conn
*conn
, struct sk_buff
*skb
)
1749 struct sk_buff
*nskb
;
1750 struct l2cap_chan
*chan
;
1752 BT_DBG("conn %p", conn
);
1754 read_lock(&conn
->chan_lock
);
1755 list_for_each_entry(chan
, &conn
->chan_l
, list
) {
1756 struct sock
*sk
= chan
->sk
;
1757 if (chan
->chan_type
!= L2CAP_CHAN_RAW
)
1760 /* Don't send frame to the socket it came from */
1763 nskb
= skb_clone(skb
, GFP_ATOMIC
);
1767 if (chan
->ops
->recv(chan
->data
, nskb
))
1770 read_unlock(&conn
->chan_lock
);
1773 /* ---- L2CAP signalling commands ---- */
1774 static struct sk_buff
*l2cap_build_cmd(struct l2cap_conn
*conn
,
1775 u8 code
, u8 ident
, u16 dlen
, void *data
)
1777 struct sk_buff
*skb
, **frag
;
1778 struct l2cap_cmd_hdr
*cmd
;
1779 struct l2cap_hdr
*lh
;
1782 BT_DBG("conn %p, code 0x%2.2x, ident 0x%2.2x, len %d",
1783 conn
, code
, ident
, dlen
);
1785 len
= L2CAP_HDR_SIZE
+ L2CAP_CMD_HDR_SIZE
+ dlen
;
1786 count
= min_t(unsigned int, conn
->mtu
, len
);
1788 skb
= bt_skb_alloc(count
, GFP_ATOMIC
);
1792 lh
= (struct l2cap_hdr
*) skb_put(skb
, L2CAP_HDR_SIZE
);
1793 lh
->len
= cpu_to_le16(L2CAP_CMD_HDR_SIZE
+ dlen
);
1795 if (conn
->hcon
->type
== LE_LINK
)
1796 lh
->cid
= cpu_to_le16(L2CAP_CID_LE_SIGNALING
);
1798 lh
->cid
= cpu_to_le16(L2CAP_CID_SIGNALING
);
1800 cmd
= (struct l2cap_cmd_hdr
*) skb_put(skb
, L2CAP_CMD_HDR_SIZE
);
1803 cmd
->len
= cpu_to_le16(dlen
);
1806 count
-= L2CAP_HDR_SIZE
+ L2CAP_CMD_HDR_SIZE
;
1807 memcpy(skb_put(skb
, count
), data
, count
);
1813 /* Continuation fragments (no L2CAP header) */
1814 frag
= &skb_shinfo(skb
)->frag_list
;
1816 count
= min_t(unsigned int, conn
->mtu
, len
);
1818 *frag
= bt_skb_alloc(count
, GFP_ATOMIC
);
1822 memcpy(skb_put(*frag
, count
), data
, count
);
1827 frag
= &(*frag
)->next
;
1837 static inline int l2cap_get_conf_opt(void **ptr
, int *type
, int *olen
, unsigned long *val
)
1839 struct l2cap_conf_opt
*opt
= *ptr
;
1842 len
= L2CAP_CONF_OPT_SIZE
+ opt
->len
;
1850 *val
= *((u8
*) opt
->val
);
1854 *val
= get_unaligned_le16(opt
->val
);
1858 *val
= get_unaligned_le32(opt
->val
);
1862 *val
= (unsigned long) opt
->val
;
1866 BT_DBG("type 0x%2.2x len %d val 0x%lx", *type
, opt
->len
, *val
);
1870 static void l2cap_add_conf_opt(void **ptr
, u8 type
, u8 len
, unsigned long val
)
1872 struct l2cap_conf_opt
*opt
= *ptr
;
1874 BT_DBG("type 0x%2.2x len %d val 0x%lx", type
, len
, val
);
1881 *((u8
*) opt
->val
) = val
;
1885 put_unaligned_le16(val
, opt
->val
);
1889 put_unaligned_le32(val
, opt
->val
);
1893 memcpy(opt
->val
, (void *) val
, len
);
1897 *ptr
+= L2CAP_CONF_OPT_SIZE
+ len
;
1900 static void l2cap_add_opt_efs(void **ptr
, struct l2cap_chan
*chan
)
1902 struct l2cap_conf_efs efs
;
1904 switch(chan
->mode
) {
1905 case L2CAP_MODE_ERTM
:
1906 efs
.id
= chan
->local_id
;
1907 efs
.stype
= chan
->local_stype
;
1908 efs
.msdu
= cpu_to_le16(chan
->local_msdu
);
1909 efs
.sdu_itime
= cpu_to_le32(chan
->local_sdu_itime
);
1910 efs
.acc_lat
= cpu_to_le32(L2CAP_DEFAULT_ACC_LAT
);
1911 efs
.flush_to
= cpu_to_le32(L2CAP_DEFAULT_FLUSH_TO
);
1914 case L2CAP_MODE_STREAMING
:
1916 efs
.stype
= L2CAP_SERV_BESTEFFORT
;
1917 efs
.msdu
= cpu_to_le16(chan
->local_msdu
);
1918 efs
.sdu_itime
= cpu_to_le32(chan
->local_sdu_itime
);
1927 l2cap_add_conf_opt(ptr
, L2CAP_CONF_EFS
, sizeof(efs
),
1928 (unsigned long) &efs
);
1931 static void l2cap_ack_timeout(unsigned long arg
)
1933 struct l2cap_chan
*chan
= (void *) arg
;
1935 bh_lock_sock(chan
->sk
);
1936 l2cap_send_ack(chan
);
1937 bh_unlock_sock(chan
->sk
);
1940 static inline void l2cap_ertm_init(struct l2cap_chan
*chan
)
1942 struct sock
*sk
= chan
->sk
;
1944 chan
->expected_ack_seq
= 0;
1945 chan
->unacked_frames
= 0;
1946 chan
->buffer_seq
= 0;
1947 chan
->num_acked
= 0;
1948 chan
->frames_sent
= 0;
1950 setup_timer(&chan
->retrans_timer
, l2cap_retrans_timeout
,
1951 (unsigned long) chan
);
1952 setup_timer(&chan
->monitor_timer
, l2cap_monitor_timeout
,
1953 (unsigned long) chan
);
1954 setup_timer(&chan
->ack_timer
, l2cap_ack_timeout
, (unsigned long) chan
);
1956 skb_queue_head_init(&chan
->srej_q
);
1958 INIT_LIST_HEAD(&chan
->srej_l
);
1961 sk
->sk_backlog_rcv
= l2cap_ertm_data_rcv
;
1964 static inline __u8
l2cap_select_mode(__u8 mode
, __u16 remote_feat_mask
)
1967 case L2CAP_MODE_STREAMING
:
1968 case L2CAP_MODE_ERTM
:
1969 if (l2cap_mode_supported(mode
, remote_feat_mask
))
1973 return L2CAP_MODE_BASIC
;
1977 static inline bool __l2cap_ews_supported(struct l2cap_chan
*chan
)
1979 return enable_hs
&& chan
->conn
->feat_mask
& L2CAP_FEAT_EXT_WINDOW
;
1982 static inline bool __l2cap_efs_supported(struct l2cap_chan
*chan
)
1984 return enable_hs
&& chan
->conn
->feat_mask
& L2CAP_FEAT_EXT_FLOW
;
1987 static inline void l2cap_txwin_setup(struct l2cap_chan
*chan
)
1989 if (chan
->tx_win
> L2CAP_DEFAULT_TX_WINDOW
&&
1990 __l2cap_ews_supported(chan
)) {
1991 /* use extended control field */
1992 set_bit(FLAG_EXT_CTRL
, &chan
->flags
);
1993 chan
->tx_win_max
= L2CAP_DEFAULT_EXT_WINDOW
;
1995 chan
->tx_win
= min_t(u16
, chan
->tx_win
,
1996 L2CAP_DEFAULT_TX_WINDOW
);
1997 chan
->tx_win_max
= L2CAP_DEFAULT_TX_WINDOW
;
2001 static int l2cap_build_conf_req(struct l2cap_chan
*chan
, void *data
)
2003 struct l2cap_conf_req
*req
= data
;
2004 struct l2cap_conf_rfc rfc
= { .mode
= chan
->mode
};
2005 void *ptr
= req
->data
;
2008 BT_DBG("chan %p", chan
);
2010 if (chan
->num_conf_req
|| chan
->num_conf_rsp
)
2013 switch (chan
->mode
) {
2014 case L2CAP_MODE_STREAMING
:
2015 case L2CAP_MODE_ERTM
:
2016 if (test_bit(CONF_STATE2_DEVICE
, &chan
->conf_state
))
2019 if (__l2cap_efs_supported(chan
))
2020 set_bit(FLAG_EFS_ENABLE
, &chan
->flags
);
2024 chan
->mode
= l2cap_select_mode(rfc
.mode
, chan
->conn
->feat_mask
);
2029 if (chan
->imtu
!= L2CAP_DEFAULT_MTU
)
2030 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_MTU
, 2, chan
->imtu
);
2032 switch (chan
->mode
) {
2033 case L2CAP_MODE_BASIC
:
2034 if (!(chan
->conn
->feat_mask
& L2CAP_FEAT_ERTM
) &&
2035 !(chan
->conn
->feat_mask
& L2CAP_FEAT_STREAMING
))
2038 rfc
.mode
= L2CAP_MODE_BASIC
;
2040 rfc
.max_transmit
= 0;
2041 rfc
.retrans_timeout
= 0;
2042 rfc
.monitor_timeout
= 0;
2043 rfc
.max_pdu_size
= 0;
2045 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_RFC
, sizeof(rfc
),
2046 (unsigned long) &rfc
);
2049 case L2CAP_MODE_ERTM
:
2050 rfc
.mode
= L2CAP_MODE_ERTM
;
2051 rfc
.max_transmit
= chan
->max_tx
;
2052 rfc
.retrans_timeout
= 0;
2053 rfc
.monitor_timeout
= 0;
2055 size
= min_t(u16
, L2CAP_DEFAULT_MAX_PDU_SIZE
, chan
->conn
->mtu
-
2056 L2CAP_EXT_HDR_SIZE
-
2059 rfc
.max_pdu_size
= cpu_to_le16(size
);
2061 l2cap_txwin_setup(chan
);
2063 rfc
.txwin_size
= min_t(u16
, chan
->tx_win
,
2064 L2CAP_DEFAULT_TX_WINDOW
);
2066 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_RFC
, sizeof(rfc
),
2067 (unsigned long) &rfc
);
2069 if (test_bit(FLAG_EFS_ENABLE
, &chan
->flags
))
2070 l2cap_add_opt_efs(&ptr
, chan
);
2072 if (!(chan
->conn
->feat_mask
& L2CAP_FEAT_FCS
))
2075 if (chan
->fcs
== L2CAP_FCS_NONE
||
2076 test_bit(CONF_NO_FCS_RECV
, &chan
->conf_state
)) {
2077 chan
->fcs
= L2CAP_FCS_NONE
;
2078 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_FCS
, 1, chan
->fcs
);
2081 if (test_bit(FLAG_EXT_CTRL
, &chan
->flags
))
2082 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_EWS
, 2,
2086 case L2CAP_MODE_STREAMING
:
2087 rfc
.mode
= L2CAP_MODE_STREAMING
;
2089 rfc
.max_transmit
= 0;
2090 rfc
.retrans_timeout
= 0;
2091 rfc
.monitor_timeout
= 0;
2093 size
= min_t(u16
, L2CAP_DEFAULT_MAX_PDU_SIZE
, chan
->conn
->mtu
-
2094 L2CAP_EXT_HDR_SIZE
-
2097 rfc
.max_pdu_size
= cpu_to_le16(size
);
2099 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_RFC
, sizeof(rfc
),
2100 (unsigned long) &rfc
);
2102 if (test_bit(FLAG_EFS_ENABLE
, &chan
->flags
))
2103 l2cap_add_opt_efs(&ptr
, chan
);
2105 if (!(chan
->conn
->feat_mask
& L2CAP_FEAT_FCS
))
2108 if (chan
->fcs
== L2CAP_FCS_NONE
||
2109 test_bit(CONF_NO_FCS_RECV
, &chan
->conf_state
)) {
2110 chan
->fcs
= L2CAP_FCS_NONE
;
2111 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_FCS
, 1, chan
->fcs
);
2116 req
->dcid
= cpu_to_le16(chan
->dcid
);
2117 req
->flags
= cpu_to_le16(0);
2122 static int l2cap_parse_conf_req(struct l2cap_chan
*chan
, void *data
)
2124 struct l2cap_conf_rsp
*rsp
= data
;
2125 void *ptr
= rsp
->data
;
2126 void *req
= chan
->conf_req
;
2127 int len
= chan
->conf_len
;
2128 int type
, hint
, olen
;
2130 struct l2cap_conf_rfc rfc
= { .mode
= L2CAP_MODE_BASIC
};
2131 struct l2cap_conf_efs efs
;
2133 u16 mtu
= L2CAP_DEFAULT_MTU
;
2134 u16 result
= L2CAP_CONF_SUCCESS
;
2137 BT_DBG("chan %p", chan
);
2139 while (len
>= L2CAP_CONF_OPT_SIZE
) {
2140 len
-= l2cap_get_conf_opt(&req
, &type
, &olen
, &val
);
2142 hint
= type
& L2CAP_CONF_HINT
;
2143 type
&= L2CAP_CONF_MASK
;
2146 case L2CAP_CONF_MTU
:
2150 case L2CAP_CONF_FLUSH_TO
:
2151 chan
->flush_to
= val
;
2154 case L2CAP_CONF_QOS
:
2157 case L2CAP_CONF_RFC
:
2158 if (olen
== sizeof(rfc
))
2159 memcpy(&rfc
, (void *) val
, olen
);
2162 case L2CAP_CONF_FCS
:
2163 if (val
== L2CAP_FCS_NONE
)
2164 set_bit(CONF_NO_FCS_RECV
, &chan
->conf_state
);
2167 case L2CAP_CONF_EFS
:
2169 if (olen
== sizeof(efs
))
2170 memcpy(&efs
, (void *) val
, olen
);
2173 case L2CAP_CONF_EWS
:
2175 return -ECONNREFUSED
;
2177 set_bit(FLAG_EXT_CTRL
, &chan
->flags
);
2178 set_bit(CONF_EWS_RECV
, &chan
->conf_state
);
2179 chan
->tx_win_max
= L2CAP_DEFAULT_EXT_WINDOW
;
2180 chan
->remote_tx_win
= val
;
2187 result
= L2CAP_CONF_UNKNOWN
;
2188 *((u8
*) ptr
++) = type
;
2193 if (chan
->num_conf_rsp
|| chan
->num_conf_req
> 1)
2196 switch (chan
->mode
) {
2197 case L2CAP_MODE_STREAMING
:
2198 case L2CAP_MODE_ERTM
:
2199 if (!test_bit(CONF_STATE2_DEVICE
, &chan
->conf_state
)) {
2200 chan
->mode
= l2cap_select_mode(rfc
.mode
,
2201 chan
->conn
->feat_mask
);
2206 if (__l2cap_efs_supported(chan
))
2207 set_bit(FLAG_EFS_ENABLE
, &chan
->flags
);
2209 return -ECONNREFUSED
;
2212 if (chan
->mode
!= rfc
.mode
)
2213 return -ECONNREFUSED
;
2219 if (chan
->mode
!= rfc
.mode
) {
2220 result
= L2CAP_CONF_UNACCEPT
;
2221 rfc
.mode
= chan
->mode
;
2223 if (chan
->num_conf_rsp
== 1)
2224 return -ECONNREFUSED
;
2226 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_RFC
,
2227 sizeof(rfc
), (unsigned long) &rfc
);
2230 if (result
== L2CAP_CONF_SUCCESS
) {
2231 /* Configure output options and let the other side know
2232 * which ones we don't like. */
2234 if (mtu
< L2CAP_DEFAULT_MIN_MTU
)
2235 result
= L2CAP_CONF_UNACCEPT
;
2238 set_bit(CONF_MTU_DONE
, &chan
->conf_state
);
2240 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_MTU
, 2, chan
->omtu
);
2243 if (chan
->local_stype
!= L2CAP_SERV_NOTRAFIC
&&
2244 efs
.stype
!= L2CAP_SERV_NOTRAFIC
&&
2245 efs
.stype
!= chan
->local_stype
) {
2247 result
= L2CAP_CONF_UNACCEPT
;
2249 if (chan
->num_conf_req
>= 1)
2250 return -ECONNREFUSED
;
2252 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_EFS
,
2254 (unsigned long) &efs
);
2256 /* Send PENDING Conf Rsp */
2257 result
= L2CAP_CONF_PENDING
;
2258 set_bit(CONF_LOC_CONF_PEND
, &chan
->conf_state
);
2263 case L2CAP_MODE_BASIC
:
2264 chan
->fcs
= L2CAP_FCS_NONE
;
2265 set_bit(CONF_MODE_DONE
, &chan
->conf_state
);
2268 case L2CAP_MODE_ERTM
:
2269 if (!test_bit(CONF_EWS_RECV
, &chan
->conf_state
))
2270 chan
->remote_tx_win
= rfc
.txwin_size
;
2272 rfc
.txwin_size
= L2CAP_DEFAULT_TX_WINDOW
;
2274 chan
->remote_max_tx
= rfc
.max_transmit
;
2276 size
= min_t(u16
, le16_to_cpu(rfc
.max_pdu_size
),
2278 L2CAP_EXT_HDR_SIZE
-
2281 rfc
.max_pdu_size
= cpu_to_le16(size
);
2282 chan
->remote_mps
= size
;
2284 rfc
.retrans_timeout
=
2285 le16_to_cpu(L2CAP_DEFAULT_RETRANS_TO
);
2286 rfc
.monitor_timeout
=
2287 le16_to_cpu(L2CAP_DEFAULT_MONITOR_TO
);
2289 set_bit(CONF_MODE_DONE
, &chan
->conf_state
);
2291 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_RFC
,
2292 sizeof(rfc
), (unsigned long) &rfc
);
2294 if (test_bit(FLAG_EFS_ENABLE
, &chan
->flags
)) {
2295 chan
->remote_id
= efs
.id
;
2296 chan
->remote_stype
= efs
.stype
;
2297 chan
->remote_msdu
= le16_to_cpu(efs
.msdu
);
2298 chan
->remote_flush_to
=
2299 le32_to_cpu(efs
.flush_to
);
2300 chan
->remote_acc_lat
=
2301 le32_to_cpu(efs
.acc_lat
);
2302 chan
->remote_sdu_itime
=
2303 le32_to_cpu(efs
.sdu_itime
);
2304 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_EFS
,
2305 sizeof(efs
), (unsigned long) &efs
);
2309 case L2CAP_MODE_STREAMING
:
2310 size
= min_t(u16
, le16_to_cpu(rfc
.max_pdu_size
),
2312 L2CAP_EXT_HDR_SIZE
-
2315 rfc
.max_pdu_size
= cpu_to_le16(size
);
2316 chan
->remote_mps
= size
;
2318 set_bit(CONF_MODE_DONE
, &chan
->conf_state
);
2320 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_RFC
,
2321 sizeof(rfc
), (unsigned long) &rfc
);
2326 result
= L2CAP_CONF_UNACCEPT
;
2328 memset(&rfc
, 0, sizeof(rfc
));
2329 rfc
.mode
= chan
->mode
;
2332 if (result
== L2CAP_CONF_SUCCESS
)
2333 set_bit(CONF_OUTPUT_DONE
, &chan
->conf_state
);
2335 rsp
->scid
= cpu_to_le16(chan
->dcid
);
2336 rsp
->result
= cpu_to_le16(result
);
2337 rsp
->flags
= cpu_to_le16(0x0000);
2342 static int l2cap_parse_conf_rsp(struct l2cap_chan
*chan
, void *rsp
, int len
, void *data
, u16
*result
)
2344 struct l2cap_conf_req
*req
= data
;
2345 void *ptr
= req
->data
;
2348 struct l2cap_conf_rfc rfc
;
2349 struct l2cap_conf_efs efs
;
2351 BT_DBG("chan %p, rsp %p, len %d, req %p", chan
, rsp
, len
, data
);
2353 while (len
>= L2CAP_CONF_OPT_SIZE
) {
2354 len
-= l2cap_get_conf_opt(&rsp
, &type
, &olen
, &val
);
2357 case L2CAP_CONF_MTU
:
2358 if (val
< L2CAP_DEFAULT_MIN_MTU
) {
2359 *result
= L2CAP_CONF_UNACCEPT
;
2360 chan
->imtu
= L2CAP_DEFAULT_MIN_MTU
;
2363 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_MTU
, 2, chan
->imtu
);
2366 case L2CAP_CONF_FLUSH_TO
:
2367 chan
->flush_to
= val
;
2368 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_FLUSH_TO
,
2372 case L2CAP_CONF_RFC
:
2373 if (olen
== sizeof(rfc
))
2374 memcpy(&rfc
, (void *)val
, olen
);
2376 if (test_bit(CONF_STATE2_DEVICE
, &chan
->conf_state
) &&
2377 rfc
.mode
!= chan
->mode
)
2378 return -ECONNREFUSED
;
2382 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_RFC
,
2383 sizeof(rfc
), (unsigned long) &rfc
);
2386 case L2CAP_CONF_EWS
:
2387 chan
->tx_win
= min_t(u16
, val
,
2388 L2CAP_DEFAULT_EXT_WINDOW
);
2389 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_EWS
, 2,
2393 case L2CAP_CONF_EFS
:
2394 if (olen
== sizeof(efs
))
2395 memcpy(&efs
, (void *)val
, olen
);
2397 if (chan
->local_stype
!= L2CAP_SERV_NOTRAFIC
&&
2398 efs
.stype
!= L2CAP_SERV_NOTRAFIC
&&
2399 efs
.stype
!= chan
->local_stype
)
2400 return -ECONNREFUSED
;
2402 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_EFS
,
2403 sizeof(efs
), (unsigned long) &efs
);
2408 if (chan
->mode
== L2CAP_MODE_BASIC
&& chan
->mode
!= rfc
.mode
)
2409 return -ECONNREFUSED
;
2411 chan
->mode
= rfc
.mode
;
2413 if (*result
== L2CAP_CONF_SUCCESS
|| *result
== L2CAP_CONF_PENDING
) {
2415 case L2CAP_MODE_ERTM
:
2416 chan
->retrans_timeout
= le16_to_cpu(rfc
.retrans_timeout
);
2417 chan
->monitor_timeout
= le16_to_cpu(rfc
.monitor_timeout
);
2418 chan
->mps
= le16_to_cpu(rfc
.max_pdu_size
);
2420 if (test_bit(FLAG_EFS_ENABLE
, &chan
->flags
)) {
2421 chan
->local_msdu
= le16_to_cpu(efs
.msdu
);
2422 chan
->local_sdu_itime
=
2423 le32_to_cpu(efs
.sdu_itime
);
2424 chan
->local_acc_lat
= le32_to_cpu(efs
.acc_lat
);
2425 chan
->local_flush_to
=
2426 le32_to_cpu(efs
.flush_to
);
2430 case L2CAP_MODE_STREAMING
:
2431 chan
->mps
= le16_to_cpu(rfc
.max_pdu_size
);
2435 req
->dcid
= cpu_to_le16(chan
->dcid
);
2436 req
->flags
= cpu_to_le16(0x0000);
2441 static int l2cap_build_conf_rsp(struct l2cap_chan
*chan
, void *data
, u16 result
, u16 flags
)
2443 struct l2cap_conf_rsp
*rsp
= data
;
2444 void *ptr
= rsp
->data
;
2446 BT_DBG("chan %p", chan
);
2448 rsp
->scid
= cpu_to_le16(chan
->dcid
);
2449 rsp
->result
= cpu_to_le16(result
);
2450 rsp
->flags
= cpu_to_le16(flags
);
2455 void __l2cap_connect_rsp_defer(struct l2cap_chan
*chan
)
2457 struct l2cap_conn_rsp rsp
;
2458 struct l2cap_conn
*conn
= chan
->conn
;
2461 rsp
.scid
= cpu_to_le16(chan
->dcid
);
2462 rsp
.dcid
= cpu_to_le16(chan
->scid
);
2463 rsp
.result
= cpu_to_le16(L2CAP_CR_SUCCESS
);
2464 rsp
.status
= cpu_to_le16(L2CAP_CS_NO_INFO
);
2465 l2cap_send_cmd(conn
, chan
->ident
,
2466 L2CAP_CONN_RSP
, sizeof(rsp
), &rsp
);
2468 if (test_and_set_bit(CONF_REQ_SENT
, &chan
->conf_state
))
2471 l2cap_send_cmd(conn
, l2cap_get_ident(conn
), L2CAP_CONF_REQ
,
2472 l2cap_build_conf_req(chan
, buf
), buf
);
2473 chan
->num_conf_req
++;
2476 static void l2cap_conf_rfc_get(struct l2cap_chan
*chan
, void *rsp
, int len
)
2480 struct l2cap_conf_rfc rfc
;
2482 BT_DBG("chan %p, rsp %p, len %d", chan
, rsp
, len
);
2484 if ((chan
->mode
!= L2CAP_MODE_ERTM
) && (chan
->mode
!= L2CAP_MODE_STREAMING
))
2487 while (len
>= L2CAP_CONF_OPT_SIZE
) {
2488 len
-= l2cap_get_conf_opt(&rsp
, &type
, &olen
, &val
);
2491 case L2CAP_CONF_RFC
:
2492 if (olen
== sizeof(rfc
))
2493 memcpy(&rfc
, (void *)val
, olen
);
2500 case L2CAP_MODE_ERTM
:
2501 chan
->retrans_timeout
= le16_to_cpu(rfc
.retrans_timeout
);
2502 chan
->monitor_timeout
= le16_to_cpu(rfc
.monitor_timeout
);
2503 chan
->mps
= le16_to_cpu(rfc
.max_pdu_size
);
2505 case L2CAP_MODE_STREAMING
:
2506 chan
->mps
= le16_to_cpu(rfc
.max_pdu_size
);
2510 static inline int l2cap_command_rej(struct l2cap_conn
*conn
, struct l2cap_cmd_hdr
*cmd
, u8
*data
)
2512 struct l2cap_cmd_rej_unk
*rej
= (struct l2cap_cmd_rej_unk
*) data
;
2514 if (rej
->reason
!= L2CAP_REJ_NOT_UNDERSTOOD
)
2517 if ((conn
->info_state
& L2CAP_INFO_FEAT_MASK_REQ_SENT
) &&
2518 cmd
->ident
== conn
->info_ident
) {
2519 del_timer(&conn
->info_timer
);
2521 conn
->info_state
|= L2CAP_INFO_FEAT_MASK_REQ_DONE
;
2522 conn
->info_ident
= 0;
2524 l2cap_conn_start(conn
);
2530 static inline int l2cap_connect_req(struct l2cap_conn
*conn
, struct l2cap_cmd_hdr
*cmd
, u8
*data
)
2532 struct l2cap_conn_req
*req
= (struct l2cap_conn_req
*) data
;
2533 struct l2cap_conn_rsp rsp
;
2534 struct l2cap_chan
*chan
= NULL
, *pchan
;
2535 struct sock
*parent
, *sk
= NULL
;
2536 int result
, status
= L2CAP_CS_NO_INFO
;
2538 u16 dcid
= 0, scid
= __le16_to_cpu(req
->scid
);
2539 __le16 psm
= req
->psm
;
2541 BT_DBG("psm 0x%2.2x scid 0x%4.4x", psm
, scid
);
2543 /* Check if we have socket listening on psm */
2544 pchan
= l2cap_global_chan_by_psm(BT_LISTEN
, psm
, conn
->src
);
2546 result
= L2CAP_CR_BAD_PSM
;
2552 bh_lock_sock(parent
);
2554 /* Check if the ACL is secure enough (if not SDP) */
2555 if (psm
!= cpu_to_le16(0x0001) &&
2556 !hci_conn_check_link_mode(conn
->hcon
)) {
2557 conn
->disc_reason
= HCI_ERROR_AUTH_FAILURE
;
2558 result
= L2CAP_CR_SEC_BLOCK
;
2562 result
= L2CAP_CR_NO_MEM
;
2564 /* Check for backlog size */
2565 if (sk_acceptq_is_full(parent
)) {
2566 BT_DBG("backlog full %d", parent
->sk_ack_backlog
);
2570 chan
= pchan
->ops
->new_connection(pchan
->data
);
2576 write_lock_bh(&conn
->chan_lock
);
2578 /* Check if we already have channel with that dcid */
2579 if (__l2cap_get_chan_by_dcid(conn
, scid
)) {
2580 write_unlock_bh(&conn
->chan_lock
);
2581 sock_set_flag(sk
, SOCK_ZAPPED
);
2582 chan
->ops
->close(chan
->data
);
2586 hci_conn_hold(conn
->hcon
);
2588 bacpy(&bt_sk(sk
)->src
, conn
->src
);
2589 bacpy(&bt_sk(sk
)->dst
, conn
->dst
);
2593 bt_accept_enqueue(parent
, sk
);
2595 __l2cap_chan_add(conn
, chan
);
2599 __set_chan_timer(chan
, sk
->sk_sndtimeo
);
2601 chan
->ident
= cmd
->ident
;
2603 if (conn
->info_state
& L2CAP_INFO_FEAT_MASK_REQ_DONE
) {
2604 if (l2cap_chan_check_security(chan
)) {
2605 if (bt_sk(sk
)->defer_setup
) {
2606 l2cap_state_change(chan
, BT_CONNECT2
);
2607 result
= L2CAP_CR_PEND
;
2608 status
= L2CAP_CS_AUTHOR_PEND
;
2609 parent
->sk_data_ready(parent
, 0);
2611 l2cap_state_change(chan
, BT_CONFIG
);
2612 result
= L2CAP_CR_SUCCESS
;
2613 status
= L2CAP_CS_NO_INFO
;
2616 l2cap_state_change(chan
, BT_CONNECT2
);
2617 result
= L2CAP_CR_PEND
;
2618 status
= L2CAP_CS_AUTHEN_PEND
;
2621 l2cap_state_change(chan
, BT_CONNECT2
);
2622 result
= L2CAP_CR_PEND
;
2623 status
= L2CAP_CS_NO_INFO
;
2626 write_unlock_bh(&conn
->chan_lock
);
2629 bh_unlock_sock(parent
);
2632 rsp
.scid
= cpu_to_le16(scid
);
2633 rsp
.dcid
= cpu_to_le16(dcid
);
2634 rsp
.result
= cpu_to_le16(result
);
2635 rsp
.status
= cpu_to_le16(status
);
2636 l2cap_send_cmd(conn
, cmd
->ident
, L2CAP_CONN_RSP
, sizeof(rsp
), &rsp
);
2638 if (result
== L2CAP_CR_PEND
&& status
== L2CAP_CS_NO_INFO
) {
2639 struct l2cap_info_req info
;
2640 info
.type
= cpu_to_le16(L2CAP_IT_FEAT_MASK
);
2642 conn
->info_state
|= L2CAP_INFO_FEAT_MASK_REQ_SENT
;
2643 conn
->info_ident
= l2cap_get_ident(conn
);
2645 mod_timer(&conn
->info_timer
, jiffies
+
2646 msecs_to_jiffies(L2CAP_INFO_TIMEOUT
));
2648 l2cap_send_cmd(conn
, conn
->info_ident
,
2649 L2CAP_INFO_REQ
, sizeof(info
), &info
);
2652 if (chan
&& !test_bit(CONF_REQ_SENT
, &chan
->conf_state
) &&
2653 result
== L2CAP_CR_SUCCESS
) {
2655 set_bit(CONF_REQ_SENT
, &chan
->conf_state
);
2656 l2cap_send_cmd(conn
, l2cap_get_ident(conn
), L2CAP_CONF_REQ
,
2657 l2cap_build_conf_req(chan
, buf
), buf
);
2658 chan
->num_conf_req
++;
2664 static inline int l2cap_connect_rsp(struct l2cap_conn
*conn
, struct l2cap_cmd_hdr
*cmd
, u8
*data
)
2666 struct l2cap_conn_rsp
*rsp
= (struct l2cap_conn_rsp
*) data
;
2667 u16 scid
, dcid
, result
, status
;
2668 struct l2cap_chan
*chan
;
2672 scid
= __le16_to_cpu(rsp
->scid
);
2673 dcid
= __le16_to_cpu(rsp
->dcid
);
2674 result
= __le16_to_cpu(rsp
->result
);
2675 status
= __le16_to_cpu(rsp
->status
);
2677 BT_DBG("dcid 0x%4.4x scid 0x%4.4x result 0x%2.2x status 0x%2.2x", dcid
, scid
, result
, status
);
2680 chan
= l2cap_get_chan_by_scid(conn
, scid
);
2684 chan
= l2cap_get_chan_by_ident(conn
, cmd
->ident
);
2692 case L2CAP_CR_SUCCESS
:
2693 l2cap_state_change(chan
, BT_CONFIG
);
2696 clear_bit(CONF_CONNECT_PEND
, &chan
->conf_state
);
2698 if (test_and_set_bit(CONF_REQ_SENT
, &chan
->conf_state
))
2701 l2cap_send_cmd(conn
, l2cap_get_ident(conn
), L2CAP_CONF_REQ
,
2702 l2cap_build_conf_req(chan
, req
), req
);
2703 chan
->num_conf_req
++;
2707 set_bit(CONF_CONNECT_PEND
, &chan
->conf_state
);
2711 /* don't delete l2cap channel if sk is owned by user */
2712 if (sock_owned_by_user(sk
)) {
2713 l2cap_state_change(chan
, BT_DISCONN
);
2714 __clear_chan_timer(chan
);
2715 __set_chan_timer(chan
, L2CAP_DISC_TIMEOUT
);
2719 l2cap_chan_del(chan
, ECONNREFUSED
);
2727 static inline void set_default_fcs(struct l2cap_chan
*chan
)
2729 /* FCS is enabled only in ERTM or streaming mode, if one or both
2732 if (chan
->mode
!= L2CAP_MODE_ERTM
&& chan
->mode
!= L2CAP_MODE_STREAMING
)
2733 chan
->fcs
= L2CAP_FCS_NONE
;
2734 else if (!test_bit(CONF_NO_FCS_RECV
, &chan
->conf_state
))
2735 chan
->fcs
= L2CAP_FCS_CRC16
;
2738 static inline int l2cap_config_req(struct l2cap_conn
*conn
, struct l2cap_cmd_hdr
*cmd
, u16 cmd_len
, u8
*data
)
2740 struct l2cap_conf_req
*req
= (struct l2cap_conf_req
*) data
;
2743 struct l2cap_chan
*chan
;
2747 dcid
= __le16_to_cpu(req
->dcid
);
2748 flags
= __le16_to_cpu(req
->flags
);
2750 BT_DBG("dcid 0x%4.4x flags 0x%2.2x", dcid
, flags
);
2752 chan
= l2cap_get_chan_by_scid(conn
, dcid
);
2758 if (chan
->state
!= BT_CONFIG
&& chan
->state
!= BT_CONNECT2
) {
2759 struct l2cap_cmd_rej_cid rej
;
2761 rej
.reason
= cpu_to_le16(L2CAP_REJ_INVALID_CID
);
2762 rej
.scid
= cpu_to_le16(chan
->scid
);
2763 rej
.dcid
= cpu_to_le16(chan
->dcid
);
2765 l2cap_send_cmd(conn
, cmd
->ident
, L2CAP_COMMAND_REJ
,
2770 /* Reject if config buffer is too small. */
2771 len
= cmd_len
- sizeof(*req
);
2772 if (len
< 0 || chan
->conf_len
+ len
> sizeof(chan
->conf_req
)) {
2773 l2cap_send_cmd(conn
, cmd
->ident
, L2CAP_CONF_RSP
,
2774 l2cap_build_conf_rsp(chan
, rsp
,
2775 L2CAP_CONF_REJECT
, flags
), rsp
);
2780 memcpy(chan
->conf_req
+ chan
->conf_len
, req
->data
, len
);
2781 chan
->conf_len
+= len
;
2783 if (flags
& 0x0001) {
2784 /* Incomplete config. Send empty response. */
2785 l2cap_send_cmd(conn
, cmd
->ident
, L2CAP_CONF_RSP
,
2786 l2cap_build_conf_rsp(chan
, rsp
,
2787 L2CAP_CONF_SUCCESS
, 0x0001), rsp
);
2791 /* Complete config. */
2792 len
= l2cap_parse_conf_req(chan
, rsp
);
2794 l2cap_send_disconn_req(conn
, chan
, ECONNRESET
);
2798 l2cap_send_cmd(conn
, cmd
->ident
, L2CAP_CONF_RSP
, len
, rsp
);
2799 chan
->num_conf_rsp
++;
2801 /* Reset config buffer. */
2804 if (!test_bit(CONF_OUTPUT_DONE
, &chan
->conf_state
))
2807 if (test_bit(CONF_INPUT_DONE
, &chan
->conf_state
)) {
2808 set_default_fcs(chan
);
2810 l2cap_state_change(chan
, BT_CONNECTED
);
2812 chan
->next_tx_seq
= 0;
2813 chan
->expected_tx_seq
= 0;
2814 skb_queue_head_init(&chan
->tx_q
);
2815 if (chan
->mode
== L2CAP_MODE_ERTM
)
2816 l2cap_ertm_init(chan
);
2818 l2cap_chan_ready(sk
);
2822 if (!test_and_set_bit(CONF_REQ_SENT
, &chan
->conf_state
)) {
2824 l2cap_send_cmd(conn
, l2cap_get_ident(conn
), L2CAP_CONF_REQ
,
2825 l2cap_build_conf_req(chan
, buf
), buf
);
2826 chan
->num_conf_req
++;
2829 /* Got Conf Rsp PENDING from remote side and asume we sent
2830 Conf Rsp PENDING in the code above */
2831 if (test_bit(CONF_REM_CONF_PEND
, &chan
->conf_state
) &&
2832 test_bit(CONF_LOC_CONF_PEND
, &chan
->conf_state
)) {
2834 /* check compatibility */
2836 clear_bit(CONF_LOC_CONF_PEND
, &chan
->conf_state
);
2837 set_bit(CONF_OUTPUT_DONE
, &chan
->conf_state
);
2839 l2cap_send_cmd(conn
, cmd
->ident
, L2CAP_CONF_RSP
,
2840 l2cap_build_conf_rsp(chan
, rsp
,
2841 L2CAP_CONF_SUCCESS
, 0x0000), rsp
);
2849 static inline int l2cap_config_rsp(struct l2cap_conn
*conn
, struct l2cap_cmd_hdr
*cmd
, u8
*data
)
2851 struct l2cap_conf_rsp
*rsp
= (struct l2cap_conf_rsp
*)data
;
2852 u16 scid
, flags
, result
;
2853 struct l2cap_chan
*chan
;
2855 int len
= cmd
->len
- sizeof(*rsp
);
2857 scid
= __le16_to_cpu(rsp
->scid
);
2858 flags
= __le16_to_cpu(rsp
->flags
);
2859 result
= __le16_to_cpu(rsp
->result
);
2861 BT_DBG("scid 0x%4.4x flags 0x%2.2x result 0x%2.2x",
2862 scid
, flags
, result
);
2864 chan
= l2cap_get_chan_by_scid(conn
, scid
);
2871 case L2CAP_CONF_SUCCESS
:
2872 l2cap_conf_rfc_get(chan
, rsp
->data
, len
);
2873 clear_bit(CONF_REM_CONF_PEND
, &chan
->conf_state
);
2876 case L2CAP_CONF_PENDING
:
2877 set_bit(CONF_REM_CONF_PEND
, &chan
->conf_state
);
2879 if (test_bit(CONF_LOC_CONF_PEND
, &chan
->conf_state
)) {
2882 len
= l2cap_parse_conf_rsp(chan
, rsp
->data
, len
,
2885 l2cap_send_disconn_req(conn
, chan
, ECONNRESET
);
2889 /* check compatibility */
2891 clear_bit(CONF_LOC_CONF_PEND
, &chan
->conf_state
);
2892 set_bit(CONF_OUTPUT_DONE
, &chan
->conf_state
);
2894 l2cap_send_cmd(conn
, cmd
->ident
, L2CAP_CONF_RSP
,
2895 l2cap_build_conf_rsp(chan
, buf
,
2896 L2CAP_CONF_SUCCESS
, 0x0000), buf
);
2900 case L2CAP_CONF_UNACCEPT
:
2901 if (chan
->num_conf_rsp
<= L2CAP_CONF_MAX_CONF_RSP
) {
2904 if (len
> sizeof(req
) - sizeof(struct l2cap_conf_req
)) {
2905 l2cap_send_disconn_req(conn
, chan
, ECONNRESET
);
2909 /* throw out any old stored conf requests */
2910 result
= L2CAP_CONF_SUCCESS
;
2911 len
= l2cap_parse_conf_rsp(chan
, rsp
->data
, len
,
2914 l2cap_send_disconn_req(conn
, chan
, ECONNRESET
);
2918 l2cap_send_cmd(conn
, l2cap_get_ident(conn
),
2919 L2CAP_CONF_REQ
, len
, req
);
2920 chan
->num_conf_req
++;
2921 if (result
!= L2CAP_CONF_SUCCESS
)
2927 sk
->sk_err
= ECONNRESET
;
2928 __set_chan_timer(chan
, L2CAP_DISC_REJ_TIMEOUT
);
2929 l2cap_send_disconn_req(conn
, chan
, ECONNRESET
);
2936 set_bit(CONF_INPUT_DONE
, &chan
->conf_state
);
2938 if (test_bit(CONF_OUTPUT_DONE
, &chan
->conf_state
)) {
2939 set_default_fcs(chan
);
2941 l2cap_state_change(chan
, BT_CONNECTED
);
2942 chan
->next_tx_seq
= 0;
2943 chan
->expected_tx_seq
= 0;
2944 skb_queue_head_init(&chan
->tx_q
);
2945 if (chan
->mode
== L2CAP_MODE_ERTM
)
2946 l2cap_ertm_init(chan
);
2948 l2cap_chan_ready(sk
);
2956 static inline int l2cap_disconnect_req(struct l2cap_conn
*conn
, struct l2cap_cmd_hdr
*cmd
, u8
*data
)
2958 struct l2cap_disconn_req
*req
= (struct l2cap_disconn_req
*) data
;
2959 struct l2cap_disconn_rsp rsp
;
2961 struct l2cap_chan
*chan
;
2964 scid
= __le16_to_cpu(req
->scid
);
2965 dcid
= __le16_to_cpu(req
->dcid
);
2967 BT_DBG("scid 0x%4.4x dcid 0x%4.4x", scid
, dcid
);
2969 chan
= l2cap_get_chan_by_scid(conn
, dcid
);
2975 rsp
.dcid
= cpu_to_le16(chan
->scid
);
2976 rsp
.scid
= cpu_to_le16(chan
->dcid
);
2977 l2cap_send_cmd(conn
, cmd
->ident
, L2CAP_DISCONN_RSP
, sizeof(rsp
), &rsp
);
2979 sk
->sk_shutdown
= SHUTDOWN_MASK
;
2981 /* don't delete l2cap channel if sk is owned by user */
2982 if (sock_owned_by_user(sk
)) {
2983 l2cap_state_change(chan
, BT_DISCONN
);
2984 __clear_chan_timer(chan
);
2985 __set_chan_timer(chan
, L2CAP_DISC_TIMEOUT
);
2990 l2cap_chan_del(chan
, ECONNRESET
);
2993 chan
->ops
->close(chan
->data
);
2997 static inline int l2cap_disconnect_rsp(struct l2cap_conn
*conn
, struct l2cap_cmd_hdr
*cmd
, u8
*data
)
2999 struct l2cap_disconn_rsp
*rsp
= (struct l2cap_disconn_rsp
*) data
;
3001 struct l2cap_chan
*chan
;
3004 scid
= __le16_to_cpu(rsp
->scid
);
3005 dcid
= __le16_to_cpu(rsp
->dcid
);
3007 BT_DBG("dcid 0x%4.4x scid 0x%4.4x", dcid
, scid
);
3009 chan
= l2cap_get_chan_by_scid(conn
, scid
);
3015 /* don't delete l2cap channel if sk is owned by user */
3016 if (sock_owned_by_user(sk
)) {
3017 l2cap_state_change(chan
,BT_DISCONN
);
3018 __clear_chan_timer(chan
);
3019 __set_chan_timer(chan
, L2CAP_DISC_TIMEOUT
);
3024 l2cap_chan_del(chan
, 0);
3027 chan
->ops
->close(chan
->data
);
3031 static inline int l2cap_information_req(struct l2cap_conn
*conn
, struct l2cap_cmd_hdr
*cmd
, u8
*data
)
3033 struct l2cap_info_req
*req
= (struct l2cap_info_req
*) data
;
3036 type
= __le16_to_cpu(req
->type
);
3038 BT_DBG("type 0x%4.4x", type
);
3040 if (type
== L2CAP_IT_FEAT_MASK
) {
3042 u32 feat_mask
= l2cap_feat_mask
;
3043 struct l2cap_info_rsp
*rsp
= (struct l2cap_info_rsp
*) buf
;
3044 rsp
->type
= cpu_to_le16(L2CAP_IT_FEAT_MASK
);
3045 rsp
->result
= cpu_to_le16(L2CAP_IR_SUCCESS
);
3047 feat_mask
|= L2CAP_FEAT_ERTM
| L2CAP_FEAT_STREAMING
3050 feat_mask
|= L2CAP_FEAT_EXT_FLOW
3051 | L2CAP_FEAT_EXT_WINDOW
;
3053 put_unaligned_le32(feat_mask
, rsp
->data
);
3054 l2cap_send_cmd(conn
, cmd
->ident
,
3055 L2CAP_INFO_RSP
, sizeof(buf
), buf
);
3056 } else if (type
== L2CAP_IT_FIXED_CHAN
) {
3058 struct l2cap_info_rsp
*rsp
= (struct l2cap_info_rsp
*) buf
;
3061 l2cap_fixed_chan
[0] |= L2CAP_FC_A2MP
;
3063 l2cap_fixed_chan
[0] &= ~L2CAP_FC_A2MP
;
3065 rsp
->type
= cpu_to_le16(L2CAP_IT_FIXED_CHAN
);
3066 rsp
->result
= cpu_to_le16(L2CAP_IR_SUCCESS
);
3067 memcpy(rsp
->data
, l2cap_fixed_chan
, sizeof(l2cap_fixed_chan
));
3068 l2cap_send_cmd(conn
, cmd
->ident
,
3069 L2CAP_INFO_RSP
, sizeof(buf
), buf
);
3071 struct l2cap_info_rsp rsp
;
3072 rsp
.type
= cpu_to_le16(type
);
3073 rsp
.result
= cpu_to_le16(L2CAP_IR_NOTSUPP
);
3074 l2cap_send_cmd(conn
, cmd
->ident
,
3075 L2CAP_INFO_RSP
, sizeof(rsp
), &rsp
);
3081 static inline int l2cap_information_rsp(struct l2cap_conn
*conn
, struct l2cap_cmd_hdr
*cmd
, u8
*data
)
3083 struct l2cap_info_rsp
*rsp
= (struct l2cap_info_rsp
*) data
;
3086 type
= __le16_to_cpu(rsp
->type
);
3087 result
= __le16_to_cpu(rsp
->result
);
3089 BT_DBG("type 0x%4.4x result 0x%2.2x", type
, result
);
3091 /* L2CAP Info req/rsp are unbound to channels, add extra checks */
3092 if (cmd
->ident
!= conn
->info_ident
||
3093 conn
->info_state
& L2CAP_INFO_FEAT_MASK_REQ_DONE
)
3096 del_timer(&conn
->info_timer
);
3098 if (result
!= L2CAP_IR_SUCCESS
) {
3099 conn
->info_state
|= L2CAP_INFO_FEAT_MASK_REQ_DONE
;
3100 conn
->info_ident
= 0;
3102 l2cap_conn_start(conn
);
3107 if (type
== L2CAP_IT_FEAT_MASK
) {
3108 conn
->feat_mask
= get_unaligned_le32(rsp
->data
);
3110 if (conn
->feat_mask
& L2CAP_FEAT_FIXED_CHAN
) {
3111 struct l2cap_info_req req
;
3112 req
.type
= cpu_to_le16(L2CAP_IT_FIXED_CHAN
);
3114 conn
->info_ident
= l2cap_get_ident(conn
);
3116 l2cap_send_cmd(conn
, conn
->info_ident
,
3117 L2CAP_INFO_REQ
, sizeof(req
), &req
);
3119 conn
->info_state
|= L2CAP_INFO_FEAT_MASK_REQ_DONE
;
3120 conn
->info_ident
= 0;
3122 l2cap_conn_start(conn
);
3124 } else if (type
== L2CAP_IT_FIXED_CHAN
) {
3125 conn
->info_state
|= L2CAP_INFO_FEAT_MASK_REQ_DONE
;
3126 conn
->info_ident
= 0;
3128 l2cap_conn_start(conn
);
3134 static inline int l2cap_create_channel_req(struct l2cap_conn
*conn
,
3135 struct l2cap_cmd_hdr
*cmd
, u16 cmd_len
,
3138 struct l2cap_create_chan_req
*req
= data
;
3139 struct l2cap_create_chan_rsp rsp
;
3142 if (cmd_len
!= sizeof(*req
))
3148 psm
= le16_to_cpu(req
->psm
);
3149 scid
= le16_to_cpu(req
->scid
);
3151 BT_DBG("psm %d, scid %d, amp_id %d", psm
, scid
, req
->amp_id
);
3153 /* Placeholder: Always reject */
3155 rsp
.scid
= cpu_to_le16(scid
);
3156 rsp
.result
= L2CAP_CR_NO_MEM
;
3157 rsp
.status
= L2CAP_CS_NO_INFO
;
3159 l2cap_send_cmd(conn
, cmd
->ident
, L2CAP_CREATE_CHAN_RSP
,
3165 static inline int l2cap_create_channel_rsp(struct l2cap_conn
*conn
,
3166 struct l2cap_cmd_hdr
*cmd
, void *data
)
3168 BT_DBG("conn %p", conn
);
3170 return l2cap_connect_rsp(conn
, cmd
, data
);
3173 static void l2cap_send_move_chan_rsp(struct l2cap_conn
*conn
, u8 ident
,
3174 u16 icid
, u16 result
)
3176 struct l2cap_move_chan_rsp rsp
;
3178 BT_DBG("icid %d, result %d", icid
, result
);
3180 rsp
.icid
= cpu_to_le16(icid
);
3181 rsp
.result
= cpu_to_le16(result
);
3183 l2cap_send_cmd(conn
, ident
, L2CAP_MOVE_CHAN_RSP
, sizeof(rsp
), &rsp
);
3186 static void l2cap_send_move_chan_cfm(struct l2cap_conn
*conn
,
3187 struct l2cap_chan
*chan
, u16 icid
, u16 result
)
3189 struct l2cap_move_chan_cfm cfm
;
3192 BT_DBG("icid %d, result %d", icid
, result
);
3194 ident
= l2cap_get_ident(conn
);
3196 chan
->ident
= ident
;
3198 cfm
.icid
= cpu_to_le16(icid
);
3199 cfm
.result
= cpu_to_le16(result
);
3201 l2cap_send_cmd(conn
, ident
, L2CAP_MOVE_CHAN_CFM
, sizeof(cfm
), &cfm
);
3204 static void l2cap_send_move_chan_cfm_rsp(struct l2cap_conn
*conn
, u8 ident
,
3207 struct l2cap_move_chan_cfm_rsp rsp
;
3209 BT_DBG("icid %d", icid
);
3211 rsp
.icid
= cpu_to_le16(icid
);
3212 l2cap_send_cmd(conn
, ident
, L2CAP_MOVE_CHAN_CFM_RSP
, sizeof(rsp
), &rsp
);
3215 static inline int l2cap_move_channel_req(struct l2cap_conn
*conn
,
3216 struct l2cap_cmd_hdr
*cmd
, u16 cmd_len
, void *data
)
3218 struct l2cap_move_chan_req
*req
= data
;
3220 u16 result
= L2CAP_MR_NOT_ALLOWED
;
3222 if (cmd_len
!= sizeof(*req
))
3225 icid
= le16_to_cpu(req
->icid
);
3227 BT_DBG("icid %d, dest_amp_id %d", icid
, req
->dest_amp_id
);
3232 /* Placeholder: Always refuse */
3233 l2cap_send_move_chan_rsp(conn
, cmd
->ident
, icid
, result
);
3238 static inline int l2cap_move_channel_rsp(struct l2cap_conn
*conn
,
3239 struct l2cap_cmd_hdr
*cmd
, u16 cmd_len
, void *data
)
3241 struct l2cap_move_chan_rsp
*rsp
= data
;
3244 if (cmd_len
!= sizeof(*rsp
))
3247 icid
= le16_to_cpu(rsp
->icid
);
3248 result
= le16_to_cpu(rsp
->result
);
3250 BT_DBG("icid %d, result %d", icid
, result
);
3252 /* Placeholder: Always unconfirmed */
3253 l2cap_send_move_chan_cfm(conn
, NULL
, icid
, L2CAP_MC_UNCONFIRMED
);
3258 static inline int l2cap_move_channel_confirm(struct l2cap_conn
*conn
,
3259 struct l2cap_cmd_hdr
*cmd
, u16 cmd_len
, void *data
)
3261 struct l2cap_move_chan_cfm
*cfm
= data
;
3264 if (cmd_len
!= sizeof(*cfm
))
3267 icid
= le16_to_cpu(cfm
->icid
);
3268 result
= le16_to_cpu(cfm
->result
);
3270 BT_DBG("icid %d, result %d", icid
, result
);
3272 l2cap_send_move_chan_cfm_rsp(conn
, cmd
->ident
, icid
);
3277 static inline int l2cap_move_channel_confirm_rsp(struct l2cap_conn
*conn
,
3278 struct l2cap_cmd_hdr
*cmd
, u16 cmd_len
, void *data
)
3280 struct l2cap_move_chan_cfm_rsp
*rsp
= data
;
3283 if (cmd_len
!= sizeof(*rsp
))
3286 icid
= le16_to_cpu(rsp
->icid
);
3288 BT_DBG("icid %d", icid
);
3293 static inline int l2cap_check_conn_param(u16 min
, u16 max
, u16 latency
,
3298 if (min
> max
|| min
< 6 || max
> 3200)
3301 if (to_multiplier
< 10 || to_multiplier
> 3200)
3304 if (max
>= to_multiplier
* 8)
3307 max_latency
= (to_multiplier
* 8 / max
) - 1;
3308 if (latency
> 499 || latency
> max_latency
)
3314 static inline int l2cap_conn_param_update_req(struct l2cap_conn
*conn
,
3315 struct l2cap_cmd_hdr
*cmd
, u8
*data
)
3317 struct hci_conn
*hcon
= conn
->hcon
;
3318 struct l2cap_conn_param_update_req
*req
;
3319 struct l2cap_conn_param_update_rsp rsp
;
3320 u16 min
, max
, latency
, to_multiplier
, cmd_len
;
3323 if (!(hcon
->link_mode
& HCI_LM_MASTER
))
3326 cmd_len
= __le16_to_cpu(cmd
->len
);
3327 if (cmd_len
!= sizeof(struct l2cap_conn_param_update_req
))
3330 req
= (struct l2cap_conn_param_update_req
*) data
;
3331 min
= __le16_to_cpu(req
->min
);
3332 max
= __le16_to_cpu(req
->max
);
3333 latency
= __le16_to_cpu(req
->latency
);
3334 to_multiplier
= __le16_to_cpu(req
->to_multiplier
);
3336 BT_DBG("min 0x%4.4x max 0x%4.4x latency: 0x%4.4x Timeout: 0x%4.4x",
3337 min
, max
, latency
, to_multiplier
);
3339 memset(&rsp
, 0, sizeof(rsp
));
3341 err
= l2cap_check_conn_param(min
, max
, latency
, to_multiplier
);
3343 rsp
.result
= cpu_to_le16(L2CAP_CONN_PARAM_REJECTED
);
3345 rsp
.result
= cpu_to_le16(L2CAP_CONN_PARAM_ACCEPTED
);
3347 l2cap_send_cmd(conn
, cmd
->ident
, L2CAP_CONN_PARAM_UPDATE_RSP
,
3351 hci_le_conn_update(hcon
, min
, max
, latency
, to_multiplier
);
3356 static inline int l2cap_bredr_sig_cmd(struct l2cap_conn
*conn
,
3357 struct l2cap_cmd_hdr
*cmd
, u16 cmd_len
, u8
*data
)
3361 switch (cmd
->code
) {
3362 case L2CAP_COMMAND_REJ
:
3363 l2cap_command_rej(conn
, cmd
, data
);
3366 case L2CAP_CONN_REQ
:
3367 err
= l2cap_connect_req(conn
, cmd
, data
);
3370 case L2CAP_CONN_RSP
:
3371 err
= l2cap_connect_rsp(conn
, cmd
, data
);
3374 case L2CAP_CONF_REQ
:
3375 err
= l2cap_config_req(conn
, cmd
, cmd_len
, data
);
3378 case L2CAP_CONF_RSP
:
3379 err
= l2cap_config_rsp(conn
, cmd
, data
);
3382 case L2CAP_DISCONN_REQ
:
3383 err
= l2cap_disconnect_req(conn
, cmd
, data
);
3386 case L2CAP_DISCONN_RSP
:
3387 err
= l2cap_disconnect_rsp(conn
, cmd
, data
);
3390 case L2CAP_ECHO_REQ
:
3391 l2cap_send_cmd(conn
, cmd
->ident
, L2CAP_ECHO_RSP
, cmd_len
, data
);
3394 case L2CAP_ECHO_RSP
:
3397 case L2CAP_INFO_REQ
:
3398 err
= l2cap_information_req(conn
, cmd
, data
);
3401 case L2CAP_INFO_RSP
:
3402 err
= l2cap_information_rsp(conn
, cmd
, data
);
3405 case L2CAP_CREATE_CHAN_REQ
:
3406 err
= l2cap_create_channel_req(conn
, cmd
, cmd_len
, data
);
3409 case L2CAP_CREATE_CHAN_RSP
:
3410 err
= l2cap_create_channel_rsp(conn
, cmd
, data
);
3413 case L2CAP_MOVE_CHAN_REQ
:
3414 err
= l2cap_move_channel_req(conn
, cmd
, cmd_len
, data
);
3417 case L2CAP_MOVE_CHAN_RSP
:
3418 err
= l2cap_move_channel_rsp(conn
, cmd
, cmd_len
, data
);
3421 case L2CAP_MOVE_CHAN_CFM
:
3422 err
= l2cap_move_channel_confirm(conn
, cmd
, cmd_len
, data
);
3425 case L2CAP_MOVE_CHAN_CFM_RSP
:
3426 err
= l2cap_move_channel_confirm_rsp(conn
, cmd
, cmd_len
, data
);
3430 BT_ERR("Unknown BR/EDR signaling command 0x%2.2x", cmd
->code
);
3438 static inline int l2cap_le_sig_cmd(struct l2cap_conn
*conn
,
3439 struct l2cap_cmd_hdr
*cmd
, u8
*data
)
3441 switch (cmd
->code
) {
3442 case L2CAP_COMMAND_REJ
:
3445 case L2CAP_CONN_PARAM_UPDATE_REQ
:
3446 return l2cap_conn_param_update_req(conn
, cmd
, data
);
3448 case L2CAP_CONN_PARAM_UPDATE_RSP
:
3452 BT_ERR("Unknown LE signaling command 0x%2.2x", cmd
->code
);
3457 static inline void l2cap_sig_channel(struct l2cap_conn
*conn
,
3458 struct sk_buff
*skb
)
3460 u8
*data
= skb
->data
;
3462 struct l2cap_cmd_hdr cmd
;
3465 l2cap_raw_recv(conn
, skb
);
3467 while (len
>= L2CAP_CMD_HDR_SIZE
) {
3469 memcpy(&cmd
, data
, L2CAP_CMD_HDR_SIZE
);
3470 data
+= L2CAP_CMD_HDR_SIZE
;
3471 len
-= L2CAP_CMD_HDR_SIZE
;
3473 cmd_len
= le16_to_cpu(cmd
.len
);
3475 BT_DBG("code 0x%2.2x len %d id 0x%2.2x", cmd
.code
, cmd_len
, cmd
.ident
);
3477 if (cmd_len
> len
|| !cmd
.ident
) {
3478 BT_DBG("corrupted command");
3482 if (conn
->hcon
->type
== LE_LINK
)
3483 err
= l2cap_le_sig_cmd(conn
, &cmd
, data
);
3485 err
= l2cap_bredr_sig_cmd(conn
, &cmd
, cmd_len
, data
);
3488 struct l2cap_cmd_rej_unk rej
;
3490 BT_ERR("Wrong link type (%d)", err
);
3492 /* FIXME: Map err to a valid reason */
3493 rej
.reason
= cpu_to_le16(L2CAP_REJ_NOT_UNDERSTOOD
);
3494 l2cap_send_cmd(conn
, cmd
.ident
, L2CAP_COMMAND_REJ
, sizeof(rej
), &rej
);
3504 static int l2cap_check_fcs(struct l2cap_chan
*chan
, struct sk_buff
*skb
)
3506 u16 our_fcs
, rcv_fcs
;
3509 if (test_bit(FLAG_EXT_CTRL
, &chan
->flags
))
3510 hdr_size
= L2CAP_EXT_HDR_SIZE
;
3512 hdr_size
= L2CAP_ENH_HDR_SIZE
;
3514 if (chan
->fcs
== L2CAP_FCS_CRC16
) {
3515 skb_trim(skb
, skb
->len
- L2CAP_FCS_SIZE
);
3516 rcv_fcs
= get_unaligned_le16(skb
->data
+ skb
->len
);
3517 our_fcs
= crc16(0, skb
->data
- hdr_size
, skb
->len
+ hdr_size
);
3519 if (our_fcs
!= rcv_fcs
)
3525 static inline void l2cap_send_i_or_rr_or_rnr(struct l2cap_chan
*chan
)
3529 chan
->frames_sent
= 0;
3531 control
|= __set_reqseq(chan
, chan
->buffer_seq
);
3533 if (test_bit(CONN_LOCAL_BUSY
, &chan
->conn_state
)) {
3534 control
|= __set_ctrl_super(chan
, L2CAP_SUPER_RNR
);
3535 l2cap_send_sframe(chan
, control
);
3536 set_bit(CONN_RNR_SENT
, &chan
->conn_state
);
3539 if (test_bit(CONN_REMOTE_BUSY
, &chan
->conn_state
))
3540 l2cap_retransmit_frames(chan
);
3542 l2cap_ertm_send(chan
);
3544 if (!test_bit(CONN_LOCAL_BUSY
, &chan
->conn_state
) &&
3545 chan
->frames_sent
== 0) {
3546 control
|= __set_ctrl_super(chan
, L2CAP_SUPER_RR
);
3547 l2cap_send_sframe(chan
, control
);
3551 static int l2cap_add_to_srej_queue(struct l2cap_chan
*chan
, struct sk_buff
*skb
, u16 tx_seq
, u8 sar
)
3553 struct sk_buff
*next_skb
;
3554 int tx_seq_offset
, next_tx_seq_offset
;
3556 bt_cb(skb
)->tx_seq
= tx_seq
;
3557 bt_cb(skb
)->sar
= sar
;
3559 next_skb
= skb_peek(&chan
->srej_q
);
3561 tx_seq_offset
= __seq_offset(chan
, tx_seq
, chan
->buffer_seq
);
3564 if (bt_cb(next_skb
)->tx_seq
== tx_seq
)
3567 next_tx_seq_offset
= __seq_offset(chan
,
3568 bt_cb(next_skb
)->tx_seq
, chan
->buffer_seq
);
3570 if (next_tx_seq_offset
> tx_seq_offset
) {
3571 __skb_queue_before(&chan
->srej_q
, next_skb
, skb
);
3575 if (skb_queue_is_last(&chan
->srej_q
, next_skb
))
3578 next_skb
= skb_queue_next(&chan
->srej_q
, next_skb
);
3581 __skb_queue_tail(&chan
->srej_q
, skb
);
3586 static void append_skb_frag(struct sk_buff
*skb
,
3587 struct sk_buff
*new_frag
, struct sk_buff
**last_frag
)
3589 /* skb->len reflects data in skb as well as all fragments
3590 * skb->data_len reflects only data in fragments
3592 if (!skb_has_frag_list(skb
))
3593 skb_shinfo(skb
)->frag_list
= new_frag
;
3595 new_frag
->next
= NULL
;
3597 (*last_frag
)->next
= new_frag
;
3598 *last_frag
= new_frag
;
3600 skb
->len
+= new_frag
->len
;
3601 skb
->data_len
+= new_frag
->len
;
3602 skb
->truesize
+= new_frag
->truesize
;
3605 static int l2cap_reassemble_sdu(struct l2cap_chan
*chan
, struct sk_buff
*skb
, u32 control
)
3609 switch (__get_ctrl_sar(chan
, control
)) {
3610 case L2CAP_SAR_UNSEGMENTED
:
3614 err
= chan
->ops
->recv(chan
->data
, skb
);
3617 case L2CAP_SAR_START
:
3621 chan
->sdu_len
= get_unaligned_le16(skb
->data
);
3622 skb_pull(skb
, L2CAP_SDULEN_SIZE
);
3624 if (chan
->sdu_len
> chan
->imtu
) {
3629 if (skb
->len
>= chan
->sdu_len
)
3633 chan
->sdu_last_frag
= skb
;
3639 case L2CAP_SAR_CONTINUE
:
3643 append_skb_frag(chan
->sdu
, skb
,
3644 &chan
->sdu_last_frag
);
3647 if (chan
->sdu
->len
>= chan
->sdu_len
)
3657 append_skb_frag(chan
->sdu
, skb
,
3658 &chan
->sdu_last_frag
);
3661 if (chan
->sdu
->len
!= chan
->sdu_len
)
3664 err
= chan
->ops
->recv(chan
->data
, chan
->sdu
);
3667 /* Reassembly complete */
3669 chan
->sdu_last_frag
= NULL
;
3677 kfree_skb(chan
->sdu
);
3679 chan
->sdu_last_frag
= NULL
;
3686 static void l2cap_ertm_enter_local_busy(struct l2cap_chan
*chan
)
3690 BT_DBG("chan %p, Enter local busy", chan
);
3692 set_bit(CONN_LOCAL_BUSY
, &chan
->conn_state
);
3694 control
= __set_reqseq(chan
, chan
->buffer_seq
);
3695 control
|= __set_ctrl_super(chan
, L2CAP_SUPER_RNR
);
3696 l2cap_send_sframe(chan
, control
);
3698 set_bit(CONN_RNR_SENT
, &chan
->conn_state
);
3700 __clear_ack_timer(chan
);
3703 static void l2cap_ertm_exit_local_busy(struct l2cap_chan
*chan
)
3707 if (!test_bit(CONN_RNR_SENT
, &chan
->conn_state
))
3710 control
= __set_reqseq(chan
, chan
->buffer_seq
);
3711 control
|= __set_ctrl_poll(chan
);
3712 control
|= __set_ctrl_super(chan
, L2CAP_SUPER_RR
);
3713 l2cap_send_sframe(chan
, control
);
3714 chan
->retry_count
= 1;
3716 __clear_retrans_timer(chan
);
3717 __set_monitor_timer(chan
);
3719 set_bit(CONN_WAIT_F
, &chan
->conn_state
);
3722 clear_bit(CONN_LOCAL_BUSY
, &chan
->conn_state
);
3723 clear_bit(CONN_RNR_SENT
, &chan
->conn_state
);
3725 BT_DBG("chan %p, Exit local busy", chan
);
3728 void l2cap_chan_busy(struct l2cap_chan
*chan
, int busy
)
3730 if (chan
->mode
== L2CAP_MODE_ERTM
) {
3732 l2cap_ertm_enter_local_busy(chan
);
3734 l2cap_ertm_exit_local_busy(chan
);
3738 static void l2cap_check_srej_gap(struct l2cap_chan
*chan
, u16 tx_seq
)
3740 struct sk_buff
*skb
;
3743 while ((skb
= skb_peek(&chan
->srej_q
)) &&
3744 !test_bit(CONN_LOCAL_BUSY
, &chan
->conn_state
)) {
3747 if (bt_cb(skb
)->tx_seq
!= tx_seq
)
3750 skb
= skb_dequeue(&chan
->srej_q
);
3751 control
= __set_ctrl_sar(chan
, bt_cb(skb
)->sar
);
3752 err
= l2cap_reassemble_sdu(chan
, skb
, control
);
3755 l2cap_send_disconn_req(chan
->conn
, chan
, ECONNRESET
);
3759 chan
->buffer_seq_srej
= __next_seq(chan
, chan
->buffer_seq_srej
);
3760 tx_seq
= __next_seq(chan
, tx_seq
);
3764 static void l2cap_resend_srejframe(struct l2cap_chan
*chan
, u16 tx_seq
)
3766 struct srej_list
*l
, *tmp
;
3769 list_for_each_entry_safe(l
, tmp
, &chan
->srej_l
, list
) {
3770 if (l
->tx_seq
== tx_seq
) {
3775 control
= __set_ctrl_super(chan
, L2CAP_SUPER_SREJ
);
3776 control
|= __set_reqseq(chan
, l
->tx_seq
);
3777 l2cap_send_sframe(chan
, control
);
3779 list_add_tail(&l
->list
, &chan
->srej_l
);
3783 static int l2cap_send_srejframe(struct l2cap_chan
*chan
, u16 tx_seq
)
3785 struct srej_list
*new;
3788 while (tx_seq
!= chan
->expected_tx_seq
) {
3789 control
= __set_ctrl_super(chan
, L2CAP_SUPER_SREJ
);
3790 control
|= __set_reqseq(chan
, chan
->expected_tx_seq
);
3791 l2cap_send_sframe(chan
, control
);
3793 new = kzalloc(sizeof(struct srej_list
), GFP_ATOMIC
);
3797 new->tx_seq
= chan
->expected_tx_seq
;
3799 chan
->expected_tx_seq
= __next_seq(chan
, chan
->expected_tx_seq
);
3801 list_add_tail(&new->list
, &chan
->srej_l
);
3804 chan
->expected_tx_seq
= __next_seq(chan
, chan
->expected_tx_seq
);
3809 static inline int l2cap_data_channel_iframe(struct l2cap_chan
*chan
, u32 rx_control
, struct sk_buff
*skb
)
3811 u16 tx_seq
= __get_txseq(chan
, rx_control
);
3812 u16 req_seq
= __get_reqseq(chan
, rx_control
);
3813 u8 sar
= __get_ctrl_sar(chan
, rx_control
);
3814 int tx_seq_offset
, expected_tx_seq_offset
;
3815 int num_to_ack
= (chan
->tx_win
/6) + 1;
3818 BT_DBG("chan %p len %d tx_seq %d rx_control 0x%8.8x", chan
, skb
->len
,
3819 tx_seq
, rx_control
);
3821 if (__is_ctrl_final(chan
, rx_control
) &&
3822 test_bit(CONN_WAIT_F
, &chan
->conn_state
)) {
3823 __clear_monitor_timer(chan
);
3824 if (chan
->unacked_frames
> 0)
3825 __set_retrans_timer(chan
);
3826 clear_bit(CONN_WAIT_F
, &chan
->conn_state
);
3829 chan
->expected_ack_seq
= req_seq
;
3830 l2cap_drop_acked_frames(chan
);
3832 tx_seq_offset
= __seq_offset(chan
, tx_seq
, chan
->buffer_seq
);
3834 /* invalid tx_seq */
3835 if (tx_seq_offset
>= chan
->tx_win
) {
3836 l2cap_send_disconn_req(chan
->conn
, chan
, ECONNRESET
);
3840 if (test_bit(CONN_LOCAL_BUSY
, &chan
->conn_state
))
3843 if (tx_seq
== chan
->expected_tx_seq
)
3846 if (test_bit(CONN_SREJ_SENT
, &chan
->conn_state
)) {
3847 struct srej_list
*first
;
3849 first
= list_first_entry(&chan
->srej_l
,
3850 struct srej_list
, list
);
3851 if (tx_seq
== first
->tx_seq
) {
3852 l2cap_add_to_srej_queue(chan
, skb
, tx_seq
, sar
);
3853 l2cap_check_srej_gap(chan
, tx_seq
);
3855 list_del(&first
->list
);
3858 if (list_empty(&chan
->srej_l
)) {
3859 chan
->buffer_seq
= chan
->buffer_seq_srej
;
3860 clear_bit(CONN_SREJ_SENT
, &chan
->conn_state
);
3861 l2cap_send_ack(chan
);
3862 BT_DBG("chan %p, Exit SREJ_SENT", chan
);
3865 struct srej_list
*l
;
3867 /* duplicated tx_seq */
3868 if (l2cap_add_to_srej_queue(chan
, skb
, tx_seq
, sar
) < 0)
3871 list_for_each_entry(l
, &chan
->srej_l
, list
) {
3872 if (l
->tx_seq
== tx_seq
) {
3873 l2cap_resend_srejframe(chan
, tx_seq
);
3878 err
= l2cap_send_srejframe(chan
, tx_seq
);
3880 l2cap_send_disconn_req(chan
->conn
, chan
, -err
);
3885 expected_tx_seq_offset
= __seq_offset(chan
,
3886 chan
->expected_tx_seq
, chan
->buffer_seq
);
3888 /* duplicated tx_seq */
3889 if (tx_seq_offset
< expected_tx_seq_offset
)
3892 set_bit(CONN_SREJ_SENT
, &chan
->conn_state
);
3894 BT_DBG("chan %p, Enter SREJ", chan
);
3896 INIT_LIST_HEAD(&chan
->srej_l
);
3897 chan
->buffer_seq_srej
= chan
->buffer_seq
;
3899 __skb_queue_head_init(&chan
->srej_q
);
3900 l2cap_add_to_srej_queue(chan
, skb
, tx_seq
, sar
);
3902 set_bit(CONN_SEND_PBIT
, &chan
->conn_state
);
3904 err
= l2cap_send_srejframe(chan
, tx_seq
);
3906 l2cap_send_disconn_req(chan
->conn
, chan
, -err
);
3910 __clear_ack_timer(chan
);
3915 chan
->expected_tx_seq
= __next_seq(chan
, chan
->expected_tx_seq
);
3917 if (test_bit(CONN_SREJ_SENT
, &chan
->conn_state
)) {
3918 bt_cb(skb
)->tx_seq
= tx_seq
;
3919 bt_cb(skb
)->sar
= sar
;
3920 __skb_queue_tail(&chan
->srej_q
, skb
);
3924 err
= l2cap_reassemble_sdu(chan
, skb
, rx_control
);
3925 chan
->buffer_seq
= __next_seq(chan
, chan
->buffer_seq
);
3928 l2cap_send_disconn_req(chan
->conn
, chan
, ECONNRESET
);
3932 if (__is_ctrl_final(chan
, rx_control
)) {
3933 if (!test_and_clear_bit(CONN_REJ_ACT
, &chan
->conn_state
))
3934 l2cap_retransmit_frames(chan
);
3938 chan
->num_acked
= (chan
->num_acked
+ 1) % num_to_ack
;
3939 if (chan
->num_acked
== num_to_ack
- 1)
3940 l2cap_send_ack(chan
);
3942 __set_ack_timer(chan
);
3951 static inline void l2cap_data_channel_rrframe(struct l2cap_chan
*chan
, u32 rx_control
)
3953 BT_DBG("chan %p, req_seq %d ctrl 0x%8.8x", chan
,
3954 __get_reqseq(chan
, rx_control
), rx_control
);
3956 chan
->expected_ack_seq
= __get_reqseq(chan
, rx_control
);
3957 l2cap_drop_acked_frames(chan
);
3959 if (__is_ctrl_poll(chan
, rx_control
)) {
3960 set_bit(CONN_SEND_FBIT
, &chan
->conn_state
);
3961 if (test_bit(CONN_SREJ_SENT
, &chan
->conn_state
)) {
3962 if (test_bit(CONN_REMOTE_BUSY
, &chan
->conn_state
) &&
3963 (chan
->unacked_frames
> 0))
3964 __set_retrans_timer(chan
);
3966 clear_bit(CONN_REMOTE_BUSY
, &chan
->conn_state
);
3967 l2cap_send_srejtail(chan
);
3969 l2cap_send_i_or_rr_or_rnr(chan
);
3972 } else if (__is_ctrl_final(chan
, rx_control
)) {
3973 clear_bit(CONN_REMOTE_BUSY
, &chan
->conn_state
);
3975 if (!test_and_clear_bit(CONN_REJ_ACT
, &chan
->conn_state
))
3976 l2cap_retransmit_frames(chan
);
3979 if (test_bit(CONN_REMOTE_BUSY
, &chan
->conn_state
) &&
3980 (chan
->unacked_frames
> 0))
3981 __set_retrans_timer(chan
);
3983 clear_bit(CONN_REMOTE_BUSY
, &chan
->conn_state
);
3984 if (test_bit(CONN_SREJ_SENT
, &chan
->conn_state
))
3985 l2cap_send_ack(chan
);
3987 l2cap_ertm_send(chan
);
3991 static inline void l2cap_data_channel_rejframe(struct l2cap_chan
*chan
, u32 rx_control
)
3993 u16 tx_seq
= __get_reqseq(chan
, rx_control
);
3995 BT_DBG("chan %p, req_seq %d ctrl 0x%8.8x", chan
, tx_seq
, rx_control
);
3997 clear_bit(CONN_REMOTE_BUSY
, &chan
->conn_state
);
3999 chan
->expected_ack_seq
= tx_seq
;
4000 l2cap_drop_acked_frames(chan
);
4002 if (__is_ctrl_final(chan
, rx_control
)) {
4003 if (!test_and_clear_bit(CONN_REJ_ACT
, &chan
->conn_state
))
4004 l2cap_retransmit_frames(chan
);
4006 l2cap_retransmit_frames(chan
);
4008 if (test_bit(CONN_WAIT_F
, &chan
->conn_state
))
4009 set_bit(CONN_REJ_ACT
, &chan
->conn_state
);
4012 static inline void l2cap_data_channel_srejframe(struct l2cap_chan
*chan
, u32 rx_control
)
4014 u16 tx_seq
= __get_reqseq(chan
, rx_control
);
4016 BT_DBG("chan %p, req_seq %d ctrl 0x%8.8x", chan
, tx_seq
, rx_control
);
4018 clear_bit(CONN_REMOTE_BUSY
, &chan
->conn_state
);
4020 if (__is_ctrl_poll(chan
, rx_control
)) {
4021 chan
->expected_ack_seq
= tx_seq
;
4022 l2cap_drop_acked_frames(chan
);
4024 set_bit(CONN_SEND_FBIT
, &chan
->conn_state
);
4025 l2cap_retransmit_one_frame(chan
, tx_seq
);
4027 l2cap_ertm_send(chan
);
4029 if (test_bit(CONN_WAIT_F
, &chan
->conn_state
)) {
4030 chan
->srej_save_reqseq
= tx_seq
;
4031 set_bit(CONN_SREJ_ACT
, &chan
->conn_state
);
4033 } else if (__is_ctrl_final(chan
, rx_control
)) {
4034 if (test_bit(CONN_SREJ_ACT
, &chan
->conn_state
) &&
4035 chan
->srej_save_reqseq
== tx_seq
)
4036 clear_bit(CONN_SREJ_ACT
, &chan
->conn_state
);
4038 l2cap_retransmit_one_frame(chan
, tx_seq
);
4040 l2cap_retransmit_one_frame(chan
, tx_seq
);
4041 if (test_bit(CONN_WAIT_F
, &chan
->conn_state
)) {
4042 chan
->srej_save_reqseq
= tx_seq
;
4043 set_bit(CONN_SREJ_ACT
, &chan
->conn_state
);
4048 static inline void l2cap_data_channel_rnrframe(struct l2cap_chan
*chan
, u32 rx_control
)
4050 u16 tx_seq
= __get_reqseq(chan
, rx_control
);
4052 BT_DBG("chan %p, req_seq %d ctrl 0x%8.8x", chan
, tx_seq
, rx_control
);
4054 set_bit(CONN_REMOTE_BUSY
, &chan
->conn_state
);
4055 chan
->expected_ack_seq
= tx_seq
;
4056 l2cap_drop_acked_frames(chan
);
4058 if (__is_ctrl_poll(chan
, rx_control
))
4059 set_bit(CONN_SEND_FBIT
, &chan
->conn_state
);
4061 if (!test_bit(CONN_SREJ_SENT
, &chan
->conn_state
)) {
4062 __clear_retrans_timer(chan
);
4063 if (__is_ctrl_poll(chan
, rx_control
))
4064 l2cap_send_rr_or_rnr(chan
, L2CAP_CTRL_FINAL
);
4068 if (__is_ctrl_poll(chan
, rx_control
)) {
4069 l2cap_send_srejtail(chan
);
4071 rx_control
= __set_ctrl_super(chan
, L2CAP_SUPER_RR
);
4072 l2cap_send_sframe(chan
, rx_control
);
4076 static inline int l2cap_data_channel_sframe(struct l2cap_chan
*chan
, u32 rx_control
, struct sk_buff
*skb
)
4078 BT_DBG("chan %p rx_control 0x%8.8x len %d", chan
, rx_control
, skb
->len
);
4080 if (__is_ctrl_final(chan
, rx_control
) &&
4081 test_bit(CONN_WAIT_F
, &chan
->conn_state
)) {
4082 __clear_monitor_timer(chan
);
4083 if (chan
->unacked_frames
> 0)
4084 __set_retrans_timer(chan
);
4085 clear_bit(CONN_WAIT_F
, &chan
->conn_state
);
4088 switch (__get_ctrl_super(chan
, rx_control
)) {
4089 case L2CAP_SUPER_RR
:
4090 l2cap_data_channel_rrframe(chan
, rx_control
);
4093 case L2CAP_SUPER_REJ
:
4094 l2cap_data_channel_rejframe(chan
, rx_control
);
4097 case L2CAP_SUPER_SREJ
:
4098 l2cap_data_channel_srejframe(chan
, rx_control
);
4101 case L2CAP_SUPER_RNR
:
4102 l2cap_data_channel_rnrframe(chan
, rx_control
);
4110 static int l2cap_ertm_data_rcv(struct sock
*sk
, struct sk_buff
*skb
)
4112 struct l2cap_chan
*chan
= l2cap_pi(sk
)->chan
;
4115 int len
, next_tx_seq_offset
, req_seq_offset
;
4117 control
= __get_control(chan
, skb
->data
);
4118 skb_pull(skb
, __ctrl_size(chan
));
4122 * We can just drop the corrupted I-frame here.
4123 * Receiver will miss it and start proper recovery
4124 * procedures and ask retransmission.
4126 if (l2cap_check_fcs(chan
, skb
))
4129 if (__is_sar_start(chan
, control
) && !__is_sframe(chan
, control
))
4130 len
-= L2CAP_SDULEN_SIZE
;
4132 if (chan
->fcs
== L2CAP_FCS_CRC16
)
4133 len
-= L2CAP_FCS_SIZE
;
4135 if (len
> chan
->mps
) {
4136 l2cap_send_disconn_req(chan
->conn
, chan
, ECONNRESET
);
4140 req_seq
= __get_reqseq(chan
, control
);
4142 req_seq_offset
= __seq_offset(chan
, req_seq
, chan
->expected_ack_seq
);
4144 next_tx_seq_offset
= __seq_offset(chan
, chan
->next_tx_seq
,
4145 chan
->expected_ack_seq
);
4147 /* check for invalid req-seq */
4148 if (req_seq_offset
> next_tx_seq_offset
) {
4149 l2cap_send_disconn_req(chan
->conn
, chan
, ECONNRESET
);
4153 if (!__is_sframe(chan
, control
)) {
4155 l2cap_send_disconn_req(chan
->conn
, chan
, ECONNRESET
);
4159 l2cap_data_channel_iframe(chan
, control
, skb
);
4163 l2cap_send_disconn_req(chan
->conn
, chan
, ECONNRESET
);
4167 l2cap_data_channel_sframe(chan
, control
, skb
);
4177 static inline int l2cap_data_channel(struct l2cap_conn
*conn
, u16 cid
, struct sk_buff
*skb
)
4179 struct l2cap_chan
*chan
;
4180 struct sock
*sk
= NULL
;
4185 chan
= l2cap_get_chan_by_scid(conn
, cid
);
4187 BT_DBG("unknown cid 0x%4.4x", cid
);
4193 BT_DBG("chan %p, len %d", chan
, skb
->len
);
4195 if (chan
->state
!= BT_CONNECTED
)
4198 switch (chan
->mode
) {
4199 case L2CAP_MODE_BASIC
:
4200 /* If socket recv buffers overflows we drop data here
4201 * which is *bad* because L2CAP has to be reliable.
4202 * But we don't have any other choice. L2CAP doesn't
4203 * provide flow control mechanism. */
4205 if (chan
->imtu
< skb
->len
)
4208 if (!chan
->ops
->recv(chan
->data
, skb
))
4212 case L2CAP_MODE_ERTM
:
4213 if (!sock_owned_by_user(sk
)) {
4214 l2cap_ertm_data_rcv(sk
, skb
);
4216 if (sk_add_backlog(sk
, skb
))
4222 case L2CAP_MODE_STREAMING
:
4223 control
= __get_control(chan
, skb
->data
);
4224 skb_pull(skb
, __ctrl_size(chan
));
4227 if (l2cap_check_fcs(chan
, skb
))
4230 if (__is_sar_start(chan
, control
))
4231 len
-= L2CAP_SDULEN_SIZE
;
4233 if (chan
->fcs
== L2CAP_FCS_CRC16
)
4234 len
-= L2CAP_FCS_SIZE
;
4236 if (len
> chan
->mps
|| len
< 0 || __is_sframe(chan
, control
))
4239 tx_seq
= __get_txseq(chan
, control
);
4241 if (chan
->expected_tx_seq
!= tx_seq
) {
4242 /* Frame(s) missing - must discard partial SDU */
4243 kfree_skb(chan
->sdu
);
4245 chan
->sdu_last_frag
= NULL
;
4248 /* TODO: Notify userland of missing data */
4251 chan
->expected_tx_seq
= __next_seq(chan
, tx_seq
);
4253 if (l2cap_reassemble_sdu(chan
, skb
, control
) == -EMSGSIZE
)
4254 l2cap_send_disconn_req(chan
->conn
, chan
, ECONNRESET
);
4259 BT_DBG("chan %p: bad mode 0x%2.2x", chan
, chan
->mode
);
4273 static inline int l2cap_conless_channel(struct l2cap_conn
*conn
, __le16 psm
, struct sk_buff
*skb
)
4275 struct sock
*sk
= NULL
;
4276 struct l2cap_chan
*chan
;
4278 chan
= l2cap_global_chan_by_psm(0, psm
, conn
->src
);
4286 BT_DBG("sk %p, len %d", sk
, skb
->len
);
4288 if (chan
->state
!= BT_BOUND
&& chan
->state
!= BT_CONNECTED
)
4291 if (chan
->imtu
< skb
->len
)
4294 if (!chan
->ops
->recv(chan
->data
, skb
))
/*
 * l2cap_att_channel - deliver an LE fixed-channel (ATT) frame to the
 * channel registered on @cid for the local address @conn->src.
 *
 * NOTE(review): original lines are missing from this extraction
 * (numbering jumps 4311 -> 4319 and 4327 -> 4339); the !chan check,
 * locking and the drop/done exit paths are not visible here.
 */
4306 static inline int l2cap_att_channel(struct l2cap_conn
*conn
, __le16 cid
, struct sk_buff
*skb
)
4308 struct sock
*sk
= NULL
;
4309 struct l2cap_chan
*chan
;
/* Look up a global channel by source CID on the local adapter. */
4311 chan
= l2cap_global_chan_by_scid(0, cid
, conn
->src
);
4319 BT_DBG("sk %p, len %d", sk
, skb
->len
);
/* Only BOUND or CONNECTED channels may accept data. */
4321 if (chan
->state
!= BT_BOUND
&& chan
->state
!= BT_CONNECTED
)
/* Discard frames that exceed the channel's incoming MTU. */
4324 if (chan
->imtu
< skb
->len
)
/* Hand the skb to the channel's receive callback. */
4327 if (!chan
->ops
->recv(chan
->data
, skb
))
/*
 * l2cap_recv_frame - demultiplex one complete L2CAP frame by CID.
 * Parses the basic L2CAP header, then dispatches to the signalling,
 * connectionless, ATT, SMP or connection-oriented data handlers.
 *
 * NOTE(review): the extraction dropped lines here (e.g. the switch
 * statement header around original line 4356, local declarations and
 * the per-case break statements); numbering jumps confirm the gaps.
 */
4339 static void l2cap_recv_frame(struct l2cap_conn
*conn
, struct sk_buff
*skb
)
4341 struct l2cap_hdr
*lh
= (void *) skb
->data
;
/* Strip the 4-byte basic header; cid/len come from it (little-endian). */
4345 skb_pull(skb
, L2CAP_HDR_SIZE
);
4346 cid
= __le16_to_cpu(lh
->cid
);
4347 len
= __le16_to_cpu(lh
->len
);
/* Header length must match the actual payload length. */
4349 if (len
!= skb
->len
) {
4354 BT_DBG("len %d, cid 0x%4.4x", len
, cid
);
/* Signalling channels (BR/EDR and LE) share one handler. */
4357 case L2CAP_CID_LE_SIGNALING
:
4358 case L2CAP_CID_SIGNALING
:
4359 l2cap_sig_channel(conn
, skb
);
/* Connectionless frames carry the PSM in the first two payload bytes. */
4362 case L2CAP_CID_CONN_LESS
:
4363 psm
= get_unaligned_le16(skb
->data
);
4365 l2cap_conless_channel(conn
, psm
, skb
);
4368 case L2CAP_CID_LE_DATA
:
4369 l2cap_att_channel(conn
, cid
, skb
);
/* SMP rejection tears the whole connection down with EACCES. */
4373 if (smp_sig_channel(conn
, skb
))
4374 l2cap_conn_del(conn
->hcon
, EACCES
);
/* Everything else is connection-oriented data on a dynamic CID. */
4378 l2cap_data_channel(conn
, cid
, skb
);
4383 /* ---- L2CAP interface with lower layer (HCI) ---- */
/*
 * l2cap_connect_ind - HCI callback: should we accept an incoming ACL
 * connection from @bdaddr on @hdev?  Scans all listening channels and
 * builds a link-mode mask (ACCEPT, optionally MASTER for role switch).
 * lm1 accumulates exact local-address matches, lm2 wildcard (BDADDR_ANY)
 * matches; the exact match wins when present.
 *
 * NOTE(review): lines are missing from this extraction (e.g. the
 * "return 0" for non-ACL links around 4391 and the exact++ bookkeeping
 * around 4407), per the jumps in the embedded line numbers.
 */
4385 static int l2cap_connect_ind(struct hci_dev
*hdev
, bdaddr_t
*bdaddr
, u8 type
)
4387 int exact
= 0, lm1
= 0, lm2
= 0;
4388 struct l2cap_chan
*c
;
/* Only ACL links are subject to this policy check. */
4390 if (type
!= ACL_LINK
)
4393 BT_DBG("hdev %s, bdaddr %s", hdev
->name
, batostr(bdaddr
))ยство;
4395 /* Find listening sockets and check their link_mode */
4396 read_lock(&chan_list_lock
);
4397 list_for_each_entry(c
, &chan_list
, global_l
) {
4398 struct sock
*sk
= c
->sk
;
/* Skip channels that are not listening. */
4400 if (c
->state
!= BT_LISTEN
)
/* Exact match on the adapter's own address. */
4403 if (!bacmp(&bt_sk(sk
)->src
, &hdev
->bdaddr
)) {
4404 lm1
|= HCI_LM_ACCEPT
;
4405 if (test_bit(FLAG_ROLE_SWITCH
, &c
->flags
))
4406 lm1
|= HCI_LM_MASTER
;
/* Wildcard listener bound to BDADDR_ANY. */
4408 } else if (!bacmp(&bt_sk(sk
)->src
, BDADDR_ANY
)) {
4409 lm2
|= HCI_LM_ACCEPT
;
4410 if (test_bit(FLAG_ROLE_SWITCH
, &c
->flags
))
4411 lm2
|= HCI_LM_MASTER
;
4414 read_unlock(&chan_list_lock
);
/* Prefer the exact-address mask over the wildcard one. */
4416 return exact
? lm1
: lm2
;
/*
 * l2cap_connect_cfm - HCI callback: an outgoing/incoming link completed.
 * On success the L2CAP connection object is created (l2cap_conn_add) and
 * marked ready; on failure the connection is torn down with the HCI
 * status translated to an errno.
 *
 * NOTE(review): the status branching (success vs. failure) lines are
 * missing from this extraction (numbering jumps 4425 -> 4429 -> 4433).
 */
4419 static int l2cap_connect_cfm(struct hci_conn
*hcon
, u8 status
)
4421 struct l2cap_conn
*conn
;
4423 BT_DBG("hcon %p bdaddr %s status %d", hcon
, batostr(&hcon
->dst
), status
);
/* L2CAP only runs over ACL and LE links. */
4425 if (!(hcon
->type
== ACL_LINK
|| hcon
->type
== LE_LINK
))
4429 conn
= l2cap_conn_add(hcon
, status
);
4431 l2cap_conn_ready(conn
);
/* Failure path: convert the HCI status to an errno and drop the conn. */
4433 l2cap_conn_del(hcon
, bt_to_errno(status
));
/*
 * l2cap_disconn_ind - HCI callback asking which HCI reason code to use
 * when disconnecting @hcon.  Returns the reason recorded on the L2CAP
 * connection, or the generic remote-user-terminated code when the link
 * type is unsupported or no L2CAP connection exists.
 */
4438 static int l2cap_disconn_ind(struct hci_conn
*hcon
)
4440 struct l2cap_conn
*conn
= hcon
->l2cap_data
;
4442 BT_DBG("hcon %p", hcon
);
/* Fallback reason for non-ACL/LE links or missing l2cap_data. */
4444 if ((hcon
->type
!= ACL_LINK
&& hcon
->type
!= LE_LINK
) || !conn
)
4445 return HCI_ERROR_REMOTE_USER_TERM
;
4447 return conn
->disc_reason
;
/*
 * l2cap_disconn_cfm - HCI callback: the link is gone.  Tears down the
 * L2CAP connection with the HCI reason translated to an errno.
 *
 * NOTE(review): the early-return for unsupported link types (around
 * original line 4455) is missing from this extraction.
 */
4450 static int l2cap_disconn_cfm(struct hci_conn
*hcon
, u8 reason
)
4452 BT_DBG("hcon %p reason %d", hcon
, reason
);
/* L2CAP only runs over ACL and LE links. */
4454 if (!(hcon
->type
== ACL_LINK
|| hcon
->type
== LE_LINK
))
4457 l2cap_conn_del(hcon
, bt_to_errno(reason
));
/*
 * l2cap_check_encryption - react to an encryption change on a
 * connection-oriented channel.  Encryption dropped (encrypt == 0x00):
 * MEDIUM security channels get a grace timer (L2CAP_ENC_TIMEOUT) to
 * re-encrypt, HIGH security channels are closed immediately.  When
 * encryption is on, a pending MEDIUM-security timer is cleared.
 *
 * NOTE(review): the early "return;" for non-connection-oriented
 * channels and the else-branch brace lines are missing from this
 * extraction (numbering jumps 4464 -> 4467, 4472 -> 4474).
 */
4462 static inline void l2cap_check_encryption(struct l2cap_chan
*chan
, u8 encrypt
)
/* Only connection-oriented channels carry security requirements. */
4464 if (chan
->chan_type
!= L2CAP_CHAN_CONN_ORIENTED
)
4467 if (encrypt
== 0x00) {
/* MEDIUM: allow a timed window for encryption to come back. */
4468 if (chan
->sec_level
== BT_SECURITY_MEDIUM
) {
4469 __clear_chan_timer(chan
);
4470 __set_chan_timer(chan
, L2CAP_ENC_TIMEOUT
);
/* HIGH: unencrypted traffic is unacceptable, close now. */
4471 } else if (chan
->sec_level
== BT_SECURITY_HIGH
)
4472 l2cap_chan_close(chan
, ECONNREFUSED
);
/* Encryption (re)enabled: cancel the MEDIUM grace timer. */
4474 if (chan
->sec_level
== BT_SECURITY_MEDIUM
)
4475 __clear_chan_timer(chan
);
/*
 * l2cap_security_cfm - HCI callback: authentication/encryption finished
 * with @status for link @hcon (@encrypt reports the new encryption
 * state).  Walks every channel on the connection and advances its state
 * machine: LE data channels become ready, channels waiting in
 * BT_CONNECT send the deferred L2CAP Connect Request, and channels in
 * BT_CONNECT2 answer the peer's pending Connect Request (success,
 * authorization-pending, or security-block) with a Connect Response.
 *
 * NOTE(review): this extraction has many gaps (per-channel locking,
 * res/stat declarations, several braces and the final sizeof(rsp)
 * argument of the last l2cap_send_cmd are not visible), so the exact
 * branch structure must be confirmed against the full source.
 */
4479 static int l2cap_security_cfm(struct hci_conn
*hcon
, u8 status
, u8 encrypt
)
4481 struct l2cap_conn
*conn
= hcon
->l2cap_data
;
4482 struct l2cap_chan
*chan
;
4487 BT_DBG("conn %p", conn
);
/* LE links: kick off SMP key distribution and stop the security timer. */
4489 if (hcon
->type
== LE_LINK
) {
4490 smp_distribute_keys(conn
, 0);
4491 del_timer(&conn
->security_timer
);
4494 read_lock(&conn
->chan_lock
);
4496 list_for_each_entry(chan
, &conn
->chan_l
, list
) {
4497 struct sock
*sk
= chan
->sk
;
4501 BT_DBG("chan->scid %d", chan
->scid
);
/* LE data channel: successful encryption makes the channel ready. */
4503 if (chan
->scid
== L2CAP_CID_LE_DATA
) {
4504 if (!status
&& encrypt
) {
4505 chan
->sec_level
= hcon
->sec_level
;
4506 l2cap_chan_ready(sk
);
4513 if (test_bit(CONF_CONNECT_PEND
, &chan
->conf_state
)) {
/* Already connected/configuring: just react to the new encrypt state. */
4518 if (!status
&& (chan
->state
== BT_CONNECTED
||
4519 chan
->state
== BT_CONFIG
)) {
4520 l2cap_check_encryption(chan
, encrypt
);
/* BT_CONNECT: security is now in place, send the Connect Request. */
4525 if (chan
->state
== BT_CONNECT
) {
4527 struct l2cap_conn_req req
;
4528 req
.scid
= cpu_to_le16(chan
->scid
);
4529 req
.psm
= chan
->psm
;
4531 chan
->ident
= l2cap_get_ident(conn
);
4532 set_bit(CONF_CONNECT_PEND
, &chan
->conf_state
);
4534 l2cap_send_cmd(conn
, chan
->ident
,
4535 L2CAP_CONN_REQ
, sizeof(req
), &req
);
4537 __clear_chan_timer(chan
);
4538 __set_chan_timer(chan
, L2CAP_DISC_TIMEOUT
);
/* BT_CONNECT2: answer the peer's pending Connect Request. */
4540 } else if (chan
->state
== BT_CONNECT2
) {
4541 struct l2cap_conn_rsp rsp
;
/* Deferred setup: report PENDING and wake the listening parent. */
4545 if (bt_sk(sk
)->defer_setup
) {
4546 struct sock
*parent
= bt_sk(sk
)->parent
;
4547 res
= L2CAP_CR_PEND
;
4548 stat
= L2CAP_CS_AUTHOR_PEND
;
4550 parent
->sk_data_ready(parent
, 0);
/* Security succeeded: accept and move to configuration. */
4552 l2cap_state_change(chan
, BT_CONFIG
);
4553 res
= L2CAP_CR_SUCCESS
;
4554 stat
= L2CAP_CS_NO_INFO
;
/* Security failed: refuse with SEC_BLOCK and start disconnecting. */
4557 l2cap_state_change(chan
, BT_DISCONN
);
4558 __set_chan_timer(chan
, L2CAP_DISC_TIMEOUT
);
4559 res
= L2CAP_CR_SEC_BLOCK
;
4560 stat
= L2CAP_CS_NO_INFO
;
/* Note the swap: our dcid is the peer's scid and vice versa. */
4563 rsp
.scid
= cpu_to_le16(chan
->dcid
);
4564 rsp
.dcid
= cpu_to_le16(chan
->scid
);
4565 rsp
.result
= cpu_to_le16(res
);
4566 rsp
.status
= cpu_to_le16(stat
);
4567 l2cap_send_cmd(conn
, chan
->ident
, L2CAP_CONN_RSP
,
4574 read_unlock(&conn
->chan_lock
);
/*
 * l2cap_recv_acldata - HCI callback: one ACL fragment arrived.
 * Reassembles L2CAP frames from ACL start (!ACL_CONT) and continuation
 * (ACL_CONT) fragments into conn->rx_skb, tracking the remaining byte
 * count in conn->rx_len, and feeds complete frames to l2cap_recv_frame.
 * Every malformed sequence marks the connection unreliable (ECOMM).
 *
 * NOTE(review): the extraction is missing many lines here (local
 * declarations for len/cid, the goto drop exit paths, the second
 * argument of both skb_copy_from_linear_data calls, and several
 * braces); the embedded line numbers confirm the gaps.
 */
4579 static int l2cap_recv_acldata(struct hci_conn
*hcon
, struct sk_buff
*skb
, u16 flags
)
4581 struct l2cap_conn
*conn
= hcon
->l2cap_data
;
/* Lazily create the L2CAP connection on first data. */
4584 conn
= l2cap_conn_add(hcon
, 0);
4589 BT_DBG("conn %p len %d flags 0x%x", conn
, skb
->len
, flags
);
/* ---- Start fragment ---- */
4591 if (!(flags
& ACL_CONT
)) {
4592 struct l2cap_hdr
*hdr
;
4593 struct l2cap_chan
*chan
;
/* A start while reassembly is in progress: drop the partial frame. */
4598 BT_ERR("Unexpected start frame (len %d)", skb
->len
);
4599 kfree_skb(conn
->rx_skb
);
4600 conn
->rx_skb
= NULL
;
4602 l2cap_conn_unreliable(conn
, ECOMM
);
4605 /* Start fragment always begin with Basic L2CAP header */
4606 if (skb
->len
< L2CAP_HDR_SIZE
) {
4607 BT_ERR("Frame is too short (len %d)", skb
->len
);
4608 l2cap_conn_unreliable(conn
, ECOMM
);
4612 hdr
= (struct l2cap_hdr
*) skb
->data
;
/* Total frame size = payload length from header + header itself. */
4613 len
= __le16_to_cpu(hdr
->len
) + L2CAP_HDR_SIZE
;
4614 cid
= __le16_to_cpu(hdr
->cid
);
4616 if (len
== skb
->len
) {
4617 /* Complete frame received */
4618 l2cap_recv_frame(conn
, skb
);
4622 BT_DBG("Start: total len %d, frag len %d", len
, skb
->len
);
4624 if (skb
->len
> len
) {
4625 BT_ERR("Frame is too long (len %d, expected len %d)",
4627 l2cap_conn_unreliable(conn
, ECOMM
);
4631 chan
= l2cap_get_chan_by_scid(conn
, cid
);
/* Validate the announced size against the channel's incoming MTU. */
4633 if (chan
&& chan
->sk
) {
4634 struct sock
*sk
= chan
->sk
;
4636 if (chan
->imtu
< len
- L2CAP_HDR_SIZE
) {
4637 BT_ERR("Frame exceeding recv MTU (len %d, "
4641 l2cap_conn_unreliable(conn
, ECOMM
);
4647 /* Allocate skb for the complete frame (with header) */
4648 conn
->rx_skb
= bt_skb_alloc(len
, GFP_ATOMIC
);
4652 skb_copy_from_linear_data(skb
, skb_put(conn
->rx_skb
, skb
->len
),
4654 conn
->rx_len
= len
- skb
->len
;
/* ---- Continuation fragment ---- */
4656 BT_DBG("Cont: frag len %d (expecting %d)", skb
->len
, conn
->rx_len
);
4658 if (!conn
->rx_len
) {
4659 BT_ERR("Unexpected continuation frame (len %d)", skb
->len
);
4660 l2cap_conn_unreliable(conn
, ECOMM
);
4664 if (skb
->len
> conn
->rx_len
) {
4665 BT_ERR("Fragment is too long (len %d, expected %d)",
4666 skb
->len
, conn
->rx_len
);
4667 kfree_skb(conn
->rx_skb
);
4668 conn
->rx_skb
= NULL
;
4670 l2cap_conn_unreliable(conn
, ECOMM
);
/* Append this fragment and decrement the outstanding byte count. */
4674 skb_copy_from_linear_data(skb
, skb_put(conn
->rx_skb
, skb
->len
),
4676 conn
->rx_len
-= skb
->len
;
4678 if (!conn
->rx_len
) {
4679 /* Complete frame received */
4680 l2cap_recv_frame(conn
, conn
->rx_skb
);
4681 conn
->rx_skb
= NULL
;
/*
 * l2cap_debugfs_show - seq_file show routine for /sys/kernel/debug/
 * bluetooth/l2cap.  Prints one line per global channel: source and
 * destination addresses, state, PSM, CIDs, MTUs, security level and
 * mode.  The channel list is traversed under chan_list_lock (BH-safe
 * reader side).
 */
4690 static int l2cap_debugfs_show(struct seq_file
*f
, void *p
)
4692 struct l2cap_chan
*c
;
4694 read_lock_bh(&chan_list_lock
);
4696 list_for_each_entry(c
, &chan_list
, global_l
) {
4697 struct sock
*sk
= c
->sk
;
4699 seq_printf(f
, "%s %s %d %d 0x%4.4x 0x%4.4x %d %d %d %d\n",
4700 batostr(&bt_sk(sk
)->src
),
4701 batostr(&bt_sk(sk
)->dst
),
4702 c
->state
, __le16_to_cpu(c
->psm
),
4703 c
->scid
, c
->dcid
, c
->imtu
, c
->omtu
,
4704 c
->sec_level
, c
->mode
);
4707 read_unlock_bh(&chan_list_lock
);
4712 static int l2cap_debugfs_open(struct inode
*inode
, struct file
*file
)
4714 return single_open(file
, l2cap_debugfs_show
, inode
->i_private
);
4717 static const struct file_operations l2cap_debugfs_fops
= {
4718 .open
= l2cap_debugfs_open
,
4720 .llseek
= seq_lseek
,
4721 .release
= single_release
,
/* debugfs dentry created in l2cap_init(); removed again in l2cap_exit(). */
static struct dentry *l2cap_debugfs;
/*
 * HCI protocol descriptor registering L2CAP's callbacks with the HCI
 * core: connection accept policy, connect/disconnect confirmations,
 * security results and inbound ACL data.
 *
 * NOTE(review): one member line (original 4727) is missing from this
 * extraction — presumably the protocol's .name string; verify against
 * the full source before relying on this initializer.
 */
4726 static struct hci_proto l2cap_hci_proto
= {
4728 .id
= HCI_PROTO_L2CAP
,
4729 .connect_ind
= l2cap_connect_ind
,
4730 .connect_cfm
= l2cap_connect_cfm
,
4731 .disconn_ind
= l2cap_disconn_ind
,
4732 .disconn_cfm
= l2cap_disconn_cfm
,
4733 .security_cfm
= l2cap_security_cfm
,
4734 .recv_acldata
= l2cap_recv_acldata
/*
 * l2cap_init - module init: register the L2CAP socket family, register
 * the protocol with the HCI core (unregistering the sockets again on
 * failure), and create the optional debugfs entry (a failure there is
 * only logged, not fatal).
 *
 * NOTE(review): the error-checking lines between these statements
 * (if (err) ..., goto error, return values) are missing from this
 * extraction, per the jumps in the embedded line numbers.
 */
4737 int __init
l2cap_init(void)
4741 err
= l2cap_init_sockets();
4745 err
= hci_register_proto(&l2cap_hci_proto
);
/* Registration with HCI failed: roll back the socket registration. */
4747 BT_ERR("L2CAP protocol registration failed");
4748 bt_sock_unregister(BTPROTO_L2CAP
);
/* debugfs is best-effort: read-only file under the bluetooth dir. */
4753 l2cap_debugfs
= debugfs_create_file("l2cap", 0444,
4754 bt_debugfs
, NULL
, &l2cap_debugfs_fops
);
4756 BT_ERR("Failed to create L2CAP debug file");
4762 l2cap_cleanup_sockets();
4766 void l2cap_exit(void)
4768 debugfs_remove(l2cap_debugfs
);
4770 if (hci_unregister_proto(&l2cap_hci_proto
) < 0)
4771 BT_ERR("L2CAP protocol unregistration failed");
4773 l2cap_cleanup_sockets();
4776 module_param(disable_ertm
, bool, 0644);
4777 MODULE_PARM_DESC(disable_ertm
, "Disable enhanced retransmission mode");
4779 module_param(enable_hs
, bool, 0644);
4780 MODULE_PARM_DESC(enable_hs
, "Enable High Speed");