2 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (C) 2000-2001 Qualcomm Incorporated
4 Copyright (C) 2009-2010 Gustavo F. Padovan <gustavo@padovan.org>
5 Copyright (C) 2010 Google Inc.
6 Copyright (C) 2011 ProFUSION Embedded Systems
7 Copyright (c) 2012 Code Aurora Forum. All rights reserved.
9 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
11 This program is free software; you can redistribute it and/or modify
12 it under the terms of the GNU General Public License version 2 as
13 published by the Free Software Foundation;
15 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
16 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
18 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
19 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
20 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
21 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
22 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
24 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
25 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
26 SOFTWARE IS DISCLAIMED.
29 /* Bluetooth L2CAP core. */
31 #include <linux/module.h>
33 #include <linux/types.h>
34 #include <linux/capability.h>
35 #include <linux/errno.h>
36 #include <linux/kernel.h>
37 #include <linux/sched.h>
38 #include <linux/slab.h>
39 #include <linux/poll.h>
40 #include <linux/fcntl.h>
41 #include <linux/init.h>
42 #include <linux/interrupt.h>
43 #include <linux/socket.h>
44 #include <linux/skbuff.h>
45 #include <linux/list.h>
46 #include <linux/device.h>
47 #include <linux/debugfs.h>
48 #include <linux/seq_file.h>
49 #include <linux/uaccess.h>
50 #include <linux/crc16.h>
53 #include <asm/unaligned.h>
55 #include <net/bluetooth/bluetooth.h>
56 #include <net/bluetooth/hci_core.h>
57 #include <net/bluetooth/l2cap.h>
58 #include <net/bluetooth/smp.h>
/* Module-level switch: when true, Enhanced Retransmission Mode support
 * is turned off (presumably exported as a module parameter elsewhere in
 * this file — TODO confirm against the module_param declaration).
 * Use the boolean literal rather than the integer 1 for a bool object.
 */
bool disable_ertm = true;
/* Locally supported L2CAP feature mask; only the fixed-channel feature
 * bit is set statically (ERTM/streaming bits are ORed in conditionally
 * by l2cap_mode_supported(), see its local_feat_mask handling). */
static u32 l2cap_feat_mask = L2CAP_FEAT_FIXED_CHAN;
/* Fixed-channel support bitmap (8 bytes). Only the first byte is
 * initialised, with L2CAP_FC_L2CAP; the remaining bytes are
 * zero-initialised by the partial aggregate initialiser. */
static u8 l2cap_fixed_chan[8] = { L2CAP_FC_L2CAP, };
/* Global list of all L2CAP channels (linked via chan->global_l).
 * Writers take chan_list_lock for writing (see l2cap_chan_create /
 * l2cap_chan_destroy); lookup paths take it for reading. */
static LIST_HEAD(chan_list);
static DEFINE_RWLOCK(chan_list_lock);
68 static struct sk_buff
*l2cap_build_cmd(struct l2cap_conn
*conn
,
69 u8 code
, u8 ident
, u16 dlen
, void *data
);
70 static void l2cap_send_cmd(struct l2cap_conn
*conn
, u8 ident
, u8 code
, u16 len
,
72 static int l2cap_build_conf_req(struct l2cap_chan
*chan
, void *data
);
73 static void l2cap_send_disconn_req(struct l2cap_conn
*conn
,
74 struct l2cap_chan
*chan
, int err
);
76 static int l2cap_tx(struct l2cap_chan
*chan
, struct l2cap_ctrl
*control
,
77 struct sk_buff_head
*skbs
, u8 event
);
79 /* ---- L2CAP channels ---- */
81 static struct l2cap_chan
*__l2cap_get_chan_by_dcid(struct l2cap_conn
*conn
, u16 cid
)
85 list_for_each_entry(c
, &conn
->chan_l
, list
) {
92 static struct l2cap_chan
*__l2cap_get_chan_by_scid(struct l2cap_conn
*conn
, u16 cid
)
96 list_for_each_entry(c
, &conn
->chan_l
, list
) {
103 /* Find channel with given SCID.
104 * Returns locked channel. */
105 static struct l2cap_chan
*l2cap_get_chan_by_scid(struct l2cap_conn
*conn
, u16 cid
)
107 struct l2cap_chan
*c
;
109 mutex_lock(&conn
->chan_lock
);
110 c
= __l2cap_get_chan_by_scid(conn
, cid
);
113 mutex_unlock(&conn
->chan_lock
);
118 static struct l2cap_chan
*__l2cap_get_chan_by_ident(struct l2cap_conn
*conn
, u8 ident
)
120 struct l2cap_chan
*c
;
122 list_for_each_entry(c
, &conn
->chan_l
, list
) {
123 if (c
->ident
== ident
)
129 static struct l2cap_chan
*__l2cap_global_chan_by_addr(__le16 psm
, bdaddr_t
*src
)
131 struct l2cap_chan
*c
;
133 list_for_each_entry(c
, &chan_list
, global_l
) {
134 if (c
->sport
== psm
&& !bacmp(&bt_sk(c
->sk
)->src
, src
))
140 int l2cap_add_psm(struct l2cap_chan
*chan
, bdaddr_t
*src
, __le16 psm
)
144 write_lock(&chan_list_lock
);
146 if (psm
&& __l2cap_global_chan_by_addr(psm
, src
)) {
159 for (p
= 0x1001; p
< 0x1100; p
+= 2)
160 if (!__l2cap_global_chan_by_addr(cpu_to_le16(p
), src
)) {
161 chan
->psm
= cpu_to_le16(p
);
162 chan
->sport
= cpu_to_le16(p
);
169 write_unlock(&chan_list_lock
);
173 int l2cap_add_scid(struct l2cap_chan
*chan
, __u16 scid
)
175 write_lock(&chan_list_lock
);
179 write_unlock(&chan_list_lock
);
184 static u16
l2cap_alloc_cid(struct l2cap_conn
*conn
)
186 u16 cid
= L2CAP_CID_DYN_START
;
188 for (; cid
< L2CAP_CID_DYN_END
; cid
++) {
189 if (!__l2cap_get_chan_by_scid(conn
, cid
))
196 static void __l2cap_state_change(struct l2cap_chan
*chan
, int state
)
198 BT_DBG("chan %p %s -> %s", chan
, state_to_string(chan
->state
),
199 state_to_string(state
));
202 chan
->ops
->state_change(chan
->data
, state
);
205 static void l2cap_state_change(struct l2cap_chan
*chan
, int state
)
207 struct sock
*sk
= chan
->sk
;
210 __l2cap_state_change(chan
, state
);
214 static inline void __l2cap_chan_set_err(struct l2cap_chan
*chan
, int err
)
216 struct sock
*sk
= chan
->sk
;
221 static inline void l2cap_chan_set_err(struct l2cap_chan
*chan
, int err
)
223 struct sock
*sk
= chan
->sk
;
226 __l2cap_chan_set_err(chan
, err
);
230 static struct sk_buff
*l2cap_ertm_seq_in_queue(struct sk_buff_head
*head
,
235 skb_queue_walk(head
, skb
) {
236 if (bt_cb(skb
)->control
.txseq
== seq
)
243 /* ---- L2CAP sequence number lists ---- */
245 /* For ERTM, ordered lists of sequence numbers must be tracked for
246 * SREJ requests that are received and for frames that are to be
247 * retransmitted. These seq_list functions implement a singly-linked
248 * list in an array, where membership in the list can also be checked
249 * in constant time. Items can also be added to the tail of the list
250 * and removed from the head in constant time, without further memory
254 static int l2cap_seq_list_init(struct l2cap_seq_list
*seq_list
, u16 size
)
256 size_t alloc_size
, i
;
258 /* Allocated size is a power of 2 to map sequence numbers
259 * (which may be up to 14 bits) in to a smaller array that is
260 * sized for the negotiated ERTM transmit windows.
262 alloc_size
= roundup_pow_of_two(size
);
264 seq_list
->list
= kmalloc(sizeof(u16
) * alloc_size
, GFP_KERNEL
);
268 seq_list
->mask
= alloc_size
- 1;
269 seq_list
->head
= L2CAP_SEQ_LIST_CLEAR
;
270 seq_list
->tail
= L2CAP_SEQ_LIST_CLEAR
;
271 for (i
= 0; i
< alloc_size
; i
++)
272 seq_list
->list
[i
] = L2CAP_SEQ_LIST_CLEAR
;
277 static inline void l2cap_seq_list_free(struct l2cap_seq_list
*seq_list
)
279 kfree(seq_list
->list
);
282 static inline bool l2cap_seq_list_contains(struct l2cap_seq_list
*seq_list
,
285 /* Constant-time check for list membership */
286 return seq_list
->list
[seq
& seq_list
->mask
] != L2CAP_SEQ_LIST_CLEAR
;
289 static u16
l2cap_seq_list_remove(struct l2cap_seq_list
*seq_list
, u16 seq
)
291 u16 mask
= seq_list
->mask
;
293 if (seq_list
->head
== L2CAP_SEQ_LIST_CLEAR
) {
294 /* In case someone tries to pop the head of an empty list */
295 return L2CAP_SEQ_LIST_CLEAR
;
296 } else if (seq_list
->head
== seq
) {
297 /* Head can be removed in constant time */
298 seq_list
->head
= seq_list
->list
[seq
& mask
];
299 seq_list
->list
[seq
& mask
] = L2CAP_SEQ_LIST_CLEAR
;
301 if (seq_list
->head
== L2CAP_SEQ_LIST_TAIL
) {
302 seq_list
->head
= L2CAP_SEQ_LIST_CLEAR
;
303 seq_list
->tail
= L2CAP_SEQ_LIST_CLEAR
;
306 /* Walk the list to find the sequence number */
307 u16 prev
= seq_list
->head
;
308 while (seq_list
->list
[prev
& mask
] != seq
) {
309 prev
= seq_list
->list
[prev
& mask
];
310 if (prev
== L2CAP_SEQ_LIST_TAIL
)
311 return L2CAP_SEQ_LIST_CLEAR
;
314 /* Unlink the number from the list and clear it */
315 seq_list
->list
[prev
& mask
] = seq_list
->list
[seq
& mask
];
316 seq_list
->list
[seq
& mask
] = L2CAP_SEQ_LIST_CLEAR
;
317 if (seq_list
->tail
== seq
)
318 seq_list
->tail
= prev
;
323 static inline u16
l2cap_seq_list_pop(struct l2cap_seq_list
*seq_list
)
325 /* Remove the head in constant time */
326 return l2cap_seq_list_remove(seq_list
, seq_list
->head
);
329 static void l2cap_seq_list_clear(struct l2cap_seq_list
*seq_list
)
333 if (seq_list
->head
== L2CAP_SEQ_LIST_CLEAR
)
336 for (i
= 0; i
<= seq_list
->mask
; i
++)
337 seq_list
->list
[i
] = L2CAP_SEQ_LIST_CLEAR
;
339 seq_list
->head
= L2CAP_SEQ_LIST_CLEAR
;
340 seq_list
->tail
= L2CAP_SEQ_LIST_CLEAR
;
343 static void l2cap_seq_list_append(struct l2cap_seq_list
*seq_list
, u16 seq
)
345 u16 mask
= seq_list
->mask
;
347 /* All appends happen in constant time */
349 if (seq_list
->list
[seq
& mask
] != L2CAP_SEQ_LIST_CLEAR
)
352 if (seq_list
->tail
== L2CAP_SEQ_LIST_CLEAR
)
353 seq_list
->head
= seq
;
355 seq_list
->list
[seq_list
->tail
& mask
] = seq
;
357 seq_list
->tail
= seq
;
358 seq_list
->list
[seq
& mask
] = L2CAP_SEQ_LIST_TAIL
;
361 static void l2cap_chan_timeout(struct work_struct
*work
)
363 struct l2cap_chan
*chan
= container_of(work
, struct l2cap_chan
,
365 struct l2cap_conn
*conn
= chan
->conn
;
368 BT_DBG("chan %p state %s", chan
, state_to_string(chan
->state
));
370 mutex_lock(&conn
->chan_lock
);
371 l2cap_chan_lock(chan
);
373 if (chan
->state
== BT_CONNECTED
|| chan
->state
== BT_CONFIG
)
374 reason
= ECONNREFUSED
;
375 else if (chan
->state
== BT_CONNECT
&&
376 chan
->sec_level
!= BT_SECURITY_SDP
)
377 reason
= ECONNREFUSED
;
381 l2cap_chan_close(chan
, reason
);
383 l2cap_chan_unlock(chan
);
385 chan
->ops
->close(chan
->data
);
386 mutex_unlock(&conn
->chan_lock
);
388 l2cap_chan_put(chan
);
391 struct l2cap_chan
*l2cap_chan_create(void)
393 struct l2cap_chan
*chan
;
395 chan
= kzalloc(sizeof(*chan
), GFP_ATOMIC
);
399 mutex_init(&chan
->lock
);
401 write_lock(&chan_list_lock
);
402 list_add(&chan
->global_l
, &chan_list
);
403 write_unlock(&chan_list_lock
);
405 INIT_DELAYED_WORK(&chan
->chan_timer
, l2cap_chan_timeout
);
407 chan
->state
= BT_OPEN
;
409 atomic_set(&chan
->refcnt
, 1);
411 /* This flag is cleared in l2cap_chan_ready() */
412 set_bit(CONF_NOT_COMPLETE
, &chan
->conf_state
);
414 BT_DBG("chan %p", chan
);
419 void l2cap_chan_destroy(struct l2cap_chan
*chan
)
421 write_lock(&chan_list_lock
);
422 list_del(&chan
->global_l
);
423 write_unlock(&chan_list_lock
);
425 l2cap_chan_put(chan
);
428 void l2cap_chan_set_defaults(struct l2cap_chan
*chan
)
430 chan
->fcs
= L2CAP_FCS_CRC16
;
431 chan
->max_tx
= L2CAP_DEFAULT_MAX_TX
;
432 chan
->tx_win
= L2CAP_DEFAULT_TX_WINDOW
;
433 chan
->tx_win_max
= L2CAP_DEFAULT_TX_WINDOW
;
434 chan
->sec_level
= BT_SECURITY_LOW
;
436 set_bit(FLAG_FORCE_ACTIVE
, &chan
->flags
);
439 static void __l2cap_chan_add(struct l2cap_conn
*conn
, struct l2cap_chan
*chan
)
441 BT_DBG("conn %p, psm 0x%2.2x, dcid 0x%4.4x", conn
,
442 __le16_to_cpu(chan
->psm
), chan
->dcid
);
444 conn
->disc_reason
= HCI_ERROR_REMOTE_USER_TERM
;
448 switch (chan
->chan_type
) {
449 case L2CAP_CHAN_CONN_ORIENTED
:
450 if (conn
->hcon
->type
== LE_LINK
) {
452 chan
->omtu
= L2CAP_LE_DEFAULT_MTU
;
453 chan
->scid
= L2CAP_CID_LE_DATA
;
454 chan
->dcid
= L2CAP_CID_LE_DATA
;
456 /* Alloc CID for connection-oriented socket */
457 chan
->scid
= l2cap_alloc_cid(conn
);
458 chan
->omtu
= L2CAP_DEFAULT_MTU
;
462 case L2CAP_CHAN_CONN_LESS
:
463 /* Connectionless socket */
464 chan
->scid
= L2CAP_CID_CONN_LESS
;
465 chan
->dcid
= L2CAP_CID_CONN_LESS
;
466 chan
->omtu
= L2CAP_DEFAULT_MTU
;
470 /* Raw socket can send/recv signalling messages only */
471 chan
->scid
= L2CAP_CID_SIGNALING
;
472 chan
->dcid
= L2CAP_CID_SIGNALING
;
473 chan
->omtu
= L2CAP_DEFAULT_MTU
;
476 chan
->local_id
= L2CAP_BESTEFFORT_ID
;
477 chan
->local_stype
= L2CAP_SERV_BESTEFFORT
;
478 chan
->local_msdu
= L2CAP_DEFAULT_MAX_SDU_SIZE
;
479 chan
->local_sdu_itime
= L2CAP_DEFAULT_SDU_ITIME
;
480 chan
->local_acc_lat
= L2CAP_DEFAULT_ACC_LAT
;
481 chan
->local_flush_to
= L2CAP_DEFAULT_FLUSH_TO
;
483 l2cap_chan_hold(chan
);
485 list_add(&chan
->list
, &conn
->chan_l
);
488 static void l2cap_chan_add(struct l2cap_conn
*conn
, struct l2cap_chan
*chan
)
490 mutex_lock(&conn
->chan_lock
);
491 __l2cap_chan_add(conn
, chan
);
492 mutex_unlock(&conn
->chan_lock
);
495 static void l2cap_chan_del(struct l2cap_chan
*chan
, int err
)
497 struct sock
*sk
= chan
->sk
;
498 struct l2cap_conn
*conn
= chan
->conn
;
499 struct sock
*parent
= bt_sk(sk
)->parent
;
501 __clear_chan_timer(chan
);
503 BT_DBG("chan %p, conn %p, err %d", chan
, conn
, err
);
506 /* Delete from channel list */
507 list_del(&chan
->list
);
509 l2cap_chan_put(chan
);
512 hci_conn_put(conn
->hcon
);
517 __l2cap_state_change(chan
, BT_CLOSED
);
518 sock_set_flag(sk
, SOCK_ZAPPED
);
521 __l2cap_chan_set_err(chan
, err
);
524 bt_accept_unlink(sk
);
525 parent
->sk_data_ready(parent
, 0);
527 sk
->sk_state_change(sk
);
531 if (test_bit(CONF_NOT_COMPLETE
, &chan
->conf_state
))
534 skb_queue_purge(&chan
->tx_q
);
536 if (chan
->mode
== L2CAP_MODE_ERTM
) {
537 struct srej_list
*l
, *tmp
;
539 __clear_retrans_timer(chan
);
540 __clear_monitor_timer(chan
);
541 __clear_ack_timer(chan
);
543 skb_queue_purge(&chan
->srej_q
);
545 l2cap_seq_list_free(&chan
->srej_list
);
546 l2cap_seq_list_free(&chan
->retrans_list
);
547 list_for_each_entry_safe(l
, tmp
, &chan
->srej_l
, list
) {
554 static void l2cap_chan_cleanup_listen(struct sock
*parent
)
558 BT_DBG("parent %p", parent
);
560 /* Close not yet accepted channels */
561 while ((sk
= bt_accept_dequeue(parent
, NULL
))) {
562 struct l2cap_chan
*chan
= l2cap_pi(sk
)->chan
;
564 l2cap_chan_lock(chan
);
565 __clear_chan_timer(chan
);
566 l2cap_chan_close(chan
, ECONNRESET
);
567 l2cap_chan_unlock(chan
);
569 chan
->ops
->close(chan
->data
);
573 void l2cap_chan_close(struct l2cap_chan
*chan
, int reason
)
575 struct l2cap_conn
*conn
= chan
->conn
;
576 struct sock
*sk
= chan
->sk
;
578 BT_DBG("chan %p state %s sk %p", chan
,
579 state_to_string(chan
->state
), sk
);
581 switch (chan
->state
) {
584 l2cap_chan_cleanup_listen(sk
);
586 __l2cap_state_change(chan
, BT_CLOSED
);
587 sock_set_flag(sk
, SOCK_ZAPPED
);
593 if (chan
->chan_type
== L2CAP_CHAN_CONN_ORIENTED
&&
594 conn
->hcon
->type
== ACL_LINK
) {
595 __set_chan_timer(chan
, sk
->sk_sndtimeo
);
596 l2cap_send_disconn_req(conn
, chan
, reason
);
598 l2cap_chan_del(chan
, reason
);
602 if (chan
->chan_type
== L2CAP_CHAN_CONN_ORIENTED
&&
603 conn
->hcon
->type
== ACL_LINK
) {
604 struct l2cap_conn_rsp rsp
;
607 if (test_bit(BT_SK_DEFER_SETUP
, &bt_sk(sk
)->flags
))
608 result
= L2CAP_CR_SEC_BLOCK
;
610 result
= L2CAP_CR_BAD_PSM
;
611 l2cap_state_change(chan
, BT_DISCONN
);
613 rsp
.scid
= cpu_to_le16(chan
->dcid
);
614 rsp
.dcid
= cpu_to_le16(chan
->scid
);
615 rsp
.result
= cpu_to_le16(result
);
616 rsp
.status
= cpu_to_le16(L2CAP_CS_NO_INFO
);
617 l2cap_send_cmd(conn
, chan
->ident
, L2CAP_CONN_RSP
,
621 l2cap_chan_del(chan
, reason
);
626 l2cap_chan_del(chan
, reason
);
631 sock_set_flag(sk
, SOCK_ZAPPED
);
637 static inline u8
l2cap_get_auth_type(struct l2cap_chan
*chan
)
639 if (chan
->chan_type
== L2CAP_CHAN_RAW
) {
640 switch (chan
->sec_level
) {
641 case BT_SECURITY_HIGH
:
642 return HCI_AT_DEDICATED_BONDING_MITM
;
643 case BT_SECURITY_MEDIUM
:
644 return HCI_AT_DEDICATED_BONDING
;
646 return HCI_AT_NO_BONDING
;
648 } else if (chan
->psm
== cpu_to_le16(0x0001)) {
649 if (chan
->sec_level
== BT_SECURITY_LOW
)
650 chan
->sec_level
= BT_SECURITY_SDP
;
652 if (chan
->sec_level
== BT_SECURITY_HIGH
)
653 return HCI_AT_NO_BONDING_MITM
;
655 return HCI_AT_NO_BONDING
;
657 switch (chan
->sec_level
) {
658 case BT_SECURITY_HIGH
:
659 return HCI_AT_GENERAL_BONDING_MITM
;
660 case BT_SECURITY_MEDIUM
:
661 return HCI_AT_GENERAL_BONDING
;
663 return HCI_AT_NO_BONDING
;
668 /* Service level security */
669 int l2cap_chan_check_security(struct l2cap_chan
*chan
)
671 struct l2cap_conn
*conn
= chan
->conn
;
674 auth_type
= l2cap_get_auth_type(chan
);
676 return hci_conn_security(conn
->hcon
, chan
->sec_level
, auth_type
);
679 static u8
l2cap_get_ident(struct l2cap_conn
*conn
)
683 /* Get next available identificator.
684 * 1 - 128 are used by kernel.
685 * 129 - 199 are reserved.
686 * 200 - 254 are used by utilities like l2ping, etc.
689 spin_lock(&conn
->lock
);
691 if (++conn
->tx_ident
> 128)
696 spin_unlock(&conn
->lock
);
701 static void l2cap_send_cmd(struct l2cap_conn
*conn
, u8 ident
, u8 code
, u16 len
, void *data
)
703 struct sk_buff
*skb
= l2cap_build_cmd(conn
, code
, ident
, len
, data
);
706 BT_DBG("code 0x%2.2x", code
);
711 if (lmp_no_flush_capable(conn
->hcon
->hdev
))
712 flags
= ACL_START_NO_FLUSH
;
716 bt_cb(skb
)->force_active
= BT_POWER_FORCE_ACTIVE_ON
;
717 skb
->priority
= HCI_PRIO_MAX
;
719 hci_send_acl(conn
->hchan
, skb
, flags
);
722 static void l2cap_do_send(struct l2cap_chan
*chan
, struct sk_buff
*skb
)
724 struct hci_conn
*hcon
= chan
->conn
->hcon
;
727 BT_DBG("chan %p, skb %p len %d priority %u", chan
, skb
, skb
->len
,
730 if (!test_bit(FLAG_FLUSHABLE
, &chan
->flags
) &&
731 lmp_no_flush_capable(hcon
->hdev
))
732 flags
= ACL_START_NO_FLUSH
;
736 bt_cb(skb
)->force_active
= test_bit(FLAG_FORCE_ACTIVE
, &chan
->flags
);
737 hci_send_acl(chan
->conn
->hchan
, skb
, flags
);
740 static void __unpack_enhanced_control(u16 enh
, struct l2cap_ctrl
*control
)
742 control
->reqseq
= (enh
& L2CAP_CTRL_REQSEQ
) >> L2CAP_CTRL_REQSEQ_SHIFT
;
743 control
->final
= (enh
& L2CAP_CTRL_FINAL
) >> L2CAP_CTRL_FINAL_SHIFT
;
745 if (enh
& L2CAP_CTRL_FRAME_TYPE
) {
748 control
->poll
= (enh
& L2CAP_CTRL_POLL
) >> L2CAP_CTRL_POLL_SHIFT
;
749 control
->super
= (enh
& L2CAP_CTRL_SUPERVISE
) >> L2CAP_CTRL_SUPER_SHIFT
;
756 control
->sar
= (enh
& L2CAP_CTRL_SAR
) >> L2CAP_CTRL_SAR_SHIFT
;
757 control
->txseq
= (enh
& L2CAP_CTRL_TXSEQ
) >> L2CAP_CTRL_TXSEQ_SHIFT
;
764 static void __unpack_extended_control(u32 ext
, struct l2cap_ctrl
*control
)
766 control
->reqseq
= (ext
& L2CAP_EXT_CTRL_REQSEQ
) >> L2CAP_EXT_CTRL_REQSEQ_SHIFT
;
767 control
->final
= (ext
& L2CAP_EXT_CTRL_FINAL
) >> L2CAP_EXT_CTRL_FINAL_SHIFT
;
769 if (ext
& L2CAP_EXT_CTRL_FRAME_TYPE
) {
772 control
->poll
= (ext
& L2CAP_EXT_CTRL_POLL
) >> L2CAP_EXT_CTRL_POLL_SHIFT
;
773 control
->super
= (ext
& L2CAP_EXT_CTRL_SUPERVISE
) >> L2CAP_EXT_CTRL_SUPER_SHIFT
;
780 control
->sar
= (ext
& L2CAP_EXT_CTRL_SAR
) >> L2CAP_EXT_CTRL_SAR_SHIFT
;
781 control
->txseq
= (ext
& L2CAP_EXT_CTRL_TXSEQ
) >> L2CAP_EXT_CTRL_TXSEQ_SHIFT
;
788 static inline void __unpack_control(struct l2cap_chan
*chan
,
791 if (test_bit(FLAG_EXT_CTRL
, &chan
->flags
)) {
792 __unpack_extended_control(get_unaligned_le32(skb
->data
),
793 &bt_cb(skb
)->control
);
795 __unpack_enhanced_control(get_unaligned_le16(skb
->data
),
796 &bt_cb(skb
)->control
);
800 static u32
__pack_extended_control(struct l2cap_ctrl
*control
)
804 packed
= control
->reqseq
<< L2CAP_EXT_CTRL_REQSEQ_SHIFT
;
805 packed
|= control
->final
<< L2CAP_EXT_CTRL_FINAL_SHIFT
;
807 if (control
->sframe
) {
808 packed
|= control
->poll
<< L2CAP_EXT_CTRL_POLL_SHIFT
;
809 packed
|= control
->super
<< L2CAP_EXT_CTRL_SUPER_SHIFT
;
810 packed
|= L2CAP_EXT_CTRL_FRAME_TYPE
;
812 packed
|= control
->sar
<< L2CAP_EXT_CTRL_SAR_SHIFT
;
813 packed
|= control
->txseq
<< L2CAP_EXT_CTRL_TXSEQ_SHIFT
;
819 static u16
__pack_enhanced_control(struct l2cap_ctrl
*control
)
823 packed
= control
->reqseq
<< L2CAP_CTRL_REQSEQ_SHIFT
;
824 packed
|= control
->final
<< L2CAP_CTRL_FINAL_SHIFT
;
826 if (control
->sframe
) {
827 packed
|= control
->poll
<< L2CAP_CTRL_POLL_SHIFT
;
828 packed
|= control
->super
<< L2CAP_CTRL_SUPER_SHIFT
;
829 packed
|= L2CAP_CTRL_FRAME_TYPE
;
831 packed
|= control
->sar
<< L2CAP_CTRL_SAR_SHIFT
;
832 packed
|= control
->txseq
<< L2CAP_CTRL_TXSEQ_SHIFT
;
838 static inline void __pack_control(struct l2cap_chan
*chan
,
839 struct l2cap_ctrl
*control
,
842 if (test_bit(FLAG_EXT_CTRL
, &chan
->flags
)) {
843 put_unaligned_le32(__pack_extended_control(control
),
844 skb
->data
+ L2CAP_HDR_SIZE
);
846 put_unaligned_le16(__pack_enhanced_control(control
),
847 skb
->data
+ L2CAP_HDR_SIZE
);
851 static struct sk_buff
*l2cap_create_sframe_pdu(struct l2cap_chan
*chan
,
855 struct l2cap_hdr
*lh
;
858 if (test_bit(FLAG_EXT_CTRL
, &chan
->flags
))
859 hlen
= L2CAP_EXT_HDR_SIZE
;
861 hlen
= L2CAP_ENH_HDR_SIZE
;
863 if (chan
->fcs
== L2CAP_FCS_CRC16
)
864 hlen
+= L2CAP_FCS_SIZE
;
866 skb
= bt_skb_alloc(hlen
, GFP_KERNEL
);
869 return ERR_PTR(-ENOMEM
);
871 lh
= (struct l2cap_hdr
*) skb_put(skb
, L2CAP_HDR_SIZE
);
872 lh
->len
= cpu_to_le16(hlen
- L2CAP_HDR_SIZE
);
873 lh
->cid
= cpu_to_le16(chan
->dcid
);
875 if (test_bit(FLAG_EXT_CTRL
, &chan
->flags
))
876 put_unaligned_le32(control
, skb_put(skb
, L2CAP_EXT_CTRL_SIZE
));
878 put_unaligned_le16(control
, skb_put(skb
, L2CAP_ENH_CTRL_SIZE
));
880 if (chan
->fcs
== L2CAP_FCS_CRC16
) {
881 u16 fcs
= crc16(0, (u8
*)skb
->data
, skb
->len
);
882 put_unaligned_le16(fcs
, skb_put(skb
, L2CAP_FCS_SIZE
));
885 skb
->priority
= HCI_PRIO_MAX
;
889 static void l2cap_send_sframe(struct l2cap_chan
*chan
,
890 struct l2cap_ctrl
*control
)
895 BT_DBG("chan %p, control %p", chan
, control
);
897 if (!control
->sframe
)
900 if (test_and_clear_bit(CONN_SEND_FBIT
, &chan
->conn_state
) &&
904 if (control
->super
== L2CAP_SUPER_RR
)
905 clear_bit(CONN_RNR_SENT
, &chan
->conn_state
);
906 else if (control
->super
== L2CAP_SUPER_RNR
)
907 set_bit(CONN_RNR_SENT
, &chan
->conn_state
);
909 if (control
->super
!= L2CAP_SUPER_SREJ
) {
910 chan
->last_acked_seq
= control
->reqseq
;
911 __clear_ack_timer(chan
);
914 BT_DBG("reqseq %d, final %d, poll %d, super %d", control
->reqseq
,
915 control
->final
, control
->poll
, control
->super
);
917 if (test_bit(FLAG_EXT_CTRL
, &chan
->flags
))
918 control_field
= __pack_extended_control(control
);
920 control_field
= __pack_enhanced_control(control
);
922 skb
= l2cap_create_sframe_pdu(chan
, control_field
);
924 l2cap_do_send(chan
, skb
);
927 static inline void l2cap_send_rr_or_rnr(struct l2cap_chan
*chan
, u32 control
)
929 if (test_bit(CONN_LOCAL_BUSY
, &chan
->conn_state
)) {
930 control
|= __set_ctrl_super(chan
, L2CAP_SUPER_RNR
);
931 set_bit(CONN_RNR_SENT
, &chan
->conn_state
);
933 control
|= __set_ctrl_super(chan
, L2CAP_SUPER_RR
);
935 control
|= __set_reqseq(chan
, chan
->buffer_seq
);
938 static inline int __l2cap_no_conn_pending(struct l2cap_chan
*chan
)
940 return !test_bit(CONF_CONNECT_PEND
, &chan
->conf_state
);
943 static void l2cap_send_conn_req(struct l2cap_chan
*chan
)
945 struct l2cap_conn
*conn
= chan
->conn
;
946 struct l2cap_conn_req req
;
948 req
.scid
= cpu_to_le16(chan
->scid
);
951 chan
->ident
= l2cap_get_ident(conn
);
953 set_bit(CONF_CONNECT_PEND
, &chan
->conf_state
);
955 l2cap_send_cmd(conn
, chan
->ident
, L2CAP_CONN_REQ
, sizeof(req
), &req
);
958 static void l2cap_chan_ready(struct l2cap_chan
*chan
)
960 struct sock
*sk
= chan
->sk
;
965 parent
= bt_sk(sk
)->parent
;
967 BT_DBG("sk %p, parent %p", sk
, parent
);
969 /* This clears all conf flags, including CONF_NOT_COMPLETE */
970 chan
->conf_state
= 0;
971 __clear_chan_timer(chan
);
973 __l2cap_state_change(chan
, BT_CONNECTED
);
974 sk
->sk_state_change(sk
);
977 parent
->sk_data_ready(parent
, 0);
982 static void l2cap_do_start(struct l2cap_chan
*chan
)
984 struct l2cap_conn
*conn
= chan
->conn
;
986 if (conn
->hcon
->type
== LE_LINK
) {
987 l2cap_chan_ready(chan
);
991 if (conn
->info_state
& L2CAP_INFO_FEAT_MASK_REQ_SENT
) {
992 if (!(conn
->info_state
& L2CAP_INFO_FEAT_MASK_REQ_DONE
))
995 if (l2cap_chan_check_security(chan
) &&
996 __l2cap_no_conn_pending(chan
))
997 l2cap_send_conn_req(chan
);
999 struct l2cap_info_req req
;
1000 req
.type
= cpu_to_le16(L2CAP_IT_FEAT_MASK
);
1002 conn
->info_state
|= L2CAP_INFO_FEAT_MASK_REQ_SENT
;
1003 conn
->info_ident
= l2cap_get_ident(conn
);
1005 schedule_delayed_work(&conn
->info_timer
, L2CAP_INFO_TIMEOUT
);
1007 l2cap_send_cmd(conn
, conn
->info_ident
,
1008 L2CAP_INFO_REQ
, sizeof(req
), &req
);
1012 static inline int l2cap_mode_supported(__u8 mode
, __u32 feat_mask
)
1014 u32 local_feat_mask
= l2cap_feat_mask
;
1016 local_feat_mask
|= L2CAP_FEAT_ERTM
| L2CAP_FEAT_STREAMING
;
1019 case L2CAP_MODE_ERTM
:
1020 return L2CAP_FEAT_ERTM
& feat_mask
& local_feat_mask
;
1021 case L2CAP_MODE_STREAMING
:
1022 return L2CAP_FEAT_STREAMING
& feat_mask
& local_feat_mask
;
1028 static void l2cap_send_disconn_req(struct l2cap_conn
*conn
, struct l2cap_chan
*chan
, int err
)
1030 struct sock
*sk
= chan
->sk
;
1031 struct l2cap_disconn_req req
;
1036 if (chan
->mode
== L2CAP_MODE_ERTM
) {
1037 __clear_retrans_timer(chan
);
1038 __clear_monitor_timer(chan
);
1039 __clear_ack_timer(chan
);
1042 req
.dcid
= cpu_to_le16(chan
->dcid
);
1043 req
.scid
= cpu_to_le16(chan
->scid
);
1044 l2cap_send_cmd(conn
, l2cap_get_ident(conn
),
1045 L2CAP_DISCONN_REQ
, sizeof(req
), &req
);
1048 __l2cap_state_change(chan
, BT_DISCONN
);
1049 __l2cap_chan_set_err(chan
, err
);
1053 /* ---- L2CAP connections ---- */
1054 static void l2cap_conn_start(struct l2cap_conn
*conn
)
1056 struct l2cap_chan
*chan
, *tmp
;
1058 BT_DBG("conn %p", conn
);
1060 mutex_lock(&conn
->chan_lock
);
1062 list_for_each_entry_safe(chan
, tmp
, &conn
->chan_l
, list
) {
1063 struct sock
*sk
= chan
->sk
;
1065 l2cap_chan_lock(chan
);
1067 if (chan
->chan_type
!= L2CAP_CHAN_CONN_ORIENTED
) {
1068 l2cap_chan_unlock(chan
);
1072 if (chan
->state
== BT_CONNECT
) {
1073 if (!l2cap_chan_check_security(chan
) ||
1074 !__l2cap_no_conn_pending(chan
)) {
1075 l2cap_chan_unlock(chan
);
1079 if (!l2cap_mode_supported(chan
->mode
, conn
->feat_mask
)
1080 && test_bit(CONF_STATE2_DEVICE
,
1081 &chan
->conf_state
)) {
1082 l2cap_chan_close(chan
, ECONNRESET
);
1083 l2cap_chan_unlock(chan
);
1087 l2cap_send_conn_req(chan
);
1089 } else if (chan
->state
== BT_CONNECT2
) {
1090 struct l2cap_conn_rsp rsp
;
1092 rsp
.scid
= cpu_to_le16(chan
->dcid
);
1093 rsp
.dcid
= cpu_to_le16(chan
->scid
);
1095 if (l2cap_chan_check_security(chan
)) {
1097 if (test_bit(BT_SK_DEFER_SETUP
,
1098 &bt_sk(sk
)->flags
)) {
1099 struct sock
*parent
= bt_sk(sk
)->parent
;
1100 rsp
.result
= cpu_to_le16(L2CAP_CR_PEND
);
1101 rsp
.status
= cpu_to_le16(L2CAP_CS_AUTHOR_PEND
);
1103 parent
->sk_data_ready(parent
, 0);
1106 __l2cap_state_change(chan
, BT_CONFIG
);
1107 rsp
.result
= cpu_to_le16(L2CAP_CR_SUCCESS
);
1108 rsp
.status
= cpu_to_le16(L2CAP_CS_NO_INFO
);
1112 rsp
.result
= cpu_to_le16(L2CAP_CR_PEND
);
1113 rsp
.status
= cpu_to_le16(L2CAP_CS_AUTHEN_PEND
);
1116 l2cap_send_cmd(conn
, chan
->ident
, L2CAP_CONN_RSP
,
1119 if (test_bit(CONF_REQ_SENT
, &chan
->conf_state
) ||
1120 rsp
.result
!= L2CAP_CR_SUCCESS
) {
1121 l2cap_chan_unlock(chan
);
1125 set_bit(CONF_REQ_SENT
, &chan
->conf_state
);
1126 l2cap_send_cmd(conn
, l2cap_get_ident(conn
), L2CAP_CONF_REQ
,
1127 l2cap_build_conf_req(chan
, buf
), buf
);
1128 chan
->num_conf_req
++;
1131 l2cap_chan_unlock(chan
);
1134 mutex_unlock(&conn
->chan_lock
);
1137 /* Find socket with cid and source/destination bdaddr.
1138 * Returns closest match, locked.
1140 static struct l2cap_chan
*l2cap_global_chan_by_scid(int state
, u16 cid
,
1144 struct l2cap_chan
*c
, *c1
= NULL
;
1146 read_lock(&chan_list_lock
);
1148 list_for_each_entry(c
, &chan_list
, global_l
) {
1149 struct sock
*sk
= c
->sk
;
1151 if (state
&& c
->state
!= state
)
1154 if (c
->scid
== cid
) {
1155 int src_match
, dst_match
;
1156 int src_any
, dst_any
;
1159 src_match
= !bacmp(&bt_sk(sk
)->src
, src
);
1160 dst_match
= !bacmp(&bt_sk(sk
)->dst
, dst
);
1161 if (src_match
&& dst_match
) {
1162 read_unlock(&chan_list_lock
);
1167 src_any
= !bacmp(&bt_sk(sk
)->src
, BDADDR_ANY
);
1168 dst_any
= !bacmp(&bt_sk(sk
)->dst
, BDADDR_ANY
);
1169 if ((src_match
&& dst_any
) || (src_any
&& dst_match
) ||
1170 (src_any
&& dst_any
))
1175 read_unlock(&chan_list_lock
);
1180 static void l2cap_le_conn_ready(struct l2cap_conn
*conn
)
1182 struct sock
*parent
, *sk
;
1183 struct l2cap_chan
*chan
, *pchan
;
1187 /* Check if we have socket listening on cid */
1188 pchan
= l2cap_global_chan_by_scid(BT_LISTEN
, L2CAP_CID_LE_DATA
,
1189 conn
->src
, conn
->dst
);
1197 /* Check for backlog size */
1198 if (sk_acceptq_is_full(parent
)) {
1199 BT_DBG("backlog full %d", parent
->sk_ack_backlog
);
1203 chan
= pchan
->ops
->new_connection(pchan
->data
);
1209 hci_conn_hold(conn
->hcon
);
1211 bacpy(&bt_sk(sk
)->src
, conn
->src
);
1212 bacpy(&bt_sk(sk
)->dst
, conn
->dst
);
1214 bt_accept_enqueue(parent
, sk
);
1216 l2cap_chan_add(conn
, chan
);
1218 __set_chan_timer(chan
, sk
->sk_sndtimeo
);
1220 __l2cap_state_change(chan
, BT_CONNECTED
);
1221 parent
->sk_data_ready(parent
, 0);
1224 release_sock(parent
);
1227 static void l2cap_conn_ready(struct l2cap_conn
*conn
)
1229 struct l2cap_chan
*chan
;
1231 BT_DBG("conn %p", conn
);
1233 if (!conn
->hcon
->out
&& conn
->hcon
->type
== LE_LINK
)
1234 l2cap_le_conn_ready(conn
);
1236 if (conn
->hcon
->out
&& conn
->hcon
->type
== LE_LINK
)
1237 smp_conn_security(conn
, conn
->hcon
->pending_sec_level
);
1239 mutex_lock(&conn
->chan_lock
);
1241 list_for_each_entry(chan
, &conn
->chan_l
, list
) {
1243 l2cap_chan_lock(chan
);
1245 if (conn
->hcon
->type
== LE_LINK
) {
1246 if (smp_conn_security(conn
, chan
->sec_level
))
1247 l2cap_chan_ready(chan
);
1249 } else if (chan
->chan_type
!= L2CAP_CHAN_CONN_ORIENTED
) {
1250 struct sock
*sk
= chan
->sk
;
1251 __clear_chan_timer(chan
);
1253 __l2cap_state_change(chan
, BT_CONNECTED
);
1254 sk
->sk_state_change(sk
);
1257 } else if (chan
->state
== BT_CONNECT
)
1258 l2cap_do_start(chan
);
1260 l2cap_chan_unlock(chan
);
1263 mutex_unlock(&conn
->chan_lock
);
1266 /* Notify sockets that we cannot guaranty reliability anymore */
1267 static void l2cap_conn_unreliable(struct l2cap_conn
*conn
, int err
)
1269 struct l2cap_chan
*chan
;
1271 BT_DBG("conn %p", conn
);
1273 mutex_lock(&conn
->chan_lock
);
1275 list_for_each_entry(chan
, &conn
->chan_l
, list
) {
1276 if (test_bit(FLAG_FORCE_RELIABLE
, &chan
->flags
))
1277 __l2cap_chan_set_err(chan
, err
);
1280 mutex_unlock(&conn
->chan_lock
);
1283 static void l2cap_info_timeout(struct work_struct
*work
)
1285 struct l2cap_conn
*conn
= container_of(work
, struct l2cap_conn
,
1288 conn
->info_state
|= L2CAP_INFO_FEAT_MASK_REQ_DONE
;
1289 conn
->info_ident
= 0;
1291 l2cap_conn_start(conn
);
1294 static void l2cap_conn_del(struct hci_conn
*hcon
, int err
)
1296 struct l2cap_conn
*conn
= hcon
->l2cap_data
;
1297 struct l2cap_chan
*chan
, *l
;
1302 BT_DBG("hcon %p conn %p, err %d", hcon
, conn
, err
);
1304 kfree_skb(conn
->rx_skb
);
1306 mutex_lock(&conn
->chan_lock
);
1309 list_for_each_entry_safe(chan
, l
, &conn
->chan_l
, list
) {
1310 l2cap_chan_hold(chan
);
1311 l2cap_chan_lock(chan
);
1313 l2cap_chan_del(chan
, err
);
1315 l2cap_chan_unlock(chan
);
1317 chan
->ops
->close(chan
->data
);
1318 l2cap_chan_put(chan
);
1321 mutex_unlock(&conn
->chan_lock
);
1323 hci_chan_del(conn
->hchan
);
1325 if (conn
->info_state
& L2CAP_INFO_FEAT_MASK_REQ_SENT
)
1326 cancel_delayed_work_sync(&conn
->info_timer
);
1328 if (test_and_clear_bit(HCI_CONN_LE_SMP_PEND
, &hcon
->flags
)) {
1329 cancel_delayed_work_sync(&conn
->security_timer
);
1330 smp_chan_destroy(conn
);
1333 hcon
->l2cap_data
= NULL
;
1337 static void security_timeout(struct work_struct
*work
)
1339 struct l2cap_conn
*conn
= container_of(work
, struct l2cap_conn
,
1340 security_timer
.work
);
1342 l2cap_conn_del(conn
->hcon
, ETIMEDOUT
);
1345 static struct l2cap_conn
*l2cap_conn_add(struct hci_conn
*hcon
, u8 status
)
1347 struct l2cap_conn
*conn
= hcon
->l2cap_data
;
1348 struct hci_chan
*hchan
;
1353 hchan
= hci_chan_create(hcon
);
1357 conn
= kzalloc(sizeof(struct l2cap_conn
), GFP_ATOMIC
);
1359 hci_chan_del(hchan
);
1363 hcon
->l2cap_data
= conn
;
1365 conn
->hchan
= hchan
;
1367 BT_DBG("hcon %p conn %p hchan %p", hcon
, conn
, hchan
);
1369 if (hcon
->hdev
->le_mtu
&& hcon
->type
== LE_LINK
)
1370 conn
->mtu
= hcon
->hdev
->le_mtu
;
1372 conn
->mtu
= hcon
->hdev
->acl_mtu
;
1374 conn
->src
= &hcon
->hdev
->bdaddr
;
1375 conn
->dst
= &hcon
->dst
;
1377 conn
->feat_mask
= 0;
1379 spin_lock_init(&conn
->lock
);
1380 mutex_init(&conn
->chan_lock
);
1382 INIT_LIST_HEAD(&conn
->chan_l
);
1384 if (hcon
->type
== LE_LINK
)
1385 INIT_DELAYED_WORK(&conn
->security_timer
, security_timeout
);
1387 INIT_DELAYED_WORK(&conn
->info_timer
, l2cap_info_timeout
);
1389 conn
->disc_reason
= HCI_ERROR_REMOTE_USER_TERM
;
1394 /* ---- Socket interface ---- */
1396 /* Find socket with psm and source / destination bdaddr.
1397 * Returns closest match.
1399 static struct l2cap_chan
*l2cap_global_chan_by_psm(int state
, __le16 psm
,
1403 struct l2cap_chan
*c
, *c1
= NULL
;
1405 read_lock(&chan_list_lock
);
1407 list_for_each_entry(c
, &chan_list
, global_l
) {
1408 struct sock
*sk
= c
->sk
;
1410 if (state
&& c
->state
!= state
)
1413 if (c
->psm
== psm
) {
1414 int src_match
, dst_match
;
1415 int src_any
, dst_any
;
1418 src_match
= !bacmp(&bt_sk(sk
)->src
, src
);
1419 dst_match
= !bacmp(&bt_sk(sk
)->dst
, dst
);
1420 if (src_match
&& dst_match
) {
1421 read_unlock(&chan_list_lock
);
1426 src_any
= !bacmp(&bt_sk(sk
)->src
, BDADDR_ANY
);
1427 dst_any
= !bacmp(&bt_sk(sk
)->dst
, BDADDR_ANY
);
1428 if ((src_match
&& dst_any
) || (src_any
&& dst_match
) ||
1429 (src_any
&& dst_any
))
1434 read_unlock(&chan_list_lock
);
1439 int l2cap_chan_connect(struct l2cap_chan
*chan
, __le16 psm
, u16 cid
,
1440 bdaddr_t
*dst
, u8 dst_type
)
1442 struct sock
*sk
= chan
->sk
;
1443 bdaddr_t
*src
= &bt_sk(sk
)->src
;
1444 struct l2cap_conn
*conn
;
1445 struct hci_conn
*hcon
;
1446 struct hci_dev
*hdev
;
1450 BT_DBG("%s -> %s (type %u) psm 0x%2.2x", batostr(src
), batostr(dst
),
1451 dst_type
, __le16_to_cpu(chan
->psm
));
1453 hdev
= hci_get_route(dst
, src
);
1455 return -EHOSTUNREACH
;
1459 l2cap_chan_lock(chan
);
1461 /* PSM must be odd and lsb of upper byte must be 0 */
1462 if ((__le16_to_cpu(psm
) & 0x0101) != 0x0001 && !cid
&&
1463 chan
->chan_type
!= L2CAP_CHAN_RAW
) {
1468 if (chan
->chan_type
== L2CAP_CHAN_CONN_ORIENTED
&& !(psm
|| cid
)) {
1473 switch (chan
->mode
) {
1474 case L2CAP_MODE_BASIC
:
1476 case L2CAP_MODE_ERTM
:
1477 case L2CAP_MODE_STREAMING
:
1488 switch (sk
->sk_state
) {
1492 /* Already connecting */
1498 /* Already connected */
1514 /* Set destination address and psm */
1515 bacpy(&bt_sk(sk
)->dst
, dst
);
1522 auth_type
= l2cap_get_auth_type(chan
);
1524 if (chan
->dcid
== L2CAP_CID_LE_DATA
)
1525 hcon
= hci_connect(hdev
, LE_LINK
, dst
, dst_type
,
1526 chan
->sec_level
, auth_type
);
1528 hcon
= hci_connect(hdev
, ACL_LINK
, dst
, dst_type
,
1529 chan
->sec_level
, auth_type
);
1532 err
= PTR_ERR(hcon
);
1536 conn
= l2cap_conn_add(hcon
, 0);
1543 if (hcon
->type
== LE_LINK
) {
1546 if (!list_empty(&conn
->chan_l
)) {
1555 /* Update source addr of the socket */
1556 bacpy(src
, conn
->src
);
1558 l2cap_chan_unlock(chan
);
1559 l2cap_chan_add(conn
, chan
);
1560 l2cap_chan_lock(chan
);
1562 l2cap_state_change(chan
, BT_CONNECT
);
1563 __set_chan_timer(chan
, sk
->sk_sndtimeo
);
1565 if (hcon
->state
== BT_CONNECTED
) {
1566 if (chan
->chan_type
!= L2CAP_CHAN_CONN_ORIENTED
) {
1567 __clear_chan_timer(chan
);
1568 if (l2cap_chan_check_security(chan
))
1569 l2cap_state_change(chan
, BT_CONNECTED
);
1571 l2cap_do_start(chan
);
1577 l2cap_chan_unlock(chan
);
1578 hci_dev_unlock(hdev
);
1583 int __l2cap_wait_ack(struct sock
*sk
)
1585 struct l2cap_chan
*chan
= l2cap_pi(sk
)->chan
;
1586 DECLARE_WAITQUEUE(wait
, current
);
1590 add_wait_queue(sk_sleep(sk
), &wait
);
1591 set_current_state(TASK_INTERRUPTIBLE
);
1592 while (chan
->unacked_frames
> 0 && chan
->conn
) {
1596 if (signal_pending(current
)) {
1597 err
= sock_intr_errno(timeo
);
1602 timeo
= schedule_timeout(timeo
);
1604 set_current_state(TASK_INTERRUPTIBLE
);
1606 err
= sock_error(sk
);
1610 set_current_state(TASK_RUNNING
);
1611 remove_wait_queue(sk_sleep(sk
), &wait
);
1615 static void l2cap_monitor_timeout(struct work_struct
*work
)
1617 struct l2cap_chan
*chan
= container_of(work
, struct l2cap_chan
,
1618 monitor_timer
.work
);
1620 BT_DBG("chan %p", chan
);
1622 l2cap_chan_lock(chan
);
1624 if (chan
->retry_count
>= chan
->remote_max_tx
) {
1625 l2cap_send_disconn_req(chan
->conn
, chan
, ECONNABORTED
);
1626 l2cap_chan_unlock(chan
);
1627 l2cap_chan_put(chan
);
1631 chan
->retry_count
++;
1632 __set_monitor_timer(chan
);
1634 l2cap_send_rr_or_rnr(chan
, L2CAP_CTRL_POLL
);
1635 l2cap_chan_unlock(chan
);
1636 l2cap_chan_put(chan
);
1639 static void l2cap_retrans_timeout(struct work_struct
*work
)
1641 struct l2cap_chan
*chan
= container_of(work
, struct l2cap_chan
,
1642 retrans_timer
.work
);
1644 BT_DBG("chan %p", chan
);
1646 l2cap_chan_lock(chan
);
1648 chan
->retry_count
= 1;
1649 __set_monitor_timer(chan
);
1651 set_bit(CONN_WAIT_F
, &chan
->conn_state
);
1653 l2cap_send_rr_or_rnr(chan
, L2CAP_CTRL_POLL
);
1655 l2cap_chan_unlock(chan
);
1656 l2cap_chan_put(chan
);
1659 static void l2cap_drop_acked_frames(struct l2cap_chan
*chan
)
1661 struct sk_buff
*skb
;
1663 while ((skb
= skb_peek(&chan
->tx_q
)) &&
1664 chan
->unacked_frames
) {
1665 if (bt_cb(skb
)->control
.txseq
== chan
->expected_ack_seq
)
1668 skb
= skb_dequeue(&chan
->tx_q
);
1671 chan
->unacked_frames
--;
1674 if (!chan
->unacked_frames
)
1675 __clear_retrans_timer(chan
);
1678 static int l2cap_streaming_send(struct l2cap_chan
*chan
,
1679 struct sk_buff_head
*skbs
)
1681 struct sk_buff
*skb
;
1682 struct l2cap_ctrl
*control
;
1684 BT_DBG("chan %p, skbs %p", chan
, skbs
);
1686 if (chan
->state
!= BT_CONNECTED
)
1689 skb_queue_splice_tail_init(skbs
, &chan
->tx_q
);
1691 while (!skb_queue_empty(&chan
->tx_q
)) {
1693 skb
= skb_dequeue(&chan
->tx_q
);
1695 bt_cb(skb
)->control
.retries
= 1;
1696 control
= &bt_cb(skb
)->control
;
1698 control
->reqseq
= 0;
1699 control
->txseq
= chan
->next_tx_seq
;
1701 __pack_control(chan
, control
, skb
);
1703 if (chan
->fcs
== L2CAP_FCS_CRC16
) {
1704 u16 fcs
= crc16(0, (u8
*) skb
->data
, skb
->len
);
1705 put_unaligned_le16(fcs
, skb_put(skb
, L2CAP_FCS_SIZE
));
1708 l2cap_do_send(chan
, skb
);
1710 BT_DBG("Sent txseq %d", (int)control
->txseq
);
1712 chan
->next_tx_seq
= __next_seq(chan
, chan
->next_tx_seq
);
1713 chan
->frames_sent
++;
1719 static void l2cap_retransmit_one_frame(struct l2cap_chan
*chan
, u16 tx_seq
)
1721 struct sk_buff
*skb
, *tx_skb
;
1725 skb
= skb_peek(&chan
->tx_q
);
1729 while (bt_cb(skb
)->control
.txseq
!= tx_seq
) {
1730 if (skb_queue_is_last(&chan
->tx_q
, skb
))
1733 skb
= skb_queue_next(&chan
->tx_q
, skb
);
1736 if (bt_cb(skb
)->control
.retries
== chan
->remote_max_tx
&&
1737 chan
->remote_max_tx
) {
1738 l2cap_send_disconn_req(chan
->conn
, chan
, ECONNABORTED
);
1742 tx_skb
= skb_clone(skb
, GFP_ATOMIC
);
1743 bt_cb(skb
)->control
.retries
++;
1745 control
= __get_control(chan
, tx_skb
->data
+ L2CAP_HDR_SIZE
);
1746 control
&= __get_sar_mask(chan
);
1748 if (test_and_clear_bit(CONN_SEND_FBIT
, &chan
->conn_state
))
1749 control
|= __set_ctrl_final(chan
);
1751 control
|= __set_reqseq(chan
, chan
->buffer_seq
);
1752 control
|= __set_txseq(chan
, tx_seq
);
1754 __put_control(chan
, control
, tx_skb
->data
+ L2CAP_HDR_SIZE
);
1756 if (chan
->fcs
== L2CAP_FCS_CRC16
) {
1757 fcs
= crc16(0, (u8
*)tx_skb
->data
,
1758 tx_skb
->len
- L2CAP_FCS_SIZE
);
1759 put_unaligned_le16(fcs
,
1760 tx_skb
->data
+ tx_skb
->len
- L2CAP_FCS_SIZE
);
1763 l2cap_do_send(chan
, tx_skb
);
1766 static int l2cap_ertm_send(struct l2cap_chan
*chan
)
1768 struct sk_buff
*skb
, *tx_skb
;
1769 struct l2cap_ctrl
*control
;
1772 BT_DBG("chan %p", chan
);
1774 if (chan
->state
!= BT_CONNECTED
)
1777 if (test_bit(CONN_REMOTE_BUSY
, &chan
->conn_state
))
1780 while (chan
->tx_send_head
&&
1781 chan
->unacked_frames
< chan
->remote_tx_win
&&
1782 chan
->tx_state
== L2CAP_TX_STATE_XMIT
) {
1784 skb
= chan
->tx_send_head
;
1786 bt_cb(skb
)->control
.retries
= 1;
1787 control
= &bt_cb(skb
)->control
;
1789 if (test_and_clear_bit(CONN_SEND_FBIT
, &chan
->conn_state
))
1792 control
->reqseq
= chan
->buffer_seq
;
1793 chan
->last_acked_seq
= chan
->buffer_seq
;
1794 control
->txseq
= chan
->next_tx_seq
;
1796 __pack_control(chan
, control
, skb
);
1798 if (chan
->fcs
== L2CAP_FCS_CRC16
) {
1799 u16 fcs
= crc16(0, (u8
*) skb
->data
, skb
->len
);
1800 put_unaligned_le16(fcs
, skb_put(skb
, L2CAP_FCS_SIZE
));
1803 /* Clone after data has been modified. Data is assumed to be
1804 read-only (for locking purposes) on cloned sk_buffs.
1806 tx_skb
= skb_clone(skb
, GFP_KERNEL
);
1811 __set_retrans_timer(chan
);
1813 chan
->next_tx_seq
= __next_seq(chan
, chan
->next_tx_seq
);
1814 chan
->unacked_frames
++;
1815 chan
->frames_sent
++;
1818 if (skb_queue_is_last(&chan
->tx_q
, skb
))
1819 chan
->tx_send_head
= NULL
;
1821 chan
->tx_send_head
= skb_queue_next(&chan
->tx_q
, skb
);
1823 l2cap_do_send(chan
, tx_skb
);
1824 BT_DBG("Sent txseq %d", (int)control
->txseq
);
1827 BT_DBG("Sent %d, %d unacked, %d in ERTM queue", sent
,
1828 (int) chan
->unacked_frames
, skb_queue_len(&chan
->tx_q
));
1833 static int l2cap_retransmit_frames(struct l2cap_chan
*chan
)
1837 if (!skb_queue_empty(&chan
->tx_q
))
1838 chan
->tx_send_head
= chan
->tx_q
.next
;
1840 chan
->next_tx_seq
= chan
->expected_ack_seq
;
1841 ret
= l2cap_ertm_send(chan
);
1845 static void __l2cap_send_ack(struct l2cap_chan
*chan
)
1849 control
|= __set_reqseq(chan
, chan
->buffer_seq
);
1851 if (test_bit(CONN_LOCAL_BUSY
, &chan
->conn_state
)) {
1852 control
|= __set_ctrl_super(chan
, L2CAP_SUPER_RNR
);
1853 set_bit(CONN_RNR_SENT
, &chan
->conn_state
);
1857 if (l2cap_ertm_send(chan
) > 0)
1860 control
|= __set_ctrl_super(chan
, L2CAP_SUPER_RR
);
/* Send an immediate ack, cancelling any pending delayed-ack timer. */
static void l2cap_send_ack(struct l2cap_chan *chan)
{
	__clear_ack_timer(chan);
	__l2cap_send_ack(chan);
}
1869 static void l2cap_send_srejtail(struct l2cap_chan
*chan
)
1871 struct srej_list
*tail
;
1874 control
= __set_ctrl_super(chan
, L2CAP_SUPER_SREJ
);
1875 control
|= __set_ctrl_final(chan
);
1877 tail
= list_entry((&chan
->srej_l
)->prev
, struct srej_list
, list
);
1878 control
|= __set_reqseq(chan
, tail
->tx_seq
);
1881 static inline int l2cap_skbuff_fromiovec(struct l2cap_chan
*chan
,
1882 struct msghdr
*msg
, int len
,
1883 int count
, struct sk_buff
*skb
)
1885 struct l2cap_conn
*conn
= chan
->conn
;
1886 struct sk_buff
**frag
;
1889 if (memcpy_fromiovec(skb_put(skb
, count
), msg
->msg_iov
, count
))
1895 /* Continuation fragments (no L2CAP header) */
1896 frag
= &skb_shinfo(skb
)->frag_list
;
1898 struct sk_buff
*tmp
;
1900 count
= min_t(unsigned int, conn
->mtu
, len
);
1902 tmp
= chan
->ops
->alloc_skb(chan
, count
,
1903 msg
->msg_flags
& MSG_DONTWAIT
);
1905 return PTR_ERR(tmp
);
1909 if (memcpy_fromiovec(skb_put(*frag
, count
), msg
->msg_iov
, count
))
1912 (*frag
)->priority
= skb
->priority
;
1917 skb
->len
+= (*frag
)->len
;
1918 skb
->data_len
+= (*frag
)->len
;
1920 frag
= &(*frag
)->next
;
1926 static struct sk_buff
*l2cap_create_connless_pdu(struct l2cap_chan
*chan
,
1927 struct msghdr
*msg
, size_t len
,
1930 struct l2cap_conn
*conn
= chan
->conn
;
1931 struct sk_buff
*skb
;
1932 int err
, count
, hlen
= L2CAP_HDR_SIZE
+ L2CAP_PSMLEN_SIZE
;
1933 struct l2cap_hdr
*lh
;
1935 BT_DBG("chan %p len %d priority %u", chan
, (int)len
, priority
);
1937 count
= min_t(unsigned int, (conn
->mtu
- hlen
), len
);
1939 skb
= chan
->ops
->alloc_skb(chan
, count
+ hlen
,
1940 msg
->msg_flags
& MSG_DONTWAIT
);
1944 skb
->priority
= priority
;
1946 /* Create L2CAP header */
1947 lh
= (struct l2cap_hdr
*) skb_put(skb
, L2CAP_HDR_SIZE
);
1948 lh
->cid
= cpu_to_le16(chan
->dcid
);
1949 lh
->len
= cpu_to_le16(len
+ L2CAP_PSMLEN_SIZE
);
1950 put_unaligned(chan
->psm
, skb_put(skb
, L2CAP_PSMLEN_SIZE
));
1952 err
= l2cap_skbuff_fromiovec(chan
, msg
, len
, count
, skb
);
1953 if (unlikely(err
< 0)) {
1955 return ERR_PTR(err
);
1960 static struct sk_buff
*l2cap_create_basic_pdu(struct l2cap_chan
*chan
,
1961 struct msghdr
*msg
, size_t len
,
1964 struct l2cap_conn
*conn
= chan
->conn
;
1965 struct sk_buff
*skb
;
1967 struct l2cap_hdr
*lh
;
1969 BT_DBG("chan %p len %d", chan
, (int)len
);
1971 count
= min_t(unsigned int, (conn
->mtu
- L2CAP_HDR_SIZE
), len
);
1973 skb
= chan
->ops
->alloc_skb(chan
, count
+ L2CAP_HDR_SIZE
,
1974 msg
->msg_flags
& MSG_DONTWAIT
);
1978 skb
->priority
= priority
;
1980 /* Create L2CAP header */
1981 lh
= (struct l2cap_hdr
*) skb_put(skb
, L2CAP_HDR_SIZE
);
1982 lh
->cid
= cpu_to_le16(chan
->dcid
);
1983 lh
->len
= cpu_to_le16(len
);
1985 err
= l2cap_skbuff_fromiovec(chan
, msg
, len
, count
, skb
);
1986 if (unlikely(err
< 0)) {
1988 return ERR_PTR(err
);
1993 static struct sk_buff
*l2cap_create_iframe_pdu(struct l2cap_chan
*chan
,
1994 struct msghdr
*msg
, size_t len
,
1997 struct l2cap_conn
*conn
= chan
->conn
;
1998 struct sk_buff
*skb
;
1999 int err
, count
, hlen
;
2000 struct l2cap_hdr
*lh
;
2002 BT_DBG("chan %p len %d", chan
, (int)len
);
2005 return ERR_PTR(-ENOTCONN
);
2007 if (test_bit(FLAG_EXT_CTRL
, &chan
->flags
))
2008 hlen
= L2CAP_EXT_HDR_SIZE
;
2010 hlen
= L2CAP_ENH_HDR_SIZE
;
2013 hlen
+= L2CAP_SDULEN_SIZE
;
2015 if (chan
->fcs
== L2CAP_FCS_CRC16
)
2016 hlen
+= L2CAP_FCS_SIZE
;
2018 count
= min_t(unsigned int, (conn
->mtu
- hlen
), len
);
2020 skb
= chan
->ops
->alloc_skb(chan
, count
+ hlen
,
2021 msg
->msg_flags
& MSG_DONTWAIT
);
2025 /* Create L2CAP header */
2026 lh
= (struct l2cap_hdr
*) skb_put(skb
, L2CAP_HDR_SIZE
);
2027 lh
->cid
= cpu_to_le16(chan
->dcid
);
2028 lh
->len
= cpu_to_le16(len
+ (hlen
- L2CAP_HDR_SIZE
));
2030 /* Control header is populated later */
2031 if (test_bit(FLAG_EXT_CTRL
, &chan
->flags
))
2032 put_unaligned_le32(0, skb_put(skb
, L2CAP_EXT_CTRL_SIZE
));
2034 put_unaligned_le16(0, skb_put(skb
, L2CAP_ENH_CTRL_SIZE
));
2037 put_unaligned_le16(sdulen
, skb_put(skb
, L2CAP_SDULEN_SIZE
));
2039 err
= l2cap_skbuff_fromiovec(chan
, msg
, len
, count
, skb
);
2040 if (unlikely(err
< 0)) {
2042 return ERR_PTR(err
);
2045 bt_cb(skb
)->control
.fcs
= chan
->fcs
;
2046 bt_cb(skb
)->control
.retries
= 0;
2050 static int l2cap_segment_sdu(struct l2cap_chan
*chan
,
2051 struct sk_buff_head
*seg_queue
,
2052 struct msghdr
*msg
, size_t len
)
2054 struct sk_buff
*skb
;
2060 BT_DBG("chan %p, msg %p, len %d", chan
, msg
, (int)len
);
2062 /* It is critical that ERTM PDUs fit in a single HCI fragment,
2063 * so fragmented skbs are not used. The HCI layer's handling
2064 * of fragmented skbs is not compatible with ERTM's queueing.
2067 /* PDU size is derived from the HCI MTU */
2068 pdu_len
= chan
->conn
->mtu
;
2070 pdu_len
= min_t(size_t, pdu_len
, L2CAP_BREDR_MAX_PAYLOAD
);
2072 /* Adjust for largest possible L2CAP overhead. */
2073 pdu_len
-= L2CAP_EXT_HDR_SIZE
+ L2CAP_FCS_SIZE
;
2075 /* Remote device may have requested smaller PDUs */
2076 pdu_len
= min_t(size_t, pdu_len
, chan
->remote_mps
);
2078 if (len
<= pdu_len
) {
2079 sar
= L2CAP_SAR_UNSEGMENTED
;
2083 sar
= L2CAP_SAR_START
;
2085 pdu_len
-= L2CAP_SDULEN_SIZE
;
2089 skb
= l2cap_create_iframe_pdu(chan
, msg
, pdu_len
, sdu_len
);
2092 __skb_queue_purge(seg_queue
);
2093 return PTR_ERR(skb
);
2096 bt_cb(skb
)->control
.sar
= sar
;
2097 __skb_queue_tail(seg_queue
, skb
);
2102 pdu_len
+= L2CAP_SDULEN_SIZE
;
2105 if (len
<= pdu_len
) {
2106 sar
= L2CAP_SAR_END
;
2109 sar
= L2CAP_SAR_CONTINUE
;
2116 int l2cap_chan_send(struct l2cap_chan
*chan
, struct msghdr
*msg
, size_t len
,
2119 struct sk_buff
*skb
;
2121 struct sk_buff_head seg_queue
;
2123 /* Connectionless channel */
2124 if (chan
->chan_type
== L2CAP_CHAN_CONN_LESS
) {
2125 skb
= l2cap_create_connless_pdu(chan
, msg
, len
, priority
);
2127 return PTR_ERR(skb
);
2129 l2cap_do_send(chan
, skb
);
2133 switch (chan
->mode
) {
2134 case L2CAP_MODE_BASIC
:
2135 /* Check outgoing MTU */
2136 if (len
> chan
->omtu
)
2139 /* Create a basic PDU */
2140 skb
= l2cap_create_basic_pdu(chan
, msg
, len
, priority
);
2142 return PTR_ERR(skb
);
2144 l2cap_do_send(chan
, skb
);
2148 case L2CAP_MODE_ERTM
:
2149 case L2CAP_MODE_STREAMING
:
2150 /* Check outgoing MTU */
2151 if (len
> chan
->omtu
) {
2156 __skb_queue_head_init(&seg_queue
);
2158 /* Do segmentation before calling in to the state machine,
2159 * since it's possible to block while waiting for memory
2162 err
= l2cap_segment_sdu(chan
, &seg_queue
, msg
, len
);
2164 /* The channel could have been closed while segmenting,
2165 * check that it is still connected.
2167 if (chan
->state
!= BT_CONNECTED
) {
2168 __skb_queue_purge(&seg_queue
);
2175 if (chan
->mode
== L2CAP_MODE_ERTM
)
2176 err
= l2cap_tx(chan
, 0, &seg_queue
,
2177 L2CAP_EV_DATA_REQUEST
);
2179 err
= l2cap_streaming_send(chan
, &seg_queue
);
2184 /* If the skbs were not queued for sending, they'll still be in
2185 * seg_queue and need to be purged.
2187 __skb_queue_purge(&seg_queue
);
2191 BT_DBG("bad state %1.1x", chan
->mode
);
2198 static void l2cap_process_reqseq(struct l2cap_chan
*chan
, u16 reqseq
)
2200 struct sk_buff
*acked_skb
;
2203 BT_DBG("chan %p, reqseq %d", chan
, reqseq
);
2205 if (chan
->unacked_frames
== 0 || reqseq
== chan
->expected_ack_seq
)
2208 BT_DBG("expected_ack_seq %d, unacked_frames %d",
2209 chan
->expected_ack_seq
, chan
->unacked_frames
);
2211 for (ackseq
= chan
->expected_ack_seq
; ackseq
!= reqseq
;
2212 ackseq
= __next_seq(chan
, ackseq
)) {
2214 acked_skb
= l2cap_ertm_seq_in_queue(&chan
->tx_q
, ackseq
);
2216 skb_unlink(acked_skb
, &chan
->tx_q
);
2217 kfree_skb(acked_skb
);
2218 chan
->unacked_frames
--;
2222 chan
->expected_ack_seq
= reqseq
;
2224 if (chan
->unacked_frames
== 0)
2225 __clear_retrans_timer(chan
);
2227 BT_DBG("unacked_frames %d", (int) chan
->unacked_frames
);
2230 static void l2cap_abort_rx_srej_sent(struct l2cap_chan
*chan
)
2232 BT_DBG("chan %p", chan
);
2234 chan
->expected_tx_seq
= chan
->buffer_seq
;
2235 l2cap_seq_list_clear(&chan
->srej_list
);
2236 skb_queue_purge(&chan
->srej_q
);
2237 chan
->rx_state
= L2CAP_RX_STATE_RECV
;
2240 static int l2cap_tx_state_xmit(struct l2cap_chan
*chan
,
2241 struct l2cap_ctrl
*control
,
2242 struct sk_buff_head
*skbs
, u8 event
)
2246 BT_DBG("chan %p, control %p, skbs %p, event %d", chan
, control
, skbs
,
2250 case L2CAP_EV_DATA_REQUEST
:
2251 if (chan
->tx_send_head
== NULL
)
2252 chan
->tx_send_head
= skb_peek(skbs
);
2254 skb_queue_splice_tail_init(skbs
, &chan
->tx_q
);
2255 l2cap_ertm_send(chan
);
2257 case L2CAP_EV_LOCAL_BUSY_DETECTED
:
2258 BT_DBG("Enter LOCAL_BUSY");
2259 set_bit(CONN_LOCAL_BUSY
, &chan
->conn_state
);
2261 if (chan
->rx_state
== L2CAP_RX_STATE_SREJ_SENT
) {
2262 /* The SREJ_SENT state must be aborted if we are to
2263 * enter the LOCAL_BUSY state.
2265 l2cap_abort_rx_srej_sent(chan
);
2268 l2cap_send_ack(chan
);
2271 case L2CAP_EV_LOCAL_BUSY_CLEAR
:
2272 BT_DBG("Exit LOCAL_BUSY");
2273 clear_bit(CONN_LOCAL_BUSY
, &chan
->conn_state
);
2275 if (test_bit(CONN_RNR_SENT
, &chan
->conn_state
)) {
2276 struct l2cap_ctrl local_control
;
2278 memset(&local_control
, 0, sizeof(local_control
));
2279 local_control
.sframe
= 1;
2280 local_control
.super
= L2CAP_SUPER_RR
;
2281 local_control
.poll
= 1;
2282 local_control
.reqseq
= chan
->buffer_seq
;
2283 l2cap_send_sframe(chan
, &local_control
);
2285 chan
->retry_count
= 1;
2286 __set_monitor_timer(chan
);
2287 chan
->tx_state
= L2CAP_TX_STATE_WAIT_F
;
2290 case L2CAP_EV_RECV_REQSEQ_AND_FBIT
:
2291 l2cap_process_reqseq(chan
, control
->reqseq
);
2293 case L2CAP_EV_EXPLICIT_POLL
:
2294 l2cap_send_rr_or_rnr(chan
, 1);
2295 chan
->retry_count
= 1;
2296 __set_monitor_timer(chan
);
2297 __clear_ack_timer(chan
);
2298 chan
->tx_state
= L2CAP_TX_STATE_WAIT_F
;
2300 case L2CAP_EV_RETRANS_TO
:
2301 l2cap_send_rr_or_rnr(chan
, 1);
2302 chan
->retry_count
= 1;
2303 __set_monitor_timer(chan
);
2304 chan
->tx_state
= L2CAP_TX_STATE_WAIT_F
;
2306 case L2CAP_EV_RECV_FBIT
:
2307 /* Nothing to process */
2316 static int l2cap_tx_state_wait_f(struct l2cap_chan
*chan
,
2317 struct l2cap_ctrl
*control
,
2318 struct sk_buff_head
*skbs
, u8 event
)
2322 BT_DBG("chan %p, control %p, skbs %p, event %d", chan
, control
, skbs
,
2326 case L2CAP_EV_DATA_REQUEST
:
2327 if (chan
->tx_send_head
== NULL
)
2328 chan
->tx_send_head
= skb_peek(skbs
);
2329 /* Queue data, but don't send. */
2330 skb_queue_splice_tail_init(skbs
, &chan
->tx_q
);
2332 case L2CAP_EV_LOCAL_BUSY_DETECTED
:
2333 BT_DBG("Enter LOCAL_BUSY");
2334 set_bit(CONN_LOCAL_BUSY
, &chan
->conn_state
);
2336 if (chan
->rx_state
== L2CAP_RX_STATE_SREJ_SENT
) {
2337 /* The SREJ_SENT state must be aborted if we are to
2338 * enter the LOCAL_BUSY state.
2340 l2cap_abort_rx_srej_sent(chan
);
2343 l2cap_send_ack(chan
);
2346 case L2CAP_EV_LOCAL_BUSY_CLEAR
:
2347 BT_DBG("Exit LOCAL_BUSY");
2348 clear_bit(CONN_LOCAL_BUSY
, &chan
->conn_state
);
2350 if (test_bit(CONN_RNR_SENT
, &chan
->conn_state
)) {
2351 struct l2cap_ctrl local_control
;
2352 memset(&local_control
, 0, sizeof(local_control
));
2353 local_control
.sframe
= 1;
2354 local_control
.super
= L2CAP_SUPER_RR
;
2355 local_control
.poll
= 1;
2356 local_control
.reqseq
= chan
->buffer_seq
;
2357 l2cap_send_sframe(chan
, &local_control
);
2359 chan
->retry_count
= 1;
2360 __set_monitor_timer(chan
);
2361 chan
->tx_state
= L2CAP_TX_STATE_WAIT_F
;
2364 case L2CAP_EV_RECV_REQSEQ_AND_FBIT
:
2365 l2cap_process_reqseq(chan
, control
->reqseq
);
2369 case L2CAP_EV_RECV_FBIT
:
2370 if (control
&& control
->final
) {
2371 __clear_monitor_timer(chan
);
2372 if (chan
->unacked_frames
> 0)
2373 __set_retrans_timer(chan
);
2374 chan
->retry_count
= 0;
2375 chan
->tx_state
= L2CAP_TX_STATE_XMIT
;
2376 BT_DBG("recv fbit tx_state 0x2.2%x", chan
->tx_state
);
2379 case L2CAP_EV_EXPLICIT_POLL
:
2382 case L2CAP_EV_MONITOR_TO
:
2383 if (chan
->max_tx
== 0 || chan
->retry_count
< chan
->max_tx
) {
2384 l2cap_send_rr_or_rnr(chan
, 1);
2385 __set_monitor_timer(chan
);
2386 chan
->retry_count
++;
2388 l2cap_send_disconn_req(chan
->conn
, chan
, ECONNABORTED
);
2398 static int l2cap_tx(struct l2cap_chan
*chan
, struct l2cap_ctrl
*control
,
2399 struct sk_buff_head
*skbs
, u8 event
)
2403 BT_DBG("chan %p, control %p, skbs %p, event %d, state %d",
2404 chan
, control
, skbs
, event
, chan
->tx_state
);
2406 switch (chan
->tx_state
) {
2407 case L2CAP_TX_STATE_XMIT
:
2408 err
= l2cap_tx_state_xmit(chan
, control
, skbs
, event
);
2410 case L2CAP_TX_STATE_WAIT_F
:
2411 err
= l2cap_tx_state_wait_f(chan
, control
, skbs
, event
);
2421 /* Copy frame to all raw sockets on that connection */
2422 static void l2cap_raw_recv(struct l2cap_conn
*conn
, struct sk_buff
*skb
)
2424 struct sk_buff
*nskb
;
2425 struct l2cap_chan
*chan
;
2427 BT_DBG("conn %p", conn
);
2429 mutex_lock(&conn
->chan_lock
);
2431 list_for_each_entry(chan
, &conn
->chan_l
, list
) {
2432 struct sock
*sk
= chan
->sk
;
2433 if (chan
->chan_type
!= L2CAP_CHAN_RAW
)
2436 /* Don't send frame to the socket it came from */
2439 nskb
= skb_clone(skb
, GFP_ATOMIC
);
2443 if (chan
->ops
->recv(chan
->data
, nskb
))
2447 mutex_unlock(&conn
->chan_lock
);
2450 /* ---- L2CAP signalling commands ---- */
2451 static struct sk_buff
*l2cap_build_cmd(struct l2cap_conn
*conn
,
2452 u8 code
, u8 ident
, u16 dlen
, void *data
)
2454 struct sk_buff
*skb
, **frag
;
2455 struct l2cap_cmd_hdr
*cmd
;
2456 struct l2cap_hdr
*lh
;
2459 BT_DBG("conn %p, code 0x%2.2x, ident 0x%2.2x, len %d",
2460 conn
, code
, ident
, dlen
);
2462 len
= L2CAP_HDR_SIZE
+ L2CAP_CMD_HDR_SIZE
+ dlen
;
2463 count
= min_t(unsigned int, conn
->mtu
, len
);
2465 skb
= bt_skb_alloc(count
, GFP_ATOMIC
);
2469 lh
= (struct l2cap_hdr
*) skb_put(skb
, L2CAP_HDR_SIZE
);
2470 lh
->len
= cpu_to_le16(L2CAP_CMD_HDR_SIZE
+ dlen
);
2472 if (conn
->hcon
->type
== LE_LINK
)
2473 lh
->cid
= cpu_to_le16(L2CAP_CID_LE_SIGNALING
);
2475 lh
->cid
= cpu_to_le16(L2CAP_CID_SIGNALING
);
2477 cmd
= (struct l2cap_cmd_hdr
*) skb_put(skb
, L2CAP_CMD_HDR_SIZE
);
2480 cmd
->len
= cpu_to_le16(dlen
);
2483 count
-= L2CAP_HDR_SIZE
+ L2CAP_CMD_HDR_SIZE
;
2484 memcpy(skb_put(skb
, count
), data
, count
);
2490 /* Continuation fragments (no L2CAP header) */
2491 frag
= &skb_shinfo(skb
)->frag_list
;
2493 count
= min_t(unsigned int, conn
->mtu
, len
);
2495 *frag
= bt_skb_alloc(count
, GFP_ATOMIC
);
2499 memcpy(skb_put(*frag
, count
), data
, count
);
2504 frag
= &(*frag
)->next
;
2514 static inline int l2cap_get_conf_opt(void **ptr
, int *type
, int *olen
, unsigned long *val
)
2516 struct l2cap_conf_opt
*opt
= *ptr
;
2519 len
= L2CAP_CONF_OPT_SIZE
+ opt
->len
;
2527 *val
= *((u8
*) opt
->val
);
2531 *val
= get_unaligned_le16(opt
->val
);
2535 *val
= get_unaligned_le32(opt
->val
);
2539 *val
= (unsigned long) opt
->val
;
2543 BT_DBG("type 0x%2.2x len %d val 0x%lx", *type
, opt
->len
, *val
);
2547 static void l2cap_add_conf_opt(void **ptr
, u8 type
, u8 len
, unsigned long val
)
2549 struct l2cap_conf_opt
*opt
= *ptr
;
2551 BT_DBG("type 0x%2.2x len %d val 0x%lx", type
, len
, val
);
2558 *((u8
*) opt
->val
) = val
;
2562 put_unaligned_le16(val
, opt
->val
);
2566 put_unaligned_le32(val
, opt
->val
);
2570 memcpy(opt
->val
, (void *) val
, len
);
2574 *ptr
+= L2CAP_CONF_OPT_SIZE
+ len
;
2577 static void l2cap_add_opt_efs(void **ptr
, struct l2cap_chan
*chan
)
2579 struct l2cap_conf_efs efs
;
2581 switch (chan
->mode
) {
2582 case L2CAP_MODE_ERTM
:
2583 efs
.id
= chan
->local_id
;
2584 efs
.stype
= chan
->local_stype
;
2585 efs
.msdu
= cpu_to_le16(chan
->local_msdu
);
2586 efs
.sdu_itime
= cpu_to_le32(chan
->local_sdu_itime
);
2587 efs
.acc_lat
= cpu_to_le32(L2CAP_DEFAULT_ACC_LAT
);
2588 efs
.flush_to
= cpu_to_le32(L2CAP_DEFAULT_FLUSH_TO
);
2591 case L2CAP_MODE_STREAMING
:
2593 efs
.stype
= L2CAP_SERV_BESTEFFORT
;
2594 efs
.msdu
= cpu_to_le16(chan
->local_msdu
);
2595 efs
.sdu_itime
= cpu_to_le32(chan
->local_sdu_itime
);
2604 l2cap_add_conf_opt(ptr
, L2CAP_CONF_EFS
, sizeof(efs
),
2605 (unsigned long) &efs
);
2608 static void l2cap_ack_timeout(struct work_struct
*work
)
2610 struct l2cap_chan
*chan
= container_of(work
, struct l2cap_chan
,
2613 BT_DBG("chan %p", chan
);
2615 l2cap_chan_lock(chan
);
2617 __l2cap_send_ack(chan
);
2619 l2cap_chan_unlock(chan
);
2621 l2cap_chan_put(chan
);
2624 static inline int l2cap_ertm_init(struct l2cap_chan
*chan
)
2628 chan
->next_tx_seq
= 0;
2629 chan
->expected_tx_seq
= 0;
2630 chan
->expected_ack_seq
= 0;
2631 chan
->unacked_frames
= 0;
2632 chan
->buffer_seq
= 0;
2633 chan
->num_acked
= 0;
2634 chan
->frames_sent
= 0;
2635 chan
->last_acked_seq
= 0;
2637 chan
->sdu_last_frag
= NULL
;
2640 skb_queue_head_init(&chan
->tx_q
);
2642 if (chan
->mode
!= L2CAP_MODE_ERTM
)
2645 chan
->rx_state
= L2CAP_RX_STATE_RECV
;
2646 chan
->tx_state
= L2CAP_TX_STATE_XMIT
;
2648 INIT_DELAYED_WORK(&chan
->retrans_timer
, l2cap_retrans_timeout
);
2649 INIT_DELAYED_WORK(&chan
->monitor_timer
, l2cap_monitor_timeout
);
2650 INIT_DELAYED_WORK(&chan
->ack_timer
, l2cap_ack_timeout
);
2652 skb_queue_head_init(&chan
->srej_q
);
2654 INIT_LIST_HEAD(&chan
->srej_l
);
2655 err
= l2cap_seq_list_init(&chan
->srej_list
, chan
->tx_win
);
2659 err
= l2cap_seq_list_init(&chan
->retrans_list
, chan
->remote_tx_win
);
2661 l2cap_seq_list_free(&chan
->srej_list
);
2666 static inline __u8
l2cap_select_mode(__u8 mode
, __u16 remote_feat_mask
)
2669 case L2CAP_MODE_STREAMING
:
2670 case L2CAP_MODE_ERTM
:
2671 if (l2cap_mode_supported(mode
, remote_feat_mask
))
2675 return L2CAP_MODE_BASIC
;
2679 static inline bool __l2cap_ews_supported(struct l2cap_chan
*chan
)
2681 return enable_hs
&& chan
->conn
->feat_mask
& L2CAP_FEAT_EXT_WINDOW
;
2684 static inline bool __l2cap_efs_supported(struct l2cap_chan
*chan
)
2686 return enable_hs
&& chan
->conn
->feat_mask
& L2CAP_FEAT_EXT_FLOW
;
2689 static inline void l2cap_txwin_setup(struct l2cap_chan
*chan
)
2691 if (chan
->tx_win
> L2CAP_DEFAULT_TX_WINDOW
&&
2692 __l2cap_ews_supported(chan
)) {
2693 /* use extended control field */
2694 set_bit(FLAG_EXT_CTRL
, &chan
->flags
);
2695 chan
->tx_win_max
= L2CAP_DEFAULT_EXT_WINDOW
;
2697 chan
->tx_win
= min_t(u16
, chan
->tx_win
,
2698 L2CAP_DEFAULT_TX_WINDOW
);
2699 chan
->tx_win_max
= L2CAP_DEFAULT_TX_WINDOW
;
2703 static int l2cap_build_conf_req(struct l2cap_chan
*chan
, void *data
)
2705 struct l2cap_conf_req
*req
= data
;
2706 struct l2cap_conf_rfc rfc
= { .mode
= chan
->mode
};
2707 void *ptr
= req
->data
;
2710 BT_DBG("chan %p", chan
);
2712 if (chan
->num_conf_req
|| chan
->num_conf_rsp
)
2715 switch (chan
->mode
) {
2716 case L2CAP_MODE_STREAMING
:
2717 case L2CAP_MODE_ERTM
:
2718 if (test_bit(CONF_STATE2_DEVICE
, &chan
->conf_state
))
2721 if (__l2cap_efs_supported(chan
))
2722 set_bit(FLAG_EFS_ENABLE
, &chan
->flags
);
2726 chan
->mode
= l2cap_select_mode(rfc
.mode
, chan
->conn
->feat_mask
);
2731 if (chan
->imtu
!= L2CAP_DEFAULT_MTU
)
2732 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_MTU
, 2, chan
->imtu
);
2734 switch (chan
->mode
) {
2735 case L2CAP_MODE_BASIC
:
2736 if (!(chan
->conn
->feat_mask
& L2CAP_FEAT_ERTM
) &&
2737 !(chan
->conn
->feat_mask
& L2CAP_FEAT_STREAMING
))
2740 rfc
.mode
= L2CAP_MODE_BASIC
;
2742 rfc
.max_transmit
= 0;
2743 rfc
.retrans_timeout
= 0;
2744 rfc
.monitor_timeout
= 0;
2745 rfc
.max_pdu_size
= 0;
2747 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_RFC
, sizeof(rfc
),
2748 (unsigned long) &rfc
);
2751 case L2CAP_MODE_ERTM
:
2752 rfc
.mode
= L2CAP_MODE_ERTM
;
2753 rfc
.max_transmit
= chan
->max_tx
;
2754 rfc
.retrans_timeout
= 0;
2755 rfc
.monitor_timeout
= 0;
2757 size
= min_t(u16
, L2CAP_DEFAULT_MAX_PDU_SIZE
, chan
->conn
->mtu
-
2758 L2CAP_EXT_HDR_SIZE
-
2761 rfc
.max_pdu_size
= cpu_to_le16(size
);
2763 l2cap_txwin_setup(chan
);
2765 rfc
.txwin_size
= min_t(u16
, chan
->tx_win
,
2766 L2CAP_DEFAULT_TX_WINDOW
);
2768 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_RFC
, sizeof(rfc
),
2769 (unsigned long) &rfc
);
2771 if (test_bit(FLAG_EFS_ENABLE
, &chan
->flags
))
2772 l2cap_add_opt_efs(&ptr
, chan
);
2774 if (!(chan
->conn
->feat_mask
& L2CAP_FEAT_FCS
))
2777 if (chan
->fcs
== L2CAP_FCS_NONE
||
2778 test_bit(CONF_NO_FCS_RECV
, &chan
->conf_state
)) {
2779 chan
->fcs
= L2CAP_FCS_NONE
;
2780 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_FCS
, 1, chan
->fcs
);
2783 if (test_bit(FLAG_EXT_CTRL
, &chan
->flags
))
2784 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_EWS
, 2,
2788 case L2CAP_MODE_STREAMING
:
2789 rfc
.mode
= L2CAP_MODE_STREAMING
;
2791 rfc
.max_transmit
= 0;
2792 rfc
.retrans_timeout
= 0;
2793 rfc
.monitor_timeout
= 0;
2795 size
= min_t(u16
, L2CAP_DEFAULT_MAX_PDU_SIZE
, chan
->conn
->mtu
-
2796 L2CAP_EXT_HDR_SIZE
-
2799 rfc
.max_pdu_size
= cpu_to_le16(size
);
2801 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_RFC
, sizeof(rfc
),
2802 (unsigned long) &rfc
);
2804 if (test_bit(FLAG_EFS_ENABLE
, &chan
->flags
))
2805 l2cap_add_opt_efs(&ptr
, chan
);
2807 if (!(chan
->conn
->feat_mask
& L2CAP_FEAT_FCS
))
2810 if (chan
->fcs
== L2CAP_FCS_NONE
||
2811 test_bit(CONF_NO_FCS_RECV
, &chan
->conf_state
)) {
2812 chan
->fcs
= L2CAP_FCS_NONE
;
2813 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_FCS
, 1, chan
->fcs
);
2818 req
->dcid
= cpu_to_le16(chan
->dcid
);
2819 req
->flags
= cpu_to_le16(0);
2824 static int l2cap_parse_conf_req(struct l2cap_chan
*chan
, void *data
)
2826 struct l2cap_conf_rsp
*rsp
= data
;
2827 void *ptr
= rsp
->data
;
2828 void *req
= chan
->conf_req
;
2829 int len
= chan
->conf_len
;
2830 int type
, hint
, olen
;
2832 struct l2cap_conf_rfc rfc
= { .mode
= L2CAP_MODE_BASIC
};
2833 struct l2cap_conf_efs efs
;
2835 u16 mtu
= L2CAP_DEFAULT_MTU
;
2836 u16 result
= L2CAP_CONF_SUCCESS
;
2839 BT_DBG("chan %p", chan
);
2841 while (len
>= L2CAP_CONF_OPT_SIZE
) {
2842 len
-= l2cap_get_conf_opt(&req
, &type
, &olen
, &val
);
2844 hint
= type
& L2CAP_CONF_HINT
;
2845 type
&= L2CAP_CONF_MASK
;
2848 case L2CAP_CONF_MTU
:
2852 case L2CAP_CONF_FLUSH_TO
:
2853 chan
->flush_to
= val
;
2856 case L2CAP_CONF_QOS
:
2859 case L2CAP_CONF_RFC
:
2860 if (olen
== sizeof(rfc
))
2861 memcpy(&rfc
, (void *) val
, olen
);
2864 case L2CAP_CONF_FCS
:
2865 if (val
== L2CAP_FCS_NONE
)
2866 set_bit(CONF_NO_FCS_RECV
, &chan
->conf_state
);
2869 case L2CAP_CONF_EFS
:
2871 if (olen
== sizeof(efs
))
2872 memcpy(&efs
, (void *) val
, olen
);
2875 case L2CAP_CONF_EWS
:
2877 return -ECONNREFUSED
;
2879 set_bit(FLAG_EXT_CTRL
, &chan
->flags
);
2880 set_bit(CONF_EWS_RECV
, &chan
->conf_state
);
2881 chan
->tx_win_max
= L2CAP_DEFAULT_EXT_WINDOW
;
2882 chan
->remote_tx_win
= val
;
2889 result
= L2CAP_CONF_UNKNOWN
;
2890 *((u8
*) ptr
++) = type
;
2895 if (chan
->num_conf_rsp
|| chan
->num_conf_req
> 1)
2898 switch (chan
->mode
) {
2899 case L2CAP_MODE_STREAMING
:
2900 case L2CAP_MODE_ERTM
:
2901 if (!test_bit(CONF_STATE2_DEVICE
, &chan
->conf_state
)) {
2902 chan
->mode
= l2cap_select_mode(rfc
.mode
,
2903 chan
->conn
->feat_mask
);
2908 if (__l2cap_efs_supported(chan
))
2909 set_bit(FLAG_EFS_ENABLE
, &chan
->flags
);
2911 return -ECONNREFUSED
;
2914 if (chan
->mode
!= rfc
.mode
)
2915 return -ECONNREFUSED
;
2921 if (chan
->mode
!= rfc
.mode
) {
2922 result
= L2CAP_CONF_UNACCEPT
;
2923 rfc
.mode
= chan
->mode
;
2925 if (chan
->num_conf_rsp
== 1)
2926 return -ECONNREFUSED
;
2928 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_RFC
,
2929 sizeof(rfc
), (unsigned long) &rfc
);
2932 if (result
== L2CAP_CONF_SUCCESS
) {
2933 /* Configure output options and let the other side know
2934 * which ones we don't like. */
2936 if (mtu
< L2CAP_DEFAULT_MIN_MTU
)
2937 result
= L2CAP_CONF_UNACCEPT
;
2940 set_bit(CONF_MTU_DONE
, &chan
->conf_state
);
2942 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_MTU
, 2, chan
->omtu
);
2945 if (chan
->local_stype
!= L2CAP_SERV_NOTRAFIC
&&
2946 efs
.stype
!= L2CAP_SERV_NOTRAFIC
&&
2947 efs
.stype
!= chan
->local_stype
) {
2949 result
= L2CAP_CONF_UNACCEPT
;
2951 if (chan
->num_conf_req
>= 1)
2952 return -ECONNREFUSED
;
2954 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_EFS
,
2956 (unsigned long) &efs
);
2958 /* Send PENDING Conf Rsp */
2959 result
= L2CAP_CONF_PENDING
;
2960 set_bit(CONF_LOC_CONF_PEND
, &chan
->conf_state
);
2965 case L2CAP_MODE_BASIC
:
2966 chan
->fcs
= L2CAP_FCS_NONE
;
2967 set_bit(CONF_MODE_DONE
, &chan
->conf_state
);
2970 case L2CAP_MODE_ERTM
:
2971 if (!test_bit(CONF_EWS_RECV
, &chan
->conf_state
))
2972 chan
->remote_tx_win
= rfc
.txwin_size
;
2974 rfc
.txwin_size
= L2CAP_DEFAULT_TX_WINDOW
;
2976 chan
->remote_max_tx
= rfc
.max_transmit
;
2978 size
= min_t(u16
, le16_to_cpu(rfc
.max_pdu_size
),
2980 L2CAP_EXT_HDR_SIZE
-
2983 rfc
.max_pdu_size
= cpu_to_le16(size
);
2984 chan
->remote_mps
= size
;
2986 rfc
.retrans_timeout
=
2987 __constant_cpu_to_le16(L2CAP_DEFAULT_RETRANS_TO
);
2988 rfc
.monitor_timeout
=
2989 __constant_cpu_to_le16(L2CAP_DEFAULT_MONITOR_TO
);
2991 set_bit(CONF_MODE_DONE
, &chan
->conf_state
);
2993 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_RFC
,
2994 sizeof(rfc
), (unsigned long) &rfc
);
2996 if (test_bit(FLAG_EFS_ENABLE
, &chan
->flags
)) {
2997 chan
->remote_id
= efs
.id
;
2998 chan
->remote_stype
= efs
.stype
;
2999 chan
->remote_msdu
= le16_to_cpu(efs
.msdu
);
3000 chan
->remote_flush_to
=
3001 le32_to_cpu(efs
.flush_to
);
3002 chan
->remote_acc_lat
=
3003 le32_to_cpu(efs
.acc_lat
);
3004 chan
->remote_sdu_itime
=
3005 le32_to_cpu(efs
.sdu_itime
);
3006 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_EFS
,
3007 sizeof(efs
), (unsigned long) &efs
);
3011 case L2CAP_MODE_STREAMING
:
3012 size
= min_t(u16
, le16_to_cpu(rfc
.max_pdu_size
),
3014 L2CAP_EXT_HDR_SIZE
-
3017 rfc
.max_pdu_size
= cpu_to_le16(size
);
3018 chan
->remote_mps
= size
;
3020 set_bit(CONF_MODE_DONE
, &chan
->conf_state
);
3022 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_RFC
,
3023 sizeof(rfc
), (unsigned long) &rfc
);
3028 result
= L2CAP_CONF_UNACCEPT
;
3030 memset(&rfc
, 0, sizeof(rfc
));
3031 rfc
.mode
= chan
->mode
;
3034 if (result
== L2CAP_CONF_SUCCESS
)
3035 set_bit(CONF_OUTPUT_DONE
, &chan
->conf_state
);
3037 rsp
->scid
= cpu_to_le16(chan
->dcid
);
3038 rsp
->result
= cpu_to_le16(result
);
3039 rsp
->flags
= cpu_to_le16(0x0000);
3044 static int l2cap_parse_conf_rsp(struct l2cap_chan
*chan
, void *rsp
, int len
, void *data
, u16
*result
)
3046 struct l2cap_conf_req
*req
= data
;
3047 void *ptr
= req
->data
;
3050 struct l2cap_conf_rfc rfc
= { .mode
= L2CAP_MODE_BASIC
};
3051 struct l2cap_conf_efs efs
;
3053 BT_DBG("chan %p, rsp %p, len %d, req %p", chan
, rsp
, len
, data
);
3055 while (len
>= L2CAP_CONF_OPT_SIZE
) {
3056 len
-= l2cap_get_conf_opt(&rsp
, &type
, &olen
, &val
);
3059 case L2CAP_CONF_MTU
:
3060 if (val
< L2CAP_DEFAULT_MIN_MTU
) {
3061 *result
= L2CAP_CONF_UNACCEPT
;
3062 chan
->imtu
= L2CAP_DEFAULT_MIN_MTU
;
3065 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_MTU
, 2, chan
->imtu
);
3068 case L2CAP_CONF_FLUSH_TO
:
3069 chan
->flush_to
= val
;
3070 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_FLUSH_TO
,
3074 case L2CAP_CONF_RFC
:
3075 if (olen
== sizeof(rfc
))
3076 memcpy(&rfc
, (void *)val
, olen
);
3078 if (test_bit(CONF_STATE2_DEVICE
, &chan
->conf_state
) &&
3079 rfc
.mode
!= chan
->mode
)
3080 return -ECONNREFUSED
;
3084 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_RFC
,
3085 sizeof(rfc
), (unsigned long) &rfc
);
3088 case L2CAP_CONF_EWS
:
3089 chan
->tx_win
= min_t(u16
, val
,
3090 L2CAP_DEFAULT_EXT_WINDOW
);
3091 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_EWS
, 2,
3095 case L2CAP_CONF_EFS
:
3096 if (olen
== sizeof(efs
))
3097 memcpy(&efs
, (void *)val
, olen
);
3099 if (chan
->local_stype
!= L2CAP_SERV_NOTRAFIC
&&
3100 efs
.stype
!= L2CAP_SERV_NOTRAFIC
&&
3101 efs
.stype
!= chan
->local_stype
)
3102 return -ECONNREFUSED
;
3104 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_EFS
,
3105 sizeof(efs
), (unsigned long) &efs
);
3110 if (chan
->mode
== L2CAP_MODE_BASIC
&& chan
->mode
!= rfc
.mode
)
3111 return -ECONNREFUSED
;
3113 chan
->mode
= rfc
.mode
;
3115 if (*result
== L2CAP_CONF_SUCCESS
|| *result
== L2CAP_CONF_PENDING
) {
3117 case L2CAP_MODE_ERTM
:
3118 chan
->retrans_timeout
= le16_to_cpu(rfc
.retrans_timeout
);
3119 chan
->monitor_timeout
= le16_to_cpu(rfc
.monitor_timeout
);
3120 chan
->mps
= le16_to_cpu(rfc
.max_pdu_size
);
3122 if (test_bit(FLAG_EFS_ENABLE
, &chan
->flags
)) {
3123 chan
->local_msdu
= le16_to_cpu(efs
.msdu
);
3124 chan
->local_sdu_itime
=
3125 le32_to_cpu(efs
.sdu_itime
);
3126 chan
->local_acc_lat
= le32_to_cpu(efs
.acc_lat
);
3127 chan
->local_flush_to
=
3128 le32_to_cpu(efs
.flush_to
);
3132 case L2CAP_MODE_STREAMING
:
3133 chan
->mps
= le16_to_cpu(rfc
.max_pdu_size
);
3137 req
->dcid
= cpu_to_le16(chan
->dcid
);
3138 req
->flags
= cpu_to_le16(0x0000);
3143 static int l2cap_build_conf_rsp(struct l2cap_chan
*chan
, void *data
, u16 result
, u16 flags
)
3145 struct l2cap_conf_rsp
*rsp
= data
;
3146 void *ptr
= rsp
->data
;
3148 BT_DBG("chan %p", chan
);
3150 rsp
->scid
= cpu_to_le16(chan
->dcid
);
3151 rsp
->result
= cpu_to_le16(result
);
3152 rsp
->flags
= cpu_to_le16(flags
);
3157 void __l2cap_connect_rsp_defer(struct l2cap_chan
*chan
)
3159 struct l2cap_conn_rsp rsp
;
3160 struct l2cap_conn
*conn
= chan
->conn
;
3163 rsp
.scid
= cpu_to_le16(chan
->dcid
);
3164 rsp
.dcid
= cpu_to_le16(chan
->scid
);
3165 rsp
.result
= cpu_to_le16(L2CAP_CR_SUCCESS
);
3166 rsp
.status
= cpu_to_le16(L2CAP_CS_NO_INFO
);
3167 l2cap_send_cmd(conn
, chan
->ident
,
3168 L2CAP_CONN_RSP
, sizeof(rsp
), &rsp
);
3170 if (test_and_set_bit(CONF_REQ_SENT
, &chan
->conf_state
))
3173 l2cap_send_cmd(conn
, l2cap_get_ident(conn
), L2CAP_CONF_REQ
,
3174 l2cap_build_conf_req(chan
, buf
), buf
);
3175 chan
->num_conf_req
++;
3178 static void l2cap_conf_rfc_get(struct l2cap_chan
*chan
, void *rsp
, int len
)
3182 struct l2cap_conf_rfc rfc
;
3184 BT_DBG("chan %p, rsp %p, len %d", chan
, rsp
, len
);
3186 if ((chan
->mode
!= L2CAP_MODE_ERTM
) && (chan
->mode
!= L2CAP_MODE_STREAMING
))
3189 while (len
>= L2CAP_CONF_OPT_SIZE
) {
3190 len
-= l2cap_get_conf_opt(&rsp
, &type
, &olen
, &val
);
3193 case L2CAP_CONF_RFC
:
3194 if (olen
== sizeof(rfc
))
3195 memcpy(&rfc
, (void *)val
, olen
);
3200 /* Use sane default values in case a misbehaving remote device
3201 * did not send an RFC option.
3203 rfc
.mode
= chan
->mode
;
3204 rfc
.retrans_timeout
= cpu_to_le16(L2CAP_DEFAULT_RETRANS_TO
);
3205 rfc
.monitor_timeout
= cpu_to_le16(L2CAP_DEFAULT_MONITOR_TO
);
3206 rfc
.max_pdu_size
= cpu_to_le16(chan
->imtu
);
3208 BT_ERR("Expected RFC option was not found, using defaults");
3212 case L2CAP_MODE_ERTM
:
3213 chan
->retrans_timeout
= le16_to_cpu(rfc
.retrans_timeout
);
3214 chan
->monitor_timeout
= le16_to_cpu(rfc
.monitor_timeout
);
3215 chan
->mps
= le16_to_cpu(rfc
.max_pdu_size
);
3217 case L2CAP_MODE_STREAMING
:
3218 chan
->mps
= le16_to_cpu(rfc
.max_pdu_size
);
3222 static inline int l2cap_command_rej(struct l2cap_conn
*conn
, struct l2cap_cmd_hdr
*cmd
, u8
*data
)
3224 struct l2cap_cmd_rej_unk
*rej
= (struct l2cap_cmd_rej_unk
*) data
;
3226 if (rej
->reason
!= L2CAP_REJ_NOT_UNDERSTOOD
)
3229 if ((conn
->info_state
& L2CAP_INFO_FEAT_MASK_REQ_SENT
) &&
3230 cmd
->ident
== conn
->info_ident
) {
3231 cancel_delayed_work(&conn
->info_timer
);
3233 conn
->info_state
|= L2CAP_INFO_FEAT_MASK_REQ_DONE
;
3234 conn
->info_ident
= 0;
3236 l2cap_conn_start(conn
);
3242 static inline int l2cap_connect_req(struct l2cap_conn
*conn
, struct l2cap_cmd_hdr
*cmd
, u8
*data
)
3244 struct l2cap_conn_req
*req
= (struct l2cap_conn_req
*) data
;
3245 struct l2cap_conn_rsp rsp
;
3246 struct l2cap_chan
*chan
= NULL
, *pchan
;
3247 struct sock
*parent
, *sk
= NULL
;
3248 int result
, status
= L2CAP_CS_NO_INFO
;
3250 u16 dcid
= 0, scid
= __le16_to_cpu(req
->scid
);
3251 __le16 psm
= req
->psm
;
3253 BT_DBG("psm 0x%2.2x scid 0x%4.4x", __le16_to_cpu(psm
), scid
);
3255 /* Check if we have socket listening on psm */
3256 pchan
= l2cap_global_chan_by_psm(BT_LISTEN
, psm
, conn
->src
, conn
->dst
);
3258 result
= L2CAP_CR_BAD_PSM
;
3264 mutex_lock(&conn
->chan_lock
);
3267 /* Check if the ACL is secure enough (if not SDP) */
3268 if (psm
!= cpu_to_le16(0x0001) &&
3269 !hci_conn_check_link_mode(conn
->hcon
)) {
3270 conn
->disc_reason
= HCI_ERROR_AUTH_FAILURE
;
3271 result
= L2CAP_CR_SEC_BLOCK
;
3275 result
= L2CAP_CR_NO_MEM
;
3277 /* Check for backlog size */
3278 if (sk_acceptq_is_full(parent
)) {
3279 BT_DBG("backlog full %d", parent
->sk_ack_backlog
);
3283 chan
= pchan
->ops
->new_connection(pchan
->data
);
3289 /* Check if we already have channel with that dcid */
3290 if (__l2cap_get_chan_by_dcid(conn
, scid
)) {
3291 sock_set_flag(sk
, SOCK_ZAPPED
);
3292 chan
->ops
->close(chan
->data
);
3296 hci_conn_hold(conn
->hcon
);
3298 bacpy(&bt_sk(sk
)->src
, conn
->src
);
3299 bacpy(&bt_sk(sk
)->dst
, conn
->dst
);
3303 bt_accept_enqueue(parent
, sk
);
3305 __l2cap_chan_add(conn
, chan
);
3309 __set_chan_timer(chan
, sk
->sk_sndtimeo
);
3311 chan
->ident
= cmd
->ident
;
3313 if (conn
->info_state
& L2CAP_INFO_FEAT_MASK_REQ_DONE
) {
3314 if (l2cap_chan_check_security(chan
)) {
3315 if (test_bit(BT_SK_DEFER_SETUP
, &bt_sk(sk
)->flags
)) {
3316 __l2cap_state_change(chan
, BT_CONNECT2
);
3317 result
= L2CAP_CR_PEND
;
3318 status
= L2CAP_CS_AUTHOR_PEND
;
3319 parent
->sk_data_ready(parent
, 0);
3321 __l2cap_state_change(chan
, BT_CONFIG
);
3322 result
= L2CAP_CR_SUCCESS
;
3323 status
= L2CAP_CS_NO_INFO
;
3326 __l2cap_state_change(chan
, BT_CONNECT2
);
3327 result
= L2CAP_CR_PEND
;
3328 status
= L2CAP_CS_AUTHEN_PEND
;
3331 __l2cap_state_change(chan
, BT_CONNECT2
);
3332 result
= L2CAP_CR_PEND
;
3333 status
= L2CAP_CS_NO_INFO
;
3337 release_sock(parent
);
3338 mutex_unlock(&conn
->chan_lock
);
3341 rsp
.scid
= cpu_to_le16(scid
);
3342 rsp
.dcid
= cpu_to_le16(dcid
);
3343 rsp
.result
= cpu_to_le16(result
);
3344 rsp
.status
= cpu_to_le16(status
);
3345 l2cap_send_cmd(conn
, cmd
->ident
, L2CAP_CONN_RSP
, sizeof(rsp
), &rsp
);
3347 if (result
== L2CAP_CR_PEND
&& status
== L2CAP_CS_NO_INFO
) {
3348 struct l2cap_info_req info
;
3349 info
.type
= cpu_to_le16(L2CAP_IT_FEAT_MASK
);
3351 conn
->info_state
|= L2CAP_INFO_FEAT_MASK_REQ_SENT
;
3352 conn
->info_ident
= l2cap_get_ident(conn
);
3354 schedule_delayed_work(&conn
->info_timer
, L2CAP_INFO_TIMEOUT
);
3356 l2cap_send_cmd(conn
, conn
->info_ident
,
3357 L2CAP_INFO_REQ
, sizeof(info
), &info
);
3360 if (chan
&& !test_bit(CONF_REQ_SENT
, &chan
->conf_state
) &&
3361 result
== L2CAP_CR_SUCCESS
) {
3363 set_bit(CONF_REQ_SENT
, &chan
->conf_state
);
3364 l2cap_send_cmd(conn
, l2cap_get_ident(conn
), L2CAP_CONF_REQ
,
3365 l2cap_build_conf_req(chan
, buf
), buf
);
3366 chan
->num_conf_req
++;
3372 static inline int l2cap_connect_rsp(struct l2cap_conn
*conn
, struct l2cap_cmd_hdr
*cmd
, u8
*data
)
3374 struct l2cap_conn_rsp
*rsp
= (struct l2cap_conn_rsp
*) data
;
3375 u16 scid
, dcid
, result
, status
;
3376 struct l2cap_chan
*chan
;
3380 scid
= __le16_to_cpu(rsp
->scid
);
3381 dcid
= __le16_to_cpu(rsp
->dcid
);
3382 result
= __le16_to_cpu(rsp
->result
);
3383 status
= __le16_to_cpu(rsp
->status
);
3385 BT_DBG("dcid 0x%4.4x scid 0x%4.4x result 0x%2.2x status 0x%2.2x",
3386 dcid
, scid
, result
, status
);
3388 mutex_lock(&conn
->chan_lock
);
3391 chan
= __l2cap_get_chan_by_scid(conn
, scid
);
3397 chan
= __l2cap_get_chan_by_ident(conn
, cmd
->ident
);
3406 l2cap_chan_lock(chan
);
3409 case L2CAP_CR_SUCCESS
:
3410 l2cap_state_change(chan
, BT_CONFIG
);
3413 clear_bit(CONF_CONNECT_PEND
, &chan
->conf_state
);
3415 if (test_and_set_bit(CONF_REQ_SENT
, &chan
->conf_state
))
3418 l2cap_send_cmd(conn
, l2cap_get_ident(conn
), L2CAP_CONF_REQ
,
3419 l2cap_build_conf_req(chan
, req
), req
);
3420 chan
->num_conf_req
++;
3424 set_bit(CONF_CONNECT_PEND
, &chan
->conf_state
);
3428 l2cap_chan_del(chan
, ECONNREFUSED
);
3432 l2cap_chan_unlock(chan
);
3435 mutex_unlock(&conn
->chan_lock
);
3440 static inline void set_default_fcs(struct l2cap_chan
*chan
)
3442 /* FCS is enabled only in ERTM or streaming mode, if one or both
3445 if (chan
->mode
!= L2CAP_MODE_ERTM
&& chan
->mode
!= L2CAP_MODE_STREAMING
)
3446 chan
->fcs
= L2CAP_FCS_NONE
;
3447 else if (!test_bit(CONF_NO_FCS_RECV
, &chan
->conf_state
))
3448 chan
->fcs
= L2CAP_FCS_CRC16
;
3451 static inline int l2cap_config_req(struct l2cap_conn
*conn
, struct l2cap_cmd_hdr
*cmd
, u16 cmd_len
, u8
*data
)
3453 struct l2cap_conf_req
*req
= (struct l2cap_conf_req
*) data
;
3456 struct l2cap_chan
*chan
;
3459 dcid
= __le16_to_cpu(req
->dcid
);
3460 flags
= __le16_to_cpu(req
->flags
);
3462 BT_DBG("dcid 0x%4.4x flags 0x%2.2x", dcid
, flags
);
3464 chan
= l2cap_get_chan_by_scid(conn
, dcid
);
3468 if (chan
->state
!= BT_CONFIG
&& chan
->state
!= BT_CONNECT2
) {
3469 struct l2cap_cmd_rej_cid rej
;
3471 rej
.reason
= cpu_to_le16(L2CAP_REJ_INVALID_CID
);
3472 rej
.scid
= cpu_to_le16(chan
->scid
);
3473 rej
.dcid
= cpu_to_le16(chan
->dcid
);
3475 l2cap_send_cmd(conn
, cmd
->ident
, L2CAP_COMMAND_REJ
,
3480 /* Reject if config buffer is too small. */
3481 len
= cmd_len
- sizeof(*req
);
3482 if (len
< 0 || chan
->conf_len
+ len
> sizeof(chan
->conf_req
)) {
3483 l2cap_send_cmd(conn
, cmd
->ident
, L2CAP_CONF_RSP
,
3484 l2cap_build_conf_rsp(chan
, rsp
,
3485 L2CAP_CONF_REJECT
, flags
), rsp
);
3490 memcpy(chan
->conf_req
+ chan
->conf_len
, req
->data
, len
);
3491 chan
->conf_len
+= len
;
3493 if (flags
& 0x0001) {
3494 /* Incomplete config. Send empty response. */
3495 l2cap_send_cmd(conn
, cmd
->ident
, L2CAP_CONF_RSP
,
3496 l2cap_build_conf_rsp(chan
, rsp
,
3497 L2CAP_CONF_SUCCESS
, 0x0001), rsp
);
3501 /* Complete config. */
3502 len
= l2cap_parse_conf_req(chan
, rsp
);
3504 l2cap_send_disconn_req(conn
, chan
, ECONNRESET
);
3508 l2cap_send_cmd(conn
, cmd
->ident
, L2CAP_CONF_RSP
, len
, rsp
);
3509 chan
->num_conf_rsp
++;
3511 /* Reset config buffer. */
3514 if (!test_bit(CONF_OUTPUT_DONE
, &chan
->conf_state
))
3517 if (test_bit(CONF_INPUT_DONE
, &chan
->conf_state
)) {
3518 set_default_fcs(chan
);
3520 l2cap_state_change(chan
, BT_CONNECTED
);
3522 if (chan
->mode
== L2CAP_MODE_ERTM
||
3523 chan
->mode
== L2CAP_MODE_STREAMING
)
3524 err
= l2cap_ertm_init(chan
);
3527 l2cap_send_disconn_req(chan
->conn
, chan
, -err
);
3529 l2cap_chan_ready(chan
);
3534 if (!test_and_set_bit(CONF_REQ_SENT
, &chan
->conf_state
)) {
3536 l2cap_send_cmd(conn
, l2cap_get_ident(conn
), L2CAP_CONF_REQ
,
3537 l2cap_build_conf_req(chan
, buf
), buf
);
3538 chan
->num_conf_req
++;
3541 /* Got Conf Rsp PENDING from remote side and asume we sent
3542 Conf Rsp PENDING in the code above */
3543 if (test_bit(CONF_REM_CONF_PEND
, &chan
->conf_state
) &&
3544 test_bit(CONF_LOC_CONF_PEND
, &chan
->conf_state
)) {
3546 /* check compatibility */
3548 clear_bit(CONF_LOC_CONF_PEND
, &chan
->conf_state
);
3549 set_bit(CONF_OUTPUT_DONE
, &chan
->conf_state
);
3551 l2cap_send_cmd(conn
, cmd
->ident
, L2CAP_CONF_RSP
,
3552 l2cap_build_conf_rsp(chan
, rsp
,
3553 L2CAP_CONF_SUCCESS
, 0x0000), rsp
);
3557 l2cap_chan_unlock(chan
);
3561 static inline int l2cap_config_rsp(struct l2cap_conn
*conn
, struct l2cap_cmd_hdr
*cmd
, u8
*data
)
3563 struct l2cap_conf_rsp
*rsp
= (struct l2cap_conf_rsp
*)data
;
3564 u16 scid
, flags
, result
;
3565 struct l2cap_chan
*chan
;
3566 int len
= le16_to_cpu(cmd
->len
) - sizeof(*rsp
);
3569 scid
= __le16_to_cpu(rsp
->scid
);
3570 flags
= __le16_to_cpu(rsp
->flags
);
3571 result
= __le16_to_cpu(rsp
->result
);
3573 BT_DBG("scid 0x%4.4x flags 0x%2.2x result 0x%2.2x len %d", scid
, flags
,
3576 chan
= l2cap_get_chan_by_scid(conn
, scid
);
3581 case L2CAP_CONF_SUCCESS
:
3582 l2cap_conf_rfc_get(chan
, rsp
->data
, len
);
3583 clear_bit(CONF_REM_CONF_PEND
, &chan
->conf_state
);
3586 case L2CAP_CONF_PENDING
:
3587 set_bit(CONF_REM_CONF_PEND
, &chan
->conf_state
);
3589 if (test_bit(CONF_LOC_CONF_PEND
, &chan
->conf_state
)) {
3592 len
= l2cap_parse_conf_rsp(chan
, rsp
->data
, len
,
3595 l2cap_send_disconn_req(conn
, chan
, ECONNRESET
);
3599 /* check compatibility */
3601 clear_bit(CONF_LOC_CONF_PEND
, &chan
->conf_state
);
3602 set_bit(CONF_OUTPUT_DONE
, &chan
->conf_state
);
3604 l2cap_send_cmd(conn
, cmd
->ident
, L2CAP_CONF_RSP
,
3605 l2cap_build_conf_rsp(chan
, buf
,
3606 L2CAP_CONF_SUCCESS
, 0x0000), buf
);
3610 case L2CAP_CONF_UNACCEPT
:
3611 if (chan
->num_conf_rsp
<= L2CAP_CONF_MAX_CONF_RSP
) {
3614 if (len
> sizeof(req
) - sizeof(struct l2cap_conf_req
)) {
3615 l2cap_send_disconn_req(conn
, chan
, ECONNRESET
);
3619 /* throw out any old stored conf requests */
3620 result
= L2CAP_CONF_SUCCESS
;
3621 len
= l2cap_parse_conf_rsp(chan
, rsp
->data
, len
,
3624 l2cap_send_disconn_req(conn
, chan
, ECONNRESET
);
3628 l2cap_send_cmd(conn
, l2cap_get_ident(conn
),
3629 L2CAP_CONF_REQ
, len
, req
);
3630 chan
->num_conf_req
++;
3631 if (result
!= L2CAP_CONF_SUCCESS
)
3637 l2cap_chan_set_err(chan
, ECONNRESET
);
3639 __set_chan_timer(chan
, L2CAP_DISC_REJ_TIMEOUT
);
3640 l2cap_send_disconn_req(conn
, chan
, ECONNRESET
);
3647 set_bit(CONF_INPUT_DONE
, &chan
->conf_state
);
3649 if (test_bit(CONF_OUTPUT_DONE
, &chan
->conf_state
)) {
3650 set_default_fcs(chan
);
3652 l2cap_state_change(chan
, BT_CONNECTED
);
3653 if (chan
->mode
== L2CAP_MODE_ERTM
||
3654 chan
->mode
== L2CAP_MODE_STREAMING
)
3655 err
= l2cap_ertm_init(chan
);
3658 l2cap_send_disconn_req(chan
->conn
, chan
, -err
);
3660 l2cap_chan_ready(chan
);
3664 l2cap_chan_unlock(chan
);
3668 static inline int l2cap_disconnect_req(struct l2cap_conn
*conn
, struct l2cap_cmd_hdr
*cmd
, u8
*data
)
3670 struct l2cap_disconn_req
*req
= (struct l2cap_disconn_req
*) data
;
3671 struct l2cap_disconn_rsp rsp
;
3673 struct l2cap_chan
*chan
;
3676 scid
= __le16_to_cpu(req
->scid
);
3677 dcid
= __le16_to_cpu(req
->dcid
);
3679 BT_DBG("scid 0x%4.4x dcid 0x%4.4x", scid
, dcid
);
3681 mutex_lock(&conn
->chan_lock
);
3683 chan
= __l2cap_get_chan_by_scid(conn
, dcid
);
3685 mutex_unlock(&conn
->chan_lock
);
3689 l2cap_chan_lock(chan
);
3693 rsp
.dcid
= cpu_to_le16(chan
->scid
);
3694 rsp
.scid
= cpu_to_le16(chan
->dcid
);
3695 l2cap_send_cmd(conn
, cmd
->ident
, L2CAP_DISCONN_RSP
, sizeof(rsp
), &rsp
);
3698 sk
->sk_shutdown
= SHUTDOWN_MASK
;
3701 l2cap_chan_hold(chan
);
3702 l2cap_chan_del(chan
, ECONNRESET
);
3704 l2cap_chan_unlock(chan
);
3706 chan
->ops
->close(chan
->data
);
3707 l2cap_chan_put(chan
);
3709 mutex_unlock(&conn
->chan_lock
);
3714 static inline int l2cap_disconnect_rsp(struct l2cap_conn
*conn
, struct l2cap_cmd_hdr
*cmd
, u8
*data
)
3716 struct l2cap_disconn_rsp
*rsp
= (struct l2cap_disconn_rsp
*) data
;
3718 struct l2cap_chan
*chan
;
3720 scid
= __le16_to_cpu(rsp
->scid
);
3721 dcid
= __le16_to_cpu(rsp
->dcid
);
3723 BT_DBG("dcid 0x%4.4x scid 0x%4.4x", dcid
, scid
);
3725 mutex_lock(&conn
->chan_lock
);
3727 chan
= __l2cap_get_chan_by_scid(conn
, scid
);
3729 mutex_unlock(&conn
->chan_lock
);
3733 l2cap_chan_lock(chan
);
3735 l2cap_chan_hold(chan
);
3736 l2cap_chan_del(chan
, 0);
3738 l2cap_chan_unlock(chan
);
3740 chan
->ops
->close(chan
->data
);
3741 l2cap_chan_put(chan
);
3743 mutex_unlock(&conn
->chan_lock
);
3748 static inline int l2cap_information_req(struct l2cap_conn
*conn
, struct l2cap_cmd_hdr
*cmd
, u8
*data
)
3750 struct l2cap_info_req
*req
= (struct l2cap_info_req
*) data
;
3753 type
= __le16_to_cpu(req
->type
);
3755 BT_DBG("type 0x%4.4x", type
);
3757 if (type
== L2CAP_IT_FEAT_MASK
) {
3759 u32 feat_mask
= l2cap_feat_mask
;
3760 struct l2cap_info_rsp
*rsp
= (struct l2cap_info_rsp
*) buf
;
3761 rsp
->type
= cpu_to_le16(L2CAP_IT_FEAT_MASK
);
3762 rsp
->result
= cpu_to_le16(L2CAP_IR_SUCCESS
);
3764 feat_mask
|= L2CAP_FEAT_ERTM
| L2CAP_FEAT_STREAMING
3767 feat_mask
|= L2CAP_FEAT_EXT_FLOW
3768 | L2CAP_FEAT_EXT_WINDOW
;
3770 put_unaligned_le32(feat_mask
, rsp
->data
);
3771 l2cap_send_cmd(conn
, cmd
->ident
,
3772 L2CAP_INFO_RSP
, sizeof(buf
), buf
);
3773 } else if (type
== L2CAP_IT_FIXED_CHAN
) {
3775 struct l2cap_info_rsp
*rsp
= (struct l2cap_info_rsp
*) buf
;
3778 l2cap_fixed_chan
[0] |= L2CAP_FC_A2MP
;
3780 l2cap_fixed_chan
[0] &= ~L2CAP_FC_A2MP
;
3782 rsp
->type
= cpu_to_le16(L2CAP_IT_FIXED_CHAN
);
3783 rsp
->result
= cpu_to_le16(L2CAP_IR_SUCCESS
);
3784 memcpy(rsp
->data
, l2cap_fixed_chan
, sizeof(l2cap_fixed_chan
));
3785 l2cap_send_cmd(conn
, cmd
->ident
,
3786 L2CAP_INFO_RSP
, sizeof(buf
), buf
);
3788 struct l2cap_info_rsp rsp
;
3789 rsp
.type
= cpu_to_le16(type
);
3790 rsp
.result
= cpu_to_le16(L2CAP_IR_NOTSUPP
);
3791 l2cap_send_cmd(conn
, cmd
->ident
,
3792 L2CAP_INFO_RSP
, sizeof(rsp
), &rsp
);
3798 static inline int l2cap_information_rsp(struct l2cap_conn
*conn
, struct l2cap_cmd_hdr
*cmd
, u8
*data
)
3800 struct l2cap_info_rsp
*rsp
= (struct l2cap_info_rsp
*) data
;
3803 type
= __le16_to_cpu(rsp
->type
);
3804 result
= __le16_to_cpu(rsp
->result
);
3806 BT_DBG("type 0x%4.4x result 0x%2.2x", type
, result
);
3808 /* L2CAP Info req/rsp are unbound to channels, add extra checks */
3809 if (cmd
->ident
!= conn
->info_ident
||
3810 conn
->info_state
& L2CAP_INFO_FEAT_MASK_REQ_DONE
)
3813 cancel_delayed_work(&conn
->info_timer
);
3815 if (result
!= L2CAP_IR_SUCCESS
) {
3816 conn
->info_state
|= L2CAP_INFO_FEAT_MASK_REQ_DONE
;
3817 conn
->info_ident
= 0;
3819 l2cap_conn_start(conn
);
3825 case L2CAP_IT_FEAT_MASK
:
3826 conn
->feat_mask
= get_unaligned_le32(rsp
->data
);
3828 if (conn
->feat_mask
& L2CAP_FEAT_FIXED_CHAN
) {
3829 struct l2cap_info_req req
;
3830 req
.type
= cpu_to_le16(L2CAP_IT_FIXED_CHAN
);
3832 conn
->info_ident
= l2cap_get_ident(conn
);
3834 l2cap_send_cmd(conn
, conn
->info_ident
,
3835 L2CAP_INFO_REQ
, sizeof(req
), &req
);
3837 conn
->info_state
|= L2CAP_INFO_FEAT_MASK_REQ_DONE
;
3838 conn
->info_ident
= 0;
3840 l2cap_conn_start(conn
);
3844 case L2CAP_IT_FIXED_CHAN
:
3845 conn
->fixed_chan_mask
= rsp
->data
[0];
3846 conn
->info_state
|= L2CAP_INFO_FEAT_MASK_REQ_DONE
;
3847 conn
->info_ident
= 0;
3849 l2cap_conn_start(conn
);
3856 static inline int l2cap_create_channel_req(struct l2cap_conn
*conn
,
3857 struct l2cap_cmd_hdr
*cmd
, u16 cmd_len
,
3860 struct l2cap_create_chan_req
*req
= data
;
3861 struct l2cap_create_chan_rsp rsp
;
3864 if (cmd_len
!= sizeof(*req
))
3870 psm
= le16_to_cpu(req
->psm
);
3871 scid
= le16_to_cpu(req
->scid
);
3873 BT_DBG("psm %d, scid %d, amp_id %d", psm
, scid
, req
->amp_id
);
3875 /* Placeholder: Always reject */
3877 rsp
.scid
= cpu_to_le16(scid
);
3878 rsp
.result
= __constant_cpu_to_le16(L2CAP_CR_NO_MEM
);
3879 rsp
.status
= __constant_cpu_to_le16(L2CAP_CS_NO_INFO
);
3881 l2cap_send_cmd(conn
, cmd
->ident
, L2CAP_CREATE_CHAN_RSP
,
/* Handle a Create Channel Response (AMP).
 *
 * Processing is identical to a regular Connect Response, so simply
 * delegate to l2cap_connect_rsp().
 */
static inline int l2cap_create_channel_rsp(struct l2cap_conn *conn,
					struct l2cap_cmd_hdr *cmd, void *data)
{
	BT_DBG("conn %p", conn);

	return l2cap_connect_rsp(conn, cmd, data);
}
3895 static void l2cap_send_move_chan_rsp(struct l2cap_conn
*conn
, u8 ident
,
3896 u16 icid
, u16 result
)
3898 struct l2cap_move_chan_rsp rsp
;
3900 BT_DBG("icid %d, result %d", icid
, result
);
3902 rsp
.icid
= cpu_to_le16(icid
);
3903 rsp
.result
= cpu_to_le16(result
);
3905 l2cap_send_cmd(conn
, ident
, L2CAP_MOVE_CHAN_RSP
, sizeof(rsp
), &rsp
);
3908 static void l2cap_send_move_chan_cfm(struct l2cap_conn
*conn
,
3909 struct l2cap_chan
*chan
, u16 icid
, u16 result
)
3911 struct l2cap_move_chan_cfm cfm
;
3914 BT_DBG("icid %d, result %d", icid
, result
);
3916 ident
= l2cap_get_ident(conn
);
3918 chan
->ident
= ident
;
3920 cfm
.icid
= cpu_to_le16(icid
);
3921 cfm
.result
= cpu_to_le16(result
);
3923 l2cap_send_cmd(conn
, ident
, L2CAP_MOVE_CHAN_CFM
, sizeof(cfm
), &cfm
);
3926 static void l2cap_send_move_chan_cfm_rsp(struct l2cap_conn
*conn
, u8 ident
,
3929 struct l2cap_move_chan_cfm_rsp rsp
;
3931 BT_DBG("icid %d", icid
);
3933 rsp
.icid
= cpu_to_le16(icid
);
3934 l2cap_send_cmd(conn
, ident
, L2CAP_MOVE_CHAN_CFM_RSP
, sizeof(rsp
), &rsp
);
3937 static inline int l2cap_move_channel_req(struct l2cap_conn
*conn
,
3938 struct l2cap_cmd_hdr
*cmd
, u16 cmd_len
, void *data
)
3940 struct l2cap_move_chan_req
*req
= data
;
3942 u16 result
= L2CAP_MR_NOT_ALLOWED
;
3944 if (cmd_len
!= sizeof(*req
))
3947 icid
= le16_to_cpu(req
->icid
);
3949 BT_DBG("icid %d, dest_amp_id %d", icid
, req
->dest_amp_id
);
3954 /* Placeholder: Always refuse */
3955 l2cap_send_move_chan_rsp(conn
, cmd
->ident
, icid
, result
);
3960 static inline int l2cap_move_channel_rsp(struct l2cap_conn
*conn
,
3961 struct l2cap_cmd_hdr
*cmd
, u16 cmd_len
, void *data
)
3963 struct l2cap_move_chan_rsp
*rsp
= data
;
3966 if (cmd_len
!= sizeof(*rsp
))
3969 icid
= le16_to_cpu(rsp
->icid
);
3970 result
= le16_to_cpu(rsp
->result
);
3972 BT_DBG("icid %d, result %d", icid
, result
);
3974 /* Placeholder: Always unconfirmed */
3975 l2cap_send_move_chan_cfm(conn
, NULL
, icid
, L2CAP_MC_UNCONFIRMED
);
3980 static inline int l2cap_move_channel_confirm(struct l2cap_conn
*conn
,
3981 struct l2cap_cmd_hdr
*cmd
, u16 cmd_len
, void *data
)
3983 struct l2cap_move_chan_cfm
*cfm
= data
;
3986 if (cmd_len
!= sizeof(*cfm
))
3989 icid
= le16_to_cpu(cfm
->icid
);
3990 result
= le16_to_cpu(cfm
->result
);
3992 BT_DBG("icid %d, result %d", icid
, result
);
3994 l2cap_send_move_chan_cfm_rsp(conn
, cmd
->ident
, icid
);
3999 static inline int l2cap_move_channel_confirm_rsp(struct l2cap_conn
*conn
,
4000 struct l2cap_cmd_hdr
*cmd
, u16 cmd_len
, void *data
)
4002 struct l2cap_move_chan_cfm_rsp
*rsp
= data
;
4005 if (cmd_len
!= sizeof(*rsp
))
4008 icid
= le16_to_cpu(rsp
->icid
);
4010 BT_DBG("icid %d", icid
);
4015 static inline int l2cap_check_conn_param(u16 min
, u16 max
, u16 latency
,
4020 if (min
> max
|| min
< 6 || max
> 3200)
4023 if (to_multiplier
< 10 || to_multiplier
> 3200)
4026 if (max
>= to_multiplier
* 8)
4029 max_latency
= (to_multiplier
* 8 / max
) - 1;
4030 if (latency
> 499 || latency
> max_latency
)
4036 static inline int l2cap_conn_param_update_req(struct l2cap_conn
*conn
,
4037 struct l2cap_cmd_hdr
*cmd
, u8
*data
)
4039 struct hci_conn
*hcon
= conn
->hcon
;
4040 struct l2cap_conn_param_update_req
*req
;
4041 struct l2cap_conn_param_update_rsp rsp
;
4042 u16 min
, max
, latency
, to_multiplier
, cmd_len
;
4045 if (!(hcon
->link_mode
& HCI_LM_MASTER
))
4048 cmd_len
= __le16_to_cpu(cmd
->len
);
4049 if (cmd_len
!= sizeof(struct l2cap_conn_param_update_req
))
4052 req
= (struct l2cap_conn_param_update_req
*) data
;
4053 min
= __le16_to_cpu(req
->min
);
4054 max
= __le16_to_cpu(req
->max
);
4055 latency
= __le16_to_cpu(req
->latency
);
4056 to_multiplier
= __le16_to_cpu(req
->to_multiplier
);
4058 BT_DBG("min 0x%4.4x max 0x%4.4x latency: 0x%4.4x Timeout: 0x%4.4x",
4059 min
, max
, latency
, to_multiplier
);
4061 memset(&rsp
, 0, sizeof(rsp
));
4063 err
= l2cap_check_conn_param(min
, max
, latency
, to_multiplier
);
4065 rsp
.result
= cpu_to_le16(L2CAP_CONN_PARAM_REJECTED
);
4067 rsp
.result
= cpu_to_le16(L2CAP_CONN_PARAM_ACCEPTED
);
4069 l2cap_send_cmd(conn
, cmd
->ident
, L2CAP_CONN_PARAM_UPDATE_RSP
,
4073 hci_le_conn_update(hcon
, min
, max
, latency
, to_multiplier
);
4078 static inline int l2cap_bredr_sig_cmd(struct l2cap_conn
*conn
,
4079 struct l2cap_cmd_hdr
*cmd
, u16 cmd_len
, u8
*data
)
4083 switch (cmd
->code
) {
4084 case L2CAP_COMMAND_REJ
:
4085 l2cap_command_rej(conn
, cmd
, data
);
4088 case L2CAP_CONN_REQ
:
4089 err
= l2cap_connect_req(conn
, cmd
, data
);
4092 case L2CAP_CONN_RSP
:
4093 err
= l2cap_connect_rsp(conn
, cmd
, data
);
4096 case L2CAP_CONF_REQ
:
4097 err
= l2cap_config_req(conn
, cmd
, cmd_len
, data
);
4100 case L2CAP_CONF_RSP
:
4101 err
= l2cap_config_rsp(conn
, cmd
, data
);
4104 case L2CAP_DISCONN_REQ
:
4105 err
= l2cap_disconnect_req(conn
, cmd
, data
);
4108 case L2CAP_DISCONN_RSP
:
4109 err
= l2cap_disconnect_rsp(conn
, cmd
, data
);
4112 case L2CAP_ECHO_REQ
:
4113 l2cap_send_cmd(conn
, cmd
->ident
, L2CAP_ECHO_RSP
, cmd_len
, data
);
4116 case L2CAP_ECHO_RSP
:
4119 case L2CAP_INFO_REQ
:
4120 err
= l2cap_information_req(conn
, cmd
, data
);
4123 case L2CAP_INFO_RSP
:
4124 err
= l2cap_information_rsp(conn
, cmd
, data
);
4127 case L2CAP_CREATE_CHAN_REQ
:
4128 err
= l2cap_create_channel_req(conn
, cmd
, cmd_len
, data
);
4131 case L2CAP_CREATE_CHAN_RSP
:
4132 err
= l2cap_create_channel_rsp(conn
, cmd
, data
);
4135 case L2CAP_MOVE_CHAN_REQ
:
4136 err
= l2cap_move_channel_req(conn
, cmd
, cmd_len
, data
);
4139 case L2CAP_MOVE_CHAN_RSP
:
4140 err
= l2cap_move_channel_rsp(conn
, cmd
, cmd_len
, data
);
4143 case L2CAP_MOVE_CHAN_CFM
:
4144 err
= l2cap_move_channel_confirm(conn
, cmd
, cmd_len
, data
);
4147 case L2CAP_MOVE_CHAN_CFM_RSP
:
4148 err
= l2cap_move_channel_confirm_rsp(conn
, cmd
, cmd_len
, data
);
4152 BT_ERR("Unknown BR/EDR signaling command 0x%2.2x", cmd
->code
);
4160 static inline int l2cap_le_sig_cmd(struct l2cap_conn
*conn
,
4161 struct l2cap_cmd_hdr
*cmd
, u8
*data
)
4163 switch (cmd
->code
) {
4164 case L2CAP_COMMAND_REJ
:
4167 case L2CAP_CONN_PARAM_UPDATE_REQ
:
4168 return l2cap_conn_param_update_req(conn
, cmd
, data
);
4170 case L2CAP_CONN_PARAM_UPDATE_RSP
:
4174 BT_ERR("Unknown LE signaling command 0x%2.2x", cmd
->code
);
4179 static inline void l2cap_sig_channel(struct l2cap_conn
*conn
,
4180 struct sk_buff
*skb
)
4182 u8
*data
= skb
->data
;
4184 struct l2cap_cmd_hdr cmd
;
4187 l2cap_raw_recv(conn
, skb
);
4189 while (len
>= L2CAP_CMD_HDR_SIZE
) {
4191 memcpy(&cmd
, data
, L2CAP_CMD_HDR_SIZE
);
4192 data
+= L2CAP_CMD_HDR_SIZE
;
4193 len
-= L2CAP_CMD_HDR_SIZE
;
4195 cmd_len
= le16_to_cpu(cmd
.len
);
4197 BT_DBG("code 0x%2.2x len %d id 0x%2.2x", cmd
.code
, cmd_len
, cmd
.ident
);
4199 if (cmd_len
> len
|| !cmd
.ident
) {
4200 BT_DBG("corrupted command");
4204 if (conn
->hcon
->type
== LE_LINK
)
4205 err
= l2cap_le_sig_cmd(conn
, &cmd
, data
);
4207 err
= l2cap_bredr_sig_cmd(conn
, &cmd
, cmd_len
, data
);
4210 struct l2cap_cmd_rej_unk rej
;
4212 BT_ERR("Wrong link type (%d)", err
);
4214 /* FIXME: Map err to a valid reason */
4215 rej
.reason
= cpu_to_le16(L2CAP_REJ_NOT_UNDERSTOOD
);
4216 l2cap_send_cmd(conn
, cmd
.ident
, L2CAP_COMMAND_REJ
, sizeof(rej
), &rej
);
4226 static int l2cap_check_fcs(struct l2cap_chan
*chan
, struct sk_buff
*skb
)
4228 u16 our_fcs
, rcv_fcs
;
4231 if (test_bit(FLAG_EXT_CTRL
, &chan
->flags
))
4232 hdr_size
= L2CAP_EXT_HDR_SIZE
;
4234 hdr_size
= L2CAP_ENH_HDR_SIZE
;
4236 if (chan
->fcs
== L2CAP_FCS_CRC16
) {
4237 skb_trim(skb
, skb
->len
- L2CAP_FCS_SIZE
);
4238 rcv_fcs
= get_unaligned_le16(skb
->data
+ skb
->len
);
4239 our_fcs
= crc16(0, skb
->data
- hdr_size
, skb
->len
+ hdr_size
);
4241 if (our_fcs
!= rcv_fcs
)
4247 static inline void l2cap_send_i_or_rr_or_rnr(struct l2cap_chan
*chan
)
4251 chan
->frames_sent
= 0;
4253 control
|= __set_reqseq(chan
, chan
->buffer_seq
);
4255 if (test_bit(CONN_LOCAL_BUSY
, &chan
->conn_state
)) {
4256 control
|= __set_ctrl_super(chan
, L2CAP_SUPER_RNR
);
4257 set_bit(CONN_RNR_SENT
, &chan
->conn_state
);
4260 if (test_bit(CONN_REMOTE_BUSY
, &chan
->conn_state
))
4261 l2cap_retransmit_frames(chan
);
4263 l2cap_ertm_send(chan
);
4265 if (!test_bit(CONN_LOCAL_BUSY
, &chan
->conn_state
) &&
4266 chan
->frames_sent
== 0) {
4267 control
|= __set_ctrl_super(chan
, L2CAP_SUPER_RR
);
4271 static int l2cap_add_to_srej_queue(struct l2cap_chan
*chan
, struct sk_buff
*skb
, u16 tx_seq
, u8 sar
)
4273 struct sk_buff
*next_skb
;
4274 int tx_seq_offset
, next_tx_seq_offset
;
4276 bt_cb(skb
)->control
.txseq
= tx_seq
;
4277 bt_cb(skb
)->control
.sar
= sar
;
4279 next_skb
= skb_peek(&chan
->srej_q
);
4281 tx_seq_offset
= __seq_offset(chan
, tx_seq
, chan
->buffer_seq
);
4284 if (bt_cb(next_skb
)->control
.txseq
== tx_seq
)
4287 next_tx_seq_offset
= __seq_offset(chan
,
4288 bt_cb(next_skb
)->control
.txseq
, chan
->buffer_seq
);
4290 if (next_tx_seq_offset
> tx_seq_offset
) {
4291 __skb_queue_before(&chan
->srej_q
, next_skb
, skb
);
4295 if (skb_queue_is_last(&chan
->srej_q
, next_skb
))
4298 next_skb
= skb_queue_next(&chan
->srej_q
, next_skb
);
4301 __skb_queue_tail(&chan
->srej_q
, skb
);
4306 static void append_skb_frag(struct sk_buff
*skb
,
4307 struct sk_buff
*new_frag
, struct sk_buff
**last_frag
)
4309 /* skb->len reflects data in skb as well as all fragments
4310 * skb->data_len reflects only data in fragments
4312 if (!skb_has_frag_list(skb
))
4313 skb_shinfo(skb
)->frag_list
= new_frag
;
4315 new_frag
->next
= NULL
;
4317 (*last_frag
)->next
= new_frag
;
4318 *last_frag
= new_frag
;
4320 skb
->len
+= new_frag
->len
;
4321 skb
->data_len
+= new_frag
->len
;
4322 skb
->truesize
+= new_frag
->truesize
;
4325 static int l2cap_reassemble_sdu(struct l2cap_chan
*chan
, struct sk_buff
*skb
, u32 control
)
4329 switch (__get_ctrl_sar(chan
, control
)) {
4330 case L2CAP_SAR_UNSEGMENTED
:
4334 err
= chan
->ops
->recv(chan
->data
, skb
);
4337 case L2CAP_SAR_START
:
4341 chan
->sdu_len
= get_unaligned_le16(skb
->data
);
4342 skb_pull(skb
, L2CAP_SDULEN_SIZE
);
4344 if (chan
->sdu_len
> chan
->imtu
) {
4349 if (skb
->len
>= chan
->sdu_len
)
4353 chan
->sdu_last_frag
= skb
;
4359 case L2CAP_SAR_CONTINUE
:
4363 append_skb_frag(chan
->sdu
, skb
,
4364 &chan
->sdu_last_frag
);
4367 if (chan
->sdu
->len
>= chan
->sdu_len
)
4377 append_skb_frag(chan
->sdu
, skb
,
4378 &chan
->sdu_last_frag
);
4381 if (chan
->sdu
->len
!= chan
->sdu_len
)
4384 err
= chan
->ops
->recv(chan
->data
, chan
->sdu
);
4387 /* Reassembly complete */
4389 chan
->sdu_last_frag
= NULL
;
4397 kfree_skb(chan
->sdu
);
4399 chan
->sdu_last_frag
= NULL
;
4406 static void l2cap_ertm_enter_local_busy(struct l2cap_chan
*chan
)
4408 BT_DBG("chan %p, Enter local busy", chan
);
4410 set_bit(CONN_LOCAL_BUSY
, &chan
->conn_state
);
4411 l2cap_seq_list_clear(&chan
->srej_list
);
4413 __set_ack_timer(chan
);
4416 static void l2cap_ertm_exit_local_busy(struct l2cap_chan
*chan
)
4420 if (!test_bit(CONN_RNR_SENT
, &chan
->conn_state
))
4423 control
= __set_reqseq(chan
, chan
->buffer_seq
);
4424 control
|= __set_ctrl_poll(chan
);
4425 control
|= __set_ctrl_super(chan
, L2CAP_SUPER_RR
);
4426 chan
->retry_count
= 1;
4428 __clear_retrans_timer(chan
);
4429 __set_monitor_timer(chan
);
4431 set_bit(CONN_WAIT_F
, &chan
->conn_state
);
4434 clear_bit(CONN_LOCAL_BUSY
, &chan
->conn_state
);
4435 clear_bit(CONN_RNR_SENT
, &chan
->conn_state
);
4437 BT_DBG("chan %p, Exit local busy", chan
);
4440 void l2cap_chan_busy(struct l2cap_chan
*chan
, int busy
)
4442 if (chan
->mode
== L2CAP_MODE_ERTM
) {
4444 l2cap_ertm_enter_local_busy(chan
);
4446 l2cap_ertm_exit_local_busy(chan
);
4450 static void l2cap_check_srej_gap(struct l2cap_chan
*chan
, u16 tx_seq
)
4452 struct sk_buff
*skb
;
4455 while ((skb
= skb_peek(&chan
->srej_q
)) &&
4456 !test_bit(CONN_LOCAL_BUSY
, &chan
->conn_state
)) {
4459 if (bt_cb(skb
)->control
.txseq
!= tx_seq
)
4462 skb
= skb_dequeue(&chan
->srej_q
);
4463 control
= __set_ctrl_sar(chan
, bt_cb(skb
)->control
.sar
);
4464 err
= l2cap_reassemble_sdu(chan
, skb
, control
);
4467 l2cap_send_disconn_req(chan
->conn
, chan
, ECONNRESET
);
4471 chan
->buffer_seq_srej
= __next_seq(chan
, chan
->buffer_seq_srej
);
4472 tx_seq
= __next_seq(chan
, tx_seq
);
4476 static void l2cap_resend_srejframe(struct l2cap_chan
*chan
, u16 tx_seq
)
4478 struct srej_list
*l
, *tmp
;
4481 list_for_each_entry_safe(l
, tmp
, &chan
->srej_l
, list
) {
4482 if (l
->tx_seq
== tx_seq
) {
4487 control
= __set_ctrl_super(chan
, L2CAP_SUPER_SREJ
);
4488 control
|= __set_reqseq(chan
, l
->tx_seq
);
4490 list_add_tail(&l
->list
, &chan
->srej_l
);
4494 static int l2cap_send_srejframe(struct l2cap_chan
*chan
, u16 tx_seq
)
4496 struct srej_list
*new;
4499 while (tx_seq
!= chan
->expected_tx_seq
) {
4500 control
= __set_ctrl_super(chan
, L2CAP_SUPER_SREJ
);
4501 control
|= __set_reqseq(chan
, chan
->expected_tx_seq
);
4502 l2cap_seq_list_append(&chan
->srej_list
, chan
->expected_tx_seq
);
4504 new = kzalloc(sizeof(struct srej_list
), GFP_ATOMIC
);
4508 new->tx_seq
= chan
->expected_tx_seq
;
4510 chan
->expected_tx_seq
= __next_seq(chan
, chan
->expected_tx_seq
);
4512 list_add_tail(&new->list
, &chan
->srej_l
);
4515 chan
->expected_tx_seq
= __next_seq(chan
, chan
->expected_tx_seq
);
4520 static inline int l2cap_data_channel_iframe(struct l2cap_chan
*chan
, u32 rx_control
, struct sk_buff
*skb
)
4522 u16 tx_seq
= __get_txseq(chan
, rx_control
);
4523 u16 req_seq
= __get_reqseq(chan
, rx_control
);
4524 u8 sar
= __get_ctrl_sar(chan
, rx_control
);
4525 int tx_seq_offset
, expected_tx_seq_offset
;
4526 int num_to_ack
= (chan
->tx_win
/6) + 1;
4529 BT_DBG("chan %p len %d tx_seq %d rx_control 0x%8.8x", chan
, skb
->len
,
4530 tx_seq
, rx_control
);
4532 if (__is_ctrl_final(chan
, rx_control
) &&
4533 test_bit(CONN_WAIT_F
, &chan
->conn_state
)) {
4534 __clear_monitor_timer(chan
);
4535 if (chan
->unacked_frames
> 0)
4536 __set_retrans_timer(chan
);
4537 clear_bit(CONN_WAIT_F
, &chan
->conn_state
);
4540 chan
->expected_ack_seq
= req_seq
;
4541 l2cap_drop_acked_frames(chan
);
4543 tx_seq_offset
= __seq_offset(chan
, tx_seq
, chan
->buffer_seq
);
4545 /* invalid tx_seq */
4546 if (tx_seq_offset
>= chan
->tx_win
) {
4547 l2cap_send_disconn_req(chan
->conn
, chan
, ECONNRESET
);
4551 if (test_bit(CONN_LOCAL_BUSY
, &chan
->conn_state
)) {
4552 if (!test_bit(CONN_RNR_SENT
, &chan
->conn_state
))
4553 l2cap_send_ack(chan
);
4557 if (tx_seq
== chan
->expected_tx_seq
)
4560 if (test_bit(CONN_SREJ_SENT
, &chan
->conn_state
)) {
4561 struct srej_list
*first
;
4563 first
= list_first_entry(&chan
->srej_l
,
4564 struct srej_list
, list
);
4565 if (tx_seq
== first
->tx_seq
) {
4566 l2cap_add_to_srej_queue(chan
, skb
, tx_seq
, sar
);
4567 l2cap_check_srej_gap(chan
, tx_seq
);
4569 list_del(&first
->list
);
4572 if (list_empty(&chan
->srej_l
)) {
4573 chan
->buffer_seq
= chan
->buffer_seq_srej
;
4574 clear_bit(CONN_SREJ_SENT
, &chan
->conn_state
);
4575 l2cap_send_ack(chan
);
4576 BT_DBG("chan %p, Exit SREJ_SENT", chan
);
4579 struct srej_list
*l
;
4581 /* duplicated tx_seq */
4582 if (l2cap_add_to_srej_queue(chan
, skb
, tx_seq
, sar
) < 0)
4585 list_for_each_entry(l
, &chan
->srej_l
, list
) {
4586 if (l
->tx_seq
== tx_seq
) {
4587 l2cap_resend_srejframe(chan
, tx_seq
);
4592 err
= l2cap_send_srejframe(chan
, tx_seq
);
4594 l2cap_send_disconn_req(chan
->conn
, chan
, -err
);
4599 expected_tx_seq_offset
= __seq_offset(chan
,
4600 chan
->expected_tx_seq
, chan
->buffer_seq
);
4602 /* duplicated tx_seq */
4603 if (tx_seq_offset
< expected_tx_seq_offset
)
4606 set_bit(CONN_SREJ_SENT
, &chan
->conn_state
);
4608 BT_DBG("chan %p, Enter SREJ", chan
);
4610 INIT_LIST_HEAD(&chan
->srej_l
);
4611 chan
->buffer_seq_srej
= chan
->buffer_seq
;
4613 __skb_queue_head_init(&chan
->srej_q
);
4614 l2cap_add_to_srej_queue(chan
, skb
, tx_seq
, sar
);
4616 /* Set P-bit only if there are some I-frames to ack. */
4617 if (__clear_ack_timer(chan
))
4618 set_bit(CONN_SEND_PBIT
, &chan
->conn_state
);
4620 err
= l2cap_send_srejframe(chan
, tx_seq
);
4622 l2cap_send_disconn_req(chan
->conn
, chan
, -err
);
4629 chan
->expected_tx_seq
= __next_seq(chan
, chan
->expected_tx_seq
);
4631 if (test_bit(CONN_SREJ_SENT
, &chan
->conn_state
)) {
4632 bt_cb(skb
)->control
.txseq
= tx_seq
;
4633 bt_cb(skb
)->control
.sar
= sar
;
4634 __skb_queue_tail(&chan
->srej_q
, skb
);
4638 err
= l2cap_reassemble_sdu(chan
, skb
, rx_control
);
4639 chan
->buffer_seq
= __next_seq(chan
, chan
->buffer_seq
);
4642 l2cap_send_disconn_req(chan
->conn
, chan
, ECONNRESET
);
4646 if (__is_ctrl_final(chan
, rx_control
)) {
4647 if (!test_and_clear_bit(CONN_REJ_ACT
, &chan
->conn_state
))
4648 l2cap_retransmit_frames(chan
);
4652 chan
->num_acked
= (chan
->num_acked
+ 1) % num_to_ack
;
4653 if (chan
->num_acked
== num_to_ack
- 1)
4654 l2cap_send_ack(chan
);
4656 __set_ack_timer(chan
);
4665 static inline void l2cap_data_channel_rrframe(struct l2cap_chan
*chan
, u32 rx_control
)
4667 BT_DBG("chan %p, req_seq %d ctrl 0x%8.8x", chan
,
4668 __get_reqseq(chan
, rx_control
), rx_control
);
4670 chan
->expected_ack_seq
= __get_reqseq(chan
, rx_control
);
4671 l2cap_drop_acked_frames(chan
);
4673 if (__is_ctrl_poll(chan
, rx_control
)) {
4674 set_bit(CONN_SEND_FBIT
, &chan
->conn_state
);
4675 if (test_bit(CONN_SREJ_SENT
, &chan
->conn_state
)) {
4676 if (test_bit(CONN_REMOTE_BUSY
, &chan
->conn_state
) &&
4677 (chan
->unacked_frames
> 0))
4678 __set_retrans_timer(chan
);
4680 clear_bit(CONN_REMOTE_BUSY
, &chan
->conn_state
);
4681 l2cap_send_srejtail(chan
);
4683 l2cap_send_i_or_rr_or_rnr(chan
);
4686 } else if (__is_ctrl_final(chan
, rx_control
)) {
4687 clear_bit(CONN_REMOTE_BUSY
, &chan
->conn_state
);
4689 if (!test_and_clear_bit(CONN_REJ_ACT
, &chan
->conn_state
))
4690 l2cap_retransmit_frames(chan
);
4693 if (test_bit(CONN_REMOTE_BUSY
, &chan
->conn_state
) &&
4694 (chan
->unacked_frames
> 0))
4695 __set_retrans_timer(chan
);
4697 clear_bit(CONN_REMOTE_BUSY
, &chan
->conn_state
);
4698 if (test_bit(CONN_SREJ_SENT
, &chan
->conn_state
))
4699 l2cap_send_ack(chan
);
4701 l2cap_ertm_send(chan
);
4705 static inline void l2cap_data_channel_rejframe(struct l2cap_chan
*chan
, u32 rx_control
)
4707 u16 tx_seq
= __get_reqseq(chan
, rx_control
);
4709 BT_DBG("chan %p, req_seq %d ctrl 0x%8.8x", chan
, tx_seq
, rx_control
);
4711 clear_bit(CONN_REMOTE_BUSY
, &chan
->conn_state
);
4713 chan
->expected_ack_seq
= tx_seq
;
4714 l2cap_drop_acked_frames(chan
);
4716 if (__is_ctrl_final(chan
, rx_control
)) {
4717 if (!test_and_clear_bit(CONN_REJ_ACT
, &chan
->conn_state
))
4718 l2cap_retransmit_frames(chan
);
4720 l2cap_retransmit_frames(chan
);
4722 if (test_bit(CONN_WAIT_F
, &chan
->conn_state
))
4723 set_bit(CONN_REJ_ACT
, &chan
->conn_state
);
4726 static inline void l2cap_data_channel_srejframe(struct l2cap_chan
*chan
, u32 rx_control
)
4728 u16 tx_seq
= __get_reqseq(chan
, rx_control
);
4730 BT_DBG("chan %p, req_seq %d ctrl 0x%8.8x", chan
, tx_seq
, rx_control
);
4732 clear_bit(CONN_REMOTE_BUSY
, &chan
->conn_state
);
4734 if (__is_ctrl_poll(chan
, rx_control
)) {
4735 chan
->expected_ack_seq
= tx_seq
;
4736 l2cap_drop_acked_frames(chan
);
4738 set_bit(CONN_SEND_FBIT
, &chan
->conn_state
);
4739 l2cap_retransmit_one_frame(chan
, tx_seq
);
4741 l2cap_ertm_send(chan
);
4743 if (test_bit(CONN_WAIT_F
, &chan
->conn_state
)) {
4744 chan
->srej_save_reqseq
= tx_seq
;
4745 set_bit(CONN_SREJ_ACT
, &chan
->conn_state
);
4747 } else if (__is_ctrl_final(chan
, rx_control
)) {
4748 if (test_bit(CONN_SREJ_ACT
, &chan
->conn_state
) &&
4749 chan
->srej_save_reqseq
== tx_seq
)
4750 clear_bit(CONN_SREJ_ACT
, &chan
->conn_state
);
4752 l2cap_retransmit_one_frame(chan
, tx_seq
);
4754 l2cap_retransmit_one_frame(chan
, tx_seq
);
4755 if (test_bit(CONN_WAIT_F
, &chan
->conn_state
)) {
4756 chan
->srej_save_reqseq
= tx_seq
;
4757 set_bit(CONN_SREJ_ACT
, &chan
->conn_state
);
4762 static inline void l2cap_data_channel_rnrframe(struct l2cap_chan
*chan
, u32 rx_control
)
4764 u16 tx_seq
= __get_reqseq(chan
, rx_control
);
4766 BT_DBG("chan %p, req_seq %d ctrl 0x%8.8x", chan
, tx_seq
, rx_control
);
4768 set_bit(CONN_REMOTE_BUSY
, &chan
->conn_state
);
4769 chan
->expected_ack_seq
= tx_seq
;
4770 l2cap_drop_acked_frames(chan
);
4772 if (__is_ctrl_poll(chan
, rx_control
))
4773 set_bit(CONN_SEND_FBIT
, &chan
->conn_state
);
4775 if (!test_bit(CONN_SREJ_SENT
, &chan
->conn_state
)) {
4776 __clear_retrans_timer(chan
);
4777 if (__is_ctrl_poll(chan
, rx_control
))
4778 l2cap_send_rr_or_rnr(chan
, L2CAP_CTRL_FINAL
);
4782 if (__is_ctrl_poll(chan
, rx_control
)) {
4783 l2cap_send_srejtail(chan
);
4785 rx_control
= __set_ctrl_super(chan
, L2CAP_SUPER_RR
);
4789 static inline int l2cap_data_channel_sframe(struct l2cap_chan
*chan
, u32 rx_control
, struct sk_buff
*skb
)
4791 BT_DBG("chan %p rx_control 0x%8.8x len %d", chan
, rx_control
, skb
->len
);
4793 if (__is_ctrl_final(chan
, rx_control
) &&
4794 test_bit(CONN_WAIT_F
, &chan
->conn_state
)) {
4795 __clear_monitor_timer(chan
);
4796 if (chan
->unacked_frames
> 0)
4797 __set_retrans_timer(chan
);
4798 clear_bit(CONN_WAIT_F
, &chan
->conn_state
);
4801 switch (__get_ctrl_super(chan
, rx_control
)) {
4802 case L2CAP_SUPER_RR
:
4803 l2cap_data_channel_rrframe(chan
, rx_control
);
4806 case L2CAP_SUPER_REJ
:
4807 l2cap_data_channel_rejframe(chan
, rx_control
);
4810 case L2CAP_SUPER_SREJ
:
4811 l2cap_data_channel_srejframe(chan
, rx_control
);
4814 case L2CAP_SUPER_RNR
:
4815 l2cap_data_channel_rnrframe(chan
, rx_control
);
4823 static int l2cap_ertm_data_rcv(struct l2cap_chan
*chan
, struct sk_buff
*skb
)
4827 int len
, next_tx_seq_offset
, req_seq_offset
;
4829 __unpack_control(chan
, skb
);
4831 control
= __get_control(chan
, skb
->data
);
4832 skb_pull(skb
, __ctrl_size(chan
));
4836 * We can just drop the corrupted I-frame here.
4837 * Receiver will miss it and start proper recovery
4838 * procedures and ask retransmission.
4840 if (l2cap_check_fcs(chan
, skb
))
4843 if (__is_sar_start(chan
, control
) && !__is_sframe(chan
, control
))
4844 len
-= L2CAP_SDULEN_SIZE
;
4846 if (chan
->fcs
== L2CAP_FCS_CRC16
)
4847 len
-= L2CAP_FCS_SIZE
;
4849 if (len
> chan
->mps
) {
4850 l2cap_send_disconn_req(chan
->conn
, chan
, ECONNRESET
);
4854 req_seq
= __get_reqseq(chan
, control
);
4856 req_seq_offset
= __seq_offset(chan
, req_seq
, chan
->expected_ack_seq
);
4858 next_tx_seq_offset
= __seq_offset(chan
, chan
->next_tx_seq
,
4859 chan
->expected_ack_seq
);
4861 /* check for invalid req-seq */
4862 if (req_seq_offset
> next_tx_seq_offset
) {
4863 l2cap_send_disconn_req(chan
->conn
, chan
, ECONNRESET
);
4867 if (!__is_sframe(chan
, control
)) {
4869 l2cap_send_disconn_req(chan
->conn
, chan
, ECONNRESET
);
4873 l2cap_data_channel_iframe(chan
, control
, skb
);
4877 l2cap_send_disconn_req(chan
->conn
, chan
, ECONNRESET
);
4881 l2cap_data_channel_sframe(chan
, control
, skb
);
4891 static inline int l2cap_data_channel(struct l2cap_conn
*conn
, u16 cid
, struct sk_buff
*skb
)
4893 struct l2cap_chan
*chan
;
4898 chan
= l2cap_get_chan_by_scid(conn
, cid
);
4900 BT_DBG("unknown cid 0x%4.4x", cid
);
4901 /* Drop packet and return */
4906 BT_DBG("chan %p, len %d", chan
, skb
->len
);
4908 if (chan
->state
!= BT_CONNECTED
)
4911 switch (chan
->mode
) {
4912 case L2CAP_MODE_BASIC
:
4913 /* If socket recv buffers overflows we drop data here
4914 * which is *bad* because L2CAP has to be reliable.
4915 * But we don't have any other choice. L2CAP doesn't
4916 * provide flow control mechanism. */
4918 if (chan
->imtu
< skb
->len
)
4921 if (!chan
->ops
->recv(chan
->data
, skb
))
4925 case L2CAP_MODE_ERTM
:
4926 l2cap_ertm_data_rcv(chan
, skb
);
4930 case L2CAP_MODE_STREAMING
:
4931 control
= __get_control(chan
, skb
->data
);
4932 skb_pull(skb
, __ctrl_size(chan
));
4935 if (l2cap_check_fcs(chan
, skb
))
4938 if (__is_sar_start(chan
, control
))
4939 len
-= L2CAP_SDULEN_SIZE
;
4941 if (chan
->fcs
== L2CAP_FCS_CRC16
)
4942 len
-= L2CAP_FCS_SIZE
;
4944 if (len
> chan
->mps
|| len
< 0 || __is_sframe(chan
, control
))
4947 tx_seq
= __get_txseq(chan
, control
);
4949 if (chan
->expected_tx_seq
!= tx_seq
) {
4950 /* Frame(s) missing - must discard partial SDU */
4951 kfree_skb(chan
->sdu
);
4953 chan
->sdu_last_frag
= NULL
;
4956 /* TODO: Notify userland of missing data */
4959 chan
->expected_tx_seq
= __next_seq(chan
, tx_seq
);
4961 if (l2cap_reassemble_sdu(chan
, skb
, control
) == -EMSGSIZE
)
4962 l2cap_send_disconn_req(chan
->conn
, chan
, ECONNRESET
);
4967 BT_DBG("chan %p: bad mode 0x%2.2x", chan
, chan
->mode
);
4975 l2cap_chan_unlock(chan
);
4980 static inline int l2cap_conless_channel(struct l2cap_conn
*conn
, __le16 psm
, struct sk_buff
*skb
)
4982 struct l2cap_chan
*chan
;
4984 chan
= l2cap_global_chan_by_psm(0, psm
, conn
->src
, conn
->dst
);
4988 BT_DBG("chan %p, len %d", chan
, skb
->len
);
4990 if (chan
->state
!= BT_BOUND
&& chan
->state
!= BT_CONNECTED
)
4993 if (chan
->imtu
< skb
->len
)
4996 if (!chan
->ops
->recv(chan
->data
, skb
))
5005 static inline int l2cap_att_channel(struct l2cap_conn
*conn
, u16 cid
,
5006 struct sk_buff
*skb
)
5008 struct l2cap_chan
*chan
;
5010 chan
= l2cap_global_chan_by_scid(0, cid
, conn
->src
, conn
->dst
);
5014 BT_DBG("chan %p, len %d", chan
, skb
->len
);
5016 if (chan
->state
!= BT_BOUND
&& chan
->state
!= BT_CONNECTED
)
5019 if (chan
->imtu
< skb
->len
)
5022 if (!chan
->ops
->recv(chan
->data
, skb
))
5031 static void l2cap_recv_frame(struct l2cap_conn
*conn
, struct sk_buff
*skb
)
5033 struct l2cap_hdr
*lh
= (void *) skb
->data
;
5037 skb_pull(skb
, L2CAP_HDR_SIZE
);
5038 cid
= __le16_to_cpu(lh
->cid
);
5039 len
= __le16_to_cpu(lh
->len
);
5041 if (len
!= skb
->len
) {
5046 BT_DBG("len %d, cid 0x%4.4x", len
, cid
);
5049 case L2CAP_CID_LE_SIGNALING
:
5050 case L2CAP_CID_SIGNALING
:
5051 l2cap_sig_channel(conn
, skb
);
5054 case L2CAP_CID_CONN_LESS
:
5055 psm
= get_unaligned((__le16
*) skb
->data
);
5057 l2cap_conless_channel(conn
, psm
, skb
);
5060 case L2CAP_CID_LE_DATA
:
5061 l2cap_att_channel(conn
, cid
, skb
);
5065 if (smp_sig_channel(conn
, skb
))
5066 l2cap_conn_del(conn
->hcon
, EACCES
);
5070 l2cap_data_channel(conn
, cid
, skb
);
5075 /* ---- L2CAP interface with lower layer (HCI) ---- */
5077 int l2cap_connect_ind(struct hci_dev
*hdev
, bdaddr_t
*bdaddr
)
5079 int exact
= 0, lm1
= 0, lm2
= 0;
5080 struct l2cap_chan
*c
;
5082 BT_DBG("hdev %s, bdaddr %s", hdev
->name
, batostr(bdaddr
));
5084 /* Find listening sockets and check their link_mode */
5085 read_lock(&chan_list_lock
);
5086 list_for_each_entry(c
, &chan_list
, global_l
) {
5087 struct sock
*sk
= c
->sk
;
5089 if (c
->state
!= BT_LISTEN
)
5092 if (!bacmp(&bt_sk(sk
)->src
, &hdev
->bdaddr
)) {
5093 lm1
|= HCI_LM_ACCEPT
;
5094 if (test_bit(FLAG_ROLE_SWITCH
, &c
->flags
))
5095 lm1
|= HCI_LM_MASTER
;
5097 } else if (!bacmp(&bt_sk(sk
)->src
, BDADDR_ANY
)) {
5098 lm2
|= HCI_LM_ACCEPT
;
5099 if (test_bit(FLAG_ROLE_SWITCH
, &c
->flags
))
5100 lm2
|= HCI_LM_MASTER
;
5103 read_unlock(&chan_list_lock
);
5105 return exact
? lm1
: lm2
;
5108 int l2cap_connect_cfm(struct hci_conn
*hcon
, u8 status
)
5110 struct l2cap_conn
*conn
;
5112 BT_DBG("hcon %p bdaddr %s status %d", hcon
, batostr(&hcon
->dst
), status
);
5115 conn
= l2cap_conn_add(hcon
, status
);
5117 l2cap_conn_ready(conn
);
5119 l2cap_conn_del(hcon
, bt_to_errno(status
));
5124 int l2cap_disconn_ind(struct hci_conn
*hcon
)
5126 struct l2cap_conn
*conn
= hcon
->l2cap_data
;
5128 BT_DBG("hcon %p", hcon
);
5131 return HCI_ERROR_REMOTE_USER_TERM
;
5132 return conn
->disc_reason
;
5135 int l2cap_disconn_cfm(struct hci_conn
*hcon
, u8 reason
)
5137 BT_DBG("hcon %p reason %d", hcon
, reason
);
5139 l2cap_conn_del(hcon
, bt_to_errno(reason
));
5143 static inline void l2cap_check_encryption(struct l2cap_chan
*chan
, u8 encrypt
)
5145 if (chan
->chan_type
!= L2CAP_CHAN_CONN_ORIENTED
)
5148 if (encrypt
== 0x00) {
5149 if (chan
->sec_level
== BT_SECURITY_MEDIUM
) {
5150 __set_chan_timer(chan
, L2CAP_ENC_TIMEOUT
);
5151 } else if (chan
->sec_level
== BT_SECURITY_HIGH
)
5152 l2cap_chan_close(chan
, ECONNREFUSED
);
5154 if (chan
->sec_level
== BT_SECURITY_MEDIUM
)
5155 __clear_chan_timer(chan
);
5159 int l2cap_security_cfm(struct hci_conn
*hcon
, u8 status
, u8 encrypt
)
5161 struct l2cap_conn
*conn
= hcon
->l2cap_data
;
5162 struct l2cap_chan
*chan
;
5167 BT_DBG("conn %p", conn
);
5169 if (hcon
->type
== LE_LINK
) {
5170 if (!status
&& encrypt
)
5171 smp_distribute_keys(conn
, 0);
5172 cancel_delayed_work(&conn
->security_timer
);
5175 mutex_lock(&conn
->chan_lock
);
5177 list_for_each_entry(chan
, &conn
->chan_l
, list
) {
5178 l2cap_chan_lock(chan
);
5180 BT_DBG("chan->scid %d", chan
->scid
);
5182 if (chan
->scid
== L2CAP_CID_LE_DATA
) {
5183 if (!status
&& encrypt
) {
5184 chan
->sec_level
= hcon
->sec_level
;
5185 l2cap_chan_ready(chan
);
5188 l2cap_chan_unlock(chan
);
5192 if (test_bit(CONF_CONNECT_PEND
, &chan
->conf_state
)) {
5193 l2cap_chan_unlock(chan
);
5197 if (!status
&& (chan
->state
== BT_CONNECTED
||
5198 chan
->state
== BT_CONFIG
)) {
5199 struct sock
*sk
= chan
->sk
;
5201 clear_bit(BT_SK_SUSPEND
, &bt_sk(sk
)->flags
);
5202 sk
->sk_state_change(sk
);
5204 l2cap_check_encryption(chan
, encrypt
);
5205 l2cap_chan_unlock(chan
);
5209 if (chan
->state
== BT_CONNECT
) {
5211 l2cap_send_conn_req(chan
);
5213 __set_chan_timer(chan
, L2CAP_DISC_TIMEOUT
);
5215 } else if (chan
->state
== BT_CONNECT2
) {
5216 struct sock
*sk
= chan
->sk
;
5217 struct l2cap_conn_rsp rsp
;
5223 if (test_bit(BT_SK_DEFER_SETUP
,
5224 &bt_sk(sk
)->flags
)) {
5225 struct sock
*parent
= bt_sk(sk
)->parent
;
5226 res
= L2CAP_CR_PEND
;
5227 stat
= L2CAP_CS_AUTHOR_PEND
;
5229 parent
->sk_data_ready(parent
, 0);
5231 __l2cap_state_change(chan
, BT_CONFIG
);
5232 res
= L2CAP_CR_SUCCESS
;
5233 stat
= L2CAP_CS_NO_INFO
;
5236 __l2cap_state_change(chan
, BT_DISCONN
);
5237 __set_chan_timer(chan
, L2CAP_DISC_TIMEOUT
);
5238 res
= L2CAP_CR_SEC_BLOCK
;
5239 stat
= L2CAP_CS_NO_INFO
;
5244 rsp
.scid
= cpu_to_le16(chan
->dcid
);
5245 rsp
.dcid
= cpu_to_le16(chan
->scid
);
5246 rsp
.result
= cpu_to_le16(res
);
5247 rsp
.status
= cpu_to_le16(stat
);
5248 l2cap_send_cmd(conn
, chan
->ident
, L2CAP_CONN_RSP
,
5252 l2cap_chan_unlock(chan
);
5255 mutex_unlock(&conn
->chan_lock
);
5260 int l2cap_recv_acldata(struct hci_conn
*hcon
, struct sk_buff
*skb
, u16 flags
)
5262 struct l2cap_conn
*conn
= hcon
->l2cap_data
;
5265 conn
= l2cap_conn_add(hcon
, 0);
5270 BT_DBG("conn %p len %d flags 0x%x", conn
, skb
->len
, flags
);
5272 if (!(flags
& ACL_CONT
)) {
5273 struct l2cap_hdr
*hdr
;
5277 BT_ERR("Unexpected start frame (len %d)", skb
->len
);
5278 kfree_skb(conn
->rx_skb
);
5279 conn
->rx_skb
= NULL
;
5281 l2cap_conn_unreliable(conn
, ECOMM
);
5284 /* Start fragment always begin with Basic L2CAP header */
5285 if (skb
->len
< L2CAP_HDR_SIZE
) {
5286 BT_ERR("Frame is too short (len %d)", skb
->len
);
5287 l2cap_conn_unreliable(conn
, ECOMM
);
5291 hdr
= (struct l2cap_hdr
*) skb
->data
;
5292 len
= __le16_to_cpu(hdr
->len
) + L2CAP_HDR_SIZE
;
5294 if (len
== skb
->len
) {
5295 /* Complete frame received */
5296 l2cap_recv_frame(conn
, skb
);
5300 BT_DBG("Start: total len %d, frag len %d", len
, skb
->len
);
5302 if (skb
->len
> len
) {
5303 BT_ERR("Frame is too long (len %d, expected len %d)",
5305 l2cap_conn_unreliable(conn
, ECOMM
);
5309 /* Allocate skb for the complete frame (with header) */
5310 conn
->rx_skb
= bt_skb_alloc(len
, GFP_ATOMIC
);
5314 skb_copy_from_linear_data(skb
, skb_put(conn
->rx_skb
, skb
->len
),
5316 conn
->rx_len
= len
- skb
->len
;
5318 BT_DBG("Cont: frag len %d (expecting %d)", skb
->len
, conn
->rx_len
);
5320 if (!conn
->rx_len
) {
5321 BT_ERR("Unexpected continuation frame (len %d)", skb
->len
);
5322 l2cap_conn_unreliable(conn
, ECOMM
);
5326 if (skb
->len
> conn
->rx_len
) {
5327 BT_ERR("Fragment is too long (len %d, expected %d)",
5328 skb
->len
, conn
->rx_len
);
5329 kfree_skb(conn
->rx_skb
);
5330 conn
->rx_skb
= NULL
;
5332 l2cap_conn_unreliable(conn
, ECOMM
);
5336 skb_copy_from_linear_data(skb
, skb_put(conn
->rx_skb
, skb
->len
),
5338 conn
->rx_len
-= skb
->len
;
5340 if (!conn
->rx_len
) {
5341 /* Complete frame received */
5342 l2cap_recv_frame(conn
, conn
->rx_skb
);
5343 conn
->rx_skb
= NULL
;
5352 static int l2cap_debugfs_show(struct seq_file
*f
, void *p
)
5354 struct l2cap_chan
*c
;
5356 read_lock(&chan_list_lock
);
5358 list_for_each_entry(c
, &chan_list
, global_l
) {
5359 struct sock
*sk
= c
->sk
;
5361 seq_printf(f
, "%s %s %d %d 0x%4.4x 0x%4.4x %d %d %d %d\n",
5362 batostr(&bt_sk(sk
)->src
),
5363 batostr(&bt_sk(sk
)->dst
),
5364 c
->state
, __le16_to_cpu(c
->psm
),
5365 c
->scid
, c
->dcid
, c
->imtu
, c
->omtu
,
5366 c
->sec_level
, c
->mode
);
5369 read_unlock(&chan_list_lock
);
5374 static int l2cap_debugfs_open(struct inode
*inode
, struct file
*file
)
5376 return single_open(file
, l2cap_debugfs_show
, inode
->i_private
);
5379 static const struct file_operations l2cap_debugfs_fops
= {
5380 .open
= l2cap_debugfs_open
,
5382 .llseek
= seq_lseek
,
5383 .release
= single_release
,
5386 static struct dentry
*l2cap_debugfs
;
5388 int __init
l2cap_init(void)
5392 err
= l2cap_init_sockets();
5397 l2cap_debugfs
= debugfs_create_file("l2cap", 0444,
5398 bt_debugfs
, NULL
, &l2cap_debugfs_fops
);
5400 BT_ERR("Failed to create L2CAP debug file");
5406 void l2cap_exit(void)
5408 debugfs_remove(l2cap_debugfs
);
5409 l2cap_cleanup_sockets();
5412 module_param(disable_ertm
, bool, 0644);
5413 MODULE_PARM_DESC(disable_ertm
, "Disable enhanced retransmission mode");