2 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (C) 2000-2001 Qualcomm Incorporated
4 Copyright (C) 2009-2010 Gustavo F. Padovan <gustavo@padovan.org>
5 Copyright (C) 2010 Google Inc.
6 Copyright (C) 2011 ProFUSION Embedded Systems
7 Copyright (c) 2012 Code Aurora Forum. All rights reserved.
9 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
11 This program is free software; you can redistribute it and/or modify
12 it under the terms of the GNU General Public License version 2 as
13 published by the Free Software Foundation;
15 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
16 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
18 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
19 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
20 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
21 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
22 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
24 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
25 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
26 SOFTWARE IS DISCLAIMED.
29 /* Bluetooth L2CAP core. */
31 #include <linux/module.h>
33 #include <linux/debugfs.h>
34 #include <linux/crc16.h>
36 #include <net/bluetooth/bluetooth.h>
37 #include <net/bluetooth/hci_core.h>
38 #include <net/bluetooth/l2cap.h>
39 #include <net/bluetooth/smp.h>
40 #include <net/bluetooth/a2mp.h>
44 static u32 l2cap_feat_mask
= L2CAP_FEAT_FIXED_CHAN
;
45 static u8 l2cap_fixed_chan
[8] = { L2CAP_FC_L2CAP
, };
47 static LIST_HEAD(chan_list
);
48 static DEFINE_RWLOCK(chan_list_lock
);
50 static struct sk_buff
*l2cap_build_cmd(struct l2cap_conn
*conn
,
51 u8 code
, u8 ident
, u16 dlen
, void *data
);
52 static void l2cap_send_cmd(struct l2cap_conn
*conn
, u8 ident
, u8 code
, u16 len
,
54 static int l2cap_build_conf_req(struct l2cap_chan
*chan
, void *data
);
55 static void l2cap_send_disconn_req(struct l2cap_conn
*conn
,
56 struct l2cap_chan
*chan
, int err
);
58 static void l2cap_tx(struct l2cap_chan
*chan
, struct l2cap_ctrl
*control
,
59 struct sk_buff_head
*skbs
, u8 event
);
61 /* ---- L2CAP channels ---- */
63 static struct l2cap_chan
*__l2cap_get_chan_by_dcid(struct l2cap_conn
*conn
,
68 list_for_each_entry(c
, &conn
->chan_l
, list
) {
75 static struct l2cap_chan
*__l2cap_get_chan_by_scid(struct l2cap_conn
*conn
,
80 list_for_each_entry(c
, &conn
->chan_l
, list
) {
87 /* Find channel with given SCID.
88 * Returns locked channel. */
89 static struct l2cap_chan
*l2cap_get_chan_by_scid(struct l2cap_conn
*conn
,
94 mutex_lock(&conn
->chan_lock
);
95 c
= __l2cap_get_chan_by_scid(conn
, cid
);
98 mutex_unlock(&conn
->chan_lock
);
103 /* Find channel with given DCID.
104 * Returns locked channel.
106 static struct l2cap_chan
*l2cap_get_chan_by_dcid(struct l2cap_conn
*conn
,
109 struct l2cap_chan
*c
;
111 mutex_lock(&conn
->chan_lock
);
112 c
= __l2cap_get_chan_by_dcid(conn
, cid
);
115 mutex_unlock(&conn
->chan_lock
);
120 static struct l2cap_chan
*__l2cap_get_chan_by_ident(struct l2cap_conn
*conn
,
123 struct l2cap_chan
*c
;
125 list_for_each_entry(c
, &conn
->chan_l
, list
) {
126 if (c
->ident
== ident
)
132 static struct l2cap_chan
*__l2cap_global_chan_by_addr(__le16 psm
, bdaddr_t
*src
)
134 struct l2cap_chan
*c
;
136 list_for_each_entry(c
, &chan_list
, global_l
) {
137 if (c
->sport
== psm
&& !bacmp(&bt_sk(c
->sk
)->src
, src
))
143 int l2cap_add_psm(struct l2cap_chan
*chan
, bdaddr_t
*src
, __le16 psm
)
147 write_lock(&chan_list_lock
);
149 if (psm
&& __l2cap_global_chan_by_addr(psm
, src
)) {
162 for (p
= 0x1001; p
< 0x1100; p
+= 2)
163 if (!__l2cap_global_chan_by_addr(cpu_to_le16(p
), src
)) {
164 chan
->psm
= cpu_to_le16(p
);
165 chan
->sport
= cpu_to_le16(p
);
172 write_unlock(&chan_list_lock
);
176 int l2cap_add_scid(struct l2cap_chan
*chan
, __u16 scid
)
178 write_lock(&chan_list_lock
);
182 write_unlock(&chan_list_lock
);
187 static u16
l2cap_alloc_cid(struct l2cap_conn
*conn
)
189 u16 cid
= L2CAP_CID_DYN_START
;
191 for (; cid
< L2CAP_CID_DYN_END
; cid
++) {
192 if (!__l2cap_get_chan_by_scid(conn
, cid
))
199 static void __l2cap_state_change(struct l2cap_chan
*chan
, int state
)
201 BT_DBG("chan %p %s -> %s", chan
, state_to_string(chan
->state
),
202 state_to_string(state
));
205 chan
->ops
->state_change(chan
, state
);
208 static void l2cap_state_change(struct l2cap_chan
*chan
, int state
)
210 struct sock
*sk
= chan
->sk
;
213 __l2cap_state_change(chan
, state
);
217 static inline void __l2cap_chan_set_err(struct l2cap_chan
*chan
, int err
)
219 struct sock
*sk
= chan
->sk
;
224 static inline void l2cap_chan_set_err(struct l2cap_chan
*chan
, int err
)
226 struct sock
*sk
= chan
->sk
;
229 __l2cap_chan_set_err(chan
, err
);
233 static void __set_retrans_timer(struct l2cap_chan
*chan
)
235 if (!delayed_work_pending(&chan
->monitor_timer
) &&
236 chan
->retrans_timeout
) {
237 l2cap_set_timer(chan
, &chan
->retrans_timer
,
238 msecs_to_jiffies(chan
->retrans_timeout
));
242 static void __set_monitor_timer(struct l2cap_chan
*chan
)
244 __clear_retrans_timer(chan
);
245 if (chan
->monitor_timeout
) {
246 l2cap_set_timer(chan
, &chan
->monitor_timer
,
247 msecs_to_jiffies(chan
->monitor_timeout
));
251 static struct sk_buff
*l2cap_ertm_seq_in_queue(struct sk_buff_head
*head
,
256 skb_queue_walk(head
, skb
) {
257 if (bt_cb(skb
)->control
.txseq
== seq
)
264 /* ---- L2CAP sequence number lists ---- */
266 /* For ERTM, ordered lists of sequence numbers must be tracked for
267 * SREJ requests that are received and for frames that are to be
268 * retransmitted. These seq_list functions implement a singly-linked
269 * list in an array, where membership in the list can also be checked
270 * in constant time. Items can also be added to the tail of the list
271 * and removed from the head in constant time, without further memory
275 static int l2cap_seq_list_init(struct l2cap_seq_list
*seq_list
, u16 size
)
277 size_t alloc_size
, i
;
279 /* Allocated size is a power of 2 to map sequence numbers
280 * (which may be up to 14 bits) in to a smaller array that is
281 * sized for the negotiated ERTM transmit windows.
283 alloc_size
= roundup_pow_of_two(size
);
285 seq_list
->list
= kmalloc(sizeof(u16
) * alloc_size
, GFP_KERNEL
);
289 seq_list
->mask
= alloc_size
- 1;
290 seq_list
->head
= L2CAP_SEQ_LIST_CLEAR
;
291 seq_list
->tail
= L2CAP_SEQ_LIST_CLEAR
;
292 for (i
= 0; i
< alloc_size
; i
++)
293 seq_list
->list
[i
] = L2CAP_SEQ_LIST_CLEAR
;
298 static inline void l2cap_seq_list_free(struct l2cap_seq_list
*seq_list
)
300 kfree(seq_list
->list
);
303 static inline bool l2cap_seq_list_contains(struct l2cap_seq_list
*seq_list
,
306 /* Constant-time check for list membership */
307 return seq_list
->list
[seq
& seq_list
->mask
] != L2CAP_SEQ_LIST_CLEAR
;
310 static u16
l2cap_seq_list_remove(struct l2cap_seq_list
*seq_list
, u16 seq
)
312 u16 mask
= seq_list
->mask
;
314 if (seq_list
->head
== L2CAP_SEQ_LIST_CLEAR
) {
315 /* In case someone tries to pop the head of an empty list */
316 return L2CAP_SEQ_LIST_CLEAR
;
317 } else if (seq_list
->head
== seq
) {
318 /* Head can be removed in constant time */
319 seq_list
->head
= seq_list
->list
[seq
& mask
];
320 seq_list
->list
[seq
& mask
] = L2CAP_SEQ_LIST_CLEAR
;
322 if (seq_list
->head
== L2CAP_SEQ_LIST_TAIL
) {
323 seq_list
->head
= L2CAP_SEQ_LIST_CLEAR
;
324 seq_list
->tail
= L2CAP_SEQ_LIST_CLEAR
;
327 /* Walk the list to find the sequence number */
328 u16 prev
= seq_list
->head
;
329 while (seq_list
->list
[prev
& mask
] != seq
) {
330 prev
= seq_list
->list
[prev
& mask
];
331 if (prev
== L2CAP_SEQ_LIST_TAIL
)
332 return L2CAP_SEQ_LIST_CLEAR
;
335 /* Unlink the number from the list and clear it */
336 seq_list
->list
[prev
& mask
] = seq_list
->list
[seq
& mask
];
337 seq_list
->list
[seq
& mask
] = L2CAP_SEQ_LIST_CLEAR
;
338 if (seq_list
->tail
== seq
)
339 seq_list
->tail
= prev
;
344 static inline u16
l2cap_seq_list_pop(struct l2cap_seq_list
*seq_list
)
346 /* Remove the head in constant time */
347 return l2cap_seq_list_remove(seq_list
, seq_list
->head
);
350 static void l2cap_seq_list_clear(struct l2cap_seq_list
*seq_list
)
354 if (seq_list
->head
== L2CAP_SEQ_LIST_CLEAR
)
357 for (i
= 0; i
<= seq_list
->mask
; i
++)
358 seq_list
->list
[i
] = L2CAP_SEQ_LIST_CLEAR
;
360 seq_list
->head
= L2CAP_SEQ_LIST_CLEAR
;
361 seq_list
->tail
= L2CAP_SEQ_LIST_CLEAR
;
364 static void l2cap_seq_list_append(struct l2cap_seq_list
*seq_list
, u16 seq
)
366 u16 mask
= seq_list
->mask
;
368 /* All appends happen in constant time */
370 if (seq_list
->list
[seq
& mask
] != L2CAP_SEQ_LIST_CLEAR
)
373 if (seq_list
->tail
== L2CAP_SEQ_LIST_CLEAR
)
374 seq_list
->head
= seq
;
376 seq_list
->list
[seq_list
->tail
& mask
] = seq
;
378 seq_list
->tail
= seq
;
379 seq_list
->list
[seq
& mask
] = L2CAP_SEQ_LIST_TAIL
;
382 static void l2cap_chan_timeout(struct work_struct
*work
)
384 struct l2cap_chan
*chan
= container_of(work
, struct l2cap_chan
,
386 struct l2cap_conn
*conn
= chan
->conn
;
389 BT_DBG("chan %p state %s", chan
, state_to_string(chan
->state
));
391 mutex_lock(&conn
->chan_lock
);
392 l2cap_chan_lock(chan
);
394 if (chan
->state
== BT_CONNECTED
|| chan
->state
== BT_CONFIG
)
395 reason
= ECONNREFUSED
;
396 else if (chan
->state
== BT_CONNECT
&&
397 chan
->sec_level
!= BT_SECURITY_SDP
)
398 reason
= ECONNREFUSED
;
402 l2cap_chan_close(chan
, reason
);
404 l2cap_chan_unlock(chan
);
406 chan
->ops
->close(chan
);
407 mutex_unlock(&conn
->chan_lock
);
409 l2cap_chan_put(chan
);
412 struct l2cap_chan
*l2cap_chan_create(void)
414 struct l2cap_chan
*chan
;
416 chan
= kzalloc(sizeof(*chan
), GFP_ATOMIC
);
420 mutex_init(&chan
->lock
);
422 write_lock(&chan_list_lock
);
423 list_add(&chan
->global_l
, &chan_list
);
424 write_unlock(&chan_list_lock
);
426 INIT_DELAYED_WORK(&chan
->chan_timer
, l2cap_chan_timeout
);
428 chan
->state
= BT_OPEN
;
430 kref_init(&chan
->kref
);
432 /* This flag is cleared in l2cap_chan_ready() */
433 set_bit(CONF_NOT_COMPLETE
, &chan
->conf_state
);
435 BT_DBG("chan %p", chan
);
440 static void l2cap_chan_destroy(struct kref
*kref
)
442 struct l2cap_chan
*chan
= container_of(kref
, struct l2cap_chan
, kref
);
444 BT_DBG("chan %p", chan
);
446 write_lock(&chan_list_lock
);
447 list_del(&chan
->global_l
);
448 write_unlock(&chan_list_lock
);
453 void l2cap_chan_hold(struct l2cap_chan
*c
)
455 BT_DBG("chan %p orig refcnt %d", c
, atomic_read(&c
->kref
.refcount
));
460 void l2cap_chan_put(struct l2cap_chan
*c
)
462 BT_DBG("chan %p orig refcnt %d", c
, atomic_read(&c
->kref
.refcount
));
464 kref_put(&c
->kref
, l2cap_chan_destroy
);
467 void l2cap_chan_set_defaults(struct l2cap_chan
*chan
)
469 chan
->fcs
= L2CAP_FCS_CRC16
;
470 chan
->max_tx
= L2CAP_DEFAULT_MAX_TX
;
471 chan
->tx_win
= L2CAP_DEFAULT_TX_WINDOW
;
472 chan
->tx_win_max
= L2CAP_DEFAULT_TX_WINDOW
;
473 chan
->ack_win
= L2CAP_DEFAULT_TX_WINDOW
;
474 chan
->sec_level
= BT_SECURITY_LOW
;
476 set_bit(FLAG_FORCE_ACTIVE
, &chan
->flags
);
479 void __l2cap_chan_add(struct l2cap_conn
*conn
, struct l2cap_chan
*chan
)
481 BT_DBG("conn %p, psm 0x%2.2x, dcid 0x%4.4x", conn
,
482 __le16_to_cpu(chan
->psm
), chan
->dcid
);
484 conn
->disc_reason
= HCI_ERROR_REMOTE_USER_TERM
;
488 switch (chan
->chan_type
) {
489 case L2CAP_CHAN_CONN_ORIENTED
:
490 if (conn
->hcon
->type
== LE_LINK
) {
492 chan
->omtu
= L2CAP_DEFAULT_MTU
;
493 chan
->scid
= L2CAP_CID_LE_DATA
;
494 chan
->dcid
= L2CAP_CID_LE_DATA
;
496 /* Alloc CID for connection-oriented socket */
497 chan
->scid
= l2cap_alloc_cid(conn
);
498 chan
->omtu
= L2CAP_DEFAULT_MTU
;
502 case L2CAP_CHAN_CONN_LESS
:
503 /* Connectionless socket */
504 chan
->scid
= L2CAP_CID_CONN_LESS
;
505 chan
->dcid
= L2CAP_CID_CONN_LESS
;
506 chan
->omtu
= L2CAP_DEFAULT_MTU
;
509 case L2CAP_CHAN_CONN_FIX_A2MP
:
510 chan
->scid
= L2CAP_CID_A2MP
;
511 chan
->dcid
= L2CAP_CID_A2MP
;
512 chan
->omtu
= L2CAP_A2MP_DEFAULT_MTU
;
513 chan
->imtu
= L2CAP_A2MP_DEFAULT_MTU
;
517 /* Raw socket can send/recv signalling messages only */
518 chan
->scid
= L2CAP_CID_SIGNALING
;
519 chan
->dcid
= L2CAP_CID_SIGNALING
;
520 chan
->omtu
= L2CAP_DEFAULT_MTU
;
523 chan
->local_id
= L2CAP_BESTEFFORT_ID
;
524 chan
->local_stype
= L2CAP_SERV_BESTEFFORT
;
525 chan
->local_msdu
= L2CAP_DEFAULT_MAX_SDU_SIZE
;
526 chan
->local_sdu_itime
= L2CAP_DEFAULT_SDU_ITIME
;
527 chan
->local_acc_lat
= L2CAP_DEFAULT_ACC_LAT
;
528 chan
->local_flush_to
= L2CAP_EFS_DEFAULT_FLUSH_TO
;
530 l2cap_chan_hold(chan
);
532 list_add(&chan
->list
, &conn
->chan_l
);
535 void l2cap_chan_add(struct l2cap_conn
*conn
, struct l2cap_chan
*chan
)
537 mutex_lock(&conn
->chan_lock
);
538 __l2cap_chan_add(conn
, chan
);
539 mutex_unlock(&conn
->chan_lock
);
542 void l2cap_chan_del(struct l2cap_chan
*chan
, int err
)
544 struct l2cap_conn
*conn
= chan
->conn
;
546 __clear_chan_timer(chan
);
548 BT_DBG("chan %p, conn %p, err %d", chan
, conn
, err
);
551 struct amp_mgr
*mgr
= conn
->hcon
->amp_mgr
;
552 /* Delete from channel list */
553 list_del(&chan
->list
);
555 l2cap_chan_put(chan
);
559 if (chan
->chan_type
!= L2CAP_CHAN_CONN_FIX_A2MP
)
560 hci_conn_put(conn
->hcon
);
562 if (mgr
&& mgr
->bredr_chan
== chan
)
563 mgr
->bredr_chan
= NULL
;
566 chan
->ops
->teardown(chan
, err
);
568 if (test_bit(CONF_NOT_COMPLETE
, &chan
->conf_state
))
572 case L2CAP_MODE_BASIC
:
575 case L2CAP_MODE_ERTM
:
576 __clear_retrans_timer(chan
);
577 __clear_monitor_timer(chan
);
578 __clear_ack_timer(chan
);
580 skb_queue_purge(&chan
->srej_q
);
582 l2cap_seq_list_free(&chan
->srej_list
);
583 l2cap_seq_list_free(&chan
->retrans_list
);
587 case L2CAP_MODE_STREAMING
:
588 skb_queue_purge(&chan
->tx_q
);
595 void l2cap_chan_close(struct l2cap_chan
*chan
, int reason
)
597 struct l2cap_conn
*conn
= chan
->conn
;
598 struct sock
*sk
= chan
->sk
;
600 BT_DBG("chan %p state %s sk %p", chan
, state_to_string(chan
->state
),
603 switch (chan
->state
) {
605 chan
->ops
->teardown(chan
, 0);
610 if (chan
->chan_type
== L2CAP_CHAN_CONN_ORIENTED
&&
611 conn
->hcon
->type
== ACL_LINK
) {
612 __set_chan_timer(chan
, sk
->sk_sndtimeo
);
613 l2cap_send_disconn_req(conn
, chan
, reason
);
615 l2cap_chan_del(chan
, reason
);
619 if (chan
->chan_type
== L2CAP_CHAN_CONN_ORIENTED
&&
620 conn
->hcon
->type
== ACL_LINK
) {
621 struct l2cap_conn_rsp rsp
;
624 if (test_bit(BT_SK_DEFER_SETUP
, &bt_sk(sk
)->flags
))
625 result
= L2CAP_CR_SEC_BLOCK
;
627 result
= L2CAP_CR_BAD_PSM
;
628 l2cap_state_change(chan
, BT_DISCONN
);
630 rsp
.scid
= cpu_to_le16(chan
->dcid
);
631 rsp
.dcid
= cpu_to_le16(chan
->scid
);
632 rsp
.result
= cpu_to_le16(result
);
633 rsp
.status
= __constant_cpu_to_le16(L2CAP_CS_NO_INFO
);
634 l2cap_send_cmd(conn
, chan
->ident
, L2CAP_CONN_RSP
,
638 l2cap_chan_del(chan
, reason
);
643 l2cap_chan_del(chan
, reason
);
647 chan
->ops
->teardown(chan
, 0);
652 static inline u8
l2cap_get_auth_type(struct l2cap_chan
*chan
)
654 if (chan
->chan_type
== L2CAP_CHAN_RAW
) {
655 switch (chan
->sec_level
) {
656 case BT_SECURITY_HIGH
:
657 return HCI_AT_DEDICATED_BONDING_MITM
;
658 case BT_SECURITY_MEDIUM
:
659 return HCI_AT_DEDICATED_BONDING
;
661 return HCI_AT_NO_BONDING
;
663 } else if (chan
->psm
== __constant_cpu_to_le16(L2CAP_PSM_SDP
)) {
664 if (chan
->sec_level
== BT_SECURITY_LOW
)
665 chan
->sec_level
= BT_SECURITY_SDP
;
667 if (chan
->sec_level
== BT_SECURITY_HIGH
)
668 return HCI_AT_NO_BONDING_MITM
;
670 return HCI_AT_NO_BONDING
;
672 switch (chan
->sec_level
) {
673 case BT_SECURITY_HIGH
:
674 return HCI_AT_GENERAL_BONDING_MITM
;
675 case BT_SECURITY_MEDIUM
:
676 return HCI_AT_GENERAL_BONDING
;
678 return HCI_AT_NO_BONDING
;
683 /* Service level security */
684 int l2cap_chan_check_security(struct l2cap_chan
*chan
)
686 struct l2cap_conn
*conn
= chan
->conn
;
689 auth_type
= l2cap_get_auth_type(chan
);
691 return hci_conn_security(conn
->hcon
, chan
->sec_level
, auth_type
);
694 static u8
l2cap_get_ident(struct l2cap_conn
*conn
)
698 /* Get next available identifier.
699 * 1 - 128 are used by kernel.
700 * 129 - 199 are reserved.
701 * 200 - 254 are used by utilities like l2ping, etc.
704 spin_lock(&conn
->lock
);
706 if (++conn
->tx_ident
> 128)
711 spin_unlock(&conn
->lock
);
716 static void l2cap_send_cmd(struct l2cap_conn
*conn
, u8 ident
, u8 code
, u16 len
,
719 struct sk_buff
*skb
= l2cap_build_cmd(conn
, code
, ident
, len
, data
);
722 BT_DBG("code 0x%2.2x", code
);
727 if (lmp_no_flush_capable(conn
->hcon
->hdev
))
728 flags
= ACL_START_NO_FLUSH
;
732 bt_cb(skb
)->force_active
= BT_POWER_FORCE_ACTIVE_ON
;
733 skb
->priority
= HCI_PRIO_MAX
;
735 hci_send_acl(conn
->hchan
, skb
, flags
);
738 static bool __chan_is_moving(struct l2cap_chan
*chan
)
740 return chan
->move_state
!= L2CAP_MOVE_STABLE
&&
741 chan
->move_state
!= L2CAP_MOVE_WAIT_PREPARE
;
744 static void l2cap_do_send(struct l2cap_chan
*chan
, struct sk_buff
*skb
)
746 struct hci_conn
*hcon
= chan
->conn
->hcon
;
749 BT_DBG("chan %p, skb %p len %d priority %u", chan
, skb
, skb
->len
,
752 if (!test_bit(FLAG_FLUSHABLE
, &chan
->flags
) &&
753 lmp_no_flush_capable(hcon
->hdev
))
754 flags
= ACL_START_NO_FLUSH
;
758 bt_cb(skb
)->force_active
= test_bit(FLAG_FORCE_ACTIVE
, &chan
->flags
);
759 hci_send_acl(chan
->conn
->hchan
, skb
, flags
);
762 static void __unpack_enhanced_control(u16 enh
, struct l2cap_ctrl
*control
)
764 control
->reqseq
= (enh
& L2CAP_CTRL_REQSEQ
) >> L2CAP_CTRL_REQSEQ_SHIFT
;
765 control
->final
= (enh
& L2CAP_CTRL_FINAL
) >> L2CAP_CTRL_FINAL_SHIFT
;
767 if (enh
& L2CAP_CTRL_FRAME_TYPE
) {
770 control
->poll
= (enh
& L2CAP_CTRL_POLL
) >> L2CAP_CTRL_POLL_SHIFT
;
771 control
->super
= (enh
& L2CAP_CTRL_SUPERVISE
) >> L2CAP_CTRL_SUPER_SHIFT
;
778 control
->sar
= (enh
& L2CAP_CTRL_SAR
) >> L2CAP_CTRL_SAR_SHIFT
;
779 control
->txseq
= (enh
& L2CAP_CTRL_TXSEQ
) >> L2CAP_CTRL_TXSEQ_SHIFT
;
786 static void __unpack_extended_control(u32 ext
, struct l2cap_ctrl
*control
)
788 control
->reqseq
= (ext
& L2CAP_EXT_CTRL_REQSEQ
) >> L2CAP_EXT_CTRL_REQSEQ_SHIFT
;
789 control
->final
= (ext
& L2CAP_EXT_CTRL_FINAL
) >> L2CAP_EXT_CTRL_FINAL_SHIFT
;
791 if (ext
& L2CAP_EXT_CTRL_FRAME_TYPE
) {
794 control
->poll
= (ext
& L2CAP_EXT_CTRL_POLL
) >> L2CAP_EXT_CTRL_POLL_SHIFT
;
795 control
->super
= (ext
& L2CAP_EXT_CTRL_SUPERVISE
) >> L2CAP_EXT_CTRL_SUPER_SHIFT
;
802 control
->sar
= (ext
& L2CAP_EXT_CTRL_SAR
) >> L2CAP_EXT_CTRL_SAR_SHIFT
;
803 control
->txseq
= (ext
& L2CAP_EXT_CTRL_TXSEQ
) >> L2CAP_EXT_CTRL_TXSEQ_SHIFT
;
810 static inline void __unpack_control(struct l2cap_chan
*chan
,
813 if (test_bit(FLAG_EXT_CTRL
, &chan
->flags
)) {
814 __unpack_extended_control(get_unaligned_le32(skb
->data
),
815 &bt_cb(skb
)->control
);
816 skb_pull(skb
, L2CAP_EXT_CTRL_SIZE
);
818 __unpack_enhanced_control(get_unaligned_le16(skb
->data
),
819 &bt_cb(skb
)->control
);
820 skb_pull(skb
, L2CAP_ENH_CTRL_SIZE
);
824 static u32
__pack_extended_control(struct l2cap_ctrl
*control
)
828 packed
= control
->reqseq
<< L2CAP_EXT_CTRL_REQSEQ_SHIFT
;
829 packed
|= control
->final
<< L2CAP_EXT_CTRL_FINAL_SHIFT
;
831 if (control
->sframe
) {
832 packed
|= control
->poll
<< L2CAP_EXT_CTRL_POLL_SHIFT
;
833 packed
|= control
->super
<< L2CAP_EXT_CTRL_SUPER_SHIFT
;
834 packed
|= L2CAP_EXT_CTRL_FRAME_TYPE
;
836 packed
|= control
->sar
<< L2CAP_EXT_CTRL_SAR_SHIFT
;
837 packed
|= control
->txseq
<< L2CAP_EXT_CTRL_TXSEQ_SHIFT
;
843 static u16
__pack_enhanced_control(struct l2cap_ctrl
*control
)
847 packed
= control
->reqseq
<< L2CAP_CTRL_REQSEQ_SHIFT
;
848 packed
|= control
->final
<< L2CAP_CTRL_FINAL_SHIFT
;
850 if (control
->sframe
) {
851 packed
|= control
->poll
<< L2CAP_CTRL_POLL_SHIFT
;
852 packed
|= control
->super
<< L2CAP_CTRL_SUPER_SHIFT
;
853 packed
|= L2CAP_CTRL_FRAME_TYPE
;
855 packed
|= control
->sar
<< L2CAP_CTRL_SAR_SHIFT
;
856 packed
|= control
->txseq
<< L2CAP_CTRL_TXSEQ_SHIFT
;
862 static inline void __pack_control(struct l2cap_chan
*chan
,
863 struct l2cap_ctrl
*control
,
866 if (test_bit(FLAG_EXT_CTRL
, &chan
->flags
)) {
867 put_unaligned_le32(__pack_extended_control(control
),
868 skb
->data
+ L2CAP_HDR_SIZE
);
870 put_unaligned_le16(__pack_enhanced_control(control
),
871 skb
->data
+ L2CAP_HDR_SIZE
);
875 static inline unsigned int __ertm_hdr_size(struct l2cap_chan
*chan
)
877 if (test_bit(FLAG_EXT_CTRL
, &chan
->flags
))
878 return L2CAP_EXT_HDR_SIZE
;
880 return L2CAP_ENH_HDR_SIZE
;
883 static struct sk_buff
*l2cap_create_sframe_pdu(struct l2cap_chan
*chan
,
887 struct l2cap_hdr
*lh
;
888 int hlen
= __ertm_hdr_size(chan
);
890 if (chan
->fcs
== L2CAP_FCS_CRC16
)
891 hlen
+= L2CAP_FCS_SIZE
;
893 skb
= bt_skb_alloc(hlen
, GFP_KERNEL
);
896 return ERR_PTR(-ENOMEM
);
898 lh
= (struct l2cap_hdr
*) skb_put(skb
, L2CAP_HDR_SIZE
);
899 lh
->len
= cpu_to_le16(hlen
- L2CAP_HDR_SIZE
);
900 lh
->cid
= cpu_to_le16(chan
->dcid
);
902 if (test_bit(FLAG_EXT_CTRL
, &chan
->flags
))
903 put_unaligned_le32(control
, skb_put(skb
, L2CAP_EXT_CTRL_SIZE
));
905 put_unaligned_le16(control
, skb_put(skb
, L2CAP_ENH_CTRL_SIZE
));
907 if (chan
->fcs
== L2CAP_FCS_CRC16
) {
908 u16 fcs
= crc16(0, (u8
*)skb
->data
, skb
->len
);
909 put_unaligned_le16(fcs
, skb_put(skb
, L2CAP_FCS_SIZE
));
912 skb
->priority
= HCI_PRIO_MAX
;
916 static void l2cap_send_sframe(struct l2cap_chan
*chan
,
917 struct l2cap_ctrl
*control
)
922 BT_DBG("chan %p, control %p", chan
, control
);
924 if (!control
->sframe
)
927 if (test_and_clear_bit(CONN_SEND_FBIT
, &chan
->conn_state
) &&
931 if (control
->super
== L2CAP_SUPER_RR
)
932 clear_bit(CONN_RNR_SENT
, &chan
->conn_state
);
933 else if (control
->super
== L2CAP_SUPER_RNR
)
934 set_bit(CONN_RNR_SENT
, &chan
->conn_state
);
936 if (control
->super
!= L2CAP_SUPER_SREJ
) {
937 chan
->last_acked_seq
= control
->reqseq
;
938 __clear_ack_timer(chan
);
941 BT_DBG("reqseq %d, final %d, poll %d, super %d", control
->reqseq
,
942 control
->final
, control
->poll
, control
->super
);
944 if (test_bit(FLAG_EXT_CTRL
, &chan
->flags
))
945 control_field
= __pack_extended_control(control
);
947 control_field
= __pack_enhanced_control(control
);
949 skb
= l2cap_create_sframe_pdu(chan
, control_field
);
951 l2cap_do_send(chan
, skb
);
954 static void l2cap_send_rr_or_rnr(struct l2cap_chan
*chan
, bool poll
)
956 struct l2cap_ctrl control
;
958 BT_DBG("chan %p, poll %d", chan
, poll
);
960 memset(&control
, 0, sizeof(control
));
964 if (test_bit(CONN_LOCAL_BUSY
, &chan
->conn_state
))
965 control
.super
= L2CAP_SUPER_RNR
;
967 control
.super
= L2CAP_SUPER_RR
;
969 control
.reqseq
= chan
->buffer_seq
;
970 l2cap_send_sframe(chan
, &control
);
973 static inline int __l2cap_no_conn_pending(struct l2cap_chan
*chan
)
975 return !test_bit(CONF_CONNECT_PEND
, &chan
->conf_state
);
978 static bool __amp_capable(struct l2cap_chan
*chan
)
980 struct l2cap_conn
*conn
= chan
->conn
;
983 chan
->chan_policy
== BT_CHANNEL_POLICY_AMP_PREFERRED
&&
984 conn
->fixed_chan_mask
& L2CAP_FC_A2MP
)
990 void l2cap_send_conn_req(struct l2cap_chan
*chan
)
992 struct l2cap_conn
*conn
= chan
->conn
;
993 struct l2cap_conn_req req
;
995 req
.scid
= cpu_to_le16(chan
->scid
);
998 chan
->ident
= l2cap_get_ident(conn
);
1000 set_bit(CONF_CONNECT_PEND
, &chan
->conf_state
);
1002 l2cap_send_cmd(conn
, chan
->ident
, L2CAP_CONN_REQ
, sizeof(req
), &req
);
1005 static void l2cap_move_setup(struct l2cap_chan
*chan
)
1007 struct sk_buff
*skb
;
1009 BT_DBG("chan %p", chan
);
1011 if (chan
->mode
!= L2CAP_MODE_ERTM
)
1014 __clear_retrans_timer(chan
);
1015 __clear_monitor_timer(chan
);
1016 __clear_ack_timer(chan
);
1018 chan
->retry_count
= 0;
1019 skb_queue_walk(&chan
->tx_q
, skb
) {
1020 if (bt_cb(skb
)->control
.retries
)
1021 bt_cb(skb
)->control
.retries
= 1;
1026 chan
->expected_tx_seq
= chan
->buffer_seq
;
1028 clear_bit(CONN_REJ_ACT
, &chan
->conn_state
);
1029 clear_bit(CONN_SREJ_ACT
, &chan
->conn_state
);
1030 l2cap_seq_list_clear(&chan
->retrans_list
);
1031 l2cap_seq_list_clear(&chan
->srej_list
);
1032 skb_queue_purge(&chan
->srej_q
);
1034 chan
->tx_state
= L2CAP_TX_STATE_XMIT
;
1035 chan
->rx_state
= L2CAP_RX_STATE_MOVE
;
1037 set_bit(CONN_REMOTE_BUSY
, &chan
->conn_state
);
1040 static void l2cap_chan_ready(struct l2cap_chan
*chan
)
1042 /* This clears all conf flags, including CONF_NOT_COMPLETE */
1043 chan
->conf_state
= 0;
1044 __clear_chan_timer(chan
);
1046 chan
->state
= BT_CONNECTED
;
1048 chan
->ops
->ready(chan
);
/* Begin channel establishment: kick off AMP discovery for AMP-capable
 * channels, otherwise send an L2CAP connect request directly.
 */
static void l2cap_start_connection(struct l2cap_chan *chan)
{
	if (__amp_capable(chan)) {
		BT_DBG("chan %p AMP capable: discover AMPs", chan);
		a2mp_discover_amp(chan);
	} else {
		l2cap_send_conn_req(chan);
	}
}
1061 static void l2cap_do_start(struct l2cap_chan
*chan
)
1063 struct l2cap_conn
*conn
= chan
->conn
;
1065 if (conn
->hcon
->type
== LE_LINK
) {
1066 l2cap_chan_ready(chan
);
1070 if (conn
->info_state
& L2CAP_INFO_FEAT_MASK_REQ_SENT
) {
1071 if (!(conn
->info_state
& L2CAP_INFO_FEAT_MASK_REQ_DONE
))
1074 if (l2cap_chan_check_security(chan
) &&
1075 __l2cap_no_conn_pending(chan
)) {
1076 l2cap_start_connection(chan
);
1079 struct l2cap_info_req req
;
1080 req
.type
= __constant_cpu_to_le16(L2CAP_IT_FEAT_MASK
);
1082 conn
->info_state
|= L2CAP_INFO_FEAT_MASK_REQ_SENT
;
1083 conn
->info_ident
= l2cap_get_ident(conn
);
1085 schedule_delayed_work(&conn
->info_timer
, L2CAP_INFO_TIMEOUT
);
1087 l2cap_send_cmd(conn
, conn
->info_ident
, L2CAP_INFO_REQ
,
1092 static inline int l2cap_mode_supported(__u8 mode
, __u32 feat_mask
)
1094 u32 local_feat_mask
= l2cap_feat_mask
;
1096 local_feat_mask
|= L2CAP_FEAT_ERTM
| L2CAP_FEAT_STREAMING
;
1099 case L2CAP_MODE_ERTM
:
1100 return L2CAP_FEAT_ERTM
& feat_mask
& local_feat_mask
;
1101 case L2CAP_MODE_STREAMING
:
1102 return L2CAP_FEAT_STREAMING
& feat_mask
& local_feat_mask
;
1108 static void l2cap_send_disconn_req(struct l2cap_conn
*conn
,
1109 struct l2cap_chan
*chan
, int err
)
1111 struct sock
*sk
= chan
->sk
;
1112 struct l2cap_disconn_req req
;
1117 if (chan
->mode
== L2CAP_MODE_ERTM
&& chan
->state
== BT_CONNECTED
) {
1118 __clear_retrans_timer(chan
);
1119 __clear_monitor_timer(chan
);
1120 __clear_ack_timer(chan
);
1123 if (chan
->chan_type
== L2CAP_CHAN_CONN_FIX_A2MP
) {
1124 l2cap_state_change(chan
, BT_DISCONN
);
1128 req
.dcid
= cpu_to_le16(chan
->dcid
);
1129 req
.scid
= cpu_to_le16(chan
->scid
);
1130 l2cap_send_cmd(conn
, l2cap_get_ident(conn
), L2CAP_DISCONN_REQ
,
1134 __l2cap_state_change(chan
, BT_DISCONN
);
1135 __l2cap_chan_set_err(chan
, err
);
1139 /* ---- L2CAP connections ---- */
1140 static void l2cap_conn_start(struct l2cap_conn
*conn
)
1142 struct l2cap_chan
*chan
, *tmp
;
1144 BT_DBG("conn %p", conn
);
1146 mutex_lock(&conn
->chan_lock
);
1148 list_for_each_entry_safe(chan
, tmp
, &conn
->chan_l
, list
) {
1149 struct sock
*sk
= chan
->sk
;
1151 l2cap_chan_lock(chan
);
1153 if (chan
->chan_type
!= L2CAP_CHAN_CONN_ORIENTED
) {
1154 l2cap_chan_unlock(chan
);
1158 if (chan
->state
== BT_CONNECT
) {
1159 if (!l2cap_chan_check_security(chan
) ||
1160 !__l2cap_no_conn_pending(chan
)) {
1161 l2cap_chan_unlock(chan
);
1165 if (!l2cap_mode_supported(chan
->mode
, conn
->feat_mask
)
1166 && test_bit(CONF_STATE2_DEVICE
,
1167 &chan
->conf_state
)) {
1168 l2cap_chan_close(chan
, ECONNRESET
);
1169 l2cap_chan_unlock(chan
);
1173 l2cap_start_connection(chan
);
1175 } else if (chan
->state
== BT_CONNECT2
) {
1176 struct l2cap_conn_rsp rsp
;
1178 rsp
.scid
= cpu_to_le16(chan
->dcid
);
1179 rsp
.dcid
= cpu_to_le16(chan
->scid
);
1181 if (l2cap_chan_check_security(chan
)) {
1183 if (test_bit(BT_SK_DEFER_SETUP
,
1184 &bt_sk(sk
)->flags
)) {
1185 rsp
.result
= __constant_cpu_to_le16(L2CAP_CR_PEND
);
1186 rsp
.status
= __constant_cpu_to_le16(L2CAP_CS_AUTHOR_PEND
);
1187 chan
->ops
->defer(chan
);
1190 __l2cap_state_change(chan
, BT_CONFIG
);
1191 rsp
.result
= __constant_cpu_to_le16(L2CAP_CR_SUCCESS
);
1192 rsp
.status
= __constant_cpu_to_le16(L2CAP_CS_NO_INFO
);
1196 rsp
.result
= __constant_cpu_to_le16(L2CAP_CR_PEND
);
1197 rsp
.status
= __constant_cpu_to_le16(L2CAP_CS_AUTHEN_PEND
);
1200 l2cap_send_cmd(conn
, chan
->ident
, L2CAP_CONN_RSP
,
1203 if (test_bit(CONF_REQ_SENT
, &chan
->conf_state
) ||
1204 rsp
.result
!= L2CAP_CR_SUCCESS
) {
1205 l2cap_chan_unlock(chan
);
1209 set_bit(CONF_REQ_SENT
, &chan
->conf_state
);
1210 l2cap_send_cmd(conn
, l2cap_get_ident(conn
), L2CAP_CONF_REQ
,
1211 l2cap_build_conf_req(chan
, buf
), buf
);
1212 chan
->num_conf_req
++;
1215 l2cap_chan_unlock(chan
);
1218 mutex_unlock(&conn
->chan_lock
);
1221 /* Find socket with cid and source/destination bdaddr.
1222 * Returns closest match, locked.
1224 static struct l2cap_chan
*l2cap_global_chan_by_scid(int state
, u16 cid
,
1228 struct l2cap_chan
*c
, *c1
= NULL
;
1230 read_lock(&chan_list_lock
);
1232 list_for_each_entry(c
, &chan_list
, global_l
) {
1233 struct sock
*sk
= c
->sk
;
1235 if (state
&& c
->state
!= state
)
1238 if (c
->scid
== cid
) {
1239 int src_match
, dst_match
;
1240 int src_any
, dst_any
;
1243 src_match
= !bacmp(&bt_sk(sk
)->src
, src
);
1244 dst_match
= !bacmp(&bt_sk(sk
)->dst
, dst
);
1245 if (src_match
&& dst_match
) {
1246 read_unlock(&chan_list_lock
);
1251 src_any
= !bacmp(&bt_sk(sk
)->src
, BDADDR_ANY
);
1252 dst_any
= !bacmp(&bt_sk(sk
)->dst
, BDADDR_ANY
);
1253 if ((src_match
&& dst_any
) || (src_any
&& dst_match
) ||
1254 (src_any
&& dst_any
))
1259 read_unlock(&chan_list_lock
);
1264 static void l2cap_le_conn_ready(struct l2cap_conn
*conn
)
1266 struct sock
*parent
, *sk
;
1267 struct l2cap_chan
*chan
, *pchan
;
1271 /* Check if we have socket listening on cid */
1272 pchan
= l2cap_global_chan_by_scid(BT_LISTEN
, L2CAP_CID_LE_DATA
,
1273 conn
->src
, conn
->dst
);
1281 chan
= pchan
->ops
->new_connection(pchan
);
1287 hci_conn_hold(conn
->hcon
);
1288 conn
->hcon
->disc_timeout
= HCI_DISCONN_TIMEOUT
;
1290 bacpy(&bt_sk(sk
)->src
, conn
->src
);
1291 bacpy(&bt_sk(sk
)->dst
, conn
->dst
);
1293 l2cap_chan_add(conn
, chan
);
1295 l2cap_chan_ready(chan
);
1298 release_sock(parent
);
1301 static void l2cap_conn_ready(struct l2cap_conn
*conn
)
1303 struct l2cap_chan
*chan
;
1304 struct hci_conn
*hcon
= conn
->hcon
;
1306 BT_DBG("conn %p", conn
);
1308 if (!hcon
->out
&& hcon
->type
== LE_LINK
)
1309 l2cap_le_conn_ready(conn
);
1311 if (hcon
->out
&& hcon
->type
== LE_LINK
)
1312 smp_conn_security(hcon
, hcon
->pending_sec_level
);
1314 mutex_lock(&conn
->chan_lock
);
1316 list_for_each_entry(chan
, &conn
->chan_l
, list
) {
1318 l2cap_chan_lock(chan
);
1320 if (chan
->chan_type
== L2CAP_CHAN_CONN_FIX_A2MP
) {
1321 l2cap_chan_unlock(chan
);
1325 if (hcon
->type
== LE_LINK
) {
1326 if (smp_conn_security(hcon
, chan
->sec_level
))
1327 l2cap_chan_ready(chan
);
1329 } else if (chan
->chan_type
!= L2CAP_CHAN_CONN_ORIENTED
) {
1330 struct sock
*sk
= chan
->sk
;
1331 __clear_chan_timer(chan
);
1333 __l2cap_state_change(chan
, BT_CONNECTED
);
1334 sk
->sk_state_change(sk
);
1337 } else if (chan
->state
== BT_CONNECT
)
1338 l2cap_do_start(chan
);
1340 l2cap_chan_unlock(chan
);
1343 mutex_unlock(&conn
->chan_lock
);
1346 /* Notify sockets that we cannot guarantee reliability anymore */
1347 static void l2cap_conn_unreliable(struct l2cap_conn
*conn
, int err
)
1349 struct l2cap_chan
*chan
;
1351 BT_DBG("conn %p", conn
);
1353 mutex_lock(&conn
->chan_lock
);
1355 list_for_each_entry(chan
, &conn
->chan_l
, list
) {
1356 if (test_bit(FLAG_FORCE_RELIABLE
, &chan
->flags
))
1357 l2cap_chan_set_err(chan
, err
);
1360 mutex_unlock(&conn
->chan_lock
);
1363 static void l2cap_info_timeout(struct work_struct
*work
)
1365 struct l2cap_conn
*conn
= container_of(work
, struct l2cap_conn
,
1368 conn
->info_state
|= L2CAP_INFO_FEAT_MASK_REQ_DONE
;
1369 conn
->info_ident
= 0;
1371 l2cap_conn_start(conn
);
1374 static void l2cap_conn_del(struct hci_conn
*hcon
, int err
)
1376 struct l2cap_conn
*conn
= hcon
->l2cap_data
;
1377 struct l2cap_chan
*chan
, *l
;
1382 BT_DBG("hcon %p conn %p, err %d", hcon
, conn
, err
);
1384 kfree_skb(conn
->rx_skb
);
1386 mutex_lock(&conn
->chan_lock
);
1389 list_for_each_entry_safe(chan
, l
, &conn
->chan_l
, list
) {
1390 l2cap_chan_hold(chan
);
1391 l2cap_chan_lock(chan
);
1393 l2cap_chan_del(chan
, err
);
1395 l2cap_chan_unlock(chan
);
1397 chan
->ops
->close(chan
);
1398 l2cap_chan_put(chan
);
1401 mutex_unlock(&conn
->chan_lock
);
1403 hci_chan_del(conn
->hchan
);
1405 if (conn
->info_state
& L2CAP_INFO_FEAT_MASK_REQ_SENT
)
1406 cancel_delayed_work_sync(&conn
->info_timer
);
1408 if (test_and_clear_bit(HCI_CONN_LE_SMP_PEND
, &hcon
->flags
)) {
1409 cancel_delayed_work_sync(&conn
->security_timer
);
1410 smp_chan_destroy(conn
);
1413 hcon
->l2cap_data
= NULL
;
1417 static void security_timeout(struct work_struct
*work
)
1419 struct l2cap_conn
*conn
= container_of(work
, struct l2cap_conn
,
1420 security_timer
.work
);
1422 BT_DBG("conn %p", conn
);
1424 if (test_and_clear_bit(HCI_CONN_LE_SMP_PEND
, &conn
->hcon
->flags
)) {
1425 smp_chan_destroy(conn
);
1426 l2cap_conn_del(conn
->hcon
, ETIMEDOUT
);
1430 static struct l2cap_conn
*l2cap_conn_add(struct hci_conn
*hcon
, u8 status
)
1432 struct l2cap_conn
*conn
= hcon
->l2cap_data
;
1433 struct hci_chan
*hchan
;
1438 hchan
= hci_chan_create(hcon
);
1442 conn
= kzalloc(sizeof(struct l2cap_conn
), GFP_KERNEL
);
1444 hci_chan_del(hchan
);
1448 hcon
->l2cap_data
= conn
;
1450 conn
->hchan
= hchan
;
1452 BT_DBG("hcon %p conn %p hchan %p", hcon
, conn
, hchan
);
1454 switch (hcon
->type
) {
1456 conn
->mtu
= hcon
->hdev
->block_mtu
;
1460 if (hcon
->hdev
->le_mtu
) {
1461 conn
->mtu
= hcon
->hdev
->le_mtu
;
1467 conn
->mtu
= hcon
->hdev
->acl_mtu
;
1471 conn
->src
= &hcon
->hdev
->bdaddr
;
1472 conn
->dst
= &hcon
->dst
;
1474 conn
->feat_mask
= 0;
1476 spin_lock_init(&conn
->lock
);
1477 mutex_init(&conn
->chan_lock
);
1479 INIT_LIST_HEAD(&conn
->chan_l
);
1481 if (hcon
->type
== LE_LINK
)
1482 INIT_DELAYED_WORK(&conn
->security_timer
, security_timeout
);
1484 INIT_DELAYED_WORK(&conn
->info_timer
, l2cap_info_timeout
);
1486 conn
->disc_reason
= HCI_ERROR_REMOTE_USER_TERM
;
1491 /* ---- Socket interface ---- */
1493 /* Find socket with psm and source / destination bdaddr.
1494 * Returns closest match.
1496 static struct l2cap_chan
*l2cap_global_chan_by_psm(int state
, __le16 psm
,
1500 struct l2cap_chan
*c
, *c1
= NULL
;
1502 read_lock(&chan_list_lock
);
1504 list_for_each_entry(c
, &chan_list
, global_l
) {
1505 struct sock
*sk
= c
->sk
;
1507 if (state
&& c
->state
!= state
)
1510 if (c
->psm
== psm
) {
1511 int src_match
, dst_match
;
1512 int src_any
, dst_any
;
1515 src_match
= !bacmp(&bt_sk(sk
)->src
, src
);
1516 dst_match
= !bacmp(&bt_sk(sk
)->dst
, dst
);
1517 if (src_match
&& dst_match
) {
1518 read_unlock(&chan_list_lock
);
1523 src_any
= !bacmp(&bt_sk(sk
)->src
, BDADDR_ANY
);
1524 dst_any
= !bacmp(&bt_sk(sk
)->dst
, BDADDR_ANY
);
1525 if ((src_match
&& dst_any
) || (src_any
&& dst_match
) ||
1526 (src_any
&& dst_any
))
1531 read_unlock(&chan_list_lock
);
1536 int l2cap_chan_connect(struct l2cap_chan
*chan
, __le16 psm
, u16 cid
,
1537 bdaddr_t
*dst
, u8 dst_type
)
1539 struct sock
*sk
= chan
->sk
;
1540 bdaddr_t
*src
= &bt_sk(sk
)->src
;
1541 struct l2cap_conn
*conn
;
1542 struct hci_conn
*hcon
;
1543 struct hci_dev
*hdev
;
1547 BT_DBG("%pMR -> %pMR (type %u) psm 0x%2.2x", src
, dst
,
1548 dst_type
, __le16_to_cpu(psm
));
1550 hdev
= hci_get_route(dst
, src
);
1552 return -EHOSTUNREACH
;
1556 l2cap_chan_lock(chan
);
1558 /* PSM must be odd and lsb of upper byte must be 0 */
1559 if ((__le16_to_cpu(psm
) & 0x0101) != 0x0001 && !cid
&&
1560 chan
->chan_type
!= L2CAP_CHAN_RAW
) {
1565 if (chan
->chan_type
== L2CAP_CHAN_CONN_ORIENTED
&& !(psm
|| cid
)) {
1570 switch (chan
->mode
) {
1571 case L2CAP_MODE_BASIC
:
1573 case L2CAP_MODE_ERTM
:
1574 case L2CAP_MODE_STREAMING
:
1583 switch (chan
->state
) {
1587 /* Already connecting */
1592 /* Already connected */
1606 /* Set destination address and psm */
1608 bacpy(&bt_sk(sk
)->dst
, dst
);
1614 auth_type
= l2cap_get_auth_type(chan
);
1616 if (chan
->dcid
== L2CAP_CID_LE_DATA
)
1617 hcon
= hci_connect(hdev
, LE_LINK
, dst
, dst_type
,
1618 chan
->sec_level
, auth_type
);
1620 hcon
= hci_connect(hdev
, ACL_LINK
, dst
, dst_type
,
1621 chan
->sec_level
, auth_type
);
1624 err
= PTR_ERR(hcon
);
1628 conn
= l2cap_conn_add(hcon
, 0);
1635 if (hcon
->type
== LE_LINK
) {
1638 if (!list_empty(&conn
->chan_l
)) {
1647 /* Update source addr of the socket */
1648 bacpy(src
, conn
->src
);
1650 l2cap_chan_unlock(chan
);
1651 l2cap_chan_add(conn
, chan
);
1652 l2cap_chan_lock(chan
);
1654 l2cap_state_change(chan
, BT_CONNECT
);
1655 __set_chan_timer(chan
, sk
->sk_sndtimeo
);
1657 if (hcon
->state
== BT_CONNECTED
) {
1658 if (chan
->chan_type
!= L2CAP_CHAN_CONN_ORIENTED
) {
1659 __clear_chan_timer(chan
);
1660 if (l2cap_chan_check_security(chan
))
1661 l2cap_state_change(chan
, BT_CONNECTED
);
1663 l2cap_do_start(chan
);
1669 l2cap_chan_unlock(chan
);
1670 hci_dev_unlock(hdev
);
1675 int __l2cap_wait_ack(struct sock
*sk
)
1677 struct l2cap_chan
*chan
= l2cap_pi(sk
)->chan
;
1678 DECLARE_WAITQUEUE(wait
, current
);
1682 add_wait_queue(sk_sleep(sk
), &wait
);
1683 set_current_state(TASK_INTERRUPTIBLE
);
1684 while (chan
->unacked_frames
> 0 && chan
->conn
) {
1688 if (signal_pending(current
)) {
1689 err
= sock_intr_errno(timeo
);
1694 timeo
= schedule_timeout(timeo
);
1696 set_current_state(TASK_INTERRUPTIBLE
);
1698 err
= sock_error(sk
);
1702 set_current_state(TASK_RUNNING
);
1703 remove_wait_queue(sk_sleep(sk
), &wait
);
1707 static void l2cap_monitor_timeout(struct work_struct
*work
)
1709 struct l2cap_chan
*chan
= container_of(work
, struct l2cap_chan
,
1710 monitor_timer
.work
);
1712 BT_DBG("chan %p", chan
);
1714 l2cap_chan_lock(chan
);
1717 l2cap_chan_unlock(chan
);
1718 l2cap_chan_put(chan
);
1722 l2cap_tx(chan
, NULL
, NULL
, L2CAP_EV_MONITOR_TO
);
1724 l2cap_chan_unlock(chan
);
1725 l2cap_chan_put(chan
);
1728 static void l2cap_retrans_timeout(struct work_struct
*work
)
1730 struct l2cap_chan
*chan
= container_of(work
, struct l2cap_chan
,
1731 retrans_timer
.work
);
1733 BT_DBG("chan %p", chan
);
1735 l2cap_chan_lock(chan
);
1738 l2cap_chan_unlock(chan
);
1739 l2cap_chan_put(chan
);
1743 l2cap_tx(chan
, NULL
, NULL
, L2CAP_EV_RETRANS_TO
);
1744 l2cap_chan_unlock(chan
);
1745 l2cap_chan_put(chan
);
1748 static void l2cap_streaming_send(struct l2cap_chan
*chan
,
1749 struct sk_buff_head
*skbs
)
1751 struct sk_buff
*skb
;
1752 struct l2cap_ctrl
*control
;
1754 BT_DBG("chan %p, skbs %p", chan
, skbs
);
1756 skb_queue_splice_tail_init(skbs
, &chan
->tx_q
);
1758 while (!skb_queue_empty(&chan
->tx_q
)) {
1760 skb
= skb_dequeue(&chan
->tx_q
);
1762 bt_cb(skb
)->control
.retries
= 1;
1763 control
= &bt_cb(skb
)->control
;
1765 control
->reqseq
= 0;
1766 control
->txseq
= chan
->next_tx_seq
;
1768 __pack_control(chan
, control
, skb
);
1770 if (chan
->fcs
== L2CAP_FCS_CRC16
) {
1771 u16 fcs
= crc16(0, (u8
*) skb
->data
, skb
->len
);
1772 put_unaligned_le16(fcs
, skb_put(skb
, L2CAP_FCS_SIZE
));
1775 l2cap_do_send(chan
, skb
);
1777 BT_DBG("Sent txseq %u", control
->txseq
);
1779 chan
->next_tx_seq
= __next_seq(chan
, chan
->next_tx_seq
);
1780 chan
->frames_sent
++;
1784 static int l2cap_ertm_send(struct l2cap_chan
*chan
)
1786 struct sk_buff
*skb
, *tx_skb
;
1787 struct l2cap_ctrl
*control
;
1790 BT_DBG("chan %p", chan
);
1792 if (chan
->state
!= BT_CONNECTED
)
1795 if (test_bit(CONN_REMOTE_BUSY
, &chan
->conn_state
))
1798 while (chan
->tx_send_head
&&
1799 chan
->unacked_frames
< chan
->remote_tx_win
&&
1800 chan
->tx_state
== L2CAP_TX_STATE_XMIT
) {
1802 skb
= chan
->tx_send_head
;
1804 bt_cb(skb
)->control
.retries
= 1;
1805 control
= &bt_cb(skb
)->control
;
1807 if (test_and_clear_bit(CONN_SEND_FBIT
, &chan
->conn_state
))
1810 control
->reqseq
= chan
->buffer_seq
;
1811 chan
->last_acked_seq
= chan
->buffer_seq
;
1812 control
->txseq
= chan
->next_tx_seq
;
1814 __pack_control(chan
, control
, skb
);
1816 if (chan
->fcs
== L2CAP_FCS_CRC16
) {
1817 u16 fcs
= crc16(0, (u8
*) skb
->data
, skb
->len
);
1818 put_unaligned_le16(fcs
, skb_put(skb
, L2CAP_FCS_SIZE
));
1821 /* Clone after data has been modified. Data is assumed to be
1822 read-only (for locking purposes) on cloned sk_buffs.
1824 tx_skb
= skb_clone(skb
, GFP_KERNEL
);
1829 __set_retrans_timer(chan
);
1831 chan
->next_tx_seq
= __next_seq(chan
, chan
->next_tx_seq
);
1832 chan
->unacked_frames
++;
1833 chan
->frames_sent
++;
1836 if (skb_queue_is_last(&chan
->tx_q
, skb
))
1837 chan
->tx_send_head
= NULL
;
1839 chan
->tx_send_head
= skb_queue_next(&chan
->tx_q
, skb
);
1841 l2cap_do_send(chan
, tx_skb
);
1842 BT_DBG("Sent txseq %u", control
->txseq
);
1845 BT_DBG("Sent %d, %u unacked, %u in ERTM queue", sent
,
1846 chan
->unacked_frames
, skb_queue_len(&chan
->tx_q
));
1851 static void l2cap_ertm_resend(struct l2cap_chan
*chan
)
1853 struct l2cap_ctrl control
;
1854 struct sk_buff
*skb
;
1855 struct sk_buff
*tx_skb
;
1858 BT_DBG("chan %p", chan
);
1860 if (test_bit(CONN_REMOTE_BUSY
, &chan
->conn_state
))
1863 while (chan
->retrans_list
.head
!= L2CAP_SEQ_LIST_CLEAR
) {
1864 seq
= l2cap_seq_list_pop(&chan
->retrans_list
);
1866 skb
= l2cap_ertm_seq_in_queue(&chan
->tx_q
, seq
);
1868 BT_DBG("Error: Can't retransmit seq %d, frame missing",
1873 bt_cb(skb
)->control
.retries
++;
1874 control
= bt_cb(skb
)->control
;
1876 if (chan
->max_tx
!= 0 &&
1877 bt_cb(skb
)->control
.retries
> chan
->max_tx
) {
1878 BT_DBG("Retry limit exceeded (%d)", chan
->max_tx
);
1879 l2cap_send_disconn_req(chan
->conn
, chan
, ECONNRESET
);
1880 l2cap_seq_list_clear(&chan
->retrans_list
);
1884 control
.reqseq
= chan
->buffer_seq
;
1885 if (test_and_clear_bit(CONN_SEND_FBIT
, &chan
->conn_state
))
1890 if (skb_cloned(skb
)) {
1891 /* Cloned sk_buffs are read-only, so we need a
1894 tx_skb
= skb_copy(skb
, GFP_KERNEL
);
1896 tx_skb
= skb_clone(skb
, GFP_KERNEL
);
1900 l2cap_seq_list_clear(&chan
->retrans_list
);
1904 /* Update skb contents */
1905 if (test_bit(FLAG_EXT_CTRL
, &chan
->flags
)) {
1906 put_unaligned_le32(__pack_extended_control(&control
),
1907 tx_skb
->data
+ L2CAP_HDR_SIZE
);
1909 put_unaligned_le16(__pack_enhanced_control(&control
),
1910 tx_skb
->data
+ L2CAP_HDR_SIZE
);
1913 if (chan
->fcs
== L2CAP_FCS_CRC16
) {
1914 u16 fcs
= crc16(0, (u8
*) tx_skb
->data
, tx_skb
->len
);
1915 put_unaligned_le16(fcs
, skb_put(tx_skb
,
1919 l2cap_do_send(chan
, tx_skb
);
1921 BT_DBG("Resent txseq %d", control
.txseq
);
1923 chan
->last_acked_seq
= chan
->buffer_seq
;
1927 static void l2cap_retransmit(struct l2cap_chan
*chan
,
1928 struct l2cap_ctrl
*control
)
1930 BT_DBG("chan %p, control %p", chan
, control
);
1932 l2cap_seq_list_append(&chan
->retrans_list
, control
->reqseq
);
1933 l2cap_ertm_resend(chan
);
1936 static void l2cap_retransmit_all(struct l2cap_chan
*chan
,
1937 struct l2cap_ctrl
*control
)
1939 struct sk_buff
*skb
;
1941 BT_DBG("chan %p, control %p", chan
, control
);
1944 set_bit(CONN_SEND_FBIT
, &chan
->conn_state
);
1946 l2cap_seq_list_clear(&chan
->retrans_list
);
1948 if (test_bit(CONN_REMOTE_BUSY
, &chan
->conn_state
))
1951 if (chan
->unacked_frames
) {
1952 skb_queue_walk(&chan
->tx_q
, skb
) {
1953 if (bt_cb(skb
)->control
.txseq
== control
->reqseq
||
1954 skb
== chan
->tx_send_head
)
1958 skb_queue_walk_from(&chan
->tx_q
, skb
) {
1959 if (skb
== chan
->tx_send_head
)
1962 l2cap_seq_list_append(&chan
->retrans_list
,
1963 bt_cb(skb
)->control
.txseq
);
1966 l2cap_ertm_resend(chan
);
1970 static void l2cap_send_ack(struct l2cap_chan
*chan
)
1972 struct l2cap_ctrl control
;
1973 u16 frames_to_ack
= __seq_offset(chan
, chan
->buffer_seq
,
1974 chan
->last_acked_seq
);
1977 BT_DBG("chan %p last_acked_seq %d buffer_seq %d",
1978 chan
, chan
->last_acked_seq
, chan
->buffer_seq
);
1980 memset(&control
, 0, sizeof(control
));
1983 if (test_bit(CONN_LOCAL_BUSY
, &chan
->conn_state
) &&
1984 chan
->rx_state
== L2CAP_RX_STATE_RECV
) {
1985 __clear_ack_timer(chan
);
1986 control
.super
= L2CAP_SUPER_RNR
;
1987 control
.reqseq
= chan
->buffer_seq
;
1988 l2cap_send_sframe(chan
, &control
);
1990 if (!test_bit(CONN_REMOTE_BUSY
, &chan
->conn_state
)) {
1991 l2cap_ertm_send(chan
);
1992 /* If any i-frames were sent, they included an ack */
1993 if (chan
->buffer_seq
== chan
->last_acked_seq
)
1997 /* Ack now if the window is 3/4ths full.
1998 * Calculate without mul or div
2000 threshold
= chan
->ack_win
;
2001 threshold
+= threshold
<< 1;
2004 BT_DBG("frames_to_ack %u, threshold %d", frames_to_ack
,
2007 if (frames_to_ack
>= threshold
) {
2008 __clear_ack_timer(chan
);
2009 control
.super
= L2CAP_SUPER_RR
;
2010 control
.reqseq
= chan
->buffer_seq
;
2011 l2cap_send_sframe(chan
, &control
);
2016 __set_ack_timer(chan
);
2020 static inline int l2cap_skbuff_fromiovec(struct l2cap_chan
*chan
,
2021 struct msghdr
*msg
, int len
,
2022 int count
, struct sk_buff
*skb
)
2024 struct l2cap_conn
*conn
= chan
->conn
;
2025 struct sk_buff
**frag
;
2028 if (memcpy_fromiovec(skb_put(skb
, count
), msg
->msg_iov
, count
))
2034 /* Continuation fragments (no L2CAP header) */
2035 frag
= &skb_shinfo(skb
)->frag_list
;
2037 struct sk_buff
*tmp
;
2039 count
= min_t(unsigned int, conn
->mtu
, len
);
2041 tmp
= chan
->ops
->alloc_skb(chan
, count
,
2042 msg
->msg_flags
& MSG_DONTWAIT
);
2044 return PTR_ERR(tmp
);
2048 if (memcpy_fromiovec(skb_put(*frag
, count
), msg
->msg_iov
, count
))
2051 (*frag
)->priority
= skb
->priority
;
2056 skb
->len
+= (*frag
)->len
;
2057 skb
->data_len
+= (*frag
)->len
;
2059 frag
= &(*frag
)->next
;
2065 static struct sk_buff
*l2cap_create_connless_pdu(struct l2cap_chan
*chan
,
2066 struct msghdr
*msg
, size_t len
,
2069 struct l2cap_conn
*conn
= chan
->conn
;
2070 struct sk_buff
*skb
;
2071 int err
, count
, hlen
= L2CAP_HDR_SIZE
+ L2CAP_PSMLEN_SIZE
;
2072 struct l2cap_hdr
*lh
;
2074 BT_DBG("chan %p len %zu priority %u", chan
, len
, priority
);
2076 count
= min_t(unsigned int, (conn
->mtu
- hlen
), len
);
2078 skb
= chan
->ops
->alloc_skb(chan
, count
+ hlen
,
2079 msg
->msg_flags
& MSG_DONTWAIT
);
2083 skb
->priority
= priority
;
2085 /* Create L2CAP header */
2086 lh
= (struct l2cap_hdr
*) skb_put(skb
, L2CAP_HDR_SIZE
);
2087 lh
->cid
= cpu_to_le16(chan
->dcid
);
2088 lh
->len
= cpu_to_le16(len
+ L2CAP_PSMLEN_SIZE
);
2089 put_unaligned(chan
->psm
, skb_put(skb
, L2CAP_PSMLEN_SIZE
));
2091 err
= l2cap_skbuff_fromiovec(chan
, msg
, len
, count
, skb
);
2092 if (unlikely(err
< 0)) {
2094 return ERR_PTR(err
);
2099 static struct sk_buff
*l2cap_create_basic_pdu(struct l2cap_chan
*chan
,
2100 struct msghdr
*msg
, size_t len
,
2103 struct l2cap_conn
*conn
= chan
->conn
;
2104 struct sk_buff
*skb
;
2106 struct l2cap_hdr
*lh
;
2108 BT_DBG("chan %p len %zu", chan
, len
);
2110 count
= min_t(unsigned int, (conn
->mtu
- L2CAP_HDR_SIZE
), len
);
2112 skb
= chan
->ops
->alloc_skb(chan
, count
+ L2CAP_HDR_SIZE
,
2113 msg
->msg_flags
& MSG_DONTWAIT
);
2117 skb
->priority
= priority
;
2119 /* Create L2CAP header */
2120 lh
= (struct l2cap_hdr
*) skb_put(skb
, L2CAP_HDR_SIZE
);
2121 lh
->cid
= cpu_to_le16(chan
->dcid
);
2122 lh
->len
= cpu_to_le16(len
);
2124 err
= l2cap_skbuff_fromiovec(chan
, msg
, len
, count
, skb
);
2125 if (unlikely(err
< 0)) {
2127 return ERR_PTR(err
);
2132 static struct sk_buff
*l2cap_create_iframe_pdu(struct l2cap_chan
*chan
,
2133 struct msghdr
*msg
, size_t len
,
2136 struct l2cap_conn
*conn
= chan
->conn
;
2137 struct sk_buff
*skb
;
2138 int err
, count
, hlen
;
2139 struct l2cap_hdr
*lh
;
2141 BT_DBG("chan %p len %zu", chan
, len
);
2144 return ERR_PTR(-ENOTCONN
);
2146 hlen
= __ertm_hdr_size(chan
);
2149 hlen
+= L2CAP_SDULEN_SIZE
;
2151 if (chan
->fcs
== L2CAP_FCS_CRC16
)
2152 hlen
+= L2CAP_FCS_SIZE
;
2154 count
= min_t(unsigned int, (conn
->mtu
- hlen
), len
);
2156 skb
= chan
->ops
->alloc_skb(chan
, count
+ hlen
,
2157 msg
->msg_flags
& MSG_DONTWAIT
);
2161 /* Create L2CAP header */
2162 lh
= (struct l2cap_hdr
*) skb_put(skb
, L2CAP_HDR_SIZE
);
2163 lh
->cid
= cpu_to_le16(chan
->dcid
);
2164 lh
->len
= cpu_to_le16(len
+ (hlen
- L2CAP_HDR_SIZE
));
2166 /* Control header is populated later */
2167 if (test_bit(FLAG_EXT_CTRL
, &chan
->flags
))
2168 put_unaligned_le32(0, skb_put(skb
, L2CAP_EXT_CTRL_SIZE
));
2170 put_unaligned_le16(0, skb_put(skb
, L2CAP_ENH_CTRL_SIZE
));
2173 put_unaligned_le16(sdulen
, skb_put(skb
, L2CAP_SDULEN_SIZE
));
2175 err
= l2cap_skbuff_fromiovec(chan
, msg
, len
, count
, skb
);
2176 if (unlikely(err
< 0)) {
2178 return ERR_PTR(err
);
2181 bt_cb(skb
)->control
.fcs
= chan
->fcs
;
2182 bt_cb(skb
)->control
.retries
= 0;
2186 static int l2cap_segment_sdu(struct l2cap_chan
*chan
,
2187 struct sk_buff_head
*seg_queue
,
2188 struct msghdr
*msg
, size_t len
)
2190 struct sk_buff
*skb
;
2195 BT_DBG("chan %p, msg %p, len %zu", chan
, msg
, len
);
2197 /* It is critical that ERTM PDUs fit in a single HCI fragment,
2198 * so fragmented skbs are not used. The HCI layer's handling
2199 * of fragmented skbs is not compatible with ERTM's queueing.
2202 /* PDU size is derived from the HCI MTU */
2203 pdu_len
= chan
->conn
->mtu
;
2205 pdu_len
= min_t(size_t, pdu_len
, L2CAP_BREDR_MAX_PAYLOAD
);
2207 /* Adjust for largest possible L2CAP overhead. */
2209 pdu_len
-= L2CAP_FCS_SIZE
;
2211 pdu_len
-= __ertm_hdr_size(chan
);
2213 /* Remote device may have requested smaller PDUs */
2214 pdu_len
= min_t(size_t, pdu_len
, chan
->remote_mps
);
2216 if (len
<= pdu_len
) {
2217 sar
= L2CAP_SAR_UNSEGMENTED
;
2221 sar
= L2CAP_SAR_START
;
2223 pdu_len
-= L2CAP_SDULEN_SIZE
;
2227 skb
= l2cap_create_iframe_pdu(chan
, msg
, pdu_len
, sdu_len
);
2230 __skb_queue_purge(seg_queue
);
2231 return PTR_ERR(skb
);
2234 bt_cb(skb
)->control
.sar
= sar
;
2235 __skb_queue_tail(seg_queue
, skb
);
2240 pdu_len
+= L2CAP_SDULEN_SIZE
;
2243 if (len
<= pdu_len
) {
2244 sar
= L2CAP_SAR_END
;
2247 sar
= L2CAP_SAR_CONTINUE
;
2254 int l2cap_chan_send(struct l2cap_chan
*chan
, struct msghdr
*msg
, size_t len
,
2257 struct sk_buff
*skb
;
2259 struct sk_buff_head seg_queue
;
2261 /* Connectionless channel */
2262 if (chan
->chan_type
== L2CAP_CHAN_CONN_LESS
) {
2263 skb
= l2cap_create_connless_pdu(chan
, msg
, len
, priority
);
2265 return PTR_ERR(skb
);
2267 l2cap_do_send(chan
, skb
);
2271 switch (chan
->mode
) {
2272 case L2CAP_MODE_BASIC
:
2273 /* Check outgoing MTU */
2274 if (len
> chan
->omtu
)
2277 /* Create a basic PDU */
2278 skb
= l2cap_create_basic_pdu(chan
, msg
, len
, priority
);
2280 return PTR_ERR(skb
);
2282 l2cap_do_send(chan
, skb
);
2286 case L2CAP_MODE_ERTM
:
2287 case L2CAP_MODE_STREAMING
:
2288 /* Check outgoing MTU */
2289 if (len
> chan
->omtu
) {
2294 __skb_queue_head_init(&seg_queue
);
2296 /* Do segmentation before calling in to the state machine,
2297 * since it's possible to block while waiting for memory
2300 err
= l2cap_segment_sdu(chan
, &seg_queue
, msg
, len
);
2302 /* The channel could have been closed while segmenting,
2303 * check that it is still connected.
2305 if (chan
->state
!= BT_CONNECTED
) {
2306 __skb_queue_purge(&seg_queue
);
2313 if (chan
->mode
== L2CAP_MODE_ERTM
)
2314 l2cap_tx(chan
, NULL
, &seg_queue
, L2CAP_EV_DATA_REQUEST
);
2316 l2cap_streaming_send(chan
, &seg_queue
);
2320 /* If the skbs were not queued for sending, they'll still be in
2321 * seg_queue and need to be purged.
2323 __skb_queue_purge(&seg_queue
);
2327 BT_DBG("bad state %1.1x", chan
->mode
);
2334 static void l2cap_send_srej(struct l2cap_chan
*chan
, u16 txseq
)
2336 struct l2cap_ctrl control
;
2339 BT_DBG("chan %p, txseq %u", chan
, txseq
);
2341 memset(&control
, 0, sizeof(control
));
2343 control
.super
= L2CAP_SUPER_SREJ
;
2345 for (seq
= chan
->expected_tx_seq
; seq
!= txseq
;
2346 seq
= __next_seq(chan
, seq
)) {
2347 if (!l2cap_ertm_seq_in_queue(&chan
->srej_q
, seq
)) {
2348 control
.reqseq
= seq
;
2349 l2cap_send_sframe(chan
, &control
);
2350 l2cap_seq_list_append(&chan
->srej_list
, seq
);
2354 chan
->expected_tx_seq
= __next_seq(chan
, txseq
);
2357 static void l2cap_send_srej_tail(struct l2cap_chan
*chan
)
2359 struct l2cap_ctrl control
;
2361 BT_DBG("chan %p", chan
);
2363 if (chan
->srej_list
.tail
== L2CAP_SEQ_LIST_CLEAR
)
2366 memset(&control
, 0, sizeof(control
));
2368 control
.super
= L2CAP_SUPER_SREJ
;
2369 control
.reqseq
= chan
->srej_list
.tail
;
2370 l2cap_send_sframe(chan
, &control
);
2373 static void l2cap_send_srej_list(struct l2cap_chan
*chan
, u16 txseq
)
2375 struct l2cap_ctrl control
;
2379 BT_DBG("chan %p, txseq %u", chan
, txseq
);
2381 memset(&control
, 0, sizeof(control
));
2383 control
.super
= L2CAP_SUPER_SREJ
;
2385 /* Capture initial list head to allow only one pass through the list. */
2386 initial_head
= chan
->srej_list
.head
;
2389 seq
= l2cap_seq_list_pop(&chan
->srej_list
);
2390 if (seq
== txseq
|| seq
== L2CAP_SEQ_LIST_CLEAR
)
2393 control
.reqseq
= seq
;
2394 l2cap_send_sframe(chan
, &control
);
2395 l2cap_seq_list_append(&chan
->srej_list
, seq
);
2396 } while (chan
->srej_list
.head
!= initial_head
);
2399 static void l2cap_process_reqseq(struct l2cap_chan
*chan
, u16 reqseq
)
2401 struct sk_buff
*acked_skb
;
2404 BT_DBG("chan %p, reqseq %u", chan
, reqseq
);
2406 if (chan
->unacked_frames
== 0 || reqseq
== chan
->expected_ack_seq
)
2409 BT_DBG("expected_ack_seq %u, unacked_frames %u",
2410 chan
->expected_ack_seq
, chan
->unacked_frames
);
2412 for (ackseq
= chan
->expected_ack_seq
; ackseq
!= reqseq
;
2413 ackseq
= __next_seq(chan
, ackseq
)) {
2415 acked_skb
= l2cap_ertm_seq_in_queue(&chan
->tx_q
, ackseq
);
2417 skb_unlink(acked_skb
, &chan
->tx_q
);
2418 kfree_skb(acked_skb
);
2419 chan
->unacked_frames
--;
2423 chan
->expected_ack_seq
= reqseq
;
2425 if (chan
->unacked_frames
== 0)
2426 __clear_retrans_timer(chan
);
2428 BT_DBG("unacked_frames %u", chan
->unacked_frames
);
2431 static void l2cap_abort_rx_srej_sent(struct l2cap_chan
*chan
)
2433 BT_DBG("chan %p", chan
);
2435 chan
->expected_tx_seq
= chan
->buffer_seq
;
2436 l2cap_seq_list_clear(&chan
->srej_list
);
2437 skb_queue_purge(&chan
->srej_q
);
2438 chan
->rx_state
= L2CAP_RX_STATE_RECV
;
2441 static void l2cap_tx_state_xmit(struct l2cap_chan
*chan
,
2442 struct l2cap_ctrl
*control
,
2443 struct sk_buff_head
*skbs
, u8 event
)
2445 BT_DBG("chan %p, control %p, skbs %p, event %d", chan
, control
, skbs
,
2449 case L2CAP_EV_DATA_REQUEST
:
2450 if (chan
->tx_send_head
== NULL
)
2451 chan
->tx_send_head
= skb_peek(skbs
);
2453 skb_queue_splice_tail_init(skbs
, &chan
->tx_q
);
2454 l2cap_ertm_send(chan
);
2456 case L2CAP_EV_LOCAL_BUSY_DETECTED
:
2457 BT_DBG("Enter LOCAL_BUSY");
2458 set_bit(CONN_LOCAL_BUSY
, &chan
->conn_state
);
2460 if (chan
->rx_state
== L2CAP_RX_STATE_SREJ_SENT
) {
2461 /* The SREJ_SENT state must be aborted if we are to
2462 * enter the LOCAL_BUSY state.
2464 l2cap_abort_rx_srej_sent(chan
);
2467 l2cap_send_ack(chan
);
2470 case L2CAP_EV_LOCAL_BUSY_CLEAR
:
2471 BT_DBG("Exit LOCAL_BUSY");
2472 clear_bit(CONN_LOCAL_BUSY
, &chan
->conn_state
);
2474 if (test_bit(CONN_RNR_SENT
, &chan
->conn_state
)) {
2475 struct l2cap_ctrl local_control
;
2477 memset(&local_control
, 0, sizeof(local_control
));
2478 local_control
.sframe
= 1;
2479 local_control
.super
= L2CAP_SUPER_RR
;
2480 local_control
.poll
= 1;
2481 local_control
.reqseq
= chan
->buffer_seq
;
2482 l2cap_send_sframe(chan
, &local_control
);
2484 chan
->retry_count
= 1;
2485 __set_monitor_timer(chan
);
2486 chan
->tx_state
= L2CAP_TX_STATE_WAIT_F
;
2489 case L2CAP_EV_RECV_REQSEQ_AND_FBIT
:
2490 l2cap_process_reqseq(chan
, control
->reqseq
);
2492 case L2CAP_EV_EXPLICIT_POLL
:
2493 l2cap_send_rr_or_rnr(chan
, 1);
2494 chan
->retry_count
= 1;
2495 __set_monitor_timer(chan
);
2496 __clear_ack_timer(chan
);
2497 chan
->tx_state
= L2CAP_TX_STATE_WAIT_F
;
2499 case L2CAP_EV_RETRANS_TO
:
2500 l2cap_send_rr_or_rnr(chan
, 1);
2501 chan
->retry_count
= 1;
2502 __set_monitor_timer(chan
);
2503 chan
->tx_state
= L2CAP_TX_STATE_WAIT_F
;
2505 case L2CAP_EV_RECV_FBIT
:
2506 /* Nothing to process */
2513 static void l2cap_tx_state_wait_f(struct l2cap_chan
*chan
,
2514 struct l2cap_ctrl
*control
,
2515 struct sk_buff_head
*skbs
, u8 event
)
2517 BT_DBG("chan %p, control %p, skbs %p, event %d", chan
, control
, skbs
,
2521 case L2CAP_EV_DATA_REQUEST
:
2522 if (chan
->tx_send_head
== NULL
)
2523 chan
->tx_send_head
= skb_peek(skbs
);
2524 /* Queue data, but don't send. */
2525 skb_queue_splice_tail_init(skbs
, &chan
->tx_q
);
2527 case L2CAP_EV_LOCAL_BUSY_DETECTED
:
2528 BT_DBG("Enter LOCAL_BUSY");
2529 set_bit(CONN_LOCAL_BUSY
, &chan
->conn_state
);
2531 if (chan
->rx_state
== L2CAP_RX_STATE_SREJ_SENT
) {
2532 /* The SREJ_SENT state must be aborted if we are to
2533 * enter the LOCAL_BUSY state.
2535 l2cap_abort_rx_srej_sent(chan
);
2538 l2cap_send_ack(chan
);
2541 case L2CAP_EV_LOCAL_BUSY_CLEAR
:
2542 BT_DBG("Exit LOCAL_BUSY");
2543 clear_bit(CONN_LOCAL_BUSY
, &chan
->conn_state
);
2545 if (test_bit(CONN_RNR_SENT
, &chan
->conn_state
)) {
2546 struct l2cap_ctrl local_control
;
2547 memset(&local_control
, 0, sizeof(local_control
));
2548 local_control
.sframe
= 1;
2549 local_control
.super
= L2CAP_SUPER_RR
;
2550 local_control
.poll
= 1;
2551 local_control
.reqseq
= chan
->buffer_seq
;
2552 l2cap_send_sframe(chan
, &local_control
);
2554 chan
->retry_count
= 1;
2555 __set_monitor_timer(chan
);
2556 chan
->tx_state
= L2CAP_TX_STATE_WAIT_F
;
2559 case L2CAP_EV_RECV_REQSEQ_AND_FBIT
:
2560 l2cap_process_reqseq(chan
, control
->reqseq
);
2564 case L2CAP_EV_RECV_FBIT
:
2565 if (control
&& control
->final
) {
2566 __clear_monitor_timer(chan
);
2567 if (chan
->unacked_frames
> 0)
2568 __set_retrans_timer(chan
);
2569 chan
->retry_count
= 0;
2570 chan
->tx_state
= L2CAP_TX_STATE_XMIT
;
2571 BT_DBG("recv fbit tx_state 0x2.2%x", chan
->tx_state
);
2574 case L2CAP_EV_EXPLICIT_POLL
:
2577 case L2CAP_EV_MONITOR_TO
:
2578 if (chan
->max_tx
== 0 || chan
->retry_count
< chan
->max_tx
) {
2579 l2cap_send_rr_or_rnr(chan
, 1);
2580 __set_monitor_timer(chan
);
2581 chan
->retry_count
++;
2583 l2cap_send_disconn_req(chan
->conn
, chan
, ECONNABORTED
);
2591 static void l2cap_tx(struct l2cap_chan
*chan
, struct l2cap_ctrl
*control
,
2592 struct sk_buff_head
*skbs
, u8 event
)
2594 BT_DBG("chan %p, control %p, skbs %p, event %d, state %d",
2595 chan
, control
, skbs
, event
, chan
->tx_state
);
2597 switch (chan
->tx_state
) {
2598 case L2CAP_TX_STATE_XMIT
:
2599 l2cap_tx_state_xmit(chan
, control
, skbs
, event
);
2601 case L2CAP_TX_STATE_WAIT_F
:
2602 l2cap_tx_state_wait_f(chan
, control
, skbs
, event
);
2610 static void l2cap_pass_to_tx(struct l2cap_chan
*chan
,
2611 struct l2cap_ctrl
*control
)
2613 BT_DBG("chan %p, control %p", chan
, control
);
2614 l2cap_tx(chan
, control
, NULL
, L2CAP_EV_RECV_REQSEQ_AND_FBIT
);
2617 static void l2cap_pass_to_tx_fbit(struct l2cap_chan
*chan
,
2618 struct l2cap_ctrl
*control
)
2620 BT_DBG("chan %p, control %p", chan
, control
);
2621 l2cap_tx(chan
, control
, NULL
, L2CAP_EV_RECV_FBIT
);
2624 /* Copy frame to all raw sockets on that connection */
2625 static void l2cap_raw_recv(struct l2cap_conn
*conn
, struct sk_buff
*skb
)
2627 struct sk_buff
*nskb
;
2628 struct l2cap_chan
*chan
;
2630 BT_DBG("conn %p", conn
);
2632 mutex_lock(&conn
->chan_lock
);
2634 list_for_each_entry(chan
, &conn
->chan_l
, list
) {
2635 struct sock
*sk
= chan
->sk
;
2636 if (chan
->chan_type
!= L2CAP_CHAN_RAW
)
2639 /* Don't send frame to the socket it came from */
2642 nskb
= skb_clone(skb
, GFP_KERNEL
);
2646 if (chan
->ops
->recv(chan
, nskb
))
2650 mutex_unlock(&conn
->chan_lock
);
2653 /* ---- L2CAP signalling commands ---- */
2654 static struct sk_buff
*l2cap_build_cmd(struct l2cap_conn
*conn
, u8 code
,
2655 u8 ident
, u16 dlen
, void *data
)
2657 struct sk_buff
*skb
, **frag
;
2658 struct l2cap_cmd_hdr
*cmd
;
2659 struct l2cap_hdr
*lh
;
2662 BT_DBG("conn %p, code 0x%2.2x, ident 0x%2.2x, len %u",
2663 conn
, code
, ident
, dlen
);
2665 len
= L2CAP_HDR_SIZE
+ L2CAP_CMD_HDR_SIZE
+ dlen
;
2666 count
= min_t(unsigned int, conn
->mtu
, len
);
2668 skb
= bt_skb_alloc(count
, GFP_KERNEL
);
2672 lh
= (struct l2cap_hdr
*) skb_put(skb
, L2CAP_HDR_SIZE
);
2673 lh
->len
= cpu_to_le16(L2CAP_CMD_HDR_SIZE
+ dlen
);
2675 if (conn
->hcon
->type
== LE_LINK
)
2676 lh
->cid
= __constant_cpu_to_le16(L2CAP_CID_LE_SIGNALING
);
2678 lh
->cid
= __constant_cpu_to_le16(L2CAP_CID_SIGNALING
);
2680 cmd
= (struct l2cap_cmd_hdr
*) skb_put(skb
, L2CAP_CMD_HDR_SIZE
);
2683 cmd
->len
= cpu_to_le16(dlen
);
2686 count
-= L2CAP_HDR_SIZE
+ L2CAP_CMD_HDR_SIZE
;
2687 memcpy(skb_put(skb
, count
), data
, count
);
2693 /* Continuation fragments (no L2CAP header) */
2694 frag
= &skb_shinfo(skb
)->frag_list
;
2696 count
= min_t(unsigned int, conn
->mtu
, len
);
2698 *frag
= bt_skb_alloc(count
, GFP_KERNEL
);
2702 memcpy(skb_put(*frag
, count
), data
, count
);
2707 frag
= &(*frag
)->next
;
2717 static inline int l2cap_get_conf_opt(void **ptr
, int *type
, int *olen
,
2720 struct l2cap_conf_opt
*opt
= *ptr
;
2723 len
= L2CAP_CONF_OPT_SIZE
+ opt
->len
;
2731 *val
= *((u8
*) opt
->val
);
2735 *val
= get_unaligned_le16(opt
->val
);
2739 *val
= get_unaligned_le32(opt
->val
);
2743 *val
= (unsigned long) opt
->val
;
2747 BT_DBG("type 0x%2.2x len %u val 0x%lx", *type
, opt
->len
, *val
);
2751 static void l2cap_add_conf_opt(void **ptr
, u8 type
, u8 len
, unsigned long val
)
2753 struct l2cap_conf_opt
*opt
= *ptr
;
2755 BT_DBG("type 0x%2.2x len %u val 0x%lx", type
, len
, val
);
2762 *((u8
*) opt
->val
) = val
;
2766 put_unaligned_le16(val
, opt
->val
);
2770 put_unaligned_le32(val
, opt
->val
);
2774 memcpy(opt
->val
, (void *) val
, len
);
2778 *ptr
+= L2CAP_CONF_OPT_SIZE
+ len
;
2781 static void l2cap_add_opt_efs(void **ptr
, struct l2cap_chan
*chan
)
2783 struct l2cap_conf_efs efs
;
2785 switch (chan
->mode
) {
2786 case L2CAP_MODE_ERTM
:
2787 efs
.id
= chan
->local_id
;
2788 efs
.stype
= chan
->local_stype
;
2789 efs
.msdu
= cpu_to_le16(chan
->local_msdu
);
2790 efs
.sdu_itime
= cpu_to_le32(chan
->local_sdu_itime
);
2791 efs
.acc_lat
= __constant_cpu_to_le32(L2CAP_DEFAULT_ACC_LAT
);
2792 efs
.flush_to
= __constant_cpu_to_le32(L2CAP_EFS_DEFAULT_FLUSH_TO
);
2795 case L2CAP_MODE_STREAMING
:
2797 efs
.stype
= L2CAP_SERV_BESTEFFORT
;
2798 efs
.msdu
= cpu_to_le16(chan
->local_msdu
);
2799 efs
.sdu_itime
= cpu_to_le32(chan
->local_sdu_itime
);
2808 l2cap_add_conf_opt(ptr
, L2CAP_CONF_EFS
, sizeof(efs
),
2809 (unsigned long) &efs
);
2812 static void l2cap_ack_timeout(struct work_struct
*work
)
2814 struct l2cap_chan
*chan
= container_of(work
, struct l2cap_chan
,
2818 BT_DBG("chan %p", chan
);
2820 l2cap_chan_lock(chan
);
2822 frames_to_ack
= __seq_offset(chan
, chan
->buffer_seq
,
2823 chan
->last_acked_seq
);
2826 l2cap_send_rr_or_rnr(chan
, 0);
2828 l2cap_chan_unlock(chan
);
2829 l2cap_chan_put(chan
);
2832 int l2cap_ertm_init(struct l2cap_chan
*chan
)
2836 chan
->next_tx_seq
= 0;
2837 chan
->expected_tx_seq
= 0;
2838 chan
->expected_ack_seq
= 0;
2839 chan
->unacked_frames
= 0;
2840 chan
->buffer_seq
= 0;
2841 chan
->frames_sent
= 0;
2842 chan
->last_acked_seq
= 0;
2844 chan
->sdu_last_frag
= NULL
;
2847 skb_queue_head_init(&chan
->tx_q
);
2849 chan
->local_amp_id
= 0;
2851 chan
->move_state
= L2CAP_MOVE_STABLE
;
2852 chan
->move_role
= L2CAP_MOVE_ROLE_NONE
;
2854 if (chan
->mode
!= L2CAP_MODE_ERTM
)
2857 chan
->rx_state
= L2CAP_RX_STATE_RECV
;
2858 chan
->tx_state
= L2CAP_TX_STATE_XMIT
;
2860 INIT_DELAYED_WORK(&chan
->retrans_timer
, l2cap_retrans_timeout
);
2861 INIT_DELAYED_WORK(&chan
->monitor_timer
, l2cap_monitor_timeout
);
2862 INIT_DELAYED_WORK(&chan
->ack_timer
, l2cap_ack_timeout
);
2864 skb_queue_head_init(&chan
->srej_q
);
2866 err
= l2cap_seq_list_init(&chan
->srej_list
, chan
->tx_win
);
2870 err
= l2cap_seq_list_init(&chan
->retrans_list
, chan
->remote_tx_win
);
2872 l2cap_seq_list_free(&chan
->srej_list
);
2877 static inline __u8
l2cap_select_mode(__u8 mode
, __u16 remote_feat_mask
)
2880 case L2CAP_MODE_STREAMING
:
2881 case L2CAP_MODE_ERTM
:
2882 if (l2cap_mode_supported(mode
, remote_feat_mask
))
2886 return L2CAP_MODE_BASIC
;
2890 static inline bool __l2cap_ews_supported(struct l2cap_chan
*chan
)
2892 return enable_hs
&& chan
->conn
->feat_mask
& L2CAP_FEAT_EXT_WINDOW
;
2895 static inline bool __l2cap_efs_supported(struct l2cap_chan
*chan
)
2897 return enable_hs
&& chan
->conn
->feat_mask
& L2CAP_FEAT_EXT_FLOW
;
2900 static inline void l2cap_txwin_setup(struct l2cap_chan
*chan
)
2902 if (chan
->tx_win
> L2CAP_DEFAULT_TX_WINDOW
&&
2903 __l2cap_ews_supported(chan
)) {
2904 /* use extended control field */
2905 set_bit(FLAG_EXT_CTRL
, &chan
->flags
);
2906 chan
->tx_win_max
= L2CAP_DEFAULT_EXT_WINDOW
;
2908 chan
->tx_win
= min_t(u16
, chan
->tx_win
,
2909 L2CAP_DEFAULT_TX_WINDOW
);
2910 chan
->tx_win_max
= L2CAP_DEFAULT_TX_WINDOW
;
2912 chan
->ack_win
= chan
->tx_win
;
2915 static int l2cap_build_conf_req(struct l2cap_chan
*chan
, void *data
)
2917 struct l2cap_conf_req
*req
= data
;
2918 struct l2cap_conf_rfc rfc
= { .mode
= chan
->mode
};
2919 void *ptr
= req
->data
;
2922 BT_DBG("chan %p", chan
);
2924 if (chan
->num_conf_req
|| chan
->num_conf_rsp
)
2927 switch (chan
->mode
) {
2928 case L2CAP_MODE_STREAMING
:
2929 case L2CAP_MODE_ERTM
:
2930 if (test_bit(CONF_STATE2_DEVICE
, &chan
->conf_state
))
2933 if (__l2cap_efs_supported(chan
))
2934 set_bit(FLAG_EFS_ENABLE
, &chan
->flags
);
2938 chan
->mode
= l2cap_select_mode(rfc
.mode
, chan
->conn
->feat_mask
);
2943 if (chan
->imtu
!= L2CAP_DEFAULT_MTU
)
2944 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_MTU
, 2, chan
->imtu
);
2946 switch (chan
->mode
) {
2947 case L2CAP_MODE_BASIC
:
2948 if (!(chan
->conn
->feat_mask
& L2CAP_FEAT_ERTM
) &&
2949 !(chan
->conn
->feat_mask
& L2CAP_FEAT_STREAMING
))
2952 rfc
.mode
= L2CAP_MODE_BASIC
;
2954 rfc
.max_transmit
= 0;
2955 rfc
.retrans_timeout
= 0;
2956 rfc
.monitor_timeout
= 0;
2957 rfc
.max_pdu_size
= 0;
2959 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_RFC
, sizeof(rfc
),
2960 (unsigned long) &rfc
);
2963 case L2CAP_MODE_ERTM
:
2964 rfc
.mode
= L2CAP_MODE_ERTM
;
2965 rfc
.max_transmit
= chan
->max_tx
;
2966 rfc
.retrans_timeout
= 0;
2967 rfc
.monitor_timeout
= 0;
2969 size
= min_t(u16
, L2CAP_DEFAULT_MAX_PDU_SIZE
, chan
->conn
->mtu
-
2970 L2CAP_EXT_HDR_SIZE
- L2CAP_SDULEN_SIZE
-
2972 rfc
.max_pdu_size
= cpu_to_le16(size
);
2974 l2cap_txwin_setup(chan
);
2976 rfc
.txwin_size
= min_t(u16
, chan
->tx_win
,
2977 L2CAP_DEFAULT_TX_WINDOW
);
2979 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_RFC
, sizeof(rfc
),
2980 (unsigned long) &rfc
);
2982 if (test_bit(FLAG_EFS_ENABLE
, &chan
->flags
))
2983 l2cap_add_opt_efs(&ptr
, chan
);
2985 if (!(chan
->conn
->feat_mask
& L2CAP_FEAT_FCS
))
2988 if (chan
->fcs
== L2CAP_FCS_NONE
||
2989 test_bit(CONF_NO_FCS_RECV
, &chan
->conf_state
)) {
2990 chan
->fcs
= L2CAP_FCS_NONE
;
2991 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_FCS
, 1, chan
->fcs
);
2994 if (test_bit(FLAG_EXT_CTRL
, &chan
->flags
))
2995 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_EWS
, 2,
2999 case L2CAP_MODE_STREAMING
:
3000 l2cap_txwin_setup(chan
);
3001 rfc
.mode
= L2CAP_MODE_STREAMING
;
3003 rfc
.max_transmit
= 0;
3004 rfc
.retrans_timeout
= 0;
3005 rfc
.monitor_timeout
= 0;
3007 size
= min_t(u16
, L2CAP_DEFAULT_MAX_PDU_SIZE
, chan
->conn
->mtu
-
3008 L2CAP_EXT_HDR_SIZE
- L2CAP_SDULEN_SIZE
-
3010 rfc
.max_pdu_size
= cpu_to_le16(size
);
3012 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_RFC
, sizeof(rfc
),
3013 (unsigned long) &rfc
);
3015 if (test_bit(FLAG_EFS_ENABLE
, &chan
->flags
))
3016 l2cap_add_opt_efs(&ptr
, chan
);
3018 if (!(chan
->conn
->feat_mask
& L2CAP_FEAT_FCS
))
3021 if (chan
->fcs
== L2CAP_FCS_NONE
||
3022 test_bit(CONF_NO_FCS_RECV
, &chan
->conf_state
)) {
3023 chan
->fcs
= L2CAP_FCS_NONE
;
3024 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_FCS
, 1, chan
->fcs
);
3029 req
->dcid
= cpu_to_le16(chan
->dcid
);
3030 req
->flags
= __constant_cpu_to_le16(0);
3035 static int l2cap_parse_conf_req(struct l2cap_chan
*chan
, void *data
)
3037 struct l2cap_conf_rsp
*rsp
= data
;
3038 void *ptr
= rsp
->data
;
3039 void *req
= chan
->conf_req
;
3040 int len
= chan
->conf_len
;
3041 int type
, hint
, olen
;
3043 struct l2cap_conf_rfc rfc
= { .mode
= L2CAP_MODE_BASIC
};
3044 struct l2cap_conf_efs efs
;
3046 u16 mtu
= L2CAP_DEFAULT_MTU
;
3047 u16 result
= L2CAP_CONF_SUCCESS
;
3050 BT_DBG("chan %p", chan
);
3052 while (len
>= L2CAP_CONF_OPT_SIZE
) {
3053 len
-= l2cap_get_conf_opt(&req
, &type
, &olen
, &val
);
3055 hint
= type
& L2CAP_CONF_HINT
;
3056 type
&= L2CAP_CONF_MASK
;
3059 case L2CAP_CONF_MTU
:
3063 case L2CAP_CONF_FLUSH_TO
:
3064 chan
->flush_to
= val
;
3067 case L2CAP_CONF_QOS
:
3070 case L2CAP_CONF_RFC
:
3071 if (olen
== sizeof(rfc
))
3072 memcpy(&rfc
, (void *) val
, olen
);
3075 case L2CAP_CONF_FCS
:
3076 if (val
== L2CAP_FCS_NONE
)
3077 set_bit(CONF_NO_FCS_RECV
, &chan
->conf_state
);
3080 case L2CAP_CONF_EFS
:
3082 if (olen
== sizeof(efs
))
3083 memcpy(&efs
, (void *) val
, olen
);
3086 case L2CAP_CONF_EWS
:
3088 return -ECONNREFUSED
;
3090 set_bit(FLAG_EXT_CTRL
, &chan
->flags
);
3091 set_bit(CONF_EWS_RECV
, &chan
->conf_state
);
3092 chan
->tx_win_max
= L2CAP_DEFAULT_EXT_WINDOW
;
3093 chan
->remote_tx_win
= val
;
3100 result
= L2CAP_CONF_UNKNOWN
;
3101 *((u8
*) ptr
++) = type
;
3106 if (chan
->num_conf_rsp
|| chan
->num_conf_req
> 1)
3109 switch (chan
->mode
) {
3110 case L2CAP_MODE_STREAMING
:
3111 case L2CAP_MODE_ERTM
:
3112 if (!test_bit(CONF_STATE2_DEVICE
, &chan
->conf_state
)) {
3113 chan
->mode
= l2cap_select_mode(rfc
.mode
,
3114 chan
->conn
->feat_mask
);
3119 if (__l2cap_efs_supported(chan
))
3120 set_bit(FLAG_EFS_ENABLE
, &chan
->flags
);
3122 return -ECONNREFUSED
;
3125 if (chan
->mode
!= rfc
.mode
)
3126 return -ECONNREFUSED
;
3132 if (chan
->mode
!= rfc
.mode
) {
3133 result
= L2CAP_CONF_UNACCEPT
;
3134 rfc
.mode
= chan
->mode
;
3136 if (chan
->num_conf_rsp
== 1)
3137 return -ECONNREFUSED
;
3139 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_RFC
, sizeof(rfc
),
3140 (unsigned long) &rfc
);
3143 if (result
== L2CAP_CONF_SUCCESS
) {
3144 /* Configure output options and let the other side know
3145 * which ones we don't like. */
3147 if (mtu
< L2CAP_DEFAULT_MIN_MTU
)
3148 result
= L2CAP_CONF_UNACCEPT
;
3151 set_bit(CONF_MTU_DONE
, &chan
->conf_state
);
3153 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_MTU
, 2, chan
->omtu
);
3156 if (chan
->local_stype
!= L2CAP_SERV_NOTRAFIC
&&
3157 efs
.stype
!= L2CAP_SERV_NOTRAFIC
&&
3158 efs
.stype
!= chan
->local_stype
) {
3160 result
= L2CAP_CONF_UNACCEPT
;
3162 if (chan
->num_conf_req
>= 1)
3163 return -ECONNREFUSED
;
3165 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_EFS
,
3167 (unsigned long) &efs
);
3169 /* Send PENDING Conf Rsp */
3170 result
= L2CAP_CONF_PENDING
;
3171 set_bit(CONF_LOC_CONF_PEND
, &chan
->conf_state
);
3176 case L2CAP_MODE_BASIC
:
3177 chan
->fcs
= L2CAP_FCS_NONE
;
3178 set_bit(CONF_MODE_DONE
, &chan
->conf_state
);
3181 case L2CAP_MODE_ERTM
:
3182 if (!test_bit(CONF_EWS_RECV
, &chan
->conf_state
))
3183 chan
->remote_tx_win
= rfc
.txwin_size
;
3185 rfc
.txwin_size
= L2CAP_DEFAULT_TX_WINDOW
;
3187 chan
->remote_max_tx
= rfc
.max_transmit
;
3189 size
= min_t(u16
, le16_to_cpu(rfc
.max_pdu_size
),
3190 chan
->conn
->mtu
- L2CAP_EXT_HDR_SIZE
-
3191 L2CAP_SDULEN_SIZE
- L2CAP_FCS_SIZE
);
3192 rfc
.max_pdu_size
= cpu_to_le16(size
);
3193 chan
->remote_mps
= size
;
3195 rfc
.retrans_timeout
=
3196 __constant_cpu_to_le16(L2CAP_DEFAULT_RETRANS_TO
);
3197 rfc
.monitor_timeout
=
3198 __constant_cpu_to_le16(L2CAP_DEFAULT_MONITOR_TO
);
3200 set_bit(CONF_MODE_DONE
, &chan
->conf_state
);
3202 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_RFC
,
3203 sizeof(rfc
), (unsigned long) &rfc
);
3205 if (test_bit(FLAG_EFS_ENABLE
, &chan
->flags
)) {
3206 chan
->remote_id
= efs
.id
;
3207 chan
->remote_stype
= efs
.stype
;
3208 chan
->remote_msdu
= le16_to_cpu(efs
.msdu
);
3209 chan
->remote_flush_to
=
3210 le32_to_cpu(efs
.flush_to
);
3211 chan
->remote_acc_lat
=
3212 le32_to_cpu(efs
.acc_lat
);
3213 chan
->remote_sdu_itime
=
3214 le32_to_cpu(efs
.sdu_itime
);
3215 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_EFS
,
3217 (unsigned long) &efs
);
3221 case L2CAP_MODE_STREAMING
:
3222 size
= min_t(u16
, le16_to_cpu(rfc
.max_pdu_size
),
3223 chan
->conn
->mtu
- L2CAP_EXT_HDR_SIZE
-
3224 L2CAP_SDULEN_SIZE
- L2CAP_FCS_SIZE
);
3225 rfc
.max_pdu_size
= cpu_to_le16(size
);
3226 chan
->remote_mps
= size
;
3228 set_bit(CONF_MODE_DONE
, &chan
->conf_state
);
3230 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_RFC
, sizeof(rfc
),
3231 (unsigned long) &rfc
);
3236 result
= L2CAP_CONF_UNACCEPT
;
3238 memset(&rfc
, 0, sizeof(rfc
));
3239 rfc
.mode
= chan
->mode
;
3242 if (result
== L2CAP_CONF_SUCCESS
)
3243 set_bit(CONF_OUTPUT_DONE
, &chan
->conf_state
);
3245 rsp
->scid
= cpu_to_le16(chan
->dcid
);
3246 rsp
->result
= cpu_to_le16(result
);
3247 rsp
->flags
= __constant_cpu_to_le16(0);
3252 static int l2cap_parse_conf_rsp(struct l2cap_chan
*chan
, void *rsp
, int len
,
3253 void *data
, u16
*result
)
3255 struct l2cap_conf_req
*req
= data
;
3256 void *ptr
= req
->data
;
3259 struct l2cap_conf_rfc rfc
= { .mode
= L2CAP_MODE_BASIC
};
3260 struct l2cap_conf_efs efs
;
3262 BT_DBG("chan %p, rsp %p, len %d, req %p", chan
, rsp
, len
, data
);
3264 while (len
>= L2CAP_CONF_OPT_SIZE
) {
3265 len
-= l2cap_get_conf_opt(&rsp
, &type
, &olen
, &val
);
3268 case L2CAP_CONF_MTU
:
3269 if (val
< L2CAP_DEFAULT_MIN_MTU
) {
3270 *result
= L2CAP_CONF_UNACCEPT
;
3271 chan
->imtu
= L2CAP_DEFAULT_MIN_MTU
;
3274 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_MTU
, 2, chan
->imtu
);
3277 case L2CAP_CONF_FLUSH_TO
:
3278 chan
->flush_to
= val
;
3279 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_FLUSH_TO
,
3283 case L2CAP_CONF_RFC
:
3284 if (olen
== sizeof(rfc
))
3285 memcpy(&rfc
, (void *)val
, olen
);
3287 if (test_bit(CONF_STATE2_DEVICE
, &chan
->conf_state
) &&
3288 rfc
.mode
!= chan
->mode
)
3289 return -ECONNREFUSED
;
3293 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_RFC
,
3294 sizeof(rfc
), (unsigned long) &rfc
);
3297 case L2CAP_CONF_EWS
:
3298 chan
->ack_win
= min_t(u16
, val
, chan
->ack_win
);
3299 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_EWS
, 2,
3303 case L2CAP_CONF_EFS
:
3304 if (olen
== sizeof(efs
))
3305 memcpy(&efs
, (void *)val
, olen
);
3307 if (chan
->local_stype
!= L2CAP_SERV_NOTRAFIC
&&
3308 efs
.stype
!= L2CAP_SERV_NOTRAFIC
&&
3309 efs
.stype
!= chan
->local_stype
)
3310 return -ECONNREFUSED
;
3312 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_EFS
, sizeof(efs
),
3313 (unsigned long) &efs
);
3318 if (chan
->mode
== L2CAP_MODE_BASIC
&& chan
->mode
!= rfc
.mode
)
3319 return -ECONNREFUSED
;
3321 chan
->mode
= rfc
.mode
;
3323 if (*result
== L2CAP_CONF_SUCCESS
|| *result
== L2CAP_CONF_PENDING
) {
3325 case L2CAP_MODE_ERTM
:
3326 chan
->retrans_timeout
= le16_to_cpu(rfc
.retrans_timeout
);
3327 chan
->monitor_timeout
= le16_to_cpu(rfc
.monitor_timeout
);
3328 chan
->mps
= le16_to_cpu(rfc
.max_pdu_size
);
3329 if (!test_bit(FLAG_EXT_CTRL
, &chan
->flags
))
3330 chan
->ack_win
= min_t(u16
, chan
->ack_win
,
3333 if (test_bit(FLAG_EFS_ENABLE
, &chan
->flags
)) {
3334 chan
->local_msdu
= le16_to_cpu(efs
.msdu
);
3335 chan
->local_sdu_itime
=
3336 le32_to_cpu(efs
.sdu_itime
);
3337 chan
->local_acc_lat
= le32_to_cpu(efs
.acc_lat
);
3338 chan
->local_flush_to
=
3339 le32_to_cpu(efs
.flush_to
);
3343 case L2CAP_MODE_STREAMING
:
3344 chan
->mps
= le16_to_cpu(rfc
.max_pdu_size
);
3348 req
->dcid
= cpu_to_le16(chan
->dcid
);
3349 req
->flags
= __constant_cpu_to_le16(0);
3354 static int l2cap_build_conf_rsp(struct l2cap_chan
*chan
, void *data
,
3355 u16 result
, u16 flags
)
3357 struct l2cap_conf_rsp
*rsp
= data
;
3358 void *ptr
= rsp
->data
;
3360 BT_DBG("chan %p", chan
);
3362 rsp
->scid
= cpu_to_le16(chan
->dcid
);
3363 rsp
->result
= cpu_to_le16(result
);
3364 rsp
->flags
= cpu_to_le16(flags
);
3369 void __l2cap_connect_rsp_defer(struct l2cap_chan
*chan
)
3371 struct l2cap_conn_rsp rsp
;
3372 struct l2cap_conn
*conn
= chan
->conn
;
3375 rsp
.scid
= cpu_to_le16(chan
->dcid
);
3376 rsp
.dcid
= cpu_to_le16(chan
->scid
);
3377 rsp
.result
= __constant_cpu_to_le16(L2CAP_CR_SUCCESS
);
3378 rsp
.status
= __constant_cpu_to_le16(L2CAP_CS_NO_INFO
);
3379 l2cap_send_cmd(conn
, chan
->ident
, L2CAP_CONN_RSP
, sizeof(rsp
), &rsp
);
3381 if (test_and_set_bit(CONF_REQ_SENT
, &chan
->conf_state
))
3384 l2cap_send_cmd(conn
, l2cap_get_ident(conn
), L2CAP_CONF_REQ
,
3385 l2cap_build_conf_req(chan
, buf
), buf
);
3386 chan
->num_conf_req
++;
3389 static void l2cap_conf_rfc_get(struct l2cap_chan
*chan
, void *rsp
, int len
)
3393 /* Use sane default values in case a misbehaving remote device
3394 * did not send an RFC or extended window size option.
3396 u16 txwin_ext
= chan
->ack_win
;
3397 struct l2cap_conf_rfc rfc
= {
3399 .retrans_timeout
= __constant_cpu_to_le16(L2CAP_DEFAULT_RETRANS_TO
),
3400 .monitor_timeout
= __constant_cpu_to_le16(L2CAP_DEFAULT_MONITOR_TO
),
3401 .max_pdu_size
= cpu_to_le16(chan
->imtu
),
3402 .txwin_size
= min_t(u16
, chan
->ack_win
, L2CAP_DEFAULT_TX_WINDOW
),
3405 BT_DBG("chan %p, rsp %p, len %d", chan
, rsp
, len
);
3407 if ((chan
->mode
!= L2CAP_MODE_ERTM
) && (chan
->mode
!= L2CAP_MODE_STREAMING
))
3410 while (len
>= L2CAP_CONF_OPT_SIZE
) {
3411 len
-= l2cap_get_conf_opt(&rsp
, &type
, &olen
, &val
);
3414 case L2CAP_CONF_RFC
:
3415 if (olen
== sizeof(rfc
))
3416 memcpy(&rfc
, (void *)val
, olen
);
3418 case L2CAP_CONF_EWS
:
3425 case L2CAP_MODE_ERTM
:
3426 chan
->retrans_timeout
= le16_to_cpu(rfc
.retrans_timeout
);
3427 chan
->monitor_timeout
= le16_to_cpu(rfc
.monitor_timeout
);
3428 chan
->mps
= le16_to_cpu(rfc
.max_pdu_size
);
3429 if (test_bit(FLAG_EXT_CTRL
, &chan
->flags
))
3430 chan
->ack_win
= min_t(u16
, chan
->ack_win
, txwin_ext
);
3432 chan
->ack_win
= min_t(u16
, chan
->ack_win
,
3435 case L2CAP_MODE_STREAMING
:
3436 chan
->mps
= le16_to_cpu(rfc
.max_pdu_size
);
3440 static inline int l2cap_command_rej(struct l2cap_conn
*conn
,
3441 struct l2cap_cmd_hdr
*cmd
, u8
*data
)
3443 struct l2cap_cmd_rej_unk
*rej
= (struct l2cap_cmd_rej_unk
*) data
;
3445 if (rej
->reason
!= L2CAP_REJ_NOT_UNDERSTOOD
)
3448 if ((conn
->info_state
& L2CAP_INFO_FEAT_MASK_REQ_SENT
) &&
3449 cmd
->ident
== conn
->info_ident
) {
3450 cancel_delayed_work(&conn
->info_timer
);
3452 conn
->info_state
|= L2CAP_INFO_FEAT_MASK_REQ_DONE
;
3453 conn
->info_ident
= 0;
3455 l2cap_conn_start(conn
);
3461 static struct l2cap_chan
*l2cap_connect(struct l2cap_conn
*conn
,
3462 struct l2cap_cmd_hdr
*cmd
,
3463 u8
*data
, u8 rsp_code
, u8 amp_id
)
3465 struct l2cap_conn_req
*req
= (struct l2cap_conn_req
*) data
;
3466 struct l2cap_conn_rsp rsp
;
3467 struct l2cap_chan
*chan
= NULL
, *pchan
;
3468 struct sock
*parent
, *sk
= NULL
;
3469 int result
, status
= L2CAP_CS_NO_INFO
;
3471 u16 dcid
= 0, scid
= __le16_to_cpu(req
->scid
);
3472 __le16 psm
= req
->psm
;
3474 BT_DBG("psm 0x%2.2x scid 0x%4.4x", __le16_to_cpu(psm
), scid
);
3476 /* Check if we have socket listening on psm */
3477 pchan
= l2cap_global_chan_by_psm(BT_LISTEN
, psm
, conn
->src
, conn
->dst
);
3479 result
= L2CAP_CR_BAD_PSM
;
3485 mutex_lock(&conn
->chan_lock
);
3488 /* Check if the ACL is secure enough (if not SDP) */
3489 if (psm
!= __constant_cpu_to_le16(L2CAP_PSM_SDP
) &&
3490 !hci_conn_check_link_mode(conn
->hcon
)) {
3491 conn
->disc_reason
= HCI_ERROR_AUTH_FAILURE
;
3492 result
= L2CAP_CR_SEC_BLOCK
;
3496 result
= L2CAP_CR_NO_MEM
;
3498 /* Check if we already have channel with that dcid */
3499 if (__l2cap_get_chan_by_dcid(conn
, scid
))
3502 chan
= pchan
->ops
->new_connection(pchan
);
3508 hci_conn_hold(conn
->hcon
);
3510 bacpy(&bt_sk(sk
)->src
, conn
->src
);
3511 bacpy(&bt_sk(sk
)->dst
, conn
->dst
);
3514 chan
->local_amp_id
= amp_id
;
3516 __l2cap_chan_add(conn
, chan
);
3520 __set_chan_timer(chan
, sk
->sk_sndtimeo
);
3522 chan
->ident
= cmd
->ident
;
3524 if (conn
->info_state
& L2CAP_INFO_FEAT_MASK_REQ_DONE
) {
3525 if (l2cap_chan_check_security(chan
)) {
3526 if (test_bit(BT_SK_DEFER_SETUP
, &bt_sk(sk
)->flags
)) {
3527 __l2cap_state_change(chan
, BT_CONNECT2
);
3528 result
= L2CAP_CR_PEND
;
3529 status
= L2CAP_CS_AUTHOR_PEND
;
3530 chan
->ops
->defer(chan
);
3532 /* Force pending result for AMP controllers.
3533 * The connection will succeed after the
3534 * physical link is up.
3537 __l2cap_state_change(chan
, BT_CONNECT2
);
3538 result
= L2CAP_CR_PEND
;
3540 __l2cap_state_change(chan
, BT_CONFIG
);
3541 result
= L2CAP_CR_SUCCESS
;
3543 status
= L2CAP_CS_NO_INFO
;
3546 __l2cap_state_change(chan
, BT_CONNECT2
);
3547 result
= L2CAP_CR_PEND
;
3548 status
= L2CAP_CS_AUTHEN_PEND
;
3551 __l2cap_state_change(chan
, BT_CONNECT2
);
3552 result
= L2CAP_CR_PEND
;
3553 status
= L2CAP_CS_NO_INFO
;
3557 release_sock(parent
);
3558 mutex_unlock(&conn
->chan_lock
);
3561 rsp
.scid
= cpu_to_le16(scid
);
3562 rsp
.dcid
= cpu_to_le16(dcid
);
3563 rsp
.result
= cpu_to_le16(result
);
3564 rsp
.status
= cpu_to_le16(status
);
3565 l2cap_send_cmd(conn
, cmd
->ident
, rsp_code
, sizeof(rsp
), &rsp
);
3567 if (result
== L2CAP_CR_PEND
&& status
== L2CAP_CS_NO_INFO
) {
3568 struct l2cap_info_req info
;
3569 info
.type
= __constant_cpu_to_le16(L2CAP_IT_FEAT_MASK
);
3571 conn
->info_state
|= L2CAP_INFO_FEAT_MASK_REQ_SENT
;
3572 conn
->info_ident
= l2cap_get_ident(conn
);
3574 schedule_delayed_work(&conn
->info_timer
, L2CAP_INFO_TIMEOUT
);
3576 l2cap_send_cmd(conn
, conn
->info_ident
, L2CAP_INFO_REQ
,
3577 sizeof(info
), &info
);
3580 if (chan
&& !test_bit(CONF_REQ_SENT
, &chan
->conf_state
) &&
3581 result
== L2CAP_CR_SUCCESS
) {
3583 set_bit(CONF_REQ_SENT
, &chan
->conf_state
);
3584 l2cap_send_cmd(conn
, l2cap_get_ident(conn
), L2CAP_CONF_REQ
,
3585 l2cap_build_conf_req(chan
, buf
), buf
);
3586 chan
->num_conf_req
++;
3592 static int l2cap_connect_req(struct l2cap_conn
*conn
,
3593 struct l2cap_cmd_hdr
*cmd
, u8
*data
)
3595 l2cap_connect(conn
, cmd
, data
, L2CAP_CONN_RSP
, 0);
3599 static int l2cap_connect_create_rsp(struct l2cap_conn
*conn
,
3600 struct l2cap_cmd_hdr
*cmd
, u8
*data
)
3602 struct l2cap_conn_rsp
*rsp
= (struct l2cap_conn_rsp
*) data
;
3603 u16 scid
, dcid
, result
, status
;
3604 struct l2cap_chan
*chan
;
3608 scid
= __le16_to_cpu(rsp
->scid
);
3609 dcid
= __le16_to_cpu(rsp
->dcid
);
3610 result
= __le16_to_cpu(rsp
->result
);
3611 status
= __le16_to_cpu(rsp
->status
);
3613 BT_DBG("dcid 0x%4.4x scid 0x%4.4x result 0x%2.2x status 0x%2.2x",
3614 dcid
, scid
, result
, status
);
3616 mutex_lock(&conn
->chan_lock
);
3619 chan
= __l2cap_get_chan_by_scid(conn
, scid
);
3625 chan
= __l2cap_get_chan_by_ident(conn
, cmd
->ident
);
3634 l2cap_chan_lock(chan
);
3637 case L2CAP_CR_SUCCESS
:
3638 l2cap_state_change(chan
, BT_CONFIG
);
3641 clear_bit(CONF_CONNECT_PEND
, &chan
->conf_state
);
3643 if (test_and_set_bit(CONF_REQ_SENT
, &chan
->conf_state
))
3646 l2cap_send_cmd(conn
, l2cap_get_ident(conn
), L2CAP_CONF_REQ
,
3647 l2cap_build_conf_req(chan
, req
), req
);
3648 chan
->num_conf_req
++;
3652 set_bit(CONF_CONNECT_PEND
, &chan
->conf_state
);
3656 l2cap_chan_del(chan
, ECONNREFUSED
);
3660 l2cap_chan_unlock(chan
);
3663 mutex_unlock(&conn
->chan_lock
);
3668 static inline void set_default_fcs(struct l2cap_chan
*chan
)
3670 /* FCS is enabled only in ERTM or streaming mode, if one or both
3673 if (chan
->mode
!= L2CAP_MODE_ERTM
&& chan
->mode
!= L2CAP_MODE_STREAMING
)
3674 chan
->fcs
= L2CAP_FCS_NONE
;
3675 else if (!test_bit(CONF_NO_FCS_RECV
, &chan
->conf_state
))
3676 chan
->fcs
= L2CAP_FCS_CRC16
;
3679 static void l2cap_send_efs_conf_rsp(struct l2cap_chan
*chan
, void *data
,
3680 u8 ident
, u16 flags
)
3682 struct l2cap_conn
*conn
= chan
->conn
;
3684 BT_DBG("conn %p chan %p ident %d flags 0x%4.4x", conn
, chan
, ident
,
3687 clear_bit(CONF_LOC_CONF_PEND
, &chan
->conf_state
);
3688 set_bit(CONF_OUTPUT_DONE
, &chan
->conf_state
);
3690 l2cap_send_cmd(conn
, ident
, L2CAP_CONF_RSP
,
3691 l2cap_build_conf_rsp(chan
, data
,
3692 L2CAP_CONF_SUCCESS
, flags
), data
);
3695 static inline int l2cap_config_req(struct l2cap_conn
*conn
,
3696 struct l2cap_cmd_hdr
*cmd
, u16 cmd_len
,
3699 struct l2cap_conf_req
*req
= (struct l2cap_conf_req
*) data
;
3702 struct l2cap_chan
*chan
;
3705 dcid
= __le16_to_cpu(req
->dcid
);
3706 flags
= __le16_to_cpu(req
->flags
);
3708 BT_DBG("dcid 0x%4.4x flags 0x%2.2x", dcid
, flags
);
3710 chan
= l2cap_get_chan_by_scid(conn
, dcid
);
3714 if (chan
->state
!= BT_CONFIG
&& chan
->state
!= BT_CONNECT2
) {
3715 struct l2cap_cmd_rej_cid rej
;
3717 rej
.reason
= __constant_cpu_to_le16(L2CAP_REJ_INVALID_CID
);
3718 rej
.scid
= cpu_to_le16(chan
->scid
);
3719 rej
.dcid
= cpu_to_le16(chan
->dcid
);
3721 l2cap_send_cmd(conn
, cmd
->ident
, L2CAP_COMMAND_REJ
,
3726 /* Reject if config buffer is too small. */
3727 len
= cmd_len
- sizeof(*req
);
3728 if (len
< 0 || chan
->conf_len
+ len
> sizeof(chan
->conf_req
)) {
3729 l2cap_send_cmd(conn
, cmd
->ident
, L2CAP_CONF_RSP
,
3730 l2cap_build_conf_rsp(chan
, rsp
,
3731 L2CAP_CONF_REJECT
, flags
), rsp
);
3736 memcpy(chan
->conf_req
+ chan
->conf_len
, req
->data
, len
);
3737 chan
->conf_len
+= len
;
3739 if (flags
& L2CAP_CONF_FLAG_CONTINUATION
) {
3740 /* Incomplete config. Send empty response. */
3741 l2cap_send_cmd(conn
, cmd
->ident
, L2CAP_CONF_RSP
,
3742 l2cap_build_conf_rsp(chan
, rsp
,
3743 L2CAP_CONF_SUCCESS
, flags
), rsp
);
3747 /* Complete config. */
3748 len
= l2cap_parse_conf_req(chan
, rsp
);
3750 l2cap_send_disconn_req(conn
, chan
, ECONNRESET
);
3754 l2cap_send_cmd(conn
, cmd
->ident
, L2CAP_CONF_RSP
, len
, rsp
);
3755 chan
->num_conf_rsp
++;
3757 /* Reset config buffer. */
3760 if (!test_bit(CONF_OUTPUT_DONE
, &chan
->conf_state
))
3763 if (test_bit(CONF_INPUT_DONE
, &chan
->conf_state
)) {
3764 set_default_fcs(chan
);
3766 if (chan
->mode
== L2CAP_MODE_ERTM
||
3767 chan
->mode
== L2CAP_MODE_STREAMING
)
3768 err
= l2cap_ertm_init(chan
);
3771 l2cap_send_disconn_req(chan
->conn
, chan
, -err
);
3773 l2cap_chan_ready(chan
);
3778 if (!test_and_set_bit(CONF_REQ_SENT
, &chan
->conf_state
)) {
3780 l2cap_send_cmd(conn
, l2cap_get_ident(conn
), L2CAP_CONF_REQ
,
3781 l2cap_build_conf_req(chan
, buf
), buf
);
3782 chan
->num_conf_req
++;
3785 /* Got Conf Rsp PENDING from remote side and asume we sent
3786 Conf Rsp PENDING in the code above */
3787 if (test_bit(CONF_REM_CONF_PEND
, &chan
->conf_state
) &&
3788 test_bit(CONF_LOC_CONF_PEND
, &chan
->conf_state
)) {
3790 /* check compatibility */
3792 /* Send rsp for BR/EDR channel */
3794 l2cap_send_efs_conf_rsp(chan
, rsp
, cmd
->ident
, flags
);
3796 chan
->ident
= cmd
->ident
;
3800 l2cap_chan_unlock(chan
);
3804 static inline int l2cap_config_rsp(struct l2cap_conn
*conn
,
3805 struct l2cap_cmd_hdr
*cmd
, u8
*data
)
3807 struct l2cap_conf_rsp
*rsp
= (struct l2cap_conf_rsp
*)data
;
3808 u16 scid
, flags
, result
;
3809 struct l2cap_chan
*chan
;
3810 int len
= le16_to_cpu(cmd
->len
) - sizeof(*rsp
);
3813 scid
= __le16_to_cpu(rsp
->scid
);
3814 flags
= __le16_to_cpu(rsp
->flags
);
3815 result
= __le16_to_cpu(rsp
->result
);
3817 BT_DBG("scid 0x%4.4x flags 0x%2.2x result 0x%2.2x len %d", scid
, flags
,
3820 chan
= l2cap_get_chan_by_scid(conn
, scid
);
3825 case L2CAP_CONF_SUCCESS
:
3826 l2cap_conf_rfc_get(chan
, rsp
->data
, len
);
3827 clear_bit(CONF_REM_CONF_PEND
, &chan
->conf_state
);
3830 case L2CAP_CONF_PENDING
:
3831 set_bit(CONF_REM_CONF_PEND
, &chan
->conf_state
);
3833 if (test_bit(CONF_LOC_CONF_PEND
, &chan
->conf_state
)) {
3836 len
= l2cap_parse_conf_rsp(chan
, rsp
->data
, len
,
3839 l2cap_send_disconn_req(conn
, chan
, ECONNRESET
);
3843 /* check compatibility */
3846 l2cap_send_efs_conf_rsp(chan
, buf
, cmd
->ident
,
3849 chan
->ident
= cmd
->ident
;
3853 case L2CAP_CONF_UNACCEPT
:
3854 if (chan
->num_conf_rsp
<= L2CAP_CONF_MAX_CONF_RSP
) {
3857 if (len
> sizeof(req
) - sizeof(struct l2cap_conf_req
)) {
3858 l2cap_send_disconn_req(conn
, chan
, ECONNRESET
);
3862 /* throw out any old stored conf requests */
3863 result
= L2CAP_CONF_SUCCESS
;
3864 len
= l2cap_parse_conf_rsp(chan
, rsp
->data
, len
,
3867 l2cap_send_disconn_req(conn
, chan
, ECONNRESET
);
3871 l2cap_send_cmd(conn
, l2cap_get_ident(conn
),
3872 L2CAP_CONF_REQ
, len
, req
);
3873 chan
->num_conf_req
++;
3874 if (result
!= L2CAP_CONF_SUCCESS
)
3880 l2cap_chan_set_err(chan
, ECONNRESET
);
3882 __set_chan_timer(chan
, L2CAP_DISC_REJ_TIMEOUT
);
3883 l2cap_send_disconn_req(conn
, chan
, ECONNRESET
);
3887 if (flags
& L2CAP_CONF_FLAG_CONTINUATION
)
3890 set_bit(CONF_INPUT_DONE
, &chan
->conf_state
);
3892 if (test_bit(CONF_OUTPUT_DONE
, &chan
->conf_state
)) {
3893 set_default_fcs(chan
);
3895 if (chan
->mode
== L2CAP_MODE_ERTM
||
3896 chan
->mode
== L2CAP_MODE_STREAMING
)
3897 err
= l2cap_ertm_init(chan
);
3900 l2cap_send_disconn_req(chan
->conn
, chan
, -err
);
3902 l2cap_chan_ready(chan
);
3906 l2cap_chan_unlock(chan
);
3910 static inline int l2cap_disconnect_req(struct l2cap_conn
*conn
,
3911 struct l2cap_cmd_hdr
*cmd
, u8
*data
)
3913 struct l2cap_disconn_req
*req
= (struct l2cap_disconn_req
*) data
;
3914 struct l2cap_disconn_rsp rsp
;
3916 struct l2cap_chan
*chan
;
3919 scid
= __le16_to_cpu(req
->scid
);
3920 dcid
= __le16_to_cpu(req
->dcid
);
3922 BT_DBG("scid 0x%4.4x dcid 0x%4.4x", scid
, dcid
);
3924 mutex_lock(&conn
->chan_lock
);
3926 chan
= __l2cap_get_chan_by_scid(conn
, dcid
);
3928 mutex_unlock(&conn
->chan_lock
);
3932 l2cap_chan_lock(chan
);
3936 rsp
.dcid
= cpu_to_le16(chan
->scid
);
3937 rsp
.scid
= cpu_to_le16(chan
->dcid
);
3938 l2cap_send_cmd(conn
, cmd
->ident
, L2CAP_DISCONN_RSP
, sizeof(rsp
), &rsp
);
3941 sk
->sk_shutdown
= SHUTDOWN_MASK
;
3944 l2cap_chan_hold(chan
);
3945 l2cap_chan_del(chan
, ECONNRESET
);
3947 l2cap_chan_unlock(chan
);
3949 chan
->ops
->close(chan
);
3950 l2cap_chan_put(chan
);
3952 mutex_unlock(&conn
->chan_lock
);
3957 static inline int l2cap_disconnect_rsp(struct l2cap_conn
*conn
,
3958 struct l2cap_cmd_hdr
*cmd
, u8
*data
)
3960 struct l2cap_disconn_rsp
*rsp
= (struct l2cap_disconn_rsp
*) data
;
3962 struct l2cap_chan
*chan
;
3964 scid
= __le16_to_cpu(rsp
->scid
);
3965 dcid
= __le16_to_cpu(rsp
->dcid
);
3967 BT_DBG("dcid 0x%4.4x scid 0x%4.4x", dcid
, scid
);
3969 mutex_lock(&conn
->chan_lock
);
3971 chan
= __l2cap_get_chan_by_scid(conn
, scid
);
3973 mutex_unlock(&conn
->chan_lock
);
3977 l2cap_chan_lock(chan
);
3979 l2cap_chan_hold(chan
);
3980 l2cap_chan_del(chan
, 0);
3982 l2cap_chan_unlock(chan
);
3984 chan
->ops
->close(chan
);
3985 l2cap_chan_put(chan
);
3987 mutex_unlock(&conn
->chan_lock
);
3992 static inline int l2cap_information_req(struct l2cap_conn
*conn
,
3993 struct l2cap_cmd_hdr
*cmd
, u8
*data
)
3995 struct l2cap_info_req
*req
= (struct l2cap_info_req
*) data
;
3998 type
= __le16_to_cpu(req
->type
);
4000 BT_DBG("type 0x%4.4x", type
);
4002 if (type
== L2CAP_IT_FEAT_MASK
) {
4004 u32 feat_mask
= l2cap_feat_mask
;
4005 struct l2cap_info_rsp
*rsp
= (struct l2cap_info_rsp
*) buf
;
4006 rsp
->type
= __constant_cpu_to_le16(L2CAP_IT_FEAT_MASK
);
4007 rsp
->result
= __constant_cpu_to_le16(L2CAP_IR_SUCCESS
);
4009 feat_mask
|= L2CAP_FEAT_ERTM
| L2CAP_FEAT_STREAMING
4012 feat_mask
|= L2CAP_FEAT_EXT_FLOW
4013 | L2CAP_FEAT_EXT_WINDOW
;
4015 put_unaligned_le32(feat_mask
, rsp
->data
);
4016 l2cap_send_cmd(conn
, cmd
->ident
, L2CAP_INFO_RSP
, sizeof(buf
),
4018 } else if (type
== L2CAP_IT_FIXED_CHAN
) {
4020 struct l2cap_info_rsp
*rsp
= (struct l2cap_info_rsp
*) buf
;
4023 l2cap_fixed_chan
[0] |= L2CAP_FC_A2MP
;
4025 l2cap_fixed_chan
[0] &= ~L2CAP_FC_A2MP
;
4027 rsp
->type
= __constant_cpu_to_le16(L2CAP_IT_FIXED_CHAN
);
4028 rsp
->result
= __constant_cpu_to_le16(L2CAP_IR_SUCCESS
);
4029 memcpy(rsp
->data
, l2cap_fixed_chan
, sizeof(l2cap_fixed_chan
));
4030 l2cap_send_cmd(conn
, cmd
->ident
, L2CAP_INFO_RSP
, sizeof(buf
),
4033 struct l2cap_info_rsp rsp
;
4034 rsp
.type
= cpu_to_le16(type
);
4035 rsp
.result
= __constant_cpu_to_le16(L2CAP_IR_NOTSUPP
);
4036 l2cap_send_cmd(conn
, cmd
->ident
, L2CAP_INFO_RSP
, sizeof(rsp
),
4043 static inline int l2cap_information_rsp(struct l2cap_conn
*conn
,
4044 struct l2cap_cmd_hdr
*cmd
, u8
*data
)
4046 struct l2cap_info_rsp
*rsp
= (struct l2cap_info_rsp
*) data
;
4049 type
= __le16_to_cpu(rsp
->type
);
4050 result
= __le16_to_cpu(rsp
->result
);
4052 BT_DBG("type 0x%4.4x result 0x%2.2x", type
, result
);
4054 /* L2CAP Info req/rsp are unbound to channels, add extra checks */
4055 if (cmd
->ident
!= conn
->info_ident
||
4056 conn
->info_state
& L2CAP_INFO_FEAT_MASK_REQ_DONE
)
4059 cancel_delayed_work(&conn
->info_timer
);
4061 if (result
!= L2CAP_IR_SUCCESS
) {
4062 conn
->info_state
|= L2CAP_INFO_FEAT_MASK_REQ_DONE
;
4063 conn
->info_ident
= 0;
4065 l2cap_conn_start(conn
);
4071 case L2CAP_IT_FEAT_MASK
:
4072 conn
->feat_mask
= get_unaligned_le32(rsp
->data
);
4074 if (conn
->feat_mask
& L2CAP_FEAT_FIXED_CHAN
) {
4075 struct l2cap_info_req req
;
4076 req
.type
= __constant_cpu_to_le16(L2CAP_IT_FIXED_CHAN
);
4078 conn
->info_ident
= l2cap_get_ident(conn
);
4080 l2cap_send_cmd(conn
, conn
->info_ident
,
4081 L2CAP_INFO_REQ
, sizeof(req
), &req
);
4083 conn
->info_state
|= L2CAP_INFO_FEAT_MASK_REQ_DONE
;
4084 conn
->info_ident
= 0;
4086 l2cap_conn_start(conn
);
4090 case L2CAP_IT_FIXED_CHAN
:
4091 conn
->fixed_chan_mask
= rsp
->data
[0];
4092 conn
->info_state
|= L2CAP_INFO_FEAT_MASK_REQ_DONE
;
4093 conn
->info_ident
= 0;
4095 l2cap_conn_start(conn
);
4102 static int l2cap_create_channel_req(struct l2cap_conn
*conn
,
4103 struct l2cap_cmd_hdr
*cmd
,
4104 u16 cmd_len
, void *data
)
4106 struct l2cap_create_chan_req
*req
= data
;
4107 struct l2cap_chan
*chan
;
4110 if (cmd_len
!= sizeof(*req
))
4116 psm
= le16_to_cpu(req
->psm
);
4117 scid
= le16_to_cpu(req
->scid
);
4119 BT_DBG("psm 0x%2.2x, scid 0x%4.4x, amp_id %d", psm
, scid
, req
->amp_id
);
4122 struct hci_dev
*hdev
;
4124 /* Validate AMP controller id */
4125 hdev
= hci_dev_get(req
->amp_id
);
4126 if (!hdev
|| hdev
->dev_type
!= HCI_AMP
||
4127 !test_bit(HCI_UP
, &hdev
->flags
)) {
4128 struct l2cap_create_chan_rsp rsp
;
4131 rsp
.scid
= cpu_to_le16(scid
);
4132 rsp
.result
= __constant_cpu_to_le16(L2CAP_CR_BAD_AMP
);
4133 rsp
.status
= __constant_cpu_to_le16(L2CAP_CS_NO_INFO
);
4135 l2cap_send_cmd(conn
, cmd
->ident
, L2CAP_CREATE_CHAN_RSP
,
4147 chan
= l2cap_connect(conn
, cmd
, data
, L2CAP_CREATE_CHAN_RSP
,
4153 static void l2cap_send_move_chan_rsp(struct l2cap_conn
*conn
, u8 ident
,
4154 u16 icid
, u16 result
)
4156 struct l2cap_move_chan_rsp rsp
;
4158 BT_DBG("icid 0x%4.4x, result 0x%4.4x", icid
, result
);
4160 rsp
.icid
= cpu_to_le16(icid
);
4161 rsp
.result
= cpu_to_le16(result
);
4163 l2cap_send_cmd(conn
, ident
, L2CAP_MOVE_CHAN_RSP
, sizeof(rsp
), &rsp
);
4166 static void l2cap_send_move_chan_cfm(struct l2cap_conn
*conn
,
4167 struct l2cap_chan
*chan
,
4168 u16 icid
, u16 result
)
4170 struct l2cap_move_chan_cfm cfm
;
4173 BT_DBG("icid 0x%4.4x, result 0x%4.4x", icid
, result
);
4175 ident
= l2cap_get_ident(conn
);
4177 chan
->ident
= ident
;
4179 cfm
.icid
= cpu_to_le16(icid
);
4180 cfm
.result
= cpu_to_le16(result
);
4182 l2cap_send_cmd(conn
, ident
, L2CAP_MOVE_CHAN_CFM
, sizeof(cfm
), &cfm
);
4185 static void l2cap_send_move_chan_cfm_rsp(struct l2cap_conn
*conn
, u8 ident
,
4188 struct l2cap_move_chan_cfm_rsp rsp
;
4190 BT_DBG("icid 0x%4.4x", icid
);
4192 rsp
.icid
= cpu_to_le16(icid
);
4193 l2cap_send_cmd(conn
, ident
, L2CAP_MOVE_CHAN_CFM_RSP
, sizeof(rsp
), &rsp
);
4196 static inline int l2cap_move_channel_req(struct l2cap_conn
*conn
,
4197 struct l2cap_cmd_hdr
*cmd
,
4198 u16 cmd_len
, void *data
)
4200 struct l2cap_move_chan_req
*req
= data
;
4201 struct l2cap_chan
*chan
;
4203 u16 result
= L2CAP_MR_NOT_ALLOWED
;
4205 if (cmd_len
!= sizeof(*req
))
4208 icid
= le16_to_cpu(req
->icid
);
4210 BT_DBG("icid 0x%4.4x, dest_amp_id %d", icid
, req
->dest_amp_id
);
4215 chan
= l2cap_get_chan_by_dcid(conn
, icid
);
4217 l2cap_send_move_chan_rsp(conn
, cmd
->ident
, icid
,
4218 L2CAP_MR_NOT_ALLOWED
);
4222 if (chan
->scid
< L2CAP_CID_DYN_START
||
4223 chan
->chan_policy
== BT_CHANNEL_POLICY_BREDR_ONLY
||
4224 (chan
->mode
!= L2CAP_MODE_ERTM
&&
4225 chan
->mode
!= L2CAP_MODE_STREAMING
)) {
4226 result
= L2CAP_MR_NOT_ALLOWED
;
4227 goto send_move_response
;
4230 if (chan
->local_amp_id
== req
->dest_amp_id
) {
4231 result
= L2CAP_MR_SAME_ID
;
4232 goto send_move_response
;
4235 if (req
->dest_amp_id
) {
4236 struct hci_dev
*hdev
;
4237 hdev
= hci_dev_get(req
->dest_amp_id
);
4238 if (!hdev
|| hdev
->dev_type
!= HCI_AMP
||
4239 !test_bit(HCI_UP
, &hdev
->flags
)) {
4243 result
= L2CAP_MR_BAD_ID
;
4244 goto send_move_response
;
4249 /* Detect a move collision. Only send a collision response
4250 * if this side has "lost", otherwise proceed with the move.
4251 * The winner has the larger bd_addr.
4253 if ((__chan_is_moving(chan
) ||
4254 chan
->move_role
!= L2CAP_MOVE_ROLE_NONE
) &&
4255 bacmp(conn
->src
, conn
->dst
) > 0) {
4256 result
= L2CAP_MR_COLLISION
;
4257 goto send_move_response
;
4260 chan
->ident
= cmd
->ident
;
4261 chan
->move_role
= L2CAP_MOVE_ROLE_RESPONDER
;
4262 l2cap_move_setup(chan
);
4263 chan
->move_id
= req
->dest_amp_id
;
4266 if (!req
->dest_amp_id
) {
4267 /* Moving to BR/EDR */
4268 if (test_bit(CONN_LOCAL_BUSY
, &chan
->conn_state
)) {
4269 chan
->move_state
= L2CAP_MOVE_WAIT_LOCAL_BUSY
;
4270 result
= L2CAP_MR_PEND
;
4272 chan
->move_state
= L2CAP_MOVE_WAIT_CONFIRM
;
4273 result
= L2CAP_MR_SUCCESS
;
4276 chan
->move_state
= L2CAP_MOVE_WAIT_PREPARE
;
4277 /* Placeholder - uncomment when amp functions are available */
4278 /*amp_accept_physical(chan, req->dest_amp_id);*/
4279 result
= L2CAP_MR_PEND
;
4283 l2cap_send_move_chan_rsp(conn
, cmd
->ident
, icid
, result
);
4285 l2cap_chan_unlock(chan
);
4290 static inline int l2cap_move_channel_rsp(struct l2cap_conn
*conn
,
4291 struct l2cap_cmd_hdr
*cmd
,
4292 u16 cmd_len
, void *data
)
4294 struct l2cap_move_chan_rsp
*rsp
= data
;
4297 if (cmd_len
!= sizeof(*rsp
))
4300 icid
= le16_to_cpu(rsp
->icid
);
4301 result
= le16_to_cpu(rsp
->result
);
4303 BT_DBG("icid 0x%4.4x, result 0x%4.4x", icid
, result
);
4305 /* Placeholder: Always unconfirmed */
4306 l2cap_send_move_chan_cfm(conn
, NULL
, icid
, L2CAP_MC_UNCONFIRMED
);
4311 static inline int l2cap_move_channel_confirm(struct l2cap_conn
*conn
,
4312 struct l2cap_cmd_hdr
*cmd
,
4313 u16 cmd_len
, void *data
)
4315 struct l2cap_move_chan_cfm
*cfm
= data
;
4318 if (cmd_len
!= sizeof(*cfm
))
4321 icid
= le16_to_cpu(cfm
->icid
);
4322 result
= le16_to_cpu(cfm
->result
);
4324 BT_DBG("icid 0x%4.4x, result 0x%4.4x", icid
, result
);
4326 l2cap_send_move_chan_cfm_rsp(conn
, cmd
->ident
, icid
);
4331 static inline int l2cap_move_channel_confirm_rsp(struct l2cap_conn
*conn
,
4332 struct l2cap_cmd_hdr
*cmd
,
4333 u16 cmd_len
, void *data
)
4335 struct l2cap_move_chan_cfm_rsp
*rsp
= data
;
4338 if (cmd_len
!= sizeof(*rsp
))
4341 icid
= le16_to_cpu(rsp
->icid
);
4343 BT_DBG("icid 0x%4.4x", icid
);
4348 static inline int l2cap_check_conn_param(u16 min
, u16 max
, u16 latency
,
4353 if (min
> max
|| min
< 6 || max
> 3200)
4356 if (to_multiplier
< 10 || to_multiplier
> 3200)
4359 if (max
>= to_multiplier
* 8)
4362 max_latency
= (to_multiplier
* 8 / max
) - 1;
4363 if (latency
> 499 || latency
> max_latency
)
4369 static inline int l2cap_conn_param_update_req(struct l2cap_conn
*conn
,
4370 struct l2cap_cmd_hdr
*cmd
,
4373 struct hci_conn
*hcon
= conn
->hcon
;
4374 struct l2cap_conn_param_update_req
*req
;
4375 struct l2cap_conn_param_update_rsp rsp
;
4376 u16 min
, max
, latency
, to_multiplier
, cmd_len
;
4379 if (!(hcon
->link_mode
& HCI_LM_MASTER
))
4382 cmd_len
= __le16_to_cpu(cmd
->len
);
4383 if (cmd_len
!= sizeof(struct l2cap_conn_param_update_req
))
4386 req
= (struct l2cap_conn_param_update_req
*) data
;
4387 min
= __le16_to_cpu(req
->min
);
4388 max
= __le16_to_cpu(req
->max
);
4389 latency
= __le16_to_cpu(req
->latency
);
4390 to_multiplier
= __le16_to_cpu(req
->to_multiplier
);
4392 BT_DBG("min 0x%4.4x max 0x%4.4x latency: 0x%4.4x Timeout: 0x%4.4x",
4393 min
, max
, latency
, to_multiplier
);
4395 memset(&rsp
, 0, sizeof(rsp
));
4397 err
= l2cap_check_conn_param(min
, max
, latency
, to_multiplier
);
4399 rsp
.result
= __constant_cpu_to_le16(L2CAP_CONN_PARAM_REJECTED
);
4401 rsp
.result
= __constant_cpu_to_le16(L2CAP_CONN_PARAM_ACCEPTED
);
4403 l2cap_send_cmd(conn
, cmd
->ident
, L2CAP_CONN_PARAM_UPDATE_RSP
,
4407 hci_le_conn_update(hcon
, min
, max
, latency
, to_multiplier
);
4412 static inline int l2cap_bredr_sig_cmd(struct l2cap_conn
*conn
,
4413 struct l2cap_cmd_hdr
*cmd
, u16 cmd_len
,
4418 switch (cmd
->code
) {
4419 case L2CAP_COMMAND_REJ
:
4420 l2cap_command_rej(conn
, cmd
, data
);
4423 case L2CAP_CONN_REQ
:
4424 err
= l2cap_connect_req(conn
, cmd
, data
);
4427 case L2CAP_CONN_RSP
:
4428 case L2CAP_CREATE_CHAN_RSP
:
4429 err
= l2cap_connect_create_rsp(conn
, cmd
, data
);
4432 case L2CAP_CONF_REQ
:
4433 err
= l2cap_config_req(conn
, cmd
, cmd_len
, data
);
4436 case L2CAP_CONF_RSP
:
4437 err
= l2cap_config_rsp(conn
, cmd
, data
);
4440 case L2CAP_DISCONN_REQ
:
4441 err
= l2cap_disconnect_req(conn
, cmd
, data
);
4444 case L2CAP_DISCONN_RSP
:
4445 err
= l2cap_disconnect_rsp(conn
, cmd
, data
);
4448 case L2CAP_ECHO_REQ
:
4449 l2cap_send_cmd(conn
, cmd
->ident
, L2CAP_ECHO_RSP
, cmd_len
, data
);
4452 case L2CAP_ECHO_RSP
:
4455 case L2CAP_INFO_REQ
:
4456 err
= l2cap_information_req(conn
, cmd
, data
);
4459 case L2CAP_INFO_RSP
:
4460 err
= l2cap_information_rsp(conn
, cmd
, data
);
4463 case L2CAP_CREATE_CHAN_REQ
:
4464 err
= l2cap_create_channel_req(conn
, cmd
, cmd_len
, data
);
4467 case L2CAP_MOVE_CHAN_REQ
:
4468 err
= l2cap_move_channel_req(conn
, cmd
, cmd_len
, data
);
4471 case L2CAP_MOVE_CHAN_RSP
:
4472 err
= l2cap_move_channel_rsp(conn
, cmd
, cmd_len
, data
);
4475 case L2CAP_MOVE_CHAN_CFM
:
4476 err
= l2cap_move_channel_confirm(conn
, cmd
, cmd_len
, data
);
4479 case L2CAP_MOVE_CHAN_CFM_RSP
:
4480 err
= l2cap_move_channel_confirm_rsp(conn
, cmd
, cmd_len
, data
);
4484 BT_ERR("Unknown BR/EDR signaling command 0x%2.2x", cmd
->code
);
4492 static inline int l2cap_le_sig_cmd(struct l2cap_conn
*conn
,
4493 struct l2cap_cmd_hdr
*cmd
, u8
*data
)
4495 switch (cmd
->code
) {
4496 case L2CAP_COMMAND_REJ
:
4499 case L2CAP_CONN_PARAM_UPDATE_REQ
:
4500 return l2cap_conn_param_update_req(conn
, cmd
, data
);
4502 case L2CAP_CONN_PARAM_UPDATE_RSP
:
4506 BT_ERR("Unknown LE signaling command 0x%2.2x", cmd
->code
);
4511 static inline void l2cap_sig_channel(struct l2cap_conn
*conn
,
4512 struct sk_buff
*skb
)
4514 u8
*data
= skb
->data
;
4516 struct l2cap_cmd_hdr cmd
;
4519 l2cap_raw_recv(conn
, skb
);
4521 while (len
>= L2CAP_CMD_HDR_SIZE
) {
4523 memcpy(&cmd
, data
, L2CAP_CMD_HDR_SIZE
);
4524 data
+= L2CAP_CMD_HDR_SIZE
;
4525 len
-= L2CAP_CMD_HDR_SIZE
;
4527 cmd_len
= le16_to_cpu(cmd
.len
);
4529 BT_DBG("code 0x%2.2x len %d id 0x%2.2x", cmd
.code
, cmd_len
,
4532 if (cmd_len
> len
|| !cmd
.ident
) {
4533 BT_DBG("corrupted command");
4537 if (conn
->hcon
->type
== LE_LINK
)
4538 err
= l2cap_le_sig_cmd(conn
, &cmd
, data
);
4540 err
= l2cap_bredr_sig_cmd(conn
, &cmd
, cmd_len
, data
);
4543 struct l2cap_cmd_rej_unk rej
;
4545 BT_ERR("Wrong link type (%d)", err
);
4547 /* FIXME: Map err to a valid reason */
4548 rej
.reason
= __constant_cpu_to_le16(L2CAP_REJ_NOT_UNDERSTOOD
);
4549 l2cap_send_cmd(conn
, cmd
.ident
, L2CAP_COMMAND_REJ
,
4560 static int l2cap_check_fcs(struct l2cap_chan
*chan
, struct sk_buff
*skb
)
4562 u16 our_fcs
, rcv_fcs
;
4565 if (test_bit(FLAG_EXT_CTRL
, &chan
->flags
))
4566 hdr_size
= L2CAP_EXT_HDR_SIZE
;
4568 hdr_size
= L2CAP_ENH_HDR_SIZE
;
4570 if (chan
->fcs
== L2CAP_FCS_CRC16
) {
4571 skb_trim(skb
, skb
->len
- L2CAP_FCS_SIZE
);
4572 rcv_fcs
= get_unaligned_le16(skb
->data
+ skb
->len
);
4573 our_fcs
= crc16(0, skb
->data
- hdr_size
, skb
->len
+ hdr_size
);
4575 if (our_fcs
!= rcv_fcs
)
4581 static void l2cap_send_i_or_rr_or_rnr(struct l2cap_chan
*chan
)
4583 struct l2cap_ctrl control
;
4585 BT_DBG("chan %p", chan
);
4587 memset(&control
, 0, sizeof(control
));
4590 control
.reqseq
= chan
->buffer_seq
;
4591 set_bit(CONN_SEND_FBIT
, &chan
->conn_state
);
4593 if (test_bit(CONN_LOCAL_BUSY
, &chan
->conn_state
)) {
4594 control
.super
= L2CAP_SUPER_RNR
;
4595 l2cap_send_sframe(chan
, &control
);
4598 if (test_and_clear_bit(CONN_REMOTE_BUSY
, &chan
->conn_state
) &&
4599 chan
->unacked_frames
> 0)
4600 __set_retrans_timer(chan
);
4602 /* Send pending iframes */
4603 l2cap_ertm_send(chan
);
4605 if (!test_bit(CONN_LOCAL_BUSY
, &chan
->conn_state
) &&
4606 test_bit(CONN_SEND_FBIT
, &chan
->conn_state
)) {
4607 /* F-bit wasn't sent in an s-frame or i-frame yet, so
4610 control
.super
= L2CAP_SUPER_RR
;
4611 l2cap_send_sframe(chan
, &control
);
4615 static void append_skb_frag(struct sk_buff
*skb
, struct sk_buff
*new_frag
,
4616 struct sk_buff
**last_frag
)
4618 /* skb->len reflects data in skb as well as all fragments
4619 * skb->data_len reflects only data in fragments
4621 if (!skb_has_frag_list(skb
))
4622 skb_shinfo(skb
)->frag_list
= new_frag
;
4624 new_frag
->next
= NULL
;
4626 (*last_frag
)->next
= new_frag
;
4627 *last_frag
= new_frag
;
4629 skb
->len
+= new_frag
->len
;
4630 skb
->data_len
+= new_frag
->len
;
4631 skb
->truesize
+= new_frag
->truesize
;
4634 static int l2cap_reassemble_sdu(struct l2cap_chan
*chan
, struct sk_buff
*skb
,
4635 struct l2cap_ctrl
*control
)
4639 switch (control
->sar
) {
4640 case L2CAP_SAR_UNSEGMENTED
:
4644 err
= chan
->ops
->recv(chan
, skb
);
4647 case L2CAP_SAR_START
:
4651 chan
->sdu_len
= get_unaligned_le16(skb
->data
);
4652 skb_pull(skb
, L2CAP_SDULEN_SIZE
);
4654 if (chan
->sdu_len
> chan
->imtu
) {
4659 if (skb
->len
>= chan
->sdu_len
)
4663 chan
->sdu_last_frag
= skb
;
4669 case L2CAP_SAR_CONTINUE
:
4673 append_skb_frag(chan
->sdu
, skb
,
4674 &chan
->sdu_last_frag
);
4677 if (chan
->sdu
->len
>= chan
->sdu_len
)
4687 append_skb_frag(chan
->sdu
, skb
,
4688 &chan
->sdu_last_frag
);
4691 if (chan
->sdu
->len
!= chan
->sdu_len
)
4694 err
= chan
->ops
->recv(chan
, chan
->sdu
);
4697 /* Reassembly complete */
4699 chan
->sdu_last_frag
= NULL
;
4707 kfree_skb(chan
->sdu
);
4709 chan
->sdu_last_frag
= NULL
;
4716 void l2cap_chan_busy(struct l2cap_chan
*chan
, int busy
)
4720 if (chan
->mode
!= L2CAP_MODE_ERTM
)
4723 event
= busy
? L2CAP_EV_LOCAL_BUSY_DETECTED
: L2CAP_EV_LOCAL_BUSY_CLEAR
;
4724 l2cap_tx(chan
, NULL
, NULL
, event
);
4727 static int l2cap_rx_queued_iframes(struct l2cap_chan
*chan
)
4730 /* Pass sequential frames to l2cap_reassemble_sdu()
4731 * until a gap is encountered.
4734 BT_DBG("chan %p", chan
);
4736 while (!test_bit(CONN_LOCAL_BUSY
, &chan
->conn_state
)) {
4737 struct sk_buff
*skb
;
4738 BT_DBG("Searching for skb with txseq %d (queue len %d)",
4739 chan
->buffer_seq
, skb_queue_len(&chan
->srej_q
));
4741 skb
= l2cap_ertm_seq_in_queue(&chan
->srej_q
, chan
->buffer_seq
);
4746 skb_unlink(skb
, &chan
->srej_q
);
4747 chan
->buffer_seq
= __next_seq(chan
, chan
->buffer_seq
);
4748 err
= l2cap_reassemble_sdu(chan
, skb
, &bt_cb(skb
)->control
);
4753 if (skb_queue_empty(&chan
->srej_q
)) {
4754 chan
->rx_state
= L2CAP_RX_STATE_RECV
;
4755 l2cap_send_ack(chan
);
4761 static void l2cap_handle_srej(struct l2cap_chan
*chan
,
4762 struct l2cap_ctrl
*control
)
4764 struct sk_buff
*skb
;
4766 BT_DBG("chan %p, control %p", chan
, control
);
4768 if (control
->reqseq
== chan
->next_tx_seq
) {
4769 BT_DBG("Invalid reqseq %d, disconnecting", control
->reqseq
);
4770 l2cap_send_disconn_req(chan
->conn
, chan
, ECONNRESET
);
4774 skb
= l2cap_ertm_seq_in_queue(&chan
->tx_q
, control
->reqseq
);
4777 BT_DBG("Seq %d not available for retransmission",
4782 if (chan
->max_tx
!= 0 && bt_cb(skb
)->control
.retries
>= chan
->max_tx
) {
4783 BT_DBG("Retry limit exceeded (%d)", chan
->max_tx
);
4784 l2cap_send_disconn_req(chan
->conn
, chan
, ECONNRESET
);
4788 clear_bit(CONN_REMOTE_BUSY
, &chan
->conn_state
);
4790 if (control
->poll
) {
4791 l2cap_pass_to_tx(chan
, control
);
4793 set_bit(CONN_SEND_FBIT
, &chan
->conn_state
);
4794 l2cap_retransmit(chan
, control
);
4795 l2cap_ertm_send(chan
);
4797 if (chan
->tx_state
== L2CAP_TX_STATE_WAIT_F
) {
4798 set_bit(CONN_SREJ_ACT
, &chan
->conn_state
);
4799 chan
->srej_save_reqseq
= control
->reqseq
;
4802 l2cap_pass_to_tx_fbit(chan
, control
);
4804 if (control
->final
) {
4805 if (chan
->srej_save_reqseq
!= control
->reqseq
||
4806 !test_and_clear_bit(CONN_SREJ_ACT
,
4808 l2cap_retransmit(chan
, control
);
4810 l2cap_retransmit(chan
, control
);
4811 if (chan
->tx_state
== L2CAP_TX_STATE_WAIT_F
) {
4812 set_bit(CONN_SREJ_ACT
, &chan
->conn_state
);
4813 chan
->srej_save_reqseq
= control
->reqseq
;
4819 static void l2cap_handle_rej(struct l2cap_chan
*chan
,
4820 struct l2cap_ctrl
*control
)
4822 struct sk_buff
*skb
;
4824 BT_DBG("chan %p, control %p", chan
, control
);
4826 if (control
->reqseq
== chan
->next_tx_seq
) {
4827 BT_DBG("Invalid reqseq %d, disconnecting", control
->reqseq
);
4828 l2cap_send_disconn_req(chan
->conn
, chan
, ECONNRESET
);
4832 skb
= l2cap_ertm_seq_in_queue(&chan
->tx_q
, control
->reqseq
);
4834 if (chan
->max_tx
&& skb
&&
4835 bt_cb(skb
)->control
.retries
>= chan
->max_tx
) {
4836 BT_DBG("Retry limit exceeded (%d)", chan
->max_tx
);
4837 l2cap_send_disconn_req(chan
->conn
, chan
, ECONNRESET
);
4841 clear_bit(CONN_REMOTE_BUSY
, &chan
->conn_state
);
4843 l2cap_pass_to_tx(chan
, control
);
4845 if (control
->final
) {
4846 if (!test_and_clear_bit(CONN_REJ_ACT
, &chan
->conn_state
))
4847 l2cap_retransmit_all(chan
, control
);
4849 l2cap_retransmit_all(chan
, control
);
4850 l2cap_ertm_send(chan
);
4851 if (chan
->tx_state
== L2CAP_TX_STATE_WAIT_F
)
4852 set_bit(CONN_REJ_ACT
, &chan
->conn_state
);
4856 static u8
l2cap_classify_txseq(struct l2cap_chan
*chan
, u16 txseq
)
4858 BT_DBG("chan %p, txseq %d", chan
, txseq
);
4860 BT_DBG("last_acked_seq %d, expected_tx_seq %d", chan
->last_acked_seq
,
4861 chan
->expected_tx_seq
);
4863 if (chan
->rx_state
== L2CAP_RX_STATE_SREJ_SENT
) {
4864 if (__seq_offset(chan
, txseq
, chan
->last_acked_seq
) >=
4866 /* See notes below regarding "double poll" and
4869 if (chan
->tx_win
<= ((chan
->tx_win_max
+ 1) >> 1)) {
4870 BT_DBG("Invalid/Ignore - after SREJ");
4871 return L2CAP_TXSEQ_INVALID_IGNORE
;
4873 BT_DBG("Invalid - in window after SREJ sent");
4874 return L2CAP_TXSEQ_INVALID
;
4878 if (chan
->srej_list
.head
== txseq
) {
4879 BT_DBG("Expected SREJ");
4880 return L2CAP_TXSEQ_EXPECTED_SREJ
;
4883 if (l2cap_ertm_seq_in_queue(&chan
->srej_q
, txseq
)) {
4884 BT_DBG("Duplicate SREJ - txseq already stored");
4885 return L2CAP_TXSEQ_DUPLICATE_SREJ
;
4888 if (l2cap_seq_list_contains(&chan
->srej_list
, txseq
)) {
4889 BT_DBG("Unexpected SREJ - not requested");
4890 return L2CAP_TXSEQ_UNEXPECTED_SREJ
;
4894 if (chan
->expected_tx_seq
== txseq
) {
4895 if (__seq_offset(chan
, txseq
, chan
->last_acked_seq
) >=
4897 BT_DBG("Invalid - txseq outside tx window");
4898 return L2CAP_TXSEQ_INVALID
;
4901 return L2CAP_TXSEQ_EXPECTED
;
4905 if (__seq_offset(chan
, txseq
, chan
->last_acked_seq
) <
4906 __seq_offset(chan
, chan
->expected_tx_seq
, chan
->last_acked_seq
)) {
4907 BT_DBG("Duplicate - expected_tx_seq later than txseq");
4908 return L2CAP_TXSEQ_DUPLICATE
;
4911 if (__seq_offset(chan
, txseq
, chan
->last_acked_seq
) >= chan
->tx_win
) {
4912 /* A source of invalid packets is a "double poll" condition,
4913 * where delays cause us to send multiple poll packets. If
4914 * the remote stack receives and processes both polls,
4915 * sequence numbers can wrap around in such a way that a
4916 * resent frame has a sequence number that looks like new data
4917 * with a sequence gap. This would trigger an erroneous SREJ
4920 * Fortunately, this is impossible with a tx window that's
4921 * less than half of the maximum sequence number, which allows
4922 * invalid frames to be safely ignored.
4924 * With tx window sizes greater than half of the tx window
4925 * maximum, the frame is invalid and cannot be ignored. This
4926 * causes a disconnect.
4929 if (chan
->tx_win
<= ((chan
->tx_win_max
+ 1) >> 1)) {
4930 BT_DBG("Invalid/Ignore - txseq outside tx window");
4931 return L2CAP_TXSEQ_INVALID_IGNORE
;
4933 BT_DBG("Invalid - txseq outside tx window");
4934 return L2CAP_TXSEQ_INVALID
;
4937 BT_DBG("Unexpected - txseq indicates missing frames");
4938 return L2CAP_TXSEQ_UNEXPECTED
;
4942 static int l2cap_rx_state_recv(struct l2cap_chan
*chan
,
4943 struct l2cap_ctrl
*control
,
4944 struct sk_buff
*skb
, u8 event
)
4947 bool skb_in_use
= 0;
4949 BT_DBG("chan %p, control %p, skb %p, event %d", chan
, control
, skb
,
4953 case L2CAP_EV_RECV_IFRAME
:
4954 switch (l2cap_classify_txseq(chan
, control
->txseq
)) {
4955 case L2CAP_TXSEQ_EXPECTED
:
4956 l2cap_pass_to_tx(chan
, control
);
4958 if (test_bit(CONN_LOCAL_BUSY
, &chan
->conn_state
)) {
4959 BT_DBG("Busy, discarding expected seq %d",
4964 chan
->expected_tx_seq
= __next_seq(chan
,
4967 chan
->buffer_seq
= chan
->expected_tx_seq
;
4970 err
= l2cap_reassemble_sdu(chan
, skb
, control
);
4974 if (control
->final
) {
4975 if (!test_and_clear_bit(CONN_REJ_ACT
,
4976 &chan
->conn_state
)) {
4978 l2cap_retransmit_all(chan
, control
);
4979 l2cap_ertm_send(chan
);
4983 if (!test_bit(CONN_LOCAL_BUSY
, &chan
->conn_state
))
4984 l2cap_send_ack(chan
);
4986 case L2CAP_TXSEQ_UNEXPECTED
:
4987 l2cap_pass_to_tx(chan
, control
);
4989 /* Can't issue SREJ frames in the local busy state.
4990 * Drop this frame, it will be seen as missing
4991 * when local busy is exited.
4993 if (test_bit(CONN_LOCAL_BUSY
, &chan
->conn_state
)) {
4994 BT_DBG("Busy, discarding unexpected seq %d",
4999 /* There was a gap in the sequence, so an SREJ
5000 * must be sent for each missing frame. The
5001 * current frame is stored for later use.
5003 skb_queue_tail(&chan
->srej_q
, skb
);
5005 BT_DBG("Queued %p (queue len %d)", skb
,
5006 skb_queue_len(&chan
->srej_q
));
5008 clear_bit(CONN_SREJ_ACT
, &chan
->conn_state
);
5009 l2cap_seq_list_clear(&chan
->srej_list
);
5010 l2cap_send_srej(chan
, control
->txseq
);
5012 chan
->rx_state
= L2CAP_RX_STATE_SREJ_SENT
;
5014 case L2CAP_TXSEQ_DUPLICATE
:
5015 l2cap_pass_to_tx(chan
, control
);
5017 case L2CAP_TXSEQ_INVALID_IGNORE
:
5019 case L2CAP_TXSEQ_INVALID
:
5021 l2cap_send_disconn_req(chan
->conn
, chan
,
5026 case L2CAP_EV_RECV_RR
:
5027 l2cap_pass_to_tx(chan
, control
);
5028 if (control
->final
) {
5029 clear_bit(CONN_REMOTE_BUSY
, &chan
->conn_state
);
5031 if (!test_and_clear_bit(CONN_REJ_ACT
,
5032 &chan
->conn_state
)) {
5034 l2cap_retransmit_all(chan
, control
);
5037 l2cap_ertm_send(chan
);
5038 } else if (control
->poll
) {
5039 l2cap_send_i_or_rr_or_rnr(chan
);
5041 if (test_and_clear_bit(CONN_REMOTE_BUSY
,
5042 &chan
->conn_state
) &&
5043 chan
->unacked_frames
)
5044 __set_retrans_timer(chan
);
5046 l2cap_ertm_send(chan
);
5049 case L2CAP_EV_RECV_RNR
:
5050 set_bit(CONN_REMOTE_BUSY
, &chan
->conn_state
);
5051 l2cap_pass_to_tx(chan
, control
);
5052 if (control
&& control
->poll
) {
5053 set_bit(CONN_SEND_FBIT
, &chan
->conn_state
);
5054 l2cap_send_rr_or_rnr(chan
, 0);
5056 __clear_retrans_timer(chan
);
5057 l2cap_seq_list_clear(&chan
->retrans_list
);
5059 case L2CAP_EV_RECV_REJ
:
5060 l2cap_handle_rej(chan
, control
);
5062 case L2CAP_EV_RECV_SREJ
:
5063 l2cap_handle_srej(chan
, control
);
5069 if (skb
&& !skb_in_use
) {
5070 BT_DBG("Freeing %p", skb
);
5077 static int l2cap_rx_state_srej_sent(struct l2cap_chan
*chan
,
5078 struct l2cap_ctrl
*control
,
5079 struct sk_buff
*skb
, u8 event
)
5082 u16 txseq
= control
->txseq
;
5083 bool skb_in_use
= 0;
5085 BT_DBG("chan %p, control %p, skb %p, event %d", chan
, control
, skb
,
5089 case L2CAP_EV_RECV_IFRAME
:
5090 switch (l2cap_classify_txseq(chan
, txseq
)) {
5091 case L2CAP_TXSEQ_EXPECTED
:
5092 /* Keep frame for reassembly later */
5093 l2cap_pass_to_tx(chan
, control
);
5094 skb_queue_tail(&chan
->srej_q
, skb
);
5096 BT_DBG("Queued %p (queue len %d)", skb
,
5097 skb_queue_len(&chan
->srej_q
));
5099 chan
->expected_tx_seq
= __next_seq(chan
, txseq
);
5101 case L2CAP_TXSEQ_EXPECTED_SREJ
:
5102 l2cap_seq_list_pop(&chan
->srej_list
);
5104 l2cap_pass_to_tx(chan
, control
);
5105 skb_queue_tail(&chan
->srej_q
, skb
);
5107 BT_DBG("Queued %p (queue len %d)", skb
,
5108 skb_queue_len(&chan
->srej_q
));
5110 err
= l2cap_rx_queued_iframes(chan
);
5115 case L2CAP_TXSEQ_UNEXPECTED
:
5116 /* Got a frame that can't be reassembled yet.
5117 * Save it for later, and send SREJs to cover
5118 * the missing frames.
5120 skb_queue_tail(&chan
->srej_q
, skb
);
5122 BT_DBG("Queued %p (queue len %d)", skb
,
5123 skb_queue_len(&chan
->srej_q
));
5125 l2cap_pass_to_tx(chan
, control
);
5126 l2cap_send_srej(chan
, control
->txseq
);
5128 case L2CAP_TXSEQ_UNEXPECTED_SREJ
:
5129 /* This frame was requested with an SREJ, but
5130 * some expected retransmitted frames are
5131 * missing. Request retransmission of missing
5134 skb_queue_tail(&chan
->srej_q
, skb
);
5136 BT_DBG("Queued %p (queue len %d)", skb
,
5137 skb_queue_len(&chan
->srej_q
));
5139 l2cap_pass_to_tx(chan
, control
);
5140 l2cap_send_srej_list(chan
, control
->txseq
);
5142 case L2CAP_TXSEQ_DUPLICATE_SREJ
:
5143 /* We've already queued this frame. Drop this copy. */
5144 l2cap_pass_to_tx(chan
, control
);
5146 case L2CAP_TXSEQ_DUPLICATE
:
5147 /* Expecting a later sequence number, so this frame
5148 * was already received. Ignore it completely.
5151 case L2CAP_TXSEQ_INVALID_IGNORE
:
5153 case L2CAP_TXSEQ_INVALID
:
5155 l2cap_send_disconn_req(chan
->conn
, chan
,
5160 case L2CAP_EV_RECV_RR
:
5161 l2cap_pass_to_tx(chan
, control
);
5162 if (control
->final
) {
5163 clear_bit(CONN_REMOTE_BUSY
, &chan
->conn_state
);
5165 if (!test_and_clear_bit(CONN_REJ_ACT
,
5166 &chan
->conn_state
)) {
5168 l2cap_retransmit_all(chan
, control
);
5171 l2cap_ertm_send(chan
);
5172 } else if (control
->poll
) {
5173 if (test_and_clear_bit(CONN_REMOTE_BUSY
,
5174 &chan
->conn_state
) &&
5175 chan
->unacked_frames
) {
5176 __set_retrans_timer(chan
);
5179 set_bit(CONN_SEND_FBIT
, &chan
->conn_state
);
5180 l2cap_send_srej_tail(chan
);
5182 if (test_and_clear_bit(CONN_REMOTE_BUSY
,
5183 &chan
->conn_state
) &&
5184 chan
->unacked_frames
)
5185 __set_retrans_timer(chan
);
5187 l2cap_send_ack(chan
);
5190 case L2CAP_EV_RECV_RNR
:
5191 set_bit(CONN_REMOTE_BUSY
, &chan
->conn_state
);
5192 l2cap_pass_to_tx(chan
, control
);
5193 if (control
->poll
) {
5194 l2cap_send_srej_tail(chan
);
5196 struct l2cap_ctrl rr_control
;
5197 memset(&rr_control
, 0, sizeof(rr_control
));
5198 rr_control
.sframe
= 1;
5199 rr_control
.super
= L2CAP_SUPER_RR
;
5200 rr_control
.reqseq
= chan
->buffer_seq
;
5201 l2cap_send_sframe(chan
, &rr_control
);
5205 case L2CAP_EV_RECV_REJ
:
5206 l2cap_handle_rej(chan
, control
);
5208 case L2CAP_EV_RECV_SREJ
:
5209 l2cap_handle_srej(chan
, control
);
5213 if (skb
&& !skb_in_use
) {
5214 BT_DBG("Freeing %p", skb
);
5221 static bool __valid_reqseq(struct l2cap_chan
*chan
, u16 reqseq
)
5223 /* Make sure reqseq is for a packet that has been sent but not acked */
5226 unacked
= __seq_offset(chan
, chan
->next_tx_seq
, chan
->expected_ack_seq
);
5227 return __seq_offset(chan
, chan
->next_tx_seq
, reqseq
) <= unacked
;
5230 static int l2cap_rx(struct l2cap_chan
*chan
, struct l2cap_ctrl
*control
,
5231 struct sk_buff
*skb
, u8 event
)
5235 BT_DBG("chan %p, control %p, skb %p, event %d, state %d", chan
,
5236 control
, skb
, event
, chan
->rx_state
);
5238 if (__valid_reqseq(chan
, control
->reqseq
)) {
5239 switch (chan
->rx_state
) {
5240 case L2CAP_RX_STATE_RECV
:
5241 err
= l2cap_rx_state_recv(chan
, control
, skb
, event
);
5243 case L2CAP_RX_STATE_SREJ_SENT
:
5244 err
= l2cap_rx_state_srej_sent(chan
, control
, skb
,
5252 BT_DBG("Invalid reqseq %d (next_tx_seq %d, expected_ack_seq %d",
5253 control
->reqseq
, chan
->next_tx_seq
,
5254 chan
->expected_ack_seq
);
5255 l2cap_send_disconn_req(chan
->conn
, chan
, ECONNRESET
);
5261 static int l2cap_stream_rx(struct l2cap_chan
*chan
, struct l2cap_ctrl
*control
,
5262 struct sk_buff
*skb
)
5266 BT_DBG("chan %p, control %p, skb %p, state %d", chan
, control
, skb
,
5269 if (l2cap_classify_txseq(chan
, control
->txseq
) ==
5270 L2CAP_TXSEQ_EXPECTED
) {
5271 l2cap_pass_to_tx(chan
, control
);
5273 BT_DBG("buffer_seq %d->%d", chan
->buffer_seq
,
5274 __next_seq(chan
, chan
->buffer_seq
));
5276 chan
->buffer_seq
= __next_seq(chan
, chan
->buffer_seq
);
5278 l2cap_reassemble_sdu(chan
, skb
, control
);
5281 kfree_skb(chan
->sdu
);
5284 chan
->sdu_last_frag
= NULL
;
5288 BT_DBG("Freeing %p", skb
);
5293 chan
->last_acked_seq
= control
->txseq
;
5294 chan
->expected_tx_seq
= __next_seq(chan
, control
->txseq
);
5299 static int l2cap_data_rcv(struct l2cap_chan
*chan
, struct sk_buff
*skb
)
5301 struct l2cap_ctrl
*control
= &bt_cb(skb
)->control
;
5305 __unpack_control(chan
, skb
);
5310 * We can just drop the corrupted I-frame here.
5311 * Receiver will miss it and start proper recovery
5312 * procedures and ask for retransmission.
5314 if (l2cap_check_fcs(chan
, skb
))
5317 if (!control
->sframe
&& control
->sar
== L2CAP_SAR_START
)
5318 len
-= L2CAP_SDULEN_SIZE
;
5320 if (chan
->fcs
== L2CAP_FCS_CRC16
)
5321 len
-= L2CAP_FCS_SIZE
;
5323 if (len
> chan
->mps
) {
5324 l2cap_send_disconn_req(chan
->conn
, chan
, ECONNRESET
);
5328 if (!control
->sframe
) {
5331 BT_DBG("iframe sar %d, reqseq %d, final %d, txseq %d",
5332 control
->sar
, control
->reqseq
, control
->final
,
5335 /* Validate F-bit - F=0 always valid, F=1 only
5336 * valid in TX WAIT_F
5338 if (control
->final
&& chan
->tx_state
!= L2CAP_TX_STATE_WAIT_F
)
5341 if (chan
->mode
!= L2CAP_MODE_STREAMING
) {
5342 event
= L2CAP_EV_RECV_IFRAME
;
5343 err
= l2cap_rx(chan
, control
, skb
, event
);
5345 err
= l2cap_stream_rx(chan
, control
, skb
);
5349 l2cap_send_disconn_req(chan
->conn
, chan
,
5352 const u8 rx_func_to_event
[4] = {
5353 L2CAP_EV_RECV_RR
, L2CAP_EV_RECV_REJ
,
5354 L2CAP_EV_RECV_RNR
, L2CAP_EV_RECV_SREJ
5357 /* Only I-frames are expected in streaming mode */
5358 if (chan
->mode
== L2CAP_MODE_STREAMING
)
5361 BT_DBG("sframe reqseq %d, final %d, poll %d, super %d",
5362 control
->reqseq
, control
->final
, control
->poll
,
5367 l2cap_send_disconn_req(chan
->conn
, chan
, ECONNRESET
);
5371 /* Validate F and P bits */
5372 if (control
->final
&& (control
->poll
||
5373 chan
->tx_state
!= L2CAP_TX_STATE_WAIT_F
))
5376 event
= rx_func_to_event
[control
->super
];
5377 if (l2cap_rx(chan
, control
, skb
, event
))
5378 l2cap_send_disconn_req(chan
->conn
, chan
, ECONNRESET
);
5388 static void l2cap_data_channel(struct l2cap_conn
*conn
, u16 cid
,
5389 struct sk_buff
*skb
)
5391 struct l2cap_chan
*chan
;
5393 chan
= l2cap_get_chan_by_scid(conn
, cid
);
5395 if (cid
== L2CAP_CID_A2MP
) {
5396 chan
= a2mp_channel_create(conn
, skb
);
5402 l2cap_chan_lock(chan
);
5404 BT_DBG("unknown cid 0x%4.4x", cid
);
5405 /* Drop packet and return */
5411 BT_DBG("chan %p, len %d", chan
, skb
->len
);
5413 if (chan
->state
!= BT_CONNECTED
)
5416 switch (chan
->mode
) {
5417 case L2CAP_MODE_BASIC
:
5418 /* If socket recv buffers overflows we drop data here
5419 * which is *bad* because L2CAP has to be reliable.
5420 * But we don't have any other choice. L2CAP doesn't
5421 * provide flow control mechanism. */
5423 if (chan
->imtu
< skb
->len
)
5426 if (!chan
->ops
->recv(chan
, skb
))
5430 case L2CAP_MODE_ERTM
:
5431 case L2CAP_MODE_STREAMING
:
5432 l2cap_data_rcv(chan
, skb
);
5436 BT_DBG("chan %p: bad mode 0x%2.2x", chan
, chan
->mode
);
5444 l2cap_chan_unlock(chan
);
5447 static void l2cap_conless_channel(struct l2cap_conn
*conn
, __le16 psm
,
5448 struct sk_buff
*skb
)
5450 struct l2cap_chan
*chan
;
5452 chan
= l2cap_global_chan_by_psm(0, psm
, conn
->src
, conn
->dst
);
5456 BT_DBG("chan %p, len %d", chan
, skb
->len
);
5458 if (chan
->state
!= BT_BOUND
&& chan
->state
!= BT_CONNECTED
)
5461 if (chan
->imtu
< skb
->len
)
5464 if (!chan
->ops
->recv(chan
, skb
))
5471 static void l2cap_att_channel(struct l2cap_conn
*conn
, u16 cid
,
5472 struct sk_buff
*skb
)
5474 struct l2cap_chan
*chan
;
5476 chan
= l2cap_global_chan_by_scid(0, cid
, conn
->src
, conn
->dst
);
5480 BT_DBG("chan %p, len %d", chan
, skb
->len
);
5482 if (chan
->state
!= BT_BOUND
&& chan
->state
!= BT_CONNECTED
)
5485 if (chan
->imtu
< skb
->len
)
5488 if (!chan
->ops
->recv(chan
, skb
))
5495 static void l2cap_recv_frame(struct l2cap_conn
*conn
, struct sk_buff
*skb
)
5497 struct l2cap_hdr
*lh
= (void *) skb
->data
;
5501 skb_pull(skb
, L2CAP_HDR_SIZE
);
5502 cid
= __le16_to_cpu(lh
->cid
);
5503 len
= __le16_to_cpu(lh
->len
);
5505 if (len
!= skb
->len
) {
5510 BT_DBG("len %d, cid 0x%4.4x", len
, cid
);
5513 case L2CAP_CID_LE_SIGNALING
:
5514 case L2CAP_CID_SIGNALING
:
5515 l2cap_sig_channel(conn
, skb
);
5518 case L2CAP_CID_CONN_LESS
:
5519 psm
= get_unaligned((__le16
*) skb
->data
);
5520 skb_pull(skb
, L2CAP_PSMLEN_SIZE
);
5521 l2cap_conless_channel(conn
, psm
, skb
);
5524 case L2CAP_CID_LE_DATA
:
5525 l2cap_att_channel(conn
, cid
, skb
);
5529 if (smp_sig_channel(conn
, skb
))
5530 l2cap_conn_del(conn
->hcon
, EACCES
);
5534 l2cap_data_channel(conn
, cid
, skb
);
5539 /* ---- L2CAP interface with lower layer (HCI) ---- */
5541 int l2cap_connect_ind(struct hci_dev
*hdev
, bdaddr_t
*bdaddr
)
5543 int exact
= 0, lm1
= 0, lm2
= 0;
5544 struct l2cap_chan
*c
;
5546 BT_DBG("hdev %s, bdaddr %pMR", hdev
->name
, bdaddr
);
5548 /* Find listening sockets and check their link_mode */
5549 read_lock(&chan_list_lock
);
5550 list_for_each_entry(c
, &chan_list
, global_l
) {
5551 struct sock
*sk
= c
->sk
;
5553 if (c
->state
!= BT_LISTEN
)
5556 if (!bacmp(&bt_sk(sk
)->src
, &hdev
->bdaddr
)) {
5557 lm1
|= HCI_LM_ACCEPT
;
5558 if (test_bit(FLAG_ROLE_SWITCH
, &c
->flags
))
5559 lm1
|= HCI_LM_MASTER
;
5561 } else if (!bacmp(&bt_sk(sk
)->src
, BDADDR_ANY
)) {
5562 lm2
|= HCI_LM_ACCEPT
;
5563 if (test_bit(FLAG_ROLE_SWITCH
, &c
->flags
))
5564 lm2
|= HCI_LM_MASTER
;
5567 read_unlock(&chan_list_lock
);
5569 return exact
? lm1
: lm2
;
5572 void l2cap_connect_cfm(struct hci_conn
*hcon
, u8 status
)
5574 struct l2cap_conn
*conn
;
5576 BT_DBG("hcon %p bdaddr %pMR status %d", hcon
, &hcon
->dst
, status
);
5579 conn
= l2cap_conn_add(hcon
, status
);
5581 l2cap_conn_ready(conn
);
5583 l2cap_conn_del(hcon
, bt_to_errno(status
));
5587 int l2cap_disconn_ind(struct hci_conn
*hcon
)
5589 struct l2cap_conn
*conn
= hcon
->l2cap_data
;
5591 BT_DBG("hcon %p", hcon
);
5594 return HCI_ERROR_REMOTE_USER_TERM
;
5595 return conn
->disc_reason
;
5598 void l2cap_disconn_cfm(struct hci_conn
*hcon
, u8 reason
)
5600 BT_DBG("hcon %p reason %d", hcon
, reason
);
5602 l2cap_conn_del(hcon
, bt_to_errno(reason
));
5605 static inline void l2cap_check_encryption(struct l2cap_chan
*chan
, u8 encrypt
)
5607 if (chan
->chan_type
!= L2CAP_CHAN_CONN_ORIENTED
)
5610 if (encrypt
== 0x00) {
5611 if (chan
->sec_level
== BT_SECURITY_MEDIUM
) {
5612 __set_chan_timer(chan
, L2CAP_ENC_TIMEOUT
);
5613 } else if (chan
->sec_level
== BT_SECURITY_HIGH
)
5614 l2cap_chan_close(chan
, ECONNREFUSED
);
5616 if (chan
->sec_level
== BT_SECURITY_MEDIUM
)
5617 __clear_chan_timer(chan
);
5621 int l2cap_security_cfm(struct hci_conn
*hcon
, u8 status
, u8 encrypt
)
5623 struct l2cap_conn
*conn
= hcon
->l2cap_data
;
5624 struct l2cap_chan
*chan
;
5629 BT_DBG("conn %p status 0x%2.2x encrypt %u", conn
, status
, encrypt
);
5631 if (hcon
->type
== LE_LINK
) {
5632 if (!status
&& encrypt
)
5633 smp_distribute_keys(conn
, 0);
5634 cancel_delayed_work(&conn
->security_timer
);
5637 mutex_lock(&conn
->chan_lock
);
5639 list_for_each_entry(chan
, &conn
->chan_l
, list
) {
5640 l2cap_chan_lock(chan
);
5642 BT_DBG("chan %p scid 0x%4.4x state %s", chan
, chan
->scid
,
5643 state_to_string(chan
->state
));
5645 if (chan
->chan_type
== L2CAP_CHAN_CONN_FIX_A2MP
) {
5646 l2cap_chan_unlock(chan
);
5650 if (chan
->scid
== L2CAP_CID_LE_DATA
) {
5651 if (!status
&& encrypt
) {
5652 chan
->sec_level
= hcon
->sec_level
;
5653 l2cap_chan_ready(chan
);
5656 l2cap_chan_unlock(chan
);
5660 if (test_bit(CONF_CONNECT_PEND
, &chan
->conf_state
)) {
5661 l2cap_chan_unlock(chan
);
5665 if (!status
&& (chan
->state
== BT_CONNECTED
||
5666 chan
->state
== BT_CONFIG
)) {
5667 struct sock
*sk
= chan
->sk
;
5669 clear_bit(BT_SK_SUSPEND
, &bt_sk(sk
)->flags
);
5670 sk
->sk_state_change(sk
);
5672 l2cap_check_encryption(chan
, encrypt
);
5673 l2cap_chan_unlock(chan
);
5677 if (chan
->state
== BT_CONNECT
) {
5679 l2cap_start_connection(chan
);
5681 __set_chan_timer(chan
, L2CAP_DISC_TIMEOUT
);
5683 } else if (chan
->state
== BT_CONNECT2
) {
5684 struct sock
*sk
= chan
->sk
;
5685 struct l2cap_conn_rsp rsp
;
5691 if (test_bit(BT_SK_DEFER_SETUP
,
5692 &bt_sk(sk
)->flags
)) {
5693 res
= L2CAP_CR_PEND
;
5694 stat
= L2CAP_CS_AUTHOR_PEND
;
5695 chan
->ops
->defer(chan
);
5697 __l2cap_state_change(chan
, BT_CONFIG
);
5698 res
= L2CAP_CR_SUCCESS
;
5699 stat
= L2CAP_CS_NO_INFO
;
5702 __l2cap_state_change(chan
, BT_DISCONN
);
5703 __set_chan_timer(chan
, L2CAP_DISC_TIMEOUT
);
5704 res
= L2CAP_CR_SEC_BLOCK
;
5705 stat
= L2CAP_CS_NO_INFO
;
5710 rsp
.scid
= cpu_to_le16(chan
->dcid
);
5711 rsp
.dcid
= cpu_to_le16(chan
->scid
);
5712 rsp
.result
= cpu_to_le16(res
);
5713 rsp
.status
= cpu_to_le16(stat
);
5714 l2cap_send_cmd(conn
, chan
->ident
, L2CAP_CONN_RSP
,
5717 if (!test_bit(CONF_REQ_SENT
, &chan
->conf_state
) &&
5718 res
== L2CAP_CR_SUCCESS
) {
5720 set_bit(CONF_REQ_SENT
, &chan
->conf_state
);
5721 l2cap_send_cmd(conn
, l2cap_get_ident(conn
),
5723 l2cap_build_conf_req(chan
, buf
),
5725 chan
->num_conf_req
++;
5729 l2cap_chan_unlock(chan
);
5732 mutex_unlock(&conn
->chan_lock
);
5737 int l2cap_recv_acldata(struct hci_conn
*hcon
, struct sk_buff
*skb
, u16 flags
)
5739 struct l2cap_conn
*conn
= hcon
->l2cap_data
;
5740 struct l2cap_hdr
*hdr
;
5743 /* For AMP controller do not create l2cap conn */
5744 if (!conn
&& hcon
->hdev
->dev_type
!= HCI_BREDR
)
5748 conn
= l2cap_conn_add(hcon
, 0);
5753 BT_DBG("conn %p len %d flags 0x%x", conn
, skb
->len
, flags
);
5757 case ACL_START_NO_FLUSH
:
5760 BT_ERR("Unexpected start frame (len %d)", skb
->len
);
5761 kfree_skb(conn
->rx_skb
);
5762 conn
->rx_skb
= NULL
;
5764 l2cap_conn_unreliable(conn
, ECOMM
);
5767 /* Start fragment always begin with Basic L2CAP header */
5768 if (skb
->len
< L2CAP_HDR_SIZE
) {
5769 BT_ERR("Frame is too short (len %d)", skb
->len
);
5770 l2cap_conn_unreliable(conn
, ECOMM
);
5774 hdr
= (struct l2cap_hdr
*) skb
->data
;
5775 len
= __le16_to_cpu(hdr
->len
) + L2CAP_HDR_SIZE
;
5777 if (len
== skb
->len
) {
5778 /* Complete frame received */
5779 l2cap_recv_frame(conn
, skb
);
5783 BT_DBG("Start: total len %d, frag len %d", len
, skb
->len
);
5785 if (skb
->len
> len
) {
5786 BT_ERR("Frame is too long (len %d, expected len %d)",
5788 l2cap_conn_unreliable(conn
, ECOMM
);
5792 /* Allocate skb for the complete frame (with header) */
5793 conn
->rx_skb
= bt_skb_alloc(len
, GFP_KERNEL
);
5797 skb_copy_from_linear_data(skb
, skb_put(conn
->rx_skb
, skb
->len
),
5799 conn
->rx_len
= len
- skb
->len
;
5803 BT_DBG("Cont: frag len %d (expecting %d)", skb
->len
, conn
->rx_len
);
5805 if (!conn
->rx_len
) {
5806 BT_ERR("Unexpected continuation frame (len %d)", skb
->len
);
5807 l2cap_conn_unreliable(conn
, ECOMM
);
5811 if (skb
->len
> conn
->rx_len
) {
5812 BT_ERR("Fragment is too long (len %d, expected %d)",
5813 skb
->len
, conn
->rx_len
);
5814 kfree_skb(conn
->rx_skb
);
5815 conn
->rx_skb
= NULL
;
5817 l2cap_conn_unreliable(conn
, ECOMM
);
5821 skb_copy_from_linear_data(skb
, skb_put(conn
->rx_skb
, skb
->len
),
5823 conn
->rx_len
-= skb
->len
;
5825 if (!conn
->rx_len
) {
5826 /* Complete frame received */
5827 l2cap_recv_frame(conn
, conn
->rx_skb
);
5828 conn
->rx_skb
= NULL
;
5838 static int l2cap_debugfs_show(struct seq_file
*f
, void *p
)
5840 struct l2cap_chan
*c
;
5842 read_lock(&chan_list_lock
);
5844 list_for_each_entry(c
, &chan_list
, global_l
) {
5845 struct sock
*sk
= c
->sk
;
5847 seq_printf(f
, "%pMR %pMR %d %d 0x%4.4x 0x%4.4x %d %d %d %d\n",
5848 &bt_sk(sk
)->src
, &bt_sk(sk
)->dst
,
5849 c
->state
, __le16_to_cpu(c
->psm
),
5850 c
->scid
, c
->dcid
, c
->imtu
, c
->omtu
,
5851 c
->sec_level
, c
->mode
);
5854 read_unlock(&chan_list_lock
);
5859 static int l2cap_debugfs_open(struct inode
*inode
, struct file
*file
)
5861 return single_open(file
, l2cap_debugfs_show
, inode
->i_private
);
5864 static const struct file_operations l2cap_debugfs_fops
= {
5865 .open
= l2cap_debugfs_open
,
5867 .llseek
= seq_lseek
,
5868 .release
= single_release
,
/* Dentry for the "l2cap" debugfs file; created in l2cap_init(),
 * removed in l2cap_exit(). */
static struct dentry *l2cap_debugfs;
5873 int __init
l2cap_init(void)
5877 err
= l2cap_init_sockets();
5882 l2cap_debugfs
= debugfs_create_file("l2cap", 0444, bt_debugfs
,
5883 NULL
, &l2cap_debugfs_fops
);
5885 BT_ERR("Failed to create L2CAP debug file");
5891 void l2cap_exit(void)
5893 debugfs_remove(l2cap_debugfs
);
5894 l2cap_cleanup_sockets();
5897 module_param(disable_ertm
, bool, 0644);
5898 MODULE_PARM_DESC(disable_ertm
, "Disable enhanced retransmission mode");