2 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (C) 2000-2001 Qualcomm Incorporated
4 Copyright (C) 2009-2010 Gustavo F. Padovan <gustavo@padovan.org>
5 Copyright (C) 2010 Google Inc.
6 Copyright (C) 2011 ProFUSION Embedded Systems
7 Copyright (c) 2012 Code Aurora Forum. All rights reserved.
9 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
11 This program is free software; you can redistribute it and/or modify
12 it under the terms of the GNU General Public License version 2 as
13 published by the Free Software Foundation;
15 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
16 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
18 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
19 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
20 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
21 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
22 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
24 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
25 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
26 SOFTWARE IS DISCLAIMED.
29 /* Bluetooth L2CAP core. */
31 #include <linux/module.h>
33 #include <linux/debugfs.h>
34 #include <linux/crc16.h>
36 #include <net/bluetooth/bluetooth.h>
37 #include <net/bluetooth/hci_core.h>
38 #include <net/bluetooth/l2cap.h>
39 #include <net/bluetooth/smp.h>
40 #include <net/bluetooth/a2mp.h>
41 #include <net/bluetooth/amp.h>
45 static u32 l2cap_feat_mask
= L2CAP_FEAT_FIXED_CHAN
;
46 static u8 l2cap_fixed_chan
[8] = { L2CAP_FC_L2CAP
, };
48 static LIST_HEAD(chan_list
);
49 static DEFINE_RWLOCK(chan_list_lock
);
51 static struct sk_buff
*l2cap_build_cmd(struct l2cap_conn
*conn
,
52 u8 code
, u8 ident
, u16 dlen
, void *data
);
53 static void l2cap_send_cmd(struct l2cap_conn
*conn
, u8 ident
, u8 code
, u16 len
,
55 static int l2cap_build_conf_req(struct l2cap_chan
*chan
, void *data
);
56 static void l2cap_send_disconn_req(struct l2cap_conn
*conn
,
57 struct l2cap_chan
*chan
, int err
);
59 static void l2cap_tx(struct l2cap_chan
*chan
, struct l2cap_ctrl
*control
,
60 struct sk_buff_head
*skbs
, u8 event
);
62 /* ---- L2CAP channels ---- */
64 static struct l2cap_chan
*__l2cap_get_chan_by_dcid(struct l2cap_conn
*conn
,
69 list_for_each_entry(c
, &conn
->chan_l
, list
) {
76 static struct l2cap_chan
*__l2cap_get_chan_by_scid(struct l2cap_conn
*conn
,
81 list_for_each_entry(c
, &conn
->chan_l
, list
) {
88 /* Find channel with given SCID.
89 * Returns locked channel. */
90 static struct l2cap_chan
*l2cap_get_chan_by_scid(struct l2cap_conn
*conn
,
95 mutex_lock(&conn
->chan_lock
);
96 c
= __l2cap_get_chan_by_scid(conn
, cid
);
99 mutex_unlock(&conn
->chan_lock
);
104 /* Find channel with given DCID.
105 * Returns locked channel.
107 static struct l2cap_chan
*l2cap_get_chan_by_dcid(struct l2cap_conn
*conn
,
110 struct l2cap_chan
*c
;
112 mutex_lock(&conn
->chan_lock
);
113 c
= __l2cap_get_chan_by_dcid(conn
, cid
);
116 mutex_unlock(&conn
->chan_lock
);
121 static struct l2cap_chan
*__l2cap_get_chan_by_ident(struct l2cap_conn
*conn
,
124 struct l2cap_chan
*c
;
126 list_for_each_entry(c
, &conn
->chan_l
, list
) {
127 if (c
->ident
== ident
)
133 static struct l2cap_chan
*l2cap_get_chan_by_ident(struct l2cap_conn
*conn
,
136 struct l2cap_chan
*c
;
138 mutex_lock(&conn
->chan_lock
);
139 c
= __l2cap_get_chan_by_ident(conn
, ident
);
142 mutex_unlock(&conn
->chan_lock
);
147 static struct l2cap_chan
*__l2cap_global_chan_by_addr(__le16 psm
, bdaddr_t
*src
)
149 struct l2cap_chan
*c
;
151 list_for_each_entry(c
, &chan_list
, global_l
) {
152 if (c
->sport
== psm
&& !bacmp(&bt_sk(c
->sk
)->src
, src
))
158 int l2cap_add_psm(struct l2cap_chan
*chan
, bdaddr_t
*src
, __le16 psm
)
162 write_lock(&chan_list_lock
);
164 if (psm
&& __l2cap_global_chan_by_addr(psm
, src
)) {
177 for (p
= 0x1001; p
< 0x1100; p
+= 2)
178 if (!__l2cap_global_chan_by_addr(cpu_to_le16(p
), src
)) {
179 chan
->psm
= cpu_to_le16(p
);
180 chan
->sport
= cpu_to_le16(p
);
187 write_unlock(&chan_list_lock
);
191 int l2cap_add_scid(struct l2cap_chan
*chan
, __u16 scid
)
193 write_lock(&chan_list_lock
);
197 write_unlock(&chan_list_lock
);
202 static u16
l2cap_alloc_cid(struct l2cap_conn
*conn
)
204 u16 cid
= L2CAP_CID_DYN_START
;
206 for (; cid
< L2CAP_CID_DYN_END
; cid
++) {
207 if (!__l2cap_get_chan_by_scid(conn
, cid
))
214 static void __l2cap_state_change(struct l2cap_chan
*chan
, int state
)
216 BT_DBG("chan %p %s -> %s", chan
, state_to_string(chan
->state
),
217 state_to_string(state
));
220 chan
->ops
->state_change(chan
, state
);
223 static void l2cap_state_change(struct l2cap_chan
*chan
, int state
)
225 struct sock
*sk
= chan
->sk
;
228 __l2cap_state_change(chan
, state
);
232 static inline void __l2cap_chan_set_err(struct l2cap_chan
*chan
, int err
)
234 struct sock
*sk
= chan
->sk
;
239 static inline void l2cap_chan_set_err(struct l2cap_chan
*chan
, int err
)
241 struct sock
*sk
= chan
->sk
;
244 __l2cap_chan_set_err(chan
, err
);
248 static void __set_retrans_timer(struct l2cap_chan
*chan
)
250 if (!delayed_work_pending(&chan
->monitor_timer
) &&
251 chan
->retrans_timeout
) {
252 l2cap_set_timer(chan
, &chan
->retrans_timer
,
253 msecs_to_jiffies(chan
->retrans_timeout
));
257 static void __set_monitor_timer(struct l2cap_chan
*chan
)
259 __clear_retrans_timer(chan
);
260 if (chan
->monitor_timeout
) {
261 l2cap_set_timer(chan
, &chan
->monitor_timer
,
262 msecs_to_jiffies(chan
->monitor_timeout
));
266 static struct sk_buff
*l2cap_ertm_seq_in_queue(struct sk_buff_head
*head
,
271 skb_queue_walk(head
, skb
) {
272 if (bt_cb(skb
)->control
.txseq
== seq
)
/* ---- L2CAP sequence number lists ---- */

/* For ERTM, ordered lists of sequence numbers must be tracked for
 * SREJ requests that are received and for frames that are to be
 * retransmitted. These seq_list functions implement a singly-linked
 * list in an array, where membership in the list can also be checked
 * in constant time. Items can also be added to the tail of the list
 * and removed from the head in constant time, without further memory
 * allocation.
 */
290 static int l2cap_seq_list_init(struct l2cap_seq_list
*seq_list
, u16 size
)
292 size_t alloc_size
, i
;
294 /* Allocated size is a power of 2 to map sequence numbers
295 * (which may be up to 14 bits) in to a smaller array that is
296 * sized for the negotiated ERTM transmit windows.
298 alloc_size
= roundup_pow_of_two(size
);
300 seq_list
->list
= kmalloc(sizeof(u16
) * alloc_size
, GFP_KERNEL
);
304 seq_list
->mask
= alloc_size
- 1;
305 seq_list
->head
= L2CAP_SEQ_LIST_CLEAR
;
306 seq_list
->tail
= L2CAP_SEQ_LIST_CLEAR
;
307 for (i
= 0; i
< alloc_size
; i
++)
308 seq_list
->list
[i
] = L2CAP_SEQ_LIST_CLEAR
;
313 static inline void l2cap_seq_list_free(struct l2cap_seq_list
*seq_list
)
315 kfree(seq_list
->list
);
318 static inline bool l2cap_seq_list_contains(struct l2cap_seq_list
*seq_list
,
321 /* Constant-time check for list membership */
322 return seq_list
->list
[seq
& seq_list
->mask
] != L2CAP_SEQ_LIST_CLEAR
;
325 static u16
l2cap_seq_list_remove(struct l2cap_seq_list
*seq_list
, u16 seq
)
327 u16 mask
= seq_list
->mask
;
329 if (seq_list
->head
== L2CAP_SEQ_LIST_CLEAR
) {
330 /* In case someone tries to pop the head of an empty list */
331 return L2CAP_SEQ_LIST_CLEAR
;
332 } else if (seq_list
->head
== seq
) {
333 /* Head can be removed in constant time */
334 seq_list
->head
= seq_list
->list
[seq
& mask
];
335 seq_list
->list
[seq
& mask
] = L2CAP_SEQ_LIST_CLEAR
;
337 if (seq_list
->head
== L2CAP_SEQ_LIST_TAIL
) {
338 seq_list
->head
= L2CAP_SEQ_LIST_CLEAR
;
339 seq_list
->tail
= L2CAP_SEQ_LIST_CLEAR
;
342 /* Walk the list to find the sequence number */
343 u16 prev
= seq_list
->head
;
344 while (seq_list
->list
[prev
& mask
] != seq
) {
345 prev
= seq_list
->list
[prev
& mask
];
346 if (prev
== L2CAP_SEQ_LIST_TAIL
)
347 return L2CAP_SEQ_LIST_CLEAR
;
350 /* Unlink the number from the list and clear it */
351 seq_list
->list
[prev
& mask
] = seq_list
->list
[seq
& mask
];
352 seq_list
->list
[seq
& mask
] = L2CAP_SEQ_LIST_CLEAR
;
353 if (seq_list
->tail
== seq
)
354 seq_list
->tail
= prev
;
359 static inline u16
l2cap_seq_list_pop(struct l2cap_seq_list
*seq_list
)
361 /* Remove the head in constant time */
362 return l2cap_seq_list_remove(seq_list
, seq_list
->head
);
365 static void l2cap_seq_list_clear(struct l2cap_seq_list
*seq_list
)
369 if (seq_list
->head
== L2CAP_SEQ_LIST_CLEAR
)
372 for (i
= 0; i
<= seq_list
->mask
; i
++)
373 seq_list
->list
[i
] = L2CAP_SEQ_LIST_CLEAR
;
375 seq_list
->head
= L2CAP_SEQ_LIST_CLEAR
;
376 seq_list
->tail
= L2CAP_SEQ_LIST_CLEAR
;
379 static void l2cap_seq_list_append(struct l2cap_seq_list
*seq_list
, u16 seq
)
381 u16 mask
= seq_list
->mask
;
383 /* All appends happen in constant time */
385 if (seq_list
->list
[seq
& mask
] != L2CAP_SEQ_LIST_CLEAR
)
388 if (seq_list
->tail
== L2CAP_SEQ_LIST_CLEAR
)
389 seq_list
->head
= seq
;
391 seq_list
->list
[seq_list
->tail
& mask
] = seq
;
393 seq_list
->tail
= seq
;
394 seq_list
->list
[seq
& mask
] = L2CAP_SEQ_LIST_TAIL
;
/* Channel timer expiry: close the channel with an error that depends on
 * how far connection setup had progressed.
 */
static void l2cap_chan_timeout(struct work_struct *work)
{
	struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
					       chan_timer.work);
	struct l2cap_conn *conn = chan->conn;
	int reason;

	BT_DBG("chan %p state %s", chan, state_to_string(chan->state));

	mutex_lock(&conn->chan_lock);
	l2cap_chan_lock(chan);

	if (chan->state == BT_CONNECTED || chan->state == BT_CONFIG)
		reason = ECONNREFUSED;
	else if (chan->state == BT_CONNECT &&
		 chan->sec_level != BT_SECURITY_SDP)
		reason = ECONNREFUSED;
	else
		reason = ETIMEDOUT;

	l2cap_chan_close(chan, reason);

	l2cap_chan_unlock(chan);

	chan->ops->close(chan);
	mutex_unlock(&conn->chan_lock);

	/* Drop the reference taken when the timer was armed */
	l2cap_chan_put(chan);
}
427 struct l2cap_chan
*l2cap_chan_create(void)
429 struct l2cap_chan
*chan
;
431 chan
= kzalloc(sizeof(*chan
), GFP_ATOMIC
);
435 mutex_init(&chan
->lock
);
437 write_lock(&chan_list_lock
);
438 list_add(&chan
->global_l
, &chan_list
);
439 write_unlock(&chan_list_lock
);
441 INIT_DELAYED_WORK(&chan
->chan_timer
, l2cap_chan_timeout
);
443 chan
->state
= BT_OPEN
;
445 kref_init(&chan
->kref
);
447 /* This flag is cleared in l2cap_chan_ready() */
448 set_bit(CONF_NOT_COMPLETE
, &chan
->conf_state
);
450 BT_DBG("chan %p", chan
);
455 static void l2cap_chan_destroy(struct kref
*kref
)
457 struct l2cap_chan
*chan
= container_of(kref
, struct l2cap_chan
, kref
);
459 BT_DBG("chan %p", chan
);
461 write_lock(&chan_list_lock
);
462 list_del(&chan
->global_l
);
463 write_unlock(&chan_list_lock
);
468 void l2cap_chan_hold(struct l2cap_chan
*c
)
470 BT_DBG("chan %p orig refcnt %d", c
, atomic_read(&c
->kref
.refcount
));
475 void l2cap_chan_put(struct l2cap_chan
*c
)
477 BT_DBG("chan %p orig refcnt %d", c
, atomic_read(&c
->kref
.refcount
));
479 kref_put(&c
->kref
, l2cap_chan_destroy
);
482 void l2cap_chan_set_defaults(struct l2cap_chan
*chan
)
484 chan
->fcs
= L2CAP_FCS_CRC16
;
485 chan
->max_tx
= L2CAP_DEFAULT_MAX_TX
;
486 chan
->tx_win
= L2CAP_DEFAULT_TX_WINDOW
;
487 chan
->tx_win_max
= L2CAP_DEFAULT_TX_WINDOW
;
488 chan
->ack_win
= L2CAP_DEFAULT_TX_WINDOW
;
489 chan
->sec_level
= BT_SECURITY_LOW
;
491 set_bit(FLAG_FORCE_ACTIVE
, &chan
->flags
);
494 void __l2cap_chan_add(struct l2cap_conn
*conn
, struct l2cap_chan
*chan
)
496 BT_DBG("conn %p, psm 0x%2.2x, dcid 0x%4.4x", conn
,
497 __le16_to_cpu(chan
->psm
), chan
->dcid
);
499 conn
->disc_reason
= HCI_ERROR_REMOTE_USER_TERM
;
503 switch (chan
->chan_type
) {
504 case L2CAP_CHAN_CONN_ORIENTED
:
505 if (conn
->hcon
->type
== LE_LINK
) {
507 chan
->omtu
= L2CAP_DEFAULT_MTU
;
508 chan
->scid
= L2CAP_CID_LE_DATA
;
509 chan
->dcid
= L2CAP_CID_LE_DATA
;
511 /* Alloc CID for connection-oriented socket */
512 chan
->scid
= l2cap_alloc_cid(conn
);
513 chan
->omtu
= L2CAP_DEFAULT_MTU
;
517 case L2CAP_CHAN_CONN_LESS
:
518 /* Connectionless socket */
519 chan
->scid
= L2CAP_CID_CONN_LESS
;
520 chan
->dcid
= L2CAP_CID_CONN_LESS
;
521 chan
->omtu
= L2CAP_DEFAULT_MTU
;
524 case L2CAP_CHAN_CONN_FIX_A2MP
:
525 chan
->scid
= L2CAP_CID_A2MP
;
526 chan
->dcid
= L2CAP_CID_A2MP
;
527 chan
->omtu
= L2CAP_A2MP_DEFAULT_MTU
;
528 chan
->imtu
= L2CAP_A2MP_DEFAULT_MTU
;
532 /* Raw socket can send/recv signalling messages only */
533 chan
->scid
= L2CAP_CID_SIGNALING
;
534 chan
->dcid
= L2CAP_CID_SIGNALING
;
535 chan
->omtu
= L2CAP_DEFAULT_MTU
;
538 chan
->local_id
= L2CAP_BESTEFFORT_ID
;
539 chan
->local_stype
= L2CAP_SERV_BESTEFFORT
;
540 chan
->local_msdu
= L2CAP_DEFAULT_MAX_SDU_SIZE
;
541 chan
->local_sdu_itime
= L2CAP_DEFAULT_SDU_ITIME
;
542 chan
->local_acc_lat
= L2CAP_DEFAULT_ACC_LAT
;
543 chan
->local_flush_to
= L2CAP_EFS_DEFAULT_FLUSH_TO
;
545 l2cap_chan_hold(chan
);
547 list_add(&chan
->list
, &conn
->chan_l
);
550 void l2cap_chan_add(struct l2cap_conn
*conn
, struct l2cap_chan
*chan
)
552 mutex_lock(&conn
->chan_lock
);
553 __l2cap_chan_add(conn
, chan
);
554 mutex_unlock(&conn
->chan_lock
);
557 void l2cap_chan_del(struct l2cap_chan
*chan
, int err
)
559 struct l2cap_conn
*conn
= chan
->conn
;
561 __clear_chan_timer(chan
);
563 BT_DBG("chan %p, conn %p, err %d", chan
, conn
, err
);
566 struct amp_mgr
*mgr
= conn
->hcon
->amp_mgr
;
567 /* Delete from channel list */
568 list_del(&chan
->list
);
570 l2cap_chan_put(chan
);
574 if (chan
->chan_type
!= L2CAP_CHAN_CONN_FIX_A2MP
)
575 hci_conn_put(conn
->hcon
);
577 if (mgr
&& mgr
->bredr_chan
== chan
)
578 mgr
->bredr_chan
= NULL
;
581 if (chan
->hs_hchan
) {
582 struct hci_chan
*hs_hchan
= chan
->hs_hchan
;
584 BT_DBG("chan %p disconnect hs_hchan %p", chan
, hs_hchan
);
585 amp_disconnect_logical_link(hs_hchan
);
588 chan
->ops
->teardown(chan
, err
);
590 if (test_bit(CONF_NOT_COMPLETE
, &chan
->conf_state
))
594 case L2CAP_MODE_BASIC
:
597 case L2CAP_MODE_ERTM
:
598 __clear_retrans_timer(chan
);
599 __clear_monitor_timer(chan
);
600 __clear_ack_timer(chan
);
602 skb_queue_purge(&chan
->srej_q
);
604 l2cap_seq_list_free(&chan
->srej_list
);
605 l2cap_seq_list_free(&chan
->retrans_list
);
609 case L2CAP_MODE_STREAMING
:
610 skb_queue_purge(&chan
->tx_q
);
617 void l2cap_chan_close(struct l2cap_chan
*chan
, int reason
)
619 struct l2cap_conn
*conn
= chan
->conn
;
620 struct sock
*sk
= chan
->sk
;
622 BT_DBG("chan %p state %s sk %p", chan
, state_to_string(chan
->state
),
625 switch (chan
->state
) {
627 chan
->ops
->teardown(chan
, 0);
632 if (chan
->chan_type
== L2CAP_CHAN_CONN_ORIENTED
&&
633 conn
->hcon
->type
== ACL_LINK
) {
634 __set_chan_timer(chan
, sk
->sk_sndtimeo
);
635 l2cap_send_disconn_req(conn
, chan
, reason
);
637 l2cap_chan_del(chan
, reason
);
641 if (chan
->chan_type
== L2CAP_CHAN_CONN_ORIENTED
&&
642 conn
->hcon
->type
== ACL_LINK
) {
643 struct l2cap_conn_rsp rsp
;
646 if (test_bit(BT_SK_DEFER_SETUP
, &bt_sk(sk
)->flags
))
647 result
= L2CAP_CR_SEC_BLOCK
;
649 result
= L2CAP_CR_BAD_PSM
;
650 l2cap_state_change(chan
, BT_DISCONN
);
652 rsp
.scid
= cpu_to_le16(chan
->dcid
);
653 rsp
.dcid
= cpu_to_le16(chan
->scid
);
654 rsp
.result
= cpu_to_le16(result
);
655 rsp
.status
= __constant_cpu_to_le16(L2CAP_CS_NO_INFO
);
656 l2cap_send_cmd(conn
, chan
->ident
, L2CAP_CONN_RSP
,
660 l2cap_chan_del(chan
, reason
);
665 l2cap_chan_del(chan
, reason
);
669 chan
->ops
->teardown(chan
, 0);
674 static inline u8
l2cap_get_auth_type(struct l2cap_chan
*chan
)
676 if (chan
->chan_type
== L2CAP_CHAN_RAW
) {
677 switch (chan
->sec_level
) {
678 case BT_SECURITY_HIGH
:
679 return HCI_AT_DEDICATED_BONDING_MITM
;
680 case BT_SECURITY_MEDIUM
:
681 return HCI_AT_DEDICATED_BONDING
;
683 return HCI_AT_NO_BONDING
;
685 } else if (chan
->psm
== __constant_cpu_to_le16(L2CAP_PSM_SDP
)) {
686 if (chan
->sec_level
== BT_SECURITY_LOW
)
687 chan
->sec_level
= BT_SECURITY_SDP
;
689 if (chan
->sec_level
== BT_SECURITY_HIGH
)
690 return HCI_AT_NO_BONDING_MITM
;
692 return HCI_AT_NO_BONDING
;
694 switch (chan
->sec_level
) {
695 case BT_SECURITY_HIGH
:
696 return HCI_AT_GENERAL_BONDING_MITM
;
697 case BT_SECURITY_MEDIUM
:
698 return HCI_AT_GENERAL_BONDING
;
700 return HCI_AT_NO_BONDING
;
705 /* Service level security */
706 int l2cap_chan_check_security(struct l2cap_chan
*chan
)
708 struct l2cap_conn
*conn
= chan
->conn
;
711 auth_type
= l2cap_get_auth_type(chan
);
713 return hci_conn_security(conn
->hcon
, chan
->sec_level
, auth_type
);
716 static u8
l2cap_get_ident(struct l2cap_conn
*conn
)
720 /* Get next available identificator.
721 * 1 - 128 are used by kernel.
722 * 129 - 199 are reserved.
723 * 200 - 254 are used by utilities like l2ping, etc.
726 spin_lock(&conn
->lock
);
728 if (++conn
->tx_ident
> 128)
733 spin_unlock(&conn
->lock
);
738 static void l2cap_send_cmd(struct l2cap_conn
*conn
, u8 ident
, u8 code
, u16 len
,
741 struct sk_buff
*skb
= l2cap_build_cmd(conn
, code
, ident
, len
, data
);
744 BT_DBG("code 0x%2.2x", code
);
749 if (lmp_no_flush_capable(conn
->hcon
->hdev
))
750 flags
= ACL_START_NO_FLUSH
;
754 bt_cb(skb
)->force_active
= BT_POWER_FORCE_ACTIVE_ON
;
755 skb
->priority
= HCI_PRIO_MAX
;
757 hci_send_acl(conn
->hchan
, skb
, flags
);
760 static bool __chan_is_moving(struct l2cap_chan
*chan
)
762 return chan
->move_state
!= L2CAP_MOVE_STABLE
&&
763 chan
->move_state
!= L2CAP_MOVE_WAIT_PREPARE
;
766 static void l2cap_do_send(struct l2cap_chan
*chan
, struct sk_buff
*skb
)
768 struct hci_conn
*hcon
= chan
->conn
->hcon
;
771 BT_DBG("chan %p, skb %p len %d priority %u", chan
, skb
, skb
->len
,
774 if (chan
->hs_hcon
&& !__chan_is_moving(chan
)) {
776 hci_send_acl(chan
->hs_hchan
, skb
, ACL_COMPLETE
);
783 if (!test_bit(FLAG_FLUSHABLE
, &chan
->flags
) &&
784 lmp_no_flush_capable(hcon
->hdev
))
785 flags
= ACL_START_NO_FLUSH
;
789 bt_cb(skb
)->force_active
= test_bit(FLAG_FORCE_ACTIVE
, &chan
->flags
);
790 hci_send_acl(chan
->conn
->hchan
, skb
, flags
);
793 static void __unpack_enhanced_control(u16 enh
, struct l2cap_ctrl
*control
)
795 control
->reqseq
= (enh
& L2CAP_CTRL_REQSEQ
) >> L2CAP_CTRL_REQSEQ_SHIFT
;
796 control
->final
= (enh
& L2CAP_CTRL_FINAL
) >> L2CAP_CTRL_FINAL_SHIFT
;
798 if (enh
& L2CAP_CTRL_FRAME_TYPE
) {
801 control
->poll
= (enh
& L2CAP_CTRL_POLL
) >> L2CAP_CTRL_POLL_SHIFT
;
802 control
->super
= (enh
& L2CAP_CTRL_SUPERVISE
) >> L2CAP_CTRL_SUPER_SHIFT
;
809 control
->sar
= (enh
& L2CAP_CTRL_SAR
) >> L2CAP_CTRL_SAR_SHIFT
;
810 control
->txseq
= (enh
& L2CAP_CTRL_TXSEQ
) >> L2CAP_CTRL_TXSEQ_SHIFT
;
817 static void __unpack_extended_control(u32 ext
, struct l2cap_ctrl
*control
)
819 control
->reqseq
= (ext
& L2CAP_EXT_CTRL_REQSEQ
) >> L2CAP_EXT_CTRL_REQSEQ_SHIFT
;
820 control
->final
= (ext
& L2CAP_EXT_CTRL_FINAL
) >> L2CAP_EXT_CTRL_FINAL_SHIFT
;
822 if (ext
& L2CAP_EXT_CTRL_FRAME_TYPE
) {
825 control
->poll
= (ext
& L2CAP_EXT_CTRL_POLL
) >> L2CAP_EXT_CTRL_POLL_SHIFT
;
826 control
->super
= (ext
& L2CAP_EXT_CTRL_SUPERVISE
) >> L2CAP_EXT_CTRL_SUPER_SHIFT
;
833 control
->sar
= (ext
& L2CAP_EXT_CTRL_SAR
) >> L2CAP_EXT_CTRL_SAR_SHIFT
;
834 control
->txseq
= (ext
& L2CAP_EXT_CTRL_TXSEQ
) >> L2CAP_EXT_CTRL_TXSEQ_SHIFT
;
841 static inline void __unpack_control(struct l2cap_chan
*chan
,
844 if (test_bit(FLAG_EXT_CTRL
, &chan
->flags
)) {
845 __unpack_extended_control(get_unaligned_le32(skb
->data
),
846 &bt_cb(skb
)->control
);
847 skb_pull(skb
, L2CAP_EXT_CTRL_SIZE
);
849 __unpack_enhanced_control(get_unaligned_le16(skb
->data
),
850 &bt_cb(skb
)->control
);
851 skb_pull(skb
, L2CAP_ENH_CTRL_SIZE
);
855 static u32
__pack_extended_control(struct l2cap_ctrl
*control
)
859 packed
= control
->reqseq
<< L2CAP_EXT_CTRL_REQSEQ_SHIFT
;
860 packed
|= control
->final
<< L2CAP_EXT_CTRL_FINAL_SHIFT
;
862 if (control
->sframe
) {
863 packed
|= control
->poll
<< L2CAP_EXT_CTRL_POLL_SHIFT
;
864 packed
|= control
->super
<< L2CAP_EXT_CTRL_SUPER_SHIFT
;
865 packed
|= L2CAP_EXT_CTRL_FRAME_TYPE
;
867 packed
|= control
->sar
<< L2CAP_EXT_CTRL_SAR_SHIFT
;
868 packed
|= control
->txseq
<< L2CAP_EXT_CTRL_TXSEQ_SHIFT
;
874 static u16
__pack_enhanced_control(struct l2cap_ctrl
*control
)
878 packed
= control
->reqseq
<< L2CAP_CTRL_REQSEQ_SHIFT
;
879 packed
|= control
->final
<< L2CAP_CTRL_FINAL_SHIFT
;
881 if (control
->sframe
) {
882 packed
|= control
->poll
<< L2CAP_CTRL_POLL_SHIFT
;
883 packed
|= control
->super
<< L2CAP_CTRL_SUPER_SHIFT
;
884 packed
|= L2CAP_CTRL_FRAME_TYPE
;
886 packed
|= control
->sar
<< L2CAP_CTRL_SAR_SHIFT
;
887 packed
|= control
->txseq
<< L2CAP_CTRL_TXSEQ_SHIFT
;
893 static inline void __pack_control(struct l2cap_chan
*chan
,
894 struct l2cap_ctrl
*control
,
897 if (test_bit(FLAG_EXT_CTRL
, &chan
->flags
)) {
898 put_unaligned_le32(__pack_extended_control(control
),
899 skb
->data
+ L2CAP_HDR_SIZE
);
901 put_unaligned_le16(__pack_enhanced_control(control
),
902 skb
->data
+ L2CAP_HDR_SIZE
);
906 static inline unsigned int __ertm_hdr_size(struct l2cap_chan
*chan
)
908 if (test_bit(FLAG_EXT_CTRL
, &chan
->flags
))
909 return L2CAP_EXT_HDR_SIZE
;
911 return L2CAP_ENH_HDR_SIZE
;
914 static struct sk_buff
*l2cap_create_sframe_pdu(struct l2cap_chan
*chan
,
918 struct l2cap_hdr
*lh
;
919 int hlen
= __ertm_hdr_size(chan
);
921 if (chan
->fcs
== L2CAP_FCS_CRC16
)
922 hlen
+= L2CAP_FCS_SIZE
;
924 skb
= bt_skb_alloc(hlen
, GFP_KERNEL
);
927 return ERR_PTR(-ENOMEM
);
929 lh
= (struct l2cap_hdr
*) skb_put(skb
, L2CAP_HDR_SIZE
);
930 lh
->len
= cpu_to_le16(hlen
- L2CAP_HDR_SIZE
);
931 lh
->cid
= cpu_to_le16(chan
->dcid
);
933 if (test_bit(FLAG_EXT_CTRL
, &chan
->flags
))
934 put_unaligned_le32(control
, skb_put(skb
, L2CAP_EXT_CTRL_SIZE
));
936 put_unaligned_le16(control
, skb_put(skb
, L2CAP_ENH_CTRL_SIZE
));
938 if (chan
->fcs
== L2CAP_FCS_CRC16
) {
939 u16 fcs
= crc16(0, (u8
*)skb
->data
, skb
->len
);
940 put_unaligned_le16(fcs
, skb_put(skb
, L2CAP_FCS_SIZE
));
943 skb
->priority
= HCI_PRIO_MAX
;
947 static void l2cap_send_sframe(struct l2cap_chan
*chan
,
948 struct l2cap_ctrl
*control
)
953 BT_DBG("chan %p, control %p", chan
, control
);
955 if (!control
->sframe
)
958 if (__chan_is_moving(chan
))
961 if (test_and_clear_bit(CONN_SEND_FBIT
, &chan
->conn_state
) &&
965 if (control
->super
== L2CAP_SUPER_RR
)
966 clear_bit(CONN_RNR_SENT
, &chan
->conn_state
);
967 else if (control
->super
== L2CAP_SUPER_RNR
)
968 set_bit(CONN_RNR_SENT
, &chan
->conn_state
);
970 if (control
->super
!= L2CAP_SUPER_SREJ
) {
971 chan
->last_acked_seq
= control
->reqseq
;
972 __clear_ack_timer(chan
);
975 BT_DBG("reqseq %d, final %d, poll %d, super %d", control
->reqseq
,
976 control
->final
, control
->poll
, control
->super
);
978 if (test_bit(FLAG_EXT_CTRL
, &chan
->flags
))
979 control_field
= __pack_extended_control(control
);
981 control_field
= __pack_enhanced_control(control
);
983 skb
= l2cap_create_sframe_pdu(chan
, control_field
);
985 l2cap_do_send(chan
, skb
);
988 static void l2cap_send_rr_or_rnr(struct l2cap_chan
*chan
, bool poll
)
990 struct l2cap_ctrl control
;
992 BT_DBG("chan %p, poll %d", chan
, poll
);
994 memset(&control
, 0, sizeof(control
));
998 if (test_bit(CONN_LOCAL_BUSY
, &chan
->conn_state
))
999 control
.super
= L2CAP_SUPER_RNR
;
1001 control
.super
= L2CAP_SUPER_RR
;
1003 control
.reqseq
= chan
->buffer_seq
;
1004 l2cap_send_sframe(chan
, &control
);
1007 static inline int __l2cap_no_conn_pending(struct l2cap_chan
*chan
)
1009 return !test_bit(CONF_CONNECT_PEND
, &chan
->conf_state
);
1012 static bool __amp_capable(struct l2cap_chan
*chan
)
1014 struct l2cap_conn
*conn
= chan
->conn
;
1017 chan
->chan_policy
== BT_CHANNEL_POLICY_AMP_PREFERRED
&&
1018 conn
->fixed_chan_mask
& L2CAP_FC_A2MP
)
1024 static bool l2cap_check_efs(struct l2cap_chan
*chan
)
1026 /* Check EFS parameters */
1030 void l2cap_send_conn_req(struct l2cap_chan
*chan
)
1032 struct l2cap_conn
*conn
= chan
->conn
;
1033 struct l2cap_conn_req req
;
1035 req
.scid
= cpu_to_le16(chan
->scid
);
1036 req
.psm
= chan
->psm
;
1038 chan
->ident
= l2cap_get_ident(conn
);
1040 set_bit(CONF_CONNECT_PEND
, &chan
->conf_state
);
1042 l2cap_send_cmd(conn
, chan
->ident
, L2CAP_CONN_REQ
, sizeof(req
), &req
);
1045 static void l2cap_send_create_chan_req(struct l2cap_chan
*chan
, u8 amp_id
)
1047 struct l2cap_create_chan_req req
;
1048 req
.scid
= cpu_to_le16(chan
->scid
);
1049 req
.psm
= chan
->psm
;
1050 req
.amp_id
= amp_id
;
1052 chan
->ident
= l2cap_get_ident(chan
->conn
);
1054 l2cap_send_cmd(chan
->conn
, chan
->ident
, L2CAP_CREATE_CHAN_REQ
,
1058 static void l2cap_move_setup(struct l2cap_chan
*chan
)
1060 struct sk_buff
*skb
;
1062 BT_DBG("chan %p", chan
);
1064 if (chan
->mode
!= L2CAP_MODE_ERTM
)
1067 __clear_retrans_timer(chan
);
1068 __clear_monitor_timer(chan
);
1069 __clear_ack_timer(chan
);
1071 chan
->retry_count
= 0;
1072 skb_queue_walk(&chan
->tx_q
, skb
) {
1073 if (bt_cb(skb
)->control
.retries
)
1074 bt_cb(skb
)->control
.retries
= 1;
1079 chan
->expected_tx_seq
= chan
->buffer_seq
;
1081 clear_bit(CONN_REJ_ACT
, &chan
->conn_state
);
1082 clear_bit(CONN_SREJ_ACT
, &chan
->conn_state
);
1083 l2cap_seq_list_clear(&chan
->retrans_list
);
1084 l2cap_seq_list_clear(&chan
->srej_list
);
1085 skb_queue_purge(&chan
->srej_q
);
1087 chan
->tx_state
= L2CAP_TX_STATE_XMIT
;
1088 chan
->rx_state
= L2CAP_RX_STATE_MOVE
;
1090 set_bit(CONN_REMOTE_BUSY
, &chan
->conn_state
);
1093 static void l2cap_move_done(struct l2cap_chan
*chan
)
1095 u8 move_role
= chan
->move_role
;
1096 BT_DBG("chan %p", chan
);
1098 chan
->move_state
= L2CAP_MOVE_STABLE
;
1099 chan
->move_role
= L2CAP_MOVE_ROLE_NONE
;
1101 if (chan
->mode
!= L2CAP_MODE_ERTM
)
1104 switch (move_role
) {
1105 case L2CAP_MOVE_ROLE_INITIATOR
:
1106 l2cap_tx(chan
, NULL
, NULL
, L2CAP_EV_EXPLICIT_POLL
);
1107 chan
->rx_state
= L2CAP_RX_STATE_WAIT_F
;
1109 case L2CAP_MOVE_ROLE_RESPONDER
:
1110 chan
->rx_state
= L2CAP_RX_STATE_WAIT_P
;
1115 static void l2cap_chan_ready(struct l2cap_chan
*chan
)
1117 /* This clears all conf flags, including CONF_NOT_COMPLETE */
1118 chan
->conf_state
= 0;
1119 __clear_chan_timer(chan
);
1121 chan
->state
= BT_CONNECTED
;
1123 chan
->ops
->ready(chan
);
/* Kick off channel establishment: discover AMP controllers first when
 * the channel prefers AMP, otherwise send a plain connect request.
 */
static void l2cap_start_connection(struct l2cap_chan *chan)
{
	if (__amp_capable(chan)) {
		BT_DBG("chan %p AMP capable: discover AMPs", chan);
		a2mp_discover_amp(chan);
	} else {
		l2cap_send_conn_req(chan);
	}
}
1136 static void l2cap_do_start(struct l2cap_chan
*chan
)
1138 struct l2cap_conn
*conn
= chan
->conn
;
1140 if (conn
->hcon
->type
== LE_LINK
) {
1141 l2cap_chan_ready(chan
);
1145 if (conn
->info_state
& L2CAP_INFO_FEAT_MASK_REQ_SENT
) {
1146 if (!(conn
->info_state
& L2CAP_INFO_FEAT_MASK_REQ_DONE
))
1149 if (l2cap_chan_check_security(chan
) &&
1150 __l2cap_no_conn_pending(chan
)) {
1151 l2cap_start_connection(chan
);
1154 struct l2cap_info_req req
;
1155 req
.type
= __constant_cpu_to_le16(L2CAP_IT_FEAT_MASK
);
1157 conn
->info_state
|= L2CAP_INFO_FEAT_MASK_REQ_SENT
;
1158 conn
->info_ident
= l2cap_get_ident(conn
);
1160 schedule_delayed_work(&conn
->info_timer
, L2CAP_INFO_TIMEOUT
);
1162 l2cap_send_cmd(conn
, conn
->info_ident
, L2CAP_INFO_REQ
,
1167 static inline int l2cap_mode_supported(__u8 mode
, __u32 feat_mask
)
1169 u32 local_feat_mask
= l2cap_feat_mask
;
1171 local_feat_mask
|= L2CAP_FEAT_ERTM
| L2CAP_FEAT_STREAMING
;
1174 case L2CAP_MODE_ERTM
:
1175 return L2CAP_FEAT_ERTM
& feat_mask
& local_feat_mask
;
1176 case L2CAP_MODE_STREAMING
:
1177 return L2CAP_FEAT_STREAMING
& feat_mask
& local_feat_mask
;
1183 static void l2cap_send_disconn_req(struct l2cap_conn
*conn
,
1184 struct l2cap_chan
*chan
, int err
)
1186 struct sock
*sk
= chan
->sk
;
1187 struct l2cap_disconn_req req
;
1192 if (chan
->mode
== L2CAP_MODE_ERTM
&& chan
->state
== BT_CONNECTED
) {
1193 __clear_retrans_timer(chan
);
1194 __clear_monitor_timer(chan
);
1195 __clear_ack_timer(chan
);
1198 if (chan
->chan_type
== L2CAP_CHAN_CONN_FIX_A2MP
) {
1199 l2cap_state_change(chan
, BT_DISCONN
);
1203 req
.dcid
= cpu_to_le16(chan
->dcid
);
1204 req
.scid
= cpu_to_le16(chan
->scid
);
1205 l2cap_send_cmd(conn
, l2cap_get_ident(conn
), L2CAP_DISCONN_REQ
,
1209 __l2cap_state_change(chan
, BT_DISCONN
);
1210 __l2cap_chan_set_err(chan
, err
);
1214 /* ---- L2CAP connections ---- */
1215 static void l2cap_conn_start(struct l2cap_conn
*conn
)
1217 struct l2cap_chan
*chan
, *tmp
;
1219 BT_DBG("conn %p", conn
);
1221 mutex_lock(&conn
->chan_lock
);
1223 list_for_each_entry_safe(chan
, tmp
, &conn
->chan_l
, list
) {
1224 struct sock
*sk
= chan
->sk
;
1226 l2cap_chan_lock(chan
);
1228 if (chan
->chan_type
!= L2CAP_CHAN_CONN_ORIENTED
) {
1229 l2cap_chan_unlock(chan
);
1233 if (chan
->state
== BT_CONNECT
) {
1234 if (!l2cap_chan_check_security(chan
) ||
1235 !__l2cap_no_conn_pending(chan
)) {
1236 l2cap_chan_unlock(chan
);
1240 if (!l2cap_mode_supported(chan
->mode
, conn
->feat_mask
)
1241 && test_bit(CONF_STATE2_DEVICE
,
1242 &chan
->conf_state
)) {
1243 l2cap_chan_close(chan
, ECONNRESET
);
1244 l2cap_chan_unlock(chan
);
1248 l2cap_start_connection(chan
);
1250 } else if (chan
->state
== BT_CONNECT2
) {
1251 struct l2cap_conn_rsp rsp
;
1253 rsp
.scid
= cpu_to_le16(chan
->dcid
);
1254 rsp
.dcid
= cpu_to_le16(chan
->scid
);
1256 if (l2cap_chan_check_security(chan
)) {
1258 if (test_bit(BT_SK_DEFER_SETUP
,
1259 &bt_sk(sk
)->flags
)) {
1260 rsp
.result
= __constant_cpu_to_le16(L2CAP_CR_PEND
);
1261 rsp
.status
= __constant_cpu_to_le16(L2CAP_CS_AUTHOR_PEND
);
1262 chan
->ops
->defer(chan
);
1265 __l2cap_state_change(chan
, BT_CONFIG
);
1266 rsp
.result
= __constant_cpu_to_le16(L2CAP_CR_SUCCESS
);
1267 rsp
.status
= __constant_cpu_to_le16(L2CAP_CS_NO_INFO
);
1271 rsp
.result
= __constant_cpu_to_le16(L2CAP_CR_PEND
);
1272 rsp
.status
= __constant_cpu_to_le16(L2CAP_CS_AUTHEN_PEND
);
1275 l2cap_send_cmd(conn
, chan
->ident
, L2CAP_CONN_RSP
,
1278 if (test_bit(CONF_REQ_SENT
, &chan
->conf_state
) ||
1279 rsp
.result
!= L2CAP_CR_SUCCESS
) {
1280 l2cap_chan_unlock(chan
);
1284 set_bit(CONF_REQ_SENT
, &chan
->conf_state
);
1285 l2cap_send_cmd(conn
, l2cap_get_ident(conn
), L2CAP_CONF_REQ
,
1286 l2cap_build_conf_req(chan
, buf
), buf
);
1287 chan
->num_conf_req
++;
1290 l2cap_chan_unlock(chan
);
1293 mutex_unlock(&conn
->chan_lock
);
1296 /* Find socket with cid and source/destination bdaddr.
1297 * Returns closest match, locked.
1299 static struct l2cap_chan
*l2cap_global_chan_by_scid(int state
, u16 cid
,
1303 struct l2cap_chan
*c
, *c1
= NULL
;
1305 read_lock(&chan_list_lock
);
1307 list_for_each_entry(c
, &chan_list
, global_l
) {
1308 struct sock
*sk
= c
->sk
;
1310 if (state
&& c
->state
!= state
)
1313 if (c
->scid
== cid
) {
1314 int src_match
, dst_match
;
1315 int src_any
, dst_any
;
1318 src_match
= !bacmp(&bt_sk(sk
)->src
, src
);
1319 dst_match
= !bacmp(&bt_sk(sk
)->dst
, dst
);
1320 if (src_match
&& dst_match
) {
1321 read_unlock(&chan_list_lock
);
1326 src_any
= !bacmp(&bt_sk(sk
)->src
, BDADDR_ANY
);
1327 dst_any
= !bacmp(&bt_sk(sk
)->dst
, BDADDR_ANY
);
1328 if ((src_match
&& dst_any
) || (src_any
&& dst_match
) ||
1329 (src_any
&& dst_any
))
1334 read_unlock(&chan_list_lock
);
1339 static void l2cap_le_conn_ready(struct l2cap_conn
*conn
)
1341 struct sock
*parent
, *sk
;
1342 struct l2cap_chan
*chan
, *pchan
;
1346 /* Check if we have socket listening on cid */
1347 pchan
= l2cap_global_chan_by_scid(BT_LISTEN
, L2CAP_CID_LE_DATA
,
1348 conn
->src
, conn
->dst
);
1356 chan
= pchan
->ops
->new_connection(pchan
);
1362 hci_conn_hold(conn
->hcon
);
1363 conn
->hcon
->disc_timeout
= HCI_DISCONN_TIMEOUT
;
1365 bacpy(&bt_sk(sk
)->src
, conn
->src
);
1366 bacpy(&bt_sk(sk
)->dst
, conn
->dst
);
1368 l2cap_chan_add(conn
, chan
);
1370 l2cap_chan_ready(chan
);
1373 release_sock(parent
);
1376 static void l2cap_conn_ready(struct l2cap_conn
*conn
)
1378 struct l2cap_chan
*chan
;
1379 struct hci_conn
*hcon
= conn
->hcon
;
1381 BT_DBG("conn %p", conn
);
1383 if (!hcon
->out
&& hcon
->type
== LE_LINK
)
1384 l2cap_le_conn_ready(conn
);
1386 if (hcon
->out
&& hcon
->type
== LE_LINK
)
1387 smp_conn_security(hcon
, hcon
->pending_sec_level
);
1389 mutex_lock(&conn
->chan_lock
);
1391 list_for_each_entry(chan
, &conn
->chan_l
, list
) {
1393 l2cap_chan_lock(chan
);
1395 if (chan
->chan_type
== L2CAP_CHAN_CONN_FIX_A2MP
) {
1396 l2cap_chan_unlock(chan
);
1400 if (hcon
->type
== LE_LINK
) {
1401 if (smp_conn_security(hcon
, chan
->sec_level
))
1402 l2cap_chan_ready(chan
);
1404 } else if (chan
->chan_type
!= L2CAP_CHAN_CONN_ORIENTED
) {
1405 struct sock
*sk
= chan
->sk
;
1406 __clear_chan_timer(chan
);
1408 __l2cap_state_change(chan
, BT_CONNECTED
);
1409 sk
->sk_state_change(sk
);
1412 } else if (chan
->state
== BT_CONNECT
)
1413 l2cap_do_start(chan
);
1415 l2cap_chan_unlock(chan
);
1418 mutex_unlock(&conn
->chan_lock
);
1421 /* Notify sockets that we cannot guaranty reliability anymore */
1422 static void l2cap_conn_unreliable(struct l2cap_conn
*conn
, int err
)
1424 struct l2cap_chan
*chan
;
1426 BT_DBG("conn %p", conn
);
1428 mutex_lock(&conn
->chan_lock
);
1430 list_for_each_entry(chan
, &conn
->chan_l
, list
) {
1431 if (test_bit(FLAG_FORCE_RELIABLE
, &chan
->flags
))
1432 l2cap_chan_set_err(chan
, err
);
1435 mutex_unlock(&conn
->chan_lock
);
1438 static void l2cap_info_timeout(struct work_struct
*work
)
1440 struct l2cap_conn
*conn
= container_of(work
, struct l2cap_conn
,
1443 conn
->info_state
|= L2CAP_INFO_FEAT_MASK_REQ_DONE
;
1444 conn
->info_ident
= 0;
1446 l2cap_conn_start(conn
);
1449 static void l2cap_conn_del(struct hci_conn
*hcon
, int err
)
1451 struct l2cap_conn
*conn
= hcon
->l2cap_data
;
1452 struct l2cap_chan
*chan
, *l
;
1457 BT_DBG("hcon %p conn %p, err %d", hcon
, conn
, err
);
1459 kfree_skb(conn
->rx_skb
);
1461 mutex_lock(&conn
->chan_lock
);
1464 list_for_each_entry_safe(chan
, l
, &conn
->chan_l
, list
) {
1465 l2cap_chan_hold(chan
);
1466 l2cap_chan_lock(chan
);
1468 l2cap_chan_del(chan
, err
);
1470 l2cap_chan_unlock(chan
);
1472 chan
->ops
->close(chan
);
1473 l2cap_chan_put(chan
);
1476 mutex_unlock(&conn
->chan_lock
);
1478 hci_chan_del(conn
->hchan
);
1480 if (conn
->info_state
& L2CAP_INFO_FEAT_MASK_REQ_SENT
)
1481 cancel_delayed_work_sync(&conn
->info_timer
);
1483 if (test_and_clear_bit(HCI_CONN_LE_SMP_PEND
, &hcon
->flags
)) {
1484 cancel_delayed_work_sync(&conn
->security_timer
);
1485 smp_chan_destroy(conn
);
1488 hcon
->l2cap_data
= NULL
;
1492 static void security_timeout(struct work_struct
*work
)
1494 struct l2cap_conn
*conn
= container_of(work
, struct l2cap_conn
,
1495 security_timer
.work
);
1497 BT_DBG("conn %p", conn
);
1499 if (test_and_clear_bit(HCI_CONN_LE_SMP_PEND
, &conn
->hcon
->flags
)) {
1500 smp_chan_destroy(conn
);
1501 l2cap_conn_del(conn
->hcon
, ETIMEDOUT
);
1505 static struct l2cap_conn
*l2cap_conn_add(struct hci_conn
*hcon
, u8 status
)
1507 struct l2cap_conn
*conn
= hcon
->l2cap_data
;
1508 struct hci_chan
*hchan
;
1513 hchan
= hci_chan_create(hcon
);
1517 conn
= kzalloc(sizeof(struct l2cap_conn
), GFP_KERNEL
);
1519 hci_chan_del(hchan
);
1523 hcon
->l2cap_data
= conn
;
1525 conn
->hchan
= hchan
;
1527 BT_DBG("hcon %p conn %p hchan %p", hcon
, conn
, hchan
);
1529 switch (hcon
->type
) {
1531 conn
->mtu
= hcon
->hdev
->block_mtu
;
1535 if (hcon
->hdev
->le_mtu
) {
1536 conn
->mtu
= hcon
->hdev
->le_mtu
;
1542 conn
->mtu
= hcon
->hdev
->acl_mtu
;
1546 conn
->src
= &hcon
->hdev
->bdaddr
;
1547 conn
->dst
= &hcon
->dst
;
1549 conn
->feat_mask
= 0;
1551 spin_lock_init(&conn
->lock
);
1552 mutex_init(&conn
->chan_lock
);
1554 INIT_LIST_HEAD(&conn
->chan_l
);
1556 if (hcon
->type
== LE_LINK
)
1557 INIT_DELAYED_WORK(&conn
->security_timer
, security_timeout
);
1559 INIT_DELAYED_WORK(&conn
->info_timer
, l2cap_info_timeout
);
1561 conn
->disc_reason
= HCI_ERROR_REMOTE_USER_TERM
;
1566 /* ---- Socket interface ---- */
1568 /* Find socket with psm and source / destination bdaddr.
1569 * Returns closest match.
1571 static struct l2cap_chan
*l2cap_global_chan_by_psm(int state
, __le16 psm
,
1575 struct l2cap_chan
*c
, *c1
= NULL
;
1577 read_lock(&chan_list_lock
);
1579 list_for_each_entry(c
, &chan_list
, global_l
) {
1580 struct sock
*sk
= c
->sk
;
1582 if (state
&& c
->state
!= state
)
1585 if (c
->psm
== psm
) {
1586 int src_match
, dst_match
;
1587 int src_any
, dst_any
;
1590 src_match
= !bacmp(&bt_sk(sk
)->src
, src
);
1591 dst_match
= !bacmp(&bt_sk(sk
)->dst
, dst
);
1592 if (src_match
&& dst_match
) {
1593 read_unlock(&chan_list_lock
);
1598 src_any
= !bacmp(&bt_sk(sk
)->src
, BDADDR_ANY
);
1599 dst_any
= !bacmp(&bt_sk(sk
)->dst
, BDADDR_ANY
);
1600 if ((src_match
&& dst_any
) || (src_any
&& dst_match
) ||
1601 (src_any
&& dst_any
))
1606 read_unlock(&chan_list_lock
);
1611 int l2cap_chan_connect(struct l2cap_chan
*chan
, __le16 psm
, u16 cid
,
1612 bdaddr_t
*dst
, u8 dst_type
)
1614 struct sock
*sk
= chan
->sk
;
1615 bdaddr_t
*src
= &bt_sk(sk
)->src
;
1616 struct l2cap_conn
*conn
;
1617 struct hci_conn
*hcon
;
1618 struct hci_dev
*hdev
;
1622 BT_DBG("%pMR -> %pMR (type %u) psm 0x%2.2x", src
, dst
,
1623 dst_type
, __le16_to_cpu(psm
));
1625 hdev
= hci_get_route(dst
, src
);
1627 return -EHOSTUNREACH
;
1631 l2cap_chan_lock(chan
);
1633 /* PSM must be odd and lsb of upper byte must be 0 */
1634 if ((__le16_to_cpu(psm
) & 0x0101) != 0x0001 && !cid
&&
1635 chan
->chan_type
!= L2CAP_CHAN_RAW
) {
1640 if (chan
->chan_type
== L2CAP_CHAN_CONN_ORIENTED
&& !(psm
|| cid
)) {
1645 switch (chan
->mode
) {
1646 case L2CAP_MODE_BASIC
:
1648 case L2CAP_MODE_ERTM
:
1649 case L2CAP_MODE_STREAMING
:
1658 switch (chan
->state
) {
1662 /* Already connecting */
1667 /* Already connected */
1681 /* Set destination address and psm */
1683 bacpy(&bt_sk(sk
)->dst
, dst
);
1689 auth_type
= l2cap_get_auth_type(chan
);
1691 if (chan
->dcid
== L2CAP_CID_LE_DATA
)
1692 hcon
= hci_connect(hdev
, LE_LINK
, dst
, dst_type
,
1693 chan
->sec_level
, auth_type
);
1695 hcon
= hci_connect(hdev
, ACL_LINK
, dst
, dst_type
,
1696 chan
->sec_level
, auth_type
);
1699 err
= PTR_ERR(hcon
);
1703 conn
= l2cap_conn_add(hcon
, 0);
1710 if (hcon
->type
== LE_LINK
) {
1713 if (!list_empty(&conn
->chan_l
)) {
1722 /* Update source addr of the socket */
1723 bacpy(src
, conn
->src
);
1725 l2cap_chan_unlock(chan
);
1726 l2cap_chan_add(conn
, chan
);
1727 l2cap_chan_lock(chan
);
1729 l2cap_state_change(chan
, BT_CONNECT
);
1730 __set_chan_timer(chan
, sk
->sk_sndtimeo
);
1732 if (hcon
->state
== BT_CONNECTED
) {
1733 if (chan
->chan_type
!= L2CAP_CHAN_CONN_ORIENTED
) {
1734 __clear_chan_timer(chan
);
1735 if (l2cap_chan_check_security(chan
))
1736 l2cap_state_change(chan
, BT_CONNECTED
);
1738 l2cap_do_start(chan
);
1744 l2cap_chan_unlock(chan
);
1745 hci_dev_unlock(hdev
);
1750 int __l2cap_wait_ack(struct sock
*sk
)
1752 struct l2cap_chan
*chan
= l2cap_pi(sk
)->chan
;
1753 DECLARE_WAITQUEUE(wait
, current
);
1757 add_wait_queue(sk_sleep(sk
), &wait
);
1758 set_current_state(TASK_INTERRUPTIBLE
);
1759 while (chan
->unacked_frames
> 0 && chan
->conn
) {
1763 if (signal_pending(current
)) {
1764 err
= sock_intr_errno(timeo
);
1769 timeo
= schedule_timeout(timeo
);
1771 set_current_state(TASK_INTERRUPTIBLE
);
1773 err
= sock_error(sk
);
1777 set_current_state(TASK_RUNNING
);
1778 remove_wait_queue(sk_sleep(sk
), &wait
);
1782 static void l2cap_monitor_timeout(struct work_struct
*work
)
1784 struct l2cap_chan
*chan
= container_of(work
, struct l2cap_chan
,
1785 monitor_timer
.work
);
1787 BT_DBG("chan %p", chan
);
1789 l2cap_chan_lock(chan
);
1792 l2cap_chan_unlock(chan
);
1793 l2cap_chan_put(chan
);
1797 l2cap_tx(chan
, NULL
, NULL
, L2CAP_EV_MONITOR_TO
);
1799 l2cap_chan_unlock(chan
);
1800 l2cap_chan_put(chan
);
1803 static void l2cap_retrans_timeout(struct work_struct
*work
)
1805 struct l2cap_chan
*chan
= container_of(work
, struct l2cap_chan
,
1806 retrans_timer
.work
);
1808 BT_DBG("chan %p", chan
);
1810 l2cap_chan_lock(chan
);
1813 l2cap_chan_unlock(chan
);
1814 l2cap_chan_put(chan
);
1818 l2cap_tx(chan
, NULL
, NULL
, L2CAP_EV_RETRANS_TO
);
1819 l2cap_chan_unlock(chan
);
1820 l2cap_chan_put(chan
);
1823 static void l2cap_streaming_send(struct l2cap_chan
*chan
,
1824 struct sk_buff_head
*skbs
)
1826 struct sk_buff
*skb
;
1827 struct l2cap_ctrl
*control
;
1829 BT_DBG("chan %p, skbs %p", chan
, skbs
);
1831 if (__chan_is_moving(chan
))
1834 skb_queue_splice_tail_init(skbs
, &chan
->tx_q
);
1836 while (!skb_queue_empty(&chan
->tx_q
)) {
1838 skb
= skb_dequeue(&chan
->tx_q
);
1840 bt_cb(skb
)->control
.retries
= 1;
1841 control
= &bt_cb(skb
)->control
;
1843 control
->reqseq
= 0;
1844 control
->txseq
= chan
->next_tx_seq
;
1846 __pack_control(chan
, control
, skb
);
1848 if (chan
->fcs
== L2CAP_FCS_CRC16
) {
1849 u16 fcs
= crc16(0, (u8
*) skb
->data
, skb
->len
);
1850 put_unaligned_le16(fcs
, skb_put(skb
, L2CAP_FCS_SIZE
));
1853 l2cap_do_send(chan
, skb
);
1855 BT_DBG("Sent txseq %u", control
->txseq
);
1857 chan
->next_tx_seq
= __next_seq(chan
, chan
->next_tx_seq
);
1858 chan
->frames_sent
++;
1862 static int l2cap_ertm_send(struct l2cap_chan
*chan
)
1864 struct sk_buff
*skb
, *tx_skb
;
1865 struct l2cap_ctrl
*control
;
1868 BT_DBG("chan %p", chan
);
1870 if (chan
->state
!= BT_CONNECTED
)
1873 if (test_bit(CONN_REMOTE_BUSY
, &chan
->conn_state
))
1876 if (__chan_is_moving(chan
))
1879 while (chan
->tx_send_head
&&
1880 chan
->unacked_frames
< chan
->remote_tx_win
&&
1881 chan
->tx_state
== L2CAP_TX_STATE_XMIT
) {
1883 skb
= chan
->tx_send_head
;
1885 bt_cb(skb
)->control
.retries
= 1;
1886 control
= &bt_cb(skb
)->control
;
1888 if (test_and_clear_bit(CONN_SEND_FBIT
, &chan
->conn_state
))
1891 control
->reqseq
= chan
->buffer_seq
;
1892 chan
->last_acked_seq
= chan
->buffer_seq
;
1893 control
->txseq
= chan
->next_tx_seq
;
1895 __pack_control(chan
, control
, skb
);
1897 if (chan
->fcs
== L2CAP_FCS_CRC16
) {
1898 u16 fcs
= crc16(0, (u8
*) skb
->data
, skb
->len
);
1899 put_unaligned_le16(fcs
, skb_put(skb
, L2CAP_FCS_SIZE
));
1902 /* Clone after data has been modified. Data is assumed to be
1903 read-only (for locking purposes) on cloned sk_buffs.
1905 tx_skb
= skb_clone(skb
, GFP_KERNEL
);
1910 __set_retrans_timer(chan
);
1912 chan
->next_tx_seq
= __next_seq(chan
, chan
->next_tx_seq
);
1913 chan
->unacked_frames
++;
1914 chan
->frames_sent
++;
1917 if (skb_queue_is_last(&chan
->tx_q
, skb
))
1918 chan
->tx_send_head
= NULL
;
1920 chan
->tx_send_head
= skb_queue_next(&chan
->tx_q
, skb
);
1922 l2cap_do_send(chan
, tx_skb
);
1923 BT_DBG("Sent txseq %u", control
->txseq
);
1926 BT_DBG("Sent %d, %u unacked, %u in ERTM queue", sent
,
1927 chan
->unacked_frames
, skb_queue_len(&chan
->tx_q
));
1932 static void l2cap_ertm_resend(struct l2cap_chan
*chan
)
1934 struct l2cap_ctrl control
;
1935 struct sk_buff
*skb
;
1936 struct sk_buff
*tx_skb
;
1939 BT_DBG("chan %p", chan
);
1941 if (test_bit(CONN_REMOTE_BUSY
, &chan
->conn_state
))
1944 if (__chan_is_moving(chan
))
1947 while (chan
->retrans_list
.head
!= L2CAP_SEQ_LIST_CLEAR
) {
1948 seq
= l2cap_seq_list_pop(&chan
->retrans_list
);
1950 skb
= l2cap_ertm_seq_in_queue(&chan
->tx_q
, seq
);
1952 BT_DBG("Error: Can't retransmit seq %d, frame missing",
1957 bt_cb(skb
)->control
.retries
++;
1958 control
= bt_cb(skb
)->control
;
1960 if (chan
->max_tx
!= 0 &&
1961 bt_cb(skb
)->control
.retries
> chan
->max_tx
) {
1962 BT_DBG("Retry limit exceeded (%d)", chan
->max_tx
);
1963 l2cap_send_disconn_req(chan
->conn
, chan
, ECONNRESET
);
1964 l2cap_seq_list_clear(&chan
->retrans_list
);
1968 control
.reqseq
= chan
->buffer_seq
;
1969 if (test_and_clear_bit(CONN_SEND_FBIT
, &chan
->conn_state
))
1974 if (skb_cloned(skb
)) {
1975 /* Cloned sk_buffs are read-only, so we need a
1978 tx_skb
= skb_copy(skb
, GFP_KERNEL
);
1980 tx_skb
= skb_clone(skb
, GFP_KERNEL
);
1984 l2cap_seq_list_clear(&chan
->retrans_list
);
1988 /* Update skb contents */
1989 if (test_bit(FLAG_EXT_CTRL
, &chan
->flags
)) {
1990 put_unaligned_le32(__pack_extended_control(&control
),
1991 tx_skb
->data
+ L2CAP_HDR_SIZE
);
1993 put_unaligned_le16(__pack_enhanced_control(&control
),
1994 tx_skb
->data
+ L2CAP_HDR_SIZE
);
1997 if (chan
->fcs
== L2CAP_FCS_CRC16
) {
1998 u16 fcs
= crc16(0, (u8
*) tx_skb
->data
, tx_skb
->len
);
1999 put_unaligned_le16(fcs
, skb_put(tx_skb
,
2003 l2cap_do_send(chan
, tx_skb
);
2005 BT_DBG("Resent txseq %d", control
.txseq
);
2007 chan
->last_acked_seq
= chan
->buffer_seq
;
2011 static void l2cap_retransmit(struct l2cap_chan
*chan
,
2012 struct l2cap_ctrl
*control
)
2014 BT_DBG("chan %p, control %p", chan
, control
);
2016 l2cap_seq_list_append(&chan
->retrans_list
, control
->reqseq
);
2017 l2cap_ertm_resend(chan
);
2020 static void l2cap_retransmit_all(struct l2cap_chan
*chan
,
2021 struct l2cap_ctrl
*control
)
2023 struct sk_buff
*skb
;
2025 BT_DBG("chan %p, control %p", chan
, control
);
2028 set_bit(CONN_SEND_FBIT
, &chan
->conn_state
);
2030 l2cap_seq_list_clear(&chan
->retrans_list
);
2032 if (test_bit(CONN_REMOTE_BUSY
, &chan
->conn_state
))
2035 if (chan
->unacked_frames
) {
2036 skb_queue_walk(&chan
->tx_q
, skb
) {
2037 if (bt_cb(skb
)->control
.txseq
== control
->reqseq
||
2038 skb
== chan
->tx_send_head
)
2042 skb_queue_walk_from(&chan
->tx_q
, skb
) {
2043 if (skb
== chan
->tx_send_head
)
2046 l2cap_seq_list_append(&chan
->retrans_list
,
2047 bt_cb(skb
)->control
.txseq
);
2050 l2cap_ertm_resend(chan
);
2054 static void l2cap_send_ack(struct l2cap_chan
*chan
)
2056 struct l2cap_ctrl control
;
2057 u16 frames_to_ack
= __seq_offset(chan
, chan
->buffer_seq
,
2058 chan
->last_acked_seq
);
2061 BT_DBG("chan %p last_acked_seq %d buffer_seq %d",
2062 chan
, chan
->last_acked_seq
, chan
->buffer_seq
);
2064 memset(&control
, 0, sizeof(control
));
2067 if (test_bit(CONN_LOCAL_BUSY
, &chan
->conn_state
) &&
2068 chan
->rx_state
== L2CAP_RX_STATE_RECV
) {
2069 __clear_ack_timer(chan
);
2070 control
.super
= L2CAP_SUPER_RNR
;
2071 control
.reqseq
= chan
->buffer_seq
;
2072 l2cap_send_sframe(chan
, &control
);
2074 if (!test_bit(CONN_REMOTE_BUSY
, &chan
->conn_state
)) {
2075 l2cap_ertm_send(chan
);
2076 /* If any i-frames were sent, they included an ack */
2077 if (chan
->buffer_seq
== chan
->last_acked_seq
)
2081 /* Ack now if the window is 3/4ths full.
2082 * Calculate without mul or div
2084 threshold
= chan
->ack_win
;
2085 threshold
+= threshold
<< 1;
2088 BT_DBG("frames_to_ack %u, threshold %d", frames_to_ack
,
2091 if (frames_to_ack
>= threshold
) {
2092 __clear_ack_timer(chan
);
2093 control
.super
= L2CAP_SUPER_RR
;
2094 control
.reqseq
= chan
->buffer_seq
;
2095 l2cap_send_sframe(chan
, &control
);
2100 __set_ack_timer(chan
);
2104 static inline int l2cap_skbuff_fromiovec(struct l2cap_chan
*chan
,
2105 struct msghdr
*msg
, int len
,
2106 int count
, struct sk_buff
*skb
)
2108 struct l2cap_conn
*conn
= chan
->conn
;
2109 struct sk_buff
**frag
;
2112 if (memcpy_fromiovec(skb_put(skb
, count
), msg
->msg_iov
, count
))
2118 /* Continuation fragments (no L2CAP header) */
2119 frag
= &skb_shinfo(skb
)->frag_list
;
2121 struct sk_buff
*tmp
;
2123 count
= min_t(unsigned int, conn
->mtu
, len
);
2125 tmp
= chan
->ops
->alloc_skb(chan
, count
,
2126 msg
->msg_flags
& MSG_DONTWAIT
);
2128 return PTR_ERR(tmp
);
2132 if (memcpy_fromiovec(skb_put(*frag
, count
), msg
->msg_iov
, count
))
2135 (*frag
)->priority
= skb
->priority
;
2140 skb
->len
+= (*frag
)->len
;
2141 skb
->data_len
+= (*frag
)->len
;
2143 frag
= &(*frag
)->next
;
2149 static struct sk_buff
*l2cap_create_connless_pdu(struct l2cap_chan
*chan
,
2150 struct msghdr
*msg
, size_t len
,
2153 struct l2cap_conn
*conn
= chan
->conn
;
2154 struct sk_buff
*skb
;
2155 int err
, count
, hlen
= L2CAP_HDR_SIZE
+ L2CAP_PSMLEN_SIZE
;
2156 struct l2cap_hdr
*lh
;
2158 BT_DBG("chan %p len %zu priority %u", chan
, len
, priority
);
2160 count
= min_t(unsigned int, (conn
->mtu
- hlen
), len
);
2162 skb
= chan
->ops
->alloc_skb(chan
, count
+ hlen
,
2163 msg
->msg_flags
& MSG_DONTWAIT
);
2167 skb
->priority
= priority
;
2169 /* Create L2CAP header */
2170 lh
= (struct l2cap_hdr
*) skb_put(skb
, L2CAP_HDR_SIZE
);
2171 lh
->cid
= cpu_to_le16(chan
->dcid
);
2172 lh
->len
= cpu_to_le16(len
+ L2CAP_PSMLEN_SIZE
);
2173 put_unaligned(chan
->psm
, skb_put(skb
, L2CAP_PSMLEN_SIZE
));
2175 err
= l2cap_skbuff_fromiovec(chan
, msg
, len
, count
, skb
);
2176 if (unlikely(err
< 0)) {
2178 return ERR_PTR(err
);
2183 static struct sk_buff
*l2cap_create_basic_pdu(struct l2cap_chan
*chan
,
2184 struct msghdr
*msg
, size_t len
,
2187 struct l2cap_conn
*conn
= chan
->conn
;
2188 struct sk_buff
*skb
;
2190 struct l2cap_hdr
*lh
;
2192 BT_DBG("chan %p len %zu", chan
, len
);
2194 count
= min_t(unsigned int, (conn
->mtu
- L2CAP_HDR_SIZE
), len
);
2196 skb
= chan
->ops
->alloc_skb(chan
, count
+ L2CAP_HDR_SIZE
,
2197 msg
->msg_flags
& MSG_DONTWAIT
);
2201 skb
->priority
= priority
;
2203 /* Create L2CAP header */
2204 lh
= (struct l2cap_hdr
*) skb_put(skb
, L2CAP_HDR_SIZE
);
2205 lh
->cid
= cpu_to_le16(chan
->dcid
);
2206 lh
->len
= cpu_to_le16(len
);
2208 err
= l2cap_skbuff_fromiovec(chan
, msg
, len
, count
, skb
);
2209 if (unlikely(err
< 0)) {
2211 return ERR_PTR(err
);
2216 static struct sk_buff
*l2cap_create_iframe_pdu(struct l2cap_chan
*chan
,
2217 struct msghdr
*msg
, size_t len
,
2220 struct l2cap_conn
*conn
= chan
->conn
;
2221 struct sk_buff
*skb
;
2222 int err
, count
, hlen
;
2223 struct l2cap_hdr
*lh
;
2225 BT_DBG("chan %p len %zu", chan
, len
);
2228 return ERR_PTR(-ENOTCONN
);
2230 hlen
= __ertm_hdr_size(chan
);
2233 hlen
+= L2CAP_SDULEN_SIZE
;
2235 if (chan
->fcs
== L2CAP_FCS_CRC16
)
2236 hlen
+= L2CAP_FCS_SIZE
;
2238 count
= min_t(unsigned int, (conn
->mtu
- hlen
), len
);
2240 skb
= chan
->ops
->alloc_skb(chan
, count
+ hlen
,
2241 msg
->msg_flags
& MSG_DONTWAIT
);
2245 /* Create L2CAP header */
2246 lh
= (struct l2cap_hdr
*) skb_put(skb
, L2CAP_HDR_SIZE
);
2247 lh
->cid
= cpu_to_le16(chan
->dcid
);
2248 lh
->len
= cpu_to_le16(len
+ (hlen
- L2CAP_HDR_SIZE
));
2250 /* Control header is populated later */
2251 if (test_bit(FLAG_EXT_CTRL
, &chan
->flags
))
2252 put_unaligned_le32(0, skb_put(skb
, L2CAP_EXT_CTRL_SIZE
));
2254 put_unaligned_le16(0, skb_put(skb
, L2CAP_ENH_CTRL_SIZE
));
2257 put_unaligned_le16(sdulen
, skb_put(skb
, L2CAP_SDULEN_SIZE
));
2259 err
= l2cap_skbuff_fromiovec(chan
, msg
, len
, count
, skb
);
2260 if (unlikely(err
< 0)) {
2262 return ERR_PTR(err
);
2265 bt_cb(skb
)->control
.fcs
= chan
->fcs
;
2266 bt_cb(skb
)->control
.retries
= 0;
2270 static int l2cap_segment_sdu(struct l2cap_chan
*chan
,
2271 struct sk_buff_head
*seg_queue
,
2272 struct msghdr
*msg
, size_t len
)
2274 struct sk_buff
*skb
;
2279 BT_DBG("chan %p, msg %p, len %zu", chan
, msg
, len
);
2281 /* It is critical that ERTM PDUs fit in a single HCI fragment,
2282 * so fragmented skbs are not used. The HCI layer's handling
2283 * of fragmented skbs is not compatible with ERTM's queueing.
2286 /* PDU size is derived from the HCI MTU */
2287 pdu_len
= chan
->conn
->mtu
;
2289 /* Constrain PDU size for BR/EDR connections */
2291 pdu_len
= min_t(size_t, pdu_len
, L2CAP_BREDR_MAX_PAYLOAD
);
2293 /* Adjust for largest possible L2CAP overhead. */
2295 pdu_len
-= L2CAP_FCS_SIZE
;
2297 pdu_len
-= __ertm_hdr_size(chan
);
2299 /* Remote device may have requested smaller PDUs */
2300 pdu_len
= min_t(size_t, pdu_len
, chan
->remote_mps
);
2302 if (len
<= pdu_len
) {
2303 sar
= L2CAP_SAR_UNSEGMENTED
;
2307 sar
= L2CAP_SAR_START
;
2309 pdu_len
-= L2CAP_SDULEN_SIZE
;
2313 skb
= l2cap_create_iframe_pdu(chan
, msg
, pdu_len
, sdu_len
);
2316 __skb_queue_purge(seg_queue
);
2317 return PTR_ERR(skb
);
2320 bt_cb(skb
)->control
.sar
= sar
;
2321 __skb_queue_tail(seg_queue
, skb
);
2326 pdu_len
+= L2CAP_SDULEN_SIZE
;
2329 if (len
<= pdu_len
) {
2330 sar
= L2CAP_SAR_END
;
2333 sar
= L2CAP_SAR_CONTINUE
;
2340 int l2cap_chan_send(struct l2cap_chan
*chan
, struct msghdr
*msg
, size_t len
,
2343 struct sk_buff
*skb
;
2345 struct sk_buff_head seg_queue
;
2347 /* Connectionless channel */
2348 if (chan
->chan_type
== L2CAP_CHAN_CONN_LESS
) {
2349 skb
= l2cap_create_connless_pdu(chan
, msg
, len
, priority
);
2351 return PTR_ERR(skb
);
2353 l2cap_do_send(chan
, skb
);
2357 switch (chan
->mode
) {
2358 case L2CAP_MODE_BASIC
:
2359 /* Check outgoing MTU */
2360 if (len
> chan
->omtu
)
2363 /* Create a basic PDU */
2364 skb
= l2cap_create_basic_pdu(chan
, msg
, len
, priority
);
2366 return PTR_ERR(skb
);
2368 l2cap_do_send(chan
, skb
);
2372 case L2CAP_MODE_ERTM
:
2373 case L2CAP_MODE_STREAMING
:
2374 /* Check outgoing MTU */
2375 if (len
> chan
->omtu
) {
2380 __skb_queue_head_init(&seg_queue
);
2382 /* Do segmentation before calling in to the state machine,
2383 * since it's possible to block while waiting for memory
2386 err
= l2cap_segment_sdu(chan
, &seg_queue
, msg
, len
);
2388 /* The channel could have been closed while segmenting,
2389 * check that it is still connected.
2391 if (chan
->state
!= BT_CONNECTED
) {
2392 __skb_queue_purge(&seg_queue
);
2399 if (chan
->mode
== L2CAP_MODE_ERTM
)
2400 l2cap_tx(chan
, NULL
, &seg_queue
, L2CAP_EV_DATA_REQUEST
);
2402 l2cap_streaming_send(chan
, &seg_queue
);
2406 /* If the skbs were not queued for sending, they'll still be in
2407 * seg_queue and need to be purged.
2409 __skb_queue_purge(&seg_queue
);
2413 BT_DBG("bad state %1.1x", chan
->mode
);
2420 static void l2cap_send_srej(struct l2cap_chan
*chan
, u16 txseq
)
2422 struct l2cap_ctrl control
;
2425 BT_DBG("chan %p, txseq %u", chan
, txseq
);
2427 memset(&control
, 0, sizeof(control
));
2429 control
.super
= L2CAP_SUPER_SREJ
;
2431 for (seq
= chan
->expected_tx_seq
; seq
!= txseq
;
2432 seq
= __next_seq(chan
, seq
)) {
2433 if (!l2cap_ertm_seq_in_queue(&chan
->srej_q
, seq
)) {
2434 control
.reqseq
= seq
;
2435 l2cap_send_sframe(chan
, &control
);
2436 l2cap_seq_list_append(&chan
->srej_list
, seq
);
2440 chan
->expected_tx_seq
= __next_seq(chan
, txseq
);
2443 static void l2cap_send_srej_tail(struct l2cap_chan
*chan
)
2445 struct l2cap_ctrl control
;
2447 BT_DBG("chan %p", chan
);
2449 if (chan
->srej_list
.tail
== L2CAP_SEQ_LIST_CLEAR
)
2452 memset(&control
, 0, sizeof(control
));
2454 control
.super
= L2CAP_SUPER_SREJ
;
2455 control
.reqseq
= chan
->srej_list
.tail
;
2456 l2cap_send_sframe(chan
, &control
);
2459 static void l2cap_send_srej_list(struct l2cap_chan
*chan
, u16 txseq
)
2461 struct l2cap_ctrl control
;
2465 BT_DBG("chan %p, txseq %u", chan
, txseq
);
2467 memset(&control
, 0, sizeof(control
));
2469 control
.super
= L2CAP_SUPER_SREJ
;
2471 /* Capture initial list head to allow only one pass through the list. */
2472 initial_head
= chan
->srej_list
.head
;
2475 seq
= l2cap_seq_list_pop(&chan
->srej_list
);
2476 if (seq
== txseq
|| seq
== L2CAP_SEQ_LIST_CLEAR
)
2479 control
.reqseq
= seq
;
2480 l2cap_send_sframe(chan
, &control
);
2481 l2cap_seq_list_append(&chan
->srej_list
, seq
);
2482 } while (chan
->srej_list
.head
!= initial_head
);
2485 static void l2cap_process_reqseq(struct l2cap_chan
*chan
, u16 reqseq
)
2487 struct sk_buff
*acked_skb
;
2490 BT_DBG("chan %p, reqseq %u", chan
, reqseq
);
2492 if (chan
->unacked_frames
== 0 || reqseq
== chan
->expected_ack_seq
)
2495 BT_DBG("expected_ack_seq %u, unacked_frames %u",
2496 chan
->expected_ack_seq
, chan
->unacked_frames
);
2498 for (ackseq
= chan
->expected_ack_seq
; ackseq
!= reqseq
;
2499 ackseq
= __next_seq(chan
, ackseq
)) {
2501 acked_skb
= l2cap_ertm_seq_in_queue(&chan
->tx_q
, ackseq
);
2503 skb_unlink(acked_skb
, &chan
->tx_q
);
2504 kfree_skb(acked_skb
);
2505 chan
->unacked_frames
--;
2509 chan
->expected_ack_seq
= reqseq
;
2511 if (chan
->unacked_frames
== 0)
2512 __clear_retrans_timer(chan
);
2514 BT_DBG("unacked_frames %u", chan
->unacked_frames
);
2517 static void l2cap_abort_rx_srej_sent(struct l2cap_chan
*chan
)
2519 BT_DBG("chan %p", chan
);
2521 chan
->expected_tx_seq
= chan
->buffer_seq
;
2522 l2cap_seq_list_clear(&chan
->srej_list
);
2523 skb_queue_purge(&chan
->srej_q
);
2524 chan
->rx_state
= L2CAP_RX_STATE_RECV
;
2527 static void l2cap_tx_state_xmit(struct l2cap_chan
*chan
,
2528 struct l2cap_ctrl
*control
,
2529 struct sk_buff_head
*skbs
, u8 event
)
2531 BT_DBG("chan %p, control %p, skbs %p, event %d", chan
, control
, skbs
,
2535 case L2CAP_EV_DATA_REQUEST
:
2536 if (chan
->tx_send_head
== NULL
)
2537 chan
->tx_send_head
= skb_peek(skbs
);
2539 skb_queue_splice_tail_init(skbs
, &chan
->tx_q
);
2540 l2cap_ertm_send(chan
);
2542 case L2CAP_EV_LOCAL_BUSY_DETECTED
:
2543 BT_DBG("Enter LOCAL_BUSY");
2544 set_bit(CONN_LOCAL_BUSY
, &chan
->conn_state
);
2546 if (chan
->rx_state
== L2CAP_RX_STATE_SREJ_SENT
) {
2547 /* The SREJ_SENT state must be aborted if we are to
2548 * enter the LOCAL_BUSY state.
2550 l2cap_abort_rx_srej_sent(chan
);
2553 l2cap_send_ack(chan
);
2556 case L2CAP_EV_LOCAL_BUSY_CLEAR
:
2557 BT_DBG("Exit LOCAL_BUSY");
2558 clear_bit(CONN_LOCAL_BUSY
, &chan
->conn_state
);
2560 if (test_bit(CONN_RNR_SENT
, &chan
->conn_state
)) {
2561 struct l2cap_ctrl local_control
;
2563 memset(&local_control
, 0, sizeof(local_control
));
2564 local_control
.sframe
= 1;
2565 local_control
.super
= L2CAP_SUPER_RR
;
2566 local_control
.poll
= 1;
2567 local_control
.reqseq
= chan
->buffer_seq
;
2568 l2cap_send_sframe(chan
, &local_control
);
2570 chan
->retry_count
= 1;
2571 __set_monitor_timer(chan
);
2572 chan
->tx_state
= L2CAP_TX_STATE_WAIT_F
;
2575 case L2CAP_EV_RECV_REQSEQ_AND_FBIT
:
2576 l2cap_process_reqseq(chan
, control
->reqseq
);
2578 case L2CAP_EV_EXPLICIT_POLL
:
2579 l2cap_send_rr_or_rnr(chan
, 1);
2580 chan
->retry_count
= 1;
2581 __set_monitor_timer(chan
);
2582 __clear_ack_timer(chan
);
2583 chan
->tx_state
= L2CAP_TX_STATE_WAIT_F
;
2585 case L2CAP_EV_RETRANS_TO
:
2586 l2cap_send_rr_or_rnr(chan
, 1);
2587 chan
->retry_count
= 1;
2588 __set_monitor_timer(chan
);
2589 chan
->tx_state
= L2CAP_TX_STATE_WAIT_F
;
2591 case L2CAP_EV_RECV_FBIT
:
2592 /* Nothing to process */
2599 static void l2cap_tx_state_wait_f(struct l2cap_chan
*chan
,
2600 struct l2cap_ctrl
*control
,
2601 struct sk_buff_head
*skbs
, u8 event
)
2603 BT_DBG("chan %p, control %p, skbs %p, event %d", chan
, control
, skbs
,
2607 case L2CAP_EV_DATA_REQUEST
:
2608 if (chan
->tx_send_head
== NULL
)
2609 chan
->tx_send_head
= skb_peek(skbs
);
2610 /* Queue data, but don't send. */
2611 skb_queue_splice_tail_init(skbs
, &chan
->tx_q
);
2613 case L2CAP_EV_LOCAL_BUSY_DETECTED
:
2614 BT_DBG("Enter LOCAL_BUSY");
2615 set_bit(CONN_LOCAL_BUSY
, &chan
->conn_state
);
2617 if (chan
->rx_state
== L2CAP_RX_STATE_SREJ_SENT
) {
2618 /* The SREJ_SENT state must be aborted if we are to
2619 * enter the LOCAL_BUSY state.
2621 l2cap_abort_rx_srej_sent(chan
);
2624 l2cap_send_ack(chan
);
2627 case L2CAP_EV_LOCAL_BUSY_CLEAR
:
2628 BT_DBG("Exit LOCAL_BUSY");
2629 clear_bit(CONN_LOCAL_BUSY
, &chan
->conn_state
);
2631 if (test_bit(CONN_RNR_SENT
, &chan
->conn_state
)) {
2632 struct l2cap_ctrl local_control
;
2633 memset(&local_control
, 0, sizeof(local_control
));
2634 local_control
.sframe
= 1;
2635 local_control
.super
= L2CAP_SUPER_RR
;
2636 local_control
.poll
= 1;
2637 local_control
.reqseq
= chan
->buffer_seq
;
2638 l2cap_send_sframe(chan
, &local_control
);
2640 chan
->retry_count
= 1;
2641 __set_monitor_timer(chan
);
2642 chan
->tx_state
= L2CAP_TX_STATE_WAIT_F
;
2645 case L2CAP_EV_RECV_REQSEQ_AND_FBIT
:
2646 l2cap_process_reqseq(chan
, control
->reqseq
);
2650 case L2CAP_EV_RECV_FBIT
:
2651 if (control
&& control
->final
) {
2652 __clear_monitor_timer(chan
);
2653 if (chan
->unacked_frames
> 0)
2654 __set_retrans_timer(chan
);
2655 chan
->retry_count
= 0;
2656 chan
->tx_state
= L2CAP_TX_STATE_XMIT
;
2657 BT_DBG("recv fbit tx_state 0x2.2%x", chan
->tx_state
);
2660 case L2CAP_EV_EXPLICIT_POLL
:
2663 case L2CAP_EV_MONITOR_TO
:
2664 if (chan
->max_tx
== 0 || chan
->retry_count
< chan
->max_tx
) {
2665 l2cap_send_rr_or_rnr(chan
, 1);
2666 __set_monitor_timer(chan
);
2667 chan
->retry_count
++;
2669 l2cap_send_disconn_req(chan
->conn
, chan
, ECONNABORTED
);
2677 static void l2cap_tx(struct l2cap_chan
*chan
, struct l2cap_ctrl
*control
,
2678 struct sk_buff_head
*skbs
, u8 event
)
2680 BT_DBG("chan %p, control %p, skbs %p, event %d, state %d",
2681 chan
, control
, skbs
, event
, chan
->tx_state
);
2683 switch (chan
->tx_state
) {
2684 case L2CAP_TX_STATE_XMIT
:
2685 l2cap_tx_state_xmit(chan
, control
, skbs
, event
);
2687 case L2CAP_TX_STATE_WAIT_F
:
2688 l2cap_tx_state_wait_f(chan
, control
, skbs
, event
);
2696 static void l2cap_pass_to_tx(struct l2cap_chan
*chan
,
2697 struct l2cap_ctrl
*control
)
2699 BT_DBG("chan %p, control %p", chan
, control
);
2700 l2cap_tx(chan
, control
, NULL
, L2CAP_EV_RECV_REQSEQ_AND_FBIT
);
2703 static void l2cap_pass_to_tx_fbit(struct l2cap_chan
*chan
,
2704 struct l2cap_ctrl
*control
)
2706 BT_DBG("chan %p, control %p", chan
, control
);
2707 l2cap_tx(chan
, control
, NULL
, L2CAP_EV_RECV_FBIT
);
2710 /* Copy frame to all raw sockets on that connection */
2711 static void l2cap_raw_recv(struct l2cap_conn
*conn
, struct sk_buff
*skb
)
2713 struct sk_buff
*nskb
;
2714 struct l2cap_chan
*chan
;
2716 BT_DBG("conn %p", conn
);
2718 mutex_lock(&conn
->chan_lock
);
2720 list_for_each_entry(chan
, &conn
->chan_l
, list
) {
2721 struct sock
*sk
= chan
->sk
;
2722 if (chan
->chan_type
!= L2CAP_CHAN_RAW
)
2725 /* Don't send frame to the socket it came from */
2728 nskb
= skb_clone(skb
, GFP_KERNEL
);
2732 if (chan
->ops
->recv(chan
, nskb
))
2736 mutex_unlock(&conn
->chan_lock
);
2739 /* ---- L2CAP signalling commands ---- */
2740 static struct sk_buff
*l2cap_build_cmd(struct l2cap_conn
*conn
, u8 code
,
2741 u8 ident
, u16 dlen
, void *data
)
2743 struct sk_buff
*skb
, **frag
;
2744 struct l2cap_cmd_hdr
*cmd
;
2745 struct l2cap_hdr
*lh
;
2748 BT_DBG("conn %p, code 0x%2.2x, ident 0x%2.2x, len %u",
2749 conn
, code
, ident
, dlen
);
2751 len
= L2CAP_HDR_SIZE
+ L2CAP_CMD_HDR_SIZE
+ dlen
;
2752 count
= min_t(unsigned int, conn
->mtu
, len
);
2754 skb
= bt_skb_alloc(count
, GFP_KERNEL
);
2758 lh
= (struct l2cap_hdr
*) skb_put(skb
, L2CAP_HDR_SIZE
);
2759 lh
->len
= cpu_to_le16(L2CAP_CMD_HDR_SIZE
+ dlen
);
2761 if (conn
->hcon
->type
== LE_LINK
)
2762 lh
->cid
= __constant_cpu_to_le16(L2CAP_CID_LE_SIGNALING
);
2764 lh
->cid
= __constant_cpu_to_le16(L2CAP_CID_SIGNALING
);
2766 cmd
= (struct l2cap_cmd_hdr
*) skb_put(skb
, L2CAP_CMD_HDR_SIZE
);
2769 cmd
->len
= cpu_to_le16(dlen
);
2772 count
-= L2CAP_HDR_SIZE
+ L2CAP_CMD_HDR_SIZE
;
2773 memcpy(skb_put(skb
, count
), data
, count
);
2779 /* Continuation fragments (no L2CAP header) */
2780 frag
= &skb_shinfo(skb
)->frag_list
;
2782 count
= min_t(unsigned int, conn
->mtu
, len
);
2784 *frag
= bt_skb_alloc(count
, GFP_KERNEL
);
2788 memcpy(skb_put(*frag
, count
), data
, count
);
2793 frag
= &(*frag
)->next
;
2803 static inline int l2cap_get_conf_opt(void **ptr
, int *type
, int *olen
,
2806 struct l2cap_conf_opt
*opt
= *ptr
;
2809 len
= L2CAP_CONF_OPT_SIZE
+ opt
->len
;
2817 *val
= *((u8
*) opt
->val
);
2821 *val
= get_unaligned_le16(opt
->val
);
2825 *val
= get_unaligned_le32(opt
->val
);
2829 *val
= (unsigned long) opt
->val
;
2833 BT_DBG("type 0x%2.2x len %u val 0x%lx", *type
, opt
->len
, *val
);
2837 static void l2cap_add_conf_opt(void **ptr
, u8 type
, u8 len
, unsigned long val
)
2839 struct l2cap_conf_opt
*opt
= *ptr
;
2841 BT_DBG("type 0x%2.2x len %u val 0x%lx", type
, len
, val
);
2848 *((u8
*) opt
->val
) = val
;
2852 put_unaligned_le16(val
, opt
->val
);
2856 put_unaligned_le32(val
, opt
->val
);
2860 memcpy(opt
->val
, (void *) val
, len
);
2864 *ptr
+= L2CAP_CONF_OPT_SIZE
+ len
;
2867 static void l2cap_add_opt_efs(void **ptr
, struct l2cap_chan
*chan
)
2869 struct l2cap_conf_efs efs
;
2871 switch (chan
->mode
) {
2872 case L2CAP_MODE_ERTM
:
2873 efs
.id
= chan
->local_id
;
2874 efs
.stype
= chan
->local_stype
;
2875 efs
.msdu
= cpu_to_le16(chan
->local_msdu
);
2876 efs
.sdu_itime
= cpu_to_le32(chan
->local_sdu_itime
);
2877 efs
.acc_lat
= __constant_cpu_to_le32(L2CAP_DEFAULT_ACC_LAT
);
2878 efs
.flush_to
= __constant_cpu_to_le32(L2CAP_EFS_DEFAULT_FLUSH_TO
);
2881 case L2CAP_MODE_STREAMING
:
2883 efs
.stype
= L2CAP_SERV_BESTEFFORT
;
2884 efs
.msdu
= cpu_to_le16(chan
->local_msdu
);
2885 efs
.sdu_itime
= cpu_to_le32(chan
->local_sdu_itime
);
2894 l2cap_add_conf_opt(ptr
, L2CAP_CONF_EFS
, sizeof(efs
),
2895 (unsigned long) &efs
);
2898 static void l2cap_ack_timeout(struct work_struct
*work
)
2900 struct l2cap_chan
*chan
= container_of(work
, struct l2cap_chan
,
2904 BT_DBG("chan %p", chan
);
2906 l2cap_chan_lock(chan
);
2908 frames_to_ack
= __seq_offset(chan
, chan
->buffer_seq
,
2909 chan
->last_acked_seq
);
2912 l2cap_send_rr_or_rnr(chan
, 0);
2914 l2cap_chan_unlock(chan
);
2915 l2cap_chan_put(chan
);
2918 int l2cap_ertm_init(struct l2cap_chan
*chan
)
2922 chan
->next_tx_seq
= 0;
2923 chan
->expected_tx_seq
= 0;
2924 chan
->expected_ack_seq
= 0;
2925 chan
->unacked_frames
= 0;
2926 chan
->buffer_seq
= 0;
2927 chan
->frames_sent
= 0;
2928 chan
->last_acked_seq
= 0;
2930 chan
->sdu_last_frag
= NULL
;
2933 skb_queue_head_init(&chan
->tx_q
);
2935 chan
->local_amp_id
= 0;
2937 chan
->move_state
= L2CAP_MOVE_STABLE
;
2938 chan
->move_role
= L2CAP_MOVE_ROLE_NONE
;
2940 if (chan
->mode
!= L2CAP_MODE_ERTM
)
2943 chan
->rx_state
= L2CAP_RX_STATE_RECV
;
2944 chan
->tx_state
= L2CAP_TX_STATE_XMIT
;
2946 INIT_DELAYED_WORK(&chan
->retrans_timer
, l2cap_retrans_timeout
);
2947 INIT_DELAYED_WORK(&chan
->monitor_timer
, l2cap_monitor_timeout
);
2948 INIT_DELAYED_WORK(&chan
->ack_timer
, l2cap_ack_timeout
);
2950 skb_queue_head_init(&chan
->srej_q
);
2952 err
= l2cap_seq_list_init(&chan
->srej_list
, chan
->tx_win
);
2956 err
= l2cap_seq_list_init(&chan
->retrans_list
, chan
->remote_tx_win
);
2958 l2cap_seq_list_free(&chan
->srej_list
);
2963 static inline __u8
l2cap_select_mode(__u8 mode
, __u16 remote_feat_mask
)
2966 case L2CAP_MODE_STREAMING
:
2967 case L2CAP_MODE_ERTM
:
2968 if (l2cap_mode_supported(mode
, remote_feat_mask
))
2972 return L2CAP_MODE_BASIC
;
2976 static inline bool __l2cap_ews_supported(struct l2cap_chan
*chan
)
2978 return enable_hs
&& chan
->conn
->feat_mask
& L2CAP_FEAT_EXT_WINDOW
;
2981 static inline bool __l2cap_efs_supported(struct l2cap_chan
*chan
)
2983 return enable_hs
&& chan
->conn
->feat_mask
& L2CAP_FEAT_EXT_FLOW
;
2986 static void __l2cap_set_ertm_timeouts(struct l2cap_chan
*chan
,
2987 struct l2cap_conf_rfc
*rfc
)
2989 if (chan
->local_amp_id
&& chan
->hs_hcon
) {
2990 u64 ertm_to
= chan
->hs_hcon
->hdev
->amp_be_flush_to
;
2992 /* Class 1 devices have must have ERTM timeouts
2993 * exceeding the Link Supervision Timeout. The
2994 * default Link Supervision Timeout for AMP
2995 * controllers is 10 seconds.
2997 * Class 1 devices use 0xffffffff for their
2998 * best-effort flush timeout, so the clamping logic
2999 * will result in a timeout that meets the above
3000 * requirement. ERTM timeouts are 16-bit values, so
3001 * the maximum timeout is 65.535 seconds.
3004 /* Convert timeout to milliseconds and round */
3005 ertm_to
= DIV_ROUND_UP_ULL(ertm_to
, 1000);
3007 /* This is the recommended formula for class 2 devices
3008 * that start ERTM timers when packets are sent to the
3011 ertm_to
= 3 * ertm_to
+ 500;
3013 if (ertm_to
> 0xffff)
3016 rfc
->retrans_timeout
= cpu_to_le16((u16
) ertm_to
);
3017 rfc
->monitor_timeout
= rfc
->retrans_timeout
;
3019 rfc
->retrans_timeout
= __constant_cpu_to_le16(L2CAP_DEFAULT_RETRANS_TO
);
3020 rfc
->monitor_timeout
= __constant_cpu_to_le16(L2CAP_DEFAULT_MONITOR_TO
);
3024 static inline void l2cap_txwin_setup(struct l2cap_chan
*chan
)
3026 if (chan
->tx_win
> L2CAP_DEFAULT_TX_WINDOW
&&
3027 __l2cap_ews_supported(chan
)) {
3028 /* use extended control field */
3029 set_bit(FLAG_EXT_CTRL
, &chan
->flags
);
3030 chan
->tx_win_max
= L2CAP_DEFAULT_EXT_WINDOW
;
3032 chan
->tx_win
= min_t(u16
, chan
->tx_win
,
3033 L2CAP_DEFAULT_TX_WINDOW
);
3034 chan
->tx_win_max
= L2CAP_DEFAULT_TX_WINDOW
;
3036 chan
->ack_win
= chan
->tx_win
;
3039 static int l2cap_build_conf_req(struct l2cap_chan
*chan
, void *data
)
3041 struct l2cap_conf_req
*req
= data
;
3042 struct l2cap_conf_rfc rfc
= { .mode
= chan
->mode
};
3043 void *ptr
= req
->data
;
3046 BT_DBG("chan %p", chan
);
3048 if (chan
->num_conf_req
|| chan
->num_conf_rsp
)
3051 switch (chan
->mode
) {
3052 case L2CAP_MODE_STREAMING
:
3053 case L2CAP_MODE_ERTM
:
3054 if (test_bit(CONF_STATE2_DEVICE
, &chan
->conf_state
))
3057 if (__l2cap_efs_supported(chan
))
3058 set_bit(FLAG_EFS_ENABLE
, &chan
->flags
);
3062 chan
->mode
= l2cap_select_mode(rfc
.mode
, chan
->conn
->feat_mask
);
3067 if (chan
->imtu
!= L2CAP_DEFAULT_MTU
)
3068 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_MTU
, 2, chan
->imtu
);
3070 switch (chan
->mode
) {
3071 case L2CAP_MODE_BASIC
:
3072 if (!(chan
->conn
->feat_mask
& L2CAP_FEAT_ERTM
) &&
3073 !(chan
->conn
->feat_mask
& L2CAP_FEAT_STREAMING
))
3076 rfc
.mode
= L2CAP_MODE_BASIC
;
3078 rfc
.max_transmit
= 0;
3079 rfc
.retrans_timeout
= 0;
3080 rfc
.monitor_timeout
= 0;
3081 rfc
.max_pdu_size
= 0;
3083 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_RFC
, sizeof(rfc
),
3084 (unsigned long) &rfc
);
3087 case L2CAP_MODE_ERTM
:
3088 rfc
.mode
= L2CAP_MODE_ERTM
;
3089 rfc
.max_transmit
= chan
->max_tx
;
3091 __l2cap_set_ertm_timeouts(chan
, &rfc
);
3093 size
= min_t(u16
, L2CAP_DEFAULT_MAX_PDU_SIZE
, chan
->conn
->mtu
-
3094 L2CAP_EXT_HDR_SIZE
- L2CAP_SDULEN_SIZE
-
3096 rfc
.max_pdu_size
= cpu_to_le16(size
);
3098 l2cap_txwin_setup(chan
);
3100 rfc
.txwin_size
= min_t(u16
, chan
->tx_win
,
3101 L2CAP_DEFAULT_TX_WINDOW
);
3103 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_RFC
, sizeof(rfc
),
3104 (unsigned long) &rfc
);
3106 if (test_bit(FLAG_EFS_ENABLE
, &chan
->flags
))
3107 l2cap_add_opt_efs(&ptr
, chan
);
3109 if (!(chan
->conn
->feat_mask
& L2CAP_FEAT_FCS
))
3112 if (chan
->fcs
== L2CAP_FCS_NONE
||
3113 test_bit(CONF_NO_FCS_RECV
, &chan
->conf_state
)) {
3114 chan
->fcs
= L2CAP_FCS_NONE
;
3115 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_FCS
, 1, chan
->fcs
);
3118 if (test_bit(FLAG_EXT_CTRL
, &chan
->flags
))
3119 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_EWS
, 2,
3123 case L2CAP_MODE_STREAMING
:
3124 l2cap_txwin_setup(chan
);
3125 rfc
.mode
= L2CAP_MODE_STREAMING
;
3127 rfc
.max_transmit
= 0;
3128 rfc
.retrans_timeout
= 0;
3129 rfc
.monitor_timeout
= 0;
3131 size
= min_t(u16
, L2CAP_DEFAULT_MAX_PDU_SIZE
, chan
->conn
->mtu
-
3132 L2CAP_EXT_HDR_SIZE
- L2CAP_SDULEN_SIZE
-
3134 rfc
.max_pdu_size
= cpu_to_le16(size
);
3136 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_RFC
, sizeof(rfc
),
3137 (unsigned long) &rfc
);
3139 if (test_bit(FLAG_EFS_ENABLE
, &chan
->flags
))
3140 l2cap_add_opt_efs(&ptr
, chan
);
3142 if (!(chan
->conn
->feat_mask
& L2CAP_FEAT_FCS
))
3145 if (chan
->fcs
== L2CAP_FCS_NONE
||
3146 test_bit(CONF_NO_FCS_RECV
, &chan
->conf_state
)) {
3147 chan
->fcs
= L2CAP_FCS_NONE
;
3148 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_FCS
, 1, chan
->fcs
);
3153 req
->dcid
= cpu_to_le16(chan
->dcid
);
3154 req
->flags
= __constant_cpu_to_le16(0);
3159 static int l2cap_parse_conf_req(struct l2cap_chan
*chan
, void *data
)
3161 struct l2cap_conf_rsp
*rsp
= data
;
3162 void *ptr
= rsp
->data
;
3163 void *req
= chan
->conf_req
;
3164 int len
= chan
->conf_len
;
3165 int type
, hint
, olen
;
3167 struct l2cap_conf_rfc rfc
= { .mode
= L2CAP_MODE_BASIC
};
3168 struct l2cap_conf_efs efs
;
3170 u16 mtu
= L2CAP_DEFAULT_MTU
;
3171 u16 result
= L2CAP_CONF_SUCCESS
;
3174 BT_DBG("chan %p", chan
);
3176 while (len
>= L2CAP_CONF_OPT_SIZE
) {
3177 len
-= l2cap_get_conf_opt(&req
, &type
, &olen
, &val
);
3179 hint
= type
& L2CAP_CONF_HINT
;
3180 type
&= L2CAP_CONF_MASK
;
3183 case L2CAP_CONF_MTU
:
3187 case L2CAP_CONF_FLUSH_TO
:
3188 chan
->flush_to
= val
;
3191 case L2CAP_CONF_QOS
:
3194 case L2CAP_CONF_RFC
:
3195 if (olen
== sizeof(rfc
))
3196 memcpy(&rfc
, (void *) val
, olen
);
3199 case L2CAP_CONF_FCS
:
3200 if (val
== L2CAP_FCS_NONE
)
3201 set_bit(CONF_NO_FCS_RECV
, &chan
->conf_state
);
3204 case L2CAP_CONF_EFS
:
3206 if (olen
== sizeof(efs
))
3207 memcpy(&efs
, (void *) val
, olen
);
3210 case L2CAP_CONF_EWS
:
3212 return -ECONNREFUSED
;
3214 set_bit(FLAG_EXT_CTRL
, &chan
->flags
);
3215 set_bit(CONF_EWS_RECV
, &chan
->conf_state
);
3216 chan
->tx_win_max
= L2CAP_DEFAULT_EXT_WINDOW
;
3217 chan
->remote_tx_win
= val
;
3224 result
= L2CAP_CONF_UNKNOWN
;
3225 *((u8
*) ptr
++) = type
;
3230 if (chan
->num_conf_rsp
|| chan
->num_conf_req
> 1)
3233 switch (chan
->mode
) {
3234 case L2CAP_MODE_STREAMING
:
3235 case L2CAP_MODE_ERTM
:
3236 if (!test_bit(CONF_STATE2_DEVICE
, &chan
->conf_state
)) {
3237 chan
->mode
= l2cap_select_mode(rfc
.mode
,
3238 chan
->conn
->feat_mask
);
3243 if (__l2cap_efs_supported(chan
))
3244 set_bit(FLAG_EFS_ENABLE
, &chan
->flags
);
3246 return -ECONNREFUSED
;
3249 if (chan
->mode
!= rfc
.mode
)
3250 return -ECONNREFUSED
;
3256 if (chan
->mode
!= rfc
.mode
) {
3257 result
= L2CAP_CONF_UNACCEPT
;
3258 rfc
.mode
= chan
->mode
;
3260 if (chan
->num_conf_rsp
== 1)
3261 return -ECONNREFUSED
;
3263 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_RFC
, sizeof(rfc
),
3264 (unsigned long) &rfc
);
3267 if (result
== L2CAP_CONF_SUCCESS
) {
3268 /* Configure output options and let the other side know
3269 * which ones we don't like. */
3271 if (mtu
< L2CAP_DEFAULT_MIN_MTU
)
3272 result
= L2CAP_CONF_UNACCEPT
;
3275 set_bit(CONF_MTU_DONE
, &chan
->conf_state
);
3277 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_MTU
, 2, chan
->omtu
);
3280 if (chan
->local_stype
!= L2CAP_SERV_NOTRAFIC
&&
3281 efs
.stype
!= L2CAP_SERV_NOTRAFIC
&&
3282 efs
.stype
!= chan
->local_stype
) {
3284 result
= L2CAP_CONF_UNACCEPT
;
3286 if (chan
->num_conf_req
>= 1)
3287 return -ECONNREFUSED
;
3289 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_EFS
,
3291 (unsigned long) &efs
);
3293 /* Send PENDING Conf Rsp */
3294 result
= L2CAP_CONF_PENDING
;
3295 set_bit(CONF_LOC_CONF_PEND
, &chan
->conf_state
);
3300 case L2CAP_MODE_BASIC
:
3301 chan
->fcs
= L2CAP_FCS_NONE
;
3302 set_bit(CONF_MODE_DONE
, &chan
->conf_state
);
3305 case L2CAP_MODE_ERTM
:
3306 if (!test_bit(CONF_EWS_RECV
, &chan
->conf_state
))
3307 chan
->remote_tx_win
= rfc
.txwin_size
;
3309 rfc
.txwin_size
= L2CAP_DEFAULT_TX_WINDOW
;
3311 chan
->remote_max_tx
= rfc
.max_transmit
;
3313 size
= min_t(u16
, le16_to_cpu(rfc
.max_pdu_size
),
3314 chan
->conn
->mtu
- L2CAP_EXT_HDR_SIZE
-
3315 L2CAP_SDULEN_SIZE
- L2CAP_FCS_SIZE
);
3316 rfc
.max_pdu_size
= cpu_to_le16(size
);
3317 chan
->remote_mps
= size
;
3319 __l2cap_set_ertm_timeouts(chan
, &rfc
);
3321 set_bit(CONF_MODE_DONE
, &chan
->conf_state
);
3323 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_RFC
,
3324 sizeof(rfc
), (unsigned long) &rfc
);
3326 if (test_bit(FLAG_EFS_ENABLE
, &chan
->flags
)) {
3327 chan
->remote_id
= efs
.id
;
3328 chan
->remote_stype
= efs
.stype
;
3329 chan
->remote_msdu
= le16_to_cpu(efs
.msdu
);
3330 chan
->remote_flush_to
=
3331 le32_to_cpu(efs
.flush_to
);
3332 chan
->remote_acc_lat
=
3333 le32_to_cpu(efs
.acc_lat
);
3334 chan
->remote_sdu_itime
=
3335 le32_to_cpu(efs
.sdu_itime
);
3336 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_EFS
,
3338 (unsigned long) &efs
);
3342 case L2CAP_MODE_STREAMING
:
3343 size
= min_t(u16
, le16_to_cpu(rfc
.max_pdu_size
),
3344 chan
->conn
->mtu
- L2CAP_EXT_HDR_SIZE
-
3345 L2CAP_SDULEN_SIZE
- L2CAP_FCS_SIZE
);
3346 rfc
.max_pdu_size
= cpu_to_le16(size
);
3347 chan
->remote_mps
= size
;
3349 set_bit(CONF_MODE_DONE
, &chan
->conf_state
);
3351 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_RFC
, sizeof(rfc
),
3352 (unsigned long) &rfc
);
3357 result
= L2CAP_CONF_UNACCEPT
;
3359 memset(&rfc
, 0, sizeof(rfc
));
3360 rfc
.mode
= chan
->mode
;
3363 if (result
== L2CAP_CONF_SUCCESS
)
3364 set_bit(CONF_OUTPUT_DONE
, &chan
->conf_state
);
3366 rsp
->scid
= cpu_to_le16(chan
->dcid
);
3367 rsp
->result
= cpu_to_le16(result
);
3368 rsp
->flags
= __constant_cpu_to_le16(0);
3373 static int l2cap_parse_conf_rsp(struct l2cap_chan
*chan
, void *rsp
, int len
,
3374 void *data
, u16
*result
)
3376 struct l2cap_conf_req
*req
= data
;
3377 void *ptr
= req
->data
;
3380 struct l2cap_conf_rfc rfc
= { .mode
= L2CAP_MODE_BASIC
};
3381 struct l2cap_conf_efs efs
;
3383 BT_DBG("chan %p, rsp %p, len %d, req %p", chan
, rsp
, len
, data
);
3385 while (len
>= L2CAP_CONF_OPT_SIZE
) {
3386 len
-= l2cap_get_conf_opt(&rsp
, &type
, &olen
, &val
);
3389 case L2CAP_CONF_MTU
:
3390 if (val
< L2CAP_DEFAULT_MIN_MTU
) {
3391 *result
= L2CAP_CONF_UNACCEPT
;
3392 chan
->imtu
= L2CAP_DEFAULT_MIN_MTU
;
3395 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_MTU
, 2, chan
->imtu
);
3398 case L2CAP_CONF_FLUSH_TO
:
3399 chan
->flush_to
= val
;
3400 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_FLUSH_TO
,
3404 case L2CAP_CONF_RFC
:
3405 if (olen
== sizeof(rfc
))
3406 memcpy(&rfc
, (void *)val
, olen
);
3408 if (test_bit(CONF_STATE2_DEVICE
, &chan
->conf_state
) &&
3409 rfc
.mode
!= chan
->mode
)
3410 return -ECONNREFUSED
;
3414 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_RFC
,
3415 sizeof(rfc
), (unsigned long) &rfc
);
3418 case L2CAP_CONF_EWS
:
3419 chan
->ack_win
= min_t(u16
, val
, chan
->ack_win
);
3420 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_EWS
, 2,
3424 case L2CAP_CONF_EFS
:
3425 if (olen
== sizeof(efs
))
3426 memcpy(&efs
, (void *)val
, olen
);
3428 if (chan
->local_stype
!= L2CAP_SERV_NOTRAFIC
&&
3429 efs
.stype
!= L2CAP_SERV_NOTRAFIC
&&
3430 efs
.stype
!= chan
->local_stype
)
3431 return -ECONNREFUSED
;
3433 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_EFS
, sizeof(efs
),
3434 (unsigned long) &efs
);
3439 if (chan
->mode
== L2CAP_MODE_BASIC
&& chan
->mode
!= rfc
.mode
)
3440 return -ECONNREFUSED
;
3442 chan
->mode
= rfc
.mode
;
3444 if (*result
== L2CAP_CONF_SUCCESS
|| *result
== L2CAP_CONF_PENDING
) {
3446 case L2CAP_MODE_ERTM
:
3447 chan
->retrans_timeout
= le16_to_cpu(rfc
.retrans_timeout
);
3448 chan
->monitor_timeout
= le16_to_cpu(rfc
.monitor_timeout
);
3449 chan
->mps
= le16_to_cpu(rfc
.max_pdu_size
);
3450 if (!test_bit(FLAG_EXT_CTRL
, &chan
->flags
))
3451 chan
->ack_win
= min_t(u16
, chan
->ack_win
,
3454 if (test_bit(FLAG_EFS_ENABLE
, &chan
->flags
)) {
3455 chan
->local_msdu
= le16_to_cpu(efs
.msdu
);
3456 chan
->local_sdu_itime
=
3457 le32_to_cpu(efs
.sdu_itime
);
3458 chan
->local_acc_lat
= le32_to_cpu(efs
.acc_lat
);
3459 chan
->local_flush_to
=
3460 le32_to_cpu(efs
.flush_to
);
3464 case L2CAP_MODE_STREAMING
:
3465 chan
->mps
= le16_to_cpu(rfc
.max_pdu_size
);
3469 req
->dcid
= cpu_to_le16(chan
->dcid
);
3470 req
->flags
= __constant_cpu_to_le16(0);
3475 static int l2cap_build_conf_rsp(struct l2cap_chan
*chan
, void *data
,
3476 u16 result
, u16 flags
)
3478 struct l2cap_conf_rsp
*rsp
= data
;
3479 void *ptr
= rsp
->data
;
3481 BT_DBG("chan %p", chan
);
3483 rsp
->scid
= cpu_to_le16(chan
->dcid
);
3484 rsp
->result
= cpu_to_le16(result
);
3485 rsp
->flags
= cpu_to_le16(flags
);
3490 void __l2cap_connect_rsp_defer(struct l2cap_chan
*chan
)
3492 struct l2cap_conn_rsp rsp
;
3493 struct l2cap_conn
*conn
= chan
->conn
;
3497 rsp
.scid
= cpu_to_le16(chan
->dcid
);
3498 rsp
.dcid
= cpu_to_le16(chan
->scid
);
3499 rsp
.result
= __constant_cpu_to_le16(L2CAP_CR_SUCCESS
);
3500 rsp
.status
= __constant_cpu_to_le16(L2CAP_CS_NO_INFO
);
3503 rsp_code
= L2CAP_CREATE_CHAN_RSP
;
3505 rsp_code
= L2CAP_CONN_RSP
;
3507 BT_DBG("chan %p rsp_code %u", chan
, rsp_code
);
3509 l2cap_send_cmd(conn
, chan
->ident
, rsp_code
, sizeof(rsp
), &rsp
);
3511 if (test_and_set_bit(CONF_REQ_SENT
, &chan
->conf_state
))
3514 l2cap_send_cmd(conn
, l2cap_get_ident(conn
), L2CAP_CONF_REQ
,
3515 l2cap_build_conf_req(chan
, buf
), buf
);
3516 chan
->num_conf_req
++;
3519 static void l2cap_conf_rfc_get(struct l2cap_chan
*chan
, void *rsp
, int len
)
3523 /* Use sane default values in case a misbehaving remote device
3524 * did not send an RFC or extended window size option.
3526 u16 txwin_ext
= chan
->ack_win
;
3527 struct l2cap_conf_rfc rfc
= {
3529 .retrans_timeout
= __constant_cpu_to_le16(L2CAP_DEFAULT_RETRANS_TO
),
3530 .monitor_timeout
= __constant_cpu_to_le16(L2CAP_DEFAULT_MONITOR_TO
),
3531 .max_pdu_size
= cpu_to_le16(chan
->imtu
),
3532 .txwin_size
= min_t(u16
, chan
->ack_win
, L2CAP_DEFAULT_TX_WINDOW
),
3535 BT_DBG("chan %p, rsp %p, len %d", chan
, rsp
, len
);
3537 if ((chan
->mode
!= L2CAP_MODE_ERTM
) && (chan
->mode
!= L2CAP_MODE_STREAMING
))
3540 while (len
>= L2CAP_CONF_OPT_SIZE
) {
3541 len
-= l2cap_get_conf_opt(&rsp
, &type
, &olen
, &val
);
3544 case L2CAP_CONF_RFC
:
3545 if (olen
== sizeof(rfc
))
3546 memcpy(&rfc
, (void *)val
, olen
);
3548 case L2CAP_CONF_EWS
:
3555 case L2CAP_MODE_ERTM
:
3556 chan
->retrans_timeout
= le16_to_cpu(rfc
.retrans_timeout
);
3557 chan
->monitor_timeout
= le16_to_cpu(rfc
.monitor_timeout
);
3558 chan
->mps
= le16_to_cpu(rfc
.max_pdu_size
);
3559 if (test_bit(FLAG_EXT_CTRL
, &chan
->flags
))
3560 chan
->ack_win
= min_t(u16
, chan
->ack_win
, txwin_ext
);
3562 chan
->ack_win
= min_t(u16
, chan
->ack_win
,
3565 case L2CAP_MODE_STREAMING
:
3566 chan
->mps
= le16_to_cpu(rfc
.max_pdu_size
);
3570 static inline int l2cap_command_rej(struct l2cap_conn
*conn
,
3571 struct l2cap_cmd_hdr
*cmd
, u8
*data
)
3573 struct l2cap_cmd_rej_unk
*rej
= (struct l2cap_cmd_rej_unk
*) data
;
3575 if (rej
->reason
!= L2CAP_REJ_NOT_UNDERSTOOD
)
3578 if ((conn
->info_state
& L2CAP_INFO_FEAT_MASK_REQ_SENT
) &&
3579 cmd
->ident
== conn
->info_ident
) {
3580 cancel_delayed_work(&conn
->info_timer
);
3582 conn
->info_state
|= L2CAP_INFO_FEAT_MASK_REQ_DONE
;
3583 conn
->info_ident
= 0;
3585 l2cap_conn_start(conn
);
3591 static struct l2cap_chan
*l2cap_connect(struct l2cap_conn
*conn
,
3592 struct l2cap_cmd_hdr
*cmd
,
3593 u8
*data
, u8 rsp_code
, u8 amp_id
)
3595 struct l2cap_conn_req
*req
= (struct l2cap_conn_req
*) data
;
3596 struct l2cap_conn_rsp rsp
;
3597 struct l2cap_chan
*chan
= NULL
, *pchan
;
3598 struct sock
*parent
, *sk
= NULL
;
3599 int result
, status
= L2CAP_CS_NO_INFO
;
3601 u16 dcid
= 0, scid
= __le16_to_cpu(req
->scid
);
3602 __le16 psm
= req
->psm
;
3604 BT_DBG("psm 0x%2.2x scid 0x%4.4x", __le16_to_cpu(psm
), scid
);
3606 /* Check if we have socket listening on psm */
3607 pchan
= l2cap_global_chan_by_psm(BT_LISTEN
, psm
, conn
->src
, conn
->dst
);
3609 result
= L2CAP_CR_BAD_PSM
;
3615 mutex_lock(&conn
->chan_lock
);
3618 /* Check if the ACL is secure enough (if not SDP) */
3619 if (psm
!= __constant_cpu_to_le16(L2CAP_PSM_SDP
) &&
3620 !hci_conn_check_link_mode(conn
->hcon
)) {
3621 conn
->disc_reason
= HCI_ERROR_AUTH_FAILURE
;
3622 result
= L2CAP_CR_SEC_BLOCK
;
3626 result
= L2CAP_CR_NO_MEM
;
3628 /* Check if we already have channel with that dcid */
3629 if (__l2cap_get_chan_by_dcid(conn
, scid
))
3632 chan
= pchan
->ops
->new_connection(pchan
);
3638 hci_conn_hold(conn
->hcon
);
3640 bacpy(&bt_sk(sk
)->src
, conn
->src
);
3641 bacpy(&bt_sk(sk
)->dst
, conn
->dst
);
3644 chan
->local_amp_id
= amp_id
;
3646 __l2cap_chan_add(conn
, chan
);
3650 __set_chan_timer(chan
, sk
->sk_sndtimeo
);
3652 chan
->ident
= cmd
->ident
;
3654 if (conn
->info_state
& L2CAP_INFO_FEAT_MASK_REQ_DONE
) {
3655 if (l2cap_chan_check_security(chan
)) {
3656 if (test_bit(BT_SK_DEFER_SETUP
, &bt_sk(sk
)->flags
)) {
3657 __l2cap_state_change(chan
, BT_CONNECT2
);
3658 result
= L2CAP_CR_PEND
;
3659 status
= L2CAP_CS_AUTHOR_PEND
;
3660 chan
->ops
->defer(chan
);
3662 /* Force pending result for AMP controllers.
3663 * The connection will succeed after the
3664 * physical link is up.
3667 __l2cap_state_change(chan
, BT_CONNECT2
);
3668 result
= L2CAP_CR_PEND
;
3670 __l2cap_state_change(chan
, BT_CONFIG
);
3671 result
= L2CAP_CR_SUCCESS
;
3673 status
= L2CAP_CS_NO_INFO
;
3676 __l2cap_state_change(chan
, BT_CONNECT2
);
3677 result
= L2CAP_CR_PEND
;
3678 status
= L2CAP_CS_AUTHEN_PEND
;
3681 __l2cap_state_change(chan
, BT_CONNECT2
);
3682 result
= L2CAP_CR_PEND
;
3683 status
= L2CAP_CS_NO_INFO
;
3687 release_sock(parent
);
3688 mutex_unlock(&conn
->chan_lock
);
3691 rsp
.scid
= cpu_to_le16(scid
);
3692 rsp
.dcid
= cpu_to_le16(dcid
);
3693 rsp
.result
= cpu_to_le16(result
);
3694 rsp
.status
= cpu_to_le16(status
);
3695 l2cap_send_cmd(conn
, cmd
->ident
, rsp_code
, sizeof(rsp
), &rsp
);
3697 if (result
== L2CAP_CR_PEND
&& status
== L2CAP_CS_NO_INFO
) {
3698 struct l2cap_info_req info
;
3699 info
.type
= __constant_cpu_to_le16(L2CAP_IT_FEAT_MASK
);
3701 conn
->info_state
|= L2CAP_INFO_FEAT_MASK_REQ_SENT
;
3702 conn
->info_ident
= l2cap_get_ident(conn
);
3704 schedule_delayed_work(&conn
->info_timer
, L2CAP_INFO_TIMEOUT
);
3706 l2cap_send_cmd(conn
, conn
->info_ident
, L2CAP_INFO_REQ
,
3707 sizeof(info
), &info
);
3710 if (chan
&& !test_bit(CONF_REQ_SENT
, &chan
->conf_state
) &&
3711 result
== L2CAP_CR_SUCCESS
) {
3713 set_bit(CONF_REQ_SENT
, &chan
->conf_state
);
3714 l2cap_send_cmd(conn
, l2cap_get_ident(conn
), L2CAP_CONF_REQ
,
3715 l2cap_build_conf_req(chan
, buf
), buf
);
3716 chan
->num_conf_req
++;
3722 static int l2cap_connect_req(struct l2cap_conn
*conn
,
3723 struct l2cap_cmd_hdr
*cmd
, u8
*data
)
3725 l2cap_connect(conn
, cmd
, data
, L2CAP_CONN_RSP
, 0);
3729 static int l2cap_connect_create_rsp(struct l2cap_conn
*conn
,
3730 struct l2cap_cmd_hdr
*cmd
, u8
*data
)
3732 struct l2cap_conn_rsp
*rsp
= (struct l2cap_conn_rsp
*) data
;
3733 u16 scid
, dcid
, result
, status
;
3734 struct l2cap_chan
*chan
;
3738 scid
= __le16_to_cpu(rsp
->scid
);
3739 dcid
= __le16_to_cpu(rsp
->dcid
);
3740 result
= __le16_to_cpu(rsp
->result
);
3741 status
= __le16_to_cpu(rsp
->status
);
3743 BT_DBG("dcid 0x%4.4x scid 0x%4.4x result 0x%2.2x status 0x%2.2x",
3744 dcid
, scid
, result
, status
);
3746 mutex_lock(&conn
->chan_lock
);
3749 chan
= __l2cap_get_chan_by_scid(conn
, scid
);
3755 chan
= __l2cap_get_chan_by_ident(conn
, cmd
->ident
);
3764 l2cap_chan_lock(chan
);
3767 case L2CAP_CR_SUCCESS
:
3768 l2cap_state_change(chan
, BT_CONFIG
);
3771 clear_bit(CONF_CONNECT_PEND
, &chan
->conf_state
);
3773 if (test_and_set_bit(CONF_REQ_SENT
, &chan
->conf_state
))
3776 l2cap_send_cmd(conn
, l2cap_get_ident(conn
), L2CAP_CONF_REQ
,
3777 l2cap_build_conf_req(chan
, req
), req
);
3778 chan
->num_conf_req
++;
3782 set_bit(CONF_CONNECT_PEND
, &chan
->conf_state
);
3786 l2cap_chan_del(chan
, ECONNREFUSED
);
3790 l2cap_chan_unlock(chan
);
3793 mutex_unlock(&conn
->chan_lock
);
3798 static inline void set_default_fcs(struct l2cap_chan
*chan
)
3800 /* FCS is enabled only in ERTM or streaming mode, if one or both
3803 if (chan
->mode
!= L2CAP_MODE_ERTM
&& chan
->mode
!= L2CAP_MODE_STREAMING
)
3804 chan
->fcs
= L2CAP_FCS_NONE
;
3805 else if (!test_bit(CONF_NO_FCS_RECV
, &chan
->conf_state
))
3806 chan
->fcs
= L2CAP_FCS_CRC16
;
3809 static void l2cap_send_efs_conf_rsp(struct l2cap_chan
*chan
, void *data
,
3810 u8 ident
, u16 flags
)
3812 struct l2cap_conn
*conn
= chan
->conn
;
3814 BT_DBG("conn %p chan %p ident %d flags 0x%4.4x", conn
, chan
, ident
,
3817 clear_bit(CONF_LOC_CONF_PEND
, &chan
->conf_state
);
3818 set_bit(CONF_OUTPUT_DONE
, &chan
->conf_state
);
3820 l2cap_send_cmd(conn
, ident
, L2CAP_CONF_RSP
,
3821 l2cap_build_conf_rsp(chan
, data
,
3822 L2CAP_CONF_SUCCESS
, flags
), data
);
3825 static inline int l2cap_config_req(struct l2cap_conn
*conn
,
3826 struct l2cap_cmd_hdr
*cmd
, u16 cmd_len
,
3829 struct l2cap_conf_req
*req
= (struct l2cap_conf_req
*) data
;
3832 struct l2cap_chan
*chan
;
3835 dcid
= __le16_to_cpu(req
->dcid
);
3836 flags
= __le16_to_cpu(req
->flags
);
3838 BT_DBG("dcid 0x%4.4x flags 0x%2.2x", dcid
, flags
);
3840 chan
= l2cap_get_chan_by_scid(conn
, dcid
);
3844 if (chan
->state
!= BT_CONFIG
&& chan
->state
!= BT_CONNECT2
) {
3845 struct l2cap_cmd_rej_cid rej
;
3847 rej
.reason
= __constant_cpu_to_le16(L2CAP_REJ_INVALID_CID
);
3848 rej
.scid
= cpu_to_le16(chan
->scid
);
3849 rej
.dcid
= cpu_to_le16(chan
->dcid
);
3851 l2cap_send_cmd(conn
, cmd
->ident
, L2CAP_COMMAND_REJ
,
3856 /* Reject if config buffer is too small. */
3857 len
= cmd_len
- sizeof(*req
);
3858 if (len
< 0 || chan
->conf_len
+ len
> sizeof(chan
->conf_req
)) {
3859 l2cap_send_cmd(conn
, cmd
->ident
, L2CAP_CONF_RSP
,
3860 l2cap_build_conf_rsp(chan
, rsp
,
3861 L2CAP_CONF_REJECT
, flags
), rsp
);
3866 memcpy(chan
->conf_req
+ chan
->conf_len
, req
->data
, len
);
3867 chan
->conf_len
+= len
;
3869 if (flags
& L2CAP_CONF_FLAG_CONTINUATION
) {
3870 /* Incomplete config. Send empty response. */
3871 l2cap_send_cmd(conn
, cmd
->ident
, L2CAP_CONF_RSP
,
3872 l2cap_build_conf_rsp(chan
, rsp
,
3873 L2CAP_CONF_SUCCESS
, flags
), rsp
);
3877 /* Complete config. */
3878 len
= l2cap_parse_conf_req(chan
, rsp
);
3880 l2cap_send_disconn_req(conn
, chan
, ECONNRESET
);
3884 chan
->ident
= cmd
->ident
;
3885 l2cap_send_cmd(conn
, cmd
->ident
, L2CAP_CONF_RSP
, len
, rsp
);
3886 chan
->num_conf_rsp
++;
3888 /* Reset config buffer. */
3891 if (!test_bit(CONF_OUTPUT_DONE
, &chan
->conf_state
))
3894 if (test_bit(CONF_INPUT_DONE
, &chan
->conf_state
)) {
3895 set_default_fcs(chan
);
3897 if (chan
->mode
== L2CAP_MODE_ERTM
||
3898 chan
->mode
== L2CAP_MODE_STREAMING
)
3899 err
= l2cap_ertm_init(chan
);
3902 l2cap_send_disconn_req(chan
->conn
, chan
, -err
);
3904 l2cap_chan_ready(chan
);
3909 if (!test_and_set_bit(CONF_REQ_SENT
, &chan
->conf_state
)) {
3911 l2cap_send_cmd(conn
, l2cap_get_ident(conn
), L2CAP_CONF_REQ
,
3912 l2cap_build_conf_req(chan
, buf
), buf
);
3913 chan
->num_conf_req
++;
3916 /* Got Conf Rsp PENDING from remote side and asume we sent
3917 Conf Rsp PENDING in the code above */
3918 if (test_bit(CONF_REM_CONF_PEND
, &chan
->conf_state
) &&
3919 test_bit(CONF_LOC_CONF_PEND
, &chan
->conf_state
)) {
3921 /* check compatibility */
3923 /* Send rsp for BR/EDR channel */
3925 l2cap_send_efs_conf_rsp(chan
, rsp
, cmd
->ident
, flags
);
3927 chan
->ident
= cmd
->ident
;
3931 l2cap_chan_unlock(chan
);
3935 static inline int l2cap_config_rsp(struct l2cap_conn
*conn
,
3936 struct l2cap_cmd_hdr
*cmd
, u8
*data
)
3938 struct l2cap_conf_rsp
*rsp
= (struct l2cap_conf_rsp
*)data
;
3939 u16 scid
, flags
, result
;
3940 struct l2cap_chan
*chan
;
3941 int len
= le16_to_cpu(cmd
->len
) - sizeof(*rsp
);
3944 scid
= __le16_to_cpu(rsp
->scid
);
3945 flags
= __le16_to_cpu(rsp
->flags
);
3946 result
= __le16_to_cpu(rsp
->result
);
3948 BT_DBG("scid 0x%4.4x flags 0x%2.2x result 0x%2.2x len %d", scid
, flags
,
3951 chan
= l2cap_get_chan_by_scid(conn
, scid
);
3956 case L2CAP_CONF_SUCCESS
:
3957 l2cap_conf_rfc_get(chan
, rsp
->data
, len
);
3958 clear_bit(CONF_REM_CONF_PEND
, &chan
->conf_state
);
3961 case L2CAP_CONF_PENDING
:
3962 set_bit(CONF_REM_CONF_PEND
, &chan
->conf_state
);
3964 if (test_bit(CONF_LOC_CONF_PEND
, &chan
->conf_state
)) {
3967 len
= l2cap_parse_conf_rsp(chan
, rsp
->data
, len
,
3970 l2cap_send_disconn_req(conn
, chan
, ECONNRESET
);
3974 if (!chan
->hs_hcon
) {
3975 l2cap_send_efs_conf_rsp(chan
, buf
, cmd
->ident
,
3978 if (l2cap_check_efs(chan
)) {
3979 amp_create_logical_link(chan
);
3980 chan
->ident
= cmd
->ident
;
3986 case L2CAP_CONF_UNACCEPT
:
3987 if (chan
->num_conf_rsp
<= L2CAP_CONF_MAX_CONF_RSP
) {
3990 if (len
> sizeof(req
) - sizeof(struct l2cap_conf_req
)) {
3991 l2cap_send_disconn_req(conn
, chan
, ECONNRESET
);
3995 /* throw out any old stored conf requests */
3996 result
= L2CAP_CONF_SUCCESS
;
3997 len
= l2cap_parse_conf_rsp(chan
, rsp
->data
, len
,
4000 l2cap_send_disconn_req(conn
, chan
, ECONNRESET
);
4004 l2cap_send_cmd(conn
, l2cap_get_ident(conn
),
4005 L2CAP_CONF_REQ
, len
, req
);
4006 chan
->num_conf_req
++;
4007 if (result
!= L2CAP_CONF_SUCCESS
)
4013 l2cap_chan_set_err(chan
, ECONNRESET
);
4015 __set_chan_timer(chan
, L2CAP_DISC_REJ_TIMEOUT
);
4016 l2cap_send_disconn_req(conn
, chan
, ECONNRESET
);
4020 if (flags
& L2CAP_CONF_FLAG_CONTINUATION
)
4023 set_bit(CONF_INPUT_DONE
, &chan
->conf_state
);
4025 if (test_bit(CONF_OUTPUT_DONE
, &chan
->conf_state
)) {
4026 set_default_fcs(chan
);
4028 if (chan
->mode
== L2CAP_MODE_ERTM
||
4029 chan
->mode
== L2CAP_MODE_STREAMING
)
4030 err
= l2cap_ertm_init(chan
);
4033 l2cap_send_disconn_req(chan
->conn
, chan
, -err
);
4035 l2cap_chan_ready(chan
);
4039 l2cap_chan_unlock(chan
);
4043 static inline int l2cap_disconnect_req(struct l2cap_conn
*conn
,
4044 struct l2cap_cmd_hdr
*cmd
, u8
*data
)
4046 struct l2cap_disconn_req
*req
= (struct l2cap_disconn_req
*) data
;
4047 struct l2cap_disconn_rsp rsp
;
4049 struct l2cap_chan
*chan
;
4052 scid
= __le16_to_cpu(req
->scid
);
4053 dcid
= __le16_to_cpu(req
->dcid
);
4055 BT_DBG("scid 0x%4.4x dcid 0x%4.4x", scid
, dcid
);
4057 mutex_lock(&conn
->chan_lock
);
4059 chan
= __l2cap_get_chan_by_scid(conn
, dcid
);
4061 mutex_unlock(&conn
->chan_lock
);
4065 l2cap_chan_lock(chan
);
4069 rsp
.dcid
= cpu_to_le16(chan
->scid
);
4070 rsp
.scid
= cpu_to_le16(chan
->dcid
);
4071 l2cap_send_cmd(conn
, cmd
->ident
, L2CAP_DISCONN_RSP
, sizeof(rsp
), &rsp
);
4074 sk
->sk_shutdown
= SHUTDOWN_MASK
;
4077 l2cap_chan_hold(chan
);
4078 l2cap_chan_del(chan
, ECONNRESET
);
4080 l2cap_chan_unlock(chan
);
4082 chan
->ops
->close(chan
);
4083 l2cap_chan_put(chan
);
4085 mutex_unlock(&conn
->chan_lock
);
4090 static inline int l2cap_disconnect_rsp(struct l2cap_conn
*conn
,
4091 struct l2cap_cmd_hdr
*cmd
, u8
*data
)
4093 struct l2cap_disconn_rsp
*rsp
= (struct l2cap_disconn_rsp
*) data
;
4095 struct l2cap_chan
*chan
;
4097 scid
= __le16_to_cpu(rsp
->scid
);
4098 dcid
= __le16_to_cpu(rsp
->dcid
);
4100 BT_DBG("dcid 0x%4.4x scid 0x%4.4x", dcid
, scid
);
4102 mutex_lock(&conn
->chan_lock
);
4104 chan
= __l2cap_get_chan_by_scid(conn
, scid
);
4106 mutex_unlock(&conn
->chan_lock
);
4110 l2cap_chan_lock(chan
);
4112 l2cap_chan_hold(chan
);
4113 l2cap_chan_del(chan
, 0);
4115 l2cap_chan_unlock(chan
);
4117 chan
->ops
->close(chan
);
4118 l2cap_chan_put(chan
);
4120 mutex_unlock(&conn
->chan_lock
);
4125 static inline int l2cap_information_req(struct l2cap_conn
*conn
,
4126 struct l2cap_cmd_hdr
*cmd
, u8
*data
)
4128 struct l2cap_info_req
*req
= (struct l2cap_info_req
*) data
;
4131 type
= __le16_to_cpu(req
->type
);
4133 BT_DBG("type 0x%4.4x", type
);
4135 if (type
== L2CAP_IT_FEAT_MASK
) {
4137 u32 feat_mask
= l2cap_feat_mask
;
4138 struct l2cap_info_rsp
*rsp
= (struct l2cap_info_rsp
*) buf
;
4139 rsp
->type
= __constant_cpu_to_le16(L2CAP_IT_FEAT_MASK
);
4140 rsp
->result
= __constant_cpu_to_le16(L2CAP_IR_SUCCESS
);
4142 feat_mask
|= L2CAP_FEAT_ERTM
| L2CAP_FEAT_STREAMING
4145 feat_mask
|= L2CAP_FEAT_EXT_FLOW
4146 | L2CAP_FEAT_EXT_WINDOW
;
4148 put_unaligned_le32(feat_mask
, rsp
->data
);
4149 l2cap_send_cmd(conn
, cmd
->ident
, L2CAP_INFO_RSP
, sizeof(buf
),
4151 } else if (type
== L2CAP_IT_FIXED_CHAN
) {
4153 struct l2cap_info_rsp
*rsp
= (struct l2cap_info_rsp
*) buf
;
4156 l2cap_fixed_chan
[0] |= L2CAP_FC_A2MP
;
4158 l2cap_fixed_chan
[0] &= ~L2CAP_FC_A2MP
;
4160 rsp
->type
= __constant_cpu_to_le16(L2CAP_IT_FIXED_CHAN
);
4161 rsp
->result
= __constant_cpu_to_le16(L2CAP_IR_SUCCESS
);
4162 memcpy(rsp
->data
, l2cap_fixed_chan
, sizeof(l2cap_fixed_chan
));
4163 l2cap_send_cmd(conn
, cmd
->ident
, L2CAP_INFO_RSP
, sizeof(buf
),
4166 struct l2cap_info_rsp rsp
;
4167 rsp
.type
= cpu_to_le16(type
);
4168 rsp
.result
= __constant_cpu_to_le16(L2CAP_IR_NOTSUPP
);
4169 l2cap_send_cmd(conn
, cmd
->ident
, L2CAP_INFO_RSP
, sizeof(rsp
),
4176 static inline int l2cap_information_rsp(struct l2cap_conn
*conn
,
4177 struct l2cap_cmd_hdr
*cmd
, u8
*data
)
4179 struct l2cap_info_rsp
*rsp
= (struct l2cap_info_rsp
*) data
;
4182 type
= __le16_to_cpu(rsp
->type
);
4183 result
= __le16_to_cpu(rsp
->result
);
4185 BT_DBG("type 0x%4.4x result 0x%2.2x", type
, result
);
4187 /* L2CAP Info req/rsp are unbound to channels, add extra checks */
4188 if (cmd
->ident
!= conn
->info_ident
||
4189 conn
->info_state
& L2CAP_INFO_FEAT_MASK_REQ_DONE
)
4192 cancel_delayed_work(&conn
->info_timer
);
4194 if (result
!= L2CAP_IR_SUCCESS
) {
4195 conn
->info_state
|= L2CAP_INFO_FEAT_MASK_REQ_DONE
;
4196 conn
->info_ident
= 0;
4198 l2cap_conn_start(conn
);
4204 case L2CAP_IT_FEAT_MASK
:
4205 conn
->feat_mask
= get_unaligned_le32(rsp
->data
);
4207 if (conn
->feat_mask
& L2CAP_FEAT_FIXED_CHAN
) {
4208 struct l2cap_info_req req
;
4209 req
.type
= __constant_cpu_to_le16(L2CAP_IT_FIXED_CHAN
);
4211 conn
->info_ident
= l2cap_get_ident(conn
);
4213 l2cap_send_cmd(conn
, conn
->info_ident
,
4214 L2CAP_INFO_REQ
, sizeof(req
), &req
);
4216 conn
->info_state
|= L2CAP_INFO_FEAT_MASK_REQ_DONE
;
4217 conn
->info_ident
= 0;
4219 l2cap_conn_start(conn
);
4223 case L2CAP_IT_FIXED_CHAN
:
4224 conn
->fixed_chan_mask
= rsp
->data
[0];
4225 conn
->info_state
|= L2CAP_INFO_FEAT_MASK_REQ_DONE
;
4226 conn
->info_ident
= 0;
4228 l2cap_conn_start(conn
);
4235 static int l2cap_create_channel_req(struct l2cap_conn
*conn
,
4236 struct l2cap_cmd_hdr
*cmd
,
4237 u16 cmd_len
, void *data
)
4239 struct l2cap_create_chan_req
*req
= data
;
4240 struct l2cap_create_chan_rsp rsp
;
4241 struct l2cap_chan
*chan
;
4242 struct hci_dev
*hdev
;
4245 if (cmd_len
!= sizeof(*req
))
4251 psm
= le16_to_cpu(req
->psm
);
4252 scid
= le16_to_cpu(req
->scid
);
4254 BT_DBG("psm 0x%2.2x, scid 0x%4.4x, amp_id %d", psm
, scid
, req
->amp_id
);
4256 /* For controller id 0 make BR/EDR connection */
4257 if (req
->amp_id
== HCI_BREDR_ID
) {
4258 l2cap_connect(conn
, cmd
, data
, L2CAP_CREATE_CHAN_RSP
,
4263 /* Validate AMP controller id */
4264 hdev
= hci_dev_get(req
->amp_id
);
4268 if (hdev
->dev_type
!= HCI_AMP
|| !test_bit(HCI_UP
, &hdev
->flags
)) {
4273 chan
= l2cap_connect(conn
, cmd
, data
, L2CAP_CREATE_CHAN_RSP
,
4276 struct amp_mgr
*mgr
= conn
->hcon
->amp_mgr
;
4277 struct hci_conn
*hs_hcon
;
4279 hs_hcon
= hci_conn_hash_lookup_ba(hdev
, AMP_LINK
, conn
->dst
);
4285 BT_DBG("mgr %p bredr_chan %p hs_hcon %p", mgr
, chan
, hs_hcon
);
4287 chan
->local_amp_id
= req
->amp_id
;
4288 mgr
->bredr_chan
= chan
;
4289 chan
->hs_hcon
= hs_hcon
;
4290 conn
->mtu
= hdev
->block_mtu
;
4299 rsp
.scid
= cpu_to_le16(scid
);
4300 rsp
.result
= __constant_cpu_to_le16(L2CAP_CR_BAD_AMP
);
4301 rsp
.status
= __constant_cpu_to_le16(L2CAP_CS_NO_INFO
);
4303 l2cap_send_cmd(conn
, cmd
->ident
, L2CAP_CREATE_CHAN_RSP
,
4309 static void l2cap_send_move_chan_req(struct l2cap_chan
*chan
, u8 dest_amp_id
)
4311 struct l2cap_move_chan_req req
;
4314 BT_DBG("chan %p, dest_amp_id %d", chan
, dest_amp_id
);
4316 ident
= l2cap_get_ident(chan
->conn
);
4317 chan
->ident
= ident
;
4319 req
.icid
= cpu_to_le16(chan
->scid
);
4320 req
.dest_amp_id
= dest_amp_id
;
4322 l2cap_send_cmd(chan
->conn
, ident
, L2CAP_MOVE_CHAN_REQ
, sizeof(req
),
4325 __set_chan_timer(chan
, L2CAP_MOVE_TIMEOUT
);
4328 static void l2cap_send_move_chan_rsp(struct l2cap_chan
*chan
, u16 result
)
4330 struct l2cap_move_chan_rsp rsp
;
4332 BT_DBG("chan %p, result 0x%4.4x", chan
, result
);
4334 rsp
.icid
= cpu_to_le16(chan
->dcid
);
4335 rsp
.result
= cpu_to_le16(result
);
4337 l2cap_send_cmd(chan
->conn
, chan
->ident
, L2CAP_MOVE_CHAN_RSP
,
4341 static void l2cap_send_move_chan_cfm(struct l2cap_chan
*chan
, u16 result
)
4343 struct l2cap_move_chan_cfm cfm
;
4345 BT_DBG("chan %p, result 0x%4.4x", chan
, result
);
4347 chan
->ident
= l2cap_get_ident(chan
->conn
);
4349 cfm
.icid
= cpu_to_le16(chan
->scid
);
4350 cfm
.result
= cpu_to_le16(result
);
4352 l2cap_send_cmd(chan
->conn
, chan
->ident
, L2CAP_MOVE_CHAN_CFM
,
4355 __set_chan_timer(chan
, L2CAP_MOVE_TIMEOUT
);
4358 static void l2cap_send_move_chan_cfm_icid(struct l2cap_conn
*conn
, u16 icid
)
4360 struct l2cap_move_chan_cfm cfm
;
4362 BT_DBG("conn %p, icid 0x%4.4x", conn
, icid
);
4364 cfm
.icid
= cpu_to_le16(icid
);
4365 cfm
.result
= __constant_cpu_to_le16(L2CAP_MC_UNCONFIRMED
);
4367 l2cap_send_cmd(conn
, l2cap_get_ident(conn
), L2CAP_MOVE_CHAN_CFM
,
4371 static void l2cap_send_move_chan_cfm_rsp(struct l2cap_conn
*conn
, u8 ident
,
4374 struct l2cap_move_chan_cfm_rsp rsp
;
4376 BT_DBG("icid 0x%4.4x", icid
);
4378 rsp
.icid
= cpu_to_le16(icid
);
4379 l2cap_send_cmd(conn
, ident
, L2CAP_MOVE_CHAN_CFM_RSP
, sizeof(rsp
), &rsp
);
4382 static void __release_logical_link(struct l2cap_chan
*chan
)
4384 chan
->hs_hchan
= NULL
;
4385 chan
->hs_hcon
= NULL
;
4387 /* Placeholder - release the logical link */
4390 static void l2cap_logical_fail(struct l2cap_chan
*chan
)
4392 /* Logical link setup failed */
4393 if (chan
->state
!= BT_CONNECTED
) {
4394 /* Create channel failure, disconnect */
4395 l2cap_send_disconn_req(chan
->conn
, chan
, ECONNRESET
);
4399 switch (chan
->move_role
) {
4400 case L2CAP_MOVE_ROLE_RESPONDER
:
4401 l2cap_move_done(chan
);
4402 l2cap_send_move_chan_rsp(chan
, L2CAP_MR_NOT_SUPP
);
4404 case L2CAP_MOVE_ROLE_INITIATOR
:
4405 if (chan
->move_state
== L2CAP_MOVE_WAIT_LOGICAL_COMP
||
4406 chan
->move_state
== L2CAP_MOVE_WAIT_LOGICAL_CFM
) {
4407 /* Remote has only sent pending or
4408 * success responses, clean up
4410 l2cap_move_done(chan
);
4413 /* Other amp move states imply that the move
4414 * has already aborted
4416 l2cap_send_move_chan_cfm(chan
, L2CAP_MC_UNCONFIRMED
);
4421 static void l2cap_logical_finish_create(struct l2cap_chan
*chan
,
4422 struct hci_chan
*hchan
)
4424 struct l2cap_conf_rsp rsp
;
4426 chan
->hs_hchan
= hchan
;
4427 chan
->hs_hcon
->l2cap_data
= chan
->conn
;
4429 l2cap_send_efs_conf_rsp(chan
, &rsp
, chan
->ident
, 0);
4431 if (test_bit(CONF_INPUT_DONE
, &chan
->conf_state
)) {
4434 set_default_fcs(chan
);
4436 err
= l2cap_ertm_init(chan
);
4438 l2cap_send_disconn_req(chan
->conn
, chan
, -err
);
4440 l2cap_chan_ready(chan
);
4444 static void l2cap_logical_finish_move(struct l2cap_chan
*chan
,
4445 struct hci_chan
*hchan
)
4447 chan
->hs_hcon
= hchan
->conn
;
4448 chan
->hs_hcon
->l2cap_data
= chan
->conn
;
4450 BT_DBG("move_state %d", chan
->move_state
);
4452 switch (chan
->move_state
) {
4453 case L2CAP_MOVE_WAIT_LOGICAL_COMP
:
4454 /* Move confirm will be sent after a success
4455 * response is received
4457 chan
->move_state
= L2CAP_MOVE_WAIT_RSP_SUCCESS
;
4459 case L2CAP_MOVE_WAIT_LOGICAL_CFM
:
4460 if (test_bit(CONN_LOCAL_BUSY
, &chan
->conn_state
)) {
4461 chan
->move_state
= L2CAP_MOVE_WAIT_LOCAL_BUSY
;
4462 } else if (chan
->move_role
== L2CAP_MOVE_ROLE_INITIATOR
) {
4463 chan
->move_state
= L2CAP_MOVE_WAIT_CONFIRM_RSP
;
4464 l2cap_send_move_chan_cfm(chan
, L2CAP_MC_CONFIRMED
);
4465 } else if (chan
->move_role
== L2CAP_MOVE_ROLE_RESPONDER
) {
4466 chan
->move_state
= L2CAP_MOVE_WAIT_CONFIRM
;
4467 l2cap_send_move_chan_rsp(chan
, L2CAP_MR_SUCCESS
);
4471 /* Move was not in expected state, free the channel */
4472 __release_logical_link(chan
);
4474 chan
->move_state
= L2CAP_MOVE_STABLE
;
4478 /* Call with chan locked */
4479 void l2cap_logical_cfm(struct l2cap_chan
*chan
, struct hci_chan
*hchan
,
4482 BT_DBG("chan %p, hchan %p, status %d", chan
, hchan
, status
);
4485 l2cap_logical_fail(chan
);
4486 __release_logical_link(chan
);
4490 if (chan
->state
!= BT_CONNECTED
) {
4491 /* Ignore logical link if channel is on BR/EDR */
4492 if (chan
->local_amp_id
)
4493 l2cap_logical_finish_create(chan
, hchan
);
4495 l2cap_logical_finish_move(chan
, hchan
);
4499 void l2cap_move_start(struct l2cap_chan
*chan
)
4501 BT_DBG("chan %p", chan
);
4503 if (chan
->local_amp_id
== HCI_BREDR_ID
) {
4504 if (chan
->chan_policy
!= BT_CHANNEL_POLICY_AMP_PREFERRED
)
4506 chan
->move_role
= L2CAP_MOVE_ROLE_INITIATOR
;
4507 chan
->move_state
= L2CAP_MOVE_WAIT_PREPARE
;
4508 /* Placeholder - start physical link setup */
4510 chan
->move_role
= L2CAP_MOVE_ROLE_INITIATOR
;
4511 chan
->move_state
= L2CAP_MOVE_WAIT_RSP_SUCCESS
;
4513 l2cap_move_setup(chan
);
4514 l2cap_send_move_chan_req(chan
, 0);
4518 static void l2cap_do_create(struct l2cap_chan
*chan
, int result
,
4519 u8 local_amp_id
, u8 remote_amp_id
)
4521 if (!test_bit(CONF_CONNECT_PEND
, &chan
->conf_state
)) {
4522 struct l2cap_conn_rsp rsp
;
4524 rsp
.scid
= cpu_to_le16(chan
->dcid
);
4525 rsp
.dcid
= cpu_to_le16(chan
->scid
);
4527 /* Incoming channel on AMP */
4528 if (result
== L2CAP_CR_SUCCESS
) {
4529 /* Send successful response */
4530 rsp
.result
= cpu_to_le16(L2CAP_CR_SUCCESS
);
4531 rsp
.status
= cpu_to_le16(L2CAP_CS_NO_INFO
);
4533 /* Send negative response */
4534 rsp
.result
= cpu_to_le16(L2CAP_CR_NO_MEM
);
4535 rsp
.status
= cpu_to_le16(L2CAP_CS_NO_INFO
);
4538 l2cap_send_cmd(chan
->conn
, chan
->ident
, L2CAP_CREATE_CHAN_RSP
,
4541 if (result
== L2CAP_CR_SUCCESS
) {
4542 __l2cap_state_change(chan
, BT_CONFIG
);
4543 set_bit(CONF_REQ_SENT
, &chan
->conf_state
);
4544 l2cap_send_cmd(chan
->conn
, l2cap_get_ident(chan
->conn
),
4546 l2cap_build_conf_req(chan
, buf
), buf
);
4547 chan
->num_conf_req
++;
4550 /* Outgoing channel on AMP */
4551 if (result
== L2CAP_CR_SUCCESS
) {
4552 chan
->local_amp_id
= local_amp_id
;
4553 l2cap_send_create_chan_req(chan
, remote_amp_id
);
4555 /* Revert to BR/EDR connect */
4556 l2cap_send_conn_req(chan
);
4561 static void l2cap_do_move_initiate(struct l2cap_chan
*chan
, u8 local_amp_id
,
4564 l2cap_move_setup(chan
);
4565 chan
->move_id
= local_amp_id
;
4566 chan
->move_state
= L2CAP_MOVE_WAIT_RSP
;
4568 l2cap_send_move_chan_req(chan
, remote_amp_id
);
4571 static void l2cap_do_move_respond(struct l2cap_chan
*chan
, int result
)
4573 struct hci_chan
*hchan
= NULL
;
4575 /* Placeholder - get hci_chan for logical link */
4578 if (hchan
->state
== BT_CONNECTED
) {
4579 /* Logical link is ready to go */
4580 chan
->hs_hcon
= hchan
->conn
;
4581 chan
->hs_hcon
->l2cap_data
= chan
->conn
;
4582 chan
->move_state
= L2CAP_MOVE_WAIT_CONFIRM
;
4583 l2cap_send_move_chan_rsp(chan
, L2CAP_MR_SUCCESS
);
4585 l2cap_logical_cfm(chan
, hchan
, L2CAP_MR_SUCCESS
);
4587 /* Wait for logical link to be ready */
4588 chan
->move_state
= L2CAP_MOVE_WAIT_LOGICAL_CFM
;
4591 /* Logical link not available */
4592 l2cap_send_move_chan_rsp(chan
, L2CAP_MR_NOT_ALLOWED
);
4596 static void l2cap_do_move_cancel(struct l2cap_chan
*chan
, int result
)
4598 if (chan
->move_role
== L2CAP_MOVE_ROLE_RESPONDER
) {
4600 if (result
== -EINVAL
)
4601 rsp_result
= L2CAP_MR_BAD_ID
;
4603 rsp_result
= L2CAP_MR_NOT_ALLOWED
;
4605 l2cap_send_move_chan_rsp(chan
, rsp_result
);
4608 chan
->move_role
= L2CAP_MOVE_ROLE_NONE
;
4609 chan
->move_state
= L2CAP_MOVE_STABLE
;
4611 /* Restart data transmission */
4612 l2cap_ertm_send(chan
);
4615 void l2cap_physical_cfm(struct l2cap_chan
*chan
, int result
)
4617 u8 local_amp_id
= chan
->local_amp_id
;
4618 u8 remote_amp_id
= chan
->remote_amp_id
;
4620 BT_DBG("chan %p, result %d, local_amp_id %d, remote_amp_id %d",
4621 chan
, result
, local_amp_id
, remote_amp_id
);
4623 l2cap_chan_lock(chan
);
4625 if (chan
->state
== BT_DISCONN
|| chan
->state
== BT_CLOSED
) {
4626 l2cap_chan_unlock(chan
);
4630 if (chan
->state
!= BT_CONNECTED
) {
4631 l2cap_do_create(chan
, result
, local_amp_id
, remote_amp_id
);
4632 } else if (result
!= L2CAP_MR_SUCCESS
) {
4633 l2cap_do_move_cancel(chan
, result
);
4635 switch (chan
->move_role
) {
4636 case L2CAP_MOVE_ROLE_INITIATOR
:
4637 l2cap_do_move_initiate(chan
, local_amp_id
,
4640 case L2CAP_MOVE_ROLE_RESPONDER
:
4641 l2cap_do_move_respond(chan
, result
);
4644 l2cap_do_move_cancel(chan
, result
);
4649 l2cap_chan_unlock(chan
);
4652 static inline int l2cap_move_channel_req(struct l2cap_conn
*conn
,
4653 struct l2cap_cmd_hdr
*cmd
,
4654 u16 cmd_len
, void *data
)
4656 struct l2cap_move_chan_req
*req
= data
;
4657 struct l2cap_move_chan_rsp rsp
;
4658 struct l2cap_chan
*chan
;
4660 u16 result
= L2CAP_MR_NOT_ALLOWED
;
4662 if (cmd_len
!= sizeof(*req
))
4665 icid
= le16_to_cpu(req
->icid
);
4667 BT_DBG("icid 0x%4.4x, dest_amp_id %d", icid
, req
->dest_amp_id
);
4672 chan
= l2cap_get_chan_by_dcid(conn
, icid
);
4674 rsp
.icid
= cpu_to_le16(icid
);
4675 rsp
.result
= __constant_cpu_to_le16(L2CAP_MR_NOT_ALLOWED
);
4676 l2cap_send_cmd(conn
, cmd
->ident
, L2CAP_MOVE_CHAN_RSP
,
4681 chan
->ident
= cmd
->ident
;
4683 if (chan
->scid
< L2CAP_CID_DYN_START
||
4684 chan
->chan_policy
== BT_CHANNEL_POLICY_BREDR_ONLY
||
4685 (chan
->mode
!= L2CAP_MODE_ERTM
&&
4686 chan
->mode
!= L2CAP_MODE_STREAMING
)) {
4687 result
= L2CAP_MR_NOT_ALLOWED
;
4688 goto send_move_response
;
4691 if (chan
->local_amp_id
== req
->dest_amp_id
) {
4692 result
= L2CAP_MR_SAME_ID
;
4693 goto send_move_response
;
4696 if (req
->dest_amp_id
) {
4697 struct hci_dev
*hdev
;
4698 hdev
= hci_dev_get(req
->dest_amp_id
);
4699 if (!hdev
|| hdev
->dev_type
!= HCI_AMP
||
4700 !test_bit(HCI_UP
, &hdev
->flags
)) {
4704 result
= L2CAP_MR_BAD_ID
;
4705 goto send_move_response
;
4710 /* Detect a move collision. Only send a collision response
4711 * if this side has "lost", otherwise proceed with the move.
4712 * The winner has the larger bd_addr.
4714 if ((__chan_is_moving(chan
) ||
4715 chan
->move_role
!= L2CAP_MOVE_ROLE_NONE
) &&
4716 bacmp(conn
->src
, conn
->dst
) > 0) {
4717 result
= L2CAP_MR_COLLISION
;
4718 goto send_move_response
;
4721 chan
->move_role
= L2CAP_MOVE_ROLE_RESPONDER
;
4722 l2cap_move_setup(chan
);
4723 chan
->move_id
= req
->dest_amp_id
;
4726 if (!req
->dest_amp_id
) {
4727 /* Moving to BR/EDR */
4728 if (test_bit(CONN_LOCAL_BUSY
, &chan
->conn_state
)) {
4729 chan
->move_state
= L2CAP_MOVE_WAIT_LOCAL_BUSY
;
4730 result
= L2CAP_MR_PEND
;
4732 chan
->move_state
= L2CAP_MOVE_WAIT_CONFIRM
;
4733 result
= L2CAP_MR_SUCCESS
;
4736 chan
->move_state
= L2CAP_MOVE_WAIT_PREPARE
;
4737 /* Placeholder - uncomment when amp functions are available */
4738 /*amp_accept_physical(chan, req->dest_amp_id);*/
4739 result
= L2CAP_MR_PEND
;
4743 l2cap_send_move_chan_rsp(chan
, result
);
4745 l2cap_chan_unlock(chan
);
4750 static void l2cap_move_continue(struct l2cap_conn
*conn
, u16 icid
, u16 result
)
4752 struct l2cap_chan
*chan
;
4753 struct hci_chan
*hchan
= NULL
;
4755 chan
= l2cap_get_chan_by_scid(conn
, icid
);
4757 l2cap_send_move_chan_cfm_icid(conn
, icid
);
4761 __clear_chan_timer(chan
);
4762 if (result
== L2CAP_MR_PEND
)
4763 __set_chan_timer(chan
, L2CAP_MOVE_ERTX_TIMEOUT
);
4765 switch (chan
->move_state
) {
4766 case L2CAP_MOVE_WAIT_LOGICAL_COMP
:
4767 /* Move confirm will be sent when logical link
4770 chan
->move_state
= L2CAP_MOVE_WAIT_LOGICAL_CFM
;
4772 case L2CAP_MOVE_WAIT_RSP_SUCCESS
:
4773 if (result
== L2CAP_MR_PEND
) {
4775 } else if (test_bit(CONN_LOCAL_BUSY
,
4776 &chan
->conn_state
)) {
4777 chan
->move_state
= L2CAP_MOVE_WAIT_LOCAL_BUSY
;
4779 /* Logical link is up or moving to BR/EDR,
4782 chan
->move_state
= L2CAP_MOVE_WAIT_CONFIRM_RSP
;
4783 l2cap_send_move_chan_cfm(chan
, L2CAP_MC_CONFIRMED
);
4786 case L2CAP_MOVE_WAIT_RSP
:
4788 if (result
== L2CAP_MR_SUCCESS
) {
4789 /* Remote is ready, send confirm immediately
4790 * after logical link is ready
4792 chan
->move_state
= L2CAP_MOVE_WAIT_LOGICAL_CFM
;
4794 /* Both logical link and move success
4795 * are required to confirm
4797 chan
->move_state
= L2CAP_MOVE_WAIT_LOGICAL_COMP
;
4800 /* Placeholder - get hci_chan for logical link */
4802 /* Logical link not available */
4803 l2cap_send_move_chan_cfm(chan
, L2CAP_MC_UNCONFIRMED
);
4807 /* If the logical link is not yet connected, do not
4808 * send confirmation.
4810 if (hchan
->state
!= BT_CONNECTED
)
4813 /* Logical link is already ready to go */
4815 chan
->hs_hcon
= hchan
->conn
;
4816 chan
->hs_hcon
->l2cap_data
= chan
->conn
;
4818 if (result
== L2CAP_MR_SUCCESS
) {
4819 /* Can confirm now */
4820 l2cap_send_move_chan_cfm(chan
, L2CAP_MC_CONFIRMED
);
4822 /* Now only need move success
4825 chan
->move_state
= L2CAP_MOVE_WAIT_RSP_SUCCESS
;
4828 l2cap_logical_cfm(chan
, hchan
, L2CAP_MR_SUCCESS
);
4831 /* Any other amp move state means the move failed. */
4832 chan
->move_id
= chan
->local_amp_id
;
4833 l2cap_move_done(chan
);
4834 l2cap_send_move_chan_cfm(chan
, L2CAP_MC_UNCONFIRMED
);
4837 l2cap_chan_unlock(chan
);
4840 static void l2cap_move_fail(struct l2cap_conn
*conn
, u8 ident
, u16 icid
,
4843 struct l2cap_chan
*chan
;
4845 chan
= l2cap_get_chan_by_ident(conn
, ident
);
4847 /* Could not locate channel, icid is best guess */
4848 l2cap_send_move_chan_cfm_icid(conn
, icid
);
4852 __clear_chan_timer(chan
);
4854 if (chan
->move_role
== L2CAP_MOVE_ROLE_INITIATOR
) {
4855 if (result
== L2CAP_MR_COLLISION
) {
4856 chan
->move_role
= L2CAP_MOVE_ROLE_RESPONDER
;
4858 /* Cleanup - cancel move */
4859 chan
->move_id
= chan
->local_amp_id
;
4860 l2cap_move_done(chan
);
4864 l2cap_send_move_chan_cfm(chan
, L2CAP_MC_UNCONFIRMED
);
4866 l2cap_chan_unlock(chan
);
4869 static int l2cap_move_channel_rsp(struct l2cap_conn
*conn
,
4870 struct l2cap_cmd_hdr
*cmd
,
4871 u16 cmd_len
, void *data
)
4873 struct l2cap_move_chan_rsp
*rsp
= data
;
4876 if (cmd_len
!= sizeof(*rsp
))
4879 icid
= le16_to_cpu(rsp
->icid
);
4880 result
= le16_to_cpu(rsp
->result
);
4882 BT_DBG("icid 0x%4.4x, result 0x%4.4x", icid
, result
);
4884 if (result
== L2CAP_MR_SUCCESS
|| result
== L2CAP_MR_PEND
)
4885 l2cap_move_continue(conn
, icid
, result
);
4887 l2cap_move_fail(conn
, cmd
->ident
, icid
, result
);
4892 static int l2cap_move_channel_confirm(struct l2cap_conn
*conn
,
4893 struct l2cap_cmd_hdr
*cmd
,
4894 u16 cmd_len
, void *data
)
4896 struct l2cap_move_chan_cfm
*cfm
= data
;
4897 struct l2cap_chan
*chan
;
4900 if (cmd_len
!= sizeof(*cfm
))
4903 icid
= le16_to_cpu(cfm
->icid
);
4904 result
= le16_to_cpu(cfm
->result
);
4906 BT_DBG("icid 0x%4.4x, result 0x%4.4x", icid
, result
);
4908 chan
= l2cap_get_chan_by_dcid(conn
, icid
);
4910 /* Spec requires a response even if the icid was not found */
4911 l2cap_send_move_chan_cfm_rsp(conn
, cmd
->ident
, icid
);
4915 if (chan
->move_state
== L2CAP_MOVE_WAIT_CONFIRM
) {
4916 if (result
== L2CAP_MC_CONFIRMED
) {
4917 chan
->local_amp_id
= chan
->move_id
;
4918 if (!chan
->local_amp_id
)
4919 __release_logical_link(chan
);
4921 chan
->move_id
= chan
->local_amp_id
;
4924 l2cap_move_done(chan
);
4927 l2cap_send_move_chan_cfm_rsp(conn
, cmd
->ident
, icid
);
4929 l2cap_chan_unlock(chan
);
4934 static inline int l2cap_move_channel_confirm_rsp(struct l2cap_conn
*conn
,
4935 struct l2cap_cmd_hdr
*cmd
,
4936 u16 cmd_len
, void *data
)
4938 struct l2cap_move_chan_cfm_rsp
*rsp
= data
;
4939 struct l2cap_chan
*chan
;
4942 if (cmd_len
!= sizeof(*rsp
))
4945 icid
= le16_to_cpu(rsp
->icid
);
4947 BT_DBG("icid 0x%4.4x", icid
);
4949 chan
= l2cap_get_chan_by_scid(conn
, icid
);
4953 __clear_chan_timer(chan
);
4955 if (chan
->move_state
== L2CAP_MOVE_WAIT_CONFIRM_RSP
) {
4956 chan
->local_amp_id
= chan
->move_id
;
4958 if (!chan
->local_amp_id
&& chan
->hs_hchan
)
4959 __release_logical_link(chan
);
4961 l2cap_move_done(chan
);
4964 l2cap_chan_unlock(chan
);
4969 static inline int l2cap_check_conn_param(u16 min
, u16 max
, u16 latency
,
4974 if (min
> max
|| min
< 6 || max
> 3200)
4977 if (to_multiplier
< 10 || to_multiplier
> 3200)
4980 if (max
>= to_multiplier
* 8)
4983 max_latency
= (to_multiplier
* 8 / max
) - 1;
4984 if (latency
> 499 || latency
> max_latency
)
4990 static inline int l2cap_conn_param_update_req(struct l2cap_conn
*conn
,
4991 struct l2cap_cmd_hdr
*cmd
,
4994 struct hci_conn
*hcon
= conn
->hcon
;
4995 struct l2cap_conn_param_update_req
*req
;
4996 struct l2cap_conn_param_update_rsp rsp
;
4997 u16 min
, max
, latency
, to_multiplier
, cmd_len
;
5000 if (!(hcon
->link_mode
& HCI_LM_MASTER
))
5003 cmd_len
= __le16_to_cpu(cmd
->len
);
5004 if (cmd_len
!= sizeof(struct l2cap_conn_param_update_req
))
5007 req
= (struct l2cap_conn_param_update_req
*) data
;
5008 min
= __le16_to_cpu(req
->min
);
5009 max
= __le16_to_cpu(req
->max
);
5010 latency
= __le16_to_cpu(req
->latency
);
5011 to_multiplier
= __le16_to_cpu(req
->to_multiplier
);
5013 BT_DBG("min 0x%4.4x max 0x%4.4x latency: 0x%4.4x Timeout: 0x%4.4x",
5014 min
, max
, latency
, to_multiplier
);
5016 memset(&rsp
, 0, sizeof(rsp
));
5018 err
= l2cap_check_conn_param(min
, max
, latency
, to_multiplier
);
5020 rsp
.result
= __constant_cpu_to_le16(L2CAP_CONN_PARAM_REJECTED
);
5022 rsp
.result
= __constant_cpu_to_le16(L2CAP_CONN_PARAM_ACCEPTED
);
5024 l2cap_send_cmd(conn
, cmd
->ident
, L2CAP_CONN_PARAM_UPDATE_RSP
,
5028 hci_le_conn_update(hcon
, min
, max
, latency
, to_multiplier
);
5033 static inline int l2cap_bredr_sig_cmd(struct l2cap_conn
*conn
,
5034 struct l2cap_cmd_hdr
*cmd
, u16 cmd_len
,
5039 switch (cmd
->code
) {
5040 case L2CAP_COMMAND_REJ
:
5041 l2cap_command_rej(conn
, cmd
, data
);
5044 case L2CAP_CONN_REQ
:
5045 err
= l2cap_connect_req(conn
, cmd
, data
);
5048 case L2CAP_CONN_RSP
:
5049 case L2CAP_CREATE_CHAN_RSP
:
5050 err
= l2cap_connect_create_rsp(conn
, cmd
, data
);
5053 case L2CAP_CONF_REQ
:
5054 err
= l2cap_config_req(conn
, cmd
, cmd_len
, data
);
5057 case L2CAP_CONF_RSP
:
5058 err
= l2cap_config_rsp(conn
, cmd
, data
);
5061 case L2CAP_DISCONN_REQ
:
5062 err
= l2cap_disconnect_req(conn
, cmd
, data
);
5065 case L2CAP_DISCONN_RSP
:
5066 err
= l2cap_disconnect_rsp(conn
, cmd
, data
);
5069 case L2CAP_ECHO_REQ
:
5070 l2cap_send_cmd(conn
, cmd
->ident
, L2CAP_ECHO_RSP
, cmd_len
, data
);
5073 case L2CAP_ECHO_RSP
:
5076 case L2CAP_INFO_REQ
:
5077 err
= l2cap_information_req(conn
, cmd
, data
);
5080 case L2CAP_INFO_RSP
:
5081 err
= l2cap_information_rsp(conn
, cmd
, data
);
5084 case L2CAP_CREATE_CHAN_REQ
:
5085 err
= l2cap_create_channel_req(conn
, cmd
, cmd_len
, data
);
5088 case L2CAP_MOVE_CHAN_REQ
:
5089 err
= l2cap_move_channel_req(conn
, cmd
, cmd_len
, data
);
5092 case L2CAP_MOVE_CHAN_RSP
:
5093 err
= l2cap_move_channel_rsp(conn
, cmd
, cmd_len
, data
);
5096 case L2CAP_MOVE_CHAN_CFM
:
5097 err
= l2cap_move_channel_confirm(conn
, cmd
, cmd_len
, data
);
5100 case L2CAP_MOVE_CHAN_CFM_RSP
:
5101 err
= l2cap_move_channel_confirm_rsp(conn
, cmd
, cmd_len
, data
);
5105 BT_ERR("Unknown BR/EDR signaling command 0x%2.2x", cmd
->code
);
5113 static inline int l2cap_le_sig_cmd(struct l2cap_conn
*conn
,
5114 struct l2cap_cmd_hdr
*cmd
, u8
*data
)
5116 switch (cmd
->code
) {
5117 case L2CAP_COMMAND_REJ
:
5120 case L2CAP_CONN_PARAM_UPDATE_REQ
:
5121 return l2cap_conn_param_update_req(conn
, cmd
, data
);
5123 case L2CAP_CONN_PARAM_UPDATE_RSP
:
5127 BT_ERR("Unknown LE signaling command 0x%2.2x", cmd
->code
);
5132 static inline void l2cap_sig_channel(struct l2cap_conn
*conn
,
5133 struct sk_buff
*skb
)
5135 u8
*data
= skb
->data
;
5137 struct l2cap_cmd_hdr cmd
;
5140 l2cap_raw_recv(conn
, skb
);
5142 while (len
>= L2CAP_CMD_HDR_SIZE
) {
5144 memcpy(&cmd
, data
, L2CAP_CMD_HDR_SIZE
);
5145 data
+= L2CAP_CMD_HDR_SIZE
;
5146 len
-= L2CAP_CMD_HDR_SIZE
;
5148 cmd_len
= le16_to_cpu(cmd
.len
);
5150 BT_DBG("code 0x%2.2x len %d id 0x%2.2x", cmd
.code
, cmd_len
,
5153 if (cmd_len
> len
|| !cmd
.ident
) {
5154 BT_DBG("corrupted command");
5158 if (conn
->hcon
->type
== LE_LINK
)
5159 err
= l2cap_le_sig_cmd(conn
, &cmd
, data
);
5161 err
= l2cap_bredr_sig_cmd(conn
, &cmd
, cmd_len
, data
);
5164 struct l2cap_cmd_rej_unk rej
;
5166 BT_ERR("Wrong link type (%d)", err
);
5168 /* FIXME: Map err to a valid reason */
5169 rej
.reason
= __constant_cpu_to_le16(L2CAP_REJ_NOT_UNDERSTOOD
);
5170 l2cap_send_cmd(conn
, cmd
.ident
, L2CAP_COMMAND_REJ
,
5181 static int l2cap_check_fcs(struct l2cap_chan
*chan
, struct sk_buff
*skb
)
5183 u16 our_fcs
, rcv_fcs
;
5186 if (test_bit(FLAG_EXT_CTRL
, &chan
->flags
))
5187 hdr_size
= L2CAP_EXT_HDR_SIZE
;
5189 hdr_size
= L2CAP_ENH_HDR_SIZE
;
5191 if (chan
->fcs
== L2CAP_FCS_CRC16
) {
5192 skb_trim(skb
, skb
->len
- L2CAP_FCS_SIZE
);
5193 rcv_fcs
= get_unaligned_le16(skb
->data
+ skb
->len
);
5194 our_fcs
= crc16(0, skb
->data
- hdr_size
, skb
->len
+ hdr_size
);
5196 if (our_fcs
!= rcv_fcs
)
5202 static void l2cap_send_i_or_rr_or_rnr(struct l2cap_chan
*chan
)
5204 struct l2cap_ctrl control
;
5206 BT_DBG("chan %p", chan
);
5208 memset(&control
, 0, sizeof(control
));
5211 control
.reqseq
= chan
->buffer_seq
;
5212 set_bit(CONN_SEND_FBIT
, &chan
->conn_state
);
5214 if (test_bit(CONN_LOCAL_BUSY
, &chan
->conn_state
)) {
5215 control
.super
= L2CAP_SUPER_RNR
;
5216 l2cap_send_sframe(chan
, &control
);
5219 if (test_and_clear_bit(CONN_REMOTE_BUSY
, &chan
->conn_state
) &&
5220 chan
->unacked_frames
> 0)
5221 __set_retrans_timer(chan
);
5223 /* Send pending iframes */
5224 l2cap_ertm_send(chan
);
5226 if (!test_bit(CONN_LOCAL_BUSY
, &chan
->conn_state
) &&
5227 test_bit(CONN_SEND_FBIT
, &chan
->conn_state
)) {
5228 /* F-bit wasn't sent in an s-frame or i-frame yet, so
5231 control
.super
= L2CAP_SUPER_RR
;
5232 l2cap_send_sframe(chan
, &control
);
5236 static void append_skb_frag(struct sk_buff
*skb
, struct sk_buff
*new_frag
,
5237 struct sk_buff
**last_frag
)
5239 /* skb->len reflects data in skb as well as all fragments
5240 * skb->data_len reflects only data in fragments
5242 if (!skb_has_frag_list(skb
))
5243 skb_shinfo(skb
)->frag_list
= new_frag
;
5245 new_frag
->next
= NULL
;
5247 (*last_frag
)->next
= new_frag
;
5248 *last_frag
= new_frag
;
5250 skb
->len
+= new_frag
->len
;
5251 skb
->data_len
+= new_frag
->len
;
5252 skb
->truesize
+= new_frag
->truesize
;
5255 static int l2cap_reassemble_sdu(struct l2cap_chan
*chan
, struct sk_buff
*skb
,
5256 struct l2cap_ctrl
*control
)
5260 switch (control
->sar
) {
5261 case L2CAP_SAR_UNSEGMENTED
:
5265 err
= chan
->ops
->recv(chan
, skb
);
5268 case L2CAP_SAR_START
:
5272 chan
->sdu_len
= get_unaligned_le16(skb
->data
);
5273 skb_pull(skb
, L2CAP_SDULEN_SIZE
);
5275 if (chan
->sdu_len
> chan
->imtu
) {
5280 if (skb
->len
>= chan
->sdu_len
)
5284 chan
->sdu_last_frag
= skb
;
5290 case L2CAP_SAR_CONTINUE
:
5294 append_skb_frag(chan
->sdu
, skb
,
5295 &chan
->sdu_last_frag
);
5298 if (chan
->sdu
->len
>= chan
->sdu_len
)
5308 append_skb_frag(chan
->sdu
, skb
,
5309 &chan
->sdu_last_frag
);
5312 if (chan
->sdu
->len
!= chan
->sdu_len
)
5315 err
= chan
->ops
->recv(chan
, chan
->sdu
);
5318 /* Reassembly complete */
5320 chan
->sdu_last_frag
= NULL
;
5328 kfree_skb(chan
->sdu
);
5330 chan
->sdu_last_frag
= NULL
;
/* Re-segment queued data after a channel move changes the PDU size.
 * Not implemented yet; currently a no-op returning success.
 */
static int l2cap_resegment(struct l2cap_chan *chan)
{
	/* Placeholder */
	return 0;
}
5343 void l2cap_chan_busy(struct l2cap_chan
*chan
, int busy
)
5347 if (chan
->mode
!= L2CAP_MODE_ERTM
)
5350 event
= busy
? L2CAP_EV_LOCAL_BUSY_DETECTED
: L2CAP_EV_LOCAL_BUSY_CLEAR
;
5351 l2cap_tx(chan
, NULL
, NULL
, event
);
5354 static int l2cap_rx_queued_iframes(struct l2cap_chan
*chan
)
5357 /* Pass sequential frames to l2cap_reassemble_sdu()
5358 * until a gap is encountered.
5361 BT_DBG("chan %p", chan
);
5363 while (!test_bit(CONN_LOCAL_BUSY
, &chan
->conn_state
)) {
5364 struct sk_buff
*skb
;
5365 BT_DBG("Searching for skb with txseq %d (queue len %d)",
5366 chan
->buffer_seq
, skb_queue_len(&chan
->srej_q
));
5368 skb
= l2cap_ertm_seq_in_queue(&chan
->srej_q
, chan
->buffer_seq
);
5373 skb_unlink(skb
, &chan
->srej_q
);
5374 chan
->buffer_seq
= __next_seq(chan
, chan
->buffer_seq
);
5375 err
= l2cap_reassemble_sdu(chan
, skb
, &bt_cb(skb
)->control
);
5380 if (skb_queue_empty(&chan
->srej_q
)) {
5381 chan
->rx_state
= L2CAP_RX_STATE_RECV
;
5382 l2cap_send_ack(chan
);
5388 static void l2cap_handle_srej(struct l2cap_chan
*chan
,
5389 struct l2cap_ctrl
*control
)
5391 struct sk_buff
*skb
;
5393 BT_DBG("chan %p, control %p", chan
, control
);
5395 if (control
->reqseq
== chan
->next_tx_seq
) {
5396 BT_DBG("Invalid reqseq %d, disconnecting", control
->reqseq
);
5397 l2cap_send_disconn_req(chan
->conn
, chan
, ECONNRESET
);
5401 skb
= l2cap_ertm_seq_in_queue(&chan
->tx_q
, control
->reqseq
);
5404 BT_DBG("Seq %d not available for retransmission",
5409 if (chan
->max_tx
!= 0 && bt_cb(skb
)->control
.retries
>= chan
->max_tx
) {
5410 BT_DBG("Retry limit exceeded (%d)", chan
->max_tx
);
5411 l2cap_send_disconn_req(chan
->conn
, chan
, ECONNRESET
);
5415 clear_bit(CONN_REMOTE_BUSY
, &chan
->conn_state
);
5417 if (control
->poll
) {
5418 l2cap_pass_to_tx(chan
, control
);
5420 set_bit(CONN_SEND_FBIT
, &chan
->conn_state
);
5421 l2cap_retransmit(chan
, control
);
5422 l2cap_ertm_send(chan
);
5424 if (chan
->tx_state
== L2CAP_TX_STATE_WAIT_F
) {
5425 set_bit(CONN_SREJ_ACT
, &chan
->conn_state
);
5426 chan
->srej_save_reqseq
= control
->reqseq
;
5429 l2cap_pass_to_tx_fbit(chan
, control
);
5431 if (control
->final
) {
5432 if (chan
->srej_save_reqseq
!= control
->reqseq
||
5433 !test_and_clear_bit(CONN_SREJ_ACT
,
5435 l2cap_retransmit(chan
, control
);
5437 l2cap_retransmit(chan
, control
);
5438 if (chan
->tx_state
== L2CAP_TX_STATE_WAIT_F
) {
5439 set_bit(CONN_SREJ_ACT
, &chan
->conn_state
);
5440 chan
->srej_save_reqseq
= control
->reqseq
;
5446 static void l2cap_handle_rej(struct l2cap_chan
*chan
,
5447 struct l2cap_ctrl
*control
)
5449 struct sk_buff
*skb
;
5451 BT_DBG("chan %p, control %p", chan
, control
);
5453 if (control
->reqseq
== chan
->next_tx_seq
) {
5454 BT_DBG("Invalid reqseq %d, disconnecting", control
->reqseq
);
5455 l2cap_send_disconn_req(chan
->conn
, chan
, ECONNRESET
);
5459 skb
= l2cap_ertm_seq_in_queue(&chan
->tx_q
, control
->reqseq
);
5461 if (chan
->max_tx
&& skb
&&
5462 bt_cb(skb
)->control
.retries
>= chan
->max_tx
) {
5463 BT_DBG("Retry limit exceeded (%d)", chan
->max_tx
);
5464 l2cap_send_disconn_req(chan
->conn
, chan
, ECONNRESET
);
5468 clear_bit(CONN_REMOTE_BUSY
, &chan
->conn_state
);
5470 l2cap_pass_to_tx(chan
, control
);
5472 if (control
->final
) {
5473 if (!test_and_clear_bit(CONN_REJ_ACT
, &chan
->conn_state
))
5474 l2cap_retransmit_all(chan
, control
);
5476 l2cap_retransmit_all(chan
, control
);
5477 l2cap_ertm_send(chan
);
5478 if (chan
->tx_state
== L2CAP_TX_STATE_WAIT_F
)
5479 set_bit(CONN_REJ_ACT
, &chan
->conn_state
);
5483 static u8
l2cap_classify_txseq(struct l2cap_chan
*chan
, u16 txseq
)
5485 BT_DBG("chan %p, txseq %d", chan
, txseq
);
5487 BT_DBG("last_acked_seq %d, expected_tx_seq %d", chan
->last_acked_seq
,
5488 chan
->expected_tx_seq
);
5490 if (chan
->rx_state
== L2CAP_RX_STATE_SREJ_SENT
) {
5491 if (__seq_offset(chan
, txseq
, chan
->last_acked_seq
) >=
5493 /* See notes below regarding "double poll" and
5496 if (chan
->tx_win
<= ((chan
->tx_win_max
+ 1) >> 1)) {
5497 BT_DBG("Invalid/Ignore - after SREJ");
5498 return L2CAP_TXSEQ_INVALID_IGNORE
;
5500 BT_DBG("Invalid - in window after SREJ sent");
5501 return L2CAP_TXSEQ_INVALID
;
5505 if (chan
->srej_list
.head
== txseq
) {
5506 BT_DBG("Expected SREJ");
5507 return L2CAP_TXSEQ_EXPECTED_SREJ
;
5510 if (l2cap_ertm_seq_in_queue(&chan
->srej_q
, txseq
)) {
5511 BT_DBG("Duplicate SREJ - txseq already stored");
5512 return L2CAP_TXSEQ_DUPLICATE_SREJ
;
5515 if (l2cap_seq_list_contains(&chan
->srej_list
, txseq
)) {
5516 BT_DBG("Unexpected SREJ - not requested");
5517 return L2CAP_TXSEQ_UNEXPECTED_SREJ
;
5521 if (chan
->expected_tx_seq
== txseq
) {
5522 if (__seq_offset(chan
, txseq
, chan
->last_acked_seq
) >=
5524 BT_DBG("Invalid - txseq outside tx window");
5525 return L2CAP_TXSEQ_INVALID
;
5528 return L2CAP_TXSEQ_EXPECTED
;
5532 if (__seq_offset(chan
, txseq
, chan
->last_acked_seq
) <
5533 __seq_offset(chan
, chan
->expected_tx_seq
, chan
->last_acked_seq
)) {
5534 BT_DBG("Duplicate - expected_tx_seq later than txseq");
5535 return L2CAP_TXSEQ_DUPLICATE
;
5538 if (__seq_offset(chan
, txseq
, chan
->last_acked_seq
) >= chan
->tx_win
) {
5539 /* A source of invalid packets is a "double poll" condition,
5540 * where delays cause us to send multiple poll packets. If
5541 * the remote stack receives and processes both polls,
5542 * sequence numbers can wrap around in such a way that a
5543 * resent frame has a sequence number that looks like new data
5544 * with a sequence gap. This would trigger an erroneous SREJ
5547 * Fortunately, this is impossible with a tx window that's
5548 * less than half of the maximum sequence number, which allows
5549 * invalid frames to be safely ignored.
5551 * With tx window sizes greater than half of the tx window
5552 * maximum, the frame is invalid and cannot be ignored. This
5553 * causes a disconnect.
5556 if (chan
->tx_win
<= ((chan
->tx_win_max
+ 1) >> 1)) {
5557 BT_DBG("Invalid/Ignore - txseq outside tx window");
5558 return L2CAP_TXSEQ_INVALID_IGNORE
;
5560 BT_DBG("Invalid - txseq outside tx window");
5561 return L2CAP_TXSEQ_INVALID
;
5564 BT_DBG("Unexpected - txseq indicates missing frames");
5565 return L2CAP_TXSEQ_UNEXPECTED
;
5569 static int l2cap_rx_state_recv(struct l2cap_chan
*chan
,
5570 struct l2cap_ctrl
*control
,
5571 struct sk_buff
*skb
, u8 event
)
5574 bool skb_in_use
= 0;
5576 BT_DBG("chan %p, control %p, skb %p, event %d", chan
, control
, skb
,
5580 case L2CAP_EV_RECV_IFRAME
:
5581 switch (l2cap_classify_txseq(chan
, control
->txseq
)) {
5582 case L2CAP_TXSEQ_EXPECTED
:
5583 l2cap_pass_to_tx(chan
, control
);
5585 if (test_bit(CONN_LOCAL_BUSY
, &chan
->conn_state
)) {
5586 BT_DBG("Busy, discarding expected seq %d",
5591 chan
->expected_tx_seq
= __next_seq(chan
,
5594 chan
->buffer_seq
= chan
->expected_tx_seq
;
5597 err
= l2cap_reassemble_sdu(chan
, skb
, control
);
5601 if (control
->final
) {
5602 if (!test_and_clear_bit(CONN_REJ_ACT
,
5603 &chan
->conn_state
)) {
5605 l2cap_retransmit_all(chan
, control
);
5606 l2cap_ertm_send(chan
);
5610 if (!test_bit(CONN_LOCAL_BUSY
, &chan
->conn_state
))
5611 l2cap_send_ack(chan
);
5613 case L2CAP_TXSEQ_UNEXPECTED
:
5614 l2cap_pass_to_tx(chan
, control
);
5616 /* Can't issue SREJ frames in the local busy state.
5617 * Drop this frame, it will be seen as missing
5618 * when local busy is exited.
5620 if (test_bit(CONN_LOCAL_BUSY
, &chan
->conn_state
)) {
5621 BT_DBG("Busy, discarding unexpected seq %d",
5626 /* There was a gap in the sequence, so an SREJ
5627 * must be sent for each missing frame. The
5628 * current frame is stored for later use.
5630 skb_queue_tail(&chan
->srej_q
, skb
);
5632 BT_DBG("Queued %p (queue len %d)", skb
,
5633 skb_queue_len(&chan
->srej_q
));
5635 clear_bit(CONN_SREJ_ACT
, &chan
->conn_state
);
5636 l2cap_seq_list_clear(&chan
->srej_list
);
5637 l2cap_send_srej(chan
, control
->txseq
);
5639 chan
->rx_state
= L2CAP_RX_STATE_SREJ_SENT
;
5641 case L2CAP_TXSEQ_DUPLICATE
:
5642 l2cap_pass_to_tx(chan
, control
);
5644 case L2CAP_TXSEQ_INVALID_IGNORE
:
5646 case L2CAP_TXSEQ_INVALID
:
5648 l2cap_send_disconn_req(chan
->conn
, chan
,
5653 case L2CAP_EV_RECV_RR
:
5654 l2cap_pass_to_tx(chan
, control
);
5655 if (control
->final
) {
5656 clear_bit(CONN_REMOTE_BUSY
, &chan
->conn_state
);
5658 if (!test_and_clear_bit(CONN_REJ_ACT
, &chan
->conn_state
) &&
5659 !__chan_is_moving(chan
)) {
5661 l2cap_retransmit_all(chan
, control
);
5664 l2cap_ertm_send(chan
);
5665 } else if (control
->poll
) {
5666 l2cap_send_i_or_rr_or_rnr(chan
);
5668 if (test_and_clear_bit(CONN_REMOTE_BUSY
,
5669 &chan
->conn_state
) &&
5670 chan
->unacked_frames
)
5671 __set_retrans_timer(chan
);
5673 l2cap_ertm_send(chan
);
5676 case L2CAP_EV_RECV_RNR
:
5677 set_bit(CONN_REMOTE_BUSY
, &chan
->conn_state
);
5678 l2cap_pass_to_tx(chan
, control
);
5679 if (control
&& control
->poll
) {
5680 set_bit(CONN_SEND_FBIT
, &chan
->conn_state
);
5681 l2cap_send_rr_or_rnr(chan
, 0);
5683 __clear_retrans_timer(chan
);
5684 l2cap_seq_list_clear(&chan
->retrans_list
);
5686 case L2CAP_EV_RECV_REJ
:
5687 l2cap_handle_rej(chan
, control
);
5689 case L2CAP_EV_RECV_SREJ
:
5690 l2cap_handle_srej(chan
, control
);
5696 if (skb
&& !skb_in_use
) {
5697 BT_DBG("Freeing %p", skb
);
5704 static int l2cap_rx_state_srej_sent(struct l2cap_chan
*chan
,
5705 struct l2cap_ctrl
*control
,
5706 struct sk_buff
*skb
, u8 event
)
5709 u16 txseq
= control
->txseq
;
5710 bool skb_in_use
= 0;
5712 BT_DBG("chan %p, control %p, skb %p, event %d", chan
, control
, skb
,
5716 case L2CAP_EV_RECV_IFRAME
:
5717 switch (l2cap_classify_txseq(chan
, txseq
)) {
5718 case L2CAP_TXSEQ_EXPECTED
:
5719 /* Keep frame for reassembly later */
5720 l2cap_pass_to_tx(chan
, control
);
5721 skb_queue_tail(&chan
->srej_q
, skb
);
5723 BT_DBG("Queued %p (queue len %d)", skb
,
5724 skb_queue_len(&chan
->srej_q
));
5726 chan
->expected_tx_seq
= __next_seq(chan
, txseq
);
5728 case L2CAP_TXSEQ_EXPECTED_SREJ
:
5729 l2cap_seq_list_pop(&chan
->srej_list
);
5731 l2cap_pass_to_tx(chan
, control
);
5732 skb_queue_tail(&chan
->srej_q
, skb
);
5734 BT_DBG("Queued %p (queue len %d)", skb
,
5735 skb_queue_len(&chan
->srej_q
));
5737 err
= l2cap_rx_queued_iframes(chan
);
5742 case L2CAP_TXSEQ_UNEXPECTED
:
5743 /* Got a frame that can't be reassembled yet.
5744 * Save it for later, and send SREJs to cover
5745 * the missing frames.
5747 skb_queue_tail(&chan
->srej_q
, skb
);
5749 BT_DBG("Queued %p (queue len %d)", skb
,
5750 skb_queue_len(&chan
->srej_q
));
5752 l2cap_pass_to_tx(chan
, control
);
5753 l2cap_send_srej(chan
, control
->txseq
);
5755 case L2CAP_TXSEQ_UNEXPECTED_SREJ
:
5756 /* This frame was requested with an SREJ, but
5757 * some expected retransmitted frames are
5758 * missing. Request retransmission of missing
5761 skb_queue_tail(&chan
->srej_q
, skb
);
5763 BT_DBG("Queued %p (queue len %d)", skb
,
5764 skb_queue_len(&chan
->srej_q
));
5766 l2cap_pass_to_tx(chan
, control
);
5767 l2cap_send_srej_list(chan
, control
->txseq
);
5769 case L2CAP_TXSEQ_DUPLICATE_SREJ
:
5770 /* We've already queued this frame. Drop this copy. */
5771 l2cap_pass_to_tx(chan
, control
);
5773 case L2CAP_TXSEQ_DUPLICATE
:
5774 /* Expecting a later sequence number, so this frame
5775 * was already received. Ignore it completely.
5778 case L2CAP_TXSEQ_INVALID_IGNORE
:
5780 case L2CAP_TXSEQ_INVALID
:
5782 l2cap_send_disconn_req(chan
->conn
, chan
,
5787 case L2CAP_EV_RECV_RR
:
5788 l2cap_pass_to_tx(chan
, control
);
5789 if (control
->final
) {
5790 clear_bit(CONN_REMOTE_BUSY
, &chan
->conn_state
);
5792 if (!test_and_clear_bit(CONN_REJ_ACT
,
5793 &chan
->conn_state
)) {
5795 l2cap_retransmit_all(chan
, control
);
5798 l2cap_ertm_send(chan
);
5799 } else if (control
->poll
) {
5800 if (test_and_clear_bit(CONN_REMOTE_BUSY
,
5801 &chan
->conn_state
) &&
5802 chan
->unacked_frames
) {
5803 __set_retrans_timer(chan
);
5806 set_bit(CONN_SEND_FBIT
, &chan
->conn_state
);
5807 l2cap_send_srej_tail(chan
);
5809 if (test_and_clear_bit(CONN_REMOTE_BUSY
,
5810 &chan
->conn_state
) &&
5811 chan
->unacked_frames
)
5812 __set_retrans_timer(chan
);
5814 l2cap_send_ack(chan
);
5817 case L2CAP_EV_RECV_RNR
:
5818 set_bit(CONN_REMOTE_BUSY
, &chan
->conn_state
);
5819 l2cap_pass_to_tx(chan
, control
);
5820 if (control
->poll
) {
5821 l2cap_send_srej_tail(chan
);
5823 struct l2cap_ctrl rr_control
;
5824 memset(&rr_control
, 0, sizeof(rr_control
));
5825 rr_control
.sframe
= 1;
5826 rr_control
.super
= L2CAP_SUPER_RR
;
5827 rr_control
.reqseq
= chan
->buffer_seq
;
5828 l2cap_send_sframe(chan
, &rr_control
);
5832 case L2CAP_EV_RECV_REJ
:
5833 l2cap_handle_rej(chan
, control
);
5835 case L2CAP_EV_RECV_SREJ
:
5836 l2cap_handle_srej(chan
, control
);
5840 if (skb
&& !skb_in_use
) {
5841 BT_DBG("Freeing %p", skb
);
5848 static int l2cap_finish_move(struct l2cap_chan
*chan
)
5850 BT_DBG("chan %p", chan
);
5852 chan
->rx_state
= L2CAP_RX_STATE_RECV
;
5855 chan
->conn
->mtu
= chan
->hs_hcon
->hdev
->block_mtu
;
5857 chan
->conn
->mtu
= chan
->conn
->hcon
->hdev
->acl_mtu
;
5859 return l2cap_resegment(chan
);
5862 static int l2cap_rx_state_wait_p(struct l2cap_chan
*chan
,
5863 struct l2cap_ctrl
*control
,
5864 struct sk_buff
*skb
, u8 event
)
5868 BT_DBG("chan %p, control %p, skb %p, event %d", chan
, control
, skb
,
5874 l2cap_process_reqseq(chan
, control
->reqseq
);
5876 if (!skb_queue_empty(&chan
->tx_q
))
5877 chan
->tx_send_head
= skb_peek(&chan
->tx_q
);
5879 chan
->tx_send_head
= NULL
;
5881 /* Rewind next_tx_seq to the point expected
5884 chan
->next_tx_seq
= control
->reqseq
;
5885 chan
->unacked_frames
= 0;
5887 err
= l2cap_finish_move(chan
);
5891 set_bit(CONN_SEND_FBIT
, &chan
->conn_state
);
5892 l2cap_send_i_or_rr_or_rnr(chan
);
5894 if (event
== L2CAP_EV_RECV_IFRAME
)
5897 return l2cap_rx_state_recv(chan
, control
, NULL
, event
);
5900 static int l2cap_rx_state_wait_f(struct l2cap_chan
*chan
,
5901 struct l2cap_ctrl
*control
,
5902 struct sk_buff
*skb
, u8 event
)
5906 if (!control
->final
)
5909 clear_bit(CONN_REMOTE_BUSY
, &chan
->conn_state
);
5911 chan
->rx_state
= L2CAP_RX_STATE_RECV
;
5912 l2cap_process_reqseq(chan
, control
->reqseq
);
5914 if (!skb_queue_empty(&chan
->tx_q
))
5915 chan
->tx_send_head
= skb_peek(&chan
->tx_q
);
5917 chan
->tx_send_head
= NULL
;
5919 /* Rewind next_tx_seq to the point expected
5922 chan
->next_tx_seq
= control
->reqseq
;
5923 chan
->unacked_frames
= 0;
5926 chan
->conn
->mtu
= chan
->hs_hcon
->hdev
->block_mtu
;
5928 chan
->conn
->mtu
= chan
->conn
->hcon
->hdev
->acl_mtu
;
5930 err
= l2cap_resegment(chan
);
5933 err
= l2cap_rx_state_recv(chan
, control
, skb
, event
);
5938 static bool __valid_reqseq(struct l2cap_chan
*chan
, u16 reqseq
)
5940 /* Make sure reqseq is for a packet that has been sent but not acked */
5943 unacked
= __seq_offset(chan
, chan
->next_tx_seq
, chan
->expected_ack_seq
);
5944 return __seq_offset(chan
, chan
->next_tx_seq
, reqseq
) <= unacked
;
5947 static int l2cap_rx(struct l2cap_chan
*chan
, struct l2cap_ctrl
*control
,
5948 struct sk_buff
*skb
, u8 event
)
5952 BT_DBG("chan %p, control %p, skb %p, event %d, state %d", chan
,
5953 control
, skb
, event
, chan
->rx_state
);
5955 if (__valid_reqseq(chan
, control
->reqseq
)) {
5956 switch (chan
->rx_state
) {
5957 case L2CAP_RX_STATE_RECV
:
5958 err
= l2cap_rx_state_recv(chan
, control
, skb
, event
);
5960 case L2CAP_RX_STATE_SREJ_SENT
:
5961 err
= l2cap_rx_state_srej_sent(chan
, control
, skb
,
5964 case L2CAP_RX_STATE_WAIT_P
:
5965 err
= l2cap_rx_state_wait_p(chan
, control
, skb
, event
);
5967 case L2CAP_RX_STATE_WAIT_F
:
5968 err
= l2cap_rx_state_wait_f(chan
, control
, skb
, event
);
5975 BT_DBG("Invalid reqseq %d (next_tx_seq %d, expected_ack_seq %d",
5976 control
->reqseq
, chan
->next_tx_seq
,
5977 chan
->expected_ack_seq
);
5978 l2cap_send_disconn_req(chan
->conn
, chan
, ECONNRESET
);
5984 static int l2cap_stream_rx(struct l2cap_chan
*chan
, struct l2cap_ctrl
*control
,
5985 struct sk_buff
*skb
)
5989 BT_DBG("chan %p, control %p, skb %p, state %d", chan
, control
, skb
,
5992 if (l2cap_classify_txseq(chan
, control
->txseq
) ==
5993 L2CAP_TXSEQ_EXPECTED
) {
5994 l2cap_pass_to_tx(chan
, control
);
5996 BT_DBG("buffer_seq %d->%d", chan
->buffer_seq
,
5997 __next_seq(chan
, chan
->buffer_seq
));
5999 chan
->buffer_seq
= __next_seq(chan
, chan
->buffer_seq
);
6001 l2cap_reassemble_sdu(chan
, skb
, control
);
6004 kfree_skb(chan
->sdu
);
6007 chan
->sdu_last_frag
= NULL
;
6011 BT_DBG("Freeing %p", skb
);
6016 chan
->last_acked_seq
= control
->txseq
;
6017 chan
->expected_tx_seq
= __next_seq(chan
, control
->txseq
);
6022 static int l2cap_data_rcv(struct l2cap_chan
*chan
, struct sk_buff
*skb
)
6024 struct l2cap_ctrl
*control
= &bt_cb(skb
)->control
;
6028 __unpack_control(chan
, skb
);
6033 * We can just drop the corrupted I-frame here.
6034 * Receiver will miss it and start proper recovery
6035 * procedures and ask for retransmission.
6037 if (l2cap_check_fcs(chan
, skb
))
6040 if (!control
->sframe
&& control
->sar
== L2CAP_SAR_START
)
6041 len
-= L2CAP_SDULEN_SIZE
;
6043 if (chan
->fcs
== L2CAP_FCS_CRC16
)
6044 len
-= L2CAP_FCS_SIZE
;
6046 if (len
> chan
->mps
) {
6047 l2cap_send_disconn_req(chan
->conn
, chan
, ECONNRESET
);
6051 if (!control
->sframe
) {
6054 BT_DBG("iframe sar %d, reqseq %d, final %d, txseq %d",
6055 control
->sar
, control
->reqseq
, control
->final
,
6058 /* Validate F-bit - F=0 always valid, F=1 only
6059 * valid in TX WAIT_F
6061 if (control
->final
&& chan
->tx_state
!= L2CAP_TX_STATE_WAIT_F
)
6064 if (chan
->mode
!= L2CAP_MODE_STREAMING
) {
6065 event
= L2CAP_EV_RECV_IFRAME
;
6066 err
= l2cap_rx(chan
, control
, skb
, event
);
6068 err
= l2cap_stream_rx(chan
, control
, skb
);
6072 l2cap_send_disconn_req(chan
->conn
, chan
,
6075 const u8 rx_func_to_event
[4] = {
6076 L2CAP_EV_RECV_RR
, L2CAP_EV_RECV_REJ
,
6077 L2CAP_EV_RECV_RNR
, L2CAP_EV_RECV_SREJ
6080 /* Only I-frames are expected in streaming mode */
6081 if (chan
->mode
== L2CAP_MODE_STREAMING
)
6084 BT_DBG("sframe reqseq %d, final %d, poll %d, super %d",
6085 control
->reqseq
, control
->final
, control
->poll
,
6090 l2cap_send_disconn_req(chan
->conn
, chan
, ECONNRESET
);
6094 /* Validate F and P bits */
6095 if (control
->final
&& (control
->poll
||
6096 chan
->tx_state
!= L2CAP_TX_STATE_WAIT_F
))
6099 event
= rx_func_to_event
[control
->super
];
6100 if (l2cap_rx(chan
, control
, skb
, event
))
6101 l2cap_send_disconn_req(chan
->conn
, chan
, ECONNRESET
);
6111 static void l2cap_data_channel(struct l2cap_conn
*conn
, u16 cid
,
6112 struct sk_buff
*skb
)
6114 struct l2cap_chan
*chan
;
6116 chan
= l2cap_get_chan_by_scid(conn
, cid
);
6118 if (cid
== L2CAP_CID_A2MP
) {
6119 chan
= a2mp_channel_create(conn
, skb
);
6125 l2cap_chan_lock(chan
);
6127 BT_DBG("unknown cid 0x%4.4x", cid
);
6128 /* Drop packet and return */
6134 BT_DBG("chan %p, len %d", chan
, skb
->len
);
6136 if (chan
->state
!= BT_CONNECTED
)
6139 switch (chan
->mode
) {
6140 case L2CAP_MODE_BASIC
:
6141 /* If socket recv buffers overflows we drop data here
6142 * which is *bad* because L2CAP has to be reliable.
6143 * But we don't have any other choice. L2CAP doesn't
6144 * provide flow control mechanism. */
6146 if (chan
->imtu
< skb
->len
)
6149 if (!chan
->ops
->recv(chan
, skb
))
6153 case L2CAP_MODE_ERTM
:
6154 case L2CAP_MODE_STREAMING
:
6155 l2cap_data_rcv(chan
, skb
);
6159 BT_DBG("chan %p: bad mode 0x%2.2x", chan
, chan
->mode
);
6167 l2cap_chan_unlock(chan
);
6170 static void l2cap_conless_channel(struct l2cap_conn
*conn
, __le16 psm
,
6171 struct sk_buff
*skb
)
6173 struct l2cap_chan
*chan
;
6175 chan
= l2cap_global_chan_by_psm(0, psm
, conn
->src
, conn
->dst
);
6179 BT_DBG("chan %p, len %d", chan
, skb
->len
);
6181 if (chan
->state
!= BT_BOUND
&& chan
->state
!= BT_CONNECTED
)
6184 if (chan
->imtu
< skb
->len
)
6187 if (!chan
->ops
->recv(chan
, skb
))
6194 static void l2cap_att_channel(struct l2cap_conn
*conn
, u16 cid
,
6195 struct sk_buff
*skb
)
6197 struct l2cap_chan
*chan
;
6199 chan
= l2cap_global_chan_by_scid(0, cid
, conn
->src
, conn
->dst
);
6203 BT_DBG("chan %p, len %d", chan
, skb
->len
);
6205 if (chan
->state
!= BT_BOUND
&& chan
->state
!= BT_CONNECTED
)
6208 if (chan
->imtu
< skb
->len
)
6211 if (!chan
->ops
->recv(chan
, skb
))
6218 static void l2cap_recv_frame(struct l2cap_conn
*conn
, struct sk_buff
*skb
)
6220 struct l2cap_hdr
*lh
= (void *) skb
->data
;
6224 skb_pull(skb
, L2CAP_HDR_SIZE
);
6225 cid
= __le16_to_cpu(lh
->cid
);
6226 len
= __le16_to_cpu(lh
->len
);
6228 if (len
!= skb
->len
) {
6233 BT_DBG("len %d, cid 0x%4.4x", len
, cid
);
6236 case L2CAP_CID_LE_SIGNALING
:
6237 case L2CAP_CID_SIGNALING
:
6238 l2cap_sig_channel(conn
, skb
);
6241 case L2CAP_CID_CONN_LESS
:
6242 psm
= get_unaligned((__le16
*) skb
->data
);
6243 skb_pull(skb
, L2CAP_PSMLEN_SIZE
);
6244 l2cap_conless_channel(conn
, psm
, skb
);
6247 case L2CAP_CID_LE_DATA
:
6248 l2cap_att_channel(conn
, cid
, skb
);
6252 if (smp_sig_channel(conn
, skb
))
6253 l2cap_conn_del(conn
->hcon
, EACCES
);
6257 l2cap_data_channel(conn
, cid
, skb
);
6262 /* ---- L2CAP interface with lower layer (HCI) ---- */
6264 int l2cap_connect_ind(struct hci_dev
*hdev
, bdaddr_t
*bdaddr
)
6266 int exact
= 0, lm1
= 0, lm2
= 0;
6267 struct l2cap_chan
*c
;
6269 BT_DBG("hdev %s, bdaddr %pMR", hdev
->name
, bdaddr
);
6271 /* Find listening sockets and check their link_mode */
6272 read_lock(&chan_list_lock
);
6273 list_for_each_entry(c
, &chan_list
, global_l
) {
6274 struct sock
*sk
= c
->sk
;
6276 if (c
->state
!= BT_LISTEN
)
6279 if (!bacmp(&bt_sk(sk
)->src
, &hdev
->bdaddr
)) {
6280 lm1
|= HCI_LM_ACCEPT
;
6281 if (test_bit(FLAG_ROLE_SWITCH
, &c
->flags
))
6282 lm1
|= HCI_LM_MASTER
;
6284 } else if (!bacmp(&bt_sk(sk
)->src
, BDADDR_ANY
)) {
6285 lm2
|= HCI_LM_ACCEPT
;
6286 if (test_bit(FLAG_ROLE_SWITCH
, &c
->flags
))
6287 lm2
|= HCI_LM_MASTER
;
6290 read_unlock(&chan_list_lock
);
6292 return exact
? lm1
: lm2
;
6295 void l2cap_connect_cfm(struct hci_conn
*hcon
, u8 status
)
6297 struct l2cap_conn
*conn
;
6299 BT_DBG("hcon %p bdaddr %pMR status %d", hcon
, &hcon
->dst
, status
);
6302 conn
= l2cap_conn_add(hcon
, status
);
6304 l2cap_conn_ready(conn
);
6306 l2cap_conn_del(hcon
, bt_to_errno(status
));
6310 int l2cap_disconn_ind(struct hci_conn
*hcon
)
6312 struct l2cap_conn
*conn
= hcon
->l2cap_data
;
6314 BT_DBG("hcon %p", hcon
);
6317 return HCI_ERROR_REMOTE_USER_TERM
;
6318 return conn
->disc_reason
;
6321 void l2cap_disconn_cfm(struct hci_conn
*hcon
, u8 reason
)
6323 BT_DBG("hcon %p reason %d", hcon
, reason
);
6325 l2cap_conn_del(hcon
, bt_to_errno(reason
));
6328 static inline void l2cap_check_encryption(struct l2cap_chan
*chan
, u8 encrypt
)
6330 if (chan
->chan_type
!= L2CAP_CHAN_CONN_ORIENTED
)
6333 if (encrypt
== 0x00) {
6334 if (chan
->sec_level
== BT_SECURITY_MEDIUM
) {
6335 __set_chan_timer(chan
, L2CAP_ENC_TIMEOUT
);
6336 } else if (chan
->sec_level
== BT_SECURITY_HIGH
)
6337 l2cap_chan_close(chan
, ECONNREFUSED
);
6339 if (chan
->sec_level
== BT_SECURITY_MEDIUM
)
6340 __clear_chan_timer(chan
);
6344 int l2cap_security_cfm(struct hci_conn
*hcon
, u8 status
, u8 encrypt
)
6346 struct l2cap_conn
*conn
= hcon
->l2cap_data
;
6347 struct l2cap_chan
*chan
;
6352 BT_DBG("conn %p status 0x%2.2x encrypt %u", conn
, status
, encrypt
);
6354 if (hcon
->type
== LE_LINK
) {
6355 if (!status
&& encrypt
)
6356 smp_distribute_keys(conn
, 0);
6357 cancel_delayed_work(&conn
->security_timer
);
6360 mutex_lock(&conn
->chan_lock
);
6362 list_for_each_entry(chan
, &conn
->chan_l
, list
) {
6363 l2cap_chan_lock(chan
);
6365 BT_DBG("chan %p scid 0x%4.4x state %s", chan
, chan
->scid
,
6366 state_to_string(chan
->state
));
6368 if (chan
->chan_type
== L2CAP_CHAN_CONN_FIX_A2MP
) {
6369 l2cap_chan_unlock(chan
);
6373 if (chan
->scid
== L2CAP_CID_LE_DATA
) {
6374 if (!status
&& encrypt
) {
6375 chan
->sec_level
= hcon
->sec_level
;
6376 l2cap_chan_ready(chan
);
6379 l2cap_chan_unlock(chan
);
6383 if (test_bit(CONF_CONNECT_PEND
, &chan
->conf_state
)) {
6384 l2cap_chan_unlock(chan
);
6388 if (!status
&& (chan
->state
== BT_CONNECTED
||
6389 chan
->state
== BT_CONFIG
)) {
6390 struct sock
*sk
= chan
->sk
;
6392 clear_bit(BT_SK_SUSPEND
, &bt_sk(sk
)->flags
);
6393 sk
->sk_state_change(sk
);
6395 l2cap_check_encryption(chan
, encrypt
);
6396 l2cap_chan_unlock(chan
);
6400 if (chan
->state
== BT_CONNECT
) {
6402 l2cap_start_connection(chan
);
6404 __set_chan_timer(chan
, L2CAP_DISC_TIMEOUT
);
6406 } else if (chan
->state
== BT_CONNECT2
) {
6407 struct sock
*sk
= chan
->sk
;
6408 struct l2cap_conn_rsp rsp
;
6414 if (test_bit(BT_SK_DEFER_SETUP
,
6415 &bt_sk(sk
)->flags
)) {
6416 res
= L2CAP_CR_PEND
;
6417 stat
= L2CAP_CS_AUTHOR_PEND
;
6418 chan
->ops
->defer(chan
);
6420 __l2cap_state_change(chan
, BT_CONFIG
);
6421 res
= L2CAP_CR_SUCCESS
;
6422 stat
= L2CAP_CS_NO_INFO
;
6425 __l2cap_state_change(chan
, BT_DISCONN
);
6426 __set_chan_timer(chan
, L2CAP_DISC_TIMEOUT
);
6427 res
= L2CAP_CR_SEC_BLOCK
;
6428 stat
= L2CAP_CS_NO_INFO
;
6433 rsp
.scid
= cpu_to_le16(chan
->dcid
);
6434 rsp
.dcid
= cpu_to_le16(chan
->scid
);
6435 rsp
.result
= cpu_to_le16(res
);
6436 rsp
.status
= cpu_to_le16(stat
);
6437 l2cap_send_cmd(conn
, chan
->ident
, L2CAP_CONN_RSP
,
6440 if (!test_bit(CONF_REQ_SENT
, &chan
->conf_state
) &&
6441 res
== L2CAP_CR_SUCCESS
) {
6443 set_bit(CONF_REQ_SENT
, &chan
->conf_state
);
6444 l2cap_send_cmd(conn
, l2cap_get_ident(conn
),
6446 l2cap_build_conf_req(chan
, buf
),
6448 chan
->num_conf_req
++;
6452 l2cap_chan_unlock(chan
);
6455 mutex_unlock(&conn
->chan_lock
);
6460 int l2cap_recv_acldata(struct hci_conn
*hcon
, struct sk_buff
*skb
, u16 flags
)
6462 struct l2cap_conn
*conn
= hcon
->l2cap_data
;
6463 struct l2cap_hdr
*hdr
;
6466 /* For AMP controller do not create l2cap conn */
6467 if (!conn
&& hcon
->hdev
->dev_type
!= HCI_BREDR
)
6471 conn
= l2cap_conn_add(hcon
, 0);
6476 BT_DBG("conn %p len %d flags 0x%x", conn
, skb
->len
, flags
);
6480 case ACL_START_NO_FLUSH
:
6483 BT_ERR("Unexpected start frame (len %d)", skb
->len
);
6484 kfree_skb(conn
->rx_skb
);
6485 conn
->rx_skb
= NULL
;
6487 l2cap_conn_unreliable(conn
, ECOMM
);
6490 /* Start fragment always begin with Basic L2CAP header */
6491 if (skb
->len
< L2CAP_HDR_SIZE
) {
6492 BT_ERR("Frame is too short (len %d)", skb
->len
);
6493 l2cap_conn_unreliable(conn
, ECOMM
);
6497 hdr
= (struct l2cap_hdr
*) skb
->data
;
6498 len
= __le16_to_cpu(hdr
->len
) + L2CAP_HDR_SIZE
;
6500 if (len
== skb
->len
) {
6501 /* Complete frame received */
6502 l2cap_recv_frame(conn
, skb
);
6506 BT_DBG("Start: total len %d, frag len %d", len
, skb
->len
);
6508 if (skb
->len
> len
) {
6509 BT_ERR("Frame is too long (len %d, expected len %d)",
6511 l2cap_conn_unreliable(conn
, ECOMM
);
6515 /* Allocate skb for the complete frame (with header) */
6516 conn
->rx_skb
= bt_skb_alloc(len
, GFP_KERNEL
);
6520 skb_copy_from_linear_data(skb
, skb_put(conn
->rx_skb
, skb
->len
),
6522 conn
->rx_len
= len
- skb
->len
;
6526 BT_DBG("Cont: frag len %d (expecting %d)", skb
->len
, conn
->rx_len
);
6528 if (!conn
->rx_len
) {
6529 BT_ERR("Unexpected continuation frame (len %d)", skb
->len
);
6530 l2cap_conn_unreliable(conn
, ECOMM
);
6534 if (skb
->len
> conn
->rx_len
) {
6535 BT_ERR("Fragment is too long (len %d, expected %d)",
6536 skb
->len
, conn
->rx_len
);
6537 kfree_skb(conn
->rx_skb
);
6538 conn
->rx_skb
= NULL
;
6540 l2cap_conn_unreliable(conn
, ECOMM
);
6544 skb_copy_from_linear_data(skb
, skb_put(conn
->rx_skb
, skb
->len
),
6546 conn
->rx_len
-= skb
->len
;
6548 if (!conn
->rx_len
) {
6549 /* Complete frame received */
6550 l2cap_recv_frame(conn
, conn
->rx_skb
);
6551 conn
->rx_skb
= NULL
;
6561 static int l2cap_debugfs_show(struct seq_file
*f
, void *p
)
6563 struct l2cap_chan
*c
;
6565 read_lock(&chan_list_lock
);
6567 list_for_each_entry(c
, &chan_list
, global_l
) {
6568 struct sock
*sk
= c
->sk
;
6570 seq_printf(f
, "%pMR %pMR %d %d 0x%4.4x 0x%4.4x %d %d %d %d\n",
6571 &bt_sk(sk
)->src
, &bt_sk(sk
)->dst
,
6572 c
->state
, __le16_to_cpu(c
->psm
),
6573 c
->scid
, c
->dcid
, c
->imtu
, c
->omtu
,
6574 c
->sec_level
, c
->mode
);
6577 read_unlock(&chan_list_lock
);
6582 static int l2cap_debugfs_open(struct inode
*inode
, struct file
*file
)
6584 return single_open(file
, l2cap_debugfs_show
, inode
->i_private
);
6587 static const struct file_operations l2cap_debugfs_fops
= {
6588 .open
= l2cap_debugfs_open
,
6590 .llseek
= seq_lseek
,
6591 .release
= single_release
,
6594 static struct dentry
*l2cap_debugfs
;
6596 int __init
l2cap_init(void)
6600 err
= l2cap_init_sockets();
6605 l2cap_debugfs
= debugfs_create_file("l2cap", 0444, bt_debugfs
,
6606 NULL
, &l2cap_debugfs_fops
);
6608 BT_ERR("Failed to create L2CAP debug file");
6614 void l2cap_exit(void)
6616 debugfs_remove(l2cap_debugfs
);
6617 l2cap_cleanup_sockets();
6620 module_param(disable_ertm
, bool, 0644);
6621 MODULE_PARM_DESC(disable_ertm
, "Disable enhanced retransmission mode");