2 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (C) 2000-2001 Qualcomm Incorporated
4 Copyright (C) 2009-2010 Gustavo F. Padovan <gustavo@padovan.org>
5 Copyright (C) 2010 Google Inc.
6 Copyright (C) 2011 ProFUSION Embedded Systems
7 Copyright (c) 2012 Code Aurora Forum. All rights reserved.
9 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
11 This program is free software; you can redistribute it and/or modify
12 it under the terms of the GNU General Public License version 2 as
13 published by the Free Software Foundation;
15 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
16 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
18 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
19 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
20 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
21 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
22 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
24 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
25 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
26 SOFTWARE IS DISCLAIMED.
29 /* Bluetooth L2CAP core. */
31 #include <linux/module.h>
33 #include <linux/debugfs.h>
34 #include <linux/crc16.h>
36 #include <net/bluetooth/bluetooth.h>
37 #include <net/bluetooth/hci_core.h>
38 #include <net/bluetooth/l2cap.h>
39 #include <net/bluetooth/smp.h>
40 #include <net/bluetooth/a2mp.h>
44 static u32 l2cap_feat_mask
= L2CAP_FEAT_FIXED_CHAN
;
45 static u8 l2cap_fixed_chan
[8] = { L2CAP_FC_L2CAP
, };
47 static LIST_HEAD(chan_list
);
48 static DEFINE_RWLOCK(chan_list_lock
);
50 static struct sk_buff
*l2cap_build_cmd(struct l2cap_conn
*conn
,
51 u8 code
, u8 ident
, u16 dlen
, void *data
);
52 static void l2cap_send_cmd(struct l2cap_conn
*conn
, u8 ident
, u8 code
, u16 len
,
54 static int l2cap_build_conf_req(struct l2cap_chan
*chan
, void *data
);
55 static void l2cap_send_disconn_req(struct l2cap_conn
*conn
,
56 struct l2cap_chan
*chan
, int err
);
58 static void l2cap_tx(struct l2cap_chan
*chan
, struct l2cap_ctrl
*control
,
59 struct sk_buff_head
*skbs
, u8 event
);
61 /* ---- L2CAP channels ---- */
63 static struct l2cap_chan
*__l2cap_get_chan_by_dcid(struct l2cap_conn
*conn
,
68 list_for_each_entry(c
, &conn
->chan_l
, list
) {
75 static struct l2cap_chan
*__l2cap_get_chan_by_scid(struct l2cap_conn
*conn
,
80 list_for_each_entry(c
, &conn
->chan_l
, list
) {
87 /* Find channel with given SCID.
88 * Returns locked channel. */
89 static struct l2cap_chan
*l2cap_get_chan_by_scid(struct l2cap_conn
*conn
,
94 mutex_lock(&conn
->chan_lock
);
95 c
= __l2cap_get_chan_by_scid(conn
, cid
);
98 mutex_unlock(&conn
->chan_lock
);
103 static struct l2cap_chan
*__l2cap_get_chan_by_ident(struct l2cap_conn
*conn
,
106 struct l2cap_chan
*c
;
108 list_for_each_entry(c
, &conn
->chan_l
, list
) {
109 if (c
->ident
== ident
)
115 static struct l2cap_chan
*__l2cap_global_chan_by_addr(__le16 psm
, bdaddr_t
*src
)
117 struct l2cap_chan
*c
;
119 list_for_each_entry(c
, &chan_list
, global_l
) {
120 if (c
->sport
== psm
&& !bacmp(&bt_sk(c
->sk
)->src
, src
))
126 int l2cap_add_psm(struct l2cap_chan
*chan
, bdaddr_t
*src
, __le16 psm
)
130 write_lock(&chan_list_lock
);
132 if (psm
&& __l2cap_global_chan_by_addr(psm
, src
)) {
145 for (p
= 0x1001; p
< 0x1100; p
+= 2)
146 if (!__l2cap_global_chan_by_addr(cpu_to_le16(p
), src
)) {
147 chan
->psm
= cpu_to_le16(p
);
148 chan
->sport
= cpu_to_le16(p
);
155 write_unlock(&chan_list_lock
);
159 int l2cap_add_scid(struct l2cap_chan
*chan
, __u16 scid
)
161 write_lock(&chan_list_lock
);
165 write_unlock(&chan_list_lock
);
170 static u16
l2cap_alloc_cid(struct l2cap_conn
*conn
)
172 u16 cid
= L2CAP_CID_DYN_START
;
174 for (; cid
< L2CAP_CID_DYN_END
; cid
++) {
175 if (!__l2cap_get_chan_by_scid(conn
, cid
))
182 static void __l2cap_state_change(struct l2cap_chan
*chan
, int state
)
184 BT_DBG("chan %p %s -> %s", chan
, state_to_string(chan
->state
),
185 state_to_string(state
));
188 chan
->ops
->state_change(chan
, state
);
191 static void l2cap_state_change(struct l2cap_chan
*chan
, int state
)
193 struct sock
*sk
= chan
->sk
;
196 __l2cap_state_change(chan
, state
);
200 static inline void __l2cap_chan_set_err(struct l2cap_chan
*chan
, int err
)
202 struct sock
*sk
= chan
->sk
;
207 static inline void l2cap_chan_set_err(struct l2cap_chan
*chan
, int err
)
209 struct sock
*sk
= chan
->sk
;
212 __l2cap_chan_set_err(chan
, err
);
216 static void __set_retrans_timer(struct l2cap_chan
*chan
)
218 if (!delayed_work_pending(&chan
->monitor_timer
) &&
219 chan
->retrans_timeout
) {
220 l2cap_set_timer(chan
, &chan
->retrans_timer
,
221 msecs_to_jiffies(chan
->retrans_timeout
));
225 static void __set_monitor_timer(struct l2cap_chan
*chan
)
227 __clear_retrans_timer(chan
);
228 if (chan
->monitor_timeout
) {
229 l2cap_set_timer(chan
, &chan
->monitor_timer
,
230 msecs_to_jiffies(chan
->monitor_timeout
));
234 static struct sk_buff
*l2cap_ertm_seq_in_queue(struct sk_buff_head
*head
,
239 skb_queue_walk(head
, skb
) {
240 if (bt_cb(skb
)->control
.txseq
== seq
)
247 /* ---- L2CAP sequence number lists ---- */
249 /* For ERTM, ordered lists of sequence numbers must be tracked for
250 * SREJ requests that are received and for frames that are to be
251 * retransmitted. These seq_list functions implement a singly-linked
252 * list in an array, where membership in the list can also be checked
253 * in constant time. Items can also be added to the tail of the list
254 * and removed from the head in constant time, without further memory
258 static int l2cap_seq_list_init(struct l2cap_seq_list
*seq_list
, u16 size
)
260 size_t alloc_size
, i
;
262 /* Allocated size is a power of 2 to map sequence numbers
263 * (which may be up to 14 bits) in to a smaller array that is
264 * sized for the negotiated ERTM transmit windows.
266 alloc_size
= roundup_pow_of_two(size
);
268 seq_list
->list
= kmalloc(sizeof(u16
) * alloc_size
, GFP_KERNEL
);
272 seq_list
->mask
= alloc_size
- 1;
273 seq_list
->head
= L2CAP_SEQ_LIST_CLEAR
;
274 seq_list
->tail
= L2CAP_SEQ_LIST_CLEAR
;
275 for (i
= 0; i
< alloc_size
; i
++)
276 seq_list
->list
[i
] = L2CAP_SEQ_LIST_CLEAR
;
281 static inline void l2cap_seq_list_free(struct l2cap_seq_list
*seq_list
)
283 kfree(seq_list
->list
);
286 static inline bool l2cap_seq_list_contains(struct l2cap_seq_list
*seq_list
,
289 /* Constant-time check for list membership */
290 return seq_list
->list
[seq
& seq_list
->mask
] != L2CAP_SEQ_LIST_CLEAR
;
293 static u16
l2cap_seq_list_remove(struct l2cap_seq_list
*seq_list
, u16 seq
)
295 u16 mask
= seq_list
->mask
;
297 if (seq_list
->head
== L2CAP_SEQ_LIST_CLEAR
) {
298 /* In case someone tries to pop the head of an empty list */
299 return L2CAP_SEQ_LIST_CLEAR
;
300 } else if (seq_list
->head
== seq
) {
301 /* Head can be removed in constant time */
302 seq_list
->head
= seq_list
->list
[seq
& mask
];
303 seq_list
->list
[seq
& mask
] = L2CAP_SEQ_LIST_CLEAR
;
305 if (seq_list
->head
== L2CAP_SEQ_LIST_TAIL
) {
306 seq_list
->head
= L2CAP_SEQ_LIST_CLEAR
;
307 seq_list
->tail
= L2CAP_SEQ_LIST_CLEAR
;
310 /* Walk the list to find the sequence number */
311 u16 prev
= seq_list
->head
;
312 while (seq_list
->list
[prev
& mask
] != seq
) {
313 prev
= seq_list
->list
[prev
& mask
];
314 if (prev
== L2CAP_SEQ_LIST_TAIL
)
315 return L2CAP_SEQ_LIST_CLEAR
;
318 /* Unlink the number from the list and clear it */
319 seq_list
->list
[prev
& mask
] = seq_list
->list
[seq
& mask
];
320 seq_list
->list
[seq
& mask
] = L2CAP_SEQ_LIST_CLEAR
;
321 if (seq_list
->tail
== seq
)
322 seq_list
->tail
= prev
;
327 static inline u16
l2cap_seq_list_pop(struct l2cap_seq_list
*seq_list
)
329 /* Remove the head in constant time */
330 return l2cap_seq_list_remove(seq_list
, seq_list
->head
);
333 static void l2cap_seq_list_clear(struct l2cap_seq_list
*seq_list
)
337 if (seq_list
->head
== L2CAP_SEQ_LIST_CLEAR
)
340 for (i
= 0; i
<= seq_list
->mask
; i
++)
341 seq_list
->list
[i
] = L2CAP_SEQ_LIST_CLEAR
;
343 seq_list
->head
= L2CAP_SEQ_LIST_CLEAR
;
344 seq_list
->tail
= L2CAP_SEQ_LIST_CLEAR
;
347 static void l2cap_seq_list_append(struct l2cap_seq_list
*seq_list
, u16 seq
)
349 u16 mask
= seq_list
->mask
;
351 /* All appends happen in constant time */
353 if (seq_list
->list
[seq
& mask
] != L2CAP_SEQ_LIST_CLEAR
)
356 if (seq_list
->tail
== L2CAP_SEQ_LIST_CLEAR
)
357 seq_list
->head
= seq
;
359 seq_list
->list
[seq_list
->tail
& mask
] = seq
;
361 seq_list
->tail
= seq
;
362 seq_list
->list
[seq
& mask
] = L2CAP_SEQ_LIST_TAIL
;
365 static void l2cap_chan_timeout(struct work_struct
*work
)
367 struct l2cap_chan
*chan
= container_of(work
, struct l2cap_chan
,
369 struct l2cap_conn
*conn
= chan
->conn
;
372 BT_DBG("chan %p state %s", chan
, state_to_string(chan
->state
));
374 mutex_lock(&conn
->chan_lock
);
375 l2cap_chan_lock(chan
);
377 if (chan
->state
== BT_CONNECTED
|| chan
->state
== BT_CONFIG
)
378 reason
= ECONNREFUSED
;
379 else if (chan
->state
== BT_CONNECT
&&
380 chan
->sec_level
!= BT_SECURITY_SDP
)
381 reason
= ECONNREFUSED
;
385 l2cap_chan_close(chan
, reason
);
387 l2cap_chan_unlock(chan
);
389 chan
->ops
->close(chan
);
390 mutex_unlock(&conn
->chan_lock
);
392 l2cap_chan_put(chan
);
395 struct l2cap_chan
*l2cap_chan_create(void)
397 struct l2cap_chan
*chan
;
399 chan
= kzalloc(sizeof(*chan
), GFP_ATOMIC
);
403 mutex_init(&chan
->lock
);
405 write_lock(&chan_list_lock
);
406 list_add(&chan
->global_l
, &chan_list
);
407 write_unlock(&chan_list_lock
);
409 INIT_DELAYED_WORK(&chan
->chan_timer
, l2cap_chan_timeout
);
411 chan
->state
= BT_OPEN
;
413 kref_init(&chan
->kref
);
415 /* This flag is cleared in l2cap_chan_ready() */
416 set_bit(CONF_NOT_COMPLETE
, &chan
->conf_state
);
418 BT_DBG("chan %p", chan
);
423 static void l2cap_chan_destroy(struct kref
*kref
)
425 struct l2cap_chan
*chan
= container_of(kref
, struct l2cap_chan
, kref
);
427 BT_DBG("chan %p", chan
);
429 write_lock(&chan_list_lock
);
430 list_del(&chan
->global_l
);
431 write_unlock(&chan_list_lock
);
436 void l2cap_chan_hold(struct l2cap_chan
*c
)
438 BT_DBG("chan %p orig refcnt %d", c
, atomic_read(&c
->kref
.refcount
));
443 void l2cap_chan_put(struct l2cap_chan
*c
)
445 BT_DBG("chan %p orig refcnt %d", c
, atomic_read(&c
->kref
.refcount
));
447 kref_put(&c
->kref
, l2cap_chan_destroy
);
450 void l2cap_chan_set_defaults(struct l2cap_chan
*chan
)
452 chan
->fcs
= L2CAP_FCS_CRC16
;
453 chan
->max_tx
= L2CAP_DEFAULT_MAX_TX
;
454 chan
->tx_win
= L2CAP_DEFAULT_TX_WINDOW
;
455 chan
->tx_win_max
= L2CAP_DEFAULT_TX_WINDOW
;
456 chan
->ack_win
= L2CAP_DEFAULT_TX_WINDOW
;
457 chan
->sec_level
= BT_SECURITY_LOW
;
459 set_bit(FLAG_FORCE_ACTIVE
, &chan
->flags
);
462 void __l2cap_chan_add(struct l2cap_conn
*conn
, struct l2cap_chan
*chan
)
464 BT_DBG("conn %p, psm 0x%2.2x, dcid 0x%4.4x", conn
,
465 __le16_to_cpu(chan
->psm
), chan
->dcid
);
467 conn
->disc_reason
= HCI_ERROR_REMOTE_USER_TERM
;
471 switch (chan
->chan_type
) {
472 case L2CAP_CHAN_CONN_ORIENTED
:
473 if (conn
->hcon
->type
== LE_LINK
) {
475 chan
->omtu
= L2CAP_DEFAULT_MTU
;
476 chan
->scid
= L2CAP_CID_LE_DATA
;
477 chan
->dcid
= L2CAP_CID_LE_DATA
;
479 /* Alloc CID for connection-oriented socket */
480 chan
->scid
= l2cap_alloc_cid(conn
);
481 chan
->omtu
= L2CAP_DEFAULT_MTU
;
485 case L2CAP_CHAN_CONN_LESS
:
486 /* Connectionless socket */
487 chan
->scid
= L2CAP_CID_CONN_LESS
;
488 chan
->dcid
= L2CAP_CID_CONN_LESS
;
489 chan
->omtu
= L2CAP_DEFAULT_MTU
;
492 case L2CAP_CHAN_CONN_FIX_A2MP
:
493 chan
->scid
= L2CAP_CID_A2MP
;
494 chan
->dcid
= L2CAP_CID_A2MP
;
495 chan
->omtu
= L2CAP_A2MP_DEFAULT_MTU
;
496 chan
->imtu
= L2CAP_A2MP_DEFAULT_MTU
;
500 /* Raw socket can send/recv signalling messages only */
501 chan
->scid
= L2CAP_CID_SIGNALING
;
502 chan
->dcid
= L2CAP_CID_SIGNALING
;
503 chan
->omtu
= L2CAP_DEFAULT_MTU
;
506 chan
->local_id
= L2CAP_BESTEFFORT_ID
;
507 chan
->local_stype
= L2CAP_SERV_BESTEFFORT
;
508 chan
->local_msdu
= L2CAP_DEFAULT_MAX_SDU_SIZE
;
509 chan
->local_sdu_itime
= L2CAP_DEFAULT_SDU_ITIME
;
510 chan
->local_acc_lat
= L2CAP_DEFAULT_ACC_LAT
;
511 chan
->local_flush_to
= L2CAP_EFS_DEFAULT_FLUSH_TO
;
513 l2cap_chan_hold(chan
);
515 list_add(&chan
->list
, &conn
->chan_l
);
518 void l2cap_chan_add(struct l2cap_conn
*conn
, struct l2cap_chan
*chan
)
520 mutex_lock(&conn
->chan_lock
);
521 __l2cap_chan_add(conn
, chan
);
522 mutex_unlock(&conn
->chan_lock
);
525 void l2cap_chan_del(struct l2cap_chan
*chan
, int err
)
527 struct l2cap_conn
*conn
= chan
->conn
;
529 __clear_chan_timer(chan
);
531 BT_DBG("chan %p, conn %p, err %d", chan
, conn
, err
);
534 struct amp_mgr
*mgr
= conn
->hcon
->amp_mgr
;
535 /* Delete from channel list */
536 list_del(&chan
->list
);
538 l2cap_chan_put(chan
);
542 if (chan
->chan_type
!= L2CAP_CHAN_CONN_FIX_A2MP
)
543 hci_conn_put(conn
->hcon
);
545 if (mgr
&& mgr
->bredr_chan
== chan
)
546 mgr
->bredr_chan
= NULL
;
549 chan
->ops
->teardown(chan
, err
);
551 if (test_bit(CONF_NOT_COMPLETE
, &chan
->conf_state
))
555 case L2CAP_MODE_BASIC
:
558 case L2CAP_MODE_ERTM
:
559 __clear_retrans_timer(chan
);
560 __clear_monitor_timer(chan
);
561 __clear_ack_timer(chan
);
563 skb_queue_purge(&chan
->srej_q
);
565 l2cap_seq_list_free(&chan
->srej_list
);
566 l2cap_seq_list_free(&chan
->retrans_list
);
570 case L2CAP_MODE_STREAMING
:
571 skb_queue_purge(&chan
->tx_q
);
578 void l2cap_chan_close(struct l2cap_chan
*chan
, int reason
)
580 struct l2cap_conn
*conn
= chan
->conn
;
581 struct sock
*sk
= chan
->sk
;
583 BT_DBG("chan %p state %s sk %p", chan
, state_to_string(chan
->state
),
586 switch (chan
->state
) {
588 chan
->ops
->teardown(chan
, 0);
593 if (chan
->chan_type
== L2CAP_CHAN_CONN_ORIENTED
&&
594 conn
->hcon
->type
== ACL_LINK
) {
595 __set_chan_timer(chan
, sk
->sk_sndtimeo
);
596 l2cap_send_disconn_req(conn
, chan
, reason
);
598 l2cap_chan_del(chan
, reason
);
602 if (chan
->chan_type
== L2CAP_CHAN_CONN_ORIENTED
&&
603 conn
->hcon
->type
== ACL_LINK
) {
604 struct l2cap_conn_rsp rsp
;
607 if (test_bit(BT_SK_DEFER_SETUP
, &bt_sk(sk
)->flags
))
608 result
= L2CAP_CR_SEC_BLOCK
;
610 result
= L2CAP_CR_BAD_PSM
;
611 l2cap_state_change(chan
, BT_DISCONN
);
613 rsp
.scid
= cpu_to_le16(chan
->dcid
);
614 rsp
.dcid
= cpu_to_le16(chan
->scid
);
615 rsp
.result
= cpu_to_le16(result
);
616 rsp
.status
= __constant_cpu_to_le16(L2CAP_CS_NO_INFO
);
617 l2cap_send_cmd(conn
, chan
->ident
, L2CAP_CONN_RSP
,
621 l2cap_chan_del(chan
, reason
);
626 l2cap_chan_del(chan
, reason
);
630 chan
->ops
->teardown(chan
, 0);
635 static inline u8
l2cap_get_auth_type(struct l2cap_chan
*chan
)
637 if (chan
->chan_type
== L2CAP_CHAN_RAW
) {
638 switch (chan
->sec_level
) {
639 case BT_SECURITY_HIGH
:
640 return HCI_AT_DEDICATED_BONDING_MITM
;
641 case BT_SECURITY_MEDIUM
:
642 return HCI_AT_DEDICATED_BONDING
;
644 return HCI_AT_NO_BONDING
;
646 } else if (chan
->psm
== __constant_cpu_to_le16(L2CAP_PSM_SDP
)) {
647 if (chan
->sec_level
== BT_SECURITY_LOW
)
648 chan
->sec_level
= BT_SECURITY_SDP
;
650 if (chan
->sec_level
== BT_SECURITY_HIGH
)
651 return HCI_AT_NO_BONDING_MITM
;
653 return HCI_AT_NO_BONDING
;
655 switch (chan
->sec_level
) {
656 case BT_SECURITY_HIGH
:
657 return HCI_AT_GENERAL_BONDING_MITM
;
658 case BT_SECURITY_MEDIUM
:
659 return HCI_AT_GENERAL_BONDING
;
661 return HCI_AT_NO_BONDING
;
666 /* Service level security */
667 int l2cap_chan_check_security(struct l2cap_chan
*chan
)
669 struct l2cap_conn
*conn
= chan
->conn
;
672 auth_type
= l2cap_get_auth_type(chan
);
674 return hci_conn_security(conn
->hcon
, chan
->sec_level
, auth_type
);
677 static u8
l2cap_get_ident(struct l2cap_conn
*conn
)
681 /* Get next available identificator.
682 * 1 - 128 are used by kernel.
683 * 129 - 199 are reserved.
684 * 200 - 254 are used by utilities like l2ping, etc.
687 spin_lock(&conn
->lock
);
689 if (++conn
->tx_ident
> 128)
694 spin_unlock(&conn
->lock
);
699 static void l2cap_send_cmd(struct l2cap_conn
*conn
, u8 ident
, u8 code
, u16 len
,
702 struct sk_buff
*skb
= l2cap_build_cmd(conn
, code
, ident
, len
, data
);
705 BT_DBG("code 0x%2.2x", code
);
710 if (lmp_no_flush_capable(conn
->hcon
->hdev
))
711 flags
= ACL_START_NO_FLUSH
;
715 bt_cb(skb
)->force_active
= BT_POWER_FORCE_ACTIVE_ON
;
716 skb
->priority
= HCI_PRIO_MAX
;
718 hci_send_acl(conn
->hchan
, skb
, flags
);
721 static void l2cap_do_send(struct l2cap_chan
*chan
, struct sk_buff
*skb
)
723 struct hci_conn
*hcon
= chan
->conn
->hcon
;
726 BT_DBG("chan %p, skb %p len %d priority %u", chan
, skb
, skb
->len
,
729 if (!test_bit(FLAG_FLUSHABLE
, &chan
->flags
) &&
730 lmp_no_flush_capable(hcon
->hdev
))
731 flags
= ACL_START_NO_FLUSH
;
735 bt_cb(skb
)->force_active
= test_bit(FLAG_FORCE_ACTIVE
, &chan
->flags
);
736 hci_send_acl(chan
->conn
->hchan
, skb
, flags
);
739 static void __unpack_enhanced_control(u16 enh
, struct l2cap_ctrl
*control
)
741 control
->reqseq
= (enh
& L2CAP_CTRL_REQSEQ
) >> L2CAP_CTRL_REQSEQ_SHIFT
;
742 control
->final
= (enh
& L2CAP_CTRL_FINAL
) >> L2CAP_CTRL_FINAL_SHIFT
;
744 if (enh
& L2CAP_CTRL_FRAME_TYPE
) {
747 control
->poll
= (enh
& L2CAP_CTRL_POLL
) >> L2CAP_CTRL_POLL_SHIFT
;
748 control
->super
= (enh
& L2CAP_CTRL_SUPERVISE
) >> L2CAP_CTRL_SUPER_SHIFT
;
755 control
->sar
= (enh
& L2CAP_CTRL_SAR
) >> L2CAP_CTRL_SAR_SHIFT
;
756 control
->txseq
= (enh
& L2CAP_CTRL_TXSEQ
) >> L2CAP_CTRL_TXSEQ_SHIFT
;
763 static void __unpack_extended_control(u32 ext
, struct l2cap_ctrl
*control
)
765 control
->reqseq
= (ext
& L2CAP_EXT_CTRL_REQSEQ
) >> L2CAP_EXT_CTRL_REQSEQ_SHIFT
;
766 control
->final
= (ext
& L2CAP_EXT_CTRL_FINAL
) >> L2CAP_EXT_CTRL_FINAL_SHIFT
;
768 if (ext
& L2CAP_EXT_CTRL_FRAME_TYPE
) {
771 control
->poll
= (ext
& L2CAP_EXT_CTRL_POLL
) >> L2CAP_EXT_CTRL_POLL_SHIFT
;
772 control
->super
= (ext
& L2CAP_EXT_CTRL_SUPERVISE
) >> L2CAP_EXT_CTRL_SUPER_SHIFT
;
779 control
->sar
= (ext
& L2CAP_EXT_CTRL_SAR
) >> L2CAP_EXT_CTRL_SAR_SHIFT
;
780 control
->txseq
= (ext
& L2CAP_EXT_CTRL_TXSEQ
) >> L2CAP_EXT_CTRL_TXSEQ_SHIFT
;
787 static inline void __unpack_control(struct l2cap_chan
*chan
,
790 if (test_bit(FLAG_EXT_CTRL
, &chan
->flags
)) {
791 __unpack_extended_control(get_unaligned_le32(skb
->data
),
792 &bt_cb(skb
)->control
);
793 skb_pull(skb
, L2CAP_EXT_CTRL_SIZE
);
795 __unpack_enhanced_control(get_unaligned_le16(skb
->data
),
796 &bt_cb(skb
)->control
);
797 skb_pull(skb
, L2CAP_ENH_CTRL_SIZE
);
801 static u32
__pack_extended_control(struct l2cap_ctrl
*control
)
805 packed
= control
->reqseq
<< L2CAP_EXT_CTRL_REQSEQ_SHIFT
;
806 packed
|= control
->final
<< L2CAP_EXT_CTRL_FINAL_SHIFT
;
808 if (control
->sframe
) {
809 packed
|= control
->poll
<< L2CAP_EXT_CTRL_POLL_SHIFT
;
810 packed
|= control
->super
<< L2CAP_EXT_CTRL_SUPER_SHIFT
;
811 packed
|= L2CAP_EXT_CTRL_FRAME_TYPE
;
813 packed
|= control
->sar
<< L2CAP_EXT_CTRL_SAR_SHIFT
;
814 packed
|= control
->txseq
<< L2CAP_EXT_CTRL_TXSEQ_SHIFT
;
820 static u16
__pack_enhanced_control(struct l2cap_ctrl
*control
)
824 packed
= control
->reqseq
<< L2CAP_CTRL_REQSEQ_SHIFT
;
825 packed
|= control
->final
<< L2CAP_CTRL_FINAL_SHIFT
;
827 if (control
->sframe
) {
828 packed
|= control
->poll
<< L2CAP_CTRL_POLL_SHIFT
;
829 packed
|= control
->super
<< L2CAP_CTRL_SUPER_SHIFT
;
830 packed
|= L2CAP_CTRL_FRAME_TYPE
;
832 packed
|= control
->sar
<< L2CAP_CTRL_SAR_SHIFT
;
833 packed
|= control
->txseq
<< L2CAP_CTRL_TXSEQ_SHIFT
;
839 static inline void __pack_control(struct l2cap_chan
*chan
,
840 struct l2cap_ctrl
*control
,
843 if (test_bit(FLAG_EXT_CTRL
, &chan
->flags
)) {
844 put_unaligned_le32(__pack_extended_control(control
),
845 skb
->data
+ L2CAP_HDR_SIZE
);
847 put_unaligned_le16(__pack_enhanced_control(control
),
848 skb
->data
+ L2CAP_HDR_SIZE
);
852 static inline unsigned int __ertm_hdr_size(struct l2cap_chan
*chan
)
854 if (test_bit(FLAG_EXT_CTRL
, &chan
->flags
))
855 return L2CAP_EXT_HDR_SIZE
;
857 return L2CAP_ENH_HDR_SIZE
;
860 static struct sk_buff
*l2cap_create_sframe_pdu(struct l2cap_chan
*chan
,
864 struct l2cap_hdr
*lh
;
865 int hlen
= __ertm_hdr_size(chan
);
867 if (chan
->fcs
== L2CAP_FCS_CRC16
)
868 hlen
+= L2CAP_FCS_SIZE
;
870 skb
= bt_skb_alloc(hlen
, GFP_KERNEL
);
873 return ERR_PTR(-ENOMEM
);
875 lh
= (struct l2cap_hdr
*) skb_put(skb
, L2CAP_HDR_SIZE
);
876 lh
->len
= cpu_to_le16(hlen
- L2CAP_HDR_SIZE
);
877 lh
->cid
= cpu_to_le16(chan
->dcid
);
879 if (test_bit(FLAG_EXT_CTRL
, &chan
->flags
))
880 put_unaligned_le32(control
, skb_put(skb
, L2CAP_EXT_CTRL_SIZE
));
882 put_unaligned_le16(control
, skb_put(skb
, L2CAP_ENH_CTRL_SIZE
));
884 if (chan
->fcs
== L2CAP_FCS_CRC16
) {
885 u16 fcs
= crc16(0, (u8
*)skb
->data
, skb
->len
);
886 put_unaligned_le16(fcs
, skb_put(skb
, L2CAP_FCS_SIZE
));
889 skb
->priority
= HCI_PRIO_MAX
;
893 static void l2cap_send_sframe(struct l2cap_chan
*chan
,
894 struct l2cap_ctrl
*control
)
899 BT_DBG("chan %p, control %p", chan
, control
);
901 if (!control
->sframe
)
904 if (test_and_clear_bit(CONN_SEND_FBIT
, &chan
->conn_state
) &&
908 if (control
->super
== L2CAP_SUPER_RR
)
909 clear_bit(CONN_RNR_SENT
, &chan
->conn_state
);
910 else if (control
->super
== L2CAP_SUPER_RNR
)
911 set_bit(CONN_RNR_SENT
, &chan
->conn_state
);
913 if (control
->super
!= L2CAP_SUPER_SREJ
) {
914 chan
->last_acked_seq
= control
->reqseq
;
915 __clear_ack_timer(chan
);
918 BT_DBG("reqseq %d, final %d, poll %d, super %d", control
->reqseq
,
919 control
->final
, control
->poll
, control
->super
);
921 if (test_bit(FLAG_EXT_CTRL
, &chan
->flags
))
922 control_field
= __pack_extended_control(control
);
924 control_field
= __pack_enhanced_control(control
);
926 skb
= l2cap_create_sframe_pdu(chan
, control_field
);
928 l2cap_do_send(chan
, skb
);
931 static void l2cap_send_rr_or_rnr(struct l2cap_chan
*chan
, bool poll
)
933 struct l2cap_ctrl control
;
935 BT_DBG("chan %p, poll %d", chan
, poll
);
937 memset(&control
, 0, sizeof(control
));
941 if (test_bit(CONN_LOCAL_BUSY
, &chan
->conn_state
))
942 control
.super
= L2CAP_SUPER_RNR
;
944 control
.super
= L2CAP_SUPER_RR
;
946 control
.reqseq
= chan
->buffer_seq
;
947 l2cap_send_sframe(chan
, &control
);
950 static inline int __l2cap_no_conn_pending(struct l2cap_chan
*chan
)
952 return !test_bit(CONF_CONNECT_PEND
, &chan
->conf_state
);
955 static bool __amp_capable(struct l2cap_chan
*chan
)
957 struct l2cap_conn
*conn
= chan
->conn
;
960 chan
->chan_policy
== BT_CHANNEL_POLICY_AMP_PREFERRED
&&
961 conn
->fixed_chan_mask
& L2CAP_FC_A2MP
)
967 void l2cap_send_conn_req(struct l2cap_chan
*chan
)
969 struct l2cap_conn
*conn
= chan
->conn
;
970 struct l2cap_conn_req req
;
972 req
.scid
= cpu_to_le16(chan
->scid
);
975 chan
->ident
= l2cap_get_ident(conn
);
977 set_bit(CONF_CONNECT_PEND
, &chan
->conf_state
);
979 l2cap_send_cmd(conn
, chan
->ident
, L2CAP_CONN_REQ
, sizeof(req
), &req
);
982 static void l2cap_chan_ready(struct l2cap_chan
*chan
)
984 /* This clears all conf flags, including CONF_NOT_COMPLETE */
985 chan
->conf_state
= 0;
986 __clear_chan_timer(chan
);
988 chan
->state
= BT_CONNECTED
;
990 chan
->ops
->ready(chan
);
993 static void l2cap_start_connection(struct l2cap_chan
*chan
)
995 if (__amp_capable(chan
)) {
996 BT_DBG("chan %p AMP capable: discover AMPs", chan
);
997 a2mp_discover_amp(chan
);
999 l2cap_send_conn_req(chan
);
1003 static void l2cap_do_start(struct l2cap_chan
*chan
)
1005 struct l2cap_conn
*conn
= chan
->conn
;
1007 if (conn
->hcon
->type
== LE_LINK
) {
1008 l2cap_chan_ready(chan
);
1012 if (conn
->info_state
& L2CAP_INFO_FEAT_MASK_REQ_SENT
) {
1013 if (!(conn
->info_state
& L2CAP_INFO_FEAT_MASK_REQ_DONE
))
1016 if (l2cap_chan_check_security(chan
) &&
1017 __l2cap_no_conn_pending(chan
)) {
1018 l2cap_start_connection(chan
);
1021 struct l2cap_info_req req
;
1022 req
.type
= __constant_cpu_to_le16(L2CAP_IT_FEAT_MASK
);
1024 conn
->info_state
|= L2CAP_INFO_FEAT_MASK_REQ_SENT
;
1025 conn
->info_ident
= l2cap_get_ident(conn
);
1027 schedule_delayed_work(&conn
->info_timer
, L2CAP_INFO_TIMEOUT
);
1029 l2cap_send_cmd(conn
, conn
->info_ident
, L2CAP_INFO_REQ
,
1034 static inline int l2cap_mode_supported(__u8 mode
, __u32 feat_mask
)
1036 u32 local_feat_mask
= l2cap_feat_mask
;
1038 local_feat_mask
|= L2CAP_FEAT_ERTM
| L2CAP_FEAT_STREAMING
;
1041 case L2CAP_MODE_ERTM
:
1042 return L2CAP_FEAT_ERTM
& feat_mask
& local_feat_mask
;
1043 case L2CAP_MODE_STREAMING
:
1044 return L2CAP_FEAT_STREAMING
& feat_mask
& local_feat_mask
;
1050 static void l2cap_send_disconn_req(struct l2cap_conn
*conn
,
1051 struct l2cap_chan
*chan
, int err
)
1053 struct sock
*sk
= chan
->sk
;
1054 struct l2cap_disconn_req req
;
1059 if (chan
->mode
== L2CAP_MODE_ERTM
&& chan
->state
== BT_CONNECTED
) {
1060 __clear_retrans_timer(chan
);
1061 __clear_monitor_timer(chan
);
1062 __clear_ack_timer(chan
);
1065 if (chan
->chan_type
== L2CAP_CHAN_CONN_FIX_A2MP
) {
1066 l2cap_state_change(chan
, BT_DISCONN
);
1070 req
.dcid
= cpu_to_le16(chan
->dcid
);
1071 req
.scid
= cpu_to_le16(chan
->scid
);
1072 l2cap_send_cmd(conn
, l2cap_get_ident(conn
), L2CAP_DISCONN_REQ
,
1076 __l2cap_state_change(chan
, BT_DISCONN
);
1077 __l2cap_chan_set_err(chan
, err
);
1081 /* ---- L2CAP connections ---- */
1082 static void l2cap_conn_start(struct l2cap_conn
*conn
)
1084 struct l2cap_chan
*chan
, *tmp
;
1086 BT_DBG("conn %p", conn
);
1088 mutex_lock(&conn
->chan_lock
);
1090 list_for_each_entry_safe(chan
, tmp
, &conn
->chan_l
, list
) {
1091 struct sock
*sk
= chan
->sk
;
1093 l2cap_chan_lock(chan
);
1095 if (chan
->chan_type
!= L2CAP_CHAN_CONN_ORIENTED
) {
1096 l2cap_chan_unlock(chan
);
1100 if (chan
->state
== BT_CONNECT
) {
1101 if (!l2cap_chan_check_security(chan
) ||
1102 !__l2cap_no_conn_pending(chan
)) {
1103 l2cap_chan_unlock(chan
);
1107 if (!l2cap_mode_supported(chan
->mode
, conn
->feat_mask
)
1108 && test_bit(CONF_STATE2_DEVICE
,
1109 &chan
->conf_state
)) {
1110 l2cap_chan_close(chan
, ECONNRESET
);
1111 l2cap_chan_unlock(chan
);
1115 l2cap_start_connection(chan
);
1117 } else if (chan
->state
== BT_CONNECT2
) {
1118 struct l2cap_conn_rsp rsp
;
1120 rsp
.scid
= cpu_to_le16(chan
->dcid
);
1121 rsp
.dcid
= cpu_to_le16(chan
->scid
);
1123 if (l2cap_chan_check_security(chan
)) {
1125 if (test_bit(BT_SK_DEFER_SETUP
,
1126 &bt_sk(sk
)->flags
)) {
1127 rsp
.result
= __constant_cpu_to_le16(L2CAP_CR_PEND
);
1128 rsp
.status
= __constant_cpu_to_le16(L2CAP_CS_AUTHOR_PEND
);
1129 chan
->ops
->defer(chan
);
1132 __l2cap_state_change(chan
, BT_CONFIG
);
1133 rsp
.result
= __constant_cpu_to_le16(L2CAP_CR_SUCCESS
);
1134 rsp
.status
= __constant_cpu_to_le16(L2CAP_CS_NO_INFO
);
1138 rsp
.result
= __constant_cpu_to_le16(L2CAP_CR_PEND
);
1139 rsp
.status
= __constant_cpu_to_le16(L2CAP_CS_AUTHEN_PEND
);
1142 l2cap_send_cmd(conn
, chan
->ident
, L2CAP_CONN_RSP
,
1145 if (test_bit(CONF_REQ_SENT
, &chan
->conf_state
) ||
1146 rsp
.result
!= L2CAP_CR_SUCCESS
) {
1147 l2cap_chan_unlock(chan
);
1151 set_bit(CONF_REQ_SENT
, &chan
->conf_state
);
1152 l2cap_send_cmd(conn
, l2cap_get_ident(conn
), L2CAP_CONF_REQ
,
1153 l2cap_build_conf_req(chan
, buf
), buf
);
1154 chan
->num_conf_req
++;
1157 l2cap_chan_unlock(chan
);
1160 mutex_unlock(&conn
->chan_lock
);
1163 /* Find socket with cid and source/destination bdaddr.
1164 * Returns closest match, locked.
1166 static struct l2cap_chan
*l2cap_global_chan_by_scid(int state
, u16 cid
,
1170 struct l2cap_chan
*c
, *c1
= NULL
;
1172 read_lock(&chan_list_lock
);
1174 list_for_each_entry(c
, &chan_list
, global_l
) {
1175 struct sock
*sk
= c
->sk
;
1177 if (state
&& c
->state
!= state
)
1180 if (c
->scid
== cid
) {
1181 int src_match
, dst_match
;
1182 int src_any
, dst_any
;
1185 src_match
= !bacmp(&bt_sk(sk
)->src
, src
);
1186 dst_match
= !bacmp(&bt_sk(sk
)->dst
, dst
);
1187 if (src_match
&& dst_match
) {
1188 read_unlock(&chan_list_lock
);
1193 src_any
= !bacmp(&bt_sk(sk
)->src
, BDADDR_ANY
);
1194 dst_any
= !bacmp(&bt_sk(sk
)->dst
, BDADDR_ANY
);
1195 if ((src_match
&& dst_any
) || (src_any
&& dst_match
) ||
1196 (src_any
&& dst_any
))
1201 read_unlock(&chan_list_lock
);
1206 static void l2cap_le_conn_ready(struct l2cap_conn
*conn
)
1208 struct sock
*parent
, *sk
;
1209 struct l2cap_chan
*chan
, *pchan
;
1213 /* Check if we have socket listening on cid */
1214 pchan
= l2cap_global_chan_by_scid(BT_LISTEN
, L2CAP_CID_LE_DATA
,
1215 conn
->src
, conn
->dst
);
1223 chan
= pchan
->ops
->new_connection(pchan
);
1229 hci_conn_hold(conn
->hcon
);
1230 conn
->hcon
->disc_timeout
= HCI_DISCONN_TIMEOUT
;
1232 bacpy(&bt_sk(sk
)->src
, conn
->src
);
1233 bacpy(&bt_sk(sk
)->dst
, conn
->dst
);
1235 l2cap_chan_add(conn
, chan
);
1237 l2cap_chan_ready(chan
);
1240 release_sock(parent
);
1243 static void l2cap_conn_ready(struct l2cap_conn
*conn
)
1245 struct l2cap_chan
*chan
;
1246 struct hci_conn
*hcon
= conn
->hcon
;
1248 BT_DBG("conn %p", conn
);
1250 if (!hcon
->out
&& hcon
->type
== LE_LINK
)
1251 l2cap_le_conn_ready(conn
);
1253 if (hcon
->out
&& hcon
->type
== LE_LINK
)
1254 smp_conn_security(hcon
, hcon
->pending_sec_level
);
1256 mutex_lock(&conn
->chan_lock
);
1258 list_for_each_entry(chan
, &conn
->chan_l
, list
) {
1260 l2cap_chan_lock(chan
);
1262 if (chan
->chan_type
== L2CAP_CHAN_CONN_FIX_A2MP
) {
1263 l2cap_chan_unlock(chan
);
1267 if (hcon
->type
== LE_LINK
) {
1268 if (smp_conn_security(hcon
, chan
->sec_level
))
1269 l2cap_chan_ready(chan
);
1271 } else if (chan
->chan_type
!= L2CAP_CHAN_CONN_ORIENTED
) {
1272 struct sock
*sk
= chan
->sk
;
1273 __clear_chan_timer(chan
);
1275 __l2cap_state_change(chan
, BT_CONNECTED
);
1276 sk
->sk_state_change(sk
);
1279 } else if (chan
->state
== BT_CONNECT
)
1280 l2cap_do_start(chan
);
1282 l2cap_chan_unlock(chan
);
1285 mutex_unlock(&conn
->chan_lock
);
1288 /* Notify sockets that we cannot guaranty reliability anymore */
1289 static void l2cap_conn_unreliable(struct l2cap_conn
*conn
, int err
)
1291 struct l2cap_chan
*chan
;
1293 BT_DBG("conn %p", conn
);
1295 mutex_lock(&conn
->chan_lock
);
1297 list_for_each_entry(chan
, &conn
->chan_l
, list
) {
1298 if (test_bit(FLAG_FORCE_RELIABLE
, &chan
->flags
))
1299 l2cap_chan_set_err(chan
, err
);
1302 mutex_unlock(&conn
->chan_lock
);
1305 static void l2cap_info_timeout(struct work_struct
*work
)
1307 struct l2cap_conn
*conn
= container_of(work
, struct l2cap_conn
,
1310 conn
->info_state
|= L2CAP_INFO_FEAT_MASK_REQ_DONE
;
1311 conn
->info_ident
= 0;
1313 l2cap_conn_start(conn
);
1316 static void l2cap_conn_del(struct hci_conn
*hcon
, int err
)
1318 struct l2cap_conn
*conn
= hcon
->l2cap_data
;
1319 struct l2cap_chan
*chan
, *l
;
1324 BT_DBG("hcon %p conn %p, err %d", hcon
, conn
, err
);
1326 kfree_skb(conn
->rx_skb
);
1328 mutex_lock(&conn
->chan_lock
);
1331 list_for_each_entry_safe(chan
, l
, &conn
->chan_l
, list
) {
1332 l2cap_chan_hold(chan
);
1333 l2cap_chan_lock(chan
);
1335 l2cap_chan_del(chan
, err
);
1337 l2cap_chan_unlock(chan
);
1339 chan
->ops
->close(chan
);
1340 l2cap_chan_put(chan
);
1343 mutex_unlock(&conn
->chan_lock
);
1345 hci_chan_del(conn
->hchan
);
1347 if (conn
->info_state
& L2CAP_INFO_FEAT_MASK_REQ_SENT
)
1348 cancel_delayed_work_sync(&conn
->info_timer
);
1350 if (test_and_clear_bit(HCI_CONN_LE_SMP_PEND
, &hcon
->flags
)) {
1351 cancel_delayed_work_sync(&conn
->security_timer
);
1352 smp_chan_destroy(conn
);
1355 hcon
->l2cap_data
= NULL
;
1359 static void security_timeout(struct work_struct
*work
)
1361 struct l2cap_conn
*conn
= container_of(work
, struct l2cap_conn
,
1362 security_timer
.work
);
1364 BT_DBG("conn %p", conn
);
1366 if (test_and_clear_bit(HCI_CONN_LE_SMP_PEND
, &conn
->hcon
->flags
)) {
1367 smp_chan_destroy(conn
);
1368 l2cap_conn_del(conn
->hcon
, ETIMEDOUT
);
1372 static struct l2cap_conn
*l2cap_conn_add(struct hci_conn
*hcon
, u8 status
)
1374 struct l2cap_conn
*conn
= hcon
->l2cap_data
;
1375 struct hci_chan
*hchan
;
1380 hchan
= hci_chan_create(hcon
);
1384 conn
= kzalloc(sizeof(struct l2cap_conn
), GFP_KERNEL
);
1386 hci_chan_del(hchan
);
1390 hcon
->l2cap_data
= conn
;
1392 conn
->hchan
= hchan
;
1394 BT_DBG("hcon %p conn %p hchan %p", hcon
, conn
, hchan
);
1396 switch (hcon
->type
) {
1398 conn
->mtu
= hcon
->hdev
->block_mtu
;
1402 if (hcon
->hdev
->le_mtu
) {
1403 conn
->mtu
= hcon
->hdev
->le_mtu
;
1409 conn
->mtu
= hcon
->hdev
->acl_mtu
;
1413 conn
->src
= &hcon
->hdev
->bdaddr
;
1414 conn
->dst
= &hcon
->dst
;
1416 conn
->feat_mask
= 0;
1418 spin_lock_init(&conn
->lock
);
1419 mutex_init(&conn
->chan_lock
);
1421 INIT_LIST_HEAD(&conn
->chan_l
);
1423 if (hcon
->type
== LE_LINK
)
1424 INIT_DELAYED_WORK(&conn
->security_timer
, security_timeout
);
1426 INIT_DELAYED_WORK(&conn
->info_timer
, l2cap_info_timeout
);
1428 conn
->disc_reason
= HCI_ERROR_REMOTE_USER_TERM
;
1433 /* ---- Socket interface ---- */
1435 /* Find socket with psm and source / destination bdaddr.
1436 * Returns closest match.
1438 static struct l2cap_chan
*l2cap_global_chan_by_psm(int state
, __le16 psm
,
1442 struct l2cap_chan
*c
, *c1
= NULL
;
1444 read_lock(&chan_list_lock
);
1446 list_for_each_entry(c
, &chan_list
, global_l
) {
1447 struct sock
*sk
= c
->sk
;
1449 if (state
&& c
->state
!= state
)
1452 if (c
->psm
== psm
) {
1453 int src_match
, dst_match
;
1454 int src_any
, dst_any
;
1457 src_match
= !bacmp(&bt_sk(sk
)->src
, src
);
1458 dst_match
= !bacmp(&bt_sk(sk
)->dst
, dst
);
1459 if (src_match
&& dst_match
) {
1460 read_unlock(&chan_list_lock
);
1465 src_any
= !bacmp(&bt_sk(sk
)->src
, BDADDR_ANY
);
1466 dst_any
= !bacmp(&bt_sk(sk
)->dst
, BDADDR_ANY
);
1467 if ((src_match
&& dst_any
) || (src_any
&& dst_match
) ||
1468 (src_any
&& dst_any
))
1473 read_unlock(&chan_list_lock
);
1478 int l2cap_chan_connect(struct l2cap_chan
*chan
, __le16 psm
, u16 cid
,
1479 bdaddr_t
*dst
, u8 dst_type
)
1481 struct sock
*sk
= chan
->sk
;
1482 bdaddr_t
*src
= &bt_sk(sk
)->src
;
1483 struct l2cap_conn
*conn
;
1484 struct hci_conn
*hcon
;
1485 struct hci_dev
*hdev
;
1489 BT_DBG("%pMR -> %pMR (type %u) psm 0x%2.2x", src
, dst
,
1490 dst_type
, __le16_to_cpu(psm
));
1492 hdev
= hci_get_route(dst
, src
);
1494 return -EHOSTUNREACH
;
1498 l2cap_chan_lock(chan
);
1500 /* PSM must be odd and lsb of upper byte must be 0 */
1501 if ((__le16_to_cpu(psm
) & 0x0101) != 0x0001 && !cid
&&
1502 chan
->chan_type
!= L2CAP_CHAN_RAW
) {
1507 if (chan
->chan_type
== L2CAP_CHAN_CONN_ORIENTED
&& !(psm
|| cid
)) {
1512 switch (chan
->mode
) {
1513 case L2CAP_MODE_BASIC
:
1515 case L2CAP_MODE_ERTM
:
1516 case L2CAP_MODE_STREAMING
:
1525 switch (chan
->state
) {
1529 /* Already connecting */
1534 /* Already connected */
1548 /* Set destination address and psm */
1550 bacpy(&bt_sk(sk
)->dst
, dst
);
1556 auth_type
= l2cap_get_auth_type(chan
);
1558 if (chan
->dcid
== L2CAP_CID_LE_DATA
)
1559 hcon
= hci_connect(hdev
, LE_LINK
, dst
, dst_type
,
1560 chan
->sec_level
, auth_type
);
1562 hcon
= hci_connect(hdev
, ACL_LINK
, dst
, dst_type
,
1563 chan
->sec_level
, auth_type
);
1566 err
= PTR_ERR(hcon
);
1570 conn
= l2cap_conn_add(hcon
, 0);
1577 if (hcon
->type
== LE_LINK
) {
1580 if (!list_empty(&conn
->chan_l
)) {
1589 /* Update source addr of the socket */
1590 bacpy(src
, conn
->src
);
1592 l2cap_chan_unlock(chan
);
1593 l2cap_chan_add(conn
, chan
);
1594 l2cap_chan_lock(chan
);
1596 l2cap_state_change(chan
, BT_CONNECT
);
1597 __set_chan_timer(chan
, sk
->sk_sndtimeo
);
1599 if (hcon
->state
== BT_CONNECTED
) {
1600 if (chan
->chan_type
!= L2CAP_CHAN_CONN_ORIENTED
) {
1601 __clear_chan_timer(chan
);
1602 if (l2cap_chan_check_security(chan
))
1603 l2cap_state_change(chan
, BT_CONNECTED
);
1605 l2cap_do_start(chan
);
1611 l2cap_chan_unlock(chan
);
1612 hci_dev_unlock(hdev
);
1617 int __l2cap_wait_ack(struct sock
*sk
)
1619 struct l2cap_chan
*chan
= l2cap_pi(sk
)->chan
;
1620 DECLARE_WAITQUEUE(wait
, current
);
1624 add_wait_queue(sk_sleep(sk
), &wait
);
1625 set_current_state(TASK_INTERRUPTIBLE
);
1626 while (chan
->unacked_frames
> 0 && chan
->conn
) {
1630 if (signal_pending(current
)) {
1631 err
= sock_intr_errno(timeo
);
1636 timeo
= schedule_timeout(timeo
);
1638 set_current_state(TASK_INTERRUPTIBLE
);
1640 err
= sock_error(sk
);
1644 set_current_state(TASK_RUNNING
);
1645 remove_wait_queue(sk_sleep(sk
), &wait
);
/* Delayed-work callback for chan->monitor_timer (ERTM monitor timer).
 * Feeds an L2CAP_EV_MONITOR_TO event into the channel's tx state
 * machine.
 * NOTE(review): fragmented extraction — the early-exit condition that
 * precedes the first unlock/put pair (presumably a bail-out when the
 * channel is no longer usable) is missing from this view; confirm
 * against the full file.
 */
static void l2cap_monitor_timeout(struct work_struct
*work
)
struct l2cap_chan
*chan
= container_of(work
, struct l2cap_chan
,
monitor_timer
.work
);
BT_DBG("chan %p", chan
);
l2cap_chan_lock(chan
);
/* Early-exit path: release the lock and the timer's channel ref. */
l2cap_chan_unlock(chan
);
l2cap_chan_put(chan
);
/* Normal path: hand the monitor-timeout event to the tx state machine. */
l2cap_tx(chan
, NULL
, NULL
, L2CAP_EV_MONITOR_TO
);
l2cap_chan_unlock(chan
);
l2cap_chan_put(chan
);
/* Delayed-work callback for chan->retrans_timer (ERTM retransmission
 * timer).  Feeds an L2CAP_EV_RETRANS_TO event into the channel's tx
 * state machine.
 * NOTE(review): fragmented extraction — the guard condition before the
 * first unlock/put pair is missing from this view, mirroring
 * l2cap_monitor_timeout above; confirm against the full file.
 */
static void l2cap_retrans_timeout(struct work_struct
*work
)
struct l2cap_chan
*chan
= container_of(work
, struct l2cap_chan
,
retrans_timer
.work
);
BT_DBG("chan %p", chan
);
l2cap_chan_lock(chan
);
/* Early-exit path: release the lock and the timer's channel ref. */
l2cap_chan_unlock(chan
);
l2cap_chan_put(chan
);
/* Normal path: hand the retransmit-timeout event to the state machine. */
l2cap_tx(chan
, NULL
, NULL
, L2CAP_EV_RETRANS_TO
);
l2cap_chan_unlock(chan
);
l2cap_chan_put(chan
);
/* Transmit all PDUs in @skbs on a streaming-mode channel: append them
 * to the channel's tx queue, then drain the queue, stamping each frame
 * with the next tx sequence number (and an FCS when CRC16 is enabled)
 * before sending.  Streaming mode has no acknowledgements, so frames
 * are not retained after l2cap_do_send.
 * NOTE(review): fragmented extraction — loop/if closing braces are
 * missing from this view.
 */
static void l2cap_streaming_send(struct l2cap_chan
*chan
,
struct sk_buff_head
*skbs
)
struct sk_buff
*skb
;
struct l2cap_ctrl
*control
;
BT_DBG("chan %p, skbs %p", chan
, skbs
);
/* Move the whole batch onto the channel queue in one splice. */
skb_queue_splice_tail_init(skbs
, &chan
->tx_q
);
while (!skb_queue_empty(&chan
->tx_q
)) {
skb
= skb_dequeue(&chan
->tx_q
);
/* Per-skb control block carries the ERTM/streaming header fields. */
bt_cb(skb
)->control
.retries
= 1;
control
= &bt_cb(skb
)->control
;
/* Streaming frames carry no acknowledgement: reqseq is always 0. */
control
->reqseq
= 0;
control
->txseq
= chan
->next_tx_seq
;
/* Write the (enhanced or extended) control field into the skb. */
__pack_control(chan
, control
, skb
);
/* Append the CRC16 frame check sequence when negotiated. */
if (chan
->fcs
== L2CAP_FCS_CRC16
) {
u16 fcs
= crc16(0, (u8
*) skb
->data
, skb
->len
);
put_unaligned_le16(fcs
, skb_put(skb
, L2CAP_FCS_SIZE
));
l2cap_do_send(chan
, skb
);
BT_DBG("Sent txseq %u", control
->txseq
);
/* Advance modulo the sequence space and account the frame. */
chan
->next_tx_seq
= __next_seq(chan
, chan
->next_tx_seq
);
chan
->frames_sent
++;
1726 static int l2cap_ertm_send(struct l2cap_chan
*chan
)
1728 struct sk_buff
*skb
, *tx_skb
;
1729 struct l2cap_ctrl
*control
;
1732 BT_DBG("chan %p", chan
);
1734 if (chan
->state
!= BT_CONNECTED
)
1737 if (test_bit(CONN_REMOTE_BUSY
, &chan
->conn_state
))
1740 while (chan
->tx_send_head
&&
1741 chan
->unacked_frames
< chan
->remote_tx_win
&&
1742 chan
->tx_state
== L2CAP_TX_STATE_XMIT
) {
1744 skb
= chan
->tx_send_head
;
1746 bt_cb(skb
)->control
.retries
= 1;
1747 control
= &bt_cb(skb
)->control
;
1749 if (test_and_clear_bit(CONN_SEND_FBIT
, &chan
->conn_state
))
1752 control
->reqseq
= chan
->buffer_seq
;
1753 chan
->last_acked_seq
= chan
->buffer_seq
;
1754 control
->txseq
= chan
->next_tx_seq
;
1756 __pack_control(chan
, control
, skb
);
1758 if (chan
->fcs
== L2CAP_FCS_CRC16
) {
1759 u16 fcs
= crc16(0, (u8
*) skb
->data
, skb
->len
);
1760 put_unaligned_le16(fcs
, skb_put(skb
, L2CAP_FCS_SIZE
));
1763 /* Clone after data has been modified. Data is assumed to be
1764 read-only (for locking purposes) on cloned sk_buffs.
1766 tx_skb
= skb_clone(skb
, GFP_KERNEL
);
1771 __set_retrans_timer(chan
);
1773 chan
->next_tx_seq
= __next_seq(chan
, chan
->next_tx_seq
);
1774 chan
->unacked_frames
++;
1775 chan
->frames_sent
++;
1778 if (skb_queue_is_last(&chan
->tx_q
, skb
))
1779 chan
->tx_send_head
= NULL
;
1781 chan
->tx_send_head
= skb_queue_next(&chan
->tx_q
, skb
);
1783 l2cap_do_send(chan
, tx_skb
);
1784 BT_DBG("Sent txseq %u", control
->txseq
);
1787 BT_DBG("Sent %d, %u unacked, %u in ERTM queue", sent
,
1788 chan
->unacked_frames
, skb_queue_len(&chan
->tx_q
));
1793 static void l2cap_ertm_resend(struct l2cap_chan
*chan
)
1795 struct l2cap_ctrl control
;
1796 struct sk_buff
*skb
;
1797 struct sk_buff
*tx_skb
;
1800 BT_DBG("chan %p", chan
);
1802 if (test_bit(CONN_REMOTE_BUSY
, &chan
->conn_state
))
1805 while (chan
->retrans_list
.head
!= L2CAP_SEQ_LIST_CLEAR
) {
1806 seq
= l2cap_seq_list_pop(&chan
->retrans_list
);
1808 skb
= l2cap_ertm_seq_in_queue(&chan
->tx_q
, seq
);
1810 BT_DBG("Error: Can't retransmit seq %d, frame missing",
1815 bt_cb(skb
)->control
.retries
++;
1816 control
= bt_cb(skb
)->control
;
1818 if (chan
->max_tx
!= 0 &&
1819 bt_cb(skb
)->control
.retries
> chan
->max_tx
) {
1820 BT_DBG("Retry limit exceeded (%d)", chan
->max_tx
);
1821 l2cap_send_disconn_req(chan
->conn
, chan
, ECONNRESET
);
1822 l2cap_seq_list_clear(&chan
->retrans_list
);
1826 control
.reqseq
= chan
->buffer_seq
;
1827 if (test_and_clear_bit(CONN_SEND_FBIT
, &chan
->conn_state
))
1832 if (skb_cloned(skb
)) {
1833 /* Cloned sk_buffs are read-only, so we need a
1836 tx_skb
= skb_copy(skb
, GFP_KERNEL
);
1838 tx_skb
= skb_clone(skb
, GFP_KERNEL
);
1842 l2cap_seq_list_clear(&chan
->retrans_list
);
1846 /* Update skb contents */
1847 if (test_bit(FLAG_EXT_CTRL
, &chan
->flags
)) {
1848 put_unaligned_le32(__pack_extended_control(&control
),
1849 tx_skb
->data
+ L2CAP_HDR_SIZE
);
1851 put_unaligned_le16(__pack_enhanced_control(&control
),
1852 tx_skb
->data
+ L2CAP_HDR_SIZE
);
1855 if (chan
->fcs
== L2CAP_FCS_CRC16
) {
1856 u16 fcs
= crc16(0, (u8
*) tx_skb
->data
, tx_skb
->len
);
1857 put_unaligned_le16(fcs
, skb_put(tx_skb
,
1861 l2cap_do_send(chan
, tx_skb
);
1863 BT_DBG("Resent txseq %d", control
.txseq
);
1865 chan
->last_acked_seq
= chan
->buffer_seq
;
/* Retransmit the single I-frame identified by @control->reqseq:
 * queue that sequence number on the retransmit list, then run the
 * resend engine.
 */
static void l2cap_retransmit(struct l2cap_chan
*chan
,
struct l2cap_ctrl
*control
)
BT_DBG("chan %p, control %p", chan
, control
);
l2cap_seq_list_append(&chan
->retrans_list
, control
->reqseq
);
l2cap_ertm_resend(chan
);
1878 static void l2cap_retransmit_all(struct l2cap_chan
*chan
,
1879 struct l2cap_ctrl
*control
)
1881 struct sk_buff
*skb
;
1883 BT_DBG("chan %p, control %p", chan
, control
);
1886 set_bit(CONN_SEND_FBIT
, &chan
->conn_state
);
1888 l2cap_seq_list_clear(&chan
->retrans_list
);
1890 if (test_bit(CONN_REMOTE_BUSY
, &chan
->conn_state
))
1893 if (chan
->unacked_frames
) {
1894 skb_queue_walk(&chan
->tx_q
, skb
) {
1895 if (bt_cb(skb
)->control
.txseq
== control
->reqseq
||
1896 skb
== chan
->tx_send_head
)
1900 skb_queue_walk_from(&chan
->tx_q
, skb
) {
1901 if (skb
== chan
->tx_send_head
)
1904 l2cap_seq_list_append(&chan
->retrans_list
,
1905 bt_cb(skb
)->control
.txseq
);
1908 l2cap_ertm_resend(chan
);
1912 static void l2cap_send_ack(struct l2cap_chan
*chan
)
1914 struct l2cap_ctrl control
;
1915 u16 frames_to_ack
= __seq_offset(chan
, chan
->buffer_seq
,
1916 chan
->last_acked_seq
);
1919 BT_DBG("chan %p last_acked_seq %d buffer_seq %d",
1920 chan
, chan
->last_acked_seq
, chan
->buffer_seq
);
1922 memset(&control
, 0, sizeof(control
));
1925 if (test_bit(CONN_LOCAL_BUSY
, &chan
->conn_state
) &&
1926 chan
->rx_state
== L2CAP_RX_STATE_RECV
) {
1927 __clear_ack_timer(chan
);
1928 control
.super
= L2CAP_SUPER_RNR
;
1929 control
.reqseq
= chan
->buffer_seq
;
1930 l2cap_send_sframe(chan
, &control
);
1932 if (!test_bit(CONN_REMOTE_BUSY
, &chan
->conn_state
)) {
1933 l2cap_ertm_send(chan
);
1934 /* If any i-frames were sent, they included an ack */
1935 if (chan
->buffer_seq
== chan
->last_acked_seq
)
1939 /* Ack now if the window is 3/4ths full.
1940 * Calculate without mul or div
1942 threshold
= chan
->ack_win
;
1943 threshold
+= threshold
<< 1;
1946 BT_DBG("frames_to_ack %u, threshold %d", frames_to_ack
,
1949 if (frames_to_ack
>= threshold
) {
1950 __clear_ack_timer(chan
);
1951 control
.super
= L2CAP_SUPER_RR
;
1952 control
.reqseq
= chan
->buffer_seq
;
1953 l2cap_send_sframe(chan
, &control
);
1958 __set_ack_timer(chan
);
1962 static inline int l2cap_skbuff_fromiovec(struct l2cap_chan
*chan
,
1963 struct msghdr
*msg
, int len
,
1964 int count
, struct sk_buff
*skb
)
1966 struct l2cap_conn
*conn
= chan
->conn
;
1967 struct sk_buff
**frag
;
1970 if (memcpy_fromiovec(skb_put(skb
, count
), msg
->msg_iov
, count
))
1976 /* Continuation fragments (no L2CAP header) */
1977 frag
= &skb_shinfo(skb
)->frag_list
;
1979 struct sk_buff
*tmp
;
1981 count
= min_t(unsigned int, conn
->mtu
, len
);
1983 tmp
= chan
->ops
->alloc_skb(chan
, count
,
1984 msg
->msg_flags
& MSG_DONTWAIT
);
1986 return PTR_ERR(tmp
);
1990 if (memcpy_fromiovec(skb_put(*frag
, count
), msg
->msg_iov
, count
))
1993 (*frag
)->priority
= skb
->priority
;
1998 skb
->len
+= (*frag
)->len
;
1999 skb
->data_len
+= (*frag
)->len
;
2001 frag
= &(*frag
)->next
;
2007 static struct sk_buff
*l2cap_create_connless_pdu(struct l2cap_chan
*chan
,
2008 struct msghdr
*msg
, size_t len
,
2011 struct l2cap_conn
*conn
= chan
->conn
;
2012 struct sk_buff
*skb
;
2013 int err
, count
, hlen
= L2CAP_HDR_SIZE
+ L2CAP_PSMLEN_SIZE
;
2014 struct l2cap_hdr
*lh
;
2016 BT_DBG("chan %p len %zu priority %u", chan
, len
, priority
);
2018 count
= min_t(unsigned int, (conn
->mtu
- hlen
), len
);
2020 skb
= chan
->ops
->alloc_skb(chan
, count
+ hlen
,
2021 msg
->msg_flags
& MSG_DONTWAIT
);
2025 skb
->priority
= priority
;
2027 /* Create L2CAP header */
2028 lh
= (struct l2cap_hdr
*) skb_put(skb
, L2CAP_HDR_SIZE
);
2029 lh
->cid
= cpu_to_le16(chan
->dcid
);
2030 lh
->len
= cpu_to_le16(len
+ L2CAP_PSMLEN_SIZE
);
2031 put_unaligned(chan
->psm
, skb_put(skb
, L2CAP_PSMLEN_SIZE
));
2033 err
= l2cap_skbuff_fromiovec(chan
, msg
, len
, count
, skb
);
2034 if (unlikely(err
< 0)) {
2036 return ERR_PTR(err
);
2041 static struct sk_buff
*l2cap_create_basic_pdu(struct l2cap_chan
*chan
,
2042 struct msghdr
*msg
, size_t len
,
2045 struct l2cap_conn
*conn
= chan
->conn
;
2046 struct sk_buff
*skb
;
2048 struct l2cap_hdr
*lh
;
2050 BT_DBG("chan %p len %zu", chan
, len
);
2052 count
= min_t(unsigned int, (conn
->mtu
- L2CAP_HDR_SIZE
), len
);
2054 skb
= chan
->ops
->alloc_skb(chan
, count
+ L2CAP_HDR_SIZE
,
2055 msg
->msg_flags
& MSG_DONTWAIT
);
2059 skb
->priority
= priority
;
2061 /* Create L2CAP header */
2062 lh
= (struct l2cap_hdr
*) skb_put(skb
, L2CAP_HDR_SIZE
);
2063 lh
->cid
= cpu_to_le16(chan
->dcid
);
2064 lh
->len
= cpu_to_le16(len
);
2066 err
= l2cap_skbuff_fromiovec(chan
, msg
, len
, count
, skb
);
2067 if (unlikely(err
< 0)) {
2069 return ERR_PTR(err
);
2074 static struct sk_buff
*l2cap_create_iframe_pdu(struct l2cap_chan
*chan
,
2075 struct msghdr
*msg
, size_t len
,
2078 struct l2cap_conn
*conn
= chan
->conn
;
2079 struct sk_buff
*skb
;
2080 int err
, count
, hlen
;
2081 struct l2cap_hdr
*lh
;
2083 BT_DBG("chan %p len %zu", chan
, len
);
2086 return ERR_PTR(-ENOTCONN
);
2088 hlen
= __ertm_hdr_size(chan
);
2091 hlen
+= L2CAP_SDULEN_SIZE
;
2093 if (chan
->fcs
== L2CAP_FCS_CRC16
)
2094 hlen
+= L2CAP_FCS_SIZE
;
2096 count
= min_t(unsigned int, (conn
->mtu
- hlen
), len
);
2098 skb
= chan
->ops
->alloc_skb(chan
, count
+ hlen
,
2099 msg
->msg_flags
& MSG_DONTWAIT
);
2103 /* Create L2CAP header */
2104 lh
= (struct l2cap_hdr
*) skb_put(skb
, L2CAP_HDR_SIZE
);
2105 lh
->cid
= cpu_to_le16(chan
->dcid
);
2106 lh
->len
= cpu_to_le16(len
+ (hlen
- L2CAP_HDR_SIZE
));
2108 /* Control header is populated later */
2109 if (test_bit(FLAG_EXT_CTRL
, &chan
->flags
))
2110 put_unaligned_le32(0, skb_put(skb
, L2CAP_EXT_CTRL_SIZE
));
2112 put_unaligned_le16(0, skb_put(skb
, L2CAP_ENH_CTRL_SIZE
));
2115 put_unaligned_le16(sdulen
, skb_put(skb
, L2CAP_SDULEN_SIZE
));
2117 err
= l2cap_skbuff_fromiovec(chan
, msg
, len
, count
, skb
);
2118 if (unlikely(err
< 0)) {
2120 return ERR_PTR(err
);
2123 bt_cb(skb
)->control
.fcs
= chan
->fcs
;
2124 bt_cb(skb
)->control
.retries
= 0;
2128 static int l2cap_segment_sdu(struct l2cap_chan
*chan
,
2129 struct sk_buff_head
*seg_queue
,
2130 struct msghdr
*msg
, size_t len
)
2132 struct sk_buff
*skb
;
2137 BT_DBG("chan %p, msg %p, len %zu", chan
, msg
, len
);
2139 /* It is critical that ERTM PDUs fit in a single HCI fragment,
2140 * so fragmented skbs are not used. The HCI layer's handling
2141 * of fragmented skbs is not compatible with ERTM's queueing.
2144 /* PDU size is derived from the HCI MTU */
2145 pdu_len
= chan
->conn
->mtu
;
2147 pdu_len
= min_t(size_t, pdu_len
, L2CAP_BREDR_MAX_PAYLOAD
);
2149 /* Adjust for largest possible L2CAP overhead. */
2151 pdu_len
-= L2CAP_FCS_SIZE
;
2153 pdu_len
-= __ertm_hdr_size(chan
);
2155 /* Remote device may have requested smaller PDUs */
2156 pdu_len
= min_t(size_t, pdu_len
, chan
->remote_mps
);
2158 if (len
<= pdu_len
) {
2159 sar
= L2CAP_SAR_UNSEGMENTED
;
2163 sar
= L2CAP_SAR_START
;
2165 pdu_len
-= L2CAP_SDULEN_SIZE
;
2169 skb
= l2cap_create_iframe_pdu(chan
, msg
, pdu_len
, sdu_len
);
2172 __skb_queue_purge(seg_queue
);
2173 return PTR_ERR(skb
);
2176 bt_cb(skb
)->control
.sar
= sar
;
2177 __skb_queue_tail(seg_queue
, skb
);
2182 pdu_len
+= L2CAP_SDULEN_SIZE
;
2185 if (len
<= pdu_len
) {
2186 sar
= L2CAP_SAR_END
;
2189 sar
= L2CAP_SAR_CONTINUE
;
2196 int l2cap_chan_send(struct l2cap_chan
*chan
, struct msghdr
*msg
, size_t len
,
2199 struct sk_buff
*skb
;
2201 struct sk_buff_head seg_queue
;
2203 /* Connectionless channel */
2204 if (chan
->chan_type
== L2CAP_CHAN_CONN_LESS
) {
2205 skb
= l2cap_create_connless_pdu(chan
, msg
, len
, priority
);
2207 return PTR_ERR(skb
);
2209 l2cap_do_send(chan
, skb
);
2213 switch (chan
->mode
) {
2214 case L2CAP_MODE_BASIC
:
2215 /* Check outgoing MTU */
2216 if (len
> chan
->omtu
)
2219 /* Create a basic PDU */
2220 skb
= l2cap_create_basic_pdu(chan
, msg
, len
, priority
);
2222 return PTR_ERR(skb
);
2224 l2cap_do_send(chan
, skb
);
2228 case L2CAP_MODE_ERTM
:
2229 case L2CAP_MODE_STREAMING
:
2230 /* Check outgoing MTU */
2231 if (len
> chan
->omtu
) {
2236 __skb_queue_head_init(&seg_queue
);
2238 /* Do segmentation before calling in to the state machine,
2239 * since it's possible to block while waiting for memory
2242 err
= l2cap_segment_sdu(chan
, &seg_queue
, msg
, len
);
2244 /* The channel could have been closed while segmenting,
2245 * check that it is still connected.
2247 if (chan
->state
!= BT_CONNECTED
) {
2248 __skb_queue_purge(&seg_queue
);
2255 if (chan
->mode
== L2CAP_MODE_ERTM
)
2256 l2cap_tx(chan
, NULL
, &seg_queue
, L2CAP_EV_DATA_REQUEST
);
2258 l2cap_streaming_send(chan
, &seg_queue
);
2262 /* If the skbs were not queued for sending, they'll still be in
2263 * seg_queue and need to be purged.
2265 __skb_queue_purge(&seg_queue
);
2269 BT_DBG("bad state %1.1x", chan
->mode
);
/* Send SREJ S-frames for every sequence number from the channel's
 * expected_tx_seq up to (but not including) @txseq that has not
 * already been buffered in srej_q, recording each requested seq on
 * srej_list.  Afterwards, expect the frame following @txseq next.
 * NOTE(review): fragmented extraction — the declaration of the loop
 * variable "seq" and closing braces are missing from this view.
 */
static void l2cap_send_srej(struct l2cap_chan
*chan
, u16 txseq
)
struct l2cap_ctrl control
;
BT_DBG("chan %p, txseq %u", chan
, txseq
);
memset(&control
, 0, sizeof(control
));
control
.super
= L2CAP_SUPER_SREJ
;
/* Walk the gap between what we expected and what actually arrived. */
for (seq
= chan
->expected_tx_seq
; seq
!= txseq
;
seq
= __next_seq(chan
, seq
)) {
/* Skip frames already received out of order and buffered. */
if (!l2cap_ertm_seq_in_queue(&chan
->srej_q
, seq
)) {
control
.reqseq
= seq
;
l2cap_send_sframe(chan
, &control
);
/* Remember what we asked for so replies can be matched. */
l2cap_seq_list_append(&chan
->srej_list
, seq
);
chan
->expected_tx_seq
= __next_seq(chan
, txseq
);
/* Re-send an SREJ S-frame for the most recently requested missing
 * frame (the tail of srej_list).  No-op when the list is empty.
 * NOTE(review): fragmented extraction — function braces and the body
 * of the early-return are missing from this view.
 */
static void l2cap_send_srej_tail(struct l2cap_chan
*chan
)
struct l2cap_ctrl control
;
BT_DBG("chan %p", chan
);
/* Nothing outstanding to re-request. */
if (chan
->srej_list
.tail
== L2CAP_SEQ_LIST_CLEAR
)
memset(&control
, 0, sizeof(control
));
control
.super
= L2CAP_SUPER_SREJ
;
control
.reqseq
= chan
->srej_list
.tail
;
l2cap_send_sframe(chan
, &control
);
2315 static void l2cap_send_srej_list(struct l2cap_chan
*chan
, u16 txseq
)
2317 struct l2cap_ctrl control
;
2321 BT_DBG("chan %p, txseq %u", chan
, txseq
);
2323 memset(&control
, 0, sizeof(control
));
2325 control
.super
= L2CAP_SUPER_SREJ
;
2327 /* Capture initial list head to allow only one pass through the list. */
2328 initial_head
= chan
->srej_list
.head
;
2331 seq
= l2cap_seq_list_pop(&chan
->srej_list
);
2332 if (seq
== txseq
|| seq
== L2CAP_SEQ_LIST_CLEAR
)
2335 control
.reqseq
= seq
;
2336 l2cap_send_sframe(chan
, &control
);
2337 l2cap_seq_list_append(&chan
->srej_list
, seq
);
2338 } while (chan
->srej_list
.head
!= initial_head
);
2341 static void l2cap_process_reqseq(struct l2cap_chan
*chan
, u16 reqseq
)
2343 struct sk_buff
*acked_skb
;
2346 BT_DBG("chan %p, reqseq %u", chan
, reqseq
);
2348 if (chan
->unacked_frames
== 0 || reqseq
== chan
->expected_ack_seq
)
2351 BT_DBG("expected_ack_seq %u, unacked_frames %u",
2352 chan
->expected_ack_seq
, chan
->unacked_frames
);
2354 for (ackseq
= chan
->expected_ack_seq
; ackseq
!= reqseq
;
2355 ackseq
= __next_seq(chan
, ackseq
)) {
2357 acked_skb
= l2cap_ertm_seq_in_queue(&chan
->tx_q
, ackseq
);
2359 skb_unlink(acked_skb
, &chan
->tx_q
);
2360 kfree_skb(acked_skb
);
2361 chan
->unacked_frames
--;
2365 chan
->expected_ack_seq
= reqseq
;
2367 if (chan
->unacked_frames
== 0)
2368 __clear_retrans_timer(chan
);
2370 BT_DBG("unacked_frames %u", chan
->unacked_frames
);
/* Abandon SREJ-based recovery: forget which frames were requested,
 * drop any out-of-order frames buffered while waiting, rewind the
 * expected sequence to the last in-order point (buffer_seq), and fall
 * back to the plain RECV rx state.
 */
static void l2cap_abort_rx_srej_sent(struct l2cap_chan
*chan
)
BT_DBG("chan %p", chan
);
chan
->expected_tx_seq
= chan
->buffer_seq
;
l2cap_seq_list_clear(&chan
->srej_list
);
skb_queue_purge(&chan
->srej_q
);
chan
->rx_state
= L2CAP_RX_STATE_RECV
;
2383 static void l2cap_tx_state_xmit(struct l2cap_chan
*chan
,
2384 struct l2cap_ctrl
*control
,
2385 struct sk_buff_head
*skbs
, u8 event
)
2387 BT_DBG("chan %p, control %p, skbs %p, event %d", chan
, control
, skbs
,
2391 case L2CAP_EV_DATA_REQUEST
:
2392 if (chan
->tx_send_head
== NULL
)
2393 chan
->tx_send_head
= skb_peek(skbs
);
2395 skb_queue_splice_tail_init(skbs
, &chan
->tx_q
);
2396 l2cap_ertm_send(chan
);
2398 case L2CAP_EV_LOCAL_BUSY_DETECTED
:
2399 BT_DBG("Enter LOCAL_BUSY");
2400 set_bit(CONN_LOCAL_BUSY
, &chan
->conn_state
);
2402 if (chan
->rx_state
== L2CAP_RX_STATE_SREJ_SENT
) {
2403 /* The SREJ_SENT state must be aborted if we are to
2404 * enter the LOCAL_BUSY state.
2406 l2cap_abort_rx_srej_sent(chan
);
2409 l2cap_send_ack(chan
);
2412 case L2CAP_EV_LOCAL_BUSY_CLEAR
:
2413 BT_DBG("Exit LOCAL_BUSY");
2414 clear_bit(CONN_LOCAL_BUSY
, &chan
->conn_state
);
2416 if (test_bit(CONN_RNR_SENT
, &chan
->conn_state
)) {
2417 struct l2cap_ctrl local_control
;
2419 memset(&local_control
, 0, sizeof(local_control
));
2420 local_control
.sframe
= 1;
2421 local_control
.super
= L2CAP_SUPER_RR
;
2422 local_control
.poll
= 1;
2423 local_control
.reqseq
= chan
->buffer_seq
;
2424 l2cap_send_sframe(chan
, &local_control
);
2426 chan
->retry_count
= 1;
2427 __set_monitor_timer(chan
);
2428 chan
->tx_state
= L2CAP_TX_STATE_WAIT_F
;
2431 case L2CAP_EV_RECV_REQSEQ_AND_FBIT
:
2432 l2cap_process_reqseq(chan
, control
->reqseq
);
2434 case L2CAP_EV_EXPLICIT_POLL
:
2435 l2cap_send_rr_or_rnr(chan
, 1);
2436 chan
->retry_count
= 1;
2437 __set_monitor_timer(chan
);
2438 __clear_ack_timer(chan
);
2439 chan
->tx_state
= L2CAP_TX_STATE_WAIT_F
;
2441 case L2CAP_EV_RETRANS_TO
:
2442 l2cap_send_rr_or_rnr(chan
, 1);
2443 chan
->retry_count
= 1;
2444 __set_monitor_timer(chan
);
2445 chan
->tx_state
= L2CAP_TX_STATE_WAIT_F
;
2447 case L2CAP_EV_RECV_FBIT
:
2448 /* Nothing to process */
2455 static void l2cap_tx_state_wait_f(struct l2cap_chan
*chan
,
2456 struct l2cap_ctrl
*control
,
2457 struct sk_buff_head
*skbs
, u8 event
)
2459 BT_DBG("chan %p, control %p, skbs %p, event %d", chan
, control
, skbs
,
2463 case L2CAP_EV_DATA_REQUEST
:
2464 if (chan
->tx_send_head
== NULL
)
2465 chan
->tx_send_head
= skb_peek(skbs
);
2466 /* Queue data, but don't send. */
2467 skb_queue_splice_tail_init(skbs
, &chan
->tx_q
);
2469 case L2CAP_EV_LOCAL_BUSY_DETECTED
:
2470 BT_DBG("Enter LOCAL_BUSY");
2471 set_bit(CONN_LOCAL_BUSY
, &chan
->conn_state
);
2473 if (chan
->rx_state
== L2CAP_RX_STATE_SREJ_SENT
) {
2474 /* The SREJ_SENT state must be aborted if we are to
2475 * enter the LOCAL_BUSY state.
2477 l2cap_abort_rx_srej_sent(chan
);
2480 l2cap_send_ack(chan
);
2483 case L2CAP_EV_LOCAL_BUSY_CLEAR
:
2484 BT_DBG("Exit LOCAL_BUSY");
2485 clear_bit(CONN_LOCAL_BUSY
, &chan
->conn_state
);
2487 if (test_bit(CONN_RNR_SENT
, &chan
->conn_state
)) {
2488 struct l2cap_ctrl local_control
;
2489 memset(&local_control
, 0, sizeof(local_control
));
2490 local_control
.sframe
= 1;
2491 local_control
.super
= L2CAP_SUPER_RR
;
2492 local_control
.poll
= 1;
2493 local_control
.reqseq
= chan
->buffer_seq
;
2494 l2cap_send_sframe(chan
, &local_control
);
2496 chan
->retry_count
= 1;
2497 __set_monitor_timer(chan
);
2498 chan
->tx_state
= L2CAP_TX_STATE_WAIT_F
;
2501 case L2CAP_EV_RECV_REQSEQ_AND_FBIT
:
2502 l2cap_process_reqseq(chan
, control
->reqseq
);
2506 case L2CAP_EV_RECV_FBIT
:
2507 if (control
&& control
->final
) {
2508 __clear_monitor_timer(chan
);
2509 if (chan
->unacked_frames
> 0)
2510 __set_retrans_timer(chan
);
2511 chan
->retry_count
= 0;
2512 chan
->tx_state
= L2CAP_TX_STATE_XMIT
;
2513 BT_DBG("recv fbit tx_state 0x2.2%x", chan
->tx_state
);
2516 case L2CAP_EV_EXPLICIT_POLL
:
2519 case L2CAP_EV_MONITOR_TO
:
2520 if (chan
->max_tx
== 0 || chan
->retry_count
< chan
->max_tx
) {
2521 l2cap_send_rr_or_rnr(chan
, 1);
2522 __set_monitor_timer(chan
);
2523 chan
->retry_count
++;
2525 l2cap_send_disconn_req(chan
->conn
, chan
, ECONNABORTED
);
2533 static void l2cap_tx(struct l2cap_chan
*chan
, struct l2cap_ctrl
*control
,
2534 struct sk_buff_head
*skbs
, u8 event
)
2536 BT_DBG("chan %p, control %p, skbs %p, event %d, state %d",
2537 chan
, control
, skbs
, event
, chan
->tx_state
);
2539 switch (chan
->tx_state
) {
2540 case L2CAP_TX_STATE_XMIT
:
2541 l2cap_tx_state_xmit(chan
, control
, skbs
, event
);
2543 case L2CAP_TX_STATE_WAIT_F
:
2544 l2cap_tx_state_wait_f(chan
, control
, skbs
, event
);
/* Forward a received frame's reqseq/F-bit information to the tx state
 * machine as an L2CAP_EV_RECV_REQSEQ_AND_FBIT event (no data skbs).
 */
static void l2cap_pass_to_tx(struct l2cap_chan
*chan
,
struct l2cap_ctrl
*control
)
BT_DBG("chan %p, control %p", chan
, control
);
l2cap_tx(chan
, control
, NULL
, L2CAP_EV_RECV_REQSEQ_AND_FBIT
);
/* Forward only a received frame's F-bit to the tx state machine as an
 * L2CAP_EV_RECV_FBIT event (no data skbs).
 */
static void l2cap_pass_to_tx_fbit(struct l2cap_chan
*chan
,
struct l2cap_ctrl
*control
)
BT_DBG("chan %p, control %p", chan
, control
);
l2cap_tx(chan
, control
, NULL
, L2CAP_EV_RECV_FBIT
);
2566 /* Copy frame to all raw sockets on that connection */
2567 static void l2cap_raw_recv(struct l2cap_conn
*conn
, struct sk_buff
*skb
)
2569 struct sk_buff
*nskb
;
2570 struct l2cap_chan
*chan
;
2572 BT_DBG("conn %p", conn
);
2574 mutex_lock(&conn
->chan_lock
);
2576 list_for_each_entry(chan
, &conn
->chan_l
, list
) {
2577 struct sock
*sk
= chan
->sk
;
2578 if (chan
->chan_type
!= L2CAP_CHAN_RAW
)
2581 /* Don't send frame to the socket it came from */
2584 nskb
= skb_clone(skb
, GFP_KERNEL
);
2588 if (chan
->ops
->recv(chan
, nskb
))
2592 mutex_unlock(&conn
->chan_lock
);
2595 /* ---- L2CAP signalling commands ---- */
2596 static struct sk_buff
*l2cap_build_cmd(struct l2cap_conn
*conn
, u8 code
,
2597 u8 ident
, u16 dlen
, void *data
)
2599 struct sk_buff
*skb
, **frag
;
2600 struct l2cap_cmd_hdr
*cmd
;
2601 struct l2cap_hdr
*lh
;
2604 BT_DBG("conn %p, code 0x%2.2x, ident 0x%2.2x, len %u",
2605 conn
, code
, ident
, dlen
);
2607 len
= L2CAP_HDR_SIZE
+ L2CAP_CMD_HDR_SIZE
+ dlen
;
2608 count
= min_t(unsigned int, conn
->mtu
, len
);
2610 skb
= bt_skb_alloc(count
, GFP_KERNEL
);
2614 lh
= (struct l2cap_hdr
*) skb_put(skb
, L2CAP_HDR_SIZE
);
2615 lh
->len
= cpu_to_le16(L2CAP_CMD_HDR_SIZE
+ dlen
);
2617 if (conn
->hcon
->type
== LE_LINK
)
2618 lh
->cid
= __constant_cpu_to_le16(L2CAP_CID_LE_SIGNALING
);
2620 lh
->cid
= __constant_cpu_to_le16(L2CAP_CID_SIGNALING
);
2622 cmd
= (struct l2cap_cmd_hdr
*) skb_put(skb
, L2CAP_CMD_HDR_SIZE
);
2625 cmd
->len
= cpu_to_le16(dlen
);
2628 count
-= L2CAP_HDR_SIZE
+ L2CAP_CMD_HDR_SIZE
;
2629 memcpy(skb_put(skb
, count
), data
, count
);
2635 /* Continuation fragments (no L2CAP header) */
2636 frag
= &skb_shinfo(skb
)->frag_list
;
2638 count
= min_t(unsigned int, conn
->mtu
, len
);
2640 *frag
= bt_skb_alloc(count
, GFP_KERNEL
);
2644 memcpy(skb_put(*frag
, count
), data
, count
);
2649 frag
= &(*frag
)->next
;
2659 static inline int l2cap_get_conf_opt(void **ptr
, int *type
, int *olen
,
2662 struct l2cap_conf_opt
*opt
= *ptr
;
2665 len
= L2CAP_CONF_OPT_SIZE
+ opt
->len
;
2673 *val
= *((u8
*) opt
->val
);
2677 *val
= get_unaligned_le16(opt
->val
);
2681 *val
= get_unaligned_le32(opt
->val
);
2685 *val
= (unsigned long) opt
->val
;
2689 BT_DBG("type 0x%2.2x len %u val 0x%lx", *type
, opt
->len
, *val
);
2693 static void l2cap_add_conf_opt(void **ptr
, u8 type
, u8 len
, unsigned long val
)
2695 struct l2cap_conf_opt
*opt
= *ptr
;
2697 BT_DBG("type 0x%2.2x len %u val 0x%lx", type
, len
, val
);
2704 *((u8
*) opt
->val
) = val
;
2708 put_unaligned_le16(val
, opt
->val
);
2712 put_unaligned_le32(val
, opt
->val
);
2716 memcpy(opt
->val
, (void *) val
, len
);
2720 *ptr
+= L2CAP_CONF_OPT_SIZE
+ len
;
2723 static void l2cap_add_opt_efs(void **ptr
, struct l2cap_chan
*chan
)
2725 struct l2cap_conf_efs efs
;
2727 switch (chan
->mode
) {
2728 case L2CAP_MODE_ERTM
:
2729 efs
.id
= chan
->local_id
;
2730 efs
.stype
= chan
->local_stype
;
2731 efs
.msdu
= cpu_to_le16(chan
->local_msdu
);
2732 efs
.sdu_itime
= cpu_to_le32(chan
->local_sdu_itime
);
2733 efs
.acc_lat
= __constant_cpu_to_le32(L2CAP_DEFAULT_ACC_LAT
);
2734 efs
.flush_to
= __constant_cpu_to_le32(L2CAP_EFS_DEFAULT_FLUSH_TO
);
2737 case L2CAP_MODE_STREAMING
:
2739 efs
.stype
= L2CAP_SERV_BESTEFFORT
;
2740 efs
.msdu
= cpu_to_le16(chan
->local_msdu
);
2741 efs
.sdu_itime
= cpu_to_le32(chan
->local_sdu_itime
);
2750 l2cap_add_conf_opt(ptr
, L2CAP_CONF_EFS
, sizeof(efs
),
2751 (unsigned long) &efs
);
/* Delayed-work callback for chan->ack_timer: if frames received since
 * the last acknowledgement are still unacked, send an RR/RNR S-frame.
 * NOTE(review): fragmented extraction — the "ack_timer.work)" tail of
 * container_of, the declaration of frames_to_ack, and the conditional
 * guarding l2cap_send_rr_or_rnr are missing from this view.
 */
static void l2cap_ack_timeout(struct work_struct
*work
)
struct l2cap_chan
*chan
= container_of(work
, struct l2cap_chan
,
BT_DBG("chan %p", chan
);
l2cap_chan_lock(chan
);
/* Distance (mod seq space) between received and acknowledged frames. */
frames_to_ack
= __seq_offset(chan
, chan
->buffer_seq
,
chan
->last_acked_seq
);
/* Acknowledge without a poll bit. */
l2cap_send_rr_or_rnr(chan
, 0);
l2cap_chan_unlock(chan
);
/* Drop the reference the timer held on the channel. */
l2cap_chan_put(chan
);
2774 int l2cap_ertm_init(struct l2cap_chan
*chan
)
2778 chan
->next_tx_seq
= 0;
2779 chan
->expected_tx_seq
= 0;
2780 chan
->expected_ack_seq
= 0;
2781 chan
->unacked_frames
= 0;
2782 chan
->buffer_seq
= 0;
2783 chan
->frames_sent
= 0;
2784 chan
->last_acked_seq
= 0;
2786 chan
->sdu_last_frag
= NULL
;
2789 skb_queue_head_init(&chan
->tx_q
);
2791 chan
->local_amp_id
= 0;
2793 chan
->move_state
= L2CAP_MOVE_STABLE
;
2794 chan
->move_role
= L2CAP_MOVE_ROLE_NONE
;
2796 if (chan
->mode
!= L2CAP_MODE_ERTM
)
2799 chan
->rx_state
= L2CAP_RX_STATE_RECV
;
2800 chan
->tx_state
= L2CAP_TX_STATE_XMIT
;
2802 INIT_DELAYED_WORK(&chan
->retrans_timer
, l2cap_retrans_timeout
);
2803 INIT_DELAYED_WORK(&chan
->monitor_timer
, l2cap_monitor_timeout
);
2804 INIT_DELAYED_WORK(&chan
->ack_timer
, l2cap_ack_timeout
);
2806 skb_queue_head_init(&chan
->srej_q
);
2808 err
= l2cap_seq_list_init(&chan
->srej_list
, chan
->tx_win
);
2812 err
= l2cap_seq_list_init(&chan
->retrans_list
, chan
->remote_tx_win
);
2814 l2cap_seq_list_free(&chan
->srej_list
);
/* Pick a usable channel mode: keep STREAMING/ERTM when the remote's
 * feature mask supports it, otherwise fall back to BASIC mode.
 * NOTE(review): fragmented extraction — the switch statement line and
 * the "return mode;" of the supported case are missing from this view.
 */
static inline __u8
l2cap_select_mode(__u8 mode
, __u16 remote_feat_mask
)
case L2CAP_MODE_STREAMING
:
case L2CAP_MODE_ERTM
:
/* Remote advertised support for the requested mode: keep it. */
if (l2cap_mode_supported(mode
, remote_feat_mask
))
/* Default fallback when the mode cannot be used. */
return L2CAP_MODE_BASIC
;
/* Extended Window Size is usable only when high-speed support is
 * enabled (enable_hs) and the connection's feature mask advertises
 * L2CAP_FEAT_EXT_WINDOW.
 */
static inline bool __l2cap_ews_supported(struct l2cap_chan
*chan
)
return enable_hs
&& chan
->conn
->feat_mask
& L2CAP_FEAT_EXT_WINDOW
;
/* Extended Flow Specification is usable only when high-speed support
 * is enabled (enable_hs) and the connection's feature mask advertises
 * L2CAP_FEAT_EXT_FLOW.
 */
static inline bool __l2cap_efs_supported(struct l2cap_chan
*chan
)
return enable_hs
&& chan
->conn
->feat_mask
& L2CAP_FEAT_EXT_FLOW
;
/* Configure the transmit window: when the requested window exceeds the
 * default and extended window sizes are supported, switch the channel
 * to the extended control field; otherwise clamp tx_win to the default
 * window.  Either way, the ack window mirrors the final tx window.
 * NOTE(review): fragmented extraction — the else branch marker and
 * closing braces are missing from this view.
 */
static inline void l2cap_txwin_setup(struct l2cap_chan
*chan
)
if (chan
->tx_win
> L2CAP_DEFAULT_TX_WINDOW
&&
__l2cap_ews_supported(chan
)) {
/* use extended control field */
set_bit(FLAG_EXT_CTRL
, &chan
->flags
);
chan
->tx_win_max
= L2CAP_DEFAULT_EXT_WINDOW
;
/* Fallback: clamp the window to the enhanced-control maximum. */
chan
->tx_win
= min_t(u16
, chan
->tx_win
,
L2CAP_DEFAULT_TX_WINDOW
);
chan
->tx_win_max
= L2CAP_DEFAULT_TX_WINDOW
;
chan
->ack_win
= chan
->tx_win
;
2857 static int l2cap_build_conf_req(struct l2cap_chan
*chan
, void *data
)
2859 struct l2cap_conf_req
*req
= data
;
2860 struct l2cap_conf_rfc rfc
= { .mode
= chan
->mode
};
2861 void *ptr
= req
->data
;
2864 BT_DBG("chan %p", chan
);
2866 if (chan
->num_conf_req
|| chan
->num_conf_rsp
)
2869 switch (chan
->mode
) {
2870 case L2CAP_MODE_STREAMING
:
2871 case L2CAP_MODE_ERTM
:
2872 if (test_bit(CONF_STATE2_DEVICE
, &chan
->conf_state
))
2875 if (__l2cap_efs_supported(chan
))
2876 set_bit(FLAG_EFS_ENABLE
, &chan
->flags
);
2880 chan
->mode
= l2cap_select_mode(rfc
.mode
, chan
->conn
->feat_mask
);
2885 if (chan
->imtu
!= L2CAP_DEFAULT_MTU
)
2886 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_MTU
, 2, chan
->imtu
);
2888 switch (chan
->mode
) {
2889 case L2CAP_MODE_BASIC
:
2890 if (!(chan
->conn
->feat_mask
& L2CAP_FEAT_ERTM
) &&
2891 !(chan
->conn
->feat_mask
& L2CAP_FEAT_STREAMING
))
2894 rfc
.mode
= L2CAP_MODE_BASIC
;
2896 rfc
.max_transmit
= 0;
2897 rfc
.retrans_timeout
= 0;
2898 rfc
.monitor_timeout
= 0;
2899 rfc
.max_pdu_size
= 0;
2901 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_RFC
, sizeof(rfc
),
2902 (unsigned long) &rfc
);
2905 case L2CAP_MODE_ERTM
:
2906 rfc
.mode
= L2CAP_MODE_ERTM
;
2907 rfc
.max_transmit
= chan
->max_tx
;
2908 rfc
.retrans_timeout
= 0;
2909 rfc
.monitor_timeout
= 0;
2911 size
= min_t(u16
, L2CAP_DEFAULT_MAX_PDU_SIZE
, chan
->conn
->mtu
-
2912 L2CAP_EXT_HDR_SIZE
- L2CAP_SDULEN_SIZE
-
2914 rfc
.max_pdu_size
= cpu_to_le16(size
);
2916 l2cap_txwin_setup(chan
);
2918 rfc
.txwin_size
= min_t(u16
, chan
->tx_win
,
2919 L2CAP_DEFAULT_TX_WINDOW
);
2921 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_RFC
, sizeof(rfc
),
2922 (unsigned long) &rfc
);
2924 if (test_bit(FLAG_EFS_ENABLE
, &chan
->flags
))
2925 l2cap_add_opt_efs(&ptr
, chan
);
2927 if (!(chan
->conn
->feat_mask
& L2CAP_FEAT_FCS
))
2930 if (chan
->fcs
== L2CAP_FCS_NONE
||
2931 test_bit(CONF_NO_FCS_RECV
, &chan
->conf_state
)) {
2932 chan
->fcs
= L2CAP_FCS_NONE
;
2933 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_FCS
, 1, chan
->fcs
);
2936 if (test_bit(FLAG_EXT_CTRL
, &chan
->flags
))
2937 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_EWS
, 2,
2941 case L2CAP_MODE_STREAMING
:
2942 l2cap_txwin_setup(chan
);
2943 rfc
.mode
= L2CAP_MODE_STREAMING
;
2945 rfc
.max_transmit
= 0;
2946 rfc
.retrans_timeout
= 0;
2947 rfc
.monitor_timeout
= 0;
2949 size
= min_t(u16
, L2CAP_DEFAULT_MAX_PDU_SIZE
, chan
->conn
->mtu
-
2950 L2CAP_EXT_HDR_SIZE
- L2CAP_SDULEN_SIZE
-
2952 rfc
.max_pdu_size
= cpu_to_le16(size
);
2954 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_RFC
, sizeof(rfc
),
2955 (unsigned long) &rfc
);
2957 if (test_bit(FLAG_EFS_ENABLE
, &chan
->flags
))
2958 l2cap_add_opt_efs(&ptr
, chan
);
2960 if (!(chan
->conn
->feat_mask
& L2CAP_FEAT_FCS
))
2963 if (chan
->fcs
== L2CAP_FCS_NONE
||
2964 test_bit(CONF_NO_FCS_RECV
, &chan
->conf_state
)) {
2965 chan
->fcs
= L2CAP_FCS_NONE
;
2966 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_FCS
, 1, chan
->fcs
);
2971 req
->dcid
= cpu_to_le16(chan
->dcid
);
2972 req
->flags
= __constant_cpu_to_le16(0);
2977 static int l2cap_parse_conf_req(struct l2cap_chan
*chan
, void *data
)
2979 struct l2cap_conf_rsp
*rsp
= data
;
2980 void *ptr
= rsp
->data
;
2981 void *req
= chan
->conf_req
;
2982 int len
= chan
->conf_len
;
2983 int type
, hint
, olen
;
2985 struct l2cap_conf_rfc rfc
= { .mode
= L2CAP_MODE_BASIC
};
2986 struct l2cap_conf_efs efs
;
2988 u16 mtu
= L2CAP_DEFAULT_MTU
;
2989 u16 result
= L2CAP_CONF_SUCCESS
;
2992 BT_DBG("chan %p", chan
);
2994 while (len
>= L2CAP_CONF_OPT_SIZE
) {
2995 len
-= l2cap_get_conf_opt(&req
, &type
, &olen
, &val
);
2997 hint
= type
& L2CAP_CONF_HINT
;
2998 type
&= L2CAP_CONF_MASK
;
3001 case L2CAP_CONF_MTU
:
3005 case L2CAP_CONF_FLUSH_TO
:
3006 chan
->flush_to
= val
;
3009 case L2CAP_CONF_QOS
:
3012 case L2CAP_CONF_RFC
:
3013 if (olen
== sizeof(rfc
))
3014 memcpy(&rfc
, (void *) val
, olen
);
3017 case L2CAP_CONF_FCS
:
3018 if (val
== L2CAP_FCS_NONE
)
3019 set_bit(CONF_NO_FCS_RECV
, &chan
->conf_state
);
3022 case L2CAP_CONF_EFS
:
3024 if (olen
== sizeof(efs
))
3025 memcpy(&efs
, (void *) val
, olen
);
3028 case L2CAP_CONF_EWS
:
3030 return -ECONNREFUSED
;
3032 set_bit(FLAG_EXT_CTRL
, &chan
->flags
);
3033 set_bit(CONF_EWS_RECV
, &chan
->conf_state
);
3034 chan
->tx_win_max
= L2CAP_DEFAULT_EXT_WINDOW
;
3035 chan
->remote_tx_win
= val
;
3042 result
= L2CAP_CONF_UNKNOWN
;
3043 *((u8
*) ptr
++) = type
;
3048 if (chan
->num_conf_rsp
|| chan
->num_conf_req
> 1)
3051 switch (chan
->mode
) {
3052 case L2CAP_MODE_STREAMING
:
3053 case L2CAP_MODE_ERTM
:
3054 if (!test_bit(CONF_STATE2_DEVICE
, &chan
->conf_state
)) {
3055 chan
->mode
= l2cap_select_mode(rfc
.mode
,
3056 chan
->conn
->feat_mask
);
3061 if (__l2cap_efs_supported(chan
))
3062 set_bit(FLAG_EFS_ENABLE
, &chan
->flags
);
3064 return -ECONNREFUSED
;
3067 if (chan
->mode
!= rfc
.mode
)
3068 return -ECONNREFUSED
;
3074 if (chan
->mode
!= rfc
.mode
) {
3075 result
= L2CAP_CONF_UNACCEPT
;
3076 rfc
.mode
= chan
->mode
;
3078 if (chan
->num_conf_rsp
== 1)
3079 return -ECONNREFUSED
;
3081 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_RFC
, sizeof(rfc
),
3082 (unsigned long) &rfc
);
3085 if (result
== L2CAP_CONF_SUCCESS
) {
3086 /* Configure output options and let the other side know
3087 * which ones we don't like. */
3089 if (mtu
< L2CAP_DEFAULT_MIN_MTU
)
3090 result
= L2CAP_CONF_UNACCEPT
;
3093 set_bit(CONF_MTU_DONE
, &chan
->conf_state
);
3095 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_MTU
, 2, chan
->omtu
);
3098 if (chan
->local_stype
!= L2CAP_SERV_NOTRAFIC
&&
3099 efs
.stype
!= L2CAP_SERV_NOTRAFIC
&&
3100 efs
.stype
!= chan
->local_stype
) {
3102 result
= L2CAP_CONF_UNACCEPT
;
3104 if (chan
->num_conf_req
>= 1)
3105 return -ECONNREFUSED
;
3107 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_EFS
,
3109 (unsigned long) &efs
);
3111 /* Send PENDING Conf Rsp */
3112 result
= L2CAP_CONF_PENDING
;
3113 set_bit(CONF_LOC_CONF_PEND
, &chan
->conf_state
);
3118 case L2CAP_MODE_BASIC
:
3119 chan
->fcs
= L2CAP_FCS_NONE
;
3120 set_bit(CONF_MODE_DONE
, &chan
->conf_state
);
3123 case L2CAP_MODE_ERTM
:
3124 if (!test_bit(CONF_EWS_RECV
, &chan
->conf_state
))
3125 chan
->remote_tx_win
= rfc
.txwin_size
;
3127 rfc
.txwin_size
= L2CAP_DEFAULT_TX_WINDOW
;
3129 chan
->remote_max_tx
= rfc
.max_transmit
;
3131 size
= min_t(u16
, le16_to_cpu(rfc
.max_pdu_size
),
3132 chan
->conn
->mtu
- L2CAP_EXT_HDR_SIZE
-
3133 L2CAP_SDULEN_SIZE
- L2CAP_FCS_SIZE
);
3134 rfc
.max_pdu_size
= cpu_to_le16(size
);
3135 chan
->remote_mps
= size
;
3137 rfc
.retrans_timeout
=
3138 __constant_cpu_to_le16(L2CAP_DEFAULT_RETRANS_TO
);
3139 rfc
.monitor_timeout
=
3140 __constant_cpu_to_le16(L2CAP_DEFAULT_MONITOR_TO
);
3142 set_bit(CONF_MODE_DONE
, &chan
->conf_state
);
3144 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_RFC
,
3145 sizeof(rfc
), (unsigned long) &rfc
);
3147 if (test_bit(FLAG_EFS_ENABLE
, &chan
->flags
)) {
3148 chan
->remote_id
= efs
.id
;
3149 chan
->remote_stype
= efs
.stype
;
3150 chan
->remote_msdu
= le16_to_cpu(efs
.msdu
);
3151 chan
->remote_flush_to
=
3152 le32_to_cpu(efs
.flush_to
);
3153 chan
->remote_acc_lat
=
3154 le32_to_cpu(efs
.acc_lat
);
3155 chan
->remote_sdu_itime
=
3156 le32_to_cpu(efs
.sdu_itime
);
3157 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_EFS
,
3159 (unsigned long) &efs
);
3163 case L2CAP_MODE_STREAMING
:
3164 size
= min_t(u16
, le16_to_cpu(rfc
.max_pdu_size
),
3165 chan
->conn
->mtu
- L2CAP_EXT_HDR_SIZE
-
3166 L2CAP_SDULEN_SIZE
- L2CAP_FCS_SIZE
);
3167 rfc
.max_pdu_size
= cpu_to_le16(size
);
3168 chan
->remote_mps
= size
;
3170 set_bit(CONF_MODE_DONE
, &chan
->conf_state
);
3172 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_RFC
, sizeof(rfc
),
3173 (unsigned long) &rfc
);
3178 result
= L2CAP_CONF_UNACCEPT
;
3180 memset(&rfc
, 0, sizeof(rfc
));
3181 rfc
.mode
= chan
->mode
;
3184 if (result
== L2CAP_CONF_SUCCESS
)
3185 set_bit(CONF_OUTPUT_DONE
, &chan
->conf_state
);
3187 rsp
->scid
= cpu_to_le16(chan
->dcid
);
3188 rsp
->result
= cpu_to_le16(result
);
3189 rsp
->flags
= __constant_cpu_to_le16(0);
3194 static int l2cap_parse_conf_rsp(struct l2cap_chan
*chan
, void *rsp
, int len
,
3195 void *data
, u16
*result
)
3197 struct l2cap_conf_req
*req
= data
;
3198 void *ptr
= req
->data
;
3201 struct l2cap_conf_rfc rfc
= { .mode
= L2CAP_MODE_BASIC
};
3202 struct l2cap_conf_efs efs
;
3204 BT_DBG("chan %p, rsp %p, len %d, req %p", chan
, rsp
, len
, data
);
3206 while (len
>= L2CAP_CONF_OPT_SIZE
) {
3207 len
-= l2cap_get_conf_opt(&rsp
, &type
, &olen
, &val
);
3210 case L2CAP_CONF_MTU
:
3211 if (val
< L2CAP_DEFAULT_MIN_MTU
) {
3212 *result
= L2CAP_CONF_UNACCEPT
;
3213 chan
->imtu
= L2CAP_DEFAULT_MIN_MTU
;
3216 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_MTU
, 2, chan
->imtu
);
3219 case L2CAP_CONF_FLUSH_TO
:
3220 chan
->flush_to
= val
;
3221 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_FLUSH_TO
,
3225 case L2CAP_CONF_RFC
:
3226 if (olen
== sizeof(rfc
))
3227 memcpy(&rfc
, (void *)val
, olen
);
3229 if (test_bit(CONF_STATE2_DEVICE
, &chan
->conf_state
) &&
3230 rfc
.mode
!= chan
->mode
)
3231 return -ECONNREFUSED
;
3235 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_RFC
,
3236 sizeof(rfc
), (unsigned long) &rfc
);
3239 case L2CAP_CONF_EWS
:
3240 chan
->ack_win
= min_t(u16
, val
, chan
->ack_win
);
3241 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_EWS
, 2,
3245 case L2CAP_CONF_EFS
:
3246 if (olen
== sizeof(efs
))
3247 memcpy(&efs
, (void *)val
, olen
);
3249 if (chan
->local_stype
!= L2CAP_SERV_NOTRAFIC
&&
3250 efs
.stype
!= L2CAP_SERV_NOTRAFIC
&&
3251 efs
.stype
!= chan
->local_stype
)
3252 return -ECONNREFUSED
;
3254 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_EFS
, sizeof(efs
),
3255 (unsigned long) &efs
);
3260 if (chan
->mode
== L2CAP_MODE_BASIC
&& chan
->mode
!= rfc
.mode
)
3261 return -ECONNREFUSED
;
3263 chan
->mode
= rfc
.mode
;
3265 if (*result
== L2CAP_CONF_SUCCESS
|| *result
== L2CAP_CONF_PENDING
) {
3267 case L2CAP_MODE_ERTM
:
3268 chan
->retrans_timeout
= le16_to_cpu(rfc
.retrans_timeout
);
3269 chan
->monitor_timeout
= le16_to_cpu(rfc
.monitor_timeout
);
3270 chan
->mps
= le16_to_cpu(rfc
.max_pdu_size
);
3271 if (!test_bit(FLAG_EXT_CTRL
, &chan
->flags
))
3272 chan
->ack_win
= min_t(u16
, chan
->ack_win
,
3275 if (test_bit(FLAG_EFS_ENABLE
, &chan
->flags
)) {
3276 chan
->local_msdu
= le16_to_cpu(efs
.msdu
);
3277 chan
->local_sdu_itime
=
3278 le32_to_cpu(efs
.sdu_itime
);
3279 chan
->local_acc_lat
= le32_to_cpu(efs
.acc_lat
);
3280 chan
->local_flush_to
=
3281 le32_to_cpu(efs
.flush_to
);
3285 case L2CAP_MODE_STREAMING
:
3286 chan
->mps
= le16_to_cpu(rfc
.max_pdu_size
);
3290 req
->dcid
= cpu_to_le16(chan
->dcid
);
3291 req
->flags
= __constant_cpu_to_le16(0);
3296 static int l2cap_build_conf_rsp(struct l2cap_chan
*chan
, void *data
,
3297 u16 result
, u16 flags
)
3299 struct l2cap_conf_rsp
*rsp
= data
;
3300 void *ptr
= rsp
->data
;
3302 BT_DBG("chan %p", chan
);
3304 rsp
->scid
= cpu_to_le16(chan
->dcid
);
3305 rsp
->result
= cpu_to_le16(result
);
3306 rsp
->flags
= cpu_to_le16(flags
);
3311 void __l2cap_connect_rsp_defer(struct l2cap_chan
*chan
)
3313 struct l2cap_conn_rsp rsp
;
3314 struct l2cap_conn
*conn
= chan
->conn
;
3317 rsp
.scid
= cpu_to_le16(chan
->dcid
);
3318 rsp
.dcid
= cpu_to_le16(chan
->scid
);
3319 rsp
.result
= __constant_cpu_to_le16(L2CAP_CR_SUCCESS
);
3320 rsp
.status
= __constant_cpu_to_le16(L2CAP_CS_NO_INFO
);
3321 l2cap_send_cmd(conn
, chan
->ident
, L2CAP_CONN_RSP
, sizeof(rsp
), &rsp
);
3323 if (test_and_set_bit(CONF_REQ_SENT
, &chan
->conf_state
))
3326 l2cap_send_cmd(conn
, l2cap_get_ident(conn
), L2CAP_CONF_REQ
,
3327 l2cap_build_conf_req(chan
, buf
), buf
);
3328 chan
->num_conf_req
++;
3331 static void l2cap_conf_rfc_get(struct l2cap_chan
*chan
, void *rsp
, int len
)
3335 /* Use sane default values in case a misbehaving remote device
3336 * did not send an RFC or extended window size option.
3338 u16 txwin_ext
= chan
->ack_win
;
3339 struct l2cap_conf_rfc rfc
= {
3341 .retrans_timeout
= __constant_cpu_to_le16(L2CAP_DEFAULT_RETRANS_TO
),
3342 .monitor_timeout
= __constant_cpu_to_le16(L2CAP_DEFAULT_MONITOR_TO
),
3343 .max_pdu_size
= cpu_to_le16(chan
->imtu
),
3344 .txwin_size
= min_t(u16
, chan
->ack_win
, L2CAP_DEFAULT_TX_WINDOW
),
3347 BT_DBG("chan %p, rsp %p, len %d", chan
, rsp
, len
);
3349 if ((chan
->mode
!= L2CAP_MODE_ERTM
) && (chan
->mode
!= L2CAP_MODE_STREAMING
))
3352 while (len
>= L2CAP_CONF_OPT_SIZE
) {
3353 len
-= l2cap_get_conf_opt(&rsp
, &type
, &olen
, &val
);
3356 case L2CAP_CONF_RFC
:
3357 if (olen
== sizeof(rfc
))
3358 memcpy(&rfc
, (void *)val
, olen
);
3360 case L2CAP_CONF_EWS
:
3367 case L2CAP_MODE_ERTM
:
3368 chan
->retrans_timeout
= le16_to_cpu(rfc
.retrans_timeout
);
3369 chan
->monitor_timeout
= le16_to_cpu(rfc
.monitor_timeout
);
3370 chan
->mps
= le16_to_cpu(rfc
.max_pdu_size
);
3371 if (test_bit(FLAG_EXT_CTRL
, &chan
->flags
))
3372 chan
->ack_win
= min_t(u16
, chan
->ack_win
, txwin_ext
);
3374 chan
->ack_win
= min_t(u16
, chan
->ack_win
,
3377 case L2CAP_MODE_STREAMING
:
3378 chan
->mps
= le16_to_cpu(rfc
.max_pdu_size
);
3382 static inline int l2cap_command_rej(struct l2cap_conn
*conn
,
3383 struct l2cap_cmd_hdr
*cmd
, u8
*data
)
3385 struct l2cap_cmd_rej_unk
*rej
= (struct l2cap_cmd_rej_unk
*) data
;
3387 if (rej
->reason
!= L2CAP_REJ_NOT_UNDERSTOOD
)
3390 if ((conn
->info_state
& L2CAP_INFO_FEAT_MASK_REQ_SENT
) &&
3391 cmd
->ident
== conn
->info_ident
) {
3392 cancel_delayed_work(&conn
->info_timer
);
3394 conn
->info_state
|= L2CAP_INFO_FEAT_MASK_REQ_DONE
;
3395 conn
->info_ident
= 0;
3397 l2cap_conn_start(conn
);
3403 static struct l2cap_chan
*l2cap_connect(struct l2cap_conn
*conn
,
3404 struct l2cap_cmd_hdr
*cmd
,
3405 u8
*data
, u8 rsp_code
, u8 amp_id
)
3407 struct l2cap_conn_req
*req
= (struct l2cap_conn_req
*) data
;
3408 struct l2cap_conn_rsp rsp
;
3409 struct l2cap_chan
*chan
= NULL
, *pchan
;
3410 struct sock
*parent
, *sk
= NULL
;
3411 int result
, status
= L2CAP_CS_NO_INFO
;
3413 u16 dcid
= 0, scid
= __le16_to_cpu(req
->scid
);
3414 __le16 psm
= req
->psm
;
3416 BT_DBG("psm 0x%2.2x scid 0x%4.4x", __le16_to_cpu(psm
), scid
);
3418 /* Check if we have socket listening on psm */
3419 pchan
= l2cap_global_chan_by_psm(BT_LISTEN
, psm
, conn
->src
, conn
->dst
);
3421 result
= L2CAP_CR_BAD_PSM
;
3427 mutex_lock(&conn
->chan_lock
);
3430 /* Check if the ACL is secure enough (if not SDP) */
3431 if (psm
!= __constant_cpu_to_le16(L2CAP_PSM_SDP
) &&
3432 !hci_conn_check_link_mode(conn
->hcon
)) {
3433 conn
->disc_reason
= HCI_ERROR_AUTH_FAILURE
;
3434 result
= L2CAP_CR_SEC_BLOCK
;
3438 result
= L2CAP_CR_NO_MEM
;
3440 /* Check if we already have channel with that dcid */
3441 if (__l2cap_get_chan_by_dcid(conn
, scid
))
3444 chan
= pchan
->ops
->new_connection(pchan
);
3450 hci_conn_hold(conn
->hcon
);
3452 bacpy(&bt_sk(sk
)->src
, conn
->src
);
3453 bacpy(&bt_sk(sk
)->dst
, conn
->dst
);
3456 chan
->local_amp_id
= amp_id
;
3458 __l2cap_chan_add(conn
, chan
);
3462 __set_chan_timer(chan
, sk
->sk_sndtimeo
);
3464 chan
->ident
= cmd
->ident
;
3466 if (conn
->info_state
& L2CAP_INFO_FEAT_MASK_REQ_DONE
) {
3467 if (l2cap_chan_check_security(chan
)) {
3468 if (test_bit(BT_SK_DEFER_SETUP
, &bt_sk(sk
)->flags
)) {
3469 __l2cap_state_change(chan
, BT_CONNECT2
);
3470 result
= L2CAP_CR_PEND
;
3471 status
= L2CAP_CS_AUTHOR_PEND
;
3472 chan
->ops
->defer(chan
);
3474 /* Force pending result for AMP controllers.
3475 * The connection will succeed after the
3476 * physical link is up.
3479 __l2cap_state_change(chan
, BT_CONNECT2
);
3480 result
= L2CAP_CR_PEND
;
3482 __l2cap_state_change(chan
, BT_CONFIG
);
3483 result
= L2CAP_CR_SUCCESS
;
3485 status
= L2CAP_CS_NO_INFO
;
3488 __l2cap_state_change(chan
, BT_CONNECT2
);
3489 result
= L2CAP_CR_PEND
;
3490 status
= L2CAP_CS_AUTHEN_PEND
;
3493 __l2cap_state_change(chan
, BT_CONNECT2
);
3494 result
= L2CAP_CR_PEND
;
3495 status
= L2CAP_CS_NO_INFO
;
3499 release_sock(parent
);
3500 mutex_unlock(&conn
->chan_lock
);
3503 rsp
.scid
= cpu_to_le16(scid
);
3504 rsp
.dcid
= cpu_to_le16(dcid
);
3505 rsp
.result
= cpu_to_le16(result
);
3506 rsp
.status
= cpu_to_le16(status
);
3507 l2cap_send_cmd(conn
, cmd
->ident
, rsp_code
, sizeof(rsp
), &rsp
);
3509 if (result
== L2CAP_CR_PEND
&& status
== L2CAP_CS_NO_INFO
) {
3510 struct l2cap_info_req info
;
3511 info
.type
= __constant_cpu_to_le16(L2CAP_IT_FEAT_MASK
);
3513 conn
->info_state
|= L2CAP_INFO_FEAT_MASK_REQ_SENT
;
3514 conn
->info_ident
= l2cap_get_ident(conn
);
3516 schedule_delayed_work(&conn
->info_timer
, L2CAP_INFO_TIMEOUT
);
3518 l2cap_send_cmd(conn
, conn
->info_ident
, L2CAP_INFO_REQ
,
3519 sizeof(info
), &info
);
3522 if (chan
&& !test_bit(CONF_REQ_SENT
, &chan
->conf_state
) &&
3523 result
== L2CAP_CR_SUCCESS
) {
3525 set_bit(CONF_REQ_SENT
, &chan
->conf_state
);
3526 l2cap_send_cmd(conn
, l2cap_get_ident(conn
), L2CAP_CONF_REQ
,
3527 l2cap_build_conf_req(chan
, buf
), buf
);
3528 chan
->num_conf_req
++;
3534 static int l2cap_connect_req(struct l2cap_conn
*conn
,
3535 struct l2cap_cmd_hdr
*cmd
, u8
*data
)
3537 l2cap_connect(conn
, cmd
, data
, L2CAP_CONN_RSP
, 0);
3541 static int l2cap_connect_create_rsp(struct l2cap_conn
*conn
,
3542 struct l2cap_cmd_hdr
*cmd
, u8
*data
)
3544 struct l2cap_conn_rsp
*rsp
= (struct l2cap_conn_rsp
*) data
;
3545 u16 scid
, dcid
, result
, status
;
3546 struct l2cap_chan
*chan
;
3550 scid
= __le16_to_cpu(rsp
->scid
);
3551 dcid
= __le16_to_cpu(rsp
->dcid
);
3552 result
= __le16_to_cpu(rsp
->result
);
3553 status
= __le16_to_cpu(rsp
->status
);
3555 BT_DBG("dcid 0x%4.4x scid 0x%4.4x result 0x%2.2x status 0x%2.2x",
3556 dcid
, scid
, result
, status
);
3558 mutex_lock(&conn
->chan_lock
);
3561 chan
= __l2cap_get_chan_by_scid(conn
, scid
);
3567 chan
= __l2cap_get_chan_by_ident(conn
, cmd
->ident
);
3576 l2cap_chan_lock(chan
);
3579 case L2CAP_CR_SUCCESS
:
3580 l2cap_state_change(chan
, BT_CONFIG
);
3583 clear_bit(CONF_CONNECT_PEND
, &chan
->conf_state
);
3585 if (test_and_set_bit(CONF_REQ_SENT
, &chan
->conf_state
))
3588 l2cap_send_cmd(conn
, l2cap_get_ident(conn
), L2CAP_CONF_REQ
,
3589 l2cap_build_conf_req(chan
, req
), req
);
3590 chan
->num_conf_req
++;
3594 set_bit(CONF_CONNECT_PEND
, &chan
->conf_state
);
3598 l2cap_chan_del(chan
, ECONNREFUSED
);
3602 l2cap_chan_unlock(chan
);
3605 mutex_unlock(&conn
->chan_lock
);
3610 static inline void set_default_fcs(struct l2cap_chan
*chan
)
3612 /* FCS is enabled only in ERTM or streaming mode, if one or both
3615 if (chan
->mode
!= L2CAP_MODE_ERTM
&& chan
->mode
!= L2CAP_MODE_STREAMING
)
3616 chan
->fcs
= L2CAP_FCS_NONE
;
3617 else if (!test_bit(CONF_NO_FCS_RECV
, &chan
->conf_state
))
3618 chan
->fcs
= L2CAP_FCS_CRC16
;
3621 static void l2cap_send_efs_conf_rsp(struct l2cap_chan
*chan
, void *data
,
3622 u8 ident
, u16 flags
)
3624 struct l2cap_conn
*conn
= chan
->conn
;
3626 BT_DBG("conn %p chan %p ident %d flags 0x%4.4x", conn
, chan
, ident
,
3629 clear_bit(CONF_LOC_CONF_PEND
, &chan
->conf_state
);
3630 set_bit(CONF_OUTPUT_DONE
, &chan
->conf_state
);
3632 l2cap_send_cmd(conn
, ident
, L2CAP_CONF_RSP
,
3633 l2cap_build_conf_rsp(chan
, data
,
3634 L2CAP_CONF_SUCCESS
, flags
), data
);
3637 static inline int l2cap_config_req(struct l2cap_conn
*conn
,
3638 struct l2cap_cmd_hdr
*cmd
, u16 cmd_len
,
3641 struct l2cap_conf_req
*req
= (struct l2cap_conf_req
*) data
;
3644 struct l2cap_chan
*chan
;
3647 dcid
= __le16_to_cpu(req
->dcid
);
3648 flags
= __le16_to_cpu(req
->flags
);
3650 BT_DBG("dcid 0x%4.4x flags 0x%2.2x", dcid
, flags
);
3652 chan
= l2cap_get_chan_by_scid(conn
, dcid
);
3656 if (chan
->state
!= BT_CONFIG
&& chan
->state
!= BT_CONNECT2
) {
3657 struct l2cap_cmd_rej_cid rej
;
3659 rej
.reason
= __constant_cpu_to_le16(L2CAP_REJ_INVALID_CID
);
3660 rej
.scid
= cpu_to_le16(chan
->scid
);
3661 rej
.dcid
= cpu_to_le16(chan
->dcid
);
3663 l2cap_send_cmd(conn
, cmd
->ident
, L2CAP_COMMAND_REJ
,
3668 /* Reject if config buffer is too small. */
3669 len
= cmd_len
- sizeof(*req
);
3670 if (len
< 0 || chan
->conf_len
+ len
> sizeof(chan
->conf_req
)) {
3671 l2cap_send_cmd(conn
, cmd
->ident
, L2CAP_CONF_RSP
,
3672 l2cap_build_conf_rsp(chan
, rsp
,
3673 L2CAP_CONF_REJECT
, flags
), rsp
);
3678 memcpy(chan
->conf_req
+ chan
->conf_len
, req
->data
, len
);
3679 chan
->conf_len
+= len
;
3681 if (flags
& L2CAP_CONF_FLAG_CONTINUATION
) {
3682 /* Incomplete config. Send empty response. */
3683 l2cap_send_cmd(conn
, cmd
->ident
, L2CAP_CONF_RSP
,
3684 l2cap_build_conf_rsp(chan
, rsp
,
3685 L2CAP_CONF_SUCCESS
, flags
), rsp
);
3689 /* Complete config. */
3690 len
= l2cap_parse_conf_req(chan
, rsp
);
3692 l2cap_send_disconn_req(conn
, chan
, ECONNRESET
);
3696 l2cap_send_cmd(conn
, cmd
->ident
, L2CAP_CONF_RSP
, len
, rsp
);
3697 chan
->num_conf_rsp
++;
3699 /* Reset config buffer. */
3702 if (!test_bit(CONF_OUTPUT_DONE
, &chan
->conf_state
))
3705 if (test_bit(CONF_INPUT_DONE
, &chan
->conf_state
)) {
3706 set_default_fcs(chan
);
3708 if (chan
->mode
== L2CAP_MODE_ERTM
||
3709 chan
->mode
== L2CAP_MODE_STREAMING
)
3710 err
= l2cap_ertm_init(chan
);
3713 l2cap_send_disconn_req(chan
->conn
, chan
, -err
);
3715 l2cap_chan_ready(chan
);
3720 if (!test_and_set_bit(CONF_REQ_SENT
, &chan
->conf_state
)) {
3722 l2cap_send_cmd(conn
, l2cap_get_ident(conn
), L2CAP_CONF_REQ
,
3723 l2cap_build_conf_req(chan
, buf
), buf
);
3724 chan
->num_conf_req
++;
3727 /* Got Conf Rsp PENDING from remote side and asume we sent
3728 Conf Rsp PENDING in the code above */
3729 if (test_bit(CONF_REM_CONF_PEND
, &chan
->conf_state
) &&
3730 test_bit(CONF_LOC_CONF_PEND
, &chan
->conf_state
)) {
3732 /* check compatibility */
3734 /* Send rsp for BR/EDR channel */
3736 l2cap_send_efs_conf_rsp(chan
, rsp
, cmd
->ident
, flags
);
3738 chan
->ident
= cmd
->ident
;
3742 l2cap_chan_unlock(chan
);
3746 static inline int l2cap_config_rsp(struct l2cap_conn
*conn
,
3747 struct l2cap_cmd_hdr
*cmd
, u8
*data
)
3749 struct l2cap_conf_rsp
*rsp
= (struct l2cap_conf_rsp
*)data
;
3750 u16 scid
, flags
, result
;
3751 struct l2cap_chan
*chan
;
3752 int len
= le16_to_cpu(cmd
->len
) - sizeof(*rsp
);
3755 scid
= __le16_to_cpu(rsp
->scid
);
3756 flags
= __le16_to_cpu(rsp
->flags
);
3757 result
= __le16_to_cpu(rsp
->result
);
3759 BT_DBG("scid 0x%4.4x flags 0x%2.2x result 0x%2.2x len %d", scid
, flags
,
3762 chan
= l2cap_get_chan_by_scid(conn
, scid
);
3767 case L2CAP_CONF_SUCCESS
:
3768 l2cap_conf_rfc_get(chan
, rsp
->data
, len
);
3769 clear_bit(CONF_REM_CONF_PEND
, &chan
->conf_state
);
3772 case L2CAP_CONF_PENDING
:
3773 set_bit(CONF_REM_CONF_PEND
, &chan
->conf_state
);
3775 if (test_bit(CONF_LOC_CONF_PEND
, &chan
->conf_state
)) {
3778 len
= l2cap_parse_conf_rsp(chan
, rsp
->data
, len
,
3781 l2cap_send_disconn_req(conn
, chan
, ECONNRESET
);
3785 /* check compatibility */
3788 l2cap_send_efs_conf_rsp(chan
, buf
, cmd
->ident
,
3791 chan
->ident
= cmd
->ident
;
3795 case L2CAP_CONF_UNACCEPT
:
3796 if (chan
->num_conf_rsp
<= L2CAP_CONF_MAX_CONF_RSP
) {
3799 if (len
> sizeof(req
) - sizeof(struct l2cap_conf_req
)) {
3800 l2cap_send_disconn_req(conn
, chan
, ECONNRESET
);
3804 /* throw out any old stored conf requests */
3805 result
= L2CAP_CONF_SUCCESS
;
3806 len
= l2cap_parse_conf_rsp(chan
, rsp
->data
, len
,
3809 l2cap_send_disconn_req(conn
, chan
, ECONNRESET
);
3813 l2cap_send_cmd(conn
, l2cap_get_ident(conn
),
3814 L2CAP_CONF_REQ
, len
, req
);
3815 chan
->num_conf_req
++;
3816 if (result
!= L2CAP_CONF_SUCCESS
)
3822 l2cap_chan_set_err(chan
, ECONNRESET
);
3824 __set_chan_timer(chan
, L2CAP_DISC_REJ_TIMEOUT
);
3825 l2cap_send_disconn_req(conn
, chan
, ECONNRESET
);
3829 if (flags
& L2CAP_CONF_FLAG_CONTINUATION
)
3832 set_bit(CONF_INPUT_DONE
, &chan
->conf_state
);
3834 if (test_bit(CONF_OUTPUT_DONE
, &chan
->conf_state
)) {
3835 set_default_fcs(chan
);
3837 if (chan
->mode
== L2CAP_MODE_ERTM
||
3838 chan
->mode
== L2CAP_MODE_STREAMING
)
3839 err
= l2cap_ertm_init(chan
);
3842 l2cap_send_disconn_req(chan
->conn
, chan
, -err
);
3844 l2cap_chan_ready(chan
);
3848 l2cap_chan_unlock(chan
);
3852 static inline int l2cap_disconnect_req(struct l2cap_conn
*conn
,
3853 struct l2cap_cmd_hdr
*cmd
, u8
*data
)
3855 struct l2cap_disconn_req
*req
= (struct l2cap_disconn_req
*) data
;
3856 struct l2cap_disconn_rsp rsp
;
3858 struct l2cap_chan
*chan
;
3861 scid
= __le16_to_cpu(req
->scid
);
3862 dcid
= __le16_to_cpu(req
->dcid
);
3864 BT_DBG("scid 0x%4.4x dcid 0x%4.4x", scid
, dcid
);
3866 mutex_lock(&conn
->chan_lock
);
3868 chan
= __l2cap_get_chan_by_scid(conn
, dcid
);
3870 mutex_unlock(&conn
->chan_lock
);
3874 l2cap_chan_lock(chan
);
3878 rsp
.dcid
= cpu_to_le16(chan
->scid
);
3879 rsp
.scid
= cpu_to_le16(chan
->dcid
);
3880 l2cap_send_cmd(conn
, cmd
->ident
, L2CAP_DISCONN_RSP
, sizeof(rsp
), &rsp
);
3883 sk
->sk_shutdown
= SHUTDOWN_MASK
;
3886 l2cap_chan_hold(chan
);
3887 l2cap_chan_del(chan
, ECONNRESET
);
3889 l2cap_chan_unlock(chan
);
3891 chan
->ops
->close(chan
);
3892 l2cap_chan_put(chan
);
3894 mutex_unlock(&conn
->chan_lock
);
3899 static inline int l2cap_disconnect_rsp(struct l2cap_conn
*conn
,
3900 struct l2cap_cmd_hdr
*cmd
, u8
*data
)
3902 struct l2cap_disconn_rsp
*rsp
= (struct l2cap_disconn_rsp
*) data
;
3904 struct l2cap_chan
*chan
;
3906 scid
= __le16_to_cpu(rsp
->scid
);
3907 dcid
= __le16_to_cpu(rsp
->dcid
);
3909 BT_DBG("dcid 0x%4.4x scid 0x%4.4x", dcid
, scid
);
3911 mutex_lock(&conn
->chan_lock
);
3913 chan
= __l2cap_get_chan_by_scid(conn
, scid
);
3915 mutex_unlock(&conn
->chan_lock
);
3919 l2cap_chan_lock(chan
);
3921 l2cap_chan_hold(chan
);
3922 l2cap_chan_del(chan
, 0);
3924 l2cap_chan_unlock(chan
);
3926 chan
->ops
->close(chan
);
3927 l2cap_chan_put(chan
);
3929 mutex_unlock(&conn
->chan_lock
);
3934 static inline int l2cap_information_req(struct l2cap_conn
*conn
,
3935 struct l2cap_cmd_hdr
*cmd
, u8
*data
)
3937 struct l2cap_info_req
*req
= (struct l2cap_info_req
*) data
;
3940 type
= __le16_to_cpu(req
->type
);
3942 BT_DBG("type 0x%4.4x", type
);
3944 if (type
== L2CAP_IT_FEAT_MASK
) {
3946 u32 feat_mask
= l2cap_feat_mask
;
3947 struct l2cap_info_rsp
*rsp
= (struct l2cap_info_rsp
*) buf
;
3948 rsp
->type
= __constant_cpu_to_le16(L2CAP_IT_FEAT_MASK
);
3949 rsp
->result
= __constant_cpu_to_le16(L2CAP_IR_SUCCESS
);
3951 feat_mask
|= L2CAP_FEAT_ERTM
| L2CAP_FEAT_STREAMING
3954 feat_mask
|= L2CAP_FEAT_EXT_FLOW
3955 | L2CAP_FEAT_EXT_WINDOW
;
3957 put_unaligned_le32(feat_mask
, rsp
->data
);
3958 l2cap_send_cmd(conn
, cmd
->ident
, L2CAP_INFO_RSP
, sizeof(buf
),
3960 } else if (type
== L2CAP_IT_FIXED_CHAN
) {
3962 struct l2cap_info_rsp
*rsp
= (struct l2cap_info_rsp
*) buf
;
3965 l2cap_fixed_chan
[0] |= L2CAP_FC_A2MP
;
3967 l2cap_fixed_chan
[0] &= ~L2CAP_FC_A2MP
;
3969 rsp
->type
= __constant_cpu_to_le16(L2CAP_IT_FIXED_CHAN
);
3970 rsp
->result
= __constant_cpu_to_le16(L2CAP_IR_SUCCESS
);
3971 memcpy(rsp
->data
, l2cap_fixed_chan
, sizeof(l2cap_fixed_chan
));
3972 l2cap_send_cmd(conn
, cmd
->ident
, L2CAP_INFO_RSP
, sizeof(buf
),
3975 struct l2cap_info_rsp rsp
;
3976 rsp
.type
= cpu_to_le16(type
);
3977 rsp
.result
= __constant_cpu_to_le16(L2CAP_IR_NOTSUPP
);
3978 l2cap_send_cmd(conn
, cmd
->ident
, L2CAP_INFO_RSP
, sizeof(rsp
),
3985 static inline int l2cap_information_rsp(struct l2cap_conn
*conn
,
3986 struct l2cap_cmd_hdr
*cmd
, u8
*data
)
3988 struct l2cap_info_rsp
*rsp
= (struct l2cap_info_rsp
*) data
;
3991 type
= __le16_to_cpu(rsp
->type
);
3992 result
= __le16_to_cpu(rsp
->result
);
3994 BT_DBG("type 0x%4.4x result 0x%2.2x", type
, result
);
3996 /* L2CAP Info req/rsp are unbound to channels, add extra checks */
3997 if (cmd
->ident
!= conn
->info_ident
||
3998 conn
->info_state
& L2CAP_INFO_FEAT_MASK_REQ_DONE
)
4001 cancel_delayed_work(&conn
->info_timer
);
4003 if (result
!= L2CAP_IR_SUCCESS
) {
4004 conn
->info_state
|= L2CAP_INFO_FEAT_MASK_REQ_DONE
;
4005 conn
->info_ident
= 0;
4007 l2cap_conn_start(conn
);
4013 case L2CAP_IT_FEAT_MASK
:
4014 conn
->feat_mask
= get_unaligned_le32(rsp
->data
);
4016 if (conn
->feat_mask
& L2CAP_FEAT_FIXED_CHAN
) {
4017 struct l2cap_info_req req
;
4018 req
.type
= __constant_cpu_to_le16(L2CAP_IT_FIXED_CHAN
);
4020 conn
->info_ident
= l2cap_get_ident(conn
);
4022 l2cap_send_cmd(conn
, conn
->info_ident
,
4023 L2CAP_INFO_REQ
, sizeof(req
), &req
);
4025 conn
->info_state
|= L2CAP_INFO_FEAT_MASK_REQ_DONE
;
4026 conn
->info_ident
= 0;
4028 l2cap_conn_start(conn
);
4032 case L2CAP_IT_FIXED_CHAN
:
4033 conn
->fixed_chan_mask
= rsp
->data
[0];
4034 conn
->info_state
|= L2CAP_INFO_FEAT_MASK_REQ_DONE
;
4035 conn
->info_ident
= 0;
4037 l2cap_conn_start(conn
);
4044 static int l2cap_create_channel_req(struct l2cap_conn
*conn
,
4045 struct l2cap_cmd_hdr
*cmd
,
4046 u16 cmd_len
, void *data
)
4048 struct l2cap_create_chan_req
*req
= data
;
4049 struct l2cap_chan
*chan
;
4052 if (cmd_len
!= sizeof(*req
))
4058 psm
= le16_to_cpu(req
->psm
);
4059 scid
= le16_to_cpu(req
->scid
);
4061 BT_DBG("psm 0x%2.2x, scid 0x%4.4x, amp_id %d", psm
, scid
, req
->amp_id
);
4064 struct hci_dev
*hdev
;
4066 /* Validate AMP controller id */
4067 hdev
= hci_dev_get(req
->amp_id
);
4068 if (!hdev
|| hdev
->dev_type
!= HCI_AMP
||
4069 !test_bit(HCI_UP
, &hdev
->flags
)) {
4070 struct l2cap_create_chan_rsp rsp
;
4073 rsp
.scid
= cpu_to_le16(scid
);
4074 rsp
.result
= __constant_cpu_to_le16(L2CAP_CR_BAD_AMP
);
4075 rsp
.status
= __constant_cpu_to_le16(L2CAP_CS_NO_INFO
);
4077 l2cap_send_cmd(conn
, cmd
->ident
, L2CAP_CREATE_CHAN_RSP
,
4089 chan
= l2cap_connect(conn
, cmd
, data
, L2CAP_CREATE_CHAN_RSP
,
4095 static void l2cap_send_move_chan_rsp(struct l2cap_conn
*conn
, u8 ident
,
4096 u16 icid
, u16 result
)
4098 struct l2cap_move_chan_rsp rsp
;
4100 BT_DBG("icid 0x%4.4x, result 0x%4.4x", icid
, result
);
4102 rsp
.icid
= cpu_to_le16(icid
);
4103 rsp
.result
= cpu_to_le16(result
);
4105 l2cap_send_cmd(conn
, ident
, L2CAP_MOVE_CHAN_RSP
, sizeof(rsp
), &rsp
);
4108 static void l2cap_send_move_chan_cfm(struct l2cap_conn
*conn
,
4109 struct l2cap_chan
*chan
,
4110 u16 icid
, u16 result
)
4112 struct l2cap_move_chan_cfm cfm
;
4115 BT_DBG("icid 0x%4.4x, result 0x%4.4x", icid
, result
);
4117 ident
= l2cap_get_ident(conn
);
4119 chan
->ident
= ident
;
4121 cfm
.icid
= cpu_to_le16(icid
);
4122 cfm
.result
= cpu_to_le16(result
);
4124 l2cap_send_cmd(conn
, ident
, L2CAP_MOVE_CHAN_CFM
, sizeof(cfm
), &cfm
);
4127 static void l2cap_send_move_chan_cfm_rsp(struct l2cap_conn
*conn
, u8 ident
,
4130 struct l2cap_move_chan_cfm_rsp rsp
;
4132 BT_DBG("icid 0x%4.4x", icid
);
4134 rsp
.icid
= cpu_to_le16(icid
);
4135 l2cap_send_cmd(conn
, ident
, L2CAP_MOVE_CHAN_CFM_RSP
, sizeof(rsp
), &rsp
);
4138 static inline int l2cap_move_channel_req(struct l2cap_conn
*conn
,
4139 struct l2cap_cmd_hdr
*cmd
,
4140 u16 cmd_len
, void *data
)
4142 struct l2cap_move_chan_req
*req
= data
;
4144 u16 result
= L2CAP_MR_NOT_ALLOWED
;
4146 if (cmd_len
!= sizeof(*req
))
4149 icid
= le16_to_cpu(req
->icid
);
4151 BT_DBG("icid 0x%4.4x, dest_amp_id %d", icid
, req
->dest_amp_id
);
4156 /* Placeholder: Always refuse */
4157 l2cap_send_move_chan_rsp(conn
, cmd
->ident
, icid
, result
);
4162 static inline int l2cap_move_channel_rsp(struct l2cap_conn
*conn
,
4163 struct l2cap_cmd_hdr
*cmd
,
4164 u16 cmd_len
, void *data
)
4166 struct l2cap_move_chan_rsp
*rsp
= data
;
4169 if (cmd_len
!= sizeof(*rsp
))
4172 icid
= le16_to_cpu(rsp
->icid
);
4173 result
= le16_to_cpu(rsp
->result
);
4175 BT_DBG("icid 0x%4.4x, result 0x%4.4x", icid
, result
);
4177 /* Placeholder: Always unconfirmed */
4178 l2cap_send_move_chan_cfm(conn
, NULL
, icid
, L2CAP_MC_UNCONFIRMED
);
4183 static inline int l2cap_move_channel_confirm(struct l2cap_conn
*conn
,
4184 struct l2cap_cmd_hdr
*cmd
,
4185 u16 cmd_len
, void *data
)
4187 struct l2cap_move_chan_cfm
*cfm
= data
;
4190 if (cmd_len
!= sizeof(*cfm
))
4193 icid
= le16_to_cpu(cfm
->icid
);
4194 result
= le16_to_cpu(cfm
->result
);
4196 BT_DBG("icid 0x%4.4x, result 0x%4.4x", icid
, result
);
4198 l2cap_send_move_chan_cfm_rsp(conn
, cmd
->ident
, icid
);
4203 static inline int l2cap_move_channel_confirm_rsp(struct l2cap_conn
*conn
,
4204 struct l2cap_cmd_hdr
*cmd
,
4205 u16 cmd_len
, void *data
)
4207 struct l2cap_move_chan_cfm_rsp
*rsp
= data
;
4210 if (cmd_len
!= sizeof(*rsp
))
4213 icid
= le16_to_cpu(rsp
->icid
);
4215 BT_DBG("icid 0x%4.4x", icid
);
4220 static inline int l2cap_check_conn_param(u16 min
, u16 max
, u16 latency
,
4225 if (min
> max
|| min
< 6 || max
> 3200)
4228 if (to_multiplier
< 10 || to_multiplier
> 3200)
4231 if (max
>= to_multiplier
* 8)
4234 max_latency
= (to_multiplier
* 8 / max
) - 1;
4235 if (latency
> 499 || latency
> max_latency
)
4241 static inline int l2cap_conn_param_update_req(struct l2cap_conn
*conn
,
4242 struct l2cap_cmd_hdr
*cmd
,
4245 struct hci_conn
*hcon
= conn
->hcon
;
4246 struct l2cap_conn_param_update_req
*req
;
4247 struct l2cap_conn_param_update_rsp rsp
;
4248 u16 min
, max
, latency
, to_multiplier
, cmd_len
;
4251 if (!(hcon
->link_mode
& HCI_LM_MASTER
))
4254 cmd_len
= __le16_to_cpu(cmd
->len
);
4255 if (cmd_len
!= sizeof(struct l2cap_conn_param_update_req
))
4258 req
= (struct l2cap_conn_param_update_req
*) data
;
4259 min
= __le16_to_cpu(req
->min
);
4260 max
= __le16_to_cpu(req
->max
);
4261 latency
= __le16_to_cpu(req
->latency
);
4262 to_multiplier
= __le16_to_cpu(req
->to_multiplier
);
4264 BT_DBG("min 0x%4.4x max 0x%4.4x latency: 0x%4.4x Timeout: 0x%4.4x",
4265 min
, max
, latency
, to_multiplier
);
4267 memset(&rsp
, 0, sizeof(rsp
));
4269 err
= l2cap_check_conn_param(min
, max
, latency
, to_multiplier
);
4271 rsp
.result
= __constant_cpu_to_le16(L2CAP_CONN_PARAM_REJECTED
);
4273 rsp
.result
= __constant_cpu_to_le16(L2CAP_CONN_PARAM_ACCEPTED
);
4275 l2cap_send_cmd(conn
, cmd
->ident
, L2CAP_CONN_PARAM_UPDATE_RSP
,
4279 hci_le_conn_update(hcon
, min
, max
, latency
, to_multiplier
);
4284 static inline int l2cap_bredr_sig_cmd(struct l2cap_conn
*conn
,
4285 struct l2cap_cmd_hdr
*cmd
, u16 cmd_len
,
4290 switch (cmd
->code
) {
4291 case L2CAP_COMMAND_REJ
:
4292 l2cap_command_rej(conn
, cmd
, data
);
4295 case L2CAP_CONN_REQ
:
4296 err
= l2cap_connect_req(conn
, cmd
, data
);
4299 case L2CAP_CONN_RSP
:
4300 case L2CAP_CREATE_CHAN_RSP
:
4301 err
= l2cap_connect_create_rsp(conn
, cmd
, data
);
4304 case L2CAP_CONF_REQ
:
4305 err
= l2cap_config_req(conn
, cmd
, cmd_len
, data
);
4308 case L2CAP_CONF_RSP
:
4309 err
= l2cap_config_rsp(conn
, cmd
, data
);
4312 case L2CAP_DISCONN_REQ
:
4313 err
= l2cap_disconnect_req(conn
, cmd
, data
);
4316 case L2CAP_DISCONN_RSP
:
4317 err
= l2cap_disconnect_rsp(conn
, cmd
, data
);
4320 case L2CAP_ECHO_REQ
:
4321 l2cap_send_cmd(conn
, cmd
->ident
, L2CAP_ECHO_RSP
, cmd_len
, data
);
4324 case L2CAP_ECHO_RSP
:
4327 case L2CAP_INFO_REQ
:
4328 err
= l2cap_information_req(conn
, cmd
, data
);
4331 case L2CAP_INFO_RSP
:
4332 err
= l2cap_information_rsp(conn
, cmd
, data
);
4335 case L2CAP_CREATE_CHAN_REQ
:
4336 err
= l2cap_create_channel_req(conn
, cmd
, cmd_len
, data
);
4339 case L2CAP_MOVE_CHAN_REQ
:
4340 err
= l2cap_move_channel_req(conn
, cmd
, cmd_len
, data
);
4343 case L2CAP_MOVE_CHAN_RSP
:
4344 err
= l2cap_move_channel_rsp(conn
, cmd
, cmd_len
, data
);
4347 case L2CAP_MOVE_CHAN_CFM
:
4348 err
= l2cap_move_channel_confirm(conn
, cmd
, cmd_len
, data
);
4351 case L2CAP_MOVE_CHAN_CFM_RSP
:
4352 err
= l2cap_move_channel_confirm_rsp(conn
, cmd
, cmd_len
, data
);
4356 BT_ERR("Unknown BR/EDR signaling command 0x%2.2x", cmd
->code
);
4364 static inline int l2cap_le_sig_cmd(struct l2cap_conn
*conn
,
4365 struct l2cap_cmd_hdr
*cmd
, u8
*data
)
4367 switch (cmd
->code
) {
4368 case L2CAP_COMMAND_REJ
:
4371 case L2CAP_CONN_PARAM_UPDATE_REQ
:
4372 return l2cap_conn_param_update_req(conn
, cmd
, data
);
4374 case L2CAP_CONN_PARAM_UPDATE_RSP
:
4378 BT_ERR("Unknown LE signaling command 0x%2.2x", cmd
->code
);
4383 static inline void l2cap_sig_channel(struct l2cap_conn
*conn
,
4384 struct sk_buff
*skb
)
4386 u8
*data
= skb
->data
;
4388 struct l2cap_cmd_hdr cmd
;
4391 l2cap_raw_recv(conn
, skb
);
4393 while (len
>= L2CAP_CMD_HDR_SIZE
) {
4395 memcpy(&cmd
, data
, L2CAP_CMD_HDR_SIZE
);
4396 data
+= L2CAP_CMD_HDR_SIZE
;
4397 len
-= L2CAP_CMD_HDR_SIZE
;
4399 cmd_len
= le16_to_cpu(cmd
.len
);
4401 BT_DBG("code 0x%2.2x len %d id 0x%2.2x", cmd
.code
, cmd_len
,
4404 if (cmd_len
> len
|| !cmd
.ident
) {
4405 BT_DBG("corrupted command");
4409 if (conn
->hcon
->type
== LE_LINK
)
4410 err
= l2cap_le_sig_cmd(conn
, &cmd
, data
);
4412 err
= l2cap_bredr_sig_cmd(conn
, &cmd
, cmd_len
, data
);
4415 struct l2cap_cmd_rej_unk rej
;
4417 BT_ERR("Wrong link type (%d)", err
);
4419 /* FIXME: Map err to a valid reason */
4420 rej
.reason
= __constant_cpu_to_le16(L2CAP_REJ_NOT_UNDERSTOOD
);
4421 l2cap_send_cmd(conn
, cmd
.ident
, L2CAP_COMMAND_REJ
,
4432 static int l2cap_check_fcs(struct l2cap_chan
*chan
, struct sk_buff
*skb
)
4434 u16 our_fcs
, rcv_fcs
;
4437 if (test_bit(FLAG_EXT_CTRL
, &chan
->flags
))
4438 hdr_size
= L2CAP_EXT_HDR_SIZE
;
4440 hdr_size
= L2CAP_ENH_HDR_SIZE
;
4442 if (chan
->fcs
== L2CAP_FCS_CRC16
) {
4443 skb_trim(skb
, skb
->len
- L2CAP_FCS_SIZE
);
4444 rcv_fcs
= get_unaligned_le16(skb
->data
+ skb
->len
);
4445 our_fcs
= crc16(0, skb
->data
- hdr_size
, skb
->len
+ hdr_size
);
4447 if (our_fcs
!= rcv_fcs
)
4453 static void l2cap_send_i_or_rr_or_rnr(struct l2cap_chan
*chan
)
4455 struct l2cap_ctrl control
;
4457 BT_DBG("chan %p", chan
);
4459 memset(&control
, 0, sizeof(control
));
4462 control
.reqseq
= chan
->buffer_seq
;
4463 set_bit(CONN_SEND_FBIT
, &chan
->conn_state
);
4465 if (test_bit(CONN_LOCAL_BUSY
, &chan
->conn_state
)) {
4466 control
.super
= L2CAP_SUPER_RNR
;
4467 l2cap_send_sframe(chan
, &control
);
4470 if (test_and_clear_bit(CONN_REMOTE_BUSY
, &chan
->conn_state
) &&
4471 chan
->unacked_frames
> 0)
4472 __set_retrans_timer(chan
);
4474 /* Send pending iframes */
4475 l2cap_ertm_send(chan
);
4477 if (!test_bit(CONN_LOCAL_BUSY
, &chan
->conn_state
) &&
4478 test_bit(CONN_SEND_FBIT
, &chan
->conn_state
)) {
4479 /* F-bit wasn't sent in an s-frame or i-frame yet, so
4482 control
.super
= L2CAP_SUPER_RR
;
4483 l2cap_send_sframe(chan
, &control
);
4487 static void append_skb_frag(struct sk_buff
*skb
, struct sk_buff
*new_frag
,
4488 struct sk_buff
**last_frag
)
4490 /* skb->len reflects data in skb as well as all fragments
4491 * skb->data_len reflects only data in fragments
4493 if (!skb_has_frag_list(skb
))
4494 skb_shinfo(skb
)->frag_list
= new_frag
;
4496 new_frag
->next
= NULL
;
4498 (*last_frag
)->next
= new_frag
;
4499 *last_frag
= new_frag
;
4501 skb
->len
+= new_frag
->len
;
4502 skb
->data_len
+= new_frag
->len
;
4503 skb
->truesize
+= new_frag
->truesize
;
4506 static int l2cap_reassemble_sdu(struct l2cap_chan
*chan
, struct sk_buff
*skb
,
4507 struct l2cap_ctrl
*control
)
4511 switch (control
->sar
) {
4512 case L2CAP_SAR_UNSEGMENTED
:
4516 err
= chan
->ops
->recv(chan
, skb
);
4519 case L2CAP_SAR_START
:
4523 chan
->sdu_len
= get_unaligned_le16(skb
->data
);
4524 skb_pull(skb
, L2CAP_SDULEN_SIZE
);
4526 if (chan
->sdu_len
> chan
->imtu
) {
4531 if (skb
->len
>= chan
->sdu_len
)
4535 chan
->sdu_last_frag
= skb
;
4541 case L2CAP_SAR_CONTINUE
:
4545 append_skb_frag(chan
->sdu
, skb
,
4546 &chan
->sdu_last_frag
);
4549 if (chan
->sdu
->len
>= chan
->sdu_len
)
4559 append_skb_frag(chan
->sdu
, skb
,
4560 &chan
->sdu_last_frag
);
4563 if (chan
->sdu
->len
!= chan
->sdu_len
)
4566 err
= chan
->ops
->recv(chan
, chan
->sdu
);
4569 /* Reassembly complete */
4571 chan
->sdu_last_frag
= NULL
;
4579 kfree_skb(chan
->sdu
);
4581 chan
->sdu_last_frag
= NULL
;
4588 void l2cap_chan_busy(struct l2cap_chan
*chan
, int busy
)
4592 if (chan
->mode
!= L2CAP_MODE_ERTM
)
4595 event
= busy
? L2CAP_EV_LOCAL_BUSY_DETECTED
: L2CAP_EV_LOCAL_BUSY_CLEAR
;
4596 l2cap_tx(chan
, NULL
, NULL
, event
);
4599 static int l2cap_rx_queued_iframes(struct l2cap_chan
*chan
)
4602 /* Pass sequential frames to l2cap_reassemble_sdu()
4603 * until a gap is encountered.
4606 BT_DBG("chan %p", chan
);
4608 while (!test_bit(CONN_LOCAL_BUSY
, &chan
->conn_state
)) {
4609 struct sk_buff
*skb
;
4610 BT_DBG("Searching for skb with txseq %d (queue len %d)",
4611 chan
->buffer_seq
, skb_queue_len(&chan
->srej_q
));
4613 skb
= l2cap_ertm_seq_in_queue(&chan
->srej_q
, chan
->buffer_seq
);
4618 skb_unlink(skb
, &chan
->srej_q
);
4619 chan
->buffer_seq
= __next_seq(chan
, chan
->buffer_seq
);
4620 err
= l2cap_reassemble_sdu(chan
, skb
, &bt_cb(skb
)->control
);
4625 if (skb_queue_empty(&chan
->srej_q
)) {
4626 chan
->rx_state
= L2CAP_RX_STATE_RECV
;
4627 l2cap_send_ack(chan
);
4633 static void l2cap_handle_srej(struct l2cap_chan
*chan
,
4634 struct l2cap_ctrl
*control
)
4636 struct sk_buff
*skb
;
4638 BT_DBG("chan %p, control %p", chan
, control
);
4640 if (control
->reqseq
== chan
->next_tx_seq
) {
4641 BT_DBG("Invalid reqseq %d, disconnecting", control
->reqseq
);
4642 l2cap_send_disconn_req(chan
->conn
, chan
, ECONNRESET
);
4646 skb
= l2cap_ertm_seq_in_queue(&chan
->tx_q
, control
->reqseq
);
4649 BT_DBG("Seq %d not available for retransmission",
4654 if (chan
->max_tx
!= 0 && bt_cb(skb
)->control
.retries
>= chan
->max_tx
) {
4655 BT_DBG("Retry limit exceeded (%d)", chan
->max_tx
);
4656 l2cap_send_disconn_req(chan
->conn
, chan
, ECONNRESET
);
4660 clear_bit(CONN_REMOTE_BUSY
, &chan
->conn_state
);
4662 if (control
->poll
) {
4663 l2cap_pass_to_tx(chan
, control
);
4665 set_bit(CONN_SEND_FBIT
, &chan
->conn_state
);
4666 l2cap_retransmit(chan
, control
);
4667 l2cap_ertm_send(chan
);
4669 if (chan
->tx_state
== L2CAP_TX_STATE_WAIT_F
) {
4670 set_bit(CONN_SREJ_ACT
, &chan
->conn_state
);
4671 chan
->srej_save_reqseq
= control
->reqseq
;
4674 l2cap_pass_to_tx_fbit(chan
, control
);
4676 if (control
->final
) {
4677 if (chan
->srej_save_reqseq
!= control
->reqseq
||
4678 !test_and_clear_bit(CONN_SREJ_ACT
,
4680 l2cap_retransmit(chan
, control
);
4682 l2cap_retransmit(chan
, control
);
4683 if (chan
->tx_state
== L2CAP_TX_STATE_WAIT_F
) {
4684 set_bit(CONN_SREJ_ACT
, &chan
->conn_state
);
4685 chan
->srej_save_reqseq
= control
->reqseq
;
4691 static void l2cap_handle_rej(struct l2cap_chan
*chan
,
4692 struct l2cap_ctrl
*control
)
4694 struct sk_buff
*skb
;
4696 BT_DBG("chan %p, control %p", chan
, control
);
4698 if (control
->reqseq
== chan
->next_tx_seq
) {
4699 BT_DBG("Invalid reqseq %d, disconnecting", control
->reqseq
);
4700 l2cap_send_disconn_req(chan
->conn
, chan
, ECONNRESET
);
4704 skb
= l2cap_ertm_seq_in_queue(&chan
->tx_q
, control
->reqseq
);
4706 if (chan
->max_tx
&& skb
&&
4707 bt_cb(skb
)->control
.retries
>= chan
->max_tx
) {
4708 BT_DBG("Retry limit exceeded (%d)", chan
->max_tx
);
4709 l2cap_send_disconn_req(chan
->conn
, chan
, ECONNRESET
);
4713 clear_bit(CONN_REMOTE_BUSY
, &chan
->conn_state
);
4715 l2cap_pass_to_tx(chan
, control
);
4717 if (control
->final
) {
4718 if (!test_and_clear_bit(CONN_REJ_ACT
, &chan
->conn_state
))
4719 l2cap_retransmit_all(chan
, control
);
4721 l2cap_retransmit_all(chan
, control
);
4722 l2cap_ertm_send(chan
);
4723 if (chan
->tx_state
== L2CAP_TX_STATE_WAIT_F
)
4724 set_bit(CONN_REJ_ACT
, &chan
->conn_state
);
4728 static u8
l2cap_classify_txseq(struct l2cap_chan
*chan
, u16 txseq
)
4730 BT_DBG("chan %p, txseq %d", chan
, txseq
);
4732 BT_DBG("last_acked_seq %d, expected_tx_seq %d", chan
->last_acked_seq
,
4733 chan
->expected_tx_seq
);
4735 if (chan
->rx_state
== L2CAP_RX_STATE_SREJ_SENT
) {
4736 if (__seq_offset(chan
, txseq
, chan
->last_acked_seq
) >=
4738 /* See notes below regarding "double poll" and
4741 if (chan
->tx_win
<= ((chan
->tx_win_max
+ 1) >> 1)) {
4742 BT_DBG("Invalid/Ignore - after SREJ");
4743 return L2CAP_TXSEQ_INVALID_IGNORE
;
4745 BT_DBG("Invalid - in window after SREJ sent");
4746 return L2CAP_TXSEQ_INVALID
;
4750 if (chan
->srej_list
.head
== txseq
) {
4751 BT_DBG("Expected SREJ");
4752 return L2CAP_TXSEQ_EXPECTED_SREJ
;
4755 if (l2cap_ertm_seq_in_queue(&chan
->srej_q
, txseq
)) {
4756 BT_DBG("Duplicate SREJ - txseq already stored");
4757 return L2CAP_TXSEQ_DUPLICATE_SREJ
;
4760 if (l2cap_seq_list_contains(&chan
->srej_list
, txseq
)) {
4761 BT_DBG("Unexpected SREJ - not requested");
4762 return L2CAP_TXSEQ_UNEXPECTED_SREJ
;
4766 if (chan
->expected_tx_seq
== txseq
) {
4767 if (__seq_offset(chan
, txseq
, chan
->last_acked_seq
) >=
4769 BT_DBG("Invalid - txseq outside tx window");
4770 return L2CAP_TXSEQ_INVALID
;
4773 return L2CAP_TXSEQ_EXPECTED
;
4777 if (__seq_offset(chan
, txseq
, chan
->last_acked_seq
) <
4778 __seq_offset(chan
, chan
->expected_tx_seq
, chan
->last_acked_seq
)) {
4779 BT_DBG("Duplicate - expected_tx_seq later than txseq");
4780 return L2CAP_TXSEQ_DUPLICATE
;
4783 if (__seq_offset(chan
, txseq
, chan
->last_acked_seq
) >= chan
->tx_win
) {
4784 /* A source of invalid packets is a "double poll" condition,
4785 * where delays cause us to send multiple poll packets. If
4786 * the remote stack receives and processes both polls,
4787 * sequence numbers can wrap around in such a way that a
4788 * resent frame has a sequence number that looks like new data
4789 * with a sequence gap. This would trigger an erroneous SREJ
4792 * Fortunately, this is impossible with a tx window that's
4793 * less than half of the maximum sequence number, which allows
4794 * invalid frames to be safely ignored.
4796 * With tx window sizes greater than half of the tx window
4797 * maximum, the frame is invalid and cannot be ignored. This
4798 * causes a disconnect.
4801 if (chan
->tx_win
<= ((chan
->tx_win_max
+ 1) >> 1)) {
4802 BT_DBG("Invalid/Ignore - txseq outside tx window");
4803 return L2CAP_TXSEQ_INVALID_IGNORE
;
4805 BT_DBG("Invalid - txseq outside tx window");
4806 return L2CAP_TXSEQ_INVALID
;
4809 BT_DBG("Unexpected - txseq indicates missing frames");
4810 return L2CAP_TXSEQ_UNEXPECTED
;
4814 static int l2cap_rx_state_recv(struct l2cap_chan
*chan
,
4815 struct l2cap_ctrl
*control
,
4816 struct sk_buff
*skb
, u8 event
)
4819 bool skb_in_use
= 0;
4821 BT_DBG("chan %p, control %p, skb %p, event %d", chan
, control
, skb
,
4825 case L2CAP_EV_RECV_IFRAME
:
4826 switch (l2cap_classify_txseq(chan
, control
->txseq
)) {
4827 case L2CAP_TXSEQ_EXPECTED
:
4828 l2cap_pass_to_tx(chan
, control
);
4830 if (test_bit(CONN_LOCAL_BUSY
, &chan
->conn_state
)) {
4831 BT_DBG("Busy, discarding expected seq %d",
4836 chan
->expected_tx_seq
= __next_seq(chan
,
4839 chan
->buffer_seq
= chan
->expected_tx_seq
;
4842 err
= l2cap_reassemble_sdu(chan
, skb
, control
);
4846 if (control
->final
) {
4847 if (!test_and_clear_bit(CONN_REJ_ACT
,
4848 &chan
->conn_state
)) {
4850 l2cap_retransmit_all(chan
, control
);
4851 l2cap_ertm_send(chan
);
4855 if (!test_bit(CONN_LOCAL_BUSY
, &chan
->conn_state
))
4856 l2cap_send_ack(chan
);
4858 case L2CAP_TXSEQ_UNEXPECTED
:
4859 l2cap_pass_to_tx(chan
, control
);
4861 /* Can't issue SREJ frames in the local busy state.
4862 * Drop this frame, it will be seen as missing
4863 * when local busy is exited.
4865 if (test_bit(CONN_LOCAL_BUSY
, &chan
->conn_state
)) {
4866 BT_DBG("Busy, discarding unexpected seq %d",
4871 /* There was a gap in the sequence, so an SREJ
4872 * must be sent for each missing frame. The
4873 * current frame is stored for later use.
4875 skb_queue_tail(&chan
->srej_q
, skb
);
4877 BT_DBG("Queued %p (queue len %d)", skb
,
4878 skb_queue_len(&chan
->srej_q
));
4880 clear_bit(CONN_SREJ_ACT
, &chan
->conn_state
);
4881 l2cap_seq_list_clear(&chan
->srej_list
);
4882 l2cap_send_srej(chan
, control
->txseq
);
4884 chan
->rx_state
= L2CAP_RX_STATE_SREJ_SENT
;
4886 case L2CAP_TXSEQ_DUPLICATE
:
4887 l2cap_pass_to_tx(chan
, control
);
4889 case L2CAP_TXSEQ_INVALID_IGNORE
:
4891 case L2CAP_TXSEQ_INVALID
:
4893 l2cap_send_disconn_req(chan
->conn
, chan
,
4898 case L2CAP_EV_RECV_RR
:
4899 l2cap_pass_to_tx(chan
, control
);
4900 if (control
->final
) {
4901 clear_bit(CONN_REMOTE_BUSY
, &chan
->conn_state
);
4903 if (!test_and_clear_bit(CONN_REJ_ACT
,
4904 &chan
->conn_state
)) {
4906 l2cap_retransmit_all(chan
, control
);
4909 l2cap_ertm_send(chan
);
4910 } else if (control
->poll
) {
4911 l2cap_send_i_or_rr_or_rnr(chan
);
4913 if (test_and_clear_bit(CONN_REMOTE_BUSY
,
4914 &chan
->conn_state
) &&
4915 chan
->unacked_frames
)
4916 __set_retrans_timer(chan
);
4918 l2cap_ertm_send(chan
);
4921 case L2CAP_EV_RECV_RNR
:
4922 set_bit(CONN_REMOTE_BUSY
, &chan
->conn_state
);
4923 l2cap_pass_to_tx(chan
, control
);
4924 if (control
&& control
->poll
) {
4925 set_bit(CONN_SEND_FBIT
, &chan
->conn_state
);
4926 l2cap_send_rr_or_rnr(chan
, 0);
4928 __clear_retrans_timer(chan
);
4929 l2cap_seq_list_clear(&chan
->retrans_list
);
4931 case L2CAP_EV_RECV_REJ
:
4932 l2cap_handle_rej(chan
, control
);
4934 case L2CAP_EV_RECV_SREJ
:
4935 l2cap_handle_srej(chan
, control
);
4941 if (skb
&& !skb_in_use
) {
4942 BT_DBG("Freeing %p", skb
);
4949 static int l2cap_rx_state_srej_sent(struct l2cap_chan
*chan
,
4950 struct l2cap_ctrl
*control
,
4951 struct sk_buff
*skb
, u8 event
)
4954 u16 txseq
= control
->txseq
;
4955 bool skb_in_use
= 0;
4957 BT_DBG("chan %p, control %p, skb %p, event %d", chan
, control
, skb
,
4961 case L2CAP_EV_RECV_IFRAME
:
4962 switch (l2cap_classify_txseq(chan
, txseq
)) {
4963 case L2CAP_TXSEQ_EXPECTED
:
4964 /* Keep frame for reassembly later */
4965 l2cap_pass_to_tx(chan
, control
);
4966 skb_queue_tail(&chan
->srej_q
, skb
);
4968 BT_DBG("Queued %p (queue len %d)", skb
,
4969 skb_queue_len(&chan
->srej_q
));
4971 chan
->expected_tx_seq
= __next_seq(chan
, txseq
);
4973 case L2CAP_TXSEQ_EXPECTED_SREJ
:
4974 l2cap_seq_list_pop(&chan
->srej_list
);
4976 l2cap_pass_to_tx(chan
, control
);
4977 skb_queue_tail(&chan
->srej_q
, skb
);
4979 BT_DBG("Queued %p (queue len %d)", skb
,
4980 skb_queue_len(&chan
->srej_q
));
4982 err
= l2cap_rx_queued_iframes(chan
);
4987 case L2CAP_TXSEQ_UNEXPECTED
:
4988 /* Got a frame that can't be reassembled yet.
4989 * Save it for later, and send SREJs to cover
4990 * the missing frames.
4992 skb_queue_tail(&chan
->srej_q
, skb
);
4994 BT_DBG("Queued %p (queue len %d)", skb
,
4995 skb_queue_len(&chan
->srej_q
));
4997 l2cap_pass_to_tx(chan
, control
);
4998 l2cap_send_srej(chan
, control
->txseq
);
5000 case L2CAP_TXSEQ_UNEXPECTED_SREJ
:
5001 /* This frame was requested with an SREJ, but
5002 * some expected retransmitted frames are
5003 * missing. Request retransmission of missing
5006 skb_queue_tail(&chan
->srej_q
, skb
);
5008 BT_DBG("Queued %p (queue len %d)", skb
,
5009 skb_queue_len(&chan
->srej_q
));
5011 l2cap_pass_to_tx(chan
, control
);
5012 l2cap_send_srej_list(chan
, control
->txseq
);
5014 case L2CAP_TXSEQ_DUPLICATE_SREJ
:
5015 /* We've already queued this frame. Drop this copy. */
5016 l2cap_pass_to_tx(chan
, control
);
5018 case L2CAP_TXSEQ_DUPLICATE
:
5019 /* Expecting a later sequence number, so this frame
5020 * was already received. Ignore it completely.
5023 case L2CAP_TXSEQ_INVALID_IGNORE
:
5025 case L2CAP_TXSEQ_INVALID
:
5027 l2cap_send_disconn_req(chan
->conn
, chan
,
5032 case L2CAP_EV_RECV_RR
:
5033 l2cap_pass_to_tx(chan
, control
);
5034 if (control
->final
) {
5035 clear_bit(CONN_REMOTE_BUSY
, &chan
->conn_state
);
5037 if (!test_and_clear_bit(CONN_REJ_ACT
,
5038 &chan
->conn_state
)) {
5040 l2cap_retransmit_all(chan
, control
);
5043 l2cap_ertm_send(chan
);
5044 } else if (control
->poll
) {
5045 if (test_and_clear_bit(CONN_REMOTE_BUSY
,
5046 &chan
->conn_state
) &&
5047 chan
->unacked_frames
) {
5048 __set_retrans_timer(chan
);
5051 set_bit(CONN_SEND_FBIT
, &chan
->conn_state
);
5052 l2cap_send_srej_tail(chan
);
5054 if (test_and_clear_bit(CONN_REMOTE_BUSY
,
5055 &chan
->conn_state
) &&
5056 chan
->unacked_frames
)
5057 __set_retrans_timer(chan
);
5059 l2cap_send_ack(chan
);
5062 case L2CAP_EV_RECV_RNR
:
5063 set_bit(CONN_REMOTE_BUSY
, &chan
->conn_state
);
5064 l2cap_pass_to_tx(chan
, control
);
5065 if (control
->poll
) {
5066 l2cap_send_srej_tail(chan
);
5068 struct l2cap_ctrl rr_control
;
5069 memset(&rr_control
, 0, sizeof(rr_control
));
5070 rr_control
.sframe
= 1;
5071 rr_control
.super
= L2CAP_SUPER_RR
;
5072 rr_control
.reqseq
= chan
->buffer_seq
;
5073 l2cap_send_sframe(chan
, &rr_control
);
5077 case L2CAP_EV_RECV_REJ
:
5078 l2cap_handle_rej(chan
, control
);
5080 case L2CAP_EV_RECV_SREJ
:
5081 l2cap_handle_srej(chan
, control
);
5085 if (skb
&& !skb_in_use
) {
5086 BT_DBG("Freeing %p", skb
);
5093 static bool __valid_reqseq(struct l2cap_chan
*chan
, u16 reqseq
)
5095 /* Make sure reqseq is for a packet that has been sent but not acked */
5098 unacked
= __seq_offset(chan
, chan
->next_tx_seq
, chan
->expected_ack_seq
);
5099 return __seq_offset(chan
, chan
->next_tx_seq
, reqseq
) <= unacked
;
5102 static int l2cap_rx(struct l2cap_chan
*chan
, struct l2cap_ctrl
*control
,
5103 struct sk_buff
*skb
, u8 event
)
5107 BT_DBG("chan %p, control %p, skb %p, event %d, state %d", chan
,
5108 control
, skb
, event
, chan
->rx_state
);
5110 if (__valid_reqseq(chan
, control
->reqseq
)) {
5111 switch (chan
->rx_state
) {
5112 case L2CAP_RX_STATE_RECV
:
5113 err
= l2cap_rx_state_recv(chan
, control
, skb
, event
);
5115 case L2CAP_RX_STATE_SREJ_SENT
:
5116 err
= l2cap_rx_state_srej_sent(chan
, control
, skb
,
5124 BT_DBG("Invalid reqseq %d (next_tx_seq %d, expected_ack_seq %d",
5125 control
->reqseq
, chan
->next_tx_seq
,
5126 chan
->expected_ack_seq
);
5127 l2cap_send_disconn_req(chan
->conn
, chan
, ECONNRESET
);
5133 static int l2cap_stream_rx(struct l2cap_chan
*chan
, struct l2cap_ctrl
*control
,
5134 struct sk_buff
*skb
)
5138 BT_DBG("chan %p, control %p, skb %p, state %d", chan
, control
, skb
,
5141 if (l2cap_classify_txseq(chan
, control
->txseq
) ==
5142 L2CAP_TXSEQ_EXPECTED
) {
5143 l2cap_pass_to_tx(chan
, control
);
5145 BT_DBG("buffer_seq %d->%d", chan
->buffer_seq
,
5146 __next_seq(chan
, chan
->buffer_seq
));
5148 chan
->buffer_seq
= __next_seq(chan
, chan
->buffer_seq
);
5150 l2cap_reassemble_sdu(chan
, skb
, control
);
5153 kfree_skb(chan
->sdu
);
5156 chan
->sdu_last_frag
= NULL
;
5160 BT_DBG("Freeing %p", skb
);
5165 chan
->last_acked_seq
= control
->txseq
;
5166 chan
->expected_tx_seq
= __next_seq(chan
, control
->txseq
);
5171 static int l2cap_data_rcv(struct l2cap_chan
*chan
, struct sk_buff
*skb
)
5173 struct l2cap_ctrl
*control
= &bt_cb(skb
)->control
;
5177 __unpack_control(chan
, skb
);
5182 * We can just drop the corrupted I-frame here.
5183 * Receiver will miss it and start proper recovery
5184 * procedures and ask for retransmission.
5186 if (l2cap_check_fcs(chan
, skb
))
5189 if (!control
->sframe
&& control
->sar
== L2CAP_SAR_START
)
5190 len
-= L2CAP_SDULEN_SIZE
;
5192 if (chan
->fcs
== L2CAP_FCS_CRC16
)
5193 len
-= L2CAP_FCS_SIZE
;
5195 if (len
> chan
->mps
) {
5196 l2cap_send_disconn_req(chan
->conn
, chan
, ECONNRESET
);
5200 if (!control
->sframe
) {
5203 BT_DBG("iframe sar %d, reqseq %d, final %d, txseq %d",
5204 control
->sar
, control
->reqseq
, control
->final
,
5207 /* Validate F-bit - F=0 always valid, F=1 only
5208 * valid in TX WAIT_F
5210 if (control
->final
&& chan
->tx_state
!= L2CAP_TX_STATE_WAIT_F
)
5213 if (chan
->mode
!= L2CAP_MODE_STREAMING
) {
5214 event
= L2CAP_EV_RECV_IFRAME
;
5215 err
= l2cap_rx(chan
, control
, skb
, event
);
5217 err
= l2cap_stream_rx(chan
, control
, skb
);
5221 l2cap_send_disconn_req(chan
->conn
, chan
,
5224 const u8 rx_func_to_event
[4] = {
5225 L2CAP_EV_RECV_RR
, L2CAP_EV_RECV_REJ
,
5226 L2CAP_EV_RECV_RNR
, L2CAP_EV_RECV_SREJ
5229 /* Only I-frames are expected in streaming mode */
5230 if (chan
->mode
== L2CAP_MODE_STREAMING
)
5233 BT_DBG("sframe reqseq %d, final %d, poll %d, super %d",
5234 control
->reqseq
, control
->final
, control
->poll
,
5239 l2cap_send_disconn_req(chan
->conn
, chan
, ECONNRESET
);
5243 /* Validate F and P bits */
5244 if (control
->final
&& (control
->poll
||
5245 chan
->tx_state
!= L2CAP_TX_STATE_WAIT_F
))
5248 event
= rx_func_to_event
[control
->super
];
5249 if (l2cap_rx(chan
, control
, skb
, event
))
5250 l2cap_send_disconn_req(chan
->conn
, chan
, ECONNRESET
);
5260 static void l2cap_data_channel(struct l2cap_conn
*conn
, u16 cid
,
5261 struct sk_buff
*skb
)
5263 struct l2cap_chan
*chan
;
5265 chan
= l2cap_get_chan_by_scid(conn
, cid
);
5267 if (cid
== L2CAP_CID_A2MP
) {
5268 chan
= a2mp_channel_create(conn
, skb
);
5274 l2cap_chan_lock(chan
);
5276 BT_DBG("unknown cid 0x%4.4x", cid
);
5277 /* Drop packet and return */
5283 BT_DBG("chan %p, len %d", chan
, skb
->len
);
5285 if (chan
->state
!= BT_CONNECTED
)
5288 switch (chan
->mode
) {
5289 case L2CAP_MODE_BASIC
:
5290 /* If socket recv buffers overflows we drop data here
5291 * which is *bad* because L2CAP has to be reliable.
5292 * But we don't have any other choice. L2CAP doesn't
5293 * provide flow control mechanism. */
5295 if (chan
->imtu
< skb
->len
)
5298 if (!chan
->ops
->recv(chan
, skb
))
5302 case L2CAP_MODE_ERTM
:
5303 case L2CAP_MODE_STREAMING
:
5304 l2cap_data_rcv(chan
, skb
);
5308 BT_DBG("chan %p: bad mode 0x%2.2x", chan
, chan
->mode
);
5316 l2cap_chan_unlock(chan
);
5319 static void l2cap_conless_channel(struct l2cap_conn
*conn
, __le16 psm
,
5320 struct sk_buff
*skb
)
5322 struct l2cap_chan
*chan
;
5324 chan
= l2cap_global_chan_by_psm(0, psm
, conn
->src
, conn
->dst
);
5328 BT_DBG("chan %p, len %d", chan
, skb
->len
);
5330 if (chan
->state
!= BT_BOUND
&& chan
->state
!= BT_CONNECTED
)
5333 if (chan
->imtu
< skb
->len
)
5336 if (!chan
->ops
->recv(chan
, skb
))
5343 static void l2cap_att_channel(struct l2cap_conn
*conn
, u16 cid
,
5344 struct sk_buff
*skb
)
5346 struct l2cap_chan
*chan
;
5348 chan
= l2cap_global_chan_by_scid(0, cid
, conn
->src
, conn
->dst
);
5352 BT_DBG("chan %p, len %d", chan
, skb
->len
);
5354 if (chan
->state
!= BT_BOUND
&& chan
->state
!= BT_CONNECTED
)
5357 if (chan
->imtu
< skb
->len
)
5360 if (!chan
->ops
->recv(chan
, skb
))
5367 static void l2cap_recv_frame(struct l2cap_conn
*conn
, struct sk_buff
*skb
)
5369 struct l2cap_hdr
*lh
= (void *) skb
->data
;
5373 skb_pull(skb
, L2CAP_HDR_SIZE
);
5374 cid
= __le16_to_cpu(lh
->cid
);
5375 len
= __le16_to_cpu(lh
->len
);
5377 if (len
!= skb
->len
) {
5382 BT_DBG("len %d, cid 0x%4.4x", len
, cid
);
5385 case L2CAP_CID_LE_SIGNALING
:
5386 case L2CAP_CID_SIGNALING
:
5387 l2cap_sig_channel(conn
, skb
);
5390 case L2CAP_CID_CONN_LESS
:
5391 psm
= get_unaligned((__le16
*) skb
->data
);
5392 skb_pull(skb
, L2CAP_PSMLEN_SIZE
);
5393 l2cap_conless_channel(conn
, psm
, skb
);
5396 case L2CAP_CID_LE_DATA
:
5397 l2cap_att_channel(conn
, cid
, skb
);
5401 if (smp_sig_channel(conn
, skb
))
5402 l2cap_conn_del(conn
->hcon
, EACCES
);
5406 l2cap_data_channel(conn
, cid
, skb
);
5411 /* ---- L2CAP interface with lower layer (HCI) ---- */
5413 int l2cap_connect_ind(struct hci_dev
*hdev
, bdaddr_t
*bdaddr
)
5415 int exact
= 0, lm1
= 0, lm2
= 0;
5416 struct l2cap_chan
*c
;
5418 BT_DBG("hdev %s, bdaddr %pMR", hdev
->name
, bdaddr
);
5420 /* Find listening sockets and check their link_mode */
5421 read_lock(&chan_list_lock
);
5422 list_for_each_entry(c
, &chan_list
, global_l
) {
5423 struct sock
*sk
= c
->sk
;
5425 if (c
->state
!= BT_LISTEN
)
5428 if (!bacmp(&bt_sk(sk
)->src
, &hdev
->bdaddr
)) {
5429 lm1
|= HCI_LM_ACCEPT
;
5430 if (test_bit(FLAG_ROLE_SWITCH
, &c
->flags
))
5431 lm1
|= HCI_LM_MASTER
;
5433 } else if (!bacmp(&bt_sk(sk
)->src
, BDADDR_ANY
)) {
5434 lm2
|= HCI_LM_ACCEPT
;
5435 if (test_bit(FLAG_ROLE_SWITCH
, &c
->flags
))
5436 lm2
|= HCI_LM_MASTER
;
5439 read_unlock(&chan_list_lock
);
5441 return exact
? lm1
: lm2
;
5444 void l2cap_connect_cfm(struct hci_conn
*hcon
, u8 status
)
5446 struct l2cap_conn
*conn
;
5448 BT_DBG("hcon %p bdaddr %pMR status %d", hcon
, &hcon
->dst
, status
);
5451 conn
= l2cap_conn_add(hcon
, status
);
5453 l2cap_conn_ready(conn
);
5455 l2cap_conn_del(hcon
, bt_to_errno(status
));
5459 int l2cap_disconn_ind(struct hci_conn
*hcon
)
5461 struct l2cap_conn
*conn
= hcon
->l2cap_data
;
5463 BT_DBG("hcon %p", hcon
);
5466 return HCI_ERROR_REMOTE_USER_TERM
;
5467 return conn
->disc_reason
;
5470 void l2cap_disconn_cfm(struct hci_conn
*hcon
, u8 reason
)
5472 BT_DBG("hcon %p reason %d", hcon
, reason
);
5474 l2cap_conn_del(hcon
, bt_to_errno(reason
));
5477 static inline void l2cap_check_encryption(struct l2cap_chan
*chan
, u8 encrypt
)
5479 if (chan
->chan_type
!= L2CAP_CHAN_CONN_ORIENTED
)
5482 if (encrypt
== 0x00) {
5483 if (chan
->sec_level
== BT_SECURITY_MEDIUM
) {
5484 __set_chan_timer(chan
, L2CAP_ENC_TIMEOUT
);
5485 } else if (chan
->sec_level
== BT_SECURITY_HIGH
)
5486 l2cap_chan_close(chan
, ECONNREFUSED
);
5488 if (chan
->sec_level
== BT_SECURITY_MEDIUM
)
5489 __clear_chan_timer(chan
);
5493 int l2cap_security_cfm(struct hci_conn
*hcon
, u8 status
, u8 encrypt
)
5495 struct l2cap_conn
*conn
= hcon
->l2cap_data
;
5496 struct l2cap_chan
*chan
;
5501 BT_DBG("conn %p status 0x%2.2x encrypt %u", conn
, status
, encrypt
);
5503 if (hcon
->type
== LE_LINK
) {
5504 if (!status
&& encrypt
)
5505 smp_distribute_keys(conn
, 0);
5506 cancel_delayed_work(&conn
->security_timer
);
5509 mutex_lock(&conn
->chan_lock
);
5511 list_for_each_entry(chan
, &conn
->chan_l
, list
) {
5512 l2cap_chan_lock(chan
);
5514 BT_DBG("chan %p scid 0x%4.4x state %s", chan
, chan
->scid
,
5515 state_to_string(chan
->state
));
5517 if (chan
->chan_type
== L2CAP_CHAN_CONN_FIX_A2MP
) {
5518 l2cap_chan_unlock(chan
);
5522 if (chan
->scid
== L2CAP_CID_LE_DATA
) {
5523 if (!status
&& encrypt
) {
5524 chan
->sec_level
= hcon
->sec_level
;
5525 l2cap_chan_ready(chan
);
5528 l2cap_chan_unlock(chan
);
5532 if (test_bit(CONF_CONNECT_PEND
, &chan
->conf_state
)) {
5533 l2cap_chan_unlock(chan
);
5537 if (!status
&& (chan
->state
== BT_CONNECTED
||
5538 chan
->state
== BT_CONFIG
)) {
5539 struct sock
*sk
= chan
->sk
;
5541 clear_bit(BT_SK_SUSPEND
, &bt_sk(sk
)->flags
);
5542 sk
->sk_state_change(sk
);
5544 l2cap_check_encryption(chan
, encrypt
);
5545 l2cap_chan_unlock(chan
);
5549 if (chan
->state
== BT_CONNECT
) {
5551 l2cap_start_connection(chan
);
5553 __set_chan_timer(chan
, L2CAP_DISC_TIMEOUT
);
5555 } else if (chan
->state
== BT_CONNECT2
) {
5556 struct sock
*sk
= chan
->sk
;
5557 struct l2cap_conn_rsp rsp
;
5563 if (test_bit(BT_SK_DEFER_SETUP
,
5564 &bt_sk(sk
)->flags
)) {
5565 res
= L2CAP_CR_PEND
;
5566 stat
= L2CAP_CS_AUTHOR_PEND
;
5567 chan
->ops
->defer(chan
);
5569 __l2cap_state_change(chan
, BT_CONFIG
);
5570 res
= L2CAP_CR_SUCCESS
;
5571 stat
= L2CAP_CS_NO_INFO
;
5574 __l2cap_state_change(chan
, BT_DISCONN
);
5575 __set_chan_timer(chan
, L2CAP_DISC_TIMEOUT
);
5576 res
= L2CAP_CR_SEC_BLOCK
;
5577 stat
= L2CAP_CS_NO_INFO
;
5582 rsp
.scid
= cpu_to_le16(chan
->dcid
);
5583 rsp
.dcid
= cpu_to_le16(chan
->scid
);
5584 rsp
.result
= cpu_to_le16(res
);
5585 rsp
.status
= cpu_to_le16(stat
);
5586 l2cap_send_cmd(conn
, chan
->ident
, L2CAP_CONN_RSP
,
5589 if (!test_bit(CONF_REQ_SENT
, &chan
->conf_state
) &&
5590 res
== L2CAP_CR_SUCCESS
) {
5592 set_bit(CONF_REQ_SENT
, &chan
->conf_state
);
5593 l2cap_send_cmd(conn
, l2cap_get_ident(conn
),
5595 l2cap_build_conf_req(chan
, buf
),
5597 chan
->num_conf_req
++;
5601 l2cap_chan_unlock(chan
);
5604 mutex_unlock(&conn
->chan_lock
);
5609 int l2cap_recv_acldata(struct hci_conn
*hcon
, struct sk_buff
*skb
, u16 flags
)
5611 struct l2cap_conn
*conn
= hcon
->l2cap_data
;
5612 struct l2cap_hdr
*hdr
;
5615 /* For AMP controller do not create l2cap conn */
5616 if (!conn
&& hcon
->hdev
->dev_type
!= HCI_BREDR
)
5620 conn
= l2cap_conn_add(hcon
, 0);
5625 BT_DBG("conn %p len %d flags 0x%x", conn
, skb
->len
, flags
);
5629 case ACL_START_NO_FLUSH
:
5632 BT_ERR("Unexpected start frame (len %d)", skb
->len
);
5633 kfree_skb(conn
->rx_skb
);
5634 conn
->rx_skb
= NULL
;
5636 l2cap_conn_unreliable(conn
, ECOMM
);
5639 /* Start fragment always begin with Basic L2CAP header */
5640 if (skb
->len
< L2CAP_HDR_SIZE
) {
5641 BT_ERR("Frame is too short (len %d)", skb
->len
);
5642 l2cap_conn_unreliable(conn
, ECOMM
);
5646 hdr
= (struct l2cap_hdr
*) skb
->data
;
5647 len
= __le16_to_cpu(hdr
->len
) + L2CAP_HDR_SIZE
;
5649 if (len
== skb
->len
) {
5650 /* Complete frame received */
5651 l2cap_recv_frame(conn
, skb
);
5655 BT_DBG("Start: total len %d, frag len %d", len
, skb
->len
);
5657 if (skb
->len
> len
) {
5658 BT_ERR("Frame is too long (len %d, expected len %d)",
5660 l2cap_conn_unreliable(conn
, ECOMM
);
5664 /* Allocate skb for the complete frame (with header) */
5665 conn
->rx_skb
= bt_skb_alloc(len
, GFP_KERNEL
);
5669 skb_copy_from_linear_data(skb
, skb_put(conn
->rx_skb
, skb
->len
),
5671 conn
->rx_len
= len
- skb
->len
;
5675 BT_DBG("Cont: frag len %d (expecting %d)", skb
->len
, conn
->rx_len
);
5677 if (!conn
->rx_len
) {
5678 BT_ERR("Unexpected continuation frame (len %d)", skb
->len
);
5679 l2cap_conn_unreliable(conn
, ECOMM
);
5683 if (skb
->len
> conn
->rx_len
) {
5684 BT_ERR("Fragment is too long (len %d, expected %d)",
5685 skb
->len
, conn
->rx_len
);
5686 kfree_skb(conn
->rx_skb
);
5687 conn
->rx_skb
= NULL
;
5689 l2cap_conn_unreliable(conn
, ECOMM
);
5693 skb_copy_from_linear_data(skb
, skb_put(conn
->rx_skb
, skb
->len
),
5695 conn
->rx_len
-= skb
->len
;
5697 if (!conn
->rx_len
) {
5698 /* Complete frame received */
5699 l2cap_recv_frame(conn
, conn
->rx_skb
);
5700 conn
->rx_skb
= NULL
;
5710 static int l2cap_debugfs_show(struct seq_file
*f
, void *p
)
5712 struct l2cap_chan
*c
;
5714 read_lock(&chan_list_lock
);
5716 list_for_each_entry(c
, &chan_list
, global_l
) {
5717 struct sock
*sk
= c
->sk
;
5719 seq_printf(f
, "%pMR %pMR %d %d 0x%4.4x 0x%4.4x %d %d %d %d\n",
5720 &bt_sk(sk
)->src
, &bt_sk(sk
)->dst
,
5721 c
->state
, __le16_to_cpu(c
->psm
),
5722 c
->scid
, c
->dcid
, c
->imtu
, c
->omtu
,
5723 c
->sec_level
, c
->mode
);
5726 read_unlock(&chan_list_lock
);
5731 static int l2cap_debugfs_open(struct inode
*inode
, struct file
*file
)
5733 return single_open(file
, l2cap_debugfs_show
, inode
->i_private
);
5736 static const struct file_operations l2cap_debugfs_fops
= {
5737 .open
= l2cap_debugfs_open
,
5739 .llseek
= seq_lseek
,
5740 .release
= single_release
,
5743 static struct dentry
*l2cap_debugfs
;
5745 int __init
l2cap_init(void)
5749 err
= l2cap_init_sockets();
5754 l2cap_debugfs
= debugfs_create_file("l2cap", 0444, bt_debugfs
,
5755 NULL
, &l2cap_debugfs_fops
);
5757 BT_ERR("Failed to create L2CAP debug file");
5763 void l2cap_exit(void)
5765 debugfs_remove(l2cap_debugfs
);
5766 l2cap_cleanup_sockets();
5769 module_param(disable_ertm
, bool, 0644);
5770 MODULE_PARM_DESC(disable_ertm
, "Disable enhanced retransmission mode");