2 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (C) 2000-2001 Qualcomm Incorporated
4 Copyright (C) 2009-2010 Gustavo F. Padovan <gustavo@padovan.org>
5 Copyright (C) 2010 Google Inc.
6 Copyright (C) 2011 ProFUSION Embedded Systems
7 Copyright (c) 2012 Code Aurora Forum. All rights reserved.
9 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
11 This program is free software; you can redistribute it and/or modify
12 it under the terms of the GNU General Public License version 2 as
13 published by the Free Software Foundation;
15 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
16 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
18 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
19 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
20 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
21 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
22 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
24 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
25 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
26 SOFTWARE IS DISCLAIMED.
29 /* Bluetooth L2CAP core. */
31 #include <linux/module.h>
33 #include <linux/debugfs.h>
34 #include <linux/crc16.h>
36 #include <net/bluetooth/bluetooth.h>
37 #include <net/bluetooth/hci_core.h>
38 #include <net/bluetooth/l2cap.h>
39 #include <net/bluetooth/smp.h>
40 #include <net/bluetooth/a2mp.h>
44 static u32 l2cap_feat_mask
= L2CAP_FEAT_FIXED_CHAN
;
45 static u8 l2cap_fixed_chan
[8] = { L2CAP_FC_L2CAP
, };
47 static LIST_HEAD(chan_list
);
48 static DEFINE_RWLOCK(chan_list_lock
);
50 static struct sk_buff
*l2cap_build_cmd(struct l2cap_conn
*conn
,
51 u8 code
, u8 ident
, u16 dlen
, void *data
);
52 static void l2cap_send_cmd(struct l2cap_conn
*conn
, u8 ident
, u8 code
, u16 len
,
54 static int l2cap_build_conf_req(struct l2cap_chan
*chan
, void *data
);
55 static void l2cap_send_disconn_req(struct l2cap_conn
*conn
,
56 struct l2cap_chan
*chan
, int err
);
58 static void l2cap_tx(struct l2cap_chan
*chan
, struct l2cap_ctrl
*control
,
59 struct sk_buff_head
*skbs
, u8 event
);
61 /* ---- L2CAP channels ---- */
63 static struct l2cap_chan
*__l2cap_get_chan_by_dcid(struct l2cap_conn
*conn
, u16 cid
)
67 list_for_each_entry(c
, &conn
->chan_l
, list
) {
74 static struct l2cap_chan
*__l2cap_get_chan_by_scid(struct l2cap_conn
*conn
, u16 cid
)
78 list_for_each_entry(c
, &conn
->chan_l
, list
) {
85 /* Find channel with given SCID.
86 * Returns locked channel. */
87 static struct l2cap_chan
*l2cap_get_chan_by_scid(struct l2cap_conn
*conn
, u16 cid
)
91 mutex_lock(&conn
->chan_lock
);
92 c
= __l2cap_get_chan_by_scid(conn
, cid
);
95 mutex_unlock(&conn
->chan_lock
);
100 static struct l2cap_chan
*__l2cap_get_chan_by_ident(struct l2cap_conn
*conn
, u8 ident
)
102 struct l2cap_chan
*c
;
104 list_for_each_entry(c
, &conn
->chan_l
, list
) {
105 if (c
->ident
== ident
)
111 static struct l2cap_chan
*__l2cap_global_chan_by_addr(__le16 psm
, bdaddr_t
*src
)
113 struct l2cap_chan
*c
;
115 list_for_each_entry(c
, &chan_list
, global_l
) {
116 if (c
->sport
== psm
&& !bacmp(&bt_sk(c
->sk
)->src
, src
))
122 int l2cap_add_psm(struct l2cap_chan
*chan
, bdaddr_t
*src
, __le16 psm
)
126 write_lock(&chan_list_lock
);
128 if (psm
&& __l2cap_global_chan_by_addr(psm
, src
)) {
141 for (p
= 0x1001; p
< 0x1100; p
+= 2)
142 if (!__l2cap_global_chan_by_addr(cpu_to_le16(p
), src
)) {
143 chan
->psm
= cpu_to_le16(p
);
144 chan
->sport
= cpu_to_le16(p
);
151 write_unlock(&chan_list_lock
);
155 int l2cap_add_scid(struct l2cap_chan
*chan
, __u16 scid
)
157 write_lock(&chan_list_lock
);
161 write_unlock(&chan_list_lock
);
166 static u16
l2cap_alloc_cid(struct l2cap_conn
*conn
)
168 u16 cid
= L2CAP_CID_DYN_START
;
170 for (; cid
< L2CAP_CID_DYN_END
; cid
++) {
171 if (!__l2cap_get_chan_by_scid(conn
, cid
))
178 static void __l2cap_state_change(struct l2cap_chan
*chan
, int state
)
180 BT_DBG("chan %p %s -> %s", chan
, state_to_string(chan
->state
),
181 state_to_string(state
));
184 chan
->ops
->state_change(chan
, state
);
187 static void l2cap_state_change(struct l2cap_chan
*chan
, int state
)
189 struct sock
*sk
= chan
->sk
;
192 __l2cap_state_change(chan
, state
);
196 static inline void __l2cap_chan_set_err(struct l2cap_chan
*chan
, int err
)
198 struct sock
*sk
= chan
->sk
;
203 static inline void l2cap_chan_set_err(struct l2cap_chan
*chan
, int err
)
205 struct sock
*sk
= chan
->sk
;
208 __l2cap_chan_set_err(chan
, err
);
212 static void __set_retrans_timer(struct l2cap_chan
*chan
)
214 if (!delayed_work_pending(&chan
->monitor_timer
) &&
215 chan
->retrans_timeout
) {
216 l2cap_set_timer(chan
, &chan
->retrans_timer
,
217 msecs_to_jiffies(chan
->retrans_timeout
));
221 static void __set_monitor_timer(struct l2cap_chan
*chan
)
223 __clear_retrans_timer(chan
);
224 if (chan
->monitor_timeout
) {
225 l2cap_set_timer(chan
, &chan
->monitor_timer
,
226 msecs_to_jiffies(chan
->monitor_timeout
));
230 static struct sk_buff
*l2cap_ertm_seq_in_queue(struct sk_buff_head
*head
,
235 skb_queue_walk(head
, skb
) {
236 if (bt_cb(skb
)->control
.txseq
== seq
)
243 /* ---- L2CAP sequence number lists ---- */
245 /* For ERTM, ordered lists of sequence numbers must be tracked for
246 * SREJ requests that are received and for frames that are to be
247 * retransmitted. These seq_list functions implement a singly-linked
248 * list in an array, where membership in the list can also be checked
249 * in constant time. Items can also be added to the tail of the list
250 * and removed from the head in constant time, without further memory
254 static int l2cap_seq_list_init(struct l2cap_seq_list
*seq_list
, u16 size
)
256 size_t alloc_size
, i
;
258 /* Allocated size is a power of 2 to map sequence numbers
259 * (which may be up to 14 bits) in to a smaller array that is
260 * sized for the negotiated ERTM transmit windows.
262 alloc_size
= roundup_pow_of_two(size
);
264 seq_list
->list
= kmalloc(sizeof(u16
) * alloc_size
, GFP_KERNEL
);
268 seq_list
->mask
= alloc_size
- 1;
269 seq_list
->head
= L2CAP_SEQ_LIST_CLEAR
;
270 seq_list
->tail
= L2CAP_SEQ_LIST_CLEAR
;
271 for (i
= 0; i
< alloc_size
; i
++)
272 seq_list
->list
[i
] = L2CAP_SEQ_LIST_CLEAR
;
277 static inline void l2cap_seq_list_free(struct l2cap_seq_list
*seq_list
)
279 kfree(seq_list
->list
);
282 static inline bool l2cap_seq_list_contains(struct l2cap_seq_list
*seq_list
,
285 /* Constant-time check for list membership */
286 return seq_list
->list
[seq
& seq_list
->mask
] != L2CAP_SEQ_LIST_CLEAR
;
289 static u16
l2cap_seq_list_remove(struct l2cap_seq_list
*seq_list
, u16 seq
)
291 u16 mask
= seq_list
->mask
;
293 if (seq_list
->head
== L2CAP_SEQ_LIST_CLEAR
) {
294 /* In case someone tries to pop the head of an empty list */
295 return L2CAP_SEQ_LIST_CLEAR
;
296 } else if (seq_list
->head
== seq
) {
297 /* Head can be removed in constant time */
298 seq_list
->head
= seq_list
->list
[seq
& mask
];
299 seq_list
->list
[seq
& mask
] = L2CAP_SEQ_LIST_CLEAR
;
301 if (seq_list
->head
== L2CAP_SEQ_LIST_TAIL
) {
302 seq_list
->head
= L2CAP_SEQ_LIST_CLEAR
;
303 seq_list
->tail
= L2CAP_SEQ_LIST_CLEAR
;
306 /* Walk the list to find the sequence number */
307 u16 prev
= seq_list
->head
;
308 while (seq_list
->list
[prev
& mask
] != seq
) {
309 prev
= seq_list
->list
[prev
& mask
];
310 if (prev
== L2CAP_SEQ_LIST_TAIL
)
311 return L2CAP_SEQ_LIST_CLEAR
;
314 /* Unlink the number from the list and clear it */
315 seq_list
->list
[prev
& mask
] = seq_list
->list
[seq
& mask
];
316 seq_list
->list
[seq
& mask
] = L2CAP_SEQ_LIST_CLEAR
;
317 if (seq_list
->tail
== seq
)
318 seq_list
->tail
= prev
;
323 static inline u16
l2cap_seq_list_pop(struct l2cap_seq_list
*seq_list
)
325 /* Remove the head in constant time */
326 return l2cap_seq_list_remove(seq_list
, seq_list
->head
);
329 static void l2cap_seq_list_clear(struct l2cap_seq_list
*seq_list
)
333 if (seq_list
->head
== L2CAP_SEQ_LIST_CLEAR
)
336 for (i
= 0; i
<= seq_list
->mask
; i
++)
337 seq_list
->list
[i
] = L2CAP_SEQ_LIST_CLEAR
;
339 seq_list
->head
= L2CAP_SEQ_LIST_CLEAR
;
340 seq_list
->tail
= L2CAP_SEQ_LIST_CLEAR
;
343 static void l2cap_seq_list_append(struct l2cap_seq_list
*seq_list
, u16 seq
)
345 u16 mask
= seq_list
->mask
;
347 /* All appends happen in constant time */
349 if (seq_list
->list
[seq
& mask
] != L2CAP_SEQ_LIST_CLEAR
)
352 if (seq_list
->tail
== L2CAP_SEQ_LIST_CLEAR
)
353 seq_list
->head
= seq
;
355 seq_list
->list
[seq_list
->tail
& mask
] = seq
;
357 seq_list
->tail
= seq
;
358 seq_list
->list
[seq
& mask
] = L2CAP_SEQ_LIST_TAIL
;
361 static void l2cap_chan_timeout(struct work_struct
*work
)
363 struct l2cap_chan
*chan
= container_of(work
, struct l2cap_chan
,
365 struct l2cap_conn
*conn
= chan
->conn
;
368 BT_DBG("chan %p state %s", chan
, state_to_string(chan
->state
));
370 mutex_lock(&conn
->chan_lock
);
371 l2cap_chan_lock(chan
);
373 if (chan
->state
== BT_CONNECTED
|| chan
->state
== BT_CONFIG
)
374 reason
= ECONNREFUSED
;
375 else if (chan
->state
== BT_CONNECT
&&
376 chan
->sec_level
!= BT_SECURITY_SDP
)
377 reason
= ECONNREFUSED
;
381 l2cap_chan_close(chan
, reason
);
383 l2cap_chan_unlock(chan
);
385 chan
->ops
->close(chan
);
386 mutex_unlock(&conn
->chan_lock
);
388 l2cap_chan_put(chan
);
391 struct l2cap_chan
*l2cap_chan_create(void)
393 struct l2cap_chan
*chan
;
395 chan
= kzalloc(sizeof(*chan
), GFP_ATOMIC
);
399 mutex_init(&chan
->lock
);
401 write_lock(&chan_list_lock
);
402 list_add(&chan
->global_l
, &chan_list
);
403 write_unlock(&chan_list_lock
);
405 INIT_DELAYED_WORK(&chan
->chan_timer
, l2cap_chan_timeout
);
407 chan
->state
= BT_OPEN
;
409 kref_init(&chan
->kref
);
411 /* This flag is cleared in l2cap_chan_ready() */
412 set_bit(CONF_NOT_COMPLETE
, &chan
->conf_state
);
414 BT_DBG("chan %p", chan
);
419 static void l2cap_chan_destroy(struct kref
*kref
)
421 struct l2cap_chan
*chan
= container_of(kref
, struct l2cap_chan
, kref
);
423 BT_DBG("chan %p", chan
);
425 write_lock(&chan_list_lock
);
426 list_del(&chan
->global_l
);
427 write_unlock(&chan_list_lock
);
432 void l2cap_chan_hold(struct l2cap_chan
*c
)
434 BT_DBG("chan %p orig refcnt %d", c
, atomic_read(&c
->kref
.refcount
));
439 void l2cap_chan_put(struct l2cap_chan
*c
)
441 BT_DBG("chan %p orig refcnt %d", c
, atomic_read(&c
->kref
.refcount
));
443 kref_put(&c
->kref
, l2cap_chan_destroy
);
446 void l2cap_chan_set_defaults(struct l2cap_chan
*chan
)
448 chan
->fcs
= L2CAP_FCS_CRC16
;
449 chan
->max_tx
= L2CAP_DEFAULT_MAX_TX
;
450 chan
->tx_win
= L2CAP_DEFAULT_TX_WINDOW
;
451 chan
->tx_win_max
= L2CAP_DEFAULT_TX_WINDOW
;
452 chan
->ack_win
= L2CAP_DEFAULT_TX_WINDOW
;
453 chan
->sec_level
= BT_SECURITY_LOW
;
455 set_bit(FLAG_FORCE_ACTIVE
, &chan
->flags
);
458 void __l2cap_chan_add(struct l2cap_conn
*conn
, struct l2cap_chan
*chan
)
460 BT_DBG("conn %p, psm 0x%2.2x, dcid 0x%4.4x", conn
,
461 __le16_to_cpu(chan
->psm
), chan
->dcid
);
463 conn
->disc_reason
= HCI_ERROR_REMOTE_USER_TERM
;
467 switch (chan
->chan_type
) {
468 case L2CAP_CHAN_CONN_ORIENTED
:
469 if (conn
->hcon
->type
== LE_LINK
) {
471 chan
->omtu
= L2CAP_DEFAULT_MTU
;
472 chan
->scid
= L2CAP_CID_LE_DATA
;
473 chan
->dcid
= L2CAP_CID_LE_DATA
;
475 /* Alloc CID for connection-oriented socket */
476 chan
->scid
= l2cap_alloc_cid(conn
);
477 chan
->omtu
= L2CAP_DEFAULT_MTU
;
481 case L2CAP_CHAN_CONN_LESS
:
482 /* Connectionless socket */
483 chan
->scid
= L2CAP_CID_CONN_LESS
;
484 chan
->dcid
= L2CAP_CID_CONN_LESS
;
485 chan
->omtu
= L2CAP_DEFAULT_MTU
;
488 case L2CAP_CHAN_CONN_FIX_A2MP
:
489 chan
->scid
= L2CAP_CID_A2MP
;
490 chan
->dcid
= L2CAP_CID_A2MP
;
491 chan
->omtu
= L2CAP_A2MP_DEFAULT_MTU
;
492 chan
->imtu
= L2CAP_A2MP_DEFAULT_MTU
;
496 /* Raw socket can send/recv signalling messages only */
497 chan
->scid
= L2CAP_CID_SIGNALING
;
498 chan
->dcid
= L2CAP_CID_SIGNALING
;
499 chan
->omtu
= L2CAP_DEFAULT_MTU
;
502 chan
->local_id
= L2CAP_BESTEFFORT_ID
;
503 chan
->local_stype
= L2CAP_SERV_BESTEFFORT
;
504 chan
->local_msdu
= L2CAP_DEFAULT_MAX_SDU_SIZE
;
505 chan
->local_sdu_itime
= L2CAP_DEFAULT_SDU_ITIME
;
506 chan
->local_acc_lat
= L2CAP_DEFAULT_ACC_LAT
;
507 chan
->local_flush_to
= L2CAP_DEFAULT_FLUSH_TO
;
509 l2cap_chan_hold(chan
);
511 list_add(&chan
->list
, &conn
->chan_l
);
514 void l2cap_chan_add(struct l2cap_conn
*conn
, struct l2cap_chan
*chan
)
516 mutex_lock(&conn
->chan_lock
);
517 __l2cap_chan_add(conn
, chan
);
518 mutex_unlock(&conn
->chan_lock
);
521 void l2cap_chan_del(struct l2cap_chan
*chan
, int err
)
523 struct l2cap_conn
*conn
= chan
->conn
;
525 __clear_chan_timer(chan
);
527 BT_DBG("chan %p, conn %p, err %d", chan
, conn
, err
);
530 /* Delete from channel list */
531 list_del(&chan
->list
);
533 l2cap_chan_put(chan
);
537 if (chan
->chan_type
!= L2CAP_CHAN_CONN_FIX_A2MP
)
538 hci_conn_put(conn
->hcon
);
541 if (chan
->ops
->teardown
)
542 chan
->ops
->teardown(chan
, err
);
544 if (test_bit(CONF_NOT_COMPLETE
, &chan
->conf_state
))
548 case L2CAP_MODE_BASIC
:
551 case L2CAP_MODE_ERTM
:
552 __clear_retrans_timer(chan
);
553 __clear_monitor_timer(chan
);
554 __clear_ack_timer(chan
);
556 skb_queue_purge(&chan
->srej_q
);
558 l2cap_seq_list_free(&chan
->srej_list
);
559 l2cap_seq_list_free(&chan
->retrans_list
);
563 case L2CAP_MODE_STREAMING
:
564 skb_queue_purge(&chan
->tx_q
);
571 void l2cap_chan_close(struct l2cap_chan
*chan
, int reason
)
573 struct l2cap_conn
*conn
= chan
->conn
;
574 struct sock
*sk
= chan
->sk
;
576 BT_DBG("chan %p state %s sk %p", chan
,
577 state_to_string(chan
->state
), sk
);
579 switch (chan
->state
) {
581 if (chan
->ops
->teardown
)
582 chan
->ops
->teardown(chan
, 0);
587 if (chan
->chan_type
== L2CAP_CHAN_CONN_ORIENTED
&&
588 conn
->hcon
->type
== ACL_LINK
) {
589 __set_chan_timer(chan
, sk
->sk_sndtimeo
);
590 l2cap_send_disconn_req(conn
, chan
, reason
);
592 l2cap_chan_del(chan
, reason
);
596 if (chan
->chan_type
== L2CAP_CHAN_CONN_ORIENTED
&&
597 conn
->hcon
->type
== ACL_LINK
) {
598 struct l2cap_conn_rsp rsp
;
601 if (test_bit(BT_SK_DEFER_SETUP
, &bt_sk(sk
)->flags
))
602 result
= L2CAP_CR_SEC_BLOCK
;
604 result
= L2CAP_CR_BAD_PSM
;
605 l2cap_state_change(chan
, BT_DISCONN
);
607 rsp
.scid
= cpu_to_le16(chan
->dcid
);
608 rsp
.dcid
= cpu_to_le16(chan
->scid
);
609 rsp
.result
= cpu_to_le16(result
);
610 rsp
.status
= __constant_cpu_to_le16(L2CAP_CS_NO_INFO
);
611 l2cap_send_cmd(conn
, chan
->ident
, L2CAP_CONN_RSP
,
615 l2cap_chan_del(chan
, reason
);
620 l2cap_chan_del(chan
, reason
);
624 if (chan
->ops
->teardown
)
625 chan
->ops
->teardown(chan
, 0);
630 static inline u8
l2cap_get_auth_type(struct l2cap_chan
*chan
)
632 if (chan
->chan_type
== L2CAP_CHAN_RAW
) {
633 switch (chan
->sec_level
) {
634 case BT_SECURITY_HIGH
:
635 return HCI_AT_DEDICATED_BONDING_MITM
;
636 case BT_SECURITY_MEDIUM
:
637 return HCI_AT_DEDICATED_BONDING
;
639 return HCI_AT_NO_BONDING
;
641 } else if (chan
->psm
== __constant_cpu_to_le16(L2CAP_PSM_SDP
)) {
642 if (chan
->sec_level
== BT_SECURITY_LOW
)
643 chan
->sec_level
= BT_SECURITY_SDP
;
645 if (chan
->sec_level
== BT_SECURITY_HIGH
)
646 return HCI_AT_NO_BONDING_MITM
;
648 return HCI_AT_NO_BONDING
;
650 switch (chan
->sec_level
) {
651 case BT_SECURITY_HIGH
:
652 return HCI_AT_GENERAL_BONDING_MITM
;
653 case BT_SECURITY_MEDIUM
:
654 return HCI_AT_GENERAL_BONDING
;
656 return HCI_AT_NO_BONDING
;
661 /* Service level security */
662 int l2cap_chan_check_security(struct l2cap_chan
*chan
)
664 struct l2cap_conn
*conn
= chan
->conn
;
667 auth_type
= l2cap_get_auth_type(chan
);
669 return hci_conn_security(conn
->hcon
, chan
->sec_level
, auth_type
);
672 static u8
l2cap_get_ident(struct l2cap_conn
*conn
)
676 /* Get next available identificator.
677 * 1 - 128 are used by kernel.
678 * 129 - 199 are reserved.
679 * 200 - 254 are used by utilities like l2ping, etc.
682 spin_lock(&conn
->lock
);
684 if (++conn
->tx_ident
> 128)
689 spin_unlock(&conn
->lock
);
694 static void l2cap_send_cmd(struct l2cap_conn
*conn
, u8 ident
, u8 code
, u16 len
, void *data
)
696 struct sk_buff
*skb
= l2cap_build_cmd(conn
, code
, ident
, len
, data
);
699 BT_DBG("code 0x%2.2x", code
);
704 if (lmp_no_flush_capable(conn
->hcon
->hdev
))
705 flags
= ACL_START_NO_FLUSH
;
709 bt_cb(skb
)->force_active
= BT_POWER_FORCE_ACTIVE_ON
;
710 skb
->priority
= HCI_PRIO_MAX
;
712 hci_send_acl(conn
->hchan
, skb
, flags
);
715 static void l2cap_do_send(struct l2cap_chan
*chan
, struct sk_buff
*skb
)
717 struct hci_conn
*hcon
= chan
->conn
->hcon
;
720 BT_DBG("chan %p, skb %p len %d priority %u", chan
, skb
, skb
->len
,
723 if (!test_bit(FLAG_FLUSHABLE
, &chan
->flags
) &&
724 lmp_no_flush_capable(hcon
->hdev
))
725 flags
= ACL_START_NO_FLUSH
;
729 bt_cb(skb
)->force_active
= test_bit(FLAG_FORCE_ACTIVE
, &chan
->flags
);
730 hci_send_acl(chan
->conn
->hchan
, skb
, flags
);
733 static void __unpack_enhanced_control(u16 enh
, struct l2cap_ctrl
*control
)
735 control
->reqseq
= (enh
& L2CAP_CTRL_REQSEQ
) >> L2CAP_CTRL_REQSEQ_SHIFT
;
736 control
->final
= (enh
& L2CAP_CTRL_FINAL
) >> L2CAP_CTRL_FINAL_SHIFT
;
738 if (enh
& L2CAP_CTRL_FRAME_TYPE
) {
741 control
->poll
= (enh
& L2CAP_CTRL_POLL
) >> L2CAP_CTRL_POLL_SHIFT
;
742 control
->super
= (enh
& L2CAP_CTRL_SUPERVISE
) >> L2CAP_CTRL_SUPER_SHIFT
;
749 control
->sar
= (enh
& L2CAP_CTRL_SAR
) >> L2CAP_CTRL_SAR_SHIFT
;
750 control
->txseq
= (enh
& L2CAP_CTRL_TXSEQ
) >> L2CAP_CTRL_TXSEQ_SHIFT
;
757 static void __unpack_extended_control(u32 ext
, struct l2cap_ctrl
*control
)
759 control
->reqseq
= (ext
& L2CAP_EXT_CTRL_REQSEQ
) >> L2CAP_EXT_CTRL_REQSEQ_SHIFT
;
760 control
->final
= (ext
& L2CAP_EXT_CTRL_FINAL
) >> L2CAP_EXT_CTRL_FINAL_SHIFT
;
762 if (ext
& L2CAP_EXT_CTRL_FRAME_TYPE
) {
765 control
->poll
= (ext
& L2CAP_EXT_CTRL_POLL
) >> L2CAP_EXT_CTRL_POLL_SHIFT
;
766 control
->super
= (ext
& L2CAP_EXT_CTRL_SUPERVISE
) >> L2CAP_EXT_CTRL_SUPER_SHIFT
;
773 control
->sar
= (ext
& L2CAP_EXT_CTRL_SAR
) >> L2CAP_EXT_CTRL_SAR_SHIFT
;
774 control
->txseq
= (ext
& L2CAP_EXT_CTRL_TXSEQ
) >> L2CAP_EXT_CTRL_TXSEQ_SHIFT
;
781 static inline void __unpack_control(struct l2cap_chan
*chan
,
784 if (test_bit(FLAG_EXT_CTRL
, &chan
->flags
)) {
785 __unpack_extended_control(get_unaligned_le32(skb
->data
),
786 &bt_cb(skb
)->control
);
787 skb_pull(skb
, L2CAP_EXT_CTRL_SIZE
);
789 __unpack_enhanced_control(get_unaligned_le16(skb
->data
),
790 &bt_cb(skb
)->control
);
791 skb_pull(skb
, L2CAP_ENH_CTRL_SIZE
);
795 static u32
__pack_extended_control(struct l2cap_ctrl
*control
)
799 packed
= control
->reqseq
<< L2CAP_EXT_CTRL_REQSEQ_SHIFT
;
800 packed
|= control
->final
<< L2CAP_EXT_CTRL_FINAL_SHIFT
;
802 if (control
->sframe
) {
803 packed
|= control
->poll
<< L2CAP_EXT_CTRL_POLL_SHIFT
;
804 packed
|= control
->super
<< L2CAP_EXT_CTRL_SUPER_SHIFT
;
805 packed
|= L2CAP_EXT_CTRL_FRAME_TYPE
;
807 packed
|= control
->sar
<< L2CAP_EXT_CTRL_SAR_SHIFT
;
808 packed
|= control
->txseq
<< L2CAP_EXT_CTRL_TXSEQ_SHIFT
;
814 static u16
__pack_enhanced_control(struct l2cap_ctrl
*control
)
818 packed
= control
->reqseq
<< L2CAP_CTRL_REQSEQ_SHIFT
;
819 packed
|= control
->final
<< L2CAP_CTRL_FINAL_SHIFT
;
821 if (control
->sframe
) {
822 packed
|= control
->poll
<< L2CAP_CTRL_POLL_SHIFT
;
823 packed
|= control
->super
<< L2CAP_CTRL_SUPER_SHIFT
;
824 packed
|= L2CAP_CTRL_FRAME_TYPE
;
826 packed
|= control
->sar
<< L2CAP_CTRL_SAR_SHIFT
;
827 packed
|= control
->txseq
<< L2CAP_CTRL_TXSEQ_SHIFT
;
833 static inline void __pack_control(struct l2cap_chan
*chan
,
834 struct l2cap_ctrl
*control
,
837 if (test_bit(FLAG_EXT_CTRL
, &chan
->flags
)) {
838 put_unaligned_le32(__pack_extended_control(control
),
839 skb
->data
+ L2CAP_HDR_SIZE
);
841 put_unaligned_le16(__pack_enhanced_control(control
),
842 skb
->data
+ L2CAP_HDR_SIZE
);
846 static inline unsigned int __ertm_hdr_size(struct l2cap_chan
*chan
)
848 if (test_bit(FLAG_EXT_CTRL
, &chan
->flags
))
849 return L2CAP_EXT_HDR_SIZE
;
851 return L2CAP_ENH_HDR_SIZE
;
854 static struct sk_buff
*l2cap_create_sframe_pdu(struct l2cap_chan
*chan
,
858 struct l2cap_hdr
*lh
;
859 int hlen
= __ertm_hdr_size(chan
);
861 if (chan
->fcs
== L2CAP_FCS_CRC16
)
862 hlen
+= L2CAP_FCS_SIZE
;
864 skb
= bt_skb_alloc(hlen
, GFP_KERNEL
);
867 return ERR_PTR(-ENOMEM
);
869 lh
= (struct l2cap_hdr
*) skb_put(skb
, L2CAP_HDR_SIZE
);
870 lh
->len
= cpu_to_le16(hlen
- L2CAP_HDR_SIZE
);
871 lh
->cid
= cpu_to_le16(chan
->dcid
);
873 if (test_bit(FLAG_EXT_CTRL
, &chan
->flags
))
874 put_unaligned_le32(control
, skb_put(skb
, L2CAP_EXT_CTRL_SIZE
));
876 put_unaligned_le16(control
, skb_put(skb
, L2CAP_ENH_CTRL_SIZE
));
878 if (chan
->fcs
== L2CAP_FCS_CRC16
) {
879 u16 fcs
= crc16(0, (u8
*)skb
->data
, skb
->len
);
880 put_unaligned_le16(fcs
, skb_put(skb
, L2CAP_FCS_SIZE
));
883 skb
->priority
= HCI_PRIO_MAX
;
887 static void l2cap_send_sframe(struct l2cap_chan
*chan
,
888 struct l2cap_ctrl
*control
)
893 BT_DBG("chan %p, control %p", chan
, control
);
895 if (!control
->sframe
)
898 if (test_and_clear_bit(CONN_SEND_FBIT
, &chan
->conn_state
) &&
902 if (control
->super
== L2CAP_SUPER_RR
)
903 clear_bit(CONN_RNR_SENT
, &chan
->conn_state
);
904 else if (control
->super
== L2CAP_SUPER_RNR
)
905 set_bit(CONN_RNR_SENT
, &chan
->conn_state
);
907 if (control
->super
!= L2CAP_SUPER_SREJ
) {
908 chan
->last_acked_seq
= control
->reqseq
;
909 __clear_ack_timer(chan
);
912 BT_DBG("reqseq %d, final %d, poll %d, super %d", control
->reqseq
,
913 control
->final
, control
->poll
, control
->super
);
915 if (test_bit(FLAG_EXT_CTRL
, &chan
->flags
))
916 control_field
= __pack_extended_control(control
);
918 control_field
= __pack_enhanced_control(control
);
920 skb
= l2cap_create_sframe_pdu(chan
, control_field
);
922 l2cap_do_send(chan
, skb
);
925 static void l2cap_send_rr_or_rnr(struct l2cap_chan
*chan
, bool poll
)
927 struct l2cap_ctrl control
;
929 BT_DBG("chan %p, poll %d", chan
, poll
);
931 memset(&control
, 0, sizeof(control
));
935 if (test_bit(CONN_LOCAL_BUSY
, &chan
->conn_state
))
936 control
.super
= L2CAP_SUPER_RNR
;
938 control
.super
= L2CAP_SUPER_RR
;
940 control
.reqseq
= chan
->buffer_seq
;
941 l2cap_send_sframe(chan
, &control
);
944 static inline int __l2cap_no_conn_pending(struct l2cap_chan
*chan
)
946 return !test_bit(CONF_CONNECT_PEND
, &chan
->conf_state
);
949 static bool __amp_capable(struct l2cap_chan
*chan
)
951 struct l2cap_conn
*conn
= chan
->conn
;
954 chan
->chan_policy
== BT_CHANNEL_POLICY_AMP_PREFERRED
&&
955 conn
->fixed_chan_mask
& L2CAP_FC_A2MP
)
961 void l2cap_send_conn_req(struct l2cap_chan
*chan
)
963 struct l2cap_conn
*conn
= chan
->conn
;
964 struct l2cap_conn_req req
;
966 req
.scid
= cpu_to_le16(chan
->scid
);
969 chan
->ident
= l2cap_get_ident(conn
);
971 set_bit(CONF_CONNECT_PEND
, &chan
->conf_state
);
973 l2cap_send_cmd(conn
, chan
->ident
, L2CAP_CONN_REQ
, sizeof(req
), &req
);
976 static void l2cap_chan_ready(struct l2cap_chan
*chan
)
978 /* This clears all conf flags, including CONF_NOT_COMPLETE */
979 chan
->conf_state
= 0;
980 __clear_chan_timer(chan
);
982 chan
->state
= BT_CONNECTED
;
984 chan
->ops
->ready(chan
);
/* Begin channel establishment: AMP-capable channels first discover
 * available AMP controllers, everything else sends a plain L2CAP
 * Connection Request.
 */
static void l2cap_start_connection(struct l2cap_chan *chan)
{
	if (__amp_capable(chan)) {
		BT_DBG("chan %p AMP capable: discover AMPs", chan);
		a2mp_discover_amp(chan);
	} else {
		l2cap_send_conn_req(chan);
	}
}
997 static void l2cap_do_start(struct l2cap_chan
*chan
)
999 struct l2cap_conn
*conn
= chan
->conn
;
1001 if (conn
->hcon
->type
== LE_LINK
) {
1002 l2cap_chan_ready(chan
);
1006 if (conn
->info_state
& L2CAP_INFO_FEAT_MASK_REQ_SENT
) {
1007 if (!(conn
->info_state
& L2CAP_INFO_FEAT_MASK_REQ_DONE
))
1010 if (l2cap_chan_check_security(chan
) &&
1011 __l2cap_no_conn_pending(chan
)) {
1012 l2cap_start_connection(chan
);
1015 struct l2cap_info_req req
;
1016 req
.type
= __constant_cpu_to_le16(L2CAP_IT_FEAT_MASK
);
1018 conn
->info_state
|= L2CAP_INFO_FEAT_MASK_REQ_SENT
;
1019 conn
->info_ident
= l2cap_get_ident(conn
);
1021 schedule_delayed_work(&conn
->info_timer
, L2CAP_INFO_TIMEOUT
);
1023 l2cap_send_cmd(conn
, conn
->info_ident
,
1024 L2CAP_INFO_REQ
, sizeof(req
), &req
);
1028 static inline int l2cap_mode_supported(__u8 mode
, __u32 feat_mask
)
1030 u32 local_feat_mask
= l2cap_feat_mask
;
1032 local_feat_mask
|= L2CAP_FEAT_ERTM
| L2CAP_FEAT_STREAMING
;
1035 case L2CAP_MODE_ERTM
:
1036 return L2CAP_FEAT_ERTM
& feat_mask
& local_feat_mask
;
1037 case L2CAP_MODE_STREAMING
:
1038 return L2CAP_FEAT_STREAMING
& feat_mask
& local_feat_mask
;
1044 static void l2cap_send_disconn_req(struct l2cap_conn
*conn
, struct l2cap_chan
*chan
, int err
)
1046 struct sock
*sk
= chan
->sk
;
1047 struct l2cap_disconn_req req
;
1052 if (chan
->mode
== L2CAP_MODE_ERTM
) {
1053 __clear_retrans_timer(chan
);
1054 __clear_monitor_timer(chan
);
1055 __clear_ack_timer(chan
);
1058 if (chan
->chan_type
== L2CAP_CHAN_CONN_FIX_A2MP
) {
1059 __l2cap_state_change(chan
, BT_DISCONN
);
1063 req
.dcid
= cpu_to_le16(chan
->dcid
);
1064 req
.scid
= cpu_to_le16(chan
->scid
);
1065 l2cap_send_cmd(conn
, l2cap_get_ident(conn
),
1066 L2CAP_DISCONN_REQ
, sizeof(req
), &req
);
1069 __l2cap_state_change(chan
, BT_DISCONN
);
1070 __l2cap_chan_set_err(chan
, err
);
1074 /* ---- L2CAP connections ---- */
1075 static void l2cap_conn_start(struct l2cap_conn
*conn
)
1077 struct l2cap_chan
*chan
, *tmp
;
1079 BT_DBG("conn %p", conn
);
1081 mutex_lock(&conn
->chan_lock
);
1083 list_for_each_entry_safe(chan
, tmp
, &conn
->chan_l
, list
) {
1084 struct sock
*sk
= chan
->sk
;
1086 l2cap_chan_lock(chan
);
1088 if (chan
->chan_type
!= L2CAP_CHAN_CONN_ORIENTED
) {
1089 l2cap_chan_unlock(chan
);
1093 if (chan
->state
== BT_CONNECT
) {
1094 if (!l2cap_chan_check_security(chan
) ||
1095 !__l2cap_no_conn_pending(chan
)) {
1096 l2cap_chan_unlock(chan
);
1100 if (!l2cap_mode_supported(chan
->mode
, conn
->feat_mask
)
1101 && test_bit(CONF_STATE2_DEVICE
,
1102 &chan
->conf_state
)) {
1103 l2cap_chan_close(chan
, ECONNRESET
);
1104 l2cap_chan_unlock(chan
);
1108 l2cap_start_connection(chan
);
1110 } else if (chan
->state
== BT_CONNECT2
) {
1111 struct l2cap_conn_rsp rsp
;
1113 rsp
.scid
= cpu_to_le16(chan
->dcid
);
1114 rsp
.dcid
= cpu_to_le16(chan
->scid
);
1116 if (l2cap_chan_check_security(chan
)) {
1118 if (test_bit(BT_SK_DEFER_SETUP
,
1119 &bt_sk(sk
)->flags
)) {
1120 struct sock
*parent
= bt_sk(sk
)->parent
;
1121 rsp
.result
= __constant_cpu_to_le16(L2CAP_CR_PEND
);
1122 rsp
.status
= __constant_cpu_to_le16(L2CAP_CS_AUTHOR_PEND
);
1124 parent
->sk_data_ready(parent
, 0);
1127 __l2cap_state_change(chan
, BT_CONFIG
);
1128 rsp
.result
= __constant_cpu_to_le16(L2CAP_CR_SUCCESS
);
1129 rsp
.status
= __constant_cpu_to_le16(L2CAP_CS_NO_INFO
);
1133 rsp
.result
= __constant_cpu_to_le16(L2CAP_CR_PEND
);
1134 rsp
.status
= __constant_cpu_to_le16(L2CAP_CS_AUTHEN_PEND
);
1137 l2cap_send_cmd(conn
, chan
->ident
, L2CAP_CONN_RSP
,
1140 if (test_bit(CONF_REQ_SENT
, &chan
->conf_state
) ||
1141 rsp
.result
!= L2CAP_CR_SUCCESS
) {
1142 l2cap_chan_unlock(chan
);
1146 set_bit(CONF_REQ_SENT
, &chan
->conf_state
);
1147 l2cap_send_cmd(conn
, l2cap_get_ident(conn
), L2CAP_CONF_REQ
,
1148 l2cap_build_conf_req(chan
, buf
), buf
);
1149 chan
->num_conf_req
++;
1152 l2cap_chan_unlock(chan
);
1155 mutex_unlock(&conn
->chan_lock
);
1158 /* Find socket with cid and source/destination bdaddr.
1159 * Returns closest match, locked.
1161 static struct l2cap_chan
*l2cap_global_chan_by_scid(int state
, u16 cid
,
1165 struct l2cap_chan
*c
, *c1
= NULL
;
1167 read_lock(&chan_list_lock
);
1169 list_for_each_entry(c
, &chan_list
, global_l
) {
1170 struct sock
*sk
= c
->sk
;
1172 if (state
&& c
->state
!= state
)
1175 if (c
->scid
== cid
) {
1176 int src_match
, dst_match
;
1177 int src_any
, dst_any
;
1180 src_match
= !bacmp(&bt_sk(sk
)->src
, src
);
1181 dst_match
= !bacmp(&bt_sk(sk
)->dst
, dst
);
1182 if (src_match
&& dst_match
) {
1183 read_unlock(&chan_list_lock
);
1188 src_any
= !bacmp(&bt_sk(sk
)->src
, BDADDR_ANY
);
1189 dst_any
= !bacmp(&bt_sk(sk
)->dst
, BDADDR_ANY
);
1190 if ((src_match
&& dst_any
) || (src_any
&& dst_match
) ||
1191 (src_any
&& dst_any
))
1196 read_unlock(&chan_list_lock
);
1201 static void l2cap_le_conn_ready(struct l2cap_conn
*conn
)
1203 struct sock
*parent
, *sk
;
1204 struct l2cap_chan
*chan
, *pchan
;
1208 /* Check if we have socket listening on cid */
1209 pchan
= l2cap_global_chan_by_scid(BT_LISTEN
, L2CAP_CID_LE_DATA
,
1210 conn
->src
, conn
->dst
);
1218 chan
= pchan
->ops
->new_connection(pchan
);
1224 hci_conn_hold(conn
->hcon
);
1225 conn
->hcon
->disc_timeout
= HCI_DISCONN_TIMEOUT
;
1227 bacpy(&bt_sk(sk
)->src
, conn
->src
);
1228 bacpy(&bt_sk(sk
)->dst
, conn
->dst
);
1230 bt_accept_enqueue(parent
, sk
);
1232 l2cap_chan_add(conn
, chan
);
1234 l2cap_chan_ready(chan
);
1237 release_sock(parent
);
1240 static void l2cap_conn_ready(struct l2cap_conn
*conn
)
1242 struct l2cap_chan
*chan
;
1243 struct hci_conn
*hcon
= conn
->hcon
;
1245 BT_DBG("conn %p", conn
);
1247 if (!hcon
->out
&& hcon
->type
== LE_LINK
)
1248 l2cap_le_conn_ready(conn
);
1250 if (hcon
->out
&& hcon
->type
== LE_LINK
)
1251 smp_conn_security(hcon
, hcon
->pending_sec_level
);
1253 mutex_lock(&conn
->chan_lock
);
1255 list_for_each_entry(chan
, &conn
->chan_l
, list
) {
1257 l2cap_chan_lock(chan
);
1259 if (chan
->chan_type
== L2CAP_CHAN_CONN_FIX_A2MP
) {
1260 l2cap_chan_unlock(chan
);
1264 if (hcon
->type
== LE_LINK
) {
1265 if (smp_conn_security(hcon
, chan
->sec_level
))
1266 l2cap_chan_ready(chan
);
1268 } else if (chan
->chan_type
!= L2CAP_CHAN_CONN_ORIENTED
) {
1269 struct sock
*sk
= chan
->sk
;
1270 __clear_chan_timer(chan
);
1272 __l2cap_state_change(chan
, BT_CONNECTED
);
1273 sk
->sk_state_change(sk
);
1276 } else if (chan
->state
== BT_CONNECT
)
1277 l2cap_do_start(chan
);
1279 l2cap_chan_unlock(chan
);
1282 mutex_unlock(&conn
->chan_lock
);
1285 /* Notify sockets that we cannot guaranty reliability anymore */
1286 static void l2cap_conn_unreliable(struct l2cap_conn
*conn
, int err
)
1288 struct l2cap_chan
*chan
;
1290 BT_DBG("conn %p", conn
);
1292 mutex_lock(&conn
->chan_lock
);
1294 list_for_each_entry(chan
, &conn
->chan_l
, list
) {
1295 if (test_bit(FLAG_FORCE_RELIABLE
, &chan
->flags
))
1296 __l2cap_chan_set_err(chan
, err
);
1299 mutex_unlock(&conn
->chan_lock
);
1302 static void l2cap_info_timeout(struct work_struct
*work
)
1304 struct l2cap_conn
*conn
= container_of(work
, struct l2cap_conn
,
1307 conn
->info_state
|= L2CAP_INFO_FEAT_MASK_REQ_DONE
;
1308 conn
->info_ident
= 0;
1310 l2cap_conn_start(conn
);
1313 static void l2cap_conn_del(struct hci_conn
*hcon
, int err
)
1315 struct l2cap_conn
*conn
= hcon
->l2cap_data
;
1316 struct l2cap_chan
*chan
, *l
;
1321 BT_DBG("hcon %p conn %p, err %d", hcon
, conn
, err
);
1323 kfree_skb(conn
->rx_skb
);
1325 mutex_lock(&conn
->chan_lock
);
1328 list_for_each_entry_safe(chan
, l
, &conn
->chan_l
, list
) {
1329 l2cap_chan_hold(chan
);
1330 l2cap_chan_lock(chan
);
1332 l2cap_chan_del(chan
, err
);
1334 l2cap_chan_unlock(chan
);
1336 chan
->ops
->close(chan
);
1337 l2cap_chan_put(chan
);
1340 mutex_unlock(&conn
->chan_lock
);
1342 hci_chan_del(conn
->hchan
);
1344 if (conn
->info_state
& L2CAP_INFO_FEAT_MASK_REQ_SENT
)
1345 cancel_delayed_work_sync(&conn
->info_timer
);
1347 if (test_and_clear_bit(HCI_CONN_LE_SMP_PEND
, &hcon
->flags
)) {
1348 cancel_delayed_work_sync(&conn
->security_timer
);
1349 smp_chan_destroy(conn
);
1352 hcon
->l2cap_data
= NULL
;
1356 static void security_timeout(struct work_struct
*work
)
1358 struct l2cap_conn
*conn
= container_of(work
, struct l2cap_conn
,
1359 security_timer
.work
);
1361 BT_DBG("conn %p", conn
);
1363 if (test_and_clear_bit(HCI_CONN_LE_SMP_PEND
, &conn
->hcon
->flags
)) {
1364 smp_chan_destroy(conn
);
1365 l2cap_conn_del(conn
->hcon
, ETIMEDOUT
);
1369 static struct l2cap_conn
*l2cap_conn_add(struct hci_conn
*hcon
, u8 status
)
1371 struct l2cap_conn
*conn
= hcon
->l2cap_data
;
1372 struct hci_chan
*hchan
;
1377 hchan
= hci_chan_create(hcon
);
1381 conn
= kzalloc(sizeof(struct l2cap_conn
), GFP_ATOMIC
);
1383 hci_chan_del(hchan
);
1387 hcon
->l2cap_data
= conn
;
1389 conn
->hchan
= hchan
;
1391 BT_DBG("hcon %p conn %p hchan %p", hcon
, conn
, hchan
);
1393 if (hcon
->hdev
->le_mtu
&& hcon
->type
== LE_LINK
)
1394 conn
->mtu
= hcon
->hdev
->le_mtu
;
1396 conn
->mtu
= hcon
->hdev
->acl_mtu
;
1398 conn
->src
= &hcon
->hdev
->bdaddr
;
1399 conn
->dst
= &hcon
->dst
;
1401 conn
->feat_mask
= 0;
1403 spin_lock_init(&conn
->lock
);
1404 mutex_init(&conn
->chan_lock
);
1406 INIT_LIST_HEAD(&conn
->chan_l
);
1408 if (hcon
->type
== LE_LINK
)
1409 INIT_DELAYED_WORK(&conn
->security_timer
, security_timeout
);
1411 INIT_DELAYED_WORK(&conn
->info_timer
, l2cap_info_timeout
);
1413 conn
->disc_reason
= HCI_ERROR_REMOTE_USER_TERM
;
1418 /* ---- Socket interface ---- */
1420 /* Find socket with psm and source / destination bdaddr.
1421 * Returns closest match.
1423 static struct l2cap_chan
*l2cap_global_chan_by_psm(int state
, __le16 psm
,
1427 struct l2cap_chan
*c
, *c1
= NULL
;
1429 read_lock(&chan_list_lock
);
1431 list_for_each_entry(c
, &chan_list
, global_l
) {
1432 struct sock
*sk
= c
->sk
;
1434 if (state
&& c
->state
!= state
)
1437 if (c
->psm
== psm
) {
1438 int src_match
, dst_match
;
1439 int src_any
, dst_any
;
1442 src_match
= !bacmp(&bt_sk(sk
)->src
, src
);
1443 dst_match
= !bacmp(&bt_sk(sk
)->dst
, dst
);
1444 if (src_match
&& dst_match
) {
1445 read_unlock(&chan_list_lock
);
1450 src_any
= !bacmp(&bt_sk(sk
)->src
, BDADDR_ANY
);
1451 dst_any
= !bacmp(&bt_sk(sk
)->dst
, BDADDR_ANY
);
1452 if ((src_match
&& dst_any
) || (src_any
&& dst_match
) ||
1453 (src_any
&& dst_any
))
1458 read_unlock(&chan_list_lock
);
1463 int l2cap_chan_connect(struct l2cap_chan
*chan
, __le16 psm
, u16 cid
,
1464 bdaddr_t
*dst
, u8 dst_type
)
1466 struct sock
*sk
= chan
->sk
;
1467 bdaddr_t
*src
= &bt_sk(sk
)->src
;
1468 struct l2cap_conn
*conn
;
1469 struct hci_conn
*hcon
;
1470 struct hci_dev
*hdev
;
1474 BT_DBG("%pMR -> %pMR (type %u) psm 0x%2.2x", src
, dst
,
1475 dst_type
, __le16_to_cpu(psm
));
1477 hdev
= hci_get_route(dst
, src
);
1479 return -EHOSTUNREACH
;
1483 l2cap_chan_lock(chan
);
1485 /* PSM must be odd and lsb of upper byte must be 0 */
1486 if ((__le16_to_cpu(psm
) & 0x0101) != 0x0001 && !cid
&&
1487 chan
->chan_type
!= L2CAP_CHAN_RAW
) {
1492 if (chan
->chan_type
== L2CAP_CHAN_CONN_ORIENTED
&& !(psm
|| cid
)) {
1497 switch (chan
->mode
) {
1498 case L2CAP_MODE_BASIC
:
1500 case L2CAP_MODE_ERTM
:
1501 case L2CAP_MODE_STREAMING
:
1510 switch (chan
->state
) {
1514 /* Already connecting */
1519 /* Already connected */
1533 /* Set destination address and psm */
1535 bacpy(&bt_sk(sk
)->dst
, dst
);
1541 auth_type
= l2cap_get_auth_type(chan
);
1543 if (chan
->dcid
== L2CAP_CID_LE_DATA
)
1544 hcon
= hci_connect(hdev
, LE_LINK
, dst
, dst_type
,
1545 chan
->sec_level
, auth_type
);
1547 hcon
= hci_connect(hdev
, ACL_LINK
, dst
, dst_type
,
1548 chan
->sec_level
, auth_type
);
1551 err
= PTR_ERR(hcon
);
1555 conn
= l2cap_conn_add(hcon
, 0);
1562 if (hcon
->type
== LE_LINK
) {
1565 if (!list_empty(&conn
->chan_l
)) {
1574 /* Update source addr of the socket */
1575 bacpy(src
, conn
->src
);
1577 l2cap_chan_unlock(chan
);
1578 l2cap_chan_add(conn
, chan
);
1579 l2cap_chan_lock(chan
);
1581 l2cap_state_change(chan
, BT_CONNECT
);
1582 __set_chan_timer(chan
, sk
->sk_sndtimeo
);
1584 if (hcon
->state
== BT_CONNECTED
) {
1585 if (chan
->chan_type
!= L2CAP_CHAN_CONN_ORIENTED
) {
1586 __clear_chan_timer(chan
);
1587 if (l2cap_chan_check_security(chan
))
1588 l2cap_state_change(chan
, BT_CONNECTED
);
1590 l2cap_do_start(chan
);
1596 l2cap_chan_unlock(chan
);
1597 hci_dev_unlock(hdev
);
1602 int __l2cap_wait_ack(struct sock
*sk
)
1604 struct l2cap_chan
*chan
= l2cap_pi(sk
)->chan
;
1605 DECLARE_WAITQUEUE(wait
, current
);
1609 add_wait_queue(sk_sleep(sk
), &wait
);
1610 set_current_state(TASK_INTERRUPTIBLE
);
1611 while (chan
->unacked_frames
> 0 && chan
->conn
) {
1615 if (signal_pending(current
)) {
1616 err
= sock_intr_errno(timeo
);
1621 timeo
= schedule_timeout(timeo
);
1623 set_current_state(TASK_INTERRUPTIBLE
);
1625 err
= sock_error(sk
);
1629 set_current_state(TASK_RUNNING
);
1630 remove_wait_queue(sk_sleep(sk
), &wait
);
1634 static void l2cap_monitor_timeout(struct work_struct
*work
)
1636 struct l2cap_chan
*chan
= container_of(work
, struct l2cap_chan
,
1637 monitor_timer
.work
);
1639 BT_DBG("chan %p", chan
);
1641 l2cap_chan_lock(chan
);
1644 l2cap_chan_unlock(chan
);
1645 l2cap_chan_put(chan
);
1649 l2cap_tx(chan
, NULL
, NULL
, L2CAP_EV_MONITOR_TO
);
1651 l2cap_chan_unlock(chan
);
1652 l2cap_chan_put(chan
);
1655 static void l2cap_retrans_timeout(struct work_struct
*work
)
1657 struct l2cap_chan
*chan
= container_of(work
, struct l2cap_chan
,
1658 retrans_timer
.work
);
1660 BT_DBG("chan %p", chan
);
1662 l2cap_chan_lock(chan
);
1665 l2cap_chan_unlock(chan
);
1666 l2cap_chan_put(chan
);
1670 l2cap_tx(chan
, NULL
, NULL
, L2CAP_EV_RETRANS_TO
);
1671 l2cap_chan_unlock(chan
);
1672 l2cap_chan_put(chan
);
1675 static void l2cap_streaming_send(struct l2cap_chan
*chan
,
1676 struct sk_buff_head
*skbs
)
1678 struct sk_buff
*skb
;
1679 struct l2cap_ctrl
*control
;
1681 BT_DBG("chan %p, skbs %p", chan
, skbs
);
1683 skb_queue_splice_tail_init(skbs
, &chan
->tx_q
);
1685 while (!skb_queue_empty(&chan
->tx_q
)) {
1687 skb
= skb_dequeue(&chan
->tx_q
);
1689 bt_cb(skb
)->control
.retries
= 1;
1690 control
= &bt_cb(skb
)->control
;
1692 control
->reqseq
= 0;
1693 control
->txseq
= chan
->next_tx_seq
;
1695 __pack_control(chan
, control
, skb
);
1697 if (chan
->fcs
== L2CAP_FCS_CRC16
) {
1698 u16 fcs
= crc16(0, (u8
*) skb
->data
, skb
->len
);
1699 put_unaligned_le16(fcs
, skb_put(skb
, L2CAP_FCS_SIZE
));
1702 l2cap_do_send(chan
, skb
);
1704 BT_DBG("Sent txseq %u", control
->txseq
);
1706 chan
->next_tx_seq
= __next_seq(chan
, chan
->next_tx_seq
);
1707 chan
->frames_sent
++;
1711 static int l2cap_ertm_send(struct l2cap_chan
*chan
)
1713 struct sk_buff
*skb
, *tx_skb
;
1714 struct l2cap_ctrl
*control
;
1717 BT_DBG("chan %p", chan
);
1719 if (chan
->state
!= BT_CONNECTED
)
1722 if (test_bit(CONN_REMOTE_BUSY
, &chan
->conn_state
))
1725 while (chan
->tx_send_head
&&
1726 chan
->unacked_frames
< chan
->remote_tx_win
&&
1727 chan
->tx_state
== L2CAP_TX_STATE_XMIT
) {
1729 skb
= chan
->tx_send_head
;
1731 bt_cb(skb
)->control
.retries
= 1;
1732 control
= &bt_cb(skb
)->control
;
1734 if (test_and_clear_bit(CONN_SEND_FBIT
, &chan
->conn_state
))
1737 control
->reqseq
= chan
->buffer_seq
;
1738 chan
->last_acked_seq
= chan
->buffer_seq
;
1739 control
->txseq
= chan
->next_tx_seq
;
1741 __pack_control(chan
, control
, skb
);
1743 if (chan
->fcs
== L2CAP_FCS_CRC16
) {
1744 u16 fcs
= crc16(0, (u8
*) skb
->data
, skb
->len
);
1745 put_unaligned_le16(fcs
, skb_put(skb
, L2CAP_FCS_SIZE
));
1748 /* Clone after data has been modified. Data is assumed to be
1749 read-only (for locking purposes) on cloned sk_buffs.
1751 tx_skb
= skb_clone(skb
, GFP_KERNEL
);
1756 __set_retrans_timer(chan
);
1758 chan
->next_tx_seq
= __next_seq(chan
, chan
->next_tx_seq
);
1759 chan
->unacked_frames
++;
1760 chan
->frames_sent
++;
1763 if (skb_queue_is_last(&chan
->tx_q
, skb
))
1764 chan
->tx_send_head
= NULL
;
1766 chan
->tx_send_head
= skb_queue_next(&chan
->tx_q
, skb
);
1768 l2cap_do_send(chan
, tx_skb
);
1769 BT_DBG("Sent txseq %u", control
->txseq
);
1772 BT_DBG("Sent %d, %u unacked, %u in ERTM queue", sent
,
1773 chan
->unacked_frames
, skb_queue_len(&chan
->tx_q
));
1778 static void l2cap_ertm_resend(struct l2cap_chan
*chan
)
1780 struct l2cap_ctrl control
;
1781 struct sk_buff
*skb
;
1782 struct sk_buff
*tx_skb
;
1785 BT_DBG("chan %p", chan
);
1787 if (test_bit(CONN_REMOTE_BUSY
, &chan
->conn_state
))
1790 while (chan
->retrans_list
.head
!= L2CAP_SEQ_LIST_CLEAR
) {
1791 seq
= l2cap_seq_list_pop(&chan
->retrans_list
);
1793 skb
= l2cap_ertm_seq_in_queue(&chan
->tx_q
, seq
);
1795 BT_DBG("Error: Can't retransmit seq %d, frame missing",
1800 bt_cb(skb
)->control
.retries
++;
1801 control
= bt_cb(skb
)->control
;
1803 if (chan
->max_tx
!= 0 &&
1804 bt_cb(skb
)->control
.retries
> chan
->max_tx
) {
1805 BT_DBG("Retry limit exceeded (%d)", chan
->max_tx
);
1806 l2cap_send_disconn_req(chan
->conn
, chan
, ECONNRESET
);
1807 l2cap_seq_list_clear(&chan
->retrans_list
);
1811 control
.reqseq
= chan
->buffer_seq
;
1812 if (test_and_clear_bit(CONN_SEND_FBIT
, &chan
->conn_state
))
1817 if (skb_cloned(skb
)) {
1818 /* Cloned sk_buffs are read-only, so we need a
1821 tx_skb
= skb_copy(skb
, GFP_ATOMIC
);
1823 tx_skb
= skb_clone(skb
, GFP_ATOMIC
);
1827 l2cap_seq_list_clear(&chan
->retrans_list
);
1831 /* Update skb contents */
1832 if (test_bit(FLAG_EXT_CTRL
, &chan
->flags
)) {
1833 put_unaligned_le32(__pack_extended_control(&control
),
1834 tx_skb
->data
+ L2CAP_HDR_SIZE
);
1836 put_unaligned_le16(__pack_enhanced_control(&control
),
1837 tx_skb
->data
+ L2CAP_HDR_SIZE
);
1840 if (chan
->fcs
== L2CAP_FCS_CRC16
) {
1841 u16 fcs
= crc16(0, (u8
*) tx_skb
->data
, tx_skb
->len
);
1842 put_unaligned_le16(fcs
, skb_put(tx_skb
,
1846 l2cap_do_send(chan
, tx_skb
);
1848 BT_DBG("Resent txseq %d", control
.txseq
);
1850 chan
->last_acked_seq
= chan
->buffer_seq
;
1854 static void l2cap_retransmit(struct l2cap_chan
*chan
,
1855 struct l2cap_ctrl
*control
)
1857 BT_DBG("chan %p, control %p", chan
, control
);
1859 l2cap_seq_list_append(&chan
->retrans_list
, control
->reqseq
);
1860 l2cap_ertm_resend(chan
);
1863 static void l2cap_retransmit_all(struct l2cap_chan
*chan
,
1864 struct l2cap_ctrl
*control
)
1866 struct sk_buff
*skb
;
1868 BT_DBG("chan %p, control %p", chan
, control
);
1871 set_bit(CONN_SEND_FBIT
, &chan
->conn_state
);
1873 l2cap_seq_list_clear(&chan
->retrans_list
);
1875 if (test_bit(CONN_REMOTE_BUSY
, &chan
->conn_state
))
1878 if (chan
->unacked_frames
) {
1879 skb_queue_walk(&chan
->tx_q
, skb
) {
1880 if (bt_cb(skb
)->control
.txseq
== control
->reqseq
||
1881 skb
== chan
->tx_send_head
)
1885 skb_queue_walk_from(&chan
->tx_q
, skb
) {
1886 if (skb
== chan
->tx_send_head
)
1889 l2cap_seq_list_append(&chan
->retrans_list
,
1890 bt_cb(skb
)->control
.txseq
);
1893 l2cap_ertm_resend(chan
);
1897 static void l2cap_send_ack(struct l2cap_chan
*chan
)
1899 struct l2cap_ctrl control
;
1900 u16 frames_to_ack
= __seq_offset(chan
, chan
->buffer_seq
,
1901 chan
->last_acked_seq
);
1904 BT_DBG("chan %p last_acked_seq %d buffer_seq %d",
1905 chan
, chan
->last_acked_seq
, chan
->buffer_seq
);
1907 memset(&control
, 0, sizeof(control
));
1910 if (test_bit(CONN_LOCAL_BUSY
, &chan
->conn_state
) &&
1911 chan
->rx_state
== L2CAP_RX_STATE_RECV
) {
1912 __clear_ack_timer(chan
);
1913 control
.super
= L2CAP_SUPER_RNR
;
1914 control
.reqseq
= chan
->buffer_seq
;
1915 l2cap_send_sframe(chan
, &control
);
1917 if (!test_bit(CONN_REMOTE_BUSY
, &chan
->conn_state
)) {
1918 l2cap_ertm_send(chan
);
1919 /* If any i-frames were sent, they included an ack */
1920 if (chan
->buffer_seq
== chan
->last_acked_seq
)
1924 /* Ack now if the window is 3/4ths full.
1925 * Calculate without mul or div
1927 threshold
= chan
->ack_win
;
1928 threshold
+= threshold
<< 1;
1931 BT_DBG("frames_to_ack %u, threshold %d", frames_to_ack
,
1934 if (frames_to_ack
>= threshold
) {
1935 __clear_ack_timer(chan
);
1936 control
.super
= L2CAP_SUPER_RR
;
1937 control
.reqseq
= chan
->buffer_seq
;
1938 l2cap_send_sframe(chan
, &control
);
1943 __set_ack_timer(chan
);
1947 static inline int l2cap_skbuff_fromiovec(struct l2cap_chan
*chan
,
1948 struct msghdr
*msg
, int len
,
1949 int count
, struct sk_buff
*skb
)
1951 struct l2cap_conn
*conn
= chan
->conn
;
1952 struct sk_buff
**frag
;
1955 if (memcpy_fromiovec(skb_put(skb
, count
), msg
->msg_iov
, count
))
1961 /* Continuation fragments (no L2CAP header) */
1962 frag
= &skb_shinfo(skb
)->frag_list
;
1964 struct sk_buff
*tmp
;
1966 count
= min_t(unsigned int, conn
->mtu
, len
);
1968 tmp
= chan
->ops
->alloc_skb(chan
, count
,
1969 msg
->msg_flags
& MSG_DONTWAIT
);
1971 return PTR_ERR(tmp
);
1975 if (memcpy_fromiovec(skb_put(*frag
, count
), msg
->msg_iov
, count
))
1978 (*frag
)->priority
= skb
->priority
;
1983 skb
->len
+= (*frag
)->len
;
1984 skb
->data_len
+= (*frag
)->len
;
1986 frag
= &(*frag
)->next
;
1992 static struct sk_buff
*l2cap_create_connless_pdu(struct l2cap_chan
*chan
,
1993 struct msghdr
*msg
, size_t len
,
1996 struct l2cap_conn
*conn
= chan
->conn
;
1997 struct sk_buff
*skb
;
1998 int err
, count
, hlen
= L2CAP_HDR_SIZE
+ L2CAP_PSMLEN_SIZE
;
1999 struct l2cap_hdr
*lh
;
2001 BT_DBG("chan %p len %zu priority %u", chan
, len
, priority
);
2003 count
= min_t(unsigned int, (conn
->mtu
- hlen
), len
);
2005 skb
= chan
->ops
->alloc_skb(chan
, count
+ hlen
,
2006 msg
->msg_flags
& MSG_DONTWAIT
);
2010 skb
->priority
= priority
;
2012 /* Create L2CAP header */
2013 lh
= (struct l2cap_hdr
*) skb_put(skb
, L2CAP_HDR_SIZE
);
2014 lh
->cid
= cpu_to_le16(chan
->dcid
);
2015 lh
->len
= cpu_to_le16(len
+ L2CAP_PSMLEN_SIZE
);
2016 put_unaligned(chan
->psm
, skb_put(skb
, L2CAP_PSMLEN_SIZE
));
2018 err
= l2cap_skbuff_fromiovec(chan
, msg
, len
, count
, skb
);
2019 if (unlikely(err
< 0)) {
2021 return ERR_PTR(err
);
2026 static struct sk_buff
*l2cap_create_basic_pdu(struct l2cap_chan
*chan
,
2027 struct msghdr
*msg
, size_t len
,
2030 struct l2cap_conn
*conn
= chan
->conn
;
2031 struct sk_buff
*skb
;
2033 struct l2cap_hdr
*lh
;
2035 BT_DBG("chan %p len %zu", chan
, len
);
2037 count
= min_t(unsigned int, (conn
->mtu
- L2CAP_HDR_SIZE
), len
);
2039 skb
= chan
->ops
->alloc_skb(chan
, count
+ L2CAP_HDR_SIZE
,
2040 msg
->msg_flags
& MSG_DONTWAIT
);
2044 skb
->priority
= priority
;
2046 /* Create L2CAP header */
2047 lh
= (struct l2cap_hdr
*) skb_put(skb
, L2CAP_HDR_SIZE
);
2048 lh
->cid
= cpu_to_le16(chan
->dcid
);
2049 lh
->len
= cpu_to_le16(len
);
2051 err
= l2cap_skbuff_fromiovec(chan
, msg
, len
, count
, skb
);
2052 if (unlikely(err
< 0)) {
2054 return ERR_PTR(err
);
2059 static struct sk_buff
*l2cap_create_iframe_pdu(struct l2cap_chan
*chan
,
2060 struct msghdr
*msg
, size_t len
,
2063 struct l2cap_conn
*conn
= chan
->conn
;
2064 struct sk_buff
*skb
;
2065 int err
, count
, hlen
;
2066 struct l2cap_hdr
*lh
;
2068 BT_DBG("chan %p len %zu", chan
, len
);
2071 return ERR_PTR(-ENOTCONN
);
2073 hlen
= __ertm_hdr_size(chan
);
2076 hlen
+= L2CAP_SDULEN_SIZE
;
2078 if (chan
->fcs
== L2CAP_FCS_CRC16
)
2079 hlen
+= L2CAP_FCS_SIZE
;
2081 count
= min_t(unsigned int, (conn
->mtu
- hlen
), len
);
2083 skb
= chan
->ops
->alloc_skb(chan
, count
+ hlen
,
2084 msg
->msg_flags
& MSG_DONTWAIT
);
2088 /* Create L2CAP header */
2089 lh
= (struct l2cap_hdr
*) skb_put(skb
, L2CAP_HDR_SIZE
);
2090 lh
->cid
= cpu_to_le16(chan
->dcid
);
2091 lh
->len
= cpu_to_le16(len
+ (hlen
- L2CAP_HDR_SIZE
));
2093 /* Control header is populated later */
2094 if (test_bit(FLAG_EXT_CTRL
, &chan
->flags
))
2095 put_unaligned_le32(0, skb_put(skb
, L2CAP_EXT_CTRL_SIZE
));
2097 put_unaligned_le16(0, skb_put(skb
, L2CAP_ENH_CTRL_SIZE
));
2100 put_unaligned_le16(sdulen
, skb_put(skb
, L2CAP_SDULEN_SIZE
));
2102 err
= l2cap_skbuff_fromiovec(chan
, msg
, len
, count
, skb
);
2103 if (unlikely(err
< 0)) {
2105 return ERR_PTR(err
);
2108 bt_cb(skb
)->control
.fcs
= chan
->fcs
;
2109 bt_cb(skb
)->control
.retries
= 0;
2113 static int l2cap_segment_sdu(struct l2cap_chan
*chan
,
2114 struct sk_buff_head
*seg_queue
,
2115 struct msghdr
*msg
, size_t len
)
2117 struct sk_buff
*skb
;
2122 BT_DBG("chan %p, msg %p, len %zu", chan
, msg
, len
);
2124 /* It is critical that ERTM PDUs fit in a single HCI fragment,
2125 * so fragmented skbs are not used. The HCI layer's handling
2126 * of fragmented skbs is not compatible with ERTM's queueing.
2129 /* PDU size is derived from the HCI MTU */
2130 pdu_len
= chan
->conn
->mtu
;
2132 pdu_len
= min_t(size_t, pdu_len
, L2CAP_BREDR_MAX_PAYLOAD
);
2134 /* Adjust for largest possible L2CAP overhead. */
2136 pdu_len
-= L2CAP_FCS_SIZE
;
2138 pdu_len
-= __ertm_hdr_size(chan
);
2140 /* Remote device may have requested smaller PDUs */
2141 pdu_len
= min_t(size_t, pdu_len
, chan
->remote_mps
);
2143 if (len
<= pdu_len
) {
2144 sar
= L2CAP_SAR_UNSEGMENTED
;
2148 sar
= L2CAP_SAR_START
;
2150 pdu_len
-= L2CAP_SDULEN_SIZE
;
2154 skb
= l2cap_create_iframe_pdu(chan
, msg
, pdu_len
, sdu_len
);
2157 __skb_queue_purge(seg_queue
);
2158 return PTR_ERR(skb
);
2161 bt_cb(skb
)->control
.sar
= sar
;
2162 __skb_queue_tail(seg_queue
, skb
);
2167 pdu_len
+= L2CAP_SDULEN_SIZE
;
2170 if (len
<= pdu_len
) {
2171 sar
= L2CAP_SAR_END
;
2174 sar
= L2CAP_SAR_CONTINUE
;
2181 int l2cap_chan_send(struct l2cap_chan
*chan
, struct msghdr
*msg
, size_t len
,
2184 struct sk_buff
*skb
;
2186 struct sk_buff_head seg_queue
;
2188 /* Connectionless channel */
2189 if (chan
->chan_type
== L2CAP_CHAN_CONN_LESS
) {
2190 skb
= l2cap_create_connless_pdu(chan
, msg
, len
, priority
);
2192 return PTR_ERR(skb
);
2194 l2cap_do_send(chan
, skb
);
2198 switch (chan
->mode
) {
2199 case L2CAP_MODE_BASIC
:
2200 /* Check outgoing MTU */
2201 if (len
> chan
->omtu
)
2204 /* Create a basic PDU */
2205 skb
= l2cap_create_basic_pdu(chan
, msg
, len
, priority
);
2207 return PTR_ERR(skb
);
2209 l2cap_do_send(chan
, skb
);
2213 case L2CAP_MODE_ERTM
:
2214 case L2CAP_MODE_STREAMING
:
2215 /* Check outgoing MTU */
2216 if (len
> chan
->omtu
) {
2221 __skb_queue_head_init(&seg_queue
);
2223 /* Do segmentation before calling in to the state machine,
2224 * since it's possible to block while waiting for memory
2227 err
= l2cap_segment_sdu(chan
, &seg_queue
, msg
, len
);
2229 /* The channel could have been closed while segmenting,
2230 * check that it is still connected.
2232 if (chan
->state
!= BT_CONNECTED
) {
2233 __skb_queue_purge(&seg_queue
);
2240 if (chan
->mode
== L2CAP_MODE_ERTM
)
2241 l2cap_tx(chan
, NULL
, &seg_queue
, L2CAP_EV_DATA_REQUEST
);
2243 l2cap_streaming_send(chan
, &seg_queue
);
2247 /* If the skbs were not queued for sending, they'll still be in
2248 * seg_queue and need to be purged.
2250 __skb_queue_purge(&seg_queue
);
2254 BT_DBG("bad state %1.1x", chan
->mode
);
2261 static void l2cap_send_srej(struct l2cap_chan
*chan
, u16 txseq
)
2263 struct l2cap_ctrl control
;
2266 BT_DBG("chan %p, txseq %u", chan
, txseq
);
2268 memset(&control
, 0, sizeof(control
));
2270 control
.super
= L2CAP_SUPER_SREJ
;
2272 for (seq
= chan
->expected_tx_seq
; seq
!= txseq
;
2273 seq
= __next_seq(chan
, seq
)) {
2274 if (!l2cap_ertm_seq_in_queue(&chan
->srej_q
, seq
)) {
2275 control
.reqseq
= seq
;
2276 l2cap_send_sframe(chan
, &control
);
2277 l2cap_seq_list_append(&chan
->srej_list
, seq
);
2281 chan
->expected_tx_seq
= __next_seq(chan
, txseq
);
2284 static void l2cap_send_srej_tail(struct l2cap_chan
*chan
)
2286 struct l2cap_ctrl control
;
2288 BT_DBG("chan %p", chan
);
2290 if (chan
->srej_list
.tail
== L2CAP_SEQ_LIST_CLEAR
)
2293 memset(&control
, 0, sizeof(control
));
2295 control
.super
= L2CAP_SUPER_SREJ
;
2296 control
.reqseq
= chan
->srej_list
.tail
;
2297 l2cap_send_sframe(chan
, &control
);
2300 static void l2cap_send_srej_list(struct l2cap_chan
*chan
, u16 txseq
)
2302 struct l2cap_ctrl control
;
2306 BT_DBG("chan %p, txseq %u", chan
, txseq
);
2308 memset(&control
, 0, sizeof(control
));
2310 control
.super
= L2CAP_SUPER_SREJ
;
2312 /* Capture initial list head to allow only one pass through the list. */
2313 initial_head
= chan
->srej_list
.head
;
2316 seq
= l2cap_seq_list_pop(&chan
->srej_list
);
2317 if (seq
== txseq
|| seq
== L2CAP_SEQ_LIST_CLEAR
)
2320 control
.reqseq
= seq
;
2321 l2cap_send_sframe(chan
, &control
);
2322 l2cap_seq_list_append(&chan
->srej_list
, seq
);
2323 } while (chan
->srej_list
.head
!= initial_head
);
2326 static void l2cap_process_reqseq(struct l2cap_chan
*chan
, u16 reqseq
)
2328 struct sk_buff
*acked_skb
;
2331 BT_DBG("chan %p, reqseq %u", chan
, reqseq
);
2333 if (chan
->unacked_frames
== 0 || reqseq
== chan
->expected_ack_seq
)
2336 BT_DBG("expected_ack_seq %u, unacked_frames %u",
2337 chan
->expected_ack_seq
, chan
->unacked_frames
);
2339 for (ackseq
= chan
->expected_ack_seq
; ackseq
!= reqseq
;
2340 ackseq
= __next_seq(chan
, ackseq
)) {
2342 acked_skb
= l2cap_ertm_seq_in_queue(&chan
->tx_q
, ackseq
);
2344 skb_unlink(acked_skb
, &chan
->tx_q
);
2345 kfree_skb(acked_skb
);
2346 chan
->unacked_frames
--;
2350 chan
->expected_ack_seq
= reqseq
;
2352 if (chan
->unacked_frames
== 0)
2353 __clear_retrans_timer(chan
);
2355 BT_DBG("unacked_frames %u", chan
->unacked_frames
);
2358 static void l2cap_abort_rx_srej_sent(struct l2cap_chan
*chan
)
2360 BT_DBG("chan %p", chan
);
2362 chan
->expected_tx_seq
= chan
->buffer_seq
;
2363 l2cap_seq_list_clear(&chan
->srej_list
);
2364 skb_queue_purge(&chan
->srej_q
);
2365 chan
->rx_state
= L2CAP_RX_STATE_RECV
;
2368 static void l2cap_tx_state_xmit(struct l2cap_chan
*chan
,
2369 struct l2cap_ctrl
*control
,
2370 struct sk_buff_head
*skbs
, u8 event
)
2372 BT_DBG("chan %p, control %p, skbs %p, event %d", chan
, control
, skbs
,
2376 case L2CAP_EV_DATA_REQUEST
:
2377 if (chan
->tx_send_head
== NULL
)
2378 chan
->tx_send_head
= skb_peek(skbs
);
2380 skb_queue_splice_tail_init(skbs
, &chan
->tx_q
);
2381 l2cap_ertm_send(chan
);
2383 case L2CAP_EV_LOCAL_BUSY_DETECTED
:
2384 BT_DBG("Enter LOCAL_BUSY");
2385 set_bit(CONN_LOCAL_BUSY
, &chan
->conn_state
);
2387 if (chan
->rx_state
== L2CAP_RX_STATE_SREJ_SENT
) {
2388 /* The SREJ_SENT state must be aborted if we are to
2389 * enter the LOCAL_BUSY state.
2391 l2cap_abort_rx_srej_sent(chan
);
2394 l2cap_send_ack(chan
);
2397 case L2CAP_EV_LOCAL_BUSY_CLEAR
:
2398 BT_DBG("Exit LOCAL_BUSY");
2399 clear_bit(CONN_LOCAL_BUSY
, &chan
->conn_state
);
2401 if (test_bit(CONN_RNR_SENT
, &chan
->conn_state
)) {
2402 struct l2cap_ctrl local_control
;
2404 memset(&local_control
, 0, sizeof(local_control
));
2405 local_control
.sframe
= 1;
2406 local_control
.super
= L2CAP_SUPER_RR
;
2407 local_control
.poll
= 1;
2408 local_control
.reqseq
= chan
->buffer_seq
;
2409 l2cap_send_sframe(chan
, &local_control
);
2411 chan
->retry_count
= 1;
2412 __set_monitor_timer(chan
);
2413 chan
->tx_state
= L2CAP_TX_STATE_WAIT_F
;
2416 case L2CAP_EV_RECV_REQSEQ_AND_FBIT
:
2417 l2cap_process_reqseq(chan
, control
->reqseq
);
2419 case L2CAP_EV_EXPLICIT_POLL
:
2420 l2cap_send_rr_or_rnr(chan
, 1);
2421 chan
->retry_count
= 1;
2422 __set_monitor_timer(chan
);
2423 __clear_ack_timer(chan
);
2424 chan
->tx_state
= L2CAP_TX_STATE_WAIT_F
;
2426 case L2CAP_EV_RETRANS_TO
:
2427 l2cap_send_rr_or_rnr(chan
, 1);
2428 chan
->retry_count
= 1;
2429 __set_monitor_timer(chan
);
2430 chan
->tx_state
= L2CAP_TX_STATE_WAIT_F
;
2432 case L2CAP_EV_RECV_FBIT
:
2433 /* Nothing to process */
2440 static void l2cap_tx_state_wait_f(struct l2cap_chan
*chan
,
2441 struct l2cap_ctrl
*control
,
2442 struct sk_buff_head
*skbs
, u8 event
)
2444 BT_DBG("chan %p, control %p, skbs %p, event %d", chan
, control
, skbs
,
2448 case L2CAP_EV_DATA_REQUEST
:
2449 if (chan
->tx_send_head
== NULL
)
2450 chan
->tx_send_head
= skb_peek(skbs
);
2451 /* Queue data, but don't send. */
2452 skb_queue_splice_tail_init(skbs
, &chan
->tx_q
);
2454 case L2CAP_EV_LOCAL_BUSY_DETECTED
:
2455 BT_DBG("Enter LOCAL_BUSY");
2456 set_bit(CONN_LOCAL_BUSY
, &chan
->conn_state
);
2458 if (chan
->rx_state
== L2CAP_RX_STATE_SREJ_SENT
) {
2459 /* The SREJ_SENT state must be aborted if we are to
2460 * enter the LOCAL_BUSY state.
2462 l2cap_abort_rx_srej_sent(chan
);
2465 l2cap_send_ack(chan
);
2468 case L2CAP_EV_LOCAL_BUSY_CLEAR
:
2469 BT_DBG("Exit LOCAL_BUSY");
2470 clear_bit(CONN_LOCAL_BUSY
, &chan
->conn_state
);
2472 if (test_bit(CONN_RNR_SENT
, &chan
->conn_state
)) {
2473 struct l2cap_ctrl local_control
;
2474 memset(&local_control
, 0, sizeof(local_control
));
2475 local_control
.sframe
= 1;
2476 local_control
.super
= L2CAP_SUPER_RR
;
2477 local_control
.poll
= 1;
2478 local_control
.reqseq
= chan
->buffer_seq
;
2479 l2cap_send_sframe(chan
, &local_control
);
2481 chan
->retry_count
= 1;
2482 __set_monitor_timer(chan
);
2483 chan
->tx_state
= L2CAP_TX_STATE_WAIT_F
;
2486 case L2CAP_EV_RECV_REQSEQ_AND_FBIT
:
2487 l2cap_process_reqseq(chan
, control
->reqseq
);
2491 case L2CAP_EV_RECV_FBIT
:
2492 if (control
&& control
->final
) {
2493 __clear_monitor_timer(chan
);
2494 if (chan
->unacked_frames
> 0)
2495 __set_retrans_timer(chan
);
2496 chan
->retry_count
= 0;
2497 chan
->tx_state
= L2CAP_TX_STATE_XMIT
;
2498 BT_DBG("recv fbit tx_state 0x2.2%x", chan
->tx_state
);
2501 case L2CAP_EV_EXPLICIT_POLL
:
2504 case L2CAP_EV_MONITOR_TO
:
2505 if (chan
->max_tx
== 0 || chan
->retry_count
< chan
->max_tx
) {
2506 l2cap_send_rr_or_rnr(chan
, 1);
2507 __set_monitor_timer(chan
);
2508 chan
->retry_count
++;
2510 l2cap_send_disconn_req(chan
->conn
, chan
, ECONNABORTED
);
2518 static void l2cap_tx(struct l2cap_chan
*chan
, struct l2cap_ctrl
*control
,
2519 struct sk_buff_head
*skbs
, u8 event
)
2521 BT_DBG("chan %p, control %p, skbs %p, event %d, state %d",
2522 chan
, control
, skbs
, event
, chan
->tx_state
);
2524 switch (chan
->tx_state
) {
2525 case L2CAP_TX_STATE_XMIT
:
2526 l2cap_tx_state_xmit(chan
, control
, skbs
, event
);
2528 case L2CAP_TX_STATE_WAIT_F
:
2529 l2cap_tx_state_wait_f(chan
, control
, skbs
, event
);
2537 static void l2cap_pass_to_tx(struct l2cap_chan
*chan
,
2538 struct l2cap_ctrl
*control
)
2540 BT_DBG("chan %p, control %p", chan
, control
);
2541 l2cap_tx(chan
, control
, NULL
, L2CAP_EV_RECV_REQSEQ_AND_FBIT
);
2544 static void l2cap_pass_to_tx_fbit(struct l2cap_chan
*chan
,
2545 struct l2cap_ctrl
*control
)
2547 BT_DBG("chan %p, control %p", chan
, control
);
2548 l2cap_tx(chan
, control
, NULL
, L2CAP_EV_RECV_FBIT
);
2551 /* Copy frame to all raw sockets on that connection */
2552 static void l2cap_raw_recv(struct l2cap_conn
*conn
, struct sk_buff
*skb
)
2554 struct sk_buff
*nskb
;
2555 struct l2cap_chan
*chan
;
2557 BT_DBG("conn %p", conn
);
2559 mutex_lock(&conn
->chan_lock
);
2561 list_for_each_entry(chan
, &conn
->chan_l
, list
) {
2562 struct sock
*sk
= chan
->sk
;
2563 if (chan
->chan_type
!= L2CAP_CHAN_RAW
)
2566 /* Don't send frame to the socket it came from */
2569 nskb
= skb_clone(skb
, GFP_ATOMIC
);
2573 if (chan
->ops
->recv(chan
, nskb
))
2577 mutex_unlock(&conn
->chan_lock
);
2580 /* ---- L2CAP signalling commands ---- */
2581 static struct sk_buff
*l2cap_build_cmd(struct l2cap_conn
*conn
, u8 code
,
2582 u8 ident
, u16 dlen
, void *data
)
2584 struct sk_buff
*skb
, **frag
;
2585 struct l2cap_cmd_hdr
*cmd
;
2586 struct l2cap_hdr
*lh
;
2589 BT_DBG("conn %p, code 0x%2.2x, ident 0x%2.2x, len %u",
2590 conn
, code
, ident
, dlen
);
2592 len
= L2CAP_HDR_SIZE
+ L2CAP_CMD_HDR_SIZE
+ dlen
;
2593 count
= min_t(unsigned int, conn
->mtu
, len
);
2595 skb
= bt_skb_alloc(count
, GFP_ATOMIC
);
2599 lh
= (struct l2cap_hdr
*) skb_put(skb
, L2CAP_HDR_SIZE
);
2600 lh
->len
= cpu_to_le16(L2CAP_CMD_HDR_SIZE
+ dlen
);
2602 if (conn
->hcon
->type
== LE_LINK
)
2603 lh
->cid
= __constant_cpu_to_le16(L2CAP_CID_LE_SIGNALING
);
2605 lh
->cid
= __constant_cpu_to_le16(L2CAP_CID_SIGNALING
);
2607 cmd
= (struct l2cap_cmd_hdr
*) skb_put(skb
, L2CAP_CMD_HDR_SIZE
);
2610 cmd
->len
= cpu_to_le16(dlen
);
2613 count
-= L2CAP_HDR_SIZE
+ L2CAP_CMD_HDR_SIZE
;
2614 memcpy(skb_put(skb
, count
), data
, count
);
2620 /* Continuation fragments (no L2CAP header) */
2621 frag
= &skb_shinfo(skb
)->frag_list
;
2623 count
= min_t(unsigned int, conn
->mtu
, len
);
2625 *frag
= bt_skb_alloc(count
, GFP_ATOMIC
);
2629 memcpy(skb_put(*frag
, count
), data
, count
);
2634 frag
= &(*frag
)->next
;
2644 static inline int l2cap_get_conf_opt(void **ptr
, int *type
, int *olen
, unsigned long *val
)
2646 struct l2cap_conf_opt
*opt
= *ptr
;
2649 len
= L2CAP_CONF_OPT_SIZE
+ opt
->len
;
2657 *val
= *((u8
*) opt
->val
);
2661 *val
= get_unaligned_le16(opt
->val
);
2665 *val
= get_unaligned_le32(opt
->val
);
2669 *val
= (unsigned long) opt
->val
;
2673 BT_DBG("type 0x%2.2x len %u val 0x%lx", *type
, opt
->len
, *val
);
2677 static void l2cap_add_conf_opt(void **ptr
, u8 type
, u8 len
, unsigned long val
)
2679 struct l2cap_conf_opt
*opt
= *ptr
;
2681 BT_DBG("type 0x%2.2x len %u val 0x%lx", type
, len
, val
);
2688 *((u8
*) opt
->val
) = val
;
2692 put_unaligned_le16(val
, opt
->val
);
2696 put_unaligned_le32(val
, opt
->val
);
2700 memcpy(opt
->val
, (void *) val
, len
);
2704 *ptr
+= L2CAP_CONF_OPT_SIZE
+ len
;
2707 static void l2cap_add_opt_efs(void **ptr
, struct l2cap_chan
*chan
)
2709 struct l2cap_conf_efs efs
;
2711 switch (chan
->mode
) {
2712 case L2CAP_MODE_ERTM
:
2713 efs
.id
= chan
->local_id
;
2714 efs
.stype
= chan
->local_stype
;
2715 efs
.msdu
= cpu_to_le16(chan
->local_msdu
);
2716 efs
.sdu_itime
= cpu_to_le32(chan
->local_sdu_itime
);
2717 efs
.acc_lat
= __constant_cpu_to_le32(L2CAP_DEFAULT_ACC_LAT
);
2718 efs
.flush_to
= __constant_cpu_to_le32(L2CAP_DEFAULT_FLUSH_TO
);
2721 case L2CAP_MODE_STREAMING
:
2723 efs
.stype
= L2CAP_SERV_BESTEFFORT
;
2724 efs
.msdu
= cpu_to_le16(chan
->local_msdu
);
2725 efs
.sdu_itime
= cpu_to_le32(chan
->local_sdu_itime
);
2734 l2cap_add_conf_opt(ptr
, L2CAP_CONF_EFS
, sizeof(efs
),
2735 (unsigned long) &efs
);
2738 static void l2cap_ack_timeout(struct work_struct
*work
)
2740 struct l2cap_chan
*chan
= container_of(work
, struct l2cap_chan
,
2744 BT_DBG("chan %p", chan
);
2746 l2cap_chan_lock(chan
);
2748 frames_to_ack
= __seq_offset(chan
, chan
->buffer_seq
,
2749 chan
->last_acked_seq
);
2752 l2cap_send_rr_or_rnr(chan
, 0);
2754 l2cap_chan_unlock(chan
);
2755 l2cap_chan_put(chan
);
2758 int l2cap_ertm_init(struct l2cap_chan
*chan
)
2762 chan
->next_tx_seq
= 0;
2763 chan
->expected_tx_seq
= 0;
2764 chan
->expected_ack_seq
= 0;
2765 chan
->unacked_frames
= 0;
2766 chan
->buffer_seq
= 0;
2767 chan
->frames_sent
= 0;
2768 chan
->last_acked_seq
= 0;
2770 chan
->sdu_last_frag
= NULL
;
2773 skb_queue_head_init(&chan
->tx_q
);
2775 if (chan
->mode
!= L2CAP_MODE_ERTM
)
2778 chan
->rx_state
= L2CAP_RX_STATE_RECV
;
2779 chan
->tx_state
= L2CAP_TX_STATE_XMIT
;
2781 INIT_DELAYED_WORK(&chan
->retrans_timer
, l2cap_retrans_timeout
);
2782 INIT_DELAYED_WORK(&chan
->monitor_timer
, l2cap_monitor_timeout
);
2783 INIT_DELAYED_WORK(&chan
->ack_timer
, l2cap_ack_timeout
);
2785 skb_queue_head_init(&chan
->srej_q
);
2787 err
= l2cap_seq_list_init(&chan
->srej_list
, chan
->tx_win
);
2791 err
= l2cap_seq_list_init(&chan
->retrans_list
, chan
->remote_tx_win
);
2793 l2cap_seq_list_free(&chan
->srej_list
);
2798 static inline __u8
l2cap_select_mode(__u8 mode
, __u16 remote_feat_mask
)
2801 case L2CAP_MODE_STREAMING
:
2802 case L2CAP_MODE_ERTM
:
2803 if (l2cap_mode_supported(mode
, remote_feat_mask
))
2807 return L2CAP_MODE_BASIC
;
2811 static inline bool __l2cap_ews_supported(struct l2cap_chan
*chan
)
2813 return enable_hs
&& chan
->conn
->feat_mask
& L2CAP_FEAT_EXT_WINDOW
;
2816 static inline bool __l2cap_efs_supported(struct l2cap_chan
*chan
)
2818 return enable_hs
&& chan
->conn
->feat_mask
& L2CAP_FEAT_EXT_FLOW
;
2821 static inline void l2cap_txwin_setup(struct l2cap_chan
*chan
)
2823 if (chan
->tx_win
> L2CAP_DEFAULT_TX_WINDOW
&&
2824 __l2cap_ews_supported(chan
)) {
2825 /* use extended control field */
2826 set_bit(FLAG_EXT_CTRL
, &chan
->flags
);
2827 chan
->tx_win_max
= L2CAP_DEFAULT_EXT_WINDOW
;
2829 chan
->tx_win
= min_t(u16
, chan
->tx_win
,
2830 L2CAP_DEFAULT_TX_WINDOW
);
2831 chan
->tx_win_max
= L2CAP_DEFAULT_TX_WINDOW
;
2833 chan
->ack_win
= chan
->tx_win
;
2836 static int l2cap_build_conf_req(struct l2cap_chan
*chan
, void *data
)
2838 struct l2cap_conf_req
*req
= data
;
2839 struct l2cap_conf_rfc rfc
= { .mode
= chan
->mode
};
2840 void *ptr
= req
->data
;
2843 BT_DBG("chan %p", chan
);
2845 if (chan
->num_conf_req
|| chan
->num_conf_rsp
)
2848 switch (chan
->mode
) {
2849 case L2CAP_MODE_STREAMING
:
2850 case L2CAP_MODE_ERTM
:
2851 if (test_bit(CONF_STATE2_DEVICE
, &chan
->conf_state
))
2854 if (__l2cap_efs_supported(chan
))
2855 set_bit(FLAG_EFS_ENABLE
, &chan
->flags
);
2859 chan
->mode
= l2cap_select_mode(rfc
.mode
, chan
->conn
->feat_mask
);
2864 if (chan
->imtu
!= L2CAP_DEFAULT_MTU
)
2865 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_MTU
, 2, chan
->imtu
);
2867 switch (chan
->mode
) {
2868 case L2CAP_MODE_BASIC
:
2869 if (!(chan
->conn
->feat_mask
& L2CAP_FEAT_ERTM
) &&
2870 !(chan
->conn
->feat_mask
& L2CAP_FEAT_STREAMING
))
2873 rfc
.mode
= L2CAP_MODE_BASIC
;
2875 rfc
.max_transmit
= 0;
2876 rfc
.retrans_timeout
= 0;
2877 rfc
.monitor_timeout
= 0;
2878 rfc
.max_pdu_size
= 0;
2880 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_RFC
, sizeof(rfc
),
2881 (unsigned long) &rfc
);
2884 case L2CAP_MODE_ERTM
:
2885 rfc
.mode
= L2CAP_MODE_ERTM
;
2886 rfc
.max_transmit
= chan
->max_tx
;
2887 rfc
.retrans_timeout
= 0;
2888 rfc
.monitor_timeout
= 0;
2890 size
= min_t(u16
, L2CAP_DEFAULT_MAX_PDU_SIZE
, chan
->conn
->mtu
-
2891 L2CAP_EXT_HDR_SIZE
-
2894 rfc
.max_pdu_size
= cpu_to_le16(size
);
2896 l2cap_txwin_setup(chan
);
2898 rfc
.txwin_size
= min_t(u16
, chan
->tx_win
,
2899 L2CAP_DEFAULT_TX_WINDOW
);
2901 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_RFC
, sizeof(rfc
),
2902 (unsigned long) &rfc
);
2904 if (test_bit(FLAG_EFS_ENABLE
, &chan
->flags
))
2905 l2cap_add_opt_efs(&ptr
, chan
);
2907 if (!(chan
->conn
->feat_mask
& L2CAP_FEAT_FCS
))
2910 if (chan
->fcs
== L2CAP_FCS_NONE
||
2911 test_bit(CONF_NO_FCS_RECV
, &chan
->conf_state
)) {
2912 chan
->fcs
= L2CAP_FCS_NONE
;
2913 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_FCS
, 1, chan
->fcs
);
2916 if (test_bit(FLAG_EXT_CTRL
, &chan
->flags
))
2917 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_EWS
, 2,
2921 case L2CAP_MODE_STREAMING
:
2922 l2cap_txwin_setup(chan
);
2923 rfc
.mode
= L2CAP_MODE_STREAMING
;
2925 rfc
.max_transmit
= 0;
2926 rfc
.retrans_timeout
= 0;
2927 rfc
.monitor_timeout
= 0;
2929 size
= min_t(u16
, L2CAP_DEFAULT_MAX_PDU_SIZE
, chan
->conn
->mtu
-
2930 L2CAP_EXT_HDR_SIZE
-
2933 rfc
.max_pdu_size
= cpu_to_le16(size
);
2935 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_RFC
, sizeof(rfc
),
2936 (unsigned long) &rfc
);
2938 if (test_bit(FLAG_EFS_ENABLE
, &chan
->flags
))
2939 l2cap_add_opt_efs(&ptr
, chan
);
2941 if (!(chan
->conn
->feat_mask
& L2CAP_FEAT_FCS
))
2944 if (chan
->fcs
== L2CAP_FCS_NONE
||
2945 test_bit(CONF_NO_FCS_RECV
, &chan
->conf_state
)) {
2946 chan
->fcs
= L2CAP_FCS_NONE
;
2947 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_FCS
, 1, chan
->fcs
);
2952 req
->dcid
= cpu_to_le16(chan
->dcid
);
2953 req
->flags
= __constant_cpu_to_le16(0);
2958 static int l2cap_parse_conf_req(struct l2cap_chan
*chan
, void *data
)
2960 struct l2cap_conf_rsp
*rsp
= data
;
2961 void *ptr
= rsp
->data
;
2962 void *req
= chan
->conf_req
;
2963 int len
= chan
->conf_len
;
2964 int type
, hint
, olen
;
2966 struct l2cap_conf_rfc rfc
= { .mode
= L2CAP_MODE_BASIC
};
2967 struct l2cap_conf_efs efs
;
2969 u16 mtu
= L2CAP_DEFAULT_MTU
;
2970 u16 result
= L2CAP_CONF_SUCCESS
;
2973 BT_DBG("chan %p", chan
);
2975 while (len
>= L2CAP_CONF_OPT_SIZE
) {
2976 len
-= l2cap_get_conf_opt(&req
, &type
, &olen
, &val
);
2978 hint
= type
& L2CAP_CONF_HINT
;
2979 type
&= L2CAP_CONF_MASK
;
2982 case L2CAP_CONF_MTU
:
2986 case L2CAP_CONF_FLUSH_TO
:
2987 chan
->flush_to
= val
;
2990 case L2CAP_CONF_QOS
:
2993 case L2CAP_CONF_RFC
:
2994 if (olen
== sizeof(rfc
))
2995 memcpy(&rfc
, (void *) val
, olen
);
2998 case L2CAP_CONF_FCS
:
2999 if (val
== L2CAP_FCS_NONE
)
3000 set_bit(CONF_NO_FCS_RECV
, &chan
->conf_state
);
3003 case L2CAP_CONF_EFS
:
3005 if (olen
== sizeof(efs
))
3006 memcpy(&efs
, (void *) val
, olen
);
3009 case L2CAP_CONF_EWS
:
3011 return -ECONNREFUSED
;
3013 set_bit(FLAG_EXT_CTRL
, &chan
->flags
);
3014 set_bit(CONF_EWS_RECV
, &chan
->conf_state
);
3015 chan
->tx_win_max
= L2CAP_DEFAULT_EXT_WINDOW
;
3016 chan
->remote_tx_win
= val
;
3023 result
= L2CAP_CONF_UNKNOWN
;
3024 *((u8
*) ptr
++) = type
;
3029 if (chan
->num_conf_rsp
|| chan
->num_conf_req
> 1)
3032 switch (chan
->mode
) {
3033 case L2CAP_MODE_STREAMING
:
3034 case L2CAP_MODE_ERTM
:
3035 if (!test_bit(CONF_STATE2_DEVICE
, &chan
->conf_state
)) {
3036 chan
->mode
= l2cap_select_mode(rfc
.mode
,
3037 chan
->conn
->feat_mask
);
3042 if (__l2cap_efs_supported(chan
))
3043 set_bit(FLAG_EFS_ENABLE
, &chan
->flags
);
3045 return -ECONNREFUSED
;
3048 if (chan
->mode
!= rfc
.mode
)
3049 return -ECONNREFUSED
;
3055 if (chan
->mode
!= rfc
.mode
) {
3056 result
= L2CAP_CONF_UNACCEPT
;
3057 rfc
.mode
= chan
->mode
;
3059 if (chan
->num_conf_rsp
== 1)
3060 return -ECONNREFUSED
;
3062 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_RFC
,
3063 sizeof(rfc
), (unsigned long) &rfc
);
3066 if (result
== L2CAP_CONF_SUCCESS
) {
3067 /* Configure output options and let the other side know
3068 * which ones we don't like. */
3070 if (mtu
< L2CAP_DEFAULT_MIN_MTU
)
3071 result
= L2CAP_CONF_UNACCEPT
;
3074 set_bit(CONF_MTU_DONE
, &chan
->conf_state
);
3076 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_MTU
, 2, chan
->omtu
);
3079 if (chan
->local_stype
!= L2CAP_SERV_NOTRAFIC
&&
3080 efs
.stype
!= L2CAP_SERV_NOTRAFIC
&&
3081 efs
.stype
!= chan
->local_stype
) {
3083 result
= L2CAP_CONF_UNACCEPT
;
3085 if (chan
->num_conf_req
>= 1)
3086 return -ECONNREFUSED
;
3088 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_EFS
,
3090 (unsigned long) &efs
);
3092 /* Send PENDING Conf Rsp */
3093 result
= L2CAP_CONF_PENDING
;
3094 set_bit(CONF_LOC_CONF_PEND
, &chan
->conf_state
);
3099 case L2CAP_MODE_BASIC
:
3100 chan
->fcs
= L2CAP_FCS_NONE
;
3101 set_bit(CONF_MODE_DONE
, &chan
->conf_state
);
3104 case L2CAP_MODE_ERTM
:
3105 if (!test_bit(CONF_EWS_RECV
, &chan
->conf_state
))
3106 chan
->remote_tx_win
= rfc
.txwin_size
;
3108 rfc
.txwin_size
= L2CAP_DEFAULT_TX_WINDOW
;
3110 chan
->remote_max_tx
= rfc
.max_transmit
;
3112 size
= min_t(u16
, le16_to_cpu(rfc
.max_pdu_size
),
3114 L2CAP_EXT_HDR_SIZE
-
3117 rfc
.max_pdu_size
= cpu_to_le16(size
);
3118 chan
->remote_mps
= size
;
3120 rfc
.retrans_timeout
=
3121 __constant_cpu_to_le16(L2CAP_DEFAULT_RETRANS_TO
);
3122 rfc
.monitor_timeout
=
3123 __constant_cpu_to_le16(L2CAP_DEFAULT_MONITOR_TO
);
3125 set_bit(CONF_MODE_DONE
, &chan
->conf_state
);
3127 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_RFC
,
3128 sizeof(rfc
), (unsigned long) &rfc
);
3130 if (test_bit(FLAG_EFS_ENABLE
, &chan
->flags
)) {
3131 chan
->remote_id
= efs
.id
;
3132 chan
->remote_stype
= efs
.stype
;
3133 chan
->remote_msdu
= le16_to_cpu(efs
.msdu
);
3134 chan
->remote_flush_to
=
3135 le32_to_cpu(efs
.flush_to
);
3136 chan
->remote_acc_lat
=
3137 le32_to_cpu(efs
.acc_lat
);
3138 chan
->remote_sdu_itime
=
3139 le32_to_cpu(efs
.sdu_itime
);
3140 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_EFS
,
3141 sizeof(efs
), (unsigned long) &efs
);
3145 case L2CAP_MODE_STREAMING
:
3146 size
= min_t(u16
, le16_to_cpu(rfc
.max_pdu_size
),
3148 L2CAP_EXT_HDR_SIZE
-
3151 rfc
.max_pdu_size
= cpu_to_le16(size
);
3152 chan
->remote_mps
= size
;
3154 set_bit(CONF_MODE_DONE
, &chan
->conf_state
);
3156 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_RFC
,
3157 sizeof(rfc
), (unsigned long) &rfc
);
3162 result
= L2CAP_CONF_UNACCEPT
;
3164 memset(&rfc
, 0, sizeof(rfc
));
3165 rfc
.mode
= chan
->mode
;
3168 if (result
== L2CAP_CONF_SUCCESS
)
3169 set_bit(CONF_OUTPUT_DONE
, &chan
->conf_state
);
3171 rsp
->scid
= cpu_to_le16(chan
->dcid
);
3172 rsp
->result
= cpu_to_le16(result
);
3173 rsp
->flags
= __constant_cpu_to_le16(0);
3178 static int l2cap_parse_conf_rsp(struct l2cap_chan
*chan
, void *rsp
, int len
, void *data
, u16
*result
)
3180 struct l2cap_conf_req
*req
= data
;
3181 void *ptr
= req
->data
;
3184 struct l2cap_conf_rfc rfc
= { .mode
= L2CAP_MODE_BASIC
};
3185 struct l2cap_conf_efs efs
;
3187 BT_DBG("chan %p, rsp %p, len %d, req %p", chan
, rsp
, len
, data
);
3189 while (len
>= L2CAP_CONF_OPT_SIZE
) {
3190 len
-= l2cap_get_conf_opt(&rsp
, &type
, &olen
, &val
);
3193 case L2CAP_CONF_MTU
:
3194 if (val
< L2CAP_DEFAULT_MIN_MTU
) {
3195 *result
= L2CAP_CONF_UNACCEPT
;
3196 chan
->imtu
= L2CAP_DEFAULT_MIN_MTU
;
3199 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_MTU
, 2, chan
->imtu
);
3202 case L2CAP_CONF_FLUSH_TO
:
3203 chan
->flush_to
= val
;
3204 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_FLUSH_TO
,
3208 case L2CAP_CONF_RFC
:
3209 if (olen
== sizeof(rfc
))
3210 memcpy(&rfc
, (void *)val
, olen
);
3212 if (test_bit(CONF_STATE2_DEVICE
, &chan
->conf_state
) &&
3213 rfc
.mode
!= chan
->mode
)
3214 return -ECONNREFUSED
;
3218 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_RFC
,
3219 sizeof(rfc
), (unsigned long) &rfc
);
3222 case L2CAP_CONF_EWS
:
3223 chan
->ack_win
= min_t(u16
, val
, chan
->ack_win
);
3224 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_EWS
, 2,
3228 case L2CAP_CONF_EFS
:
3229 if (olen
== sizeof(efs
))
3230 memcpy(&efs
, (void *)val
, olen
);
3232 if (chan
->local_stype
!= L2CAP_SERV_NOTRAFIC
&&
3233 efs
.stype
!= L2CAP_SERV_NOTRAFIC
&&
3234 efs
.stype
!= chan
->local_stype
)
3235 return -ECONNREFUSED
;
3237 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_EFS
,
3238 sizeof(efs
), (unsigned long) &efs
);
3243 if (chan
->mode
== L2CAP_MODE_BASIC
&& chan
->mode
!= rfc
.mode
)
3244 return -ECONNREFUSED
;
3246 chan
->mode
= rfc
.mode
;
3248 if (*result
== L2CAP_CONF_SUCCESS
|| *result
== L2CAP_CONF_PENDING
) {
3250 case L2CAP_MODE_ERTM
:
3251 chan
->retrans_timeout
= le16_to_cpu(rfc
.retrans_timeout
);
3252 chan
->monitor_timeout
= le16_to_cpu(rfc
.monitor_timeout
);
3253 chan
->mps
= le16_to_cpu(rfc
.max_pdu_size
);
3254 if (!test_bit(FLAG_EXT_CTRL
, &chan
->flags
))
3255 chan
->ack_win
= min_t(u16
, chan
->ack_win
,
3258 if (test_bit(FLAG_EFS_ENABLE
, &chan
->flags
)) {
3259 chan
->local_msdu
= le16_to_cpu(efs
.msdu
);
3260 chan
->local_sdu_itime
=
3261 le32_to_cpu(efs
.sdu_itime
);
3262 chan
->local_acc_lat
= le32_to_cpu(efs
.acc_lat
);
3263 chan
->local_flush_to
=
3264 le32_to_cpu(efs
.flush_to
);
3268 case L2CAP_MODE_STREAMING
:
3269 chan
->mps
= le16_to_cpu(rfc
.max_pdu_size
);
3273 req
->dcid
= cpu_to_le16(chan
->dcid
);
3274 req
->flags
= __constant_cpu_to_le16(0);
3279 static int l2cap_build_conf_rsp(struct l2cap_chan
*chan
, void *data
, u16 result
, u16 flags
)
3281 struct l2cap_conf_rsp
*rsp
= data
;
3282 void *ptr
= rsp
->data
;
3284 BT_DBG("chan %p", chan
);
3286 rsp
->scid
= cpu_to_le16(chan
->dcid
);
3287 rsp
->result
= cpu_to_le16(result
);
3288 rsp
->flags
= cpu_to_le16(flags
);
3293 void __l2cap_connect_rsp_defer(struct l2cap_chan
*chan
)
3295 struct l2cap_conn_rsp rsp
;
3296 struct l2cap_conn
*conn
= chan
->conn
;
3299 rsp
.scid
= cpu_to_le16(chan
->dcid
);
3300 rsp
.dcid
= cpu_to_le16(chan
->scid
);
3301 rsp
.result
= __constant_cpu_to_le16(L2CAP_CR_SUCCESS
);
3302 rsp
.status
= __constant_cpu_to_le16(L2CAP_CS_NO_INFO
);
3303 l2cap_send_cmd(conn
, chan
->ident
,
3304 L2CAP_CONN_RSP
, sizeof(rsp
), &rsp
);
3306 if (test_and_set_bit(CONF_REQ_SENT
, &chan
->conf_state
))
3309 l2cap_send_cmd(conn
, l2cap_get_ident(conn
), L2CAP_CONF_REQ
,
3310 l2cap_build_conf_req(chan
, buf
), buf
);
3311 chan
->num_conf_req
++;
3314 static void l2cap_conf_rfc_get(struct l2cap_chan
*chan
, void *rsp
, int len
)
3318 /* Use sane default values in case a misbehaving remote device
3319 * did not send an RFC or extended window size option.
3321 u16 txwin_ext
= chan
->ack_win
;
3322 struct l2cap_conf_rfc rfc
= {
3324 .retrans_timeout
= __constant_cpu_to_le16(L2CAP_DEFAULT_RETRANS_TO
),
3325 .monitor_timeout
= __constant_cpu_to_le16(L2CAP_DEFAULT_MONITOR_TO
),
3326 .max_pdu_size
= cpu_to_le16(chan
->imtu
),
3327 .txwin_size
= min_t(u16
, chan
->ack_win
, L2CAP_DEFAULT_TX_WINDOW
),
3330 BT_DBG("chan %p, rsp %p, len %d", chan
, rsp
, len
);
3332 if ((chan
->mode
!= L2CAP_MODE_ERTM
) && (chan
->mode
!= L2CAP_MODE_STREAMING
))
3335 while (len
>= L2CAP_CONF_OPT_SIZE
) {
3336 len
-= l2cap_get_conf_opt(&rsp
, &type
, &olen
, &val
);
3339 case L2CAP_CONF_RFC
:
3340 if (olen
== sizeof(rfc
))
3341 memcpy(&rfc
, (void *)val
, olen
);
3343 case L2CAP_CONF_EWS
:
3350 case L2CAP_MODE_ERTM
:
3351 chan
->retrans_timeout
= le16_to_cpu(rfc
.retrans_timeout
);
3352 chan
->monitor_timeout
= le16_to_cpu(rfc
.monitor_timeout
);
3353 chan
->mps
= le16_to_cpu(rfc
.max_pdu_size
);
3354 if (test_bit(FLAG_EXT_CTRL
, &chan
->flags
))
3355 chan
->ack_win
= min_t(u16
, chan
->ack_win
, txwin_ext
);
3357 chan
->ack_win
= min_t(u16
, chan
->ack_win
,
3360 case L2CAP_MODE_STREAMING
:
3361 chan
->mps
= le16_to_cpu(rfc
.max_pdu_size
);
3365 static inline int l2cap_command_rej(struct l2cap_conn
*conn
, struct l2cap_cmd_hdr
*cmd
, u8
*data
)
3367 struct l2cap_cmd_rej_unk
*rej
= (struct l2cap_cmd_rej_unk
*) data
;
3369 if (rej
->reason
!= L2CAP_REJ_NOT_UNDERSTOOD
)
3372 if ((conn
->info_state
& L2CAP_INFO_FEAT_MASK_REQ_SENT
) &&
3373 cmd
->ident
== conn
->info_ident
) {
3374 cancel_delayed_work(&conn
->info_timer
);
3376 conn
->info_state
|= L2CAP_INFO_FEAT_MASK_REQ_DONE
;
3377 conn
->info_ident
= 0;
3379 l2cap_conn_start(conn
);
3385 static inline int l2cap_connect_req(struct l2cap_conn
*conn
, struct l2cap_cmd_hdr
*cmd
, u8
*data
)
3387 struct l2cap_conn_req
*req
= (struct l2cap_conn_req
*) data
;
3388 struct l2cap_conn_rsp rsp
;
3389 struct l2cap_chan
*chan
= NULL
, *pchan
;
3390 struct sock
*parent
, *sk
= NULL
;
3391 int result
, status
= L2CAP_CS_NO_INFO
;
3393 u16 dcid
= 0, scid
= __le16_to_cpu(req
->scid
);
3394 __le16 psm
= req
->psm
;
3396 BT_DBG("psm 0x%2.2x scid 0x%4.4x", __le16_to_cpu(psm
), scid
);
3398 /* Check if we have socket listening on psm */
3399 pchan
= l2cap_global_chan_by_psm(BT_LISTEN
, psm
, conn
->src
, conn
->dst
);
3401 result
= L2CAP_CR_BAD_PSM
;
3407 mutex_lock(&conn
->chan_lock
);
3410 /* Check if the ACL is secure enough (if not SDP) */
3411 if (psm
!= __constant_cpu_to_le16(L2CAP_PSM_SDP
) &&
3412 !hci_conn_check_link_mode(conn
->hcon
)) {
3413 conn
->disc_reason
= HCI_ERROR_AUTH_FAILURE
;
3414 result
= L2CAP_CR_SEC_BLOCK
;
3418 result
= L2CAP_CR_NO_MEM
;
3420 /* Check if we already have channel with that dcid */
3421 if (__l2cap_get_chan_by_dcid(conn
, scid
))
3424 chan
= pchan
->ops
->new_connection(pchan
);
3430 hci_conn_hold(conn
->hcon
);
3432 bacpy(&bt_sk(sk
)->src
, conn
->src
);
3433 bacpy(&bt_sk(sk
)->dst
, conn
->dst
);
3437 bt_accept_enqueue(parent
, sk
);
3439 __l2cap_chan_add(conn
, chan
);
3443 __set_chan_timer(chan
, sk
->sk_sndtimeo
);
3445 chan
->ident
= cmd
->ident
;
3447 if (conn
->info_state
& L2CAP_INFO_FEAT_MASK_REQ_DONE
) {
3448 if (l2cap_chan_check_security(chan
)) {
3449 if (test_bit(BT_SK_DEFER_SETUP
, &bt_sk(sk
)->flags
)) {
3450 __l2cap_state_change(chan
, BT_CONNECT2
);
3451 result
= L2CAP_CR_PEND
;
3452 status
= L2CAP_CS_AUTHOR_PEND
;
3453 parent
->sk_data_ready(parent
, 0);
3455 __l2cap_state_change(chan
, BT_CONFIG
);
3456 result
= L2CAP_CR_SUCCESS
;
3457 status
= L2CAP_CS_NO_INFO
;
3460 __l2cap_state_change(chan
, BT_CONNECT2
);
3461 result
= L2CAP_CR_PEND
;
3462 status
= L2CAP_CS_AUTHEN_PEND
;
3465 __l2cap_state_change(chan
, BT_CONNECT2
);
3466 result
= L2CAP_CR_PEND
;
3467 status
= L2CAP_CS_NO_INFO
;
3471 release_sock(parent
);
3472 mutex_unlock(&conn
->chan_lock
);
3475 rsp
.scid
= cpu_to_le16(scid
);
3476 rsp
.dcid
= cpu_to_le16(dcid
);
3477 rsp
.result
= cpu_to_le16(result
);
3478 rsp
.status
= cpu_to_le16(status
);
3479 l2cap_send_cmd(conn
, cmd
->ident
, L2CAP_CONN_RSP
, sizeof(rsp
), &rsp
);
3481 if (result
== L2CAP_CR_PEND
&& status
== L2CAP_CS_NO_INFO
) {
3482 struct l2cap_info_req info
;
3483 info
.type
= __constant_cpu_to_le16(L2CAP_IT_FEAT_MASK
);
3485 conn
->info_state
|= L2CAP_INFO_FEAT_MASK_REQ_SENT
;
3486 conn
->info_ident
= l2cap_get_ident(conn
);
3488 schedule_delayed_work(&conn
->info_timer
, L2CAP_INFO_TIMEOUT
);
3490 l2cap_send_cmd(conn
, conn
->info_ident
,
3491 L2CAP_INFO_REQ
, sizeof(info
), &info
);
3494 if (chan
&& !test_bit(CONF_REQ_SENT
, &chan
->conf_state
) &&
3495 result
== L2CAP_CR_SUCCESS
) {
3497 set_bit(CONF_REQ_SENT
, &chan
->conf_state
);
3498 l2cap_send_cmd(conn
, l2cap_get_ident(conn
), L2CAP_CONF_REQ
,
3499 l2cap_build_conf_req(chan
, buf
), buf
);
3500 chan
->num_conf_req
++;
3506 static inline int l2cap_connect_rsp(struct l2cap_conn
*conn
, struct l2cap_cmd_hdr
*cmd
, u8
*data
)
3508 struct l2cap_conn_rsp
*rsp
= (struct l2cap_conn_rsp
*) data
;
3509 u16 scid
, dcid
, result
, status
;
3510 struct l2cap_chan
*chan
;
3514 scid
= __le16_to_cpu(rsp
->scid
);
3515 dcid
= __le16_to_cpu(rsp
->dcid
);
3516 result
= __le16_to_cpu(rsp
->result
);
3517 status
= __le16_to_cpu(rsp
->status
);
3519 BT_DBG("dcid 0x%4.4x scid 0x%4.4x result 0x%2.2x status 0x%2.2x",
3520 dcid
, scid
, result
, status
);
3522 mutex_lock(&conn
->chan_lock
);
3525 chan
= __l2cap_get_chan_by_scid(conn
, scid
);
3531 chan
= __l2cap_get_chan_by_ident(conn
, cmd
->ident
);
3540 l2cap_chan_lock(chan
);
3543 case L2CAP_CR_SUCCESS
:
3544 l2cap_state_change(chan
, BT_CONFIG
);
3547 clear_bit(CONF_CONNECT_PEND
, &chan
->conf_state
);
3549 if (test_and_set_bit(CONF_REQ_SENT
, &chan
->conf_state
))
3552 l2cap_send_cmd(conn
, l2cap_get_ident(conn
), L2CAP_CONF_REQ
,
3553 l2cap_build_conf_req(chan
, req
), req
);
3554 chan
->num_conf_req
++;
3558 set_bit(CONF_CONNECT_PEND
, &chan
->conf_state
);
3562 l2cap_chan_del(chan
, ECONNREFUSED
);
3566 l2cap_chan_unlock(chan
);
3569 mutex_unlock(&conn
->chan_lock
);
3574 static inline void set_default_fcs(struct l2cap_chan
*chan
)
3576 /* FCS is enabled only in ERTM or streaming mode, if one or both
3579 if (chan
->mode
!= L2CAP_MODE_ERTM
&& chan
->mode
!= L2CAP_MODE_STREAMING
)
3580 chan
->fcs
= L2CAP_FCS_NONE
;
3581 else if (!test_bit(CONF_NO_FCS_RECV
, &chan
->conf_state
))
3582 chan
->fcs
= L2CAP_FCS_CRC16
;
3585 static inline int l2cap_config_req(struct l2cap_conn
*conn
, struct l2cap_cmd_hdr
*cmd
, u16 cmd_len
, u8
*data
)
3587 struct l2cap_conf_req
*req
= (struct l2cap_conf_req
*) data
;
3590 struct l2cap_chan
*chan
;
3593 dcid
= __le16_to_cpu(req
->dcid
);
3594 flags
= __le16_to_cpu(req
->flags
);
3596 BT_DBG("dcid 0x%4.4x flags 0x%2.2x", dcid
, flags
);
3598 chan
= l2cap_get_chan_by_scid(conn
, dcid
);
3602 if (chan
->state
!= BT_CONFIG
&& chan
->state
!= BT_CONNECT2
) {
3603 struct l2cap_cmd_rej_cid rej
;
3605 rej
.reason
= __constant_cpu_to_le16(L2CAP_REJ_INVALID_CID
);
3606 rej
.scid
= cpu_to_le16(chan
->scid
);
3607 rej
.dcid
= cpu_to_le16(chan
->dcid
);
3609 l2cap_send_cmd(conn
, cmd
->ident
, L2CAP_COMMAND_REJ
,
3614 /* Reject if config buffer is too small. */
3615 len
= cmd_len
- sizeof(*req
);
3616 if (len
< 0 || chan
->conf_len
+ len
> sizeof(chan
->conf_req
)) {
3617 l2cap_send_cmd(conn
, cmd
->ident
, L2CAP_CONF_RSP
,
3618 l2cap_build_conf_rsp(chan
, rsp
,
3619 L2CAP_CONF_REJECT
, flags
), rsp
);
3624 memcpy(chan
->conf_req
+ chan
->conf_len
, req
->data
, len
);
3625 chan
->conf_len
+= len
;
3627 if (flags
& L2CAP_CONF_FLAG_CONTINUATION
) {
3628 /* Incomplete config. Send empty response. */
3629 l2cap_send_cmd(conn
, cmd
->ident
, L2CAP_CONF_RSP
,
3630 l2cap_build_conf_rsp(chan
, rsp
,
3631 L2CAP_CONF_SUCCESS
, flags
), rsp
);
3635 /* Complete config. */
3636 len
= l2cap_parse_conf_req(chan
, rsp
);
3638 l2cap_send_disconn_req(conn
, chan
, ECONNRESET
);
3642 l2cap_send_cmd(conn
, cmd
->ident
, L2CAP_CONF_RSP
, len
, rsp
);
3643 chan
->num_conf_rsp
++;
3645 /* Reset config buffer. */
3648 if (!test_bit(CONF_OUTPUT_DONE
, &chan
->conf_state
))
3651 if (test_bit(CONF_INPUT_DONE
, &chan
->conf_state
)) {
3652 set_default_fcs(chan
);
3654 if (chan
->mode
== L2CAP_MODE_ERTM
||
3655 chan
->mode
== L2CAP_MODE_STREAMING
)
3656 err
= l2cap_ertm_init(chan
);
3659 l2cap_send_disconn_req(chan
->conn
, chan
, -err
);
3661 l2cap_chan_ready(chan
);
3666 if (!test_and_set_bit(CONF_REQ_SENT
, &chan
->conf_state
)) {
3668 l2cap_send_cmd(conn
, l2cap_get_ident(conn
), L2CAP_CONF_REQ
,
3669 l2cap_build_conf_req(chan
, buf
), buf
);
3670 chan
->num_conf_req
++;
3673 /* Got Conf Rsp PENDING from remote side and asume we sent
3674 Conf Rsp PENDING in the code above */
3675 if (test_bit(CONF_REM_CONF_PEND
, &chan
->conf_state
) &&
3676 test_bit(CONF_LOC_CONF_PEND
, &chan
->conf_state
)) {
3678 /* check compatibility */
3680 clear_bit(CONF_LOC_CONF_PEND
, &chan
->conf_state
);
3681 set_bit(CONF_OUTPUT_DONE
, &chan
->conf_state
);
3683 l2cap_send_cmd(conn
, cmd
->ident
, L2CAP_CONF_RSP
,
3684 l2cap_build_conf_rsp(chan
, rsp
,
3685 L2CAP_CONF_SUCCESS
, flags
), rsp
);
3689 l2cap_chan_unlock(chan
);
3693 static inline int l2cap_config_rsp(struct l2cap_conn
*conn
, struct l2cap_cmd_hdr
*cmd
, u8
*data
)
3695 struct l2cap_conf_rsp
*rsp
= (struct l2cap_conf_rsp
*)data
;
3696 u16 scid
, flags
, result
;
3697 struct l2cap_chan
*chan
;
3698 int len
= le16_to_cpu(cmd
->len
) - sizeof(*rsp
);
3701 scid
= __le16_to_cpu(rsp
->scid
);
3702 flags
= __le16_to_cpu(rsp
->flags
);
3703 result
= __le16_to_cpu(rsp
->result
);
3705 BT_DBG("scid 0x%4.4x flags 0x%2.2x result 0x%2.2x len %d", scid
, flags
,
3708 chan
= l2cap_get_chan_by_scid(conn
, scid
);
3713 case L2CAP_CONF_SUCCESS
:
3714 l2cap_conf_rfc_get(chan
, rsp
->data
, len
);
3715 clear_bit(CONF_REM_CONF_PEND
, &chan
->conf_state
);
3718 case L2CAP_CONF_PENDING
:
3719 set_bit(CONF_REM_CONF_PEND
, &chan
->conf_state
);
3721 if (test_bit(CONF_LOC_CONF_PEND
, &chan
->conf_state
)) {
3724 len
= l2cap_parse_conf_rsp(chan
, rsp
->data
, len
,
3727 l2cap_send_disconn_req(conn
, chan
, ECONNRESET
);
3731 /* check compatibility */
3733 clear_bit(CONF_LOC_CONF_PEND
, &chan
->conf_state
);
3734 set_bit(CONF_OUTPUT_DONE
, &chan
->conf_state
);
3736 l2cap_send_cmd(conn
, cmd
->ident
, L2CAP_CONF_RSP
,
3737 l2cap_build_conf_rsp(chan
, buf
,
3738 L2CAP_CONF_SUCCESS
, 0x0000), buf
);
3742 case L2CAP_CONF_UNACCEPT
:
3743 if (chan
->num_conf_rsp
<= L2CAP_CONF_MAX_CONF_RSP
) {
3746 if (len
> sizeof(req
) - sizeof(struct l2cap_conf_req
)) {
3747 l2cap_send_disconn_req(conn
, chan
, ECONNRESET
);
3751 /* throw out any old stored conf requests */
3752 result
= L2CAP_CONF_SUCCESS
;
3753 len
= l2cap_parse_conf_rsp(chan
, rsp
->data
, len
,
3756 l2cap_send_disconn_req(conn
, chan
, ECONNRESET
);
3760 l2cap_send_cmd(conn
, l2cap_get_ident(conn
),
3761 L2CAP_CONF_REQ
, len
, req
);
3762 chan
->num_conf_req
++;
3763 if (result
!= L2CAP_CONF_SUCCESS
)
3769 l2cap_chan_set_err(chan
, ECONNRESET
);
3771 __set_chan_timer(chan
, L2CAP_DISC_REJ_TIMEOUT
);
3772 l2cap_send_disconn_req(conn
, chan
, ECONNRESET
);
3776 if (flags
& L2CAP_CONF_FLAG_CONTINUATION
)
3779 set_bit(CONF_INPUT_DONE
, &chan
->conf_state
);
3781 if (test_bit(CONF_OUTPUT_DONE
, &chan
->conf_state
)) {
3782 set_default_fcs(chan
);
3784 if (chan
->mode
== L2CAP_MODE_ERTM
||
3785 chan
->mode
== L2CAP_MODE_STREAMING
)
3786 err
= l2cap_ertm_init(chan
);
3789 l2cap_send_disconn_req(chan
->conn
, chan
, -err
);
3791 l2cap_chan_ready(chan
);
3795 l2cap_chan_unlock(chan
);
3799 static inline int l2cap_disconnect_req(struct l2cap_conn
*conn
, struct l2cap_cmd_hdr
*cmd
, u8
*data
)
3801 struct l2cap_disconn_req
*req
= (struct l2cap_disconn_req
*) data
;
3802 struct l2cap_disconn_rsp rsp
;
3804 struct l2cap_chan
*chan
;
3807 scid
= __le16_to_cpu(req
->scid
);
3808 dcid
= __le16_to_cpu(req
->dcid
);
3810 BT_DBG("scid 0x%4.4x dcid 0x%4.4x", scid
, dcid
);
3812 mutex_lock(&conn
->chan_lock
);
3814 chan
= __l2cap_get_chan_by_scid(conn
, dcid
);
3816 mutex_unlock(&conn
->chan_lock
);
3820 l2cap_chan_lock(chan
);
3824 rsp
.dcid
= cpu_to_le16(chan
->scid
);
3825 rsp
.scid
= cpu_to_le16(chan
->dcid
);
3826 l2cap_send_cmd(conn
, cmd
->ident
, L2CAP_DISCONN_RSP
, sizeof(rsp
), &rsp
);
3829 sk
->sk_shutdown
= SHUTDOWN_MASK
;
3832 l2cap_chan_hold(chan
);
3833 l2cap_chan_del(chan
, ECONNRESET
);
3835 l2cap_chan_unlock(chan
);
3837 chan
->ops
->close(chan
);
3838 l2cap_chan_put(chan
);
3840 mutex_unlock(&conn
->chan_lock
);
3845 static inline int l2cap_disconnect_rsp(struct l2cap_conn
*conn
, struct l2cap_cmd_hdr
*cmd
, u8
*data
)
3847 struct l2cap_disconn_rsp
*rsp
= (struct l2cap_disconn_rsp
*) data
;
3849 struct l2cap_chan
*chan
;
3851 scid
= __le16_to_cpu(rsp
->scid
);
3852 dcid
= __le16_to_cpu(rsp
->dcid
);
3854 BT_DBG("dcid 0x%4.4x scid 0x%4.4x", dcid
, scid
);
3856 mutex_lock(&conn
->chan_lock
);
3858 chan
= __l2cap_get_chan_by_scid(conn
, scid
);
3860 mutex_unlock(&conn
->chan_lock
);
3864 l2cap_chan_lock(chan
);
3866 l2cap_chan_hold(chan
);
3867 l2cap_chan_del(chan
, 0);
3869 l2cap_chan_unlock(chan
);
3871 chan
->ops
->close(chan
);
3872 l2cap_chan_put(chan
);
3874 mutex_unlock(&conn
->chan_lock
);
3879 static inline int l2cap_information_req(struct l2cap_conn
*conn
, struct l2cap_cmd_hdr
*cmd
, u8
*data
)
3881 struct l2cap_info_req
*req
= (struct l2cap_info_req
*) data
;
3884 type
= __le16_to_cpu(req
->type
);
3886 BT_DBG("type 0x%4.4x", type
);
3888 if (type
== L2CAP_IT_FEAT_MASK
) {
3890 u32 feat_mask
= l2cap_feat_mask
;
3891 struct l2cap_info_rsp
*rsp
= (struct l2cap_info_rsp
*) buf
;
3892 rsp
->type
= __constant_cpu_to_le16(L2CAP_IT_FEAT_MASK
);
3893 rsp
->result
= __constant_cpu_to_le16(L2CAP_IR_SUCCESS
);
3895 feat_mask
|= L2CAP_FEAT_ERTM
| L2CAP_FEAT_STREAMING
3898 feat_mask
|= L2CAP_FEAT_EXT_FLOW
3899 | L2CAP_FEAT_EXT_WINDOW
;
3901 put_unaligned_le32(feat_mask
, rsp
->data
);
3902 l2cap_send_cmd(conn
, cmd
->ident
,
3903 L2CAP_INFO_RSP
, sizeof(buf
), buf
);
3904 } else if (type
== L2CAP_IT_FIXED_CHAN
) {
3906 struct l2cap_info_rsp
*rsp
= (struct l2cap_info_rsp
*) buf
;
3909 l2cap_fixed_chan
[0] |= L2CAP_FC_A2MP
;
3911 l2cap_fixed_chan
[0] &= ~L2CAP_FC_A2MP
;
3913 rsp
->type
= __constant_cpu_to_le16(L2CAP_IT_FIXED_CHAN
);
3914 rsp
->result
= __constant_cpu_to_le16(L2CAP_IR_SUCCESS
);
3915 memcpy(rsp
->data
, l2cap_fixed_chan
, sizeof(l2cap_fixed_chan
));
3916 l2cap_send_cmd(conn
, cmd
->ident
,
3917 L2CAP_INFO_RSP
, sizeof(buf
), buf
);
3919 struct l2cap_info_rsp rsp
;
3920 rsp
.type
= cpu_to_le16(type
);
3921 rsp
.result
= __constant_cpu_to_le16(L2CAP_IR_NOTSUPP
);
3922 l2cap_send_cmd(conn
, cmd
->ident
,
3923 L2CAP_INFO_RSP
, sizeof(rsp
), &rsp
);
3929 static inline int l2cap_information_rsp(struct l2cap_conn
*conn
, struct l2cap_cmd_hdr
*cmd
, u8
*data
)
3931 struct l2cap_info_rsp
*rsp
= (struct l2cap_info_rsp
*) data
;
3934 type
= __le16_to_cpu(rsp
->type
);
3935 result
= __le16_to_cpu(rsp
->result
);
3937 BT_DBG("type 0x%4.4x result 0x%2.2x", type
, result
);
3939 /* L2CAP Info req/rsp are unbound to channels, add extra checks */
3940 if (cmd
->ident
!= conn
->info_ident
||
3941 conn
->info_state
& L2CAP_INFO_FEAT_MASK_REQ_DONE
)
3944 cancel_delayed_work(&conn
->info_timer
);
3946 if (result
!= L2CAP_IR_SUCCESS
) {
3947 conn
->info_state
|= L2CAP_INFO_FEAT_MASK_REQ_DONE
;
3948 conn
->info_ident
= 0;
3950 l2cap_conn_start(conn
);
3956 case L2CAP_IT_FEAT_MASK
:
3957 conn
->feat_mask
= get_unaligned_le32(rsp
->data
);
3959 if (conn
->feat_mask
& L2CAP_FEAT_FIXED_CHAN
) {
3960 struct l2cap_info_req req
;
3961 req
.type
= __constant_cpu_to_le16(L2CAP_IT_FIXED_CHAN
);
3963 conn
->info_ident
= l2cap_get_ident(conn
);
3965 l2cap_send_cmd(conn
, conn
->info_ident
,
3966 L2CAP_INFO_REQ
, sizeof(req
), &req
);
3968 conn
->info_state
|= L2CAP_INFO_FEAT_MASK_REQ_DONE
;
3969 conn
->info_ident
= 0;
3971 l2cap_conn_start(conn
);
3975 case L2CAP_IT_FIXED_CHAN
:
3976 conn
->fixed_chan_mask
= rsp
->data
[0];
3977 conn
->info_state
|= L2CAP_INFO_FEAT_MASK_REQ_DONE
;
3978 conn
->info_ident
= 0;
3980 l2cap_conn_start(conn
);
3987 static inline int l2cap_create_channel_req(struct l2cap_conn
*conn
,
3988 struct l2cap_cmd_hdr
*cmd
, u16 cmd_len
,
3991 struct l2cap_create_chan_req
*req
= data
;
3992 struct l2cap_create_chan_rsp rsp
;
3995 if (cmd_len
!= sizeof(*req
))
4001 psm
= le16_to_cpu(req
->psm
);
4002 scid
= le16_to_cpu(req
->scid
);
4004 BT_DBG("psm 0x%2.2x, scid 0x%4.4x, amp_id %d", psm
, scid
, req
->amp_id
);
4006 /* Placeholder: Always reject */
4008 rsp
.scid
= cpu_to_le16(scid
);
4009 rsp
.result
= __constant_cpu_to_le16(L2CAP_CR_NO_MEM
);
4010 rsp
.status
= __constant_cpu_to_le16(L2CAP_CS_NO_INFO
);
4012 l2cap_send_cmd(conn
, cmd
->ident
, L2CAP_CREATE_CHAN_RSP
,
/* An AMP Create Channel Response carries the same payload layout as a
 * Connect Response, so reuse that handler directly.
 */
static inline int l2cap_create_channel_rsp(struct l2cap_conn *conn,
					   struct l2cap_cmd_hdr *cmd,
					   void *data)
{
	BT_DBG("conn %p", conn);

	return l2cap_connect_rsp(conn, cmd, data);
}
4026 static void l2cap_send_move_chan_rsp(struct l2cap_conn
*conn
, u8 ident
,
4027 u16 icid
, u16 result
)
4029 struct l2cap_move_chan_rsp rsp
;
4031 BT_DBG("icid 0x%4.4x, result 0x%4.4x", icid
, result
);
4033 rsp
.icid
= cpu_to_le16(icid
);
4034 rsp
.result
= cpu_to_le16(result
);
4036 l2cap_send_cmd(conn
, ident
, L2CAP_MOVE_CHAN_RSP
, sizeof(rsp
), &rsp
);
4039 static void l2cap_send_move_chan_cfm(struct l2cap_conn
*conn
,
4040 struct l2cap_chan
*chan
,
4041 u16 icid
, u16 result
)
4043 struct l2cap_move_chan_cfm cfm
;
4046 BT_DBG("icid 0x%4.4x, result 0x%4.4x", icid
, result
);
4048 ident
= l2cap_get_ident(conn
);
4050 chan
->ident
= ident
;
4052 cfm
.icid
= cpu_to_le16(icid
);
4053 cfm
.result
= cpu_to_le16(result
);
4055 l2cap_send_cmd(conn
, ident
, L2CAP_MOVE_CHAN_CFM
, sizeof(cfm
), &cfm
);
4058 static void l2cap_send_move_chan_cfm_rsp(struct l2cap_conn
*conn
, u8 ident
,
4061 struct l2cap_move_chan_cfm_rsp rsp
;
4063 BT_DBG("icid 0x%4.4x", icid
);
4065 rsp
.icid
= cpu_to_le16(icid
);
4066 l2cap_send_cmd(conn
, ident
, L2CAP_MOVE_CHAN_CFM_RSP
, sizeof(rsp
), &rsp
);
4069 static inline int l2cap_move_channel_req(struct l2cap_conn
*conn
,
4070 struct l2cap_cmd_hdr
*cmd
,
4071 u16 cmd_len
, void *data
)
4073 struct l2cap_move_chan_req
*req
= data
;
4075 u16 result
= L2CAP_MR_NOT_ALLOWED
;
4077 if (cmd_len
!= sizeof(*req
))
4080 icid
= le16_to_cpu(req
->icid
);
4082 BT_DBG("icid 0x%4.4x, dest_amp_id %d", icid
, req
->dest_amp_id
);
4087 /* Placeholder: Always refuse */
4088 l2cap_send_move_chan_rsp(conn
, cmd
->ident
, icid
, result
);
4093 static inline int l2cap_move_channel_rsp(struct l2cap_conn
*conn
,
4094 struct l2cap_cmd_hdr
*cmd
,
4095 u16 cmd_len
, void *data
)
4097 struct l2cap_move_chan_rsp
*rsp
= data
;
4100 if (cmd_len
!= sizeof(*rsp
))
4103 icid
= le16_to_cpu(rsp
->icid
);
4104 result
= le16_to_cpu(rsp
->result
);
4106 BT_DBG("icid 0x%4.4x, result 0x%4.4x", icid
, result
);
4108 /* Placeholder: Always unconfirmed */
4109 l2cap_send_move_chan_cfm(conn
, NULL
, icid
, L2CAP_MC_UNCONFIRMED
);
4114 static inline int l2cap_move_channel_confirm(struct l2cap_conn
*conn
,
4115 struct l2cap_cmd_hdr
*cmd
,
4116 u16 cmd_len
, void *data
)
4118 struct l2cap_move_chan_cfm
*cfm
= data
;
4121 if (cmd_len
!= sizeof(*cfm
))
4124 icid
= le16_to_cpu(cfm
->icid
);
4125 result
= le16_to_cpu(cfm
->result
);
4127 BT_DBG("icid 0x%4.4x, result 0x%4.4x", icid
, result
);
4129 l2cap_send_move_chan_cfm_rsp(conn
, cmd
->ident
, icid
);
4134 static inline int l2cap_move_channel_confirm_rsp(struct l2cap_conn
*conn
,
4135 struct l2cap_cmd_hdr
*cmd
,
4136 u16 cmd_len
, void *data
)
4138 struct l2cap_move_chan_cfm_rsp
*rsp
= data
;
4141 if (cmd_len
!= sizeof(*rsp
))
4144 icid
= le16_to_cpu(rsp
->icid
);
4146 BT_DBG("icid 0x%4.4x", icid
);
4151 static inline int l2cap_check_conn_param(u16 min
, u16 max
, u16 latency
,
4156 if (min
> max
|| min
< 6 || max
> 3200)
4159 if (to_multiplier
< 10 || to_multiplier
> 3200)
4162 if (max
>= to_multiplier
* 8)
4165 max_latency
= (to_multiplier
* 8 / max
) - 1;
4166 if (latency
> 499 || latency
> max_latency
)
4172 static inline int l2cap_conn_param_update_req(struct l2cap_conn
*conn
,
4173 struct l2cap_cmd_hdr
*cmd
, u8
*data
)
4175 struct hci_conn
*hcon
= conn
->hcon
;
4176 struct l2cap_conn_param_update_req
*req
;
4177 struct l2cap_conn_param_update_rsp rsp
;
4178 u16 min
, max
, latency
, to_multiplier
, cmd_len
;
4181 if (!(hcon
->link_mode
& HCI_LM_MASTER
))
4184 cmd_len
= __le16_to_cpu(cmd
->len
);
4185 if (cmd_len
!= sizeof(struct l2cap_conn_param_update_req
))
4188 req
= (struct l2cap_conn_param_update_req
*) data
;
4189 min
= __le16_to_cpu(req
->min
);
4190 max
= __le16_to_cpu(req
->max
);
4191 latency
= __le16_to_cpu(req
->latency
);
4192 to_multiplier
= __le16_to_cpu(req
->to_multiplier
);
4194 BT_DBG("min 0x%4.4x max 0x%4.4x latency: 0x%4.4x Timeout: 0x%4.4x",
4195 min
, max
, latency
, to_multiplier
);
4197 memset(&rsp
, 0, sizeof(rsp
));
4199 err
= l2cap_check_conn_param(min
, max
, latency
, to_multiplier
);
4201 rsp
.result
= __constant_cpu_to_le16(L2CAP_CONN_PARAM_REJECTED
);
4203 rsp
.result
= __constant_cpu_to_le16(L2CAP_CONN_PARAM_ACCEPTED
);
4205 l2cap_send_cmd(conn
, cmd
->ident
, L2CAP_CONN_PARAM_UPDATE_RSP
,
4209 hci_le_conn_update(hcon
, min
, max
, latency
, to_multiplier
);
4214 static inline int l2cap_bredr_sig_cmd(struct l2cap_conn
*conn
,
4215 struct l2cap_cmd_hdr
*cmd
, u16 cmd_len
, u8
*data
)
4219 switch (cmd
->code
) {
4220 case L2CAP_COMMAND_REJ
:
4221 l2cap_command_rej(conn
, cmd
, data
);
4224 case L2CAP_CONN_REQ
:
4225 err
= l2cap_connect_req(conn
, cmd
, data
);
4228 case L2CAP_CONN_RSP
:
4229 err
= l2cap_connect_rsp(conn
, cmd
, data
);
4232 case L2CAP_CONF_REQ
:
4233 err
= l2cap_config_req(conn
, cmd
, cmd_len
, data
);
4236 case L2CAP_CONF_RSP
:
4237 err
= l2cap_config_rsp(conn
, cmd
, data
);
4240 case L2CAP_DISCONN_REQ
:
4241 err
= l2cap_disconnect_req(conn
, cmd
, data
);
4244 case L2CAP_DISCONN_RSP
:
4245 err
= l2cap_disconnect_rsp(conn
, cmd
, data
);
4248 case L2CAP_ECHO_REQ
:
4249 l2cap_send_cmd(conn
, cmd
->ident
, L2CAP_ECHO_RSP
, cmd_len
, data
);
4252 case L2CAP_ECHO_RSP
:
4255 case L2CAP_INFO_REQ
:
4256 err
= l2cap_information_req(conn
, cmd
, data
);
4259 case L2CAP_INFO_RSP
:
4260 err
= l2cap_information_rsp(conn
, cmd
, data
);
4263 case L2CAP_CREATE_CHAN_REQ
:
4264 err
= l2cap_create_channel_req(conn
, cmd
, cmd_len
, data
);
4267 case L2CAP_CREATE_CHAN_RSP
:
4268 err
= l2cap_create_channel_rsp(conn
, cmd
, data
);
4271 case L2CAP_MOVE_CHAN_REQ
:
4272 err
= l2cap_move_channel_req(conn
, cmd
, cmd_len
, data
);
4275 case L2CAP_MOVE_CHAN_RSP
:
4276 err
= l2cap_move_channel_rsp(conn
, cmd
, cmd_len
, data
);
4279 case L2CAP_MOVE_CHAN_CFM
:
4280 err
= l2cap_move_channel_confirm(conn
, cmd
, cmd_len
, data
);
4283 case L2CAP_MOVE_CHAN_CFM_RSP
:
4284 err
= l2cap_move_channel_confirm_rsp(conn
, cmd
, cmd_len
, data
);
4288 BT_ERR("Unknown BR/EDR signaling command 0x%2.2x", cmd
->code
);
4296 static inline int l2cap_le_sig_cmd(struct l2cap_conn
*conn
,
4297 struct l2cap_cmd_hdr
*cmd
, u8
*data
)
4299 switch (cmd
->code
) {
4300 case L2CAP_COMMAND_REJ
:
4303 case L2CAP_CONN_PARAM_UPDATE_REQ
:
4304 return l2cap_conn_param_update_req(conn
, cmd
, data
);
4306 case L2CAP_CONN_PARAM_UPDATE_RSP
:
4310 BT_ERR("Unknown LE signaling command 0x%2.2x", cmd
->code
);
4315 static inline void l2cap_sig_channel(struct l2cap_conn
*conn
,
4316 struct sk_buff
*skb
)
4318 u8
*data
= skb
->data
;
4320 struct l2cap_cmd_hdr cmd
;
4323 l2cap_raw_recv(conn
, skb
);
4325 while (len
>= L2CAP_CMD_HDR_SIZE
) {
4327 memcpy(&cmd
, data
, L2CAP_CMD_HDR_SIZE
);
4328 data
+= L2CAP_CMD_HDR_SIZE
;
4329 len
-= L2CAP_CMD_HDR_SIZE
;
4331 cmd_len
= le16_to_cpu(cmd
.len
);
4333 BT_DBG("code 0x%2.2x len %d id 0x%2.2x", cmd
.code
, cmd_len
, cmd
.ident
);
4335 if (cmd_len
> len
|| !cmd
.ident
) {
4336 BT_DBG("corrupted command");
4340 if (conn
->hcon
->type
== LE_LINK
)
4341 err
= l2cap_le_sig_cmd(conn
, &cmd
, data
);
4343 err
= l2cap_bredr_sig_cmd(conn
, &cmd
, cmd_len
, data
);
4346 struct l2cap_cmd_rej_unk rej
;
4348 BT_ERR("Wrong link type (%d)", err
);
4350 /* FIXME: Map err to a valid reason */
4351 rej
.reason
= __constant_cpu_to_le16(L2CAP_REJ_NOT_UNDERSTOOD
);
4352 l2cap_send_cmd(conn
, cmd
.ident
, L2CAP_COMMAND_REJ
, sizeof(rej
), &rej
);
4362 static int l2cap_check_fcs(struct l2cap_chan
*chan
, struct sk_buff
*skb
)
4364 u16 our_fcs
, rcv_fcs
;
4367 if (test_bit(FLAG_EXT_CTRL
, &chan
->flags
))
4368 hdr_size
= L2CAP_EXT_HDR_SIZE
;
4370 hdr_size
= L2CAP_ENH_HDR_SIZE
;
4372 if (chan
->fcs
== L2CAP_FCS_CRC16
) {
4373 skb_trim(skb
, skb
->len
- L2CAP_FCS_SIZE
);
4374 rcv_fcs
= get_unaligned_le16(skb
->data
+ skb
->len
);
4375 our_fcs
= crc16(0, skb
->data
- hdr_size
, skb
->len
+ hdr_size
);
4377 if (our_fcs
!= rcv_fcs
)
4383 static void l2cap_send_i_or_rr_or_rnr(struct l2cap_chan
*chan
)
4385 struct l2cap_ctrl control
;
4387 BT_DBG("chan %p", chan
);
4389 memset(&control
, 0, sizeof(control
));
4392 control
.reqseq
= chan
->buffer_seq
;
4393 set_bit(CONN_SEND_FBIT
, &chan
->conn_state
);
4395 if (test_bit(CONN_LOCAL_BUSY
, &chan
->conn_state
)) {
4396 control
.super
= L2CAP_SUPER_RNR
;
4397 l2cap_send_sframe(chan
, &control
);
4400 if (test_and_clear_bit(CONN_REMOTE_BUSY
, &chan
->conn_state
) &&
4401 chan
->unacked_frames
> 0)
4402 __set_retrans_timer(chan
);
4404 /* Send pending iframes */
4405 l2cap_ertm_send(chan
);
4407 if (!test_bit(CONN_LOCAL_BUSY
, &chan
->conn_state
) &&
4408 test_bit(CONN_SEND_FBIT
, &chan
->conn_state
)) {
4409 /* F-bit wasn't sent in an s-frame or i-frame yet, so
4412 control
.super
= L2CAP_SUPER_RR
;
4413 l2cap_send_sframe(chan
, &control
);
4417 static void append_skb_frag(struct sk_buff
*skb
,
4418 struct sk_buff
*new_frag
, struct sk_buff
**last_frag
)
4420 /* skb->len reflects data in skb as well as all fragments
4421 * skb->data_len reflects only data in fragments
4423 if (!skb_has_frag_list(skb
))
4424 skb_shinfo(skb
)->frag_list
= new_frag
;
4426 new_frag
->next
= NULL
;
4428 (*last_frag
)->next
= new_frag
;
4429 *last_frag
= new_frag
;
4431 skb
->len
+= new_frag
->len
;
4432 skb
->data_len
+= new_frag
->len
;
4433 skb
->truesize
+= new_frag
->truesize
;
4436 static int l2cap_reassemble_sdu(struct l2cap_chan
*chan
, struct sk_buff
*skb
,
4437 struct l2cap_ctrl
*control
)
4441 switch (control
->sar
) {
4442 case L2CAP_SAR_UNSEGMENTED
:
4446 err
= chan
->ops
->recv(chan
, skb
);
4449 case L2CAP_SAR_START
:
4453 chan
->sdu_len
= get_unaligned_le16(skb
->data
);
4454 skb_pull(skb
, L2CAP_SDULEN_SIZE
);
4456 if (chan
->sdu_len
> chan
->imtu
) {
4461 if (skb
->len
>= chan
->sdu_len
)
4465 chan
->sdu_last_frag
= skb
;
4471 case L2CAP_SAR_CONTINUE
:
4475 append_skb_frag(chan
->sdu
, skb
,
4476 &chan
->sdu_last_frag
);
4479 if (chan
->sdu
->len
>= chan
->sdu_len
)
4489 append_skb_frag(chan
->sdu
, skb
,
4490 &chan
->sdu_last_frag
);
4493 if (chan
->sdu
->len
!= chan
->sdu_len
)
4496 err
= chan
->ops
->recv(chan
, chan
->sdu
);
4499 /* Reassembly complete */
4501 chan
->sdu_last_frag
= NULL
;
4509 kfree_skb(chan
->sdu
);
4511 chan
->sdu_last_frag
= NULL
;
4518 void l2cap_chan_busy(struct l2cap_chan
*chan
, int busy
)
4522 if (chan
->mode
!= L2CAP_MODE_ERTM
)
4525 event
= busy
? L2CAP_EV_LOCAL_BUSY_DETECTED
: L2CAP_EV_LOCAL_BUSY_CLEAR
;
4526 l2cap_tx(chan
, NULL
, NULL
, event
);
4529 static int l2cap_rx_queued_iframes(struct l2cap_chan
*chan
)
4532 /* Pass sequential frames to l2cap_reassemble_sdu()
4533 * until a gap is encountered.
4536 BT_DBG("chan %p", chan
);
4538 while (!test_bit(CONN_LOCAL_BUSY
, &chan
->conn_state
)) {
4539 struct sk_buff
*skb
;
4540 BT_DBG("Searching for skb with txseq %d (queue len %d)",
4541 chan
->buffer_seq
, skb_queue_len(&chan
->srej_q
));
4543 skb
= l2cap_ertm_seq_in_queue(&chan
->srej_q
, chan
->buffer_seq
);
4548 skb_unlink(skb
, &chan
->srej_q
);
4549 chan
->buffer_seq
= __next_seq(chan
, chan
->buffer_seq
);
4550 err
= l2cap_reassemble_sdu(chan
, skb
, &bt_cb(skb
)->control
);
4555 if (skb_queue_empty(&chan
->srej_q
)) {
4556 chan
->rx_state
= L2CAP_RX_STATE_RECV
;
4557 l2cap_send_ack(chan
);
4563 static void l2cap_handle_srej(struct l2cap_chan
*chan
,
4564 struct l2cap_ctrl
*control
)
4566 struct sk_buff
*skb
;
4568 BT_DBG("chan %p, control %p", chan
, control
);
4570 if (control
->reqseq
== chan
->next_tx_seq
) {
4571 BT_DBG("Invalid reqseq %d, disconnecting", control
->reqseq
);
4572 l2cap_send_disconn_req(chan
->conn
, chan
, ECONNRESET
);
4576 skb
= l2cap_ertm_seq_in_queue(&chan
->tx_q
, control
->reqseq
);
4579 BT_DBG("Seq %d not available for retransmission",
4584 if (chan
->max_tx
!= 0 && bt_cb(skb
)->control
.retries
>= chan
->max_tx
) {
4585 BT_DBG("Retry limit exceeded (%d)", chan
->max_tx
);
4586 l2cap_send_disconn_req(chan
->conn
, chan
, ECONNRESET
);
4590 clear_bit(CONN_REMOTE_BUSY
, &chan
->conn_state
);
4592 if (control
->poll
) {
4593 l2cap_pass_to_tx(chan
, control
);
4595 set_bit(CONN_SEND_FBIT
, &chan
->conn_state
);
4596 l2cap_retransmit(chan
, control
);
4597 l2cap_ertm_send(chan
);
4599 if (chan
->tx_state
== L2CAP_TX_STATE_WAIT_F
) {
4600 set_bit(CONN_SREJ_ACT
, &chan
->conn_state
);
4601 chan
->srej_save_reqseq
= control
->reqseq
;
4604 l2cap_pass_to_tx_fbit(chan
, control
);
4606 if (control
->final
) {
4607 if (chan
->srej_save_reqseq
!= control
->reqseq
||
4608 !test_and_clear_bit(CONN_SREJ_ACT
,
4610 l2cap_retransmit(chan
, control
);
4612 l2cap_retransmit(chan
, control
);
4613 if (chan
->tx_state
== L2CAP_TX_STATE_WAIT_F
) {
4614 set_bit(CONN_SREJ_ACT
, &chan
->conn_state
);
4615 chan
->srej_save_reqseq
= control
->reqseq
;
4621 static void l2cap_handle_rej(struct l2cap_chan
*chan
,
4622 struct l2cap_ctrl
*control
)
4624 struct sk_buff
*skb
;
4626 BT_DBG("chan %p, control %p", chan
, control
);
4628 if (control
->reqseq
== chan
->next_tx_seq
) {
4629 BT_DBG("Invalid reqseq %d, disconnecting", control
->reqseq
);
4630 l2cap_send_disconn_req(chan
->conn
, chan
, ECONNRESET
);
4634 skb
= l2cap_ertm_seq_in_queue(&chan
->tx_q
, control
->reqseq
);
4636 if (chan
->max_tx
&& skb
&&
4637 bt_cb(skb
)->control
.retries
>= chan
->max_tx
) {
4638 BT_DBG("Retry limit exceeded (%d)", chan
->max_tx
);
4639 l2cap_send_disconn_req(chan
->conn
, chan
, ECONNRESET
);
4643 clear_bit(CONN_REMOTE_BUSY
, &chan
->conn_state
);
4645 l2cap_pass_to_tx(chan
, control
);
4647 if (control
->final
) {
4648 if (!test_and_clear_bit(CONN_REJ_ACT
, &chan
->conn_state
))
4649 l2cap_retransmit_all(chan
, control
);
4651 l2cap_retransmit_all(chan
, control
);
4652 l2cap_ertm_send(chan
);
4653 if (chan
->tx_state
== L2CAP_TX_STATE_WAIT_F
)
4654 set_bit(CONN_REJ_ACT
, &chan
->conn_state
);
4658 static u8
l2cap_classify_txseq(struct l2cap_chan
*chan
, u16 txseq
)
4660 BT_DBG("chan %p, txseq %d", chan
, txseq
);
4662 BT_DBG("last_acked_seq %d, expected_tx_seq %d", chan
->last_acked_seq
,
4663 chan
->expected_tx_seq
);
4665 if (chan
->rx_state
== L2CAP_RX_STATE_SREJ_SENT
) {
4666 if (__seq_offset(chan
, txseq
, chan
->last_acked_seq
) >=
4668 /* See notes below regarding "double poll" and
4671 if (chan
->tx_win
<= ((chan
->tx_win_max
+ 1) >> 1)) {
4672 BT_DBG("Invalid/Ignore - after SREJ");
4673 return L2CAP_TXSEQ_INVALID_IGNORE
;
4675 BT_DBG("Invalid - in window after SREJ sent");
4676 return L2CAP_TXSEQ_INVALID
;
4680 if (chan
->srej_list
.head
== txseq
) {
4681 BT_DBG("Expected SREJ");
4682 return L2CAP_TXSEQ_EXPECTED_SREJ
;
4685 if (l2cap_ertm_seq_in_queue(&chan
->srej_q
, txseq
)) {
4686 BT_DBG("Duplicate SREJ - txseq already stored");
4687 return L2CAP_TXSEQ_DUPLICATE_SREJ
;
4690 if (l2cap_seq_list_contains(&chan
->srej_list
, txseq
)) {
4691 BT_DBG("Unexpected SREJ - not requested");
4692 return L2CAP_TXSEQ_UNEXPECTED_SREJ
;
4696 if (chan
->expected_tx_seq
== txseq
) {
4697 if (__seq_offset(chan
, txseq
, chan
->last_acked_seq
) >=
4699 BT_DBG("Invalid - txseq outside tx window");
4700 return L2CAP_TXSEQ_INVALID
;
4703 return L2CAP_TXSEQ_EXPECTED
;
4707 if (__seq_offset(chan
, txseq
, chan
->last_acked_seq
) <
4708 __seq_offset(chan
, chan
->expected_tx_seq
,
4709 chan
->last_acked_seq
)){
4710 BT_DBG("Duplicate - expected_tx_seq later than txseq");
4711 return L2CAP_TXSEQ_DUPLICATE
;
4714 if (__seq_offset(chan
, txseq
, chan
->last_acked_seq
) >= chan
->tx_win
) {
4715 /* A source of invalid packets is a "double poll" condition,
4716 * where delays cause us to send multiple poll packets. If
4717 * the remote stack receives and processes both polls,
4718 * sequence numbers can wrap around in such a way that a
4719 * resent frame has a sequence number that looks like new data
4720 * with a sequence gap. This would trigger an erroneous SREJ
4723 * Fortunately, this is impossible with a tx window that's
4724 * less than half of the maximum sequence number, which allows
4725 * invalid frames to be safely ignored.
4727 * With tx window sizes greater than half of the tx window
4728 * maximum, the frame is invalid and cannot be ignored. This
4729 * causes a disconnect.
4732 if (chan
->tx_win
<= ((chan
->tx_win_max
+ 1) >> 1)) {
4733 BT_DBG("Invalid/Ignore - txseq outside tx window");
4734 return L2CAP_TXSEQ_INVALID_IGNORE
;
4736 BT_DBG("Invalid - txseq outside tx window");
4737 return L2CAP_TXSEQ_INVALID
;
4740 BT_DBG("Unexpected - txseq indicates missing frames");
4741 return L2CAP_TXSEQ_UNEXPECTED
;
4745 static int l2cap_rx_state_recv(struct l2cap_chan
*chan
,
4746 struct l2cap_ctrl
*control
,
4747 struct sk_buff
*skb
, u8 event
)
4750 bool skb_in_use
= 0;
4752 BT_DBG("chan %p, control %p, skb %p, event %d", chan
, control
, skb
,
4756 case L2CAP_EV_RECV_IFRAME
:
4757 switch (l2cap_classify_txseq(chan
, control
->txseq
)) {
4758 case L2CAP_TXSEQ_EXPECTED
:
4759 l2cap_pass_to_tx(chan
, control
);
4761 if (test_bit(CONN_LOCAL_BUSY
, &chan
->conn_state
)) {
4762 BT_DBG("Busy, discarding expected seq %d",
4767 chan
->expected_tx_seq
= __next_seq(chan
,
4770 chan
->buffer_seq
= chan
->expected_tx_seq
;
4773 err
= l2cap_reassemble_sdu(chan
, skb
, control
);
4777 if (control
->final
) {
4778 if (!test_and_clear_bit(CONN_REJ_ACT
,
4779 &chan
->conn_state
)) {
4781 l2cap_retransmit_all(chan
, control
);
4782 l2cap_ertm_send(chan
);
4786 if (!test_bit(CONN_LOCAL_BUSY
, &chan
->conn_state
))
4787 l2cap_send_ack(chan
);
4789 case L2CAP_TXSEQ_UNEXPECTED
:
4790 l2cap_pass_to_tx(chan
, control
);
4792 /* Can't issue SREJ frames in the local busy state.
4793 * Drop this frame, it will be seen as missing
4794 * when local busy is exited.
4796 if (test_bit(CONN_LOCAL_BUSY
, &chan
->conn_state
)) {
4797 BT_DBG("Busy, discarding unexpected seq %d",
4802 /* There was a gap in the sequence, so an SREJ
4803 * must be sent for each missing frame. The
4804 * current frame is stored for later use.
4806 skb_queue_tail(&chan
->srej_q
, skb
);
4808 BT_DBG("Queued %p (queue len %d)", skb
,
4809 skb_queue_len(&chan
->srej_q
));
4811 clear_bit(CONN_SREJ_ACT
, &chan
->conn_state
);
4812 l2cap_seq_list_clear(&chan
->srej_list
);
4813 l2cap_send_srej(chan
, control
->txseq
);
4815 chan
->rx_state
= L2CAP_RX_STATE_SREJ_SENT
;
4817 case L2CAP_TXSEQ_DUPLICATE
:
4818 l2cap_pass_to_tx(chan
, control
);
4820 case L2CAP_TXSEQ_INVALID_IGNORE
:
4822 case L2CAP_TXSEQ_INVALID
:
4824 l2cap_send_disconn_req(chan
->conn
, chan
,
4829 case L2CAP_EV_RECV_RR
:
4830 l2cap_pass_to_tx(chan
, control
);
4831 if (control
->final
) {
4832 clear_bit(CONN_REMOTE_BUSY
, &chan
->conn_state
);
4834 if (!test_and_clear_bit(CONN_REJ_ACT
,
4835 &chan
->conn_state
)) {
4837 l2cap_retransmit_all(chan
, control
);
4840 l2cap_ertm_send(chan
);
4841 } else if (control
->poll
) {
4842 l2cap_send_i_or_rr_or_rnr(chan
);
4844 if (test_and_clear_bit(CONN_REMOTE_BUSY
,
4845 &chan
->conn_state
) &&
4846 chan
->unacked_frames
)
4847 __set_retrans_timer(chan
);
4849 l2cap_ertm_send(chan
);
4852 case L2CAP_EV_RECV_RNR
:
4853 set_bit(CONN_REMOTE_BUSY
, &chan
->conn_state
);
4854 l2cap_pass_to_tx(chan
, control
);
4855 if (control
&& control
->poll
) {
4856 set_bit(CONN_SEND_FBIT
, &chan
->conn_state
);
4857 l2cap_send_rr_or_rnr(chan
, 0);
4859 __clear_retrans_timer(chan
);
4860 l2cap_seq_list_clear(&chan
->retrans_list
);
4862 case L2CAP_EV_RECV_REJ
:
4863 l2cap_handle_rej(chan
, control
);
4865 case L2CAP_EV_RECV_SREJ
:
4866 l2cap_handle_srej(chan
, control
);
4872 if (skb
&& !skb_in_use
) {
4873 BT_DBG("Freeing %p", skb
);
4880 static int l2cap_rx_state_srej_sent(struct l2cap_chan
*chan
,
4881 struct l2cap_ctrl
*control
,
4882 struct sk_buff
*skb
, u8 event
)
4885 u16 txseq
= control
->txseq
;
4886 bool skb_in_use
= 0;
4888 BT_DBG("chan %p, control %p, skb %p, event %d", chan
, control
, skb
,
4892 case L2CAP_EV_RECV_IFRAME
:
4893 switch (l2cap_classify_txseq(chan
, txseq
)) {
4894 case L2CAP_TXSEQ_EXPECTED
:
4895 /* Keep frame for reassembly later */
4896 l2cap_pass_to_tx(chan
, control
);
4897 skb_queue_tail(&chan
->srej_q
, skb
);
4899 BT_DBG("Queued %p (queue len %d)", skb
,
4900 skb_queue_len(&chan
->srej_q
));
4902 chan
->expected_tx_seq
= __next_seq(chan
, txseq
);
4904 case L2CAP_TXSEQ_EXPECTED_SREJ
:
4905 l2cap_seq_list_pop(&chan
->srej_list
);
4907 l2cap_pass_to_tx(chan
, control
);
4908 skb_queue_tail(&chan
->srej_q
, skb
);
4910 BT_DBG("Queued %p (queue len %d)", skb
,
4911 skb_queue_len(&chan
->srej_q
));
4913 err
= l2cap_rx_queued_iframes(chan
);
4918 case L2CAP_TXSEQ_UNEXPECTED
:
4919 /* Got a frame that can't be reassembled yet.
4920 * Save it for later, and send SREJs to cover
4921 * the missing frames.
4923 skb_queue_tail(&chan
->srej_q
, skb
);
4925 BT_DBG("Queued %p (queue len %d)", skb
,
4926 skb_queue_len(&chan
->srej_q
));
4928 l2cap_pass_to_tx(chan
, control
);
4929 l2cap_send_srej(chan
, control
->txseq
);
4931 case L2CAP_TXSEQ_UNEXPECTED_SREJ
:
4932 /* This frame was requested with an SREJ, but
4933 * some expected retransmitted frames are
4934 * missing. Request retransmission of missing
4937 skb_queue_tail(&chan
->srej_q
, skb
);
4939 BT_DBG("Queued %p (queue len %d)", skb
,
4940 skb_queue_len(&chan
->srej_q
));
4942 l2cap_pass_to_tx(chan
, control
);
4943 l2cap_send_srej_list(chan
, control
->txseq
);
4945 case L2CAP_TXSEQ_DUPLICATE_SREJ
:
4946 /* We've already queued this frame. Drop this copy. */
4947 l2cap_pass_to_tx(chan
, control
);
4949 case L2CAP_TXSEQ_DUPLICATE
:
4950 /* Expecting a later sequence number, so this frame
4951 * was already received. Ignore it completely.
4954 case L2CAP_TXSEQ_INVALID_IGNORE
:
4956 case L2CAP_TXSEQ_INVALID
:
4958 l2cap_send_disconn_req(chan
->conn
, chan
,
4963 case L2CAP_EV_RECV_RR
:
4964 l2cap_pass_to_tx(chan
, control
);
4965 if (control
->final
) {
4966 clear_bit(CONN_REMOTE_BUSY
, &chan
->conn_state
);
4968 if (!test_and_clear_bit(CONN_REJ_ACT
,
4969 &chan
->conn_state
)) {
4971 l2cap_retransmit_all(chan
, control
);
4974 l2cap_ertm_send(chan
);
4975 } else if (control
->poll
) {
4976 if (test_and_clear_bit(CONN_REMOTE_BUSY
,
4977 &chan
->conn_state
) &&
4978 chan
->unacked_frames
) {
4979 __set_retrans_timer(chan
);
4982 set_bit(CONN_SEND_FBIT
, &chan
->conn_state
);
4983 l2cap_send_srej_tail(chan
);
4985 if (test_and_clear_bit(CONN_REMOTE_BUSY
,
4986 &chan
->conn_state
) &&
4987 chan
->unacked_frames
)
4988 __set_retrans_timer(chan
);
4990 l2cap_send_ack(chan
);
4993 case L2CAP_EV_RECV_RNR
:
4994 set_bit(CONN_REMOTE_BUSY
, &chan
->conn_state
);
4995 l2cap_pass_to_tx(chan
, control
);
4996 if (control
->poll
) {
4997 l2cap_send_srej_tail(chan
);
4999 struct l2cap_ctrl rr_control
;
5000 memset(&rr_control
, 0, sizeof(rr_control
));
5001 rr_control
.sframe
= 1;
5002 rr_control
.super
= L2CAP_SUPER_RR
;
5003 rr_control
.reqseq
= chan
->buffer_seq
;
5004 l2cap_send_sframe(chan
, &rr_control
);
5008 case L2CAP_EV_RECV_REJ
:
5009 l2cap_handle_rej(chan
, control
);
5011 case L2CAP_EV_RECV_SREJ
:
5012 l2cap_handle_srej(chan
, control
);
5016 if (skb
&& !skb_in_use
) {
5017 BT_DBG("Freeing %p", skb
);
5024 static bool __valid_reqseq(struct l2cap_chan
*chan
, u16 reqseq
)
5026 /* Make sure reqseq is for a packet that has been sent but not acked */
5029 unacked
= __seq_offset(chan
, chan
->next_tx_seq
, chan
->expected_ack_seq
);
5030 return __seq_offset(chan
, chan
->next_tx_seq
, reqseq
) <= unacked
;
5033 static int l2cap_rx(struct l2cap_chan
*chan
, struct l2cap_ctrl
*control
,
5034 struct sk_buff
*skb
, u8 event
)
5038 BT_DBG("chan %p, control %p, skb %p, event %d, state %d", chan
,
5039 control
, skb
, event
, chan
->rx_state
);
5041 if (__valid_reqseq(chan
, control
->reqseq
)) {
5042 switch (chan
->rx_state
) {
5043 case L2CAP_RX_STATE_RECV
:
5044 err
= l2cap_rx_state_recv(chan
, control
, skb
, event
);
5046 case L2CAP_RX_STATE_SREJ_SENT
:
5047 err
= l2cap_rx_state_srej_sent(chan
, control
, skb
,
5055 BT_DBG("Invalid reqseq %d (next_tx_seq %d, expected_ack_seq %d",
5056 control
->reqseq
, chan
->next_tx_seq
,
5057 chan
->expected_ack_seq
);
5058 l2cap_send_disconn_req(chan
->conn
, chan
, ECONNRESET
);
5064 static int l2cap_stream_rx(struct l2cap_chan
*chan
, struct l2cap_ctrl
*control
,
5065 struct sk_buff
*skb
)
5069 BT_DBG("chan %p, control %p, skb %p, state %d", chan
, control
, skb
,
5072 if (l2cap_classify_txseq(chan
, control
->txseq
) ==
5073 L2CAP_TXSEQ_EXPECTED
) {
5074 l2cap_pass_to_tx(chan
, control
);
5076 BT_DBG("buffer_seq %d->%d", chan
->buffer_seq
,
5077 __next_seq(chan
, chan
->buffer_seq
));
5079 chan
->buffer_seq
= __next_seq(chan
, chan
->buffer_seq
);
5081 l2cap_reassemble_sdu(chan
, skb
, control
);
5084 kfree_skb(chan
->sdu
);
5087 chan
->sdu_last_frag
= NULL
;
5091 BT_DBG("Freeing %p", skb
);
5096 chan
->last_acked_seq
= control
->txseq
;
5097 chan
->expected_tx_seq
= __next_seq(chan
, control
->txseq
);
5102 static int l2cap_data_rcv(struct l2cap_chan
*chan
, struct sk_buff
*skb
)
5104 struct l2cap_ctrl
*control
= &bt_cb(skb
)->control
;
5108 __unpack_control(chan
, skb
);
5113 * We can just drop the corrupted I-frame here.
5114 * Receiver will miss it and start proper recovery
5115 * procedures and ask for retransmission.
5117 if (l2cap_check_fcs(chan
, skb
))
5120 if (!control
->sframe
&& control
->sar
== L2CAP_SAR_START
)
5121 len
-= L2CAP_SDULEN_SIZE
;
5123 if (chan
->fcs
== L2CAP_FCS_CRC16
)
5124 len
-= L2CAP_FCS_SIZE
;
5126 if (len
> chan
->mps
) {
5127 l2cap_send_disconn_req(chan
->conn
, chan
, ECONNRESET
);
5131 if (!control
->sframe
) {
5134 BT_DBG("iframe sar %d, reqseq %d, final %d, txseq %d",
5135 control
->sar
, control
->reqseq
, control
->final
,
5138 /* Validate F-bit - F=0 always valid, F=1 only
5139 * valid in TX WAIT_F
5141 if (control
->final
&& chan
->tx_state
!= L2CAP_TX_STATE_WAIT_F
)
5144 if (chan
->mode
!= L2CAP_MODE_STREAMING
) {
5145 event
= L2CAP_EV_RECV_IFRAME
;
5146 err
= l2cap_rx(chan
, control
, skb
, event
);
5148 err
= l2cap_stream_rx(chan
, control
, skb
);
5152 l2cap_send_disconn_req(chan
->conn
, chan
,
5155 const u8 rx_func_to_event
[4] = {
5156 L2CAP_EV_RECV_RR
, L2CAP_EV_RECV_REJ
,
5157 L2CAP_EV_RECV_RNR
, L2CAP_EV_RECV_SREJ
5160 /* Only I-frames are expected in streaming mode */
5161 if (chan
->mode
== L2CAP_MODE_STREAMING
)
5164 BT_DBG("sframe reqseq %d, final %d, poll %d, super %d",
5165 control
->reqseq
, control
->final
, control
->poll
,
5170 l2cap_send_disconn_req(chan
->conn
, chan
, ECONNRESET
);
5174 /* Validate F and P bits */
5175 if (control
->final
&& (control
->poll
||
5176 chan
->tx_state
!= L2CAP_TX_STATE_WAIT_F
))
5179 event
= rx_func_to_event
[control
->super
];
5180 if (l2cap_rx(chan
, control
, skb
, event
))
5181 l2cap_send_disconn_req(chan
->conn
, chan
, ECONNRESET
);
5191 static void l2cap_data_channel(struct l2cap_conn
*conn
, u16 cid
,
5192 struct sk_buff
*skb
)
5194 struct l2cap_chan
*chan
;
5196 chan
= l2cap_get_chan_by_scid(conn
, cid
);
5198 if (cid
== L2CAP_CID_A2MP
) {
5199 chan
= a2mp_channel_create(conn
, skb
);
5205 l2cap_chan_lock(chan
);
5207 BT_DBG("unknown cid 0x%4.4x", cid
);
5208 /* Drop packet and return */
5214 BT_DBG("chan %p, len %d", chan
, skb
->len
);
5216 if (chan
->state
!= BT_CONNECTED
)
5219 switch (chan
->mode
) {
5220 case L2CAP_MODE_BASIC
:
5221 /* If socket recv buffers overflows we drop data here
5222 * which is *bad* because L2CAP has to be reliable.
5223 * But we don't have any other choice. L2CAP doesn't
5224 * provide flow control mechanism. */
5226 if (chan
->imtu
< skb
->len
)
5229 if (!chan
->ops
->recv(chan
, skb
))
5233 case L2CAP_MODE_ERTM
:
5234 case L2CAP_MODE_STREAMING
:
5235 l2cap_data_rcv(chan
, skb
);
5239 BT_DBG("chan %p: bad mode 0x%2.2x", chan
, chan
->mode
);
5247 l2cap_chan_unlock(chan
);
5250 static void l2cap_conless_channel(struct l2cap_conn
*conn
, __le16 psm
,
5251 struct sk_buff
*skb
)
5253 struct l2cap_chan
*chan
;
5255 chan
= l2cap_global_chan_by_psm(0, psm
, conn
->src
, conn
->dst
);
5259 BT_DBG("chan %p, len %d", chan
, skb
->len
);
5261 if (chan
->state
!= BT_BOUND
&& chan
->state
!= BT_CONNECTED
)
5264 if (chan
->imtu
< skb
->len
)
5267 if (!chan
->ops
->recv(chan
, skb
))
5274 static void l2cap_att_channel(struct l2cap_conn
*conn
, u16 cid
,
5275 struct sk_buff
*skb
)
5277 struct l2cap_chan
*chan
;
5279 chan
= l2cap_global_chan_by_scid(0, cid
, conn
->src
, conn
->dst
);
5283 BT_DBG("chan %p, len %d", chan
, skb
->len
);
5285 if (chan
->state
!= BT_BOUND
&& chan
->state
!= BT_CONNECTED
)
5288 if (chan
->imtu
< skb
->len
)
5291 if (!chan
->ops
->recv(chan
, skb
))
5298 static void l2cap_recv_frame(struct l2cap_conn
*conn
, struct sk_buff
*skb
)
5300 struct l2cap_hdr
*lh
= (void *) skb
->data
;
5304 skb_pull(skb
, L2CAP_HDR_SIZE
);
5305 cid
= __le16_to_cpu(lh
->cid
);
5306 len
= __le16_to_cpu(lh
->len
);
5308 if (len
!= skb
->len
) {
5313 BT_DBG("len %d, cid 0x%4.4x", len
, cid
);
5316 case L2CAP_CID_LE_SIGNALING
:
5317 case L2CAP_CID_SIGNALING
:
5318 l2cap_sig_channel(conn
, skb
);
5321 case L2CAP_CID_CONN_LESS
:
5322 psm
= get_unaligned((__le16
*) skb
->data
);
5323 skb_pull(skb
, L2CAP_PSMLEN_SIZE
);
5324 l2cap_conless_channel(conn
, psm
, skb
);
5327 case L2CAP_CID_LE_DATA
:
5328 l2cap_att_channel(conn
, cid
, skb
);
5332 if (smp_sig_channel(conn
, skb
))
5333 l2cap_conn_del(conn
->hcon
, EACCES
);
5337 l2cap_data_channel(conn
, cid
, skb
);
5342 /* ---- L2CAP interface with lower layer (HCI) ---- */
5344 int l2cap_connect_ind(struct hci_dev
*hdev
, bdaddr_t
*bdaddr
)
5346 int exact
= 0, lm1
= 0, lm2
= 0;
5347 struct l2cap_chan
*c
;
5349 BT_DBG("hdev %s, bdaddr %pMR", hdev
->name
, bdaddr
);
5351 /* Find listening sockets and check their link_mode */
5352 read_lock(&chan_list_lock
);
5353 list_for_each_entry(c
, &chan_list
, global_l
) {
5354 struct sock
*sk
= c
->sk
;
5356 if (c
->state
!= BT_LISTEN
)
5359 if (!bacmp(&bt_sk(sk
)->src
, &hdev
->bdaddr
)) {
5360 lm1
|= HCI_LM_ACCEPT
;
5361 if (test_bit(FLAG_ROLE_SWITCH
, &c
->flags
))
5362 lm1
|= HCI_LM_MASTER
;
5364 } else if (!bacmp(&bt_sk(sk
)->src
, BDADDR_ANY
)) {
5365 lm2
|= HCI_LM_ACCEPT
;
5366 if (test_bit(FLAG_ROLE_SWITCH
, &c
->flags
))
5367 lm2
|= HCI_LM_MASTER
;
5370 read_unlock(&chan_list_lock
);
5372 return exact
? lm1
: lm2
;
5375 void l2cap_connect_cfm(struct hci_conn
*hcon
, u8 status
)
5377 struct l2cap_conn
*conn
;
5379 BT_DBG("hcon %p bdaddr %pMR status %d", hcon
, &hcon
->dst
, status
);
5382 conn
= l2cap_conn_add(hcon
, status
);
5384 l2cap_conn_ready(conn
);
5386 l2cap_conn_del(hcon
, bt_to_errno(status
));
5390 int l2cap_disconn_ind(struct hci_conn
*hcon
)
5392 struct l2cap_conn
*conn
= hcon
->l2cap_data
;
5394 BT_DBG("hcon %p", hcon
);
5397 return HCI_ERROR_REMOTE_USER_TERM
;
5398 return conn
->disc_reason
;
5401 void l2cap_disconn_cfm(struct hci_conn
*hcon
, u8 reason
)
5403 BT_DBG("hcon %p reason %d", hcon
, reason
);
5405 l2cap_conn_del(hcon
, bt_to_errno(reason
));
5408 static inline void l2cap_check_encryption(struct l2cap_chan
*chan
, u8 encrypt
)
5410 if (chan
->chan_type
!= L2CAP_CHAN_CONN_ORIENTED
)
5413 if (encrypt
== 0x00) {
5414 if (chan
->sec_level
== BT_SECURITY_MEDIUM
) {
5415 __set_chan_timer(chan
, L2CAP_ENC_TIMEOUT
);
5416 } else if (chan
->sec_level
== BT_SECURITY_HIGH
)
5417 l2cap_chan_close(chan
, ECONNREFUSED
);
5419 if (chan
->sec_level
== BT_SECURITY_MEDIUM
)
5420 __clear_chan_timer(chan
);
5424 int l2cap_security_cfm(struct hci_conn
*hcon
, u8 status
, u8 encrypt
)
5426 struct l2cap_conn
*conn
= hcon
->l2cap_data
;
5427 struct l2cap_chan
*chan
;
5432 BT_DBG("conn %p status 0x%2.2x encrypt %u", conn
, status
, encrypt
);
5434 if (hcon
->type
== LE_LINK
) {
5435 if (!status
&& encrypt
)
5436 smp_distribute_keys(conn
, 0);
5437 cancel_delayed_work(&conn
->security_timer
);
5440 mutex_lock(&conn
->chan_lock
);
5442 list_for_each_entry(chan
, &conn
->chan_l
, list
) {
5443 l2cap_chan_lock(chan
);
5445 BT_DBG("chan %p scid 0x%4.4x state %s", chan
, chan
->scid
,
5446 state_to_string(chan
->state
));
5448 if (chan
->chan_type
== L2CAP_CHAN_CONN_FIX_A2MP
) {
5449 l2cap_chan_unlock(chan
);
5453 if (chan
->scid
== L2CAP_CID_LE_DATA
) {
5454 if (!status
&& encrypt
) {
5455 chan
->sec_level
= hcon
->sec_level
;
5456 l2cap_chan_ready(chan
);
5459 l2cap_chan_unlock(chan
);
5463 if (test_bit(CONF_CONNECT_PEND
, &chan
->conf_state
)) {
5464 l2cap_chan_unlock(chan
);
5468 if (!status
&& (chan
->state
== BT_CONNECTED
||
5469 chan
->state
== BT_CONFIG
)) {
5470 struct sock
*sk
= chan
->sk
;
5472 clear_bit(BT_SK_SUSPEND
, &bt_sk(sk
)->flags
);
5473 sk
->sk_state_change(sk
);
5475 l2cap_check_encryption(chan
, encrypt
);
5476 l2cap_chan_unlock(chan
);
5480 if (chan
->state
== BT_CONNECT
) {
5482 l2cap_start_connection(chan
);
5484 __set_chan_timer(chan
, L2CAP_DISC_TIMEOUT
);
5486 } else if (chan
->state
== BT_CONNECT2
) {
5487 struct sock
*sk
= chan
->sk
;
5488 struct l2cap_conn_rsp rsp
;
5494 if (test_bit(BT_SK_DEFER_SETUP
,
5495 &bt_sk(sk
)->flags
)) {
5496 struct sock
*parent
= bt_sk(sk
)->parent
;
5497 res
= L2CAP_CR_PEND
;
5498 stat
= L2CAP_CS_AUTHOR_PEND
;
5500 parent
->sk_data_ready(parent
, 0);
5502 __l2cap_state_change(chan
, BT_CONFIG
);
5503 res
= L2CAP_CR_SUCCESS
;
5504 stat
= L2CAP_CS_NO_INFO
;
5507 __l2cap_state_change(chan
, BT_DISCONN
);
5508 __set_chan_timer(chan
, L2CAP_DISC_TIMEOUT
);
5509 res
= L2CAP_CR_SEC_BLOCK
;
5510 stat
= L2CAP_CS_NO_INFO
;
5515 rsp
.scid
= cpu_to_le16(chan
->dcid
);
5516 rsp
.dcid
= cpu_to_le16(chan
->scid
);
5517 rsp
.result
= cpu_to_le16(res
);
5518 rsp
.status
= cpu_to_le16(stat
);
5519 l2cap_send_cmd(conn
, chan
->ident
, L2CAP_CONN_RSP
,
5522 if (!test_bit(CONF_REQ_SENT
, &chan
->conf_state
) &&
5523 res
== L2CAP_CR_SUCCESS
) {
5525 set_bit(CONF_REQ_SENT
, &chan
->conf_state
);
5526 l2cap_send_cmd(conn
, l2cap_get_ident(conn
),
5528 l2cap_build_conf_req(chan
, buf
),
5530 chan
->num_conf_req
++;
5534 l2cap_chan_unlock(chan
);
5537 mutex_unlock(&conn
->chan_lock
);
5542 int l2cap_recv_acldata(struct hci_conn
*hcon
, struct sk_buff
*skb
, u16 flags
)
5544 struct l2cap_conn
*conn
= hcon
->l2cap_data
;
5547 conn
= l2cap_conn_add(hcon
, 0);
5552 BT_DBG("conn %p len %d flags 0x%x", conn
, skb
->len
, flags
);
5554 if (!(flags
& ACL_CONT
)) {
5555 struct l2cap_hdr
*hdr
;
5559 BT_ERR("Unexpected start frame (len %d)", skb
->len
);
5560 kfree_skb(conn
->rx_skb
);
5561 conn
->rx_skb
= NULL
;
5563 l2cap_conn_unreliable(conn
, ECOMM
);
5566 /* Start fragment always begin with Basic L2CAP header */
5567 if (skb
->len
< L2CAP_HDR_SIZE
) {
5568 BT_ERR("Frame is too short (len %d)", skb
->len
);
5569 l2cap_conn_unreliable(conn
, ECOMM
);
5573 hdr
= (struct l2cap_hdr
*) skb
->data
;
5574 len
= __le16_to_cpu(hdr
->len
) + L2CAP_HDR_SIZE
;
5576 if (len
== skb
->len
) {
5577 /* Complete frame received */
5578 l2cap_recv_frame(conn
, skb
);
5582 BT_DBG("Start: total len %d, frag len %d", len
, skb
->len
);
5584 if (skb
->len
> len
) {
5585 BT_ERR("Frame is too long (len %d, expected len %d)",
5587 l2cap_conn_unreliable(conn
, ECOMM
);
5591 /* Allocate skb for the complete frame (with header) */
5592 conn
->rx_skb
= bt_skb_alloc(len
, GFP_ATOMIC
);
5596 skb_copy_from_linear_data(skb
, skb_put(conn
->rx_skb
, skb
->len
),
5598 conn
->rx_len
= len
- skb
->len
;
5600 BT_DBG("Cont: frag len %d (expecting %d)", skb
->len
, conn
->rx_len
);
5602 if (!conn
->rx_len
) {
5603 BT_ERR("Unexpected continuation frame (len %d)", skb
->len
);
5604 l2cap_conn_unreliable(conn
, ECOMM
);
5608 if (skb
->len
> conn
->rx_len
) {
5609 BT_ERR("Fragment is too long (len %d, expected %d)",
5610 skb
->len
, conn
->rx_len
);
5611 kfree_skb(conn
->rx_skb
);
5612 conn
->rx_skb
= NULL
;
5614 l2cap_conn_unreliable(conn
, ECOMM
);
5618 skb_copy_from_linear_data(skb
, skb_put(conn
->rx_skb
, skb
->len
),
5620 conn
->rx_len
-= skb
->len
;
5622 if (!conn
->rx_len
) {
5623 /* Complete frame received */
5624 l2cap_recv_frame(conn
, conn
->rx_skb
);
5625 conn
->rx_skb
= NULL
;
5634 static int l2cap_debugfs_show(struct seq_file
*f
, void *p
)
5636 struct l2cap_chan
*c
;
5638 read_lock(&chan_list_lock
);
5640 list_for_each_entry(c
, &chan_list
, global_l
) {
5641 struct sock
*sk
= c
->sk
;
5643 seq_printf(f
, "%s %s %d %d 0x%4.4x 0x%4.4x %d %d %d %d\n",
5644 batostr(&bt_sk(sk
)->src
),
5645 batostr(&bt_sk(sk
)->dst
),
5646 c
->state
, __le16_to_cpu(c
->psm
),
5647 c
->scid
, c
->dcid
, c
->imtu
, c
->omtu
,
5648 c
->sec_level
, c
->mode
);
5651 read_unlock(&chan_list_lock
);
5656 static int l2cap_debugfs_open(struct inode
*inode
, struct file
*file
)
5658 return single_open(file
, l2cap_debugfs_show
, inode
->i_private
);
5661 static const struct file_operations l2cap_debugfs_fops
= {
5662 .open
= l2cap_debugfs_open
,
5664 .llseek
= seq_lseek
,
5665 .release
= single_release
,
5668 static struct dentry
*l2cap_debugfs
;
5670 int __init
l2cap_init(void)
5674 err
= l2cap_init_sockets();
5679 l2cap_debugfs
= debugfs_create_file("l2cap", 0444,
5680 bt_debugfs
, NULL
, &l2cap_debugfs_fops
);
5682 BT_ERR("Failed to create L2CAP debug file");
5688 void l2cap_exit(void)
5690 debugfs_remove(l2cap_debugfs
);
5691 l2cap_cleanup_sockets();
5694 module_param(disable_ertm
, bool, 0644);
5695 MODULE_PARM_DESC(disable_ertm
, "Disable enhanced retransmission mode");