Merge tag 'v3.10.55' into update
[GitHub/mt8127/android_kernel_alcatel_ttab.git] / net / bluetooth / l2cap_core.c
1 /*
2 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (C) 2000-2001 Qualcomm Incorporated
4 Copyright (C) 2009-2010 Gustavo F. Padovan <gustavo@padovan.org>
5 Copyright (C) 2010 Google Inc.
6 Copyright (C) 2011 ProFUSION Embedded Systems
7 Copyright (c) 2012 Code Aurora Forum. All rights reserved.
8
9 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
10
11 This program is free software; you can redistribute it and/or modify
12 it under the terms of the GNU General Public License version 2 as
13 published by the Free Software Foundation;
14
15 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
16 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
18 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
19 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
20 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
21 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
22 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
23
24 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
25 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
26 SOFTWARE IS DISCLAIMED.
27 */
28
29 /* Bluetooth L2CAP core. */
30
31 #include <linux/module.h>
32
33 #include <linux/debugfs.h>
34 #include <linux/crc16.h>
35
36 #include <net/bluetooth/bluetooth.h>
37 #include <net/bluetooth/hci_core.h>
38 #include <net/bluetooth/l2cap.h>
39 #include <net/bluetooth/smp.h>
40 #include <net/bluetooth/a2mp.h>
41 #include <net/bluetooth/amp.h>
42
43 bool disable_ertm;
44
45 static u32 l2cap_feat_mask = L2CAP_FEAT_FIXED_CHAN;
46 static u8 l2cap_fixed_chan[8] = { L2CAP_FC_L2CAP, };
47
48 static LIST_HEAD(chan_list);
49 static DEFINE_RWLOCK(chan_list_lock);
50
51 static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
52 u8 code, u8 ident, u16 dlen, void *data);
53 static void l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len,
54 void *data);
55 static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data);
56 static void l2cap_send_disconn_req(struct l2cap_chan *chan, int err);
57
58 static void l2cap_tx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
59 struct sk_buff_head *skbs, u8 event);
60
61 /* ---- L2CAP channels ---- */
62
63 static struct l2cap_chan *__l2cap_get_chan_by_dcid(struct l2cap_conn *conn,
64 u16 cid)
65 {
66 struct l2cap_chan *c;
67
68 list_for_each_entry(c, &conn->chan_l, list) {
69 if (c->dcid == cid)
70 return c;
71 }
72 return NULL;
73 }
74
75 static struct l2cap_chan *__l2cap_get_chan_by_scid(struct l2cap_conn *conn,
76 u16 cid)
77 {
78 struct l2cap_chan *c;
79
80 list_for_each_entry(c, &conn->chan_l, list) {
81 if (c->scid == cid)
82 return c;
83 }
84 return NULL;
85 }
86
/* Find channel with given SCID.
 * Returns locked channel. */
static struct l2cap_chan *l2cap_get_chan_by_scid(struct l2cap_conn *conn,
						 u16 cid)
{
	struct l2cap_chan *c;

	/* conn->chan_lock only guards the list walk; the channel's own
	 * lock is taken before the list lock is released so the caller
	 * receives it locked.
	 */
	mutex_lock(&conn->chan_lock);
	c = __l2cap_get_chan_by_scid(conn, cid);
	if (c)
		l2cap_chan_lock(c);
	mutex_unlock(&conn->chan_lock);

	/* NOTE(review): no l2cap_chan_hold() is taken here, so the caller
	 * holds only the channel lock, not a reference; later upstream
	 * kernels add a hold in this path to close a use-after-free
	 * window — worth confirming against the callers of this helper.
	 */
	return c;
}
102
/* Find channel with given DCID.
 * Returns locked channel.
 */
static struct l2cap_chan *l2cap_get_chan_by_dcid(struct l2cap_conn *conn,
						 u16 cid)
{
	struct l2cap_chan *c;

	/* Same pattern as l2cap_get_chan_by_scid(): lock the channel
	 * while still holding conn->chan_lock, then drop the list lock.
	 * No reference is taken — the caller gets a locked but un-held
	 * channel.
	 */
	mutex_lock(&conn->chan_lock);
	c = __l2cap_get_chan_by_dcid(conn, cid);
	if (c)
		l2cap_chan_lock(c);
	mutex_unlock(&conn->chan_lock);

	return c;
}
119
120 static struct l2cap_chan *__l2cap_get_chan_by_ident(struct l2cap_conn *conn,
121 u8 ident)
122 {
123 struct l2cap_chan *c;
124
125 list_for_each_entry(c, &conn->chan_l, list) {
126 if (c->ident == ident)
127 return c;
128 }
129 return NULL;
130 }
131
/* Find channel with given signalling command identifier.
 * Returns the channel locked (but without a reference taken), or NULL.
 */
static struct l2cap_chan *l2cap_get_chan_by_ident(struct l2cap_conn *conn,
						  u8 ident)
{
	struct l2cap_chan *c;

	mutex_lock(&conn->chan_lock);
	c = __l2cap_get_chan_by_ident(conn, ident);
	if (c)
		l2cap_chan_lock(c);
	mutex_unlock(&conn->chan_lock);

	return c;
}
145
146 static struct l2cap_chan *__l2cap_global_chan_by_addr(__le16 psm, bdaddr_t *src)
147 {
148 struct l2cap_chan *c;
149
150 list_for_each_entry(c, &chan_list, global_l) {
151 if (c->sport == psm && !bacmp(&bt_sk(c->sk)->src, src))
152 return c;
153 }
154 return NULL;
155 }
156
/* Bind @chan to a PSM on source address @src.
 *
 * If @psm is non-zero it is used verbatim after checking that no other
 * channel already claims the same (psm, src) pair.  If @psm is zero, a
 * dynamic PSM is allocated from 0x1001 upwards in steps of two (PSM
 * values must have an odd least-significant octet).
 *
 * Returns 0 on success, -EADDRINUSE if the requested PSM is taken, or
 * -EINVAL if the dynamic range is exhausted.
 */
int l2cap_add_psm(struct l2cap_chan *chan, bdaddr_t *src, __le16 psm)
{
	int err;

	write_lock(&chan_list_lock);

	if (psm && __l2cap_global_chan_by_addr(psm, src)) {
		err = -EADDRINUSE;
		goto done;
	}

	if (psm) {
		chan->psm = psm;
		chan->sport = psm;
		err = 0;
	} else {
		u16 p;

		/* Stays -EINVAL if every dynamic PSM is already bound */
		err = -EINVAL;
		for (p = 0x1001; p < 0x1100; p += 2)
			if (!__l2cap_global_chan_by_addr(cpu_to_le16(p), src)) {
				chan->psm = cpu_to_le16(p);
				chan->sport = cpu_to_le16(p);
				err = 0;
				break;
			}
	}

done:
	write_unlock(&chan_list_lock);
	return err;
}
189
190 int l2cap_add_scid(struct l2cap_chan *chan, __u16 scid)
191 {
192 write_lock(&chan_list_lock);
193
194 chan->scid = scid;
195
196 write_unlock(&chan_list_lock);
197
198 return 0;
199 }
200
201 static u16 l2cap_alloc_cid(struct l2cap_conn *conn)
202 {
203 u16 cid = L2CAP_CID_DYN_START;
204
205 for (; cid < L2CAP_CID_DYN_END; cid++) {
206 if (!__l2cap_get_chan_by_scid(conn, cid))
207 return cid;
208 }
209
210 return 0;
211 }
212
/* Set the channel state and notify the channel ops.  Lock-free
 * variant: l2cap_state_change() wraps this with the socket lock.
 */
static void __l2cap_state_change(struct l2cap_chan *chan, int state)
{
	BT_DBG("chan %p %s -> %s", chan, state_to_string(chan->state),
	       state_to_string(state));

	chan->state = state;
	chan->ops->state_change(chan, state);
}
221
/* Change the channel state with the backing socket locked. */
static void l2cap_state_change(struct l2cap_chan *chan, int state)
{
	struct sock *sk = chan->sk;

	lock_sock(sk);
	__l2cap_state_change(chan, state);
	release_sock(sk);
}
230
/* Record @err on the backing socket.  Lock-free variant:
 * l2cap_chan_set_err() wraps this with the socket lock.
 */
static inline void __l2cap_chan_set_err(struct l2cap_chan *chan, int err)
{
	struct sock *sk = chan->sk;

	sk->sk_err = err;
}
237
/* Record @err on the backing socket, taking the socket lock. */
static inline void l2cap_chan_set_err(struct l2cap_chan *chan, int err)
{
	struct sock *sk = chan->sk;

	lock_sock(sk);
	__l2cap_chan_set_err(chan, err);
	release_sock(sk);
}
246
/* Arm the ERTM retransmission timer.
 *
 * Deliberately checks the *monitor* timer: while the monitor timer is
 * pending the retransmission timer must not run, so the retrans timer
 * is armed only when no monitor work is queued and a retransmission
 * timeout has actually been negotiated (non-zero).
 */
static void __set_retrans_timer(struct l2cap_chan *chan)
{
	if (!delayed_work_pending(&chan->monitor_timer) &&
	    chan->retrans_timeout) {
		l2cap_set_timer(chan, &chan->retrans_timer,
				msecs_to_jiffies(chan->retrans_timeout));
	}
}
255
/* Arm the ERTM monitor timer.  The retransmission timer is always
 * stopped first — the two are mutually exclusive — and the monitor
 * timer is only started when a monitor timeout was negotiated.
 */
static void __set_monitor_timer(struct l2cap_chan *chan)
{
	__clear_retrans_timer(chan);
	if (chan->monitor_timeout) {
		l2cap_set_timer(chan, &chan->monitor_timer,
				msecs_to_jiffies(chan->monitor_timeout));
	}
}
264
/* Linear scan of @head for the skb whose ERTM control block carries
 * TxSeq == @seq.  Returns the skb still linked in the queue (it is not
 * unlinked), or NULL if no frame with that sequence number is queued.
 */
static struct sk_buff *l2cap_ertm_seq_in_queue(struct sk_buff_head *head,
					       u16 seq)
{
	struct sk_buff *skb;

	skb_queue_walk(head, skb) {
		if (bt_cb(skb)->control.txseq == seq)
			return skb;
	}

	return NULL;
}
277
278 /* ---- L2CAP sequence number lists ---- */
279
280 /* For ERTM, ordered lists of sequence numbers must be tracked for
281 * SREJ requests that are received and for frames that are to be
282 * retransmitted. These seq_list functions implement a singly-linked
283 * list in an array, where membership in the list can also be checked
284 * in constant time. Items can also be added to the tail of the list
285 * and removed from the head in constant time, without further memory
286 * allocs or frees.
287 */
288
/* Allocate and reset the backing array for a sequence list sized for
 * @size entries.  Returns 0 on success or -ENOMEM.
 */
static int l2cap_seq_list_init(struct l2cap_seq_list *seq_list, u16 size)
{
	size_t alloc_size, i;

	/* Allocated size is a power of 2 to map sequence numbers
	 * (which may be up to 14 bits) in to a smaller array that is
	 * sized for the negotiated ERTM transmit windows.
	 */
	alloc_size = roundup_pow_of_two(size);

	/* No overflow concern: size is at most 16 bits, so the
	 * allocation is bounded by 64K entries of u16.
	 */
	seq_list->list = kmalloc(sizeof(u16) * alloc_size, GFP_KERNEL);
	if (!seq_list->list)
		return -ENOMEM;

	/* mask doubles as both index mask and CLEAR/TAIL sentinel space */
	seq_list->mask = alloc_size - 1;
	seq_list->head = L2CAP_SEQ_LIST_CLEAR;
	seq_list->tail = L2CAP_SEQ_LIST_CLEAR;
	for (i = 0; i < alloc_size; i++)
		seq_list->list[i] = L2CAP_SEQ_LIST_CLEAR;

	return 0;
}
311
/* Release the backing array allocated by l2cap_seq_list_init(). */
static inline void l2cap_seq_list_free(struct l2cap_seq_list *seq_list)
{
	kfree(seq_list->list);
}
316
/* Return true if @seq is currently a member of the list.  A slot holds
 * L2CAP_SEQ_LIST_CLEAR when its sequence number is not enqueued.
 */
static inline bool l2cap_seq_list_contains(struct l2cap_seq_list *seq_list,
					   u16 seq)
{
	/* Constant-time check for list membership */
	return seq_list->list[seq & seq_list->mask] != L2CAP_SEQ_LIST_CLEAR;
}
323
/* Remove @seq from the list and return it, or L2CAP_SEQ_LIST_CLEAR if
 * the list is empty or @seq is not found.  Removal from the head is
 * O(1); removing an interior element walks the singly-linked chain.
 */
static u16 l2cap_seq_list_remove(struct l2cap_seq_list *seq_list, u16 seq)
{
	u16 mask = seq_list->mask;

	if (seq_list->head == L2CAP_SEQ_LIST_CLEAR) {
		/* In case someone tries to pop the head of an empty list */
		return L2CAP_SEQ_LIST_CLEAR;
	} else if (seq_list->head == seq) {
		/* Head can be removed in constant time */
		seq_list->head = seq_list->list[seq & mask];
		seq_list->list[seq & mask] = L2CAP_SEQ_LIST_CLEAR;

		/* Removing the last element leaves the list fully clear */
		if (seq_list->head == L2CAP_SEQ_LIST_TAIL) {
			seq_list->head = L2CAP_SEQ_LIST_CLEAR;
			seq_list->tail = L2CAP_SEQ_LIST_CLEAR;
		}
	} else {
		/* Walk the list to find the sequence number */
		u16 prev = seq_list->head;
		while (seq_list->list[prev & mask] != seq) {
			prev = seq_list->list[prev & mask];
			if (prev == L2CAP_SEQ_LIST_TAIL)
				return L2CAP_SEQ_LIST_CLEAR;
		}

		/* Unlink the number from the list and clear it */
		seq_list->list[prev & mask] = seq_list->list[seq & mask];
		seq_list->list[seq & mask] = L2CAP_SEQ_LIST_CLEAR;
		if (seq_list->tail == seq)
			seq_list->tail = prev;
	}
	return seq;
}
357
/* Pop and return the head of the list, or L2CAP_SEQ_LIST_CLEAR when
 * the list is empty.
 */
static inline u16 l2cap_seq_list_pop(struct l2cap_seq_list *seq_list)
{
	/* Remove the head in constant time */
	return l2cap_seq_list_remove(seq_list, seq_list->head);
}
363
364 static void l2cap_seq_list_clear(struct l2cap_seq_list *seq_list)
365 {
366 u16 i;
367
368 if (seq_list->head == L2CAP_SEQ_LIST_CLEAR)
369 return;
370
371 for (i = 0; i <= seq_list->mask; i++)
372 seq_list->list[i] = L2CAP_SEQ_LIST_CLEAR;
373
374 seq_list->head = L2CAP_SEQ_LIST_CLEAR;
375 seq_list->tail = L2CAP_SEQ_LIST_CLEAR;
376 }
377
/* Append @seq to the tail of the list in constant time.  Duplicate
 * appends are ignored (the slot already holds a link value).
 */
static void l2cap_seq_list_append(struct l2cap_seq_list *seq_list, u16 seq)
{
	u16 mask = seq_list->mask;

	/* All appends happen in constant time */

	/* Already a member — nothing to do */
	if (seq_list->list[seq & mask] != L2CAP_SEQ_LIST_CLEAR)
		return;

	if (seq_list->tail == L2CAP_SEQ_LIST_CLEAR)
		seq_list->head = seq;
	else
		seq_list->list[seq_list->tail & mask] = seq;

	/* New element becomes the tail and is marked as end-of-chain */
	seq_list->tail = seq;
	seq_list->list[seq & mask] = L2CAP_SEQ_LIST_TAIL;
}
395
/* Delayed-work handler for chan->chan_timer.
 *
 * Closes the channel with an errno derived from its state:
 * ECONNREFUSED while connected/configuring or while connecting above
 * SDP security, ETIMEDOUT otherwise.  Lock order is conn->chan_lock
 * first, then the channel lock; ops->close() is called after the
 * channel lock is dropped but still under conn->chan_lock.  Drops the
 * reference the timer held on the channel.
 */
static void l2cap_chan_timeout(struct work_struct *work)
{
	struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
					       chan_timer.work);
	struct l2cap_conn *conn = chan->conn;
	int reason;

	BT_DBG("chan %p state %s", chan, state_to_string(chan->state));

	mutex_lock(&conn->chan_lock);
	l2cap_chan_lock(chan);

	if (chan->state == BT_CONNECTED || chan->state == BT_CONFIG)
		reason = ECONNREFUSED;
	else if (chan->state == BT_CONNECT &&
		 chan->sec_level != BT_SECURITY_SDP)
		reason = ECONNREFUSED;
	else
		reason = ETIMEDOUT;

	l2cap_chan_close(chan, reason);

	l2cap_chan_unlock(chan);

	chan->ops->close(chan);
	mutex_unlock(&conn->chan_lock);

	l2cap_chan_put(chan);
}
425
/* Allocate and initialise a new channel and link it on the global
 * channel list.  Returns the channel with an initial kref of one, in
 * state BT_OPEN, or NULL on allocation failure.
 */
struct l2cap_chan *l2cap_chan_create(void)
{
	struct l2cap_chan *chan;

	/* GFP_ATOMIC: allocation must not sleep here — presumably this
	 * can be reached from a non-blocking context; TODO confirm.
	 */
	chan = kzalloc(sizeof(*chan), GFP_ATOMIC);
	if (!chan)
		return NULL;

	mutex_init(&chan->lock);

	write_lock(&chan_list_lock);
	list_add(&chan->global_l, &chan_list);
	write_unlock(&chan_list_lock);

	INIT_DELAYED_WORK(&chan->chan_timer, l2cap_chan_timeout);

	chan->state = BT_OPEN;

	kref_init(&chan->kref);

	/* This flag is cleared in l2cap_chan_ready() */
	set_bit(CONF_NOT_COMPLETE, &chan->conf_state);

	BT_DBG("chan %p", chan);

	return chan;
}
453
/* kref release callback: unlink the channel from the global list and
 * free it.  Invoked only via l2cap_chan_put() when the last reference
 * drops.
 */
static void l2cap_chan_destroy(struct kref *kref)
{
	struct l2cap_chan *chan = container_of(kref, struct l2cap_chan, kref);

	BT_DBG("chan %p", chan);

	write_lock(&chan_list_lock);
	list_del(&chan->global_l);
	write_unlock(&chan_list_lock);

	kfree(chan);
}
466
/* Take a reference on the channel.  The refcount read is for debug
 * output only and is inherently racy.
 */
void l2cap_chan_hold(struct l2cap_chan *c)
{
	BT_DBG("chan %p orig refcnt %d", c, atomic_read(&c->kref.refcount));

	kref_get(&c->kref);
}
473
/* Drop a reference on the channel; frees it via l2cap_chan_destroy()
 * when this was the last reference.
 */
void l2cap_chan_put(struct l2cap_chan *c)
{
	BT_DBG("chan %p orig refcnt %d", c, atomic_read(&c->kref.refcount));

	kref_put(&c->kref, l2cap_chan_destroy);
}
480
481 void l2cap_chan_set_defaults(struct l2cap_chan *chan)
482 {
483 chan->fcs = L2CAP_FCS_CRC16;
484 chan->max_tx = L2CAP_DEFAULT_MAX_TX;
485 chan->tx_win = L2CAP_DEFAULT_TX_WINDOW;
486 chan->tx_win_max = L2CAP_DEFAULT_TX_WINDOW;
487 chan->ack_win = L2CAP_DEFAULT_TX_WINDOW;
488 chan->sec_level = BT_SECURITY_LOW;
489
490 set_bit(FLAG_FORCE_ACTIVE, &chan->flags);
491 }
492
/* Attach @chan to @conn: assign CIDs and MTUs according to the channel
 * type, fill in best-effort flow-spec defaults, take a reference and
 * link the channel on the connection's channel list.  Caller must hold
 * conn->chan_lock (l2cap_chan_add() is the locked wrapper).
 */
void __l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan)
{
	BT_DBG("conn %p, psm 0x%2.2x, dcid 0x%4.4x", conn,
	       __le16_to_cpu(chan->psm), chan->dcid);

	/* Default disconnect reason until something more specific is known */
	conn->disc_reason = HCI_ERROR_REMOTE_USER_TERM;

	chan->conn = conn;

	switch (chan->chan_type) {
	case L2CAP_CHAN_CONN_ORIENTED:
		if (conn->hcon->type == LE_LINK) {
			/* LE connection */
			chan->omtu = L2CAP_DEFAULT_MTU;
			chan->scid = L2CAP_CID_LE_DATA;
			chan->dcid = L2CAP_CID_LE_DATA;
		} else {
			/* Alloc CID for connection-oriented socket */
			chan->scid = l2cap_alloc_cid(conn);
			chan->omtu = L2CAP_DEFAULT_MTU;
		}
		break;

	case L2CAP_CHAN_CONN_LESS:
		/* Connectionless socket */
		chan->scid = L2CAP_CID_CONN_LESS;
		chan->dcid = L2CAP_CID_CONN_LESS;
		chan->omtu = L2CAP_DEFAULT_MTU;
		break;

	case L2CAP_CHAN_CONN_FIX_A2MP:
		chan->scid = L2CAP_CID_A2MP;
		chan->dcid = L2CAP_CID_A2MP;
		chan->omtu = L2CAP_A2MP_DEFAULT_MTU;
		chan->imtu = L2CAP_A2MP_DEFAULT_MTU;
		break;

	default:
		/* Raw socket can send/recv signalling messages only */
		chan->scid = L2CAP_CID_SIGNALING;
		chan->dcid = L2CAP_CID_SIGNALING;
		chan->omtu = L2CAP_DEFAULT_MTU;
	}

	/* Best-effort extended flow spec defaults */
	chan->local_id = L2CAP_BESTEFFORT_ID;
	chan->local_stype = L2CAP_SERV_BESTEFFORT;
	chan->local_msdu = L2CAP_DEFAULT_MAX_SDU_SIZE;
	chan->local_sdu_itime = L2CAP_DEFAULT_SDU_ITIME;
	chan->local_acc_lat = L2CAP_DEFAULT_ACC_LAT;
	chan->local_flush_to = L2CAP_EFS_DEFAULT_FLUSH_TO;

	/* Reference is dropped in l2cap_chan_del() */
	l2cap_chan_hold(chan);

	list_add(&chan->list, &conn->chan_l);
}
548
/* Locked wrapper around __l2cap_chan_add(). */
void l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan)
{
	mutex_lock(&conn->chan_lock);
	__l2cap_chan_add(conn, chan);
	mutex_unlock(&conn->chan_lock);
}
555
/* Detach @chan from its connection and tear it down with @err.
 *
 * Unlinks the channel from the connection's list (dropping the
 * reference taken in __l2cap_chan_add()), disconnects any AMP logical
 * link, notifies the owner via ops->teardown(), and — unless
 * configuration never completed — purges the ERTM/streaming queues and
 * timers.  Caller is expected to hold the relevant locks (see callers
 * such as l2cap_chan_close()).
 */
void l2cap_chan_del(struct l2cap_chan *chan, int err)
{
	struct l2cap_conn *conn = chan->conn;

	__clear_chan_timer(chan);

	BT_DBG("chan %p, conn %p, err %d", chan, conn, err);

	if (conn) {
		struct amp_mgr *mgr = conn->hcon->amp_mgr;
		/* Delete from channel list */
		list_del(&chan->list);

		l2cap_chan_put(chan);

		chan->conn = NULL;

		/* A2MP fixed channels do not hold an hcon reference */
		if (chan->chan_type != L2CAP_CHAN_CONN_FIX_A2MP)
			hci_conn_drop(conn->hcon);

		if (mgr && mgr->bredr_chan == chan)
			mgr->bredr_chan = NULL;
	}

	if (chan->hs_hchan) {
		struct hci_chan *hs_hchan = chan->hs_hchan;

		BT_DBG("chan %p disconnect hs_hchan %p", chan, hs_hchan);
		amp_disconnect_logical_link(hs_hchan);
	}

	chan->ops->teardown(chan, err);

	/* Nothing below was ever set up if config never completed */
	if (test_bit(CONF_NOT_COMPLETE, &chan->conf_state))
		return;

	switch(chan->mode) {
	case L2CAP_MODE_BASIC:
		break;

	case L2CAP_MODE_ERTM:
		__clear_retrans_timer(chan);
		__clear_monitor_timer(chan);
		__clear_ack_timer(chan);

		skb_queue_purge(&chan->srej_q);

		l2cap_seq_list_free(&chan->srej_list);
		l2cap_seq_list_free(&chan->retrans_list);

		/* fall through */

	case L2CAP_MODE_STREAMING:
		skb_queue_purge(&chan->tx_q);
		break;
	}

	return;
}
615
/* Close @chan with @reason, choosing the shutdown path by state:
 *
 *  - BT_LISTEN: just tear down, nothing was ever connected.
 *  - BT_CONNECTED/BT_CONFIG on an ACL link: send a Disconnect Request
 *    and arm the channel timer to bound the wait for the response.
 *  - BT_CONNECT2 on an ACL link: reject the pending incoming request
 *    (security-block if setup was deferred, bad-PSM otherwise) before
 *    deleting the channel.
 *  - BT_CONNECT/BT_DISCONN: delete straight away.
 *
 * Caller is expected to hold the channel lock.
 */
void l2cap_chan_close(struct l2cap_chan *chan, int reason)
{
	struct l2cap_conn *conn = chan->conn;
	struct sock *sk = chan->sk;

	BT_DBG("chan %p state %s sk %p", chan, state_to_string(chan->state),
	       sk);

	switch (chan->state) {
	case BT_LISTEN:
		chan->ops->teardown(chan, 0);
		break;

	case BT_CONNECTED:
	case BT_CONFIG:
		if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED &&
		    conn->hcon->type == ACL_LINK) {
			__set_chan_timer(chan, sk->sk_sndtimeo);
			l2cap_send_disconn_req(chan, reason);
		} else
			l2cap_chan_del(chan, reason);
		break;

	case BT_CONNECT2:
		if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED &&
		    conn->hcon->type == ACL_LINK) {
			struct l2cap_conn_rsp rsp;
			__u16 result;

			if (test_bit(BT_SK_DEFER_SETUP, &bt_sk(sk)->flags))
				result = L2CAP_CR_SEC_BLOCK;
			else
				result = L2CAP_CR_BAD_PSM;
			l2cap_state_change(chan, BT_DISCONN);

			/* Response uses our view of the CIDs swapped:
			 * the peer's scid is our dcid and vice versa.
			 */
			rsp.scid = cpu_to_le16(chan->dcid);
			rsp.dcid = cpu_to_le16(chan->scid);
			rsp.result = cpu_to_le16(result);
			rsp.status = __constant_cpu_to_le16(L2CAP_CS_NO_INFO);
			l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
				       sizeof(rsp), &rsp);
		}

		l2cap_chan_del(chan, reason);
		break;

	case BT_CONNECT:
	case BT_DISCONN:
		l2cap_chan_del(chan, reason);
		break;

	default:
		chan->ops->teardown(chan, 0);
		break;
	}
}
672
673 static inline u8 l2cap_get_auth_type(struct l2cap_chan *chan)
674 {
675 if (chan->chan_type == L2CAP_CHAN_RAW) {
676 switch (chan->sec_level) {
677 case BT_SECURITY_HIGH:
678 return HCI_AT_DEDICATED_BONDING_MITM;
679 case BT_SECURITY_MEDIUM:
680 return HCI_AT_DEDICATED_BONDING;
681 default:
682 return HCI_AT_NO_BONDING;
683 }
684 } else if (chan->psm == __constant_cpu_to_le16(L2CAP_PSM_SDP)) {
685 if (chan->sec_level == BT_SECURITY_LOW)
686 chan->sec_level = BT_SECURITY_SDP;
687
688 if (chan->sec_level == BT_SECURITY_HIGH)
689 return HCI_AT_NO_BONDING_MITM;
690 else
691 return HCI_AT_NO_BONDING;
692 } else {
693 switch (chan->sec_level) {
694 case BT_SECURITY_HIGH:
695 return HCI_AT_GENERAL_BONDING_MITM;
696 case BT_SECURITY_MEDIUM:
697 return HCI_AT_GENERAL_BONDING;
698 default:
699 return HCI_AT_NO_BONDING;
700 }
701 }
702 }
703
/* Service level security */
/* Request the HCI security level implied by the channel's auth type;
 * returns the result of hci_conn_security() (non-zero when security is
 * already satisfied or has been initiated).
 */
int l2cap_chan_check_security(struct l2cap_chan *chan)
{
	struct l2cap_conn *conn = chan->conn;
	__u8 auth_type;

	auth_type = l2cap_get_auth_type(chan);

	return hci_conn_security(conn->hcon, chan->sec_level, auth_type);
}
714
715 static u8 l2cap_get_ident(struct l2cap_conn *conn)
716 {
717 u8 id;
718
719 /* Get next available identificator.
720 * 1 - 128 are used by kernel.
721 * 129 - 199 are reserved.
722 * 200 - 254 are used by utilities like l2ping, etc.
723 */
724
725 spin_lock(&conn->lock);
726
727 if (++conn->tx_ident > 128)
728 conn->tx_ident = 1;
729
730 id = conn->tx_ident;
731
732 spin_unlock(&conn->lock);
733
734 return id;
735 }
736
/* Build and transmit an L2CAP signalling command on @conn.
 *
 * Silently drops the command if the skb cannot be built.  Signalling
 * frames are sent non-flushable when the controller supports it, at
 * maximum HCI priority, with power-mode force-active set.
 */
static void l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len,
			   void *data)
{
	struct sk_buff *skb = l2cap_build_cmd(conn, code, ident, len, data);
	u8 flags;

	BT_DBG("code 0x%2.2x", code);

	if (!skb)
		return;

	if (lmp_no_flush_capable(conn->hcon->hdev))
		flags = ACL_START_NO_FLUSH;
	else
		flags = ACL_START;

	bt_cb(skb)->force_active = BT_POWER_FORCE_ACTIVE_ON;
	skb->priority = HCI_PRIO_MAX;

	hci_send_acl(conn->hchan, skb, flags);
}
758
759 static bool __chan_is_moving(struct l2cap_chan *chan)
760 {
761 return chan->move_state != L2CAP_MOVE_STABLE &&
762 chan->move_state != L2CAP_MOVE_WAIT_PREPARE;
763 }
764
/* Transmit a data skb for @chan, routing it to the high-speed (AMP)
 * logical link when one is established and no move is in progress,
 * otherwise to the BR/EDR ACL link.  Frames for a high-speed channel
 * with no logical link yet are dropped.  Non-flushable transmission is
 * used unless the channel is flagged flushable.
 */
static void l2cap_do_send(struct l2cap_chan *chan, struct sk_buff *skb)
{
	struct hci_conn *hcon = chan->conn->hcon;
	u16 flags;

	BT_DBG("chan %p, skb %p len %d priority %u", chan, skb, skb->len,
	       skb->priority);

	if (chan->hs_hcon && !__chan_is_moving(chan)) {
		if (chan->hs_hchan)
			hci_send_acl(chan->hs_hchan, skb, ACL_COMPLETE);
		else
			kfree_skb(skb);

		return;
	}

	if (!test_bit(FLAG_FLUSHABLE, &chan->flags) &&
	    lmp_no_flush_capable(hcon->hdev))
		flags = ACL_START_NO_FLUSH;
	else
		flags = ACL_START;

	bt_cb(skb)->force_active = test_bit(FLAG_FORCE_ACTIVE, &chan->flags);
	hci_send_acl(chan->conn->hchan, skb, flags);
}
791
/* Decode a 16-bit enhanced control field into @control.  Fields that
 * do not exist for the decoded frame type (S- vs I-frame) are zeroed
 * so stale values never leak through.
 */
static void __unpack_enhanced_control(u16 enh, struct l2cap_ctrl *control)
{
	control->reqseq = (enh & L2CAP_CTRL_REQSEQ) >> L2CAP_CTRL_REQSEQ_SHIFT;
	control->final = (enh & L2CAP_CTRL_FINAL) >> L2CAP_CTRL_FINAL_SHIFT;

	if (enh & L2CAP_CTRL_FRAME_TYPE) {
		/* S-Frame */
		control->sframe = 1;
		control->poll = (enh & L2CAP_CTRL_POLL) >> L2CAP_CTRL_POLL_SHIFT;
		control->super = (enh & L2CAP_CTRL_SUPERVISE) >> L2CAP_CTRL_SUPER_SHIFT;

		control->sar = 0;
		control->txseq = 0;
	} else {
		/* I-Frame */
		control->sframe = 0;
		control->sar = (enh & L2CAP_CTRL_SAR) >> L2CAP_CTRL_SAR_SHIFT;
		control->txseq = (enh & L2CAP_CTRL_TXSEQ) >> L2CAP_CTRL_TXSEQ_SHIFT;

		control->poll = 0;
		control->super = 0;
	}
}
815
/* Decode a 32-bit extended control field into @control; mirror image
 * of __unpack_enhanced_control() for the extended-window encoding.
 */
static void __unpack_extended_control(u32 ext, struct l2cap_ctrl *control)
{
	control->reqseq = (ext & L2CAP_EXT_CTRL_REQSEQ) >> L2CAP_EXT_CTRL_REQSEQ_SHIFT;
	control->final = (ext & L2CAP_EXT_CTRL_FINAL) >> L2CAP_EXT_CTRL_FINAL_SHIFT;

	if (ext & L2CAP_EXT_CTRL_FRAME_TYPE) {
		/* S-Frame */
		control->sframe = 1;
		control->poll = (ext & L2CAP_EXT_CTRL_POLL) >> L2CAP_EXT_CTRL_POLL_SHIFT;
		control->super = (ext & L2CAP_EXT_CTRL_SUPERVISE) >> L2CAP_EXT_CTRL_SUPER_SHIFT;

		control->sar = 0;
		control->txseq = 0;
	} else {
		/* I-Frame */
		control->sframe = 0;
		control->sar = (ext & L2CAP_EXT_CTRL_SAR) >> L2CAP_EXT_CTRL_SAR_SHIFT;
		control->txseq = (ext & L2CAP_EXT_CTRL_TXSEQ) >> L2CAP_EXT_CTRL_TXSEQ_SHIFT;

		control->poll = 0;
		control->super = 0;
	}
}
839
/* Decode the control field at the head of @skb into its control block
 * and pull it off the skb, choosing the enhanced or extended encoding
 * from the channel's FLAG_EXT_CTRL flag.
 */
static inline void __unpack_control(struct l2cap_chan *chan,
				    struct sk_buff *skb)
{
	if (test_bit(FLAG_EXT_CTRL, &chan->flags)) {
		__unpack_extended_control(get_unaligned_le32(skb->data),
					  &bt_cb(skb)->control);
		skb_pull(skb, L2CAP_EXT_CTRL_SIZE);
	} else {
		__unpack_enhanced_control(get_unaligned_le16(skb->data),
					  &bt_cb(skb)->control);
		skb_pull(skb, L2CAP_ENH_CTRL_SIZE);
	}
}
853
/* Encode @control into a 32-bit extended control field; inverse of
 * __unpack_extended_control().
 */
static u32 __pack_extended_control(struct l2cap_ctrl *control)
{
	u32 packed;

	packed = control->reqseq << L2CAP_EXT_CTRL_REQSEQ_SHIFT;
	packed |= control->final << L2CAP_EXT_CTRL_FINAL_SHIFT;

	if (control->sframe) {
		packed |= control->poll << L2CAP_EXT_CTRL_POLL_SHIFT;
		packed |= control->super << L2CAP_EXT_CTRL_SUPER_SHIFT;
		packed |= L2CAP_EXT_CTRL_FRAME_TYPE;
	} else {
		packed |= control->sar << L2CAP_EXT_CTRL_SAR_SHIFT;
		packed |= control->txseq << L2CAP_EXT_CTRL_TXSEQ_SHIFT;
	}

	return packed;
}
872
/* Encode @control into a 16-bit enhanced control field; inverse of
 * __unpack_enhanced_control().
 */
static u16 __pack_enhanced_control(struct l2cap_ctrl *control)
{
	u16 packed;

	packed = control->reqseq << L2CAP_CTRL_REQSEQ_SHIFT;
	packed |= control->final << L2CAP_CTRL_FINAL_SHIFT;

	if (control->sframe) {
		packed |= control->poll << L2CAP_CTRL_POLL_SHIFT;
		packed |= control->super << L2CAP_CTRL_SUPER_SHIFT;
		packed |= L2CAP_CTRL_FRAME_TYPE;
	} else {
		packed |= control->sar << L2CAP_CTRL_SAR_SHIFT;
		packed |= control->txseq << L2CAP_CTRL_TXSEQ_SHIFT;
	}

	return packed;
}
891
/* Write @control into @skb directly after the L2CAP basic header,
 * using the enhanced or extended encoding per FLAG_EXT_CTRL.  Assumes
 * the skb already has room reserved at that offset.
 */
static inline void __pack_control(struct l2cap_chan *chan,
				  struct l2cap_ctrl *control,
				  struct sk_buff *skb)
{
	if (test_bit(FLAG_EXT_CTRL, &chan->flags)) {
		put_unaligned_le32(__pack_extended_control(control),
				   skb->data + L2CAP_HDR_SIZE);
	} else {
		put_unaligned_le16(__pack_enhanced_control(control),
				   skb->data + L2CAP_HDR_SIZE);
	}
}
904
905 static inline unsigned int __ertm_hdr_size(struct l2cap_chan *chan)
906 {
907 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
908 return L2CAP_EXT_HDR_SIZE;
909 else
910 return L2CAP_ENH_HDR_SIZE;
911 }
912
/* Build an S-frame PDU carrying the packed @control field.
 *
 * Layout: basic L2CAP header, then the 16- or 32-bit control field,
 * then an FCS over the whole frame when CRC16 is negotiated.  Returns
 * the skb (priority HCI_PRIO_MAX) or ERR_PTR(-ENOMEM).
 */
static struct sk_buff *l2cap_create_sframe_pdu(struct l2cap_chan *chan,
					       u32 control)
{
	struct sk_buff *skb;
	struct l2cap_hdr *lh;
	int hlen = __ertm_hdr_size(chan);

	if (chan->fcs == L2CAP_FCS_CRC16)
		hlen += L2CAP_FCS_SIZE;

	skb = bt_skb_alloc(hlen, GFP_KERNEL);

	if (!skb)
		return ERR_PTR(-ENOMEM);

	/* S-frames carry no payload, so len covers only control (+FCS) */
	lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
	lh->len = cpu_to_le16(hlen - L2CAP_HDR_SIZE);
	lh->cid = cpu_to_le16(chan->dcid);

	if (test_bit(FLAG_EXT_CTRL, &chan->flags))
		put_unaligned_le32(control, skb_put(skb, L2CAP_EXT_CTRL_SIZE));
	else
		put_unaligned_le16(control, skb_put(skb, L2CAP_ENH_CTRL_SIZE));

	if (chan->fcs == L2CAP_FCS_CRC16) {
		u16 fcs = crc16(0, (u8 *)skb->data, skb->len);
		put_unaligned_le16(fcs, skb_put(skb, L2CAP_FCS_SIZE));
	}

	skb->priority = HCI_PRIO_MAX;
	return skb;
}
945
/* Transmit the supervisory frame described by @control.
 *
 * Updates connection state as a side effect: sets the F-bit when one
 * is owed (CONN_SEND_FBIT) and this is not a poll, tracks whether an
 * RNR is outstanding, and for RR/RNR records the acked sequence number
 * and cancels the pending ack timer.  No-ops for non-S-frames or while
 * an AMP channel move is in progress.
 */
static void l2cap_send_sframe(struct l2cap_chan *chan,
			      struct l2cap_ctrl *control)
{
	struct sk_buff *skb;
	u32 control_field;

	BT_DBG("chan %p, control %p", chan, control);

	if (!control->sframe)
		return;

	if (__chan_is_moving(chan))
		return;

	if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state) &&
	    !control->poll)
		control->final = 1;

	if (control->super == L2CAP_SUPER_RR)
		clear_bit(CONN_RNR_SENT, &chan->conn_state);
	else if (control->super == L2CAP_SUPER_RNR)
		set_bit(CONN_RNR_SENT, &chan->conn_state);

	/* SREJ does not acknowledge new frames, so don't touch the
	 * ack bookkeeping for it.
	 */
	if (control->super != L2CAP_SUPER_SREJ) {
		chan->last_acked_seq = control->reqseq;
		__clear_ack_timer(chan);
	}

	BT_DBG("reqseq %d, final %d, poll %d, super %d", control->reqseq,
	       control->final, control->poll, control->super);

	if (test_bit(FLAG_EXT_CTRL, &chan->flags))
		control_field = __pack_extended_control(control);
	else
		control_field = __pack_enhanced_control(control);

	/* Allocation failure silently drops the S-frame */
	skb = l2cap_create_sframe_pdu(chan, control_field);
	if (!IS_ERR(skb))
		l2cap_do_send(chan, skb);
}
986
/* Send a Receiver Ready, or Receiver Not Ready if we are locally busy,
 * acknowledging up to the current buffer_seq.  @poll sets the P-bit.
 */
static void l2cap_send_rr_or_rnr(struct l2cap_chan *chan, bool poll)
{
	struct l2cap_ctrl control;

	BT_DBG("chan %p, poll %d", chan, poll);

	memset(&control, 0, sizeof(control));
	control.sframe = 1;
	control.poll = poll;

	if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state))
		control.super = L2CAP_SUPER_RNR;
	else
		control.super = L2CAP_SUPER_RR;

	control.reqseq = chan->buffer_seq;
	l2cap_send_sframe(chan, &control);
}
1005
/* True when no Connect Request is outstanding for this channel. */
static inline int __l2cap_no_conn_pending(struct l2cap_chan *chan)
{
	return !test_bit(CONF_CONNECT_PEND, &chan->conf_state);
}
1010
/* Decide whether this channel should try an AMP (high speed) path:
 * requires high-speed support to be enabled (enable_hs — defined
 * elsewhere in this file/module), an AMP-capable controller, a channel
 * policy preferring AMP, and the peer advertising the A2MP fixed
 * channel.
 */
static bool __amp_capable(struct l2cap_chan *chan)
{
	struct l2cap_conn *conn = chan->conn;

	if (enable_hs &&
	    hci_amp_capable() &&
	    chan->chan_policy == BT_CHANNEL_POLICY_AMP_PREFERRED &&
	    conn->fixed_chan_mask & L2CAP_FC_A2MP)
		return true;
	else
		return false;
}
1023
/* Placeholder for extended flow spec validation: currently accepts all
 * EFS parameters unconditionally.
 */
static bool l2cap_check_efs(struct l2cap_chan *chan)
{
	/* Check EFS parameters */
	return true;
}
1029
/* Send an L2CAP Connect Request for @chan, allocating a fresh
 * signalling ident and marking the request as pending
 * (CONF_CONNECT_PEND, cleared when the response arrives).
 */
void l2cap_send_conn_req(struct l2cap_chan *chan)
{
	struct l2cap_conn *conn = chan->conn;
	struct l2cap_conn_req req;

	req.scid = cpu_to_le16(chan->scid);
	req.psm = chan->psm;

	chan->ident = l2cap_get_ident(conn);

	set_bit(CONF_CONNECT_PEND, &chan->conf_state);

	l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_REQ, sizeof(req), &req);
}
1044
/* Send an A2MP Create Channel Request targeting controller @amp_id;
 * the AMP analogue of l2cap_send_conn_req().
 */
static void l2cap_send_create_chan_req(struct l2cap_chan *chan, u8 amp_id)
{
	struct l2cap_create_chan_req req;
	req.scid = cpu_to_le16(chan->scid);
	req.psm = chan->psm;
	req.amp_id = amp_id;

	chan->ident = l2cap_get_ident(chan->conn);

	l2cap_send_cmd(chan->conn, chan->ident, L2CAP_CREATE_CHAN_REQ,
		       sizeof(req), &req);
}
1057
/* Prepare an ERTM channel for an AMP channel move: stop all timers,
 * mark already-transmitted frames for a single retransmission, reset
 * the SREJ/retransmit bookkeeping, and park the state machines in
 * XMIT/MOVE with the remote treated as busy until the move completes.
 * No-op for non-ERTM channels.
 */
static void l2cap_move_setup(struct l2cap_chan *chan)
{
	struct sk_buff *skb;

	BT_DBG("chan %p", chan);

	if (chan->mode != L2CAP_MODE_ERTM)
		return;

	__clear_retrans_timer(chan);
	__clear_monitor_timer(chan);
	__clear_ack_timer(chan);

	chan->retry_count = 0;
	/* Frames at the head of tx_q with a non-zero retry count have
	 * been sent at least once; clamp them to one retry.  The walk
	 * stops at the first never-sent frame.
	 */
	skb_queue_walk(&chan->tx_q, skb) {
		if (bt_cb(skb)->control.retries)
			bt_cb(skb)->control.retries = 1;
		else
			break;
	}

	chan->expected_tx_seq = chan->buffer_seq;

	clear_bit(CONN_REJ_ACT, &chan->conn_state);
	clear_bit(CONN_SREJ_ACT, &chan->conn_state);
	l2cap_seq_list_clear(&chan->retrans_list);
	l2cap_seq_list_clear(&chan->srej_list);
	skb_queue_purge(&chan->srej_q);

	chan->tx_state = L2CAP_TX_STATE_XMIT;
	chan->rx_state = L2CAP_RX_STATE_MOVE;

	set_bit(CONN_REMOTE_BUSY, &chan->conn_state);
}
1092
/* Finish an AMP channel move: reset the move state, then (for ERTM)
 * resynchronise with the peer — the initiator sends an explicit poll
 * and waits for the F-bit, the responder waits for the peer's P-bit.
 */
static void l2cap_move_done(struct l2cap_chan *chan)
{
	u8 move_role = chan->move_role;
	BT_DBG("chan %p", chan);

	chan->move_state = L2CAP_MOVE_STABLE;
	chan->move_role = L2CAP_MOVE_ROLE_NONE;

	if (chan->mode != L2CAP_MODE_ERTM)
		return;

	switch (move_role) {
	case L2CAP_MOVE_ROLE_INITIATOR:
		l2cap_tx(chan, NULL, NULL, L2CAP_EV_EXPLICIT_POLL);
		chan->rx_state = L2CAP_RX_STATE_WAIT_F;
		break;
	case L2CAP_MOVE_ROLE_RESPONDER:
		chan->rx_state = L2CAP_RX_STATE_WAIT_P;
		break;
	}
}
1114
/* Transition the channel to BT_CONNECTED and notify its owner. */
static void l2cap_chan_ready(struct l2cap_chan *chan)
{
	/* This clears all conf flags, including CONF_NOT_COMPLETE */
	chan->conf_state = 0;
	__clear_chan_timer(chan);

	chan->state = BT_CONNECTED;

	chan->ops->ready(chan);
}
1125
/* Kick off the outgoing connection: discover AMP controllers first
 * when the channel prefers high speed, otherwise send a plain Connect
 * Request.
 */
static void l2cap_start_connection(struct l2cap_chan *chan)
{
	if (__amp_capable(chan)) {
		BT_DBG("chan %p AMP capable: discover AMPs", chan);
		a2mp_discover_amp(chan);
	} else {
		l2cap_send_conn_req(chan);
	}
}
1135
/* Drive the channel towards connected.
 *
 * LE links have no configuration phase, so the channel is ready
 * immediately.  On BR/EDR the remote feature mask must be known first:
 * if the Information Request was already sent we either wait for its
 * completion or, once done, start the connection when security allows
 * and no Connect Request is pending; otherwise the feature-mask
 * Information Request is issued here with its timeout armed.
 */
static void l2cap_do_start(struct l2cap_chan *chan)
{
	struct l2cap_conn *conn = chan->conn;

	if (conn->hcon->type == LE_LINK) {
		l2cap_chan_ready(chan);
		return;
	}

	if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) {
		if (!(conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE))
			return;

		if (l2cap_chan_check_security(chan) &&
		    __l2cap_no_conn_pending(chan)) {
			l2cap_start_connection(chan);
		}
	} else {
		struct l2cap_info_req req;
		req.type = __constant_cpu_to_le16(L2CAP_IT_FEAT_MASK);

		conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
		conn->info_ident = l2cap_get_ident(conn);

		schedule_delayed_work(&conn->info_timer, L2CAP_INFO_TIMEOUT);

		l2cap_send_cmd(conn, conn->info_ident, L2CAP_INFO_REQ,
			       sizeof(req), &req);
	}
}
1166
1167 static inline int l2cap_mode_supported(__u8 mode, __u32 feat_mask)
1168 {
1169 u32 local_feat_mask = l2cap_feat_mask;
1170 if (!disable_ertm)
1171 local_feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING;
1172
1173 switch (mode) {
1174 case L2CAP_MODE_ERTM:
1175 return L2CAP_FEAT_ERTM & feat_mask & local_feat_mask;
1176 case L2CAP_MODE_STREAMING:
1177 return L2CAP_FEAT_STREAMING & feat_mask & local_feat_mask;
1178 default:
1179 return 0x00;
1180 }
1181 }
1182
/* Send a Disconnect Request for @chan and move it to BT_DISCONN with
 * @err recorded on the socket.
 *
 * ERTM timers are stopped first so no retransmission fires mid-
 * disconnect.  A2MP fixed channels have no signalling disconnect, so
 * only the state change is performed for them.  No-op when the channel
 * has no connection.
 */
static void l2cap_send_disconn_req(struct l2cap_chan *chan, int err)
{
	struct sock *sk = chan->sk;
	struct l2cap_conn *conn = chan->conn;
	struct l2cap_disconn_req req;

	if (!conn)
		return;

	if (chan->mode == L2CAP_MODE_ERTM && chan->state == BT_CONNECTED) {
		__clear_retrans_timer(chan);
		__clear_monitor_timer(chan);
		__clear_ack_timer(chan);
	}

	if (chan->chan_type == L2CAP_CHAN_CONN_FIX_A2MP) {
		l2cap_state_change(chan, BT_DISCONN);
		return;
	}

	req.dcid = cpu_to_le16(chan->dcid);
	req.scid = cpu_to_le16(chan->scid);
	l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_DISCONN_REQ,
		       sizeof(req), &req);

	/* State change and error are set under one socket lock so
	 * readers never observe BT_DISCONN without the errno.
	 */
	lock_sock(sk);
	__l2cap_state_change(chan, BT_DISCONN);
	__l2cap_chan_set_err(chan, err);
	release_sock(sk);
}
1213
1214 /* ---- L2CAP connections ---- */
/* Walk every channel on @conn and push its setup forward: outgoing
 * channels (BT_CONNECT) get a Connection Request once security allows,
 * incoming channels (BT_CONNECT2) get a Connection Response and, on
 * success, the first Configure Request.
 */
static void l2cap_conn_start(struct l2cap_conn *conn)
{
	struct l2cap_chan *chan, *tmp;

	BT_DBG("conn %p", conn);

	mutex_lock(&conn->chan_lock);

	/* _safe variant: l2cap_chan_close() below may unlink entries */
	list_for_each_entry_safe(chan, tmp, &conn->chan_l, list) {
		struct sock *sk = chan->sk;

		l2cap_chan_lock(chan);

		if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
			l2cap_chan_unlock(chan);
			continue;
		}

		if (chan->state == BT_CONNECT) {
			if (!l2cap_chan_check_security(chan) ||
			    !__l2cap_no_conn_pending(chan)) {
				l2cap_chan_unlock(chan);
				continue;
			}

			/* Close channels that require a mode the remote
			 * lacks and may not fall back (CONF_STATE2_DEVICE).
			 */
			if (!l2cap_mode_supported(chan->mode, conn->feat_mask)
			    && test_bit(CONF_STATE2_DEVICE,
					&chan->conf_state)) {
				l2cap_chan_close(chan, ECONNRESET);
				l2cap_chan_unlock(chan);
				continue;
			}

			l2cap_start_connection(chan);

		} else if (chan->state == BT_CONNECT2) {
			struct l2cap_conn_rsp rsp;
			char buf[128];
			rsp.scid = cpu_to_le16(chan->dcid);
			rsp.dcid = cpu_to_le16(chan->scid);

			if (l2cap_chan_check_security(chan)) {
				lock_sock(sk);
				if (test_bit(BT_SK_DEFER_SETUP,
					     &bt_sk(sk)->flags)) {
					/* Userspace must accept first */
					rsp.result = __constant_cpu_to_le16(L2CAP_CR_PEND);
					rsp.status = __constant_cpu_to_le16(L2CAP_CS_AUTHOR_PEND);
					chan->ops->defer(chan);

				} else {
					__l2cap_state_change(chan, BT_CONFIG);
					rsp.result = __constant_cpu_to_le16(L2CAP_CR_SUCCESS);
					rsp.status = __constant_cpu_to_le16(L2CAP_CS_NO_INFO);
				}
				release_sock(sk);
			} else {
				/* Security procedure still pending */
				rsp.result = __constant_cpu_to_le16(L2CAP_CR_PEND);
				rsp.status = __constant_cpu_to_le16(L2CAP_CS_AUTHEN_PEND);
			}

			l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
				       sizeof(rsp), &rsp);

			/* Only send a Configure Request on a successful
			 * response, and only once per channel.
			 */
			if (test_bit(CONF_REQ_SENT, &chan->conf_state) ||
			    rsp.result != L2CAP_CR_SUCCESS) {
				l2cap_chan_unlock(chan);
				continue;
			}

			set_bit(CONF_REQ_SENT, &chan->conf_state);
			l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
				       l2cap_build_conf_req(chan, buf), buf);
			chan->num_conf_req++;
		}

		l2cap_chan_unlock(chan);
	}

	mutex_unlock(&conn->chan_lock);
}
1295
1296 /* Find socket with cid and source/destination bdaddr.
1297 * Returns closest match, locked.
1298 */
1299 static struct l2cap_chan *l2cap_global_chan_by_scid(int state, u16 cid,
1300 bdaddr_t *src,
1301 bdaddr_t *dst)
1302 {
1303 struct l2cap_chan *c, *c1 = NULL;
1304
1305 read_lock(&chan_list_lock);
1306
1307 list_for_each_entry(c, &chan_list, global_l) {
1308 struct sock *sk = c->sk;
1309
1310 if (state && c->state != state)
1311 continue;
1312
1313 if (c->scid == cid) {
1314 int src_match, dst_match;
1315 int src_any, dst_any;
1316
1317 /* Exact match. */
1318 src_match = !bacmp(&bt_sk(sk)->src, src);
1319 dst_match = !bacmp(&bt_sk(sk)->dst, dst);
1320 if (src_match && dst_match) {
1321 read_unlock(&chan_list_lock);
1322 return c;
1323 }
1324
1325 /* Closest match */
1326 src_any = !bacmp(&bt_sk(sk)->src, BDADDR_ANY);
1327 dst_any = !bacmp(&bt_sk(sk)->dst, BDADDR_ANY);
1328 if ((src_match && dst_any) || (src_any && dst_match) ||
1329 (src_any && dst_any))
1330 c1 = c;
1331 }
1332 }
1333
1334 read_unlock(&chan_list_lock);
1335
1336 return c1;
1337 }
1338
/* An incoming LE link came up: if a socket is listening on the LE data
 * CID, spawn a new channel for the connection and mark it ready
 * immediately (LE has no configuration stage).
 */
static void l2cap_le_conn_ready(struct l2cap_conn *conn)
{
	struct sock *parent, *sk;
	struct l2cap_chan *chan, *pchan;

	BT_DBG("");

	/* Check if we have socket listening on cid */
	pchan = l2cap_global_chan_by_scid(BT_LISTEN, L2CAP_CID_LE_DATA,
					  conn->src, conn->dst);
	if (!pchan)
		return;

	parent = pchan->sk;

	lock_sock(parent);

	chan = pchan->ops->new_connection(pchan);
	if (!chan)
		goto clean;

	sk = chan->sk;

	/* Keep the ACL alive for the lifetime of the channel */
	hci_conn_hold(conn->hcon);
	conn->hcon->disc_timeout = HCI_DISCONN_TIMEOUT;

	bacpy(&bt_sk(sk)->src, conn->src);
	bacpy(&bt_sk(sk)->dst, conn->dst);

	l2cap_chan_add(conn, chan);

	l2cap_chan_ready(chan);

clean:
	release_sock(parent);
}
1375
/* The underlying HCI link is fully established: kick off security and
 * connection setup for every channel already attached to @conn.
 */
static void l2cap_conn_ready(struct l2cap_conn *conn)
{
	struct l2cap_chan *chan;
	struct hci_conn *hcon = conn->hcon;

	BT_DBG("conn %p", conn);

	/* Incoming LE link: hand it to a listening socket, if any */
	if (!hcon->out && hcon->type == LE_LINK)
		l2cap_le_conn_ready(conn);

	/* Outgoing LE link: raise security to the pending level */
	if (hcon->out && hcon->type == LE_LINK)
		smp_conn_security(hcon, hcon->pending_sec_level);

	mutex_lock(&conn->chan_lock);

	list_for_each_entry(chan, &conn->chan_l, list) {

		l2cap_chan_lock(chan);

		/* A2MP fixed channels are managed elsewhere */
		if (chan->chan_type == L2CAP_CHAN_CONN_FIX_A2MP) {
			l2cap_chan_unlock(chan);
			continue;
		}

		if (hcon->type == LE_LINK) {
			/* Ready once SMP reports the channel's required
			 * security level is already satisfied.
			 */
			if (smp_conn_security(hcon, chan->sec_level))
				l2cap_chan_ready(chan);

		} else if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
			/* Connectionless/raw channels need no setup */
			struct sock *sk = chan->sk;
			__clear_chan_timer(chan);
			lock_sock(sk);
			__l2cap_state_change(chan, BT_CONNECTED);
			sk->sk_state_change(sk);
			release_sock(sk);

		} else if (chan->state == BT_CONNECT)
			l2cap_do_start(chan);

		l2cap_chan_unlock(chan);
	}

	mutex_unlock(&conn->chan_lock);
}
1420
/* Notify sockets that we cannot guarantee reliability anymore */
1422 static void l2cap_conn_unreliable(struct l2cap_conn *conn, int err)
1423 {
1424 struct l2cap_chan *chan;
1425
1426 BT_DBG("conn %p", conn);
1427
1428 mutex_lock(&conn->chan_lock);
1429
1430 list_for_each_entry(chan, &conn->chan_l, list) {
1431 if (test_bit(FLAG_FORCE_RELIABLE, &chan->flags))
1432 l2cap_chan_set_err(chan, err);
1433 }
1434
1435 mutex_unlock(&conn->chan_lock);
1436 }
1437
1438 static void l2cap_info_timeout(struct work_struct *work)
1439 {
1440 struct l2cap_conn *conn = container_of(work, struct l2cap_conn,
1441 info_timer.work);
1442
1443 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
1444 conn->info_ident = 0;
1445
1446 l2cap_conn_start(conn);
1447 }
1448
1449 /*
1450 * l2cap_user
1451 * External modules can register l2cap_user objects on l2cap_conn. The ->probe
1452 * callback is called during registration. The ->remove callback is called
1453 * during unregistration.
1454 * An l2cap_user object can either be explicitly unregistered or when the
1455 * underlying l2cap_conn object is deleted. This guarantees that l2cap->hcon,
1456 * l2cap->hchan, .. are valid as long as the remove callback hasn't been called.
1457 * External modules must own a reference to the l2cap_conn object if they intend
1458 * to call l2cap_unregister_user(). The l2cap_conn object might get destroyed at
1459 * any time if they don't.
1460 */
1461
/* Register @user on @conn and invoke its ->probe callback.  Returns 0
 * on success, -EINVAL if already registered, -ENODEV if the connection
 * is being torn down, or the ->probe error.
 */
int l2cap_register_user(struct l2cap_conn *conn, struct l2cap_user *user)
{
	struct hci_dev *hdev = conn->hcon->hdev;
	int ret;

	/* We need to check whether l2cap_conn is registered. If it is not,
	 * we must not register the l2cap_user. l2cap_conn_del() unregisters
	 * l2cap_conn objects, but doesn't provide its own locking. Instead,
	 * it relies on the parent hci_conn object to be locked. This itself
	 * relies on the hci_dev object to be locked. So we must lock the
	 * hci device here, too. */

	hci_dev_lock(hdev);

	/* Non-NULL list pointers mean the user is already registered */
	if (user->list.next || user->list.prev) {
		ret = -EINVAL;
		goto out_unlock;
	}

	/* conn->hchan is NULL after l2cap_conn_del() was called */
	if (!conn->hchan) {
		ret = -ENODEV;
		goto out_unlock;
	}

	ret = user->probe(conn, user);
	if (ret)
		goto out_unlock;

	list_add(&user->list, &conn->users);
	ret = 0;

out_unlock:
	hci_dev_unlock(hdev);
	return ret;
}
EXPORT_SYMBOL(l2cap_register_user);
1499
1500 void l2cap_unregister_user(struct l2cap_conn *conn, struct l2cap_user *user)
1501 {
1502 struct hci_dev *hdev = conn->hcon->hdev;
1503
1504 hci_dev_lock(hdev);
1505
1506 if (!user->list.next || !user->list.prev)
1507 goto out_unlock;
1508
1509 list_del(&user->list);
1510 user->list.next = NULL;
1511 user->list.prev = NULL;
1512 user->remove(conn, user);
1513
1514 out_unlock:
1515 hci_dev_unlock(hdev);
1516 }
1517 EXPORT_SYMBOL(l2cap_unregister_user);
1518
1519 static void l2cap_unregister_all_users(struct l2cap_conn *conn)
1520 {
1521 struct l2cap_user *user;
1522
1523 while (!list_empty(&conn->users)) {
1524 user = list_first_entry(&conn->users, struct l2cap_user, list);
1525 list_del(&user->list);
1526 user->list.next = NULL;
1527 user->list.prev = NULL;
1528 user->remove(conn, user);
1529 }
1530 }
1531
/* Tear down the L2CAP side of @hcon: close every channel with @err,
 * release the HCI channel and drop the l2cap_conn reference.
 */
static void l2cap_conn_del(struct hci_conn *hcon, int err)
{
	struct l2cap_conn *conn = hcon->l2cap_data;
	struct l2cap_chan *chan, *l;

	if (!conn)
		return;

	BT_DBG("hcon %p conn %p, err %d", hcon, conn, err);

	/* Drop any partially reassembled frame */
	kfree_skb(conn->rx_skb);

	l2cap_unregister_all_users(conn);

	mutex_lock(&conn->chan_lock);

	/* Kill channels */
	list_for_each_entry_safe(chan, l, &conn->chan_l, list) {
		/* Hold the channel so it outlives l2cap_chan_del()
		 * until ops->close() has run.
		 */
		l2cap_chan_hold(chan);
		l2cap_chan_lock(chan);

		l2cap_chan_del(chan, err);

		l2cap_chan_unlock(chan);

		chan->ops->close(chan);
		l2cap_chan_put(chan);
	}

	mutex_unlock(&conn->chan_lock);

	hci_chan_del(conn->hchan);

	if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT)
		cancel_delayed_work_sync(&conn->info_timer);

	if (test_and_clear_bit(HCI_CONN_LE_SMP_PEND, &hcon->flags)) {
		cancel_delayed_work_sync(&conn->security_timer);
		smp_chan_destroy(conn);
	}

	/* Detach from the hci_conn; the final reference may be held
	 * elsewhere (see l2cap_conn_get/put).
	 */
	hcon->l2cap_data = NULL;
	conn->hchan = NULL;
	l2cap_conn_put(conn);
}
1577
/* The SMP security procedure timed out: destroy the SMP context and
 * tear down the whole LE connection with ETIMEDOUT.
 */
static void security_timeout(struct work_struct *work)
{
	struct l2cap_conn *conn = container_of(work, struct l2cap_conn,
					       security_timer.work);

	BT_DBG("conn %p", conn);

	/* test_and_clear so we do not race with l2cap_conn_del(),
	 * which also clears HCI_CONN_LE_SMP_PEND before destroying the
	 * SMP context.
	 */
	if (test_and_clear_bit(HCI_CONN_LE_SMP_PEND, &conn->hcon->flags)) {
		smp_chan_destroy(conn);
		l2cap_conn_del(conn->hcon, ETIMEDOUT);
	}
}
1590
/* Get or create the l2cap_conn object for @hcon.  Returns the existing
 * connection if one is attached, otherwise allocates a new one plus
 * its HCI channel.  Returns NULL on allocation failure.
 */
static struct l2cap_conn *l2cap_conn_add(struct hci_conn *hcon)
{
	struct l2cap_conn *conn = hcon->l2cap_data;
	struct hci_chan *hchan;

	if (conn)
		return conn;

	hchan = hci_chan_create(hcon);
	if (!hchan)
		return NULL;

	conn = kzalloc(sizeof(struct l2cap_conn), GFP_KERNEL);
	if (!conn) {
		hci_chan_del(hchan);
		return NULL;
	}

	kref_init(&conn->ref);
	hcon->l2cap_data = conn;
	conn->hcon = hcon;
	hci_conn_get(conn->hcon);	/* released in l2cap_conn_free() */
	conn->hchan = hchan;

	BT_DBG("hcon %p conn %p hchan %p", hcon, conn, hchan);

	/* MTU comes from the transport: the LE MTU if the controller
	 * reports one, otherwise the ACL MTU.
	 */
	switch (hcon->type) {
	case LE_LINK:
		if (hcon->hdev->le_mtu) {
			conn->mtu = hcon->hdev->le_mtu;
			break;
		}
		/* fall through */
	default:
		conn->mtu = hcon->hdev->acl_mtu;
		break;
	}

	conn->src = &hcon->hdev->bdaddr;
	conn->dst = &hcon->dst;

	conn->feat_mask = 0;

	spin_lock_init(&conn->lock);
	mutex_init(&conn->chan_lock);

	INIT_LIST_HEAD(&conn->chan_l);
	INIT_LIST_HEAD(&conn->users);

	/* LE links use the security timer, BR/EDR links the info timer */
	if (hcon->type == LE_LINK)
		INIT_DELAYED_WORK(&conn->security_timer, security_timeout);
	else
		INIT_DELAYED_WORK(&conn->info_timer, l2cap_info_timeout);

	conn->disc_reason = HCI_ERROR_REMOTE_USER_TERM;

	return conn;
}
1649
/* kref release callback: drop the hci_conn reference taken in
 * l2cap_conn_add() and free the connection object.
 */
static void l2cap_conn_free(struct kref *ref)
{
	struct l2cap_conn *conn = container_of(ref, struct l2cap_conn, ref);

	hci_conn_put(conn->hcon);
	kfree(conn);
}
1657
/* Take an additional reference on @conn (paired with l2cap_conn_put). */
void l2cap_conn_get(struct l2cap_conn *conn)
{
	kref_get(&conn->ref);
}
EXPORT_SYMBOL(l2cap_conn_get);
1663
/* Drop a reference on @conn; frees it via l2cap_conn_free() when the
 * last reference goes away.
 */
void l2cap_conn_put(struct l2cap_conn *conn)
{
	kref_put(&conn->ref, l2cap_conn_free);
}
EXPORT_SYMBOL(l2cap_conn_put);
1669
1670 /* ---- Socket interface ---- */
1671
1672 /* Find socket with psm and source / destination bdaddr.
1673 * Returns closest match.
1674 */
1675 static struct l2cap_chan *l2cap_global_chan_by_psm(int state, __le16 psm,
1676 bdaddr_t *src,
1677 bdaddr_t *dst)
1678 {
1679 struct l2cap_chan *c, *c1 = NULL;
1680
1681 read_lock(&chan_list_lock);
1682
1683 list_for_each_entry(c, &chan_list, global_l) {
1684 struct sock *sk = c->sk;
1685
1686 if (state && c->state != state)
1687 continue;
1688
1689 if (c->psm == psm) {
1690 int src_match, dst_match;
1691 int src_any, dst_any;
1692
1693 /* Exact match. */
1694 src_match = !bacmp(&bt_sk(sk)->src, src);
1695 dst_match = !bacmp(&bt_sk(sk)->dst, dst);
1696 if (src_match && dst_match) {
1697 read_unlock(&chan_list_lock);
1698 return c;
1699 }
1700
1701 /* Closest match */
1702 src_any = !bacmp(&bt_sk(sk)->src, BDADDR_ANY);
1703 dst_any = !bacmp(&bt_sk(sk)->dst, BDADDR_ANY);
1704 if ((src_match && dst_any) || (src_any && dst_match) ||
1705 (src_any && dst_any))
1706 c1 = c;
1707 }
1708 }
1709
1710 read_unlock(&chan_list_lock);
1711
1712 return c1;
1713 }
1714
/* Initiate an outgoing L2CAP connection on @chan towards @dst.  Either
 * @psm (connection-oriented) or @cid (fixed channel) selects the remote
 * endpoint.  Creates the HCI link if needed and starts the channel
 * state machine.  Returns 0 on success or a negative errno.
 */
int l2cap_chan_connect(struct l2cap_chan *chan, __le16 psm, u16 cid,
		       bdaddr_t *dst, u8 dst_type)
{
	struct sock *sk = chan->sk;
	bdaddr_t *src = &bt_sk(sk)->src;
	struct l2cap_conn *conn;
	struct hci_conn *hcon;
	struct hci_dev *hdev;
	__u8 auth_type;
	int err;

	BT_DBG("%pMR -> %pMR (type %u) psm 0x%2.2x", src, dst,
	       dst_type, __le16_to_cpu(psm));

	hdev = hci_get_route(dst, src);
	if (!hdev)
		return -EHOSTUNREACH;

	hci_dev_lock(hdev);

	l2cap_chan_lock(chan);

	/* PSM must be odd and lsb of upper byte must be 0 */
	if ((__le16_to_cpu(psm) & 0x0101) != 0x0001 && !cid &&
	    chan->chan_type != L2CAP_CHAN_RAW) {
		err = -EINVAL;
		goto done;
	}

	/* Connection-oriented channels need either a PSM or a CID */
	if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED && !(psm || cid)) {
		err = -EINVAL;
		goto done;
	}

	switch (chan->mode) {
	case L2CAP_MODE_BASIC:
		break;
	case L2CAP_MODE_ERTM:
	case L2CAP_MODE_STREAMING:
		if (!disable_ertm)
			break;
		/* fall through */
	default:
		err = -ENOTSUPP;
		goto done;
	}

	switch (chan->state) {
	case BT_CONNECT:
	case BT_CONNECT2:
	case BT_CONFIG:
		/* Already connecting */
		err = 0;
		goto done;

	case BT_CONNECTED:
		/* Already connected */
		err = -EISCONN;
		goto done;

	case BT_OPEN:
	case BT_BOUND:
		/* Can connect */
		break;

	default:
		err = -EBADFD;
		goto done;
	}

	/* Set destination address and psm */
	lock_sock(sk);
	bacpy(&bt_sk(sk)->dst, dst);
	release_sock(sk);

	chan->psm = psm;
	chan->dcid = cid;

	auth_type = l2cap_get_auth_type(chan);

	/* The LE data CID selects an LE link, everything else is ACL */
	if (chan->dcid == L2CAP_CID_LE_DATA)
		hcon = hci_connect(hdev, LE_LINK, 0, dst, dst_type,
				   chan->sec_level, auth_type);
	else
		hcon = hci_connect(hdev, ACL_LINK, 0, dst, dst_type,
				   chan->sec_level, auth_type);

	if (IS_ERR(hcon)) {
		err = PTR_ERR(hcon);
		goto done;
	}

	conn = l2cap_conn_add(hcon);
	if (!conn) {
		hci_conn_drop(hcon);
		err = -ENOMEM;
		goto done;
	}

	if (hcon->type == LE_LINK) {
		err = 0;

		/* Only a single channel per LE link is supported here */
		if (!list_empty(&conn->chan_l)) {
			err = -EBUSY;
			hci_conn_drop(hcon);
		}

		if (err)
			goto done;
	}

	/* Update source addr of the socket */
	bacpy(src, conn->src);

	/* l2cap_chan_add() takes conn->chan_lock, so drop the channel
	 * lock around it to preserve lock ordering.
	 */
	l2cap_chan_unlock(chan);
	l2cap_chan_add(conn, chan);
	l2cap_chan_lock(chan);

	l2cap_state_change(chan, BT_CONNECT);
	__set_chan_timer(chan, sk->sk_sndtimeo);

	/* If the link is already up, continue setup immediately instead
	 * of waiting for the connect-complete event.
	 */
	if (hcon->state == BT_CONNECTED) {
		if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
			__clear_chan_timer(chan);
			if (l2cap_chan_check_security(chan))
				l2cap_state_change(chan, BT_CONNECTED);
		} else
			l2cap_do_start(chan);
	}

	err = 0;

done:
	l2cap_chan_unlock(chan);
	hci_dev_unlock(hdev);
	hci_dev_put(hdev);
	return err;
}
1853
/* Wait (interruptibly) until every outstanding ERTM I-frame has been
 * acked or the channel loses its connection.  Called with the socket
 * locked; the lock is dropped while sleeping.  Returns 0 on success or
 * a negative errno (pending signal or socket error).
 */
int __l2cap_wait_ack(struct sock *sk)
{
	struct l2cap_chan *chan = l2cap_pi(sk)->chan;
	DECLARE_WAITQUEUE(wait, current);
	int err = 0;
	int timeo = HZ/5;

	add_wait_queue(sk_sleep(sk), &wait);
	set_current_state(TASK_INTERRUPTIBLE);
	while (chan->unacked_frames > 0 && chan->conn) {
		if (!timeo)
			timeo = HZ/5;

		if (signal_pending(current)) {
			err = sock_intr_errno(timeo);
			break;
		}

		/* Drop the socket lock while sleeping so the receive
		 * path can process incoming acks.
		 */
		release_sock(sk);
		timeo = schedule_timeout(timeo);
		lock_sock(sk);
		set_current_state(TASK_INTERRUPTIBLE);

		err = sock_error(sk);
		if (err)
			break;
	}
	set_current_state(TASK_RUNNING);
	remove_wait_queue(sk_sleep(sk), &wait);
	return err;
}
1885
1886 static void l2cap_monitor_timeout(struct work_struct *work)
1887 {
1888 struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
1889 monitor_timer.work);
1890
1891 BT_DBG("chan %p", chan);
1892
1893 l2cap_chan_lock(chan);
1894
1895 if (!chan->conn) {
1896 l2cap_chan_unlock(chan);
1897 l2cap_chan_put(chan);
1898 return;
1899 }
1900
1901 l2cap_tx(chan, NULL, NULL, L2CAP_EV_MONITOR_TO);
1902
1903 l2cap_chan_unlock(chan);
1904 l2cap_chan_put(chan);
1905 }
1906
1907 static void l2cap_retrans_timeout(struct work_struct *work)
1908 {
1909 struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
1910 retrans_timer.work);
1911
1912 BT_DBG("chan %p", chan);
1913
1914 l2cap_chan_lock(chan);
1915
1916 if (!chan->conn) {
1917 l2cap_chan_unlock(chan);
1918 l2cap_chan_put(chan);
1919 return;
1920 }
1921
1922 l2cap_tx(chan, NULL, NULL, L2CAP_EV_RETRANS_TO);
1923 l2cap_chan_unlock(chan);
1924 l2cap_chan_put(chan);
1925 }
1926
/* Transmit all queued SDU fragments in streaming mode.  Streaming mode
 * has no retransmission: each frame is sequence-numbered, optionally
 * FCS-protected, and sent immediately.
 */
static void l2cap_streaming_send(struct l2cap_chan *chan,
				 struct sk_buff_head *skbs)
{
	struct sk_buff *skb;
	struct l2cap_ctrl *control;

	BT_DBG("chan %p, skbs %p", chan, skbs);

	/* Don't transmit while an AMP channel move is in progress */
	if (__chan_is_moving(chan))
		return;

	skb_queue_splice_tail_init(skbs, &chan->tx_q);

	while (!skb_queue_empty(&chan->tx_q)) {

		skb = skb_dequeue(&chan->tx_q);

		bt_cb(skb)->control.retries = 1;
		control = &bt_cb(skb)->control;

		control->reqseq = 0;
		control->txseq = chan->next_tx_seq;

		__pack_control(chan, control, skb);

		if (chan->fcs == L2CAP_FCS_CRC16) {
			u16 fcs = crc16(0, (u8 *) skb->data, skb->len);
			put_unaligned_le16(fcs, skb_put(skb, L2CAP_FCS_SIZE));
		}

		l2cap_do_send(chan, skb);

		BT_DBG("Sent txseq %u", control->txseq);

		chan->next_tx_seq = __next_seq(chan, chan->next_tx_seq);
		chan->frames_sent++;
	}
}
1965
/* Transmit as many new ERTM I-frames as the remote TX window allows.
 * Returns the number of frames sent, 0 when blocked (remote busy,
 * wrong TX state, channel moving), or -ENOTCONN.
 */
static int l2cap_ertm_send(struct l2cap_chan *chan)
{
	struct sk_buff *skb, *tx_skb;
	struct l2cap_ctrl *control;
	int sent = 0;

	BT_DBG("chan %p", chan);

	if (chan->state != BT_CONNECTED)
		return -ENOTCONN;

	if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state))
		return 0;

	if (__chan_is_moving(chan))
		return 0;

	while (chan->tx_send_head &&
	       chan->unacked_frames < chan->remote_tx_win &&
	       chan->tx_state == L2CAP_TX_STATE_XMIT) {

		skb = chan->tx_send_head;

		bt_cb(skb)->control.retries = 1;
		control = &bt_cb(skb)->control;

		if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state))
			control->final = 1;

		/* Each I-frame also acks everything received so far */
		control->reqseq = chan->buffer_seq;
		chan->last_acked_seq = chan->buffer_seq;
		control->txseq = chan->next_tx_seq;

		__pack_control(chan, control, skb);

		if (chan->fcs == L2CAP_FCS_CRC16) {
			u16 fcs = crc16(0, (u8 *) skb->data, skb->len);
			put_unaligned_le16(fcs, skb_put(skb, L2CAP_FCS_SIZE));
		}

		/* Clone after data has been modified. Data is assumed to be
		   read-only (for locking purposes) on cloned sk_buffs.
		 */
		tx_skb = skb_clone(skb, GFP_KERNEL);

		if (!tx_skb)
			break;

		__set_retrans_timer(chan);

		chan->next_tx_seq = __next_seq(chan, chan->next_tx_seq);
		chan->unacked_frames++;
		chan->frames_sent++;
		sent++;

		/* The original stays on tx_q for retransmission;
		 * advance tx_send_head to the next untransmitted frame.
		 */
		if (skb_queue_is_last(&chan->tx_q, skb))
			chan->tx_send_head = NULL;
		else
			chan->tx_send_head = skb_queue_next(&chan->tx_q, skb);

		l2cap_do_send(chan, tx_skb);
		BT_DBG("Sent txseq %u", control->txseq);
	}

	BT_DBG("Sent %d, %u unacked, %u in ERTM queue", sent,
	       chan->unacked_frames, skb_queue_len(&chan->tx_q));

	return sent;
}
2035
/* Retransmit every frame whose sequence number is queued on
 * chan->retrans_list.  Disconnects the channel when a frame exceeds
 * the max_tx retry limit.
 */
static void l2cap_ertm_resend(struct l2cap_chan *chan)
{
	struct l2cap_ctrl control;
	struct sk_buff *skb;
	struct sk_buff *tx_skb;
	u16 seq;

	BT_DBG("chan %p", chan);

	if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state))
		return;

	if (__chan_is_moving(chan))
		return;

	while (chan->retrans_list.head != L2CAP_SEQ_LIST_CLEAR) {
		seq = l2cap_seq_list_pop(&chan->retrans_list);

		skb = l2cap_ertm_seq_in_queue(&chan->tx_q, seq);
		if (!skb) {
			BT_DBG("Error: Can't retransmit seq %d, frame missing",
			       seq);
			continue;
		}

		bt_cb(skb)->control.retries++;
		control = bt_cb(skb)->control;

		/* max_tx == 0 means unlimited retransmissions */
		if (chan->max_tx != 0 &&
		    bt_cb(skb)->control.retries > chan->max_tx) {
			BT_DBG("Retry limit exceeded (%d)", chan->max_tx);
			l2cap_send_disconn_req(chan, ECONNRESET);
			l2cap_seq_list_clear(&chan->retrans_list);
			break;
		}

		control.reqseq = chan->buffer_seq;
		if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state))
			control.final = 1;
		else
			control.final = 0;

		if (skb_cloned(skb)) {
			/* Cloned sk_buffs are read-only, so we need a
			 * writeable copy
			 */
			tx_skb = skb_copy(skb, GFP_KERNEL);
		} else {
			tx_skb = skb_clone(skb, GFP_KERNEL);
		}

		if (!tx_skb) {
			l2cap_seq_list_clear(&chan->retrans_list);
			break;
		}

		/* Update skb contents: rewrite the control field with the
		 * fresh reqseq/final bits computed above.
		 */
		if (test_bit(FLAG_EXT_CTRL, &chan->flags)) {
			put_unaligned_le32(__pack_extended_control(&control),
					   tx_skb->data + L2CAP_HDR_SIZE);
		} else {
			put_unaligned_le16(__pack_enhanced_control(&control),
					   tx_skb->data + L2CAP_HDR_SIZE);
		}

		if (chan->fcs == L2CAP_FCS_CRC16) {
			u16 fcs = crc16(0, (u8 *) tx_skb->data, tx_skb->len);
			put_unaligned_le16(fcs, skb_put(tx_skb,
							L2CAP_FCS_SIZE));
		}

		l2cap_do_send(chan, tx_skb);

		BT_DBG("Resent txseq %d", control.txseq);

		chan->last_acked_seq = chan->buffer_seq;
	}
}
2114
/* Queue the single frame named by control->reqseq for retransmission
 * and flush the retransmit list.
 */
static void l2cap_retransmit(struct l2cap_chan *chan,
			     struct l2cap_ctrl *control)
{
	BT_DBG("chan %p, control %p", chan, control);

	l2cap_seq_list_append(&chan->retrans_list, control->reqseq);
	l2cap_ertm_resend(chan);
}
2123
/* Retransmit all unacked frames starting at control->reqseq, e.g. in
 * response to a poll or reject from the remote.
 */
static void l2cap_retransmit_all(struct l2cap_chan *chan,
				 struct l2cap_ctrl *control)
{
	struct sk_buff *skb;

	BT_DBG("chan %p, control %p", chan, control);

	if (control->poll)
		set_bit(CONN_SEND_FBIT, &chan->conn_state);

	l2cap_seq_list_clear(&chan->retrans_list);

	if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state))
		return;

	if (chan->unacked_frames) {
		/* Find the first frame to resend... */
		skb_queue_walk(&chan->tx_q, skb) {
			if (bt_cb(skb)->control.txseq == control->reqseq ||
			    skb == chan->tx_send_head)
				break;
		}

		/* ...then queue it and everything after it, up to the
		 * first never-transmitted frame (tx_send_head).
		 */
		skb_queue_walk_from(&chan->tx_q, skb) {
			if (skb == chan->tx_send_head)
				break;

			l2cap_seq_list_append(&chan->retrans_list,
					      bt_cb(skb)->control.txseq);
		}

		l2cap_ertm_resend(chan);
	}
}
2157
/* Acknowledge received I-frames: send RNR while locally busy,
 * otherwise piggyback acks on pending I-frames, send an explicit RR
 * once the backlog reaches 3/4 of the window, or arm the ack timer to
 * batch further acks.
 */
static void l2cap_send_ack(struct l2cap_chan *chan)
{
	struct l2cap_ctrl control;
	u16 frames_to_ack = __seq_offset(chan, chan->buffer_seq,
					 chan->last_acked_seq);
	int threshold;

	BT_DBG("chan %p last_acked_seq %d buffer_seq %d",
	       chan, chan->last_acked_seq, chan->buffer_seq);

	memset(&control, 0, sizeof(control));
	control.sframe = 1;

	if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state) &&
	    chan->rx_state == L2CAP_RX_STATE_RECV) {
		/* Locally busy: tell the remote to stop sending */
		__clear_ack_timer(chan);
		control.super = L2CAP_SUPER_RNR;
		control.reqseq = chan->buffer_seq;
		l2cap_send_sframe(chan, &control);
	} else {
		if (!test_bit(CONN_REMOTE_BUSY, &chan->conn_state)) {
			l2cap_ertm_send(chan);
			/* If any i-frames were sent, they included an ack */
			if (chan->buffer_seq == chan->last_acked_seq)
				frames_to_ack = 0;
		}

		/* Ack now if the window is 3/4ths full.
		 * Calculate without mul or div
		 */
		threshold = chan->ack_win;
		threshold += threshold << 1;
		threshold >>= 2;

		BT_DBG("frames_to_ack %u, threshold %d", frames_to_ack,
		       threshold);

		if (frames_to_ack >= threshold) {
			__clear_ack_timer(chan);
			control.super = L2CAP_SUPER_RR;
			control.reqseq = chan->buffer_seq;
			l2cap_send_sframe(chan, &control);
			frames_to_ack = 0;
		}

		/* Otherwise defer the ack so it can be batched */
		if (frames_to_ack)
			__set_ack_timer(chan);
	}
}
2207
/* Copy @len bytes of user data from @msg into @skb: the first @count
 * bytes go into the skb itself, the remainder is split into
 * continuation fragments (each at most conn->mtu bytes) chained on the
 * skb's frag_list.  Returns bytes copied or a negative errno.
 */
static inline int l2cap_skbuff_fromiovec(struct l2cap_chan *chan,
					 struct msghdr *msg, int len,
					 int count, struct sk_buff *skb)
{
	struct l2cap_conn *conn = chan->conn;
	struct sk_buff **frag;
	int sent = 0;

	if (memcpy_fromiovec(skb_put(skb, count), msg->msg_iov, count))
		return -EFAULT;

	sent += count;
	len -= count;

	/* Continuation fragments (no L2CAP header) */
	frag = &skb_shinfo(skb)->frag_list;
	while (len) {
		struct sk_buff *tmp;

		count = min_t(unsigned int, conn->mtu, len);

		tmp = chan->ops->alloc_skb(chan, count,
					   msg->msg_flags & MSG_DONTWAIT);
		if (IS_ERR(tmp))
			return PTR_ERR(tmp);

		*frag = tmp;

		if (memcpy_fromiovec(skb_put(*frag, count), msg->msg_iov, count))
			return -EFAULT;

		(*frag)->priority = skb->priority;

		sent += count;
		len -= count;

		/* Account the fragment in the parent skb totals */
		skb->len += (*frag)->len;
		skb->data_len += (*frag)->len;

		frag = &(*frag)->next;
	}

	return sent;
}
2252
/* Build a connectionless PDU: L2CAP header plus PSM, followed by the
 * user data from @msg.  Returns the skb or an ERR_PTR.
 */
static struct sk_buff *l2cap_create_connless_pdu(struct l2cap_chan *chan,
						 struct msghdr *msg, size_t len,
						 u32 priority)
{
	struct l2cap_conn *conn = chan->conn;
	struct sk_buff *skb;
	int err, count, hlen = L2CAP_HDR_SIZE + L2CAP_PSMLEN_SIZE;
	struct l2cap_hdr *lh;

	BT_DBG("chan %p len %zu priority %u", chan, len, priority);

	/* The first fragment carries the header; the rest is chained
	 * onto the frag_list by l2cap_skbuff_fromiovec().
	 */
	count = min_t(unsigned int, (conn->mtu - hlen), len);

	skb = chan->ops->alloc_skb(chan, count + hlen,
				   msg->msg_flags & MSG_DONTWAIT);
	if (IS_ERR(skb))
		return skb;

	skb->priority = priority;

	/* Create L2CAP header */
	lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
	lh->cid = cpu_to_le16(chan->dcid);
	lh->len = cpu_to_le16(len + L2CAP_PSMLEN_SIZE);
	put_unaligned(chan->psm, skb_put(skb, L2CAP_PSMLEN_SIZE));

	err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
	if (unlikely(err < 0)) {
		kfree_skb(skb);
		return ERR_PTR(err);
	}
	return skb;
}
2286
2287 static struct sk_buff *l2cap_create_basic_pdu(struct l2cap_chan *chan,
2288 struct msghdr *msg, size_t len,
2289 u32 priority)
2290 {
2291 struct l2cap_conn *conn = chan->conn;
2292 struct sk_buff *skb;
2293 int err, count;
2294 struct l2cap_hdr *lh;
2295
2296 BT_DBG("chan %p len %zu", chan, len);
2297
2298 count = min_t(unsigned int, (conn->mtu - L2CAP_HDR_SIZE), len);
2299
2300 skb = chan->ops->alloc_skb(chan, count + L2CAP_HDR_SIZE,
2301 msg->msg_flags & MSG_DONTWAIT);
2302 if (IS_ERR(skb))
2303 return skb;
2304
2305 skb->priority = priority;
2306
2307 /* Create L2CAP header */
2308 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
2309 lh->cid = cpu_to_le16(chan->dcid);
2310 lh->len = cpu_to_le16(len);
2311
2312 err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
2313 if (unlikely(err < 0)) {
2314 kfree_skb(skb);
2315 return ERR_PTR(err);
2316 }
2317 return skb;
2318 }
2319
/* Build one ERTM/streaming I-frame PDU.  @sdulen, when non-zero, is
 * placed in an SDU-length field (first fragment of a segmented SDU).
 * The control field is zero-filled here and packed at send time.
 * Returns the skb or an ERR_PTR.
 */
static struct sk_buff *l2cap_create_iframe_pdu(struct l2cap_chan *chan,
					       struct msghdr *msg, size_t len,
					       u16 sdulen)
{
	struct l2cap_conn *conn = chan->conn;
	struct sk_buff *skb;
	int err, count, hlen;
	struct l2cap_hdr *lh;

	BT_DBG("chan %p len %zu", chan, len);

	if (!conn)
		return ERR_PTR(-ENOTCONN);

	hlen = __ertm_hdr_size(chan);

	if (sdulen)
		hlen += L2CAP_SDULEN_SIZE;

	if (chan->fcs == L2CAP_FCS_CRC16)
		hlen += L2CAP_FCS_SIZE;

	count = min_t(unsigned int, (conn->mtu - hlen), len);

	skb = chan->ops->alloc_skb(chan, count + hlen,
				   msg->msg_flags & MSG_DONTWAIT);
	if (IS_ERR(skb))
		return skb;

	/* Create L2CAP header */
	lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
	lh->cid = cpu_to_le16(chan->dcid);
	lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));

	/* Control header is populated later */
	if (test_bit(FLAG_EXT_CTRL, &chan->flags))
		put_unaligned_le32(0, skb_put(skb, L2CAP_EXT_CTRL_SIZE));
	else
		put_unaligned_le16(0, skb_put(skb, L2CAP_ENH_CTRL_SIZE));

	if (sdulen)
		put_unaligned_le16(sdulen, skb_put(skb, L2CAP_SDULEN_SIZE));

	err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
	if (unlikely(err < 0)) {
		kfree_skb(skb);
		return ERR_PTR(err);
	}

	bt_cb(skb)->control.fcs = chan->fcs;
	bt_cb(skb)->control.retries = 0;
	return skb;
}
2373
/* Split an SDU of @len bytes from @msg into I-frame PDUs and append
 * them to @seg_queue, tagging each with the appropriate SAR value
 * (unsegmented, start, continue, end).  Returns 0 or a negative errno
 * (the queue is purged on error).
 */
static int l2cap_segment_sdu(struct l2cap_chan *chan,
			     struct sk_buff_head *seg_queue,
			     struct msghdr *msg, size_t len)
{
	struct sk_buff *skb;
	u16 sdu_len;
	size_t pdu_len;
	u8 sar;

	BT_DBG("chan %p, msg %p, len %zu", chan, msg, len);

	/* It is critical that ERTM PDUs fit in a single HCI fragment,
	 * so fragmented skbs are not used.  The HCI layer's handling
	 * of fragmented skbs is not compatible with ERTM's queueing.
	 */

	/* PDU size is derived from the HCI MTU */
	pdu_len = chan->conn->mtu;

	/* Constrain PDU size for BR/EDR connections */
	if (!chan->hs_hcon)
		pdu_len = min_t(size_t, pdu_len, L2CAP_BREDR_MAX_PAYLOAD);

	/* Adjust for largest possible L2CAP overhead. */
	if (chan->fcs)
		pdu_len -= L2CAP_FCS_SIZE;

	pdu_len -= __ertm_hdr_size(chan);

	/* Remote device may have requested smaller PDUs */
	pdu_len = min_t(size_t, pdu_len, chan->remote_mps);

	if (len <= pdu_len) {
		/* Whole SDU fits in one PDU */
		sar = L2CAP_SAR_UNSEGMENTED;
		sdu_len = 0;
		pdu_len = len;
	} else {
		/* The start frame carries the total SDU length */
		sar = L2CAP_SAR_START;
		sdu_len = len;
		pdu_len -= L2CAP_SDULEN_SIZE;
	}

	while (len > 0) {
		skb = l2cap_create_iframe_pdu(chan, msg, pdu_len, sdu_len);

		if (IS_ERR(skb)) {
			__skb_queue_purge(seg_queue);
			return PTR_ERR(skb);
		}

		bt_cb(skb)->control.sar = sar;
		__skb_queue_tail(seg_queue, skb);

		len -= pdu_len;
		if (sdu_len) {
			/* Frames after the first have no SDU-length
			 * field, so they can carry more payload.
			 */
			sdu_len = 0;
			pdu_len += L2CAP_SDULEN_SIZE;
		}

		if (len <= pdu_len) {
			sar = L2CAP_SAR_END;
			pdu_len = len;
		} else {
			sar = L2CAP_SAR_CONTINUE;
		}
	}

	return 0;
}
2443
/* Send user data on an L2CAP channel, dispatching on channel type and
 * mode.  Returns the number of bytes accepted (len) on success or a
 * negative errno.  For ERTM/streaming modes the SDU is segmented first;
 * segments not handed to the state machine are purged here.
 */
int l2cap_chan_send(struct l2cap_chan *chan, struct msghdr *msg, size_t len,
		    u32 priority)
{
	struct sk_buff *skb;
	int err;
	struct sk_buff_head seg_queue;

	/* Connectionless channel */
	if (chan->chan_type == L2CAP_CHAN_CONN_LESS) {
		skb = l2cap_create_connless_pdu(chan, msg, len, priority);
		if (IS_ERR(skb))
			return PTR_ERR(skb);

		l2cap_do_send(chan, skb);
		return len;
	}

	switch (chan->mode) {
	case L2CAP_MODE_BASIC:
		/* Check outgoing MTU */
		if (len > chan->omtu)
			return -EMSGSIZE;

		/* Create a basic PDU */
		skb = l2cap_create_basic_pdu(chan, msg, len, priority);
		if (IS_ERR(skb))
			return PTR_ERR(skb);

		l2cap_do_send(chan, skb);
		err = len;
		break;

	case L2CAP_MODE_ERTM:
	case L2CAP_MODE_STREAMING:
		/* Check outgoing MTU */
		if (len > chan->omtu) {
			err = -EMSGSIZE;
			break;
		}

		__skb_queue_head_init(&seg_queue);

		/* Do segmentation before calling in to the state machine,
		 * since it's possible to block while waiting for memory
		 * allocation.
		 */
		err = l2cap_segment_sdu(chan, &seg_queue, msg, len);

		/* The channel could have been closed while segmenting,
		 * check that it is still connected.
		 */
		if (chan->state != BT_CONNECTED) {
			/* Safe even if segmentation already purged the queue */
			__skb_queue_purge(&seg_queue);
			err = -ENOTCONN;
		}

		if (err)
			break;

		if (chan->mode == L2CAP_MODE_ERTM)
			l2cap_tx(chan, NULL, &seg_queue, L2CAP_EV_DATA_REQUEST);
		else
			l2cap_streaming_send(chan, &seg_queue);

		err = len;

		/* If the skbs were not queued for sending, they'll still be in
		 * seg_queue and need to be purged.
		 */
		__skb_queue_purge(&seg_queue);
		break;

	default:
		BT_DBG("bad state %1.1x", chan->mode);
		err = -EBADFD;
	}

	return err;
}
2523
/* Send SREJ S-frames for every missing sequence number between the next
 * expected TX sequence and @txseq, recording each requested sequence on
 * the srej_list.  Frames already buffered in srej_q are skipped.
 */
static void l2cap_send_srej(struct l2cap_chan *chan, u16 txseq)
{
	struct l2cap_ctrl control;
	u16 seq;

	BT_DBG("chan %p, txseq %u", chan, txseq);

	memset(&control, 0, sizeof(control));
	control.sframe = 1;
	control.super = L2CAP_SUPER_SREJ;

	for (seq = chan->expected_tx_seq; seq != txseq;
	     seq = __next_seq(chan, seq)) {
		/* Only request frames we have not already received */
		if (!l2cap_ertm_seq_in_queue(&chan->srej_q, seq)) {
			control.reqseq = seq;
			l2cap_send_sframe(chan, &control);
			l2cap_seq_list_append(&chan->srej_list, seq);
		}
	}

	/* Reception resumes after the frame that triggered the SREJs */
	chan->expected_tx_seq = __next_seq(chan, txseq);
}
2546
2547 static void l2cap_send_srej_tail(struct l2cap_chan *chan)
2548 {
2549 struct l2cap_ctrl control;
2550
2551 BT_DBG("chan %p", chan);
2552
2553 if (chan->srej_list.tail == L2CAP_SEQ_LIST_CLEAR)
2554 return;
2555
2556 memset(&control, 0, sizeof(control));
2557 control.sframe = 1;
2558 control.super = L2CAP_SUPER_SREJ;
2559 control.reqseq = chan->srej_list.tail;
2560 l2cap_send_sframe(chan, &control);
2561 }
2562
/* Re-send SREJ S-frames for all outstanding requests on srej_list,
 * except for @txseq (which has just arrived).  Each entry is popped,
 * re-sent, and re-appended; the captured initial head bounds the walk
 * to a single pass even though entries are re-added.
 */
static void l2cap_send_srej_list(struct l2cap_chan *chan, u16 txseq)
{
	struct l2cap_ctrl control;
	u16 initial_head;
	u16 seq;

	BT_DBG("chan %p, txseq %u", chan, txseq);

	memset(&control, 0, sizeof(control));
	control.sframe = 1;
	control.super = L2CAP_SUPER_SREJ;

	/* Capture initial list head to allow only one pass through the list. */
	initial_head = chan->srej_list.head;

	do {
		seq = l2cap_seq_list_pop(&chan->srej_list);
		/* Stop on the just-received frame or an empty list */
		if (seq == txseq || seq == L2CAP_SEQ_LIST_CLEAR)
			break;

		control.reqseq = seq;
		l2cap_send_sframe(chan, &control);
		/* Keep the request outstanding */
		l2cap_seq_list_append(&chan->srej_list, seq);
	} while (chan->srej_list.head != initial_head);
}
2588
/* Process an acknowledgment (ReqSeq) from the remote: free every
 * transmitted-but-unacked frame with a sequence number before @reqseq
 * and advance expected_ack_seq.  Stops the retransmission timer when
 * nothing remains unacknowledged.
 */
static void l2cap_process_reqseq(struct l2cap_chan *chan, u16 reqseq)
{
	struct sk_buff *acked_skb;
	u16 ackseq;

	BT_DBG("chan %p, reqseq %u", chan, reqseq);

	/* Nothing outstanding, or this ack adds no new information */
	if (chan->unacked_frames == 0 || reqseq == chan->expected_ack_seq)
		return;

	BT_DBG("expected_ack_seq %u, unacked_frames %u",
	       chan->expected_ack_seq, chan->unacked_frames);

	for (ackseq = chan->expected_ack_seq; ackseq != reqseq;
	     ackseq = __next_seq(chan, ackseq)) {

		acked_skb = l2cap_ertm_seq_in_queue(&chan->tx_q, ackseq);
		if (acked_skb) {
			skb_unlink(acked_skb, &chan->tx_q);
			kfree_skb(acked_skb);
			chan->unacked_frames--;
		}
	}

	chan->expected_ack_seq = reqseq;

	/* All in-flight frames acknowledged: no retransmission pending */
	if (chan->unacked_frames == 0)
		__clear_retrans_timer(chan);

	BT_DBG("unacked_frames %u", chan->unacked_frames);
}
2620
2621 static void l2cap_abort_rx_srej_sent(struct l2cap_chan *chan)
2622 {
2623 BT_DBG("chan %p", chan);
2624
2625 chan->expected_tx_seq = chan->buffer_seq;
2626 l2cap_seq_list_clear(&chan->srej_list);
2627 skb_queue_purge(&chan->srej_q);
2628 chan->rx_state = L2CAP_RX_STATE_RECV;
2629 }
2630
/* ERTM TX state machine handler for the XMIT state.  Data requests are
 * queued and transmitted immediately; poll events and retransmission
 * timeouts move the channel to WAIT_F while an F-bit response is
 * awaited.
 */
static void l2cap_tx_state_xmit(struct l2cap_chan *chan,
				struct l2cap_ctrl *control,
				struct sk_buff_head *skbs, u8 event)
{
	BT_DBG("chan %p, control %p, skbs %p, event %d", chan, control, skbs,
	       event);

	switch (event) {
	case L2CAP_EV_DATA_REQUEST:
		if (chan->tx_send_head == NULL)
			chan->tx_send_head = skb_peek(skbs);

		/* Take ownership of the segments and send what the window
		 * allows.
		 */
		skb_queue_splice_tail_init(skbs, &chan->tx_q);
		l2cap_ertm_send(chan);
		break;
	case L2CAP_EV_LOCAL_BUSY_DETECTED:
		BT_DBG("Enter LOCAL_BUSY");
		set_bit(CONN_LOCAL_BUSY, &chan->conn_state);

		if (chan->rx_state == L2CAP_RX_STATE_SREJ_SENT) {
			/* The SREJ_SENT state must be aborted if we are to
			 * enter the LOCAL_BUSY state.
			 */
			l2cap_abort_rx_srej_sent(chan);
		}

		l2cap_send_ack(chan);

		break;
	case L2CAP_EV_LOCAL_BUSY_CLEAR:
		BT_DBG("Exit LOCAL_BUSY");
		clear_bit(CONN_LOCAL_BUSY, &chan->conn_state);

		if (test_bit(CONN_RNR_SENT, &chan->conn_state)) {
			/* An RNR was sent; poll the remote with RR(P=1) and
			 * wait for the F-bit response in WAIT_F.
			 */
			struct l2cap_ctrl local_control;

			memset(&local_control, 0, sizeof(local_control));
			local_control.sframe = 1;
			local_control.super = L2CAP_SUPER_RR;
			local_control.poll = 1;
			local_control.reqseq = chan->buffer_seq;
			l2cap_send_sframe(chan, &local_control);

			chan->retry_count = 1;
			__set_monitor_timer(chan);
			chan->tx_state = L2CAP_TX_STATE_WAIT_F;
		}
		break;
	case L2CAP_EV_RECV_REQSEQ_AND_FBIT:
		l2cap_process_reqseq(chan, control->reqseq);
		break;
	case L2CAP_EV_EXPLICIT_POLL:
		l2cap_send_rr_or_rnr(chan, 1);
		chan->retry_count = 1;
		__set_monitor_timer(chan);
		__clear_ack_timer(chan);
		chan->tx_state = L2CAP_TX_STATE_WAIT_F;
		break;
	case L2CAP_EV_RETRANS_TO:
		/* Retransmission timeout: poll the remote for its state */
		l2cap_send_rr_or_rnr(chan, 1);
		chan->retry_count = 1;
		__set_monitor_timer(chan);
		chan->tx_state = L2CAP_TX_STATE_WAIT_F;
		break;
	case L2CAP_EV_RECV_FBIT:
		/* Nothing to process */
		break;
	default:
		break;
	}
}
2702
2703 static void l2cap_tx_state_wait_f(struct l2cap_chan *chan,
2704 struct l2cap_ctrl *control,
2705 struct sk_buff_head *skbs, u8 event)
2706 {
2707 BT_DBG("chan %p, control %p, skbs %p, event %d", chan, control, skbs,
2708 event);
2709
2710 switch (event) {
2711 case L2CAP_EV_DATA_REQUEST:
2712 if (chan->tx_send_head == NULL)
2713 chan->tx_send_head = skb_peek(skbs);
2714 /* Queue data, but don't send. */
2715 skb_queue_splice_tail_init(skbs, &chan->tx_q);
2716 break;
2717 case L2CAP_EV_LOCAL_BUSY_DETECTED:
2718 BT_DBG("Enter LOCAL_BUSY");
2719 set_bit(CONN_LOCAL_BUSY, &chan->conn_state);
2720
2721 if (chan->rx_state == L2CAP_RX_STATE_SREJ_SENT) {
2722 /* The SREJ_SENT state must be aborted if we are to
2723 * enter the LOCAL_BUSY state.
2724 */
2725 l2cap_abort_rx_srej_sent(chan);
2726 }
2727
2728 l2cap_send_ack(chan);
2729
2730 break;
2731 case L2CAP_EV_LOCAL_BUSY_CLEAR:
2732 BT_DBG("Exit LOCAL_BUSY");
2733 clear_bit(CONN_LOCAL_BUSY, &chan->conn_state);
2734
2735 if (test_bit(CONN_RNR_SENT, &chan->conn_state)) {
2736 struct l2cap_ctrl local_control;
2737 memset(&local_control, 0, sizeof(local_control));
2738 local_control.sframe = 1;
2739 local_control.super = L2CAP_SUPER_RR;
2740 local_control.poll = 1;
2741 local_control.reqseq = chan->buffer_seq;
2742 l2cap_send_sframe(chan, &local_control);
2743
2744 chan->retry_count = 1;
2745 __set_monitor_timer(chan);
2746 chan->tx_state = L2CAP_TX_STATE_WAIT_F;
2747 }
2748 break;
2749 case L2CAP_EV_RECV_REQSEQ_AND_FBIT:
2750 l2cap_process_reqseq(chan, control->reqseq);
2751
2752 /* Fall through */
2753
2754 case L2CAP_EV_RECV_FBIT:
2755 if (control && control->final) {
2756 __clear_monitor_timer(chan);
2757 if (chan->unacked_frames > 0)
2758 __set_retrans_timer(chan);
2759 chan->retry_count = 0;
2760 chan->tx_state = L2CAP_TX_STATE_XMIT;
2761 BT_DBG("recv fbit tx_state 0x2.2%x", chan->tx_state);
2762 }
2763 break;
2764 case L2CAP_EV_EXPLICIT_POLL:
2765 /* Ignore */
2766 break;
2767 case L2CAP_EV_MONITOR_TO:
2768 if (chan->max_tx == 0 || chan->retry_count < chan->max_tx) {
2769 l2cap_send_rr_or_rnr(chan, 1);
2770 __set_monitor_timer(chan);
2771 chan->retry_count++;
2772 } else {
2773 l2cap_send_disconn_req(chan, ECONNABORTED);
2774 }
2775 break;
2776 default:
2777 break;
2778 }
2779 }
2780
2781 static void l2cap_tx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
2782 struct sk_buff_head *skbs, u8 event)
2783 {
2784 BT_DBG("chan %p, control %p, skbs %p, event %d, state %d",
2785 chan, control, skbs, event, chan->tx_state);
2786
2787 switch (chan->tx_state) {
2788 case L2CAP_TX_STATE_XMIT:
2789 l2cap_tx_state_xmit(chan, control, skbs, event);
2790 break;
2791 case L2CAP_TX_STATE_WAIT_F:
2792 l2cap_tx_state_wait_f(chan, control, skbs, event);
2793 break;
2794 default:
2795 /* Ignore event */
2796 break;
2797 }
2798 }
2799
2800 static void l2cap_pass_to_tx(struct l2cap_chan *chan,
2801 struct l2cap_ctrl *control)
2802 {
2803 BT_DBG("chan %p, control %p", chan, control);
2804 l2cap_tx(chan, control, NULL, L2CAP_EV_RECV_REQSEQ_AND_FBIT);
2805 }
2806
2807 static void l2cap_pass_to_tx_fbit(struct l2cap_chan *chan,
2808 struct l2cap_ctrl *control)
2809 {
2810 BT_DBG("chan %p, control %p", chan, control);
2811 l2cap_tx(chan, control, NULL, L2CAP_EV_RECV_FBIT);
2812 }
2813
2814 /* Copy frame to all raw sockets on that connection */
2815 static void l2cap_raw_recv(struct l2cap_conn *conn, struct sk_buff *skb)
2816 {
2817 struct sk_buff *nskb;
2818 struct l2cap_chan *chan;
2819
2820 BT_DBG("conn %p", conn);
2821
2822 mutex_lock(&conn->chan_lock);
2823
2824 list_for_each_entry(chan, &conn->chan_l, list) {
2825 struct sock *sk = chan->sk;
2826 if (chan->chan_type != L2CAP_CHAN_RAW)
2827 continue;
2828
2829 /* Don't send frame to the socket it came from */
2830 if (skb->sk == sk)
2831 continue;
2832 nskb = skb_clone(skb, GFP_KERNEL);
2833 if (!nskb)
2834 continue;
2835
2836 if (chan->ops->recv(chan, nskb))
2837 kfree_skb(nskb);
2838 }
2839
2840 mutex_unlock(&conn->chan_lock);
2841 }
2842
2843 /* ---- L2CAP signalling commands ---- */
/* Build an skb for an L2CAP signalling command: L2CAP header + command
 * header + @dlen bytes of @data.  If the command exceeds the connection
 * MTU, the remainder is attached as header-less continuation fragments
 * on the skb frag_list.  Returns the skb or NULL on failure; caller
 * owns the returned skb.
 */
static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn, u8 code,
				       u8 ident, u16 dlen, void *data)
{
	struct sk_buff *skb, **frag;
	struct l2cap_cmd_hdr *cmd;
	struct l2cap_hdr *lh;
	int len, count;

	BT_DBG("conn %p, code 0x%2.2x, ident 0x%2.2x, len %u",
	       conn, code, ident, dlen);

	/* The first fragment must at least hold both headers */
	if (conn->mtu < L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE)
		return NULL;

	len = L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE + dlen;
	count = min_t(unsigned int, conn->mtu, len);

	skb = bt_skb_alloc(count, GFP_KERNEL);
	if (!skb)
		return NULL;

	lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
	lh->len = cpu_to_le16(L2CAP_CMD_HDR_SIZE + dlen);

	/* Signalling CID differs between LE and BR/EDR links */
	if (conn->hcon->type == LE_LINK)
		lh->cid = __constant_cpu_to_le16(L2CAP_CID_LE_SIGNALING);
	else
		lh->cid = __constant_cpu_to_le16(L2CAP_CID_SIGNALING);

	cmd = (struct l2cap_cmd_hdr *) skb_put(skb, L2CAP_CMD_HDR_SIZE);
	cmd->code  = code;
	cmd->ident = ident;
	cmd->len   = cpu_to_le16(dlen);

	if (dlen) {
		/* Fill the rest of the first fragment with payload */
		count -= L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE;
		memcpy(skb_put(skb, count), data, count);
		data += count;
	}

	len -= skb->len;

	/* Continuation fragments (no L2CAP header) */
	frag = &skb_shinfo(skb)->frag_list;
	while (len) {
		count = min_t(unsigned int, conn->mtu, len);

		*frag = bt_skb_alloc(count, GFP_KERNEL);
		if (!*frag)
			goto fail;

		memcpy(skb_put(*frag, count), data, count);

		len  -= count;
		data += count;

		frag = &(*frag)->next;
	}

	return skb;

fail:
	/* Frees the whole chain, including fragments built so far */
	kfree_skb(skb);
	return NULL;
}
2909
/* Parse one configuration option at *ptr, returning its total encoded
 * length and advancing *ptr past it.  Options of length 1/2/4 are
 * decoded into *val; longer options return a pointer to the raw bytes
 * in *val instead.
 *
 * NOTE(review): opt->len comes from the remote peer and is not checked
 * against the remaining buffer length here — callers bound the loop on
 * the running total only after the read.  Later upstream kernels added
 * an explicit buffer-size check to this helper; verify this path cannot
 * read past the received frame.
 */
static inline int l2cap_get_conf_opt(void **ptr, int *type, int *olen,
				     unsigned long *val)
{
	struct l2cap_conf_opt *opt = *ptr;
	int len;

	len = L2CAP_CONF_OPT_SIZE + opt->len;
	*ptr += len;

	*type = opt->type;
	*olen = opt->len;

	switch (opt->len) {
	case 1:
		*val = *((u8 *) opt->val);
		break;

	case 2:
		*val = get_unaligned_le16(opt->val);
		break;

	case 4:
		*val = get_unaligned_le32(opt->val);
		break;

	default:
		/* Variable-length option: hand back a pointer, not a value */
		*val = (unsigned long) opt->val;
		break;
	}

	BT_DBG("type 0x%2.2x len %u val 0x%lx", *type, opt->len, *val);
	return len;
}
2943
2944 static void l2cap_add_conf_opt(void **ptr, u8 type, u8 len, unsigned long val)
2945 {
2946 struct l2cap_conf_opt *opt = *ptr;
2947
2948 BT_DBG("type 0x%2.2x len %u val 0x%lx", type, len, val);
2949
2950 opt->type = type;
2951 opt->len = len;
2952
2953 switch (len) {
2954 case 1:
2955 *((u8 *) opt->val) = val;
2956 break;
2957
2958 case 2:
2959 put_unaligned_le16(val, opt->val);
2960 break;
2961
2962 case 4:
2963 put_unaligned_le32(val, opt->val);
2964 break;
2965
2966 default:
2967 memcpy(opt->val, (void *) val, len);
2968 break;
2969 }
2970
2971 *ptr += L2CAP_CONF_OPT_SIZE + len;
2972 }
2973
2974 static void l2cap_add_opt_efs(void **ptr, struct l2cap_chan *chan)
2975 {
2976 struct l2cap_conf_efs efs;
2977
2978 switch (chan->mode) {
2979 case L2CAP_MODE_ERTM:
2980 efs.id = chan->local_id;
2981 efs.stype = chan->local_stype;
2982 efs.msdu = cpu_to_le16(chan->local_msdu);
2983 efs.sdu_itime = cpu_to_le32(chan->local_sdu_itime);
2984 efs.acc_lat = __constant_cpu_to_le32(L2CAP_DEFAULT_ACC_LAT);
2985 efs.flush_to = __constant_cpu_to_le32(L2CAP_EFS_DEFAULT_FLUSH_TO);
2986 break;
2987
2988 case L2CAP_MODE_STREAMING:
2989 efs.id = 1;
2990 efs.stype = L2CAP_SERV_BESTEFFORT;
2991 efs.msdu = cpu_to_le16(chan->local_msdu);
2992 efs.sdu_itime = cpu_to_le32(chan->local_sdu_itime);
2993 efs.acc_lat = 0;
2994 efs.flush_to = 0;
2995 break;
2996
2997 default:
2998 return;
2999 }
3000
3001 l2cap_add_conf_opt(ptr, L2CAP_CONF_EFS, sizeof(efs),
3002 (unsigned long) &efs);
3003 }
3004
/* Delayed-work handler for the ERTM ack timer: if any received frames
 * have not yet been acknowledged, send an RR/RNR now.  Drops the
 * channel reference taken when the timer was armed.
 */
static void l2cap_ack_timeout(struct work_struct *work)
{
	struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
					       ack_timer.work);
	u16 frames_to_ack;

	BT_DBG("chan %p", chan);

	l2cap_chan_lock(chan);

	/* Number of frames received since the last ack we sent */
	frames_to_ack = __seq_offset(chan, chan->buffer_seq,
				     chan->last_acked_seq);

	if (frames_to_ack)
		l2cap_send_rr_or_rnr(chan, 0);

	l2cap_chan_unlock(chan);
	/* Balances the reference held for the pending timer */
	l2cap_chan_put(chan);
}
3024
/* Initialize per-channel ERTM/streaming transmit and receive state.
 * For streaming mode only the sequence counters and tx queue are set
 * up; full ERTM mode additionally arms the timers and allocates the
 * SREJ/retransmit sequence lists.  Returns 0 or a negative errno (on
 * failure any partially allocated list is freed).
 */
int l2cap_ertm_init(struct l2cap_chan *chan)
{
	int err;

	chan->next_tx_seq = 0;
	chan->expected_tx_seq = 0;
	chan->expected_ack_seq = 0;
	chan->unacked_frames = 0;
	chan->buffer_seq = 0;
	chan->frames_sent = 0;
	chan->last_acked_seq = 0;
	chan->sdu = NULL;
	chan->sdu_last_frag = NULL;
	chan->sdu_len = 0;

	skb_queue_head_init(&chan->tx_q);

	/* AMP channel-move state starts out stable on the BR/EDR controller */
	chan->local_amp_id = 0;
	chan->move_id = 0;
	chan->move_state = L2CAP_MOVE_STABLE;
	chan->move_role = L2CAP_MOVE_ROLE_NONE;

	/* Streaming mode needs none of the ERTM machinery below */
	if (chan->mode != L2CAP_MODE_ERTM)
		return 0;

	chan->rx_state = L2CAP_RX_STATE_RECV;
	chan->tx_state = L2CAP_TX_STATE_XMIT;

	INIT_DELAYED_WORK(&chan->retrans_timer, l2cap_retrans_timeout);
	INIT_DELAYED_WORK(&chan->monitor_timer, l2cap_monitor_timeout);
	INIT_DELAYED_WORK(&chan->ack_timer, l2cap_ack_timeout);

	skb_queue_head_init(&chan->srej_q);

	err = l2cap_seq_list_init(&chan->srej_list, chan->tx_win);
	if (err < 0)
		return err;

	err = l2cap_seq_list_init(&chan->retrans_list, chan->remote_tx_win);
	if (err < 0)
		/* Unwind the first allocation on failure */
		l2cap_seq_list_free(&chan->srej_list);

	return err;
}
3069
3070 static inline __u8 l2cap_select_mode(__u8 mode, __u16 remote_feat_mask)
3071 {
3072 switch (mode) {
3073 case L2CAP_MODE_STREAMING:
3074 case L2CAP_MODE_ERTM:
3075 if (l2cap_mode_supported(mode, remote_feat_mask))
3076 return mode;
3077 /* fall through */
3078 default:
3079 return L2CAP_MODE_BASIC;
3080 }
3081 }
3082
3083 static inline bool __l2cap_ews_supported(struct l2cap_chan *chan)
3084 {
3085 return enable_hs && chan->conn->feat_mask & L2CAP_FEAT_EXT_WINDOW;
3086 }
3087
3088 static inline bool __l2cap_efs_supported(struct l2cap_chan *chan)
3089 {
3090 return enable_hs && chan->conn->feat_mask & L2CAP_FEAT_EXT_FLOW;
3091 }
3092
/* Fill in the retransmission and monitor timeouts of @rfc.  On an AMP
 * (high-speed) link they are derived from the controller's best-effort
 * flush timeout; otherwise the spec defaults are used.
 */
static void __l2cap_set_ertm_timeouts(struct l2cap_chan *chan,
				      struct l2cap_conf_rfc *rfc)
{
	if (chan->local_amp_id && chan->hs_hcon) {
		/* Flush timeout is reported in microseconds by the AMP hdev */
		u64 ertm_to = chan->hs_hcon->hdev->amp_be_flush_to;

		/* Class 1 devices have must have ERTM timeouts
		 * exceeding the Link Supervision Timeout.  The
		 * default Link Supervision Timeout for AMP
		 * controllers is 10 seconds.
		 *
		 * Class 1 devices use 0xffffffff for their
		 * best-effort flush timeout, so the clamping logic
		 * will result in a timeout that meets the above
		 * requirement.  ERTM timeouts are 16-bit values, so
		 * the maximum timeout is 65.535 seconds.
		 */

		/* Convert timeout to milliseconds and round */
		ertm_to = DIV_ROUND_UP_ULL(ertm_to, 1000);

		/* This is the recommended formula for class 2 devices
		 * that start ERTM timers when packets are sent to the
		 * controller.
		 */
		ertm_to = 3 * ertm_to + 500;

		/* Clamp to the 16-bit field in the RFC option */
		if (ertm_to > 0xffff)
			ertm_to = 0xffff;

		rfc->retrans_timeout = cpu_to_le16((u16) ertm_to);
		rfc->monitor_timeout = rfc->retrans_timeout;
	} else {
		rfc->retrans_timeout = __constant_cpu_to_le16(L2CAP_DEFAULT_RETRANS_TO);
		rfc->monitor_timeout = __constant_cpu_to_le16(L2CAP_DEFAULT_MONITOR_TO);
	}
}
3130
3131 static inline void l2cap_txwin_setup(struct l2cap_chan *chan)
3132 {
3133 if (chan->tx_win > L2CAP_DEFAULT_TX_WINDOW &&
3134 __l2cap_ews_supported(chan)) {
3135 /* use extended control field */
3136 set_bit(FLAG_EXT_CTRL, &chan->flags);
3137 chan->tx_win_max = L2CAP_DEFAULT_EXT_WINDOW;
3138 } else {
3139 chan->tx_win = min_t(u16, chan->tx_win,
3140 L2CAP_DEFAULT_TX_WINDOW);
3141 chan->tx_win_max = L2CAP_DEFAULT_TX_WINDOW;
3142 }
3143 chan->ack_win = chan->tx_win;
3144 }
3145
/* Build a Configuration Request for @chan into @data and return its
 * length.  On the first request the operating mode is (re)selected
 * based on remote features; subsequent requests keep the current mode.
 * Options are emitted in wire order: MTU, RFC, then mode-specific
 * EFS/EWS/FCS options.
 *
 * NOTE(review): @data has no explicit size bound here — callers pass a
 * fixed stack buffer; confirm the emitted options cannot exceed it.
 */
static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data)
{
	struct l2cap_conf_req *req = data;
	struct l2cap_conf_rfc rfc = { .mode = chan->mode };
	void *ptr = req->data;
	u16 size;

	BT_DBG("chan %p", chan);

	/* Mode selection only happens on the very first config exchange */
	if (chan->num_conf_req || chan->num_conf_rsp)
		goto done;

	switch (chan->mode) {
	case L2CAP_MODE_STREAMING:
	case L2CAP_MODE_ERTM:
		if (test_bit(CONF_STATE2_DEVICE, &chan->conf_state))
			break;

		if (__l2cap_efs_supported(chan))
			set_bit(FLAG_EFS_ENABLE, &chan->flags);

		/* fall through */
	default:
		chan->mode = l2cap_select_mode(rfc.mode, chan->conn->feat_mask);
		break;
	}

done:
	/* Only advertise a non-default receive MTU */
	if (chan->imtu != L2CAP_DEFAULT_MTU)
		l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->imtu);

	switch (chan->mode) {
	case L2CAP_MODE_BASIC:
		/* Explicit basic-mode RFC is only needed when the remote
		 * supports the richer modes.
		 */
		if (!(chan->conn->feat_mask & L2CAP_FEAT_ERTM) &&
		    !(chan->conn->feat_mask & L2CAP_FEAT_STREAMING))
			break;

		rfc.mode            = L2CAP_MODE_BASIC;
		rfc.txwin_size      = 0;
		rfc.max_transmit    = 0;
		rfc.retrans_timeout = 0;
		rfc.monitor_timeout = 0;
		rfc.max_pdu_size    = 0;

		l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
				   (unsigned long) &rfc);
		break;

	case L2CAP_MODE_ERTM:
		rfc.mode            = L2CAP_MODE_ERTM;
		rfc.max_transmit    = chan->max_tx;

		__l2cap_set_ertm_timeouts(chan, &rfc);

		/* MPS must leave room for the extended header, SDU length
		 * and FCS within the HCI MTU.
		 */
		size = min_t(u16, L2CAP_DEFAULT_MAX_PDU_SIZE, chan->conn->mtu -
			     L2CAP_EXT_HDR_SIZE - L2CAP_SDULEN_SIZE -
			     L2CAP_FCS_SIZE);
		rfc.max_pdu_size = cpu_to_le16(size);

		l2cap_txwin_setup(chan);

		rfc.txwin_size = min_t(u16, chan->tx_win,
				       L2CAP_DEFAULT_TX_WINDOW);

		l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
				   (unsigned long) &rfc);

		if (test_bit(FLAG_EFS_ENABLE, &chan->flags))
			l2cap_add_opt_efs(&ptr, chan);

		/* Full window only fits in the extended-window option */
		if (test_bit(FLAG_EXT_CTRL, &chan->flags))
			l2cap_add_conf_opt(&ptr, L2CAP_CONF_EWS, 2,
					   chan->tx_win);

		if (chan->conn->feat_mask & L2CAP_FEAT_FCS)
			if (chan->fcs == L2CAP_FCS_NONE ||
			    test_bit(CONF_RECV_NO_FCS, &chan->conf_state)) {
				chan->fcs = L2CAP_FCS_NONE;
				l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1,
						   chan->fcs);
			}
		break;

	case L2CAP_MODE_STREAMING:
		l2cap_txwin_setup(chan);
		rfc.mode            = L2CAP_MODE_STREAMING;
		rfc.txwin_size      = 0;
		rfc.max_transmit    = 0;
		rfc.retrans_timeout = 0;
		rfc.monitor_timeout = 0;

		size = min_t(u16, L2CAP_DEFAULT_MAX_PDU_SIZE, chan->conn->mtu -
			     L2CAP_EXT_HDR_SIZE - L2CAP_SDULEN_SIZE -
			     L2CAP_FCS_SIZE);
		rfc.max_pdu_size = cpu_to_le16(size);

		l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
				   (unsigned long) &rfc);

		if (test_bit(FLAG_EFS_ENABLE, &chan->flags))
			l2cap_add_opt_efs(&ptr, chan);

		if (chan->conn->feat_mask & L2CAP_FEAT_FCS)
			if (chan->fcs == L2CAP_FCS_NONE ||
			    test_bit(CONF_RECV_NO_FCS, &chan->conf_state)) {
				chan->fcs = L2CAP_FCS_NONE;
				l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1,
						   chan->fcs);
			}
		break;
	}

	req->dcid  = cpu_to_le16(chan->dcid);
	req->flags = __constant_cpu_to_le16(0);

	return ptr - data;
}
3263
/* Parse the remote's accumulated Configuration Request (chan->conf_req
 * / conf_len) and build our Configuration Response into @data.
 * Returns the response length, or -ECONNREFUSED when the requested
 * parameters cannot be negotiated.
 *
 * NOTE(review): the response is written through @data with no explicit
 * size bound — callers pass a fixed buffer; this area received bounds
 * hardening in later upstream kernels (CVE-2017-1000251 fix added
 * data_size parameters).  Verify callers' buffers are large enough.
 */
static int l2cap_parse_conf_req(struct l2cap_chan *chan, void *data)
{
	struct l2cap_conf_rsp *rsp = data;
	void *ptr = rsp->data;
	void *req = chan->conf_req;
	int len = chan->conf_len;
	int type, hint, olen;
	unsigned long val;
	struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
	struct l2cap_conf_efs efs;
	u8 remote_efs = 0;
	u16 mtu = L2CAP_DEFAULT_MTU;
	u16 result = L2CAP_CONF_SUCCESS;
	u16 size;

	BT_DBG("chan %p", chan);

	/* First pass: decode every option the remote sent */
	while (len >= L2CAP_CONF_OPT_SIZE) {
		len -= l2cap_get_conf_opt(&req, &type, &olen, &val);

		hint = type & L2CAP_CONF_HINT;
		type &= L2CAP_CONF_MASK;

		switch (type) {
		case L2CAP_CONF_MTU:
			mtu = val;
			break;

		case L2CAP_CONF_FLUSH_TO:
			chan->flush_to = val;
			break;

		case L2CAP_CONF_QOS:
			/* QoS option accepted but not acted upon */
			break;

		case L2CAP_CONF_RFC:
			if (olen == sizeof(rfc))
				memcpy(&rfc, (void *) val, olen);
			break;

		case L2CAP_CONF_FCS:
			if (val == L2CAP_FCS_NONE)
				set_bit(CONF_RECV_NO_FCS, &chan->conf_state);
			break;

		case L2CAP_CONF_EFS:
			remote_efs = 1;
			if (olen == sizeof(efs))
				memcpy(&efs, (void *) val, olen);
			break;

		case L2CAP_CONF_EWS:
			/* Extended window requires high-speed support */
			if (!enable_hs)
				return -ECONNREFUSED;

			set_bit(FLAG_EXT_CTRL, &chan->flags);
			set_bit(CONF_EWS_RECV, &chan->conf_state);
			chan->tx_win_max = L2CAP_DEFAULT_EXT_WINDOW;
			chan->remote_tx_win = val;
			break;

		default:
			/* Unknown hints are ignored; unknown options are
			 * echoed back in an UNKNOWN response.
			 */
			if (hint)
				break;

			result = L2CAP_CONF_UNKNOWN;
			*((u8 *) ptr++) = type;
			break;
		}
	}

	/* Mode may only be renegotiated early in the exchange */
	if (chan->num_conf_rsp || chan->num_conf_req > 1)
		goto done;

	switch (chan->mode) {
	case L2CAP_MODE_STREAMING:
	case L2CAP_MODE_ERTM:
		if (!test_bit(CONF_STATE2_DEVICE, &chan->conf_state)) {
			chan->mode = l2cap_select_mode(rfc.mode,
						       chan->conn->feat_mask);
			break;
		}

		if (remote_efs) {
			if (__l2cap_efs_supported(chan))
				set_bit(FLAG_EFS_ENABLE, &chan->flags);
			else
				return -ECONNREFUSED;
		}

		/* State-2 devices insist on their configured mode */
		if (chan->mode != rfc.mode)
			return -ECONNREFUSED;

		break;
	}

done:
	if (chan->mode != rfc.mode) {
		result = L2CAP_CONF_UNACCEPT;
		rfc.mode = chan->mode;

		/* One round of mode rejection is allowed; a second refusal
		 * terminates the connection attempt.
		 */
		if (chan->num_conf_rsp == 1)
			return -ECONNREFUSED;

		l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
				   (unsigned long) &rfc);
	}

	if (result == L2CAP_CONF_SUCCESS) {
		/* Configure output options and let the other side know
		 * which ones we don't like. */

		if (mtu < L2CAP_DEFAULT_MIN_MTU)
			result = L2CAP_CONF_UNACCEPT;
		else {
			chan->omtu = mtu;
			set_bit(CONF_MTU_DONE, &chan->conf_state);
		}
		l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->omtu);

		if (remote_efs) {
			if (chan->local_stype != L2CAP_SERV_NOTRAFIC &&
			    efs.stype != L2CAP_SERV_NOTRAFIC &&
			    efs.stype != chan->local_stype) {

				result = L2CAP_CONF_UNACCEPT;

				if (chan->num_conf_req >= 1)
					return -ECONNREFUSED;

				l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS,
						   sizeof(efs),
						   (unsigned long) &efs);
			} else {
				/* Send PENDING Conf Rsp */
				result = L2CAP_CONF_PENDING;
				set_bit(CONF_LOC_CONF_PEND, &chan->conf_state);
			}
		}

		switch (rfc.mode) {
		case L2CAP_MODE_BASIC:
			chan->fcs = L2CAP_FCS_NONE;
			set_bit(CONF_MODE_DONE, &chan->conf_state);
			break;

		case L2CAP_MODE_ERTM:
			/* EWS (if received) supersedes the RFC window size */
			if (!test_bit(CONF_EWS_RECV, &chan->conf_state))
				chan->remote_tx_win = rfc.txwin_size;
			else
				rfc.txwin_size = L2CAP_DEFAULT_TX_WINDOW;

			chan->remote_max_tx = rfc.max_transmit;

			/* Clamp the remote MPS so PDUs fit our HCI MTU */
			size = min_t(u16, le16_to_cpu(rfc.max_pdu_size),
				     chan->conn->mtu - L2CAP_EXT_HDR_SIZE -
				     L2CAP_SDULEN_SIZE - L2CAP_FCS_SIZE);
			rfc.max_pdu_size = cpu_to_le16(size);
			chan->remote_mps = size;

			__l2cap_set_ertm_timeouts(chan, &rfc);

			set_bit(CONF_MODE_DONE, &chan->conf_state);

			l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
					   sizeof(rfc), (unsigned long) &rfc);

			if (test_bit(FLAG_EFS_ENABLE, &chan->flags)) {
				chan->remote_id = efs.id;
				chan->remote_stype = efs.stype;
				chan->remote_msdu = le16_to_cpu(efs.msdu);
				chan->remote_flush_to =
					le32_to_cpu(efs.flush_to);
				chan->remote_acc_lat =
					le32_to_cpu(efs.acc_lat);
				chan->remote_sdu_itime =
					le32_to_cpu(efs.sdu_itime);
				l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS,
						   sizeof(efs),
						   (unsigned long) &efs);
			}
			break;

		case L2CAP_MODE_STREAMING:
			size = min_t(u16, le16_to_cpu(rfc.max_pdu_size),
				     chan->conn->mtu - L2CAP_EXT_HDR_SIZE -
				     L2CAP_SDULEN_SIZE - L2CAP_FCS_SIZE);
			rfc.max_pdu_size = cpu_to_le16(size);
			chan->remote_mps = size;

			set_bit(CONF_MODE_DONE, &chan->conf_state);

			l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
					   (unsigned long) &rfc);

			break;

		default:
			result = L2CAP_CONF_UNACCEPT;

			memset(&rfc, 0, sizeof(rfc));
			rfc.mode = chan->mode;
		}

		if (result == L2CAP_CONF_SUCCESS)
			set_bit(CONF_OUTPUT_DONE, &chan->conf_state);
	}
	rsp->scid   = cpu_to_le16(chan->dcid);
	rsp->result = cpu_to_le16(result);
	rsp->flags  = __constant_cpu_to_le16(0);

	return ptr - data;
}
3477
/* Parse the remote's Configuration Response (@rsp, @len) and build a
 * follow-up Configuration Request into @data, adopting the parameters
 * the remote proposed.  *result may be downgraded to UNACCEPT (e.g.
 * too-small MTU).  Returns the request length or -ECONNREFUSED.
 *
 * NOTE(review): when an RFC/EFS option arrives with the wrong length,
 * the stale stack contents of rfc/efs are used below — later upstream
 * kernels reject short options here; verify this cannot be triggered
 * by a hostile peer.
 */
static int l2cap_parse_conf_rsp(struct l2cap_chan *chan, void *rsp, int len,
				void *data, u16 *result)
{
	struct l2cap_conf_req *req = data;
	void *ptr = req->data;
	int type, olen;
	unsigned long val;
	struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
	struct l2cap_conf_efs efs;

	BT_DBG("chan %p, rsp %p, len %d, req %p", chan, rsp, len, data);

	while (len >= L2CAP_CONF_OPT_SIZE) {
		len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);

		switch (type) {
		case L2CAP_CONF_MTU:
			if (val < L2CAP_DEFAULT_MIN_MTU) {
				*result = L2CAP_CONF_UNACCEPT;
				chan->imtu = L2CAP_DEFAULT_MIN_MTU;
			} else
				chan->imtu = val;
			l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->imtu);
			break;

		case L2CAP_CONF_FLUSH_TO:
			chan->flush_to = val;
			l2cap_add_conf_opt(&ptr, L2CAP_CONF_FLUSH_TO,
					   2, chan->flush_to);
			break;

		case L2CAP_CONF_RFC:
			if (olen == sizeof(rfc))
				memcpy(&rfc, (void *)val, olen);

			/* State-2 devices refuse any mode change */
			if (test_bit(CONF_STATE2_DEVICE, &chan->conf_state) &&
			    rfc.mode != chan->mode)
				return -ECONNREFUSED;

			chan->fcs = 0;

			l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
					   sizeof(rfc), (unsigned long) &rfc);
			break;

		case L2CAP_CONF_EWS:
			chan->ack_win = min_t(u16, val, chan->ack_win);
			l2cap_add_conf_opt(&ptr, L2CAP_CONF_EWS, 2,
					   chan->tx_win);
			break;

		case L2CAP_CONF_EFS:
			if (olen == sizeof(efs))
				memcpy(&efs, (void *)val, olen);

			/* Service type must match ours unless one side is
			 * "no traffic".
			 */
			if (chan->local_stype != L2CAP_SERV_NOTRAFIC &&
			    efs.stype != L2CAP_SERV_NOTRAFIC &&
			    efs.stype != chan->local_stype)
				return -ECONNREFUSED;

			l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS, sizeof(efs),
					   (unsigned long) &efs);
			break;

		case L2CAP_CONF_FCS:
			if (*result == L2CAP_CONF_PENDING)
				if (val == L2CAP_FCS_NONE)
					set_bit(CONF_RECV_NO_FCS,
						&chan->conf_state);
			break;
		}
	}

	/* Basic mode cannot be upgraded by the remote's response */
	if (chan->mode == L2CAP_MODE_BASIC && chan->mode != rfc.mode)
		return -ECONNREFUSED;

	chan->mode = rfc.mode;

	if (*result == L2CAP_CONF_SUCCESS || *result == L2CAP_CONF_PENDING) {
		switch (rfc.mode) {
		case L2CAP_MODE_ERTM:
			chan->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
			chan->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
			chan->mps    = le16_to_cpu(rfc.max_pdu_size);
			/* Without extended control the RFC window caps acks */
			if (!test_bit(FLAG_EXT_CTRL, &chan->flags))
				chan->ack_win = min_t(u16, chan->ack_win,
						      rfc.txwin_size);

			if (test_bit(FLAG_EFS_ENABLE, &chan->flags)) {
				chan->local_msdu = le16_to_cpu(efs.msdu);
				chan->local_sdu_itime =
					le32_to_cpu(efs.sdu_itime);
				chan->local_acc_lat = le32_to_cpu(efs.acc_lat);
				chan->local_flush_to =
					le32_to_cpu(efs.flush_to);
			}
			break;

		case L2CAP_MODE_STREAMING:
			chan->mps    = le16_to_cpu(rfc.max_pdu_size);
		}
	}

	req->dcid   = cpu_to_le16(chan->dcid);
	req->flags  = __constant_cpu_to_le16(0);

	return ptr - data;
}
3586
3587 static int l2cap_build_conf_rsp(struct l2cap_chan *chan, void *data,
3588 u16 result, u16 flags)
3589 {
3590 struct l2cap_conf_rsp *rsp = data;
3591 void *ptr = rsp->data;
3592
3593 BT_DBG("chan %p", chan);
3594
3595 rsp->scid = cpu_to_le16(chan->dcid);
3596 rsp->result = cpu_to_le16(result);
3597 rsp->flags = cpu_to_le16(flags);
3598
3599 return ptr - data;
3600 }
3601
/* Send a deferred success response to a pending Connect/Create-Channel
 * request, then kick off configuration by sending our first
 * Configuration Request (exactly once per channel).
 *
 * NOTE(review): buf is a fixed 128-byte buffer handed to
 * l2cap_build_conf_req with no size bound — this matches the pattern
 * hardened upstream for CVE-2017-1000251; confirm the built request
 * cannot exceed 128 bytes.
 */
void __l2cap_connect_rsp_defer(struct l2cap_chan *chan)
{
	struct l2cap_conn_rsp rsp;
	struct l2cap_conn *conn = chan->conn;
	u8 buf[128];
	u8 rsp_code;

	rsp.scid = cpu_to_le16(chan->dcid);
	rsp.dcid = cpu_to_le16(chan->scid);
	rsp.result = __constant_cpu_to_le16(L2CAP_CR_SUCCESS);
	rsp.status = __constant_cpu_to_le16(L2CAP_CS_NO_INFO);

	/* AMP channels answer with a Create Channel Response instead */
	if (chan->hs_hcon)
		rsp_code = L2CAP_CREATE_CHAN_RSP;
	else
		rsp_code = L2CAP_CONN_RSP;

	BT_DBG("chan %p rsp_code %u", chan, rsp_code);

	l2cap_send_cmd(conn, chan->ident, rsp_code, sizeof(rsp), &rsp);

	/* Only the first caller proceeds to send the config request */
	if (test_and_set_bit(CONF_REQ_SENT, &chan->conf_state))
		return;

	l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
		       l2cap_build_conf_req(chan, buf), buf);
	chan->num_conf_req++;
}
3630
3631 static void l2cap_conf_rfc_get(struct l2cap_chan *chan, void *rsp, int len)
3632 {
3633 int type, olen;
3634 unsigned long val;
3635 /* Use sane default values in case a misbehaving remote device
3636 * did not send an RFC or extended window size option.
3637 */
3638 u16 txwin_ext = chan->ack_win;
3639 struct l2cap_conf_rfc rfc = {
3640 .mode = chan->mode,
3641 .retrans_timeout = __constant_cpu_to_le16(L2CAP_DEFAULT_RETRANS_TO),
3642 .monitor_timeout = __constant_cpu_to_le16(L2CAP_DEFAULT_MONITOR_TO),
3643 .max_pdu_size = cpu_to_le16(chan->imtu),
3644 .txwin_size = min_t(u16, chan->ack_win, L2CAP_DEFAULT_TX_WINDOW),
3645 };
3646
3647 BT_DBG("chan %p, rsp %p, len %d", chan, rsp, len);
3648
3649 if ((chan->mode != L2CAP_MODE_ERTM) && (chan->mode != L2CAP_MODE_STREAMING))
3650 return;
3651
3652 while (len >= L2CAP_CONF_OPT_SIZE) {
3653 len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);
3654
3655 switch (type) {
3656 case L2CAP_CONF_RFC:
3657 if (olen == sizeof(rfc))
3658 memcpy(&rfc, (void *)val, olen);
3659 break;
3660 case L2CAP_CONF_EWS:
3661 txwin_ext = val;
3662 break;
3663 }
3664 }
3665
3666 switch (rfc.mode) {
3667 case L2CAP_MODE_ERTM:
3668 chan->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
3669 chan->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
3670 chan->mps = le16_to_cpu(rfc.max_pdu_size);
3671 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
3672 chan->ack_win = min_t(u16, chan->ack_win, txwin_ext);
3673 else
3674 chan->ack_win = min_t(u16, chan->ack_win,
3675 rfc.txwin_size);
3676 break;
3677 case L2CAP_MODE_STREAMING:
3678 chan->mps = le16_to_cpu(rfc.max_pdu_size);
3679 }
3680 }
3681
/* Handle an incoming Command Reject.  A "command not understood"
 * reject of our outstanding Information Request is treated like a
 * failed response so connection setup can proceed instead of waiting
 * for the info timer to expire.
 */
static inline int l2cap_command_rej(struct l2cap_conn *conn,
				    struct l2cap_cmd_hdr *cmd, u16 cmd_len,
				    u8 *data)
{
	struct l2cap_cmd_rej_unk *rej = (struct l2cap_cmd_rej_unk *) data;

	/* Reject truncated commands before touching the payload */
	if (cmd_len < sizeof(*rej))
		return -EPROTO;

	if (rej->reason != L2CAP_REJ_NOT_UNDERSTOOD)
		return 0;

	/* Only react when the reject matches our pending info request */
	if ((conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) &&
	    cmd->ident == conn->info_ident) {
		cancel_delayed_work(&conn->info_timer);

		conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
		conn->info_ident = 0;

		l2cap_conn_start(conn);
	}

	return 0;
}
3706
/* Handle an incoming Connection Request (or Create Channel Request
 * when @amp_id is non-zero).  Looks up a listening channel for the
 * requested PSM, performs the security check, allocates and registers
 * the new channel, and always answers with @rsp_code.
 *
 * Returns the new channel, or NULL when the request was refused.
 */
static struct l2cap_chan *l2cap_connect(struct l2cap_conn *conn,
					struct l2cap_cmd_hdr *cmd,
					u8 *data, u8 rsp_code, u8 amp_id)
{
	struct l2cap_conn_req *req = (struct l2cap_conn_req *) data;
	struct l2cap_conn_rsp rsp;
	struct l2cap_chan *chan = NULL, *pchan;
	struct sock *parent, *sk = NULL;
	int result, status = L2CAP_CS_NO_INFO;

	u16 dcid = 0, scid = __le16_to_cpu(req->scid);
	__le16 psm = req->psm;

	BT_DBG("psm 0x%2.2x scid 0x%4.4x", __le16_to_cpu(psm), scid);

	/* Check if we have socket listening on psm */
	pchan = l2cap_global_chan_by_psm(BT_LISTEN, psm, conn->src, conn->dst);
	if (!pchan) {
		result = L2CAP_CR_BAD_PSM;
		goto sendresp;
	}

	parent = pchan->sk;

	mutex_lock(&conn->chan_lock);
	lock_sock(parent);

	/* Check if the ACL is secure enough (if not SDP) */
	if (psm != __constant_cpu_to_le16(L2CAP_PSM_SDP) &&
	    !hci_conn_check_link_mode(conn->hcon)) {
		conn->disc_reason = HCI_ERROR_AUTH_FAILURE;
		result = L2CAP_CR_SEC_BLOCK;
		goto response;
	}

	result = L2CAP_CR_NO_MEM;

	/* Check if we already have channel with that dcid */
	if (__l2cap_get_chan_by_dcid(conn, scid))
		goto response;

	chan = pchan->ops->new_connection(pchan);
	if (!chan)
		goto response;

	sk = chan->sk;

	hci_conn_hold(conn->hcon);

	bacpy(&bt_sk(sk)->src, conn->src);
	bacpy(&bt_sk(sk)->dst, conn->dst);
	/* The peer's source CID is our destination CID */
	chan->psm = psm;
	chan->dcid = scid;
	chan->local_amp_id = amp_id;

	__l2cap_chan_add(conn, chan);

	/* Our newly allocated source CID is the peer's destination CID */
	dcid = chan->scid;

	__set_chan_timer(chan, sk->sk_sndtimeo);

	/* Remember the request ident for a possible deferred response */
	chan->ident = cmd->ident;

	if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE) {
		if (l2cap_chan_check_security(chan)) {
			if (test_bit(BT_SK_DEFER_SETUP, &bt_sk(sk)->flags)) {
				__l2cap_state_change(chan, BT_CONNECT2);
				result = L2CAP_CR_PEND;
				status = L2CAP_CS_AUTHOR_PEND;
				chan->ops->defer(chan);
			} else {
				/* Force pending result for AMP controllers.
				 * The connection will succeed after the
				 * physical link is up.
				 */
				if (amp_id) {
					__l2cap_state_change(chan, BT_CONNECT2);
					result = L2CAP_CR_PEND;
				} else {
					__l2cap_state_change(chan, BT_CONFIG);
					result = L2CAP_CR_SUCCESS;
				}
				status = L2CAP_CS_NO_INFO;
			}
		} else {
			__l2cap_state_change(chan, BT_CONNECT2);
			result = L2CAP_CR_PEND;
			status = L2CAP_CS_AUTHEN_PEND;
		}
	} else {
		/* Feature mask exchange still in progress */
		__l2cap_state_change(chan, BT_CONNECT2);
		result = L2CAP_CR_PEND;
		status = L2CAP_CS_NO_INFO;
	}

response:
	release_sock(parent);
	mutex_unlock(&conn->chan_lock);

sendresp:
	rsp.scid = cpu_to_le16(scid);
	rsp.dcid = cpu_to_le16(dcid);
	rsp.result = cpu_to_le16(result);
	rsp.status = cpu_to_le16(status);
	l2cap_send_cmd(conn, cmd->ident, rsp_code, sizeof(rsp), &rsp);

	/* Start the feature mask exchange if it has not happened yet */
	if (result == L2CAP_CR_PEND && status == L2CAP_CS_NO_INFO) {
		struct l2cap_info_req info;
		info.type = __constant_cpu_to_le16(L2CAP_IT_FEAT_MASK);

		conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
		conn->info_ident = l2cap_get_ident(conn);

		schedule_delayed_work(&conn->info_timer, L2CAP_INFO_TIMEOUT);

		l2cap_send_cmd(conn, conn->info_ident, L2CAP_INFO_REQ,
			       sizeof(info), &info);
	}

	/* Successful connect: immediately start configuration */
	if (chan && !test_bit(CONF_REQ_SENT, &chan->conf_state) &&
	    result == L2CAP_CR_SUCCESS) {
		u8 buf[128];
		set_bit(CONF_REQ_SENT, &chan->conf_state);
		l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
			       l2cap_build_conf_req(chan, buf), buf);
		chan->num_conf_req++;
	}

	return chan;
}
3837
/* Handle an incoming L2CAP Connection Request on a BR/EDR link.
 * Notifies the management interface of the connection the first time
 * and delegates the actual channel setup to l2cap_connect().
 */
static int l2cap_connect_req(struct l2cap_conn *conn,
			     struct l2cap_cmd_hdr *cmd, u16 cmd_len, u8 *data)
{
	struct hci_dev *hdev = conn->hcon->hdev;
	struct hci_conn *hcon = conn->hcon;

	/* Reject truncated requests before touching the payload */
	if (cmd_len < sizeof(struct l2cap_conn_req))
		return -EPROTO;

	hci_dev_lock(hdev);
	if (test_bit(HCI_MGMT, &hdev->dev_flags) &&
	    !test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &hcon->flags))
		mgmt_device_connected(hdev, &hcon->dst, hcon->type,
				      hcon->dst_type, 0, NULL, 0,
				      hcon->dev_class);
	hci_dev_unlock(hdev);

	/* amp_id 0: plain BR/EDR connection */
	l2cap_connect(conn, cmd, data, L2CAP_CONN_RSP, 0);
	return 0;
}
3858
/* Handle a Connection Response or Create Channel Response.  Finds the
 * matching channel (by source CID or, while the peer's CID is still
 * unknown, by command ident) and advances it to BT_CONFIG, keeps it
 * pending, or tears it down depending on @result.
 */
static int l2cap_connect_create_rsp(struct l2cap_conn *conn,
				    struct l2cap_cmd_hdr *cmd, u16 cmd_len,
				    u8 *data)
{
	struct l2cap_conn_rsp *rsp = (struct l2cap_conn_rsp *) data;
	u16 scid, dcid, result, status;
	struct l2cap_chan *chan;
	u8 req[128];
	int err;

	if (cmd_len < sizeof(*rsp))
		return -EPROTO;

	scid = __le16_to_cpu(rsp->scid);
	dcid = __le16_to_cpu(rsp->dcid);
	result = __le16_to_cpu(rsp->result);
	status = __le16_to_cpu(rsp->status);

	BT_DBG("dcid 0x%4.4x scid 0x%4.4x result 0x%2.2x status 0x%2.2x",
	       dcid, scid, result, status);

	mutex_lock(&conn->chan_lock);

	if (scid) {
		chan = __l2cap_get_chan_by_scid(conn, scid);
		if (!chan) {
			err = -EFAULT;
			goto unlock;
		}
	} else {
		/* No source CID given (e.g. pending result): match on
		 * the ident of our original request.
		 */
		chan = __l2cap_get_chan_by_ident(conn, cmd->ident);
		if (!chan) {
			err = -EFAULT;
			goto unlock;
		}
	}

	err = 0;

	l2cap_chan_lock(chan);

	switch (result) {
	case L2CAP_CR_SUCCESS:
		l2cap_state_change(chan, BT_CONFIG);
		chan->ident = 0;
		chan->dcid = dcid;
		clear_bit(CONF_CONNECT_PEND, &chan->conf_state);

		/* Only send the first configuration request once */
		if (test_and_set_bit(CONF_REQ_SENT, &chan->conf_state))
			break;

		l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
			       l2cap_build_conf_req(chan, req), req);
		chan->num_conf_req++;
		break;

	case L2CAP_CR_PEND:
		set_bit(CONF_CONNECT_PEND, &chan->conf_state);
		break;

	default:
		/* Any other result code is a refusal */
		l2cap_chan_del(chan, ECONNREFUSED);
		break;
	}

	l2cap_chan_unlock(chan);

unlock:
	mutex_unlock(&conn->chan_lock);

	return err;
}
3931
3932 static inline void set_default_fcs(struct l2cap_chan *chan)
3933 {
3934 /* FCS is enabled only in ERTM or streaming mode, if one or both
3935 * sides request it.
3936 */
3937 if (chan->mode != L2CAP_MODE_ERTM && chan->mode != L2CAP_MODE_STREAMING)
3938 chan->fcs = L2CAP_FCS_NONE;
3939 else if (!test_bit(CONF_RECV_NO_FCS, &chan->conf_state))
3940 chan->fcs = L2CAP_FCS_CRC16;
3941 }
3942
3943 static void l2cap_send_efs_conf_rsp(struct l2cap_chan *chan, void *data,
3944 u8 ident, u16 flags)
3945 {
3946 struct l2cap_conn *conn = chan->conn;
3947
3948 BT_DBG("conn %p chan %p ident %d flags 0x%4.4x", conn, chan, ident,
3949 flags);
3950
3951 clear_bit(CONF_LOC_CONF_PEND, &chan->conf_state);
3952 set_bit(CONF_OUTPUT_DONE, &chan->conf_state);
3953
3954 l2cap_send_cmd(conn, ident, L2CAP_CONF_RSP,
3955 l2cap_build_conf_rsp(chan, data,
3956 L2CAP_CONF_SUCCESS, flags), data);
3957 }
3958
/* Handle an incoming Configuration Request.  Accumulates possibly
 * fragmented option data in chan->conf_req; once the final fragment
 * arrives the options are parsed, a Configuration Response is sent
 * and, when both directions are configured, the channel is brought up.
 */
static inline int l2cap_config_req(struct l2cap_conn *conn,
				   struct l2cap_cmd_hdr *cmd, u16 cmd_len,
				   u8 *data)
{
	struct l2cap_conf_req *req = (struct l2cap_conf_req *) data;
	u16 dcid, flags;
	u8 rsp[64];
	struct l2cap_chan *chan;
	int len, err = 0;

	if (cmd_len < sizeof(*req))
		return -EPROTO;

	dcid = __le16_to_cpu(req->dcid);
	flags = __le16_to_cpu(req->flags);

	BT_DBG("dcid 0x%4.4x flags 0x%2.2x", dcid, flags);

	/* On success the channel is returned locked (see "unlock") */
	chan = l2cap_get_chan_by_scid(conn, dcid);
	if (!chan)
		return -ENOENT;

	/* Configuration is only valid in BT_CONFIG/BT_CONNECT2 */
	if (chan->state != BT_CONFIG && chan->state != BT_CONNECT2) {
		struct l2cap_cmd_rej_cid rej;

		rej.reason = __constant_cpu_to_le16(L2CAP_REJ_INVALID_CID);
		rej.scid = cpu_to_le16(chan->scid);
		rej.dcid = cpu_to_le16(chan->dcid);

		l2cap_send_cmd(conn, cmd->ident, L2CAP_COMMAND_REJ,
			       sizeof(rej), &rej);
		goto unlock;
	}

	/* Reject if config buffer is too small. */
	len = cmd_len - sizeof(*req);
	if (chan->conf_len + len > sizeof(chan->conf_req)) {
		l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
			       l2cap_build_conf_rsp(chan, rsp,
			       L2CAP_CONF_REJECT, flags), rsp);
		goto unlock;
	}

	/* Store config. */
	memcpy(chan->conf_req + chan->conf_len, req->data, len);
	chan->conf_len += len;

	if (flags & L2CAP_CONF_FLAG_CONTINUATION) {
		/* Incomplete config. Send empty response. */
		l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
			       l2cap_build_conf_rsp(chan, rsp,
			       L2CAP_CONF_SUCCESS, flags), rsp);
		goto unlock;
	}

	/* Complete config. */
	len = l2cap_parse_conf_req(chan, rsp);
	if (len < 0) {
		l2cap_send_disconn_req(chan, ECONNRESET);
		goto unlock;
	}

	chan->ident = cmd->ident;
	l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP, len, rsp);
	chan->num_conf_rsp++;

	/* Reset config buffer. */
	chan->conf_len = 0;

	/* Our own response was not final; wait for further requests */
	if (!test_bit(CONF_OUTPUT_DONE, &chan->conf_state))
		goto unlock;

	if (test_bit(CONF_INPUT_DONE, &chan->conf_state)) {
		/* Both directions configured: finalize and go ready */
		set_default_fcs(chan);

		if (chan->mode == L2CAP_MODE_ERTM ||
		    chan->mode == L2CAP_MODE_STREAMING)
			err = l2cap_ertm_init(chan);

		if (err < 0)
			l2cap_send_disconn_req(chan, -err);
		else
			l2cap_chan_ready(chan);

		goto unlock;
	}

	/* Kick off our own configuration request if still pending */
	if (!test_and_set_bit(CONF_REQ_SENT, &chan->conf_state)) {
		u8 buf[64];
		l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
			       l2cap_build_conf_req(chan, buf), buf);
		chan->num_conf_req++;
	}

	/* Got Conf Rsp PENDING from remote side and asume we sent
	   Conf Rsp PENDING in the code above */
	if (test_bit(CONF_REM_CONF_PEND, &chan->conf_state) &&
	    test_bit(CONF_LOC_CONF_PEND, &chan->conf_state)) {

		/* check compatibility */

		/* Send rsp for BR/EDR channel */
		if (!chan->hs_hcon)
			l2cap_send_efs_conf_rsp(chan, rsp, cmd->ident, flags);
		else
			/* AMP: final response waits for the logical link */
			chan->ident = cmd->ident;
	}

unlock:
	l2cap_chan_unlock(chan);
	return err;
}
4071
/* Handle an incoming Configuration Response.  Depending on the result
 * code this records the remote's accepted settings, re-negotiates
 * unacceptable options (bounded by L2CAP_CONF_MAX_CONF_RSP), or tears
 * the channel down; when both sides are done the channel is made
 * ready.
 */
static inline int l2cap_config_rsp(struct l2cap_conn *conn,
				   struct l2cap_cmd_hdr *cmd, u16 cmd_len,
				   u8 *data)
{
	struct l2cap_conf_rsp *rsp = (struct l2cap_conf_rsp *)data;
	u16 scid, flags, result;
	struct l2cap_chan *chan;
	int len = cmd_len - sizeof(*rsp);
	int err = 0;

	if (cmd_len < sizeof(*rsp))
		return -EPROTO;

	scid = __le16_to_cpu(rsp->scid);
	flags = __le16_to_cpu(rsp->flags);
	result = __le16_to_cpu(rsp->result);

	BT_DBG("scid 0x%4.4x flags 0x%2.2x result 0x%2.2x len %d", scid, flags,
	       result, len);

	/* Returns the channel locked; unlocked at "done" */
	chan = l2cap_get_chan_by_scid(conn, scid);
	if (!chan)
		return 0;

	switch (result) {
	case L2CAP_CONF_SUCCESS:
		l2cap_conf_rfc_get(chan, rsp->data, len);
		clear_bit(CONF_REM_CONF_PEND, &chan->conf_state);
		break;

	case L2CAP_CONF_PENDING:
		set_bit(CONF_REM_CONF_PEND, &chan->conf_state);

		if (test_bit(CONF_LOC_CONF_PEND, &chan->conf_state)) {
			char buf[64];

			len = l2cap_parse_conf_rsp(chan, rsp->data, len,
						   buf, &result);
			if (len < 0) {
				l2cap_send_disconn_req(chan, ECONNRESET);
				goto done;
			}

			/* BR/EDR answers now; AMP channels wait for the
			 * logical link before the final response.
			 */
			if (!chan->hs_hcon) {
				l2cap_send_efs_conf_rsp(chan, buf, cmd->ident,
							0);
			} else {
				if (l2cap_check_efs(chan)) {
					amp_create_logical_link(chan);
					chan->ident = cmd->ident;
				}
			}
		}
		goto done;

	case L2CAP_CONF_UNACCEPT:
		if (chan->num_conf_rsp <= L2CAP_CONF_MAX_CONF_RSP) {
			char req[64];

			if (len > sizeof(req) - sizeof(struct l2cap_conf_req)) {
				l2cap_send_disconn_req(chan, ECONNRESET);
				goto done;
			}

			/* throw out any old stored conf requests */
			result = L2CAP_CONF_SUCCESS;
			len = l2cap_parse_conf_rsp(chan, rsp->data, len,
						   req, &result);
			if (len < 0) {
				l2cap_send_disconn_req(chan, ECONNRESET);
				goto done;
			}

			l2cap_send_cmd(conn, l2cap_get_ident(conn),
				       L2CAP_CONF_REQ, len, req);
			chan->num_conf_req++;
			if (result != L2CAP_CONF_SUCCESS)
				goto done;
			break;
		}
		/* fall through - too many negotiation rounds, give up */

	default:
		l2cap_chan_set_err(chan, ECONNRESET);

		__set_chan_timer(chan, L2CAP_DISC_REJ_TIMEOUT);
		l2cap_send_disconn_req(chan, ECONNRESET);
		goto done;
	}

	/* More response fragments are coming; not final yet */
	if (flags & L2CAP_CONF_FLAG_CONTINUATION)
		goto done;

	set_bit(CONF_INPUT_DONE, &chan->conf_state);

	if (test_bit(CONF_OUTPUT_DONE, &chan->conf_state)) {
		/* Both directions configured: finalize and go ready */
		set_default_fcs(chan);

		if (chan->mode == L2CAP_MODE_ERTM ||
		    chan->mode == L2CAP_MODE_STREAMING)
			err = l2cap_ertm_init(chan);

		if (err < 0)
			l2cap_send_disconn_req(chan, -err);
		else
			l2cap_chan_ready(chan);
	}

done:
	l2cap_chan_unlock(chan);
	return err;
}
4183
/* Handle an incoming Disconnection Request: acknowledge it, shut down
 * the socket and remove the channel.  A channel reference is held
 * across l2cap_chan_del() so that ops->close() still sees a valid
 * channel.
 */
static inline int l2cap_disconnect_req(struct l2cap_conn *conn,
				       struct l2cap_cmd_hdr *cmd, u16 cmd_len,
				       u8 *data)
{
	struct l2cap_disconn_req *req = (struct l2cap_disconn_req *) data;
	struct l2cap_disconn_rsp rsp;
	u16 dcid, scid;
	struct l2cap_chan *chan;
	struct sock *sk;

	if (cmd_len != sizeof(*req))
		return -EPROTO;

	scid = __le16_to_cpu(req->scid);
	dcid = __le16_to_cpu(req->dcid);

	BT_DBG("scid 0x%4.4x dcid 0x%4.4x", scid, dcid);

	mutex_lock(&conn->chan_lock);

	/* The peer's destination CID is our source CID */
	chan = __l2cap_get_chan_by_scid(conn, dcid);
	if (!chan) {
		mutex_unlock(&conn->chan_lock);
		return 0;
	}

	l2cap_chan_lock(chan);

	sk = chan->sk;

	rsp.dcid = cpu_to_le16(chan->scid);
	rsp.scid = cpu_to_le16(chan->dcid);
	l2cap_send_cmd(conn, cmd->ident, L2CAP_DISCONN_RSP, sizeof(rsp), &rsp);

	lock_sock(sk);
	sk->sk_shutdown = SHUTDOWN_MASK;
	release_sock(sk);

	/* Keep the channel alive until ops->close() has run */
	l2cap_chan_hold(chan);
	l2cap_chan_del(chan, ECONNRESET);

	l2cap_chan_unlock(chan);

	chan->ops->close(chan);
	l2cap_chan_put(chan);

	mutex_unlock(&conn->chan_lock);

	return 0;
}
4234
/* Handle a Disconnection Response: the peer acknowledged our request,
 * so drop the channel.  A reference is held across l2cap_chan_del()
 * so ops->close() still sees a valid channel.
 */
static inline int l2cap_disconnect_rsp(struct l2cap_conn *conn,
				       struct l2cap_cmd_hdr *cmd, u16 cmd_len,
				       u8 *data)
{
	struct l2cap_disconn_rsp *rsp = (struct l2cap_disconn_rsp *) data;
	u16 dcid, scid;
	struct l2cap_chan *chan;

	if (cmd_len != sizeof(*rsp))
		return -EPROTO;

	scid = __le16_to_cpu(rsp->scid);
	dcid = __le16_to_cpu(rsp->dcid);

	BT_DBG("dcid 0x%4.4x scid 0x%4.4x", dcid, scid);

	mutex_lock(&conn->chan_lock);

	chan = __l2cap_get_chan_by_scid(conn, scid);
	if (!chan) {
		mutex_unlock(&conn->chan_lock);
		return 0;
	}

	l2cap_chan_lock(chan);

	/* err 0: this is a clean, locally initiated disconnect */
	l2cap_chan_hold(chan);
	l2cap_chan_del(chan, 0);

	l2cap_chan_unlock(chan);

	chan->ops->close(chan);
	l2cap_chan_put(chan);

	mutex_unlock(&conn->chan_lock);

	return 0;
}
4273
/* Handle an incoming Information Request by replying with our feature
 * mask or fixed channel map; any other type gets a "not supported"
 * response.
 */
static inline int l2cap_information_req(struct l2cap_conn *conn,
					struct l2cap_cmd_hdr *cmd, u16 cmd_len,
					u8 *data)
{
	struct l2cap_info_req *req = (struct l2cap_info_req *) data;
	u16 type;

	if (cmd_len != sizeof(*req))
		return -EPROTO;

	type = __le16_to_cpu(req->type);

	BT_DBG("type 0x%4.4x", type);

	if (type == L2CAP_IT_FEAT_MASK) {
		u8 buf[8];
		u32 feat_mask = l2cap_feat_mask;
		struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
		rsp->type = __constant_cpu_to_le16(L2CAP_IT_FEAT_MASK);
		rsp->result = __constant_cpu_to_le16(L2CAP_IR_SUCCESS);
		/* Advertise optional features based on module settings */
		if (!disable_ertm)
			feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING
				| L2CAP_FEAT_FCS;
		if (enable_hs)
			feat_mask |= L2CAP_FEAT_EXT_FLOW
				| L2CAP_FEAT_EXT_WINDOW;

		put_unaligned_le32(feat_mask, rsp->data);
		l2cap_send_cmd(conn, cmd->ident, L2CAP_INFO_RSP, sizeof(buf),
			       buf);
	} else if (type == L2CAP_IT_FIXED_CHAN) {
		u8 buf[12];
		struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;

		/* The A2MP fixed channel only exists with high speed on */
		if (enable_hs)
			l2cap_fixed_chan[0] |= L2CAP_FC_A2MP;
		else
			l2cap_fixed_chan[0] &= ~L2CAP_FC_A2MP;

		rsp->type = __constant_cpu_to_le16(L2CAP_IT_FIXED_CHAN);
		rsp->result = __constant_cpu_to_le16(L2CAP_IR_SUCCESS);
		memcpy(rsp->data, l2cap_fixed_chan, sizeof(l2cap_fixed_chan));
		l2cap_send_cmd(conn, cmd->ident, L2CAP_INFO_RSP, sizeof(buf),
			       buf);
	} else {
		struct l2cap_info_rsp rsp;
		rsp.type = cpu_to_le16(type);
		rsp.result = __constant_cpu_to_le16(L2CAP_IR_NOTSUPP);
		l2cap_send_cmd(conn, cmd->ident, L2CAP_INFO_RSP, sizeof(rsp),
			       &rsp);
	}

	return 0;
}
4328
/* Handle an Information Response to our outstanding request.  After
 * learning the feature mask we may additionally ask for the fixed
 * channel map; once everything is known, pending connections are
 * started via l2cap_conn_start().
 */
static inline int l2cap_information_rsp(struct l2cap_conn *conn,
					struct l2cap_cmd_hdr *cmd, u16 cmd_len,
					u8 *data)
{
	struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) data;
	u16 type, result;

	if (cmd_len < sizeof(*rsp))
		return -EPROTO;

	type = __le16_to_cpu(rsp->type);
	result = __le16_to_cpu(rsp->result);

	BT_DBG("type 0x%4.4x result 0x%2.2x", type, result);

	/* L2CAP Info req/rsp are unbound to channels, add extra checks */
	if (cmd->ident != conn->info_ident ||
	    conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE)
		return 0;

	cancel_delayed_work(&conn->info_timer);

	if (result != L2CAP_IR_SUCCESS) {
		/* Treat failure as "exchange finished" and carry on */
		conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
		conn->info_ident = 0;

		l2cap_conn_start(conn);

		return 0;
	}

	switch (type) {
	case L2CAP_IT_FEAT_MASK:
		conn->feat_mask = get_unaligned_le32(rsp->data);

		if (conn->feat_mask & L2CAP_FEAT_FIXED_CHAN) {
			/* Follow up with a fixed channel map request */
			struct l2cap_info_req req;
			req.type = __constant_cpu_to_le16(L2CAP_IT_FIXED_CHAN);

			conn->info_ident = l2cap_get_ident(conn);

			l2cap_send_cmd(conn, conn->info_ident,
				       L2CAP_INFO_REQ, sizeof(req), &req);
		} else {
			conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
			conn->info_ident = 0;

			l2cap_conn_start(conn);
		}
		break;

	case L2CAP_IT_FIXED_CHAN:
		conn->fixed_chan_mask = rsp->data[0];
		conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
		conn->info_ident = 0;

		l2cap_conn_start(conn);
		break;
	}

	return 0;
}
4391
/* Handle a Create Channel Request (high speed).  amp_id 0 falls back
 * to a normal BR/EDR connect; otherwise the AMP controller id is
 * validated and, on success, the new channel is bound to the AMP
 * physical link.
 */
static int l2cap_create_channel_req(struct l2cap_conn *conn,
				    struct l2cap_cmd_hdr *cmd,
				    u16 cmd_len, void *data)
{
	struct l2cap_create_chan_req *req = data;
	struct l2cap_create_chan_rsp rsp;
	struct l2cap_chan *chan;
	struct hci_dev *hdev;
	u16 psm, scid;

	if (cmd_len != sizeof(*req))
		return -EPROTO;

	if (!enable_hs)
		return -EINVAL;

	psm = le16_to_cpu(req->psm);
	scid = le16_to_cpu(req->scid);

	BT_DBG("psm 0x%2.2x, scid 0x%4.4x, amp_id %d", psm, scid, req->amp_id);

	/* For controller id 0 make BR/EDR connection */
	if (req->amp_id == HCI_BREDR_ID) {
		l2cap_connect(conn, cmd, data, L2CAP_CREATE_CHAN_RSP,
			      req->amp_id);
		return 0;
	}

	/* Validate AMP controller id */
	hdev = hci_dev_get(req->amp_id);
	if (!hdev)
		goto error;

	if (hdev->dev_type != HCI_AMP || !test_bit(HCI_UP, &hdev->flags)) {
		hci_dev_put(hdev);
		goto error;
	}

	chan = l2cap_connect(conn, cmd, data, L2CAP_CREATE_CHAN_RSP,
			     req->amp_id);
	if (chan) {
		struct amp_mgr *mgr = conn->hcon->amp_mgr;
		struct hci_conn *hs_hcon;

		hs_hcon = hci_conn_hash_lookup_ba(hdev, AMP_LINK, conn->dst);
		if (!hs_hcon) {
			hci_dev_put(hdev);
			return -EFAULT;
		}

		BT_DBG("mgr %p bredr_chan %p hs_hcon %p", mgr, chan, hs_hcon);

		mgr->bredr_chan = chan;
		chan->hs_hcon = hs_hcon;
		/* AMP links do not use the L2CAP FCS */
		chan->fcs = L2CAP_FCS_NONE;
		conn->mtu = hdev->block_mtu;
	}

	hci_dev_put(hdev);

	return 0;

error:
	rsp.dcid = 0;
	rsp.scid = cpu_to_le16(scid);
	rsp.result = __constant_cpu_to_le16(L2CAP_CR_BAD_AMP);
	rsp.status = __constant_cpu_to_le16(L2CAP_CS_NO_INFO);

	l2cap_send_cmd(conn, cmd->ident, L2CAP_CREATE_CHAN_RSP,
		       sizeof(rsp), &rsp);

	return -EFAULT;
}
4465
4466 static void l2cap_send_move_chan_req(struct l2cap_chan *chan, u8 dest_amp_id)
4467 {
4468 struct l2cap_move_chan_req req;
4469 u8 ident;
4470
4471 BT_DBG("chan %p, dest_amp_id %d", chan, dest_amp_id);
4472
4473 ident = l2cap_get_ident(chan->conn);
4474 chan->ident = ident;
4475
4476 req.icid = cpu_to_le16(chan->scid);
4477 req.dest_amp_id = dest_amp_id;
4478
4479 l2cap_send_cmd(chan->conn, ident, L2CAP_MOVE_CHAN_REQ, sizeof(req),
4480 &req);
4481
4482 __set_chan_timer(chan, L2CAP_MOVE_TIMEOUT);
4483 }
4484
4485 static void l2cap_send_move_chan_rsp(struct l2cap_chan *chan, u16 result)
4486 {
4487 struct l2cap_move_chan_rsp rsp;
4488
4489 BT_DBG("chan %p, result 0x%4.4x", chan, result);
4490
4491 rsp.icid = cpu_to_le16(chan->dcid);
4492 rsp.result = cpu_to_le16(result);
4493
4494 l2cap_send_cmd(chan->conn, chan->ident, L2CAP_MOVE_CHAN_RSP,
4495 sizeof(rsp), &rsp);
4496 }
4497
4498 static void l2cap_send_move_chan_cfm(struct l2cap_chan *chan, u16 result)
4499 {
4500 struct l2cap_move_chan_cfm cfm;
4501
4502 BT_DBG("chan %p, result 0x%4.4x", chan, result);
4503
4504 chan->ident = l2cap_get_ident(chan->conn);
4505
4506 cfm.icid = cpu_to_le16(chan->scid);
4507 cfm.result = cpu_to_le16(result);
4508
4509 l2cap_send_cmd(chan->conn, chan->ident, L2CAP_MOVE_CHAN_CFM,
4510 sizeof(cfm), &cfm);
4511
4512 __set_chan_timer(chan, L2CAP_MOVE_TIMEOUT);
4513 }
4514
4515 static void l2cap_send_move_chan_cfm_icid(struct l2cap_conn *conn, u16 icid)
4516 {
4517 struct l2cap_move_chan_cfm cfm;
4518
4519 BT_DBG("conn %p, icid 0x%4.4x", conn, icid);
4520
4521 cfm.icid = cpu_to_le16(icid);
4522 cfm.result = __constant_cpu_to_le16(L2CAP_MC_UNCONFIRMED);
4523
4524 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_MOVE_CHAN_CFM,
4525 sizeof(cfm), &cfm);
4526 }
4527
4528 static void l2cap_send_move_chan_cfm_rsp(struct l2cap_conn *conn, u8 ident,
4529 u16 icid)
4530 {
4531 struct l2cap_move_chan_cfm_rsp rsp;
4532
4533 BT_DBG("icid 0x%4.4x", icid);
4534
4535 rsp.icid = cpu_to_le16(icid);
4536 l2cap_send_cmd(conn, ident, L2CAP_MOVE_CHAN_CFM_RSP, sizeof(rsp), &rsp);
4537 }
4538
/* Drop the channel's references to its high speed logical link */
static void __release_logical_link(struct l2cap_chan *chan)
{
	chan->hs_hchan = NULL;
	chan->hs_hcon = NULL;

	/* Placeholder - release the logical link */
}
4546
/* Clean up after a failed logical link setup: a channel still being
 * created is disconnected, while an established channel has its
 * pending move aborted according to our role in the move.
 */
static void l2cap_logical_fail(struct l2cap_chan *chan)
{
	/* Logical link setup failed */
	if (chan->state != BT_CONNECTED) {
		/* Create channel failure, disconnect */
		l2cap_send_disconn_req(chan, ECONNRESET);
		return;
	}

	switch (chan->move_role) {
	case L2CAP_MOVE_ROLE_RESPONDER:
		l2cap_move_done(chan);
		/* Tell the initiator we cannot complete the move */
		l2cap_send_move_chan_rsp(chan, L2CAP_MR_NOT_SUPP);
		break;
	case L2CAP_MOVE_ROLE_INITIATOR:
		if (chan->move_state == L2CAP_MOVE_WAIT_LOGICAL_COMP ||
		    chan->move_state == L2CAP_MOVE_WAIT_LOGICAL_CFM) {
			/* Remote has only sent pending or
			 * success responses, clean up
			 */
			l2cap_move_done(chan);
		}

		/* Other amp move states imply that the move
		 * has already aborted
		 */
		l2cap_send_move_chan_cfm(chan, L2CAP_MC_UNCONFIRMED);
		break;
	}
}
4577
/* The logical link for a newly created AMP channel is up: send the
 * final EFS configuration response and, if the remote already finished
 * its side, complete channel setup.
 */
static void l2cap_logical_finish_create(struct l2cap_chan *chan,
					struct hci_chan *hchan)
{
	struct l2cap_conf_rsp rsp;

	chan->hs_hchan = hchan;
	chan->hs_hcon->l2cap_data = chan->conn;

	/* chan->ident still holds the deferred config request's ident */
	l2cap_send_efs_conf_rsp(chan, &rsp, chan->ident, 0);

	if (test_bit(CONF_INPUT_DONE, &chan->conf_state)) {
		int err;

		set_default_fcs(chan);

		err = l2cap_ertm_init(chan);
		if (err < 0)
			l2cap_send_disconn_req(chan, -err);
		else
			l2cap_chan_ready(chan);
	}
}
4600
/* The logical link for a channel move is up: advance the move state
 * machine according to our role and the current move state.
 */
static void l2cap_logical_finish_move(struct l2cap_chan *chan,
				      struct hci_chan *hchan)
{
	chan->hs_hcon = hchan->conn;
	chan->hs_hcon->l2cap_data = chan->conn;

	BT_DBG("move_state %d", chan->move_state);

	switch (chan->move_state) {
	case L2CAP_MOVE_WAIT_LOGICAL_COMP:
		/* Move confirm will be sent after a success
		 * response is received
		 */
		chan->move_state = L2CAP_MOVE_WAIT_RSP_SUCCESS;
		break;
	case L2CAP_MOVE_WAIT_LOGICAL_CFM:
		if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
			/* Defer until the local receiver is ready again */
			chan->move_state = L2CAP_MOVE_WAIT_LOCAL_BUSY;
		} else if (chan->move_role == L2CAP_MOVE_ROLE_INITIATOR) {
			chan->move_state = L2CAP_MOVE_WAIT_CONFIRM_RSP;
			l2cap_send_move_chan_cfm(chan, L2CAP_MC_CONFIRMED);
		} else if (chan->move_role == L2CAP_MOVE_ROLE_RESPONDER) {
			chan->move_state = L2CAP_MOVE_WAIT_CONFIRM;
			l2cap_send_move_chan_rsp(chan, L2CAP_MR_SUCCESS);
		}
		break;
	default:
		/* Move was not in expected state, free the channel */
		__release_logical_link(chan);

		chan->move_state = L2CAP_MOVE_STABLE;
	}
}
4634
/* Call with chan locked */
/* Completion callback for logical link setup: on error clean up and
 * drop the link references; on success finish either channel creation
 * or a channel move, depending on the current channel state.
 */
void l2cap_logical_cfm(struct l2cap_chan *chan, struct hci_chan *hchan,
		       u8 status)
{
	BT_DBG("chan %p, hchan %p, status %d", chan, hchan, status);

	if (status) {
		l2cap_logical_fail(chan);
		__release_logical_link(chan);
		return;
	}

	if (chan->state != BT_CONNECTED) {
		/* Ignore logical link if channel is on BR/EDR */
		if (chan->local_amp_id)
			l2cap_logical_finish_create(chan, hchan);
	} else {
		l2cap_logical_finish_move(chan, hchan);
	}
}
4655
/* Start moving a connected channel to the other controller type.
 * BR/EDR -> AMP requires physical link setup first; AMP -> BR/EDR
 * (move_id 0) can send the move request right away.
 */
void l2cap_move_start(struct l2cap_chan *chan)
{
	BT_DBG("chan %p", chan);

	if (chan->local_amp_id == HCI_BREDR_ID) {
		/* Only move off BR/EDR if policy actually prefers AMP */
		if (chan->chan_policy != BT_CHANNEL_POLICY_AMP_PREFERRED)
			return;
		chan->move_role = L2CAP_MOVE_ROLE_INITIATOR;
		chan->move_state = L2CAP_MOVE_WAIT_PREPARE;
		/* Placeholder - start physical link setup */
	} else {
		chan->move_role = L2CAP_MOVE_ROLE_INITIATOR;
		chan->move_state = L2CAP_MOVE_WAIT_RSP_SUCCESS;
		/* move_id 0 moves the channel back to BR/EDR */
		chan->move_id = 0;
		l2cap_move_setup(chan);
		l2cap_send_move_chan_req(chan, 0);
	}
}
4674
/* Physical link setup finished for a channel being created on an AMP
 * controller.  For an outgoing channel this sends the Create Channel
 * request (or falls back to a plain BR/EDR connect on failure); for an
 * incoming one it sends the Create Channel Response and, on success,
 * starts configuration.
 */
static void l2cap_do_create(struct l2cap_chan *chan, int result,
			    u8 local_amp_id, u8 remote_amp_id)
{
	BT_DBG("chan %p state %s %u -> %u", chan, state_to_string(chan->state),
	       local_amp_id, remote_amp_id);

	/* AMP links do not use the L2CAP FCS */
	chan->fcs = L2CAP_FCS_NONE;

	/* Outgoing channel on AMP */
	if (chan->state == BT_CONNECT) {
		if (result == L2CAP_CR_SUCCESS) {
			chan->local_amp_id = local_amp_id;
			l2cap_send_create_chan_req(chan, remote_amp_id);
		} else {
			/* Revert to BR/EDR connect */
			l2cap_send_conn_req(chan);
		}

		return;
	}

	/* Incoming channel on AMP */
	if (__l2cap_no_conn_pending(chan)) {
		struct l2cap_conn_rsp rsp;
		char buf[128];
		rsp.scid = cpu_to_le16(chan->dcid);
		rsp.dcid = cpu_to_le16(chan->scid);

		if (result == L2CAP_CR_SUCCESS) {
			/* Send successful response */
			rsp.result = __constant_cpu_to_le16(L2CAP_CR_SUCCESS);
			rsp.status = __constant_cpu_to_le16(L2CAP_CS_NO_INFO);
		} else {
			/* Send negative response */
			rsp.result = __constant_cpu_to_le16(L2CAP_CR_NO_MEM);
			rsp.status = __constant_cpu_to_le16(L2CAP_CS_NO_INFO);
		}

		l2cap_send_cmd(chan->conn, chan->ident, L2CAP_CREATE_CHAN_RSP,
			       sizeof(rsp), &rsp);

		if (result == L2CAP_CR_SUCCESS) {
			/* Accepted: move to BT_CONFIG and start the
			 * configuration exchange immediately.
			 */
			__l2cap_state_change(chan, BT_CONFIG);
			set_bit(CONF_REQ_SENT, &chan->conf_state);
			l2cap_send_cmd(chan->conn, l2cap_get_ident(chan->conn),
				       L2CAP_CONF_REQ,
				       l2cap_build_conf_req(chan, buf), buf);
			chan->num_conf_req++;
		}
	}
}
4726
/* As move initiator, record the target controller and send the Move
 * Channel Request to the peer.
 */
static void l2cap_do_move_initiate(struct l2cap_chan *chan, u8 local_amp_id,
				   u8 remote_amp_id)
{
	l2cap_move_setup(chan);
	chan->move_id = local_amp_id;
	chan->move_state = L2CAP_MOVE_WAIT_RSP;

	l2cap_send_move_chan_req(chan, remote_amp_id);
}
4736
/* As move responder, answer the peer's move request based on whether
 * a logical link is available, still connecting, or missing entirely.
 */
static void l2cap_do_move_respond(struct l2cap_chan *chan, int result)
{
	struct hci_chan *hchan = NULL;

	/* Placeholder - get hci_chan for logical link */

	if (hchan) {
		if (hchan->state == BT_CONNECTED) {
			/* Logical link is ready to go */
			chan->hs_hcon = hchan->conn;
			chan->hs_hcon->l2cap_data = chan->conn;
			chan->move_state = L2CAP_MOVE_WAIT_CONFIRM;
			l2cap_send_move_chan_rsp(chan, L2CAP_MR_SUCCESS);

			l2cap_logical_cfm(chan, hchan, L2CAP_MR_SUCCESS);
		} else {
			/* Wait for logical link to be ready */
			chan->move_state = L2CAP_MOVE_WAIT_LOGICAL_CFM;
		}
	} else {
		/* Logical link not available */
		l2cap_send_move_chan_rsp(chan, L2CAP_MR_NOT_ALLOWED);
	}
}
4761
4762 static void l2cap_do_move_cancel(struct l2cap_chan *chan, int result)
4763 {
4764 if (chan->move_role == L2CAP_MOVE_ROLE_RESPONDER) {
4765 u8 rsp_result;
4766 if (result == -EINVAL)
4767 rsp_result = L2CAP_MR_BAD_ID;
4768 else
4769 rsp_result = L2CAP_MR_NOT_ALLOWED;
4770
4771 l2cap_send_move_chan_rsp(chan, rsp_result);
4772 }
4773
4774 chan->move_role = L2CAP_MOVE_ROLE_NONE;
4775 chan->move_state = L2CAP_MOVE_STABLE;
4776
4777 /* Restart data transmission */
4778 l2cap_ertm_send(chan);
4779 }
4780
/* Invoke with locked chan */
/* Handle completion of a physical (AMP) link for @chan: either finish
 * channel creation (channel not yet connected) or continue/cancel a
 * channel move depending on @result and our move role.
 *
 * NOTE(review): only the BT_DISCONN/BT_CLOSED early-return path drops
 * the channel lock itself; the other paths leave it held - confirm
 * callers expect this asymmetry.
 */
void __l2cap_physical_cfm(struct l2cap_chan *chan, int result)
{
	u8 local_amp_id = chan->local_amp_id;
	u8 remote_amp_id = chan->remote_amp_id;

	BT_DBG("chan %p, result %d, local_amp_id %d, remote_amp_id %d",
	       chan, result, local_amp_id, remote_amp_id);

	/* A channel already shutting down has nothing to move or create */
	if (chan->state == BT_DISCONN || chan->state == BT_CLOSED) {
		l2cap_chan_unlock(chan);
		return;
	}

	if (chan->state != BT_CONNECTED) {
		/* Physical link was for creating a new channel */
		l2cap_do_create(chan, result, local_amp_id, remote_amp_id);
	} else if (result != L2CAP_MR_SUCCESS) {
		/* Physical link failed: abort the move */
		l2cap_do_move_cancel(chan, result);
	} else {
		/* Physical link is up: continue the move per our role */
		switch (chan->move_role) {
		case L2CAP_MOVE_ROLE_INITIATOR:
			l2cap_do_move_initiate(chan, local_amp_id,
					       remote_amp_id);
			break;
		case L2CAP_MOVE_ROLE_RESPONDER:
			l2cap_do_move_respond(chan, result);
			break;
		default:
			l2cap_do_move_cancel(chan, result);
			break;
		}
	}
}
4814
/* Handle an incoming Move Channel Request.  Validates that the channel
 * exists, is movable (dynamic CID, ERTM/streaming mode, policy allows
 * AMP), that the destination controller differs from the current one
 * and is an AMP that is up, and that no move collision was lost; then
 * enters the responder role and replies with success/pending/error.
 * Always sends a Move Channel Response.
 */
static inline int l2cap_move_channel_req(struct l2cap_conn *conn,
					 struct l2cap_cmd_hdr *cmd,
					 u16 cmd_len, void *data)
{
	struct l2cap_move_chan_req *req = data;
	struct l2cap_move_chan_rsp rsp;
	struct l2cap_chan *chan;
	u16 icid = 0;
	u16 result = L2CAP_MR_NOT_ALLOWED;

	if (cmd_len != sizeof(*req))
		return -EPROTO;

	icid = le16_to_cpu(req->icid);

	BT_DBG("icid 0x%4.4x, dest_amp_id %d", icid, req->dest_amp_id);

	if (!enable_hs)
		return -EINVAL;

	/* No such channel: respond directly, nothing to lock/unlock */
	chan = l2cap_get_chan_by_dcid(conn, icid);
	if (!chan) {
		rsp.icid = cpu_to_le16(icid);
		rsp.result = __constant_cpu_to_le16(L2CAP_MR_NOT_ALLOWED);
		l2cap_send_cmd(conn, cmd->ident, L2CAP_MOVE_CHAN_RSP,
			       sizeof(rsp), &rsp);
		return 0;
	}

	chan->ident = cmd->ident;

	/* Only dynamic ERTM/streaming channels not pinned to BR/EDR
	 * may be moved.
	 */
	if (chan->scid < L2CAP_CID_DYN_START ||
	    chan->chan_policy == BT_CHANNEL_POLICY_BREDR_ONLY ||
	    (chan->mode != L2CAP_MODE_ERTM &&
	     chan->mode != L2CAP_MODE_STREAMING)) {
		result = L2CAP_MR_NOT_ALLOWED;
		goto send_move_response;
	}

	if (chan->local_amp_id == req->dest_amp_id) {
		result = L2CAP_MR_SAME_ID;
		goto send_move_response;
	}

	/* Nonzero destination means an AMP controller: it must exist,
	 * be of AMP type, and be powered up.
	 */
	if (req->dest_amp_id) {
		struct hci_dev *hdev;
		hdev = hci_dev_get(req->dest_amp_id);
		if (!hdev || hdev->dev_type != HCI_AMP ||
		    !test_bit(HCI_UP, &hdev->flags)) {
			if (hdev)
				hci_dev_put(hdev);

			result = L2CAP_MR_BAD_ID;
			goto send_move_response;
		}
		hci_dev_put(hdev);
	}

	/* Detect a move collision. Only send a collision response
	 * if this side has "lost", otherwise proceed with the move.
	 * The winner has the larger bd_addr.
	 */
	if ((__chan_is_moving(chan) ||
	     chan->move_role != L2CAP_MOVE_ROLE_NONE) &&
	    bacmp(conn->src, conn->dst) > 0) {
		result = L2CAP_MR_COLLISION;
		goto send_move_response;
	}

	chan->move_role = L2CAP_MOVE_ROLE_RESPONDER;
	l2cap_move_setup(chan);
	chan->move_id = req->dest_amp_id;
	icid = chan->dcid;

	if (!req->dest_amp_id) {
		/* Moving to BR/EDR */
		if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
			chan->move_state = L2CAP_MOVE_WAIT_LOCAL_BUSY;
			result = L2CAP_MR_PEND;
		} else {
			chan->move_state = L2CAP_MOVE_WAIT_CONFIRM;
			result = L2CAP_MR_SUCCESS;
		}
	} else {
		chan->move_state = L2CAP_MOVE_WAIT_PREPARE;
		/* Placeholder - uncomment when amp functions are available */
		/*amp_accept_physical(chan, req->dest_amp_id);*/
		result = L2CAP_MR_PEND;
	}

send_move_response:
	l2cap_send_move_chan_rsp(chan, result);

	l2cap_chan_unlock(chan);

	return 0;
}
4912
/* Continue an in-progress move after the remote answered our Move
 * Channel Request with success or pending.  If the channel is gone,
 * a Move Channel Confirm is still sent using the icid alone.  The
 * ERTX timer is re-armed only for a pending result.
 */
static void l2cap_move_continue(struct l2cap_conn *conn, u16 icid, u16 result)
{
	struct l2cap_chan *chan;
	struct hci_chan *hchan = NULL;

	chan = l2cap_get_chan_by_scid(conn, icid);
	if (!chan) {
		l2cap_send_move_chan_cfm_icid(conn, icid);
		return;
	}

	__clear_chan_timer(chan);
	if (result == L2CAP_MR_PEND)
		__set_chan_timer(chan, L2CAP_MOVE_ERTX_TIMEOUT);

	switch (chan->move_state) {
	case L2CAP_MOVE_WAIT_LOGICAL_COMP:
		/* Move confirm will be sent when logical link
		 * is complete.
		 */
		chan->move_state = L2CAP_MOVE_WAIT_LOGICAL_CFM;
		break;
	case L2CAP_MOVE_WAIT_RSP_SUCCESS:
		if (result == L2CAP_MR_PEND) {
			break;
		} else if (test_bit(CONN_LOCAL_BUSY,
				    &chan->conn_state)) {
			chan->move_state = L2CAP_MOVE_WAIT_LOCAL_BUSY;
		} else {
			/* Logical link is up or moving to BR/EDR,
			 * proceed with move
			 */
			chan->move_state = L2CAP_MOVE_WAIT_CONFIRM_RSP;
			l2cap_send_move_chan_cfm(chan, L2CAP_MC_CONFIRMED);
		}
		break;
	case L2CAP_MOVE_WAIT_RSP:
		/* Moving to AMP */
		if (result == L2CAP_MR_SUCCESS) {
			/* Remote is ready, send confirm immediately
			 * after logical link is ready
			 */
			chan->move_state = L2CAP_MOVE_WAIT_LOGICAL_CFM;
		} else {
			/* Both logical link and move success
			 * are required to confirm
			 */
			chan->move_state = L2CAP_MOVE_WAIT_LOGICAL_COMP;
		}

		/* Placeholder - get hci_chan for logical link */
		/* NOTE(review): hchan is never assigned, so everything
		 * past the !hchan check below is unreachable until the
		 * logical-link lookup is implemented.
		 */
		if (!hchan) {
			/* Logical link not available */
			l2cap_send_move_chan_cfm(chan, L2CAP_MC_UNCONFIRMED);
			break;
		}

		/* If the logical link is not yet connected, do not
		 * send confirmation.
		 */
		if (hchan->state != BT_CONNECTED)
			break;

		/* Logical link is already ready to go */

		chan->hs_hcon = hchan->conn;
		chan->hs_hcon->l2cap_data = chan->conn;

		if (result == L2CAP_MR_SUCCESS) {
			/* Can confirm now */
			l2cap_send_move_chan_cfm(chan, L2CAP_MC_CONFIRMED);
		} else {
			/* Now only need move success
			 * to confirm
			 */
			chan->move_state = L2CAP_MOVE_WAIT_RSP_SUCCESS;
		}

		l2cap_logical_cfm(chan, hchan, L2CAP_MR_SUCCESS);
		break;
	default:
		/* Any other amp move state means the move failed. */
		chan->move_id = chan->local_amp_id;
		l2cap_move_done(chan);
		l2cap_send_move_chan_cfm(chan, L2CAP_MC_UNCONFIRMED);
	}

	l2cap_chan_unlock(chan);
}
5002
/* Handle a failed Move Channel Response (any result other than
 * success or pending).  A collision result means the remote "won" a
 * simultaneous move, so switch to the responder role; any other
 * failure rolls the move back.  An unconfirmed Move Channel Confirm
 * is sent in every case.
 */
static void l2cap_move_fail(struct l2cap_conn *conn, u8 ident, u16 icid,
			    u16 result)
{
	struct l2cap_chan *chan;

	chan = l2cap_get_chan_by_ident(conn, ident);
	if (!chan) {
		/* Could not locate channel, icid is best guess */
		l2cap_send_move_chan_cfm_icid(conn, icid);
		return;
	}

	__clear_chan_timer(chan);

	if (chan->move_role == L2CAP_MOVE_ROLE_INITIATOR) {
		if (result == L2CAP_MR_COLLISION) {
			chan->move_role = L2CAP_MOVE_ROLE_RESPONDER;
		} else {
			/* Cleanup - cancel move */
			chan->move_id = chan->local_amp_id;
			l2cap_move_done(chan);
		}
	}

	l2cap_send_move_chan_cfm(chan, L2CAP_MC_UNCONFIRMED);

	l2cap_chan_unlock(chan);
}
5031
5032 static int l2cap_move_channel_rsp(struct l2cap_conn *conn,
5033 struct l2cap_cmd_hdr *cmd,
5034 u16 cmd_len, void *data)
5035 {
5036 struct l2cap_move_chan_rsp *rsp = data;
5037 u16 icid, result;
5038
5039 if (cmd_len != sizeof(*rsp))
5040 return -EPROTO;
5041
5042 icid = le16_to_cpu(rsp->icid);
5043 result = le16_to_cpu(rsp->result);
5044
5045 BT_DBG("icid 0x%4.4x, result 0x%4.4x", icid, result);
5046
5047 if (result == L2CAP_MR_SUCCESS || result == L2CAP_MR_PEND)
5048 l2cap_move_continue(conn, icid, result);
5049 else
5050 l2cap_move_fail(conn, cmd->ident, icid, result);
5051
5052 return 0;
5053 }
5054
/* Handle an incoming Move Channel Confirm.  A confirm response is
 * always sent back, even when the icid matches no channel (spec
 * requirement, see below).  A confirmed result commits the move to
 * the new controller; an unconfirmed result rolls it back.
 */
static int l2cap_move_channel_confirm(struct l2cap_conn *conn,
				      struct l2cap_cmd_hdr *cmd,
				      u16 cmd_len, void *data)
{
	struct l2cap_move_chan_cfm *cfm = data;
	struct l2cap_chan *chan;
	u16 icid, result;

	if (cmd_len != sizeof(*cfm))
		return -EPROTO;

	icid = le16_to_cpu(cfm->icid);
	result = le16_to_cpu(cfm->result);

	BT_DBG("icid 0x%4.4x, result 0x%4.4x", icid, result);

	chan = l2cap_get_chan_by_dcid(conn, icid);
	if (!chan) {
		/* Spec requires a response even if the icid was not found */
		l2cap_send_move_chan_cfm_rsp(conn, cmd->ident, icid);
		return 0;
	}

	if (chan->move_state == L2CAP_MOVE_WAIT_CONFIRM) {
		if (result == L2CAP_MC_CONFIRMED) {
			chan->local_amp_id = chan->move_id;
			/* Back on BR/EDR (id 0): the AMP logical link
			 * is no longer needed.
			 */
			if (!chan->local_amp_id)
				__release_logical_link(chan);
		} else {
			chan->move_id = chan->local_amp_id;
		}

		l2cap_move_done(chan);
	}

	l2cap_send_move_chan_cfm_rsp(conn, cmd->ident, icid);

	l2cap_chan_unlock(chan);

	return 0;
}
5096
/* Handle a Move Channel Confirm Response, the final message of a
 * channel move.  Commits local_amp_id to the move target and releases
 * the AMP logical link when the channel ended up back on BR/EDR.
 */
static inline int l2cap_move_channel_confirm_rsp(struct l2cap_conn *conn,
						 struct l2cap_cmd_hdr *cmd,
						 u16 cmd_len, void *data)
{
	struct l2cap_move_chan_cfm_rsp *rsp = data;
	struct l2cap_chan *chan;
	u16 icid;

	if (cmd_len != sizeof(*rsp))
		return -EPROTO;

	icid = le16_to_cpu(rsp->icid);

	BT_DBG("icid 0x%4.4x", icid);

	chan = l2cap_get_chan_by_scid(conn, icid);
	if (!chan)
		return 0;

	__clear_chan_timer(chan);

	if (chan->move_state == L2CAP_MOVE_WAIT_CONFIRM_RSP) {
		chan->local_amp_id = chan->move_id;

		/* Back on BR/EDR with a logical link still held */
		if (!chan->local_amp_id && chan->hs_hchan)
			__release_logical_link(chan);

		l2cap_move_done(chan);
	}

	l2cap_chan_unlock(chan);

	return 0;
}
5131
/* Validate LE connection parameters from a Connection Parameter
 * Update Request against the ranges in the Bluetooth Core spec.
 * @min/@max are connection-interval bounds, @latency is the slave
 * latency in events, @to_multiplier the supervision timeout.
 * Returns 0 if acceptable, -EINVAL otherwise.
 */
static inline int l2cap_check_conn_param(u16 min, u16 max, u16 latency,
					 u16 to_multiplier)
{
	u16 max_latency;

	/* Connection interval must be within 6..3200 and min <= max */
	if (min < 6 || max > 3200 || min > max)
		return -EINVAL;

	/* Supervision timeout must be within 10..3200 */
	if (to_multiplier < 10 || to_multiplier > 3200)
		return -EINVAL;

	/* The timeout must be strictly larger than the max interval */
	if (to_multiplier * 8 <= max)
		return -EINVAL;

	/* Slave latency is capped both by the spec (499) and by what
	 * still fits inside the supervision timeout.
	 */
	max_latency = (to_multiplier * 8 / max) - 1;
	if (latency > 499 || latency > max_latency)
		return -EINVAL;

	return 0;
}
5152
/* Handle an LE Connection Parameter Update Request.  Only valid when
 * the local side is master.  The parameters are validated with
 * l2cap_check_conn_param(); a response is sent either way, and the
 * controller is asked to update the connection only on acceptance.
 */
static inline int l2cap_conn_param_update_req(struct l2cap_conn *conn,
					      struct l2cap_cmd_hdr *cmd,
					      u8 *data)
{
	struct hci_conn *hcon = conn->hcon;
	struct l2cap_conn_param_update_req *req;
	struct l2cap_conn_param_update_rsp rsp;
	u16 min, max, latency, to_multiplier, cmd_len;
	int err;

	/* Only the master can be asked to change the parameters */
	if (!(hcon->link_mode & HCI_LM_MASTER))
		return -EINVAL;

	cmd_len = __le16_to_cpu(cmd->len);
	if (cmd_len != sizeof(struct l2cap_conn_param_update_req))
		return -EPROTO;

	req = (struct l2cap_conn_param_update_req *) data;
	min = __le16_to_cpu(req->min);
	max = __le16_to_cpu(req->max);
	latency = __le16_to_cpu(req->latency);
	to_multiplier = __le16_to_cpu(req->to_multiplier);

	BT_DBG("min 0x%4.4x max 0x%4.4x latency: 0x%4.4x Timeout: 0x%4.4x",
	       min, max, latency, to_multiplier);

	memset(&rsp, 0, sizeof(rsp));

	err = l2cap_check_conn_param(min, max, latency, to_multiplier);
	if (err)
		rsp.result = __constant_cpu_to_le16(L2CAP_CONN_PARAM_REJECTED);
	else
		rsp.result = __constant_cpu_to_le16(L2CAP_CONN_PARAM_ACCEPTED);

	l2cap_send_cmd(conn, cmd->ident, L2CAP_CONN_PARAM_UPDATE_RSP,
		       sizeof(rsp), &rsp);

	/* Only push accepted parameters down to the controller */
	if (!err)
		hci_le_conn_update(hcon, min, max, latency, to_multiplier);

	return 0;
}
5195
/* Dispatch a single BR/EDR signaling command to its handler.  Echo
 * requests are answered inline and echo responses ignored; unknown
 * opcodes return -EINVAL so the caller emits a Command Reject.  Note
 * some handlers return void, so not every command can report errors.
 */
static inline int l2cap_bredr_sig_cmd(struct l2cap_conn *conn,
				      struct l2cap_cmd_hdr *cmd, u16 cmd_len,
				      u8 *data)
{
	int err = 0;

	switch (cmd->code) {
	case L2CAP_COMMAND_REJ:
		l2cap_command_rej(conn, cmd, cmd_len, data);
		break;

	case L2CAP_CONN_REQ:
		err = l2cap_connect_req(conn, cmd, cmd_len, data);
		break;

	case L2CAP_CONN_RSP:
	case L2CAP_CREATE_CHAN_RSP:
		err = l2cap_connect_create_rsp(conn, cmd, cmd_len, data);
		break;

	case L2CAP_CONF_REQ:
		err = l2cap_config_req(conn, cmd, cmd_len, data);
		break;

	case L2CAP_CONF_RSP:
		err = l2cap_config_rsp(conn, cmd, cmd_len, data);
		break;

	case L2CAP_DISCONN_REQ:
		err = l2cap_disconnect_req(conn, cmd, cmd_len, data);
		break;

	case L2CAP_DISCONN_RSP:
		err = l2cap_disconnect_rsp(conn, cmd, cmd_len, data);
		break;

	case L2CAP_ECHO_REQ:
		/* Reflect the payload straight back */
		l2cap_send_cmd(conn, cmd->ident, L2CAP_ECHO_RSP, cmd_len, data);
		break;

	case L2CAP_ECHO_RSP:
		break;

	case L2CAP_INFO_REQ:
		err = l2cap_information_req(conn, cmd, cmd_len, data);
		break;

	case L2CAP_INFO_RSP:
		err = l2cap_information_rsp(conn, cmd, cmd_len, data);
		break;

	case L2CAP_CREATE_CHAN_REQ:
		err = l2cap_create_channel_req(conn, cmd, cmd_len, data);
		break;

	case L2CAP_MOVE_CHAN_REQ:
		err = l2cap_move_channel_req(conn, cmd, cmd_len, data);
		break;

	case L2CAP_MOVE_CHAN_RSP:
		err = l2cap_move_channel_rsp(conn, cmd, cmd_len, data);
		break;

	case L2CAP_MOVE_CHAN_CFM:
		err = l2cap_move_channel_confirm(conn, cmd, cmd_len, data);
		break;

	case L2CAP_MOVE_CHAN_CFM_RSP:
		err = l2cap_move_channel_confirm_rsp(conn, cmd, cmd_len, data);
		break;

	default:
		BT_ERR("Unknown BR/EDR signaling command 0x%2.2x", cmd->code);
		err = -EINVAL;
		break;
	}

	return err;
}
5275
5276 static inline int l2cap_le_sig_cmd(struct l2cap_conn *conn,
5277 struct l2cap_cmd_hdr *cmd, u8 *data)
5278 {
5279 switch (cmd->code) {
5280 case L2CAP_COMMAND_REJ:
5281 return 0;
5282
5283 case L2CAP_CONN_PARAM_UPDATE_REQ:
5284 return l2cap_conn_param_update_req(conn, cmd, data);
5285
5286 case L2CAP_CONN_PARAM_UPDATE_RSP:
5287 return 0;
5288
5289 default:
5290 BT_ERR("Unknown LE signaling command 0x%2.2x", cmd->code);
5291 return -EINVAL;
5292 }
5293 }
5294
/* Parse and dispatch every signaling command carried in one C-frame
 * (several commands may share an skb).  A malformed command (length
 * overrunning the frame, or zero ident) stops parsing; handler errors
 * produce a Command Reject.  Consumes the skb.
 */
static inline void l2cap_sig_channel(struct l2cap_conn *conn,
				     struct sk_buff *skb)
{
	u8 *data = skb->data;
	int len = skb->len;
	struct l2cap_cmd_hdr cmd;
	int err;

	/* Mirror raw signaling traffic to any raw sockets */
	l2cap_raw_recv(conn, skb);

	while (len >= L2CAP_CMD_HDR_SIZE) {
		u16 cmd_len;
		memcpy(&cmd, data, L2CAP_CMD_HDR_SIZE);
		data += L2CAP_CMD_HDR_SIZE;
		len -= L2CAP_CMD_HDR_SIZE;

		cmd_len = le16_to_cpu(cmd.len);

		BT_DBG("code 0x%2.2x len %d id 0x%2.2x", cmd.code, cmd_len,
		       cmd.ident);

		if (cmd_len > len || !cmd.ident) {
			BT_DBG("corrupted command");
			break;
		}

		if (conn->hcon->type == LE_LINK)
			err = l2cap_le_sig_cmd(conn, &cmd, data);
		else
			err = l2cap_bredr_sig_cmd(conn, &cmd, cmd_len, data);

		if (err) {
			struct l2cap_cmd_rej_unk rej;

			/* NOTE(review): this message is printed for any
			 * handler error, not only link-type mismatches.
			 */
			BT_ERR("Wrong link type (%d)", err);

			/* FIXME: Map err to a valid reason */
			rej.reason = __constant_cpu_to_le16(L2CAP_REJ_NOT_UNDERSTOOD);
			l2cap_send_cmd(conn, cmd.ident, L2CAP_COMMAND_REJ,
				       sizeof(rej), &rej);
		}

		data += cmd_len;
		len -= cmd_len;
	}

	kfree_skb(skb);
}
5343
/* Verify and strip the trailing FCS (CRC-16) of a received ERTM or
 * streaming frame.  The CRC covers the L2CAP header (located just
 * before skb->data) plus the payload.  Returns 0 on success or when
 * the channel uses no FCS, -EBADMSG on a mismatch.
 *
 * NOTE(review): assumes skb->len >= L2CAP_FCS_SIZE when fcs is CRC16;
 * skb->len is unsigned, so a shorter frame would underflow in
 * skb_trim - confirm callers guarantee the minimum length.
 */
static int l2cap_check_fcs(struct l2cap_chan *chan, struct sk_buff *skb)
{
	u16 our_fcs, rcv_fcs;
	int hdr_size;

	if (test_bit(FLAG_EXT_CTRL, &chan->flags))
		hdr_size = L2CAP_EXT_HDR_SIZE;
	else
		hdr_size = L2CAP_ENH_HDR_SIZE;

	if (chan->fcs == L2CAP_FCS_CRC16) {
		/* Drop the FCS from the frame; the bytes remain
		 * readable just past the new skb->len.
		 */
		skb_trim(skb, skb->len - L2CAP_FCS_SIZE);
		rcv_fcs = get_unaligned_le16(skb->data + skb->len);
		our_fcs = crc16(0, skb->data - hdr_size, skb->len + hdr_size);

		if (our_fcs != rcv_fcs)
			return -EBADMSG;
	}
	return 0;
}
5364
/* Answer a poll (P=1) from the peer: send a final (F=1) RNR when
 * locally busy, then resume i-frame transmission, and - if neither an
 * i-frame nor an s-frame carried the F-bit - an explicit final RR.
 */
static void l2cap_send_i_or_rr_or_rnr(struct l2cap_chan *chan)
{
	struct l2cap_ctrl control;

	BT_DBG("chan %p", chan);

	memset(&control, 0, sizeof(control));
	control.sframe = 1;
	control.final = 1;
	control.reqseq = chan->buffer_seq;
	set_bit(CONN_SEND_FBIT, &chan->conn_state);

	if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
		control.super = L2CAP_SUPER_RNR;
		l2cap_send_sframe(chan, &control);
	}

	/* Leaving remote-busy with unacked frames outstanding requires
	 * the retransmission timer to run again.
	 */
	if (test_and_clear_bit(CONN_REMOTE_BUSY, &chan->conn_state) &&
	    chan->unacked_frames > 0)
		__set_retrans_timer(chan);

	/* Send pending iframes */
	l2cap_ertm_send(chan);

	if (!test_bit(CONN_LOCAL_BUSY, &chan->conn_state) &&
	    test_bit(CONN_SEND_FBIT, &chan->conn_state)) {
		/* F-bit wasn't sent in an s-frame or i-frame yet, so
		 * send it now.
		 */
		control.super = L2CAP_SUPER_RR;
		l2cap_send_sframe(chan, &control);
	}
}
5398
/* Append @new_frag to @skb's frag_list and update the head skb's
 * length accounting.  @last_frag points at the current list tail (the
 * head skb itself for the first fragment - see l2cap_reassemble_sdu)
 * and is advanced to @new_frag.
 */
static void append_skb_frag(struct sk_buff *skb, struct sk_buff *new_frag,
			    struct sk_buff **last_frag)
{
	/* skb->len reflects data in skb as well as all fragments
	 * skb->data_len reflects only data in fragments
	 */
	if (!skb_has_frag_list(skb))
		skb_shinfo(skb)->frag_list = new_frag;

	new_frag->next = NULL;

	(*last_frag)->next = new_frag;
	*last_frag = new_frag;

	skb->len += new_frag->len;
	skb->data_len += new_frag->len;
	skb->truesize += new_frag->truesize;
}
5417
/* Reassemble an SDU from i-frames according to the SAR bits in
 * @control.  Unsegmented frames are delivered directly; START stashes
 * the skb (after checking the announced SDU length against the MTU),
 * CONTINUE appends, and END appends and delivers the completed SDU
 * via chan->ops->recv.  On error both the current skb and any partial
 * SDU are freed.  Returns 0 on success (skb ownership consumed) or a
 * negative errno.
 */
static int l2cap_reassemble_sdu(struct l2cap_chan *chan, struct sk_buff *skb,
				struct l2cap_ctrl *control)
{
	int err = -EINVAL;

	switch (control->sar) {
	case L2CAP_SAR_UNSEGMENTED:
		/* A partial SDU must not be in progress */
		if (chan->sdu)
			break;

		err = chan->ops->recv(chan, skb);
		break;

	case L2CAP_SAR_START:
		if (chan->sdu)
			break;

		/* The first two payload bytes announce the SDU length */
		chan->sdu_len = get_unaligned_le16(skb->data);
		skb_pull(skb, L2CAP_SDULEN_SIZE);

		if (chan->sdu_len > chan->imtu) {
			err = -EMSGSIZE;
			break;
		}

		/* A start fragment already holding the whole SDU (or
		 * more) is a protocol error; err stays -EINVAL.
		 */
		if (skb->len >= chan->sdu_len)
			break;

		chan->sdu = skb;
		chan->sdu_last_frag = skb;

		/* skb is now owned by chan->sdu; don't free it below */
		skb = NULL;
		err = 0;
		break;

	case L2CAP_SAR_CONTINUE:
		if (!chan->sdu)
			break;

		append_skb_frag(chan->sdu, skb,
				&chan->sdu_last_frag);
		skb = NULL;

		/* Reaching the announced length before the END frame
		 * is a protocol error; err stays -EINVAL.
		 */
		if (chan->sdu->len >= chan->sdu_len)
			break;

		err = 0;
		break;

	case L2CAP_SAR_END:
		if (!chan->sdu)
			break;

		append_skb_frag(chan->sdu, skb,
				&chan->sdu_last_frag);
		skb = NULL;

		if (chan->sdu->len != chan->sdu_len)
			break;

		err = chan->ops->recv(chan, chan->sdu);

		if (!err) {
			/* Reassembly complete */
			chan->sdu = NULL;
			chan->sdu_last_frag = NULL;
			chan->sdu_len = 0;
		}
		break;
	}

	if (err) {
		/* kfree_skb(NULL) is a no-op, so this is safe for the
		 * branches that handed skb off above.
		 */
		kfree_skb(skb);
		kfree_skb(chan->sdu);
		chan->sdu = NULL;
		chan->sdu_last_frag = NULL;
		chan->sdu_len = 0;
	}

	return err;
}
5499
/* Re-segment outgoing data after a channel move.
 * Placeholder - currently a no-op that reports success.
 */
static int l2cap_resegment(struct l2cap_chan *chan)
{
	/* Placeholder */
	return 0;
}
5505
5506 void l2cap_chan_busy(struct l2cap_chan *chan, int busy)
5507 {
5508 u8 event;
5509
5510 if (chan->mode != L2CAP_MODE_ERTM)
5511 return;
5512
5513 event = busy ? L2CAP_EV_LOCAL_BUSY_DETECTED : L2CAP_EV_LOCAL_BUSY_CLEAR;
5514 l2cap_tx(chan, NULL, NULL, event);
5515 }
5516
/* Deliver frames buffered in the SREJ queue, in sequence order, to
 * reassembly - stopping at the first gap, reassembly error, or when
 * local-busy is raised.  Once the queue drains completely the channel
 * returns to the normal receive state and acks the peer.
 */
static int l2cap_rx_queued_iframes(struct l2cap_chan *chan)
{
	int err = 0;
	/* Pass sequential frames to l2cap_reassemble_sdu()
	 * until a gap is encountered.
	 */

	BT_DBG("chan %p", chan);

	while (!test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
		struct sk_buff *skb;
		BT_DBG("Searching for skb with txseq %d (queue len %d)",
		       chan->buffer_seq, skb_queue_len(&chan->srej_q));

		skb = l2cap_ertm_seq_in_queue(&chan->srej_q, chan->buffer_seq);

		/* Gap: the next in-sequence frame has not arrived yet */
		if (!skb)
			break;

		skb_unlink(skb, &chan->srej_q);
		chan->buffer_seq = __next_seq(chan, chan->buffer_seq);
		err = l2cap_reassemble_sdu(chan, skb, &bt_cb(skb)->control);
		if (err)
			break;
	}

	if (skb_queue_empty(&chan->srej_q)) {
		chan->rx_state = L2CAP_RX_STATE_RECV;
		l2cap_send_ack(chan);
	}

	return err;
}
5550
/* Process a received SREJ s-frame: retransmit the single i-frame the
 * peer asked for.  The reqseq and retry limit are validated first,
 * disconnecting on protocol violations.  CONN_SREJ_ACT and
 * srej_save_reqseq pair a poll (P=1) SREJ with exactly one final
 * (F=1) response so the frame is not retransmitted twice.
 */
static void l2cap_handle_srej(struct l2cap_chan *chan,
			      struct l2cap_ctrl *control)
{
	struct sk_buff *skb;

	BT_DBG("chan %p, control %p", chan, control);

	/* A SREJ for the next (not yet sent) sequence number is a
	 * protocol violation.
	 */
	if (control->reqseq == chan->next_tx_seq) {
		BT_DBG("Invalid reqseq %d, disconnecting", control->reqseq);
		l2cap_send_disconn_req(chan, ECONNRESET);
		return;
	}

	skb = l2cap_ertm_seq_in_queue(&chan->tx_q, control->reqseq);

	if (skb == NULL) {
		BT_DBG("Seq %d not available for retransmission",
		       control->reqseq);
		return;
	}

	/* max_tx of zero means no retry limit */
	if (chan->max_tx != 0 && bt_cb(skb)->control.retries >= chan->max_tx) {
		BT_DBG("Retry limit exceeded (%d)", chan->max_tx);
		l2cap_send_disconn_req(chan, ECONNRESET);
		return;
	}

	clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);

	if (control->poll) {
		l2cap_pass_to_tx(chan, control);

		set_bit(CONN_SEND_FBIT, &chan->conn_state);
		l2cap_retransmit(chan, control);
		l2cap_ertm_send(chan);

		if (chan->tx_state == L2CAP_TX_STATE_WAIT_F) {
			set_bit(CONN_SREJ_ACT, &chan->conn_state);
			chan->srej_save_reqseq = control->reqseq;
		}
	} else {
		l2cap_pass_to_tx_fbit(chan, control);

		if (control->final) {
			/* Skip retransmission only if this final
			 * answers the SREJ we already acted on.
			 */
			if (chan->srej_save_reqseq != control->reqseq ||
			    !test_and_clear_bit(CONN_SREJ_ACT,
						&chan->conn_state))
				l2cap_retransmit(chan, control);
		} else {
			l2cap_retransmit(chan, control);
			if (chan->tx_state == L2CAP_TX_STATE_WAIT_F) {
				set_bit(CONN_SREJ_ACT, &chan->conn_state);
				chan->srej_save_reqseq = control->reqseq;
			}
		}
	}
}
5608
/* Process a received REJ s-frame: retransmit all unacked i-frames
 * starting at reqseq.  The reqseq and retry limit are validated
 * first, disconnecting on protocol violations.  CONN_REJ_ACT prevents
 * retransmitting a second time when a poll/final exchange overlaps
 * the REJ.
 */
static void l2cap_handle_rej(struct l2cap_chan *chan,
			     struct l2cap_ctrl *control)
{
	struct sk_buff *skb;

	BT_DBG("chan %p, control %p", chan, control);

	/* A REJ for the next (not yet sent) sequence number is a
	 * protocol violation.
	 */
	if (control->reqseq == chan->next_tx_seq) {
		BT_DBG("Invalid reqseq %d, disconnecting", control->reqseq);
		l2cap_send_disconn_req(chan, ECONNRESET);
		return;
	}

	skb = l2cap_ertm_seq_in_queue(&chan->tx_q, control->reqseq);

	/* max_tx of zero means no retry limit */
	if (chan->max_tx && skb &&
	    bt_cb(skb)->control.retries >= chan->max_tx) {
		BT_DBG("Retry limit exceeded (%d)", chan->max_tx);
		l2cap_send_disconn_req(chan, ECONNRESET);
		return;
	}

	clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);

	l2cap_pass_to_tx(chan, control);

	if (control->final) {
		if (!test_and_clear_bit(CONN_REJ_ACT, &chan->conn_state))
			l2cap_retransmit_all(chan, control);
	} else {
		l2cap_retransmit_all(chan, control);
		l2cap_ertm_send(chan);
		if (chan->tx_state == L2CAP_TX_STATE_WAIT_F)
			set_bit(CONN_REJ_ACT, &chan->conn_state);
	}
}
5645
/* Classify a received i-frame's txseq relative to the expected
 * sequence number, the tx window, and any outstanding SREJs.  The
 * returned L2CAP_TXSEQ_* value tells the RX state machines whether
 * the frame is expected, a duplicate, unexpected (a sequence gap),
 * SREJ-related, or invalid - where "invalid-ignore" marks frames that
 * are provably stale and safe to drop (see the double-poll note
 * below).
 */
static u8 l2cap_classify_txseq(struct l2cap_chan *chan, u16 txseq)
{
	BT_DBG("chan %p, txseq %d", chan, txseq);

	BT_DBG("last_acked_seq %d, expected_tx_seq %d", chan->last_acked_seq,
	       chan->expected_tx_seq);

	if (chan->rx_state == L2CAP_RX_STATE_SREJ_SENT) {
		if (__seq_offset(chan, txseq, chan->last_acked_seq) >=
		    chan->tx_win) {
			/* See notes below regarding "double poll" and
			 * invalid packets.
			 */
			if (chan->tx_win <= ((chan->tx_win_max + 1) >> 1)) {
				BT_DBG("Invalid/Ignore - after SREJ");
				return L2CAP_TXSEQ_INVALID_IGNORE;
			} else {
				BT_DBG("Invalid - in window after SREJ sent");
				return L2CAP_TXSEQ_INVALID;
			}
		}

		if (chan->srej_list.head == txseq) {
			BT_DBG("Expected SREJ");
			return L2CAP_TXSEQ_EXPECTED_SREJ;
		}

		if (l2cap_ertm_seq_in_queue(&chan->srej_q, txseq)) {
			BT_DBG("Duplicate SREJ - txseq already stored");
			return L2CAP_TXSEQ_DUPLICATE_SREJ;
		}

		if (l2cap_seq_list_contains(&chan->srej_list, txseq)) {
			BT_DBG("Unexpected SREJ - not requested");
			return L2CAP_TXSEQ_UNEXPECTED_SREJ;
		}
	}

	if (chan->expected_tx_seq == txseq) {
		if (__seq_offset(chan, txseq, chan->last_acked_seq) >=
		    chan->tx_win) {
			BT_DBG("Invalid - txseq outside tx window");
			return L2CAP_TXSEQ_INVALID;
		} else {
			BT_DBG("Expected");
			return L2CAP_TXSEQ_EXPECTED;
		}
	}

	if (__seq_offset(chan, txseq, chan->last_acked_seq) <
	    __seq_offset(chan, chan->expected_tx_seq, chan->last_acked_seq)) {
		BT_DBG("Duplicate - expected_tx_seq later than txseq");
		return L2CAP_TXSEQ_DUPLICATE;
	}

	if (__seq_offset(chan, txseq, chan->last_acked_seq) >= chan->tx_win) {
		/* A source of invalid packets is a "double poll" condition,
		 * where delays cause us to send multiple poll packets. If
		 * the remote stack receives and processes both polls,
		 * sequence numbers can wrap around in such a way that a
		 * resent frame has a sequence number that looks like new data
		 * with a sequence gap. This would trigger an erroneous SREJ
		 * request.
		 *
		 * Fortunately, this is impossible with a tx window that's
		 * less than half of the maximum sequence number, which allows
		 * invalid frames to be safely ignored.
		 *
		 * With tx window sizes greater than half of the tx window
		 * maximum, the frame is invalid and cannot be ignored. This
		 * causes a disconnect.
		 */

		if (chan->tx_win <= ((chan->tx_win_max + 1) >> 1)) {
			BT_DBG("Invalid/Ignore - txseq outside tx window");
			return L2CAP_TXSEQ_INVALID_IGNORE;
		} else {
			BT_DBG("Invalid - txseq outside tx window");
			return L2CAP_TXSEQ_INVALID;
		}
	} else {
		BT_DBG("Unexpected - txseq indicates missing frames");
		return L2CAP_TXSEQ_UNEXPECTED;
	}
}
5731
/* ERTM receive state machine, normal (L2CAP_RX_STATE_RECV) state:
 * handle an incoming i-frame or s-frame event while no SREJ recovery
 * is in progress.  An unexpected txseq starts SREJ-based recovery and
 * moves the channel to the SREJ_SENT state; RR/RNR/REJ/SREJ events
 * drive the transmit side.  Any skb neither queued nor consumed is
 * freed before returning.
 */
static int l2cap_rx_state_recv(struct l2cap_chan *chan,
			       struct l2cap_ctrl *control,
			       struct sk_buff *skb, u8 event)
{
	int err = 0;
	bool skb_in_use = 0;

	BT_DBG("chan %p, control %p, skb %p, event %d", chan, control, skb,
	       event);

	switch (event) {
	case L2CAP_EV_RECV_IFRAME:
		switch (l2cap_classify_txseq(chan, control->txseq)) {
		case L2CAP_TXSEQ_EXPECTED:
			l2cap_pass_to_tx(chan, control);

			if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
				BT_DBG("Busy, discarding expected seq %d",
				       control->txseq);
				break;
			}

			chan->expected_tx_seq = __next_seq(chan,
							   control->txseq);

			chan->buffer_seq = chan->expected_tx_seq;
			skb_in_use = 1;

			/* Ownership of skb passes to reassembly */
			err = l2cap_reassemble_sdu(chan, skb, control);
			if (err)
				break;

			if (control->final) {
				if (!test_and_clear_bit(CONN_REJ_ACT,
							&chan->conn_state)) {
					control->final = 0;
					l2cap_retransmit_all(chan, control);
					l2cap_ertm_send(chan);
				}
			}

			if (!test_bit(CONN_LOCAL_BUSY, &chan->conn_state))
				l2cap_send_ack(chan);
			break;
		case L2CAP_TXSEQ_UNEXPECTED:
			l2cap_pass_to_tx(chan, control);

			/* Can't issue SREJ frames in the local busy state.
			 * Drop this frame, it will be seen as missing
			 * when local busy is exited.
			 */
			if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
				BT_DBG("Busy, discarding unexpected seq %d",
				       control->txseq);
				break;
			}

			/* There was a gap in the sequence, so an SREJ
			 * must be sent for each missing frame. The
			 * current frame is stored for later use.
			 */
			skb_queue_tail(&chan->srej_q, skb);
			skb_in_use = 1;
			BT_DBG("Queued %p (queue len %d)", skb,
			       skb_queue_len(&chan->srej_q));

			clear_bit(CONN_SREJ_ACT, &chan->conn_state);
			l2cap_seq_list_clear(&chan->srej_list);
			l2cap_send_srej(chan, control->txseq);

			chan->rx_state = L2CAP_RX_STATE_SREJ_SENT;
			break;
		case L2CAP_TXSEQ_DUPLICATE:
			l2cap_pass_to_tx(chan, control);
			break;
		case L2CAP_TXSEQ_INVALID_IGNORE:
			break;
		case L2CAP_TXSEQ_INVALID:
		default:
			l2cap_send_disconn_req(chan, ECONNRESET);
			break;
		}
		break;
	case L2CAP_EV_RECV_RR:
		l2cap_pass_to_tx(chan, control);
		if (control->final) {
			clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);

			/* No retransmission while a channel move is
			 * in progress.
			 */
			if (!test_and_clear_bit(CONN_REJ_ACT, &chan->conn_state) &&
			    !__chan_is_moving(chan)) {
				control->final = 0;
				l2cap_retransmit_all(chan, control);
			}

			l2cap_ertm_send(chan);
		} else if (control->poll) {
			l2cap_send_i_or_rr_or_rnr(chan);
		} else {
			if (test_and_clear_bit(CONN_REMOTE_BUSY,
					       &chan->conn_state) &&
			    chan->unacked_frames)
				__set_retrans_timer(chan);

			l2cap_ertm_send(chan);
		}
		break;
	case L2CAP_EV_RECV_RNR:
		set_bit(CONN_REMOTE_BUSY, &chan->conn_state);
		l2cap_pass_to_tx(chan, control);
		if (control && control->poll) {
			set_bit(CONN_SEND_FBIT, &chan->conn_state);
			l2cap_send_rr_or_rnr(chan, 0);
		}
		__clear_retrans_timer(chan);
		l2cap_seq_list_clear(&chan->retrans_list);
		break;
	case L2CAP_EV_RECV_REJ:
		l2cap_handle_rej(chan, control);
		break;
	case L2CAP_EV_RECV_SREJ:
		l2cap_handle_srej(chan, control);
		break;
	default:
		break;
	}

	if (skb && !skb_in_use) {
		BT_DBG("Freeing %p", skb);
		kfree_skb(skb);
	}

	return err;
}
5865
/* ERTM receive state machine, SREJ recovery (L2CAP_RX_STATE_SREJ_SENT)
 * state: frames arriving while one or more SREJs are outstanding are
 * queued on srej_q; retransmitted frames that fill the head of the
 * SREJ list trigger in-order delivery via l2cap_rx_queued_iframes().
 * Further gaps produce additional SREJs.  Any skb neither queued nor
 * consumed is freed before returning.
 */
static int l2cap_rx_state_srej_sent(struct l2cap_chan *chan,
				    struct l2cap_ctrl *control,
				    struct sk_buff *skb, u8 event)
{
	int err = 0;
	u16 txseq = control->txseq;
	bool skb_in_use = 0;

	BT_DBG("chan %p, control %p, skb %p, event %d", chan, control, skb,
	       event);

	switch (event) {
	case L2CAP_EV_RECV_IFRAME:
		switch (l2cap_classify_txseq(chan, txseq)) {
		case L2CAP_TXSEQ_EXPECTED:
			/* Keep frame for reassembly later */
			l2cap_pass_to_tx(chan, control);
			skb_queue_tail(&chan->srej_q, skb);
			skb_in_use = 1;
			BT_DBG("Queued %p (queue len %d)", skb,
			       skb_queue_len(&chan->srej_q));

			chan->expected_tx_seq = __next_seq(chan, txseq);
			break;
		case L2CAP_TXSEQ_EXPECTED_SREJ:
			/* The head-of-list SREJ was answered; deliver
			 * whatever is now sequential.
			 */
			l2cap_seq_list_pop(&chan->srej_list);

			l2cap_pass_to_tx(chan, control);
			skb_queue_tail(&chan->srej_q, skb);
			skb_in_use = 1;
			BT_DBG("Queued %p (queue len %d)", skb,
			       skb_queue_len(&chan->srej_q));

			err = l2cap_rx_queued_iframes(chan);
			if (err)
				break;

			break;
		case L2CAP_TXSEQ_UNEXPECTED:
			/* Got a frame that can't be reassembled yet.
			 * Save it for later, and send SREJs to cover
			 * the missing frames.
			 */
			skb_queue_tail(&chan->srej_q, skb);
			skb_in_use = 1;
			BT_DBG("Queued %p (queue len %d)", skb,
			       skb_queue_len(&chan->srej_q));

			l2cap_pass_to_tx(chan, control);
			l2cap_send_srej(chan, control->txseq);
			break;
		case L2CAP_TXSEQ_UNEXPECTED_SREJ:
			/* This frame was requested with an SREJ, but
			 * some expected retransmitted frames are
			 * missing. Request retransmission of missing
			 * SREJ'd frames.
			 */
			skb_queue_tail(&chan->srej_q, skb);
			skb_in_use = 1;
			BT_DBG("Queued %p (queue len %d)", skb,
			       skb_queue_len(&chan->srej_q));

			l2cap_pass_to_tx(chan, control);
			l2cap_send_srej_list(chan, control->txseq);
			break;
		case L2CAP_TXSEQ_DUPLICATE_SREJ:
			/* We've already queued this frame. Drop this copy. */
			l2cap_pass_to_tx(chan, control);
			break;
		case L2CAP_TXSEQ_DUPLICATE:
			/* Expecting a later sequence number, so this frame
			 * was already received. Ignore it completely.
			 */
			break;
		case L2CAP_TXSEQ_INVALID_IGNORE:
			break;
		case L2CAP_TXSEQ_INVALID:
		default:
			l2cap_send_disconn_req(chan, ECONNRESET);
			break;
		}
		break;
	case L2CAP_EV_RECV_RR:
		l2cap_pass_to_tx(chan, control);
		if (control->final) {
			clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);

			if (!test_and_clear_bit(CONN_REJ_ACT,
						&chan->conn_state)) {
				control->final = 0;
				l2cap_retransmit_all(chan, control);
			}

			l2cap_ertm_send(chan);
		} else if (control->poll) {
			if (test_and_clear_bit(CONN_REMOTE_BUSY,
					       &chan->conn_state) &&
			    chan->unacked_frames) {
				__set_retrans_timer(chan);
			}

			set_bit(CONN_SEND_FBIT, &chan->conn_state);
			l2cap_send_srej_tail(chan);
		} else {
			if (test_and_clear_bit(CONN_REMOTE_BUSY,
					       &chan->conn_state) &&
			    chan->unacked_frames)
				__set_retrans_timer(chan);

			l2cap_send_ack(chan);
		}
		break;
	case L2CAP_EV_RECV_RNR:
		set_bit(CONN_REMOTE_BUSY, &chan->conn_state);
		l2cap_pass_to_tx(chan, control);
		if (control->poll) {
			l2cap_send_srej_tail(chan);
		} else {
			struct l2cap_ctrl rr_control;
			memset(&rr_control, 0, sizeof(rr_control));
			rr_control.sframe = 1;
			rr_control.super = L2CAP_SUPER_RR;
			rr_control.reqseq = chan->buffer_seq;
			l2cap_send_sframe(chan, &rr_control);
		}

		break;
	case L2CAP_EV_RECV_REJ:
		l2cap_handle_rej(chan, control);
		break;
	case L2CAP_EV_RECV_SREJ:
		l2cap_handle_srej(chan, control);
		break;
	}

	if (skb && !skb_in_use) {
		BT_DBG("Freeing %p", skb);
		kfree_skb(skb);
	}

	return err;
}
6008
6009 static int l2cap_finish_move(struct l2cap_chan *chan)
6010 {
6011 BT_DBG("chan %p", chan);
6012
6013 chan->rx_state = L2CAP_RX_STATE_RECV;
6014
6015 if (chan->hs_hcon)
6016 chan->conn->mtu = chan->hs_hcon->hdev->block_mtu;
6017 else
6018 chan->conn->mtu = chan->conn->hcon->hdev->acl_mtu;
6019
6020 return l2cap_resegment(chan);
6021 }
6022
6023 static int l2cap_rx_state_wait_p(struct l2cap_chan *chan,
6024 struct l2cap_ctrl *control,
6025 struct sk_buff *skb, u8 event)
6026 {
6027 int err;
6028
6029 BT_DBG("chan %p, control %p, skb %p, event %d", chan, control, skb,
6030 event);
6031
6032 if (!control->poll)
6033 return -EPROTO;
6034
6035 l2cap_process_reqseq(chan, control->reqseq);
6036
6037 if (!skb_queue_empty(&chan->tx_q))
6038 chan->tx_send_head = skb_peek(&chan->tx_q);
6039 else
6040 chan->tx_send_head = NULL;
6041
6042 /* Rewind next_tx_seq to the point expected
6043 * by the receiver.
6044 */
6045 chan->next_tx_seq = control->reqseq;
6046 chan->unacked_frames = 0;
6047
6048 err = l2cap_finish_move(chan);
6049 if (err)
6050 return err;
6051
6052 set_bit(CONN_SEND_FBIT, &chan->conn_state);
6053 l2cap_send_i_or_rr_or_rnr(chan);
6054
6055 if (event == L2CAP_EV_RECV_IFRAME)
6056 return -EPROTO;
6057
6058 return l2cap_rx_state_recv(chan, control, NULL, event);
6059 }
6060
6061 static int l2cap_rx_state_wait_f(struct l2cap_chan *chan,
6062 struct l2cap_ctrl *control,
6063 struct sk_buff *skb, u8 event)
6064 {
6065 int err;
6066
6067 if (!control->final)
6068 return -EPROTO;
6069
6070 clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
6071
6072 chan->rx_state = L2CAP_RX_STATE_RECV;
6073 l2cap_process_reqseq(chan, control->reqseq);
6074
6075 if (!skb_queue_empty(&chan->tx_q))
6076 chan->tx_send_head = skb_peek(&chan->tx_q);
6077 else
6078 chan->tx_send_head = NULL;
6079
6080 /* Rewind next_tx_seq to the point expected
6081 * by the receiver.
6082 */
6083 chan->next_tx_seq = control->reqseq;
6084 chan->unacked_frames = 0;
6085
6086 if (chan->hs_hcon)
6087 chan->conn->mtu = chan->hs_hcon->hdev->block_mtu;
6088 else
6089 chan->conn->mtu = chan->conn->hcon->hdev->acl_mtu;
6090
6091 err = l2cap_resegment(chan);
6092
6093 if (!err)
6094 err = l2cap_rx_state_recv(chan, control, skb, event);
6095
6096 return err;
6097 }
6098
6099 static bool __valid_reqseq(struct l2cap_chan *chan, u16 reqseq)
6100 {
6101 /* Make sure reqseq is for a packet that has been sent but not acked */
6102 u16 unacked;
6103
6104 unacked = __seq_offset(chan, chan->next_tx_seq, chan->expected_ack_seq);
6105 return __seq_offset(chan, chan->next_tx_seq, reqseq) <= unacked;
6106 }
6107
6108 static int l2cap_rx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
6109 struct sk_buff *skb, u8 event)
6110 {
6111 int err = 0;
6112
6113 BT_DBG("chan %p, control %p, skb %p, event %d, state %d", chan,
6114 control, skb, event, chan->rx_state);
6115
6116 if (__valid_reqseq(chan, control->reqseq)) {
6117 switch (chan->rx_state) {
6118 case L2CAP_RX_STATE_RECV:
6119 err = l2cap_rx_state_recv(chan, control, skb, event);
6120 break;
6121 case L2CAP_RX_STATE_SREJ_SENT:
6122 err = l2cap_rx_state_srej_sent(chan, control, skb,
6123 event);
6124 break;
6125 case L2CAP_RX_STATE_WAIT_P:
6126 err = l2cap_rx_state_wait_p(chan, control, skb, event);
6127 break;
6128 case L2CAP_RX_STATE_WAIT_F:
6129 err = l2cap_rx_state_wait_f(chan, control, skb, event);
6130 break;
6131 default:
6132 /* shut it down */
6133 break;
6134 }
6135 } else {
6136 BT_DBG("Invalid reqseq %d (next_tx_seq %d, expected_ack_seq %d",
6137 control->reqseq, chan->next_tx_seq,
6138 chan->expected_ack_seq);
6139 l2cap_send_disconn_req(chan, ECONNRESET);
6140 }
6141
6142 return err;
6143 }
6144
6145 static int l2cap_stream_rx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
6146 struct sk_buff *skb)
6147 {
6148 int err = 0;
6149
6150 BT_DBG("chan %p, control %p, skb %p, state %d", chan, control, skb,
6151 chan->rx_state);
6152
6153 if (l2cap_classify_txseq(chan, control->txseq) ==
6154 L2CAP_TXSEQ_EXPECTED) {
6155 l2cap_pass_to_tx(chan, control);
6156
6157 BT_DBG("buffer_seq %d->%d", chan->buffer_seq,
6158 __next_seq(chan, chan->buffer_seq));
6159
6160 chan->buffer_seq = __next_seq(chan, chan->buffer_seq);
6161
6162 l2cap_reassemble_sdu(chan, skb, control);
6163 } else {
6164 if (chan->sdu) {
6165 kfree_skb(chan->sdu);
6166 chan->sdu = NULL;
6167 }
6168 chan->sdu_last_frag = NULL;
6169 chan->sdu_len = 0;
6170
6171 if (skb) {
6172 BT_DBG("Freeing %p", skb);
6173 kfree_skb(skb);
6174 }
6175 }
6176
6177 chan->last_acked_seq = control->txseq;
6178 chan->expected_tx_seq = __next_seq(chan, control->txseq);
6179
6180 return err;
6181 }
6182
/* Entry point for data frames on an ERTM or streaming channel.
 *
 * Unpacks the control field, verifies the FCS, checks the information
 * payload against the negotiated MPS, and then routes the frame: I-frames
 * go to the ERTM state machine (l2cap_rx) or the streaming receiver
 * (l2cap_stream_rx); S-frames are mapped to RR/REJ/RNR/SREJ events for
 * l2cap_rx.  Always returns 0; protocol errors trigger a disconnect
 * request.  Frames rejected here are freed at 'drop'; otherwise skb
 * disposal is left to the callees.
 */
static int l2cap_data_rcv(struct l2cap_chan *chan, struct sk_buff *skb)
{
	struct l2cap_ctrl *control = &bt_cb(skb)->control;
	u16 len;
	u8 event;

	__unpack_control(chan, skb);

	len = skb->len;

	/*
	 * We can just drop the corrupted I-frame here.
	 * Receiver will miss it and start proper recovery
	 * procedures and ask for retransmission.
	 */
	if (l2cap_check_fcs(chan, skb))
		goto drop;

	/* Reduce len to the information payload only: strip the SDU
	 * length field of a start fragment and the trailing FCS when
	 * CRC16 is in use, then enforce the negotiated MPS.
	 */
	if (!control->sframe && control->sar == L2CAP_SAR_START)
		len -= L2CAP_SDULEN_SIZE;

	if (chan->fcs == L2CAP_FCS_CRC16)
		len -= L2CAP_FCS_SIZE;

	if (len > chan->mps) {
		l2cap_send_disconn_req(chan, ECONNRESET);
		goto drop;
	}

	if (!control->sframe) {
		int err;

		BT_DBG("iframe sar %d, reqseq %d, final %d, txseq %d",
		       control->sar, control->reqseq, control->final,
		       control->txseq);

		/* Validate F-bit - F=0 always valid, F=1 only
		 * valid in TX WAIT_F
		 */
		if (control->final && chan->tx_state != L2CAP_TX_STATE_WAIT_F)
			goto drop;

		if (chan->mode != L2CAP_MODE_STREAMING) {
			event = L2CAP_EV_RECV_IFRAME;
			err = l2cap_rx(chan, control, skb, event);
		} else {
			err = l2cap_stream_rx(chan, control, skb);
		}

		if (err)
			l2cap_send_disconn_req(chan, ECONNRESET);
	} else {
		/* Maps control->super (RR/REJ/RNR/SREJ) to the
		 * corresponding receive event.
		 */
		const u8 rx_func_to_event[4] = {
			L2CAP_EV_RECV_RR, L2CAP_EV_RECV_REJ,
			L2CAP_EV_RECV_RNR, L2CAP_EV_RECV_SREJ
		};

		/* Only I-frames are expected in streaming mode */
		if (chan->mode == L2CAP_MODE_STREAMING)
			goto drop;

		BT_DBG("sframe reqseq %d, final %d, poll %d, super %d",
		       control->reqseq, control->final, control->poll,
		       control->super);

		/* S-frames carry no payload beyond the control field */
		if (len != 0) {
			BT_ERR("Trailing bytes: %d in sframe", len);
			l2cap_send_disconn_req(chan, ECONNRESET);
			goto drop;
		}

		/* Validate F and P bits */
		if (control->final && (control->poll ||
				       chan->tx_state != L2CAP_TX_STATE_WAIT_F))
			goto drop;

		event = rx_func_to_event[control->super];
		if (l2cap_rx(chan, control, skb, event))
			l2cap_send_disconn_req(chan, ECONNRESET);
	}

	return 0;

drop:
	kfree_skb(skb);
	return 0;
}
6270
/* Dispatch an inbound data frame to the channel identified by its CID.
 *
 * If no channel matches, an A2MP channel may be created on demand for
 * L2CAP_CID_A2MP; any other unknown CID drops the frame.  Frames for
 * non-connected channels or oversized basic-mode frames are dropped.
 *
 * NOTE(review): the l2cap_chan_unlock() at 'done' appears to pair with a
 * lock taken inside l2cap_get_chan_by_scid() (or explicitly for a fresh
 * A2MP channel) — confirm against that helper's definition.
 */
static void l2cap_data_channel(struct l2cap_conn *conn, u16 cid,
			       struct sk_buff *skb)
{
	struct l2cap_chan *chan;

	chan = l2cap_get_chan_by_scid(conn, cid);
	if (!chan) {
		if (cid == L2CAP_CID_A2MP) {
			chan = a2mp_channel_create(conn, skb);
			if (!chan) {
				kfree_skb(skb);
				return;
			}

			l2cap_chan_lock(chan);
		} else {
			BT_DBG("unknown cid 0x%4.4x", cid);
			/* Drop packet and return */
			kfree_skb(skb);
			return;
		}
	}

	BT_DBG("chan %p, len %d", chan, skb->len);

	if (chan->state != BT_CONNECTED)
		goto drop;

	switch (chan->mode) {
	case L2CAP_MODE_BASIC:
		/* If socket recv buffers overflows we drop data here
		 * which is *bad* because L2CAP has to be reliable.
		 * But we don't have any other choice. L2CAP doesn't
		 * provide flow control mechanism. */

		if (chan->imtu < skb->len)
			goto drop;

		/* ops->recv() returning 0 means the skb was consumed */
		if (!chan->ops->recv(chan, skb))
			goto done;
		break;

	case L2CAP_MODE_ERTM:
	case L2CAP_MODE_STREAMING:
		/* l2cap_data_rcv() takes over skb disposal entirely */
		l2cap_data_rcv(chan, skb);
		goto done;

	default:
		BT_DBG("chan %p: bad mode 0x%2.2x", chan, chan->mode);
		break;
	}

drop:
	kfree_skb(skb);

done:
	l2cap_chan_unlock(chan);
}
6329
6330 static void l2cap_conless_channel(struct l2cap_conn *conn, __le16 psm,
6331 struct sk_buff *skb)
6332 {
6333 struct l2cap_chan *chan;
6334
6335 chan = l2cap_global_chan_by_psm(0, psm, conn->src, conn->dst);
6336 if (!chan)
6337 goto drop;
6338
6339 BT_DBG("chan %p, len %d", chan, skb->len);
6340
6341 if (chan->state != BT_BOUND && chan->state != BT_CONNECTED)
6342 goto drop;
6343
6344 if (chan->imtu < skb->len)
6345 goto drop;
6346
6347 if (!chan->ops->recv(chan, skb))
6348 return;
6349
6350 drop:
6351 kfree_skb(skb);
6352 }
6353
6354 static void l2cap_att_channel(struct l2cap_conn *conn,
6355 struct sk_buff *skb)
6356 {
6357 struct l2cap_chan *chan;
6358
6359 chan = l2cap_global_chan_by_scid(0, L2CAP_CID_LE_DATA,
6360 conn->src, conn->dst);
6361 if (!chan)
6362 goto drop;
6363
6364 BT_DBG("chan %p, len %d", chan, skb->len);
6365
6366 if (chan->state != BT_BOUND && chan->state != BT_CONNECTED)
6367 goto drop;
6368
6369 if (chan->imtu < skb->len)
6370 goto drop;
6371
6372 if (!chan->ops->recv(chan, skb))
6373 return;
6374
6375 drop:
6376 kfree_skb(skb);
6377 }
6378
6379 static void l2cap_recv_frame(struct l2cap_conn *conn, struct sk_buff *skb)
6380 {
6381 struct l2cap_hdr *lh = (void *) skb->data;
6382 u16 cid, len;
6383 __le16 psm;
6384
6385 skb_pull(skb, L2CAP_HDR_SIZE);
6386 cid = __le16_to_cpu(lh->cid);
6387 len = __le16_to_cpu(lh->len);
6388
6389 if (len != skb->len) {
6390 kfree_skb(skb);
6391 return;
6392 }
6393
6394 BT_DBG("len %d, cid 0x%4.4x", len, cid);
6395
6396 switch (cid) {
6397 case L2CAP_CID_LE_SIGNALING:
6398 case L2CAP_CID_SIGNALING:
6399 l2cap_sig_channel(conn, skb);
6400 break;
6401
6402 case L2CAP_CID_CONN_LESS:
6403 psm = get_unaligned((__le16 *) skb->data);
6404 skb_pull(skb, L2CAP_PSMLEN_SIZE);
6405 l2cap_conless_channel(conn, psm, skb);
6406 break;
6407
6408 case L2CAP_CID_LE_DATA:
6409 l2cap_att_channel(conn, skb);
6410 break;
6411
6412 case L2CAP_CID_SMP:
6413 if (smp_sig_channel(conn, skb))
6414 l2cap_conn_del(conn->hcon, EACCES);
6415 break;
6416
6417 default:
6418 l2cap_data_channel(conn, cid, skb);
6419 break;
6420 }
6421 }
6422
6423 /* ---- L2CAP interface with lower layer (HCI) ---- */
6424
6425 int l2cap_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr)
6426 {
6427 int exact = 0, lm1 = 0, lm2 = 0;
6428 struct l2cap_chan *c;
6429
6430 BT_DBG("hdev %s, bdaddr %pMR", hdev->name, bdaddr);
6431
6432 /* Find listening sockets and check their link_mode */
6433 read_lock(&chan_list_lock);
6434 list_for_each_entry(c, &chan_list, global_l) {
6435 struct sock *sk = c->sk;
6436
6437 if (c->state != BT_LISTEN)
6438 continue;
6439
6440 if (!bacmp(&bt_sk(sk)->src, &hdev->bdaddr)) {
6441 lm1 |= HCI_LM_ACCEPT;
6442 if (test_bit(FLAG_ROLE_SWITCH, &c->flags))
6443 lm1 |= HCI_LM_MASTER;
6444 exact++;
6445 } else if (!bacmp(&bt_sk(sk)->src, BDADDR_ANY)) {
6446 lm2 |= HCI_LM_ACCEPT;
6447 if (test_bit(FLAG_ROLE_SWITCH, &c->flags))
6448 lm2 |= HCI_LM_MASTER;
6449 }
6450 }
6451 read_unlock(&chan_list_lock);
6452
6453 return exact ? lm1 : lm2;
6454 }
6455
6456 void l2cap_connect_cfm(struct hci_conn *hcon, u8 status)
6457 {
6458 struct l2cap_conn *conn;
6459
6460 BT_DBG("hcon %p bdaddr %pMR status %d", hcon, &hcon->dst, status);
6461
6462 if (!status) {
6463 conn = l2cap_conn_add(hcon);
6464 if (conn)
6465 l2cap_conn_ready(conn);
6466 } else {
6467 l2cap_conn_del(hcon, bt_to_errno(status));
6468 }
6469 }
6470
6471 int l2cap_disconn_ind(struct hci_conn *hcon)
6472 {
6473 struct l2cap_conn *conn = hcon->l2cap_data;
6474
6475 BT_DBG("hcon %p", hcon);
6476
6477 if (!conn)
6478 return HCI_ERROR_REMOTE_USER_TERM;
6479 return conn->disc_reason;
6480 }
6481
6482 void l2cap_disconn_cfm(struct hci_conn *hcon, u8 reason)
6483 {
6484 BT_DBG("hcon %p reason %d", hcon, reason);
6485
6486 l2cap_conn_del(hcon, bt_to_errno(reason));
6487 }
6488
6489 static inline void l2cap_check_encryption(struct l2cap_chan *chan, u8 encrypt)
6490 {
6491 if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED)
6492 return;
6493
6494 if (encrypt == 0x00) {
6495 if (chan->sec_level == BT_SECURITY_MEDIUM) {
6496 __set_chan_timer(chan, L2CAP_ENC_TIMEOUT);
6497 } else if (chan->sec_level == BT_SECURITY_HIGH)
6498 l2cap_chan_close(chan, ECONNREFUSED);
6499 } else {
6500 if (chan->sec_level == BT_SECURITY_MEDIUM)
6501 __clear_chan_timer(chan);
6502 }
6503 }
6504
/* HCI callback: authentication/encryption state changed on a link.
 *
 * Walks every channel on the connection under conn->chan_lock, taking and
 * releasing each channel lock in turn, and advances channels whose setup
 * was waiting on security: LE channels become ready, BT_CONNECT channels
 * start their connection, and BT_CONNECT2 channels get a connect response
 * (possibly followed by the first configuration request).  Returns 0.
 */
int l2cap_security_cfm(struct hci_conn *hcon, u8 status, u8 encrypt)
{
	struct l2cap_conn *conn = hcon->l2cap_data;
	struct l2cap_chan *chan;

	if (!conn)
		return 0;

	BT_DBG("conn %p status 0x%2.2x encrypt %u", conn, status, encrypt);

	/* On LE, successful encryption triggers SMP key distribution and
	 * the security timer is no longer needed.
	 */
	if (hcon->type == LE_LINK) {
		if (!status && encrypt)
			smp_distribute_keys(conn, 0);
		cancel_delayed_work(&conn->security_timer);
	}

	mutex_lock(&conn->chan_lock);

	list_for_each_entry(chan, &conn->chan_l, list) {
		l2cap_chan_lock(chan);

		BT_DBG("chan %p scid 0x%4.4x state %s", chan, chan->scid,
		       state_to_string(chan->state));

		/* A2MP fixed channels are not affected by link security */
		if (chan->chan_type == L2CAP_CHAN_CONN_FIX_A2MP) {
			l2cap_chan_unlock(chan);
			continue;
		}

		/* LE data channel: encryption success completes setup */
		if (chan->scid == L2CAP_CID_LE_DATA) {
			if (!status && encrypt) {
				chan->sec_level = hcon->sec_level;
				l2cap_chan_ready(chan);
			}

			l2cap_chan_unlock(chan);
			continue;
		}

		/* Skip channels with a connect request still in flight */
		if (!__l2cap_no_conn_pending(chan)) {
			l2cap_chan_unlock(chan);
			continue;
		}

		/* Already-established channels just get their suspend flag
		 * cleared and the encryption change propagated.
		 */
		if (!status && (chan->state == BT_CONNECTED ||
				chan->state == BT_CONFIG)) {
			struct sock *sk = chan->sk;

			clear_bit(BT_SK_SUSPEND, &bt_sk(sk)->flags);
			sk->sk_state_change(sk);

			l2cap_check_encryption(chan, encrypt);
			l2cap_chan_unlock(chan);
			continue;
		}

		if (chan->state == BT_CONNECT) {
			if (!status) {
				l2cap_start_connection(chan);
			} else {
				__set_chan_timer(chan, L2CAP_DISC_TIMEOUT);
			}
		} else if (chan->state == BT_CONNECT2) {
			/* Incoming connection that was waiting on security:
			 * answer the remote's connect request now.
			 */
			struct sock *sk = chan->sk;
			struct l2cap_conn_rsp rsp;
			__u16 res, stat;

			lock_sock(sk);

			if (!status) {
				if (test_bit(BT_SK_DEFER_SETUP,
					     &bt_sk(sk)->flags)) {
					res = L2CAP_CR_PEND;
					stat = L2CAP_CS_AUTHOR_PEND;
					chan->ops->defer(chan);
				} else {
					__l2cap_state_change(chan, BT_CONFIG);
					res = L2CAP_CR_SUCCESS;
					stat = L2CAP_CS_NO_INFO;
				}
			} else {
				/* Security failed: schedule disconnect and
				 * report a security block to the remote.
				 */
				__l2cap_state_change(chan, BT_DISCONN);
				__set_chan_timer(chan, L2CAP_DISC_TIMEOUT);
				res = L2CAP_CR_SEC_BLOCK;
				stat = L2CAP_CS_NO_INFO;
			}

			release_sock(sk);

			rsp.scid = cpu_to_le16(chan->dcid);
			rsp.dcid = cpu_to_le16(chan->scid);
			rsp.result = cpu_to_le16(res);
			rsp.status = cpu_to_le16(stat);
			l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
				       sizeof(rsp), &rsp);

			/* Accepted: kick off configuration if not already
			 * started.
			 */
			if (!test_bit(CONF_REQ_SENT, &chan->conf_state) &&
			    res == L2CAP_CR_SUCCESS) {
				char buf[128];
				set_bit(CONF_REQ_SENT, &chan->conf_state);
				l2cap_send_cmd(conn, l2cap_get_ident(conn),
					       L2CAP_CONF_REQ,
					       l2cap_build_conf_req(chan, buf),
					       buf);
				chan->num_conf_req++;
			}
		}

		l2cap_chan_unlock(chan);
	}

	mutex_unlock(&conn->chan_lock);

	return 0;
}
6620
/* Receive one ACL fragment from HCI and reassemble L2CAP frames.
 *
 * Start fragments carry the basic L2CAP header, from which the total
 * frame length is learned; continuation fragments are appended to
 * conn->rx_skb until rx_len reaches zero, at which point the complete
 * frame is handed to l2cap_recv_frame().  A complete frame arriving in a
 * single start fragment is dispatched directly and ownership of skb
 * transfers; on every other path the incoming skb is freed here.
 * Framing violations mark the connection unreliable.  Returns 0.
 */
int l2cap_recv_acldata(struct hci_conn *hcon, struct sk_buff *skb, u16 flags)
{
	struct l2cap_conn *conn = hcon->l2cap_data;
	struct l2cap_hdr *hdr;
	int len;

	/* For AMP controller do not create l2cap conn */
	if (!conn && hcon->hdev->dev_type != HCI_BREDR)
		goto drop;

	if (!conn)
		conn = l2cap_conn_add(hcon);

	if (!conn)
		goto drop;

	BT_DBG("conn %p len %d flags 0x%x", conn, skb->len, flags);

	switch (flags) {
	case ACL_START:
	case ACL_START_NO_FLUSH:
	case ACL_COMPLETE:
		/* A new start while a frame is still being reassembled
		 * means the previous frame was truncated: discard it.
		 */
		if (conn->rx_len) {
			BT_ERR("Unexpected start frame (len %d)", skb->len);
			kfree_skb(conn->rx_skb);
			conn->rx_skb = NULL;
			conn->rx_len = 0;
			l2cap_conn_unreliable(conn, ECOMM);
		}

		/* Start fragment always begin with Basic L2CAP header */
		if (skb->len < L2CAP_HDR_SIZE) {
			BT_ERR("Frame is too short (len %d)", skb->len);
			l2cap_conn_unreliable(conn, ECOMM);
			goto drop;
		}

		hdr = (struct l2cap_hdr *) skb->data;
		len = __le16_to_cpu(hdr->len) + L2CAP_HDR_SIZE;

		if (len == skb->len) {
			/* Complete frame received */
			l2cap_recv_frame(conn, skb);
			return 0;
		}

		BT_DBG("Start: total len %d, frag len %d", len, skb->len);

		if (skb->len > len) {
			BT_ERR("Frame is too long (len %d, expected len %d)",
			       skb->len, len);
			l2cap_conn_unreliable(conn, ECOMM);
			goto drop;
		}

		/* Allocate skb for the complete frame (with header) */
		conn->rx_skb = bt_skb_alloc(len, GFP_KERNEL);
		if (!conn->rx_skb)
			goto drop;

		skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
					  skb->len);
		conn->rx_len = len - skb->len;
		break;

	case ACL_CONT:
		BT_DBG("Cont: frag len %d (expecting %d)", skb->len, conn->rx_len);

		/* Continuation without a pending start is a framing error */
		if (!conn->rx_len) {
			BT_ERR("Unexpected continuation frame (len %d)", skb->len);
			l2cap_conn_unreliable(conn, ECOMM);
			goto drop;
		}

		/* Fragment would overflow the declared frame length */
		if (skb->len > conn->rx_len) {
			BT_ERR("Fragment is too long (len %d, expected %d)",
			       skb->len, conn->rx_len);
			kfree_skb(conn->rx_skb);
			conn->rx_skb = NULL;
			conn->rx_len = 0;
			l2cap_conn_unreliable(conn, ECOMM);
			goto drop;
		}

		skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
					  skb->len);
		conn->rx_len -= skb->len;

		if (!conn->rx_len) {
			/* Complete frame received */
			l2cap_recv_frame(conn, conn->rx_skb);
			conn->rx_skb = NULL;
		}
		break;
	}

drop:
	kfree_skb(skb);
	return 0;
}
6721
/* debugfs seq_file show: print one line per registered L2CAP channel —
 * source/destination bdaddr, state, PSM, CIDs, MTUs, security level and
 * mode — while holding the global channel-list read lock.
 */
static int l2cap_debugfs_show(struct seq_file *f, void *p)
{
	struct l2cap_chan *c;

	read_lock(&chan_list_lock);

	list_for_each_entry(c, &chan_list, global_l) {
		struct sock *sk = c->sk;

		seq_printf(f, "%pMR %pMR %d %d 0x%4.4x 0x%4.4x %d %d %d %d\n",
			   &bt_sk(sk)->src, &bt_sk(sk)->dst,
			   c->state, __le16_to_cpu(c->psm),
			   c->scid, c->dcid, c->imtu, c->omtu,
			   c->sec_level, c->mode);
	}

	read_unlock(&chan_list_lock);

	return 0;
}
6742
/* debugfs open handler: bind the seq_file show routine to this file */
static int l2cap_debugfs_open(struct inode *inode, struct file *file)
{
	return single_open(file, l2cap_debugfs_show, inode->i_private);
}
6747
/* File operations for the read-only "l2cap" debugfs entry */
static const struct file_operations l2cap_debugfs_fops = {
	.open = l2cap_debugfs_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};

/* dentry of the "l2cap" debugfs file; NULL if it was never created */
static struct dentry *l2cap_debugfs;
6756
/* Subsystem init: register the L2CAP socket layer and, when the
 * bluetooth debugfs root exists, expose channel state via a read-only
 * "l2cap" file.  Failure to create the debugfs file is logged but not
 * fatal.  Returns 0 on success or the socket-layer error.
 */
int __init l2cap_init(void)
{
	int err;

	err = l2cap_init_sockets();
	if (err < 0)
		return err;

	if (bt_debugfs) {
		l2cap_debugfs = debugfs_create_file("l2cap", 0444, bt_debugfs,
						    NULL, &l2cap_debugfs_fops);
		if (!l2cap_debugfs)
			BT_ERR("Failed to create L2CAP debug file");
	}

	return 0;
}
6774
/* Subsystem teardown: remove the debugfs entry (debugfs_remove() accepts
 * NULL, so this is safe even if creation failed) and unregister sockets.
 */
void l2cap_exit(void)
{
	debugfs_remove(l2cap_debugfs);
	l2cap_cleanup_sockets();
}
6780
/* Module parameter: allow disabling ERTM at load time (writable via
 * sysfs, mode 0644).
 */
module_param(disable_ertm, bool, 0644);
MODULE_PARM_DESC(disable_ertm, "Disable enhanced retransmission mode");