Bluetooth: Add LE signaling commands handling
[GitHub/mt8127/android_kernel_alcatel_ttab.git] / net / bluetooth / l2cap_core.c
... / ...
CommitLineData
1/*
2 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (C) 2000-2001 Qualcomm Incorporated
4 Copyright (C) 2009-2010 Gustavo F. Padovan <gustavo@padovan.org>
5 Copyright (C) 2010 Google Inc.
6
7 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
8
9 This program is free software; you can redistribute it and/or modify
10 it under the terms of the GNU General Public License version 2 as
11 published by the Free Software Foundation;
12
13 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
14 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
15 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
16 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
17 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
18 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
19 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
20 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
21
22 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
23 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
24 SOFTWARE IS DISCLAIMED.
25*/
26
27/* Bluetooth L2CAP core. */
28
29#include <linux/module.h>
30
31#include <linux/types.h>
32#include <linux/capability.h>
33#include <linux/errno.h>
34#include <linux/kernel.h>
35#include <linux/sched.h>
36#include <linux/slab.h>
37#include <linux/poll.h>
38#include <linux/fcntl.h>
39#include <linux/init.h>
40#include <linux/interrupt.h>
41#include <linux/socket.h>
42#include <linux/skbuff.h>
43#include <linux/list.h>
44#include <linux/device.h>
45#include <linux/debugfs.h>
46#include <linux/seq_file.h>
47#include <linux/uaccess.h>
48#include <linux/crc16.h>
49#include <net/sock.h>
50
51#include <asm/system.h>
52#include <asm/unaligned.h>
53
54#include <net/bluetooth/bluetooth.h>
55#include <net/bluetooth/hci_core.h>
56#include <net/bluetooth/l2cap.h>
57
/* Module parameter: non-zero disables ERTM and streaming modes. */
int disable_ertm;

/* Locally supported L2CAP features reported in Information Responses. */
static u32 l2cap_feat_mask = L2CAP_FEAT_FIXED_CHAN;
/* Fixed channel bitmap; 0x02 presumably marks the signalling channel
 * (bit 1 of octet 0) — confirm against the L2CAP fixed-channel map. */
static u8 l2cap_fixed_chan[8] = { 0x02, };

/* Workqueue servicing deferred "local busy" receive processing. */
static struct workqueue_struct *_busy_wq;

/* Global list of all L2CAP sockets. */
struct bt_sock_list l2cap_sk_list = {
	.lock = __RW_LOCK_UNLOCKED(l2cap_sk_list.lock)
};

static void l2cap_busy_work(struct work_struct *work);

static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
				u8 code, u8 ident, u16 dlen, void *data);

static int l2cap_ertm_data_rcv(struct sock *sk, struct sk_buff *skb);
76/* ---- L2CAP channels ---- */
77static struct sock *__l2cap_get_chan_by_dcid(struct l2cap_chan_list *l, u16 cid)
78{
79 struct sock *s;
80 for (s = l->head; s; s = l2cap_pi(s)->next_c) {
81 if (l2cap_pi(s)->dcid == cid)
82 break;
83 }
84 return s;
85}
86
87static struct sock *__l2cap_get_chan_by_scid(struct l2cap_chan_list *l, u16 cid)
88{
89 struct sock *s;
90 for (s = l->head; s; s = l2cap_pi(s)->next_c) {
91 if (l2cap_pi(s)->scid == cid)
92 break;
93 }
94 return s;
95}
96
/* Find a channel with the given source CID.
 * On success the socket is returned bh-locked; the caller must
 * bh_unlock_sock() it when done.  Returns NULL if no channel matches. */
static inline struct sock *l2cap_get_chan_by_scid(struct l2cap_chan_list *l, u16 cid)
{
	struct sock *s;
	read_lock(&l->lock);
	s = __l2cap_get_chan_by_scid(l, cid);
	if (s)
		bh_lock_sock(s);
	read_unlock(&l->lock);
	return s;
}
109
110static struct sock *__l2cap_get_chan_by_ident(struct l2cap_chan_list *l, u8 ident)
111{
112 struct sock *s;
113 for (s = l->head; s; s = l2cap_pi(s)->next_c) {
114 if (l2cap_pi(s)->ident == ident)
115 break;
116 }
117 return s;
118}
119
/* Find a channel by signalling command identifier.
 * On success the socket is returned bh-locked; the caller must
 * bh_unlock_sock() it when done.  Returns NULL if no channel matches. */
static inline struct sock *l2cap_get_chan_by_ident(struct l2cap_chan_list *l, u8 ident)
{
	struct sock *s;
	read_lock(&l->lock);
	s = __l2cap_get_chan_by_ident(l, ident);
	if (s)
		bh_lock_sock(s);
	read_unlock(&l->lock);
	return s;
}
130
131static u16 l2cap_alloc_cid(struct l2cap_chan_list *l)
132{
133 u16 cid = L2CAP_CID_DYN_START;
134
135 for (; cid < L2CAP_CID_DYN_END; cid++) {
136 if (!__l2cap_get_chan_by_scid(l, cid))
137 return cid;
138 }
139
140 return 0;
141}
142
/* Insert the channel at the head of the connection's channel list.
 * Takes a reference on the socket; caller holds the list write lock. */
static inline void __l2cap_chan_link(struct l2cap_chan_list *l, struct sock *sk)
{
	sock_hold(sk);

	if (l->head)
		l2cap_pi(l->head)->prev_c = sk;

	l2cap_pi(sk)->next_c = l->head;
	l2cap_pi(sk)->prev_c = NULL;
	l->head = sk;
}
154
/* Remove the channel from the connection's channel list and drop the
 * reference taken by __l2cap_chan_link().  Takes the list write lock
 * itself. */
static inline void l2cap_chan_unlink(struct l2cap_chan_list *l, struct sock *sk)
{
	struct sock *next = l2cap_pi(sk)->next_c, *prev = l2cap_pi(sk)->prev_c;

	write_lock_bh(&l->lock);
	if (sk == l->head)
		l->head = next;

	if (next)
		l2cap_pi(next)->prev_c = prev;
	if (prev)
		l2cap_pi(prev)->next_c = next;
	write_unlock_bh(&l->lock);

	__sock_put(sk);
}
171
/* Attach a channel socket to a connection and assign its CIDs based on
 * socket type: dynamic CIDs for connection-oriented BR/EDR channels,
 * the fixed LE data CID for LE links, the connectionless CID for
 * SOCK_DGRAM, and the signalling CID for raw sockets.
 * Caller holds the channel list write lock (see l2cap_chan_add). */
static void __l2cap_chan_add(struct l2cap_conn *conn, struct sock *sk, struct sock *parent)
{
	struct l2cap_chan_list *l = &conn->chan_list;

	BT_DBG("conn %p, psm 0x%2.2x, dcid 0x%4.4x", conn,
			l2cap_pi(sk)->psm, l2cap_pi(sk)->dcid);

	/* 0x13: "remote user terminated connection" default disc reason */
	conn->disc_reason = 0x13;

	l2cap_pi(sk)->conn = conn;

	if (sk->sk_type == SOCK_SEQPACKET || sk->sk_type == SOCK_STREAM) {
		if (conn->hcon->type == LE_LINK) {
			/* LE connection */
			l2cap_pi(sk)->omtu = L2CAP_LE_DEFAULT_MTU;
			l2cap_pi(sk)->scid = L2CAP_CID_LE_DATA;
			l2cap_pi(sk)->dcid = L2CAP_CID_LE_DATA;
		} else {
			/* Alloc CID for connection-oriented socket */
			l2cap_pi(sk)->scid = l2cap_alloc_cid(l);
			l2cap_pi(sk)->omtu = L2CAP_DEFAULT_MTU;
		}
	} else if (sk->sk_type == SOCK_DGRAM) {
		/* Connectionless socket */
		l2cap_pi(sk)->scid = L2CAP_CID_CONN_LESS;
		l2cap_pi(sk)->dcid = L2CAP_CID_CONN_LESS;
		l2cap_pi(sk)->omtu = L2CAP_DEFAULT_MTU;
	} else {
		/* Raw socket can send/recv signalling messages only */
		l2cap_pi(sk)->scid = L2CAP_CID_SIGNALING;
		l2cap_pi(sk)->dcid = L2CAP_CID_SIGNALING;
		l2cap_pi(sk)->omtu = L2CAP_DEFAULT_MTU;
	}

	__l2cap_chan_link(l, sk);

	/* Incoming channel: queue on the listening parent's accept queue. */
	if (parent)
		bt_accept_enqueue(parent, sk);
}
211
/* Delete channel.
 * Detaches the channel from its connection, marks the socket closed
 * and zapped, wakes the owner (or the listening parent), and tears
 * down all ERTM state (timers, SREJ/BUSY queues, srej list).
 * Must be called on the locked socket. */
void l2cap_chan_del(struct sock *sk, int err)
{
	struct l2cap_conn *conn = l2cap_pi(sk)->conn;
	struct sock *parent = bt_sk(sk)->parent;

	l2cap_sock_clear_timer(sk);

	BT_DBG("sk %p, conn %p, err %d", sk, conn, err);

	if (conn) {
		/* Unlink from channel list */
		l2cap_chan_unlink(&conn->chan_list, sk);
		l2cap_pi(sk)->conn = NULL;
		hci_conn_put(conn->hcon);
	}

	sk->sk_state = BT_CLOSED;
	sock_set_flag(sk, SOCK_ZAPPED);

	if (err)
		sk->sk_err = err;

	if (parent) {
		/* Incoming channel that was never accepted: drop it from
		 * the parent's accept queue and notify the listener. */
		bt_accept_unlink(sk);
		parent->sk_data_ready(parent, 0);
	} else
		sk->sk_state_change(sk);

	skb_queue_purge(TX_QUEUE(sk));

	if (l2cap_pi(sk)->mode == L2CAP_MODE_ERTM) {
		struct srej_list *l, *tmp;

		del_timer(&l2cap_pi(sk)->retrans_timer);
		del_timer(&l2cap_pi(sk)->monitor_timer);
		del_timer(&l2cap_pi(sk)->ack_timer);

		skb_queue_purge(SREJ_QUEUE(sk));
		skb_queue_purge(BUSY_QUEUE(sk));

		/* Free any pending selective-reject bookkeeping entries. */
		list_for_each_entry_safe(l, tmp, SREJ_LIST(sk), list) {
			list_del(&l->list);
			kfree(l);
		}
	}
}
260
/* Map the socket's security level onto an HCI authentication
 * requirement.  Raw sockets ask for dedicated bonding (pairing tools),
 * PSM 0x0001 (SDP) never bonds, everything else uses general bonding. */
static inline u8 l2cap_get_auth_type(struct sock *sk)
{
	if (sk->sk_type == SOCK_RAW) {
		switch (l2cap_pi(sk)->sec_level) {
		case BT_SECURITY_HIGH:
			return HCI_AT_DEDICATED_BONDING_MITM;
		case BT_SECURITY_MEDIUM:
			return HCI_AT_DEDICATED_BONDING;
		default:
			return HCI_AT_NO_BONDING;
		}
	} else if (l2cap_pi(sk)->psm == cpu_to_le16(0x0001)) {
		/* SDP: downgrade the default "low" to the dedicated SDP
		 * security level so it is never blocked by security mode. */
		if (l2cap_pi(sk)->sec_level == BT_SECURITY_LOW)
			l2cap_pi(sk)->sec_level = BT_SECURITY_SDP;

		if (l2cap_pi(sk)->sec_level == BT_SECURITY_HIGH)
			return HCI_AT_NO_BONDING_MITM;
		else
			return HCI_AT_NO_BONDING;
	} else {
		switch (l2cap_pi(sk)->sec_level) {
		case BT_SECURITY_HIGH:
			return HCI_AT_GENERAL_BONDING_MITM;
		case BT_SECURITY_MEDIUM:
			return HCI_AT_GENERAL_BONDING;
		default:
			return HCI_AT_NO_BONDING;
		}
	}
}
291
/* Service level security.
 * Asks the HCI layer to enforce the channel's security level on the
 * underlying link; returns non-zero when the link already satisfies it. */
static inline int l2cap_check_security(struct sock *sk)
{
	struct l2cap_conn *conn = l2cap_pi(sk)->conn;
	__u8 auth_type;

	auth_type = l2cap_get_auth_type(sk);

	return hci_conn_security(conn->hcon, l2cap_pi(sk)->sec_level,
								auth_type);
}
303
304u8 l2cap_get_ident(struct l2cap_conn *conn)
305{
306 u8 id;
307
308 /* Get next available identificator.
309 * 1 - 128 are used by kernel.
310 * 129 - 199 are reserved.
311 * 200 - 254 are used by utilities like l2ping, etc.
312 */
313
314 spin_lock_bh(&conn->lock);
315
316 if (++conn->tx_ident > 128)
317 conn->tx_ident = 1;
318
319 id = conn->tx_ident;
320
321 spin_unlock_bh(&conn->lock);
322
323 return id;
324}
325
/* Build and transmit a signalling command (code/ident/payload) on the
 * connection's signalling channel.  Silently drops the command if the
 * skb cannot be allocated. */
void l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len, void *data)
{
	struct sk_buff *skb = l2cap_build_cmd(conn, code, ident, len, data);
	u8 flags;

	BT_DBG("code 0x%2.2x", code);

	if (!skb)
		return;

	/* Signalling is never flushable; prefer NO_FLUSH when supported. */
	if (lmp_no_flush_capable(conn->hcon->hdev))
		flags = ACL_START_NO_FLUSH;
	else
		flags = ACL_START;

	hci_send_acl(conn->hcon, skb, flags);
}
343
/* Build and send an ERTM supervisory frame (S-frame) carrying the
 * given control field.  Folds in any pending F-bit / P-bit and appends
 * a CRC16 FCS when the channel uses it.  No-op unless connected. */
static inline void l2cap_send_sframe(struct l2cap_pinfo *pi, u16 control)
{
	struct sk_buff *skb;
	struct l2cap_hdr *lh;
	struct l2cap_conn *conn = pi->conn;
	struct sock *sk = (struct sock *)pi;
	int count, hlen = L2CAP_HDR_SIZE + 2;	/* hdr + 16-bit control */
	u8 flags;

	if (sk->sk_state != BT_CONNECTED)
		return;

	if (pi->fcs == L2CAP_FCS_CRC16)
		hlen += 2;	/* room for the FCS */

	BT_DBG("pi %p, control 0x%2.2x", pi, control);

	count = min_t(unsigned int, conn->mtu, hlen);
	control |= L2CAP_CTRL_FRAME_TYPE;	/* mark as S-frame */

	/* Consume a pending Final bit, if any. */
	if (pi->conn_state & L2CAP_CONN_SEND_FBIT) {
		control |= L2CAP_CTRL_FINAL;
		pi->conn_state &= ~L2CAP_CONN_SEND_FBIT;
	}

	/* Consume a pending Poll bit, if any. */
	if (pi->conn_state & L2CAP_CONN_SEND_PBIT) {
		control |= L2CAP_CTRL_POLL;
		pi->conn_state &= ~L2CAP_CONN_SEND_PBIT;
	}

	skb = bt_skb_alloc(count, GFP_ATOMIC);
	if (!skb)
		return;

	lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
	lh->len = cpu_to_le16(hlen - L2CAP_HDR_SIZE);
	lh->cid = cpu_to_le16(pi->dcid);
	put_unaligned_le16(control, skb_put(skb, 2));

	if (pi->fcs == L2CAP_FCS_CRC16) {
		/* FCS covers everything before the FCS field itself. */
		u16 fcs = crc16(0, (u8 *)lh, count - 2);
		put_unaligned_le16(fcs, skb_put(skb, 2));
	}

	if (lmp_no_flush_capable(conn->hcon->hdev))
		flags = ACL_START_NO_FLUSH;
	else
		flags = ACL_START;

	hci_send_acl(pi->conn->hcon, skb, flags);
}
395
396static inline void l2cap_send_rr_or_rnr(struct l2cap_pinfo *pi, u16 control)
397{
398 if (pi->conn_state & L2CAP_CONN_LOCAL_BUSY) {
399 control |= L2CAP_SUPER_RCV_NOT_READY;
400 pi->conn_state |= L2CAP_CONN_RNR_SENT;
401 } else
402 control |= L2CAP_SUPER_RCV_READY;
403
404 control |= pi->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT;
405
406 l2cap_send_sframe(pi, control);
407}
408
409static inline int __l2cap_no_conn_pending(struct sock *sk)
410{
411 return !(l2cap_pi(sk)->conf_state & L2CAP_CONF_CONNECT_PEND);
412}
413
/* Start channel establishment: send a Connect Request once the remote
 * feature mask is known and security is satisfied; otherwise kick off
 * (or wait for) the feature-mask Information Request exchange first. */
static void l2cap_do_start(struct sock *sk)
{
	struct l2cap_conn *conn = l2cap_pi(sk)->conn;

	if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) {
		/* Information exchange in flight: wait for it to finish. */
		if (!(conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE))
			return;

		if (l2cap_check_security(sk) && __l2cap_no_conn_pending(sk)) {
			struct l2cap_conn_req req;
			req.scid = cpu_to_le16(l2cap_pi(sk)->scid);
			req.psm = l2cap_pi(sk)->psm;

			l2cap_pi(sk)->ident = l2cap_get_ident(conn);
			l2cap_pi(sk)->conf_state |= L2CAP_CONF_CONNECT_PEND;

			l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
					L2CAP_CONN_REQ, sizeof(req), &req);
		}
	} else {
		/* First channel on this link: query the peer's features. */
		struct l2cap_info_req req;
		req.type = cpu_to_le16(L2CAP_IT_FEAT_MASK);

		conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
		conn->info_ident = l2cap_get_ident(conn);

		mod_timer(&conn->info_timer, jiffies +
					msecs_to_jiffies(L2CAP_INFO_TIMEOUT));

		l2cap_send_cmd(conn, conn->info_ident,
					L2CAP_INFO_REQ, sizeof(req), &req);
	}
}
447
448static inline int l2cap_mode_supported(__u8 mode, __u32 feat_mask)
449{
450 u32 local_feat_mask = l2cap_feat_mask;
451 if (!disable_ertm)
452 local_feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING;
453
454 switch (mode) {
455 case L2CAP_MODE_ERTM:
456 return L2CAP_FEAT_ERTM & feat_mask & local_feat_mask;
457 case L2CAP_MODE_STREAMING:
458 return L2CAP_FEAT_STREAMING & feat_mask & local_feat_mask;
459 default:
460 return 0x00;
461 }
462}
463
/* Tear down the channel: purge pending transmissions, stop the ERTM
 * timers, send a Disconnect Request and move the socket to BT_DISCONN
 * with the given error. */
void l2cap_send_disconn_req(struct l2cap_conn *conn, struct sock *sk, int err)
{
	struct l2cap_disconn_req req;

	if (!conn)
		return;

	skb_queue_purge(TX_QUEUE(sk));

	if (l2cap_pi(sk)->mode == L2CAP_MODE_ERTM) {
		del_timer(&l2cap_pi(sk)->retrans_timer);
		del_timer(&l2cap_pi(sk)->monitor_timer);
		del_timer(&l2cap_pi(sk)->ack_timer);
	}

	req.dcid = cpu_to_le16(l2cap_pi(sk)->dcid);
	req.scid = cpu_to_le16(l2cap_pi(sk)->scid);
	l2cap_send_cmd(conn, l2cap_get_ident(conn),
			L2CAP_DISCONN_REQ, sizeof(req), &req);

	sk->sk_state = BT_DISCONN;
	sk->sk_err = err;
}
487
488/* ---- L2CAP connections ---- */
489static void l2cap_conn_start(struct l2cap_conn *conn)
490{
491 struct l2cap_chan_list *l = &conn->chan_list;
492 struct sock_del_list del, *tmp1, *tmp2;
493 struct sock *sk;
494
495 BT_DBG("conn %p", conn);
496
497 INIT_LIST_HEAD(&del.list);
498
499 read_lock(&l->lock);
500
501 for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
502 bh_lock_sock(sk);
503
504 if (sk->sk_type != SOCK_SEQPACKET &&
505 sk->sk_type != SOCK_STREAM) {
506 bh_unlock_sock(sk);
507 continue;
508 }
509
510 if (sk->sk_state == BT_CONNECT) {
511 struct l2cap_conn_req req;
512
513 if (!l2cap_check_security(sk) ||
514 !__l2cap_no_conn_pending(sk)) {
515 bh_unlock_sock(sk);
516 continue;
517 }
518
519 if (!l2cap_mode_supported(l2cap_pi(sk)->mode,
520 conn->feat_mask)
521 && l2cap_pi(sk)->conf_state &
522 L2CAP_CONF_STATE2_DEVICE) {
523 tmp1 = kzalloc(sizeof(struct sock_del_list),
524 GFP_ATOMIC);
525 tmp1->sk = sk;
526 list_add_tail(&tmp1->list, &del.list);
527 bh_unlock_sock(sk);
528 continue;
529 }
530
531 req.scid = cpu_to_le16(l2cap_pi(sk)->scid);
532 req.psm = l2cap_pi(sk)->psm;
533
534 l2cap_pi(sk)->ident = l2cap_get_ident(conn);
535 l2cap_pi(sk)->conf_state |= L2CAP_CONF_CONNECT_PEND;
536
537 l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
538 L2CAP_CONN_REQ, sizeof(req), &req);
539
540 } else if (sk->sk_state == BT_CONNECT2) {
541 struct l2cap_conn_rsp rsp;
542 char buf[128];
543 rsp.scid = cpu_to_le16(l2cap_pi(sk)->dcid);
544 rsp.dcid = cpu_to_le16(l2cap_pi(sk)->scid);
545
546 if (l2cap_check_security(sk)) {
547 if (bt_sk(sk)->defer_setup) {
548 struct sock *parent = bt_sk(sk)->parent;
549 rsp.result = cpu_to_le16(L2CAP_CR_PEND);
550 rsp.status = cpu_to_le16(L2CAP_CS_AUTHOR_PEND);
551 parent->sk_data_ready(parent, 0);
552
553 } else {
554 sk->sk_state = BT_CONFIG;
555 rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
556 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
557 }
558 } else {
559 rsp.result = cpu_to_le16(L2CAP_CR_PEND);
560 rsp.status = cpu_to_le16(L2CAP_CS_AUTHEN_PEND);
561 }
562
563 l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
564 L2CAP_CONN_RSP, sizeof(rsp), &rsp);
565
566 if (l2cap_pi(sk)->conf_state & L2CAP_CONF_REQ_SENT ||
567 rsp.result != L2CAP_CR_SUCCESS) {
568 bh_unlock_sock(sk);
569 continue;
570 }
571
572 l2cap_pi(sk)->conf_state |= L2CAP_CONF_REQ_SENT;
573 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
574 l2cap_build_conf_req(sk, buf), buf);
575 l2cap_pi(sk)->num_conf_req++;
576 }
577
578 bh_unlock_sock(sk);
579 }
580
581 read_unlock(&l->lock);
582
583 list_for_each_entry_safe(tmp1, tmp2, &del.list, list) {
584 bh_lock_sock(tmp1->sk);
585 __l2cap_sock_close(tmp1->sk, ECONNRESET);
586 bh_unlock_sock(tmp1->sk);
587 list_del(&tmp1->list);
588 kfree(tmp1);
589 }
590}
591
/* Find socket with cid and source bdaddr.
 * Prefers an exact source-address match, falling back to a socket
 * bound to BDADDR_ANY.  Returns the closest match bh-locked (caller
 * must unlock), or NULL. */
static struct sock *l2cap_get_sock_by_scid(int state, __le16 cid, bdaddr_t *src)
{
	struct sock *s, *sk = NULL, *sk1 = NULL;
	struct hlist_node *node;

	read_lock(&l2cap_sk_list.lock);

	sk_for_each(sk, node, &l2cap_sk_list.head) {
		if (state && sk->sk_state != state)
			continue;

		if (l2cap_pi(sk)->scid == cid) {
			/* Exact match. */
			if (!bacmp(&bt_sk(sk)->src, src))
				break;

			/* Closest match */
			if (!bacmp(&bt_sk(sk)->src, BDADDR_ANY))
				sk1 = sk;
		}
	}
	/* node != NULL iff the loop broke out on an exact match. */
	s = node ? sk : sk1;
	if (s)
		bh_lock_sock(s);
	read_unlock(&l2cap_sk_list.lock);

	return s;
}
623
/* Handle an incoming LE connection: if a socket is listening on the
 * LE data CID, create a child socket, attach it to the connection and
 * mark it connected.  The listener returned by l2cap_get_sock_by_scid()
 * is bh-locked, so every path falls through to the unlock at "clean". */
static void l2cap_le_conn_ready(struct l2cap_conn *conn)
{
	struct l2cap_chan_list *list = &conn->chan_list;
	struct sock *parent, *uninitialized_var(sk);

	BT_DBG("");

	/* Check if we have socket listening on cid */
	parent = l2cap_get_sock_by_scid(BT_LISTEN, L2CAP_CID_LE_DATA,
							conn->src);
	if (!parent)
		return;

	/* Check for backlog size */
	if (sk_acceptq_is_full(parent)) {
		BT_DBG("backlog full %d", parent->sk_ack_backlog);
		goto clean;
	}

	sk = l2cap_sock_alloc(sock_net(parent), NULL, BTPROTO_L2CAP, GFP_ATOMIC);
	if (!sk)
		goto clean;

	write_lock_bh(&list->lock);

	hci_conn_hold(conn->hcon);

	l2cap_sock_init(sk, parent);
	bacpy(&bt_sk(sk)->src, conn->src);
	bacpy(&bt_sk(sk)->dst, conn->dst);

	__l2cap_chan_add(conn, sk, parent);

	l2cap_sock_set_timer(sk, sk->sk_sndtimeo);

	sk->sk_state = BT_CONNECTED;
	parent->sk_data_ready(parent, 0);

	write_unlock_bh(&list->lock);

clean:
	bh_unlock_sock(parent);
}
667
/* Called when the underlying link becomes usable: accept incoming LE
 * connections, mark LE and non-connection-oriented channels connected,
 * and continue setup for channels waiting in BT_CONNECT. */
static void l2cap_conn_ready(struct l2cap_conn *conn)
{
	struct l2cap_chan_list *l = &conn->chan_list;
	struct sock *sk;

	BT_DBG("conn %p", conn);

	/* Incoming LE link: hand it to a listening LE data socket. */
	if (!conn->hcon->out && conn->hcon->type == LE_LINK)
		l2cap_le_conn_ready(conn);

	read_lock(&l->lock);

	for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
		bh_lock_sock(sk);

		/* LE channels need no further signalling to be usable. */
		if (conn->hcon->type == LE_LINK) {
			l2cap_sock_clear_timer(sk);
			sk->sk_state = BT_CONNECTED;
			sk->sk_state_change(sk);
		}

		if (sk->sk_type != SOCK_SEQPACKET &&
				sk->sk_type != SOCK_STREAM) {
			/* Raw/dgram channels are ready once the link is up. */
			l2cap_sock_clear_timer(sk);
			sk->sk_state = BT_CONNECTED;
			sk->sk_state_change(sk);
		} else if (sk->sk_state == BT_CONNECT)
			l2cap_do_start(sk);

		bh_unlock_sock(sk);
	}

	read_unlock(&l->lock);
}
702
/* Notify sockets that we cannot guaranty reliability anymore.
 * Sets sk_err on every channel that asked for reliable delivery
 * (force_reliable), e.g. after an ACL data corruption indication. */
static void l2cap_conn_unreliable(struct l2cap_conn *conn, int err)
{
	struct l2cap_chan_list *l = &conn->chan_list;
	struct sock *sk;

	BT_DBG("conn %p", conn);

	read_lock(&l->lock);

	for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
		if (l2cap_pi(sk)->force_reliable)
			sk->sk_err = err;
	}

	read_unlock(&l->lock);
}
720
721static void l2cap_info_timeout(unsigned long arg)
722{
723 struct l2cap_conn *conn = (void *) arg;
724
725 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
726 conn->info_ident = 0;
727
728 l2cap_conn_start(conn);
729}
730
/* Create (or return the existing) L2CAP connection object for an HCI
 * link.  Returns NULL on allocation failure, or the existing object /
 * NULL unchanged when one already exists or status is non-zero. */
static struct l2cap_conn *l2cap_conn_add(struct hci_conn *hcon, u8 status)
{
	struct l2cap_conn *conn = hcon->l2cap_data;

	if (conn || status)
		return conn;

	conn = kzalloc(sizeof(struct l2cap_conn), GFP_ATOMIC);
	if (!conn)
		return NULL;

	hcon->l2cap_data = conn;
	conn->hcon = hcon;

	BT_DBG("hcon %p conn %p", hcon, conn);

	/* LE links may use a dedicated, smaller controller MTU. */
	if (hcon->hdev->le_mtu && hcon->type == LE_LINK)
		conn->mtu = hcon->hdev->le_mtu;
	else
		conn->mtu = hcon->hdev->acl_mtu;

	conn->src = &hcon->hdev->bdaddr;
	conn->dst = &hcon->dst;

	conn->feat_mask = 0;

	spin_lock_init(&conn->lock);
	rwlock_init(&conn->chan_list.lock);

	/* LE has no Information Request procedure, hence no info timer. */
	if (hcon->type != LE_LINK)
		setup_timer(&conn->info_timer, l2cap_info_timeout,
						(unsigned long) conn);

	/* 0x13: "remote user terminated connection" default disc reason */
	conn->disc_reason = 0x13;

	return conn;
}
768
/* Destroy the L2CAP connection object: kill every remaining channel,
 * stop the info timer and free the per-link state. */
static void l2cap_conn_del(struct hci_conn *hcon, int err)
{
	struct l2cap_conn *conn = hcon->l2cap_data;
	struct sock *sk;

	if (!conn)
		return;

	BT_DBG("hcon %p conn %p, err %d", hcon, conn, err);

	/* Drop any partially reassembled ACL frame. */
	kfree_skb(conn->rx_skb);

	/* Kill channels */
	while ((sk = conn->chan_list.head)) {
		bh_lock_sock(sk);
		l2cap_chan_del(sk, err);
		bh_unlock_sock(sk);
		l2cap_sock_kill(sk);
	}

	/* Timer only exists once an Information Request was sent. */
	if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT)
		del_timer_sync(&conn->info_timer);

	hcon->l2cap_data = NULL;
	kfree(conn);
}
795
/* Locked wrapper around __l2cap_chan_add(): attach a channel to the
 * connection under the channel-list write lock. */
static inline void l2cap_chan_add(struct l2cap_conn *conn, struct sock *sk, struct sock *parent)
{
	struct l2cap_chan_list *l = &conn->chan_list;
	write_lock_bh(&l->lock);
	__l2cap_chan_add(conn, sk, parent);
	write_unlock_bh(&l->lock);
}
803
804/* ---- Socket interface ---- */
805
/* Find socket with psm and source bdaddr.
 * Prefers an exact source-address match, falling back to a socket
 * bound to BDADDR_ANY.  Unlike l2cap_get_sock_by_scid() the result is
 * returned unlocked. */
static struct sock *l2cap_get_sock_by_psm(int state, __le16 psm, bdaddr_t *src)
{
	struct sock *sk = NULL, *sk1 = NULL;
	struct hlist_node *node;

	read_lock(&l2cap_sk_list.lock);

	sk_for_each(sk, node, &l2cap_sk_list.head) {
		if (state && sk->sk_state != state)
			continue;

		if (l2cap_pi(sk)->psm == psm) {
			/* Exact match. */
			if (!bacmp(&bt_sk(sk)->src, src))
				break;

			/* Closest match */
			if (!bacmp(&bt_sk(sk)->src, BDADDR_ANY))
				sk1 = sk;
		}
	}

	read_unlock(&l2cap_sk_list.lock);

	/* node != NULL iff the loop broke out on an exact match. */
	return node ? sk : sk1;
}
835
/* Establish an outgoing L2CAP channel: resolve the route, create the
 * HCI link (LE or ACL depending on the destination CID), attach the
 * channel, and either start configuration or wait for the link.
 * Returns 0 on success or a negative errno. */
int l2cap_do_connect(struct sock *sk)
{
	bdaddr_t *src = &bt_sk(sk)->src;
	bdaddr_t *dst = &bt_sk(sk)->dst;
	struct l2cap_conn *conn;
	struct hci_conn *hcon;
	struct hci_dev *hdev;
	__u8 auth_type;
	int err;

	BT_DBG("%s -> %s psm 0x%2.2x", batostr(src), batostr(dst),
							l2cap_pi(sk)->psm);

	hdev = hci_get_route(dst, src);
	if (!hdev)
		return -EHOSTUNREACH;

	hci_dev_lock_bh(hdev);

	err = -ENOMEM;

	auth_type = l2cap_get_auth_type(sk);

	/* LE data channel implies an LE link; everything else is ACL. */
	if (l2cap_pi(sk)->dcid == L2CAP_CID_LE_DATA)
		hcon = hci_connect(hdev, LE_LINK, dst,
					l2cap_pi(sk)->sec_level, auth_type);
	else
		hcon = hci_connect(hdev, ACL_LINK, dst,
					l2cap_pi(sk)->sec_level, auth_type);

	if (!hcon)
		goto done;

	conn = l2cap_conn_add(hcon, 0);
	if (!conn) {
		hci_conn_put(hcon);
		goto done;
	}

	err = 0;

	/* Update source addr of the socket */
	bacpy(src, conn->src);

	l2cap_chan_add(conn, sk, NULL);

	sk->sk_state = BT_CONNECT;
	l2cap_sock_set_timer(sk, sk->sk_sndtimeo);

	if (hcon->state == BT_CONNECTED) {
		if (sk->sk_type != SOCK_SEQPACKET &&
				sk->sk_type != SOCK_STREAM) {
			/* Raw/dgram: connected as soon as security holds. */
			l2cap_sock_clear_timer(sk);
			if (l2cap_check_security(sk))
				sk->sk_state = BT_CONNECTED;
		} else
			l2cap_do_start(sk);
	}

done:
	hci_dev_unlock_bh(hdev);
	hci_dev_put(hdev);
	return err;
}
900
/* Sleep (interruptibly, in HZ/5 slices) until every outstanding ERTM
 * I-frame has been acknowledged or the connection goes away.  Called
 * with the socket locked; drops and retakes the lock around schedule.
 * Returns 0 on success, or a socket/signal error. */
int __l2cap_wait_ack(struct sock *sk)
{
	DECLARE_WAITQUEUE(wait, current);
	int err = 0;
	int timeo = HZ/5;

	add_wait_queue(sk_sleep(sk), &wait);
	while ((l2cap_pi(sk)->unacked_frames > 0 && l2cap_pi(sk)->conn)) {
		set_current_state(TASK_INTERRUPTIBLE);

		if (!timeo)
			timeo = HZ/5;

		if (signal_pending(current)) {
			err = sock_intr_errno(timeo);
			break;
		}

		release_sock(sk);
		timeo = schedule_timeout(timeo);
		lock_sock(sk);

		err = sock_error(sk);
		if (err)
			break;
	}
	set_current_state(TASK_RUNNING);
	remove_wait_queue(sk_sleep(sk), &wait);
	return err;
}
931
/* ERTM monitor timer: the peer did not answer our poll.  Give up and
 * disconnect once remote_max_tx retries are exhausted; otherwise poll
 * again and rearm the monitor timer. */
static void l2cap_monitor_timeout(unsigned long arg)
{
	struct sock *sk = (void *) arg;

	BT_DBG("sk %p", sk);

	bh_lock_sock(sk);
	if (l2cap_pi(sk)->retry_count >= l2cap_pi(sk)->remote_max_tx) {
		l2cap_send_disconn_req(l2cap_pi(sk)->conn, sk, ECONNABORTED);
		bh_unlock_sock(sk);
		return;
	}

	l2cap_pi(sk)->retry_count++;
	__mod_monitor_timer();

	l2cap_send_rr_or_rnr(l2cap_pi(sk), L2CAP_CTRL_POLL);
	bh_unlock_sock(sk);
}
951
/* ERTM retransmission timer: an I-frame went unacknowledged.  Start
 * the poll/monitor procedure (send RR/RNR with the P bit set and wait
 * for the F bit). */
static void l2cap_retrans_timeout(unsigned long arg)
{
	struct sock *sk = (void *) arg;

	BT_DBG("sk %p", sk);

	bh_lock_sock(sk);
	l2cap_pi(sk)->retry_count = 1;
	__mod_monitor_timer();

	/* Remember we are waiting for a frame with the F bit set. */
	l2cap_pi(sk)->conn_state |= L2CAP_CONN_WAIT_F;

	l2cap_send_rr_or_rnr(l2cap_pi(sk), L2CAP_CTRL_POLL);
	bh_unlock_sock(sk);
}
967
/* Free transmitted I-frames from the head of the tx queue up to (but
 * not including) expected_ack_seq, and stop the retransmission timer
 * once nothing is left unacknowledged. */
static void l2cap_drop_acked_frames(struct sock *sk)
{
	struct sk_buff *skb;

	while ((skb = skb_peek(TX_QUEUE(sk))) &&
			l2cap_pi(sk)->unacked_frames) {
		if (bt_cb(skb)->tx_seq == l2cap_pi(sk)->expected_ack_seq)
			break;

		skb = skb_dequeue(TX_QUEUE(sk));
		kfree_skb(skb);

		l2cap_pi(sk)->unacked_frames--;
	}

	if (!l2cap_pi(sk)->unacked_frames)
		del_timer(&l2cap_pi(sk)->retrans_timer);
}
986
987void l2cap_do_send(struct sock *sk, struct sk_buff *skb)
988{
989 struct l2cap_pinfo *pi = l2cap_pi(sk);
990 struct hci_conn *hcon = pi->conn->hcon;
991 u16 flags;
992
993 BT_DBG("sk %p, skb %p len %d", sk, skb, skb->len);
994
995 if (!pi->flushable && lmp_no_flush_capable(hcon->hdev))
996 flags = ACL_START_NO_FLUSH;
997 else
998 flags = ACL_START;
999
1000 hci_send_acl(hcon, skb, flags);
1001}
1002
/* Streaming mode transmit: drain the tx queue, stamping each frame's
 * control field with the next TxSeq and appending the CRC16 FCS when
 * enabled.  Streaming mode keeps no copies for retransmission. */
void l2cap_streaming_send(struct sock *sk)
{
	struct sk_buff *skb;
	struct l2cap_pinfo *pi = l2cap_pi(sk);
	u16 control, fcs;

	while ((skb = skb_dequeue(TX_QUEUE(sk)))) {
		control = get_unaligned_le16(skb->data + L2CAP_HDR_SIZE);
		control |= pi->next_tx_seq << L2CAP_CTRL_TXSEQ_SHIFT;
		put_unaligned_le16(control, skb->data + L2CAP_HDR_SIZE);

		if (pi->fcs == L2CAP_FCS_CRC16) {
			/* FCS covers the frame minus its own two bytes. */
			fcs = crc16(0, (u8 *)skb->data, skb->len - 2);
			put_unaligned_le16(fcs, skb->data + skb->len - 2);
		}

		l2cap_do_send(sk, skb);

		/* TxSeq is a 6-bit modulo-64 counter. */
		pi->next_tx_seq = (pi->next_tx_seq + 1) % 64;
	}
}
1024
1025static void l2cap_retransmit_one_frame(struct sock *sk, u8 tx_seq)
1026{
1027 struct l2cap_pinfo *pi = l2cap_pi(sk);
1028 struct sk_buff *skb, *tx_skb;
1029 u16 control, fcs;
1030
1031 skb = skb_peek(TX_QUEUE(sk));
1032 if (!skb)
1033 return;
1034
1035 do {
1036 if (bt_cb(skb)->tx_seq == tx_seq)
1037 break;
1038
1039 if (skb_queue_is_last(TX_QUEUE(sk), skb))
1040 return;
1041
1042 } while ((skb = skb_queue_next(TX_QUEUE(sk), skb)));
1043
1044 if (pi->remote_max_tx &&
1045 bt_cb(skb)->retries == pi->remote_max_tx) {
1046 l2cap_send_disconn_req(pi->conn, sk, ECONNABORTED);
1047 return;
1048 }
1049
1050 tx_skb = skb_clone(skb, GFP_ATOMIC);
1051 bt_cb(skb)->retries++;
1052 control = get_unaligned_le16(tx_skb->data + L2CAP_HDR_SIZE);
1053
1054 if (pi->conn_state & L2CAP_CONN_SEND_FBIT) {
1055 control |= L2CAP_CTRL_FINAL;
1056 pi->conn_state &= ~L2CAP_CONN_SEND_FBIT;
1057 }
1058
1059 control |= (pi->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT)
1060 | (tx_seq << L2CAP_CTRL_TXSEQ_SHIFT);
1061
1062 put_unaligned_le16(control, tx_skb->data + L2CAP_HDR_SIZE);
1063
1064 if (pi->fcs == L2CAP_FCS_CRC16) {
1065 fcs = crc16(0, (u8 *)tx_skb->data, tx_skb->len - 2);
1066 put_unaligned_le16(fcs, tx_skb->data + tx_skb->len - 2);
1067 }
1068
1069 l2cap_do_send(sk, tx_skb);
1070}
1071
1072int l2cap_ertm_send(struct sock *sk)
1073{
1074 struct sk_buff *skb, *tx_skb;
1075 struct l2cap_pinfo *pi = l2cap_pi(sk);
1076 u16 control, fcs;
1077 int nsent = 0;
1078
1079 if (sk->sk_state != BT_CONNECTED)
1080 return -ENOTCONN;
1081
1082 while ((skb = sk->sk_send_head) && (!l2cap_tx_window_full(sk))) {
1083
1084 if (pi->remote_max_tx &&
1085 bt_cb(skb)->retries == pi->remote_max_tx) {
1086 l2cap_send_disconn_req(pi->conn, sk, ECONNABORTED);
1087 break;
1088 }
1089
1090 tx_skb = skb_clone(skb, GFP_ATOMIC);
1091
1092 bt_cb(skb)->retries++;
1093
1094 control = get_unaligned_le16(tx_skb->data + L2CAP_HDR_SIZE);
1095 control &= L2CAP_CTRL_SAR;
1096
1097 if (pi->conn_state & L2CAP_CONN_SEND_FBIT) {
1098 control |= L2CAP_CTRL_FINAL;
1099 pi->conn_state &= ~L2CAP_CONN_SEND_FBIT;
1100 }
1101 control |= (pi->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT)
1102 | (pi->next_tx_seq << L2CAP_CTRL_TXSEQ_SHIFT);
1103 put_unaligned_le16(control, tx_skb->data + L2CAP_HDR_SIZE);
1104
1105
1106 if (pi->fcs == L2CAP_FCS_CRC16) {
1107 fcs = crc16(0, (u8 *)skb->data, tx_skb->len - 2);
1108 put_unaligned_le16(fcs, skb->data + tx_skb->len - 2);
1109 }
1110
1111 l2cap_do_send(sk, tx_skb);
1112
1113 __mod_retrans_timer();
1114
1115 bt_cb(skb)->tx_seq = pi->next_tx_seq;
1116 pi->next_tx_seq = (pi->next_tx_seq + 1) % 64;
1117
1118 pi->unacked_frames++;
1119 pi->frames_sent++;
1120
1121 if (skb_queue_is_last(TX_QUEUE(sk), skb))
1122 sk->sk_send_head = NULL;
1123 else
1124 sk->sk_send_head = skb_queue_next(TX_QUEUE(sk), skb);
1125
1126 nsent++;
1127 }
1128
1129 return nsent;
1130}
1131
1132static int l2cap_retransmit_frames(struct sock *sk)
1133{
1134 struct l2cap_pinfo *pi = l2cap_pi(sk);
1135 int ret;
1136
1137 if (!skb_queue_empty(TX_QUEUE(sk)))
1138 sk->sk_send_head = TX_QUEUE(sk)->next;
1139
1140 pi->next_tx_seq = pi->expected_ack_seq;
1141 ret = l2cap_ertm_send(sk);
1142 return ret;
1143}
1144
/* Acknowledge received I-frames: send RNR while locally busy, try to
 * piggy-back the ack on pending I-frames, and fall back to an explicit
 * RR S-frame when nothing was sent. */
static void l2cap_send_ack(struct l2cap_pinfo *pi)
{
	struct sock *sk = (struct sock *)pi;
	u16 control = 0;

	control |= pi->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT;

	if (pi->conn_state & L2CAP_CONN_LOCAL_BUSY) {
		control |= L2CAP_SUPER_RCV_NOT_READY;
		pi->conn_state |= L2CAP_CONN_RNR_SENT;
		l2cap_send_sframe(pi, control);
		return;
	}

	/* I-frames carry ReqSeq, so sending any acks implicitly. */
	if (l2cap_ertm_send(sk) > 0)
		return;

	control |= L2CAP_SUPER_RCV_READY;
	l2cap_send_sframe(pi, control);
}
1165
/* Send a Selective Reject S-frame (with the F bit) for the most
 * recently queued entry on the SREJ list. */
static void l2cap_send_srejtail(struct sock *sk)
{
	struct srej_list *tail;
	u16 control;

	control = L2CAP_SUPER_SELECT_REJECT;
	control |= L2CAP_CTRL_FINAL;

	tail = list_entry(SREJ_LIST(sk)->prev, struct srej_list, list);
	control |= tail->tx_seq << L2CAP_CTRL_REQSEQ_SHIFT;

	l2cap_send_sframe(l2cap_pi(sk), control);
}
1179
/* Copy user data from the message iovec into skb: the first 'count'
 * bytes into skb itself, the remainder as MTU-sized fragments chained
 * on skb's frag_list.  Returns bytes copied or a negative errno;
 * on error the caller frees skb (which also frees the fragments). */
static inline int l2cap_skbuff_fromiovec(struct sock *sk, struct msghdr *msg, int len, int count, struct sk_buff *skb)
{
	struct l2cap_conn *conn = l2cap_pi(sk)->conn;
	struct sk_buff **frag;
	int err, sent = 0;

	if (memcpy_fromiovec(skb_put(skb, count), msg->msg_iov, count))
		return -EFAULT;

	sent += count;
	len -= count;

	/* Continuation fragments (no L2CAP header) */
	frag = &skb_shinfo(skb)->frag_list;
	while (len) {
		count = min_t(unsigned int, conn->mtu, len);

		*frag = bt_skb_send_alloc(sk, count, msg->msg_flags & MSG_DONTWAIT, &err);
		if (!*frag)
			return err;
		if (memcpy_fromiovec(skb_put(*frag, count), msg->msg_iov, count))
			return -EFAULT;

		sent += count;
		len -= count;

		frag = &(*frag)->next;
	}

	return sent;
}
1211
/* Build a connectionless (G-frame) PDU: L2CAP header + 2-byte PSM +
 * user payload (fragmented over frag_list past the first MTU).
 * Returns the skb or an ERR_PTR. */
struct sk_buff *l2cap_create_connless_pdu(struct sock *sk, struct msghdr *msg, size_t len)
{
	struct l2cap_conn *conn = l2cap_pi(sk)->conn;
	struct sk_buff *skb;
	int err, count, hlen = L2CAP_HDR_SIZE + 2;	/* hdr + PSM */
	struct l2cap_hdr *lh;

	BT_DBG("sk %p len %d", sk, (int)len);

	count = min_t(unsigned int, (conn->mtu - hlen), len);
	skb = bt_skb_send_alloc(sk, count + hlen,
			msg->msg_flags & MSG_DONTWAIT, &err);
	if (!skb)
		return ERR_PTR(err);

	/* Create L2CAP header */
	lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
	lh->cid = cpu_to_le16(l2cap_pi(sk)->dcid);
	lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
	put_unaligned_le16(l2cap_pi(sk)->psm, skb_put(skb, 2));

	err = l2cap_skbuff_fromiovec(sk, msg, len, count, skb);
	if (unlikely(err < 0)) {
		kfree_skb(skb);
		return ERR_PTR(err);
	}
	return skb;
}
1240
1241struct sk_buff *l2cap_create_basic_pdu(struct sock *sk, struct msghdr *msg, size_t len)
1242{
1243 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
1244 struct sk_buff *skb;
1245 int err, count, hlen = L2CAP_HDR_SIZE;
1246 struct l2cap_hdr *lh;
1247
1248 BT_DBG("sk %p len %d", sk, (int)len);
1249
1250 count = min_t(unsigned int, (conn->mtu - hlen), len);
1251 skb = bt_skb_send_alloc(sk, count + hlen,
1252 msg->msg_flags & MSG_DONTWAIT, &err);
1253 if (!skb)
1254 return ERR_PTR(err);
1255
1256 /* Create L2CAP header */
1257 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
1258 lh->cid = cpu_to_le16(l2cap_pi(sk)->dcid);
1259 lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
1260
1261 err = l2cap_skbuff_fromiovec(sk, msg, len, count, skb);
1262 if (unlikely(err < 0)) {
1263 kfree_skb(skb);
1264 return ERR_PTR(err);
1265 }
1266 return skb;
1267}
1268
/* Build a single ERTM/streaming I-frame: L2CAP header, 16-bit control
 * field, an optional 16-bit SDU-length field (only when @sdulen != 0,
 * i.e. on a start-of-SDU segment), payload from the user iovec, and --
 * when CRC16 FCS is configured -- two trailing FCS bytes which are
 * zeroed here (presumably overwritten with the real CRC on the send
 * path -- confirm). Returns the skb or an ERR_PTR on failure.
 */
struct sk_buff *l2cap_create_iframe_pdu(struct sock *sk, struct msghdr *msg, size_t len, u16 control, u16 sdulen)
{
	struct l2cap_conn *conn = l2cap_pi(sk)->conn;
	struct sk_buff *skb;
	int err, count, hlen = L2CAP_HDR_SIZE + 2;
	struct l2cap_hdr *lh;

	BT_DBG("sk %p len %d", sk, (int)len);

	if (!conn)
		return ERR_PTR(-ENOTCONN);

	/* 2 extra bytes for the SDU length on start segments */
	if (sdulen)
		hlen += 2;

	/* 2 extra bytes for the trailing FCS */
	if (l2cap_pi(sk)->fcs == L2CAP_FCS_CRC16)
		hlen += 2;

	count = min_t(unsigned int, (conn->mtu - hlen), len);
	skb = bt_skb_send_alloc(sk, count + hlen,
			msg->msg_flags & MSG_DONTWAIT, &err);
	if (!skb)
		return ERR_PTR(err);

	/* Create L2CAP header */
	lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
	lh->cid = cpu_to_le16(l2cap_pi(sk)->dcid);
	lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
	put_unaligned_le16(control, skb_put(skb, 2));
	if (sdulen)
		put_unaligned_le16(sdulen, skb_put(skb, 2));

	err = l2cap_skbuff_fromiovec(sk, msg, len, count, skb);
	if (unlikely(err < 0)) {
		kfree_skb(skb);
		return ERR_PTR(err);
	}

	/* FCS placeholder */
	if (l2cap_pi(sk)->fcs == L2CAP_FCS_CRC16)
		put_unaligned_le16(0, skb_put(skb, 2));

	bt_cb(skb)->retries = 0;
	return skb;
}
1313
/* Segment an SDU of @len bytes into a chain of start/continue/end
 * I-frames of at most remote_mps payload each and splice the chain onto
 * the channel's TX queue. Returns the number of payload bytes queued,
 * or a negative errno from PDU creation.
 * NOTE(review): the start segment is always built with remote_mps bytes,
 * so callers presumably invoke this only when len > remote_mps -- confirm.
 */
int l2cap_sar_segment_sdu(struct sock *sk, struct msghdr *msg, size_t len)
{
	struct l2cap_pinfo *pi = l2cap_pi(sk);
	struct sk_buff *skb;
	struct sk_buff_head sar_queue;
	u16 control;
	size_t size = 0;

	skb_queue_head_init(&sar_queue);
	/* Start segment carries the total SDU length (passed as sdulen) */
	control = L2CAP_SDU_START;
	skb = l2cap_create_iframe_pdu(sk, msg, pi->remote_mps, control, len);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	__skb_queue_tail(&sar_queue, skb);
	len -= pi->remote_mps;
	size += pi->remote_mps;

	while (len > 0) {
		size_t buflen;

		if (len > pi->remote_mps) {
			control = L2CAP_SDU_CONTINUE;
			buflen = pi->remote_mps;
		} else {
			control = L2CAP_SDU_END;
			buflen = len;
		}

		skb = l2cap_create_iframe_pdu(sk, msg, buflen, control, 0);
		if (IS_ERR(skb)) {
			/* Drop everything built so far on failure */
			skb_queue_purge(&sar_queue);
			return PTR_ERR(skb);
		}

		__skb_queue_tail(&sar_queue, skb);
		len -= buflen;
		size += buflen;
	}
	skb_queue_splice_tail(&sar_queue, TX_QUEUE(sk));
	/* sk_send_head marks the next frame to transmit */
	if (sk->sk_send_head == NULL)
		sk->sk_send_head = sar_queue.next;

	return size;
}
1359
1360static void l2cap_chan_ready(struct sock *sk)
1361{
1362 struct sock *parent = bt_sk(sk)->parent;
1363
1364 BT_DBG("sk %p, parent %p", sk, parent);
1365
1366 l2cap_pi(sk)->conf_state = 0;
1367 l2cap_sock_clear_timer(sk);
1368
1369 if (!parent) {
1370 /* Outgoing channel.
1371 * Wake up socket sleeping on connect.
1372 */
1373 sk->sk_state = BT_CONNECTED;
1374 sk->sk_state_change(sk);
1375 } else {
1376 /* Incoming channel.
1377 * Wake up socket sleeping on accept.
1378 */
1379 parent->sk_data_ready(parent, 0);
1380 }
1381}
1382
1383/* Copy frame to all raw sockets on that connection */
1384static void l2cap_raw_recv(struct l2cap_conn *conn, struct sk_buff *skb)
1385{
1386 struct l2cap_chan_list *l = &conn->chan_list;
1387 struct sk_buff *nskb;
1388 struct sock *sk;
1389
1390 BT_DBG("conn %p", conn);
1391
1392 read_lock(&l->lock);
1393 for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
1394 if (sk->sk_type != SOCK_RAW)
1395 continue;
1396
1397 /* Don't send frame to the socket it came from */
1398 if (skb->sk == sk)
1399 continue;
1400 nskb = skb_clone(skb, GFP_ATOMIC);
1401 if (!nskb)
1402 continue;
1403
1404 if (sock_queue_rcv_skb(sk, nskb))
1405 kfree_skb(nskb);
1406 }
1407 read_unlock(&l->lock);
1408}
1409
1410/* ---- L2CAP signalling commands ---- */
1411static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
1412 u8 code, u8 ident, u16 dlen, void *data)
1413{
1414 struct sk_buff *skb, **frag;
1415 struct l2cap_cmd_hdr *cmd;
1416 struct l2cap_hdr *lh;
1417 int len, count;
1418
1419 BT_DBG("conn %p, code 0x%2.2x, ident 0x%2.2x, len %d",
1420 conn, code, ident, dlen);
1421
1422 len = L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE + dlen;
1423 count = min_t(unsigned int, conn->mtu, len);
1424
1425 skb = bt_skb_alloc(count, GFP_ATOMIC);
1426 if (!skb)
1427 return NULL;
1428
1429 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
1430 lh->len = cpu_to_le16(L2CAP_CMD_HDR_SIZE + dlen);
1431
1432 if (conn->hcon->type == LE_LINK)
1433 lh->cid = cpu_to_le16(L2CAP_CID_LE_SIGNALING);
1434 else
1435 lh->cid = cpu_to_le16(L2CAP_CID_SIGNALING);
1436
1437 cmd = (struct l2cap_cmd_hdr *) skb_put(skb, L2CAP_CMD_HDR_SIZE);
1438 cmd->code = code;
1439 cmd->ident = ident;
1440 cmd->len = cpu_to_le16(dlen);
1441
1442 if (dlen) {
1443 count -= L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE;
1444 memcpy(skb_put(skb, count), data, count);
1445 data += count;
1446 }
1447
1448 len -= skb->len;
1449
1450 /* Continuation fragments (no L2CAP header) */
1451 frag = &skb_shinfo(skb)->frag_list;
1452 while (len) {
1453 count = min_t(unsigned int, conn->mtu, len);
1454
1455 *frag = bt_skb_alloc(count, GFP_ATOMIC);
1456 if (!*frag)
1457 goto fail;
1458
1459 memcpy(skb_put(*frag, count), data, count);
1460
1461 len -= count;
1462 data += count;
1463
1464 frag = &(*frag)->next;
1465 }
1466
1467 return skb;
1468
1469fail:
1470 kfree_skb(skb);
1471 return NULL;
1472}
1473
/* Decode one configuration option at *ptr and advance *ptr past it.
 * Returns the total number of bytes consumed. For 1/2/4-byte options
 * *val holds the little-endian-decoded value; for any other length,
 * *val holds a pointer to the raw option payload instead.
 * NOTE(review): opt->len is taken from the wire without bounds checking
 * against the remaining buffer -- callers must have validated lengths.
 */
static inline int l2cap_get_conf_opt(void **ptr, int *type, int *olen, unsigned long *val)
{
	struct l2cap_conf_opt *opt = *ptr;
	int len;

	len = L2CAP_CONF_OPT_SIZE + opt->len;
	*ptr += len;

	*type = opt->type;
	*olen = opt->len;

	switch (opt->len) {
	case 1:
		*val = *((u8 *) opt->val);
		break;

	case 2:
		*val = get_unaligned_le16(opt->val);
		break;

	case 4:
		*val = get_unaligned_le32(opt->val);
		break;

	default:
		/* Variable-length option: hand back a pointer, not a value */
		*val = (unsigned long) opt->val;
		break;
	}

	BT_DBG("type 0x%2.2x len %d val 0x%lx", *type, opt->len, *val);
	return len;
}
1506
1507static void l2cap_add_conf_opt(void **ptr, u8 type, u8 len, unsigned long val)
1508{
1509 struct l2cap_conf_opt *opt = *ptr;
1510
1511 BT_DBG("type 0x%2.2x len %d val 0x%lx", type, len, val);
1512
1513 opt->type = type;
1514 opt->len = len;
1515
1516 switch (len) {
1517 case 1:
1518 *((u8 *) opt->val) = val;
1519 break;
1520
1521 case 2:
1522 put_unaligned_le16(val, opt->val);
1523 break;
1524
1525 case 4:
1526 put_unaligned_le32(val, opt->val);
1527 break;
1528
1529 default:
1530 memcpy(opt->val, (void *) val, len);
1531 break;
1532 }
1533
1534 *ptr += L2CAP_CONF_OPT_SIZE + len;
1535}
1536
/* Ack timer expiry: acknowledge received I-frames that have not been
 * acked yet. Runs in timer (softirq) context, hence bh_lock_sock. */
static void l2cap_ack_timeout(unsigned long arg)
{
	struct sock *sk = (struct sock *) arg;

	bh_lock_sock(sk);
	l2cap_send_ack(l2cap_pi(sk));
	bh_unlock_sock(sk);
}
1545
/* Initialize per-channel ERTM state: reset sequence/ack bookkeeping, set
 * up the retransmission, monitor and ack timers, init the SREJ and busy
 * queues, and switch the socket's backlog receive handler to the ERTM
 * data path.
 */
static inline void l2cap_ertm_init(struct sock *sk)
{
	l2cap_pi(sk)->expected_ack_seq = 0;
	l2cap_pi(sk)->unacked_frames = 0;
	l2cap_pi(sk)->buffer_seq = 0;
	l2cap_pi(sk)->num_acked = 0;
	l2cap_pi(sk)->frames_sent = 0;

	/* Timers get the socket pointer back as their argument */
	setup_timer(&l2cap_pi(sk)->retrans_timer,
			l2cap_retrans_timeout, (unsigned long) sk);
	setup_timer(&l2cap_pi(sk)->monitor_timer,
			l2cap_monitor_timeout, (unsigned long) sk);
	setup_timer(&l2cap_pi(sk)->ack_timer,
			l2cap_ack_timeout, (unsigned long) sk);

	__skb_queue_head_init(SREJ_QUEUE(sk));
	__skb_queue_head_init(BUSY_QUEUE(sk));

	INIT_WORK(&l2cap_pi(sk)->busy_work, l2cap_busy_work);

	sk->sk_backlog_rcv = l2cap_ertm_data_rcv;
}
1568
1569static inline __u8 l2cap_select_mode(__u8 mode, __u16 remote_feat_mask)
1570{
1571 switch (mode) {
1572 case L2CAP_MODE_STREAMING:
1573 case L2CAP_MODE_ERTM:
1574 if (l2cap_mode_supported(mode, remote_feat_mask))
1575 return mode;
1576 /* fall through */
1577 default:
1578 return L2CAP_MODE_BASIC;
1579 }
1580}
1581
/* Build a Configure Request for this channel into @data. On the first
 * exchange the channel mode may be downgraded to basic if the peer does
 * not advertise ERTM/streaming support. Emits MTU, RFC and (optionally)
 * FCS options. Returns the number of bytes written.
 */
int l2cap_build_conf_req(struct sock *sk, void *data)
{
	struct l2cap_pinfo *pi = l2cap_pi(sk);
	struct l2cap_conf_req *req = data;
	struct l2cap_conf_rfc rfc = { .mode = pi->mode };
	void *ptr = req->data;

	BT_DBG("sk %p", sk);

	/* Mode selection only happens on the very first request */
	if (pi->num_conf_req || pi->num_conf_rsp)
		goto done;

	switch (pi->mode) {
	case L2CAP_MODE_STREAMING:
	case L2CAP_MODE_ERTM:
		/* User-mandated mode (STATE2_DEVICE): don't renegotiate */
		if (pi->conf_state & L2CAP_CONF_STATE2_DEVICE)
			break;

		/* fall through */
	default:
		pi->mode = l2cap_select_mode(rfc.mode, pi->conn->feat_mask);
		break;
	}

done:
	if (pi->imtu != L2CAP_DEFAULT_MTU)
		l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, pi->imtu);

	switch (pi->mode) {
	case L2CAP_MODE_BASIC:
		/* An explicit basic-mode RFC option is only needed when
		 * the peer supports the other modes */
		if (!(pi->conn->feat_mask & L2CAP_FEAT_ERTM) &&
				!(pi->conn->feat_mask & L2CAP_FEAT_STREAMING))
			break;

		rfc.mode = L2CAP_MODE_BASIC;
		rfc.txwin_size = 0;
		rfc.max_transmit = 0;
		rfc.retrans_timeout = 0;
		rfc.monitor_timeout = 0;
		rfc.max_pdu_size = 0;

		l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
					(unsigned long) &rfc);
		break;

	case L2CAP_MODE_ERTM:
		rfc.mode = L2CAP_MODE_ERTM;
		rfc.txwin_size = pi->tx_win;
		rfc.max_transmit = pi->max_tx;
		rfc.retrans_timeout = 0;
		rfc.monitor_timeout = 0;
		/* Cap PDU size so a frame plus up to 10 bytes of L2CAP
		 * framing still fits in the link MTU */
		rfc.max_pdu_size = cpu_to_le16(L2CAP_DEFAULT_MAX_PDU_SIZE);
		if (L2CAP_DEFAULT_MAX_PDU_SIZE > pi->conn->mtu - 10)
			rfc.max_pdu_size = cpu_to_le16(pi->conn->mtu - 10);

		l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
					(unsigned long) &rfc);

		if (!(pi->conn->feat_mask & L2CAP_FEAT_FCS))
			break;

		/* Ask for "no FCS" when we or the peer want it off */
		if (pi->fcs == L2CAP_FCS_NONE ||
				pi->conf_state & L2CAP_CONF_NO_FCS_RECV) {
			pi->fcs = L2CAP_FCS_NONE;
			l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1, pi->fcs);
		}
		break;

	case L2CAP_MODE_STREAMING:
		rfc.mode = L2CAP_MODE_STREAMING;
		rfc.txwin_size = 0;
		rfc.max_transmit = 0;
		rfc.retrans_timeout = 0;
		rfc.monitor_timeout = 0;
		rfc.max_pdu_size = cpu_to_le16(L2CAP_DEFAULT_MAX_PDU_SIZE);
		if (L2CAP_DEFAULT_MAX_PDU_SIZE > pi->conn->mtu - 10)
			rfc.max_pdu_size = cpu_to_le16(pi->conn->mtu - 10);

		l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
					(unsigned long) &rfc);

		if (!(pi->conn->feat_mask & L2CAP_FEAT_FCS))
			break;

		if (pi->fcs == L2CAP_FCS_NONE ||
				pi->conf_state & L2CAP_CONF_NO_FCS_RECV) {
			pi->fcs = L2CAP_FCS_NONE;
			l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1, pi->fcs);
		}
		break;
	}

	/* FIXME: Need actual value of the flush timeout */
	//if (flush_to != L2CAP_DEFAULT_FLUSH_TO)
	//   l2cap_add_conf_opt(&ptr, L2CAP_CONF_FLUSH_TO, 2, pi->flush_to);

	req->dcid  = cpu_to_le16(pi->dcid);
	req->flags = cpu_to_le16(0);

	return ptr - data;
}
1683
/* Parse the peer's (fully reassembled) Configure Request stored in
 * pi->conf_req and build our Configure Response into @data. Returns the
 * response length, or -ECONNREFUSED when the requested mode cannot be
 * reconciled with a user-mandated mode on our side.
 */
static int l2cap_parse_conf_req(struct sock *sk, void *data)
{
	struct l2cap_pinfo *pi = l2cap_pi(sk);
	struct l2cap_conf_rsp *rsp = data;
	void *ptr = rsp->data;
	void *req = pi->conf_req;
	int len = pi->conf_len;
	int type, hint, olen;
	unsigned long val;
	struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
	u16 mtu = L2CAP_DEFAULT_MTU;
	u16 result = L2CAP_CONF_SUCCESS;

	BT_DBG("sk %p", sk);

	while (len >= L2CAP_CONF_OPT_SIZE) {
		len -= l2cap_get_conf_opt(&req, &type, &olen, &val);

		/* Hinted options may be ignored silently; non-hinted
		 * unknown options must be reported back */
		hint  = type & L2CAP_CONF_HINT;
		type &= L2CAP_CONF_MASK;

		switch (type) {
		case L2CAP_CONF_MTU:
			mtu = val;
			break;

		case L2CAP_CONF_FLUSH_TO:
			pi->flush_to = val;
			break;

		case L2CAP_CONF_QOS:
			break;

		case L2CAP_CONF_RFC:
			if (olen == sizeof(rfc))
				memcpy(&rfc, (void *) val, olen);
			break;

		case L2CAP_CONF_FCS:
			if (val == L2CAP_FCS_NONE)
				pi->conf_state |= L2CAP_CONF_NO_FCS_RECV;

			break;

		default:
			if (hint)
				break;

			/* Collect unknown option types in the response */
			result = L2CAP_CONF_UNKNOWN;
			*((u8 *) ptr++) = type;
			break;
		}
	}

	/* Mode negotiation only happens on the first exchange */
	if (pi->num_conf_rsp || pi->num_conf_req > 1)
		goto done;

	switch (pi->mode) {
	case L2CAP_MODE_STREAMING:
	case L2CAP_MODE_ERTM:
		if (!(pi->conf_state & L2CAP_CONF_STATE2_DEVICE)) {
			pi->mode = l2cap_select_mode(rfc.mode,
					pi->conn->feat_mask);
			break;
		}

		/* Mode was mandated by the user: refuse anything else */
		if (pi->mode != rfc.mode)
			return -ECONNREFUSED;

		break;
	}

done:
	if (pi->mode != rfc.mode) {
		result = L2CAP_CONF_UNACCEPT;
		rfc.mode = pi->mode;

		if (pi->num_conf_rsp == 1)
			return -ECONNREFUSED;

		l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
					sizeof(rfc), (unsigned long) &rfc);
	}


	if (result == L2CAP_CONF_SUCCESS) {
		/* Configure output options and let the other side know
		 * which ones we don't like. */

		if (mtu < L2CAP_DEFAULT_MIN_MTU)
			result = L2CAP_CONF_UNACCEPT;
		else {
			pi->omtu = mtu;
			pi->conf_state |= L2CAP_CONF_MTU_DONE;
		}
		l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, pi->omtu);

		switch (rfc.mode) {
		case L2CAP_MODE_BASIC:
			pi->fcs = L2CAP_FCS_NONE;
			pi->conf_state |= L2CAP_CONF_MODE_DONE;
			break;

		case L2CAP_MODE_ERTM:
			pi->remote_tx_win = rfc.txwin_size;
			pi->remote_max_tx = rfc.max_transmit;

			/* Cap remote MPS so a frame plus up to 10 bytes of
			 * L2CAP framing fits in our link MTU */
			if (le16_to_cpu(rfc.max_pdu_size) > pi->conn->mtu - 10)
				rfc.max_pdu_size = cpu_to_le16(pi->conn->mtu - 10);

			pi->remote_mps = le16_to_cpu(rfc.max_pdu_size);

			/* NOTE(review): le16_to_cpu on host-order constants
			 * looks inverted (should presumably be cpu_to_le16);
			 * only equivalent on little-endian -- confirm */
			rfc.retrans_timeout =
				le16_to_cpu(L2CAP_DEFAULT_RETRANS_TO);
			rfc.monitor_timeout =
				le16_to_cpu(L2CAP_DEFAULT_MONITOR_TO);

			pi->conf_state |= L2CAP_CONF_MODE_DONE;

			l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
					sizeof(rfc), (unsigned long) &rfc);

			break;

		case L2CAP_MODE_STREAMING:
			if (le16_to_cpu(rfc.max_pdu_size) > pi->conn->mtu - 10)
				rfc.max_pdu_size = cpu_to_le16(pi->conn->mtu - 10);

			pi->remote_mps = le16_to_cpu(rfc.max_pdu_size);

			pi->conf_state |= L2CAP_CONF_MODE_DONE;

			l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
					sizeof(rfc), (unsigned long) &rfc);

			break;

		default:
			result = L2CAP_CONF_UNACCEPT;

			memset(&rfc, 0, sizeof(rfc));
			rfc.mode = pi->mode;
		}

		if (result == L2CAP_CONF_SUCCESS)
			pi->conf_state |= L2CAP_CONF_OUTPUT_DONE;
	}
	rsp->scid   = cpu_to_le16(pi->dcid);
	rsp->result = cpu_to_le16(result);
	rsp->flags  = cpu_to_le16(0x0000);

	return ptr - data;
}
1837
1838static int l2cap_parse_conf_rsp(struct sock *sk, void *rsp, int len, void *data, u16 *result)
1839{
1840 struct l2cap_pinfo *pi = l2cap_pi(sk);
1841 struct l2cap_conf_req *req = data;
1842 void *ptr = req->data;
1843 int type, olen;
1844 unsigned long val;
1845 struct l2cap_conf_rfc rfc;
1846
1847 BT_DBG("sk %p, rsp %p, len %d, req %p", sk, rsp, len, data);
1848
1849 while (len >= L2CAP_CONF_OPT_SIZE) {
1850 len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);
1851
1852 switch (type) {
1853 case L2CAP_CONF_MTU:
1854 if (val < L2CAP_DEFAULT_MIN_MTU) {
1855 *result = L2CAP_CONF_UNACCEPT;
1856 pi->imtu = L2CAP_DEFAULT_MIN_MTU;
1857 } else
1858 pi->imtu = val;
1859 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, pi->imtu);
1860 break;
1861
1862 case L2CAP_CONF_FLUSH_TO:
1863 pi->flush_to = val;
1864 l2cap_add_conf_opt(&ptr, L2CAP_CONF_FLUSH_TO,
1865 2, pi->flush_to);
1866 break;
1867
1868 case L2CAP_CONF_RFC:
1869 if (olen == sizeof(rfc))
1870 memcpy(&rfc, (void *)val, olen);
1871
1872 if ((pi->conf_state & L2CAP_CONF_STATE2_DEVICE) &&
1873 rfc.mode != pi->mode)
1874 return -ECONNREFUSED;
1875
1876 pi->fcs = 0;
1877
1878 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
1879 sizeof(rfc), (unsigned long) &rfc);
1880 break;
1881 }
1882 }
1883
1884 if (pi->mode == L2CAP_MODE_BASIC && pi->mode != rfc.mode)
1885 return -ECONNREFUSED;
1886
1887 pi->mode = rfc.mode;
1888
1889 if (*result == L2CAP_CONF_SUCCESS) {
1890 switch (rfc.mode) {
1891 case L2CAP_MODE_ERTM:
1892 pi->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
1893 pi->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
1894 pi->mps = le16_to_cpu(rfc.max_pdu_size);
1895 break;
1896 case L2CAP_MODE_STREAMING:
1897 pi->mps = le16_to_cpu(rfc.max_pdu_size);
1898 }
1899 }
1900
1901 req->dcid = cpu_to_le16(pi->dcid);
1902 req->flags = cpu_to_le16(0x0000);
1903
1904 return ptr - data;
1905}
1906
1907static int l2cap_build_conf_rsp(struct sock *sk, void *data, u16 result, u16 flags)
1908{
1909 struct l2cap_conf_rsp *rsp = data;
1910 void *ptr = rsp->data;
1911
1912 BT_DBG("sk %p", sk);
1913
1914 rsp->scid = cpu_to_le16(l2cap_pi(sk)->dcid);
1915 rsp->result = cpu_to_le16(result);
1916 rsp->flags = cpu_to_le16(flags);
1917
1918 return ptr - data;
1919}
1920
1921static void l2cap_conf_rfc_get(struct sock *sk, void *rsp, int len)
1922{
1923 struct l2cap_pinfo *pi = l2cap_pi(sk);
1924 int type, olen;
1925 unsigned long val;
1926 struct l2cap_conf_rfc rfc;
1927
1928 BT_DBG("sk %p, rsp %p, len %d", sk, rsp, len);
1929
1930 if ((pi->mode != L2CAP_MODE_ERTM) && (pi->mode != L2CAP_MODE_STREAMING))
1931 return;
1932
1933 while (len >= L2CAP_CONF_OPT_SIZE) {
1934 len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);
1935
1936 switch (type) {
1937 case L2CAP_CONF_RFC:
1938 if (olen == sizeof(rfc))
1939 memcpy(&rfc, (void *)val, olen);
1940 goto done;
1941 }
1942 }
1943
1944done:
1945 switch (rfc.mode) {
1946 case L2CAP_MODE_ERTM:
1947 pi->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
1948 pi->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
1949 pi->mps = le16_to_cpu(rfc.max_pdu_size);
1950 break;
1951 case L2CAP_MODE_STREAMING:
1952 pi->mps = le16_to_cpu(rfc.max_pdu_size);
1953 }
1954}
1955
1956static inline int l2cap_command_rej(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
1957{
1958 struct l2cap_cmd_rej *rej = (struct l2cap_cmd_rej *) data;
1959
1960 if (rej->reason != 0x0000)
1961 return 0;
1962
1963 if ((conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) &&
1964 cmd->ident == conn->info_ident) {
1965 del_timer(&conn->info_timer);
1966
1967 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
1968 conn->info_ident = 0;
1969
1970 l2cap_conn_start(conn);
1971 }
1972
1973 return 0;
1974}
1975
/* Handle an incoming Connect Request: find a listening socket for the
 * PSM, enforce link security, allocate and register a child channel,
 * then answer with success / pending / an error result. May also start
 * feature-mask discovery when it has not been done yet.
 */
static inline int l2cap_connect_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
{
	struct l2cap_chan_list *list = &conn->chan_list;
	struct l2cap_conn_req *req = (struct l2cap_conn_req *) data;
	struct l2cap_conn_rsp rsp;
	struct sock *parent, *sk = NULL;
	int result, status = L2CAP_CS_NO_INFO;

	u16 dcid = 0, scid = __le16_to_cpu(req->scid);
	__le16 psm = req->psm;

	BT_DBG("psm 0x%2.2x scid 0x%4.4x", psm, scid);

	/* Check if we have socket listening on psm */
	parent = l2cap_get_sock_by_psm(BT_LISTEN, psm, conn->src);
	if (!parent) {
		result = L2CAP_CR_BAD_PSM;
		goto sendresp;
	}

	bh_lock_sock(parent);

	/* Check if the ACL is secure enough (if not SDP) */
	if (psm != cpu_to_le16(0x0001) &&
				!hci_conn_check_link_mode(conn->hcon)) {
		/* Remember: connection refused on security grounds */
		conn->disc_reason = 0x05;
		result = L2CAP_CR_SEC_BLOCK;
		goto response;
	}

	result = L2CAP_CR_NO_MEM;

	/* Check for backlog size */
	if (sk_acceptq_is_full(parent)) {
		BT_DBG("backlog full %d", parent->sk_ack_backlog);
		goto response;
	}

	sk = l2cap_sock_alloc(sock_net(parent), NULL, BTPROTO_L2CAP, GFP_ATOMIC);
	if (!sk)
		goto response;

	write_lock_bh(&list->lock);

	/* Check if we already have channel with that dcid */
	if (__l2cap_get_chan_by_dcid(list, scid)) {
		write_unlock_bh(&list->lock);
		sock_set_flag(sk, SOCK_ZAPPED);
		l2cap_sock_kill(sk);
		goto response;
	}

	hci_conn_hold(conn->hcon);

	l2cap_sock_init(sk, parent);
	bacpy(&bt_sk(sk)->src, conn->src);
	bacpy(&bt_sk(sk)->dst, conn->dst);
	l2cap_pi(sk)->psm = psm;
	l2cap_pi(sk)->dcid = scid;

	__l2cap_chan_add(conn, sk, parent);
	dcid = l2cap_pi(sk)->scid;

	l2cap_sock_set_timer(sk, sk->sk_sndtimeo);

	l2cap_pi(sk)->ident = cmd->ident;

	if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE) {
		if (l2cap_check_security(sk)) {
			if (bt_sk(sk)->defer_setup) {
				/* Wait for userspace to accept() first */
				sk->sk_state = BT_CONNECT2;
				result = L2CAP_CR_PEND;
				status = L2CAP_CS_AUTHOR_PEND;
				parent->sk_data_ready(parent, 0);
			} else {
				sk->sk_state = BT_CONFIG;
				result = L2CAP_CR_SUCCESS;
				status = L2CAP_CS_NO_INFO;
			}
		} else {
			sk->sk_state = BT_CONNECT2;
			result = L2CAP_CR_PEND;
			status = L2CAP_CS_AUTHEN_PEND;
		}
	} else {
		/* Feature mask not known yet: answer "pending" and ask */
		sk->sk_state = BT_CONNECT2;
		result = L2CAP_CR_PEND;
		status = L2CAP_CS_NO_INFO;
	}

	write_unlock_bh(&list->lock);

response:
	bh_unlock_sock(parent);

sendresp:
	rsp.scid   = cpu_to_le16(scid);
	rsp.dcid   = cpu_to_le16(dcid);
	rsp.result = cpu_to_le16(result);
	rsp.status = cpu_to_le16(status);
	l2cap_send_cmd(conn, cmd->ident, L2CAP_CONN_RSP, sizeof(rsp), &rsp);

	if (result == L2CAP_CR_PEND && status == L2CAP_CS_NO_INFO) {
		/* Start feature-mask discovery before finishing setup */
		struct l2cap_info_req info;
		info.type = cpu_to_le16(L2CAP_IT_FEAT_MASK);

		conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
		conn->info_ident = l2cap_get_ident(conn);

		mod_timer(&conn->info_timer, jiffies +
					msecs_to_jiffies(L2CAP_INFO_TIMEOUT));

		l2cap_send_cmd(conn, conn->info_ident,
					L2CAP_INFO_REQ, sizeof(info), &info);
	}

	if (sk && !(l2cap_pi(sk)->conf_state & L2CAP_CONF_REQ_SENT) &&
				result == L2CAP_CR_SUCCESS) {
		u8 buf[128];
		l2cap_pi(sk)->conf_state |= L2CAP_CONF_REQ_SENT;
		l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
					l2cap_build_conf_req(sk, buf), buf);
		l2cap_pi(sk)->num_conf_req++;
	}

	return 0;
}
2103
/* Handle a Connect Response. Success moves the channel into BT_CONFIG
 * and sends our first Configure Request; "pending" only records the
 * state; any other result deletes the channel (deferred via a short
 * timer when userspace currently holds the socket lock).
 */
static inline int l2cap_connect_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
{
	struct l2cap_conn_rsp *rsp = (struct l2cap_conn_rsp *) data;
	u16 scid, dcid, result, status;
	struct sock *sk;
	u8 req[128];

	scid = __le16_to_cpu(rsp->scid);
	dcid = __le16_to_cpu(rsp->dcid);
	result = __le16_to_cpu(rsp->result);
	status = __le16_to_cpu(rsp->status);

	BT_DBG("dcid 0x%4.4x scid 0x%4.4x result 0x%2.2x status 0x%2.2x", dcid, scid, result, status);

	/* Both lookups return with the socket bh-locked (unlocked below) */
	if (scid) {
		sk = l2cap_get_chan_by_scid(&conn->chan_list, scid);
		if (!sk)
			return -EFAULT;
	} else {
		/* No source CID echoed: match by the request's ident */
		sk = l2cap_get_chan_by_ident(&conn->chan_list, cmd->ident);
		if (!sk)
			return -EFAULT;
	}

	switch (result) {
	case L2CAP_CR_SUCCESS:
		sk->sk_state = BT_CONFIG;
		l2cap_pi(sk)->ident = 0;
		l2cap_pi(sk)->dcid = dcid;
		l2cap_pi(sk)->conf_state &= ~L2CAP_CONF_CONNECT_PEND;

		/* Only send the first Configure Request once */
		if (l2cap_pi(sk)->conf_state & L2CAP_CONF_REQ_SENT)
			break;

		l2cap_pi(sk)->conf_state |= L2CAP_CONF_REQ_SENT;

		l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
					l2cap_build_conf_req(sk, req), req);
		l2cap_pi(sk)->num_conf_req++;
		break;

	case L2CAP_CR_PEND:
		l2cap_pi(sk)->conf_state |= L2CAP_CONF_CONNECT_PEND;
		break;

	default:
		/* don't delete l2cap channel if sk is owned by user */
		if (sock_owned_by_user(sk)) {
			sk->sk_state = BT_DISCONN;
			l2cap_sock_clear_timer(sk);
			l2cap_sock_set_timer(sk, HZ / 5);
			break;
		}

		l2cap_chan_del(sk, ECONNREFUSED);
		break;
	}

	bh_unlock_sock(sk);
	return 0;
}
2165
2166static inline void set_default_fcs(struct l2cap_pinfo *pi)
2167{
2168 /* FCS is enabled only in ERTM or streaming mode, if one or both
2169 * sides request it.
2170 */
2171 if (pi->mode != L2CAP_MODE_ERTM && pi->mode != L2CAP_MODE_STREAMING)
2172 pi->fcs = L2CAP_FCS_NONE;
2173 else if (!(pi->conf_state & L2CAP_CONF_NO_FCS_RECV))
2174 pi->fcs = L2CAP_FCS_CRC16;
2175}
2176
/* Handle a Configure Request from the peer. Option data may arrive
 * split across several requests (continuation flag 0x0001); fragments
 * are accumulated in pi->conf_req until complete, then parsed and
 * answered. When both directions are configured the channel moves to
 * BT_CONNECTED.
 */
static inline int l2cap_config_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u16 cmd_len, u8 *data)
{
	struct l2cap_conf_req *req = (struct l2cap_conf_req *) data;
	u16 dcid, flags;
	u8 rsp[64];
	struct sock *sk;
	int len;

	dcid  = __le16_to_cpu(req->dcid);
	flags = __le16_to_cpu(req->flags);

	BT_DBG("dcid 0x%4.4x flags 0x%2.2x", dcid, flags);

	/* Lookup returns with the socket bh-locked */
	sk = l2cap_get_chan_by_scid(&conn->chan_list, dcid);
	if (!sk)
		return -ENOENT;

	if (sk->sk_state != BT_CONFIG) {
		struct l2cap_cmd_rej rej;

		rej.reason = cpu_to_le16(0x0002);
		l2cap_send_cmd(conn, cmd->ident, L2CAP_COMMAND_REJ,
				sizeof(rej), &rej);
		goto unlock;
	}

	/* Reject if config buffer is too small. */
	len = cmd_len - sizeof(*req);
	if (l2cap_pi(sk)->conf_len + len > sizeof(l2cap_pi(sk)->conf_req)) {
		l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
				l2cap_build_conf_rsp(sk, rsp,
					L2CAP_CONF_REJECT, flags), rsp);
		goto unlock;
	}

	/* Store config. */
	memcpy(l2cap_pi(sk)->conf_req + l2cap_pi(sk)->conf_len, req->data, len);
	l2cap_pi(sk)->conf_len += len;

	if (flags & 0x0001) {
		/* Incomplete config. Send empty response. */
		l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
				l2cap_build_conf_rsp(sk, rsp,
					L2CAP_CONF_SUCCESS, 0x0001), rsp);
		goto unlock;
	}

	/* Complete config. */
	len = l2cap_parse_conf_req(sk, rsp);
	if (len < 0) {
		/* Unresolvable mode mismatch: drop the channel */
		l2cap_send_disconn_req(conn, sk, ECONNRESET);
		goto unlock;
	}

	l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP, len, rsp);
	l2cap_pi(sk)->num_conf_rsp++;

	/* Reset config buffer. */
	l2cap_pi(sk)->conf_len = 0;

	if (!(l2cap_pi(sk)->conf_state & L2CAP_CONF_OUTPUT_DONE))
		goto unlock;

	if (l2cap_pi(sk)->conf_state & L2CAP_CONF_INPUT_DONE) {
		/* Both directions configured: channel is operational */
		set_default_fcs(l2cap_pi(sk));

		sk->sk_state = BT_CONNECTED;

		l2cap_pi(sk)->next_tx_seq = 0;
		l2cap_pi(sk)->expected_tx_seq = 0;
		__skb_queue_head_init(TX_QUEUE(sk));
		if (l2cap_pi(sk)->mode ==  L2CAP_MODE_ERTM)
			l2cap_ertm_init(sk);

		l2cap_chan_ready(sk);
		goto unlock;
	}

	if (!(l2cap_pi(sk)->conf_state & L2CAP_CONF_REQ_SENT)) {
		/* We have not sent our own config request yet: do it now */
		u8 buf[64];
		l2cap_pi(sk)->conf_state |= L2CAP_CONF_REQ_SENT;
		l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
					l2cap_build_conf_req(sk, buf), buf);
		l2cap_pi(sk)->num_conf_req++;
	}

unlock:
	bh_unlock_sock(sk);
	return 0;
}
2267
2268static inline int l2cap_config_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2269{
2270 struct l2cap_conf_rsp *rsp = (struct l2cap_conf_rsp *)data;
2271 u16 scid, flags, result;
2272 struct sock *sk;
2273 int len = cmd->len - sizeof(*rsp);
2274
2275 scid = __le16_to_cpu(rsp->scid);
2276 flags = __le16_to_cpu(rsp->flags);
2277 result = __le16_to_cpu(rsp->result);
2278
2279 BT_DBG("scid 0x%4.4x flags 0x%2.2x result 0x%2.2x",
2280 scid, flags, result);
2281
2282 sk = l2cap_get_chan_by_scid(&conn->chan_list, scid);
2283 if (!sk)
2284 return 0;
2285
2286 switch (result) {
2287 case L2CAP_CONF_SUCCESS:
2288 l2cap_conf_rfc_get(sk, rsp->data, len);
2289 break;
2290
2291 case L2CAP_CONF_UNACCEPT:
2292 if (l2cap_pi(sk)->num_conf_rsp <= L2CAP_CONF_MAX_CONF_RSP) {
2293 char req[64];
2294
2295 if (len > sizeof(req) - sizeof(struct l2cap_conf_req)) {
2296 l2cap_send_disconn_req(conn, sk, ECONNRESET);
2297 goto done;
2298 }
2299
2300 /* throw out any old stored conf requests */
2301 result = L2CAP_CONF_SUCCESS;
2302 len = l2cap_parse_conf_rsp(sk, rsp->data,
2303 len, req, &result);
2304 if (len < 0) {
2305 l2cap_send_disconn_req(conn, sk, ECONNRESET);
2306 goto done;
2307 }
2308
2309 l2cap_send_cmd(conn, l2cap_get_ident(conn),
2310 L2CAP_CONF_REQ, len, req);
2311 l2cap_pi(sk)->num_conf_req++;
2312 if (result != L2CAP_CONF_SUCCESS)
2313 goto done;
2314 break;
2315 }
2316
2317 default:
2318 sk->sk_err = ECONNRESET;
2319 l2cap_sock_set_timer(sk, HZ * 5);
2320 l2cap_send_disconn_req(conn, sk, ECONNRESET);
2321 goto done;
2322 }
2323
2324 if (flags & 0x01)
2325 goto done;
2326
2327 l2cap_pi(sk)->conf_state |= L2CAP_CONF_INPUT_DONE;
2328
2329 if (l2cap_pi(sk)->conf_state & L2CAP_CONF_OUTPUT_DONE) {
2330 set_default_fcs(l2cap_pi(sk));
2331
2332 sk->sk_state = BT_CONNECTED;
2333 l2cap_pi(sk)->next_tx_seq = 0;
2334 l2cap_pi(sk)->expected_tx_seq = 0;
2335 __skb_queue_head_init(TX_QUEUE(sk));
2336 if (l2cap_pi(sk)->mode == L2CAP_MODE_ERTM)
2337 l2cap_ertm_init(sk);
2338
2339 l2cap_chan_ready(sk);
2340 }
2341
2342done:
2343 bh_unlock_sock(sk);
2344 return 0;
2345}
2346
/* Handle a Disconnect Request: acknowledge it with a Disconnect
 * Response and tear the channel down. If userspace currently holds the
 * socket lock, only mark the socket and let a short timer finish the
 * teardown (this handler runs in softirq context).
 */
static inline int l2cap_disconnect_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
{
	struct l2cap_disconn_req *req = (struct l2cap_disconn_req *) data;
	struct l2cap_disconn_rsp rsp;
	u16 dcid, scid;
	struct sock *sk;

	scid = __le16_to_cpu(req->scid);
	dcid = __le16_to_cpu(req->dcid);

	BT_DBG("scid 0x%4.4x dcid 0x%4.4x", scid, dcid);

	/* Lookup returns with the socket bh-locked */
	sk = l2cap_get_chan_by_scid(&conn->chan_list, dcid);
	if (!sk)
		return 0;

	/* Echo the channel ids back to confirm the disconnect */
	rsp.dcid = cpu_to_le16(l2cap_pi(sk)->scid);
	rsp.scid = cpu_to_le16(l2cap_pi(sk)->dcid);
	l2cap_send_cmd(conn, cmd->ident, L2CAP_DISCONN_RSP, sizeof(rsp), &rsp);

	sk->sk_shutdown = SHUTDOWN_MASK;

	/* don't delete l2cap channel if sk is owned by user */
	if (sock_owned_by_user(sk)) {
		sk->sk_state = BT_DISCONN;
		l2cap_sock_clear_timer(sk);
		l2cap_sock_set_timer(sk, HZ / 5);
		bh_unlock_sock(sk);
		return 0;
	}

	l2cap_chan_del(sk, ECONNRESET);
	bh_unlock_sock(sk);

	l2cap_sock_kill(sk);
	return 0;
}
2384
/* Handle a Disconnect Response to our own request: delete the channel,
 * or -- when userspace currently holds the socket lock -- defer the
 * teardown to a short timer (this handler runs in softirq context).
 */
static inline int l2cap_disconnect_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
{
	struct l2cap_disconn_rsp *rsp = (struct l2cap_disconn_rsp *) data;
	u16 dcid, scid;
	struct sock *sk;

	scid = __le16_to_cpu(rsp->scid);
	dcid = __le16_to_cpu(rsp->dcid);

	BT_DBG("dcid 0x%4.4x scid 0x%4.4x", dcid, scid);

	/* Lookup returns with the socket bh-locked */
	sk = l2cap_get_chan_by_scid(&conn->chan_list, scid);
	if (!sk)
		return 0;

	/* don't delete l2cap channel if sk is owned by user */
	if (sock_owned_by_user(sk)) {
		sk->sk_state = BT_DISCONN;
		l2cap_sock_clear_timer(sk);
		l2cap_sock_set_timer(sk, HZ / 5);
		bh_unlock_sock(sk);
		return 0;
	}

	l2cap_chan_del(sk, 0);
	bh_unlock_sock(sk);

	l2cap_sock_kill(sk);
	return 0;
}
2415
/* Answer an Information Request. Supported queries: the extended
 * feature mask (advertising ERTM/streaming/FCS unless ERTM is disabled)
 * and the fixed-channel map; everything else gets "not supported".
 */
static inline int l2cap_information_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
{
	struct l2cap_info_req *req = (struct l2cap_info_req *) data;
	u16 type;

	type = __le16_to_cpu(req->type);

	BT_DBG("type 0x%4.4x", type);

	if (type == L2CAP_IT_FEAT_MASK) {
		u8 buf[8];
		u32 feat_mask = l2cap_feat_mask;
		struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
		rsp->type   = cpu_to_le16(L2CAP_IT_FEAT_MASK);
		rsp->result = cpu_to_le16(L2CAP_IR_SUCCESS);
		if (!disable_ertm)
			feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING
							 | L2CAP_FEAT_FCS;
		put_unaligned_le32(feat_mask, rsp->data);
		l2cap_send_cmd(conn, cmd->ident,
					L2CAP_INFO_RSP, sizeof(buf), buf);
	} else if (type == L2CAP_IT_FIXED_CHAN) {
		u8 buf[12];
		struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
		rsp->type   = cpu_to_le16(L2CAP_IT_FIXED_CHAN);
		rsp->result = cpu_to_le16(L2CAP_IR_SUCCESS);
		/* 8-byte fixed-channel bitmap follows the 4-byte header */
		memcpy(buf + 4, l2cap_fixed_chan, 8);
		l2cap_send_cmd(conn, cmd->ident,
					L2CAP_INFO_RSP, sizeof(buf), buf);
	} else {
		struct l2cap_info_rsp rsp;
		rsp.type   = cpu_to_le16(type);
		rsp.result = cpu_to_le16(L2CAP_IR_NOTSUPP);
		l2cap_send_cmd(conn, cmd->ident,
					L2CAP_INFO_RSP, sizeof(rsp), &rsp);
	}

	return 0;
}
2455
/* Handle an L2CAP Information Response to one of our queries.
 * Drives the two-stage info exchange: after a successful feature-mask
 * response we may additionally query the fixed channel list; once the
 * exchange completes (or fails) pending channels are started via
 * l2cap_conn_start(). Always returns 0.
 */
static inline int l2cap_information_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
{
	struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) data;
	u16 type, result;

	type = __le16_to_cpu(rsp->type);
	result = __le16_to_cpu(rsp->result);

	BT_DBG("type 0x%4.4x result 0x%2.2x", type, result);

	/* A response arrived, so the info request timeout is no longer
	 * needed. */
	del_timer(&conn->info_timer);

	if (result != L2CAP_IR_SUCCESS) {
		/* Peer rejected the query: treat the exchange as done and
		 * proceed with whatever features we assumed. */
		conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
		conn->info_ident = 0;

		l2cap_conn_start(conn);

		return 0;
	}

	if (type == L2CAP_IT_FEAT_MASK) {
		conn->feat_mask = get_unaligned_le32(rsp->data);

		if (conn->feat_mask & L2CAP_FEAT_FIXED_CHAN) {
			/* Stage two: ask for the fixed channel bitmap. */
			struct l2cap_info_req req;
			req.type = cpu_to_le16(L2CAP_IT_FIXED_CHAN);

			conn->info_ident = l2cap_get_ident(conn);

			l2cap_send_cmd(conn, conn->info_ident,
					L2CAP_INFO_REQ, sizeof(req), &req);
		} else {
			conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
			conn->info_ident = 0;

			l2cap_conn_start(conn);
		}
	} else if (type == L2CAP_IT_FIXED_CHAN) {
		conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
		conn->info_ident = 0;

		l2cap_conn_start(conn);
	}

	return 0;
}
2503
/* Dispatch a single BR/EDR signaling channel command to its handler.
 * Returns 0 on success (or for commands that are silently consumed),
 * a negative error when the command is malformed/unknown — the caller
 * answers such errors with an L2CAP Command Reject.
 */
static inline int l2cap_bredr_sig_cmd(struct l2cap_conn *conn,
					struct l2cap_cmd_hdr *cmd, u16 cmd_len, u8 *data)
{
	int err = 0;

	switch (cmd->code) {
	case L2CAP_COMMAND_REJ:
		l2cap_command_rej(conn, cmd, data);
		break;

	case L2CAP_CONN_REQ:
		err = l2cap_connect_req(conn, cmd, data);
		break;

	case L2CAP_CONN_RSP:
		err = l2cap_connect_rsp(conn, cmd, data);
		break;

	case L2CAP_CONF_REQ:
		err = l2cap_config_req(conn, cmd, cmd_len, data);
		break;

	case L2CAP_CONF_RSP:
		err = l2cap_config_rsp(conn, cmd, data);
		break;

	case L2CAP_DISCONN_REQ:
		err = l2cap_disconnect_req(conn, cmd, data);
		break;

	case L2CAP_DISCONN_RSP:
		err = l2cap_disconnect_rsp(conn, cmd, data);
		break;

	case L2CAP_ECHO_REQ:
		/* Echo the request payload straight back. */
		l2cap_send_cmd(conn, cmd->ident, L2CAP_ECHO_RSP, cmd_len, data);
		break;

	case L2CAP_ECHO_RSP:
		/* Nothing to do for an echo response. */
		break;

	case L2CAP_INFO_REQ:
		err = l2cap_information_req(conn, cmd, data);
		break;

	case L2CAP_INFO_RSP:
		err = l2cap_information_rsp(conn, cmd, data);
		break;

	default:
		BT_ERR("Unknown BR/EDR signaling command 0x%2.2x", cmd->code);
		err = -EINVAL;
		break;
	}

	return err;
}
2561
2562static inline int l2cap_le_sig_cmd(struct l2cap_conn *conn,
2563 struct l2cap_cmd_hdr *cmd, u8 *data)
2564{
2565 switch (cmd->code) {
2566 case L2CAP_COMMAND_REJ:
2567 return 0;
2568
2569 case L2CAP_CONN_PARAM_UPDATE_REQ:
2570 return -EINVAL;
2571
2572 case L2CAP_CONN_PARAM_UPDATE_RSP:
2573 return 0;
2574
2575 default:
2576 BT_ERR("Unknown LE signaling command 0x%2.2x", cmd->code);
2577 return -EINVAL;
2578 }
2579}
2580
/* Parse and dispatch every signaling command contained in one skb.
 * A signaling PDU may carry several commands back to back; each is
 * routed to the LE or BR/EDR dispatcher depending on the link type.
 * A handler error is answered with an L2CAP Command Reject. Consumes
 * the skb.
 */
static inline void l2cap_sig_channel(struct l2cap_conn *conn,
						struct sk_buff *skb)
{
	u8 *data = skb->data;
	int len = skb->len;
	struct l2cap_cmd_hdr cmd;
	int err;

	/* Give raw sockets a copy before parsing. */
	l2cap_raw_recv(conn, skb);

	while (len >= L2CAP_CMD_HDR_SIZE) {
		u16 cmd_len;
		memcpy(&cmd, data, L2CAP_CMD_HDR_SIZE);
		data += L2CAP_CMD_HDR_SIZE;
		len -= L2CAP_CMD_HDR_SIZE;

		cmd_len = le16_to_cpu(cmd.len);

		BT_DBG("code 0x%2.2x len %d id 0x%2.2x", cmd.code, cmd_len, cmd.ident);

		/* Payload must fit in what's left, and ident 0 is invalid
		 * per the spec — stop parsing a corrupted PDU. */
		if (cmd_len > len || !cmd.ident) {
			BT_DBG("corrupted command");
			break;
		}

		if (conn->hcon->type == LE_LINK)
			err = l2cap_le_sig_cmd(conn, &cmd, data);
		else
			err = l2cap_bredr_sig_cmd(conn, &cmd, cmd_len, data);

		if (err) {
			struct l2cap_cmd_rej rej;
			BT_DBG("error %d", err);

			/* FIXME: Map err to a valid reason */
			rej.reason = cpu_to_le16(0);
			l2cap_send_cmd(conn, cmd.ident, L2CAP_COMMAND_REJ, sizeof(rej), &rej);
		}

		/* Advance past this command's payload to the next one. */
		data += cmd_len;
		len -= cmd_len;
	}

	kfree_skb(skb);
}
2626
/* Verify the 2-byte CRC16 FCS trailing an ERTM/Streaming PDU.
 * When FCS is in use the trailer is stripped from the skb (skb_trim)
 * and then read from just past the new end; the CRC covers the L2CAP
 * header (which sits immediately before skb->data) plus the payload.
 * Returns 0 on match or when FCS is disabled, -EBADMSG on mismatch.
 */
static int l2cap_check_fcs(struct l2cap_pinfo *pi, struct sk_buff *skb)
{
	u16 our_fcs, rcv_fcs;
	/* basic L2CAP header + 2-byte control field */
	int hdr_size = L2CAP_HDR_SIZE + 2;

	if (pi->fcs == L2CAP_FCS_CRC16) {
		skb_trim(skb, skb->len - 2);
		/* After the trim, skb->data + skb->len points at the FCS
		 * bytes that were just cut off. */
		rcv_fcs = get_unaligned_le16(skb->data + skb->len);
		our_fcs = crc16(0, skb->data - hdr_size, skb->len + hdr_size);

		if (our_fcs != rcv_fcs)
			return -EBADMSG;
	}
	return 0;
}
2642
/* Answer a peer poll (P-bit) with whatever best reflects our state:
 * an RNR if we are locally busy, retransmissions/I-frames if we have
 * them, or a plain RR when nothing else was sent.
 */
static inline void l2cap_send_i_or_rr_or_rnr(struct sock *sk)
{
	struct l2cap_pinfo *pi = l2cap_pi(sk);
	u16 control = 0;

	pi->frames_sent = 0;

	control |= pi->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT;

	if (pi->conn_state & L2CAP_CONN_LOCAL_BUSY) {
		control |= L2CAP_SUPER_RCV_NOT_READY;
		l2cap_send_sframe(pi, control);
		pi->conn_state |= L2CAP_CONN_RNR_SENT;
	}

	if (pi->conn_state & L2CAP_CONN_REMOTE_BUSY)
		l2cap_retransmit_frames(sk);

	l2cap_ertm_send(sk);

	/* Nothing went out and we are not busy: acknowledge the poll
	 * with a bare RR so the peer's F-bit expectation is satisfied. */
	if (!(pi->conn_state & L2CAP_CONN_LOCAL_BUSY) &&
			pi->frames_sent == 0) {
		control |= L2CAP_SUPER_RCV_READY;
		l2cap_send_sframe(pi, control);
	}
}
2669
/* Insert an out-of-sequence I-frame into the SREJ queue, keeping the
 * queue ordered by tx_seq relative to buffer_seq (sequence numbers are
 * modulo 64, so ordering is done on offsets from buffer_seq).
 * Returns 0 on insert, -EINVAL if a frame with this tx_seq is already
 * queued (duplicate).
 */
static int l2cap_add_to_srej_queue(struct sock *sk, struct sk_buff *skb, u8 tx_seq, u8 sar)
{
	struct sk_buff *next_skb;
	struct l2cap_pinfo *pi = l2cap_pi(sk);
	int tx_seq_offset, next_tx_seq_offset;

	bt_cb(skb)->tx_seq = tx_seq;
	bt_cb(skb)->sar = sar;

	next_skb = skb_peek(SREJ_QUEUE(sk));
	if (!next_skb) {
		__skb_queue_tail(SREJ_QUEUE(sk), skb);
		return 0;
	}

	/* Distance of the new frame from the reassembly window start. */
	tx_seq_offset = (tx_seq - pi->buffer_seq) % 64;
	if (tx_seq_offset < 0)
		tx_seq_offset += 64;

	do {
		if (bt_cb(next_skb)->tx_seq == tx_seq)
			return -EINVAL;

		next_tx_seq_offset = (bt_cb(next_skb)->tx_seq -
						pi->buffer_seq) % 64;
		if (next_tx_seq_offset < 0)
			next_tx_seq_offset += 64;

		/* First queued frame that lies beyond us: insert before it
		 * to keep the queue sorted. */
		if (next_tx_seq_offset > tx_seq_offset) {
			__skb_queue_before(SREJ_QUEUE(sk), next_skb, skb);
			return 0;
		}

		if (skb_queue_is_last(SREJ_QUEUE(sk), next_skb))
			break;

	} while ((next_skb = skb_queue_next(SREJ_QUEUE(sk), next_skb)));

	/* Largest offset so far: append at the tail. */
	__skb_queue_tail(SREJ_QUEUE(sk), skb);

	return 0;
}
2712
/* Reassemble one ERTM I-frame into an SDU according to its SAR bits
 * and deliver completed SDUs to the socket receive queue.
 * Returns 0 when the frame was consumed, or a negative error (-ENOMEM
 * or the sock_queue_rcv_skb() error) when delivery must be retried —
 * the caller then enters the local-busy path and keeps the frame.
 * SAR protocol violations tear the channel down (disconnect).
 */
static int l2cap_ertm_reassembly_sdu(struct sock *sk, struct sk_buff *skb, u16 control)
{
	struct l2cap_pinfo *pi = l2cap_pi(sk);
	struct sk_buff *_skb;
	int err;

	switch (control & L2CAP_CTRL_SAR) {
	case L2CAP_SDU_UNSEGMENTED:
		/* Unsegmented frame in the middle of a segmented SDU is a
		 * protocol violation. */
		if (pi->conn_state & L2CAP_CONN_SAR_SDU)
			goto drop;

		err = sock_queue_rcv_skb(sk, skb);
		if (!err)
			return err;

		break;

	case L2CAP_SDU_START:
		if (pi->conn_state & L2CAP_CONN_SAR_SDU)
			goto drop;

		/* First two payload bytes carry the total SDU length. */
		pi->sdu_len = get_unaligned_le16(skb->data);

		if (pi->sdu_len > pi->imtu)
			goto disconnect;

		pi->sdu = bt_skb_alloc(pi->sdu_len, GFP_ATOMIC);
		if (!pi->sdu)
			return -ENOMEM;

		/* pull sdu_len bytes only after alloc, because of Local Busy
		 * condition we have to be sure that this will be executed
		 * only once, i.e., when alloc does not fail */
		skb_pull(skb, 2);

		memcpy(skb_put(pi->sdu, skb->len), skb->data, skb->len);

		pi->conn_state |= L2CAP_CONN_SAR_SDU;
		pi->partial_sdu_len = skb->len;
		break;

	case L2CAP_SDU_CONTINUE:
		if (!(pi->conn_state & L2CAP_CONN_SAR_SDU))
			goto disconnect;

		if (!pi->sdu)
			goto disconnect;

		pi->partial_sdu_len += skb->len;
		if (pi->partial_sdu_len > pi->sdu_len)
			goto drop;

		memcpy(skb_put(pi->sdu, skb->len), skb->data, skb->len);

		break;

	case L2CAP_SDU_END:
		if (!(pi->conn_state & L2CAP_CONN_SAR_SDU))
			goto disconnect;

		if (!pi->sdu)
			goto disconnect;

		/* On a delivery retry (SAR_RETRY) the payload was already
		 * appended last time; don't account or copy it again. */
		if (!(pi->conn_state & L2CAP_CONN_SAR_RETRY)) {
			pi->partial_sdu_len += skb->len;

			if (pi->partial_sdu_len > pi->imtu)
				goto drop;

			if (pi->partial_sdu_len != pi->sdu_len)
				goto drop;

			memcpy(skb_put(pi->sdu, skb->len), skb->data, skb->len);
		}

		_skb = skb_clone(pi->sdu, GFP_ATOMIC);
		if (!_skb) {
			pi->conn_state |= L2CAP_CONN_SAR_RETRY;
			return -ENOMEM;
		}

		err = sock_queue_rcv_skb(sk, _skb);
		if (err < 0) {
			/* Keep pi->sdu; mark for retry once the receive
			 * queue drains. */
			kfree_skb(_skb);
			pi->conn_state |= L2CAP_CONN_SAR_RETRY;
			return err;
		}

		pi->conn_state &= ~L2CAP_CONN_SAR_RETRY;
		pi->conn_state &= ~L2CAP_CONN_SAR_SDU;

		kfree_skb(pi->sdu);
		break;
	}

	kfree_skb(skb);
	return 0;

drop:
	kfree_skb(pi->sdu);
	pi->sdu = NULL;

	/* fall through: a dropped SDU in ERTM is unrecoverable, so the
	 * channel is disconnected as well */
disconnect:
	l2cap_send_disconn_req(pi->conn, sk, ECONNRESET);
	kfree_skb(skb);
	return 0;
}
2820
/* Try to drain the local-busy backlog (BUSY_QUEUE) into the socket.
 * Returns -EBUSY if delivery stalls again (frame is put back at the
 * head), otherwise clears the local-busy condition, and — if an RNR
 * had been sent — polls the peer with an RR(P=1) to exit the busy
 * state on its side too. Returns 0 on success.
 */
static int l2cap_try_push_rx_skb(struct sock *sk)
{
	struct l2cap_pinfo *pi = l2cap_pi(sk);
	struct sk_buff *skb;
	u16 control;
	int err;

	while ((skb = skb_dequeue(BUSY_QUEUE(sk)))) {
		control = bt_cb(skb)->sar << L2CAP_CTRL_SAR_SHIFT;
		err = l2cap_ertm_reassembly_sdu(sk, skb, control);
		if (err < 0) {
			/* Still busy: requeue the frame and bail out. */
			skb_queue_head(BUSY_QUEUE(sk), skb);
			return -EBUSY;
		}

		pi->buffer_seq = (pi->buffer_seq + 1) % 64;
	}

	if (!(pi->conn_state & L2CAP_CONN_RNR_SENT))
		goto done;

	/* We told the peer we were busy: send RR with the poll bit set
	 * and wait for its F-bit response (monitor timer running). */
	control = pi->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT;
	control |= L2CAP_SUPER_RCV_READY | L2CAP_CTRL_POLL;
	l2cap_send_sframe(pi, control);
	l2cap_pi(sk)->retry_count = 1;

	del_timer(&pi->retrans_timer);
	__mod_monitor_timer();

	l2cap_pi(sk)->conn_state |= L2CAP_CONN_WAIT_F;

done:
	pi->conn_state &= ~L2CAP_CONN_LOCAL_BUSY;
	pi->conn_state &= ~L2CAP_CONN_RNR_SENT;

	BT_DBG("sk %p, Exit local busy", sk);

	return 0;
}
2860
/* Workqueue handler for the ERTM local-busy condition.
 * Repeatedly sleeps (~200ms per try) and retries pushing the queued
 * frames to the socket; gives up and disconnects the channel after
 * L2CAP_LOCAL_BUSY_TRIES attempts, a pending signal, or a socket
 * error. Runs in process context under lock_sock().
 */
static void l2cap_busy_work(struct work_struct *work)
{
	DECLARE_WAITQUEUE(wait, current);
	struct l2cap_pinfo *pi =
		container_of(work, struct l2cap_pinfo, busy_work);
	struct sock *sk = (struct sock *)pi;
	int n_tries = 0, timeo = HZ/5, err;
	struct sk_buff *skb;

	lock_sock(sk);

	add_wait_queue(sk_sleep(sk), &wait);
	while ((skb = skb_peek(BUSY_QUEUE(sk)))) {
		set_current_state(TASK_INTERRUPTIBLE);

		if (n_tries++ > L2CAP_LOCAL_BUSY_TRIES) {
			err = -EBUSY;
			l2cap_send_disconn_req(pi->conn, sk, EBUSY);
			break;
		}

		if (!timeo)
			timeo = HZ/5;

		if (signal_pending(current)) {
			err = sock_intr_errno(timeo);
			break;
		}

		/* Drop the socket lock while sleeping so the receive path
		 * can make progress (e.g. userspace reading the queue). */
		release_sock(sk);
		timeo = schedule_timeout(timeo);
		lock_sock(sk);

		err = sock_error(sk);
		if (err)
			break;

		if (l2cap_try_push_rx_skb(sk) == 0)
			break;
	}

	set_current_state(TASK_RUNNING);
	remove_wait_queue(sk_sleep(sk), &wait);

	release_sock(sk);
}
2907
/* Deliver an in-sequence I-frame, handling the local-busy state.
 * If already busy, the frame is queued and a drain is attempted.
 * If delivery fails (receive queue full / no memory), enters the
 * local-busy state: queues the frame, sends RNR to the peer, and
 * schedules l2cap_busy_work to retry. Returns the reassembly error,
 * or >= 0 when the frame was consumed.
 */
static int l2cap_push_rx_skb(struct sock *sk, struct sk_buff *skb, u16 control)
{
	struct l2cap_pinfo *pi = l2cap_pi(sk);
	int sctrl, err;

	if (pi->conn_state & L2CAP_CONN_LOCAL_BUSY) {
		bt_cb(skb)->sar = control >> L2CAP_CTRL_SAR_SHIFT;
		__skb_queue_tail(BUSY_QUEUE(sk), skb);
		return l2cap_try_push_rx_skb(sk);


	}

	err = l2cap_ertm_reassembly_sdu(sk, skb, control);
	if (err >= 0) {
		pi->buffer_seq = (pi->buffer_seq + 1) % 64;
		return err;
	}

	/* Busy Condition */
	BT_DBG("sk %p, Enter local busy", sk);

	pi->conn_state |= L2CAP_CONN_LOCAL_BUSY;
	bt_cb(skb)->sar = control >> L2CAP_CTRL_SAR_SHIFT;
	__skb_queue_tail(BUSY_QUEUE(sk), skb);

	/* Tell the peer to stop sending (RNR with current buffer_seq). */
	sctrl = pi->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT;
	sctrl |= L2CAP_SUPER_RCV_NOT_READY;
	l2cap_send_sframe(pi, sctrl);

	pi->conn_state |= L2CAP_CONN_RNR_SENT;

	/* No point acking while busy; the busy worker takes over. */
	del_timer(&pi->ack_timer);

	queue_work(_busy_wq, &pi->busy_work);

	return err;
}
2946
2947static int l2cap_streaming_reassembly_sdu(struct sock *sk, struct sk_buff *skb, u16 control)
2948{
2949 struct l2cap_pinfo *pi = l2cap_pi(sk);
2950 struct sk_buff *_skb;
2951 int err = -EINVAL;
2952
2953 /*
2954 * TODO: We have to notify the userland if some data is lost with the
2955 * Streaming Mode.
2956 */
2957
2958 switch (control & L2CAP_CTRL_SAR) {
2959 case L2CAP_SDU_UNSEGMENTED:
2960 if (pi->conn_state & L2CAP_CONN_SAR_SDU) {
2961 kfree_skb(pi->sdu);
2962 break;
2963 }
2964
2965 err = sock_queue_rcv_skb(sk, skb);
2966 if (!err)
2967 return 0;
2968
2969 break;
2970
2971 case L2CAP_SDU_START:
2972 if (pi->conn_state & L2CAP_CONN_SAR_SDU) {
2973 kfree_skb(pi->sdu);
2974 break;
2975 }
2976
2977 pi->sdu_len = get_unaligned_le16(skb->data);
2978 skb_pull(skb, 2);
2979
2980 if (pi->sdu_len > pi->imtu) {
2981 err = -EMSGSIZE;
2982 break;
2983 }
2984
2985 pi->sdu = bt_skb_alloc(pi->sdu_len, GFP_ATOMIC);
2986 if (!pi->sdu) {
2987 err = -ENOMEM;
2988 break;
2989 }
2990
2991 memcpy(skb_put(pi->sdu, skb->len), skb->data, skb->len);
2992
2993 pi->conn_state |= L2CAP_CONN_SAR_SDU;
2994 pi->partial_sdu_len = skb->len;
2995 err = 0;
2996 break;
2997
2998 case L2CAP_SDU_CONTINUE:
2999 if (!(pi->conn_state & L2CAP_CONN_SAR_SDU))
3000 break;
3001
3002 memcpy(skb_put(pi->sdu, skb->len), skb->data, skb->len);
3003
3004 pi->partial_sdu_len += skb->len;
3005 if (pi->partial_sdu_len > pi->sdu_len)
3006 kfree_skb(pi->sdu);
3007 else
3008 err = 0;
3009
3010 break;
3011
3012 case L2CAP_SDU_END:
3013 if (!(pi->conn_state & L2CAP_CONN_SAR_SDU))
3014 break;
3015
3016 memcpy(skb_put(pi->sdu, skb->len), skb->data, skb->len);
3017
3018 pi->conn_state &= ~L2CAP_CONN_SAR_SDU;
3019 pi->partial_sdu_len += skb->len;
3020
3021 if (pi->partial_sdu_len > pi->imtu)
3022 goto drop;
3023
3024 if (pi->partial_sdu_len == pi->sdu_len) {
3025 _skb = skb_clone(pi->sdu, GFP_ATOMIC);
3026 err = sock_queue_rcv_skb(sk, _skb);
3027 if (err < 0)
3028 kfree_skb(_skb);
3029 }
3030 err = 0;
3031
3032drop:
3033 kfree_skb(pi->sdu);
3034 break;
3035 }
3036
3037 kfree_skb(skb);
3038 return err;
3039}
3040
/* After a missing frame arrives, deliver the now-contiguous run of
 * frames held in the SREJ queue, starting at tx_seq, advancing
 * buffer_seq_srej for each one. Stops at the first gap.
 */
static void l2cap_check_srej_gap(struct sock *sk, u8 tx_seq)
{
	struct sk_buff *skb;
	u16 control;

	while ((skb = skb_peek(SREJ_QUEUE(sk)))) {
		/* Queue is sorted; a mismatch at the head means a gap. */
		if (bt_cb(skb)->tx_seq != tx_seq)
			break;

		skb = skb_dequeue(SREJ_QUEUE(sk));
		control = bt_cb(skb)->sar << L2CAP_CTRL_SAR_SHIFT;
		l2cap_ertm_reassembly_sdu(sk, skb, control);
		l2cap_pi(sk)->buffer_seq_srej =
			(l2cap_pi(sk)->buffer_seq_srej + 1) % 64;
		tx_seq = (tx_seq + 1) % 64;
	}
}
3058
3059static void l2cap_resend_srejframe(struct sock *sk, u8 tx_seq)
3060{
3061 struct l2cap_pinfo *pi = l2cap_pi(sk);
3062 struct srej_list *l, *tmp;
3063 u16 control;
3064
3065 list_for_each_entry_safe(l, tmp, SREJ_LIST(sk), list) {
3066 if (l->tx_seq == tx_seq) {
3067 list_del(&l->list);
3068 kfree(l);
3069 return;
3070 }
3071 control = L2CAP_SUPER_SELECT_REJECT;
3072 control |= l->tx_seq << L2CAP_CTRL_REQSEQ_SHIFT;
3073 l2cap_send_sframe(pi, control);
3074 list_del(&l->list);
3075 list_add_tail(&l->list, SREJ_LIST(sk));
3076 }
3077}
3078
3079static void l2cap_send_srejframe(struct sock *sk, u8 tx_seq)
3080{
3081 struct l2cap_pinfo *pi = l2cap_pi(sk);
3082 struct srej_list *new;
3083 u16 control;
3084
3085 while (tx_seq != pi->expected_tx_seq) {
3086 control = L2CAP_SUPER_SELECT_REJECT;
3087 control |= pi->expected_tx_seq << L2CAP_CTRL_REQSEQ_SHIFT;
3088 l2cap_send_sframe(pi, control);
3089
3090 new = kzalloc(sizeof(struct srej_list), GFP_ATOMIC);
3091 new->tx_seq = pi->expected_tx_seq;
3092 pi->expected_tx_seq = (pi->expected_tx_seq + 1) % 64;
3093 list_add_tail(&new->list, SREJ_LIST(sk));
3094 }
3095 pi->expected_tx_seq = (pi->expected_tx_seq + 1) % 64;
3096}
3097
3098static inline int l2cap_data_channel_iframe(struct sock *sk, u16 rx_control, struct sk_buff *skb)
3099{
3100 struct l2cap_pinfo *pi = l2cap_pi(sk);
3101 u8 tx_seq = __get_txseq(rx_control);
3102 u8 req_seq = __get_reqseq(rx_control);
3103 u8 sar = rx_control >> L2CAP_CTRL_SAR_SHIFT;
3104 int tx_seq_offset, expected_tx_seq_offset;
3105 int num_to_ack = (pi->tx_win/6) + 1;
3106 int err = 0;
3107
3108 BT_DBG("sk %p len %d tx_seq %d rx_control 0x%4.4x", sk, skb->len, tx_seq,
3109 rx_control);
3110
3111 if (L2CAP_CTRL_FINAL & rx_control &&
3112 l2cap_pi(sk)->conn_state & L2CAP_CONN_WAIT_F) {
3113 del_timer(&pi->monitor_timer);
3114 if (pi->unacked_frames > 0)
3115 __mod_retrans_timer();
3116 pi->conn_state &= ~L2CAP_CONN_WAIT_F;
3117 }
3118
3119 pi->expected_ack_seq = req_seq;
3120 l2cap_drop_acked_frames(sk);
3121
3122 if (tx_seq == pi->expected_tx_seq)
3123 goto expected;
3124
3125 tx_seq_offset = (tx_seq - pi->buffer_seq) % 64;
3126 if (tx_seq_offset < 0)
3127 tx_seq_offset += 64;
3128
3129 /* invalid tx_seq */
3130 if (tx_seq_offset >= pi->tx_win) {
3131 l2cap_send_disconn_req(pi->conn, sk, ECONNRESET);
3132 goto drop;
3133 }
3134
3135 if (pi->conn_state == L2CAP_CONN_LOCAL_BUSY)
3136 goto drop;
3137
3138 if (pi->conn_state & L2CAP_CONN_SREJ_SENT) {
3139 struct srej_list *first;
3140
3141 first = list_first_entry(SREJ_LIST(sk),
3142 struct srej_list, list);
3143 if (tx_seq == first->tx_seq) {
3144 l2cap_add_to_srej_queue(sk, skb, tx_seq, sar);
3145 l2cap_check_srej_gap(sk, tx_seq);
3146
3147 list_del(&first->list);
3148 kfree(first);
3149
3150 if (list_empty(SREJ_LIST(sk))) {
3151 pi->buffer_seq = pi->buffer_seq_srej;
3152 pi->conn_state &= ~L2CAP_CONN_SREJ_SENT;
3153 l2cap_send_ack(pi);
3154 BT_DBG("sk %p, Exit SREJ_SENT", sk);
3155 }
3156 } else {
3157 struct srej_list *l;
3158
3159 /* duplicated tx_seq */
3160 if (l2cap_add_to_srej_queue(sk, skb, tx_seq, sar) < 0)
3161 goto drop;
3162
3163 list_for_each_entry(l, SREJ_LIST(sk), list) {
3164 if (l->tx_seq == tx_seq) {
3165 l2cap_resend_srejframe(sk, tx_seq);
3166 return 0;
3167 }
3168 }
3169 l2cap_send_srejframe(sk, tx_seq);
3170 }
3171 } else {
3172 expected_tx_seq_offset =
3173 (pi->expected_tx_seq - pi->buffer_seq) % 64;
3174 if (expected_tx_seq_offset < 0)
3175 expected_tx_seq_offset += 64;
3176
3177 /* duplicated tx_seq */
3178 if (tx_seq_offset < expected_tx_seq_offset)
3179 goto drop;
3180
3181 pi->conn_state |= L2CAP_CONN_SREJ_SENT;
3182
3183 BT_DBG("sk %p, Enter SREJ", sk);
3184
3185 INIT_LIST_HEAD(SREJ_LIST(sk));
3186 pi->buffer_seq_srej = pi->buffer_seq;
3187
3188 __skb_queue_head_init(SREJ_QUEUE(sk));
3189 __skb_queue_head_init(BUSY_QUEUE(sk));
3190 l2cap_add_to_srej_queue(sk, skb, tx_seq, sar);
3191
3192 pi->conn_state |= L2CAP_CONN_SEND_PBIT;
3193
3194 l2cap_send_srejframe(sk, tx_seq);
3195
3196 del_timer(&pi->ack_timer);
3197 }
3198 return 0;
3199
3200expected:
3201 pi->expected_tx_seq = (pi->expected_tx_seq + 1) % 64;
3202
3203 if (pi->conn_state & L2CAP_CONN_SREJ_SENT) {
3204 bt_cb(skb)->tx_seq = tx_seq;
3205 bt_cb(skb)->sar = sar;
3206 __skb_queue_tail(SREJ_QUEUE(sk), skb);
3207 return 0;
3208 }
3209
3210 err = l2cap_push_rx_skb(sk, skb, rx_control);
3211 if (err < 0)
3212 return 0;
3213
3214 if (rx_control & L2CAP_CTRL_FINAL) {
3215 if (pi->conn_state & L2CAP_CONN_REJ_ACT)
3216 pi->conn_state &= ~L2CAP_CONN_REJ_ACT;
3217 else
3218 l2cap_retransmit_frames(sk);
3219 }
3220
3221 __mod_ack_timer();
3222
3223 pi->num_acked = (pi->num_acked + 1) % num_to_ack;
3224 if (pi->num_acked == num_to_ack - 1)
3225 l2cap_send_ack(pi);
3226
3227 return 0;
3228
3229drop:
3230 kfree_skb(skb);
3231 return 0;
3232}
3233
/* Handle a Receiver Ready (RR) supervisory frame: acknowledge frames
 * up to req_seq, then react to the P/F bits — answer a poll, process
 * a final, or simply resume transmission.
 */
static inline void l2cap_data_channel_rrframe(struct sock *sk, u16 rx_control)
{
	struct l2cap_pinfo *pi = l2cap_pi(sk);

	BT_DBG("sk %p, req_seq %d ctrl 0x%4.4x", sk, __get_reqseq(rx_control),
						rx_control);

	pi->expected_ack_seq = __get_reqseq(rx_control);
	l2cap_drop_acked_frames(sk);

	if (rx_control & L2CAP_CTRL_POLL) {
		/* Peer is polling us: the answer must carry the F-bit. */
		pi->conn_state |= L2CAP_CONN_SEND_FBIT;
		if (pi->conn_state & L2CAP_CONN_SREJ_SENT) {
			if ((pi->conn_state & L2CAP_CONN_REMOTE_BUSY) &&
					(pi->unacked_frames > 0))
				__mod_retrans_timer();

			pi->conn_state &= ~L2CAP_CONN_REMOTE_BUSY;
			l2cap_send_srejtail(sk);
		} else {
			l2cap_send_i_or_rr_or_rnr(sk);
		}

	} else if (rx_control & L2CAP_CTRL_FINAL) {
		/* RR(F=1): peer left busy / answered our poll. */
		pi->conn_state &= ~L2CAP_CONN_REMOTE_BUSY;

		if (pi->conn_state & L2CAP_CONN_REJ_ACT)
			pi->conn_state &= ~L2CAP_CONN_REJ_ACT;
		else
			l2cap_retransmit_frames(sk);

	} else {
		/* Plain RR: restart retransmission timer if frames are
		 * still unacknowledged, then keep sending. */
		if ((pi->conn_state & L2CAP_CONN_REMOTE_BUSY) &&
				(pi->unacked_frames > 0))
			__mod_retrans_timer();

		pi->conn_state &= ~L2CAP_CONN_REMOTE_BUSY;
		if (pi->conn_state & L2CAP_CONN_SREJ_SENT)
			l2cap_send_ack(pi);
		else
			l2cap_ertm_send(sk);
	}
}
3277
/* Handle a Reject (REJ) supervisory frame: the peer asks for a
 * go-back-N retransmission starting at req_seq. REJ_ACT tracks a REJ
 * already acted upon while waiting for our poll's F-bit, to avoid
 * retransmitting the same window twice.
 */
static inline void l2cap_data_channel_rejframe(struct sock *sk, u16 rx_control)
{
	struct l2cap_pinfo *pi = l2cap_pi(sk);
	u8 tx_seq = __get_reqseq(rx_control);

	BT_DBG("sk %p, req_seq %d ctrl 0x%4.4x", sk, tx_seq, rx_control);

	pi->conn_state &= ~L2CAP_CONN_REMOTE_BUSY;

	pi->expected_ack_seq = tx_seq;
	l2cap_drop_acked_frames(sk);

	if (rx_control & L2CAP_CTRL_FINAL) {
		/* REJ(F=1): only retransmit if we hadn't already done so
		 * for this recovery round. */
		if (pi->conn_state & L2CAP_CONN_REJ_ACT)
			pi->conn_state &= ~L2CAP_CONN_REJ_ACT;
		else
			l2cap_retransmit_frames(sk);
	} else {
		l2cap_retransmit_frames(sk);

		/* A poll of ours is outstanding: remember that this REJ
		 * was already handled when the F-bit response arrives. */
		if (pi->conn_state & L2CAP_CONN_WAIT_F)
			pi->conn_state |= L2CAP_CONN_REJ_ACT;
	}
}
/* Handle a Selective Reject (SREJ) supervisory frame: retransmit the
 * single frame req_seq. SREJ_ACT/srej_save_reqseq mirror the REJ_ACT
 * logic, preventing a double retransmission of the same frame across
 * a poll/final exchange.
 */
static inline void l2cap_data_channel_srejframe(struct sock *sk, u16 rx_control)
{
	struct l2cap_pinfo *pi = l2cap_pi(sk);
	u8 tx_seq = __get_reqseq(rx_control);

	BT_DBG("sk %p, req_seq %d ctrl 0x%4.4x", sk, tx_seq, rx_control);

	pi->conn_state &= ~L2CAP_CONN_REMOTE_BUSY;

	if (rx_control & L2CAP_CTRL_POLL) {
		/* SREJ(P=1) also acknowledges everything before tx_seq. */
		pi->expected_ack_seq = tx_seq;
		l2cap_drop_acked_frames(sk);

		pi->conn_state |= L2CAP_CONN_SEND_FBIT;
		l2cap_retransmit_one_frame(sk, tx_seq);

		l2cap_ertm_send(sk);

		if (pi->conn_state & L2CAP_CONN_WAIT_F) {
			pi->srej_save_reqseq = tx_seq;
			pi->conn_state |= L2CAP_CONN_SREJ_ACT;
		}
	} else if (rx_control & L2CAP_CTRL_FINAL) {
		/* Skip the retransmission if this very frame was already
		 * resent during the poll exchange. */
		if ((pi->conn_state & L2CAP_CONN_SREJ_ACT) &&
				pi->srej_save_reqseq == tx_seq)
			pi->conn_state &= ~L2CAP_CONN_SREJ_ACT;
		else
			l2cap_retransmit_one_frame(sk, tx_seq);
	} else {
		l2cap_retransmit_one_frame(sk, tx_seq);
		if (pi->conn_state & L2CAP_CONN_WAIT_F) {
			pi->srej_save_reqseq = tx_seq;
			pi->conn_state |= L2CAP_CONN_SREJ_ACT;
		}
	}
}
3338
/* Handle a Receiver Not Ready (RNR) supervisory frame: mark the peer
 * busy, stop retransmitting, and answer a poll appropriately.
 */
static inline void l2cap_data_channel_rnrframe(struct sock *sk, u16 rx_control)
{
	struct l2cap_pinfo *pi = l2cap_pi(sk);
	u8 tx_seq = __get_reqseq(rx_control);

	BT_DBG("sk %p, req_seq %d ctrl 0x%4.4x", sk, tx_seq, rx_control);

	pi->conn_state |= L2CAP_CONN_REMOTE_BUSY;
	pi->expected_ack_seq = tx_seq;
	l2cap_drop_acked_frames(sk);

	if (rx_control & L2CAP_CTRL_POLL)
		pi->conn_state |= L2CAP_CONN_SEND_FBIT;

	if (!(pi->conn_state & L2CAP_CONN_SREJ_SENT)) {
		/* Peer can't receive: no point running the retransmission
		 * timer; just acknowledge the poll if there was one. */
		del_timer(&pi->retrans_timer);
		if (rx_control & L2CAP_CTRL_POLL)
			l2cap_send_rr_or_rnr(pi, L2CAP_CTRL_FINAL);
		return;
	}

	/* SREJ recovery in progress. */
	if (rx_control & L2CAP_CTRL_POLL)
		l2cap_send_srejtail(sk);
	else
		l2cap_send_sframe(pi, L2CAP_SUPER_RCV_READY);
}
3365
/* Dispatch a received supervisory frame (RR/REJ/SREJ/RNR) to its
 * handler. The 2-bit SUPERVISE field makes the four cases exhaustive.
 * Consumes the skb; always returns 0.
 */
static inline int l2cap_data_channel_sframe(struct sock *sk, u16 rx_control, struct sk_buff *skb)
{
	BT_DBG("sk %p rx_control 0x%4.4x len %d", sk, rx_control, skb->len);

	/* F-bit answering our poll: stop the monitor timer and resume
	 * normal retransmission handling. */
	if (L2CAP_CTRL_FINAL & rx_control &&
			l2cap_pi(sk)->conn_state & L2CAP_CONN_WAIT_F) {
		del_timer(&l2cap_pi(sk)->monitor_timer);
		if (l2cap_pi(sk)->unacked_frames > 0)
			__mod_retrans_timer();
		l2cap_pi(sk)->conn_state &= ~L2CAP_CONN_WAIT_F;
	}

	switch (rx_control & L2CAP_CTRL_SUPERVISE) {
	case L2CAP_SUPER_RCV_READY:
		l2cap_data_channel_rrframe(sk, rx_control);
		break;

	case L2CAP_SUPER_REJECT:
		l2cap_data_channel_rejframe(sk, rx_control);
		break;

	case L2CAP_SUPER_SELECT_REJECT:
		l2cap_data_channel_srejframe(sk, rx_control);
		break;

	case L2CAP_SUPER_RCV_NOT_READY:
		l2cap_data_channel_rnrframe(sk, rx_control);
		break;
	}

	kfree_skb(skb);
	return 0;
}
3399
/* Validate and route one received ERTM PDU (I-frame or S-frame).
 * Checks FCS, payload length against MPS, and that req_seq lies within
 * the window of frames we actually sent; violations either drop the
 * frame (FCS) or disconnect the channel. Always returns 0.
 */
static int l2cap_ertm_data_rcv(struct sock *sk, struct sk_buff *skb)
{
	struct l2cap_pinfo *pi = l2cap_pi(sk);
	u16 control;
	u8 req_seq;
	int len, next_tx_seq_offset, req_seq_offset;

	control = get_unaligned_le16(skb->data);
	skb_pull(skb, 2);
	len = skb->len;

	/*
	 * We can just drop the corrupted I-frame here.
	 * Receiver will miss it and start proper recovery
	 * procedures and ask retransmission.
	 */
	if (l2cap_check_fcs(pi, skb))
		goto drop;

	/* Account for the 2-byte SDU length field of a SAR start frame
	 * and the 2-byte FCS (already trimmed by l2cap_check_fcs). */
	if (__is_sar_start(control) && __is_iframe(control))
		len -= 2;

	if (pi->fcs == L2CAP_FCS_CRC16)
		len -= 2;

	if (len > pi->mps) {
		l2cap_send_disconn_req(pi->conn, sk, ECONNRESET);
		goto drop;
	}

	req_seq = __get_reqseq(control);
	req_seq_offset = (req_seq - pi->expected_ack_seq) % 64;
	if (req_seq_offset < 0)
		req_seq_offset += 64;

	next_tx_seq_offset =
		(pi->next_tx_seq - pi->expected_ack_seq) % 64;
	if (next_tx_seq_offset < 0)
		next_tx_seq_offset += 64;

	/* check for invalid req-seq */
	if (req_seq_offset > next_tx_seq_offset) {
		l2cap_send_disconn_req(pi->conn, sk, ECONNRESET);
		goto drop;
	}

	if (__is_iframe(control)) {
		/* len < 0 here means the frame was shorter than its
		 * mandatory fields — protocol violation. */
		if (len < 0) {
			l2cap_send_disconn_req(pi->conn, sk, ECONNRESET);
			goto drop;
		}

		l2cap_data_channel_iframe(sk, control, skb);
	} else {
		/* S-frames carry no payload at all. */
		if (len != 0) {
			BT_ERR("%d", len);
			l2cap_send_disconn_req(pi->conn, sk, ECONNRESET);
			goto drop;
		}

		l2cap_data_channel_sframe(sk, control, skb);
	}

	return 0;

drop:
	kfree_skb(skb);
	return 0;
}
3469
/* Deliver a data PDU on a connection-oriented channel identified by
 * its destination CID, according to the channel mode (Basic, ERTM or
 * Streaming). l2cap_get_chan_by_scid() returns the socket bh-locked;
 * it is unlocked at 'done'. Consumes the skb; always returns 0.
 */
static inline int l2cap_data_channel(struct l2cap_conn *conn, u16 cid, struct sk_buff *skb)
{
	struct sock *sk;
	struct l2cap_pinfo *pi;
	u16 control;
	u8 tx_seq;
	int len;

	sk = l2cap_get_chan_by_scid(&conn->chan_list, cid);
	if (!sk) {
		BT_DBG("unknown cid 0x%4.4x", cid);
		goto drop;
	}

	pi = l2cap_pi(sk);

	BT_DBG("sk %p, len %d", sk, skb->len);

	if (sk->sk_state != BT_CONNECTED)
		goto drop;

	switch (pi->mode) {
	case L2CAP_MODE_BASIC:
		/* If socket recv buffers overflows we drop data here
		 * which is *bad* because L2CAP has to be reliable.
		 * But we don't have any other choice. L2CAP doesn't
		 * provide flow control mechanism. */

		if (pi->imtu < skb->len)
			goto drop;

		if (!sock_queue_rcv_skb(sk, skb))
			goto done;
		break;

	case L2CAP_MODE_ERTM:
		/* Process directly unless userspace holds the socket, in
		 * which case the frame goes to the backlog and is handled
		 * on release. */
		if (!sock_owned_by_user(sk)) {
			l2cap_ertm_data_rcv(sk, skb);
		} else {
			if (sk_add_backlog(sk, skb))
				goto drop;
		}

		goto done;

	case L2CAP_MODE_STREAMING:
		control = get_unaligned_le16(skb->data);
		skb_pull(skb, 2);
		len = skb->len;

		if (l2cap_check_fcs(pi, skb))
			goto drop;

		/* Account for the SDU-length field of a SAR start frame
		 * and the (already trimmed) FCS. */
		if (__is_sar_start(control))
			len -= 2;

		if (pi->fcs == L2CAP_FCS_CRC16)
			len -= 2;

		/* S-frames are not used in Streaming mode. */
		if (len > pi->mps || len < 0 || __is_sframe(control))
			goto drop;

		tx_seq = __get_txseq(control);

		/* Gaps are tolerated: just resync expected_tx_seq. */
		if (pi->expected_tx_seq == tx_seq)
			pi->expected_tx_seq = (pi->expected_tx_seq + 1) % 64;
		else
			pi->expected_tx_seq = (tx_seq + 1) % 64;

		l2cap_streaming_reassembly_sdu(sk, skb, control);

		goto done;

	default:
		BT_DBG("sk %p: bad mode 0x%2.2x", sk, pi->mode);
		break;
	}

drop:
	kfree_skb(skb);

done:
	if (sk)
		bh_unlock_sock(sk);

	return 0;
}
3557
/* Deliver a connectionless-channel PDU to the socket bound to the PSM
 * carried in the packet. Frames for unknown PSMs, sockets in the wrong
 * state, or exceeding the socket's incoming MTU are dropped. Consumes
 * the skb; always returns 0.
 */
static inline int l2cap_conless_channel(struct l2cap_conn *conn, __le16 psm, struct sk_buff *skb)
{
	struct sock *sk;

	sk = l2cap_get_sock_by_psm(0, psm, conn->src);
	if (!sk)
		goto drop;

	bh_lock_sock(sk);

	BT_DBG("sk %p, len %d", sk, skb->len);

	if (sk->sk_state != BT_BOUND && sk->sk_state != BT_CONNECTED)
		goto drop;

	if (l2cap_pi(sk)->imtu < skb->len)
		goto drop;

	if (!sock_queue_rcv_skb(sk, skb))
		goto done;

drop:
	kfree_skb(skb);

done:
	if (sk)
		bh_unlock_sock(sk);
	return 0;
}
3587
/* Entry point for a complete L2CAP frame from the HCI layer.
 * Validates the basic header length and routes by CID: signaling
 * channels, the connectionless channel (PSM-addressed), or a
 * connection-oriented data channel. Ownership of the skb passes to
 * the called handler (or it is freed here on a length mismatch).
 */
static void l2cap_recv_frame(struct l2cap_conn *conn, struct sk_buff *skb)
{
	struct l2cap_hdr *lh = (void *) skb->data;
	u16 cid, len;
	__le16 psm;

	skb_pull(skb, L2CAP_HDR_SIZE);
	cid = __le16_to_cpu(lh->cid);
	len = __le16_to_cpu(lh->len);

	/* Header length must match the actual payload exactly. */
	if (len != skb->len) {
		kfree_skb(skb);
		return;
	}

	BT_DBG("len %d, cid 0x%4.4x", len, cid);

	switch (cid) {
	case L2CAP_CID_LE_SIGNALING:
	case L2CAP_CID_SIGNALING:
		l2cap_sig_channel(conn, skb);
		break;

	case L2CAP_CID_CONN_LESS:
		/* Connectionless data starts with a 2-byte PSM. */
		psm = get_unaligned_le16(skb->data);
		skb_pull(skb, 2);
		l2cap_conless_channel(conn, psm, skb);
		break;

	default:
		l2cap_data_channel(conn, cid, skb);
		break;
	}
}
3622
3623/* ---- L2CAP interface with lower layer (HCI) ---- */
3624
3625static int l2cap_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
3626{
3627 int exact = 0, lm1 = 0, lm2 = 0;
3628 register struct sock *sk;
3629 struct hlist_node *node;
3630
3631 if (type != ACL_LINK)
3632 return -EINVAL;
3633
3634 BT_DBG("hdev %s, bdaddr %s", hdev->name, batostr(bdaddr));
3635
3636 /* Find listening sockets and check their link_mode */
3637 read_lock(&l2cap_sk_list.lock);
3638 sk_for_each(sk, node, &l2cap_sk_list.head) {
3639 if (sk->sk_state != BT_LISTEN)
3640 continue;
3641
3642 if (!bacmp(&bt_sk(sk)->src, &hdev->bdaddr)) {
3643 lm1 |= HCI_LM_ACCEPT;
3644 if (l2cap_pi(sk)->role_switch)
3645 lm1 |= HCI_LM_MASTER;
3646 exact++;
3647 } else if (!bacmp(&bt_sk(sk)->src, BDADDR_ANY)) {
3648 lm2 |= HCI_LM_ACCEPT;
3649 if (l2cap_pi(sk)->role_switch)
3650 lm2 |= HCI_LM_MASTER;
3651 }
3652 }
3653 read_unlock(&l2cap_sk_list.lock);
3654
3655 return exact ? lm1 : lm2;
3656}
3657
3658static int l2cap_connect_cfm(struct hci_conn *hcon, u8 status)
3659{
3660 struct l2cap_conn *conn;
3661
3662 BT_DBG("hcon %p bdaddr %s status %d", hcon, batostr(&hcon->dst), status);
3663
3664 if (!(hcon->type == ACL_LINK || hcon->type == LE_LINK))
3665 return -EINVAL;
3666
3667 if (!status) {
3668 conn = l2cap_conn_add(hcon, status);
3669 if (conn)
3670 l2cap_conn_ready(conn);
3671 } else
3672 l2cap_conn_del(hcon, bt_err(status));
3673
3674 return 0;
3675}
3676
/* HCI callback: the link is being disconnected; report the reason code
 * L2CAP wants HCI to use for the disconnect.
 */
static int l2cap_disconn_ind(struct hci_conn *hcon)
{
	struct l2cap_conn *conn = hcon->l2cap_data;

	BT_DBG("hcon %p", hcon);

	/* 0x13 = HCI "Remote User Terminated Connection" - the default
	 * when there is no L2CAP state (or a non-ACL link) to consult.
	 */
	if (hcon->type != ACL_LINK || !conn)
		return 0x13;

	return conn->disc_reason;
}
3688
3689static int l2cap_disconn_cfm(struct hci_conn *hcon, u8 reason)
3690{
3691 BT_DBG("hcon %p reason %d", hcon, reason);
3692
3693 if (!(hcon->type == ACL_LINK || hcon->type == LE_LINK))
3694 return -EINVAL;
3695
3696 l2cap_conn_del(hcon, bt_err(reason));
3697
3698 return 0;
3699}
3700
3701static inline void l2cap_check_encryption(struct sock *sk, u8 encrypt)
3702{
3703 if (sk->sk_type != SOCK_SEQPACKET && sk->sk_type != SOCK_STREAM)
3704 return;
3705
3706 if (encrypt == 0x00) {
3707 if (l2cap_pi(sk)->sec_level == BT_SECURITY_MEDIUM) {
3708 l2cap_sock_clear_timer(sk);
3709 l2cap_sock_set_timer(sk, HZ * 5);
3710 } else if (l2cap_pi(sk)->sec_level == BT_SECURITY_HIGH)
3711 __l2cap_sock_close(sk, ECONNREFUSED);
3712 } else {
3713 if (l2cap_pi(sk)->sec_level == BT_SECURITY_MEDIUM)
3714 l2cap_sock_clear_timer(sk);
3715 }
3716}
3717
/* HCI callback: authentication/encryption procedure finished on the link.
 * Walk every channel on the connection and advance its state machine:
 * - established channels (CONNECTED/CONFIG) re-check encryption policy
 * - BT_CONNECT channels send their deferred Connect Request on success
 * - BT_CONNECT2 channels answer the remote's Connect Request now that
 *   the security outcome is known
 */
static int l2cap_security_cfm(struct hci_conn *hcon, u8 status, u8 encrypt)
{
	struct l2cap_chan_list *l;
	struct l2cap_conn *conn = hcon->l2cap_data;
	struct sock *sk;

	if (!conn)
		return 0;

	l = &conn->chan_list;

	BT_DBG("conn %p", conn);

	read_lock(&l->lock);

	for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
		bh_lock_sock(sk);

		/* A connect request is already in flight for this channel;
		 * skip it - its own response will drive the state machine.
		 */
		if (l2cap_pi(sk)->conf_state & L2CAP_CONF_CONNECT_PEND) {
			bh_unlock_sock(sk);
			continue;
		}

		/* Established channels only need the encryption policy
		 * re-evaluated (may arm a timer or close the channel).
		 */
		if (!status && (sk->sk_state == BT_CONNECTED ||
					sk->sk_state == BT_CONFIG)) {
			l2cap_check_encryption(sk, encrypt);
			bh_unlock_sock(sk);
			continue;
		}

		if (sk->sk_state == BT_CONNECT) {
			if (!status) {
				/* Security done: fire the Connect Request
				 * that was waiting on it.
				 */
				struct l2cap_conn_req req;
				req.scid = cpu_to_le16(l2cap_pi(sk)->scid);
				req.psm = l2cap_pi(sk)->psm;

				l2cap_pi(sk)->ident = l2cap_get_ident(conn);
				l2cap_pi(sk)->conf_state |= L2CAP_CONF_CONNECT_PEND;

				l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
					L2CAP_CONN_REQ, sizeof(req), &req);
			} else {
				/* Security failed: short timer to tear the
				 * channel down out of this context.
				 */
				l2cap_sock_clear_timer(sk);
				l2cap_sock_set_timer(sk, HZ / 10);
			}
		} else if (sk->sk_state == BT_CONNECT2) {
			struct l2cap_conn_rsp rsp;
			__u16 result;

			if (!status) {
				sk->sk_state = BT_CONFIG;
				result = L2CAP_CR_SUCCESS;
			} else {
				sk->sk_state = BT_DISCONN;
				l2cap_sock_set_timer(sk, HZ / 10);
				result = L2CAP_CR_SEC_BLOCK;
			}

			/* Answer the remote's pending Connect Request with
			 * the security verdict.
			 */
			rsp.scid = cpu_to_le16(l2cap_pi(sk)->dcid);
			rsp.dcid = cpu_to_le16(l2cap_pi(sk)->scid);
			rsp.result = cpu_to_le16(result);
			rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
			l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
					L2CAP_CONN_RSP, sizeof(rsp), &rsp);
		}

		bh_unlock_sock(sk);
	}

	read_unlock(&l->lock);

	return 0;
}
3791
/* HCI callback: one ACL data packet arrived for this connection.
 * Reassembles L2CAP PDUs that HCI fragmented: a start fragment
 * (!ACL_CONT) carries the Basic L2CAP header holding the total PDU
 * length; continuation fragments are appended into conn->rx_skb until
 * conn->rx_len hits zero, then the complete frame goes to
 * l2cap_recv_frame().
 *
 * Always consumes @skb: either handed off as a complete frame, or
 * freed at "drop" (which is also the normal exit after a fragment's
 * payload has been copied into rx_skb). Returns 0.
 */
static int l2cap_recv_acldata(struct hci_conn *hcon, struct sk_buff *skb, u16 flags)
{
	struct l2cap_conn *conn = hcon->l2cap_data;

	/* First data on a fresh link: create the L2CAP connection object */
	if (!conn)
		conn = l2cap_conn_add(hcon, 0);

	if (!conn)
		goto drop;

	BT_DBG("conn %p len %d flags 0x%x", conn, skb->len, flags);

	if (!(flags & ACL_CONT)) {
		struct l2cap_hdr *hdr;
		struct sock *sk;
		u16 cid;
		int len;

		/* A start fragment while reassembly is still in progress
		 * means the previous PDU was truncated - discard it.
		 */
		if (conn->rx_len) {
			BT_ERR("Unexpected start frame (len %d)", skb->len);
			kfree_skb(conn->rx_skb);
			conn->rx_skb = NULL;
			conn->rx_len = 0;
			l2cap_conn_unreliable(conn, ECOMM);
		}

		/* Start fragment always begin with Basic L2CAP header */
		if (skb->len < L2CAP_HDR_SIZE) {
			BT_ERR("Frame is too short (len %d)", skb->len);
			l2cap_conn_unreliable(conn, ECOMM);
			goto drop;
		}

		hdr = (struct l2cap_hdr *) skb->data;
		len = __le16_to_cpu(hdr->len) + L2CAP_HDR_SIZE;
		cid = __le16_to_cpu(hdr->cid);

		if (len == skb->len) {
			/* Complete frame received - ownership of skb passes
			 * to l2cap_recv_frame(), so do not fall to drop.
			 */
			l2cap_recv_frame(conn, skb);
			return 0;
		}

		BT_DBG("Start: total len %d, frag len %d", len, skb->len);

		if (skb->len > len) {
			BT_ERR("Frame is too long (len %d, expected len %d)",
				skb->len, len);
			l2cap_conn_unreliable(conn, ECOMM);
			goto drop;
		}

		/* NOTE: l2cap_get_chan_by_scid() apparently returns with the
		 * socket bh-locked (see the unlocks below) - confirm against
		 * its definition.
		 */
		sk = l2cap_get_chan_by_scid(&conn->chan_list, cid);

		/* Reject PDUs larger than the channel's receive MTU early,
		 * before allocating a reassembly buffer.
		 */
		if (sk && l2cap_pi(sk)->imtu < len - L2CAP_HDR_SIZE) {
			BT_ERR("Frame exceeding recv MTU (len %d, MTU %d)",
					len, l2cap_pi(sk)->imtu);
			bh_unlock_sock(sk);
			l2cap_conn_unreliable(conn, ECOMM);
			goto drop;
		}

		if (sk)
			bh_unlock_sock(sk);

		/* Allocate skb for the complete frame (with header) */
		conn->rx_skb = bt_skb_alloc(len, GFP_ATOMIC);
		if (!conn->rx_skb)
			goto drop;

		skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
								skb->len);
		conn->rx_len = len - skb->len;
	} else {
		BT_DBG("Cont: frag len %d (expecting %d)", skb->len, conn->rx_len);

		/* Continuation with no reassembly in progress */
		if (!conn->rx_len) {
			BT_ERR("Unexpected continuation frame (len %d)", skb->len);
			l2cap_conn_unreliable(conn, ECOMM);
			goto drop;
		}

		/* Fragment would overflow the advertised PDU length */
		if (skb->len > conn->rx_len) {
			BT_ERR("Fragment is too long (len %d, expected %d)",
				skb->len, conn->rx_len);
			kfree_skb(conn->rx_skb);
			conn->rx_skb = NULL;
			conn->rx_len = 0;
			l2cap_conn_unreliable(conn, ECOMM);
			goto drop;
		}

		skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
								skb->len);
		conn->rx_len -= skb->len;

		if (!conn->rx_len) {
			/* Complete frame received */
			l2cap_recv_frame(conn, conn->rx_skb);
			conn->rx_skb = NULL;
		}
	}

drop:
	/* Reached on errors AND on the normal fragment path: the
	 * fragment's bytes were copied into rx_skb, so free it here.
	 */
	kfree_skb(skb);
	return 0;
}
3899
3900static int l2cap_debugfs_show(struct seq_file *f, void *p)
3901{
3902 struct sock *sk;
3903 struct hlist_node *node;
3904
3905 read_lock_bh(&l2cap_sk_list.lock);
3906
3907 sk_for_each(sk, node, &l2cap_sk_list.head) {
3908 struct l2cap_pinfo *pi = l2cap_pi(sk);
3909
3910 seq_printf(f, "%s %s %d %d 0x%4.4x 0x%4.4x %d %d %d %d\n",
3911 batostr(&bt_sk(sk)->src),
3912 batostr(&bt_sk(sk)->dst),
3913 sk->sk_state, __le16_to_cpu(pi->psm),
3914 pi->scid, pi->dcid,
3915 pi->imtu, pi->omtu, pi->sec_level,
3916 pi->mode);
3917 }
3918
3919 read_unlock_bh(&l2cap_sk_list.lock);
3920
3921 return 0;
3922}
3923
/* Open handler: hook the show callback into the seq_file machinery. */
static int l2cap_debugfs_open(struct inode *inode, struct file *file)
{
	return single_open(file, l2cap_debugfs_show, inode->i_private);
}
3928
/* Read-only debugfs file built on the seq_file single_open helpers. */
static const struct file_operations l2cap_debugfs_fops = {
	.open		= l2cap_debugfs_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

/* Dentry of the "l2cap" debugfs file; NULL when debugfs is unused. */
static struct dentry *l2cap_debugfs;
3937
/* Callbacks registered with the HCI core; invoked from HCI event and
 * data paths to drive L2CAP.
 */
static struct hci_proto l2cap_hci_proto = {
	.name		= "L2CAP",
	.id		= HCI_PROTO_L2CAP,
	.connect_ind	= l2cap_connect_ind,
	.connect_cfm	= l2cap_connect_cfm,
	.disconn_ind	= l2cap_disconn_ind,
	.disconn_cfm	= l2cap_disconn_cfm,
	.security_cfm	= l2cap_security_cfm,
	.recv_acldata	= l2cap_recv_acldata
};
3948
3949int __init l2cap_init(void)
3950{
3951 int err;
3952
3953 err = l2cap_init_sockets();
3954 if (err < 0)
3955 return err;
3956
3957 _busy_wq = create_singlethread_workqueue("l2cap");
3958 if (!_busy_wq) {
3959 err = -ENOMEM;
3960 goto error;
3961 }
3962
3963 err = hci_register_proto(&l2cap_hci_proto);
3964 if (err < 0) {
3965 BT_ERR("L2CAP protocol registration failed");
3966 bt_sock_unregister(BTPROTO_L2CAP);
3967 goto error;
3968 }
3969
3970 if (bt_debugfs) {
3971 l2cap_debugfs = debugfs_create_file("l2cap", 0444,
3972 bt_debugfs, NULL, &l2cap_debugfs_fops);
3973 if (!l2cap_debugfs)
3974 BT_ERR("Failed to create L2CAP debug file");
3975 }
3976
3977 BT_INFO("L2CAP socket layer initialized");
3978
3979 return 0;
3980
3981error:
3982 destroy_workqueue(_busy_wq);
3983 l2cap_cleanup_sockets();
3984 return err;
3985}
3986
/* Tear down the L2CAP layer, releasing resources acquired by
 * l2cap_init() in roughly reverse order.
 */
void l2cap_exit(void)
{
	debugfs_remove(l2cap_debugfs);

	/* Drain any queued busy-work before destroying the workqueue */
	flush_workqueue(_busy_wq);
	destroy_workqueue(_busy_wq);

	if (hci_unregister_proto(&l2cap_hci_proto) < 0)
		BT_ERR("L2CAP protocol unregistration failed");

	l2cap_cleanup_sockets();
}
3999
/* Load-time knob (also writable via sysfs, mode 0644) to turn off ERTM. */
module_param(disable_ertm, bool, 0644);
MODULE_PARM_DESC(disable_ertm, "Disable enhanced retransmission mode");