TOMOYO: Fix wrong domainname validation.
[GitHub/mt8127/android_kernel_alcatel_ttab.git] / net / bluetooth / l2cap_core.c
1 /*
2 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (C) 2000-2001 Qualcomm Incorporated
4 Copyright (C) 2009-2010 Gustavo F. Padovan <gustavo@padovan.org>
5 Copyright (C) 2010 Google Inc.
6
7 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
8
9 This program is free software; you can redistribute it and/or modify
10 it under the terms of the GNU General Public License version 2 as
11 published by the Free Software Foundation;
12
13 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
14 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
15 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
16 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
17 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
18 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
19 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
20 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
21
22 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
23 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
24 SOFTWARE IS DISCLAIMED.
25 */
26
27 /* Bluetooth L2CAP core. */
28
29 #include <linux/module.h>
30
31 #include <linux/types.h>
32 #include <linux/capability.h>
33 #include <linux/errno.h>
34 #include <linux/kernel.h>
35 #include <linux/sched.h>
36 #include <linux/slab.h>
37 #include <linux/poll.h>
38 #include <linux/fcntl.h>
39 #include <linux/init.h>
40 #include <linux/interrupt.h>
41 #include <linux/socket.h>
42 #include <linux/skbuff.h>
43 #include <linux/list.h>
44 #include <linux/device.h>
45 #include <linux/debugfs.h>
46 #include <linux/seq_file.h>
47 #include <linux/uaccess.h>
48 #include <linux/crc16.h>
49 #include <net/sock.h>
50
51 #include <asm/system.h>
52 #include <asm/unaligned.h>
53
54 #include <net/bluetooth/bluetooth.h>
55 #include <net/bluetooth/hci_core.h>
56 #include <net/bluetooth/l2cap.h>
57
/* When non-zero, ERTM and streaming modes are not reported as supported
 * (see l2cap_mode_supported()). */
int disable_ertm;

/* Feature mask advertised to remotes; ERTM/streaming bits are added at
 * query time unless disable_ertm is set. */
static u32 l2cap_feat_mask = L2CAP_FEAT_FIXED_CHAN;
/* Supported fixed-channel bitmap; 0x02 presumably marks the signalling
 * channel — confirm against the L2CAP spec. */
static u8 l2cap_fixed_chan[8] = { 0x02, };

/* Workqueue for deferred work; queued by the busy-state handling
 * (see l2cap_busy_work forward declaration). */
static struct workqueue_struct *_busy_wq;

/* Global list of all L2CAP sockets, protected by its own rwlock. */
struct bt_sock_list l2cap_sk_list = {
	.lock = __RW_LOCK_UNLOCKED(l2cap_sk_list.lock)
};
68
69 static void l2cap_busy_work(struct work_struct *work);
70
71 static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
72 u8 code, u8 ident, u16 dlen, void *data);
73
74 static int l2cap_ertm_data_rcv(struct sock *sk, struct sk_buff *skb);
75
76 /* ---- L2CAP channels ---- */
77 static struct sock *__l2cap_get_chan_by_dcid(struct l2cap_chan_list *l, u16 cid)
78 {
79 struct sock *s;
80 for (s = l->head; s; s = l2cap_pi(s)->next_c) {
81 if (l2cap_pi(s)->dcid == cid)
82 break;
83 }
84 return s;
85 }
86
87 static struct sock *__l2cap_get_chan_by_scid(struct l2cap_chan_list *l, u16 cid)
88 {
89 struct sock *s;
90 for (s = l->head; s; s = l2cap_pi(s)->next_c) {
91 if (l2cap_pi(s)->scid == cid)
92 break;
93 }
94 return s;
95 }
96
/* Find channel with given SCID.
 * Returns the matching socket locked with bh_lock_sock(), or NULL.
 * The socket lock is taken while still under the list read lock so
 * the channel cannot disappear between lookup and lock. */
static inline struct sock *l2cap_get_chan_by_scid(struct l2cap_chan_list *l, u16 cid)
{
	struct sock *s;
	read_lock(&l->lock);
	s = __l2cap_get_chan_by_scid(l, cid);
	if (s)
		bh_lock_sock(s);
	read_unlock(&l->lock);
	return s;
}
109
110 static struct sock *__l2cap_get_chan_by_ident(struct l2cap_chan_list *l, u8 ident)
111 {
112 struct sock *s;
113 for (s = l->head; s; s = l2cap_pi(s)->next_c) {
114 if (l2cap_pi(s)->ident == ident)
115 break;
116 }
117 return s;
118 }
119
/* Find the channel matching a signalling command identifier.
 * Returns the socket locked with bh_lock_sock(), or NULL; same locking
 * scheme as l2cap_get_chan_by_scid(). */
static inline struct sock *l2cap_get_chan_by_ident(struct l2cap_chan_list *l, u8 ident)
{
	struct sock *s;
	read_lock(&l->lock);
	s = __l2cap_get_chan_by_ident(l, ident);
	if (s)
		bh_lock_sock(s);
	read_unlock(&l->lock);
	return s;
}
130
131 static u16 l2cap_alloc_cid(struct l2cap_chan_list *l)
132 {
133 u16 cid = L2CAP_CID_DYN_START;
134
135 for (; cid < L2CAP_CID_DYN_END; cid++) {
136 if (!__l2cap_get_chan_by_scid(l, cid))
137 return cid;
138 }
139
140 return 0;
141 }
142
/* Insert the channel at the head of the connection's channel list.
 * Takes a socket reference for the time the channel sits on the list
 * (released by l2cap_chan_unlink()).  Caller must hold the list
 * write lock. */
static inline void __l2cap_chan_link(struct l2cap_chan_list *l, struct sock *sk)
{
	sock_hold(sk);

	if (l->head)
		l2cap_pi(l->head)->prev_c = sk;

	l2cap_pi(sk)->next_c = l->head;
	l2cap_pi(sk)->prev_c = NULL;
	l->head = sk;
}
154
/* Remove the channel from the connection's channel list and drop the
 * reference taken by __l2cap_chan_link().  Unlike the link helper this
 * takes the list write lock itself. */
static inline void l2cap_chan_unlink(struct l2cap_chan_list *l, struct sock *sk)
{
	struct sock *next = l2cap_pi(sk)->next_c, *prev = l2cap_pi(sk)->prev_c;

	write_lock_bh(&l->lock);
	if (sk == l->head)
		l->head = next;

	if (next)
		l2cap_pi(next)->prev_c = prev;
	if (prev)
		l2cap_pi(prev)->next_c = next;
	write_unlock_bh(&l->lock);

	/* Drops only the list's reference; callers keep their own. */
	__sock_put(sk);
}
171
172 static void __l2cap_chan_add(struct l2cap_conn *conn, struct sock *sk, struct sock *parent)
173 {
174 struct l2cap_chan_list *l = &conn->chan_list;
175
176 BT_DBG("conn %p, psm 0x%2.2x, dcid 0x%4.4x", conn,
177 l2cap_pi(sk)->psm, l2cap_pi(sk)->dcid);
178
179 conn->disc_reason = 0x13;
180
181 l2cap_pi(sk)->conn = conn;
182
183 if (sk->sk_type == SOCK_SEQPACKET || sk->sk_type == SOCK_STREAM) {
184 if (conn->hcon->type == LE_LINK) {
185 /* LE connection */
186 l2cap_pi(sk)->omtu = L2CAP_LE_DEFAULT_MTU;
187 l2cap_pi(sk)->scid = L2CAP_CID_LE_DATA;
188 l2cap_pi(sk)->dcid = L2CAP_CID_LE_DATA;
189 } else {
190 /* Alloc CID for connection-oriented socket */
191 l2cap_pi(sk)->scid = l2cap_alloc_cid(l);
192 l2cap_pi(sk)->omtu = L2CAP_DEFAULT_MTU;
193 }
194 } else if (sk->sk_type == SOCK_DGRAM) {
195 /* Connectionless socket */
196 l2cap_pi(sk)->scid = L2CAP_CID_CONN_LESS;
197 l2cap_pi(sk)->dcid = L2CAP_CID_CONN_LESS;
198 l2cap_pi(sk)->omtu = L2CAP_DEFAULT_MTU;
199 } else {
200 /* Raw socket can send/recv signalling messages only */
201 l2cap_pi(sk)->scid = L2CAP_CID_SIGNALING;
202 l2cap_pi(sk)->dcid = L2CAP_CID_SIGNALING;
203 l2cap_pi(sk)->omtu = L2CAP_DEFAULT_MTU;
204 }
205
206 __l2cap_chan_link(l, sk);
207
208 if (parent)
209 bt_accept_enqueue(parent, sk);
210 }
211
/* Delete channel.
 * Must be called on the locked socket.  Unlinks the channel from its
 * connection, marks the socket closed/zapped, wakes whoever needs to
 * reap it and frees all queued data and ERTM state. */
void l2cap_chan_del(struct sock *sk, int err)
{
	struct l2cap_conn *conn = l2cap_pi(sk)->conn;
	struct sock *parent = bt_sk(sk)->parent;

	l2cap_sock_clear_timer(sk);

	BT_DBG("sk %p, conn %p, err %d", sk, conn, err);

	if (conn) {
		/* Unlink from channel list and drop the hci_conn ref
		 * taken when the channel was added */
		l2cap_chan_unlink(&conn->chan_list, sk);
		l2cap_pi(sk)->conn = NULL;
		hci_conn_put(conn->hcon);
	}

	sk->sk_state = BT_CLOSED;
	sock_set_flag(sk, SOCK_ZAPPED);

	if (err)
		sk->sk_err = err;

	if (parent) {
		/* Unaccepted child: detach and wake the listener */
		bt_accept_unlink(sk);
		parent->sk_data_ready(parent, 0);
	} else
		sk->sk_state_change(sk);

	skb_queue_purge(TX_QUEUE(sk));

	if (l2cap_pi(sk)->mode == L2CAP_MODE_ERTM) {
		struct srej_list *l, *tmp;

		/* Stop all ERTM timers and free retransmission state */
		del_timer(&l2cap_pi(sk)->retrans_timer);
		del_timer(&l2cap_pi(sk)->monitor_timer);
		del_timer(&l2cap_pi(sk)->ack_timer);

		skb_queue_purge(SREJ_QUEUE(sk));
		skb_queue_purge(BUSY_QUEUE(sk));

		list_for_each_entry_safe(l, tmp, SREJ_LIST(sk), list) {
			list_del(&l->list);
			kfree(l);
		}
	}
}
260
261 static inline u8 l2cap_get_auth_type(struct sock *sk)
262 {
263 if (sk->sk_type == SOCK_RAW) {
264 switch (l2cap_pi(sk)->sec_level) {
265 case BT_SECURITY_HIGH:
266 return HCI_AT_DEDICATED_BONDING_MITM;
267 case BT_SECURITY_MEDIUM:
268 return HCI_AT_DEDICATED_BONDING;
269 default:
270 return HCI_AT_NO_BONDING;
271 }
272 } else if (l2cap_pi(sk)->psm == cpu_to_le16(0x0001)) {
273 if (l2cap_pi(sk)->sec_level == BT_SECURITY_LOW)
274 l2cap_pi(sk)->sec_level = BT_SECURITY_SDP;
275
276 if (l2cap_pi(sk)->sec_level == BT_SECURITY_HIGH)
277 return HCI_AT_NO_BONDING_MITM;
278 else
279 return HCI_AT_NO_BONDING;
280 } else {
281 switch (l2cap_pi(sk)->sec_level) {
282 case BT_SECURITY_HIGH:
283 return HCI_AT_GENERAL_BONDING_MITM;
284 case BT_SECURITY_MEDIUM:
285 return HCI_AT_GENERAL_BONDING;
286 default:
287 return HCI_AT_NO_BONDING;
288 }
289 }
290 }
291
292 /* Service level security */
293 static inline int l2cap_check_security(struct sock *sk)
294 {
295 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
296 __u8 auth_type;
297
298 auth_type = l2cap_get_auth_type(sk);
299
300 return hci_conn_security(conn->hcon, l2cap_pi(sk)->sec_level,
301 auth_type);
302 }
303
304 u8 l2cap_get_ident(struct l2cap_conn *conn)
305 {
306 u8 id;
307
308 /* Get next available identificator.
309 * 1 - 128 are used by kernel.
310 * 129 - 199 are reserved.
311 * 200 - 254 are used by utilities like l2ping, etc.
312 */
313
314 spin_lock_bh(&conn->lock);
315
316 if (++conn->tx_ident > 128)
317 conn->tx_ident = 1;
318
319 id = conn->tx_ident;
320
321 spin_unlock_bh(&conn->lock);
322
323 return id;
324 }
325
326 void l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len, void *data)
327 {
328 struct sk_buff *skb = l2cap_build_cmd(conn, code, ident, len, data);
329 u8 flags;
330
331 BT_DBG("code 0x%2.2x", code);
332
333 if (!skb)
334 return;
335
336 if (lmp_no_flush_capable(conn->hcon->hdev))
337 flags = ACL_START_NO_FLUSH;
338 else
339 flags = ACL_START;
340
341 hci_send_acl(conn->hcon, skb, flags);
342 }
343
/* Build and transmit one ERTM S-frame (supervisory frame).
 * Pending Final/Poll bits are folded into the control field and
 * consumed; an FCS is appended when the channel negotiated CRC16.
 * Silently does nothing when the channel is not connected or the
 * allocation fails. */
static inline void l2cap_send_sframe(struct l2cap_pinfo *pi, u16 control)
{
	struct sk_buff *skb;
	struct l2cap_hdr *lh;
	struct l2cap_conn *conn = pi->conn;
	struct sock *sk = (struct sock *)pi;
	int count, hlen = L2CAP_HDR_SIZE + 2;	/* header + 16-bit control */
	u8 flags;

	if (sk->sk_state != BT_CONNECTED)
		return;

	if (pi->fcs == L2CAP_FCS_CRC16)
		hlen += 2;	/* room for the FCS */

	BT_DBG("pi %p, control 0x%2.2x", pi, control);

	count = min_t(unsigned int, conn->mtu, hlen);
	control |= L2CAP_CTRL_FRAME_TYPE;	/* mark as S-frame */

	/* Consume a pending Final bit */
	if (pi->conn_state & L2CAP_CONN_SEND_FBIT) {
		control |= L2CAP_CTRL_FINAL;
		pi->conn_state &= ~L2CAP_CONN_SEND_FBIT;
	}

	/* Consume a pending Poll bit */
	if (pi->conn_state & L2CAP_CONN_SEND_PBIT) {
		control |= L2CAP_CTRL_POLL;
		pi->conn_state &= ~L2CAP_CONN_SEND_PBIT;
	}

	skb = bt_skb_alloc(count, GFP_ATOMIC);
	if (!skb)
		return;

	lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
	lh->len = cpu_to_le16(hlen - L2CAP_HDR_SIZE);
	lh->cid = cpu_to_le16(pi->dcid);
	put_unaligned_le16(control, skb_put(skb, 2));

	if (pi->fcs == L2CAP_FCS_CRC16) {
		/* FCS covers everything in the frame before it */
		u16 fcs = crc16(0, (u8 *)lh, count - 2);
		put_unaligned_le16(fcs, skb_put(skb, 2));
	}

	if (lmp_no_flush_capable(conn->hcon->hdev))
		flags = ACL_START_NO_FLUSH;
	else
		flags = ACL_START;

	hci_send_acl(pi->conn->hcon, skb, flags);
}
395
396 static inline void l2cap_send_rr_or_rnr(struct l2cap_pinfo *pi, u16 control)
397 {
398 if (pi->conn_state & L2CAP_CONN_LOCAL_BUSY) {
399 control |= L2CAP_SUPER_RCV_NOT_READY;
400 pi->conn_state |= L2CAP_CONN_RNR_SENT;
401 } else
402 control |= L2CAP_SUPER_RCV_READY;
403
404 control |= pi->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT;
405
406 l2cap_send_sframe(pi, control);
407 }
408
409 static inline int __l2cap_no_conn_pending(struct sock *sk)
410 {
411 return !(l2cap_pi(sk)->conf_state & L2CAP_CONF_CONNECT_PEND);
412 }
413
/* Push channel establishment forward on an existing connection: send a
 * Connect Request once the remote feature mask is known, otherwise
 * kick off the information exchange first (l2cap_conn_start() retries
 * the Connect Request when the info response arrives). */
static void l2cap_do_start(struct sock *sk)
{
	struct l2cap_conn *conn = l2cap_pi(sk)->conn;

	if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) {
		/* Feature exchange still in flight: wait for it */
		if (!(conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE))
			return;

		if (l2cap_check_security(sk) && __l2cap_no_conn_pending(sk)) {
			struct l2cap_conn_req req;
			req.scid = cpu_to_le16(l2cap_pi(sk)->scid);
			req.psm = l2cap_pi(sk)->psm;

			l2cap_pi(sk)->ident = l2cap_get_ident(conn);
			l2cap_pi(sk)->conf_state |= L2CAP_CONF_CONNECT_PEND;

			l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
					L2CAP_CONN_REQ, sizeof(req), &req);
		}
	} else {
		struct l2cap_info_req req;
		req.type = cpu_to_le16(L2CAP_IT_FEAT_MASK);

		conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
		conn->info_ident = l2cap_get_ident(conn);

		/* Give up on the info exchange after L2CAP_INFO_TIMEOUT */
		mod_timer(&conn->info_timer, jiffies +
					msecs_to_jiffies(L2CAP_INFO_TIMEOUT));

		l2cap_send_cmd(conn, conn->info_ident,
					L2CAP_INFO_REQ, sizeof(req), &req);
	}
}
447
448 static inline int l2cap_mode_supported(__u8 mode, __u32 feat_mask)
449 {
450 u32 local_feat_mask = l2cap_feat_mask;
451 if (!disable_ertm)
452 local_feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING;
453
454 switch (mode) {
455 case L2CAP_MODE_ERTM:
456 return L2CAP_FEAT_ERTM & feat_mask & local_feat_mask;
457 case L2CAP_MODE_STREAMING:
458 return L2CAP_FEAT_STREAMING & feat_mask & local_feat_mask;
459 default:
460 return 0x00;
461 }
462 }
463
/* Send a Disconnect Request for the channel and move it to BT_DISCONN.
 * Pending transmit data is discarded and ERTM timers are stopped
 * before the request goes out. */
void l2cap_send_disconn_req(struct l2cap_conn *conn, struct sock *sk, int err)
{
	struct l2cap_disconn_req req;

	if (!conn)
		return;

	skb_queue_purge(TX_QUEUE(sk));

	if (l2cap_pi(sk)->mode == L2CAP_MODE_ERTM) {
		del_timer(&l2cap_pi(sk)->retrans_timer);
		del_timer(&l2cap_pi(sk)->monitor_timer);
		del_timer(&l2cap_pi(sk)->ack_timer);
	}

	req.dcid = cpu_to_le16(l2cap_pi(sk)->dcid);
	req.scid = cpu_to_le16(l2cap_pi(sk)->scid);
	l2cap_send_cmd(conn, l2cap_get_ident(conn),
			L2CAP_DISCONN_REQ, sizeof(req), &req);

	sk->sk_state = BT_DISCONN;
	sk->sk_err = err;
}
487
488 /* ---- L2CAP connections ---- */
489 static void l2cap_conn_start(struct l2cap_conn *conn)
490 {
491 struct l2cap_chan_list *l = &conn->chan_list;
492 struct sock_del_list del, *tmp1, *tmp2;
493 struct sock *sk;
494
495 BT_DBG("conn %p", conn);
496
497 INIT_LIST_HEAD(&del.list);
498
499 read_lock(&l->lock);
500
501 for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
502 bh_lock_sock(sk);
503
504 if (sk->sk_type != SOCK_SEQPACKET &&
505 sk->sk_type != SOCK_STREAM) {
506 bh_unlock_sock(sk);
507 continue;
508 }
509
510 if (sk->sk_state == BT_CONNECT) {
511 struct l2cap_conn_req req;
512
513 if (!l2cap_check_security(sk) ||
514 !__l2cap_no_conn_pending(sk)) {
515 bh_unlock_sock(sk);
516 continue;
517 }
518
519 if (!l2cap_mode_supported(l2cap_pi(sk)->mode,
520 conn->feat_mask)
521 && l2cap_pi(sk)->conf_state &
522 L2CAP_CONF_STATE2_DEVICE) {
523 tmp1 = kzalloc(sizeof(struct sock_del_list),
524 GFP_ATOMIC);
525 tmp1->sk = sk;
526 list_add_tail(&tmp1->list, &del.list);
527 bh_unlock_sock(sk);
528 continue;
529 }
530
531 req.scid = cpu_to_le16(l2cap_pi(sk)->scid);
532 req.psm = l2cap_pi(sk)->psm;
533
534 l2cap_pi(sk)->ident = l2cap_get_ident(conn);
535 l2cap_pi(sk)->conf_state |= L2CAP_CONF_CONNECT_PEND;
536
537 l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
538 L2CAP_CONN_REQ, sizeof(req), &req);
539
540 } else if (sk->sk_state == BT_CONNECT2) {
541 struct l2cap_conn_rsp rsp;
542 char buf[128];
543 rsp.scid = cpu_to_le16(l2cap_pi(sk)->dcid);
544 rsp.dcid = cpu_to_le16(l2cap_pi(sk)->scid);
545
546 if (l2cap_check_security(sk)) {
547 if (bt_sk(sk)->defer_setup) {
548 struct sock *parent = bt_sk(sk)->parent;
549 rsp.result = cpu_to_le16(L2CAP_CR_PEND);
550 rsp.status = cpu_to_le16(L2CAP_CS_AUTHOR_PEND);
551 parent->sk_data_ready(parent, 0);
552
553 } else {
554 sk->sk_state = BT_CONFIG;
555 rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
556 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
557 }
558 } else {
559 rsp.result = cpu_to_le16(L2CAP_CR_PEND);
560 rsp.status = cpu_to_le16(L2CAP_CS_AUTHEN_PEND);
561 }
562
563 l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
564 L2CAP_CONN_RSP, sizeof(rsp), &rsp);
565
566 if (l2cap_pi(sk)->conf_state & L2CAP_CONF_REQ_SENT ||
567 rsp.result != L2CAP_CR_SUCCESS) {
568 bh_unlock_sock(sk);
569 continue;
570 }
571
572 l2cap_pi(sk)->conf_state |= L2CAP_CONF_REQ_SENT;
573 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
574 l2cap_build_conf_req(sk, buf), buf);
575 l2cap_pi(sk)->num_conf_req++;
576 }
577
578 bh_unlock_sock(sk);
579 }
580
581 read_unlock(&l->lock);
582
583 list_for_each_entry_safe(tmp1, tmp2, &del.list, list) {
584 bh_lock_sock(tmp1->sk);
585 __l2cap_sock_close(tmp1->sk, ECONNRESET);
586 bh_unlock_sock(tmp1->sk);
587 list_del(&tmp1->list);
588 kfree(tmp1);
589 }
590 }
591
/* Find socket with cid and source bdaddr.
 * Returns closest match, locked: an exact source-address match wins,
 * otherwise a socket bound to BDADDR_ANY is used.
 */
static struct sock *l2cap_get_sock_by_scid(int state, __le16 cid, bdaddr_t *src)
{
	struct sock *s, *sk = NULL, *sk1 = NULL;
	struct hlist_node *node;

	read_lock(&l2cap_sk_list.lock);

	sk_for_each(sk, node, &l2cap_sk_list.head) {
		if (state && sk->sk_state != state)
			continue;

		if (l2cap_pi(sk)->scid == cid) {
			/* Exact match. */
			if (!bacmp(&bt_sk(sk)->src, src))
				break;

			/* Closest match */
			if (!bacmp(&bt_sk(sk)->src, BDADDR_ANY))
				sk1 = sk;
		}
	}
	/* node is non-NULL iff the loop broke early on an exact match */
	s = node ? sk : sk1;
	if (s)
		bh_lock_sock(s);
	read_unlock(&l2cap_sk_list.lock);

	return s;
}
623
/* A new LE link came up: if a socket is listening on the LE data CID,
 * spawn a child socket for the incoming connection.
 * Note the success path deliberately falls through to "clean": the
 * parent was returned locked by l2cap_get_sock_by_scid() and must be
 * unlocked on every path. */
static void l2cap_le_conn_ready(struct l2cap_conn *conn)
{
	struct l2cap_chan_list *list = &conn->chan_list;
	struct sock *parent, *uninitialized_var(sk);

	BT_DBG("");

	/* Check if we have socket listening on cid */
	parent = l2cap_get_sock_by_scid(BT_LISTEN, L2CAP_CID_LE_DATA,
								conn->src);
	if (!parent)
		return;

	/* Check for backlog size */
	if (sk_acceptq_is_full(parent)) {
		BT_DBG("backlog full %d", parent->sk_ack_backlog);
		goto clean;
	}

	sk = l2cap_sock_alloc(sock_net(parent), NULL, BTPROTO_L2CAP, GFP_ATOMIC);
	if (!sk)
		goto clean;

	write_lock_bh(&list->lock);

	/* Keep the HCI link alive while this channel exists */
	hci_conn_hold(conn->hcon);

	l2cap_sock_init(sk, parent);
	bacpy(&bt_sk(sk)->src, conn->src);
	bacpy(&bt_sk(sk)->dst, conn->dst);

	__l2cap_chan_add(conn, sk, parent);

	l2cap_sock_set_timer(sk, sk->sk_sndtimeo);

	sk->sk_state = BT_CONNECTED;
	parent->sk_data_ready(parent, 0);

	write_unlock_bh(&list->lock);

clean:
	bh_unlock_sock(parent);
}
667
/* The underlying HCI link is fully established: notify every channel.
 * Connection-oriented BR/EDR channels continue with l2cap_do_start();
 * everything else (and all LE channels) goes straight to
 * BT_CONNECTED. */
static void l2cap_conn_ready(struct l2cap_conn *conn)
{
	struct l2cap_chan_list *l = &conn->chan_list;
	struct sock *sk;

	BT_DBG("conn %p", conn);

	/* Incoming LE links may carry a pending connection to accept */
	if (!conn->hcon->out && conn->hcon->type == LE_LINK)
		l2cap_le_conn_ready(conn);

	read_lock(&l->lock);

	for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
		bh_lock_sock(sk);

		if (conn->hcon->type == LE_LINK) {
			/* LE channels have no configuration stage */
			l2cap_sock_clear_timer(sk);
			sk->sk_state = BT_CONNECTED;
			sk->sk_state_change(sk);
		}

		if (sk->sk_type != SOCK_SEQPACKET &&
				sk->sk_type != SOCK_STREAM) {
			l2cap_sock_clear_timer(sk);
			sk->sk_state = BT_CONNECTED;
			sk->sk_state_change(sk);
		} else if (sk->sk_state == BT_CONNECT)
			l2cap_do_start(sk);

		bh_unlock_sock(sk);
	}

	read_unlock(&l->lock);
}
702
703 /* Notify sockets that we cannot guaranty reliability anymore */
704 static void l2cap_conn_unreliable(struct l2cap_conn *conn, int err)
705 {
706 struct l2cap_chan_list *l = &conn->chan_list;
707 struct sock *sk;
708
709 BT_DBG("conn %p", conn);
710
711 read_lock(&l->lock);
712
713 for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
714 if (l2cap_pi(sk)->force_reliable)
715 sk->sk_err = err;
716 }
717
718 read_unlock(&l->lock);
719 }
720
721 static void l2cap_info_timeout(unsigned long arg)
722 {
723 struct l2cap_conn *conn = (void *) arg;
724
725 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
726 conn->info_ident = 0;
727
728 l2cap_conn_start(conn);
729 }
730
/* Allocate and initialise the L2CAP state for an HCI connection.
 * Returns the already-attached l2cap_data when present, and NULL on
 * allocation failure or when status indicates a failed link. */
static struct l2cap_conn *l2cap_conn_add(struct hci_conn *hcon, u8 status)
{
	struct l2cap_conn *conn = hcon->l2cap_data;

	if (conn || status)
		return conn;

	conn = kzalloc(sizeof(struct l2cap_conn), GFP_ATOMIC);
	if (!conn)
		return NULL;

	hcon->l2cap_data = conn;
	conn->hcon = hcon;

	BT_DBG("hcon %p conn %p", hcon, conn);

	/* LE links may advertise their own MTU */
	if (hcon->hdev->le_mtu && hcon->type == LE_LINK)
		conn->mtu = hcon->hdev->le_mtu;
	else
		conn->mtu = hcon->hdev->acl_mtu;

	conn->src = &hcon->hdev->bdaddr;
	conn->dst = &hcon->dst;

	conn->feat_mask = 0;

	spin_lock_init(&conn->lock);
	rwlock_init(&conn->chan_list.lock);

	/* LE has no information exchange, so no info timer either */
	if (hcon->type != LE_LINK)
		setup_timer(&conn->info_timer, l2cap_info_timeout,
						(unsigned long) conn);

	conn->disc_reason = 0x13;

	return conn;
}
768
/* Tear down all L2CAP state attached to an HCI connection: kill every
 * channel, stop the info timer and free the conn structure. */
static void l2cap_conn_del(struct hci_conn *hcon, int err)
{
	struct l2cap_conn *conn = hcon->l2cap_data;
	struct sock *sk;

	if (!conn)
		return;

	BT_DBG("hcon %p conn %p, err %d", hcon, conn, err);

	/* Any partially reassembled frame is discarded */
	kfree_skb(conn->rx_skb);

	/* Kill channels */
	while ((sk = conn->chan_list.head)) {
		bh_lock_sock(sk);
		l2cap_chan_del(sk, err);
		bh_unlock_sock(sk);
		l2cap_sock_kill(sk);
	}

	if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT)
		del_timer_sync(&conn->info_timer);

	hcon->l2cap_data = NULL;
	kfree(conn);
}
795
796 static inline void l2cap_chan_add(struct l2cap_conn *conn, struct sock *sk, struct sock *parent)
797 {
798 struct l2cap_chan_list *l = &conn->chan_list;
799 write_lock_bh(&l->lock);
800 __l2cap_chan_add(conn, sk, parent);
801 write_unlock_bh(&l->lock);
802 }
803
804 /* ---- Socket interface ---- */
805
/* Find socket with psm and source bdaddr.
 * Returns closest match: an exact source-address hit wins, otherwise a
 * socket bound to BDADDR_ANY is used.  Unlike the SCID variant the
 * result is returned unlocked.
 */
static struct sock *l2cap_get_sock_by_psm(int state, __le16 psm, bdaddr_t *src)
{
	struct sock *sk = NULL, *sk1 = NULL;
	struct hlist_node *node;

	read_lock(&l2cap_sk_list.lock);

	sk_for_each(sk, node, &l2cap_sk_list.head) {
		if (state && sk->sk_state != state)
			continue;

		if (l2cap_pi(sk)->psm == psm) {
			/* Exact match. */
			if (!bacmp(&bt_sk(sk)->src, src))
				break;

			/* Closest match */
			if (!bacmp(&bt_sk(sk)->src, BDADDR_ANY))
				sk1 = sk;
		}
	}

	read_unlock(&l2cap_sk_list.lock);

	/* node is non-NULL iff the loop broke early on an exact match */
	return node ? sk : sk1;
}
835
/* Resolve a route to the destination, bring up (or reuse) the HCI
 * link and attach this socket to it as a new channel.  LE vs ACL is
 * selected by the destination CID.  Returns 0 on success or a
 * negative errno. */
int l2cap_do_connect(struct sock *sk)
{
	bdaddr_t *src = &bt_sk(sk)->src;
	bdaddr_t *dst = &bt_sk(sk)->dst;
	struct l2cap_conn *conn;
	struct hci_conn *hcon;
	struct hci_dev *hdev;
	__u8 auth_type;
	int err;

	BT_DBG("%s -> %s psm 0x%2.2x", batostr(src), batostr(dst),
							l2cap_pi(sk)->psm);

	hdev = hci_get_route(dst, src);
	if (!hdev)
		return -EHOSTUNREACH;

	hci_dev_lock_bh(hdev);

	auth_type = l2cap_get_auth_type(sk);

	/* The LE data channel CID selects an LE link, anything else ACL */
	if (l2cap_pi(sk)->dcid == L2CAP_CID_LE_DATA)
		hcon = hci_connect(hdev, LE_LINK, dst,
					l2cap_pi(sk)->sec_level, auth_type);
	else
		hcon = hci_connect(hdev, ACL_LINK, dst,
					l2cap_pi(sk)->sec_level, auth_type);

	if (IS_ERR(hcon)) {
		err = PTR_ERR(hcon);
		goto done;
	}

	conn = l2cap_conn_add(hcon, 0);
	if (!conn) {
		hci_conn_put(hcon);
		err = -ENOMEM;
		goto done;
	}

	/* Update source addr of the socket */
	bacpy(src, conn->src);

	l2cap_chan_add(conn, sk, NULL);

	sk->sk_state = BT_CONNECT;
	l2cap_sock_set_timer(sk, sk->sk_sndtimeo);

	if (hcon->state == BT_CONNECTED) {
		/* Link already up: finish or continue channel setup now */
		if (sk->sk_type != SOCK_SEQPACKET &&
				sk->sk_type != SOCK_STREAM) {
			l2cap_sock_clear_timer(sk);
			if (l2cap_check_security(sk))
				sk->sk_state = BT_CONNECTED;
		} else
			l2cap_do_start(sk);
	}

	err = 0;

done:
	hci_dev_unlock_bh(hdev);
	hci_dev_put(hdev);
	return err;
}
901
/* Wait (interruptibly) until the peer has acknowledged all outstanding
 * ERTM frames or the channel loses its connection.  Called with the
 * socket locked; the lock is released around each sleep.  Returns 0 on
 * success or a negative errno (signal or socket error). */
int __l2cap_wait_ack(struct sock *sk)
{
	DECLARE_WAITQUEUE(wait, current);
	int err = 0;
	int timeo = HZ/5;	/* poll interval: 200ms */

	add_wait_queue(sk_sleep(sk), &wait);
	while ((l2cap_pi(sk)->unacked_frames > 0 && l2cap_pi(sk)->conn)) {
		set_current_state(TASK_INTERRUPTIBLE);

		if (!timeo)
			timeo = HZ/5;

		if (signal_pending(current)) {
			err = sock_intr_errno(timeo);
			break;
		}

		release_sock(sk);
		timeo = schedule_timeout(timeo);
		lock_sock(sk);

		err = sock_error(sk);
		if (err)
			break;
	}
	set_current_state(TASK_RUNNING);
	remove_wait_queue(sk_sleep(sk), &wait);
	return err;
}
932
/* ERTM monitor timer: the peer did not answer our poll.  Retry with
 * another RR/RNR(P=1) until remote_max_tx is exceeded, then abort the
 * channel.  NOTE(review): __mod_monitor_timer() presumably expands
 * using the local variable "sk" — confirm before renaming it. */
static void l2cap_monitor_timeout(unsigned long arg)
{
	struct sock *sk = (void *) arg;

	BT_DBG("sk %p", sk);

	bh_lock_sock(sk);
	if (l2cap_pi(sk)->retry_count >= l2cap_pi(sk)->remote_max_tx) {
		l2cap_send_disconn_req(l2cap_pi(sk)->conn, sk, ECONNABORTED);
		bh_unlock_sock(sk);
		return;
	}

	l2cap_pi(sk)->retry_count++;
	__mod_monitor_timer();

	l2cap_send_rr_or_rnr(l2cap_pi(sk), L2CAP_CTRL_POLL);
	bh_unlock_sock(sk);
}
952
/* ERTM retransmission timer: no acknowledgement arrived in time.
 * Enter the WAIT_F state and poll the peer with RR/RNR(P=1); the
 * monitor timer takes over from here.  NOTE(review):
 * __mod_monitor_timer() presumably expands using the local "sk" —
 * confirm before renaming it. */
static void l2cap_retrans_timeout(unsigned long arg)
{
	struct sock *sk = (void *) arg;

	BT_DBG("sk %p", sk);

	bh_lock_sock(sk);
	l2cap_pi(sk)->retry_count = 1;
	__mod_monitor_timer();

	l2cap_pi(sk)->conn_state |= L2CAP_CONN_WAIT_F;

	l2cap_send_rr_or_rnr(l2cap_pi(sk), L2CAP_CTRL_POLL);
	bh_unlock_sock(sk);
}
968
/* Free frames at the head of the transmit queue that the peer has
 * acknowledged (everything strictly before expected_ack_seq), and
 * stop the retransmission timer once nothing is left unacked. */
static void l2cap_drop_acked_frames(struct sock *sk)
{
	struct sk_buff *skb;

	while ((skb = skb_peek(TX_QUEUE(sk))) &&
			l2cap_pi(sk)->unacked_frames) {
		/* First still-unacknowledged frame: stop here */
		if (bt_cb(skb)->tx_seq == l2cap_pi(sk)->expected_ack_seq)
			break;

		skb = skb_dequeue(TX_QUEUE(sk));
		kfree_skb(skb);

		l2cap_pi(sk)->unacked_frames--;
	}

	if (!l2cap_pi(sk)->unacked_frames)
		del_timer(&l2cap_pi(sk)->retrans_timer);
}
987
988 void l2cap_do_send(struct sock *sk, struct sk_buff *skb)
989 {
990 struct l2cap_pinfo *pi = l2cap_pi(sk);
991 struct hci_conn *hcon = pi->conn->hcon;
992 u16 flags;
993
994 BT_DBG("sk %p, skb %p len %d", sk, skb, skb->len);
995
996 if (!pi->flushable && lmp_no_flush_capable(hcon->hdev))
997 flags = ACL_START_NO_FLUSH;
998 else
999 flags = ACL_START;
1000
1001 hci_send_acl(hcon, skb, flags);
1002 }
1003
/* Transmit every queued frame in streaming mode: stamp the TxSeq into
 * the control field, append the FCS when negotiated and send.
 * Streaming mode keeps no copy for retransmission, so frames are
 * dequeued permanently. */
void l2cap_streaming_send(struct sock *sk)
{
	struct sk_buff *skb;
	struct l2cap_pinfo *pi = l2cap_pi(sk);
	u16 control, fcs;

	while ((skb = skb_dequeue(TX_QUEUE(sk)))) {
		control = get_unaligned_le16(skb->data + L2CAP_HDR_SIZE);
		control |= pi->next_tx_seq << L2CAP_CTRL_TXSEQ_SHIFT;
		put_unaligned_le16(control, skb->data + L2CAP_HDR_SIZE);

		if (pi->fcs == L2CAP_FCS_CRC16) {
			/* FCS goes in the last two bytes of the frame */
			fcs = crc16(0, (u8 *)skb->data, skb->len - 2);
			put_unaligned_le16(fcs, skb->data + skb->len - 2);
		}

		l2cap_do_send(sk, skb);

		/* TxSeq is modulo-64 */
		pi->next_tx_seq = (pi->next_tx_seq + 1) % 64;
	}
}
1025
/* Retransmit the single I-frame with the given TxSeq (SREJ recovery).
 * A clone of the queued skb is sent so the original stays on the queue
 * for possible further retransmission. */
static void l2cap_retransmit_one_frame(struct sock *sk, u8 tx_seq)
{
	struct l2cap_pinfo *pi = l2cap_pi(sk);
	struct sk_buff *skb, *tx_skb;
	u16 control, fcs;

	skb = skb_peek(TX_QUEUE(sk));
	if (!skb)
		return;

	/* Locate the frame with the requested sequence number */
	do {
		if (bt_cb(skb)->tx_seq == tx_seq)
			break;

		if (skb_queue_is_last(TX_QUEUE(sk), skb))
			return;

	} while ((skb = skb_queue_next(TX_QUEUE(sk), skb)));

	/* Retransmission limit reached: drop the channel */
	if (pi->remote_max_tx &&
			bt_cb(skb)->retries == pi->remote_max_tx) {
		l2cap_send_disconn_req(pi->conn, sk, ECONNABORTED);
		return;
	}

	/* NOTE(review): tx_skb is not checked for NULL before use */
	tx_skb = skb_clone(skb, GFP_ATOMIC);
	bt_cb(skb)->retries++;
	control = get_unaligned_le16(tx_skb->data + L2CAP_HDR_SIZE);

	/* Consume a pending Final bit */
	if (pi->conn_state & L2CAP_CONN_SEND_FBIT) {
		control |= L2CAP_CTRL_FINAL;
		pi->conn_state &= ~L2CAP_CONN_SEND_FBIT;
	}

	/* Refresh ReqSeq and TxSeq in the control field */
	control |= (pi->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT)
			| (tx_seq << L2CAP_CTRL_TXSEQ_SHIFT);

	put_unaligned_le16(control, tx_skb->data + L2CAP_HDR_SIZE);

	if (pi->fcs == L2CAP_FCS_CRC16) {
		fcs = crc16(0, (u8 *)tx_skb->data, tx_skb->len - 2);
		put_unaligned_le16(fcs, tx_skb->data + tx_skb->len - 2);
	}

	l2cap_do_send(sk, tx_skb);
}
1072
1073 int l2cap_ertm_send(struct sock *sk)
1074 {
1075 struct sk_buff *skb, *tx_skb;
1076 struct l2cap_pinfo *pi = l2cap_pi(sk);
1077 u16 control, fcs;
1078 int nsent = 0;
1079
1080 if (sk->sk_state != BT_CONNECTED)
1081 return -ENOTCONN;
1082
1083 while ((skb = sk->sk_send_head) && (!l2cap_tx_window_full(sk))) {
1084
1085 if (pi->remote_max_tx &&
1086 bt_cb(skb)->retries == pi->remote_max_tx) {
1087 l2cap_send_disconn_req(pi->conn, sk, ECONNABORTED);
1088 break;
1089 }
1090
1091 tx_skb = skb_clone(skb, GFP_ATOMIC);
1092
1093 bt_cb(skb)->retries++;
1094
1095 control = get_unaligned_le16(tx_skb->data + L2CAP_HDR_SIZE);
1096 control &= L2CAP_CTRL_SAR;
1097
1098 if (pi->conn_state & L2CAP_CONN_SEND_FBIT) {
1099 control |= L2CAP_CTRL_FINAL;
1100 pi->conn_state &= ~L2CAP_CONN_SEND_FBIT;
1101 }
1102 control |= (pi->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT)
1103 | (pi->next_tx_seq << L2CAP_CTRL_TXSEQ_SHIFT);
1104 put_unaligned_le16(control, tx_skb->data + L2CAP_HDR_SIZE);
1105
1106
1107 if (pi->fcs == L2CAP_FCS_CRC16) {
1108 fcs = crc16(0, (u8 *)skb->data, tx_skb->len - 2);
1109 put_unaligned_le16(fcs, skb->data + tx_skb->len - 2);
1110 }
1111
1112 l2cap_do_send(sk, tx_skb);
1113
1114 __mod_retrans_timer();
1115
1116 bt_cb(skb)->tx_seq = pi->next_tx_seq;
1117 pi->next_tx_seq = (pi->next_tx_seq + 1) % 64;
1118
1119 if (bt_cb(skb)->retries == 1)
1120 pi->unacked_frames++;
1121
1122 pi->frames_sent++;
1123
1124 if (skb_queue_is_last(TX_QUEUE(sk), skb))
1125 sk->sk_send_head = NULL;
1126 else
1127 sk->sk_send_head = skb_queue_next(TX_QUEUE(sk), skb);
1128
1129 nsent++;
1130 }
1131
1132 return nsent;
1133 }
1134
1135 static int l2cap_retransmit_frames(struct sock *sk)
1136 {
1137 struct l2cap_pinfo *pi = l2cap_pi(sk);
1138 int ret;
1139
1140 if (!skb_queue_empty(TX_QUEUE(sk)))
1141 sk->sk_send_head = TX_QUEUE(sk)->next;
1142
1143 pi->next_tx_seq = pi->expected_ack_seq;
1144 ret = l2cap_ertm_send(sk);
1145 return ret;
1146 }
1147
1148 static void l2cap_send_ack(struct l2cap_pinfo *pi)
1149 {
1150 struct sock *sk = (struct sock *)pi;
1151 u16 control = 0;
1152
1153 control |= pi->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT;
1154
1155 if (pi->conn_state & L2CAP_CONN_LOCAL_BUSY) {
1156 control |= L2CAP_SUPER_RCV_NOT_READY;
1157 pi->conn_state |= L2CAP_CONN_RNR_SENT;
1158 l2cap_send_sframe(pi, control);
1159 return;
1160 }
1161
1162 if (l2cap_ertm_send(sk) > 0)
1163 return;
1164
1165 control |= L2CAP_SUPER_RCV_READY;
1166 l2cap_send_sframe(pi, control);
1167 }
1168
1169 static void l2cap_send_srejtail(struct sock *sk)
1170 {
1171 struct srej_list *tail;
1172 u16 control;
1173
1174 control = L2CAP_SUPER_SELECT_REJECT;
1175 control |= L2CAP_CTRL_FINAL;
1176
1177 tail = list_entry(SREJ_LIST(sk)->prev, struct srej_list, list);
1178 control |= tail->tx_seq << L2CAP_CTRL_REQSEQ_SHIFT;
1179
1180 l2cap_send_sframe(l2cap_pi(sk), control);
1181 }
1182
/* Copy len bytes of user data from msg into skb: the first count bytes
 * go into skb itself, the rest into MTU-sized continuation fragments
 * chained on skb's frag_list.  Returns bytes consumed or a negative
 * errno; on error the caller frees skb (fragments already attached go
 * with it). */
static inline int l2cap_skbuff_fromiovec(struct sock *sk, struct msghdr *msg, int len, int count, struct sk_buff *skb)
{
	struct l2cap_conn *conn = l2cap_pi(sk)->conn;
	struct sk_buff **frag;
	int err, sent = 0;

	if (memcpy_fromiovec(skb_put(skb, count), msg->msg_iov, count))
		return -EFAULT;

	sent += count;
	len -= count;

	/* Continuation fragments (no L2CAP header) */
	frag = &skb_shinfo(skb)->frag_list;
	while (len) {
		count = min_t(unsigned int, conn->mtu, len);

		*frag = bt_skb_send_alloc(sk, count, msg->msg_flags & MSG_DONTWAIT, &err);
		if (!*frag)
			return err;
		if (memcpy_fromiovec(skb_put(*frag, count), msg->msg_iov, count))
			return -EFAULT;

		sent += count;
		len -= count;

		frag = &(*frag)->next;
	}

	return sent;
}
1214
/* Build a connectionless PDU: L2CAP header plus 16-bit PSM, followed
 * by the user data, with continuation fragments when the data exceeds
 * the link MTU.  Returns the skb or an ERR_PTR. */
struct sk_buff *l2cap_create_connless_pdu(struct sock *sk, struct msghdr *msg, size_t len)
{
	struct l2cap_conn *conn = l2cap_pi(sk)->conn;
	struct sk_buff *skb;
	int err, count, hlen = L2CAP_HDR_SIZE + 2;	/* + PSM field */
	struct l2cap_hdr *lh;

	BT_DBG("sk %p len %d", sk, (int)len);

	count = min_t(unsigned int, (conn->mtu - hlen), len);
	skb = bt_skb_send_alloc(sk, count + hlen,
			msg->msg_flags & MSG_DONTWAIT, &err);
	if (!skb)
		return ERR_PTR(err);

	/* Create L2CAP header */
	lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
	lh->cid = cpu_to_le16(l2cap_pi(sk)->dcid);
	lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
	put_unaligned_le16(l2cap_pi(sk)->psm, skb_put(skb, 2));

	err = l2cap_skbuff_fromiovec(sk, msg, len, count, skb);
	if (unlikely(err < 0)) {
		kfree_skb(skb);
		return ERR_PTR(err);
	}
	return skb;
}
1243
/* Build a basic-mode (B-frame) PDU: bare L2CAP header followed by the
 * user payload copied from @msg.  Payload beyond the first MTU is
 * chained as fragments by l2cap_skbuff_fromiovec().
 *
 * Returns the skb on success, or an ERR_PTR on allocation/copy failure.
 */
struct sk_buff *l2cap_create_basic_pdu(struct sock *sk, struct msghdr *msg, size_t len)
{
	struct l2cap_conn *conn = l2cap_pi(sk)->conn;
	struct sk_buff *skb;
	int err, count, hlen = L2CAP_HDR_SIZE;
	struct l2cap_hdr *lh;

	BT_DBG("sk %p len %d", sk, (int)len);

	count = min_t(unsigned int, (conn->mtu - hlen), len);
	skb = bt_skb_send_alloc(sk, count + hlen,
			msg->msg_flags & MSG_DONTWAIT, &err);
	if (!skb)
		return ERR_PTR(err);

	/* Create L2CAP header */
	lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
	lh->cid = cpu_to_le16(l2cap_pi(sk)->dcid);
	/* hlen == L2CAP_HDR_SIZE here, so lh->len is just the payload length. */
	lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));

	err = l2cap_skbuff_fromiovec(sk, msg, len, count, skb);
	if (unlikely(err < 0)) {
		kfree_skb(skb);
		return ERR_PTR(err);
	}
	return skb;
}
1271
/* Build an ERTM/streaming I-frame PDU: L2CAP header, 16-bit control
 * field, an optional 16-bit SDU length (@sdulen != 0 marks the first
 * segment of a segmented SDU), the payload, and — when CRC16 FCS is in
 * use — a trailing 2-byte FCS placeholder (written as 0 here; the real
 * checksum is computed at transmit time).
 *
 * Returns the skb on success, or an ERR_PTR on failure.
 */
struct sk_buff *l2cap_create_iframe_pdu(struct sock *sk, struct msghdr *msg, size_t len, u16 control, u16 sdulen)
{
	struct l2cap_conn *conn = l2cap_pi(sk)->conn;
	struct sk_buff *skb;
	int err, count, hlen = L2CAP_HDR_SIZE + 2;	/* header + control */
	struct l2cap_hdr *lh;

	BT_DBG("sk %p len %d", sk, (int)len);

	if (!conn)
		return ERR_PTR(-ENOTCONN);

	if (sdulen)
		hlen += 2;	/* room for the SAR SDU-length field */

	if (l2cap_pi(sk)->fcs == L2CAP_FCS_CRC16)
		hlen += 2;	/* room for the trailing FCS */

	count = min_t(unsigned int, (conn->mtu - hlen), len);
	skb = bt_skb_send_alloc(sk, count + hlen,
			msg->msg_flags & MSG_DONTWAIT, &err);
	if (!skb)
		return ERR_PTR(err);

	/* Create L2CAP header */
	lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
	lh->cid = cpu_to_le16(l2cap_pi(sk)->dcid);
	lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
	put_unaligned_le16(control, skb_put(skb, 2));
	if (sdulen)
		put_unaligned_le16(sdulen, skb_put(skb, 2));

	err = l2cap_skbuff_fromiovec(sk, msg, len, count, skb);
	if (unlikely(err < 0)) {
		kfree_skb(skb);
		return ERR_PTR(err);
	}

	if (l2cap_pi(sk)->fcs == L2CAP_FCS_CRC16)
		put_unaligned_le16(0, skb_put(skb, 2));

	bt_cb(skb)->retries = 0;
	return skb;
}
1316
/* Segment an SDU of @len bytes into remote-MPS-sized I-frames (SAR).
 * The first frame is marked START and carries the total SDU length; the
 * following frames are CONTINUE and the last one END.  On success every
 * segment is appended to the channel's TX queue and the SDU size is
 * returned; on failure the partially built queue is purged and a
 * negative errno is returned.
 *
 * NOTE(review): the START frame unconditionally copies pi->remote_mps
 * bytes, so this assumes len > pi->remote_mps on entry (shorter SDUs go
 * out as a single unsegmented I-frame) — confirm at the call site.
 */
int l2cap_sar_segment_sdu(struct sock *sk, struct msghdr *msg, size_t len)
{
	struct l2cap_pinfo *pi = l2cap_pi(sk);
	struct sk_buff *skb;
	struct sk_buff_head sar_queue;
	u16 control;
	size_t size = 0;

	skb_queue_head_init(&sar_queue);
	control = L2CAP_SDU_START;
	/* START segment: payload is one full MPS, sdulen field is @len. */
	skb = l2cap_create_iframe_pdu(sk, msg, pi->remote_mps, control, len);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	__skb_queue_tail(&sar_queue, skb);
	len -= pi->remote_mps;
	size += pi->remote_mps;

	while (len > 0) {
		size_t buflen;

		if (len > pi->remote_mps) {
			control = L2CAP_SDU_CONTINUE;
			buflen = pi->remote_mps;
		} else {
			control = L2CAP_SDU_END;
			buflen = len;
		}

		skb = l2cap_create_iframe_pdu(sk, msg, buflen, control, 0);
		if (IS_ERR(skb)) {
			skb_queue_purge(&sar_queue);
			return PTR_ERR(skb);
		}

		__skb_queue_tail(&sar_queue, skb);
		len -= buflen;
		size += buflen;
	}
	skb_queue_splice_tail(&sar_queue, TX_QUEUE(sk));
	/* If nothing was pending, transmission starts from our first frame. */
	if (sk->sk_send_head == NULL)
		sk->sk_send_head = sar_queue.next;

	return size;
}
1362
1363 static void l2cap_chan_ready(struct sock *sk)
1364 {
1365 struct sock *parent = bt_sk(sk)->parent;
1366
1367 BT_DBG("sk %p, parent %p", sk, parent);
1368
1369 l2cap_pi(sk)->conf_state = 0;
1370 l2cap_sock_clear_timer(sk);
1371
1372 if (!parent) {
1373 /* Outgoing channel.
1374 * Wake up socket sleeping on connect.
1375 */
1376 sk->sk_state = BT_CONNECTED;
1377 sk->sk_state_change(sk);
1378 } else {
1379 /* Incoming channel.
1380 * Wake up socket sleeping on accept.
1381 */
1382 parent->sk_data_ready(parent, 0);
1383 }
1384 }
1385
1386 /* Copy frame to all raw sockets on that connection */
1387 static void l2cap_raw_recv(struct l2cap_conn *conn, struct sk_buff *skb)
1388 {
1389 struct l2cap_chan_list *l = &conn->chan_list;
1390 struct sk_buff *nskb;
1391 struct sock *sk;
1392
1393 BT_DBG("conn %p", conn);
1394
1395 read_lock(&l->lock);
1396 for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
1397 if (sk->sk_type != SOCK_RAW)
1398 continue;
1399
1400 /* Don't send frame to the socket it came from */
1401 if (skb->sk == sk)
1402 continue;
1403 nskb = skb_clone(skb, GFP_ATOMIC);
1404 if (!nskb)
1405 continue;
1406
1407 if (sock_queue_rcv_skb(sk, nskb))
1408 kfree_skb(nskb);
1409 }
1410 read_unlock(&l->lock);
1411 }
1412
1413 /* ---- L2CAP signalling commands ---- */
/* Allocate and fill a signalling-channel PDU: L2CAP header + command
 * header + @dlen bytes of @data.  Data beyond the first MTU is chained
 * as continuation fragments.  The PDU is addressed to the LE or classic
 * signalling CID depending on the link type.
 *
 * Returns the skb, or NULL on allocation failure.
 */
static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
		u8 code, u8 ident, u16 dlen, void *data)
{
	struct sk_buff *skb, **frag;
	struct l2cap_cmd_hdr *cmd;
	struct l2cap_hdr *lh;
	int len, count;

	BT_DBG("conn %p, code 0x%2.2x, ident 0x%2.2x, len %d",
			conn, code, ident, dlen);

	len = L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE + dlen;
	count = min_t(unsigned int, conn->mtu, len);

	skb = bt_skb_alloc(count, GFP_ATOMIC);
	if (!skb)
		return NULL;

	lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
	lh->len = cpu_to_le16(L2CAP_CMD_HDR_SIZE + dlen);

	if (conn->hcon->type == LE_LINK)
		lh->cid = cpu_to_le16(L2CAP_CID_LE_SIGNALING);
	else
		lh->cid = cpu_to_le16(L2CAP_CID_SIGNALING);

	cmd = (struct l2cap_cmd_hdr *) skb_put(skb, L2CAP_CMD_HDR_SIZE);
	cmd->code = code;
	cmd->ident = ident;
	cmd->len = cpu_to_le16(dlen);

	if (dlen) {
		/* Whatever fits after the two headers goes in the first skb. */
		count -= L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE;
		memcpy(skb_put(skb, count), data, count);
		data += count;
	}

	len -= skb->len;

	/* Continuation fragments (no L2CAP header) */
	frag = &skb_shinfo(skb)->frag_list;
	while (len) {
		count = min_t(unsigned int, conn->mtu, len);

		*frag = bt_skb_alloc(count, GFP_ATOMIC);
		if (!*frag)
			goto fail;

		memcpy(skb_put(*frag, count), data, count);

		len -= count;
		data += count;

		frag = &(*frag)->next;
	}

	return skb;

fail:
	kfree_skb(skb);
	return NULL;
}
1476
/* Decode one configuration option at *@ptr: reports its type and length,
 * and for 1/2/4-byte options its LE-decoded value; for any other length
 * *@val is a pointer to the raw option bytes instead.  Advances *@ptr
 * past the option and returns the number of bytes consumed.
 *
 * NOTE(review): opt->len comes straight off the air and is not validated
 * against the remaining buffer here — the callers' outer length
 * accounting must prevent reads past the end of the request; confirm.
 */
static inline int l2cap_get_conf_opt(void **ptr, int *type, int *olen, unsigned long *val)
{
	struct l2cap_conf_opt *opt = *ptr;
	int len;

	len = L2CAP_CONF_OPT_SIZE + opt->len;
	*ptr += len;

	*type = opt->type;
	*olen = opt->len;

	switch (opt->len) {
	case 1:
		*val = *((u8 *) opt->val);
		break;

	case 2:
		*val = get_unaligned_le16(opt->val);
		break;

	case 4:
		*val = get_unaligned_le32(opt->val);
		break;

	default:
		/* Variable-length option: hand back a pointer, not a value. */
		*val = (unsigned long) opt->val;
		break;
	}

	BT_DBG("type 0x%2.2x len %d val 0x%lx", *type, opt->len, *val);
	return len;
}
1509
/* Append one configuration option (type, length, value) at *@ptr and
 * advance the pointer past it.  1/2/4-byte values are stored
 * little-endian; any other length copies @len bytes from the buffer
 * @val points to.
 *
 * NOTE(review): no output-buffer bound is taken — the caller must
 * guarantee L2CAP_CONF_OPT_SIZE + len bytes of space at *@ptr.
 */
static void l2cap_add_conf_opt(void **ptr, u8 type, u8 len, unsigned long val)
{
	struct l2cap_conf_opt *opt = *ptr;

	BT_DBG("type 0x%2.2x len %d val 0x%lx", type, len, val);

	opt->type = type;
	opt->len = len;

	switch (len) {
	case 1:
		*((u8 *) opt->val) = val;
		break;

	case 2:
		put_unaligned_le16(val, opt->val);
		break;

	case 4:
		put_unaligned_le32(val, opt->val);
		break;

	default:
		/* Variable-length option: @val is really a pointer. */
		memcpy(opt->val, (void *) val, len);
		break;
	}

	*ptr += L2CAP_CONF_OPT_SIZE + len;
}
1539
/* Delayed-ack timer callback: push out an acknowledgment S-frame for the
 * channel whose socket is packed into @arg.
 */
static void l2cap_ack_timeout(unsigned long arg)
{
	struct sock *sk = (struct sock *) arg;

	bh_lock_sock(sk);
	l2cap_send_ack(l2cap_pi(sk));
	bh_unlock_sock(sk);
}
1548
/* Initialise the ERTM machinery for a freshly configured channel:
 * reset the sequence-number bookkeeping, arm the retransmission,
 * monitor and delayed-ack timers, set up the SREJ and busy queues,
 * and route backlogged packets through the ERTM receive path.
 */
static inline void l2cap_ertm_init(struct sock *sk)
{
	l2cap_pi(sk)->expected_ack_seq = 0;
	l2cap_pi(sk)->unacked_frames = 0;
	l2cap_pi(sk)->buffer_seq = 0;
	l2cap_pi(sk)->num_acked = 0;
	l2cap_pi(sk)->frames_sent = 0;

	setup_timer(&l2cap_pi(sk)->retrans_timer,
			l2cap_retrans_timeout, (unsigned long) sk);
	setup_timer(&l2cap_pi(sk)->monitor_timer,
			l2cap_monitor_timeout, (unsigned long) sk);
	setup_timer(&l2cap_pi(sk)->ack_timer,
			l2cap_ack_timeout, (unsigned long) sk);

	__skb_queue_head_init(SREJ_QUEUE(sk));
	__skb_queue_head_init(BUSY_QUEUE(sk));

	INIT_WORK(&l2cap_pi(sk)->busy_work, l2cap_busy_work);

	sk->sk_backlog_rcv = l2cap_ertm_data_rcv;
}
1571
1572 static inline __u8 l2cap_select_mode(__u8 mode, __u16 remote_feat_mask)
1573 {
1574 switch (mode) {
1575 case L2CAP_MODE_STREAMING:
1576 case L2CAP_MODE_ERTM:
1577 if (l2cap_mode_supported(mode, remote_feat_mask))
1578 return mode;
1579 /* fall through */
1580 default:
1581 return L2CAP_MODE_BASIC;
1582 }
1583 }
1584
/* Build an outgoing Configure Request for the channel into @data.
 * On the first exchange the desired mode is reconciled with the peer's
 * advertised feature mask; later requests keep the mode already chosen.
 * Options emitted: MTU (when non-default), RFC mode parameters and —
 * when the peer supports FCS and we want it off — an FCS option.
 *
 * Returns the total request length in bytes.
 */
int l2cap_build_conf_req(struct sock *sk, void *data)
{
	struct l2cap_pinfo *pi = l2cap_pi(sk);
	struct l2cap_conf_req *req = data;
	struct l2cap_conf_rfc rfc = { .mode = pi->mode };
	void *ptr = req->data;

	BT_DBG("sk %p", sk);

	/* Mode selection only happens on the very first request. */
	if (pi->num_conf_req || pi->num_conf_rsp)
		goto done;

	switch (pi->mode) {
	case L2CAP_MODE_STREAMING:
	case L2CAP_MODE_ERTM:
		/* Mode pinned by the user/device: don't renegotiate. */
		if (pi->conf_state & L2CAP_CONF_STATE2_DEVICE)
			break;

		/* fall through */
	default:
		pi->mode = l2cap_select_mode(rfc.mode, pi->conn->feat_mask);
		break;
	}

done:
	if (pi->imtu != L2CAP_DEFAULT_MTU)
		l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, pi->imtu);

	switch (pi->mode) {
	case L2CAP_MODE_BASIC:
		/* Only spell out basic mode when the peer understands the
		 * RFC option at all (i.e. knows ERTM or streaming). */
		if (!(pi->conn->feat_mask & L2CAP_FEAT_ERTM) &&
				!(pi->conn->feat_mask & L2CAP_FEAT_STREAMING))
			break;

		rfc.mode = L2CAP_MODE_BASIC;
		rfc.txwin_size = 0;
		rfc.max_transmit = 0;
		rfc.retrans_timeout = 0;
		rfc.monitor_timeout = 0;
		rfc.max_pdu_size = 0;

		l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
					(unsigned long) &rfc);
		break;

	case L2CAP_MODE_ERTM:
		rfc.mode = L2CAP_MODE_ERTM;
		rfc.txwin_size = pi->tx_win;
		rfc.max_transmit = pi->max_tx;
		rfc.retrans_timeout = 0;
		rfc.monitor_timeout = 0;
		rfc.max_pdu_size = cpu_to_le16(L2CAP_DEFAULT_MAX_PDU_SIZE);
		/* Clamp MPS so a full PDU (plus per-frame overhead) still
		 * fits in one HCI/L2CAP link MTU. */
		if (L2CAP_DEFAULT_MAX_PDU_SIZE > pi->conn->mtu - 10)
			rfc.max_pdu_size = cpu_to_le16(pi->conn->mtu - 10);

		l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
					(unsigned long) &rfc);

		if (!(pi->conn->feat_mask & L2CAP_FEAT_FCS))
			break;

		if (pi->fcs == L2CAP_FCS_NONE ||
				pi->conf_state & L2CAP_CONF_NO_FCS_RECV) {
			pi->fcs = L2CAP_FCS_NONE;
			l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1, pi->fcs);
		}
		break;

	case L2CAP_MODE_STREAMING:
		rfc.mode = L2CAP_MODE_STREAMING;
		rfc.txwin_size = 0;
		rfc.max_transmit = 0;
		rfc.retrans_timeout = 0;
		rfc.monitor_timeout = 0;
		rfc.max_pdu_size = cpu_to_le16(L2CAP_DEFAULT_MAX_PDU_SIZE);
		/* Same MPS clamp as for ERTM above. */
		if (L2CAP_DEFAULT_MAX_PDU_SIZE > pi->conn->mtu - 10)
			rfc.max_pdu_size = cpu_to_le16(pi->conn->mtu - 10);

		l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
					(unsigned long) &rfc);

		if (!(pi->conn->feat_mask & L2CAP_FEAT_FCS))
			break;

		if (pi->fcs == L2CAP_FCS_NONE ||
				pi->conf_state & L2CAP_CONF_NO_FCS_RECV) {
			pi->fcs = L2CAP_FCS_NONE;
			l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1, pi->fcs);
		}
		break;
	}

	req->dcid = cpu_to_le16(pi->dcid);
	req->flags = cpu_to_le16(0);

	return ptr - data;
}
1682
1683 static int l2cap_parse_conf_req(struct sock *sk, void *data)
1684 {
1685 struct l2cap_pinfo *pi = l2cap_pi(sk);
1686 struct l2cap_conf_rsp *rsp = data;
1687 void *ptr = rsp->data;
1688 void *req = pi->conf_req;
1689 int len = pi->conf_len;
1690 int type, hint, olen;
1691 unsigned long val;
1692 struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
1693 u16 mtu = L2CAP_DEFAULT_MTU;
1694 u16 result = L2CAP_CONF_SUCCESS;
1695
1696 BT_DBG("sk %p", sk);
1697
1698 while (len >= L2CAP_CONF_OPT_SIZE) {
1699 len -= l2cap_get_conf_opt(&req, &type, &olen, &val);
1700
1701 hint = type & L2CAP_CONF_HINT;
1702 type &= L2CAP_CONF_MASK;
1703
1704 switch (type) {
1705 case L2CAP_CONF_MTU:
1706 mtu = val;
1707 break;
1708
1709 case L2CAP_CONF_FLUSH_TO:
1710 pi->flush_to = val;
1711 break;
1712
1713 case L2CAP_CONF_QOS:
1714 break;
1715
1716 case L2CAP_CONF_RFC:
1717 if (olen == sizeof(rfc))
1718 memcpy(&rfc, (void *) val, olen);
1719 break;
1720
1721 case L2CAP_CONF_FCS:
1722 if (val == L2CAP_FCS_NONE)
1723 pi->conf_state |= L2CAP_CONF_NO_FCS_RECV;
1724
1725 break;
1726
1727 default:
1728 if (hint)
1729 break;
1730
1731 result = L2CAP_CONF_UNKNOWN;
1732 *((u8 *) ptr++) = type;
1733 break;
1734 }
1735 }
1736
1737 if (pi->num_conf_rsp || pi->num_conf_req > 1)
1738 goto done;
1739
1740 switch (pi->mode) {
1741 case L2CAP_MODE_STREAMING:
1742 case L2CAP_MODE_ERTM:
1743 if (!(pi->conf_state & L2CAP_CONF_STATE2_DEVICE)) {
1744 pi->mode = l2cap_select_mode(rfc.mode,
1745 pi->conn->feat_mask);
1746 break;
1747 }
1748
1749 if (pi->mode != rfc.mode)
1750 return -ECONNREFUSED;
1751
1752 break;
1753 }
1754
1755 done:
1756 if (pi->mode != rfc.mode) {
1757 result = L2CAP_CONF_UNACCEPT;
1758 rfc.mode = pi->mode;
1759
1760 if (pi->num_conf_rsp == 1)
1761 return -ECONNREFUSED;
1762
1763 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
1764 sizeof(rfc), (unsigned long) &rfc);
1765 }
1766
1767
1768 if (result == L2CAP_CONF_SUCCESS) {
1769 /* Configure output options and let the other side know
1770 * which ones we don't like. */
1771
1772 if (mtu < L2CAP_DEFAULT_MIN_MTU)
1773 result = L2CAP_CONF_UNACCEPT;
1774 else {
1775 pi->omtu = mtu;
1776 pi->conf_state |= L2CAP_CONF_MTU_DONE;
1777 }
1778 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, pi->omtu);
1779
1780 switch (rfc.mode) {
1781 case L2CAP_MODE_BASIC:
1782 pi->fcs = L2CAP_FCS_NONE;
1783 pi->conf_state |= L2CAP_CONF_MODE_DONE;
1784 break;
1785
1786 case L2CAP_MODE_ERTM:
1787 pi->remote_tx_win = rfc.txwin_size;
1788 pi->remote_max_tx = rfc.max_transmit;
1789
1790 if (le16_to_cpu(rfc.max_pdu_size) > pi->conn->mtu - 10)
1791 rfc.max_pdu_size = cpu_to_le16(pi->conn->mtu - 10);
1792
1793 pi->remote_mps = le16_to_cpu(rfc.max_pdu_size);
1794
1795 rfc.retrans_timeout =
1796 le16_to_cpu(L2CAP_DEFAULT_RETRANS_TO);
1797 rfc.monitor_timeout =
1798 le16_to_cpu(L2CAP_DEFAULT_MONITOR_TO);
1799
1800 pi->conf_state |= L2CAP_CONF_MODE_DONE;
1801
1802 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
1803 sizeof(rfc), (unsigned long) &rfc);
1804
1805 break;
1806
1807 case L2CAP_MODE_STREAMING:
1808 if (le16_to_cpu(rfc.max_pdu_size) > pi->conn->mtu - 10)
1809 rfc.max_pdu_size = cpu_to_le16(pi->conn->mtu - 10);
1810
1811 pi->remote_mps = le16_to_cpu(rfc.max_pdu_size);
1812
1813 pi->conf_state |= L2CAP_CONF_MODE_DONE;
1814
1815 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
1816 sizeof(rfc), (unsigned long) &rfc);
1817
1818 break;
1819
1820 default:
1821 result = L2CAP_CONF_UNACCEPT;
1822
1823 memset(&rfc, 0, sizeof(rfc));
1824 rfc.mode = pi->mode;
1825 }
1826
1827 if (result == L2CAP_CONF_SUCCESS)
1828 pi->conf_state |= L2CAP_CONF_OUTPUT_DONE;
1829 }
1830 rsp->scid = cpu_to_le16(pi->dcid);
1831 rsp->result = cpu_to_le16(result);
1832 rsp->flags = cpu_to_le16(0x0000);
1833
1834 return ptr - data;
1835 }
1836
1837 static int l2cap_parse_conf_rsp(struct sock *sk, void *rsp, int len, void *data, u16 *result)
1838 {
1839 struct l2cap_pinfo *pi = l2cap_pi(sk);
1840 struct l2cap_conf_req *req = data;
1841 void *ptr = req->data;
1842 int type, olen;
1843 unsigned long val;
1844 struct l2cap_conf_rfc rfc;
1845
1846 BT_DBG("sk %p, rsp %p, len %d, req %p", sk, rsp, len, data);
1847
1848 while (len >= L2CAP_CONF_OPT_SIZE) {
1849 len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);
1850
1851 switch (type) {
1852 case L2CAP_CONF_MTU:
1853 if (val < L2CAP_DEFAULT_MIN_MTU) {
1854 *result = L2CAP_CONF_UNACCEPT;
1855 pi->imtu = L2CAP_DEFAULT_MIN_MTU;
1856 } else
1857 pi->imtu = val;
1858 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, pi->imtu);
1859 break;
1860
1861 case L2CAP_CONF_FLUSH_TO:
1862 pi->flush_to = val;
1863 l2cap_add_conf_opt(&ptr, L2CAP_CONF_FLUSH_TO,
1864 2, pi->flush_to);
1865 break;
1866
1867 case L2CAP_CONF_RFC:
1868 if (olen == sizeof(rfc))
1869 memcpy(&rfc, (void *)val, olen);
1870
1871 if ((pi->conf_state & L2CAP_CONF_STATE2_DEVICE) &&
1872 rfc.mode != pi->mode)
1873 return -ECONNREFUSED;
1874
1875 pi->fcs = 0;
1876
1877 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
1878 sizeof(rfc), (unsigned long) &rfc);
1879 break;
1880 }
1881 }
1882
1883 if (pi->mode == L2CAP_MODE_BASIC && pi->mode != rfc.mode)
1884 return -ECONNREFUSED;
1885
1886 pi->mode = rfc.mode;
1887
1888 if (*result == L2CAP_CONF_SUCCESS) {
1889 switch (rfc.mode) {
1890 case L2CAP_MODE_ERTM:
1891 pi->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
1892 pi->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
1893 pi->mps = le16_to_cpu(rfc.max_pdu_size);
1894 break;
1895 case L2CAP_MODE_STREAMING:
1896 pi->mps = le16_to_cpu(rfc.max_pdu_size);
1897 }
1898 }
1899
1900 req->dcid = cpu_to_le16(pi->dcid);
1901 req->flags = cpu_to_le16(0x0000);
1902
1903 return ptr - data;
1904 }
1905
1906 static int l2cap_build_conf_rsp(struct sock *sk, void *data, u16 result, u16 flags)
1907 {
1908 struct l2cap_conf_rsp *rsp = data;
1909 void *ptr = rsp->data;
1910
1911 BT_DBG("sk %p", sk);
1912
1913 rsp->scid = cpu_to_le16(l2cap_pi(sk)->dcid);
1914 rsp->result = cpu_to_le16(result);
1915 rsp->flags = cpu_to_le16(flags);
1916
1917 return ptr - data;
1918 }
1919
1920 static void l2cap_conf_rfc_get(struct sock *sk, void *rsp, int len)
1921 {
1922 struct l2cap_pinfo *pi = l2cap_pi(sk);
1923 int type, olen;
1924 unsigned long val;
1925 struct l2cap_conf_rfc rfc;
1926
1927 BT_DBG("sk %p, rsp %p, len %d", sk, rsp, len);
1928
1929 if ((pi->mode != L2CAP_MODE_ERTM) && (pi->mode != L2CAP_MODE_STREAMING))
1930 return;
1931
1932 while (len >= L2CAP_CONF_OPT_SIZE) {
1933 len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);
1934
1935 switch (type) {
1936 case L2CAP_CONF_RFC:
1937 if (olen == sizeof(rfc))
1938 memcpy(&rfc, (void *)val, olen);
1939 goto done;
1940 }
1941 }
1942
1943 done:
1944 switch (rfc.mode) {
1945 case L2CAP_MODE_ERTM:
1946 pi->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
1947 pi->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
1948 pi->mps = le16_to_cpu(rfc.max_pdu_size);
1949 break;
1950 case L2CAP_MODE_STREAMING:
1951 pi->mps = le16_to_cpu(rfc.max_pdu_size);
1952 }
1953 }
1954
1955 static inline int l2cap_command_rej(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
1956 {
1957 struct l2cap_cmd_rej *rej = (struct l2cap_cmd_rej *) data;
1958
1959 if (rej->reason != 0x0000)
1960 return 0;
1961
1962 if ((conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) &&
1963 cmd->ident == conn->info_ident) {
1964 del_timer(&conn->info_timer);
1965
1966 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
1967 conn->info_ident = 0;
1968
1969 l2cap_conn_start(conn);
1970 }
1971
1972 return 0;
1973 }
1974
/* Handle an incoming Connection Request: find a listener for the PSM,
 * run security and backlog checks, allocate and register a child
 * socket/channel, and send a Connection Response (success, pending or
 * failure).  When the peer's feature mask is still unknown, a feature
 * information exchange is started before configuration can proceed.
 */
static inline int l2cap_connect_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
{
	struct l2cap_chan_list *list = &conn->chan_list;
	struct l2cap_conn_req *req = (struct l2cap_conn_req *) data;
	struct l2cap_conn_rsp rsp;
	struct sock *parent, *sk = NULL;
	int result, status = L2CAP_CS_NO_INFO;

	u16 dcid = 0, scid = __le16_to_cpu(req->scid);
	__le16 psm = req->psm;

	BT_DBG("psm 0x%2.2x scid 0x%4.4x", psm, scid);

	/* Check if we have socket listening on psm */
	parent = l2cap_get_sock_by_psm(BT_LISTEN, psm, conn->src);
	if (!parent) {
		result = L2CAP_CR_BAD_PSM;
		goto sendresp;
	}

	bh_lock_sock(parent);

	/* Check if the ACL is secure enough (if not SDP) */
	if (psm != cpu_to_le16(0x0001) &&
				!hci_conn_check_link_mode(conn->hcon)) {
		conn->disc_reason = 0x05;	/* authentication failure */
		result = L2CAP_CR_SEC_BLOCK;
		goto response;
	}

	result = L2CAP_CR_NO_MEM;

	/* Check for backlog size */
	if (sk_acceptq_is_full(parent)) {
		BT_DBG("backlog full %d", parent->sk_ack_backlog);
		goto response;
	}

	sk = l2cap_sock_alloc(sock_net(parent), NULL, BTPROTO_L2CAP, GFP_ATOMIC);
	if (!sk)
		goto response;

	write_lock_bh(&list->lock);

	/* Check if we already have channel with that dcid */
	if (__l2cap_get_chan_by_dcid(list, scid)) {
		write_unlock_bh(&list->lock);
		sock_set_flag(sk, SOCK_ZAPPED);
		l2cap_sock_kill(sk);
		goto response;
	}

	hci_conn_hold(conn->hcon);

	/* Initialise the child from the listening parent and bind it to
	 * this ACL's addresses and the requested channel. */
	l2cap_sock_init(sk, parent);
	bacpy(&bt_sk(sk)->src, conn->src);
	bacpy(&bt_sk(sk)->dst, conn->dst);
	l2cap_pi(sk)->psm = psm;
	l2cap_pi(sk)->dcid = scid;

	__l2cap_chan_add(conn, sk, parent);
	dcid = l2cap_pi(sk)->scid;

	l2cap_sock_set_timer(sk, sk->sk_sndtimeo);

	l2cap_pi(sk)->ident = cmd->ident;

	if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE) {
		if (l2cap_check_security(sk)) {
			if (bt_sk(sk)->defer_setup) {
				/* Userspace must accept before we proceed. */
				sk->sk_state = BT_CONNECT2;
				result = L2CAP_CR_PEND;
				status = L2CAP_CS_AUTHOR_PEND;
				parent->sk_data_ready(parent, 0);
			} else {
				sk->sk_state = BT_CONFIG;
				result = L2CAP_CR_SUCCESS;
				status = L2CAP_CS_NO_INFO;
			}
		} else {
			/* Security procedure still in progress. */
			sk->sk_state = BT_CONNECT2;
			result = L2CAP_CR_PEND;
			status = L2CAP_CS_AUTHEN_PEND;
		}
	} else {
		/* Feature mask not yet known: answer "pending". */
		sk->sk_state = BT_CONNECT2;
		result = L2CAP_CR_PEND;
		status = L2CAP_CS_NO_INFO;
	}

	write_unlock_bh(&list->lock);

response:
	bh_unlock_sock(parent);

sendresp:
	rsp.scid = cpu_to_le16(scid);
	rsp.dcid = cpu_to_le16(dcid);
	rsp.result = cpu_to_le16(result);
	rsp.status = cpu_to_le16(status);
	l2cap_send_cmd(conn, cmd->ident, L2CAP_CONN_RSP, sizeof(rsp), &rsp);

	if (result == L2CAP_CR_PEND && status == L2CAP_CS_NO_INFO) {
		/* Kick off the feature-mask information exchange. */
		struct l2cap_info_req info;
		info.type = cpu_to_le16(L2CAP_IT_FEAT_MASK);

		conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
		conn->info_ident = l2cap_get_ident(conn);

		mod_timer(&conn->info_timer, jiffies +
					msecs_to_jiffies(L2CAP_INFO_TIMEOUT));

		l2cap_send_cmd(conn, conn->info_ident,
					L2CAP_INFO_REQ, sizeof(info), &info);
	}

	if (sk && !(l2cap_pi(sk)->conf_state & L2CAP_CONF_REQ_SENT) &&
				result == L2CAP_CR_SUCCESS) {
		u8 buf[128];
		l2cap_pi(sk)->conf_state |= L2CAP_CONF_REQ_SENT;
		l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
					l2cap_build_conf_req(sk, buf), buf);
		l2cap_pi(sk)->num_conf_req++;
	}

	return 0;
}
2102
/* Handle an incoming Connection Response: look up the channel (by our
 * source CID, or by the command ident while the CID is still unknown)
 * and advance it to configuration on success, keep it pending, or tear
 * it down on failure.
 *
 * NOTE(review): both channel getters appear to return with the socket
 * bh-locked — the bh_unlock_sock() at the end pairs with that; confirm
 * against their definitions.
 */
static inline int l2cap_connect_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
{
	struct l2cap_conn_rsp *rsp = (struct l2cap_conn_rsp *) data;
	u16 scid, dcid, result, status;
	struct sock *sk;
	u8 req[128];

	scid = __le16_to_cpu(rsp->scid);
	dcid = __le16_to_cpu(rsp->dcid);
	result = __le16_to_cpu(rsp->result);
	status = __le16_to_cpu(rsp->status);

	BT_DBG("dcid 0x%4.4x scid 0x%4.4x result 0x%2.2x status 0x%2.2x", dcid, scid, result, status);

	if (scid) {
		sk = l2cap_get_chan_by_scid(&conn->chan_list, scid);
		if (!sk)
			return -EFAULT;
	} else {
		sk = l2cap_get_chan_by_ident(&conn->chan_list, cmd->ident);
		if (!sk)
			return -EFAULT;
	}

	switch (result) {
	case L2CAP_CR_SUCCESS:
		/* Connection accepted: move to config and send our
		 * Configure Request if we haven't already. */
		sk->sk_state = BT_CONFIG;
		l2cap_pi(sk)->ident = 0;
		l2cap_pi(sk)->dcid = dcid;
		l2cap_pi(sk)->conf_state &= ~L2CAP_CONF_CONNECT_PEND;

		if (l2cap_pi(sk)->conf_state & L2CAP_CONF_REQ_SENT)
			break;

		l2cap_pi(sk)->conf_state |= L2CAP_CONF_REQ_SENT;

		l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
					l2cap_build_conf_req(sk, req), req);
		l2cap_pi(sk)->num_conf_req++;
		break;

	case L2CAP_CR_PEND:
		l2cap_pi(sk)->conf_state |= L2CAP_CONF_CONNECT_PEND;
		break;

	default:
		/* don't delete l2cap channel if sk is owned by user */
		if (sock_owned_by_user(sk)) {
			sk->sk_state = BT_DISCONN;
			l2cap_sock_clear_timer(sk);
			l2cap_sock_set_timer(sk, HZ / 5);
			break;
		}

		l2cap_chan_del(sk, ECONNREFUSED);
		break;
	}

	bh_unlock_sock(sk);
	return 0;
}
2164
2165 static inline void set_default_fcs(struct l2cap_pinfo *pi)
2166 {
2167 /* FCS is enabled only in ERTM or streaming mode, if one or both
2168 * sides request it.
2169 */
2170 if (pi->mode != L2CAP_MODE_ERTM && pi->mode != L2CAP_MODE_STREAMING)
2171 pi->fcs = L2CAP_FCS_NONE;
2172 else if (!(pi->conf_state & L2CAP_CONF_NO_FCS_RECV))
2173 pi->fcs = L2CAP_FCS_CRC16;
2174 }
2175
/* Handle an incoming Configure Request: accumulate option data in the
 * channel's conf_req buffer (requests may span several PDUs via the
 * continuation flag), then parse the complete set, send our response,
 * and — once both directions are configured — bring the channel up.
 */
static inline int l2cap_config_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u16 cmd_len, u8 *data)
{
	struct l2cap_conf_req *req = (struct l2cap_conf_req *) data;
	u16 dcid, flags;
	u8 rsp[64];
	struct sock *sk;
	int len;

	dcid = __le16_to_cpu(req->dcid);
	flags = __le16_to_cpu(req->flags);

	BT_DBG("dcid 0x%4.4x flags 0x%2.2x", dcid, flags);

	sk = l2cap_get_chan_by_scid(&conn->chan_list, dcid);
	if (!sk)
		return -ENOENT;

	if (sk->sk_state != BT_CONFIG) {
		struct l2cap_cmd_rej rej;

		/* 0x0002 = invalid CID in request */
		rej.reason = cpu_to_le16(0x0002);
		l2cap_send_cmd(conn, cmd->ident, L2CAP_COMMAND_REJ,
				sizeof(rej), &rej);
		goto unlock;
	}

	/* Reject if config buffer is too small. */
	len = cmd_len - sizeof(*req);
	if (l2cap_pi(sk)->conf_len + len > sizeof(l2cap_pi(sk)->conf_req)) {
		l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
				l2cap_build_conf_rsp(sk, rsp,
					L2CAP_CONF_REJECT, flags), rsp);
		goto unlock;
	}

	/* Store config. */
	memcpy(l2cap_pi(sk)->conf_req + l2cap_pi(sk)->conf_len, req->data, len);
	l2cap_pi(sk)->conf_len += len;

	if (flags & 0x0001) {
		/* Incomplete config. Send empty response. */
		l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
				l2cap_build_conf_rsp(sk, rsp,
					L2CAP_CONF_SUCCESS, 0x0001), rsp);
		goto unlock;
	}

	/* Complete config. */
	len = l2cap_parse_conf_req(sk, rsp);
	if (len < 0) {
		l2cap_send_disconn_req(conn, sk, ECONNRESET);
		goto unlock;
	}

	l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP, len, rsp);
	l2cap_pi(sk)->num_conf_rsp++;

	/* Reset config buffer. */
	l2cap_pi(sk)->conf_len = 0;

	if (!(l2cap_pi(sk)->conf_state & L2CAP_CONF_OUTPUT_DONE))
		goto unlock;

	if (l2cap_pi(sk)->conf_state & L2CAP_CONF_INPUT_DONE) {
		/* Both directions configured: channel is operational. */
		set_default_fcs(l2cap_pi(sk));

		sk->sk_state = BT_CONNECTED;

		l2cap_pi(sk)->next_tx_seq = 0;
		l2cap_pi(sk)->expected_tx_seq = 0;
		__skb_queue_head_init(TX_QUEUE(sk));
		if (l2cap_pi(sk)->mode == L2CAP_MODE_ERTM)
			l2cap_ertm_init(sk);

		l2cap_chan_ready(sk);
		goto unlock;
	}

	if (!(l2cap_pi(sk)->conf_state & L2CAP_CONF_REQ_SENT)) {
		u8 buf[64];
		l2cap_pi(sk)->conf_state |= L2CAP_CONF_REQ_SENT;
		l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
					l2cap_build_conf_req(sk, buf), buf);
		l2cap_pi(sk)->num_conf_req++;
	}

unlock:
	bh_unlock_sock(sk);
	return 0;
}
2266
2267 static inline int l2cap_config_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2268 {
2269 struct l2cap_conf_rsp *rsp = (struct l2cap_conf_rsp *)data;
2270 u16 scid, flags, result;
2271 struct sock *sk;
2272 int len = cmd->len - sizeof(*rsp);
2273
2274 scid = __le16_to_cpu(rsp->scid);
2275 flags = __le16_to_cpu(rsp->flags);
2276 result = __le16_to_cpu(rsp->result);
2277
2278 BT_DBG("scid 0x%4.4x flags 0x%2.2x result 0x%2.2x",
2279 scid, flags, result);
2280
2281 sk = l2cap_get_chan_by_scid(&conn->chan_list, scid);
2282 if (!sk)
2283 return 0;
2284
2285 switch (result) {
2286 case L2CAP_CONF_SUCCESS:
2287 l2cap_conf_rfc_get(sk, rsp->data, len);
2288 break;
2289
2290 case L2CAP_CONF_UNACCEPT:
2291 if (l2cap_pi(sk)->num_conf_rsp <= L2CAP_CONF_MAX_CONF_RSP) {
2292 char req[64];
2293
2294 if (len > sizeof(req) - sizeof(struct l2cap_conf_req)) {
2295 l2cap_send_disconn_req(conn, sk, ECONNRESET);
2296 goto done;
2297 }
2298
2299 /* throw out any old stored conf requests */
2300 result = L2CAP_CONF_SUCCESS;
2301 len = l2cap_parse_conf_rsp(sk, rsp->data,
2302 len, req, &result);
2303 if (len < 0) {
2304 l2cap_send_disconn_req(conn, sk, ECONNRESET);
2305 goto done;
2306 }
2307
2308 l2cap_send_cmd(conn, l2cap_get_ident(conn),
2309 L2CAP_CONF_REQ, len, req);
2310 l2cap_pi(sk)->num_conf_req++;
2311 if (result != L2CAP_CONF_SUCCESS)
2312 goto done;
2313 break;
2314 }
2315
2316 default:
2317 sk->sk_err = ECONNRESET;
2318 l2cap_sock_set_timer(sk, HZ * 5);
2319 l2cap_send_disconn_req(conn, sk, ECONNRESET);
2320 goto done;
2321 }
2322
2323 if (flags & 0x01)
2324 goto done;
2325
2326 l2cap_pi(sk)->conf_state |= L2CAP_CONF_INPUT_DONE;
2327
2328 if (l2cap_pi(sk)->conf_state & L2CAP_CONF_OUTPUT_DONE) {
2329 set_default_fcs(l2cap_pi(sk));
2330
2331 sk->sk_state = BT_CONNECTED;
2332 l2cap_pi(sk)->next_tx_seq = 0;
2333 l2cap_pi(sk)->expected_tx_seq = 0;
2334 __skb_queue_head_init(TX_QUEUE(sk));
2335 if (l2cap_pi(sk)->mode == L2CAP_MODE_ERTM)
2336 l2cap_ertm_init(sk);
2337
2338 l2cap_chan_ready(sk);
2339 }
2340
2341 done:
2342 bh_unlock_sock(sk);
2343 return 0;
2344 }
2345
/* Handle an incoming Disconnection Request: acknowledge it with a
 * Disconnection Response, shut the socket down, and delete the channel —
 * unless the socket is currently held by a user-space caller, in which
 * case teardown is deferred via a short timer.
 */
static inline int l2cap_disconnect_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
{
	struct l2cap_disconn_req *req = (struct l2cap_disconn_req *) data;
	struct l2cap_disconn_rsp rsp;
	u16 dcid, scid;
	struct sock *sk;

	scid = __le16_to_cpu(req->scid);
	dcid = __le16_to_cpu(req->dcid);

	BT_DBG("scid 0x%4.4x dcid 0x%4.4x", scid, dcid);

	/* The peer's dcid is our scid; lookup returns the socket locked. */
	sk = l2cap_get_chan_by_scid(&conn->chan_list, dcid);
	if (!sk)
		return 0;

	rsp.dcid = cpu_to_le16(l2cap_pi(sk)->scid);
	rsp.scid = cpu_to_le16(l2cap_pi(sk)->dcid);
	l2cap_send_cmd(conn, cmd->ident, L2CAP_DISCONN_RSP, sizeof(rsp), &rsp);

	sk->sk_shutdown = SHUTDOWN_MASK;

	/* don't delete l2cap channel if sk is owned by user */
	if (sock_owned_by_user(sk)) {
		sk->sk_state = BT_DISCONN;
		l2cap_sock_clear_timer(sk);
		l2cap_sock_set_timer(sk, HZ / 5);
		bh_unlock_sock(sk);
		return 0;
	}

	l2cap_chan_del(sk, ECONNRESET);
	bh_unlock_sock(sk);

	l2cap_sock_kill(sk);
	return 0;
}
2383
2384 static inline int l2cap_disconnect_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2385 {
2386 struct l2cap_disconn_rsp *rsp = (struct l2cap_disconn_rsp *) data;
2387 u16 dcid, scid;
2388 struct sock *sk;
2389
2390 scid = __le16_to_cpu(rsp->scid);
2391 dcid = __le16_to_cpu(rsp->dcid);
2392
2393 BT_DBG("dcid 0x%4.4x scid 0x%4.4x", dcid, scid);
2394
2395 sk = l2cap_get_chan_by_scid(&conn->chan_list, scid);
2396 if (!sk)
2397 return 0;
2398
2399 /* don't delete l2cap channel if sk is owned by user */
2400 if (sock_owned_by_user(sk)) {
2401 sk->sk_state = BT_DISCONN;
2402 l2cap_sock_clear_timer(sk);
2403 l2cap_sock_set_timer(sk, HZ / 5);
2404 bh_unlock_sock(sk);
2405 return 0;
2406 }
2407
2408 l2cap_chan_del(sk, 0);
2409 bh_unlock_sock(sk);
2410
2411 l2cap_sock_kill(sk);
2412 return 0;
2413 }
2414
/* Handle an incoming Information Request: answer feature-mask and
 * fixed-channel queries with our capabilities, and anything else with
 * "not supported".
 */
static inline int l2cap_information_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
{
	struct l2cap_info_req *req = (struct l2cap_info_req *) data;
	u16 type;

	type = __le16_to_cpu(req->type);

	BT_DBG("type 0x%4.4x", type);

	if (type == L2CAP_IT_FEAT_MASK) {
		u8 buf[8];	/* info_rsp header (4) + 32-bit feature mask */
		u32 feat_mask = l2cap_feat_mask;
		struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
		rsp->type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
		rsp->result = cpu_to_le16(L2CAP_IR_SUCCESS);
		if (!disable_ertm)
			feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING
							 | L2CAP_FEAT_FCS;
		put_unaligned_le32(feat_mask, rsp->data);
		l2cap_send_cmd(conn, cmd->ident,
					L2CAP_INFO_RSP, sizeof(buf), buf);
	} else if (type == L2CAP_IT_FIXED_CHAN) {
		u8 buf[12];	/* info_rsp header (4) + 8-byte channel map */
		struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
		rsp->type = cpu_to_le16(L2CAP_IT_FIXED_CHAN);
		rsp->result = cpu_to_le16(L2CAP_IR_SUCCESS);
		memcpy(buf + 4, l2cap_fixed_chan, 8);
		l2cap_send_cmd(conn, cmd->ident,
					L2CAP_INFO_RSP, sizeof(buf), buf);
	} else {
		struct l2cap_info_rsp rsp;
		rsp.type = cpu_to_le16(type);
		rsp.result = cpu_to_le16(L2CAP_IR_NOTSUPP);
		l2cap_send_cmd(conn, cmd->ident,
					L2CAP_INFO_RSP, sizeof(rsp), &rsp);
	}

	return 0;
}
2454
2455 static inline int l2cap_information_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2456 {
2457 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) data;
2458 u16 type, result;
2459
2460 type = __le16_to_cpu(rsp->type);
2461 result = __le16_to_cpu(rsp->result);
2462
2463 BT_DBG("type 0x%4.4x result 0x%2.2x", type, result);
2464
2465 del_timer(&conn->info_timer);
2466
2467 if (result != L2CAP_IR_SUCCESS) {
2468 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
2469 conn->info_ident = 0;
2470
2471 l2cap_conn_start(conn);
2472
2473 return 0;
2474 }
2475
2476 if (type == L2CAP_IT_FEAT_MASK) {
2477 conn->feat_mask = get_unaligned_le32(rsp->data);
2478
2479 if (conn->feat_mask & L2CAP_FEAT_FIXED_CHAN) {
2480 struct l2cap_info_req req;
2481 req.type = cpu_to_le16(L2CAP_IT_FIXED_CHAN);
2482
2483 conn->info_ident = l2cap_get_ident(conn);
2484
2485 l2cap_send_cmd(conn, conn->info_ident,
2486 L2CAP_INFO_REQ, sizeof(req), &req);
2487 } else {
2488 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
2489 conn->info_ident = 0;
2490
2491 l2cap_conn_start(conn);
2492 }
2493 } else if (type == L2CAP_IT_FIXED_CHAN) {
2494 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
2495 conn->info_ident = 0;
2496
2497 l2cap_conn_start(conn);
2498 }
2499
2500 return 0;
2501 }
2502
2503 static inline int l2cap_check_conn_param(u16 min, u16 max, u16 latency,
2504 u16 to_multiplier)
2505 {
2506 u16 max_latency;
2507
2508 if (min > max || min < 6 || max > 3200)
2509 return -EINVAL;
2510
2511 if (to_multiplier < 10 || to_multiplier > 3200)
2512 return -EINVAL;
2513
2514 if (max >= to_multiplier * 8)
2515 return -EINVAL;
2516
2517 max_latency = (to_multiplier * 8 / max) - 1;
2518 if (latency > 499 || latency > max_latency)
2519 return -EINVAL;
2520
2521 return 0;
2522 }
2523
/* Handle an LE Connection Parameter Update Request.  Only valid when
 * we are master of the link.  Validates the proposed parameters,
 * answers with accepted/rejected, and on acceptance asks the
 * controller to apply them.
 * Returns 0 on success, -EINVAL when we are not master, -EPROTO on a
 * malformed request (the caller then emits a Command Reject). */
static inline int l2cap_conn_param_update_req(struct l2cap_conn *conn,
					struct l2cap_cmd_hdr *cmd, u8 *data)
{
	struct hci_conn *hcon = conn->hcon;
	struct l2cap_conn_param_update_req *req;
	struct l2cap_conn_param_update_rsp rsp;
	u16 min, max, latency, to_multiplier, cmd_len;
	int err;

	if (!(hcon->link_mode & HCI_LM_MASTER))
		return -EINVAL;

	/* Reject truncated or oversized payloads before touching them. */
	cmd_len = __le16_to_cpu(cmd->len);
	if (cmd_len != sizeof(struct l2cap_conn_param_update_req))
		return -EPROTO;

	req = (struct l2cap_conn_param_update_req *) data;
	min = __le16_to_cpu(req->min);
	max = __le16_to_cpu(req->max);
	latency = __le16_to_cpu(req->latency);
	to_multiplier = __le16_to_cpu(req->to_multiplier);

	BT_DBG("min 0x%4.4x max 0x%4.4x latency: 0x%4.4x Timeout: 0x%4.4x",
						min, max, latency, to_multiplier);

	memset(&rsp, 0, sizeof(rsp));

	err = l2cap_check_conn_param(min, max, latency, to_multiplier);
	if (err)
		rsp.result = cpu_to_le16(L2CAP_CONN_PARAM_REJECTED);
	else
		rsp.result = cpu_to_le16(L2CAP_CONN_PARAM_ACCEPTED);

	l2cap_send_cmd(conn, cmd->ident, L2CAP_CONN_PARAM_UPDATE_RSP,
							sizeof(rsp), &rsp);

	/* Only program the controller with parameters we accepted. */
	if (!err)
		hci_le_conn_update(hcon, min, max, latency, to_multiplier);

	return 0;
}
2565
2566 static inline int l2cap_bredr_sig_cmd(struct l2cap_conn *conn,
2567 struct l2cap_cmd_hdr *cmd, u16 cmd_len, u8 *data)
2568 {
2569 int err = 0;
2570
2571 switch (cmd->code) {
2572 case L2CAP_COMMAND_REJ:
2573 l2cap_command_rej(conn, cmd, data);
2574 break;
2575
2576 case L2CAP_CONN_REQ:
2577 err = l2cap_connect_req(conn, cmd, data);
2578 break;
2579
2580 case L2CAP_CONN_RSP:
2581 err = l2cap_connect_rsp(conn, cmd, data);
2582 break;
2583
2584 case L2CAP_CONF_REQ:
2585 err = l2cap_config_req(conn, cmd, cmd_len, data);
2586 break;
2587
2588 case L2CAP_CONF_RSP:
2589 err = l2cap_config_rsp(conn, cmd, data);
2590 break;
2591
2592 case L2CAP_DISCONN_REQ:
2593 err = l2cap_disconnect_req(conn, cmd, data);
2594 break;
2595
2596 case L2CAP_DISCONN_RSP:
2597 err = l2cap_disconnect_rsp(conn, cmd, data);
2598 break;
2599
2600 case L2CAP_ECHO_REQ:
2601 l2cap_send_cmd(conn, cmd->ident, L2CAP_ECHO_RSP, cmd_len, data);
2602 break;
2603
2604 case L2CAP_ECHO_RSP:
2605 break;
2606
2607 case L2CAP_INFO_REQ:
2608 err = l2cap_information_req(conn, cmd, data);
2609 break;
2610
2611 case L2CAP_INFO_RSP:
2612 err = l2cap_information_rsp(conn, cmd, data);
2613 break;
2614
2615 default:
2616 BT_ERR("Unknown BR/EDR signaling command 0x%2.2x", cmd->code);
2617 err = -EINVAL;
2618 break;
2619 }
2620
2621 return err;
2622 }
2623
2624 static inline int l2cap_le_sig_cmd(struct l2cap_conn *conn,
2625 struct l2cap_cmd_hdr *cmd, u8 *data)
2626 {
2627 switch (cmd->code) {
2628 case L2CAP_COMMAND_REJ:
2629 return 0;
2630
2631 case L2CAP_CONN_PARAM_UPDATE_REQ:
2632 return l2cap_conn_param_update_req(conn, cmd, data);
2633
2634 case L2CAP_CONN_PARAM_UPDATE_RSP:
2635 return 0;
2636
2637 default:
2638 BT_ERR("Unknown LE signaling command 0x%2.2x", cmd->code);
2639 return -EINVAL;
2640 }
2641 }
2642
/* Parse and dispatch all signalling commands contained in one C-frame.
 * Several commands may be concatenated; each one's claimed length is
 * validated against the remaining buffer before dispatch to the
 * BR/EDR or LE handler.  A failing handler triggers an L2CAP Command
 * Reject.  Consumes the skb. */
static inline void l2cap_sig_channel(struct l2cap_conn *conn,
						struct sk_buff *skb)
{
	u8 *data = skb->data;
	int len = skb->len;
	struct l2cap_cmd_hdr cmd;
	int err;

	l2cap_raw_recv(conn, skb);

	while (len >= L2CAP_CMD_HDR_SIZE) {
		u16 cmd_len;
		memcpy(&cmd, data, L2CAP_CMD_HDR_SIZE);
		data += L2CAP_CMD_HDR_SIZE;
		len -= L2CAP_CMD_HDR_SIZE;

		cmd_len = le16_to_cpu(cmd.len);

		BT_DBG("code 0x%2.2x len %d id 0x%2.2x", cmd.code, cmd_len, cmd.ident);

		/* A command claiming more payload than remains, or with a
		 * zero identifier, is corrupt: stop parsing this frame. */
		if (cmd_len > len || !cmd.ident) {
			BT_DBG("corrupted command");
			break;
		}

		if (conn->hcon->type == LE_LINK)
			err = l2cap_le_sig_cmd(conn, &cmd, data);
		else
			err = l2cap_bredr_sig_cmd(conn, &cmd, cmd_len, data);

		if (err) {
			struct l2cap_cmd_rej rej;
			BT_DBG("error %d", err);

			/* FIXME: Map err to a valid reason */
			rej.reason = cpu_to_le16(0);
			l2cap_send_cmd(conn, cmd.ident, L2CAP_COMMAND_REJ, sizeof(rej), &rej);
		}

		data += cmd_len;
		len -= cmd_len;
	}

	kfree_skb(skb);
}
2688
2689 static int l2cap_check_fcs(struct l2cap_pinfo *pi, struct sk_buff *skb)
2690 {
2691 u16 our_fcs, rcv_fcs;
2692 int hdr_size = L2CAP_HDR_SIZE + 2;
2693
2694 if (pi->fcs == L2CAP_FCS_CRC16) {
2695 skb_trim(skb, skb->len - 2);
2696 rcv_fcs = get_unaligned_le16(skb->data + skb->len);
2697 our_fcs = crc16(0, skb->data - hdr_size, skb->len + hdr_size);
2698
2699 if (our_fcs != rcv_fcs)
2700 return -EBADMSG;
2701 }
2702 return 0;
2703 }
2704
/* Answer a received poll: transmit pending I-frames, or an RR/RNR
 * supervisory frame when nothing went out, reflecting our local busy
 * state so the peer always gets a response. */
static inline void l2cap_send_i_or_rr_or_rnr(struct sock *sk)
{
	struct l2cap_pinfo *pi = l2cap_pi(sk);
	u16 control = 0;

	pi->frames_sent = 0;

	control |= pi->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT;

	if (pi->conn_state & L2CAP_CONN_LOCAL_BUSY) {
		/* We cannot receive: tell the peer we are not ready. */
		control |= L2CAP_SUPER_RCV_NOT_READY;
		l2cap_send_sframe(pi, control);
		pi->conn_state |= L2CAP_CONN_RNR_SENT;
	}

	if (pi->conn_state & L2CAP_CONN_REMOTE_BUSY)
		l2cap_retransmit_frames(sk);

	l2cap_ertm_send(sk);

	/* Nothing transmitted and not busy: answer with a plain RR so
	 * the peer's poll is still acknowledged. */
	if (!(pi->conn_state & L2CAP_CONN_LOCAL_BUSY) &&
			pi->frames_sent == 0) {
		control |= L2CAP_SUPER_RCV_READY;
		l2cap_send_sframe(pi, control);
	}
}
2731
/* Insert an out-of-sequence I-frame into the SREJ queue, keeping the
 * queue ordered by tx_seq (modulo-64 distance from buffer_seq).
 * Returns 0 on insertion, -EINVAL when a frame with the same tx_seq
 * is already queued (duplicate). */
static int l2cap_add_to_srej_queue(struct sock *sk, struct sk_buff *skb, u8 tx_seq, u8 sar)
{
	struct sk_buff *next_skb;
	struct l2cap_pinfo *pi = l2cap_pi(sk);
	int tx_seq_offset, next_tx_seq_offset;

	bt_cb(skb)->tx_seq = tx_seq;
	bt_cb(skb)->sar = sar;

	next_skb = skb_peek(SREJ_QUEUE(sk));
	if (!next_skb) {
		__skb_queue_tail(SREJ_QUEUE(sk), skb);
		return 0;
	}

	/* Distances are computed relative to buffer_seq so that the
	 * modulo-64 wrap-around of sequence numbers compares correctly. */
	tx_seq_offset = (tx_seq - pi->buffer_seq) % 64;
	if (tx_seq_offset < 0)
		tx_seq_offset += 64;

	do {
		if (bt_cb(next_skb)->tx_seq == tx_seq)
			return -EINVAL;

		next_tx_seq_offset = (bt_cb(next_skb)->tx_seq -
						pi->buffer_seq) % 64;
		if (next_tx_seq_offset < 0)
			next_tx_seq_offset += 64;

		/* Found the first queued frame that belongs after us. */
		if (next_tx_seq_offset > tx_seq_offset) {
			__skb_queue_before(SREJ_QUEUE(sk), next_skb, skb);
			return 0;
		}

		if (skb_queue_is_last(SREJ_QUEUE(sk), next_skb))
			break;

	} while ((next_skb = skb_queue_next(SREJ_QUEUE(sk), next_skb)));

	__skb_queue_tail(SREJ_QUEUE(sk), skb);

	return 0;
}
2774
/* Reassemble ERTM I-frames into SDUs according to their SAR bits and
 * deliver complete SDUs to the socket receive queue.
 * Returns 0 (frame consumed), -ENOMEM on allocation failure, or the
 * sock_queue_rcv_skb() error; a negative return leaves the skb with
 * the caller so local-busy handling can retry it later. */
static int l2cap_ertm_reassembly_sdu(struct sock *sk, struct sk_buff *skb, u16 control)
{
	struct l2cap_pinfo *pi = l2cap_pi(sk);
	struct sk_buff *_skb;
	int err;

	switch (control & L2CAP_CTRL_SAR) {
	case L2CAP_SDU_UNSEGMENTED:
		/* An unsegmented SDU while a segmented one is still being
		 * collected is a protocol violation. */
		if (pi->conn_state & L2CAP_CONN_SAR_SDU)
			goto drop;

		err = sock_queue_rcv_skb(sk, skb);
		if (!err)
			return err;

		break;

	case L2CAP_SDU_START:
		if (pi->conn_state & L2CAP_CONN_SAR_SDU)
			goto drop;

		pi->sdu_len = get_unaligned_le16(skb->data);

		/* An SDU larger than our incoming MTU tears the channel. */
		if (pi->sdu_len > pi->imtu)
			goto disconnect;

		pi->sdu = bt_skb_alloc(pi->sdu_len, GFP_ATOMIC);
		if (!pi->sdu)
			return -ENOMEM;

		/* pull sdu_len bytes only after alloc, because of Local Busy
		 * condition we have to be sure that this will be executed
		 * only once, i.e., when alloc does not fail */
		skb_pull(skb, 2);

		memcpy(skb_put(pi->sdu, skb->len), skb->data, skb->len);

		pi->conn_state |= L2CAP_CONN_SAR_SDU;
		pi->partial_sdu_len = skb->len;
		break;

	case L2CAP_SDU_CONTINUE:
		if (!(pi->conn_state & L2CAP_CONN_SAR_SDU))
			goto disconnect;

		if (!pi->sdu)
			goto disconnect;

		pi->partial_sdu_len += skb->len;
		if (pi->partial_sdu_len > pi->sdu_len)
			goto drop;

		memcpy(skb_put(pi->sdu, skb->len), skb->data, skb->len);

		break;

	case L2CAP_SDU_END:
		if (!(pi->conn_state & L2CAP_CONN_SAR_SDU))
			goto disconnect;

		if (!pi->sdu)
			goto disconnect;

		/* On a retry after -ENOMEM/queue-full the payload was
		 * already accounted and copied; do it only once. */
		if (!(pi->conn_state & L2CAP_CONN_SAR_RETRY)) {
			pi->partial_sdu_len += skb->len;

			if (pi->partial_sdu_len > pi->imtu)
				goto drop;

			if (pi->partial_sdu_len != pi->sdu_len)
				goto drop;

			memcpy(skb_put(pi->sdu, skb->len), skb->data, skb->len);
		}

		_skb = skb_clone(pi->sdu, GFP_ATOMIC);
		if (!_skb) {
			pi->conn_state |= L2CAP_CONN_SAR_RETRY;
			return -ENOMEM;
		}

		err = sock_queue_rcv_skb(sk, _skb);
		if (err < 0) {
			kfree_skb(_skb);
			pi->conn_state |= L2CAP_CONN_SAR_RETRY;
			return err;
		}

		pi->conn_state &= ~L2CAP_CONN_SAR_RETRY;
		pi->conn_state &= ~L2CAP_CONN_SAR_SDU;

		kfree_skb(pi->sdu);
		break;
	}

	kfree_skb(skb);
	return 0;

drop:
	kfree_skb(pi->sdu);
	pi->sdu = NULL;

	/* NOTE: drop deliberately falls through to disconnect — a SAR
	 * violation also tears the channel down. */
disconnect:
	l2cap_send_disconn_req(pi->conn, sk, ECONNRESET);
	kfree_skb(skb);
	return 0;
}
2882
/* Drain the queue of frames buffered while locally busy.  When the
 * queue empties, leave the local-busy state; if an RNR had been sent,
 * poll the peer (RR with P=1) and start the monitor timer.
 * Returns 0 when fully drained, -EBUSY when still congested. */
static int l2cap_try_push_rx_skb(struct sock *sk)
{
	struct l2cap_pinfo *pi = l2cap_pi(sk);
	struct sk_buff *skb;
	u16 control;
	int err;

	while ((skb = skb_dequeue(BUSY_QUEUE(sk)))) {
		control = bt_cb(skb)->sar << L2CAP_CTRL_SAR_SHIFT;
		err = l2cap_ertm_reassembly_sdu(sk, skb, control);
		if (err < 0) {
			/* Still congested: put the frame back in front
			 * so ordering is preserved for the next try. */
			skb_queue_head(BUSY_QUEUE(sk), skb);
			return -EBUSY;
		}

		pi->buffer_seq = (pi->buffer_seq + 1) % 64;
	}

	if (!(pi->conn_state & L2CAP_CONN_RNR_SENT))
		goto done;

	/* We told the peer we were not ready; now poll it with RR(P=1)
	 * and wait for the final response under the monitor timer. */
	control = pi->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT;
	control |= L2CAP_SUPER_RCV_READY | L2CAP_CTRL_POLL;
	l2cap_send_sframe(pi, control);
	l2cap_pi(sk)->retry_count = 1;

	del_timer(&pi->retrans_timer);
	__mod_monitor_timer();

	l2cap_pi(sk)->conn_state |= L2CAP_CONN_WAIT_F;

done:
	pi->conn_state &= ~L2CAP_CONN_LOCAL_BUSY;
	pi->conn_state &= ~L2CAP_CONN_RNR_SENT;

	BT_DBG("sk %p, Exit local busy", sk);

	return 0;
}
2922
/* Workqueue handler for the local-busy condition: periodically retry
 * pushing buffered frames into the socket until it succeeds, the
 * retry budget is exhausted (then disconnect), a signal arrives, or
 * the socket errors out.  The socket lock is dropped while sleeping
 * between attempts. */
static void l2cap_busy_work(struct work_struct *work)
{
	DECLARE_WAITQUEUE(wait, current);
	struct l2cap_pinfo *pi =
		container_of(work, struct l2cap_pinfo, busy_work);
	struct sock *sk = (struct sock *)pi;
	int n_tries = 0, timeo = HZ/5, err;
	struct sk_buff *skb;

	lock_sock(sk);

	add_wait_queue(sk_sleep(sk), &wait);
	while ((skb = skb_peek(BUSY_QUEUE(sk)))) {
		set_current_state(TASK_INTERRUPTIBLE);

		/* Give up and disconnect after too many attempts. */
		if (n_tries++ > L2CAP_LOCAL_BUSY_TRIES) {
			err = -EBUSY;
			l2cap_send_disconn_req(pi->conn, sk, EBUSY);
			break;
		}

		if (!timeo)
			timeo = HZ/5;

		if (signal_pending(current)) {
			err = sock_intr_errno(timeo);
			break;
		}

		/* Sleep without the socket lock so the receiver can make
		 * progress and free up receive buffer space. */
		release_sock(sk);
		timeo = schedule_timeout(timeo);
		lock_sock(sk);

		err = sock_error(sk);
		if (err)
			break;

		if (l2cap_try_push_rx_skb(sk) == 0)
			break;
	}

	set_current_state(TASK_RUNNING);
	remove_wait_queue(sk_sleep(sk), &wait);

	release_sock(sk);
}
2969
/* Feed a received in-sequence I-frame into reassembly, or buffer it
 * when the local-busy condition is (or becomes) active.  Entering
 * local busy sends an RNR and schedules the busy worker to drain the
 * backlog.  Returns the reassembly result (negative while busy). */
static int l2cap_push_rx_skb(struct sock *sk, struct sk_buff *skb, u16 control)
{
	struct l2cap_pinfo *pi = l2cap_pi(sk);
	int sctrl, err;

	if (pi->conn_state & L2CAP_CONN_LOCAL_BUSY) {
		/* Already busy: queue behind the pending frames and try
		 * to drain the backlog in order. */
		bt_cb(skb)->sar = control >> L2CAP_CTRL_SAR_SHIFT;
		__skb_queue_tail(BUSY_QUEUE(sk), skb);
		return l2cap_try_push_rx_skb(sk);


	}

	err = l2cap_ertm_reassembly_sdu(sk, skb, control);
	if (err >= 0) {
		pi->buffer_seq = (pi->buffer_seq + 1) % 64;
		return err;
	}

	/* Busy Condition */
	BT_DBG("sk %p, Enter local busy", sk);

	pi->conn_state |= L2CAP_CONN_LOCAL_BUSY;
	bt_cb(skb)->sar = control >> L2CAP_CTRL_SAR_SHIFT;
	__skb_queue_tail(BUSY_QUEUE(sk), skb);

	/* Tell the peer to stop sending until we recover. */
	sctrl = pi->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT;
	sctrl |= L2CAP_SUPER_RCV_NOT_READY;
	l2cap_send_sframe(pi, sctrl);

	pi->conn_state |= L2CAP_CONN_RNR_SENT;

	del_timer(&pi->ack_timer);

	queue_work(_busy_wq, &pi->busy_work);

	return err;
}
3008
3009 static int l2cap_streaming_reassembly_sdu(struct sock *sk, struct sk_buff *skb, u16 control)
3010 {
3011 struct l2cap_pinfo *pi = l2cap_pi(sk);
3012 struct sk_buff *_skb;
3013 int err = -EINVAL;
3014
3015 /*
3016 * TODO: We have to notify the userland if some data is lost with the
3017 * Streaming Mode.
3018 */
3019
3020 switch (control & L2CAP_CTRL_SAR) {
3021 case L2CAP_SDU_UNSEGMENTED:
3022 if (pi->conn_state & L2CAP_CONN_SAR_SDU) {
3023 kfree_skb(pi->sdu);
3024 break;
3025 }
3026
3027 err = sock_queue_rcv_skb(sk, skb);
3028 if (!err)
3029 return 0;
3030
3031 break;
3032
3033 case L2CAP_SDU_START:
3034 if (pi->conn_state & L2CAP_CONN_SAR_SDU) {
3035 kfree_skb(pi->sdu);
3036 break;
3037 }
3038
3039 pi->sdu_len = get_unaligned_le16(skb->data);
3040 skb_pull(skb, 2);
3041
3042 if (pi->sdu_len > pi->imtu) {
3043 err = -EMSGSIZE;
3044 break;
3045 }
3046
3047 pi->sdu = bt_skb_alloc(pi->sdu_len, GFP_ATOMIC);
3048 if (!pi->sdu) {
3049 err = -ENOMEM;
3050 break;
3051 }
3052
3053 memcpy(skb_put(pi->sdu, skb->len), skb->data, skb->len);
3054
3055 pi->conn_state |= L2CAP_CONN_SAR_SDU;
3056 pi->partial_sdu_len = skb->len;
3057 err = 0;
3058 break;
3059
3060 case L2CAP_SDU_CONTINUE:
3061 if (!(pi->conn_state & L2CAP_CONN_SAR_SDU))
3062 break;
3063
3064 memcpy(skb_put(pi->sdu, skb->len), skb->data, skb->len);
3065
3066 pi->partial_sdu_len += skb->len;
3067 if (pi->partial_sdu_len > pi->sdu_len)
3068 kfree_skb(pi->sdu);
3069 else
3070 err = 0;
3071
3072 break;
3073
3074 case L2CAP_SDU_END:
3075 if (!(pi->conn_state & L2CAP_CONN_SAR_SDU))
3076 break;
3077
3078 memcpy(skb_put(pi->sdu, skb->len), skb->data, skb->len);
3079
3080 pi->conn_state &= ~L2CAP_CONN_SAR_SDU;
3081 pi->partial_sdu_len += skb->len;
3082
3083 if (pi->partial_sdu_len > pi->imtu)
3084 goto drop;
3085
3086 if (pi->partial_sdu_len == pi->sdu_len) {
3087 _skb = skb_clone(pi->sdu, GFP_ATOMIC);
3088 err = sock_queue_rcv_skb(sk, _skb);
3089 if (err < 0)
3090 kfree_skb(_skb);
3091 }
3092 err = 0;
3093
3094 drop:
3095 kfree_skb(pi->sdu);
3096 break;
3097 }
3098
3099 kfree_skb(skb);
3100 return err;
3101 }
3102
/* A gap in the receive sequence has just been filled: deliver every
 * consecutively-sequenced frame waiting in the SREJ queue, advancing
 * buffer_seq_srej for each one, and stop at the next hole. */
static void l2cap_check_srej_gap(struct sock *sk, u8 tx_seq)
{
	struct sk_buff *skb;
	u16 control;

	while ((skb = skb_peek(SREJ_QUEUE(sk)))) {
		if (bt_cb(skb)->tx_seq != tx_seq)
			break;

		skb = skb_dequeue(SREJ_QUEUE(sk));
		control = bt_cb(skb)->sar << L2CAP_CTRL_SAR_SHIFT;
		l2cap_ertm_reassembly_sdu(sk, skb, control);
		l2cap_pi(sk)->buffer_seq_srej =
			(l2cap_pi(sk)->buffer_seq_srej + 1) % 64;
		tx_seq = (tx_seq + 1) % 64;
	}
}
3120
/* The peer retransmitted the frame for one of our pending SREJs: drop
 * the satisfied entry from the SREJ list, re-sending an SREJ for every
 * entry encountered before it and rotating each re-sent entry to the
 * back of the list so the list keeps request order. */
static void l2cap_resend_srejframe(struct sock *sk, u8 tx_seq)
{
	struct l2cap_pinfo *pi = l2cap_pi(sk);
	struct srej_list *l, *tmp;
	u16 control;

	list_for_each_entry_safe(l, tmp, SREJ_LIST(sk), list) {
		if (l->tx_seq == tx_seq) {
			/* This SREJ is now satisfied: forget it. */
			list_del(&l->list);
			kfree(l);
			return;
		}
		control = L2CAP_SUPER_SELECT_REJECT;
		control |= l->tx_seq << L2CAP_CTRL_REQSEQ_SHIFT;
		l2cap_send_sframe(pi, control);
		list_del(&l->list);
		list_add_tail(&l->list, SREJ_LIST(sk));
	}
}
3140
3141 static void l2cap_send_srejframe(struct sock *sk, u8 tx_seq)
3142 {
3143 struct l2cap_pinfo *pi = l2cap_pi(sk);
3144 struct srej_list *new;
3145 u16 control;
3146
3147 while (tx_seq != pi->expected_tx_seq) {
3148 control = L2CAP_SUPER_SELECT_REJECT;
3149 control |= pi->expected_tx_seq << L2CAP_CTRL_REQSEQ_SHIFT;
3150 l2cap_send_sframe(pi, control);
3151
3152 new = kzalloc(sizeof(struct srej_list), GFP_ATOMIC);
3153 new->tx_seq = pi->expected_tx_seq;
3154 pi->expected_tx_seq = (pi->expected_tx_seq + 1) % 64;
3155 list_add_tail(&new->list, SREJ_LIST(sk));
3156 }
3157 pi->expected_tx_seq = (pi->expected_tx_seq + 1) % 64;
3158 }
3159
3160 static inline int l2cap_data_channel_iframe(struct sock *sk, u16 rx_control, struct sk_buff *skb)
3161 {
3162 struct l2cap_pinfo *pi = l2cap_pi(sk);
3163 u8 tx_seq = __get_txseq(rx_control);
3164 u8 req_seq = __get_reqseq(rx_control);
3165 u8 sar = rx_control >> L2CAP_CTRL_SAR_SHIFT;
3166 int tx_seq_offset, expected_tx_seq_offset;
3167 int num_to_ack = (pi->tx_win/6) + 1;
3168 int err = 0;
3169
3170 BT_DBG("sk %p len %d tx_seq %d rx_control 0x%4.4x", sk, skb->len, tx_seq,
3171 rx_control);
3172
3173 if (L2CAP_CTRL_FINAL & rx_control &&
3174 l2cap_pi(sk)->conn_state & L2CAP_CONN_WAIT_F) {
3175 del_timer(&pi->monitor_timer);
3176 if (pi->unacked_frames > 0)
3177 __mod_retrans_timer();
3178 pi->conn_state &= ~L2CAP_CONN_WAIT_F;
3179 }
3180
3181 pi->expected_ack_seq = req_seq;
3182 l2cap_drop_acked_frames(sk);
3183
3184 if (tx_seq == pi->expected_tx_seq)
3185 goto expected;
3186
3187 tx_seq_offset = (tx_seq - pi->buffer_seq) % 64;
3188 if (tx_seq_offset < 0)
3189 tx_seq_offset += 64;
3190
3191 /* invalid tx_seq */
3192 if (tx_seq_offset >= pi->tx_win) {
3193 l2cap_send_disconn_req(pi->conn, sk, ECONNRESET);
3194 goto drop;
3195 }
3196
3197 if (pi->conn_state == L2CAP_CONN_LOCAL_BUSY)
3198 goto drop;
3199
3200 if (pi->conn_state & L2CAP_CONN_SREJ_SENT) {
3201 struct srej_list *first;
3202
3203 first = list_first_entry(SREJ_LIST(sk),
3204 struct srej_list, list);
3205 if (tx_seq == first->tx_seq) {
3206 l2cap_add_to_srej_queue(sk, skb, tx_seq, sar);
3207 l2cap_check_srej_gap(sk, tx_seq);
3208
3209 list_del(&first->list);
3210 kfree(first);
3211
3212 if (list_empty(SREJ_LIST(sk))) {
3213 pi->buffer_seq = pi->buffer_seq_srej;
3214 pi->conn_state &= ~L2CAP_CONN_SREJ_SENT;
3215 l2cap_send_ack(pi);
3216 BT_DBG("sk %p, Exit SREJ_SENT", sk);
3217 }
3218 } else {
3219 struct srej_list *l;
3220
3221 /* duplicated tx_seq */
3222 if (l2cap_add_to_srej_queue(sk, skb, tx_seq, sar) < 0)
3223 goto drop;
3224
3225 list_for_each_entry(l, SREJ_LIST(sk), list) {
3226 if (l->tx_seq == tx_seq) {
3227 l2cap_resend_srejframe(sk, tx_seq);
3228 return 0;
3229 }
3230 }
3231 l2cap_send_srejframe(sk, tx_seq);
3232 }
3233 } else {
3234 expected_tx_seq_offset =
3235 (pi->expected_tx_seq - pi->buffer_seq) % 64;
3236 if (expected_tx_seq_offset < 0)
3237 expected_tx_seq_offset += 64;
3238
3239 /* duplicated tx_seq */
3240 if (tx_seq_offset < expected_tx_seq_offset)
3241 goto drop;
3242
3243 pi->conn_state |= L2CAP_CONN_SREJ_SENT;
3244
3245 BT_DBG("sk %p, Enter SREJ", sk);
3246
3247 INIT_LIST_HEAD(SREJ_LIST(sk));
3248 pi->buffer_seq_srej = pi->buffer_seq;
3249
3250 __skb_queue_head_init(SREJ_QUEUE(sk));
3251 __skb_queue_head_init(BUSY_QUEUE(sk));
3252 l2cap_add_to_srej_queue(sk, skb, tx_seq, sar);
3253
3254 pi->conn_state |= L2CAP_CONN_SEND_PBIT;
3255
3256 l2cap_send_srejframe(sk, tx_seq);
3257
3258 del_timer(&pi->ack_timer);
3259 }
3260 return 0;
3261
3262 expected:
3263 pi->expected_tx_seq = (pi->expected_tx_seq + 1) % 64;
3264
3265 if (pi->conn_state & L2CAP_CONN_SREJ_SENT) {
3266 bt_cb(skb)->tx_seq = tx_seq;
3267 bt_cb(skb)->sar = sar;
3268 __skb_queue_tail(SREJ_QUEUE(sk), skb);
3269 return 0;
3270 }
3271
3272 err = l2cap_push_rx_skb(sk, skb, rx_control);
3273 if (err < 0)
3274 return 0;
3275
3276 if (rx_control & L2CAP_CTRL_FINAL) {
3277 if (pi->conn_state & L2CAP_CONN_REJ_ACT)
3278 pi->conn_state &= ~L2CAP_CONN_REJ_ACT;
3279 else
3280 l2cap_retransmit_frames(sk);
3281 }
3282
3283 __mod_ack_timer();
3284
3285 pi->num_acked = (pi->num_acked + 1) % num_to_ack;
3286 if (pi->num_acked == num_to_ack - 1)
3287 l2cap_send_ack(pi);
3288
3289 return 0;
3290
3291 drop:
3292 kfree_skb(skb);
3293 return 0;
3294 }
3295
/* Handle a Receiver Ready S-frame: acknowledge the peer's req_seq,
 * then service the poll/final bits — P=1 demands an answer carrying
 * F=1, F=1 completes a pending poll of ours. */
static inline void l2cap_data_channel_rrframe(struct sock *sk, u16 rx_control)
{
	struct l2cap_pinfo *pi = l2cap_pi(sk);

	BT_DBG("sk %p, req_seq %d ctrl 0x%4.4x", sk, __get_reqseq(rx_control),
						rx_control);

	pi->expected_ack_seq = __get_reqseq(rx_control);
	l2cap_drop_acked_frames(sk);

	if (rx_control & L2CAP_CTRL_POLL) {
		pi->conn_state |= L2CAP_CONN_SEND_FBIT;
		if (pi->conn_state & L2CAP_CONN_SREJ_SENT) {
			if ((pi->conn_state & L2CAP_CONN_REMOTE_BUSY) &&
					(pi->unacked_frames > 0))
				__mod_retrans_timer();

			pi->conn_state &= ~L2CAP_CONN_REMOTE_BUSY;
			/* In SREJ recovery, answer the poll with the tail
			 * SREJ so the peer learns what is still missing. */
			l2cap_send_srejtail(sk);
		} else {
			l2cap_send_i_or_rr_or_rnr(sk);
		}

	} else if (rx_control & L2CAP_CTRL_FINAL) {
		pi->conn_state &= ~L2CAP_CONN_REMOTE_BUSY;

		/* A pending REJ was already acted upon: just clear it. */
		if (pi->conn_state & L2CAP_CONN_REJ_ACT)
			pi->conn_state &= ~L2CAP_CONN_REJ_ACT;
		else
			l2cap_retransmit_frames(sk);

	} else {
		if ((pi->conn_state & L2CAP_CONN_REMOTE_BUSY) &&
				(pi->unacked_frames > 0))
			__mod_retrans_timer();

		pi->conn_state &= ~L2CAP_CONN_REMOTE_BUSY;
		if (pi->conn_state & L2CAP_CONN_SREJ_SENT)
			l2cap_send_ack(pi);
		else
			l2cap_ertm_send(sk);
	}
}
3339
/* Handle a Reject (REJ) S-frame: the peer requests a go-back-N
 * retransmission starting at req_seq.  With F=1 the frame may instead
 * complete a poll for which the retransmission already happened. */
static inline void l2cap_data_channel_rejframe(struct sock *sk, u16 rx_control)
{
	struct l2cap_pinfo *pi = l2cap_pi(sk);
	u8 tx_seq = __get_reqseq(rx_control);

	BT_DBG("sk %p, req_seq %d ctrl 0x%4.4x", sk, tx_seq, rx_control);

	pi->conn_state &= ~L2CAP_CONN_REMOTE_BUSY;

	pi->expected_ack_seq = tx_seq;
	l2cap_drop_acked_frames(sk);

	if (rx_control & L2CAP_CTRL_FINAL) {
		/* Already retransmitted for this REJ: don't repeat it. */
		if (pi->conn_state & L2CAP_CONN_REJ_ACT)
			pi->conn_state &= ~L2CAP_CONN_REJ_ACT;
		else
			l2cap_retransmit_frames(sk);
	} else {
		l2cap_retransmit_frames(sk);

		/* Remember we acted on this REJ while a poll is in
		 * flight, so its F=1 answer doesn't retransmit again. */
		if (pi->conn_state & L2CAP_CONN_WAIT_F)
			pi->conn_state |= L2CAP_CONN_REJ_ACT;
	}
}
/* Handle a Selective Reject S-frame: retransmit the single requested
 * frame, tracking poll/final handshakes so the retransmission is not
 * duplicated when the corresponding F=1 answer arrives. */
static inline void l2cap_data_channel_srejframe(struct sock *sk, u16 rx_control)
{
	struct l2cap_pinfo *pi = l2cap_pi(sk);
	u8 tx_seq = __get_reqseq(rx_control);

	BT_DBG("sk %p, req_seq %d ctrl 0x%4.4x", sk, tx_seq, rx_control);

	pi->conn_state &= ~L2CAP_CONN_REMOTE_BUSY;

	if (rx_control & L2CAP_CTRL_POLL) {
		/* SREJ with P=1 also acknowledges up to tx_seq. */
		pi->expected_ack_seq = tx_seq;
		l2cap_drop_acked_frames(sk);

		pi->conn_state |= L2CAP_CONN_SEND_FBIT;
		l2cap_retransmit_one_frame(sk, tx_seq);

		l2cap_ertm_send(sk);

		if (pi->conn_state & L2CAP_CONN_WAIT_F) {
			pi->srej_save_reqseq = tx_seq;
			pi->conn_state |= L2CAP_CONN_SREJ_ACT;
		}
	} else if (rx_control & L2CAP_CTRL_FINAL) {
		/* Skip the retransmission if this frame was already
		 * re-sent for the saved poll sequence. */
		if ((pi->conn_state & L2CAP_CONN_SREJ_ACT) &&
				pi->srej_save_reqseq == tx_seq)
			pi->conn_state &= ~L2CAP_CONN_SREJ_ACT;
		else
			l2cap_retransmit_one_frame(sk, tx_seq);
	} else {
		l2cap_retransmit_one_frame(sk, tx_seq);
		if (pi->conn_state & L2CAP_CONN_WAIT_F) {
			pi->srej_save_reqseq = tx_seq;
			pi->conn_state |= L2CAP_CONN_SREJ_ACT;
		}
	}
}
3400
/* Handle a Receiver Not Ready S-frame: the peer cannot accept more
 * I-frames.  Mark it busy, drop the frames it acknowledged, and when
 * polled answer with either the SREJ tail (during SREJ recovery) or
 * an RR/RNR carrying F=1. */
static inline void l2cap_data_channel_rnrframe(struct sock *sk, u16 rx_control)
{
	struct l2cap_pinfo *pi = l2cap_pi(sk);
	u8 tx_seq = __get_reqseq(rx_control);

	BT_DBG("sk %p, req_seq %d ctrl 0x%4.4x", sk, tx_seq, rx_control);

	pi->conn_state |= L2CAP_CONN_REMOTE_BUSY;
	pi->expected_ack_seq = tx_seq;
	l2cap_drop_acked_frames(sk);

	if (rx_control & L2CAP_CTRL_POLL)
		pi->conn_state |= L2CAP_CONN_SEND_FBIT;

	if (!(pi->conn_state & L2CAP_CONN_SREJ_SENT)) {
		/* Peer is busy: stop retransmitting until it recovers. */
		del_timer(&pi->retrans_timer);
		if (rx_control & L2CAP_CTRL_POLL)
			l2cap_send_rr_or_rnr(pi, L2CAP_CTRL_FINAL);
		return;
	}

	if (rx_control & L2CAP_CTRL_POLL)
		l2cap_send_srejtail(sk);
	else
		l2cap_send_sframe(pi, L2CAP_SUPER_RCV_READY);
}
3427
/* Dispatch a supervisory frame.  First service the final bit if we
 * were waiting for one (stop the monitor timer, resume retransmission
 * timing), then branch on the supervisory function.  Consumes skb;
 * always returns 0. */
static inline int l2cap_data_channel_sframe(struct sock *sk, u16 rx_control, struct sk_buff *skb)
{
	BT_DBG("sk %p rx_control 0x%4.4x len %d", sk, rx_control, skb->len);

	if (L2CAP_CTRL_FINAL & rx_control &&
			l2cap_pi(sk)->conn_state & L2CAP_CONN_WAIT_F) {
		del_timer(&l2cap_pi(sk)->monitor_timer);
		if (l2cap_pi(sk)->unacked_frames > 0)
			__mod_retrans_timer();
		l2cap_pi(sk)->conn_state &= ~L2CAP_CONN_WAIT_F;
	}

	switch (rx_control & L2CAP_CTRL_SUPERVISE) {
	case L2CAP_SUPER_RCV_READY:
		l2cap_data_channel_rrframe(sk, rx_control);
		break;

	case L2CAP_SUPER_REJECT:
		l2cap_data_channel_rejframe(sk, rx_control);
		break;

	case L2CAP_SUPER_SELECT_REJECT:
		l2cap_data_channel_srejframe(sk, rx_control);
		break;

	case L2CAP_SUPER_RCV_NOT_READY:
		l2cap_data_channel_rnrframe(sk, rx_control);
		break;
	}

	kfree_skb(skb);
	return 0;
}
3461
/* Validate and dispatch one received ERTM frame: check the FCS, the
 * payload length against MPS, and the req_seq acknowledgement window,
 * then hand the frame to the I-frame or S-frame handler.  Invalid
 * frames are dropped; protocol violations disconnect the channel.
 * Always returns 0. */
static int l2cap_ertm_data_rcv(struct sock *sk, struct sk_buff *skb)
{
	struct l2cap_pinfo *pi = l2cap_pi(sk);
	u16 control;
	u8 req_seq;
	int len, next_tx_seq_offset, req_seq_offset;

	control = get_unaligned_le16(skb->data);
	skb_pull(skb, 2);
	len = skb->len;

	/*
	 * We can just drop the corrupted I-frame here.
	 * Receiver will miss it and start proper recovery
	 * procedures and ask retransmission.
	 */
	if (l2cap_check_fcs(pi, skb))
		goto drop;

	/* Account for the 2-byte SDU length field in a SAR start frame. */
	if (__is_sar_start(control) && __is_iframe(control))
		len -= 2;

	if (pi->fcs == L2CAP_FCS_CRC16)
		len -= 2;

	if (len > pi->mps) {
		l2cap_send_disconn_req(pi->conn, sk, ECONNRESET);
		goto drop;
	}

	/* req_seq must fall inside [expected_ack_seq, next_tx_seq] when
	 * measured as modulo-64 offsets from expected_ack_seq. */
	req_seq = __get_reqseq(control);
	req_seq_offset = (req_seq - pi->expected_ack_seq) % 64;
	if (req_seq_offset < 0)
		req_seq_offset += 64;

	next_tx_seq_offset =
		(pi->next_tx_seq - pi->expected_ack_seq) % 64;
	if (next_tx_seq_offset < 0)
		next_tx_seq_offset += 64;

	/* check for invalid req-seq */
	if (req_seq_offset > next_tx_seq_offset) {
		l2cap_send_disconn_req(pi->conn, sk, ECONNRESET);
		goto drop;
	}

	if (__is_iframe(control)) {
		if (len < 0) {
			l2cap_send_disconn_req(pi->conn, sk, ECONNRESET);
			goto drop;
		}

		l2cap_data_channel_iframe(sk, control, skb);
	} else {
		/* S-frames carry no payload at all. */
		if (len != 0) {
			BT_ERR("%d", len);
			l2cap_send_disconn_req(pi->conn, sk, ECONNRESET);
			goto drop;
		}

		l2cap_data_channel_sframe(sk, control, skb);
	}

	return 0;

drop:
	kfree_skb(skb);
	return 0;
}
3531
/* Deliver an incoming data frame to the connected channel identified
 * by cid, according to the channel mode (basic, ERTM or streaming).
 * The channel lookup returns the socket locked; it is unlocked at the
 * done label.  Consumes skb on all paths; always returns 0. */
static inline int l2cap_data_channel(struct l2cap_conn *conn, u16 cid, struct sk_buff *skb)
{
	struct sock *sk;
	struct l2cap_pinfo *pi;
	u16 control;
	u8 tx_seq;
	int len;

	sk = l2cap_get_chan_by_scid(&conn->chan_list, cid);
	if (!sk) {
		BT_DBG("unknown cid 0x%4.4x", cid);
		goto drop;
	}

	pi = l2cap_pi(sk);

	BT_DBG("sk %p, len %d", sk, skb->len);

	if (sk->sk_state != BT_CONNECTED)
		goto drop;

	switch (pi->mode) {
	case L2CAP_MODE_BASIC:
		/* If socket recv buffers overflows we drop data here
		 * which is *bad* because L2CAP has to be reliable.
		 * But we don't have any other choice. L2CAP doesn't
		 * provide flow control mechanism. */

		if (pi->imtu < skb->len)
			goto drop;

		if (!sock_queue_rcv_skb(sk, skb))
			goto done;
		break;

	case L2CAP_MODE_ERTM:
		/* If user context holds the socket, defer the frame to
		 * the backlog instead of processing it here. */
		if (!sock_owned_by_user(sk)) {
			l2cap_ertm_data_rcv(sk, skb);
		} else {
			if (sk_add_backlog(sk, skb))
				goto drop;
		}

		goto done;

	case L2CAP_MODE_STREAMING:
		control = get_unaligned_le16(skb->data);
		skb_pull(skb, 2);
		len = skb->len;

		if (l2cap_check_fcs(pi, skb))
			goto drop;

		if (__is_sar_start(control))
			len -= 2;

		if (pi->fcs == L2CAP_FCS_CRC16)
			len -= 2;

		/* S-frames are not valid in streaming mode. */
		if (len > pi->mps || len < 0 || __is_sframe(control))
			goto drop;

		tx_seq = __get_txseq(control);

		/* Streaming mode never retransmits: just resync the
		 * expected sequence number on a mismatch. */
		if (pi->expected_tx_seq == tx_seq)
			pi->expected_tx_seq = (pi->expected_tx_seq + 1) % 64;
		else
			pi->expected_tx_seq = (tx_seq + 1) % 64;

		l2cap_streaming_reassembly_sdu(sk, skb, control);

		goto done;

	default:
		BT_DBG("sk %p: bad mode 0x%2.2x", sk, pi->mode);
		break;
	}

drop:
	kfree_skb(skb);

done:
	if (sk)
		bh_unlock_sock(sk);

	return 0;
}
3619
/* Deliver a connectionless-channel frame to the socket bound to the
 * given PSM on this adapter, if any, enforcing the incoming MTU.
 * Consumes skb on all paths; always returns 0. */
static inline int l2cap_conless_channel(struct l2cap_conn *conn, __le16 psm, struct sk_buff *skb)
{
	struct sock *sk;

	sk = l2cap_get_sock_by_psm(0, psm, conn->src);
	if (!sk)
		goto drop;

	bh_lock_sock(sk);

	BT_DBG("sk %p, len %d", sk, skb->len);

	if (sk->sk_state != BT_BOUND && sk->sk_state != BT_CONNECTED)
		goto drop;

	if (l2cap_pi(sk)->imtu < skb->len)
		goto drop;

	if (!sock_queue_rcv_skb(sk, skb))
		goto done;

drop:
	kfree_skb(skb);

done:
	if (sk)
		bh_unlock_sock(sk);
	return 0;
}
3649
3650 static void l2cap_recv_frame(struct l2cap_conn *conn, struct sk_buff *skb)
3651 {
3652 struct l2cap_hdr *lh = (void *) skb->data;
3653 u16 cid, len;
3654 __le16 psm;
3655
3656 skb_pull(skb, L2CAP_HDR_SIZE);
3657 cid = __le16_to_cpu(lh->cid);
3658 len = __le16_to_cpu(lh->len);
3659
3660 if (len != skb->len) {
3661 kfree_skb(skb);
3662 return;
3663 }
3664
3665 BT_DBG("len %d, cid 0x%4.4x", len, cid);
3666
3667 switch (cid) {
3668 case L2CAP_CID_LE_SIGNALING:
3669 case L2CAP_CID_SIGNALING:
3670 l2cap_sig_channel(conn, skb);
3671 break;
3672
3673 case L2CAP_CID_CONN_LESS:
3674 psm = get_unaligned_le16(skb->data);
3675 skb_pull(skb, 2);
3676 l2cap_conless_channel(conn, psm, skb);
3677 break;
3678
3679 default:
3680 l2cap_data_channel(conn, cid, skb);
3681 break;
3682 }
3683 }
3684
3685 /* ---- L2CAP interface with lower layer (HCI) ---- */
3686
3687 static int l2cap_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
3688 {
3689 int exact = 0, lm1 = 0, lm2 = 0;
3690 register struct sock *sk;
3691 struct hlist_node *node;
3692
3693 if (type != ACL_LINK)
3694 return -EINVAL;
3695
3696 BT_DBG("hdev %s, bdaddr %s", hdev->name, batostr(bdaddr));
3697
3698 /* Find listening sockets and check their link_mode */
3699 read_lock(&l2cap_sk_list.lock);
3700 sk_for_each(sk, node, &l2cap_sk_list.head) {
3701 if (sk->sk_state != BT_LISTEN)
3702 continue;
3703
3704 if (!bacmp(&bt_sk(sk)->src, &hdev->bdaddr)) {
3705 lm1 |= HCI_LM_ACCEPT;
3706 if (l2cap_pi(sk)->role_switch)
3707 lm1 |= HCI_LM_MASTER;
3708 exact++;
3709 } else if (!bacmp(&bt_sk(sk)->src, BDADDR_ANY)) {
3710 lm2 |= HCI_LM_ACCEPT;
3711 if (l2cap_pi(sk)->role_switch)
3712 lm2 |= HCI_LM_MASTER;
3713 }
3714 }
3715 read_unlock(&l2cap_sk_list.lock);
3716
3717 return exact ? lm1 : lm2;
3718 }
3719
3720 static int l2cap_connect_cfm(struct hci_conn *hcon, u8 status)
3721 {
3722 struct l2cap_conn *conn;
3723
3724 BT_DBG("hcon %p bdaddr %s status %d", hcon, batostr(&hcon->dst), status);
3725
3726 if (!(hcon->type == ACL_LINK || hcon->type == LE_LINK))
3727 return -EINVAL;
3728
3729 if (!status) {
3730 conn = l2cap_conn_add(hcon, status);
3731 if (conn)
3732 l2cap_conn_ready(conn);
3733 } else
3734 l2cap_conn_del(hcon, bt_err(status));
3735
3736 return 0;
3737 }
3738
3739 static int l2cap_disconn_ind(struct hci_conn *hcon)
3740 {
3741 struct l2cap_conn *conn = hcon->l2cap_data;
3742
3743 BT_DBG("hcon %p", hcon);
3744
3745 if (hcon->type != ACL_LINK || !conn)
3746 return 0x13;
3747
3748 return conn->disc_reason;
3749 }
3750
3751 static int l2cap_disconn_cfm(struct hci_conn *hcon, u8 reason)
3752 {
3753 BT_DBG("hcon %p reason %d", hcon, reason);
3754
3755 if (!(hcon->type == ACL_LINK || hcon->type == LE_LINK))
3756 return -EINVAL;
3757
3758 l2cap_conn_del(hcon, bt_err(reason));
3759
3760 return 0;
3761 }
3762
3763 static inline void l2cap_check_encryption(struct sock *sk, u8 encrypt)
3764 {
3765 if (sk->sk_type != SOCK_SEQPACKET && sk->sk_type != SOCK_STREAM)
3766 return;
3767
3768 if (encrypt == 0x00) {
3769 if (l2cap_pi(sk)->sec_level == BT_SECURITY_MEDIUM) {
3770 l2cap_sock_clear_timer(sk);
3771 l2cap_sock_set_timer(sk, HZ * 5);
3772 } else if (l2cap_pi(sk)->sec_level == BT_SECURITY_HIGH)
3773 __l2cap_sock_close(sk, ECONNREFUSED);
3774 } else {
3775 if (l2cap_pi(sk)->sec_level == BT_SECURITY_MEDIUM)
3776 l2cap_sock_clear_timer(sk);
3777 }
3778 }
3779
/* Security (authentication/encryption) procedure completed on the link.
 * Walk every channel on the connection and advance its state machine:
 * - channels already connected/configuring get the encryption check;
 * - channels waiting to connect (BT_CONNECT) send the deferred
 *   Connection Request on success, or arm a short teardown timer;
 * - channels awaiting acceptance (BT_CONNECT2) answer the peer's
 *   Connection Request with success or a security block.
 * Runs in softirq context; each socket is bh-locked while touched.
 * Always returns 0. */
static int l2cap_security_cfm(struct hci_conn *hcon, u8 status, u8 encrypt)
{
	struct l2cap_chan_list *l;
	struct l2cap_conn *conn = hcon->l2cap_data;
	struct sock *sk;

	if (!conn)
		return 0;

	l = &conn->chan_list;

	BT_DBG("conn %p", conn);

	read_lock(&l->lock);

	for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
		bh_lock_sock(sk);

		/* A connect is already in flight for this channel; the
		 * pending response path will handle the result. */
		if (l2cap_pi(sk)->conf_state & L2CAP_CONF_CONNECT_PEND) {
			bh_unlock_sock(sk);
			continue;
		}

		if (!status && (sk->sk_state == BT_CONNECTED ||
					sk->sk_state == BT_CONFIG)) {
			l2cap_check_encryption(sk, encrypt);
			bh_unlock_sock(sk);
			continue;
		}

		if (sk->sk_state == BT_CONNECT) {
			if (!status) {
				/* Security now satisfied: issue the
				 * Connection Request we were holding back. */
				struct l2cap_conn_req req;
				req.scid = cpu_to_le16(l2cap_pi(sk)->scid);
				req.psm = l2cap_pi(sk)->psm;

				l2cap_pi(sk)->ident = l2cap_get_ident(conn);
				l2cap_pi(sk)->conf_state |= L2CAP_CONF_CONNECT_PEND;

				l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
					L2CAP_CONN_REQ, sizeof(req), &req);
			} else {
				/* Security failed: give the channel a brief
				 * timeout so it is torn down shortly. */
				l2cap_sock_clear_timer(sk);
				l2cap_sock_set_timer(sk, HZ / 10);
			}
		} else if (sk->sk_state == BT_CONNECT2) {
			/* We deferred answering the peer's Connection
			 * Request until security completed; reply now. */
			struct l2cap_conn_rsp rsp;
			__u16 result;

			if (!status) {
				sk->sk_state = BT_CONFIG;
				result = L2CAP_CR_SUCCESS;
			} else {
				sk->sk_state = BT_DISCONN;
				l2cap_sock_set_timer(sk, HZ / 10);
				result = L2CAP_CR_SEC_BLOCK;
			}

			rsp.scid = cpu_to_le16(l2cap_pi(sk)->dcid);
			rsp.dcid = cpu_to_le16(l2cap_pi(sk)->scid);
			rsp.result = cpu_to_le16(result);
			rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
			l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
					L2CAP_CONN_RSP, sizeof(rsp), &rsp);
		}

		bh_unlock_sock(sk);
	}

	read_unlock(&l->lock);

	return 0;
}
3853
/* Receive one ACL data packet from HCI and reassemble L2CAP frames.
 * A frame may span several ACL packets: the first (start) fragment
 * carries the basic L2CAP header with the total length; continuation
 * fragments (ACL_CONT) are appended to conn->rx_skb until rx_len
 * reaches zero, at which point the complete frame is dispatched via
 * l2cap_recv_frame(). The incoming skb is always consumed here (its
 * payload is copied into rx_skb), except when a complete single-packet
 * frame is handed off directly. Always returns 0. */
static int l2cap_recv_acldata(struct hci_conn *hcon, struct sk_buff *skb, u16 flags)
{
	struct l2cap_conn *conn = hcon->l2cap_data;

	/* First data on this link: lazily create the connection object. */
	if (!conn)
		conn = l2cap_conn_add(hcon, 0);

	if (!conn)
		goto drop;

	BT_DBG("conn %p len %d flags 0x%x", conn, skb->len, flags);

	if (!(flags & ACL_CONT)) {
		struct l2cap_hdr *hdr;
		struct sock *sk;
		u16 cid;
		int len;

		/* A new start fragment while reassembly is in progress
		 * means we lost packets: discard the partial frame. */
		if (conn->rx_len) {
			BT_ERR("Unexpected start frame (len %d)", skb->len);
			kfree_skb(conn->rx_skb);
			conn->rx_skb = NULL;
			conn->rx_len = 0;
			l2cap_conn_unreliable(conn, ECOMM);
		}

		/* Start fragment always begin with Basic L2CAP header */
		if (skb->len < L2CAP_HDR_SIZE) {
			BT_ERR("Frame is too short (len %d)", skb->len);
			l2cap_conn_unreliable(conn, ECOMM);
			goto drop;
		}

		hdr = (struct l2cap_hdr *) skb->data;
		len = __le16_to_cpu(hdr->len) + L2CAP_HDR_SIZE;
		cid = __le16_to_cpu(hdr->cid);

		if (len == skb->len) {
			/* Complete frame received */
			l2cap_recv_frame(conn, skb);
			return 0;
		}

		BT_DBG("Start: total len %d, frag len %d", len, skb->len);

		if (skb->len > len) {
			BT_ERR("Frame is too long (len %d, expected len %d)",
				skb->len, len);
			l2cap_conn_unreliable(conn, ECOMM);
			goto drop;
		}

		sk = l2cap_get_chan_by_scid(&conn->chan_list, cid);

		/* Reject early if the announced frame would exceed the
		 * destination channel's incoming MTU.
		 * NOTE(review): l2cap_get_chan_by_scid appears to return
		 * with the socket bh-locked — hence the unlocks below;
		 * confirm against its definition. */
		if (sk && l2cap_pi(sk)->imtu < len - L2CAP_HDR_SIZE) {
			BT_ERR("Frame exceeding recv MTU (len %d, MTU %d)",
					len, l2cap_pi(sk)->imtu);
			bh_unlock_sock(sk);
			l2cap_conn_unreliable(conn, ECOMM);
			goto drop;
		}

		if (sk)
			bh_unlock_sock(sk);

		/* Allocate skb for the complete frame (with header) */
		conn->rx_skb = bt_skb_alloc(len, GFP_ATOMIC);
		if (!conn->rx_skb)
			goto drop;

		skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
								skb->len);
		conn->rx_len = len - skb->len;
	} else {
		BT_DBG("Cont: frag len %d (expecting %d)", skb->len, conn->rx_len);

		/* Continuation without a pending start fragment. */
		if (!conn->rx_len) {
			BT_ERR("Unexpected continuation frame (len %d)", skb->len);
			l2cap_conn_unreliable(conn, ECOMM);
			goto drop;
		}

		/* Fragment overruns the announced total: abort reassembly. */
		if (skb->len > conn->rx_len) {
			BT_ERR("Fragment is too long (len %d, expected %d)",
					skb->len, conn->rx_len);
			kfree_skb(conn->rx_skb);
			conn->rx_skb = NULL;
			conn->rx_len = 0;
			l2cap_conn_unreliable(conn, ECOMM);
			goto drop;
		}

		skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
								skb->len);
		conn->rx_len -= skb->len;

		if (!conn->rx_len) {
			/* Complete frame received */
			l2cap_recv_frame(conn, conn->rx_skb);
			conn->rx_skb = NULL;
		}
	}

drop:
	/* Success paths also land here: the fragment's payload has been
	 * copied into rx_skb, so the original skb is always freed. */
	kfree_skb(skb);
	return 0;
}
3961
3962 static int l2cap_debugfs_show(struct seq_file *f, void *p)
3963 {
3964 struct sock *sk;
3965 struct hlist_node *node;
3966
3967 read_lock_bh(&l2cap_sk_list.lock);
3968
3969 sk_for_each(sk, node, &l2cap_sk_list.head) {
3970 struct l2cap_pinfo *pi = l2cap_pi(sk);
3971
3972 seq_printf(f, "%s %s %d %d 0x%4.4x 0x%4.4x %d %d %d %d\n",
3973 batostr(&bt_sk(sk)->src),
3974 batostr(&bt_sk(sk)->dst),
3975 sk->sk_state, __le16_to_cpu(pi->psm),
3976 pi->scid, pi->dcid,
3977 pi->imtu, pi->omtu, pi->sec_level,
3978 pi->mode);
3979 }
3980
3981 read_unlock_bh(&l2cap_sk_list.lock);
3982
3983 return 0;
3984 }
3985
3986 static int l2cap_debugfs_open(struct inode *inode, struct file *file)
3987 {
3988 return single_open(file, l2cap_debugfs_show, inode->i_private);
3989 }
3990
/* File operations for the /sys/kernel/debug/bluetooth/l2cap entry;
 * the seq_file single_* helpers do the heavy lifting. */
static const struct file_operations l2cap_debugfs_fops = {
	.open		= l2cap_debugfs_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};
3997
/* Dentry of the debugfs entry, kept so l2cap_exit() can remove it. */
static struct dentry *l2cap_debugfs;
3999
/* Callbacks registered with the HCI core: connection setup/teardown,
 * security events and inbound ACL data all enter L2CAP through here. */
static struct hci_proto l2cap_hci_proto = {
	.name		= "L2CAP",
	.id		= HCI_PROTO_L2CAP,
	.connect_ind	= l2cap_connect_ind,
	.connect_cfm	= l2cap_connect_cfm,
	.disconn_ind	= l2cap_disconn_ind,
	.disconn_cfm	= l2cap_disconn_cfm,
	.security_cfm	= l2cap_security_cfm,
	.recv_acldata	= l2cap_recv_acldata
};
4010
4011 int __init l2cap_init(void)
4012 {
4013 int err;
4014
4015 err = l2cap_init_sockets();
4016 if (err < 0)
4017 return err;
4018
4019 _busy_wq = create_singlethread_workqueue("l2cap");
4020 if (!_busy_wq) {
4021 err = -ENOMEM;
4022 goto error;
4023 }
4024
4025 err = hci_register_proto(&l2cap_hci_proto);
4026 if (err < 0) {
4027 BT_ERR("L2CAP protocol registration failed");
4028 bt_sock_unregister(BTPROTO_L2CAP);
4029 goto error;
4030 }
4031
4032 if (bt_debugfs) {
4033 l2cap_debugfs = debugfs_create_file("l2cap", 0444,
4034 bt_debugfs, NULL, &l2cap_debugfs_fops);
4035 if (!l2cap_debugfs)
4036 BT_ERR("Failed to create L2CAP debug file");
4037 }
4038
4039 return 0;
4040
4041 error:
4042 destroy_workqueue(_busy_wq);
4043 l2cap_cleanup_sockets();
4044 return err;
4045 }
4046
/* Module exit: tear down in reverse order of l2cap_init() — remove the
 * debugfs entry, drain and destroy the workqueue, detach from the HCI
 * core, then unregister the socket family.
 * NOTE(review): the workqueue is destroyed before hci_unregister_proto,
 * so HCI callbacks could in principle still queue work afterwards —
 * confirm no queue_work() path remains reachable at this point. */
void l2cap_exit(void)
{
	debugfs_remove(l2cap_debugfs);

	/* Finish any queued busy-state work before destroying the queue. */
	flush_workqueue(_busy_wq);
	destroy_workqueue(_busy_wq);

	if (hci_unregister_proto(&l2cap_hci_proto) < 0)
		BT_ERR("L2CAP protocol unregistration failed");

	l2cap_cleanup_sockets();
}
4059
/* Allow disabling ERTM (enhanced retransmission mode) at load time or
 * via /sys/module/.../parameters/disable_ertm (world-readable,
 * root-writable). */
module_param(disable_ertm, bool, 0644);
MODULE_PARM_DESC(disable_ertm, "Disable enhanced retransmission mode");