[Bluetooth] Add locking for bt_proto array manipulation
[GitHub/mt8127/android_kernel_alcatel_ttab.git] / net / bluetooth / l2cap.c
1 /*
2 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (C) 2000-2001 Qualcomm Incorporated
4
5 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
6
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License version 2 as
9 published by the Free Software Foundation;
10
11 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
12 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
13 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
14 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
15 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
16 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
17 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
18 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
19
20 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
21 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
22 SOFTWARE IS DISCLAIMED.
23 */
24
25 /* Bluetooth L2CAP core and sockets. */
26
#include <linux/module.h>

#include <linux/types.h>
#include <linux/capability.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/poll.h>
#include <linux/fcntl.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/socket.h>
#include <linux/skbuff.h>
#include <linux/string.h>
#include <linux/list.h>
#include <linux/device.h>
#include <net/sock.h>

#include <asm/system.h>
#include <asm/uaccess.h>
#include <asm/unaligned.h>

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>
#include <net/bluetooth/l2cap.h>
52
53 #ifndef CONFIG_BT_L2CAP_DEBUG
54 #undef BT_DBG
55 #define BT_DBG(D...)
56 #endif
57
#define VERSION "2.8"

/* Socket operations table; forward declaration, defined below. */
static const struct proto_ops l2cap_sock_ops;

/* Global list of every L2CAP socket; guarded by its embedded rwlock. */
static struct bt_sock_list l2cap_sk_list = {
	.lock = RW_LOCK_UNLOCKED
};

/* Forward declarations */
static void __l2cap_sock_close(struct sock *sk, int reason);
static void l2cap_sock_close(struct sock *sk);
static void l2cap_sock_kill(struct sock *sk);

static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
				u8 code, u8 ident, u16 dlen, void *data);
73 /* ---- L2CAP timers ---- */
/* sk_timer callback: the connect/disconnect timeout fired.
 * Runs in timer (BH) context, hence bh_lock_sock rather than lock_sock. */
static void l2cap_sock_timeout(unsigned long arg)
{
	struct sock *sk = (struct sock *) arg;

	BT_DBG("sock %p state %d", sk, sk->sk_state);

	bh_lock_sock(sk);
	__l2cap_sock_close(sk, ETIMEDOUT);
	bh_unlock_sock(sk);

	/* Reap the socket now if it is already zapped and orphaned */
	l2cap_sock_kill(sk);
	/* Balances the hold taken when the timer was armed */
	sock_put(sk);
}
87
/* Arm (or re-arm) sk->sk_timer to expire 'timeout' jiffies from now. */
static void l2cap_sock_set_timer(struct sock *sk, long timeout)
{
	BT_DBG("sk %p state %d timeout %ld", sk, sk->sk_state, timeout);
	sk_reset_timer(sk, &sk->sk_timer, jiffies + timeout);
}
93
/* Cancel a pending socket timer, if any. */
static void l2cap_sock_clear_timer(struct sock *sk)
{
	BT_DBG("sock %p state %d", sk, sk->sk_state);
	sk_stop_timer(sk, &sk->sk_timer);
}
99
/* Set up sk->sk_timer so expiry calls l2cap_sock_timeout(sk). */
static void l2cap_sock_init_timer(struct sock *sk)
{
	init_timer(&sk->sk_timer);
	sk->sk_timer.function = l2cap_sock_timeout;
	sk->sk_timer.data = (unsigned long)sk;
}
106
107 /* ---- L2CAP channels ---- */
108 static struct sock *__l2cap_get_chan_by_dcid(struct l2cap_chan_list *l, u16 cid)
109 {
110 struct sock *s;
111 for (s = l->head; s; s = l2cap_pi(s)->next_c) {
112 if (l2cap_pi(s)->dcid == cid)
113 break;
114 }
115 return s;
116 }
117
118 static struct sock *__l2cap_get_chan_by_scid(struct l2cap_chan_list *l, u16 cid)
119 {
120 struct sock *s;
121 for (s = l->head; s; s = l2cap_pi(s)->next_c) {
122 if (l2cap_pi(s)->scid == cid)
123 break;
124 }
125 return s;
126 }
127
/* Find channel with given SCID.
 * Returns the socket bh-locked so the caller can use it from softirq
 * context; the list lock is dropped before returning. */
static inline struct sock *l2cap_get_chan_by_scid(struct l2cap_chan_list *l, u16 cid)
{
	struct sock *s;
	read_lock(&l->lock);
	s = __l2cap_get_chan_by_scid(l, cid);
	if (s) bh_lock_sock(s);
	read_unlock(&l->lock);
	return s;
}
139
140 static struct sock *__l2cap_get_chan_by_ident(struct l2cap_chan_list *l, u8 ident)
141 {
142 struct sock *s;
143 for (s = l->head; s; s = l2cap_pi(s)->next_c) {
144 if (l2cap_pi(s)->ident == ident)
145 break;
146 }
147 return s;
148 }
149
/* As l2cap_get_chan_by_scid(), but keyed on the command identifier.
 * Returns the socket bh-locked, or NULL. */
static inline struct sock *l2cap_get_chan_by_ident(struct l2cap_chan_list *l, u8 ident)
{
	struct sock *s;
	read_lock(&l->lock);
	s = __l2cap_get_chan_by_ident(l, ident);
	if (s) bh_lock_sock(s);
	read_unlock(&l->lock);
	return s;
}
159
160 static u16 l2cap_alloc_cid(struct l2cap_chan_list *l)
161 {
162 u16 cid = 0x0040;
163
164 for (; cid < 0xffff; cid++) {
165 if(!__l2cap_get_chan_by_scid(l, cid))
166 return cid;
167 }
168
169 return 0;
170 }
171
/* Insert 'sk' at the head of the connection's channel list.
 * Caller must hold l->lock for writing.  Takes a reference on the
 * socket; l2cap_chan_unlink() drops it. */
static inline void __l2cap_chan_link(struct l2cap_chan_list *l, struct sock *sk)
{
	sock_hold(sk);

	if (l->head)
		l2cap_pi(l->head)->prev_c = sk;

	l2cap_pi(sk)->next_c = l->head;
	l2cap_pi(sk)->prev_c = NULL;
	l->head = sk;
}
183
/* Remove 'sk' from the channel list (taking the list lock itself) and
 * drop the reference taken by __l2cap_chan_link(). */
static inline void l2cap_chan_unlink(struct l2cap_chan_list *l, struct sock *sk)
{
	struct sock *next = l2cap_pi(sk)->next_c, *prev = l2cap_pi(sk)->prev_c;

	write_lock_bh(&l->lock);
	if (sk == l->head)
		l->head = next;

	if (next)
		l2cap_pi(next)->prev_c = prev;
	if (prev)
		l2cap_pi(prev)->next_c = next;
	write_unlock_bh(&l->lock);

	/* __sock_put: the caller is expected to still hold its own
	 * reference, so this cannot be the last one. */
	__sock_put(sk);
}
200
/* Attach 'sk' to 'conn' and assign its CIDs according to socket type.
 * Caller must hold conn->chan_list.lock for writing (see
 * l2cap_chan_add()). */
static void __l2cap_chan_add(struct l2cap_conn *conn, struct sock *sk, struct sock *parent)
{
	struct l2cap_chan_list *l = &conn->chan_list;

	BT_DBG("conn %p, psm 0x%2.2x, dcid 0x%4.4x", conn, l2cap_pi(sk)->psm, l2cap_pi(sk)->dcid);

	l2cap_pi(sk)->conn = conn;

	if (sk->sk_type == SOCK_SEQPACKET) {
		/* Alloc CID for connection-oriented socket */
		l2cap_pi(sk)->scid = l2cap_alloc_cid(l);
	} else if (sk->sk_type == SOCK_DGRAM) {
		/* Connectionless socket: fixed CID 0x0002 */
		l2cap_pi(sk)->scid = 0x0002;
		l2cap_pi(sk)->dcid = 0x0002;
		l2cap_pi(sk)->omtu = L2CAP_DEFAULT_MTU;
	} else {
		/* Raw socket can send/recv signalling messages only (CID 0x0001) */
		l2cap_pi(sk)->scid = 0x0001;
		l2cap_pi(sk)->dcid = 0x0001;
		l2cap_pi(sk)->omtu = L2CAP_DEFAULT_MTU;
	}

	__l2cap_chan_link(l, sk);

	if (parent)
		bt_accept_enqueue(parent, sk);
}
229
/* Delete channel.
 * Must be called on the locked socket.  Detaches the channel from its
 * connection, marks the socket closed/zapped and wakes any waiter. */
static void l2cap_chan_del(struct sock *sk, int err)
{
	struct l2cap_conn *conn = l2cap_pi(sk)->conn;
	struct sock *parent = bt_sk(sk)->parent;

	l2cap_sock_clear_timer(sk);

	BT_DBG("sk %p, conn %p, err %d", sk, conn, err);

	if (conn) {
		/* Unlink from channel list */
		l2cap_chan_unlink(&conn->chan_list, sk);
		l2cap_pi(sk)->conn = NULL;
		hci_conn_put(conn->hcon);
	}

	sk->sk_state = BT_CLOSED;
	sock_set_flag(sk, SOCK_ZAPPED);

	if (err)
		sk->sk_err = err;

	if (parent) {
		/* Not yet accepted: detach from the listener and let it
		 * notice the change on its queue. */
		bt_accept_unlink(sk);
		parent->sk_data_ready(parent, 0);
	} else
		sk->sk_state_change(sk);
}
260
261 /* ---- L2CAP connections ---- */
/* Create (or return the existing) L2CAP connection object for an ACL
 * link.  Returns NULL on allocation failure; returns the current
 * object unchanged when one already exists or 'status' is non-zero. */
static struct l2cap_conn *l2cap_conn_add(struct hci_conn *hcon, u8 status)
{
	struct l2cap_conn *conn = hcon->l2cap_data;

	if (conn || status)
		return conn;

	/* GFP_ATOMIC: may be called from a non-sleeping context */
	conn = kzalloc(sizeof(struct l2cap_conn), GFP_ATOMIC);
	if (!conn)
		return NULL;

	hcon->l2cap_data = conn;
	conn->hcon = hcon;

	BT_DBG("hcon %p conn %p", hcon, conn);

	conn->mtu = hcon->hdev->acl_mtu;
	conn->src = &hcon->hdev->bdaddr;
	conn->dst = &hcon->dst;

	spin_lock_init(&conn->lock);
	rwlock_init(&conn->chan_list.lock);

	return conn;
}
287
/* Tear down an L2CAP connection: close every channel on it with 'err'
 * and free the connection object. */
static void l2cap_conn_del(struct hci_conn *hcon, int err)
{
	struct l2cap_conn *conn = hcon->l2cap_data;
	struct sock *sk;

	if (!conn)
		return;

	BT_DBG("hcon %p conn %p, err %d", hcon, conn, err);

	/* Drop any partially reassembled frame */
	if (conn->rx_skb)
		kfree_skb(conn->rx_skb);

	/* Kill channels: l2cap_chan_del() unlinks the list head each
	 * iteration, so this loop drains the whole list. */
	while ((sk = conn->chan_list.head)) {
		bh_lock_sock(sk);
		l2cap_chan_del(sk, err);
		bh_unlock_sock(sk);
		l2cap_sock_kill(sk);
	}

	hcon->l2cap_data = NULL;
	kfree(conn);
}
312
/* Locked wrapper around __l2cap_chan_add(). */
static inline void l2cap_chan_add(struct l2cap_conn *conn, struct sock *sk, struct sock *parent)
{
	struct l2cap_chan_list *l = &conn->chan_list;
	write_lock_bh(&l->lock);
	__l2cap_chan_add(conn, sk, parent);
	write_unlock_bh(&l->lock);
}
320
/* Allocate the next signalling-command identifier for 'conn'.
 * Serialised by conn->lock; wraps within the kernel's 1-128 range. */
static inline u8 l2cap_get_ident(struct l2cap_conn *conn)
{
	u8 id;

	/* Get next available identificator.
	 * 1 - 128 are used by kernel.
	 * 129 - 199 are reserved.
	 * 200 - 254 are used by utilities like l2ping, etc.
	 */

	spin_lock_bh(&conn->lock);

	if (++conn->tx_ident > 128)
		conn->tx_ident = 1;

	id = conn->tx_ident;

	spin_unlock_bh(&conn->lock);

	return id;
}
342
343 static inline int l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len, void *data)
344 {
345 struct sk_buff *skb = l2cap_build_cmd(conn, code, ident, len, data);
346
347 BT_DBG("code 0x%2.2x", code);
348
349 if (!skb)
350 return -ENOMEM;
351
352 return hci_send_acl(conn->hcon, skb, 0);
353 }
354
355 /* ---- Socket interface ---- */
356 static struct sock *__l2cap_get_sock_by_addr(u16 psm, bdaddr_t *src)
357 {
358 struct sock *sk;
359 struct hlist_node *node;
360 sk_for_each(sk, node, &l2cap_sk_list.head)
361 if (l2cap_pi(sk)->sport == psm && !bacmp(&bt_sk(sk)->src, src))
362 goto found;
363 sk = NULL;
364 found:
365 return sk;
366 }
367
/* Find socket with psm and source bdaddr.
 * Returns closest match: an exact source-address match wins, otherwise
 * a socket bound to BDADDR_ANY with the right PSM.
 * Caller must hold l2cap_sk_list.lock. */
static struct sock *__l2cap_get_sock_by_psm(int state, u16 psm, bdaddr_t *src)
{
	struct sock *sk = NULL, *sk1 = NULL;
	struct hlist_node *node;

	sk_for_each(sk, node, &l2cap_sk_list.head) {
		if (state && sk->sk_state != state)
			continue;

		if (l2cap_pi(sk)->psm == psm) {
			/* Exact match. */
			if (!bacmp(&bt_sk(sk)->src, src))
				break;

			/* Closest match */
			if (!bacmp(&bt_sk(sk)->src, BDADDR_ANY))
				sk1 = sk;
		}
	}
	/* 'node' is non-NULL only when the loop broke early, i.e. an
	 * exact match was found; otherwise fall back to the wildcard. */
	return node ? sk : sk1;
}
392
/* Find socket with given address (psm, src).
 * Returns the socket bh-locked, or NULL. */
static inline struct sock *l2cap_get_sock_by_psm(int state, u16 psm, bdaddr_t *src)
{
	struct sock *s;
	read_lock(&l2cap_sk_list.lock);
	s = __l2cap_get_sock_by_psm(state, psm, src);
	if (s) bh_lock_sock(s);
	read_unlock(&l2cap_sk_list.lock);
	return s;
}
404
/* sk_destruct callback: free any skbs still queued when the socket
 * is destroyed. */
static void l2cap_sock_destruct(struct sock *sk)
{
	BT_DBG("sk %p", sk);

	skb_queue_purge(&sk->sk_receive_queue);
	skb_queue_purge(&sk->sk_write_queue);
}
412
/* Close every connection still sitting on a listener's accept queue,
 * then mark the listener itself closed and zapped. */
static void l2cap_sock_cleanup_listen(struct sock *parent)
{
	struct sock *sk;

	BT_DBG("parent %p", parent);

	/* Close not yet accepted channels */
	while ((sk = bt_accept_dequeue(parent, NULL)))
		l2cap_sock_close(sk);

	parent->sk_state = BT_CLOSED;
	sock_set_flag(parent, SOCK_ZAPPED);
}
426
/* Kill socket (only if zapped and orphan)
 * Must be called on unlocked socket.
 */
static void l2cap_sock_kill(struct sock *sk)
{
	/* Only reap sockets that are both zapped and orphaned */
	if (!sock_flag(sk, SOCK_ZAPPED) || sk->sk_socket)
		return;

	BT_DBG("sk %p state %d", sk, sk->sk_state);

	/* Kill poor orphan: unlink from the global socket list and drop
	 * the final reference. */
	bt_sock_unlink(&l2cap_sk_list, sk);
	sock_set_flag(sk, SOCK_DEAD);
	sock_put(sk);
}
442
/* State-dependent close.  Caller holds the socket lock (or bh lock);
 * 'reason' becomes sk_err for channels that are torn down at once. */
static void __l2cap_sock_close(struct sock *sk, int reason)
{
	BT_DBG("sk %p state %d socket %p", sk, sk->sk_state, sk->sk_socket);

	switch (sk->sk_state) {
	case BT_LISTEN:
		l2cap_sock_cleanup_listen(sk);
		break;

	case BT_CONNECTED:
	case BT_CONFIG:
	case BT_CONNECT2:
		if (sk->sk_type == SOCK_SEQPACKET) {
			/* Connection-oriented channel: start an orderly
			 * disconnect and bound the wait for the peer's
			 * response with the send timeout. */
			struct l2cap_conn *conn = l2cap_pi(sk)->conn;
			struct l2cap_disconn_req req;

			sk->sk_state = BT_DISCONN;
			l2cap_sock_set_timer(sk, sk->sk_sndtimeo);

			req.dcid = __cpu_to_le16(l2cap_pi(sk)->dcid);
			req.scid = __cpu_to_le16(l2cap_pi(sk)->scid);
			l2cap_send_cmd(conn, l2cap_get_ident(conn),
					L2CAP_DISCONN_REQ, sizeof(req), &req);
		} else {
			l2cap_chan_del(sk, reason);
		}
		break;

	case BT_CONNECT:
	case BT_DISCONN:
		l2cap_chan_del(sk, reason);
		break;

	default:
		sock_set_flag(sk, SOCK_ZAPPED);
		break;
	}
}
481
/* Must be called on unlocked socket. */
static void l2cap_sock_close(struct sock *sk)
{
	l2cap_sock_clear_timer(sk);
	lock_sock(sk);
	__l2cap_sock_close(sk, ECONNRESET);
	release_sock(sk);
	/* Free immediately if the close left it zapped and orphaned */
	l2cap_sock_kill(sk);
}
491
492 static void l2cap_sock_init(struct sock *sk, struct sock *parent)
493 {
494 struct l2cap_pinfo *pi = l2cap_pi(sk);
495
496 BT_DBG("sk %p", sk);
497
498 if (parent) {
499 sk->sk_type = parent->sk_type;
500 pi->imtu = l2cap_pi(parent)->imtu;
501 pi->omtu = l2cap_pi(parent)->omtu;
502 pi->link_mode = l2cap_pi(parent)->link_mode;
503 } else {
504 pi->imtu = L2CAP_DEFAULT_MTU;
505 pi->omtu = 0;
506 pi->link_mode = 0;
507 }
508
509 /* Default config options */
510 pi->conf_mtu = L2CAP_DEFAULT_MTU;
511 pi->flush_to = L2CAP_DEFAULT_FLUSH_TO;
512 }
513
/* Protocol descriptor for the BT socket layer; obj_size makes
 * sk_alloc() reserve room for struct l2cap_pinfo. */
static struct proto l2cap_proto = {
	.name = "L2CAP",
	.owner = THIS_MODULE,
	.obj_size = sizeof(struct l2cap_pinfo)
};
519
/* Allocate and minimally initialise a new L2CAP socket, linking it
 * into the global socket list.  Returns NULL on allocation failure. */
static struct sock *l2cap_sock_alloc(struct socket *sock, int proto, gfp_t prio)
{
	struct sock *sk;

	sk = sk_alloc(PF_BLUETOOTH, prio, &l2cap_proto, 1);
	if (!sk)
		return NULL;

	sock_init_data(sock, sk);
	INIT_LIST_HEAD(&bt_sk(sk)->accept_q);

	sk->sk_destruct = l2cap_sock_destruct;
	sk->sk_sndtimeo = L2CAP_CONN_TIMEOUT;

	sock_reset_flag(sk, SOCK_ZAPPED);

	sk->sk_protocol = proto;
	sk->sk_state = BT_OPEN;

	l2cap_sock_init_timer(sk);

	bt_sock_link(&l2cap_sk_list, sk);
	return sk;
}
544
545 static int l2cap_sock_create(struct socket *sock, int protocol)
546 {
547 struct sock *sk;
548
549 BT_DBG("sock %p", sock);
550
551 sock->state = SS_UNCONNECTED;
552
553 if (sock->type != SOCK_SEQPACKET &&
554 sock->type != SOCK_DGRAM && sock->type != SOCK_RAW)
555 return -ESOCKTNOSUPPORT;
556
557 if (sock->type == SOCK_RAW && !capable(CAP_NET_RAW))
558 return -EPERM;
559
560 sock->ops = &l2cap_sock_ops;
561
562 sk = l2cap_sock_alloc(sock, protocol, GFP_ATOMIC);
563 if (!sk)
564 return -ENOMEM;
565
566 l2cap_sock_init(sk, NULL);
567 return 0;
568 }
569
/* bind(2) backend: record source address and PSM.  The global socket
 * list lock guards the PSM-in-use check and the assignment together,
 * so two binds cannot race to the same (psm, bdaddr). */
static int l2cap_sock_bind(struct socket *sock, struct sockaddr *addr, int addr_len)
{
	struct sockaddr_l2 *la = (struct sockaddr_l2 *) addr;
	struct sock *sk = sock->sk;
	int err = 0;

	BT_DBG("sk %p, %s %d", sk, batostr(&la->l2_bdaddr), la->l2_psm);

	if (!addr || addr->sa_family != AF_BLUETOOTH)
		return -EINVAL;

	lock_sock(sk);

	/* Only a freshly created socket may be bound */
	if (sk->sk_state != BT_OPEN) {
		err = -EBADFD;
		goto done;
	}

	write_lock_bh(&l2cap_sk_list.lock);

	if (la->l2_psm && __l2cap_get_sock_by_addr(la->l2_psm, &la->l2_bdaddr)) {
		err = -EADDRINUSE;
	} else {
		/* Save source address */
		bacpy(&bt_sk(sk)->src, &la->l2_bdaddr);
		l2cap_pi(sk)->psm = la->l2_psm;
		l2cap_pi(sk)->sport = la->l2_psm;
		sk->sk_state = BT_BOUND;
	}

	write_unlock_bh(&l2cap_sk_list.lock);

done:
	release_sock(sk);
	return err;
}
606
/* Establish (or reuse) the ACL link to the peer and start the L2CAP
 * connect sequence.  Called with the socket locked. */
static int l2cap_do_connect(struct sock *sk)
{
	bdaddr_t *src = &bt_sk(sk)->src;
	bdaddr_t *dst = &bt_sk(sk)->dst;
	struct l2cap_conn *conn;
	struct hci_conn *hcon;
	struct hci_dev *hdev;
	int err = 0;

	BT_DBG("%s -> %s psm 0x%2.2x", batostr(src), batostr(dst), l2cap_pi(sk)->psm);

	if (!(hdev = hci_get_route(dst, src)))
		return -EHOSTUNREACH;

	hci_dev_lock_bh(hdev);

	err = -ENOMEM;

	hcon = hci_connect(hdev, ACL_LINK, dst);
	if (!hcon)
		goto done;

	conn = l2cap_conn_add(hcon, 0);
	if (!conn) {
		hci_conn_put(hcon);
		goto done;
	}

	err = 0;

	/* Update source addr of the socket */
	bacpy(src, conn->src);

	l2cap_chan_add(conn, sk, NULL);

	sk->sk_state = BT_CONNECT;
	l2cap_sock_set_timer(sk, sk->sk_sndtimeo);

	if (hcon->state == BT_CONNECTED) {
		/* ACL link already up: send the connect request now for
		 * connection-oriented sockets, or go straight to
		 * BT_CONNECTED otherwise. */
		if (sk->sk_type == SOCK_SEQPACKET) {
			struct l2cap_conn_req req;
			l2cap_pi(sk)->ident = l2cap_get_ident(conn);
			req.scid = __cpu_to_le16(l2cap_pi(sk)->scid);
			req.psm = l2cap_pi(sk)->psm;
			l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
					L2CAP_CONN_REQ, sizeof(req), &req);
		} else {
			l2cap_sock_clear_timer(sk);
			sk->sk_state = BT_CONNECTED;
		}
	}

done:
	hci_dev_unlock_bh(hdev);
	hci_dev_put(hdev);
	return err;
}
664
665 static int l2cap_sock_connect(struct socket *sock, struct sockaddr *addr, int alen, int flags)
666 {
667 struct sockaddr_l2 *la = (struct sockaddr_l2 *) addr;
668 struct sock *sk = sock->sk;
669 int err = 0;
670
671 lock_sock(sk);
672
673 BT_DBG("sk %p", sk);
674
675 if (addr->sa_family != AF_BLUETOOTH || alen < sizeof(struct sockaddr_l2)) {
676 err = -EINVAL;
677 goto done;
678 }
679
680 if (sk->sk_type == SOCK_SEQPACKET && !la->l2_psm) {
681 err = -EINVAL;
682 goto done;
683 }
684
685 switch(sk->sk_state) {
686 case BT_CONNECT:
687 case BT_CONNECT2:
688 case BT_CONFIG:
689 /* Already connecting */
690 goto wait;
691
692 case BT_CONNECTED:
693 /* Already connected */
694 goto done;
695
696 case BT_OPEN:
697 case BT_BOUND:
698 /* Can connect */
699 break;
700
701 default:
702 err = -EBADFD;
703 goto done;
704 }
705
706 /* Set destination address and psm */
707 bacpy(&bt_sk(sk)->dst, &la->l2_bdaddr);
708 l2cap_pi(sk)->psm = la->l2_psm;
709
710 if ((err = l2cap_do_connect(sk)))
711 goto done;
712
713 wait:
714 err = bt_sock_wait_state(sk, BT_CONNECTED,
715 sock_sndtimeo(sk, flags & O_NONBLOCK));
716 done:
717 release_sock(sk);
718 return err;
719 }
720
/* listen(2) backend.  If the socket has no PSM yet, autobind one from
 * the dynamic range (odd values 0x1001-0x10ff). */
static int l2cap_sock_listen(struct socket *sock, int backlog)
{
	struct sock *sk = sock->sk;
	int err = 0;

	BT_DBG("sk %p backlog %d", sk, backlog);

	lock_sock(sk);

	if (sk->sk_state != BT_BOUND || sock->type != SOCK_SEQPACKET) {
		err = -EBADFD;
		goto done;
	}

	if (!l2cap_pi(sk)->psm) {
		bdaddr_t *src = &bt_sk(sk)->src;
		u16 psm;

		err = -EINVAL;

		write_lock_bh(&l2cap_sk_list.lock);

		/* NOTE(review): the availability check passes host-order
		 * 'psm' while sport is stored byte-swapped via htobs();
		 * on big-endian this lookup may miss collisions — confirm. */
		for (psm = 0x1001; psm < 0x1100; psm += 2)
			if (!__l2cap_get_sock_by_addr(psm, src)) {
				l2cap_pi(sk)->psm = htobs(psm);
				l2cap_pi(sk)->sport = htobs(psm);
				err = 0;
				break;
			}

		write_unlock_bh(&l2cap_sk_list.lock);

		if (err < 0)
			goto done;
	}

	sk->sk_max_ack_backlog = backlog;
	sk->sk_ack_backlog = 0;
	sk->sk_state = BT_LISTEN;

done:
	release_sock(sk);
	return err;
}
765
/* accept(2) backend: wait (wake-one) for a connection on the accept
 * queue, honouring O_NONBLOCK, signals and the receive timeout. */
static int l2cap_sock_accept(struct socket *sock, struct socket *newsock, int flags)
{
	DECLARE_WAITQUEUE(wait, current);
	struct sock *sk = sock->sk, *nsk;
	long timeo;
	int err = 0;

	lock_sock(sk);

	if (sk->sk_state != BT_LISTEN) {
		err = -EBADFD;
		goto done;
	}

	timeo = sock_rcvtimeo(sk, flags & O_NONBLOCK);

	BT_DBG("sk %p timeo %ld", sk, timeo);

	/* Wait for an incoming connection. (wake-one). */
	add_wait_queue_exclusive(sk->sk_sleep, &wait);
	while (!(nsk = bt_accept_dequeue(sk, newsock))) {
		set_current_state(TASK_INTERRUPTIBLE);
		if (!timeo) {
			err = -EAGAIN;
			break;
		}

		/* Drop the socket lock while sleeping; listener state may
		 * change in the meantime, so it is rechecked after waking. */
		release_sock(sk);
		timeo = schedule_timeout(timeo);
		lock_sock(sk);

		if (sk->sk_state != BT_LISTEN) {
			err = -EBADFD;
			break;
		}

		if (signal_pending(current)) {
			err = sock_intr_errno(timeo);
			break;
		}
	}
	set_current_state(TASK_RUNNING);
	remove_wait_queue(sk->sk_sleep, &wait);

	if (err)
		goto done;

	newsock->state = SS_CONNECTED;

	BT_DBG("new socket %p", nsk);

done:
	release_sock(sk);
	return err;
}
821
822 static int l2cap_sock_getname(struct socket *sock, struct sockaddr *addr, int *len, int peer)
823 {
824 struct sockaddr_l2 *la = (struct sockaddr_l2 *) addr;
825 struct sock *sk = sock->sk;
826
827 BT_DBG("sock %p, sk %p", sock, sk);
828
829 addr->sa_family = AF_BLUETOOTH;
830 *len = sizeof(struct sockaddr_l2);
831
832 if (peer)
833 bacpy(&la->l2_bdaddr, &bt_sk(sk)->dst);
834 else
835 bacpy(&la->l2_bdaddr, &bt_sk(sk)->src);
836
837 la->l2_psm = l2cap_pi(sk)->psm;
838 return 0;
839 }
840
/* Build one L2CAP frame from the user iovec and hand it to the HCI
 * layer.  The frame is split into conn->mtu-sized skb fragments
 * chained on frag_list.  Returns bytes queued or a negative errno. */
static inline int l2cap_do_send(struct sock *sk, struct msghdr *msg, int len)
{
	struct l2cap_conn *conn = l2cap_pi(sk)->conn;
	struct sk_buff *skb, **frag;
	int err, hlen, count, sent=0;
	struct l2cap_hdr *lh;

	BT_DBG("sk %p len %d", sk, len);

	/* First fragment (with L2CAP header) */
	if (sk->sk_type == SOCK_DGRAM)
		hlen = L2CAP_HDR_SIZE + 2;	/* + 2-byte PSM for connectionless */
	else
		hlen = L2CAP_HDR_SIZE;

	count = min_t(unsigned int, (conn->mtu - hlen), len);

	skb = bt_skb_send_alloc(sk, hlen + count,
			msg->msg_flags & MSG_DONTWAIT, &err);
	if (!skb)
		return err;

	/* Create L2CAP header */
	lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
	lh->cid = __cpu_to_le16(l2cap_pi(sk)->dcid);
	lh->len = __cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));

	if (sk->sk_type == SOCK_DGRAM)
		put_unaligned(l2cap_pi(sk)->psm, (u16 *) skb_put(skb, 2));

	if (memcpy_fromiovec(skb_put(skb, count), msg->msg_iov, count)) {
		err = -EFAULT;
		goto fail;
	}

	sent += count;
	len -= count;

	/* Continuation fragments (no L2CAP header) */
	frag = &skb_shinfo(skb)->frag_list;
	while (len) {
		count = min_t(unsigned int, conn->mtu, len);

		*frag = bt_skb_send_alloc(sk, count, msg->msg_flags & MSG_DONTWAIT, &err);
		if (!*frag)
			goto fail;

		if (memcpy_fromiovec(skb_put(*frag, count), msg->msg_iov, count)) {
			err = -EFAULT;
			goto fail;
		}

		sent += count;
		len -= count;

		frag = &(*frag)->next;
	}

	if ((err = hci_send_acl(conn->hcon, skb, 0)) < 0)
		goto fail;

	return sent;

fail:
	/* Frees the whole chain, including any queued fragments */
	kfree_skb(skb);
	return err;
}
908
/* sendmsg(2) backend: sanity-check flags and size, then push the
 * payload through l2cap_do_send() while connected. */
static int l2cap_sock_sendmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg, size_t len)
{
	struct sock *sk = sock->sk;
	int err = 0;

	BT_DBG("sock %p, sk %p", sock, sk);

	/* Report and clear any pending asynchronous socket error first */
	err = sock_error(sk);
	if (err)
		return err;

	if (msg->msg_flags & MSG_OOB)
		return -EOPNOTSUPP;

	/* Check outgoing MTU */
	if (sk->sk_type != SOCK_RAW && len > l2cap_pi(sk)->omtu)
		return -EINVAL;

	lock_sock(sk);

	if (sk->sk_state == BT_CONNECTED)
		err = l2cap_do_send(sk, msg, len);
	else
		err = -ENOTCONN;

	release_sock(sk);
	return err;
}
937
938 static int l2cap_sock_setsockopt(struct socket *sock, int level, int optname, char __user *optval, int optlen)
939 {
940 struct sock *sk = sock->sk;
941 struct l2cap_options opts;
942 int err = 0, len;
943 u32 opt;
944
945 BT_DBG("sk %p", sk);
946
947 lock_sock(sk);
948
949 switch (optname) {
950 case L2CAP_OPTIONS:
951 len = min_t(unsigned int, sizeof(opts), optlen);
952 if (copy_from_user((char *) &opts, optval, len)) {
953 err = -EFAULT;
954 break;
955 }
956 l2cap_pi(sk)->imtu = opts.imtu;
957 l2cap_pi(sk)->omtu = opts.omtu;
958 break;
959
960 case L2CAP_LM:
961 if (get_user(opt, (u32 __user *) optval)) {
962 err = -EFAULT;
963 break;
964 }
965
966 l2cap_pi(sk)->link_mode = opt;
967 break;
968
969 default:
970 err = -ENOPROTOOPT;
971 break;
972 }
973
974 release_sock(sk);
975 return err;
976 }
977
978 static int l2cap_sock_getsockopt(struct socket *sock, int level, int optname, char __user *optval, int __user *optlen)
979 {
980 struct sock *sk = sock->sk;
981 struct l2cap_options opts;
982 struct l2cap_conninfo cinfo;
983 int len, err = 0;
984
985 BT_DBG("sk %p", sk);
986
987 if (get_user(len, optlen))
988 return -EFAULT;
989
990 lock_sock(sk);
991
992 switch (optname) {
993 case L2CAP_OPTIONS:
994 opts.imtu = l2cap_pi(sk)->imtu;
995 opts.omtu = l2cap_pi(sk)->omtu;
996 opts.flush_to = l2cap_pi(sk)->flush_to;
997 opts.mode = 0x00;
998
999 len = min_t(unsigned int, len, sizeof(opts));
1000 if (copy_to_user(optval, (char *) &opts, len))
1001 err = -EFAULT;
1002
1003 break;
1004
1005 case L2CAP_LM:
1006 if (put_user(l2cap_pi(sk)->link_mode, (u32 __user *) optval))
1007 err = -EFAULT;
1008 break;
1009
1010 case L2CAP_CONNINFO:
1011 if (sk->sk_state != BT_CONNECTED) {
1012 err = -ENOTCONN;
1013 break;
1014 }
1015
1016 cinfo.hci_handle = l2cap_pi(sk)->conn->hcon->handle;
1017 memcpy(cinfo.dev_class, l2cap_pi(sk)->conn->hcon->dev_class, 3);
1018
1019 len = min_t(unsigned int, len, sizeof(cinfo));
1020 if (copy_to_user(optval, (char *) &cinfo, len))
1021 err = -EFAULT;
1022
1023 break;
1024
1025 default:
1026 err = -ENOPROTOOPT;
1027 break;
1028 }
1029
1030 release_sock(sk);
1031 return err;
1032 }
1033
/* shutdown(2) backend.
 * NOTE(review): the 'how' argument is ignored; any shutdown direction
 * closes the channel completely — confirm callers expect this. */
static int l2cap_sock_shutdown(struct socket *sock, int how)
{
	struct sock *sk = sock->sk;
	int err = 0;

	BT_DBG("sock %p, sk %p", sock, sk);

	if (!sk)
		return 0;

	lock_sock(sk);
	if (!sk->sk_shutdown) {
		sk->sk_shutdown = SHUTDOWN_MASK;
		l2cap_sock_clear_timer(sk);
		__l2cap_sock_close(sk, 0);

		/* With SO_LINGER set, wait for the disconnect to finish */
		if (sock_flag(sk, SOCK_LINGER) && sk->sk_lingertime)
			err = bt_sock_wait_state(sk, BT_CLOSED, sk->sk_lingertime);
	}
	release_sock(sk);
	return err;
}
1056
/* close(2)/release backend: shut the socket down, detach it from the
 * struct socket and reap it if it is already zapped. */
static int l2cap_sock_release(struct socket *sock)
{
	struct sock *sk = sock->sk;
	int err;

	BT_DBG("sock %p, sk %p", sock, sk);

	if (!sk)
		return 0;

	err = l2cap_sock_shutdown(sock, 2);

	sock_orphan(sk);
	l2cap_sock_kill(sk);
	return err;
}
1073
/* The ACL link came up: move connectionless/raw channels straight to
 * BT_CONNECTED and send the pending connect request for any
 * connection-oriented channel still in BT_CONNECT. */
static void l2cap_conn_ready(struct l2cap_conn *conn)
{
	struct l2cap_chan_list *l = &conn->chan_list;
	struct sock *sk;

	BT_DBG("conn %p", conn);

	read_lock(&l->lock);

	for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
		bh_lock_sock(sk);

		if (sk->sk_type != SOCK_SEQPACKET) {
			l2cap_sock_clear_timer(sk);
			sk->sk_state = BT_CONNECTED;
			sk->sk_state_change(sk);
		} else if (sk->sk_state == BT_CONNECT) {
			struct l2cap_conn_req req;
			l2cap_pi(sk)->ident = l2cap_get_ident(conn);
			req.scid = __cpu_to_le16(l2cap_pi(sk)->scid);
			req.psm = l2cap_pi(sk)->psm;
			l2cap_send_cmd(conn, l2cap_pi(sk)->ident, L2CAP_CONN_REQ, sizeof(req), &req);
		}

		bh_unlock_sock(sk);
	}

	read_unlock(&l->lock);
}
1103
/* Notify sockets that we cannot guarantee reliability anymore */
static void l2cap_conn_unreliable(struct l2cap_conn *conn, int err)
{
	struct l2cap_chan_list *l = &conn->chan_list;
	struct sock *sk;

	BT_DBG("conn %p", conn);

	read_lock(&l->lock);
	for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
		/* Only sockets that asked for a reliable link care */
		if (l2cap_pi(sk)->link_mode & L2CAP_LM_RELIABLE)
			sk->sk_err = err;
	}
	read_unlock(&l->lock);
}
1119
/* Channel configuration finished: mark it connected and wake whoever
 * is waiting (connect(2) for outgoing; accept(2), via the parent, for
 * incoming). */
static void l2cap_chan_ready(struct sock *sk)
{
	struct sock *parent = bt_sk(sk)->parent;

	BT_DBG("sk %p, parent %p", sk, parent);

	l2cap_pi(sk)->conf_state = 0;
	l2cap_sock_clear_timer(sk);

	if (!parent) {
		/* Outgoing channel.
		 * Wake up socket sleeping on connect.
		 */
		sk->sk_state = BT_CONNECTED;
		sk->sk_state_change(sk);
	} else {
		/* Incoming channel.
		 * Wake up socket sleeping on accept.
		 */
		parent->sk_data_ready(parent, 0);
	}
}
1142
/* Copy frame to all raw sockets on that connection */
static void l2cap_raw_recv(struct l2cap_conn *conn, struct sk_buff *skb)
{
	struct l2cap_chan_list *l = &conn->chan_list;
	struct sk_buff *nskb;
	struct sock * sk;

	BT_DBG("conn %p", conn);

	read_lock(&l->lock);
	for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
		if (sk->sk_type != SOCK_RAW)
			continue;

		/* Don't send frame to the socket it came from */
		if (skb->sk == sk)
			continue;

		/* Clone per receiver; skip this receiver on clone failure */
		if (!(nskb = skb_clone(skb, GFP_ATOMIC)))
			continue;

		/* Drop the clone if the receive queue rejects it */
		if (sock_queue_rcv_skb(sk, nskb))
			kfree_skb(nskb);
	}
	read_unlock(&l->lock);
}
1169
1170 /* ---- L2CAP signalling commands ---- */
/* Build an skb (plus frag_list continuations) holding one signalling
 * command: L2CAP header on CID 0x0001, command header, then 'dlen'
 * payload bytes split at conn->mtu boundaries.  Returns NULL on
 * allocation failure. */
static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
		u8 code, u8 ident, u16 dlen, void *data)
{
	struct sk_buff *skb, **frag;
	struct l2cap_cmd_hdr *cmd;
	struct l2cap_hdr *lh;
	int len, count;

	BT_DBG("conn %p, code 0x%2.2x, ident 0x%2.2x, len %d", conn, code, ident, dlen);

	len = L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE + dlen;
	count = min_t(unsigned int, conn->mtu, len);

	skb = bt_skb_alloc(count, GFP_ATOMIC);
	if (!skb)
		return NULL;

	lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
	lh->len = __cpu_to_le16(L2CAP_CMD_HDR_SIZE + dlen);
	lh->cid = __cpu_to_le16(0x0001);	/* signalling channel */

	cmd = (struct l2cap_cmd_hdr *) skb_put(skb, L2CAP_CMD_HDR_SIZE);
	cmd->code = code;
	cmd->ident = ident;
	cmd->len = __cpu_to_le16(dlen);

	if (dlen) {
		/* Payload room left in the first fragment */
		count -= L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE;
		memcpy(skb_put(skb, count), data, count);
		/* void* arithmetic: GCC extension used throughout the kernel */
		data += count;
	}

	len -= skb->len;

	/* Continuation fragments (no L2CAP header) */
	frag = &skb_shinfo(skb)->frag_list;
	while (len) {
		count = min_t(unsigned int, conn->mtu, len);

		*frag = bt_skb_alloc(count, GFP_ATOMIC);
		if (!*frag)
			goto fail;

		memcpy(skb_put(*frag, count), data, count);

		len -= count;
		data += count;

		frag = &(*frag)->next;
	}

	return skb;

fail:
	/* Frees the partial fragment chain as well */
	kfree_skb(skb);
	return NULL;
}
1228
/* Decode one configuration option at *ptr, advancing *ptr past it.
 * Fixed-size values (1/2/4 bytes) are decoded to host order in *val;
 * anything else returns a pointer to the raw bytes instead.
 * NOTE(review): opt->len comes from the remote peer and is not checked
 * against the remaining buffer here — callers must bound 'len'. */
static inline int l2cap_get_conf_opt(void **ptr, int *type, int *olen, unsigned long *val)
{
	struct l2cap_conf_opt *opt = *ptr;
	int len;

	len = L2CAP_CONF_OPT_SIZE + opt->len;
	*ptr += len;

	*type = opt->type;
	*olen = opt->len;

	switch (opt->len) {
	case 1:
		*val = *((u8 *) opt->val);
		break;

	case 2:
		*val = __le16_to_cpu(*((u16 *)opt->val));
		break;

	case 4:
		*val = __le32_to_cpu(*((u32 *)opt->val));
		break;

	default:
		/* Variable-length option: hand back a pointer */
		*val = (unsigned long) opt->val;
		break;
	}

	BT_DBG("type 0x%2.2x len %d val 0x%lx", *type, opt->len, *val);
	return len;
}
1261
/* Walk the option list of a configuration request and record the
 * values we understand (MTU, flush timeout) in the socket.  The top
 * bit of the option type marks a hint, which may be silently ignored. */
static inline void l2cap_parse_conf_req(struct sock *sk, void *data, int len)
{
	int type, hint, olen;
	unsigned long val;
	void *ptr = data;

	BT_DBG("sk %p len %d", sk, len);

	while (len >= L2CAP_CONF_OPT_SIZE) {
		len -= l2cap_get_conf_opt(&ptr, &type, &olen, &val);

		hint = type & 0x80;
		type &= 0x7f;

		switch (type) {
		case L2CAP_CONF_MTU:
			l2cap_pi(sk)->conf_mtu = val;
			break;

		case L2CAP_CONF_FLUSH_TO:
			l2cap_pi(sk)->flush_to = val;
			break;

		case L2CAP_CONF_QOS:
			break;

		default:
			if (hint)
				break;

			/* FIXME: Reject unknown option */
			break;
		}
	}
}
1297
/* Append one configuration option at *ptr (little-endian encoding for
 * 2/4 byte values, raw copy otherwise) and advance *ptr past it. */
static void l2cap_add_conf_opt(void **ptr, u8 type, u8 len, unsigned long val)
{
	struct l2cap_conf_opt *opt = *ptr;

	BT_DBG("type 0x%2.2x len %d val 0x%lx", type, len, val);

	opt->type = type;
	opt->len = len;

	switch (len) {
	case 1:
		*((u8 *) opt->val) = val;
		break;

	case 2:
		*((u16 *) opt->val) = __cpu_to_le16(val);
		break;

	case 4:
		*((u32 *) opt->val) = __cpu_to_le32(val);
		break;

	default:
		/* 'val' holds a pointer to 'len' raw bytes */
		memcpy(opt->val, (void *) val, len);
		break;
	}

	*ptr += L2CAP_CONF_OPT_SIZE + len;
}
1327
1328 static int l2cap_build_conf_req(struct sock *sk, void *data)
1329 {
1330 struct l2cap_pinfo *pi = l2cap_pi(sk);
1331 struct l2cap_conf_req *req = data;
1332 void *ptr = req->data;
1333
1334 BT_DBG("sk %p", sk);
1335
1336 if (pi->imtu != L2CAP_DEFAULT_MTU)
1337 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, pi->imtu);
1338
1339 /* FIXME: Need actual value of the flush timeout */
1340 //if (flush_to != L2CAP_DEFAULT_FLUSH_TO)
1341 // l2cap_add_conf_opt(&ptr, L2CAP_CONF_FLUSH_TO, 2, pi->flush_to);
1342
1343 req->dcid = __cpu_to_le16(pi->dcid);
1344 req->flags = __cpu_to_le16(0);
1345
1346 return ptr - data;
1347 }
1348
1349 static inline int l2cap_conf_output(struct sock *sk, void **ptr)
1350 {
1351 struct l2cap_pinfo *pi = l2cap_pi(sk);
1352 int result = 0;
1353
1354 /* Configure output options and let the other side know
1355 * which ones we don't like. */
1356 if (pi->conf_mtu < pi->omtu) {
1357 l2cap_add_conf_opt(ptr, L2CAP_CONF_MTU, 2, pi->omtu);
1358 result = L2CAP_CONF_UNACCEPT;
1359 } else {
1360 pi->omtu = pi->conf_mtu;
1361 }
1362
1363 BT_DBG("sk %p result %d", sk, result);
1364 return result;
1365 }
1366
1367 static int l2cap_build_conf_rsp(struct sock *sk, void *data, int *result)
1368 {
1369 struct l2cap_conf_rsp *rsp = data;
1370 void *ptr = rsp->data;
1371 u16 flags = 0;
1372
1373 BT_DBG("sk %p complete %d", sk, result ? 1 : 0);
1374
1375 if (result)
1376 *result = l2cap_conf_output(sk, &ptr);
1377 else
1378 flags = 0x0001;
1379
1380 rsp->scid = __cpu_to_le16(l2cap_pi(sk)->dcid);
1381 rsp->result = __cpu_to_le16(result ? *result : 0);
1382 rsp->flags = __cpu_to_le16(flags);
1383
1384 return ptr - data;
1385 }
1386
/* Handle an incoming L2CAP Connection Request.
 *
 * Looks up a listening socket for the requested PSM, allocates a child
 * socket, links it into the connection's channel list and answers with
 * a Connection Response: success, pending (security procedure still
 * running) or an error code.
 *
 * NOTE(review): l2cap_get_sock_by_psm() appears to return the parent
 * locked -- bh_unlock_sock(parent) on the response path below; confirm
 * at its definition, which is outside this chunk.
 */
static inline int l2cap_connect_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
{
	struct l2cap_chan_list *list = &conn->chan_list;
	struct l2cap_conn_req *req = (struct l2cap_conn_req *) data;
	struct l2cap_conn_rsp rsp;
	struct sock *sk, *parent;
	int result = 0, status = 0;

	/* dcid is filled in once the channel is allocated.  psm is kept
	 * in wire (little-endian) byte order; this file treats it as an
	 * opaque value everywhere. */
	u16 dcid = 0, scid = __le16_to_cpu(req->scid);
	u16 psm = req->psm;

	BT_DBG("psm 0x%2.2x scid 0x%4.4x", psm, scid);

	/* Check if we have socket listening on psm */
	parent = l2cap_get_sock_by_psm(BT_LISTEN, psm, conn->src);
	if (!parent) {
		result = L2CAP_CR_BAD_PSM;
		goto sendresp;
	}

	result = L2CAP_CR_NO_MEM;

	/* Check for backlog size */
	if (sk_acceptq_is_full(parent)) {
		BT_DBG("backlog full %d", parent->sk_ack_backlog);
		goto response;
	}

	sk = l2cap_sock_alloc(NULL, BTPROTO_L2CAP, GFP_ATOMIC);
	if (!sk)
		goto response;

	write_lock_bh(&list->lock);

	/* Check if we already have channel with that dcid */
	if (__l2cap_get_chan_by_dcid(list, scid)) {
		write_unlock_bh(&list->lock);
		/* Mark the orphan socket dead before killing it. */
		sock_set_flag(sk, SOCK_ZAPPED);
		l2cap_sock_kill(sk);
		goto response;
	}

	/* Keep the ACL link alive while the channel exists. */
	hci_conn_hold(conn->hcon);

	l2cap_sock_init(sk, parent);
	bacpy(&bt_sk(sk)->src, conn->src);
	bacpy(&bt_sk(sk)->dst, conn->dst);
	l2cap_pi(sk)->psm = psm;
	l2cap_pi(sk)->dcid = scid;

	__l2cap_chan_add(conn, sk, parent);
	dcid = l2cap_pi(sk)->scid;

	l2cap_sock_set_timer(sk, sk->sk_sndtimeo);

	/* Service level security: assume a pending security procedure
	 * first, then downgrade to success if none is needed. */
	result = L2CAP_CR_PEND;
	status = L2CAP_CS_AUTHEN_PEND;
	sk->sk_state = BT_CONNECT2;
	l2cap_pi(sk)->ident = cmd->ident;

	if ((l2cap_pi(sk)->link_mode & L2CAP_LM_ENCRYPT) ||
			(l2cap_pi(sk)->link_mode & L2CAP_LM_SECURE)) {
		if (!hci_conn_encrypt(conn->hcon))
			goto done;
	} else if (l2cap_pi(sk)->link_mode & L2CAP_LM_AUTH) {
		if (!hci_conn_auth(conn->hcon))
			goto done;
	}

	/* No security procedure started: proceed to configuration. */
	sk->sk_state = BT_CONFIG;
	result = status = 0;

done:
	write_unlock_bh(&list->lock);

response:
	bh_unlock_sock(parent);

sendresp:
	rsp.scid = __cpu_to_le16(scid);
	rsp.dcid = __cpu_to_le16(dcid);
	rsp.result = __cpu_to_le16(result);
	rsp.status = __cpu_to_le16(status);
	l2cap_send_cmd(conn, cmd->ident, L2CAP_CONN_RSP, sizeof(rsp), &rsp);
	return 0;
}
1474
/* Handle an L2CAP Connection Response from the peer.
 *
 * On success the channel enters BT_CONFIG and we immediately send our
 * Configure Request; a "pending" result is left alone (the final
 * response will follow); anything else tears the channel down.
 *
 * NOTE(review): the l2cap_get_chan_by_* helpers appear to return the
 * socket locked (bh_unlock_sock() at the end) -- confirm at their
 * definitions, outside this chunk.
 */
static inline int l2cap_connect_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
{
	struct l2cap_conn_rsp *rsp = (struct l2cap_conn_rsp *) data;
	u16 scid, dcid, result, status;
	struct sock *sk;
	u8 req[128];

	scid = __le16_to_cpu(rsp->scid);
	dcid = __le16_to_cpu(rsp->dcid);
	result = __le16_to_cpu(rsp->result);
	status = __le16_to_cpu(rsp->status);

	BT_DBG("dcid 0x%4.4x scid 0x%4.4x result 0x%2.2x status 0x%2.2x", dcid, scid, result, status);

	if (scid) {
		/* Normal case: look the channel up by our source CID. */
		if (!(sk = l2cap_get_chan_by_scid(&conn->chan_list, scid)))
			return 0;
	} else {
		/* No scid given: fall back to the command identifier. */
		if (!(sk = l2cap_get_chan_by_ident(&conn->chan_list, cmd->ident)))
			return 0;
	}

	switch (result) {
	case L2CAP_CR_SUCCESS:
		sk->sk_state = BT_CONFIG;
		l2cap_pi(sk)->ident = 0;
		l2cap_pi(sk)->dcid = dcid;
		l2cap_pi(sk)->conf_state |= L2CAP_CONF_REQ_SENT;

		l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
				l2cap_build_conf_req(sk, req), req);
		break;

	case L2CAP_CR_PEND:
		/* Wait for the final response. */
		break;

	default:
		l2cap_chan_del(sk, ECONNREFUSED);
		break;
	}

	bh_unlock_sock(sk);
	return 0;
}
1519
1520 static inline int l2cap_config_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
1521 {
1522 struct l2cap_conf_req *req = (struct l2cap_conf_req *) data;
1523 u16 dcid, flags;
1524 u8 rsp[64];
1525 struct sock *sk;
1526 int result;
1527
1528 dcid = __le16_to_cpu(req->dcid);
1529 flags = __le16_to_cpu(req->flags);
1530
1531 BT_DBG("dcid 0x%4.4x flags 0x%2.2x", dcid, flags);
1532
1533 if (!(sk = l2cap_get_chan_by_scid(&conn->chan_list, dcid)))
1534 return -ENOENT;
1535
1536 l2cap_parse_conf_req(sk, req->data, cmd->len - sizeof(*req));
1537
1538 if (flags & 0x0001) {
1539 /* Incomplete config. Send empty response. */
1540 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
1541 l2cap_build_conf_rsp(sk, rsp, NULL), rsp);
1542 goto unlock;
1543 }
1544
1545 /* Complete config. */
1546 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
1547 l2cap_build_conf_rsp(sk, rsp, &result), rsp);
1548
1549 if (result)
1550 goto unlock;
1551
1552 /* Output config done */
1553 l2cap_pi(sk)->conf_state |= L2CAP_CONF_OUTPUT_DONE;
1554
1555 if (l2cap_pi(sk)->conf_state & L2CAP_CONF_INPUT_DONE) {
1556 sk->sk_state = BT_CONNECTED;
1557 l2cap_chan_ready(sk);
1558 } else if (!(l2cap_pi(sk)->conf_state & L2CAP_CONF_REQ_SENT)) {
1559 u8 req[64];
1560 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
1561 l2cap_build_conf_req(sk, req), req);
1562 }
1563
1564 unlock:
1565 bh_unlock_sock(sk);
1566 return 0;
1567 }
1568
/* Handle an L2CAP Configure Response from the peer.
 *
 * Success completes our half of the configuration; an unacceptable
 * result is retried a bounded number of times by resending the same
 * request; once retries are exhausted (or on any other result) the
 * channel is disconnected.
 */
static inline int l2cap_config_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
{
	struct l2cap_conf_rsp *rsp = (struct l2cap_conf_rsp *)data;
	u16 scid, flags, result;
	struct sock *sk;

	scid = __le16_to_cpu(rsp->scid);
	flags = __le16_to_cpu(rsp->flags);
	result = __le16_to_cpu(rsp->result);

	BT_DBG("scid 0x%4.4x flags 0x%2.2x result 0x%2.2x", scid, flags, result);

	if (!(sk = l2cap_get_chan_by_scid(&conn->chan_list, scid)))
		return 0;

	switch (result) {
	case L2CAP_CONF_SUCCESS:
		break;

	case L2CAP_CONF_UNACCEPT:
		if (++l2cap_pi(sk)->conf_retry < L2CAP_CONF_MAX_RETRIES) {
			char req[128];
			/* It does not make sense to adjust L2CAP parameters
			 * that are currently defined in the spec. We simply
			 * resend config request that we sent earlier. It is
			 * stupid, but it helps qualification testing which
			 * expects at least some response from us. */
			l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
					l2cap_build_conf_req(sk, req), req);
			goto done;
		}
		/* fall through -- retries exhausted, give up */

	default:
		/* Unrecoverable: start a disconnect and arm a timer in
		 * case the peer never answers. */
		sk->sk_state = BT_DISCONN;
		sk->sk_err = ECONNRESET;
		l2cap_sock_set_timer(sk, HZ * 5);
		{
			struct l2cap_disconn_req req;
			req.dcid = __cpu_to_le16(l2cap_pi(sk)->dcid);
			req.scid = __cpu_to_le16(l2cap_pi(sk)->scid);
			l2cap_send_cmd(conn, l2cap_get_ident(conn),
					L2CAP_DISCONN_REQ, sizeof(req), &req);
		}
		goto done;
	}

	if (flags & 0x01)
		/* Continuation flag set: more response data to come. */
		goto done;

	/* Input config done */
	l2cap_pi(sk)->conf_state |= L2CAP_CONF_INPUT_DONE;

	if (l2cap_pi(sk)->conf_state & L2CAP_CONF_OUTPUT_DONE) {
		sk->sk_state = BT_CONNECTED;
		l2cap_chan_ready(sk);
	}

done:
	bh_unlock_sock(sk);
	return 0;
}
1630
/* Handle an L2CAP Disconnection Request: acknowledge it, tear the
 * channel down and kill the socket.  The socket is unlocked before
 * l2cap_sock_kill(), matching the lock ordering used elsewhere in
 * this file.
 */
static inline int l2cap_disconnect_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
{
	struct l2cap_disconn_req *req = (struct l2cap_disconn_req *) data;
	struct l2cap_disconn_rsp rsp;
	u16 dcid, scid;
	struct sock *sk;

	scid = __le16_to_cpu(req->scid);
	dcid = __le16_to_cpu(req->dcid);

	BT_DBG("scid 0x%4.4x dcid 0x%4.4x", scid, dcid);

	/* The peer's dcid is our scid. */
	if (!(sk = l2cap_get_chan_by_scid(&conn->chan_list, dcid)))
		return 0;

	rsp.dcid = __cpu_to_le16(l2cap_pi(sk)->scid);
	rsp.scid = __cpu_to_le16(l2cap_pi(sk)->dcid);
	l2cap_send_cmd(conn, cmd->ident, L2CAP_DISCONN_RSP, sizeof(rsp), &rsp);

	/* No more data may flow in either direction. */
	sk->sk_shutdown = SHUTDOWN_MASK;

	l2cap_chan_del(sk, ECONNRESET);
	bh_unlock_sock(sk);

	l2cap_sock_kill(sk);
	return 0;
}
1658
/* Handle an L2CAP Disconnection Response: the peer confirmed our
 * disconnect, so remove the channel (no error reported to the socket)
 * and kill it after dropping the lock.
 */
static inline int l2cap_disconnect_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
{
	struct l2cap_disconn_rsp *rsp = (struct l2cap_disconn_rsp *) data;
	u16 dcid, scid;
	struct sock *sk;

	scid = __le16_to_cpu(rsp->scid);
	dcid = __le16_to_cpu(rsp->dcid);

	BT_DBG("dcid 0x%4.4x scid 0x%4.4x", dcid, scid);

	if (!(sk = l2cap_get_chan_by_scid(&conn->chan_list, scid)))
		return 0;

	l2cap_chan_del(sk, 0);
	bh_unlock_sock(sk);

	l2cap_sock_kill(sk);
	return 0;
}
1679
1680 static inline int l2cap_information_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
1681 {
1682 struct l2cap_info_req *req = (struct l2cap_info_req *) data;
1683 struct l2cap_info_rsp rsp;
1684 u16 type;
1685
1686 type = __le16_to_cpu(req->type);
1687
1688 BT_DBG("type 0x%4.4x", type);
1689
1690 rsp.type = __cpu_to_le16(type);
1691 rsp.result = __cpu_to_le16(L2CAP_IR_NOTSUPP);
1692 l2cap_send_cmd(conn, cmd->ident, L2CAP_INFO_RSP, sizeof(rsp), &rsp);
1693
1694 return 0;
1695 }
1696
1697 static inline int l2cap_information_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
1698 {
1699 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) data;
1700 u16 type, result;
1701
1702 type = __le16_to_cpu(rsp->type);
1703 result = __le16_to_cpu(rsp->result);
1704
1705 BT_DBG("type 0x%4.4x result 0x%2.2x", type, result);
1706
1707 return 0;
1708 }
1709
/* Process a frame received on the signalling channel (CID 0x0001).
 *
 * The frame may carry several commands back to back; each one is
 * dispatched to its handler, and a Command Reject is returned to the
 * peer whenever a handler fails.  A copy is first offered to raw
 * sockets via l2cap_raw_recv().  Consumes the skb.
 */
static inline void l2cap_sig_channel(struct l2cap_conn *conn, struct sk_buff *skb)
{
	u8 *data = skb->data;
	int len = skb->len;
	struct l2cap_cmd_hdr cmd;
	int err = 0;

	l2cap_raw_recv(conn, skb);

	while (len >= L2CAP_CMD_HDR_SIZE) {
		/* Copy the header out: it may be unaligned in the skb. */
		memcpy(&cmd, data, L2CAP_CMD_HDR_SIZE);
		data += L2CAP_CMD_HDR_SIZE;
		len -= L2CAP_CMD_HDR_SIZE;

		cmd.len = __le16_to_cpu(cmd.len);

		BT_DBG("code 0x%2.2x len %d id 0x%2.2x", cmd.code, cmd.len, cmd.ident);

		/* A command longer than the remaining payload, or with
		 * the reserved ident 0, means a corrupted frame. */
		if (cmd.len > len || !cmd.ident) {
			BT_DBG("corrupted command");
			break;
		}

		switch (cmd.code) {
		case L2CAP_COMMAND_REJ:
			/* FIXME: We should process this */
			break;

		case L2CAP_CONN_REQ:
			err = l2cap_connect_req(conn, &cmd, data);
			break;

		case L2CAP_CONN_RSP:
			err = l2cap_connect_rsp(conn, &cmd, data);
			break;

		case L2CAP_CONF_REQ:
			err = l2cap_config_req(conn, &cmd, data);
			break;

		case L2CAP_CONF_RSP:
			err = l2cap_config_rsp(conn, &cmd, data);
			break;

		case L2CAP_DISCONN_REQ:
			err = l2cap_disconnect_req(conn, &cmd, data);
			break;

		case L2CAP_DISCONN_RSP:
			err = l2cap_disconnect_rsp(conn, &cmd, data);
			break;

		case L2CAP_ECHO_REQ:
			/* Echo the payload straight back. */
			l2cap_send_cmd(conn, cmd.ident, L2CAP_ECHO_RSP, cmd.len, data);
			break;

		case L2CAP_ECHO_RSP:
			break;

		case L2CAP_INFO_REQ:
			err = l2cap_information_req(conn, &cmd, data);
			break;

		case L2CAP_INFO_RSP:
			err = l2cap_information_rsp(conn, &cmd, data);
			break;

		default:
			BT_ERR("Unknown signaling command 0x%2.2x", cmd.code);
			err = -EINVAL;
			break;
		}

		if (err) {
			struct l2cap_cmd_rej rej;
			BT_DBG("error %d", err);

			/* FIXME: Map err to a valid reason */
			rej.reason = __cpu_to_le16(0);
			l2cap_send_cmd(conn, cmd.ident, L2CAP_COMMAND_REJ, sizeof(rej), &rej);
		}

		/* Advance past this command's payload. */
		data += cmd.len;
		len -= cmd.len;
	}

	kfree_skb(skb);
}
1798
/* Deliver a connection-oriented data frame to its channel's socket.
 *
 * Frames for an unknown CID, for a channel that is not connected, or
 * larger than the channel's incoming MTU are dropped.  Consumes the
 * skb in every case (sock_queue_rcv_skb takes ownership on success).
 */
static inline int l2cap_data_channel(struct l2cap_conn *conn, u16 cid, struct sk_buff *skb)
{
	struct sock *sk;

	sk = l2cap_get_chan_by_scid(&conn->chan_list, cid);
	if (!sk) {
		BT_DBG("unknown cid 0x%4.4x", cid);
		goto drop;
	}

	BT_DBG("sk %p, len %d", sk, skb->len);

	if (sk->sk_state != BT_CONNECTED)
		goto drop;

	if (l2cap_pi(sk)->imtu < skb->len)
		goto drop;

	/* If socket recv buffers overflows we drop data here
	 * which is *bad* because L2CAP has to be reliable.
	 * But we don't have any other choice. L2CAP doesn't
	 * provide flow control mechanism. */

	if (!sock_queue_rcv_skb(sk, skb))
		goto done;

drop:
	kfree_skb(skb);

done:
	if (sk)
		bh_unlock_sock(sk);

	return 0;
}
1834
1835 static inline int l2cap_conless_channel(struct l2cap_conn *conn, u16 psm, struct sk_buff *skb)
1836 {
1837 struct sock *sk;
1838
1839 sk = l2cap_get_sock_by_psm(0, psm, conn->src);
1840 if (!sk)
1841 goto drop;
1842
1843 BT_DBG("sk %p, len %d", sk, skb->len);
1844
1845 if (sk->sk_state != BT_BOUND && sk->sk_state != BT_CONNECTED)
1846 goto drop;
1847
1848 if (l2cap_pi(sk)->imtu < skb->len)
1849 goto drop;
1850
1851 if (!sock_queue_rcv_skb(sk, skb))
1852 goto done;
1853
1854 drop:
1855 kfree_skb(skb);
1856
1857 done:
1858 if (sk) bh_unlock_sock(sk);
1859 return 0;
1860 }
1861
/* Demultiplex one complete L2CAP frame by channel identifier. */
static void l2cap_recv_frame(struct l2cap_conn *conn, struct sk_buff *skb)
{
	struct l2cap_hdr *lh = (void *) skb->data;
	u16 cid, psm, len;

	/* skb_pull() only advances the data pointer; lh keeps pointing
	 * at the (still valid) header bytes. */
	skb_pull(skb, L2CAP_HDR_SIZE);
	cid = __le16_to_cpu(lh->cid);
	len = __le16_to_cpu(lh->len);

	BT_DBG("len %d, cid 0x%4.4x", len, cid);

	switch (cid) {
	case 0x0001:
		/* Signalling channel. */
		l2cap_sig_channel(conn, skb);
		break;

	case 0x0002:
		/* Connectionless channel: the PSM may be unaligned and
		 * is kept in wire byte order (used as an opaque value
		 * throughout this file). */
		psm = get_unaligned((u16 *) skb->data);
		skb_pull(skb, 2);
		l2cap_conless_channel(conn, psm, skb);
		break;

	default:
		l2cap_data_channel(conn, cid, skb);
		break;
	}
}
1889
1890 /* ---- L2CAP interface with lower layer (HCI) ---- */
1891
/* HCI callback: should we accept an incoming ACL connection from
 * 'bdaddr'?  Returns the combined link-mode mask of our listening
 * sockets (exact local-address matches take precedence over wildcard
 * BDADDR_ANY listeners), or 0 to ignore the connection.
 */
static int l2cap_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
{
	int exact = 0, lm1 = 0, lm2 = 0;
	register struct sock *sk;
	struct hlist_node *node;

	if (type != ACL_LINK)
		return 0;

	BT_DBG("hdev %s, bdaddr %s", hdev->name, batostr(bdaddr));

	/* Find listening sockets and check their link_mode */
	read_lock(&l2cap_sk_list.lock);
	sk_for_each(sk, node, &l2cap_sk_list.head) {
		if (sk->sk_state != BT_LISTEN)
			continue;

		if (!bacmp(&bt_sk(sk)->src, &hdev->bdaddr)) {
			/* Bound to this adapter's own address. */
			lm1 |= (HCI_LM_ACCEPT | l2cap_pi(sk)->link_mode);
			exact++;
		} else if (!bacmp(&bt_sk(sk)->src, BDADDR_ANY))
			/* Wildcard listener: only used as a fallback. */
			lm2 |= (HCI_LM_ACCEPT | l2cap_pi(sk)->link_mode);
	}
	read_unlock(&l2cap_sk_list.lock);

	return exact ? lm1 : lm2;
}
1919
1920 static int l2cap_connect_cfm(struct hci_conn *hcon, u8 status)
1921 {
1922 struct l2cap_conn *conn;
1923
1924 BT_DBG("hcon %p bdaddr %s status %d", hcon, batostr(&hcon->dst), status);
1925
1926 if (hcon->type != ACL_LINK)
1927 return 0;
1928
1929 if (!status) {
1930 conn = l2cap_conn_add(hcon, status);
1931 if (conn)
1932 l2cap_conn_ready(conn);
1933 } else
1934 l2cap_conn_del(hcon, bt_err(status));
1935
1936 return 0;
1937 }
1938
1939 static int l2cap_disconn_ind(struct hci_conn *hcon, u8 reason)
1940 {
1941 BT_DBG("hcon %p reason %d", hcon, reason);
1942
1943 if (hcon->type != ACL_LINK)
1944 return 0;
1945
1946 l2cap_conn_del(hcon, bt_err(reason));
1947
1948 return 0;
1949 }
1950
1951 static int l2cap_auth_cfm(struct hci_conn *hcon, u8 status)
1952 {
1953 struct l2cap_chan_list *l;
1954 struct l2cap_conn *conn = conn = hcon->l2cap_data;
1955 struct l2cap_conn_rsp rsp;
1956 struct sock *sk;
1957 int result;
1958
1959 if (!conn)
1960 return 0;
1961
1962 l = &conn->chan_list;
1963
1964 BT_DBG("conn %p", conn);
1965
1966 read_lock(&l->lock);
1967
1968 for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
1969 bh_lock_sock(sk);
1970
1971 if (sk->sk_state != BT_CONNECT2 ||
1972 (l2cap_pi(sk)->link_mode & L2CAP_LM_ENCRYPT) ||
1973 (l2cap_pi(sk)->link_mode & L2CAP_LM_SECURE)) {
1974 bh_unlock_sock(sk);
1975 continue;
1976 }
1977
1978 if (!status) {
1979 sk->sk_state = BT_CONFIG;
1980 result = 0;
1981 } else {
1982 sk->sk_state = BT_DISCONN;
1983 l2cap_sock_set_timer(sk, HZ/10);
1984 result = L2CAP_CR_SEC_BLOCK;
1985 }
1986
1987 rsp.scid = __cpu_to_le16(l2cap_pi(sk)->dcid);
1988 rsp.dcid = __cpu_to_le16(l2cap_pi(sk)->scid);
1989 rsp.result = __cpu_to_le16(result);
1990 rsp.status = __cpu_to_le16(0);
1991 l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
1992 L2CAP_CONN_RSP, sizeof(rsp), &rsp);
1993
1994 bh_unlock_sock(sk);
1995 }
1996
1997 read_unlock(&l->lock);
1998 return 0;
1999 }
2000
/* HCI callback: encryption on the ACL link changed/completed.
 *
 * Finish every channel parked in BT_CONNECT2, sending the deferred
 * Connection Response with the outcome; secure channels additionally
 * get their link key changed.
 */
static int l2cap_encrypt_cfm(struct hci_conn *hcon, u8 status)
{
	struct l2cap_chan_list *l;
	struct l2cap_conn *conn = hcon->l2cap_data;
	struct l2cap_conn_rsp rsp;
	struct sock *sk;
	int result;

	if (!conn)
		return 0;

	l = &conn->chan_list;

	BT_DBG("conn %p", conn);

	read_lock(&l->lock);

	for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
		bh_lock_sock(sk);

		/* Only channels still waiting for security matter. */
		if (sk->sk_state != BT_CONNECT2) {
			bh_unlock_sock(sk);
			continue;
		}

		if (!status) {
			sk->sk_state = BT_CONFIG;
			result = 0;
		} else {
			/* Encryption failed: schedule teardown. */
			sk->sk_state = BT_DISCONN;
			l2cap_sock_set_timer(sk, HZ/10);
			result = L2CAP_CR_SEC_BLOCK;
		}

		rsp.scid = __cpu_to_le16(l2cap_pi(sk)->dcid);
		rsp.dcid = __cpu_to_le16(l2cap_pi(sk)->scid);
		rsp.result = __cpu_to_le16(result);
		rsp.status = __cpu_to_le16(0);
		l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
				L2CAP_CONN_RSP, sizeof(rsp), &rsp);

		if (l2cap_pi(sk)->link_mode & L2CAP_LM_SECURE)
			hci_conn_change_link_key(hcon);

		bh_unlock_sock(sk);
	}

	read_unlock(&l->lock);
	return 0;
}
2051
/* HCI callback: one ACL data packet arrived for this link.
 *
 * L2CAP frames can be fragmented across several ACL packets, so this
 * reassembles them: an ACL_START packet either holds a complete frame
 * (dispatched immediately) or opens a reassembly buffer (conn->rx_skb
 * / conn->rx_len); continuation packets are appended until the frame
 * is complete.  Any protocol violation resets the reassembly state and
 * marks the connection unreliable.  Consumes the skb.
 */
static int l2cap_recv_acldata(struct hci_conn *hcon, struct sk_buff *skb, u16 flags)
{
	struct l2cap_conn *conn = hcon->l2cap_data;

	if (!conn && !(conn = l2cap_conn_add(hcon, 0)))
		goto drop;

	BT_DBG("conn %p len %d flags 0x%x", conn, skb->len, flags);

	if (flags & ACL_START) {
		struct l2cap_hdr *hdr;
		int len;

		/* A start frame while reassembly is in progress means
		 * the previous frame was truncated: throw it away. */
		if (conn->rx_len) {
			BT_ERR("Unexpected start frame (len %d)", skb->len);
			kfree_skb(conn->rx_skb);
			conn->rx_skb = NULL;
			conn->rx_len = 0;
			l2cap_conn_unreliable(conn, ECOMM);
		}

		/* Need at least the 16-bit length field. */
		if (skb->len < 2) {
			BT_ERR("Frame is too short (len %d)", skb->len);
			l2cap_conn_unreliable(conn, ECOMM);
			goto drop;
		}

		hdr = (struct l2cap_hdr *) skb->data;
		len = __le16_to_cpu(hdr->len) + L2CAP_HDR_SIZE;

		if (len == skb->len) {
			/* Complete frame received */
			l2cap_recv_frame(conn, skb);
			return 0;
		}

		BT_DBG("Start: total len %d, frag len %d", len, skb->len);

		if (skb->len > len) {
			BT_ERR("Frame is too long (len %d, expected len %d)",
				skb->len, len);
			l2cap_conn_unreliable(conn, ECOMM);
			goto drop;
		}

		/* Allocate skb for the complete frame (with header) */
		if (!(conn->rx_skb = bt_skb_alloc(len, GFP_ATOMIC)))
			goto drop;

		memcpy(skb_put(conn->rx_skb, skb->len), skb->data, skb->len);
		conn->rx_len = len - skb->len;
	} else {
		BT_DBG("Cont: frag len %d (expecting %d)", skb->len, conn->rx_len);

		/* Continuation without a pending frame: protocol error. */
		if (!conn->rx_len) {
			BT_ERR("Unexpected continuation frame (len %d)", skb->len);
			l2cap_conn_unreliable(conn, ECOMM);
			goto drop;
		}

		if (skb->len > conn->rx_len) {
			BT_ERR("Fragment is too long (len %d, expected %d)",
				skb->len, conn->rx_len);
			kfree_skb(conn->rx_skb);
			conn->rx_skb = NULL;
			conn->rx_len = 0;
			l2cap_conn_unreliable(conn, ECOMM);
			goto drop;
		}

		memcpy(skb_put(conn->rx_skb, skb->len), skb->data, skb->len);
		conn->rx_len -= skb->len;

		if (!conn->rx_len) {
			/* Complete frame received */
			l2cap_recv_frame(conn, conn->rx_skb);
			conn->rx_skb = NULL;
		}
	}

drop:
	kfree_skb(skb);
	return 0;
}
2136
/* sysfs class attribute: dump one line per L2CAP socket (addresses,
 * state, PSM, CIDs, MTUs, link mode).
 *
 * NOTE(review): sprintf into 'buf' is unbounded; presumably the sysfs
 * core hands in a PAGE_SIZE buffer and the socket list stays small
 * enough -- verify against the sysfs show() contract for this kernel.
 */
static ssize_t l2cap_sysfs_show(struct class *dev, char *buf)
{
	struct sock *sk;
	struct hlist_node *node;
	char *str = buf;

	read_lock_bh(&l2cap_sk_list.lock);

	sk_for_each(sk, node, &l2cap_sk_list.head) {
		struct l2cap_pinfo *pi = l2cap_pi(sk);

		str += sprintf(str, "%s %s %d %d 0x%4.4x 0x%4.4x %d %d 0x%x\n",
				batostr(&bt_sk(sk)->src), batostr(&bt_sk(sk)->dst),
				sk->sk_state, pi->psm, pi->scid, pi->dcid, pi->imtu,
				pi->omtu, pi->link_mode);
	}

	read_unlock_bh(&l2cap_sk_list.lock);

	return (str - buf);
}

/* Exposes the listing above as a read-only 'l2cap' class attribute. */
static CLASS_ATTR(l2cap, S_IRUGO, l2cap_sysfs_show, NULL);
2160
/* Socket operations for BTPROTO_L2CAP sockets; recvmsg and poll use
 * the generic Bluetooth helpers, unsupported operations use the
 * sock_no_* stubs. */
static const struct proto_ops l2cap_sock_ops = {
	.family		= PF_BLUETOOTH,
	.owner		= THIS_MODULE,
	.release	= l2cap_sock_release,
	.bind		= l2cap_sock_bind,
	.connect	= l2cap_sock_connect,
	.listen		= l2cap_sock_listen,
	.accept		= l2cap_sock_accept,
	.getname	= l2cap_sock_getname,
	.sendmsg	= l2cap_sock_sendmsg,
	.recvmsg	= bt_sock_recvmsg,
	.poll		= bt_sock_poll,
	.mmap		= sock_no_mmap,
	.socketpair	= sock_no_socketpair,
	.ioctl		= sock_no_ioctl,
	.shutdown	= l2cap_sock_shutdown,
	.setsockopt	= l2cap_sock_setsockopt,
	.getsockopt	= l2cap_sock_getsockopt
};
2180
/* Registered with the Bluetooth socket layer so socket(PF_BLUETOOTH,
 * ..., BTPROTO_L2CAP) is routed to l2cap_sock_create(). */
static struct net_proto_family l2cap_sock_family_ops = {
	.family	= PF_BLUETOOTH,
	.owner	= THIS_MODULE,
	.create	= l2cap_sock_create,
};
2186
/* Hooks L2CAP into the HCI core: connection and security events plus
 * incoming ACL data are delivered through these callbacks. */
static struct hci_proto l2cap_hci_proto = {
	.name		= "L2CAP",
	.id		= HCI_PROTO_L2CAP,
	.connect_ind	= l2cap_connect_ind,
	.connect_cfm	= l2cap_connect_cfm,
	.disconn_ind	= l2cap_disconn_ind,
	.auth_cfm	= l2cap_auth_cfm,
	.encrypt_cfm	= l2cap_encrypt_cfm,
	.recv_acldata	= l2cap_recv_acldata
};
2197
/* Module init: register the proto, the socket family and the HCI
 * protocol hooks, in that order, unwinding on failure.  The sysfs
 * attribute is best-effort only.
 */
static int __init l2cap_init(void)
{
	int err;

	err = proto_register(&l2cap_proto, 0);
	if (err < 0)
		return err;

	err = bt_sock_register(BTPROTO_L2CAP, &l2cap_sock_family_ops);
	if (err < 0) {
		BT_ERR("L2CAP socket registration failed");
		goto error;
	}

	err = hci_register_proto(&l2cap_hci_proto);
	if (err < 0) {
		BT_ERR("L2CAP protocol registration failed");
		/* Roll back the socket family before the common path. */
		bt_sock_unregister(BTPROTO_L2CAP);
		goto error;
	}

	/* Non-fatal: the stack works without the info file. */
	if (class_create_file(bt_class, &class_attr_l2cap) < 0)
		BT_ERR("Failed to create L2CAP info file");

	BT_INFO("L2CAP ver %s", VERSION);
	BT_INFO("L2CAP socket layer initialized");

	return 0;

error:
	proto_unregister(&l2cap_proto);
	return err;
}
2231
/* Module exit: undo l2cap_init() registrations in reverse order of
 * creation (sysfs file, socket family, HCI protocol, proto). */
static void __exit l2cap_exit(void)
{
	class_remove_file(bt_class, &class_attr_l2cap);

	if (bt_sock_unregister(BTPROTO_L2CAP) < 0)
		BT_ERR("L2CAP socket unregistration failed");

	if (hci_unregister_proto(&l2cap_hci_proto) < 0)
		BT_ERR("L2CAP protocol unregistration failed");

	proto_unregister(&l2cap_proto);
}
2244
/* Intentionally empty: exported so that modules which only use L2CAP
 * sockets (and no other symbol from this module) can reference it and
 * thereby trigger automatic loading of the L2CAP module.
 */
void l2cap_load(void)
{
}
EXPORT_SYMBOL(l2cap_load);
2253
2254 module_init(l2cap_init);
2255 module_exit(l2cap_exit);
2256
2257 MODULE_AUTHOR("Maxim Krasnyansky <maxk@qualcomm.com>, Marcel Holtmann <marcel@holtmann.org>");
2258 MODULE_DESCRIPTION("Bluetooth L2CAP ver " VERSION);
2259 MODULE_VERSION(VERSION);
2260 MODULE_LICENSE("GPL");
2261 MODULE_ALIAS("bt-proto-0");