[Bluetooth] Finish L2CAP configuration only with acceptable settings
net/bluetooth/l2cap.c (mt8127/android_kernel_alcatel_ttab.git)
1 /*
2 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (C) 2000-2001 Qualcomm Incorporated
4
5 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
6
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License version 2 as
9 published by the Free Software Foundation;
10
11 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
12 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
13 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
14 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
15 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
16 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
17 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
18 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
19
20 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
21 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
22 SOFTWARE IS DISCLAIMED.
23 */
24
25 /* Bluetooth L2CAP core and sockets. */
26
27 #include <linux/module.h>
28
29 #include <linux/types.h>
30 #include <linux/capability.h>
31 #include <linux/errno.h>
32 #include <linux/kernel.h>
33 #include <linux/sched.h>
34 #include <linux/slab.h>
35 #include <linux/poll.h>
36 #include <linux/fcntl.h>
37 #include <linux/init.h>
38 #include <linux/interrupt.h>
39 #include <linux/socket.h>
40 #include <linux/skbuff.h>
41 #include <linux/list.h>
42 #include <linux/device.h>
43 #include <net/sock.h>
44
45 #include <asm/system.h>
46 #include <asm/uaccess.h>
47 #include <asm/unaligned.h>
48
49 #include <net/bluetooth/bluetooth.h>
50 #include <net/bluetooth/hci_core.h>
51 #include <net/bluetooth/l2cap.h>
52
53 #ifndef CONFIG_BT_L2CAP_DEBUG
54 #undef BT_DBG
55 #define BT_DBG(D...)
56 #endif
57
58 #define VERSION "2.8"
59
60 static const struct proto_ops l2cap_sock_ops;
61
62 static struct bt_sock_list l2cap_sk_list = {
63 .lock = RW_LOCK_UNLOCKED
64 };
65
66 static void __l2cap_sock_close(struct sock *sk, int reason);
67 static void l2cap_sock_close(struct sock *sk);
68 static void l2cap_sock_kill(struct sock *sk);
69
70 static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
71 u8 code, u8 ident, u16 dlen, void *data);
72
73 /* ---- L2CAP timers ---- */
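/* Socket timer callback: runs in timer context, so only bh_lock_sock() is
 * taken while the channel is closed with ETIMEDOUT; the final sock_put()
 * releases the reference held while the timer was pending. */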
74 static void l2cap_sock_timeout(unsigned long arg)
75 {
76 struct sock *sk = (struct sock *) arg;
77
78 BT_DBG("sock %p state %d", sk, sk->sk_state);
79
80 bh_lock_sock(sk);
81 __l2cap_sock_close(sk, ETIMEDOUT);
82 bh_unlock_sock(sk);
83
84 l2cap_sock_kill(sk);
85 sock_put(sk);
86 }
87
88 static void l2cap_sock_set_timer(struct sock *sk, long timeout)
89 {
90 BT_DBG("sk %p state %d timeout %ld", sk, sk->sk_state, timeout);
91 sk_reset_timer(sk, &sk->sk_timer, jiffies + timeout);
92 }
93
94 static void l2cap_sock_clear_timer(struct sock *sk)
95 {
96 BT_DBG("sock %p state %d", sk, sk->sk_state);
97 sk_stop_timer(sk, &sk->sk_timer);
98 }
99
100 static void l2cap_sock_init_timer(struct sock *sk)
101 {
102 init_timer(&sk->sk_timer);
103 sk->sk_timer.function = l2cap_sock_timeout;
104 sk->sk_timer.data = (unsigned long)sk;
105 }
106
107 /* ---- L2CAP channels ---- */
108 static struct sock *__l2cap_get_chan_by_dcid(struct l2cap_chan_list *l, u16 cid)
109 {
110 struct sock *s;
111 for (s = l->head; s; s = l2cap_pi(s)->next_c) {
112 if (l2cap_pi(s)->dcid == cid)
113 break;
114 }
115 return s;
116 }
117
118 static struct sock *__l2cap_get_chan_by_scid(struct l2cap_chan_list *l, u16 cid)
119 {
120 struct sock *s;
121 for (s = l->head; s; s = l2cap_pi(s)->next_c) {
122 if (l2cap_pi(s)->scid == cid)
123 break;
124 }
125 return s;
126 }
127
128 /* Find channel with given SCID.
129 * Returns locked socket */
130 static inline struct sock *l2cap_get_chan_by_scid(struct l2cap_chan_list *l, u16 cid)
131 {
132 struct sock *s;
133 read_lock(&l->lock);
134 s = __l2cap_get_chan_by_scid(l, cid);
135 if (s) bh_lock_sock(s);
136 read_unlock(&l->lock);
137 return s;
138 }
139
140 static struct sock *__l2cap_get_chan_by_ident(struct l2cap_chan_list *l, u8 ident)
141 {
142 struct sock *s;
143 for (s = l->head; s; s = l2cap_pi(s)->next_c) {
144 if (l2cap_pi(s)->ident == ident)
145 break;
146 }
147 return s;
148 }
149
150 static inline struct sock *l2cap_get_chan_by_ident(struct l2cap_chan_list *l, u8 ident)
151 {
152 struct sock *s;
153 read_lock(&l->lock);
154 s = __l2cap_get_chan_by_ident(l, ident);
155 if (s) bh_lock_sock(s);
156 read_unlock(&l->lock);
157 return s;
158 }
159
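/* Allocate the first unused source CID in the dynamic range (0x0040-0xfffe).
 * Callers hold the channel list lock; 0 is returned if the range is exhausted. */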
160 static u16 l2cap_alloc_cid(struct l2cap_chan_list *l)
161 {
162 u16 cid = 0x0040;
163
164 for (; cid < 0xffff; cid++) {
165 		if (!__l2cap_get_chan_by_scid(l, cid))
166 return cid;
167 }
168
169 return 0;
170 }
171
172 static inline void __l2cap_chan_link(struct l2cap_chan_list *l, struct sock *sk)
173 {
174 sock_hold(sk);
175
176 if (l->head)
177 l2cap_pi(l->head)->prev_c = sk;
178
179 l2cap_pi(sk)->next_c = l->head;
180 l2cap_pi(sk)->prev_c = NULL;
181 l->head = sk;
182 }
183
184 static inline void l2cap_chan_unlink(struct l2cap_chan_list *l, struct sock *sk)
185 {
186 struct sock *next = l2cap_pi(sk)->next_c, *prev = l2cap_pi(sk)->prev_c;
187
188 write_lock_bh(&l->lock);
189 if (sk == l->head)
190 l->head = next;
191
192 if (next)
193 l2cap_pi(next)->prev_c = prev;
194 if (prev)
195 l2cap_pi(prev)->next_c = next;
196 write_unlock_bh(&l->lock);
197
198 __sock_put(sk);
199 }
200
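/* Attach a channel to a connection: connection-oriented (SEQPACKET) sockets
 * get a dynamically allocated source CID, connectionless (DGRAM) sockets use
 * the fixed CID 0x0002, and raw sockets sit on the signalling CID 0x0001.
 * The channel is then linked into the connection's channel list and, for
 * incoming connections, queued on the parent's accept queue. */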
201 static void __l2cap_chan_add(struct l2cap_conn *conn, struct sock *sk, struct sock *parent)
202 {
203 struct l2cap_chan_list *l = &conn->chan_list;
204
205 BT_DBG("conn %p, psm 0x%2.2x, dcid 0x%4.4x", conn, l2cap_pi(sk)->psm, l2cap_pi(sk)->dcid);
206
207 l2cap_pi(sk)->conn = conn;
208
209 if (sk->sk_type == SOCK_SEQPACKET) {
210 /* Alloc CID for connection-oriented socket */
211 l2cap_pi(sk)->scid = l2cap_alloc_cid(l);
212 } else if (sk->sk_type == SOCK_DGRAM) {
213 /* Connectionless socket */
214 l2cap_pi(sk)->scid = 0x0002;
215 l2cap_pi(sk)->dcid = 0x0002;
216 l2cap_pi(sk)->omtu = L2CAP_DEFAULT_MTU;
217 } else {
218 /* Raw socket can send/recv signalling messages only */
219 l2cap_pi(sk)->scid = 0x0001;
220 l2cap_pi(sk)->dcid = 0x0001;
221 l2cap_pi(sk)->omtu = L2CAP_DEFAULT_MTU;
222 }
223
224 __l2cap_chan_link(l, sk);
225
226 if (parent)
227 bt_accept_enqueue(parent, sk);
228 }
229
230 /* Delete channel.
231 * Must be called on the locked socket. */
232 static void l2cap_chan_del(struct sock *sk, int err)
233 {
234 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
235 struct sock *parent = bt_sk(sk)->parent;
236
237 l2cap_sock_clear_timer(sk);
238
239 BT_DBG("sk %p, conn %p, err %d", sk, conn, err);
240
241 if (conn) {
242 /* Unlink from channel list */
243 l2cap_chan_unlink(&conn->chan_list, sk);
244 l2cap_pi(sk)->conn = NULL;
245 hci_conn_put(conn->hcon);
246 }
247
248 sk->sk_state = BT_CLOSED;
249 sock_set_flag(sk, SOCK_ZAPPED);
250
251 if (err)
252 sk->sk_err = err;
253
254 if (parent) {
255 bt_accept_unlink(sk);
256 parent->sk_data_ready(parent, 0);
257 } else
258 sk->sk_state_change(sk);
259 }
260
261 /* ---- L2CAP connections ---- */
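/* One l2cap_conn hangs off each ACL link (hcon->l2cap_data). It caches the
 * ACL MTU and the local/remote addresses and owns the list of channels
 * multiplexed over that link. */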
262 static struct l2cap_conn *l2cap_conn_add(struct hci_conn *hcon, u8 status)
263 {
264 struct l2cap_conn *conn = hcon->l2cap_data;
265
266 if (conn || status)
267 return conn;
268
269 conn = kzalloc(sizeof(struct l2cap_conn), GFP_ATOMIC);
270 if (!conn)
271 return NULL;
272
273 hcon->l2cap_data = conn;
274 conn->hcon = hcon;
275
276 BT_DBG("hcon %p conn %p", hcon, conn);
277
278 conn->mtu = hcon->hdev->acl_mtu;
279 conn->src = &hcon->hdev->bdaddr;
280 conn->dst = &hcon->dst;
281
282 spin_lock_init(&conn->lock);
283 rwlock_init(&conn->chan_list.lock);
284
285 return conn;
286 }
287
288 static void l2cap_conn_del(struct hci_conn *hcon, int err)
289 {
290 struct l2cap_conn *conn = hcon->l2cap_data;
291 struct sock *sk;
292
293 if (!conn)
294 return;
295
296 BT_DBG("hcon %p conn %p, err %d", hcon, conn, err);
297
298 if (conn->rx_skb)
299 kfree_skb(conn->rx_skb);
300
301 /* Kill channels */
302 while ((sk = conn->chan_list.head)) {
303 bh_lock_sock(sk);
304 l2cap_chan_del(sk, err);
305 bh_unlock_sock(sk);
306 l2cap_sock_kill(sk);
307 }
308
309 hcon->l2cap_data = NULL;
310 kfree(conn);
311 }
312
313 static inline void l2cap_chan_add(struct l2cap_conn *conn, struct sock *sk, struct sock *parent)
314 {
315 struct l2cap_chan_list *l = &conn->chan_list;
316 write_lock_bh(&l->lock);
317 __l2cap_chan_add(conn, sk, parent);
318 write_unlock_bh(&l->lock);
319 }
320
321 static inline u8 l2cap_get_ident(struct l2cap_conn *conn)
322 {
323 u8 id;
324
325 	/* Get the next available identifier.
326 	 * 1 - 128 are used by the kernel.
327 * 129 - 199 are reserved.
328 * 200 - 254 are used by utilities like l2ping, etc.
329 */
330
331 spin_lock_bh(&conn->lock);
332
333 if (++conn->tx_ident > 128)
334 conn->tx_ident = 1;
335
336 id = conn->tx_ident;
337
338 spin_unlock_bh(&conn->lock);
339
340 return id;
341 }
342
343 static inline int l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len, void *data)
344 {
345 struct sk_buff *skb = l2cap_build_cmd(conn, code, ident, len, data);
346
347 BT_DBG("code 0x%2.2x", code);
348
349 if (!skb)
350 return -ENOMEM;
351
352 return hci_send_acl(conn->hcon, skb, 0);
353 }
354
355 /* ---- Socket interface ---- */
356 static struct sock *__l2cap_get_sock_by_addr(__le16 psm, bdaddr_t *src)
357 {
358 struct sock *sk;
359 struct hlist_node *node;
360 sk_for_each(sk, node, &l2cap_sk_list.head)
361 if (l2cap_pi(sk)->sport == psm && !bacmp(&bt_sk(sk)->src, src))
362 goto found;
363 sk = NULL;
364 found:
365 return sk;
366 }
367
368 /* Find socket with psm and source bdaddr.
369 * Returns closest match.
370 */
371 static struct sock *__l2cap_get_sock_by_psm(int state, __le16 psm, bdaddr_t *src)
372 {
373 struct sock *sk = NULL, *sk1 = NULL;
374 struct hlist_node *node;
375
376 sk_for_each(sk, node, &l2cap_sk_list.head) {
377 if (state && sk->sk_state != state)
378 continue;
379
380 if (l2cap_pi(sk)->psm == psm) {
381 /* Exact match. */
382 if (!bacmp(&bt_sk(sk)->src, src))
383 break;
384
385 /* Closest match */
386 if (!bacmp(&bt_sk(sk)->src, BDADDR_ANY))
387 sk1 = sk;
388 }
389 }
390 return node ? sk : sk1;
391 }
392
393 /* Find socket with given address (psm, src).
394 * Returns locked socket */
395 static inline struct sock *l2cap_get_sock_by_psm(int state, __le16 psm, bdaddr_t *src)
396 {
397 struct sock *s;
398 read_lock(&l2cap_sk_list.lock);
399 s = __l2cap_get_sock_by_psm(state, psm, src);
400 if (s) bh_lock_sock(s);
401 read_unlock(&l2cap_sk_list.lock);
402 return s;
403 }
404
405 static void l2cap_sock_destruct(struct sock *sk)
406 {
407 BT_DBG("sk %p", sk);
408
409 skb_queue_purge(&sk->sk_receive_queue);
410 skb_queue_purge(&sk->sk_write_queue);
411 }
412
413 static void l2cap_sock_cleanup_listen(struct sock *parent)
414 {
415 struct sock *sk;
416
417 BT_DBG("parent %p", parent);
418
419 /* Close not yet accepted channels */
420 while ((sk = bt_accept_dequeue(parent, NULL)))
421 l2cap_sock_close(sk);
422
423 parent->sk_state = BT_CLOSED;
424 sock_set_flag(parent, SOCK_ZAPPED);
425 }
426
427 /* Kill socket (only if zapped and orphan)
428 * Must be called on unlocked socket.
429 */
430 static void l2cap_sock_kill(struct sock *sk)
431 {
432 if (!sock_flag(sk, SOCK_ZAPPED) || sk->sk_socket)
433 return;
434
435 BT_DBG("sk %p state %d", sk, sk->sk_state);
436
437 /* Kill poor orphan */
438 bt_sock_unlink(&l2cap_sk_list, sk);
439 sock_set_flag(sk, SOCK_DEAD);
440 sock_put(sk);
441 }
442
443 static void __l2cap_sock_close(struct sock *sk, int reason)
444 {
445 BT_DBG("sk %p state %d socket %p", sk, sk->sk_state, sk->sk_socket);
446
447 switch (sk->sk_state) {
448 case BT_LISTEN:
449 l2cap_sock_cleanup_listen(sk);
450 break;
451
452 case BT_CONNECTED:
453 case BT_CONFIG:
454 case BT_CONNECT2:
455 if (sk->sk_type == SOCK_SEQPACKET) {
456 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
457 struct l2cap_disconn_req req;
458
459 sk->sk_state = BT_DISCONN;
460 l2cap_sock_set_timer(sk, sk->sk_sndtimeo);
461
462 req.dcid = cpu_to_le16(l2cap_pi(sk)->dcid);
463 req.scid = cpu_to_le16(l2cap_pi(sk)->scid);
464 l2cap_send_cmd(conn, l2cap_get_ident(conn),
465 L2CAP_DISCONN_REQ, sizeof(req), &req);
466 } else {
467 l2cap_chan_del(sk, reason);
468 }
469 break;
470
471 case BT_CONNECT:
472 case BT_DISCONN:
473 l2cap_chan_del(sk, reason);
474 break;
475
476 default:
477 sock_set_flag(sk, SOCK_ZAPPED);
478 break;
479 }
480 }
481
482 /* Must be called on unlocked socket. */
483 static void l2cap_sock_close(struct sock *sk)
484 {
485 l2cap_sock_clear_timer(sk);
486 lock_sock(sk);
487 __l2cap_sock_close(sk, ECONNRESET);
488 release_sock(sk);
489 l2cap_sock_kill(sk);
490 }
491
492 static void l2cap_sock_init(struct sock *sk, struct sock *parent)
493 {
494 struct l2cap_pinfo *pi = l2cap_pi(sk);
495
496 BT_DBG("sk %p", sk);
497
498 if (parent) {
499 sk->sk_type = parent->sk_type;
500 pi->imtu = l2cap_pi(parent)->imtu;
501 pi->omtu = l2cap_pi(parent)->omtu;
502 pi->link_mode = l2cap_pi(parent)->link_mode;
503 } else {
504 pi->imtu = L2CAP_DEFAULT_MTU;
505 pi->omtu = 0;
506 pi->link_mode = 0;
507 }
508
509 /* Default config options */
510 pi->conf_len = 0;
511 pi->conf_mtu = L2CAP_DEFAULT_MTU;
512 pi->flush_to = L2CAP_DEFAULT_FLUSH_TO;
513 }
514
515 static struct proto l2cap_proto = {
516 .name = "L2CAP",
517 .owner = THIS_MODULE,
518 .obj_size = sizeof(struct l2cap_pinfo)
519 };
520
521 static struct sock *l2cap_sock_alloc(struct net *net, struct socket *sock, int proto, gfp_t prio)
522 {
523 struct sock *sk;
524
525 sk = sk_alloc(net, PF_BLUETOOTH, prio, &l2cap_proto, 1);
526 if (!sk)
527 return NULL;
528
529 sock_init_data(sock, sk);
530 INIT_LIST_HEAD(&bt_sk(sk)->accept_q);
531
532 sk->sk_destruct = l2cap_sock_destruct;
533 sk->sk_sndtimeo = L2CAP_CONN_TIMEOUT;
534
535 sock_reset_flag(sk, SOCK_ZAPPED);
536
537 sk->sk_protocol = proto;
538 sk->sk_state = BT_OPEN;
539
540 l2cap_sock_init_timer(sk);
541
542 bt_sock_link(&l2cap_sk_list, sk);
543 return sk;
544 }
545
546 static int l2cap_sock_create(struct net *net, struct socket *sock, int protocol)
547 {
548 struct sock *sk;
549
550 BT_DBG("sock %p", sock);
551
552 sock->state = SS_UNCONNECTED;
553
554 if (sock->type != SOCK_SEQPACKET &&
555 sock->type != SOCK_DGRAM && sock->type != SOCK_RAW)
556 return -ESOCKTNOSUPPORT;
557
558 if (sock->type == SOCK_RAW && !capable(CAP_NET_RAW))
559 return -EPERM;
560
561 sock->ops = &l2cap_sock_ops;
562
563 sk = l2cap_sock_alloc(net, sock, protocol, GFP_ATOMIC);
564 if (!sk)
565 return -ENOMEM;
566
567 l2cap_sock_init(sk, NULL);
568 return 0;
569 }
570
571 static int l2cap_sock_bind(struct socket *sock, struct sockaddr *addr, int addr_len)
572 {
573 struct sockaddr_l2 *la = (struct sockaddr_l2 *) addr;
574 struct sock *sk = sock->sk;
575 int err = 0;
576
577 BT_DBG("sk %p, %s %d", sk, batostr(&la->l2_bdaddr), la->l2_psm);
578
579 if (!addr || addr->sa_family != AF_BLUETOOTH)
580 return -EINVAL;
581
582 lock_sock(sk);
583
584 if (sk->sk_state != BT_OPEN) {
585 err = -EBADFD;
586 goto done;
587 }
588
589 if (la->l2_psm && btohs(la->l2_psm) < 0x1001 &&
590 !capable(CAP_NET_BIND_SERVICE)) {
591 err = -EACCES;
592 goto done;
593 }
594
595 write_lock_bh(&l2cap_sk_list.lock);
596
597 if (la->l2_psm && __l2cap_get_sock_by_addr(la->l2_psm, &la->l2_bdaddr)) {
598 err = -EADDRINUSE;
599 } else {
600 /* Save source address */
601 bacpy(&bt_sk(sk)->src, &la->l2_bdaddr);
602 l2cap_pi(sk)->psm = la->l2_psm;
603 l2cap_pi(sk)->sport = la->l2_psm;
604 sk->sk_state = BT_BOUND;
605 }
606
607 write_unlock_bh(&l2cap_sk_list.lock);
608
609 done:
610 release_sock(sk);
611 return err;
612 }
613
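/* Resolve an HCI route to the destination, create (or reuse) the ACL link and
 * its l2cap_conn, and attach the socket as a channel. If the link is already
 * up, a SEQPACKET socket sends L2CAP_CONN_REQ immediately; otherwise the
 * request is deferred until l2cap_conn_ready(). */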
614 static int l2cap_do_connect(struct sock *sk)
615 {
616 bdaddr_t *src = &bt_sk(sk)->src;
617 bdaddr_t *dst = &bt_sk(sk)->dst;
618 struct l2cap_conn *conn;
619 struct hci_conn *hcon;
620 struct hci_dev *hdev;
621 int err = 0;
622
623 BT_DBG("%s -> %s psm 0x%2.2x", batostr(src), batostr(dst), l2cap_pi(sk)->psm);
624
625 if (!(hdev = hci_get_route(dst, src)))
626 return -EHOSTUNREACH;
627
628 hci_dev_lock_bh(hdev);
629
630 err = -ENOMEM;
631
632 hcon = hci_connect(hdev, ACL_LINK, dst);
633 if (!hcon)
634 goto done;
635
636 conn = l2cap_conn_add(hcon, 0);
637 if (!conn) {
638 hci_conn_put(hcon);
639 goto done;
640 }
641
642 err = 0;
643
644 /* Update source addr of the socket */
645 bacpy(src, conn->src);
646
647 l2cap_chan_add(conn, sk, NULL);
648
649 sk->sk_state = BT_CONNECT;
650 l2cap_sock_set_timer(sk, sk->sk_sndtimeo);
651
652 if (hcon->state == BT_CONNECTED) {
653 if (sk->sk_type == SOCK_SEQPACKET) {
654 struct l2cap_conn_req req;
655 l2cap_pi(sk)->ident = l2cap_get_ident(conn);
656 req.scid = cpu_to_le16(l2cap_pi(sk)->scid);
657 req.psm = l2cap_pi(sk)->psm;
658 l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
659 L2CAP_CONN_REQ, sizeof(req), &req);
660 } else {
661 l2cap_sock_clear_timer(sk);
662 sk->sk_state = BT_CONNECTED;
663 }
664 }
665
666 done:
667 hci_dev_unlock_bh(hdev);
668 hci_dev_put(hdev);
669 return err;
670 }
671
672 static int l2cap_sock_connect(struct socket *sock, struct sockaddr *addr, int alen, int flags)
673 {
674 struct sockaddr_l2 *la = (struct sockaddr_l2 *) addr;
675 struct sock *sk = sock->sk;
676 int err = 0;
677
678 lock_sock(sk);
679
680 BT_DBG("sk %p", sk);
681
682 if (addr->sa_family != AF_BLUETOOTH || alen < sizeof(struct sockaddr_l2)) {
683 err = -EINVAL;
684 goto done;
685 }
686
687 if (sk->sk_type == SOCK_SEQPACKET && !la->l2_psm) {
688 err = -EINVAL;
689 goto done;
690 }
691
692 switch(sk->sk_state) {
693 case BT_CONNECT:
694 case BT_CONNECT2:
695 case BT_CONFIG:
696 /* Already connecting */
697 goto wait;
698
699 case BT_CONNECTED:
700 /* Already connected */
701 goto done;
702
703 case BT_OPEN:
704 case BT_BOUND:
705 /* Can connect */
706 break;
707
708 default:
709 err = -EBADFD;
710 goto done;
711 }
712
713 /* Set destination address and psm */
714 bacpy(&bt_sk(sk)->dst, &la->l2_bdaddr);
715 l2cap_pi(sk)->psm = la->l2_psm;
716
717 if ((err = l2cap_do_connect(sk)))
718 goto done;
719
720 wait:
721 err = bt_sock_wait_state(sk, BT_CONNECTED,
722 sock_sndtimeo(sk, flags & O_NONBLOCK));
723 done:
724 release_sock(sk);
725 return err;
726 }
727
728 static int l2cap_sock_listen(struct socket *sock, int backlog)
729 {
730 struct sock *sk = sock->sk;
731 int err = 0;
732
733 BT_DBG("sk %p backlog %d", sk, backlog);
734
735 lock_sock(sk);
736
737 if (sk->sk_state != BT_BOUND || sock->type != SOCK_SEQPACKET) {
738 err = -EBADFD;
739 goto done;
740 }
741
742 if (!l2cap_pi(sk)->psm) {
743 bdaddr_t *src = &bt_sk(sk)->src;
744 u16 psm;
745
746 err = -EINVAL;
747
748 write_lock_bh(&l2cap_sk_list.lock);
749
750 for (psm = 0x1001; psm < 0x1100; psm += 2)
751 if (!__l2cap_get_sock_by_addr(htobs(psm), src)) {
752 l2cap_pi(sk)->psm = htobs(psm);
753 l2cap_pi(sk)->sport = htobs(psm);
754 err = 0;
755 break;
756 }
757
758 write_unlock_bh(&l2cap_sk_list.lock);
759
760 if (err < 0)
761 goto done;
762 }
763
764 sk->sk_max_ack_backlog = backlog;
765 sk->sk_ack_backlog = 0;
766 sk->sk_state = BT_LISTEN;
767
768 done:
769 release_sock(sk);
770 return err;
771 }
772
773 static int l2cap_sock_accept(struct socket *sock, struct socket *newsock, int flags)
774 {
775 DECLARE_WAITQUEUE(wait, current);
776 struct sock *sk = sock->sk, *nsk;
777 long timeo;
778 int err = 0;
779
780 lock_sock_nested(sk, SINGLE_DEPTH_NESTING);
781
782 if (sk->sk_state != BT_LISTEN) {
783 err = -EBADFD;
784 goto done;
785 }
786
787 timeo = sock_rcvtimeo(sk, flags & O_NONBLOCK);
788
789 BT_DBG("sk %p timeo %ld", sk, timeo);
790
791 /* Wait for an incoming connection. (wake-one). */
792 add_wait_queue_exclusive(sk->sk_sleep, &wait);
793 while (!(nsk = bt_accept_dequeue(sk, newsock))) {
794 set_current_state(TASK_INTERRUPTIBLE);
795 if (!timeo) {
796 err = -EAGAIN;
797 break;
798 }
799
800 release_sock(sk);
801 timeo = schedule_timeout(timeo);
802 lock_sock_nested(sk, SINGLE_DEPTH_NESTING);
803
804 if (sk->sk_state != BT_LISTEN) {
805 err = -EBADFD;
806 break;
807 }
808
809 if (signal_pending(current)) {
810 err = sock_intr_errno(timeo);
811 break;
812 }
813 }
814 set_current_state(TASK_RUNNING);
815 remove_wait_queue(sk->sk_sleep, &wait);
816
817 if (err)
818 goto done;
819
820 newsock->state = SS_CONNECTED;
821
822 BT_DBG("new socket %p", nsk);
823
824 done:
825 release_sock(sk);
826 return err;
827 }
828
829 static int l2cap_sock_getname(struct socket *sock, struct sockaddr *addr, int *len, int peer)
830 {
831 struct sockaddr_l2 *la = (struct sockaddr_l2 *) addr;
832 struct sock *sk = sock->sk;
833
834 BT_DBG("sock %p, sk %p", sock, sk);
835
836 addr->sa_family = AF_BLUETOOTH;
837 *len = sizeof(struct sockaddr_l2);
838
839 if (peer)
840 bacpy(&la->l2_bdaddr, &bt_sk(sk)->dst);
841 else
842 bacpy(&la->l2_bdaddr, &bt_sk(sk)->src);
843
844 la->l2_psm = l2cap_pi(sk)->psm;
845 return 0;
846 }
847
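/* Fragment one outgoing SDU to the ACL MTU: the first skb carries the L2CAP
 * header (plus a 2-byte PSM for connectionless sockets), continuation
 * fragments are chained on frag_list, and the whole frame is handed to
 * hci_send_acl() in one go. Returns bytes sent or a negative error. */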
848 static inline int l2cap_do_send(struct sock *sk, struct msghdr *msg, int len)
849 {
850 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
851 struct sk_buff *skb, **frag;
852 int err, hlen, count, sent=0;
853 struct l2cap_hdr *lh;
854
855 BT_DBG("sk %p len %d", sk, len);
856
857 /* First fragment (with L2CAP header) */
858 if (sk->sk_type == SOCK_DGRAM)
859 hlen = L2CAP_HDR_SIZE + 2;
860 else
861 hlen = L2CAP_HDR_SIZE;
862
863 count = min_t(unsigned int, (conn->mtu - hlen), len);
864
865 skb = bt_skb_send_alloc(sk, hlen + count,
866 msg->msg_flags & MSG_DONTWAIT, &err);
867 if (!skb)
868 return err;
869
870 /* Create L2CAP header */
871 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
872 lh->cid = cpu_to_le16(l2cap_pi(sk)->dcid);
873 lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
874
875 if (sk->sk_type == SOCK_DGRAM)
876 put_unaligned(l2cap_pi(sk)->psm, (__le16 *) skb_put(skb, 2));
877
878 if (memcpy_fromiovec(skb_put(skb, count), msg->msg_iov, count)) {
879 err = -EFAULT;
880 goto fail;
881 }
882
883 sent += count;
884 len -= count;
885
886 /* Continuation fragments (no L2CAP header) */
887 frag = &skb_shinfo(skb)->frag_list;
888 while (len) {
889 count = min_t(unsigned int, conn->mtu, len);
890
891 *frag = bt_skb_send_alloc(sk, count, msg->msg_flags & MSG_DONTWAIT, &err);
892 if (!*frag)
893 goto fail;
894
895 if (memcpy_fromiovec(skb_put(*frag, count), msg->msg_iov, count)) {
896 err = -EFAULT;
897 goto fail;
898 }
899
900 sent += count;
901 len -= count;
902
903 frag = &(*frag)->next;
904 }
905
906 if ((err = hci_send_acl(conn->hcon, skb, 0)) < 0)
907 goto fail;
908
909 return sent;
910
911 fail:
912 kfree_skb(skb);
913 return err;
914 }
915
916 static int l2cap_sock_sendmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg, size_t len)
917 {
918 struct sock *sk = sock->sk;
919 int err = 0;
920
921 BT_DBG("sock %p, sk %p", sock, sk);
922
923 err = sock_error(sk);
924 if (err)
925 return err;
926
927 if (msg->msg_flags & MSG_OOB)
928 return -EOPNOTSUPP;
929
930 /* Check outgoing MTU */
931 if (sk->sk_type != SOCK_RAW && len > l2cap_pi(sk)->omtu)
932 return -EINVAL;
933
934 lock_sock(sk);
935
936 if (sk->sk_state == BT_CONNECTED)
937 err = l2cap_do_send(sk, msg, len);
938 else
939 err = -ENOTCONN;
940
941 release_sock(sk);
942 return err;
943 }
944
945 static int l2cap_sock_setsockopt(struct socket *sock, int level, int optname, char __user *optval, int optlen)
946 {
947 struct sock *sk = sock->sk;
948 struct l2cap_options opts;
949 int err = 0, len;
950 u32 opt;
951
952 BT_DBG("sk %p", sk);
953
954 lock_sock(sk);
955
956 switch (optname) {
957 case L2CAP_OPTIONS:
958 opts.imtu = l2cap_pi(sk)->imtu;
959 opts.omtu = l2cap_pi(sk)->omtu;
960 opts.flush_to = l2cap_pi(sk)->flush_to;
961 opts.mode = 0x00;
962
963 len = min_t(unsigned int, sizeof(opts), optlen);
964 if (copy_from_user((char *) &opts, optval, len)) {
965 err = -EFAULT;
966 break;
967 }
968
969 l2cap_pi(sk)->imtu = opts.imtu;
970 l2cap_pi(sk)->omtu = opts.omtu;
971 break;
972
973 case L2CAP_LM:
974 if (get_user(opt, (u32 __user *) optval)) {
975 err = -EFAULT;
976 break;
977 }
978
979 l2cap_pi(sk)->link_mode = opt;
980 break;
981
982 default:
983 err = -ENOPROTOOPT;
984 break;
985 }
986
987 release_sock(sk);
988 return err;
989 }
990
991 static int l2cap_sock_getsockopt(struct socket *sock, int level, int optname, char __user *optval, int __user *optlen)
992 {
993 struct sock *sk = sock->sk;
994 struct l2cap_options opts;
995 struct l2cap_conninfo cinfo;
996 int len, err = 0;
997
998 BT_DBG("sk %p", sk);
999
1000 if (get_user(len, optlen))
1001 return -EFAULT;
1002
1003 lock_sock(sk);
1004
1005 switch (optname) {
1006 case L2CAP_OPTIONS:
1007 opts.imtu = l2cap_pi(sk)->imtu;
1008 opts.omtu = l2cap_pi(sk)->omtu;
1009 opts.flush_to = l2cap_pi(sk)->flush_to;
1010 opts.mode = 0x00;
1011
1012 len = min_t(unsigned int, len, sizeof(opts));
1013 if (copy_to_user(optval, (char *) &opts, len))
1014 err = -EFAULT;
1015
1016 break;
1017
1018 case L2CAP_LM:
1019 if (put_user(l2cap_pi(sk)->link_mode, (u32 __user *) optval))
1020 err = -EFAULT;
1021 break;
1022
1023 case L2CAP_CONNINFO:
1024 if (sk->sk_state != BT_CONNECTED) {
1025 err = -ENOTCONN;
1026 break;
1027 }
1028
1029 cinfo.hci_handle = l2cap_pi(sk)->conn->hcon->handle;
1030 memcpy(cinfo.dev_class, l2cap_pi(sk)->conn->hcon->dev_class, 3);
1031
1032 len = min_t(unsigned int, len, sizeof(cinfo));
1033 if (copy_to_user(optval, (char *) &cinfo, len))
1034 err = -EFAULT;
1035
1036 break;
1037
1038 default:
1039 err = -ENOPROTOOPT;
1040 break;
1041 }
1042
1043 release_sock(sk);
1044 return err;
1045 }
1046
1047 static int l2cap_sock_shutdown(struct socket *sock, int how)
1048 {
1049 struct sock *sk = sock->sk;
1050 int err = 0;
1051
1052 BT_DBG("sock %p, sk %p", sock, sk);
1053
1054 if (!sk)
1055 return 0;
1056
1057 lock_sock(sk);
1058 if (!sk->sk_shutdown) {
1059 sk->sk_shutdown = SHUTDOWN_MASK;
1060 l2cap_sock_clear_timer(sk);
1061 __l2cap_sock_close(sk, 0);
1062
1063 if (sock_flag(sk, SOCK_LINGER) && sk->sk_lingertime)
1064 err = bt_sock_wait_state(sk, BT_CLOSED, sk->sk_lingertime);
1065 }
1066 release_sock(sk);
1067 return err;
1068 }
1069
1070 static int l2cap_sock_release(struct socket *sock)
1071 {
1072 struct sock *sk = sock->sk;
1073 int err;
1074
1075 BT_DBG("sock %p, sk %p", sock, sk);
1076
1077 if (!sk)
1078 return 0;
1079
1080 err = l2cap_sock_shutdown(sock, 2);
1081
1082 sock_orphan(sk);
1083 l2cap_sock_kill(sk);
1084 return err;
1085 }
1086
1087 static void l2cap_conn_ready(struct l2cap_conn *conn)
1088 {
1089 struct l2cap_chan_list *l = &conn->chan_list;
1090 struct sock *sk;
1091
1092 BT_DBG("conn %p", conn);
1093
1094 read_lock(&l->lock);
1095
1096 for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
1097 bh_lock_sock(sk);
1098
1099 if (sk->sk_type != SOCK_SEQPACKET) {
1100 l2cap_sock_clear_timer(sk);
1101 sk->sk_state = BT_CONNECTED;
1102 sk->sk_state_change(sk);
1103 } else if (sk->sk_state == BT_CONNECT) {
1104 struct l2cap_conn_req req;
1105 l2cap_pi(sk)->ident = l2cap_get_ident(conn);
1106 req.scid = cpu_to_le16(l2cap_pi(sk)->scid);
1107 req.psm = l2cap_pi(sk)->psm;
1108 l2cap_send_cmd(conn, l2cap_pi(sk)->ident, L2CAP_CONN_REQ, sizeof(req), &req);
1109 }
1110
1111 bh_unlock_sock(sk);
1112 }
1113
1114 read_unlock(&l->lock);
1115 }
1116
1117 /* Notify sockets that we can no longer guarantee reliability */
1118 static void l2cap_conn_unreliable(struct l2cap_conn *conn, int err)
1119 {
1120 struct l2cap_chan_list *l = &conn->chan_list;
1121 struct sock *sk;
1122
1123 BT_DBG("conn %p", conn);
1124
1125 read_lock(&l->lock);
1126 for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
1127 if (l2cap_pi(sk)->link_mode & L2CAP_LM_RELIABLE)
1128 sk->sk_err = err;
1129 }
1130 read_unlock(&l->lock);
1131 }
1132
1133 static void l2cap_chan_ready(struct sock *sk)
1134 {
1135 struct sock *parent = bt_sk(sk)->parent;
1136
1137 BT_DBG("sk %p, parent %p", sk, parent);
1138
1139 l2cap_pi(sk)->conf_state = 0;
1140 l2cap_sock_clear_timer(sk);
1141
1142 if (!parent) {
1143 /* Outgoing channel.
1144 * Wake up socket sleeping on connect.
1145 */
1146 sk->sk_state = BT_CONNECTED;
1147 sk->sk_state_change(sk);
1148 } else {
1149 /* Incoming channel.
1150 * Wake up socket sleeping on accept.
1151 */
1152 parent->sk_data_ready(parent, 0);
1153 }
1154 }
1155
1156 /* Copy frame to all raw sockets on that connection */
1157 static void l2cap_raw_recv(struct l2cap_conn *conn, struct sk_buff *skb)
1158 {
1159 struct l2cap_chan_list *l = &conn->chan_list;
1160 struct sk_buff *nskb;
1161 	struct sock *sk;
1162
1163 BT_DBG("conn %p", conn);
1164
1165 read_lock(&l->lock);
1166 for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
1167 if (sk->sk_type != SOCK_RAW)
1168 continue;
1169
1170 /* Don't send frame to the socket it came from */
1171 if (skb->sk == sk)
1172 continue;
1173
1174 if (!(nskb = skb_clone(skb, GFP_ATOMIC)))
1175 continue;
1176
1177 if (sock_queue_rcv_skb(sk, nskb))
1178 kfree_skb(nskb);
1179 }
1180 read_unlock(&l->lock);
1181 }
1182
1183 /* ---- L2CAP signalling commands ---- */
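/* Build a signalling PDU on CID 0x0001: L2CAP header, command header and
 * payload, fragmented to the ACL MTU like any other outgoing frame. */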
1184 static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
1185 u8 code, u8 ident, u16 dlen, void *data)
1186 {
1187 struct sk_buff *skb, **frag;
1188 struct l2cap_cmd_hdr *cmd;
1189 struct l2cap_hdr *lh;
1190 int len, count;
1191
1192 BT_DBG("conn %p, code 0x%2.2x, ident 0x%2.2x, len %d", conn, code, ident, dlen);
1193
1194 len = L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE + dlen;
1195 count = min_t(unsigned int, conn->mtu, len);
1196
1197 skb = bt_skb_alloc(count, GFP_ATOMIC);
1198 if (!skb)
1199 return NULL;
1200
1201 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
1202 lh->len = cpu_to_le16(L2CAP_CMD_HDR_SIZE + dlen);
1203 lh->cid = cpu_to_le16(0x0001);
1204
1205 cmd = (struct l2cap_cmd_hdr *) skb_put(skb, L2CAP_CMD_HDR_SIZE);
1206 cmd->code = code;
1207 cmd->ident = ident;
1208 cmd->len = cpu_to_le16(dlen);
1209
1210 if (dlen) {
1211 count -= L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE;
1212 memcpy(skb_put(skb, count), data, count);
1213 data += count;
1214 }
1215
1216 len -= skb->len;
1217
1218 /* Continuation fragments (no L2CAP header) */
1219 frag = &skb_shinfo(skb)->frag_list;
1220 while (len) {
1221 count = min_t(unsigned int, conn->mtu, len);
1222
1223 *frag = bt_skb_alloc(count, GFP_ATOMIC);
1224 if (!*frag)
1225 goto fail;
1226
1227 memcpy(skb_put(*frag, count), data, count);
1228
1229 len -= count;
1230 data += count;
1231
1232 frag = &(*frag)->next;
1233 }
1234
1235 return skb;
1236
1237 fail:
1238 kfree_skb(skb);
1239 return NULL;
1240 }
1241
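/* Configuration options are type/length/value triplets. Values of 1, 2 or 4
 * bytes are decoded from little-endian into *val; anything longer is returned
 * as a pointer to the raw option data. *ptr is advanced past the option and
 * its full size is returned so the caller can track the remaining length. */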
1242 static inline int l2cap_get_conf_opt(void **ptr, int *type, int *olen, unsigned long *val)
1243 {
1244 struct l2cap_conf_opt *opt = *ptr;
1245 int len;
1246
1247 len = L2CAP_CONF_OPT_SIZE + opt->len;
1248 *ptr += len;
1249
1250 *type = opt->type;
1251 *olen = opt->len;
1252
1253 switch (opt->len) {
1254 case 1:
1255 *val = *((u8 *) opt->val);
1256 break;
1257
1258 case 2:
1259 *val = __le16_to_cpu(*((__le16 *)opt->val));
1260 break;
1261
1262 case 4:
1263 *val = __le32_to_cpu(*((__le32 *)opt->val));
1264 break;
1265
1266 default:
1267 *val = (unsigned long) opt->val;
1268 break;
1269 }
1270
1271 BT_DBG("type 0x%2.2x len %d val 0x%lx", *type, opt->len, *val);
1272 return len;
1273 }
1274
1275 static void l2cap_add_conf_opt(void **ptr, u8 type, u8 len, unsigned long val)
1276 {
1277 struct l2cap_conf_opt *opt = *ptr;
1278
1279 BT_DBG("type 0x%2.2x len %d val 0x%lx", type, len, val);
1280
1281 opt->type = type;
1282 opt->len = len;
1283
1284 switch (len) {
1285 case 1:
1286 *((u8 *) opt->val) = val;
1287 break;
1288
1289 case 2:
1290 *((__le16 *) opt->val) = cpu_to_le16(val);
1291 break;
1292
1293 case 4:
1294 *((__le32 *) opt->val) = cpu_to_le32(val);
1295 break;
1296
1297 default:
1298 memcpy(opt->val, (void *) val, len);
1299 break;
1300 }
1301
1302 *ptr += L2CAP_CONF_OPT_SIZE + len;
1303 }
1304
1305 static int l2cap_build_conf_req(struct sock *sk, void *data)
1306 {
1307 struct l2cap_pinfo *pi = l2cap_pi(sk);
1308 struct l2cap_conf_req *req = data;
1309 void *ptr = req->data;
1310
1311 BT_DBG("sk %p", sk);
1312
1313 if (pi->imtu != L2CAP_DEFAULT_MTU)
1314 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, pi->imtu);
1315
1316 /* FIXME: Need actual value of the flush timeout */
1317 //if (flush_to != L2CAP_DEFAULT_FLUSH_TO)
1318 // l2cap_add_conf_opt(&ptr, L2CAP_CONF_FLUSH_TO, 2, pi->flush_to);
1319
1320 req->dcid = cpu_to_le16(pi->dcid);
1321 req->flags = cpu_to_le16(0);
1322
1323 return ptr - data;
1324 }
1325
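/* Parse the peer's accumulated configuration request and build our response.
 * The peer's proposed MTU becomes our outgoing MTU only when it is at least
 * the value already configured on the socket; otherwise the request is
 * answered with L2CAP_CONF_UNACCEPT and L2CAP_CONF_OUTPUT_DONE is withheld,
 * so the channel is never marked configured with unacceptable settings. */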
1326 static int l2cap_parse_conf_req(struct sock *sk, void *data)
1327 {
1328 struct l2cap_pinfo *pi = l2cap_pi(sk);
1329 struct l2cap_conf_rsp *rsp = data;
1330 void *ptr = rsp->data;
1331 void *req = pi->conf_req;
1332 int len = pi->conf_len;
1333 int type, hint, olen;
1334 unsigned long val;
1335 u16 result = L2CAP_CONF_SUCCESS;
1336
1337 BT_DBG("sk %p", sk);
1338
1339 while (len >= L2CAP_CONF_OPT_SIZE) {
1340 len -= l2cap_get_conf_opt(&req, &type, &olen, &val);
1341
1342 hint = type & 0x80;
1343 type &= 0x7f;
1344
1345 switch (type) {
1346 case L2CAP_CONF_MTU:
1347 pi->conf_mtu = val;
1348 break;
1349
1350 case L2CAP_CONF_FLUSH_TO:
1351 pi->flush_to = val;
1352 break;
1353
1354 case L2CAP_CONF_QOS:
1355 break;
1356
1357 default:
1358 if (hint)
1359 break;
1360
1361 result = L2CAP_CONF_UNKNOWN;
1362 *((u8 *) ptr++) = type;
1363 break;
1364 }
1365 }
1366
1367 if (result == L2CAP_CONF_SUCCESS) {
1368 /* Configure output options and let the other side know
1369 * which ones we don't like. */
1370
1371 if (pi->conf_mtu < pi->omtu)
1372 result = L2CAP_CONF_UNACCEPT;
1373 else {
1374 pi->omtu = pi->conf_mtu;
1375 pi->conf_state |= L2CAP_CONF_OUTPUT_DONE;
1376 }
1377
1378 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, pi->omtu);
1379 }
1380
1381 rsp->scid = cpu_to_le16(pi->dcid);
1382 rsp->result = cpu_to_le16(result);
1383 rsp->flags = cpu_to_le16(0x0000);
1384
1385 return ptr - data;
1386 }
1387
1388 static int l2cap_build_conf_rsp(struct sock *sk, void *data, u16 result, u16 flags)
1389 {
1390 struct l2cap_conf_rsp *rsp = data;
1391 void *ptr = rsp->data;
1392
1393 BT_DBG("sk %p", sk);
1394
1395 rsp->scid = cpu_to_le16(l2cap_pi(sk)->dcid);
1396 rsp->result = cpu_to_le16(result);
1397 rsp->flags = cpu_to_le16(flags);
1398
1399 return ptr - data;
1400 }
1401
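/* Handle an incoming L2CAP_CONN_REQ: find a socket listening on the PSM,
 * allocate a child socket and CID pair, and answer with success, pending
 * (while authentication/encryption is outstanding), no-resources or bad-PSM
 * as appropriate. */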
1402 static inline int l2cap_connect_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
1403 {
1404 struct l2cap_chan_list *list = &conn->chan_list;
1405 struct l2cap_conn_req *req = (struct l2cap_conn_req *) data;
1406 struct l2cap_conn_rsp rsp;
1407 struct sock *sk, *parent;
1408 int result = 0, status = 0;
1409
1410 u16 dcid = 0, scid = __le16_to_cpu(req->scid);
1411 __le16 psm = req->psm;
1412
1413 BT_DBG("psm 0x%2.2x scid 0x%4.4x", psm, scid);
1414
1415 	/* Check if we have a socket listening on this psm */
1416 parent = l2cap_get_sock_by_psm(BT_LISTEN, psm, conn->src);
1417 if (!parent) {
1418 result = L2CAP_CR_BAD_PSM;
1419 goto sendresp;
1420 }
1421
1422 result = L2CAP_CR_NO_MEM;
1423
1424 /* Check for backlog size */
1425 if (sk_acceptq_is_full(parent)) {
1426 BT_DBG("backlog full %d", parent->sk_ack_backlog);
1427 goto response;
1428 }
1429
1430 sk = l2cap_sock_alloc(parent->sk_net, NULL, BTPROTO_L2CAP, GFP_ATOMIC);
1431 if (!sk)
1432 goto response;
1433
1434 write_lock_bh(&list->lock);
1435
1436 	/* Check if we already have a channel with that dcid */
1437 if (__l2cap_get_chan_by_dcid(list, scid)) {
1438 write_unlock_bh(&list->lock);
1439 sock_set_flag(sk, SOCK_ZAPPED);
1440 l2cap_sock_kill(sk);
1441 goto response;
1442 }
1443
1444 hci_conn_hold(conn->hcon);
1445
1446 l2cap_sock_init(sk, parent);
1447 bacpy(&bt_sk(sk)->src, conn->src);
1448 bacpy(&bt_sk(sk)->dst, conn->dst);
1449 l2cap_pi(sk)->psm = psm;
1450 l2cap_pi(sk)->dcid = scid;
1451
1452 __l2cap_chan_add(conn, sk, parent);
1453 dcid = l2cap_pi(sk)->scid;
1454
1455 l2cap_sock_set_timer(sk, sk->sk_sndtimeo);
1456
1457 /* Service level security */
1458 result = L2CAP_CR_PEND;
1459 status = L2CAP_CS_AUTHEN_PEND;
1460 sk->sk_state = BT_CONNECT2;
1461 l2cap_pi(sk)->ident = cmd->ident;
1462
1463 if ((l2cap_pi(sk)->link_mode & L2CAP_LM_ENCRYPT) ||
1464 (l2cap_pi(sk)->link_mode & L2CAP_LM_SECURE)) {
1465 if (!hci_conn_encrypt(conn->hcon))
1466 goto done;
1467 } else if (l2cap_pi(sk)->link_mode & L2CAP_LM_AUTH) {
1468 if (!hci_conn_auth(conn->hcon))
1469 goto done;
1470 }
1471
1472 sk->sk_state = BT_CONFIG;
1473 result = status = 0;
1474
1475 done:
1476 write_unlock_bh(&list->lock);
1477
1478 response:
1479 bh_unlock_sock(parent);
1480
1481 sendresp:
1482 rsp.scid = cpu_to_le16(scid);
1483 rsp.dcid = cpu_to_le16(dcid);
1484 rsp.result = cpu_to_le16(result);
1485 rsp.status = cpu_to_le16(status);
1486 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONN_RSP, sizeof(rsp), &rsp);
1487 return 0;
1488 }
1489
1490 static inline int l2cap_connect_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
1491 {
1492 struct l2cap_conn_rsp *rsp = (struct l2cap_conn_rsp *) data;
1493 u16 scid, dcid, result, status;
1494 struct sock *sk;
1495 u8 req[128];
1496
1497 scid = __le16_to_cpu(rsp->scid);
1498 dcid = __le16_to_cpu(rsp->dcid);
1499 result = __le16_to_cpu(rsp->result);
1500 status = __le16_to_cpu(rsp->status);
1501
1502 BT_DBG("dcid 0x%4.4x scid 0x%4.4x result 0x%2.2x status 0x%2.2x", dcid, scid, result, status);
1503
1504 if (scid) {
1505 if (!(sk = l2cap_get_chan_by_scid(&conn->chan_list, scid)))
1506 return 0;
1507 } else {
1508 if (!(sk = l2cap_get_chan_by_ident(&conn->chan_list, cmd->ident)))
1509 return 0;
1510 }
1511
1512 switch (result) {
1513 case L2CAP_CR_SUCCESS:
1514 sk->sk_state = BT_CONFIG;
1515 l2cap_pi(sk)->ident = 0;
1516 l2cap_pi(sk)->dcid = dcid;
1517 l2cap_pi(sk)->conf_state |= L2CAP_CONF_REQ_SENT;
1518
1519 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
1520 l2cap_build_conf_req(sk, req), req);
1521 break;
1522
1523 case L2CAP_CR_PEND:
1524 break;
1525
1526 default:
1527 l2cap_chan_del(sk, ECONNREFUSED);
1528 break;
1529 }
1530
1531 bh_unlock_sock(sk);
1532 return 0;
1533 }
1534
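/* Handle L2CAP_CONF_REQ. A configuration request may be split across several
 * commands (continuation flag in 'flags'); fragments are accumulated in
 * conf_req[] and only parsed once the final fragment arrives. The channel
 * reaches BT_CONNECTED once both CONF_OUTPUT_DONE and CONF_INPUT_DONE are set. */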
1535 static inline int l2cap_config_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u16 cmd_len, u8 *data)
1536 {
1537 struct l2cap_conf_req *req = (struct l2cap_conf_req *) data;
1538 u16 dcid, flags;
1539 u8 rsp[64];
1540 struct sock *sk;
1541 int len;
1542
1543 dcid = __le16_to_cpu(req->dcid);
1544 flags = __le16_to_cpu(req->flags);
1545
1546 BT_DBG("dcid 0x%4.4x flags 0x%2.2x", dcid, flags);
1547
1548 if (!(sk = l2cap_get_chan_by_scid(&conn->chan_list, dcid)))
1549 return -ENOENT;
1550
1551 if (sk->sk_state == BT_DISCONN)
1552 goto unlock;
1553
1554 /* Reject if config buffer is too small. */
1555 len = cmd_len - sizeof(*req);
1556 if (l2cap_pi(sk)->conf_len + len > sizeof(l2cap_pi(sk)->conf_req)) {
1557 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
1558 l2cap_build_conf_rsp(sk, rsp,
1559 L2CAP_CONF_REJECT, flags), rsp);
1560 goto unlock;
1561 }
1562
1563 /* Store config. */
1564 memcpy(l2cap_pi(sk)->conf_req + l2cap_pi(sk)->conf_len, req->data, len);
1565 l2cap_pi(sk)->conf_len += len;
1566
1567 if (flags & 0x0001) {
1568 /* Incomplete config. Send empty response. */
1569 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
1570 l2cap_build_conf_rsp(sk, rsp,
1571 L2CAP_CONF_SUCCESS, 0x0001), rsp);
1572 goto unlock;
1573 }
1574
1575 /* Complete config. */
1576 len = l2cap_parse_conf_req(sk, rsp);
1577 if (len < 0)
1578 goto unlock;
1579
1580 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP, len, rsp);
1581
1582 /* Reset config buffer. */
1583 l2cap_pi(sk)->conf_len = 0;
1584
1585 if (!(l2cap_pi(sk)->conf_state & L2CAP_CONF_OUTPUT_DONE))
1586 goto unlock;
1587
1588 if (l2cap_pi(sk)->conf_state & L2CAP_CONF_INPUT_DONE) {
1589 sk->sk_state = BT_CONNECTED;
1590 l2cap_chan_ready(sk);
1591 goto unlock;
1592 }
1593
1594 if (!(l2cap_pi(sk)->conf_state & L2CAP_CONF_REQ_SENT)) {
1595 u8 req[64];
1596 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
1597 l2cap_build_conf_req(sk, req), req);
1598 }
1599
1600 unlock:
1601 bh_unlock_sock(sk);
1602 return 0;
1603 }
1604
1605 static inline int l2cap_config_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
1606 {
1607 struct l2cap_conf_rsp *rsp = (struct l2cap_conf_rsp *)data;
1608 u16 scid, flags, result;
1609 struct sock *sk;
1610
1611 scid = __le16_to_cpu(rsp->scid);
1612 flags = __le16_to_cpu(rsp->flags);
1613 result = __le16_to_cpu(rsp->result);
1614
1615 BT_DBG("scid 0x%4.4x flags 0x%2.2x result 0x%2.2x", scid, flags, result);
1616
1617 if (!(sk = l2cap_get_chan_by_scid(&conn->chan_list, scid)))
1618 return 0;
1619
1620 switch (result) {
1621 case L2CAP_CONF_SUCCESS:
1622 break;
1623
1624 case L2CAP_CONF_UNACCEPT:
1625 if (++l2cap_pi(sk)->conf_retry < L2CAP_CONF_MAX_RETRIES) {
1626 char req[128];
1627 			/* It does not make sense to adjust L2CAP parameters
1628 			 * that are currently defined in the spec. We simply
1629 			 * resend the config request we sent earlier. It is
1630 			 * stupid, but it helps qualification testing, which
1631 			 * expects at least some response from us. */
1632 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
1633 l2cap_build_conf_req(sk, req), req);
1634 goto done;
1635 }
1636
1637 default:
1638 sk->sk_state = BT_DISCONN;
1639 sk->sk_err = ECONNRESET;
1640 l2cap_sock_set_timer(sk, HZ * 5);
1641 {
1642 struct l2cap_disconn_req req;
1643 req.dcid = cpu_to_le16(l2cap_pi(sk)->dcid);
1644 req.scid = cpu_to_le16(l2cap_pi(sk)->scid);
1645 l2cap_send_cmd(conn, l2cap_get_ident(conn),
1646 L2CAP_DISCONN_REQ, sizeof(req), &req);
1647 }
1648 goto done;
1649 }
1650
1651 if (flags & 0x01)
1652 goto done;
1653
1654 l2cap_pi(sk)->conf_state |= L2CAP_CONF_INPUT_DONE;
1655
1656 if (l2cap_pi(sk)->conf_state & L2CAP_CONF_OUTPUT_DONE) {
1657 sk->sk_state = BT_CONNECTED;
1658 l2cap_chan_ready(sk);
1659 }
1660
1661 done:
1662 bh_unlock_sock(sk);
1663 return 0;
1664 }
1665
1666 static inline int l2cap_disconnect_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
1667 {
1668 struct l2cap_disconn_req *req = (struct l2cap_disconn_req *) data;
1669 struct l2cap_disconn_rsp rsp;
1670 u16 dcid, scid;
1671 struct sock *sk;
1672
1673 scid = __le16_to_cpu(req->scid);
1674 dcid = __le16_to_cpu(req->dcid);
1675
1676 BT_DBG("scid 0x%4.4x dcid 0x%4.4x", scid, dcid);
1677
1678 if (!(sk = l2cap_get_chan_by_scid(&conn->chan_list, dcid)))
1679 return 0;
1680
1681 rsp.dcid = cpu_to_le16(l2cap_pi(sk)->scid);
1682 rsp.scid = cpu_to_le16(l2cap_pi(sk)->dcid);
1683 l2cap_send_cmd(conn, cmd->ident, L2CAP_DISCONN_RSP, sizeof(rsp), &rsp);
1684
1685 sk->sk_shutdown = SHUTDOWN_MASK;
1686
1687 l2cap_chan_del(sk, ECONNRESET);
1688 bh_unlock_sock(sk);
1689
1690 l2cap_sock_kill(sk);
1691 return 0;
1692 }
1693
1694 static inline int l2cap_disconnect_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
1695 {
1696 struct l2cap_disconn_rsp *rsp = (struct l2cap_disconn_rsp *) data;
1697 u16 dcid, scid;
1698 struct sock *sk;
1699
1700 scid = __le16_to_cpu(rsp->scid);
1701 dcid = __le16_to_cpu(rsp->dcid);
1702
1703 BT_DBG("dcid 0x%4.4x scid 0x%4.4x", dcid, scid);
1704
1705 if (!(sk = l2cap_get_chan_by_scid(&conn->chan_list, scid)))
1706 return 0;
1707
1708 l2cap_chan_del(sk, 0);
1709 bh_unlock_sock(sk);
1710
1711 l2cap_sock_kill(sk);
1712 return 0;
1713 }
1714
1715 static inline int l2cap_information_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
1716 {
1717 struct l2cap_info_req *req = (struct l2cap_info_req *) data;
1718 struct l2cap_info_rsp rsp;
1719 u16 type;
1720
1721 type = __le16_to_cpu(req->type);
1722
1723 BT_DBG("type 0x%4.4x", type);
1724
1725 rsp.type = cpu_to_le16(type);
1726 rsp.result = cpu_to_le16(L2CAP_IR_NOTSUPP);
1727 l2cap_send_cmd(conn, cmd->ident, L2CAP_INFO_RSP, sizeof(rsp), &rsp);
1728
1729 return 0;
1730 }
1731
1732 static inline int l2cap_information_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
1733 {
1734 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) data;
1735 u16 type, result;
1736
1737 type = __le16_to_cpu(rsp->type);
1738 result = __le16_to_cpu(rsp->result);
1739
1740 BT_DBG("type 0x%4.4x result 0x%2.2x", type, result);
1741
1742 return 0;
1743 }
1744
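/* Walk every command carried in a signalling C-frame (one frame may contain
 * several commands), dispatch on the command code, and answer anything that
 * fails or is unknown with L2CAP_COMMAND_REJ. */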
1745 static inline void l2cap_sig_channel(struct l2cap_conn *conn, struct sk_buff *skb)
1746 {
1747 u8 *data = skb->data;
1748 int len = skb->len;
1749 struct l2cap_cmd_hdr cmd;
1750 int err = 0;
1751
1752 l2cap_raw_recv(conn, skb);
1753
1754 while (len >= L2CAP_CMD_HDR_SIZE) {
1755 u16 cmd_len;
1756 memcpy(&cmd, data, L2CAP_CMD_HDR_SIZE);
1757 data += L2CAP_CMD_HDR_SIZE;
1758 len -= L2CAP_CMD_HDR_SIZE;
1759
1760 cmd_len = le16_to_cpu(cmd.len);
1761
1762 BT_DBG("code 0x%2.2x len %d id 0x%2.2x", cmd.code, cmd_len, cmd.ident);
1763
1764 if (cmd_len > len || !cmd.ident) {
1765 BT_DBG("corrupted command");
1766 break;
1767 }
1768
1769 switch (cmd.code) {
1770 case L2CAP_COMMAND_REJ:
1771 /* FIXME: We should process this */
1772 break;
1773
1774 case L2CAP_CONN_REQ:
1775 err = l2cap_connect_req(conn, &cmd, data);
1776 break;
1777
1778 case L2CAP_CONN_RSP:
1779 err = l2cap_connect_rsp(conn, &cmd, data);
1780 break;
1781
1782 case L2CAP_CONF_REQ:
1783 err = l2cap_config_req(conn, &cmd, cmd_len, data);
1784 break;
1785
1786 case L2CAP_CONF_RSP:
1787 err = l2cap_config_rsp(conn, &cmd, data);
1788 break;
1789
1790 case L2CAP_DISCONN_REQ:
1791 err = l2cap_disconnect_req(conn, &cmd, data);
1792 break;
1793
1794 case L2CAP_DISCONN_RSP:
1795 err = l2cap_disconnect_rsp(conn, &cmd, data);
1796 break;
1797
1798 case L2CAP_ECHO_REQ:
1799 l2cap_send_cmd(conn, cmd.ident, L2CAP_ECHO_RSP, cmd_len, data);
1800 break;
1801
1802 case L2CAP_ECHO_RSP:
1803 break;
1804
1805 case L2CAP_INFO_REQ:
1806 err = l2cap_information_req(conn, &cmd, data);
1807 break;
1808
1809 case L2CAP_INFO_RSP:
1810 err = l2cap_information_rsp(conn, &cmd, data);
1811 break;
1812
1813 default:
1814 BT_ERR("Unknown signaling command 0x%2.2x", cmd.code);
1815 err = -EINVAL;
1816 break;
1817 }
1818
1819 if (err) {
1820 struct l2cap_cmd_rej rej;
1821 BT_DBG("error %d", err);
1822
1823 /* FIXME: Map err to a valid reason */
1824 rej.reason = cpu_to_le16(0);
1825 l2cap_send_cmd(conn, cmd.ident, L2CAP_COMMAND_REJ, sizeof(rej), &rej);
1826 }
1827
1828 data += cmd_len;
1829 len -= cmd_len;
1830 }
1831
1832 kfree_skb(skb);
1833 }
1834
1835 static inline int l2cap_data_channel(struct l2cap_conn *conn, u16 cid, struct sk_buff *skb)
1836 {
1837 struct sock *sk;
1838
1839 sk = l2cap_get_chan_by_scid(&conn->chan_list, cid);
1840 if (!sk) {
1841 BT_DBG("unknown cid 0x%4.4x", cid);
1842 goto drop;
1843 }
1844
1845 BT_DBG("sk %p, len %d", sk, skb->len);
1846
1847 if (sk->sk_state != BT_CONNECTED)
1848 goto drop;
1849
1850 if (l2cap_pi(sk)->imtu < skb->len)
1851 goto drop;
1852
1853 	/* If the socket receive buffer overflows we drop data
1854 	 * here, which is *bad* because L2CAP has to be reliable.
1855 	 * But we don't have any other choice: L2CAP doesn't
1856 	 * provide a flow control mechanism. */
1857
1858 if (!sock_queue_rcv_skb(sk, skb))
1859 goto done;
1860
1861 drop:
1862 kfree_skb(skb);
1863
1864 done:
1865 if (sk)
1866 bh_unlock_sock(sk);
1867
1868 return 0;
1869 }
1870
1871 static inline int l2cap_conless_channel(struct l2cap_conn *conn, __le16 psm, struct sk_buff *skb)
1872 {
1873 struct sock *sk;
1874
1875 sk = l2cap_get_sock_by_psm(0, psm, conn->src);
1876 if (!sk)
1877 goto drop;
1878
1879 BT_DBG("sk %p, len %d", sk, skb->len);
1880
1881 if (sk->sk_state != BT_BOUND && sk->sk_state != BT_CONNECTED)
1882 goto drop;
1883
1884 if (l2cap_pi(sk)->imtu < skb->len)
1885 goto drop;
1886
1887 if (!sock_queue_rcv_skb(sk, skb))
1888 goto done;
1889
1890 drop:
1891 kfree_skb(skb);
1892
1893 done:
1894 if (sk) bh_unlock_sock(sk);
1895 return 0;
1896 }
1897
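/* Demultiplex a reassembled frame by CID: 0x0001 is the signalling channel,
 * 0x0002 is connectionless data (prefixed with the PSM), everything else is
 * connection-oriented data for the channel with that source CID. */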
1898 static void l2cap_recv_frame(struct l2cap_conn *conn, struct sk_buff *skb)
1899 {
1900 struct l2cap_hdr *lh = (void *) skb->data;
1901 u16 cid, len;
1902 __le16 psm;
1903
1904 skb_pull(skb, L2CAP_HDR_SIZE);
1905 cid = __le16_to_cpu(lh->cid);
1906 len = __le16_to_cpu(lh->len);
1907
1908 BT_DBG("len %d, cid 0x%4.4x", len, cid);
1909
1910 switch (cid) {
1911 case 0x0001:
1912 l2cap_sig_channel(conn, skb);
1913 break;
1914
1915 case 0x0002:
1916 psm = get_unaligned((__le16 *) skb->data);
1917 skb_pull(skb, 2);
1918 l2cap_conless_channel(conn, psm, skb);
1919 break;
1920
1921 default:
1922 l2cap_data_channel(conn, cid, skb);
1923 break;
1924 }
1925 }
1926
1927 /* ---- L2CAP interface with lower layer (HCI) ---- */
1928
1929 static int l2cap_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
1930 {
1931 int exact = 0, lm1 = 0, lm2 = 0;
1932 register struct sock *sk;
1933 struct hlist_node *node;
1934
1935 if (type != ACL_LINK)
1936 return 0;
1937
1938 BT_DBG("hdev %s, bdaddr %s", hdev->name, batostr(bdaddr));
1939
1940 /* Find listening sockets and check their link_mode */
1941 read_lock(&l2cap_sk_list.lock);
1942 sk_for_each(sk, node, &l2cap_sk_list.head) {
1943 if (sk->sk_state != BT_LISTEN)
1944 continue;
1945
1946 if (!bacmp(&bt_sk(sk)->src, &hdev->bdaddr)) {
1947 lm1 |= (HCI_LM_ACCEPT | l2cap_pi(sk)->link_mode);
1948 exact++;
1949 } else if (!bacmp(&bt_sk(sk)->src, BDADDR_ANY))
1950 lm2 |= (HCI_LM_ACCEPT | l2cap_pi(sk)->link_mode);
1951 }
1952 read_unlock(&l2cap_sk_list.lock);
1953
1954 return exact ? lm1 : lm2;
1955 }
1956
1957 static int l2cap_connect_cfm(struct hci_conn *hcon, u8 status)
1958 {
1959 struct l2cap_conn *conn;
1960
1961 BT_DBG("hcon %p bdaddr %s status %d", hcon, batostr(&hcon->dst), status);
1962
1963 if (hcon->type != ACL_LINK)
1964 return 0;
1965
1966 if (!status) {
1967 conn = l2cap_conn_add(hcon, status);
1968 if (conn)
1969 l2cap_conn_ready(conn);
1970 } else
1971 l2cap_conn_del(hcon, bt_err(status));
1972
1973 return 0;
1974 }
1975
1976 static int l2cap_disconn_ind(struct hci_conn *hcon, u8 reason)
1977 {
1978 BT_DBG("hcon %p reason %d", hcon, reason);
1979
1980 if (hcon->type != ACL_LINK)
1981 return 0;
1982
1983 l2cap_conn_del(hcon, bt_err(reason));
1984
1985 return 0;
1986 }
1987
1988 static int l2cap_auth_cfm(struct hci_conn *hcon, u8 status)
1989 {
1990 struct l2cap_chan_list *l;
1990 	struct l2cap_conn *conn = hcon->l2cap_data;
1992 struct l2cap_conn_rsp rsp;
1993 struct sock *sk;
1994 int result;
1995
1996 if (!conn)
1997 return 0;
1998
1999 l = &conn->chan_list;
2000
2001 BT_DBG("conn %p", conn);
2002
2003 read_lock(&l->lock);
2004
2005 for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
2006 bh_lock_sock(sk);
2007
2008 if (sk->sk_state != BT_CONNECT2 ||
2009 (l2cap_pi(sk)->link_mode & L2CAP_LM_ENCRYPT) ||
2010 (l2cap_pi(sk)->link_mode & L2CAP_LM_SECURE)) {
2011 bh_unlock_sock(sk);
2012 continue;
2013 }
2014
2015 if (!status) {
2016 sk->sk_state = BT_CONFIG;
2017 result = 0;
2018 } else {
2019 sk->sk_state = BT_DISCONN;
2020 l2cap_sock_set_timer(sk, HZ/10);
2021 result = L2CAP_CR_SEC_BLOCK;
2022 }
2023
2024 rsp.scid = cpu_to_le16(l2cap_pi(sk)->dcid);
2025 rsp.dcid = cpu_to_le16(l2cap_pi(sk)->scid);
2026 rsp.result = cpu_to_le16(result);
2027 rsp.status = cpu_to_le16(0);
2028 l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
2029 L2CAP_CONN_RSP, sizeof(rsp), &rsp);
2030
2031 bh_unlock_sock(sk);
2032 }
2033
2034 read_unlock(&l->lock);
2035 return 0;
2036 }
2037
2038 static int l2cap_encrypt_cfm(struct hci_conn *hcon, u8 status)
2039 {
2040 struct l2cap_chan_list *l;
2041 struct l2cap_conn *conn = hcon->l2cap_data;
2042 struct l2cap_conn_rsp rsp;
2043 struct sock *sk;
2044 int result;
2045
2046 if (!conn)
2047 return 0;
2048
2049 l = &conn->chan_list;
2050
2051 BT_DBG("conn %p", conn);
2052
2053 read_lock(&l->lock);
2054
2055 for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
2056 bh_lock_sock(sk);
2057
2058 if (sk->sk_state != BT_CONNECT2) {
2059 bh_unlock_sock(sk);
2060 continue;
2061 }
2062
2063 if (!status) {
2064 sk->sk_state = BT_CONFIG;
2065 result = 0;
2066 } else {
2067 sk->sk_state = BT_DISCONN;
2068 l2cap_sock_set_timer(sk, HZ/10);
2069 result = L2CAP_CR_SEC_BLOCK;
2070 }
2071
2072 rsp.scid = cpu_to_le16(l2cap_pi(sk)->dcid);
2073 rsp.dcid = cpu_to_le16(l2cap_pi(sk)->scid);
2074 rsp.result = cpu_to_le16(result);
2075 rsp.status = cpu_to_le16(0);
2076 l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
2077 L2CAP_CONN_RSP, sizeof(rsp), &rsp);
2078
2079 if (l2cap_pi(sk)->link_mode & L2CAP_LM_SECURE)
2080 hci_conn_change_link_key(hcon);
2081
2082 bh_unlock_sock(sk);
2083 }
2084
2085 read_unlock(&l->lock);
2086 return 0;
2087 }
2088
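/* Reassemble L2CAP frames from ACL fragments. An ACL_START fragment carries
 * the L2CAP length used to size rx_skb; continuation fragments are appended
 * until rx_len reaches zero. Malformed sequences are dropped and the
 * connection is flagged unreliable via l2cap_conn_unreliable(ECOMM). */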
2089 static int l2cap_recv_acldata(struct hci_conn *hcon, struct sk_buff *skb, u16 flags)
2090 {
2091 struct l2cap_conn *conn = hcon->l2cap_data;
2092
2093 if (!conn && !(conn = l2cap_conn_add(hcon, 0)))
2094 goto drop;
2095
2096 BT_DBG("conn %p len %d flags 0x%x", conn, skb->len, flags);
2097
2098 if (flags & ACL_START) {
2099 struct l2cap_hdr *hdr;
2100 int len;
2101
2102 if (conn->rx_len) {
2103 BT_ERR("Unexpected start frame (len %d)", skb->len);
2104 kfree_skb(conn->rx_skb);
2105 conn->rx_skb = NULL;
2106 conn->rx_len = 0;
2107 l2cap_conn_unreliable(conn, ECOMM);
2108 }
2109
2110 if (skb->len < 2) {
2111 BT_ERR("Frame is too short (len %d)", skb->len);
2112 l2cap_conn_unreliable(conn, ECOMM);
2113 goto drop;
2114 }
2115
2116 hdr = (struct l2cap_hdr *) skb->data;
2117 len = __le16_to_cpu(hdr->len) + L2CAP_HDR_SIZE;
2118
2119 if (len == skb->len) {
2120 /* Complete frame received */
2121 l2cap_recv_frame(conn, skb);
2122 return 0;
2123 }
2124
2125 BT_DBG("Start: total len %d, frag len %d", len, skb->len);
2126
2127 if (skb->len > len) {
2128 BT_ERR("Frame is too long (len %d, expected len %d)",
2129 skb->len, len);
2130 l2cap_conn_unreliable(conn, ECOMM);
2131 goto drop;
2132 }
2133
2134 /* Allocate skb for the complete frame (with header) */
2135 if (!(conn->rx_skb = bt_skb_alloc(len, GFP_ATOMIC)))
2136 goto drop;
2137
2138 skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
2139 skb->len);
2140 conn->rx_len = len - skb->len;
2141 } else {
2142 BT_DBG("Cont: frag len %d (expecting %d)", skb->len, conn->rx_len);
2143
2144 if (!conn->rx_len) {
2145 BT_ERR("Unexpected continuation frame (len %d)", skb->len);
2146 l2cap_conn_unreliable(conn, ECOMM);
2147 goto drop;
2148 }
2149
2150 if (skb->len > conn->rx_len) {
2151 BT_ERR("Fragment is too long (len %d, expected %d)",
2152 skb->len, conn->rx_len);
2153 kfree_skb(conn->rx_skb);
2154 conn->rx_skb = NULL;
2155 conn->rx_len = 0;
2156 l2cap_conn_unreliable(conn, ECOMM);
2157 goto drop;
2158 }
2159
2160 skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
2161 skb->len);
2162 conn->rx_len -= skb->len;
2163
2164 if (!conn->rx_len) {
2165 /* Complete frame received */
2166 l2cap_recv_frame(conn, conn->rx_skb);
2167 conn->rx_skb = NULL;
2168 }
2169 }
2170
2171 drop:
2172 kfree_skb(skb);
2173 return 0;
2174 }
2175
2176 static ssize_t l2cap_sysfs_show(struct class *dev, char *buf)
2177 {
2178 struct sock *sk;
2179 struct hlist_node *node;
2180 char *str = buf;
2181
2182 read_lock_bh(&l2cap_sk_list.lock);
2183
2184 sk_for_each(sk, node, &l2cap_sk_list.head) {
2185 struct l2cap_pinfo *pi = l2cap_pi(sk);
2186
2187 str += sprintf(str, "%s %s %d %d 0x%4.4x 0x%4.4x %d %d 0x%x\n",
2188 batostr(&bt_sk(sk)->src), batostr(&bt_sk(sk)->dst),
2189 sk->sk_state, btohs(pi->psm), pi->scid, pi->dcid,
2190 pi->imtu, pi->omtu, pi->link_mode);
2191 }
2192
2193 read_unlock_bh(&l2cap_sk_list.lock);
2194
2195 return (str - buf);
2196 }
2197
2198 static CLASS_ATTR(l2cap, S_IRUGO, l2cap_sysfs_show, NULL);
2199
2200 static const struct proto_ops l2cap_sock_ops = {
2201 .family = PF_BLUETOOTH,
2202 .owner = THIS_MODULE,
2203 .release = l2cap_sock_release,
2204 .bind = l2cap_sock_bind,
2205 .connect = l2cap_sock_connect,
2206 .listen = l2cap_sock_listen,
2207 .accept = l2cap_sock_accept,
2208 .getname = l2cap_sock_getname,
2209 .sendmsg = l2cap_sock_sendmsg,
2210 .recvmsg = bt_sock_recvmsg,
2211 .poll = bt_sock_poll,
2212 .mmap = sock_no_mmap,
2213 .socketpair = sock_no_socketpair,
2214 .ioctl = sock_no_ioctl,
2215 .shutdown = l2cap_sock_shutdown,
2216 .setsockopt = l2cap_sock_setsockopt,
2217 .getsockopt = l2cap_sock_getsockopt
2218 };
2219
2220 static struct net_proto_family l2cap_sock_family_ops = {
2221 .family = PF_BLUETOOTH,
2222 .owner = THIS_MODULE,
2223 .create = l2cap_sock_create,
2224 };
2225
2226 static struct hci_proto l2cap_hci_proto = {
2227 .name = "L2CAP",
2228 .id = HCI_PROTO_L2CAP,
2229 .connect_ind = l2cap_connect_ind,
2230 .connect_cfm = l2cap_connect_cfm,
2231 .disconn_ind = l2cap_disconn_ind,
2232 .auth_cfm = l2cap_auth_cfm,
2233 .encrypt_cfm = l2cap_encrypt_cfm,
2234 .recv_acldata = l2cap_recv_acldata
2235 };
2236
2237 static int __init l2cap_init(void)
2238 {
2239 int err;
2240
2241 err = proto_register(&l2cap_proto, 0);
2242 if (err < 0)
2243 return err;
2244
2245 err = bt_sock_register(BTPROTO_L2CAP, &l2cap_sock_family_ops);
2246 if (err < 0) {
2247 BT_ERR("L2CAP socket registration failed");
2248 goto error;
2249 }
2250
2251 err = hci_register_proto(&l2cap_hci_proto);
2252 if (err < 0) {
2253 BT_ERR("L2CAP protocol registration failed");
2254 bt_sock_unregister(BTPROTO_L2CAP);
2255 goto error;
2256 }
2257
2258 if (class_create_file(bt_class, &class_attr_l2cap) < 0)
2259 BT_ERR("Failed to create L2CAP info file");
2260
2261 BT_INFO("L2CAP ver %s", VERSION);
2262 BT_INFO("L2CAP socket layer initialized");
2263
2264 return 0;
2265
2266 error:
2267 proto_unregister(&l2cap_proto);
2268 return err;
2269 }
2270
2271 static void __exit l2cap_exit(void)
2272 {
2273 class_remove_file(bt_class, &class_attr_l2cap);
2274
2275 if (bt_sock_unregister(BTPROTO_L2CAP) < 0)
2276 BT_ERR("L2CAP socket unregistration failed");
2277
2278 if (hci_unregister_proto(&l2cap_hci_proto) < 0)
2279 BT_ERR("L2CAP protocol unregistration failed");
2280
2281 proto_unregister(&l2cap_proto);
2282 }
2283
2284 void l2cap_load(void)
2285 {
2286 /* Dummy function to trigger automatic L2CAP module loading by
2287 * other modules that use L2CAP sockets but don't use any other
2288 * symbols from it. */
2289 return;
2290 }
2291 EXPORT_SYMBOL(l2cap_load);
2292
2293 module_init(l2cap_init);
2294 module_exit(l2cap_exit);
2295
2296 MODULE_AUTHOR("Maxim Krasnyansky <maxk@qualcomm.com>, Marcel Holtmann <marcel@holtmann.org>");
2297 MODULE_DESCRIPTION("Bluetooth L2CAP ver " VERSION);
2298 MODULE_VERSION(VERSION);
2299 MODULE_LICENSE("GPL");
2300 MODULE_ALIAS("bt-proto-0");