[Bluetooth] Add option to disable eSCO connection creation
net/bluetooth/l2cap.c (mt8127/android_kernel_alcatel_ttab.git)
1 /*
2 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (C) 2000-2001 Qualcomm Incorporated
4
5 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
6
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License version 2 as
9 published by the Free Software Foundation;
10
11 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
12 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
13 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
14 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
15 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
16 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
17 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
18 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
19
20 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
21 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
22 SOFTWARE IS DISCLAIMED.
23 */
24
25 /* Bluetooth L2CAP core and sockets. */
26
27 #include <linux/module.h>
28
29 #include <linux/types.h>
30 #include <linux/capability.h>
31 #include <linux/errno.h>
32 #include <linux/kernel.h>
33 #include <linux/sched.h>
34 #include <linux/slab.h>
35 #include <linux/poll.h>
36 #include <linux/fcntl.h>
37 #include <linux/init.h>
38 #include <linux/interrupt.h>
39 #include <linux/socket.h>
40 #include <linux/skbuff.h>
41 #include <linux/list.h>
42 #include <linux/device.h>
43 #include <net/sock.h>
44
45 #include <asm/system.h>
46 #include <asm/uaccess.h>
47 #include <asm/unaligned.h>
48
49 #include <net/bluetooth/bluetooth.h>
50 #include <net/bluetooth/hci_core.h>
51 #include <net/bluetooth/l2cap.h>
52
53 #ifndef CONFIG_BT_L2CAP_DEBUG
54 #undef BT_DBG
55 #define BT_DBG(D...)
56 #endif
57
58 #define VERSION "2.10"
59
60 static u32 l2cap_feat_mask = 0x0000;
61
62 static const struct proto_ops l2cap_sock_ops;
63
64 static struct bt_sock_list l2cap_sk_list = {
65 .lock = __RW_LOCK_UNLOCKED(l2cap_sk_list.lock)
66 };
67
68 static void __l2cap_sock_close(struct sock *sk, int reason);
69 static void l2cap_sock_close(struct sock *sk);
70 static void l2cap_sock_kill(struct sock *sk);
71
72 static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
73 u8 code, u8 ident, u16 dlen, void *data);
74
75 /* ---- L2CAP timers ---- */
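/* Socket timer expired: tear the channel down with ETIMEDOUT and drop the
 * reference the timer held on the socket. */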
76 static void l2cap_sock_timeout(unsigned long arg)
77 {
78 struct sock *sk = (struct sock *) arg;
79
80 BT_DBG("sock %p state %d", sk, sk->sk_state);
81
82 bh_lock_sock(sk);
83 __l2cap_sock_close(sk, ETIMEDOUT);
84 bh_unlock_sock(sk);
85
86 l2cap_sock_kill(sk);
87 sock_put(sk);
88 }
89
90 static void l2cap_sock_set_timer(struct sock *sk, long timeout)
91 {
92 BT_DBG("sk %p state %d timeout %ld", sk, sk->sk_state, timeout);
93 sk_reset_timer(sk, &sk->sk_timer, jiffies + timeout);
94 }
95
96 static void l2cap_sock_clear_timer(struct sock *sk)
97 {
98 BT_DBG("sock %p state %d", sk, sk->sk_state);
99 sk_stop_timer(sk, &sk->sk_timer);
100 }
101
102 /* ---- L2CAP channels ---- */
103 static struct sock *__l2cap_get_chan_by_dcid(struct l2cap_chan_list *l, u16 cid)
104 {
105 struct sock *s;
106 for (s = l->head; s; s = l2cap_pi(s)->next_c) {
107 if (l2cap_pi(s)->dcid == cid)
108 break;
109 }
110 return s;
111 }
112
113 static struct sock *__l2cap_get_chan_by_scid(struct l2cap_chan_list *l, u16 cid)
114 {
115 struct sock *s;
116 for (s = l->head; s; s = l2cap_pi(s)->next_c) {
117 if (l2cap_pi(s)->scid == cid)
118 break;
119 }
120 return s;
121 }
122
123 /* Find channel with given SCID.
124 * Returns locked socket */
125 static inline struct sock *l2cap_get_chan_by_scid(struct l2cap_chan_list *l, u16 cid)
126 {
127 struct sock *s;
128 read_lock(&l->lock);
129 s = __l2cap_get_chan_by_scid(l, cid);
130 if (s) bh_lock_sock(s);
131 read_unlock(&l->lock);
132 return s;
133 }
134
135 static struct sock *__l2cap_get_chan_by_ident(struct l2cap_chan_list *l, u8 ident)
136 {
137 struct sock *s;
138 for (s = l->head; s; s = l2cap_pi(s)->next_c) {
139 if (l2cap_pi(s)->ident == ident)
140 break;
141 }
142 return s;
143 }
144
145 static inline struct sock *l2cap_get_chan_by_ident(struct l2cap_chan_list *l, u8 ident)
146 {
147 struct sock *s;
148 read_lock(&l->lock);
149 s = __l2cap_get_chan_by_ident(l, ident);
150 if (s) bh_lock_sock(s);
151 read_unlock(&l->lock);
152 return s;
153 }
154
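/* Allocate the first free source CID in the dynamic range (0x0040-0xfffe).
 * Returns 0 if the whole range is in use. */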
155 static u16 l2cap_alloc_cid(struct l2cap_chan_list *l)
156 {
157 u16 cid = 0x0040;
158
159 for (; cid < 0xffff; cid++) {
160 if (!__l2cap_get_chan_by_scid(l, cid))
161 return cid;
162 }
163
164 return 0;
165 }
166
167 static inline void __l2cap_chan_link(struct l2cap_chan_list *l, struct sock *sk)
168 {
169 sock_hold(sk);
170
171 if (l->head)
172 l2cap_pi(l->head)->prev_c = sk;
173
174 l2cap_pi(sk)->next_c = l->head;
175 l2cap_pi(sk)->prev_c = NULL;
176 l->head = sk;
177 }
178
179 static inline void l2cap_chan_unlink(struct l2cap_chan_list *l, struct sock *sk)
180 {
181 struct sock *next = l2cap_pi(sk)->next_c, *prev = l2cap_pi(sk)->prev_c;
182
183 write_lock_bh(&l->lock);
184 if (sk == l->head)
185 l->head = next;
186
187 if (next)
188 l2cap_pi(next)->prev_c = prev;
189 if (prev)
190 l2cap_pi(prev)->next_c = next;
191 write_unlock_bh(&l->lock);
192
193 __sock_put(sk);
194 }
195
196 static void __l2cap_chan_add(struct l2cap_conn *conn, struct sock *sk, struct sock *parent)
197 {
198 struct l2cap_chan_list *l = &conn->chan_list;
199
200 BT_DBG("conn %p, psm 0x%2.2x, dcid 0x%4.4x", conn, l2cap_pi(sk)->psm, l2cap_pi(sk)->dcid);
201
202 l2cap_pi(sk)->conn = conn;
203
204 if (sk->sk_type == SOCK_SEQPACKET) {
205 /* Alloc CID for connection-oriented socket */
206 l2cap_pi(sk)->scid = l2cap_alloc_cid(l);
207 } else if (sk->sk_type == SOCK_DGRAM) {
208 /* Connectionless socket */
209 l2cap_pi(sk)->scid = 0x0002;
210 l2cap_pi(sk)->dcid = 0x0002;
211 l2cap_pi(sk)->omtu = L2CAP_DEFAULT_MTU;
212 } else {
213 /* Raw socket can send/recv signalling messages only */
214 l2cap_pi(sk)->scid = 0x0001;
215 l2cap_pi(sk)->dcid = 0x0001;
216 l2cap_pi(sk)->omtu = L2CAP_DEFAULT_MTU;
217 }
218
219 __l2cap_chan_link(l, sk);
220
221 if (parent)
222 bt_accept_enqueue(parent, sk);
223 }
224
225 /* Delete channel.
226 * Must be called on the locked socket. */
227 static void l2cap_chan_del(struct sock *sk, int err)
228 {
229 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
230 struct sock *parent = bt_sk(sk)->parent;
231
232 l2cap_sock_clear_timer(sk);
233
234 BT_DBG("sk %p, conn %p, err %d", sk, conn, err);
235
236 if (conn) {
237 /* Unlink from channel list */
238 l2cap_chan_unlink(&conn->chan_list, sk);
239 l2cap_pi(sk)->conn = NULL;
240 hci_conn_put(conn->hcon);
241 }
242
243 sk->sk_state = BT_CLOSED;
244 sock_set_flag(sk, SOCK_ZAPPED);
245
246 if (err)
247 sk->sk_err = err;
248
249 if (parent) {
250 bt_accept_unlink(sk);
251 parent->sk_data_ready(parent, 0);
252 } else
253 sk->sk_state_change(sk);
254 }
255
256 /* Service level security */
257 static inline int l2cap_check_link_mode(struct sock *sk)
258 {
259 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
260
261 if ((l2cap_pi(sk)->link_mode & L2CAP_LM_ENCRYPT) ||
262 (l2cap_pi(sk)->link_mode & L2CAP_LM_SECURE))
263 return hci_conn_encrypt(conn->hcon);
264
265 if (l2cap_pi(sk)->link_mode & L2CAP_LM_AUTH)
266 return hci_conn_auth(conn->hcon);
267
268 return 1;
269 }
270
271 static inline u8 l2cap_get_ident(struct l2cap_conn *conn)
272 {
273 u8 id;
274
275 /* Get next available identifier.
276 * 1 - 128 are used by the kernel.
277 * 129 - 199 are reserved.
278 * 200 - 254 are used by utilities like l2ping, etc.
279 */
280
281 spin_lock_bh(&conn->lock);
282
283 if (++conn->tx_ident > 128)
284 conn->tx_ident = 1;
285
286 id = conn->tx_ident;
287
288 spin_unlock_bh(&conn->lock);
289
290 return id;
291 }
292
293 static inline int l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len, void *data)
294 {
295 struct sk_buff *skb = l2cap_build_cmd(conn, code, ident, len, data);
296
297 BT_DBG("code 0x%2.2x", code);
298
299 if (!skb)
300 return -ENOMEM;
301
302 return hci_send_acl(conn->hcon, skb, 0);
303 }
304
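/* Start channel establishment: if the remote feature mask has already been
 * requested on this connection, send the connect request right away;
 * otherwise query the feature mask first and arm the info timer. */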
305 static void l2cap_do_start(struct sock *sk)
306 {
307 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
308
309 if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) {
310 struct l2cap_conn_req req;
311 req.scid = cpu_to_le16(l2cap_pi(sk)->scid);
312 req.psm = l2cap_pi(sk)->psm;
313
314 l2cap_pi(sk)->ident = l2cap_get_ident(conn);
315
316 l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
317 L2CAP_CONN_REQ, sizeof(req), &req);
318 } else {
319 struct l2cap_info_req req;
320 req.type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
321
322 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
323 conn->info_ident = l2cap_get_ident(conn);
324
325 mod_timer(&conn->info_timer, jiffies +
326 msecs_to_jiffies(L2CAP_INFO_TIMEOUT));
327
328 l2cap_send_cmd(conn, conn->info_ident,
329 L2CAP_INFO_REQ, sizeof(req), &req);
330 }
331 }
332
333 /* ---- L2CAP connections ---- */
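/* Resume setup of every pending SOCK_SEQPACKET channel on this connection,
 * sending connect requests or responses as their state requires. */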
334 static void l2cap_conn_start(struct l2cap_conn *conn)
335 {
336 struct l2cap_chan_list *l = &conn->chan_list;
337 struct sock *sk;
338
339 BT_DBG("conn %p", conn);
340
341 read_lock(&l->lock);
342
343 for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
344 bh_lock_sock(sk);
345
346 if (sk->sk_type != SOCK_SEQPACKET) {
347 bh_unlock_sock(sk);
348 continue;
349 }
350
351 if (sk->sk_state == BT_CONNECT) {
352 struct l2cap_conn_req req;
353 req.scid = cpu_to_le16(l2cap_pi(sk)->scid);
354 req.psm = l2cap_pi(sk)->psm;
355
356 l2cap_pi(sk)->ident = l2cap_get_ident(conn);
357
358 l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
359 L2CAP_CONN_REQ, sizeof(req), &req);
360 } else if (sk->sk_state == BT_CONNECT2) {
361 struct l2cap_conn_rsp rsp;
362 rsp.scid = cpu_to_le16(l2cap_pi(sk)->dcid);
363 rsp.dcid = cpu_to_le16(l2cap_pi(sk)->scid);
364
365 if (l2cap_check_link_mode(sk)) {
366 sk->sk_state = BT_CONFIG;
367 rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
368 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
369 } else {
370 rsp.result = cpu_to_le16(L2CAP_CR_PEND);
371 rsp.status = cpu_to_le16(L2CAP_CS_AUTHEN_PEND);
372 }
373
374 l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
375 L2CAP_CONN_RSP, sizeof(rsp), &rsp);
376 }
377
378 bh_unlock_sock(sk);
379 }
380
381 read_unlock(&l->lock);
382 }
383
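/* The ACL link is up: mark raw and connectionless sockets as connected and
 * start setup for channels still waiting in BT_CONNECT. */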
384 static void l2cap_conn_ready(struct l2cap_conn *conn)
385 {
386 struct l2cap_chan_list *l = &conn->chan_list;
387 struct sock *sk;
388
389 BT_DBG("conn %p", conn);
390
391 read_lock(&l->lock);
392
393 for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
394 bh_lock_sock(sk);
395
396 if (sk->sk_type != SOCK_SEQPACKET) {
397 l2cap_sock_clear_timer(sk);
398 sk->sk_state = BT_CONNECTED;
399 sk->sk_state_change(sk);
400 } else if (sk->sk_state == BT_CONNECT)
401 l2cap_do_start(sk);
402
403 bh_unlock_sock(sk);
404 }
405
406 read_unlock(&l->lock);
407 }
408
409 /* Notify sockets that we cannot guarantee reliability anymore */
410 static void l2cap_conn_unreliable(struct l2cap_conn *conn, int err)
411 {
412 struct l2cap_chan_list *l = &conn->chan_list;
413 struct sock *sk;
414
415 BT_DBG("conn %p", conn);
416
417 read_lock(&l->lock);
418
419 for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
420 if (l2cap_pi(sk)->link_mode & L2CAP_LM_RELIABLE)
421 sk->sk_err = err;
422 }
423
424 read_unlock(&l->lock);
425 }
426
427 static void l2cap_info_timeout(unsigned long arg)
428 {
429 struct l2cap_conn *conn = (void *) arg;
430
431 conn->info_ident = 0;
432
433 l2cap_conn_start(conn);
434 }
435
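/* Return the existing L2CAP connection for this ACL link or create a new
 * one, initialising its channel list, locks and info timer. */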
436 static struct l2cap_conn *l2cap_conn_add(struct hci_conn *hcon, u8 status)
437 {
438 struct l2cap_conn *conn = hcon->l2cap_data;
439
440 if (conn || status)
441 return conn;
442
443 conn = kzalloc(sizeof(struct l2cap_conn), GFP_ATOMIC);
444 if (!conn)
445 return NULL;
446
447 hcon->l2cap_data = conn;
448 conn->hcon = hcon;
449
450 BT_DBG("hcon %p conn %p", hcon, conn);
451
452 conn->mtu = hcon->hdev->acl_mtu;
453 conn->src = &hcon->hdev->bdaddr;
454 conn->dst = &hcon->dst;
455
456 conn->feat_mask = 0;
457
458 setup_timer(&conn->info_timer, l2cap_info_timeout, (unsigned long)conn);
459
460 spin_lock_init(&conn->lock);
461 rwlock_init(&conn->chan_list.lock);
462
463 return conn;
464 }
465
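/* Tear down an L2CAP connection: drop any partially reassembled frame,
 * delete every channel and free the connection object. */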
466 static void l2cap_conn_del(struct hci_conn *hcon, int err)
467 {
468 struct l2cap_conn *conn = hcon->l2cap_data;
469 struct sock *sk;
470
471 if (!conn)
472 return;
473
474 BT_DBG("hcon %p conn %p, err %d", hcon, conn, err);
475
476 if (conn->rx_skb)
477 kfree_skb(conn->rx_skb);
478
479 /* Kill channels */
480 while ((sk = conn->chan_list.head)) {
481 bh_lock_sock(sk);
482 l2cap_chan_del(sk, err);
483 bh_unlock_sock(sk);
484 l2cap_sock_kill(sk);
485 }
486
487 if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT)
488 del_timer_sync(&conn->info_timer);
489
490 hcon->l2cap_data = NULL;
491 kfree(conn);
492 }
493
494 static inline void l2cap_chan_add(struct l2cap_conn *conn, struct sock *sk, struct sock *parent)
495 {
496 struct l2cap_chan_list *l = &conn->chan_list;
497 write_lock_bh(&l->lock);
498 __l2cap_chan_add(conn, sk, parent);
499 write_unlock_bh(&l->lock);
500 }
501
502 /* ---- Socket interface ---- */
503 static struct sock *__l2cap_get_sock_by_addr(__le16 psm, bdaddr_t *src)
504 {
505 struct sock *sk;
506 struct hlist_node *node;
507 sk_for_each(sk, node, &l2cap_sk_list.head)
508 if (l2cap_pi(sk)->sport == psm && !bacmp(&bt_sk(sk)->src, src))
509 goto found;
510 sk = NULL;
511 found:
512 return sk;
513 }
514
515 /* Find socket with psm and source bdaddr.
516 * Returns closest match.
517 */
518 static struct sock *__l2cap_get_sock_by_psm(int state, __le16 psm, bdaddr_t *src)
519 {
520 struct sock *sk = NULL, *sk1 = NULL;
521 struct hlist_node *node;
522
523 sk_for_each(sk, node, &l2cap_sk_list.head) {
524 if (state && sk->sk_state != state)
525 continue;
526
527 if (l2cap_pi(sk)->psm == psm) {
528 /* Exact match. */
529 if (!bacmp(&bt_sk(sk)->src, src))
530 break;
531
532 /* Closest match */
533 if (!bacmp(&bt_sk(sk)->src, BDADDR_ANY))
534 sk1 = sk;
535 }
536 }
537 return node ? sk : sk1;
538 }
539
540 /* Find socket with given address (psm, src).
541 * Returns locked socket */
542 static inline struct sock *l2cap_get_sock_by_psm(int state, __le16 psm, bdaddr_t *src)
543 {
544 struct sock *s;
545 read_lock(&l2cap_sk_list.lock);
546 s = __l2cap_get_sock_by_psm(state, psm, src);
547 if (s) bh_lock_sock(s);
548 read_unlock(&l2cap_sk_list.lock);
549 return s;
550 }
551
552 static void l2cap_sock_destruct(struct sock *sk)
553 {
554 BT_DBG("sk %p", sk);
555
556 skb_queue_purge(&sk->sk_receive_queue);
557 skb_queue_purge(&sk->sk_write_queue);
558 }
559
560 static void l2cap_sock_cleanup_listen(struct sock *parent)
561 {
562 struct sock *sk;
563
564 BT_DBG("parent %p", parent);
565
566 /* Close not yet accepted channels */
567 while ((sk = bt_accept_dequeue(parent, NULL)))
568 l2cap_sock_close(sk);
569
570 parent->sk_state = BT_CLOSED;
571 sock_set_flag(parent, SOCK_ZAPPED);
572 }
573
574 /* Kill socket (only if zapped and orphaned)
575 * Must be called on unlocked socket.
576 */
577 static void l2cap_sock_kill(struct sock *sk)
578 {
579 if (!sock_flag(sk, SOCK_ZAPPED) || sk->sk_socket)
580 return;
581
582 BT_DBG("sk %p state %d", sk, sk->sk_state);
583
584 /* Kill poor orphan */
585 bt_sock_unlink(&l2cap_sk_list, sk);
586 sock_set_flag(sk, SOCK_DEAD);
587 sock_put(sk);
588 }
589
590 static void __l2cap_sock_close(struct sock *sk, int reason)
591 {
592 BT_DBG("sk %p state %d socket %p", sk, sk->sk_state, sk->sk_socket);
593
594 switch (sk->sk_state) {
595 case BT_LISTEN:
596 l2cap_sock_cleanup_listen(sk);
597 break;
598
599 case BT_CONNECTED:
600 case BT_CONFIG:
601 case BT_CONNECT2:
602 if (sk->sk_type == SOCK_SEQPACKET) {
603 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
604 struct l2cap_disconn_req req;
605
606 sk->sk_state = BT_DISCONN;
607 l2cap_sock_set_timer(sk, sk->sk_sndtimeo);
608
609 req.dcid = cpu_to_le16(l2cap_pi(sk)->dcid);
610 req.scid = cpu_to_le16(l2cap_pi(sk)->scid);
611 l2cap_send_cmd(conn, l2cap_get_ident(conn),
612 L2CAP_DISCONN_REQ, sizeof(req), &req);
613 } else {
614 l2cap_chan_del(sk, reason);
615 }
616 break;
617
618 case BT_CONNECT:
619 case BT_DISCONN:
620 l2cap_chan_del(sk, reason);
621 break;
622
623 default:
624 sock_set_flag(sk, SOCK_ZAPPED);
625 break;
626 }
627 }
628
629 /* Must be called on unlocked socket. */
630 static void l2cap_sock_close(struct sock *sk)
631 {
632 l2cap_sock_clear_timer(sk);
633 lock_sock(sk);
634 __l2cap_sock_close(sk, ECONNRESET);
635 release_sock(sk);
636 l2cap_sock_kill(sk);
637 }
638
639 static void l2cap_sock_init(struct sock *sk, struct sock *parent)
640 {
641 struct l2cap_pinfo *pi = l2cap_pi(sk);
642
643 BT_DBG("sk %p", sk);
644
645 if (parent) {
646 sk->sk_type = parent->sk_type;
647 pi->imtu = l2cap_pi(parent)->imtu;
648 pi->omtu = l2cap_pi(parent)->omtu;
649 pi->link_mode = l2cap_pi(parent)->link_mode;
650 } else {
651 pi->imtu = L2CAP_DEFAULT_MTU;
652 pi->omtu = 0;
653 pi->link_mode = 0;
654 }
655
656 /* Default config options */
657 pi->conf_len = 0;
658 pi->flush_to = L2CAP_DEFAULT_FLUSH_TO;
659 }
660
661 static struct proto l2cap_proto = {
662 .name = "L2CAP",
663 .owner = THIS_MODULE,
664 .obj_size = sizeof(struct l2cap_pinfo)
665 };
666
667 static struct sock *l2cap_sock_alloc(struct net *net, struct socket *sock, int proto, gfp_t prio)
668 {
669 struct sock *sk;
670
671 sk = sk_alloc(net, PF_BLUETOOTH, prio, &l2cap_proto);
672 if (!sk)
673 return NULL;
674
675 sock_init_data(sock, sk);
676 INIT_LIST_HEAD(&bt_sk(sk)->accept_q);
677
678 sk->sk_destruct = l2cap_sock_destruct;
679 sk->sk_sndtimeo = msecs_to_jiffies(L2CAP_CONN_TIMEOUT);
680
681 sock_reset_flag(sk, SOCK_ZAPPED);
682
683 sk->sk_protocol = proto;
684 sk->sk_state = BT_OPEN;
685
686 setup_timer(&sk->sk_timer, l2cap_sock_timeout, (unsigned long)sk);
687
688 bt_sock_link(&l2cap_sk_list, sk);
689 return sk;
690 }
691
692 static int l2cap_sock_create(struct net *net, struct socket *sock, int protocol)
693 {
694 struct sock *sk;
695
696 BT_DBG("sock %p", sock);
697
698 sock->state = SS_UNCONNECTED;
699
700 if (sock->type != SOCK_SEQPACKET &&
701 sock->type != SOCK_DGRAM && sock->type != SOCK_RAW)
702 return -ESOCKTNOSUPPORT;
703
704 if (sock->type == SOCK_RAW && !capable(CAP_NET_RAW))
705 return -EPERM;
706
707 sock->ops = &l2cap_sock_ops;
708
709 sk = l2cap_sock_alloc(net, sock, protocol, GFP_ATOMIC);
710 if (!sk)
711 return -ENOMEM;
712
713 l2cap_sock_init(sk, NULL);
714 return 0;
715 }
716
717 static int l2cap_sock_bind(struct socket *sock, struct sockaddr *addr, int addr_len)
718 {
719 struct sockaddr_l2 *la = (struct sockaddr_l2 *) addr;
720 struct sock *sk = sock->sk;
721 int err = 0;
722
723 BT_DBG("sk %p, %s %d", sk, batostr(&la->l2_bdaddr), la->l2_psm);
724
725 if (!addr || addr->sa_family != AF_BLUETOOTH)
726 return -EINVAL;
727
728 lock_sock(sk);
729
730 if (sk->sk_state != BT_OPEN) {
731 err = -EBADFD;
732 goto done;
733 }
734
735 if (la->l2_psm && btohs(la->l2_psm) < 0x1001 &&
736 !capable(CAP_NET_BIND_SERVICE)) {
737 err = -EACCES;
738 goto done;
739 }
740
741 write_lock_bh(&l2cap_sk_list.lock);
742
743 if (la->l2_psm && __l2cap_get_sock_by_addr(la->l2_psm, &la->l2_bdaddr)) {
744 err = -EADDRINUSE;
745 } else {
746 /* Save source address */
747 bacpy(&bt_sk(sk)->src, &la->l2_bdaddr);
748 l2cap_pi(sk)->psm = la->l2_psm;
749 l2cap_pi(sk)->sport = la->l2_psm;
750 sk->sk_state = BT_BOUND;
751 }
752
753 write_unlock_bh(&l2cap_sk_list.lock);
754
755 done:
756 release_sock(sk);
757 return err;
758 }
759
760 static int l2cap_do_connect(struct sock *sk)
761 {
762 bdaddr_t *src = &bt_sk(sk)->src;
763 bdaddr_t *dst = &bt_sk(sk)->dst;
764 struct l2cap_conn *conn;
765 struct hci_conn *hcon;
766 struct hci_dev *hdev;
767 int err = 0;
768
769 BT_DBG("%s -> %s psm 0x%2.2x", batostr(src), batostr(dst), l2cap_pi(sk)->psm);
770
771 if (!(hdev = hci_get_route(dst, src)))
772 return -EHOSTUNREACH;
773
774 hci_dev_lock_bh(hdev);
775
776 err = -ENOMEM;
777
778 hcon = hci_connect(hdev, ACL_LINK, dst);
779 if (!hcon)
780 goto done;
781
782 conn = l2cap_conn_add(hcon, 0);
783 if (!conn) {
784 hci_conn_put(hcon);
785 goto done;
786 }
787
788 err = 0;
789
790 /* Update source addr of the socket */
791 bacpy(src, conn->src);
792
793 l2cap_chan_add(conn, sk, NULL);
794
795 sk->sk_state = BT_CONNECT;
796 l2cap_sock_set_timer(sk, sk->sk_sndtimeo);
797
798 if (hcon->state == BT_CONNECTED) {
799 if (sk->sk_type != SOCK_SEQPACKET) {
800 l2cap_sock_clear_timer(sk);
801 sk->sk_state = BT_CONNECTED;
802 } else
803 l2cap_do_start(sk);
804 }
805
806 done:
807 hci_dev_unlock_bh(hdev);
808 hci_dev_put(hdev);
809 return err;
810 }
811
812 static int l2cap_sock_connect(struct socket *sock, struct sockaddr *addr, int alen, int flags)
813 {
814 struct sockaddr_l2 *la = (struct sockaddr_l2 *) addr;
815 struct sock *sk = sock->sk;
816 int err = 0;
817
818 lock_sock(sk);
819
820 BT_DBG("sk %p", sk);
821
822 if (addr->sa_family != AF_BLUETOOTH || alen < sizeof(struct sockaddr_l2)) {
823 err = -EINVAL;
824 goto done;
825 }
826
827 if (sk->sk_type == SOCK_SEQPACKET && !la->l2_psm) {
828 err = -EINVAL;
829 goto done;
830 }
831
832 switch (sk->sk_state) {
833 case BT_CONNECT:
834 case BT_CONNECT2:
835 case BT_CONFIG:
836 /* Already connecting */
837 goto wait;
838
839 case BT_CONNECTED:
840 /* Already connected */
841 goto done;
842
843 case BT_OPEN:
844 case BT_BOUND:
845 /* Can connect */
846 break;
847
848 default:
849 err = -EBADFD;
850 goto done;
851 }
852
853 /* Set destination address and psm */
854 bacpy(&bt_sk(sk)->dst, &la->l2_bdaddr);
855 l2cap_pi(sk)->psm = la->l2_psm;
856
857 if ((err = l2cap_do_connect(sk)))
858 goto done;
859
860 wait:
861 err = bt_sock_wait_state(sk, BT_CONNECTED,
862 sock_sndtimeo(sk, flags & O_NONBLOCK));
863 done:
864 release_sock(sk);
865 return err;
866 }
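
/*
 * Illustrative only, not part of this file: a minimal user-space client of
 * this socket layer, using the BlueZ library headers, would look roughly
 * like the sketch below; the address and PSM in the usage line are made up.
 *
 *	#include <string.h>
 *	#include <unistd.h>
 *	#include <sys/socket.h>
 *	#include <bluetooth/bluetooth.h>
 *	#include <bluetooth/l2cap.h>
 *
 *	static int l2cap_client_connect(const char *dst, unsigned short psm)
 *	{
 *		struct sockaddr_l2 addr;
 *		int sk;
 *
 *		sk = socket(AF_BLUETOOTH, SOCK_SEQPACKET, BTPROTO_L2CAP);
 *		if (sk < 0)
 *			return -1;
 *
 *		memset(&addr, 0, sizeof(addr));
 *		addr.l2_family = AF_BLUETOOTH;
 *		addr.l2_psm = htobs(psm);
 *		str2ba(dst, &addr.l2_bdaddr);
 *
 *		if (connect(sk, (struct sockaddr *) &addr, sizeof(addr)) < 0) {
 *			close(sk);
 *			return -1;
 *		}
 *		return sk;
 *	}
 *
 *	Usage: sk = l2cap_client_connect("00:11:22:33:44:55", 0x1001);
 */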
867
868 static int l2cap_sock_listen(struct socket *sock, int backlog)
869 {
870 struct sock *sk = sock->sk;
871 int err = 0;
872
873 BT_DBG("sk %p backlog %d", sk, backlog);
874
875 lock_sock(sk);
876
877 if (sk->sk_state != BT_BOUND || sock->type != SOCK_SEQPACKET) {
878 err = -EBADFD;
879 goto done;
880 }
881
882 if (!l2cap_pi(sk)->psm) {
883 bdaddr_t *src = &bt_sk(sk)->src;
884 u16 psm;
885
886 err = -EINVAL;
887
888 write_lock_bh(&l2cap_sk_list.lock);
889
890 for (psm = 0x1001; psm < 0x1100; psm += 2)
891 if (!__l2cap_get_sock_by_addr(htobs(psm), src)) {
892 l2cap_pi(sk)->psm = htobs(psm);
893 l2cap_pi(sk)->sport = htobs(psm);
894 err = 0;
895 break;
896 }
897
898 write_unlock_bh(&l2cap_sk_list.lock);
899
900 if (err < 0)
901 goto done;
902 }
903
904 sk->sk_max_ack_backlog = backlog;
905 sk->sk_ack_backlog = 0;
906 sk->sk_state = BT_LISTEN;
907
908 done:
909 release_sock(sk);
910 return err;
911 }
912
913 static int l2cap_sock_accept(struct socket *sock, struct socket *newsock, int flags)
914 {
915 DECLARE_WAITQUEUE(wait, current);
916 struct sock *sk = sock->sk, *nsk;
917 long timeo;
918 int err = 0;
919
920 lock_sock_nested(sk, SINGLE_DEPTH_NESTING);
921
922 if (sk->sk_state != BT_LISTEN) {
923 err = -EBADFD;
924 goto done;
925 }
926
927 timeo = sock_rcvtimeo(sk, flags & O_NONBLOCK);
928
929 BT_DBG("sk %p timeo %ld", sk, timeo);
930
931 /* Wait for an incoming connection. (wake-one). */
932 add_wait_queue_exclusive(sk->sk_sleep, &wait);
933 while (!(nsk = bt_accept_dequeue(sk, newsock))) {
934 set_current_state(TASK_INTERRUPTIBLE);
935 if (!timeo) {
936 err = -EAGAIN;
937 break;
938 }
939
940 release_sock(sk);
941 timeo = schedule_timeout(timeo);
942 lock_sock_nested(sk, SINGLE_DEPTH_NESTING);
943
944 if (sk->sk_state != BT_LISTEN) {
945 err = -EBADFD;
946 break;
947 }
948
949 if (signal_pending(current)) {
950 err = sock_intr_errno(timeo);
951 break;
952 }
953 }
954 set_current_state(TASK_RUNNING);
955 remove_wait_queue(sk->sk_sleep, &wait);
956
957 if (err)
958 goto done;
959
960 newsock->state = SS_CONNECTED;
961
962 BT_DBG("new socket %p", nsk);
963
964 done:
965 release_sock(sk);
966 return err;
967 }
968
969 static int l2cap_sock_getname(struct socket *sock, struct sockaddr *addr, int *len, int peer)
970 {
971 struct sockaddr_l2 *la = (struct sockaddr_l2 *) addr;
972 struct sock *sk = sock->sk;
973
974 BT_DBG("sock %p, sk %p", sock, sk);
975
976 addr->sa_family = AF_BLUETOOTH;
977 *len = sizeof(struct sockaddr_l2);
978
979 if (peer)
980 bacpy(&la->l2_bdaddr, &bt_sk(sk)->dst);
981 else
982 bacpy(&la->l2_bdaddr, &bt_sk(sk)->src);
983
984 la->l2_psm = l2cap_pi(sk)->psm;
985 return 0;
986 }
987
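/* Send one SDU, fragmenting it to the ACL MTU: the first skb carries the
 * L2CAP header (plus the PSM for connectionless sockets) and continuation
 * fragments are chained on its frag_list. */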
988 static inline int l2cap_do_send(struct sock *sk, struct msghdr *msg, int len)
989 {
990 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
991 struct sk_buff *skb, **frag;
992 int err, hlen, count, sent = 0;
993 struct l2cap_hdr *lh;
994
995 BT_DBG("sk %p len %d", sk, len);
996
997 /* First fragment (with L2CAP header) */
998 if (sk->sk_type == SOCK_DGRAM)
999 hlen = L2CAP_HDR_SIZE + 2;
1000 else
1001 hlen = L2CAP_HDR_SIZE;
1002
1003 count = min_t(unsigned int, (conn->mtu - hlen), len);
1004
1005 skb = bt_skb_send_alloc(sk, hlen + count,
1006 msg->msg_flags & MSG_DONTWAIT, &err);
1007 if (!skb)
1008 return err;
1009
1010 /* Create L2CAP header */
1011 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
1012 lh->cid = cpu_to_le16(l2cap_pi(sk)->dcid);
1013 lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
1014
1015 if (sk->sk_type == SOCK_DGRAM)
1016 put_unaligned(l2cap_pi(sk)->psm, (__le16 *) skb_put(skb, 2));
1017
1018 if (memcpy_fromiovec(skb_put(skb, count), msg->msg_iov, count)) {
1019 err = -EFAULT;
1020 goto fail;
1021 }
1022
1023 sent += count;
1024 len -= count;
1025
1026 /* Continuation fragments (no L2CAP header) */
1027 frag = &skb_shinfo(skb)->frag_list;
1028 while (len) {
1029 count = min_t(unsigned int, conn->mtu, len);
1030
1031 *frag = bt_skb_send_alloc(sk, count, msg->msg_flags & MSG_DONTWAIT, &err);
1032 if (!*frag)
1033 goto fail;
1034
1035 if (memcpy_fromiovec(skb_put(*frag, count), msg->msg_iov, count)) {
1036 err = -EFAULT;
1037 goto fail;
1038 }
1039
1040 sent += count;
1041 len -= count;
1042
1043 frag = &(*frag)->next;
1044 }
1045
1046 if ((err = hci_send_acl(conn->hcon, skb, 0)) < 0)
1047 goto fail;
1048
1049 return sent;
1050
1051 fail:
1052 kfree_skb(skb);
1053 return err;
1054 }
1055
1056 static int l2cap_sock_sendmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg, size_t len)
1057 {
1058 struct sock *sk = sock->sk;
1059 int err = 0;
1060
1061 BT_DBG("sock %p, sk %p", sock, sk);
1062
1063 err = sock_error(sk);
1064 if (err)
1065 return err;
1066
1067 if (msg->msg_flags & MSG_OOB)
1068 return -EOPNOTSUPP;
1069
1070 /* Check outgoing MTU */
1071 if (sk->sk_type != SOCK_RAW && len > l2cap_pi(sk)->omtu)
1072 return -EINVAL;
1073
1074 lock_sock(sk);
1075
1076 if (sk->sk_state == BT_CONNECTED)
1077 err = l2cap_do_send(sk, msg, len);
1078 else
1079 err = -ENOTCONN;
1080
1081 release_sock(sk);
1082 return err;
1083 }
1084
1085 static int l2cap_sock_setsockopt(struct socket *sock, int level, int optname, char __user *optval, int optlen)
1086 {
1087 struct sock *sk = sock->sk;
1088 struct l2cap_options opts;
1089 int err = 0, len;
1090 u32 opt;
1091
1092 BT_DBG("sk %p", sk);
1093
1094 lock_sock(sk);
1095
1096 switch (optname) {
1097 case L2CAP_OPTIONS:
1098 opts.imtu = l2cap_pi(sk)->imtu;
1099 opts.omtu = l2cap_pi(sk)->omtu;
1100 opts.flush_to = l2cap_pi(sk)->flush_to;
1101 opts.mode = L2CAP_MODE_BASIC;
1102
1103 len = min_t(unsigned int, sizeof(opts), optlen);
1104 if (copy_from_user((char *) &opts, optval, len)) {
1105 err = -EFAULT;
1106 break;
1107 }
1108
1109 l2cap_pi(sk)->imtu = opts.imtu;
1110 l2cap_pi(sk)->omtu = opts.omtu;
1111 break;
1112
1113 case L2CAP_LM:
1114 if (get_user(opt, (u32 __user *) optval)) {
1115 err = -EFAULT;
1116 break;
1117 }
1118
1119 l2cap_pi(sk)->link_mode = opt;
1120 break;
1121
1122 default:
1123 err = -ENOPROTOOPT;
1124 break;
1125 }
1126
1127 release_sock(sk);
1128 return err;
1129 }
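
/*
 * Illustrative only, not part of this file: the L2CAP_OPTIONS socket option
 * above is what user space uses to tune the MTUs before connecting, roughly
 * like this (BlueZ headers assumed):
 *
 *	struct l2cap_options opts;
 *	socklen_t optlen = sizeof(opts);
 *
 *	getsockopt(sk, SOL_L2CAP, L2CAP_OPTIONS, &opts, &optlen);
 *	opts.imtu = 672;
 *	setsockopt(sk, SOL_L2CAP, L2CAP_OPTIONS, &opts, sizeof(opts));
 *
 * 672 is the default L2CAP MTU; note that this version of setsockopt() only
 * honours the imtu and omtu fields.
 */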
1130
1131 static int l2cap_sock_getsockopt(struct socket *sock, int level, int optname, char __user *optval, int __user *optlen)
1132 {
1133 struct sock *sk = sock->sk;
1134 struct l2cap_options opts;
1135 struct l2cap_conninfo cinfo;
1136 int len, err = 0;
1137
1138 BT_DBG("sk %p", sk);
1139
1140 if (get_user(len, optlen))
1141 return -EFAULT;
1142
1143 lock_sock(sk);
1144
1145 switch (optname) {
1146 case L2CAP_OPTIONS:
1147 opts.imtu = l2cap_pi(sk)->imtu;
1148 opts.omtu = l2cap_pi(sk)->omtu;
1149 opts.flush_to = l2cap_pi(sk)->flush_to;
1150 opts.mode = L2CAP_MODE_BASIC;
1151
1152 len = min_t(unsigned int, len, sizeof(opts));
1153 if (copy_to_user(optval, (char *) &opts, len))
1154 err = -EFAULT;
1155
1156 break;
1157
1158 case L2CAP_LM:
1159 if (put_user(l2cap_pi(sk)->link_mode, (u32 __user *) optval))
1160 err = -EFAULT;
1161 break;
1162
1163 case L2CAP_CONNINFO:
1164 if (sk->sk_state != BT_CONNECTED) {
1165 err = -ENOTCONN;
1166 break;
1167 }
1168
1169 cinfo.hci_handle = l2cap_pi(sk)->conn->hcon->handle;
1170 memcpy(cinfo.dev_class, l2cap_pi(sk)->conn->hcon->dev_class, 3);
1171
1172 len = min_t(unsigned int, len, sizeof(cinfo));
1173 if (copy_to_user(optval, (char *) &cinfo, len))
1174 err = -EFAULT;
1175
1176 break;
1177
1178 default:
1179 err = -ENOPROTOOPT;
1180 break;
1181 }
1182
1183 release_sock(sk);
1184 return err;
1185 }
1186
1187 static int l2cap_sock_shutdown(struct socket *sock, int how)
1188 {
1189 struct sock *sk = sock->sk;
1190 int err = 0;
1191
1192 BT_DBG("sock %p, sk %p", sock, sk);
1193
1194 if (!sk)
1195 return 0;
1196
1197 lock_sock(sk);
1198 if (!sk->sk_shutdown) {
1199 sk->sk_shutdown = SHUTDOWN_MASK;
1200 l2cap_sock_clear_timer(sk);
1201 __l2cap_sock_close(sk, 0);
1202
1203 if (sock_flag(sk, SOCK_LINGER) && sk->sk_lingertime)
1204 err = bt_sock_wait_state(sk, BT_CLOSED, sk->sk_lingertime);
1205 }
1206 release_sock(sk);
1207 return err;
1208 }
1209
1210 static int l2cap_sock_release(struct socket *sock)
1211 {
1212 struct sock *sk = sock->sk;
1213 int err;
1214
1215 BT_DBG("sock %p, sk %p", sock, sk);
1216
1217 if (!sk)
1218 return 0;
1219
1220 err = l2cap_sock_shutdown(sock, 2);
1221
1222 sock_orphan(sk);
1223 l2cap_sock_kill(sk);
1224 return err;
1225 }
1226
1227 static void l2cap_chan_ready(struct sock *sk)
1228 {
1229 struct sock *parent = bt_sk(sk)->parent;
1230
1231 BT_DBG("sk %p, parent %p", sk, parent);
1232
1233 l2cap_pi(sk)->conf_state = 0;
1234 l2cap_sock_clear_timer(sk);
1235
1236 if (!parent) {
1237 /* Outgoing channel.
1238 * Wake up socket sleeping on connect.
1239 */
1240 sk->sk_state = BT_CONNECTED;
1241 sk->sk_state_change(sk);
1242 } else {
1243 /* Incoming channel.
1244 * Wake up socket sleeping on accept.
1245 */
1246 parent->sk_data_ready(parent, 0);
1247 }
1248 }
1249
1250 /* Copy frame to all raw sockets on that connection */
1251 static void l2cap_raw_recv(struct l2cap_conn *conn, struct sk_buff *skb)
1252 {
1253 struct l2cap_chan_list *l = &conn->chan_list;
1254 struct sk_buff *nskb;
1255 struct sock *sk;
1256
1257 BT_DBG("conn %p", conn);
1258
1259 read_lock(&l->lock);
1260 for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
1261 if (sk->sk_type != SOCK_RAW)
1262 continue;
1263
1264 /* Don't send frame to the socket it came from */
1265 if (skb->sk == sk)
1266 continue;
1267
1268 if (!(nskb = skb_clone(skb, GFP_ATOMIC)))
1269 continue;
1270
1271 if (sock_queue_rcv_skb(sk, nskb))
1272 kfree_skb(nskb);
1273 }
1274 read_unlock(&l->lock);
1275 }
1276
1277 /* ---- L2CAP signalling commands ---- */
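/* Build a signalling PDU on CID 0x0001, fragmenting the payload to the
 * ACL MTU when necessary. */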
1278 static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
1279 u8 code, u8 ident, u16 dlen, void *data)
1280 {
1281 struct sk_buff *skb, **frag;
1282 struct l2cap_cmd_hdr *cmd;
1283 struct l2cap_hdr *lh;
1284 int len, count;
1285
1286 BT_DBG("conn %p, code 0x%2.2x, ident 0x%2.2x, len %d", conn, code, ident, dlen);
1287
1288 len = L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE + dlen;
1289 count = min_t(unsigned int, conn->mtu, len);
1290
1291 skb = bt_skb_alloc(count, GFP_ATOMIC);
1292 if (!skb)
1293 return NULL;
1294
1295 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
1296 lh->len = cpu_to_le16(L2CAP_CMD_HDR_SIZE + dlen);
1297 lh->cid = cpu_to_le16(0x0001);
1298
1299 cmd = (struct l2cap_cmd_hdr *) skb_put(skb, L2CAP_CMD_HDR_SIZE);
1300 cmd->code = code;
1301 cmd->ident = ident;
1302 cmd->len = cpu_to_le16(dlen);
1303
1304 if (dlen) {
1305 count -= L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE;
1306 memcpy(skb_put(skb, count), data, count);
1307 data += count;
1308 }
1309
1310 len -= skb->len;
1311
1312 /* Continuation fragments (no L2CAP header) */
1313 frag = &skb_shinfo(skb)->frag_list;
1314 while (len) {
1315 count = min_t(unsigned int, conn->mtu, len);
1316
1317 *frag = bt_skb_alloc(count, GFP_ATOMIC);
1318 if (!*frag)
1319 goto fail;
1320
1321 memcpy(skb_put(*frag, count), data, count);
1322
1323 len -= count;
1324 data += count;
1325
1326 frag = &(*frag)->next;
1327 }
1328
1329 return skb;
1330
1331 fail:
1332 kfree_skb(skb);
1333 return NULL;
1334 }
1335
1336 static inline int l2cap_get_conf_opt(void **ptr, int *type, int *olen, unsigned long *val)
1337 {
1338 struct l2cap_conf_opt *opt = *ptr;
1339 int len;
1340
1341 len = L2CAP_CONF_OPT_SIZE + opt->len;
1342 *ptr += len;
1343
1344 *type = opt->type;
1345 *olen = opt->len;
1346
1347 switch (opt->len) {
1348 case 1:
1349 *val = *((u8 *) opt->val);
1350 break;
1351
1352 case 2:
1353 *val = __le16_to_cpu(*((__le16 *) opt->val));
1354 break;
1355
1356 case 4:
1357 *val = __le32_to_cpu(*((__le32 *) opt->val));
1358 break;
1359
1360 default:
1361 *val = (unsigned long) opt->val;
1362 break;
1363 }
1364
1365 BT_DBG("type 0x%2.2x len %d val 0x%lx", *type, opt->len, *val);
1366 return len;
1367 }
1368
1369 static void l2cap_add_conf_opt(void **ptr, u8 type, u8 len, unsigned long val)
1370 {
1371 struct l2cap_conf_opt *opt = *ptr;
1372
1373 BT_DBG("type 0x%2.2x len %d val 0x%lx", type, len, val);
1374
1375 opt->type = type;
1376 opt->len = len;
1377
1378 switch (len) {
1379 case 1:
1380 *((u8 *) opt->val) = val;
1381 break;
1382
1383 case 2:
1384 *((__le16 *) opt->val) = cpu_to_le16(val);
1385 break;
1386
1387 case 4:
1388 *((__le32 *) opt->val) = cpu_to_le32(val);
1389 break;
1390
1391 default:
1392 memcpy(opt->val, (void *) val, len);
1393 break;
1394 }
1395
1396 *ptr += L2CAP_CONF_OPT_SIZE + len;
1397 }
1398
1399 static int l2cap_build_conf_req(struct sock *sk, void *data)
1400 {
1401 struct l2cap_pinfo *pi = l2cap_pi(sk);
1402 struct l2cap_conf_req *req = data;
1403 void *ptr = req->data;
1404
1405 BT_DBG("sk %p", sk);
1406
1407 if (pi->imtu != L2CAP_DEFAULT_MTU)
1408 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, pi->imtu);
1409
1410 /* FIXME: Need actual value of the flush timeout */
1411 //if (flush_to != L2CAP_DEFAULT_FLUSH_TO)
1412 // l2cap_add_conf_opt(&ptr, L2CAP_CONF_FLUSH_TO, 2, pi->flush_to);
1413
1414 req->dcid = cpu_to_le16(pi->dcid);
1415 req->flags = cpu_to_le16(0);
1416
1417 return ptr - data;
1418 }
1419
1420 static int l2cap_parse_conf_req(struct sock *sk, void *data)
1421 {
1422 struct l2cap_pinfo *pi = l2cap_pi(sk);
1423 struct l2cap_conf_rsp *rsp = data;
1424 void *ptr = rsp->data;
1425 void *req = pi->conf_req;
1426 int len = pi->conf_len;
1427 int type, hint, olen;
1428 unsigned long val;
1429 struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
1430 u16 mtu = L2CAP_DEFAULT_MTU;
1431 u16 result = L2CAP_CONF_SUCCESS;
1432
1433 BT_DBG("sk %p", sk);
1434
1435 while (len >= L2CAP_CONF_OPT_SIZE) {
1436 len -= l2cap_get_conf_opt(&req, &type, &olen, &val);
1437
1438 hint = type & 0x80;
1439 type &= 0x7f;
1440
1441 switch (type) {
1442 case L2CAP_CONF_MTU:
1443 mtu = val;
1444 break;
1445
1446 case L2CAP_CONF_FLUSH_TO:
1447 pi->flush_to = val;
1448 break;
1449
1450 case L2CAP_CONF_QOS:
1451 break;
1452
1453 case L2CAP_CONF_RFC:
1454 if (olen == sizeof(rfc))
1455 memcpy(&rfc, (void *) val, olen);
1456 break;
1457
1458 default:
1459 if (hint)
1460 break;
1461
1462 result = L2CAP_CONF_UNKNOWN;
1463 *((u8 *) ptr++) = type;
1464 break;
1465 }
1466 }
1467
1468 if (result == L2CAP_CONF_SUCCESS) {
1469 /* Configure output options and let the other side know
1470 * which ones we don't like. */
1471
1472 if (rfc.mode == L2CAP_MODE_BASIC) {
1473 if (mtu < pi->omtu)
1474 result = L2CAP_CONF_UNACCEPT;
1475 else {
1476 pi->omtu = mtu;
1477 pi->conf_state |= L2CAP_CONF_OUTPUT_DONE;
1478 }
1479
1480 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, pi->omtu);
1481 } else {
1482 result = L2CAP_CONF_UNACCEPT;
1483
1484 memset(&rfc, 0, sizeof(rfc));
1485 rfc.mode = L2CAP_MODE_BASIC;
1486
1487 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
1488 sizeof(rfc), (unsigned long) &rfc);
1489 }
1490 }
1491
1492 rsp->scid = cpu_to_le16(pi->dcid);
1493 rsp->result = cpu_to_le16(result);
1494 rsp->flags = cpu_to_le16(0x0000);
1495
1496 return ptr - data;
1497 }
1498
1499 static int l2cap_build_conf_rsp(struct sock *sk, void *data, u16 result, u16 flags)
1500 {
1501 struct l2cap_conf_rsp *rsp = data;
1502 void *ptr = rsp->data;
1503
1504 BT_DBG("sk %p", sk);
1505
1506 rsp->scid = cpu_to_le16(l2cap_pi(sk)->dcid);
1507 rsp->result = cpu_to_le16(result);
1508 rsp->flags = cpu_to_le16(flags);
1509
1510 return ptr - data;
1511 }
1512
1513 static inline int l2cap_command_rej(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
1514 {
1515 struct l2cap_cmd_rej *rej = (struct l2cap_cmd_rej *) data;
1516
1517 if (rej->reason != 0x0000)
1518 return 0;
1519
1520 if ((conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) &&
1521 cmd->ident == conn->info_ident) {
1522 conn->info_ident = 0;
1523 del_timer(&conn->info_timer);
1524 l2cap_conn_start(conn);
1525 }
1526
1527 return 0;
1528 }
1529
1530 static inline int l2cap_connect_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
1531 {
1532 struct l2cap_chan_list *list = &conn->chan_list;
1533 struct l2cap_conn_req *req = (struct l2cap_conn_req *) data;
1534 struct l2cap_conn_rsp rsp;
1535 struct sock *sk, *parent;
1536 int result, status = 0;
1537
1538 u16 dcid = 0, scid = __le16_to_cpu(req->scid);
1539 __le16 psm = req->psm;
1540
1541 BT_DBG("psm 0x%2.2x scid 0x%4.4x", psm, scid);
1542
1543 /* Check if we have a socket listening on this PSM */
1544 parent = l2cap_get_sock_by_psm(BT_LISTEN, psm, conn->src);
1545 if (!parent) {
1546 result = L2CAP_CR_BAD_PSM;
1547 goto sendresp;
1548 }
1549
1550 result = L2CAP_CR_NO_MEM;
1551
1552 /* Check for backlog size */
1553 if (sk_acceptq_is_full(parent)) {
1554 BT_DBG("backlog full %d", parent->sk_ack_backlog);
1555 goto response;
1556 }
1557
1558 sk = l2cap_sock_alloc(sock_net(parent), NULL, BTPROTO_L2CAP, GFP_ATOMIC);
1559 if (!sk)
1560 goto response;
1561
1562 write_lock_bh(&list->lock);
1563
1564 /* Check if we already have a channel with that DCID */
1565 if (__l2cap_get_chan_by_dcid(list, scid)) {
1566 write_unlock_bh(&list->lock);
1567 sock_set_flag(sk, SOCK_ZAPPED);
1568 l2cap_sock_kill(sk);
1569 goto response;
1570 }
1571
1572 hci_conn_hold(conn->hcon);
1573
1574 l2cap_sock_init(sk, parent);
1575 bacpy(&bt_sk(sk)->src, conn->src);
1576 bacpy(&bt_sk(sk)->dst, conn->dst);
1577 l2cap_pi(sk)->psm = psm;
1578 l2cap_pi(sk)->dcid = scid;
1579
1580 __l2cap_chan_add(conn, sk, parent);
1581 dcid = l2cap_pi(sk)->scid;
1582
1583 l2cap_sock_set_timer(sk, sk->sk_sndtimeo);
1584
1585 l2cap_pi(sk)->ident = cmd->ident;
1586
1587 if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) {
1588 if (l2cap_check_link_mode(sk)) {
1589 sk->sk_state = BT_CONFIG;
1590 result = L2CAP_CR_SUCCESS;
1591 status = L2CAP_CS_NO_INFO;
1592 } else {
1593 sk->sk_state = BT_CONNECT2;
1594 result = L2CAP_CR_PEND;
1595 status = L2CAP_CS_AUTHEN_PEND;
1596 }
1597 } else {
1598 sk->sk_state = BT_CONNECT2;
1599 result = L2CAP_CR_PEND;
1600 status = L2CAP_CS_NO_INFO;
1601 }
1602
1603 write_unlock_bh(&list->lock);
1604
1605 response:
1606 bh_unlock_sock(parent);
1607
1608 sendresp:
1609 rsp.scid = cpu_to_le16(scid);
1610 rsp.dcid = cpu_to_le16(dcid);
1611 rsp.result = cpu_to_le16(result);
1612 rsp.status = cpu_to_le16(status);
1613 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONN_RSP, sizeof(rsp), &rsp);
1614
1615 if (result == L2CAP_CR_PEND && status == L2CAP_CS_NO_INFO) {
1616 struct l2cap_info_req info;
1617 info.type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
1618
1619 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
1620 conn->info_ident = l2cap_get_ident(conn);
1621
1622 mod_timer(&conn->info_timer, jiffies +
1623 msecs_to_jiffies(L2CAP_INFO_TIMEOUT));
1624
1625 l2cap_send_cmd(conn, conn->info_ident,
1626 L2CAP_INFO_REQ, sizeof(info), &info);
1627 }
1628
1629 return 0;
1630 }
1631
1632 static inline int l2cap_connect_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
1633 {
1634 struct l2cap_conn_rsp *rsp = (struct l2cap_conn_rsp *) data;
1635 u16 scid, dcid, result, status;
1636 struct sock *sk;
1637 u8 req[128];
1638
1639 scid = __le16_to_cpu(rsp->scid);
1640 dcid = __le16_to_cpu(rsp->dcid);
1641 result = __le16_to_cpu(rsp->result);
1642 status = __le16_to_cpu(rsp->status);
1643
1644 BT_DBG("dcid 0x%4.4x scid 0x%4.4x result 0x%2.2x status 0x%2.2x", dcid, scid, result, status);
1645
1646 if (scid) {
1647 if (!(sk = l2cap_get_chan_by_scid(&conn->chan_list, scid)))
1648 return 0;
1649 } else {
1650 if (!(sk = l2cap_get_chan_by_ident(&conn->chan_list, cmd->ident)))
1651 return 0;
1652 }
1653
1654 switch (result) {
1655 case L2CAP_CR_SUCCESS:
1656 sk->sk_state = BT_CONFIG;
1657 l2cap_pi(sk)->ident = 0;
1658 l2cap_pi(sk)->dcid = dcid;
1659 l2cap_pi(sk)->conf_state |= L2CAP_CONF_REQ_SENT;
1660
1661 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
1662 l2cap_build_conf_req(sk, req), req);
1663 break;
1664
1665 case L2CAP_CR_PEND:
1666 break;
1667
1668 default:
1669 l2cap_chan_del(sk, ECONNREFUSED);
1670 break;
1671 }
1672
1673 bh_unlock_sock(sk);
1674 return 0;
1675 }
1676
1677 static inline int l2cap_config_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u16 cmd_len, u8 *data)
1678 {
1679 struct l2cap_conf_req *req = (struct l2cap_conf_req *) data;
1680 u16 dcid, flags;
1681 u8 rsp[64];
1682 struct sock *sk;
1683 int len;
1684
1685 dcid = __le16_to_cpu(req->dcid);
1686 flags = __le16_to_cpu(req->flags);
1687
1688 BT_DBG("dcid 0x%4.4x flags 0x%2.2x", dcid, flags);
1689
1690 if (!(sk = l2cap_get_chan_by_scid(&conn->chan_list, dcid)))
1691 return -ENOENT;
1692
1693 if (sk->sk_state == BT_DISCONN)
1694 goto unlock;
1695
1696 /* Reject if config buffer is too small. */
1697 len = cmd_len - sizeof(*req);
1698 if (l2cap_pi(sk)->conf_len + len > sizeof(l2cap_pi(sk)->conf_req)) {
1699 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
1700 l2cap_build_conf_rsp(sk, rsp,
1701 L2CAP_CONF_REJECT, flags), rsp);
1702 goto unlock;
1703 }
1704
1705 /* Store config. */
1706 memcpy(l2cap_pi(sk)->conf_req + l2cap_pi(sk)->conf_len, req->data, len);
1707 l2cap_pi(sk)->conf_len += len;
1708
1709 if (flags & 0x0001) {
1710 /* Incomplete config. Send empty response. */
1711 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
1712 l2cap_build_conf_rsp(sk, rsp,
1713 L2CAP_CONF_SUCCESS, 0x0001), rsp);
1714 goto unlock;
1715 }
1716
1717 /* Complete config. */
1718 len = l2cap_parse_conf_req(sk, rsp);
1719 if (len < 0)
1720 goto unlock;
1721
1722 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP, len, rsp);
1723
1724 /* Reset config buffer. */
1725 l2cap_pi(sk)->conf_len = 0;
1726
1727 if (!(l2cap_pi(sk)->conf_state & L2CAP_CONF_OUTPUT_DONE))
1728 goto unlock;
1729
1730 if (l2cap_pi(sk)->conf_state & L2CAP_CONF_INPUT_DONE) {
1731 sk->sk_state = BT_CONNECTED;
1732 l2cap_chan_ready(sk);
1733 goto unlock;
1734 }
1735
1736 if (!(l2cap_pi(sk)->conf_state & L2CAP_CONF_REQ_SENT)) {
1737 u8 buf[64];
1738 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
1739 l2cap_build_conf_req(sk, buf), buf);
1740 }
1741
1742 unlock:
1743 bh_unlock_sock(sk);
1744 return 0;
1745 }
1746
1747 static inline int l2cap_config_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
1748 {
1749 struct l2cap_conf_rsp *rsp = (struct l2cap_conf_rsp *)data;
1750 u16 scid, flags, result;
1751 struct sock *sk;
1752
1753 scid = __le16_to_cpu(rsp->scid);
1754 flags = __le16_to_cpu(rsp->flags);
1755 result = __le16_to_cpu(rsp->result);
1756
1757 BT_DBG("scid 0x%4.4x flags 0x%2.2x result 0x%2.2x", scid, flags, result);
1758
1759 if (!(sk = l2cap_get_chan_by_scid(&conn->chan_list, scid)))
1760 return 0;
1761
1762 switch (result) {
1763 case L2CAP_CONF_SUCCESS:
1764 break;
1765
1766 case L2CAP_CONF_UNACCEPT:
1767 if (++l2cap_pi(sk)->conf_retry < L2CAP_CONF_MAX_RETRIES) {
1768 char req[128];
1769 /* It does not make sense to adjust L2CAP parameters
1770 * that are currently defined in the spec. We simply
1771 * resend the config request that we sent earlier. It is
1772 * stupid, but it helps qualification testing, which
1773 * expects at least some response from us. */
1774 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
1775 l2cap_build_conf_req(sk, req), req);
1776 goto done;
1777 }
1778
1779 default:
1780 sk->sk_state = BT_DISCONN;
1781 sk->sk_err = ECONNRESET;
1782 l2cap_sock_set_timer(sk, HZ * 5);
1783 {
1784 struct l2cap_disconn_req req;
1785 req.dcid = cpu_to_le16(l2cap_pi(sk)->dcid);
1786 req.scid = cpu_to_le16(l2cap_pi(sk)->scid);
1787 l2cap_send_cmd(conn, l2cap_get_ident(conn),
1788 L2CAP_DISCONN_REQ, sizeof(req), &req);
1789 }
1790 goto done;
1791 }
1792
1793 if (flags & 0x01)
1794 goto done;
1795
1796 l2cap_pi(sk)->conf_state |= L2CAP_CONF_INPUT_DONE;
1797
1798 if (l2cap_pi(sk)->conf_state & L2CAP_CONF_OUTPUT_DONE) {
1799 sk->sk_state = BT_CONNECTED;
1800 l2cap_chan_ready(sk);
1801 }
1802
1803 done:
1804 bh_unlock_sock(sk);
1805 return 0;
1806 }
1807
1808 static inline int l2cap_disconnect_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
1809 {
1810 struct l2cap_disconn_req *req = (struct l2cap_disconn_req *) data;
1811 struct l2cap_disconn_rsp rsp;
1812 u16 dcid, scid;
1813 struct sock *sk;
1814
1815 scid = __le16_to_cpu(req->scid);
1816 dcid = __le16_to_cpu(req->dcid);
1817
1818 BT_DBG("scid 0x%4.4x dcid 0x%4.4x", scid, dcid);
1819
1820 if (!(sk = l2cap_get_chan_by_scid(&conn->chan_list, dcid)))
1821 return 0;
1822
1823 rsp.dcid = cpu_to_le16(l2cap_pi(sk)->scid);
1824 rsp.scid = cpu_to_le16(l2cap_pi(sk)->dcid);
1825 l2cap_send_cmd(conn, cmd->ident, L2CAP_DISCONN_RSP, sizeof(rsp), &rsp);
1826
1827 sk->sk_shutdown = SHUTDOWN_MASK;
1828
1829 l2cap_chan_del(sk, ECONNRESET);
1830 bh_unlock_sock(sk);
1831
1832 l2cap_sock_kill(sk);
1833 return 0;
1834 }
1835
1836 static inline int l2cap_disconnect_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
1837 {
1838 struct l2cap_disconn_rsp *rsp = (struct l2cap_disconn_rsp *) data;
1839 u16 dcid, scid;
1840 struct sock *sk;
1841
1842 scid = __le16_to_cpu(rsp->scid);
1843 dcid = __le16_to_cpu(rsp->dcid);
1844
1845 BT_DBG("dcid 0x%4.4x scid 0x%4.4x", dcid, scid);
1846
1847 if (!(sk = l2cap_get_chan_by_scid(&conn->chan_list, scid)))
1848 return 0;
1849
1850 l2cap_chan_del(sk, 0);
1851 bh_unlock_sock(sk);
1852
1853 l2cap_sock_kill(sk);
1854 return 0;
1855 }
1856
1857 static inline int l2cap_information_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
1858 {
1859 struct l2cap_info_req *req = (struct l2cap_info_req *) data;
1860 u16 type;
1861
1862 type = __le16_to_cpu(req->type);
1863
1864 BT_DBG("type 0x%4.4x", type);
1865
1866 if (type == L2CAP_IT_FEAT_MASK) {
1867 u8 buf[8];
1868 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
1869 rsp->type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
1870 rsp->result = cpu_to_le16(L2CAP_IR_SUCCESS);
1871 put_unaligned(cpu_to_le32(l2cap_feat_mask), (__le32 *) rsp->data);
1872 l2cap_send_cmd(conn, cmd->ident,
1873 L2CAP_INFO_RSP, sizeof(buf), buf);
1874 } else {
1875 struct l2cap_info_rsp rsp;
1876 rsp.type = cpu_to_le16(type);
1877 rsp.result = cpu_to_le16(L2CAP_IR_NOTSUPP);
1878 l2cap_send_cmd(conn, cmd->ident,
1879 L2CAP_INFO_RSP, sizeof(rsp), &rsp);
1880 }
1881
1882 return 0;
1883 }
1884
1885 static inline int l2cap_information_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
1886 {
1887 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) data;
1888 u16 type, result;
1889
1890 type = __le16_to_cpu(rsp->type);
1891 result = __le16_to_cpu(rsp->result);
1892
1893 BT_DBG("type 0x%4.4x result 0x%2.2x", type, result);
1894
1895 conn->info_ident = 0;
1896
1897 del_timer(&conn->info_timer);
1898
1899 if (type == L2CAP_IT_FEAT_MASK)
1900 conn->feat_mask = get_unaligned_le32(rsp->data);
1901
1902 l2cap_conn_start(conn);
1903
1904 return 0;
1905 }
1906
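/* Hand the signalling frame to any raw sockets, then parse and dispatch
 * each command it contains, rejecting commands we cannot handle. */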
1907 static inline void l2cap_sig_channel(struct l2cap_conn *conn, struct sk_buff *skb)
1908 {
1909 u8 *data = skb->data;
1910 int len = skb->len;
1911 struct l2cap_cmd_hdr cmd;
1912 int err = 0;
1913
1914 l2cap_raw_recv(conn, skb);
1915
1916 while (len >= L2CAP_CMD_HDR_SIZE) {
1917 u16 cmd_len;
1918 memcpy(&cmd, data, L2CAP_CMD_HDR_SIZE);
1919 data += L2CAP_CMD_HDR_SIZE;
1920 len -= L2CAP_CMD_HDR_SIZE;
1921
1922 cmd_len = le16_to_cpu(cmd.len);
1923
1924 BT_DBG("code 0x%2.2x len %d id 0x%2.2x", cmd.code, cmd_len, cmd.ident);
1925
1926 if (cmd_len > len || !cmd.ident) {
1927 BT_DBG("corrupted command");
1928 break;
1929 }
1930
1931 switch (cmd.code) {
1932 case L2CAP_COMMAND_REJ:
1933 l2cap_command_rej(conn, &cmd, data);
1934 break;
1935
1936 case L2CAP_CONN_REQ:
1937 err = l2cap_connect_req(conn, &cmd, data);
1938 break;
1939
1940 case L2CAP_CONN_RSP:
1941 err = l2cap_connect_rsp(conn, &cmd, data);
1942 break;
1943
1944 case L2CAP_CONF_REQ:
1945 err = l2cap_config_req(conn, &cmd, cmd_len, data);
1946 break;
1947
1948 case L2CAP_CONF_RSP:
1949 err = l2cap_config_rsp(conn, &cmd, data);
1950 break;
1951
1952 case L2CAP_DISCONN_REQ:
1953 err = l2cap_disconnect_req(conn, &cmd, data);
1954 break;
1955
1956 case L2CAP_DISCONN_RSP:
1957 err = l2cap_disconnect_rsp(conn, &cmd, data);
1958 break;
1959
1960 case L2CAP_ECHO_REQ:
1961 l2cap_send_cmd(conn, cmd.ident, L2CAP_ECHO_RSP, cmd_len, data);
1962 break;
1963
1964 case L2CAP_ECHO_RSP:
1965 break;
1966
1967 case L2CAP_INFO_REQ:
1968 err = l2cap_information_req(conn, &cmd, data);
1969 break;
1970
1971 case L2CAP_INFO_RSP:
1972 err = l2cap_information_rsp(conn, &cmd, data);
1973 break;
1974
1975 default:
1976 BT_ERR("Unknown signaling command 0x%2.2x", cmd.code);
1977 err = -EINVAL;
1978 break;
1979 }
1980
1981 if (err) {
1982 struct l2cap_cmd_rej rej;
1983 BT_DBG("error %d", err);
1984
1985 /* FIXME: Map err to a valid reason */
1986 rej.reason = cpu_to_le16(0);
1987 l2cap_send_cmd(conn, cmd.ident, L2CAP_COMMAND_REJ, sizeof(rej), &rej);
1988 }
1989
1990 data += cmd_len;
1991 len -= cmd_len;
1992 }
1993
1994 kfree_skb(skb);
1995 }
1996
1997 static inline int l2cap_data_channel(struct l2cap_conn *conn, u16 cid, struct sk_buff *skb)
1998 {
1999 struct sock *sk;
2000
2001 sk = l2cap_get_chan_by_scid(&conn->chan_list, cid);
2002 if (!sk) {
2003 BT_DBG("unknown cid 0x%4.4x", cid);
2004 goto drop;
2005 }
2006
2007 BT_DBG("sk %p, len %d", sk, skb->len);
2008
2009 if (sk->sk_state != BT_CONNECTED)
2010 goto drop;
2011
2012 if (l2cap_pi(sk)->imtu < skb->len)
2013 goto drop;
2014
2015 /* If the socket receive buffer overflows we drop data here,
2016 * which is *bad* because L2CAP has to be reliable.
2017 * But we don't have any other choice: L2CAP doesn't
2018 * provide a flow control mechanism. */
2019
2020 if (!sock_queue_rcv_skb(sk, skb))
2021 goto done;
2022
2023 drop:
2024 kfree_skb(skb);
2025
2026 done:
2027 if (sk)
2028 bh_unlock_sock(sk);
2029
2030 return 0;
2031 }
2032
2033 static inline int l2cap_conless_channel(struct l2cap_conn *conn, __le16 psm, struct sk_buff *skb)
2034 {
2035 struct sock *sk;
2036
2037 sk = l2cap_get_sock_by_psm(0, psm, conn->src);
2038 if (!sk)
2039 goto drop;
2040
2041 BT_DBG("sk %p, len %d", sk, skb->len);
2042
2043 if (sk->sk_state != BT_BOUND && sk->sk_state != BT_CONNECTED)
2044 goto drop;
2045
2046 if (l2cap_pi(sk)->imtu < skb->len)
2047 goto drop;
2048
2049 if (!sock_queue_rcv_skb(sk, skb))
2050 goto done;
2051
2052 drop:
2053 kfree_skb(skb);
2054
2055 done:
2056 if (sk) bh_unlock_sock(sk);
2057 return 0;
2058 }
2059
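/* Demultiplex a complete L2CAP frame by CID: 0x0001 is signalling, 0x0002
 * is connectionless data, anything else is a connection-oriented channel. */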
2060 static void l2cap_recv_frame(struct l2cap_conn *conn, struct sk_buff *skb)
2061 {
2062 struct l2cap_hdr *lh = (void *) skb->data;
2063 u16 cid, len;
2064 __le16 psm;
2065
2066 skb_pull(skb, L2CAP_HDR_SIZE);
2067 cid = __le16_to_cpu(lh->cid);
2068 len = __le16_to_cpu(lh->len);
2069
2070 BT_DBG("len %d, cid 0x%4.4x", len, cid);
2071
2072 switch (cid) {
2073 case 0x0001:
2074 l2cap_sig_channel(conn, skb);
2075 break;
2076
2077 case 0x0002:
2078 psm = get_unaligned((__le16 *) skb->data);
2079 skb_pull(skb, 2);
2080 l2cap_conless_channel(conn, psm, skb);
2081 break;
2082
2083 default:
2084 l2cap_data_channel(conn, cid, skb);
2085 break;
2086 }
2087 }
2088
2089 /* ---- L2CAP interface with lower layer (HCI) ---- */
2090
2091 static int l2cap_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
2092 {
2093 int exact = 0, lm1 = 0, lm2 = 0;
2094 register struct sock *sk;
2095 struct hlist_node *node;
2096
2097 if (type != ACL_LINK)
2098 return 0;
2099
2100 BT_DBG("hdev %s, bdaddr %s", hdev->name, batostr(bdaddr));
2101
2102 /* Find listening sockets and check their link_mode */
2103 read_lock(&l2cap_sk_list.lock);
2104 sk_for_each(sk, node, &l2cap_sk_list.head) {
2105 if (sk->sk_state != BT_LISTEN)
2106 continue;
2107
2108 if (!bacmp(&bt_sk(sk)->src, &hdev->bdaddr)) {
2109 lm1 |= (HCI_LM_ACCEPT | l2cap_pi(sk)->link_mode);
2110 exact++;
2111 } else if (!bacmp(&bt_sk(sk)->src, BDADDR_ANY))
2112 lm2 |= (HCI_LM_ACCEPT | l2cap_pi(sk)->link_mode);
2113 }
2114 read_unlock(&l2cap_sk_list.lock);
2115
2116 return exact ? lm1 : lm2;
2117 }
2118
2119 static int l2cap_connect_cfm(struct hci_conn *hcon, u8 status)
2120 {
2121 struct l2cap_conn *conn;
2122
2123 BT_DBG("hcon %p bdaddr %s status %d", hcon, batostr(&hcon->dst), status);
2124
2125 if (hcon->type != ACL_LINK)
2126 return 0;
2127
2128 if (!status) {
2129 conn = l2cap_conn_add(hcon, status);
2130 if (conn)
2131 l2cap_conn_ready(conn);
2132 } else
2133 l2cap_conn_del(hcon, bt_err(status));
2134
2135 return 0;
2136 }
2137
2138 static int l2cap_disconn_ind(struct hci_conn *hcon, u8 reason)
2139 {
2140 BT_DBG("hcon %p reason %d", hcon, reason);
2141
2142 if (hcon->type != ACL_LINK)
2143 return 0;
2144
2145 l2cap_conn_del(hcon, bt_err(reason));
2146
2147 return 0;
2148 }
2149
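/* Authentication finished on the ACL link: answer every channel waiting in
 * BT_CONNECT2, either moving it on to configuration or rejecting it with a
 * security block. */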
2150 static int l2cap_auth_cfm(struct hci_conn *hcon, u8 status)
2151 {
2152 struct l2cap_chan_list *l;
2153 struct l2cap_conn *conn = hcon->l2cap_data;
2154 struct l2cap_conn_rsp rsp;
2155 struct sock *sk;
2156 int result;
2157
2158 if (!conn)
2159 return 0;
2160
2161 l = &conn->chan_list;
2162
2163 BT_DBG("conn %p", conn);
2164
2165 read_lock(&l->lock);
2166
2167 for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
2168 struct l2cap_pinfo *pi = l2cap_pi(sk);
2169
2170 bh_lock_sock(sk);
2171
2172 if (sk->sk_state != BT_CONNECT2) {
2173 bh_unlock_sock(sk);
2174 continue;
2175 }
2176
2177 if ((pi->link_mode & (L2CAP_LM_ENCRYPT | L2CAP_LM_SECURE)) &&
2178 !(hcon->link_mode & HCI_LM_ENCRYPT)) {
2179 bh_unlock_sock(sk);
2180 continue;
2181 }
2182
2183 if (!status) {
2184 sk->sk_state = BT_CONFIG;
2185 result = 0;
2186 } else {
2187 sk->sk_state = BT_DISCONN;
2188 l2cap_sock_set_timer(sk, HZ/10);
2189 result = L2CAP_CR_SEC_BLOCK;
2190 }
2191
2192 rsp.scid = cpu_to_le16(l2cap_pi(sk)->dcid);
2193 rsp.dcid = cpu_to_le16(l2cap_pi(sk)->scid);
2194 rsp.result = cpu_to_le16(result);
2195 rsp.status = cpu_to_le16(0);
2196 l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
2197 L2CAP_CONN_RSP, sizeof(rsp), &rsp);
2198
2199 bh_unlock_sock(sk);
2200 }
2201
2202 read_unlock(&l->lock);
2203 return 0;
2204 }
2205
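/* Encryption changed on the ACL link: close established channels that
 * require encryption when it is switched off, and complete the pending
 * connect responses for channels in BT_CONNECT2. */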
2206 static int l2cap_encrypt_cfm(struct hci_conn *hcon, u8 status, u8 encrypt)
2207 {
2208 struct l2cap_chan_list *l;
2209 struct l2cap_conn *conn = hcon->l2cap_data;
2210 struct l2cap_conn_rsp rsp;
2211 struct sock *sk;
2212 int result;
2213
2214 if (!conn)
2215 return 0;
2216
2217 l = &conn->chan_list;
2218
2219 BT_DBG("conn %p", conn);
2220
2221 read_lock(&l->lock);
2222
2223 for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
2224 struct l2cap_pinfo *pi = l2cap_pi(sk);
2225
2226 bh_lock_sock(sk);
2227
2228 if ((pi->link_mode & (L2CAP_LM_ENCRYPT | L2CAP_LM_SECURE)) &&
2229 (sk->sk_state == BT_CONNECTED ||
2230 sk->sk_state == BT_CONFIG) &&
2231 !status && encrypt == 0x00) {
2232 __l2cap_sock_close(sk, ECONNREFUSED);
2233 bh_unlock_sock(sk);
2234 continue;
2235 }
2236
2237 if (sk->sk_state != BT_CONNECT2) {
2238 bh_unlock_sock(sk);
2239 continue;
2240 }
2241
2242 if (!status) {
2243 sk->sk_state = BT_CONFIG;
2244 result = 0;
2245 } else {
2246 sk->sk_state = BT_DISCONN;
2247 l2cap_sock_set_timer(sk, HZ/10);
2248 result = L2CAP_CR_SEC_BLOCK;
2249 }
2250
2251 rsp.scid = cpu_to_le16(l2cap_pi(sk)->dcid);
2252 rsp.dcid = cpu_to_le16(l2cap_pi(sk)->scid);
2253 rsp.result = cpu_to_le16(result);
2254 rsp.status = cpu_to_le16(0);
2255 l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
2256 L2CAP_CONN_RSP, sizeof(rsp), &rsp);
2257
2258 if (l2cap_pi(sk)->link_mode & L2CAP_LM_SECURE)
2259 hci_conn_change_link_key(hcon);
2260
2261 bh_unlock_sock(sk);
2262 }
2263
2264 read_unlock(&l->lock);
2265 return 0;
2266 }
2267
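/* Reassemble incoming ACL fragments into complete L2CAP frames, marking the
 * connection unreliable on any framing error. */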
2268 static int l2cap_recv_acldata(struct hci_conn *hcon, struct sk_buff *skb, u16 flags)
2269 {
2270 struct l2cap_conn *conn = hcon->l2cap_data;
2271
2272 if (!conn && !(conn = l2cap_conn_add(hcon, 0)))
2273 goto drop;
2274
2275 BT_DBG("conn %p len %d flags 0x%x", conn, skb->len, flags);
2276
2277 if (flags & ACL_START) {
2278 struct l2cap_hdr *hdr;
2279 int len;
2280
2281 if (conn->rx_len) {
2282 BT_ERR("Unexpected start frame (len %d)", skb->len);
2283 kfree_skb(conn->rx_skb);
2284 conn->rx_skb = NULL;
2285 conn->rx_len = 0;
2286 l2cap_conn_unreliable(conn, ECOMM);
2287 }
2288
2289 if (skb->len < 2) {
2290 BT_ERR("Frame is too short (len %d)", skb->len);
2291 l2cap_conn_unreliable(conn, ECOMM);
2292 goto drop;
2293 }
2294
2295 hdr = (struct l2cap_hdr *) skb->data;
2296 len = __le16_to_cpu(hdr->len) + L2CAP_HDR_SIZE;
2297
2298 if (len == skb->len) {
2299 /* Complete frame received */
2300 l2cap_recv_frame(conn, skb);
2301 return 0;
2302 }
2303
2304 BT_DBG("Start: total len %d, frag len %d", len, skb->len);
2305
2306 if (skb->len > len) {
2307 BT_ERR("Frame is too long (len %d, expected len %d)",
2308 skb->len, len);
2309 l2cap_conn_unreliable(conn, ECOMM);
2310 goto drop;
2311 }
2312
2313 /* Allocate skb for the complete frame (with header) */
2314 if (!(conn->rx_skb = bt_skb_alloc(len, GFP_ATOMIC)))
2315 goto drop;
2316
2317 skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
2318 skb->len);
2319 conn->rx_len = len - skb->len;
2320 } else {
2321 BT_DBG("Cont: frag len %d (expecting %d)", skb->len, conn->rx_len);
2322
2323 if (!conn->rx_len) {
2324 BT_ERR("Unexpected continuation frame (len %d)", skb->len);
2325 l2cap_conn_unreliable(conn, ECOMM);
2326 goto drop;
2327 }
2328
2329 if (skb->len > conn->rx_len) {
2330 BT_ERR("Fragment is too long (len %d, expected %d)",
2331 skb->len, conn->rx_len);
2332 kfree_skb(conn->rx_skb);
2333 conn->rx_skb = NULL;
2334 conn->rx_len = 0;
2335 l2cap_conn_unreliable(conn, ECOMM);
2336 goto drop;
2337 }
2338
2339 skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
2340 skb->len);
2341 conn->rx_len -= skb->len;
2342
2343 if (!conn->rx_len) {
2344 /* Complete frame received */
2345 l2cap_recv_frame(conn, conn->rx_skb);
2346 conn->rx_skb = NULL;
2347 }
2348 }
2349
2350 drop:
2351 kfree_skb(skb);
2352 return 0;
2353 }
2354
2355 static ssize_t l2cap_sysfs_show(struct class *dev, char *buf)
2356 {
2357 struct sock *sk;
2358 struct hlist_node *node;
2359 char *str = buf;
2360
2361 read_lock_bh(&l2cap_sk_list.lock);
2362
2363 sk_for_each(sk, node, &l2cap_sk_list.head) {
2364 struct l2cap_pinfo *pi = l2cap_pi(sk);
2365
2366 str += sprintf(str, "%s %s %d %d 0x%4.4x 0x%4.4x %d %d 0x%x\n",
2367 batostr(&bt_sk(sk)->src), batostr(&bt_sk(sk)->dst),
2368 sk->sk_state, btohs(pi->psm), pi->scid, pi->dcid,
2369 pi->imtu, pi->omtu, pi->link_mode);
2370 }
2371
2372 read_unlock_bh(&l2cap_sk_list.lock);
2373
2374 return (str - buf);
2375 }
2376
2377 static CLASS_ATTR(l2cap, S_IRUGO, l2cap_sysfs_show, NULL);
2378
2379 static const struct proto_ops l2cap_sock_ops = {
2380 .family = PF_BLUETOOTH,
2381 .owner = THIS_MODULE,
2382 .release = l2cap_sock_release,
2383 .bind = l2cap_sock_bind,
2384 .connect = l2cap_sock_connect,
2385 .listen = l2cap_sock_listen,
2386 .accept = l2cap_sock_accept,
2387 .getname = l2cap_sock_getname,
2388 .sendmsg = l2cap_sock_sendmsg,
2389 .recvmsg = bt_sock_recvmsg,
2390 .poll = bt_sock_poll,
2391 .ioctl = bt_sock_ioctl,
2392 .mmap = sock_no_mmap,
2393 .socketpair = sock_no_socketpair,
2394 .shutdown = l2cap_sock_shutdown,
2395 .setsockopt = l2cap_sock_setsockopt,
2396 .getsockopt = l2cap_sock_getsockopt
2397 };
2398
2399 static struct net_proto_family l2cap_sock_family_ops = {
2400 .family = PF_BLUETOOTH,
2401 .owner = THIS_MODULE,
2402 .create = l2cap_sock_create,
2403 };
2404
2405 static struct hci_proto l2cap_hci_proto = {
2406 .name = "L2CAP",
2407 .id = HCI_PROTO_L2CAP,
2408 .connect_ind = l2cap_connect_ind,
2409 .connect_cfm = l2cap_connect_cfm,
2410 .disconn_ind = l2cap_disconn_ind,
2411 .auth_cfm = l2cap_auth_cfm,
2412 .encrypt_cfm = l2cap_encrypt_cfm,
2413 .recv_acldata = l2cap_recv_acldata
2414 };
2415
2416 static int __init l2cap_init(void)
2417 {
2418 int err;
2419
2420 err = proto_register(&l2cap_proto, 0);
2421 if (err < 0)
2422 return err;
2423
2424 err = bt_sock_register(BTPROTO_L2CAP, &l2cap_sock_family_ops);
2425 if (err < 0) {
2426 BT_ERR("L2CAP socket registration failed");
2427 goto error;
2428 }
2429
2430 err = hci_register_proto(&l2cap_hci_proto);
2431 if (err < 0) {
2432 BT_ERR("L2CAP protocol registration failed");
2433 bt_sock_unregister(BTPROTO_L2CAP);
2434 goto error;
2435 }
2436
2437 if (class_create_file(bt_class, &class_attr_l2cap) < 0)
2438 BT_ERR("Failed to create L2CAP info file");
2439
2440 BT_INFO("L2CAP ver %s", VERSION);
2441 BT_INFO("L2CAP socket layer initialized");
2442
2443 return 0;
2444
2445 error:
2446 proto_unregister(&l2cap_proto);
2447 return err;
2448 }
2449
2450 static void __exit l2cap_exit(void)
2451 {
2452 class_remove_file(bt_class, &class_attr_l2cap);
2453
2454 if (bt_sock_unregister(BTPROTO_L2CAP) < 0)
2455 BT_ERR("L2CAP socket unregistration failed");
2456
2457 if (hci_unregister_proto(&l2cap_hci_proto) < 0)
2458 BT_ERR("L2CAP protocol unregistration failed");
2459
2460 proto_unregister(&l2cap_proto);
2461 }
2462
2463 void l2cap_load(void)
2464 {
2465 /* Dummy function to trigger automatic L2CAP module loading by
2466 * other modules that use L2CAP sockets but don't use any other
2467 * symbols from it. */
2468 return;
2469 }
2470 EXPORT_SYMBOL(l2cap_load);
2471
2472 module_init(l2cap_init);
2473 module_exit(l2cap_exit);
2474
2475 MODULE_AUTHOR("Maxim Krasnyansky <maxk@qualcomm.com>, Marcel Holtmann <marcel@holtmann.org>");
2476 MODULE_DESCRIPTION("Bluetooth L2CAP ver " VERSION);
2477 MODULE_VERSION(VERSION);
2478 MODULE_LICENSE("GPL");
2479 MODULE_ALIAS("bt-proto-0");