/*
 * bluetooth: scheduling while atomic bug fix
 * net/bluetooth/l2cap.c
 * (GitHub/mt8127/android_kernel_alcatel_ttab.git)
 */
/*
   BlueZ - Bluetooth protocol stack for Linux
   Copyright (C) 2000-2001 Qualcomm Incorporated

   Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/
24
25/* Bluetooth L2CAP core and sockets. */
26
27#include <linux/module.h>
28
29#include <linux/types.h>
30#include <linux/capability.h>
31#include <linux/errno.h>
32#include <linux/kernel.h>
33#include <linux/sched.h>
34#include <linux/slab.h>
35#include <linux/poll.h>
36#include <linux/fcntl.h>
37#include <linux/init.h>
38#include <linux/interrupt.h>
39#include <linux/socket.h>
40#include <linux/skbuff.h>
41#include <linux/list.h>
42#include <linux/device.h>
43#include <linux/uaccess.h>
44#include <linux/crc16.h>
45#include <net/sock.h>
46
47#include <asm/system.h>
48#include <asm/unaligned.h>
49
50#include <net/bluetooth/bluetooth.h>
51#include <net/bluetooth/hci_core.h>
52#include <net/bluetooth/l2cap.h>
53
#define VERSION "2.14"

/* Module knob: ERTM/streaming modes are only accepted by connect() and
 * listen() when this is non-zero (see the mode switches below). */
static int enable_ertm = 0;

/* Capability masks consumed by the signalling handlers (the users are
 * outside this chunk — NOTE(review): verify against the info-rsp code). */
static u32 l2cap_feat_mask = L2CAP_FEAT_FIXED_CHAN;
static u8 l2cap_fixed_chan[8] = { 0x02, };

static const struct proto_ops l2cap_sock_ops;

/* Global list of all L2CAP sockets; readers/writers take .lock. */
static struct bt_sock_list l2cap_sk_list = {
	.lock = __RW_LOCK_UNLOCKED(l2cap_sk_list.lock)
};

/* Forward declarations for the teardown helpers defined further down. */
static void __l2cap_sock_close(struct sock *sk, int reason);
static void l2cap_sock_close(struct sock *sk);
static void l2cap_sock_kill(struct sock *sk);

static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
				u8 code, u8 ident, u16 dlen, void *data);
74/* ---- L2CAP timers ---- */
/* ---- L2CAP timers ---- */
/* sk_timer callback: the connection attempt or teardown timed out.
 * Runs in timer (softirq) context, hence bh_lock_sock(). */
static void l2cap_sock_timeout(unsigned long arg)
{
	struct sock *sk = (struct sock *) arg;
	int reason;

	BT_DBG("sock %p state %d", sk, sk->sk_state);

	bh_lock_sock(sk);

	/* Pick the error reported to the user: a timeout during
	 * connect/config means the peer refused us, otherwise ETIMEDOUT. */
	if (sk->sk_state == BT_CONNECTED || sk->sk_state == BT_CONFIG)
		reason = ECONNREFUSED;
	else if (sk->sk_state == BT_CONNECT &&
				l2cap_pi(sk)->sec_level != BT_SECURITY_SDP)
		reason = ECONNREFUSED;
	else
		reason = ETIMEDOUT;

	__l2cap_sock_close(sk, reason);

	bh_unlock_sock(sk);

	/* l2cap_sock_kill() must run on the unlocked socket. */
	l2cap_sock_kill(sk);
	sock_put(sk);
}
99
100static void l2cap_sock_set_timer(struct sock *sk, long timeout)
101{
102 BT_DBG("sk %p state %d timeout %ld", sk, sk->sk_state, timeout);
103 sk_reset_timer(sk, &sk->sk_timer, jiffies + timeout);
104}
105
/* Cancel a pending sk_timer, if any. */
static void l2cap_sock_clear_timer(struct sock *sk)
{
	BT_DBG("sock %p state %d", sk, sk->sk_state);
	sk_stop_timer(sk, &sk->sk_timer);
}
111
112/* ---- L2CAP channels ---- */
113static struct sock *__l2cap_get_chan_by_dcid(struct l2cap_chan_list *l, u16 cid)
114{
115 struct sock *s;
116 for (s = l->head; s; s = l2cap_pi(s)->next_c) {
117 if (l2cap_pi(s)->dcid == cid)
118 break;
119 }
120 return s;
121}
122
123static struct sock *__l2cap_get_chan_by_scid(struct l2cap_chan_list *l, u16 cid)
124{
125 struct sock *s;
126 for (s = l->head; s; s = l2cap_pi(s)->next_c) {
127 if (l2cap_pi(s)->scid == cid)
128 break;
129 }
130 return s;
131}
132
133/* Find channel with given SCID.
134 * Returns locked socket */
135static inline struct sock *l2cap_get_chan_by_scid(struct l2cap_chan_list *l, u16 cid)
136{
137 struct sock *s;
138 read_lock(&l->lock);
139 s = __l2cap_get_chan_by_scid(l, cid);
140 if (s)
141 bh_lock_sock(s);
142 read_unlock(&l->lock);
143 return s;
144}
145
146static struct sock *__l2cap_get_chan_by_ident(struct l2cap_chan_list *l, u8 ident)
147{
148 struct sock *s;
149 for (s = l->head; s; s = l2cap_pi(s)->next_c) {
150 if (l2cap_pi(s)->ident == ident)
151 break;
152 }
153 return s;
154}
155
156static inline struct sock *l2cap_get_chan_by_ident(struct l2cap_chan_list *l, u8 ident)
157{
158 struct sock *s;
159 read_lock(&l->lock);
160 s = __l2cap_get_chan_by_ident(l, ident);
161 if (s)
162 bh_lock_sock(s);
163 read_unlock(&l->lock);
164 return s;
165}
166
167static u16 l2cap_alloc_cid(struct l2cap_chan_list *l)
168{
169 u16 cid = L2CAP_CID_DYN_START;
170
171 for (; cid < L2CAP_CID_DYN_END; cid++) {
172 if (!__l2cap_get_chan_by_scid(l, cid))
173 return cid;
174 }
175
176 return 0;
177}
178
179static inline void __l2cap_chan_link(struct l2cap_chan_list *l, struct sock *sk)
180{
181 sock_hold(sk);
182
183 if (l->head)
184 l2cap_pi(l->head)->prev_c = sk;
185
186 l2cap_pi(sk)->next_c = l->head;
187 l2cap_pi(sk)->prev_c = NULL;
188 l->head = sk;
189}
190
191static inline void l2cap_chan_unlink(struct l2cap_chan_list *l, struct sock *sk)
192{
193 struct sock *next = l2cap_pi(sk)->next_c, *prev = l2cap_pi(sk)->prev_c;
194
195 write_lock_bh(&l->lock);
196 if (sk == l->head)
197 l->head = next;
198
199 if (next)
200 l2cap_pi(next)->prev_c = prev;
201 if (prev)
202 l2cap_pi(prev)->next_c = next;
203 write_unlock_bh(&l->lock);
204
205 __sock_put(sk);
206}
207
208static void __l2cap_chan_add(struct l2cap_conn *conn, struct sock *sk, struct sock *parent)
209{
210 struct l2cap_chan_list *l = &conn->chan_list;
211
212 BT_DBG("conn %p, psm 0x%2.2x, dcid 0x%4.4x", conn,
213 l2cap_pi(sk)->psm, l2cap_pi(sk)->dcid);
214
215 conn->disc_reason = 0x13;
216
217 l2cap_pi(sk)->conn = conn;
218
219 if (sk->sk_type == SOCK_SEQPACKET) {
220 /* Alloc CID for connection-oriented socket */
221 l2cap_pi(sk)->scid = l2cap_alloc_cid(l);
222 } else if (sk->sk_type == SOCK_DGRAM) {
223 /* Connectionless socket */
224 l2cap_pi(sk)->scid = L2CAP_CID_CONN_LESS;
225 l2cap_pi(sk)->dcid = L2CAP_CID_CONN_LESS;
226 l2cap_pi(sk)->omtu = L2CAP_DEFAULT_MTU;
227 } else {
228 /* Raw socket can send/recv signalling messages only */
229 l2cap_pi(sk)->scid = L2CAP_CID_SIGNALING;
230 l2cap_pi(sk)->dcid = L2CAP_CID_SIGNALING;
231 l2cap_pi(sk)->omtu = L2CAP_DEFAULT_MTU;
232 }
233
234 __l2cap_chan_link(l, sk);
235
236 if (parent)
237 bt_accept_enqueue(parent, sk);
238}
239
240/* Delete channel.
241 * Must be called on the locked socket. */
/* Delete channel.
 * Must be called on the locked socket. */
static void l2cap_chan_del(struct sock *sk, int err)
{
	struct l2cap_conn *conn = l2cap_pi(sk)->conn;
	struct sock *parent = bt_sk(sk)->parent;

	l2cap_sock_clear_timer(sk);

	BT_DBG("sk %p, conn %p, err %d", sk, conn, err);

	if (conn) {
		/* Unlink from channel list */
		l2cap_chan_unlink(&conn->chan_list, sk);
		l2cap_pi(sk)->conn = NULL;
		/* Release the ACL link reference held by this channel. */
		hci_conn_put(conn->hcon);
	}

	sk->sk_state = BT_CLOSED;
	sock_set_flag(sk, SOCK_ZAPPED);

	if (err)
		sk->sk_err = err;

	if (parent) {
		/* Still on a listener's accept queue: detach and wake the
		 * listener so it can reap this socket. */
		bt_accept_unlink(sk);
		parent->sk_data_ready(parent, 0);
	} else
		sk->sk_state_change(sk);
}
270
271/* Service level security */
272static inline int l2cap_check_security(struct sock *sk)
273{
274 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
275 __u8 auth_type;
276
277 if (l2cap_pi(sk)->psm == cpu_to_le16(0x0001)) {
278 if (l2cap_pi(sk)->sec_level == BT_SECURITY_HIGH)
279 auth_type = HCI_AT_NO_BONDING_MITM;
280 else
281 auth_type = HCI_AT_NO_BONDING;
282
283 if (l2cap_pi(sk)->sec_level == BT_SECURITY_LOW)
284 l2cap_pi(sk)->sec_level = BT_SECURITY_SDP;
285 } else {
286 switch (l2cap_pi(sk)->sec_level) {
287 case BT_SECURITY_HIGH:
288 auth_type = HCI_AT_GENERAL_BONDING_MITM;
289 break;
290 case BT_SECURITY_MEDIUM:
291 auth_type = HCI_AT_GENERAL_BONDING;
292 break;
293 default:
294 auth_type = HCI_AT_NO_BONDING;
295 break;
296 }
297 }
298
299 return hci_conn_security(conn->hcon, l2cap_pi(sk)->sec_level,
300 auth_type);
301}
302
303static inline u8 l2cap_get_ident(struct l2cap_conn *conn)
304{
305 u8 id;
306
307 /* Get next available identificator.
308 * 1 - 128 are used by kernel.
309 * 129 - 199 are reserved.
310 * 200 - 254 are used by utilities like l2ping, etc.
311 */
312
313 spin_lock_bh(&conn->lock);
314
315 if (++conn->tx_ident > 128)
316 conn->tx_ident = 1;
317
318 id = conn->tx_ident;
319
320 spin_unlock_bh(&conn->lock);
321
322 return id;
323}
324
325static inline int l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len, void *data)
326{
327 struct sk_buff *skb = l2cap_build_cmd(conn, code, ident, len, data);
328
329 BT_DBG("code 0x%2.2x", code);
330
331 if (!skb)
332 return -ENOMEM;
333
334 return hci_send_acl(conn->hcon, skb, 0);
335}
336
/* Build and transmit an ERTM S-frame: L2CAP header + 16-bit control
 * field, plus a trailing 16-bit FCS when CRC16 checking is enabled. */
static inline int l2cap_send_sframe(struct l2cap_pinfo *pi, u16 control)
{
	struct sk_buff *skb;
	struct l2cap_hdr *lh;
	struct l2cap_conn *conn = pi->conn;
	int count, hlen = L2CAP_HDR_SIZE + 2;	/* header + control */

	if (pi->fcs == L2CAP_FCS_CRC16)
		hlen += 2;	/* room for the FCS */

	BT_DBG("pi %p, control 0x%2.2x", pi, control);

	/* S-frames must fit in a single ACL fragment. */
	count = min_t(unsigned int, conn->mtu, hlen);
	control |= L2CAP_CTRL_FRAME_TYPE;

	skb = bt_skb_alloc(count, GFP_ATOMIC);
	if (!skb)
		return -ENOMEM;

	lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
	lh->len = cpu_to_le16(hlen - L2CAP_HDR_SIZE);
	lh->cid = cpu_to_le16(pi->dcid);
	put_unaligned_le16(control, skb_put(skb, 2));

	if (pi->fcs == L2CAP_FCS_CRC16) {
		/* FCS covers everything before the FCS field itself. */
		u16 fcs = crc16(0, (u8 *)lh, count - 2);
		put_unaligned_le16(fcs, skb_put(skb, 2));
	}

	return hci_send_acl(pi->conn->hcon, skb, 0);
}
368
369static inline int l2cap_send_rr_or_rnr(struct l2cap_pinfo *pi, u16 control)
370{
371 if (pi->conn_state & L2CAP_CONN_LOCAL_BUSY)
372 control |= L2CAP_SUPER_RCV_NOT_READY;
373 else
374 control |= L2CAP_SUPER_RCV_READY;
375
376 return l2cap_send_sframe(pi, control);
377}
378
/* Kick off channel establishment on @sk: send a connect request once
 * the connection's feature-mask exchange has completed, otherwise
 * start that exchange first (the connect request is retried from
 * l2cap_conn_start() when the info response arrives). */
static void l2cap_do_start(struct sock *sk)
{
	struct l2cap_conn *conn = l2cap_pi(sk)->conn;

	if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) {
		/* Exchange in flight but not finished: wait for it. */
		if (!(conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE))
			return;

		if (l2cap_check_security(sk)) {
			struct l2cap_conn_req req;
			req.scid = cpu_to_le16(l2cap_pi(sk)->scid);
			req.psm  = l2cap_pi(sk)->psm;

			/* Remember the ident so the response can be matched. */
			l2cap_pi(sk)->ident = l2cap_get_ident(conn);

			l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
					L2CAP_CONN_REQ, sizeof(req), &req);
		}
	} else {
		struct l2cap_info_req req;
		req.type = cpu_to_le16(L2CAP_IT_FEAT_MASK);

		conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
		conn->info_ident = l2cap_get_ident(conn);

		/* Bound the wait for the info response. */
		mod_timer(&conn->info_timer, jiffies +
					msecs_to_jiffies(L2CAP_INFO_TIMEOUT));

		l2cap_send_cmd(conn, conn->info_ident,
					L2CAP_INFO_REQ, sizeof(req), &req);
	}
}
411
412static void l2cap_send_disconn_req(struct l2cap_conn *conn, struct sock *sk)
413{
414 struct l2cap_disconn_req req;
415
416 req.dcid = cpu_to_le16(l2cap_pi(sk)->dcid);
417 req.scid = cpu_to_le16(l2cap_pi(sk)->scid);
418 l2cap_send_cmd(conn, l2cap_get_ident(conn),
419 L2CAP_DISCONN_REQ, sizeof(req), &req);
420}
421
422/* ---- L2CAP connections ---- */
/* ---- L2CAP connections ---- */
/* Walk every channel on @conn and advance the ones blocked on the
 * feature-mask exchange or on security: outgoing channels in
 * BT_CONNECT get their connect request sent, incoming channels in
 * BT_CONNECT2 get a connect response. */
static void l2cap_conn_start(struct l2cap_conn *conn)
{
	struct l2cap_chan_list *l = &conn->chan_list;
	struct sock *sk;

	BT_DBG("conn %p", conn);

	read_lock(&l->lock);

	for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
		bh_lock_sock(sk);

		/* Only connection-oriented channels take part. */
		if (sk->sk_type != SOCK_SEQPACKET) {
			bh_unlock_sock(sk);
			continue;
		}

		if (sk->sk_state == BT_CONNECT) {
			if (l2cap_check_security(sk)) {
				struct l2cap_conn_req req;
				req.scid = cpu_to_le16(l2cap_pi(sk)->scid);
				req.psm = l2cap_pi(sk)->psm;

				l2cap_pi(sk)->ident = l2cap_get_ident(conn);

				l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
					L2CAP_CONN_REQ, sizeof(req), &req);
			}
		} else if (sk->sk_state == BT_CONNECT2) {
			struct l2cap_conn_rsp rsp;
			/* Response carries our IDs swapped: peer's view. */
			rsp.scid = cpu_to_le16(l2cap_pi(sk)->dcid);
			rsp.dcid = cpu_to_le16(l2cap_pi(sk)->scid);

			if (l2cap_check_security(sk)) {
				if (bt_sk(sk)->defer_setup) {
					/* Userspace must accept first: report
					 * pending and wake the listener. */
					struct sock *parent = bt_sk(sk)->parent;
					rsp.result = cpu_to_le16(L2CAP_CR_PEND);
					rsp.status = cpu_to_le16(L2CAP_CS_AUTHOR_PEND);
					parent->sk_data_ready(parent, 0);

				} else {
					sk->sk_state = BT_CONFIG;
					rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
					rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
				}
			} else {
				/* Authentication still in progress. */
				rsp.result = cpu_to_le16(L2CAP_CR_PEND);
				rsp.status = cpu_to_le16(L2CAP_CS_AUTHEN_PEND);
			}

			l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
					L2CAP_CONN_RSP, sizeof(rsp), &rsp);
		}

		bh_unlock_sock(sk);
	}

	read_unlock(&l->lock);
}
482
483static void l2cap_conn_ready(struct l2cap_conn *conn)
484{
485 struct l2cap_chan_list *l = &conn->chan_list;
486 struct sock *sk;
487
488 BT_DBG("conn %p", conn);
489
490 read_lock(&l->lock);
491
492 for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
493 bh_lock_sock(sk);
494
495 if (sk->sk_type != SOCK_SEQPACKET) {
496 l2cap_sock_clear_timer(sk);
497 sk->sk_state = BT_CONNECTED;
498 sk->sk_state_change(sk);
499 } else if (sk->sk_state == BT_CONNECT)
500 l2cap_do_start(sk);
501
502 bh_unlock_sock(sk);
503 }
504
505 read_unlock(&l->lock);
506}
507
508/* Notify sockets that we cannot guaranty reliability anymore */
509static void l2cap_conn_unreliable(struct l2cap_conn *conn, int err)
510{
511 struct l2cap_chan_list *l = &conn->chan_list;
512 struct sock *sk;
513
514 BT_DBG("conn %p", conn);
515
516 read_lock(&l->lock);
517
518 for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
519 if (l2cap_pi(sk)->force_reliable)
520 sk->sk_err = err;
521 }
522
523 read_unlock(&l->lock);
524}
525
526static void l2cap_info_timeout(unsigned long arg)
527{
528 struct l2cap_conn *conn = (void *) arg;
529
530 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
531 conn->info_ident = 0;
532
533 l2cap_conn_start(conn);
534}
535
/* Create (or return the existing) L2CAP connection object for an ACL
 * link. Returns NULL on allocation failure, and the existing object
 * unchanged when one is already attached or @status reports an error. */
static struct l2cap_conn *l2cap_conn_add(struct hci_conn *hcon, u8 status)
{
	struct l2cap_conn *conn = hcon->l2cap_data;

	if (conn || status)
		return conn;

	/* May run in atomic context (called from HCI callbacks). */
	conn = kzalloc(sizeof(struct l2cap_conn), GFP_ATOMIC);
	if (!conn)
		return NULL;

	hcon->l2cap_data = conn;
	conn->hcon = hcon;

	BT_DBG("hcon %p conn %p", hcon, conn);

	conn->mtu = hcon->hdev->acl_mtu;
	conn->src = &hcon->hdev->bdaddr;
	conn->dst = &hcon->dst;

	conn->feat_mask = 0;

	/* Timer guarding the feature-mask information exchange. */
	setup_timer(&conn->info_timer, l2cap_info_timeout,
						(unsigned long) conn);

	spin_lock_init(&conn->lock);
	rwlock_init(&conn->chan_list.lock);

	conn->disc_reason = 0x13;

	return conn;
}
568
/* Tear down an L2CAP connection: kill every channel on it, stop the
 * info timer and free the connection object. */
static void l2cap_conn_del(struct hci_conn *hcon, int err)
{
	struct l2cap_conn *conn = hcon->l2cap_data;
	struct sock *sk;

	if (!conn)
		return;

	BT_DBG("hcon %p conn %p, err %d", hcon, conn, err);

	/* Drop any partially reassembled frame (kfree_skb accepts NULL). */
	kfree_skb(conn->rx_skb);

	/* Kill channels */
	while ((sk = conn->chan_list.head)) {
		bh_lock_sock(sk);
		l2cap_chan_del(sk, err);	/* unlinks sk from the list */
		bh_unlock_sock(sk);
		l2cap_sock_kill(sk);		/* needs the unlocked socket */
	}

	/* The timer is only armed after a feature-mask request was sent. */
	if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT)
		del_timer_sync(&conn->info_timer);

	hcon->l2cap_data = NULL;
	kfree(conn);
}
595
596static inline void l2cap_chan_add(struct l2cap_conn *conn, struct sock *sk, struct sock *parent)
597{
598 struct l2cap_chan_list *l = &conn->chan_list;
599 write_lock_bh(&l->lock);
600 __l2cap_chan_add(conn, sk, parent);
601 write_unlock_bh(&l->lock);
602}
603
604/* ---- Socket interface ---- */
605static struct sock *__l2cap_get_sock_by_addr(__le16 psm, bdaddr_t *src)
606{
607 struct sock *sk;
608 struct hlist_node *node;
609 sk_for_each(sk, node, &l2cap_sk_list.head)
610 if (l2cap_pi(sk)->sport == psm && !bacmp(&bt_sk(sk)->src, src))
611 goto found;
612 sk = NULL;
613found:
614 return sk;
615}
616
617/* Find socket with psm and source bdaddr.
618 * Returns closest match.
619 */
/* Find socket with psm and source bdaddr.
 * Returns closest match.
 */
static struct sock *__l2cap_get_sock_by_psm(int state, __le16 psm, bdaddr_t *src)
{
	struct sock *sk = NULL, *sk1 = NULL;
	struct hlist_node *node;

	sk_for_each(sk, node, &l2cap_sk_list.head) {
		/* state == 0 means "any state". */
		if (state && sk->sk_state != state)
			continue;

		if (l2cap_pi(sk)->psm == psm) {
			/* Exact match. */
			if (!bacmp(&bt_sk(sk)->src, src))
				break;

			/* Closest match */
			if (!bacmp(&bt_sk(sk)->src, BDADDR_ANY))
				sk1 = sk;
		}
	}
	/* node is non-NULL only when the loop broke on an exact match;
	 * otherwise fall back to the wildcard (BDADDR_ANY) candidate. */
	return node ? sk : sk1;
}
641
642/* Find socket with given address (psm, src).
643 * Returns locked socket */
644static inline struct sock *l2cap_get_sock_by_psm(int state, __le16 psm, bdaddr_t *src)
645{
646 struct sock *s;
647 read_lock(&l2cap_sk_list.lock);
648 s = __l2cap_get_sock_by_psm(state, psm, src);
649 if (s)
650 bh_lock_sock(s);
651 read_unlock(&l2cap_sk_list.lock);
652 return s;
653}
654
/* sk_destruct callback: free any skbs still queued on the socket. */
static void l2cap_sock_destruct(struct sock *sk)
{
	BT_DBG("sk %p", sk);

	skb_queue_purge(&sk->sk_receive_queue);
	skb_queue_purge(&sk->sk_write_queue);
}
662
663static void l2cap_sock_cleanup_listen(struct sock *parent)
664{
665 struct sock *sk;
666
667 BT_DBG("parent %p", parent);
668
669 /* Close not yet accepted channels */
670 while ((sk = bt_accept_dequeue(parent, NULL)))
671 l2cap_sock_close(sk);
672
673 parent->sk_state = BT_CLOSED;
674 sock_set_flag(parent, SOCK_ZAPPED);
675}
676
677/* Kill socket (only if zapped and orphan)
678 * Must be called on unlocked socket.
679 */
/* Kill socket (only if zapped and orphan)
 * Must be called on unlocked socket.
 */
static void l2cap_sock_kill(struct sock *sk)
{
	/* Sockets still attached to a struct socket, or not yet zapped,
	 * are reaped later by their owner. */
	if (!sock_flag(sk, SOCK_ZAPPED) || sk->sk_socket)
		return;

	BT_DBG("sk %p state %d", sk, sk->sk_state);

	/* Kill poor orphan */
	bt_sock_unlink(&l2cap_sk_list, sk);
	sock_set_flag(sk, SOCK_DEAD);
	sock_put(sk);	/* drop the final reference */
}
692
/* State-dependent close: tear the channel down as far as the current
 * state allows. Runs with the socket locked (bh or process context). */
static void __l2cap_sock_close(struct sock *sk, int reason)
{
	BT_DBG("sk %p state %d socket %p", sk, sk->sk_state, sk->sk_socket);

	switch (sk->sk_state) {
	case BT_LISTEN:
		l2cap_sock_cleanup_listen(sk);
		break;

	case BT_CONNECTED:
	case BT_CONFIG:
		if (sk->sk_type == SOCK_SEQPACKET) {
			/* Orderly shutdown: ask the peer to disconnect and
			 * wait (bounded by sk_sndtimeo) for its response. */
			struct l2cap_conn *conn = l2cap_pi(sk)->conn;

			sk->sk_state = BT_DISCONN;
			l2cap_sock_set_timer(sk, sk->sk_sndtimeo);
			l2cap_send_disconn_req(conn, sk);
		} else
			l2cap_chan_del(sk, reason);
		break;

	case BT_CONNECT2:
		if (sk->sk_type == SOCK_SEQPACKET) {
			/* Incoming connect not yet accepted: reject it. */
			struct l2cap_conn *conn = l2cap_pi(sk)->conn;
			struct l2cap_conn_rsp rsp;
			__u16 result;

			if (bt_sk(sk)->defer_setup)
				result = L2CAP_CR_SEC_BLOCK;
			else
				result = L2CAP_CR_BAD_PSM;

			rsp.scid = cpu_to_le16(l2cap_pi(sk)->dcid);
			rsp.dcid = cpu_to_le16(l2cap_pi(sk)->scid);
			rsp.result = cpu_to_le16(result);
			rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
			l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
					L2CAP_CONN_RSP, sizeof(rsp), &rsp);
		} else
			l2cap_chan_del(sk, reason);
		break;

	case BT_CONNECT:
	case BT_DISCONN:
		l2cap_chan_del(sk, reason);
		break;

	default:
		/* Never attached to a connection: just mark it dead. */
		sock_set_flag(sk, SOCK_ZAPPED);
		break;
	}
}
745
746/* Must be called on unlocked socket. */
/* Must be called on unlocked socket. */
static void l2cap_sock_close(struct sock *sk)
{
	l2cap_sock_clear_timer(sk);
	lock_sock(sk);
	__l2cap_sock_close(sk, ECONNRESET);
	release_sock(sk);
	/* Reaps the socket if it is already zapped and orphaned. */
	l2cap_sock_kill(sk);
}
755
756static void l2cap_sock_init(struct sock *sk, struct sock *parent)
757{
758 struct l2cap_pinfo *pi = l2cap_pi(sk);
759
760 BT_DBG("sk %p", sk);
761
762 if (parent) {
763 sk->sk_type = parent->sk_type;
764 bt_sk(sk)->defer_setup = bt_sk(parent)->defer_setup;
765
766 pi->imtu = l2cap_pi(parent)->imtu;
767 pi->omtu = l2cap_pi(parent)->omtu;
768 pi->mode = l2cap_pi(parent)->mode;
769 pi->fcs = l2cap_pi(parent)->fcs;
770 pi->sec_level = l2cap_pi(parent)->sec_level;
771 pi->role_switch = l2cap_pi(parent)->role_switch;
772 pi->force_reliable = l2cap_pi(parent)->force_reliable;
773 } else {
774 pi->imtu = L2CAP_DEFAULT_MTU;
775 pi->omtu = 0;
776 pi->mode = L2CAP_MODE_BASIC;
777 pi->fcs = L2CAP_FCS_CRC16;
778 pi->sec_level = BT_SECURITY_LOW;
779 pi->role_switch = 0;
780 pi->force_reliable = 0;
781 }
782
783 /* Default config options */
784 pi->conf_len = 0;
785 pi->flush_to = L2CAP_DEFAULT_FLUSH_TO;
786}
787
/* Protocol descriptor; obj_size makes sk_alloc() reserve room for the
 * per-socket struct l2cap_pinfo. */
static struct proto l2cap_proto = {
	.name		= "L2CAP",
	.owner		= THIS_MODULE,
	.obj_size	= sizeof(struct l2cap_pinfo)
};
793
/* Allocate and minimally initialise a new L2CAP sock in BT_OPEN state,
 * link it into the global socket list. Returns NULL on OOM. */
static struct sock *l2cap_sock_alloc(struct net *net, struct socket *sock, int proto, gfp_t prio)
{
	struct sock *sk;

	sk = sk_alloc(net, PF_BLUETOOTH, prio, &l2cap_proto);
	if (!sk)
		return NULL;

	sock_init_data(sock, sk);
	INIT_LIST_HEAD(&bt_sk(sk)->accept_q);

	sk->sk_destruct = l2cap_sock_destruct;
	sk->sk_sndtimeo = msecs_to_jiffies(L2CAP_CONN_TIMEOUT);

	sock_reset_flag(sk, SOCK_ZAPPED);

	sk->sk_protocol = proto;
	sk->sk_state = BT_OPEN;

	/* sk_timer drives connection/teardown timeouts. */
	setup_timer(&sk->sk_timer, l2cap_sock_timeout, (unsigned long) sk);

	bt_sock_link(&l2cap_sk_list, sk);
	return sk;
}
818
819static int l2cap_sock_create(struct net *net, struct socket *sock, int protocol)
820{
821 struct sock *sk;
822
823 BT_DBG("sock %p", sock);
824
825 sock->state = SS_UNCONNECTED;
826
827 if (sock->type != SOCK_SEQPACKET &&
828 sock->type != SOCK_DGRAM && sock->type != SOCK_RAW)
829 return -ESOCKTNOSUPPORT;
830
831 if (sock->type == SOCK_RAW && !capable(CAP_NET_RAW))
832 return -EPERM;
833
834 sock->ops = &l2cap_sock_ops;
835
836 sk = l2cap_sock_alloc(net, sock, protocol, GFP_ATOMIC);
837 if (!sk)
838 return -ENOMEM;
839
840 l2cap_sock_init(sk, NULL);
841 return 0;
842}
843
/* bind(2): record the source bdaddr and PSM, after checking the PSM
 * is not privileged (< 0x1001) or already taken on that address. */
static int l2cap_sock_bind(struct socket *sock, struct sockaddr *addr, int alen)
{
	struct sock *sk = sock->sk;
	struct sockaddr_l2 la;
	int len, err = 0;

	BT_DBG("sk %p", sk);

	if (!addr || addr->sa_family != AF_BLUETOOTH)
		return -EINVAL;

	/* Copy only what the caller supplied; the rest stays zeroed. */
	memset(&la, 0, sizeof(la));
	len = min_t(unsigned int, sizeof(la), alen);
	memcpy(&la, addr, len);

	/* Binding to a fixed CID is not supported. */
	if (la.l2_cid)
		return -EINVAL;

	lock_sock(sk);

	if (sk->sk_state != BT_OPEN) {
		err = -EBADFD;
		goto done;
	}

	/* PSMs below 0x1001 are reserved; require privilege. */
	if (la.l2_psm && __le16_to_cpu(la.l2_psm) < 0x1001 &&
				!capable(CAP_NET_BIND_SERVICE)) {
		err = -EACCES;
		goto done;
	}

	write_lock_bh(&l2cap_sk_list.lock);

	if (la.l2_psm && __l2cap_get_sock_by_addr(la.l2_psm, &la.l2_bdaddr)) {
		err = -EADDRINUSE;
	} else {
		/* Save source address */
		bacpy(&bt_sk(sk)->src, &la.l2_bdaddr);
		l2cap_pi(sk)->psm   = la.l2_psm;
		l2cap_pi(sk)->sport = la.l2_psm;
		sk->sk_state = BT_BOUND;

		/* PSMs 0x0001 and 0x0003 start at the SDP security level. */
		if (__le16_to_cpu(la.l2_psm) == 0x0001 ||
					__le16_to_cpu(la.l2_psm) == 0x0003)
			l2cap_pi(sk)->sec_level = BT_SECURITY_SDP;
	}

	write_unlock_bh(&l2cap_sk_list.lock);

done:
	release_sock(sk);
	return err;
}
897
/* Establish the ACL link towards the destination and attach this
 * socket to it as a channel. On an already-up link the L2CAP
 * handshake is started immediately; otherwise it continues from
 * l2cap_conn_ready() when the link comes up. */
static int l2cap_do_connect(struct sock *sk)
{
	bdaddr_t *src = &bt_sk(sk)->src;
	bdaddr_t *dst = &bt_sk(sk)->dst;
	struct l2cap_conn *conn;
	struct hci_conn *hcon;
	struct hci_dev *hdev;
	__u8 auth_type;
	int err;

	BT_DBG("%s -> %s psm 0x%2.2x", batostr(src), batostr(dst),
							l2cap_pi(sk)->psm);

	/* Pick the local adapter that routes to dst. */
	hdev = hci_get_route(dst, src);
	if (!hdev)
		return -EHOSTUNREACH;

	hci_dev_lock_bh(hdev);

	err = -ENOMEM;

	/* Select the HCI authentication requirement from socket type,
	 * PSM and requested security level (mirrors
	 * l2cap_check_security for the non-raw cases). */
	if (sk->sk_type == SOCK_RAW) {
		switch (l2cap_pi(sk)->sec_level) {
		case BT_SECURITY_HIGH:
			auth_type = HCI_AT_DEDICATED_BONDING_MITM;
			break;
		case BT_SECURITY_MEDIUM:
			auth_type = HCI_AT_DEDICATED_BONDING;
			break;
		default:
			auth_type = HCI_AT_NO_BONDING;
			break;
		}
	} else if (l2cap_pi(sk)->psm == cpu_to_le16(0x0001)) {
		/* PSM 0x0001: never bond; demote LOW to SDP level. */
		if (l2cap_pi(sk)->sec_level == BT_SECURITY_HIGH)
			auth_type = HCI_AT_NO_BONDING_MITM;
		else
			auth_type = HCI_AT_NO_BONDING;

		if (l2cap_pi(sk)->sec_level == BT_SECURITY_LOW)
			l2cap_pi(sk)->sec_level = BT_SECURITY_SDP;
	} else {
		switch (l2cap_pi(sk)->sec_level) {
		case BT_SECURITY_HIGH:
			auth_type = HCI_AT_GENERAL_BONDING_MITM;
			break;
		case BT_SECURITY_MEDIUM:
			auth_type = HCI_AT_GENERAL_BONDING;
			break;
		default:
			auth_type = HCI_AT_NO_BONDING;
			break;
		}
	}

	hcon = hci_connect(hdev, ACL_LINK, dst,
					l2cap_pi(sk)->sec_level, auth_type);
	if (!hcon)
		goto done;

	conn = l2cap_conn_add(hcon, 0);
	if (!conn) {
		hci_conn_put(hcon);
		goto done;
	}

	err = 0;

	/* Update source addr of the socket */
	bacpy(src, conn->src);

	l2cap_chan_add(conn, sk, NULL);

	sk->sk_state = BT_CONNECT;
	l2cap_sock_set_timer(sk, sk->sk_sndtimeo);

	if (hcon->state == BT_CONNECTED) {
		if (sk->sk_type != SOCK_SEQPACKET) {
			/* Raw/dgram: nothing more to negotiate. */
			l2cap_sock_clear_timer(sk);
			sk->sk_state = BT_CONNECTED;
		} else
			l2cap_do_start(sk);
	}

done:
	hci_dev_unlock_bh(hdev);
	hci_dev_put(hdev);
	return err;
}
987
/* connect(2): validate the destination and channel mode, kick off the
 * connection and (unless non-blocking) wait for BT_CONNECTED. */
static int l2cap_sock_connect(struct socket *sock, struct sockaddr *addr, int alen, int flags)
{
	struct sock *sk = sock->sk;
	struct sockaddr_l2 la;
	int len, err = 0;

	BT_DBG("sk %p", sk);

	if (!addr || addr->sa_family != AF_BLUETOOTH)
		return -EINVAL;

	/* Copy only what the caller supplied; the rest stays zeroed. */
	memset(&la, 0, sizeof(la));
	len = min_t(unsigned int, sizeof(la), alen);
	memcpy(&la, addr, len);

	/* Connecting to a fixed CID is not supported. */
	if (la.l2_cid)
		return -EINVAL;

	lock_sock(sk);

	/* Connection-oriented sockets need a destination PSM. */
	if (sk->sk_type == SOCK_SEQPACKET && !la.l2_psm) {
		err = -EINVAL;
		goto done;
	}

	switch (l2cap_pi(sk)->mode) {
	case L2CAP_MODE_BASIC:
		break;
	case L2CAP_MODE_ERTM:
	case L2CAP_MODE_STREAMING:
		/* Only allowed when the module knob enables them. */
		if (enable_ertm)
			break;
		/* fall through */
	default:
		err = -ENOTSUPP;
		goto done;
	}

	switch (sk->sk_state) {
	case BT_CONNECT:
	case BT_CONNECT2:
	case BT_CONFIG:
		/* Already connecting */
		goto wait;

	case BT_CONNECTED:
		/* Already connected */
		goto done;

	case BT_OPEN:
	case BT_BOUND:
		/* Can connect */
		break;

	default:
		err = -EBADFD;
		goto done;
	}

	/* Set destination address and psm */
	bacpy(&bt_sk(sk)->dst, &la.l2_bdaddr);
	l2cap_pi(sk)->psm = la.l2_psm;

	err = l2cap_do_connect(sk);
	if (err)
		goto done;

wait:
	/* Blocks until connected, times out, or O_NONBLOCK returns. */
	err = bt_sock_wait_state(sk, BT_CONNECTED,
			sock_sndtimeo(sk, flags & O_NONBLOCK));
done:
	release_sock(sk);
	return err;
}
1062
/* listen(2): only bound SEQPACKET sockets may listen; a socket bound
 * without a PSM gets one auto-allocated from the dynamic range. */
static int l2cap_sock_listen(struct socket *sock, int backlog)
{
	struct sock *sk = sock->sk;
	int err = 0;

	BT_DBG("sk %p backlog %d", sk, backlog);

	lock_sock(sk);

	if (sk->sk_state != BT_BOUND || sock->type != SOCK_SEQPACKET) {
		err = -EBADFD;
		goto done;
	}

	switch (l2cap_pi(sk)->mode) {
	case L2CAP_MODE_BASIC:
		break;
	case L2CAP_MODE_ERTM:
	case L2CAP_MODE_STREAMING:
		/* Only allowed when the module knob enables them. */
		if (enable_ertm)
			break;
		/* fall through */
	default:
		err = -ENOTSUPP;
		goto done;
	}

	if (!l2cap_pi(sk)->psm) {
		bdaddr_t *src = &bt_sk(sk)->src;
		u16 psm;

		err = -EINVAL;

		write_lock_bh(&l2cap_sk_list.lock);

		/* Dynamic PSMs are odd values in 0x1001..0x10ff; grab the
		 * first one not already bound on this source address. */
		for (psm = 0x1001; psm < 0x1100; psm += 2)
			if (!__l2cap_get_sock_by_addr(cpu_to_le16(psm), src)) {
				l2cap_pi(sk)->psm   = cpu_to_le16(psm);
				l2cap_pi(sk)->sport = cpu_to_le16(psm);
				err = 0;
				break;
			}

		write_unlock_bh(&l2cap_sk_list.lock);

		if (err < 0)
			goto done;
	}

	sk->sk_max_ack_backlog = backlog;
	sk->sk_ack_backlog = 0;
	sk->sk_state = BT_LISTEN;

done:
	release_sock(sk);
	return err;
}
1120
/* accept(2): sleep (interruptibly, up to the receive timeout) until a
 * connection appears on the accept queue, then hand it to @newsock. */
static int l2cap_sock_accept(struct socket *sock, struct socket *newsock, int flags)
{
	DECLARE_WAITQUEUE(wait, current);
	struct sock *sk = sock->sk, *nsk;
	long timeo;
	int err = 0;

	lock_sock_nested(sk, SINGLE_DEPTH_NESTING);

	if (sk->sk_state != BT_LISTEN) {
		err = -EBADFD;
		goto done;
	}

	/* 0 for non-blocking callers, otherwise the receive timeout. */
	timeo = sock_rcvtimeo(sk, flags & O_NONBLOCK);

	BT_DBG("sk %p timeo %ld", sk, timeo);

	/* Wait for an incoming connection. (wake-one). */
	add_wait_queue_exclusive(sk->sk_sleep, &wait);
	while (!(nsk = bt_accept_dequeue(sk, newsock))) {
		set_current_state(TASK_INTERRUPTIBLE);
		if (!timeo) {
			err = -EAGAIN;
			break;
		}

		/* Drop the socket lock while sleeping so the softirq
		 * side can queue new connections. */
		release_sock(sk);
		timeo = schedule_timeout(timeo);
		lock_sock_nested(sk, SINGLE_DEPTH_NESTING);

		/* The listener may have been closed while we slept. */
		if (sk->sk_state != BT_LISTEN) {
			err = -EBADFD;
			break;
		}

		if (signal_pending(current)) {
			err = sock_intr_errno(timeo);
			break;
		}
	}
	set_current_state(TASK_RUNNING);
	remove_wait_queue(sk->sk_sleep, &wait);

	if (err)
		goto done;

	newsock->state = SS_CONNECTED;

	BT_DBG("new socket %p", nsk);

done:
	release_sock(sk);
	return err;
}
1176
1177static int l2cap_sock_getname(struct socket *sock, struct sockaddr *addr, int *len, int peer)
1178{
1179 struct sockaddr_l2 *la = (struct sockaddr_l2 *) addr;
1180 struct sock *sk = sock->sk;
1181
1182 BT_DBG("sock %p, sk %p", sock, sk);
1183
1184 addr->sa_family = AF_BLUETOOTH;
1185 *len = sizeof(struct sockaddr_l2);
1186
1187 if (peer) {
1188 la->l2_psm = l2cap_pi(sk)->psm;
1189 bacpy(&la->l2_bdaddr, &bt_sk(sk)->dst);
1190 la->l2_cid = cpu_to_le16(l2cap_pi(sk)->dcid);
1191 } else {
1192 la->l2_psm = l2cap_pi(sk)->sport;
1193 bacpy(&la->l2_bdaddr, &bt_sk(sk)->src);
1194 la->l2_cid = cpu_to_le16(l2cap_pi(sk)->scid);
1195 }
1196
1197 return 0;
1198}
1199
1200static void l2cap_monitor_timeout(unsigned long arg)
1201{
1202 struct sock *sk = (void *) arg;
1203 u16 control;
1204
1205 bh_lock_sock(sk);
1206 if (l2cap_pi(sk)->retry_count >= l2cap_pi(sk)->remote_max_tx) {
1207 l2cap_send_disconn_req(l2cap_pi(sk)->conn, sk);
1208 return;
1209 }
1210
1211 l2cap_pi(sk)->retry_count++;
1212 __mod_monitor_timer();
1213
1214 control = L2CAP_CTRL_POLL;
1215 l2cap_send_rr_or_rnr(l2cap_pi(sk), control);
1216 bh_unlock_sock(sk);
1217}
1218
1219static void l2cap_retrans_timeout(unsigned long arg)
1220{
1221 struct sock *sk = (void *) arg;
1222 u16 control;
1223
1224 bh_lock_sock(sk);
1225 l2cap_pi(sk)->retry_count = 1;
1226 __mod_monitor_timer();
1227
1228 l2cap_pi(sk)->conn_state |= L2CAP_CONN_WAIT_F;
1229
1230 control = L2CAP_CTRL_POLL;
1231 l2cap_send_rr_or_rnr(l2cap_pi(sk), control);
1232 bh_unlock_sock(sk);
1233}
1234
1235static void l2cap_drop_acked_frames(struct sock *sk)
1236{
1237 struct sk_buff *skb;
1238
1239 while ((skb = skb_peek(TX_QUEUE(sk)))) {
1240 if (bt_cb(skb)->tx_seq == l2cap_pi(sk)->expected_ack_seq)
1241 break;
1242
1243 skb = skb_dequeue(TX_QUEUE(sk));
1244 kfree_skb(skb);
1245
1246 l2cap_pi(sk)->unacked_frames--;
1247 }
1248
1249 if (!l2cap_pi(sk)->unacked_frames)
1250 del_timer(&l2cap_pi(sk)->retrans_timer);
1251
1252 return;
1253}
1254
1255static inline int l2cap_do_send(struct sock *sk, struct sk_buff *skb)
1256{
1257 struct l2cap_pinfo *pi = l2cap_pi(sk);
1258 int err;
1259
1260 BT_DBG("sk %p, skb %p len %d", sk, skb, skb->len);
1261
1262 err = hci_send_acl(pi->conn->hcon, skb, 0);
1263 if (err < 0)
1264 kfree_skb(skb);
1265
1266 return err;
1267}
1268
1269static int l2cap_streaming_send(struct sock *sk)
1270{
1271 struct sk_buff *skb, *tx_skb;
1272 struct l2cap_pinfo *pi = l2cap_pi(sk);
1273 u16 control, fcs;
1274 int err;
1275
1276 while ((skb = sk->sk_send_head)) {
1277 tx_skb = skb_clone(skb, GFP_ATOMIC);
1278
1279 control = get_unaligned_le16(tx_skb->data + L2CAP_HDR_SIZE);
1280 control |= pi->next_tx_seq << L2CAP_CTRL_TXSEQ_SHIFT;
1281 put_unaligned_le16(control, tx_skb->data + L2CAP_HDR_SIZE);
1282
1283 if (l2cap_pi(sk)->fcs == L2CAP_FCS_CRC16) {
1284 fcs = crc16(0, (u8 *)tx_skb->data, tx_skb->len - 2);
1285 put_unaligned_le16(fcs, tx_skb->data + tx_skb->len - 2);
1286 }
1287
1288 err = l2cap_do_send(sk, tx_skb);
1289 if (err < 0) {
1290 l2cap_send_disconn_req(pi->conn, sk);
1291 return err;
1292 }
1293
1294 pi->next_tx_seq = (pi->next_tx_seq + 1) % 64;
1295
1296 if (skb_queue_is_last(TX_QUEUE(sk), skb))
1297 sk->sk_send_head = NULL;
1298 else
1299 sk->sk_send_head = skb_queue_next(TX_QUEUE(sk), skb);
1300
1301 skb = skb_dequeue(TX_QUEUE(sk));
1302 kfree_skb(skb);
1303 }
1304 return 0;
1305}
1306
1307static int l2cap_retransmit_frame(struct sock *sk, u8 tx_seq)
1308{
1309 struct l2cap_pinfo *pi = l2cap_pi(sk);
1310 struct sk_buff *skb, *tx_skb;
1311 u16 control, fcs;
1312 int err;
1313
1314 skb = skb_peek(TX_QUEUE(sk));
1315 do {
1316 if (bt_cb(skb)->tx_seq != tx_seq) {
1317 if (skb_queue_is_last(TX_QUEUE(sk), skb))
1318 break;
1319 skb = skb_queue_next(TX_QUEUE(sk), skb);
1320 continue;
1321 }
1322
1323 if (pi->remote_max_tx &&
1324 bt_cb(skb)->retries == pi->remote_max_tx) {
1325 l2cap_send_disconn_req(pi->conn, sk);
1326 break;
1327 }
1328
1329 tx_skb = skb_clone(skb, GFP_ATOMIC);
1330 bt_cb(skb)->retries++;
1331 control = get_unaligned_le16(tx_skb->data + L2CAP_HDR_SIZE);
1332 control |= (pi->req_seq << L2CAP_CTRL_REQSEQ_SHIFT)
1333 | (tx_seq << L2CAP_CTRL_TXSEQ_SHIFT);
1334 put_unaligned_le16(control, tx_skb->data + L2CAP_HDR_SIZE);
1335
1336 if (l2cap_pi(sk)->fcs == L2CAP_FCS_CRC16) {
1337 fcs = crc16(0, (u8 *)tx_skb->data, tx_skb->len - 2);
1338 put_unaligned_le16(fcs, tx_skb->data + tx_skb->len - 2);
1339 }
1340
1341 err = l2cap_do_send(sk, tx_skb);
1342 if (err < 0) {
1343 l2cap_send_disconn_req(pi->conn, sk);
1344 return err;
1345 }
1346 break;
1347 } while(1);
1348 return 0;
1349}
1350
1351static int l2cap_ertm_send(struct sock *sk)
1352{
1353 struct sk_buff *skb, *tx_skb;
1354 struct l2cap_pinfo *pi = l2cap_pi(sk);
1355 u16 control, fcs;
1356 int err;
1357
1358 if (pi->conn_state & L2CAP_CONN_WAIT_F)
1359 return 0;
1360
1361 while ((skb = sk->sk_send_head) && (!l2cap_tx_window_full(sk))
1362 && !(pi->conn_state & L2CAP_CONN_REMOTE_BUSY)) {
1363 tx_skb = skb_clone(skb, GFP_ATOMIC);
1364
1365 if (pi->remote_max_tx &&
1366 bt_cb(skb)->retries == pi->remote_max_tx) {
1367 l2cap_send_disconn_req(pi->conn, sk);
1368 break;
1369 }
1370
1371 bt_cb(skb)->retries++;
1372
1373 control = get_unaligned_le16(tx_skb->data + L2CAP_HDR_SIZE);
1374 control |= (pi->req_seq << L2CAP_CTRL_REQSEQ_SHIFT)
1375 | (pi->next_tx_seq << L2CAP_CTRL_TXSEQ_SHIFT);
1376 put_unaligned_le16(control, tx_skb->data + L2CAP_HDR_SIZE);
1377
1378
1379 if (l2cap_pi(sk)->fcs == L2CAP_FCS_CRC16) {
1380 fcs = crc16(0, (u8 *)skb->data, tx_skb->len - 2);
1381 put_unaligned_le16(fcs, skb->data + tx_skb->len - 2);
1382 }
1383
1384 err = l2cap_do_send(sk, tx_skb);
1385 if (err < 0) {
1386 l2cap_send_disconn_req(pi->conn, sk);
1387 return err;
1388 }
1389 __mod_retrans_timer();
1390
1391 bt_cb(skb)->tx_seq = pi->next_tx_seq;
1392 pi->next_tx_seq = (pi->next_tx_seq + 1) % 64;
1393
1394 pi->unacked_frames++;
1395
1396 if (skb_queue_is_last(TX_QUEUE(sk), skb))
1397 sk->sk_send_head = NULL;
1398 else
1399 sk->sk_send_head = skb_queue_next(TX_QUEUE(sk), skb);
1400 }
1401
1402 return 0;
1403}
1404
1405static inline int l2cap_skbuff_fromiovec(struct sock *sk, struct msghdr *msg, int len, int count, struct sk_buff *skb)
1406{
1407 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
1408 struct sk_buff **frag;
1409 int err, sent = 0;
1410
1411 if (memcpy_fromiovec(skb_put(skb, count), msg->msg_iov, count)) {
1412 return -EFAULT;
1413 }
1414
1415 sent += count;
1416 len -= count;
1417
1418 /* Continuation fragments (no L2CAP header) */
1419 frag = &skb_shinfo(skb)->frag_list;
1420 while (len) {
1421 count = min_t(unsigned int, conn->mtu, len);
1422
1423 *frag = bt_skb_send_alloc(sk, count, msg->msg_flags & MSG_DONTWAIT, &err);
1424 if (!*frag)
1425 return -EFAULT;
1426 if (memcpy_fromiovec(skb_put(*frag, count), msg->msg_iov, count))
1427 return -EFAULT;
1428
1429 sent += count;
1430 len -= count;
1431
1432 frag = &(*frag)->next;
1433 }
1434
1435 return sent;
1436}
1437
1438static struct sk_buff *l2cap_create_connless_pdu(struct sock *sk, struct msghdr *msg, size_t len)
1439{
1440 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
1441 struct sk_buff *skb;
1442 int err, count, hlen = L2CAP_HDR_SIZE + 2;
1443 struct l2cap_hdr *lh;
1444
1445 BT_DBG("sk %p len %d", sk, (int)len);
1446
1447 count = min_t(unsigned int, (conn->mtu - hlen), len);
1448 skb = bt_skb_send_alloc(sk, count + hlen,
1449 msg->msg_flags & MSG_DONTWAIT, &err);
1450 if (!skb)
1451 return ERR_PTR(-ENOMEM);
1452
1453 /* Create L2CAP header */
1454 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
1455 lh->cid = cpu_to_le16(l2cap_pi(sk)->dcid);
1456 lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
1457 put_unaligned_le16(l2cap_pi(sk)->psm, skb_put(skb, 2));
1458
1459 err = l2cap_skbuff_fromiovec(sk, msg, len, count, skb);
1460 if (unlikely(err < 0)) {
1461 kfree_skb(skb);
1462 return ERR_PTR(err);
1463 }
1464 return skb;
1465}
1466
1467static struct sk_buff *l2cap_create_basic_pdu(struct sock *sk, struct msghdr *msg, size_t len)
1468{
1469 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
1470 struct sk_buff *skb;
1471 int err, count, hlen = L2CAP_HDR_SIZE;
1472 struct l2cap_hdr *lh;
1473
1474 BT_DBG("sk %p len %d", sk, (int)len);
1475
1476 count = min_t(unsigned int, (conn->mtu - hlen), len);
1477 skb = bt_skb_send_alloc(sk, count + hlen,
1478 msg->msg_flags & MSG_DONTWAIT, &err);
1479 if (!skb)
1480 return ERR_PTR(-ENOMEM);
1481
1482 /* Create L2CAP header */
1483 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
1484 lh->cid = cpu_to_le16(l2cap_pi(sk)->dcid);
1485 lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
1486
1487 err = l2cap_skbuff_fromiovec(sk, msg, len, count, skb);
1488 if (unlikely(err < 0)) {
1489 kfree_skb(skb);
1490 return ERR_PTR(err);
1491 }
1492 return skb;
1493}
1494
1495static struct sk_buff *l2cap_create_iframe_pdu(struct sock *sk, struct msghdr *msg, size_t len, u16 control, u16 sdulen)
1496{
1497 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
1498 struct sk_buff *skb;
1499 int err, count, hlen = L2CAP_HDR_SIZE + 2;
1500 struct l2cap_hdr *lh;
1501
1502 BT_DBG("sk %p len %d", sk, (int)len);
1503
1504 if (sdulen)
1505 hlen += 2;
1506
1507 if (l2cap_pi(sk)->fcs == L2CAP_FCS_CRC16)
1508 hlen += 2;
1509
1510 count = min_t(unsigned int, (conn->mtu - hlen), len);
1511 skb = bt_skb_send_alloc(sk, count + hlen,
1512 msg->msg_flags & MSG_DONTWAIT, &err);
1513 if (!skb)
1514 return ERR_PTR(-ENOMEM);
1515
1516 /* Create L2CAP header */
1517 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
1518 lh->cid = cpu_to_le16(l2cap_pi(sk)->dcid);
1519 lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
1520 put_unaligned_le16(control, skb_put(skb, 2));
1521 if (sdulen)
1522 put_unaligned_le16(sdulen, skb_put(skb, 2));
1523
1524 err = l2cap_skbuff_fromiovec(sk, msg, len, count, skb);
1525 if (unlikely(err < 0)) {
1526 kfree_skb(skb);
1527 return ERR_PTR(err);
1528 }
1529
1530 if (l2cap_pi(sk)->fcs == L2CAP_FCS_CRC16)
1531 put_unaligned_le16(0, skb_put(skb, 2));
1532
1533 bt_cb(skb)->retries = 0;
1534 return skb;
1535}
1536
1537static inline int l2cap_sar_segment_sdu(struct sock *sk, struct msghdr *msg, size_t len)
1538{
1539 struct l2cap_pinfo *pi = l2cap_pi(sk);
1540 struct sk_buff *skb;
1541 struct sk_buff_head sar_queue;
1542 u16 control;
1543 size_t size = 0;
1544
1545 __skb_queue_head_init(&sar_queue);
1546 control = L2CAP_SDU_START;
1547 skb = l2cap_create_iframe_pdu(sk, msg, pi->max_pdu_size, control, len);
1548 if (IS_ERR(skb))
1549 return PTR_ERR(skb);
1550
1551 __skb_queue_tail(&sar_queue, skb);
1552 len -= pi->max_pdu_size;
1553 size +=pi->max_pdu_size;
1554 control = 0;
1555
1556 while (len > 0) {
1557 size_t buflen;
1558
1559 if (len > pi->max_pdu_size) {
1560 control |= L2CAP_SDU_CONTINUE;
1561 buflen = pi->max_pdu_size;
1562 } else {
1563 control |= L2CAP_SDU_END;
1564 buflen = len;
1565 }
1566
1567 skb = l2cap_create_iframe_pdu(sk, msg, buflen, control, 0);
1568 if (IS_ERR(skb)) {
1569 skb_queue_purge(&sar_queue);
1570 return PTR_ERR(skb);
1571 }
1572
1573 __skb_queue_tail(&sar_queue, skb);
1574 len -= buflen;
1575 size += buflen;
1576 control = 0;
1577 }
1578 skb_queue_splice_tail(&sar_queue, TX_QUEUE(sk));
1579 if (sk->sk_send_head == NULL)
1580 sk->sk_send_head = sar_queue.next;
1581
1582 return size;
1583}
1584
1585static int l2cap_sock_sendmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg, size_t len)
1586{
1587 struct sock *sk = sock->sk;
1588 struct l2cap_pinfo *pi = l2cap_pi(sk);
1589 struct sk_buff *skb;
1590 u16 control;
1591 int err;
1592
1593 BT_DBG("sock %p, sk %p", sock, sk);
1594
1595 err = sock_error(sk);
1596 if (err)
1597 return err;
1598
1599 if (msg->msg_flags & MSG_OOB)
1600 return -EOPNOTSUPP;
1601
1602 /* Check outgoing MTU */
1603 if (sk->sk_type == SOCK_SEQPACKET && pi->mode == L2CAP_MODE_BASIC
1604 && len > pi->omtu)
1605 return -EINVAL;
1606
1607 lock_sock(sk);
1608
1609 if (sk->sk_state != BT_CONNECTED) {
1610 err = -ENOTCONN;
1611 goto done;
1612 }
1613
1614 /* Connectionless channel */
1615 if (sk->sk_type == SOCK_DGRAM) {
1616 skb = l2cap_create_connless_pdu(sk, msg, len);
1617 err = l2cap_do_send(sk, skb);
1618 goto done;
1619 }
1620
1621 switch (pi->mode) {
1622 case L2CAP_MODE_BASIC:
1623 /* Create a basic PDU */
1624 skb = l2cap_create_basic_pdu(sk, msg, len);
1625 if (IS_ERR(skb)) {
1626 err = PTR_ERR(skb);
1627 goto done;
1628 }
1629
1630 err = l2cap_do_send(sk, skb);
1631 if (!err)
1632 err = len;
1633 break;
1634
1635 case L2CAP_MODE_ERTM:
1636 case L2CAP_MODE_STREAMING:
1637 /* Entire SDU fits into one PDU */
1638 if (len <= pi->max_pdu_size) {
1639 control = L2CAP_SDU_UNSEGMENTED;
1640 skb = l2cap_create_iframe_pdu(sk, msg, len, control, 0);
1641 if (IS_ERR(skb)) {
1642 err = PTR_ERR(skb);
1643 goto done;
1644 }
1645 __skb_queue_tail(TX_QUEUE(sk), skb);
1646 if (sk->sk_send_head == NULL)
1647 sk->sk_send_head = skb;
1648 } else {
1649 /* Segment SDU into multiples PDUs */
1650 err = l2cap_sar_segment_sdu(sk, msg, len);
1651 if (err < 0)
1652 goto done;
1653 }
1654
1655 if (pi->mode == L2CAP_MODE_STREAMING)
1656 err = l2cap_streaming_send(sk);
1657 else
1658 err = l2cap_ertm_send(sk);
1659
1660 if (!err)
1661 err = len;
1662 break;
1663
1664 default:
1665 BT_DBG("bad state %1.1x", pi->mode);
1666 err = -EINVAL;
1667 }
1668
1669done:
1670 release_sock(sk);
1671 return err;
1672}
1673
1674static int l2cap_sock_recvmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg, size_t len, int flags)
1675{
1676 struct sock *sk = sock->sk;
1677
1678 lock_sock(sk);
1679
1680 if (sk->sk_state == BT_CONNECT2 && bt_sk(sk)->defer_setup) {
1681 struct l2cap_conn_rsp rsp;
1682
1683 sk->sk_state = BT_CONFIG;
1684
1685 rsp.scid = cpu_to_le16(l2cap_pi(sk)->dcid);
1686 rsp.dcid = cpu_to_le16(l2cap_pi(sk)->scid);
1687 rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
1688 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
1689 l2cap_send_cmd(l2cap_pi(sk)->conn, l2cap_pi(sk)->ident,
1690 L2CAP_CONN_RSP, sizeof(rsp), &rsp);
1691
1692 release_sock(sk);
1693 return 0;
1694 }
1695
1696 release_sock(sk);
1697
1698 return bt_sock_recvmsg(iocb, sock, msg, len, flags);
1699}
1700
/* Legacy SOL_L2CAP setsockopt handler.
 *
 * L2CAP_OPTIONS: the struct is pre-filled with the socket's current
 * values so a short user buffer (optlen < sizeof(opts)) only overrides
 * a prefix of the fields; flush_to is read but deliberately not written
 * back here.  L2CAP_LM: link-mode flag word mapped onto sec_level and
 * the role_switch/force_reliable booleans.
 *
 * Runs under lock_sock(); returns 0 or a negative errno.
 */
static int l2cap_sock_setsockopt_old(struct socket *sock, int optname, char __user *optval, unsigned int optlen)
{
	struct sock *sk = sock->sk;
	struct l2cap_options opts;
	int len, err = 0;
	u32 opt;

	BT_DBG("sk %p", sk);

	lock_sock(sk);

	switch (optname) {
	case L2CAP_OPTIONS:
		/* Seed with current values so a partial copy below keeps
		 * the remaining fields unchanged. */
		opts.imtu = l2cap_pi(sk)->imtu;
		opts.omtu = l2cap_pi(sk)->omtu;
		opts.flush_to = l2cap_pi(sk)->flush_to;
		opts.mode = l2cap_pi(sk)->mode;
		opts.fcs = l2cap_pi(sk)->fcs;

		len = min_t(unsigned int, sizeof(opts), optlen);
		if (copy_from_user((char *) &opts, optval, len)) {
			err = -EFAULT;
			break;
		}

		l2cap_pi(sk)->imtu = opts.imtu;
		l2cap_pi(sk)->omtu = opts.omtu;
		l2cap_pi(sk)->mode = opts.mode;
		l2cap_pi(sk)->fcs = opts.fcs;
		break;

	case L2CAP_LM:
		if (get_user(opt, (u32 __user *) optval)) {
			err = -EFAULT;
			break;
		}

		/* Highest requested link-mode flag wins: SECURE implies
		 * ENCRYPT implies AUTH. */
		if (opt & L2CAP_LM_AUTH)
			l2cap_pi(sk)->sec_level = BT_SECURITY_LOW;
		if (opt & L2CAP_LM_ENCRYPT)
			l2cap_pi(sk)->sec_level = BT_SECURITY_MEDIUM;
		if (opt & L2CAP_LM_SECURE)
			l2cap_pi(sk)->sec_level = BT_SECURITY_HIGH;

		l2cap_pi(sk)->role_switch = (opt & L2CAP_LM_MASTER);
		l2cap_pi(sk)->force_reliable = (opt & L2CAP_LM_RELIABLE);
		break;

	default:
		err = -ENOPROTOOPT;
		break;
	}

	release_sock(sk);
	return err;
}
1757
/* SOL_BLUETOOTH setsockopt handler.  SOL_L2CAP is routed to the legacy
 * handler above; any other level is rejected.
 *
 * BT_SECURITY: only valid for SEQPACKET/RAW sockets; accepts a
 * (possibly truncated) struct bt_security and validates the level
 * range.  BT_DEFER_SETUP: only valid before the socket is connected
 * (BOUND or LISTEN states).
 *
 * Runs under lock_sock(); returns 0 or a negative errno.
 */
static int l2cap_sock_setsockopt(struct socket *sock, int level, int optname, char __user *optval, unsigned int optlen)
{
	struct sock *sk = sock->sk;
	struct bt_security sec;
	int len, err = 0;
	u32 opt;

	BT_DBG("sk %p", sk);

	if (level == SOL_L2CAP)
		return l2cap_sock_setsockopt_old(sock, optname, optval, optlen);

	if (level != SOL_BLUETOOTH)
		return -ENOPROTOOPT;

	lock_sock(sk);

	switch (optname) {
	case BT_SECURITY:
		if (sk->sk_type != SOCK_SEQPACKET && sk->sk_type != SOCK_RAW) {
			err = -EINVAL;
			break;
		}

		/* Default level if the user buffer is shorter than the
		 * struct (partial copy below). */
		sec.level = BT_SECURITY_LOW;

		len = min_t(unsigned int, sizeof(sec), optlen);
		if (copy_from_user((char *) &sec, optval, len)) {
			err = -EFAULT;
			break;
		}

		if (sec.level < BT_SECURITY_LOW ||
					sec.level > BT_SECURITY_HIGH) {
			err = -EINVAL;
			break;
		}

		l2cap_pi(sk)->sec_level = sec.level;
		break;

	case BT_DEFER_SETUP:
		if (sk->sk_state != BT_BOUND && sk->sk_state != BT_LISTEN) {
			err = -EINVAL;
			break;
		}

		if (get_user(opt, (u32 __user *) optval)) {
			err = -EFAULT;
			break;
		}

		bt_sk(sk)->defer_setup = opt;
		break;

	default:
		err = -ENOPROTOOPT;
		break;
	}

	release_sock(sk);
	return err;
}
1821
1822static int l2cap_sock_getsockopt_old(struct socket *sock, int optname, char __user *optval, int __user *optlen)
1823{
1824 struct sock *sk = sock->sk;
1825 struct l2cap_options opts;
1826 struct l2cap_conninfo cinfo;
1827 int len, err = 0;
1828 u32 opt;
1829
1830 BT_DBG("sk %p", sk);
1831
1832 if (get_user(len, optlen))
1833 return -EFAULT;
1834
1835 lock_sock(sk);
1836
1837 switch (optname) {
1838 case L2CAP_OPTIONS:
1839 opts.imtu = l2cap_pi(sk)->imtu;
1840 opts.omtu = l2cap_pi(sk)->omtu;
1841 opts.flush_to = l2cap_pi(sk)->flush_to;
1842 opts.mode = l2cap_pi(sk)->mode;
1843 opts.fcs = l2cap_pi(sk)->fcs;
1844
1845 len = min_t(unsigned int, len, sizeof(opts));
1846 if (copy_to_user(optval, (char *) &opts, len))
1847 err = -EFAULT;
1848
1849 break;
1850
1851 case L2CAP_LM:
1852 switch (l2cap_pi(sk)->sec_level) {
1853 case BT_SECURITY_LOW:
1854 opt = L2CAP_LM_AUTH;
1855 break;
1856 case BT_SECURITY_MEDIUM:
1857 opt = L2CAP_LM_AUTH | L2CAP_LM_ENCRYPT;
1858 break;
1859 case BT_SECURITY_HIGH:
1860 opt = L2CAP_LM_AUTH | L2CAP_LM_ENCRYPT |
1861 L2CAP_LM_SECURE;
1862 break;
1863 default:
1864 opt = 0;
1865 break;
1866 }
1867
1868 if (l2cap_pi(sk)->role_switch)
1869 opt |= L2CAP_LM_MASTER;
1870
1871 if (l2cap_pi(sk)->force_reliable)
1872 opt |= L2CAP_LM_RELIABLE;
1873
1874 if (put_user(opt, (u32 __user *) optval))
1875 err = -EFAULT;
1876 break;
1877
1878 case L2CAP_CONNINFO:
1879 if (sk->sk_state != BT_CONNECTED &&
1880 !(sk->sk_state == BT_CONNECT2 &&
1881 bt_sk(sk)->defer_setup)) {
1882 err = -ENOTCONN;
1883 break;
1884 }
1885
1886 cinfo.hci_handle = l2cap_pi(sk)->conn->hcon->handle;
1887 memcpy(cinfo.dev_class, l2cap_pi(sk)->conn->hcon->dev_class, 3);
1888
1889 len = min_t(unsigned int, len, sizeof(cinfo));
1890 if (copy_to_user(optval, (char *) &cinfo, len))
1891 err = -EFAULT;
1892
1893 break;
1894
1895 default:
1896 err = -ENOPROTOOPT;
1897 break;
1898 }
1899
1900 release_sock(sk);
1901 return err;
1902}
1903
1904static int l2cap_sock_getsockopt(struct socket *sock, int level, int optname, char __user *optval, int __user *optlen)
1905{
1906 struct sock *sk = sock->sk;
1907 struct bt_security sec;
1908 int len, err = 0;
1909
1910 BT_DBG("sk %p", sk);
1911
1912 if (level == SOL_L2CAP)
1913 return l2cap_sock_getsockopt_old(sock, optname, optval, optlen);
1914
1915 if (level != SOL_BLUETOOTH)
1916 return -ENOPROTOOPT;
1917
1918 if (get_user(len, optlen))
1919 return -EFAULT;
1920
1921 lock_sock(sk);
1922
1923 switch (optname) {
1924 case BT_SECURITY:
1925 if (sk->sk_type != SOCK_SEQPACKET && sk->sk_type != SOCK_RAW) {
1926 err = -EINVAL;
1927 break;
1928 }
1929
1930 sec.level = l2cap_pi(sk)->sec_level;
1931
1932 len = min_t(unsigned int, len, sizeof(sec));
1933 if (copy_to_user(optval, (char *) &sec, len))
1934 err = -EFAULT;
1935
1936 break;
1937
1938 case BT_DEFER_SETUP:
1939 if (sk->sk_state != BT_BOUND && sk->sk_state != BT_LISTEN) {
1940 err = -EINVAL;
1941 break;
1942 }
1943
1944 if (put_user(bt_sk(sk)->defer_setup, (u32 __user *) optval))
1945 err = -EFAULT;
1946
1947 break;
1948
1949 default:
1950 err = -ENOPROTOOPT;
1951 break;
1952 }
1953
1954 release_sock(sk);
1955 return err;
1956}
1957
1958static int l2cap_sock_shutdown(struct socket *sock, int how)
1959{
1960 struct sock *sk = sock->sk;
1961 int err = 0;
1962
1963 BT_DBG("sock %p, sk %p", sock, sk);
1964
1965 if (!sk)
1966 return 0;
1967
1968 lock_sock(sk);
1969 if (!sk->sk_shutdown) {
1970 sk->sk_shutdown = SHUTDOWN_MASK;
1971 l2cap_sock_clear_timer(sk);
1972 __l2cap_sock_close(sk, 0);
1973
1974 if (sock_flag(sk, SOCK_LINGER) && sk->sk_lingertime)
1975 err = bt_sock_wait_state(sk, BT_CLOSED,
1976 sk->sk_lingertime);
1977 }
1978 release_sock(sk);
1979 return err;
1980}
1981
1982static int l2cap_sock_release(struct socket *sock)
1983{
1984 struct sock *sk = sock->sk;
1985 int err;
1986
1987 BT_DBG("sock %p, sk %p", sock, sk);
1988
1989 if (!sk)
1990 return 0;
1991
1992 err = l2cap_sock_shutdown(sock, 2);
1993
1994 sock_orphan(sk);
1995 l2cap_sock_kill(sk);
1996 return err;
1997}
1998
1999static void l2cap_chan_ready(struct sock *sk)
2000{
2001 struct sock *parent = bt_sk(sk)->parent;
2002
2003 BT_DBG("sk %p, parent %p", sk, parent);
2004
2005 l2cap_pi(sk)->conf_state = 0;
2006 l2cap_sock_clear_timer(sk);
2007
2008 if (!parent) {
2009 /* Outgoing channel.
2010 * Wake up socket sleeping on connect.
2011 */
2012 sk->sk_state = BT_CONNECTED;
2013 sk->sk_state_change(sk);
2014 } else {
2015 /* Incoming channel.
2016 * Wake up socket sleeping on accept.
2017 */
2018 parent->sk_data_ready(parent, 0);
2019 }
2020}
2021
2022/* Copy frame to all raw sockets on that connection */
2023static void l2cap_raw_recv(struct l2cap_conn *conn, struct sk_buff *skb)
2024{
2025 struct l2cap_chan_list *l = &conn->chan_list;
2026 struct sk_buff *nskb;
2027 struct sock *sk;
2028
2029 BT_DBG("conn %p", conn);
2030
2031 read_lock(&l->lock);
2032 for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
2033 if (sk->sk_type != SOCK_RAW)
2034 continue;
2035
2036 /* Don't send frame to the socket it came from */
2037 if (skb->sk == sk)
2038 continue;
2039 nskb = skb_clone(skb, GFP_ATOMIC);
2040 if (!nskb)
2041 continue;
2042
2043 if (sock_queue_rcv_skb(sk, nskb))
2044 kfree_skb(nskb);
2045 }
2046 read_unlock(&l->lock);
2047}
2048
2049/* ---- L2CAP signalling commands ---- */
/* Build a signalling-channel PDU: L2CAP header (CID 0x0001) + command
 * header + @dlen bytes of @data.  If the command exceeds the link MTU,
 * the payload overflow is carried in bare continuation skbs chained on
 * frag_list.  Returns the head skb, or NULL on allocation failure
 * (GFP_ATOMIC — callable from softirq context).
 */
static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
					u8 code, u8 ident, u16 dlen, void *data)
{
	struct sk_buff *skb, **frag;
	struct l2cap_cmd_hdr *cmd;
	struct l2cap_hdr *lh;
	int len, count;

	BT_DBG("conn %p, code 0x%2.2x, ident 0x%2.2x, len %d",
			conn, code, ident, dlen);

	/* Total wire length; first skb carries at most one MTU of it. */
	len = L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE + dlen;
	count = min_t(unsigned int, conn->mtu, len);

	skb = bt_skb_alloc(count, GFP_ATOMIC);
	if (!skb)
		return NULL;

	lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
	lh->len = cpu_to_le16(L2CAP_CMD_HDR_SIZE + dlen);
	lh->cid = cpu_to_le16(L2CAP_CID_SIGNALING);

	cmd = (struct l2cap_cmd_hdr *) skb_put(skb, L2CAP_CMD_HDR_SIZE);
	cmd->code = code;
	cmd->ident = ident;
	cmd->len = cpu_to_le16(dlen);

	if (dlen) {
		/* Fill the rest of the first skb with payload. */
		count -= L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE;
		memcpy(skb_put(skb, count), data, count);
		data += count;
	}

	/* Remaining payload bytes still to be packed, if any. */
	len -= skb->len;

	/* Continuation fragments (no L2CAP header) */
	frag = &skb_shinfo(skb)->frag_list;
	while (len) {
		count = min_t(unsigned int, conn->mtu, len);

		*frag = bt_skb_alloc(count, GFP_ATOMIC);
		if (!*frag)
			goto fail;

		memcpy(skb_put(*frag, count), data, count);

		len -= count;
		data += count;

		frag = &(*frag)->next;
	}

	return skb;

fail:
	/* Frees the chained fragments along with the head skb. */
	kfree_skb(skb);
	return NULL;
}
2108
2109static inline int l2cap_get_conf_opt(void **ptr, int *type, int *olen, unsigned long *val)
2110{
2111 struct l2cap_conf_opt *opt = *ptr;
2112 int len;
2113
2114 len = L2CAP_CONF_OPT_SIZE + opt->len;
2115 *ptr += len;
2116
2117 *type = opt->type;
2118 *olen = opt->len;
2119
2120 switch (opt->len) {
2121 case 1:
2122 *val = *((u8 *) opt->val);
2123 break;
2124
2125 case 2:
2126 *val = __le16_to_cpu(*((__le16 *) opt->val));
2127 break;
2128
2129 case 4:
2130 *val = __le32_to_cpu(*((__le32 *) opt->val));
2131 break;
2132
2133 default:
2134 *val = (unsigned long) opt->val;
2135 break;
2136 }
2137
2138 BT_DBG("type 0x%2.2x len %d val 0x%lx", *type, opt->len, *val);
2139 return len;
2140}
2141
2142static void l2cap_add_conf_opt(void **ptr, u8 type, u8 len, unsigned long val)
2143{
2144 struct l2cap_conf_opt *opt = *ptr;
2145
2146 BT_DBG("type 0x%2.2x len %d val 0x%lx", type, len, val);
2147
2148 opt->type = type;
2149 opt->len = len;
2150
2151 switch (len) {
2152 case 1:
2153 *((u8 *) opt->val) = val;
2154 break;
2155
2156 case 2:
2157 *((__le16 *) opt->val) = cpu_to_le16(val);
2158 break;
2159
2160 case 4:
2161 *((__le32 *) opt->val) = cpu_to_le32(val);
2162 break;
2163
2164 default:
2165 memcpy(opt->val, (void *) val, len);
2166 break;
2167 }
2168
2169 *ptr += L2CAP_CONF_OPT_SIZE + len;
2170}
2171
2172static int l2cap_mode_supported(__u8 mode, __u32 feat_mask)
2173{
2174 u32 local_feat_mask = l2cap_feat_mask;
2175 if (enable_ertm)
2176 local_feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING;
2177
2178 switch (mode) {
2179 case L2CAP_MODE_ERTM:
2180 return L2CAP_FEAT_ERTM & feat_mask & local_feat_mask;
2181 case L2CAP_MODE_STREAMING:
2182 return L2CAP_FEAT_STREAMING & feat_mask & local_feat_mask;
2183 default:
2184 return 0x00;
2185 }
2186}
2187
2188static inline __u8 l2cap_select_mode(__u8 mode, __u16 remote_feat_mask)
2189{
2190 switch (mode) {
2191 case L2CAP_MODE_STREAMING:
2192 case L2CAP_MODE_ERTM:
2193 if (l2cap_mode_supported(mode, remote_feat_mask))
2194 return mode;
2195 /* fall through */
2196 default:
2197 return L2CAP_MODE_BASIC;
2198 }
2199}
2200
/* Build an outgoing configuration request for this channel into @data.
 * On the very first request the channel mode is settled: a user-chosen
 * ERTM/streaming mode is kept (and marked mandatory via
 * CONF_STATE2_DEVICE), otherwise a mode is selected from the peer's
 * feature mask.  MTU/RFC/FCS options are then appended for the settled
 * mode.  Returns the number of bytes written.
 */
static int l2cap_build_conf_req(struct sock *sk, void *data)
{
	struct l2cap_pinfo *pi = l2cap_pi(sk);
	struct l2cap_conf_req *req = data;
	/* Seed for l2cap_select_mode() below; overwritten per mode later. */
	struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_ERTM };
	void *ptr = req->data;

	BT_DBG("sk %p", sk);

	/* Mode is only (re)settled on the first request/response round. */
	if (pi->num_conf_req || pi->num_conf_rsp)
		goto done;

	switch (pi->mode) {
	case L2CAP_MODE_STREAMING:
	case L2CAP_MODE_ERTM:
		/* User asked for this mode explicitly: it is mandatory. */
		pi->conf_state |= L2CAP_CONF_STATE2_DEVICE;
		if (!l2cap_mode_supported(pi->mode, pi->conn->feat_mask))
			l2cap_send_disconn_req(pi->conn, sk);
		break;
	default:
		pi->mode = l2cap_select_mode(rfc.mode, pi->conn->feat_mask);
		break;
	}

done:
	switch (pi->mode) {
	case L2CAP_MODE_BASIC:
		/* Only send MTU if it differs from the spec default. */
		if (pi->imtu != L2CAP_DEFAULT_MTU)
			l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, pi->imtu);
		break;

	case L2CAP_MODE_ERTM:
		rfc.mode = L2CAP_MODE_ERTM;
		rfc.txwin_size = L2CAP_DEFAULT_TX_WINDOW;
		rfc.max_transmit = L2CAP_DEFAULT_MAX_TX;
		/* Timeouts are dictated by the receiving side (set to 0
		 * in a request per the L2CAP spec). */
		rfc.retrans_timeout = 0;
		rfc.monitor_timeout = 0;
		rfc.max_pdu_size = cpu_to_le16(L2CAP_DEFAULT_MAX_PDU_SIZE);

		l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
					sizeof(rfc), (unsigned long) &rfc);

		if (!(pi->conn->feat_mask & L2CAP_FEAT_FCS))
			break;

		/* Propose dropping the FCS when allowed on both ends. */
		if (pi->fcs == L2CAP_FCS_NONE ||
				pi->conf_state & L2CAP_CONF_NO_FCS_RECV) {
			pi->fcs = L2CAP_FCS_NONE;
			l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1, pi->fcs);
		}
		break;

	case L2CAP_MODE_STREAMING:
		rfc.mode = L2CAP_MODE_STREAMING;
		/* Window/retry/timeout fields are unused in streaming mode. */
		rfc.txwin_size = 0;
		rfc.max_transmit = 0;
		rfc.retrans_timeout = 0;
		rfc.monitor_timeout = 0;
		rfc.max_pdu_size = cpu_to_le16(L2CAP_DEFAULT_MAX_PDU_SIZE);

		l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
					sizeof(rfc), (unsigned long) &rfc);

		if (!(pi->conn->feat_mask & L2CAP_FEAT_FCS))
			break;

		if (pi->fcs == L2CAP_FCS_NONE ||
				pi->conf_state & L2CAP_CONF_NO_FCS_RECV) {
			pi->fcs = L2CAP_FCS_NONE;
			l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1, pi->fcs);
		}
		break;
	}

	/* FIXME: Need actual value of the flush timeout */
	//if (flush_to != L2CAP_DEFAULT_FLUSH_TO)
	//   l2cap_add_conf_opt(&ptr, L2CAP_CONF_FLUSH_TO, 2, pi->flush_to);

	req->dcid  = cpu_to_le16(pi->dcid);
	req->flags = cpu_to_le16(0);

	return ptr - data;
}
2284
/* Parse the peer's buffered configuration request (pi->conf_req) and
 * build our response into @data.  Walks every option, settles the
 * channel mode on the first exchange, then either accepts the request
 * (recording MTU/RFC parameters and setting CONF_OUTPUT_DONE) or
 * returns an unaccept/unknown result with our counter-proposals.
 * Returns the response length, or -ECONNREFUSED when the mandatory
 * mode cannot be honoured.
 */
static int l2cap_parse_conf_req(struct sock *sk, void *data)
{
	struct l2cap_pinfo *pi = l2cap_pi(sk);
	struct l2cap_conf_rsp *rsp = data;
	void *ptr = rsp->data;
	void *req = pi->conf_req;
	int len = pi->conf_len;
	int type, hint, olen;
	unsigned long val;
	/* Defaults applied when the request omits the option. */
	struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
	u16 mtu = L2CAP_DEFAULT_MTU;
	u16 result = L2CAP_CONF_SUCCESS;

	BT_DBG("sk %p", sk);

	/* First pass: collect every option the peer sent. */
	while (len >= L2CAP_CONF_OPT_SIZE) {
		len -= l2cap_get_conf_opt(&req, &type, &olen, &val);

		/* Hint options may be ignored; others must be understood. */
		hint  = type & L2CAP_CONF_HINT;
		type &= L2CAP_CONF_MASK;

		switch (type) {
		case L2CAP_CONF_MTU:
			mtu = val;
			break;

		case L2CAP_CONF_FLUSH_TO:
			pi->flush_to = val;
			break;

		case L2CAP_CONF_QOS:
			break;

		case L2CAP_CONF_RFC:
			if (olen == sizeof(rfc))
				memcpy(&rfc, (void *) val, olen);
			break;

		case L2CAP_CONF_FCS:
			/* Peer can receive without FCS; remember for our
			 * own FCS negotiation. */
			if (val == L2CAP_FCS_NONE)
				pi->conf_state |= L2CAP_CONF_NO_FCS_RECV;

			break;

		default:
			if (hint)
				break;

			/* List each unknown non-hint option in the reply. */
			result = L2CAP_CONF_UNKNOWN;
			*((u8 *) ptr++) = type;
			break;
		}
	}

	/* Mode is only settled on the first configuration round. */
	if (pi->num_conf_rsp || pi->num_conf_req)
		goto done;

	switch (pi->mode) {
	case L2CAP_MODE_STREAMING:
	case L2CAP_MODE_ERTM:
		/* User-selected mode is mandatory for this channel. */
		pi->conf_state |= L2CAP_CONF_STATE2_DEVICE;
		if (!l2cap_mode_supported(pi->mode, pi->conn->feat_mask))
			return -ECONNREFUSED;
		break;
	default:
		pi->mode = l2cap_select_mode(rfc.mode, pi->conn->feat_mask);
		break;
	}

done:
	if (pi->mode != rfc.mode) {
		/* Counter-propose our mode; give up after one retry. */
		result = L2CAP_CONF_UNACCEPT;
		rfc.mode = pi->mode;

		if (pi->num_conf_rsp == 1)
			return -ECONNREFUSED;

		l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
					sizeof(rfc), (unsigned long) &rfc);
	}


	if (result == L2CAP_CONF_SUCCESS) {
		/* Configure output options and let the other side know
		 * which ones we don't like. */

		if (mtu < L2CAP_DEFAULT_MIN_MTU)
			result = L2CAP_CONF_UNACCEPT;
		else {
			pi->omtu = mtu;
			pi->conf_state |= L2CAP_CONF_MTU_DONE;
		}
		l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, pi->omtu);

		switch (rfc.mode) {
		case L2CAP_MODE_BASIC:
			pi->fcs = L2CAP_FCS_NONE;
			pi->conf_state |= L2CAP_CONF_MODE_DONE;
			break;

		case L2CAP_MODE_ERTM:
			/* Adopt the peer's transmit parameters; we dictate
			 * the timeouts as the receiving side. */
			pi->remote_tx_win = rfc.txwin_size;
			pi->remote_max_tx = rfc.max_transmit;
			pi->max_pdu_size = rfc.max_pdu_size;

			rfc.retrans_timeout = L2CAP_DEFAULT_RETRANS_TO;
			rfc.monitor_timeout = L2CAP_DEFAULT_MONITOR_TO;

			pi->conf_state |= L2CAP_CONF_MODE_DONE;
			break;

		case L2CAP_MODE_STREAMING:
			pi->remote_tx_win = rfc.txwin_size;
			pi->max_pdu_size = rfc.max_pdu_size;

			pi->conf_state |= L2CAP_CONF_MODE_DONE;
			break;

		default:
			/* Unknown RFC mode: reject with our own mode. */
			result = L2CAP_CONF_UNACCEPT;

			memset(&rfc, 0, sizeof(rfc));
			rfc.mode = pi->mode;
		}

		l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
					sizeof(rfc), (unsigned long) &rfc);

		if (result == L2CAP_CONF_SUCCESS)
			pi->conf_state |= L2CAP_CONF_OUTPUT_DONE;
	}
	rsp->scid   = cpu_to_le16(pi->dcid);
	rsp->result = cpu_to_le16(result);
	rsp->flags  = cpu_to_le16(0x0000);

	return ptr - data;
}
2422
2423static int l2cap_parse_conf_rsp(struct sock *sk, void *rsp, int len, void *data, u16 *result)
2424{
2425 struct l2cap_pinfo *pi = l2cap_pi(sk);
2426 struct l2cap_conf_req *req = data;
2427 void *ptr = req->data;
2428 int type, olen;
2429 unsigned long val;
2430 struct l2cap_conf_rfc rfc;
2431
2432 BT_DBG("sk %p, rsp %p, len %d, req %p", sk, rsp, len, data);
2433
2434 while (len >= L2CAP_CONF_OPT_SIZE) {
2435 len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);
2436
2437 switch (type) {
2438 case L2CAP_CONF_MTU:
2439 if (val < L2CAP_DEFAULT_MIN_MTU) {
2440 *result = L2CAP_CONF_UNACCEPT;
2441 pi->omtu = L2CAP_DEFAULT_MIN_MTU;
2442 } else
2443 pi->omtu = val;
2444 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, pi->omtu);
2445 break;
2446
2447 case L2CAP_CONF_FLUSH_TO:
2448 pi->flush_to = val;
2449 l2cap_add_conf_opt(&ptr, L2CAP_CONF_FLUSH_TO,
2450 2, pi->flush_to);
2451 break;
2452
2453 case L2CAP_CONF_RFC:
2454 if (olen == sizeof(rfc))
2455 memcpy(&rfc, (void *)val, olen);
2456
2457 if ((pi->conf_state & L2CAP_CONF_STATE2_DEVICE) &&
2458 rfc.mode != pi->mode)
2459 return -ECONNREFUSED;
2460
2461 pi->mode = rfc.mode;
2462 pi->fcs = 0;
2463
2464 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
2465 sizeof(rfc), (unsigned long) &rfc);
2466 break;
2467 }
2468 }
2469
2470 if (*result == L2CAP_CONF_SUCCESS) {
2471 switch (rfc.mode) {
2472 case L2CAP_MODE_ERTM:
2473 pi->remote_tx_win = rfc.txwin_size;
2474 pi->retrans_timeout = rfc.retrans_timeout;
2475 pi->monitor_timeout = rfc.monitor_timeout;
2476 pi->max_pdu_size = le16_to_cpu(rfc.max_pdu_size);
2477 break;
2478 case L2CAP_MODE_STREAMING:
2479 pi->max_pdu_size = le16_to_cpu(rfc.max_pdu_size);
2480 break;
2481 }
2482 }
2483
2484 req->dcid = cpu_to_le16(pi->dcid);
2485 req->flags = cpu_to_le16(0x0000);
2486
2487 return ptr - data;
2488}
2489
2490static int l2cap_build_conf_rsp(struct sock *sk, void *data, u16 result, u16 flags)
2491{
2492 struct l2cap_conf_rsp *rsp = data;
2493 void *ptr = rsp->data;
2494
2495 BT_DBG("sk %p", sk);
2496
2497 rsp->scid = cpu_to_le16(l2cap_pi(sk)->dcid);
2498 rsp->result = cpu_to_le16(result);
2499 rsp->flags = cpu_to_le16(flags);
2500
2501 return ptr - data;
2502}
2503
2504static inline int l2cap_command_rej(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2505{
2506 struct l2cap_cmd_rej *rej = (struct l2cap_cmd_rej *) data;
2507
2508 if (rej->reason != 0x0000)
2509 return 0;
2510
2511 if ((conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) &&
2512 cmd->ident == conn->info_ident) {
2513 del_timer(&conn->info_timer);
2514
2515 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
2516 conn->info_ident = 0;
2517
2518 l2cap_conn_start(conn);
2519 }
2520
2521 return 0;
2522}
2523
/* Handle an incoming L2CAP Connect Request (BH context, GFP_ATOMIC only).
 *
 * Finds a listening socket for the requested PSM, performs security and
 * backlog checks, allocates a child socket, links it into the connection's
 * channel list and replies with a Connect Response (success, pending, or a
 * refusal code).  If the peer's feature mask is still unknown, an
 * Information Request is sent after the response.
 *
 * NOTE(review): l2cap_get_sock_by_psm() appears to return @parent with
 * bh_lock_sock() held — the lock is dropped at the 'response' label.
 * Confirm against its definition (not visible in this chunk).
 */
static inline int l2cap_connect_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
{
	struct l2cap_chan_list *list = &conn->chan_list;
	struct l2cap_conn_req *req = (struct l2cap_conn_req *) data;
	struct l2cap_conn_rsp rsp;
	struct sock *sk, *parent;
	int result, status = L2CAP_CS_NO_INFO;

	u16 dcid = 0, scid = __le16_to_cpu(req->scid);
	__le16 psm = req->psm;

	BT_DBG("psm 0x%2.2x scid 0x%4.4x", psm, scid);

	/* Check if we have socket listening on psm */
	parent = l2cap_get_sock_by_psm(BT_LISTEN, psm, conn->src);
	if (!parent) {
		result = L2CAP_CR_BAD_PSM;
		goto sendresp;
	}

	/* Check if the ACL is secure enough (if not SDP) */
	if (psm != cpu_to_le16(0x0001) &&
			!hci_conn_check_link_mode(conn->hcon)) {
		conn->disc_reason = 0x05; /* HCI "authentication failure" */
		result = L2CAP_CR_SEC_BLOCK;
		goto response;
	}

	result = L2CAP_CR_NO_MEM;

	/* Check for backlog size */
	if (sk_acceptq_is_full(parent)) {
		BT_DBG("backlog full %d", parent->sk_ack_backlog);
		goto response;
	}

	sk = l2cap_sock_alloc(sock_net(parent), NULL, BTPROTO_L2CAP, GFP_ATOMIC);
	if (!sk)
		goto response;

	write_lock_bh(&list->lock);

	/* Check if we already have channel with that dcid */
	if (__l2cap_get_chan_by_dcid(list, scid)) {
		/* Duplicate remote CID: kill the half-built child socket. */
		write_unlock_bh(&list->lock);
		sock_set_flag(sk, SOCK_ZAPPED);
		l2cap_sock_kill(sk);
		goto response;
	}

	hci_conn_hold(conn->hcon);

	l2cap_sock_init(sk, parent);
	bacpy(&bt_sk(sk)->src, conn->src);
	bacpy(&bt_sk(sk)->dst, conn->dst);
	l2cap_pi(sk)->psm = psm;
	l2cap_pi(sk)->dcid = scid;

	__l2cap_chan_add(conn, sk, parent);
	dcid = l2cap_pi(sk)->scid;

	l2cap_sock_set_timer(sk, sk->sk_sndtimeo);

	l2cap_pi(sk)->ident = cmd->ident;

	if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE) {
		if (l2cap_check_security(sk)) {
			if (bt_sk(sk)->defer_setup) {
				/* Userspace must authorize before accept(). */
				sk->sk_state = BT_CONNECT2;
				result = L2CAP_CR_PEND;
				status = L2CAP_CS_AUTHOR_PEND;
				parent->sk_data_ready(parent, 0);
			} else {
				sk->sk_state = BT_CONFIG;
				result = L2CAP_CR_SUCCESS;
				status = L2CAP_CS_NO_INFO;
			}
		} else {
			/* Security procedures still in progress. */
			sk->sk_state = BT_CONNECT2;
			result = L2CAP_CR_PEND;
			status = L2CAP_CS_AUTHEN_PEND;
		}
	} else {
		/* Feature discovery not done yet: answer "pending". */
		sk->sk_state = BT_CONNECT2;
		result = L2CAP_CR_PEND;
		status = L2CAP_CS_NO_INFO;
	}

	write_unlock_bh(&list->lock);

response:
	bh_unlock_sock(parent);

sendresp:
	rsp.scid = cpu_to_le16(scid);
	rsp.dcid = cpu_to_le16(dcid);
	rsp.result = cpu_to_le16(result);
	rsp.status = cpu_to_le16(status);
	l2cap_send_cmd(conn, cmd->ident, L2CAP_CONN_RSP, sizeof(rsp), &rsp);

	if (result == L2CAP_CR_PEND && status == L2CAP_CS_NO_INFO) {
		/* Kick off feature-mask discovery for this connection. */
		struct l2cap_info_req info;
		info.type = cpu_to_le16(L2CAP_IT_FEAT_MASK);

		conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
		conn->info_ident = l2cap_get_ident(conn);

		mod_timer(&conn->info_timer, jiffies +
					msecs_to_jiffies(L2CAP_INFO_TIMEOUT));

		l2cap_send_cmd(conn, conn->info_ident,
					L2CAP_INFO_REQ, sizeof(info), &info);
	}

	return 0;
}
2640
/* Handle a Connect Response for a channel we initiated.
 *
 * Success moves the channel to BT_CONFIG and immediately sends our
 * Configure Request; "pending" only flags the channel; any other result
 * deletes the channel with ECONNREFUSED.
 *
 * NOTE(review): l2cap_get_chan_by_scid()/l2cap_get_chan_by_ident() appear
 * to return the socket locked — released by bh_unlock_sock() below.
 * Confirm against their definitions (not visible in this chunk).
 */
static inline int l2cap_connect_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
{
	struct l2cap_conn_rsp *rsp = (struct l2cap_conn_rsp *) data;
	u16 scid, dcid, result, status;
	struct sock *sk;
	u8 req[128];

	scid = __le16_to_cpu(rsp->scid);
	dcid = __le16_to_cpu(rsp->dcid);
	result = __le16_to_cpu(rsp->result);
	status = __le16_to_cpu(rsp->status);

	BT_DBG("dcid 0x%4.4x scid 0x%4.4x result 0x%2.2x status 0x%2.2x", dcid, scid, result, status);

	if (scid) {
		sk = l2cap_get_chan_by_scid(&conn->chan_list, scid);
		if (!sk)
			return 0;
	} else {
		/* scid 0: the peer has not assigned a CID yet (pending
		 * response); match the channel by command identifier. */
		sk = l2cap_get_chan_by_ident(&conn->chan_list, cmd->ident);
		if (!sk)
			return 0;
	}

	switch (result) {
	case L2CAP_CR_SUCCESS:
		sk->sk_state = BT_CONFIG;
		l2cap_pi(sk)->ident = 0;
		l2cap_pi(sk)->dcid = dcid;
		l2cap_pi(sk)->conf_state |= L2CAP_CONF_REQ_SENT;

		l2cap_pi(sk)->conf_state &= ~L2CAP_CONF_CONNECT_PEND;

		l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
					l2cap_build_conf_req(sk, req), req);
		l2cap_pi(sk)->num_conf_req++;
		break;

	case L2CAP_CR_PEND:
		l2cap_pi(sk)->conf_state |= L2CAP_CONF_CONNECT_PEND;
		break;

	default:
		l2cap_chan_del(sk, ECONNREFUSED);
		break;
	}

	bh_unlock_sock(sk);
	return 0;
}
2691
/* Handle a Configure Request for one of our channels.
 *
 * Options may arrive split across several requests (continuation flag
 * 0x0001); fragments are accumulated in conf_req/conf_len until the final
 * one, then parsed and answered with a single Configure Response.  Once
 * both directions are configured the channel goes to BT_CONNECTED and the
 * ERTM state/timers are initialised.
 *
 * NOTE(review): l2cap_get_chan_by_scid() appears to return the socket
 * locked; released at 'unlock'.
 */
static inline int l2cap_config_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u16 cmd_len, u8 *data)
{
	struct l2cap_conf_req *req = (struct l2cap_conf_req *) data;
	u16 dcid, flags;
	u8 rsp[64];
	struct sock *sk;
	int len;

	dcid = __le16_to_cpu(req->dcid);
	flags = __le16_to_cpu(req->flags);

	BT_DBG("dcid 0x%4.4x flags 0x%2.2x", dcid, flags);

	sk = l2cap_get_chan_by_scid(&conn->chan_list, dcid);
	if (!sk)
		return -ENOENT;

	if (sk->sk_state == BT_DISCONN)
		goto unlock;

	/* Reject if config buffer is too small. */
	len = cmd_len - sizeof(*req);
	if (l2cap_pi(sk)->conf_len + len > sizeof(l2cap_pi(sk)->conf_req)) {
		l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
				l2cap_build_conf_rsp(sk, rsp,
					L2CAP_CONF_REJECT, flags), rsp);
		goto unlock;
	}

	/* Store config. */
	memcpy(l2cap_pi(sk)->conf_req + l2cap_pi(sk)->conf_len, req->data, len);
	l2cap_pi(sk)->conf_len += len;

	if (flags & 0x0001) {
		/* Incomplete config. Send empty response. */
		l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
				l2cap_build_conf_rsp(sk, rsp,
					L2CAP_CONF_SUCCESS, 0x0001), rsp);
		goto unlock;
	}

	/* Complete config. */
	len = l2cap_parse_conf_req(sk, rsp);
	if (len < 0) {
		/* Unrecoverable option set: tear the channel down. */
		l2cap_send_disconn_req(conn, sk);
		goto unlock;
	}

	l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP, len, rsp);
	l2cap_pi(sk)->num_conf_rsp++;

	/* Reset config buffer. */
	l2cap_pi(sk)->conf_len = 0;

	if (!(l2cap_pi(sk)->conf_state & L2CAP_CONF_OUTPUT_DONE))
		goto unlock;

	if (l2cap_pi(sk)->conf_state & L2CAP_CONF_INPUT_DONE) {
		/* Both directions configured: FCS stays enabled unless both
		 * sides asked to drop it. */
		if (!(l2cap_pi(sk)->conf_state & L2CAP_CONF_NO_FCS_RECV)
				|| l2cap_pi(sk)->fcs != L2CAP_FCS_NONE)
			l2cap_pi(sk)->fcs = L2CAP_FCS_CRC16;

		sk->sk_state = BT_CONNECTED;
		l2cap_pi(sk)->next_tx_seq = 0;
		l2cap_pi(sk)->expected_ack_seq = 0;
		l2cap_pi(sk)->unacked_frames = 0;

		setup_timer(&l2cap_pi(sk)->retrans_timer,
				l2cap_retrans_timeout, (unsigned long) sk);
		setup_timer(&l2cap_pi(sk)->monitor_timer,
				l2cap_monitor_timeout, (unsigned long) sk);

		__skb_queue_head_init(TX_QUEUE(sk));
		__skb_queue_head_init(SREJ_QUEUE(sk));
		l2cap_chan_ready(sk);
		goto unlock;
	}

	if (!(l2cap_pi(sk)->conf_state & L2CAP_CONF_REQ_SENT)) {
		/* We answered the peer but have not sent our own Configure
		 * Request yet — do it now. */
		u8 buf[64];
		l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
					l2cap_build_conf_req(sk, buf), buf);
		l2cap_pi(sk)->num_conf_req++;
	}

unlock:
	bh_unlock_sock(sk);
	return 0;
}
2781
2782static inline int l2cap_config_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2783{
2784 struct l2cap_conf_rsp *rsp = (struct l2cap_conf_rsp *)data;
2785 u16 scid, flags, result;
2786 struct sock *sk;
2787
2788 scid = __le16_to_cpu(rsp->scid);
2789 flags = __le16_to_cpu(rsp->flags);
2790 result = __le16_to_cpu(rsp->result);
2791
2792 BT_DBG("scid 0x%4.4x flags 0x%2.2x result 0x%2.2x",
2793 scid, flags, result);
2794
2795 sk = l2cap_get_chan_by_scid(&conn->chan_list, scid);
2796 if (!sk)
2797 return 0;
2798
2799 switch (result) {
2800 case L2CAP_CONF_SUCCESS:
2801 break;
2802
2803 case L2CAP_CONF_UNACCEPT:
2804 if (l2cap_pi(sk)->num_conf_rsp <= L2CAP_CONF_MAX_CONF_RSP) {
2805 int len = cmd->len - sizeof(*rsp);
2806 char req[64];
2807
2808 /* throw out any old stored conf requests */
2809 result = L2CAP_CONF_SUCCESS;
2810 len = l2cap_parse_conf_rsp(sk, rsp->data,
2811 len, req, &result);
2812 if (len < 0) {
2813 l2cap_send_disconn_req(conn, sk);
2814 goto done;
2815 }
2816
2817 l2cap_send_cmd(conn, l2cap_get_ident(conn),
2818 L2CAP_CONF_REQ, len, req);
2819 l2cap_pi(sk)->num_conf_req++;
2820 if (result != L2CAP_CONF_SUCCESS)
2821 goto done;
2822 break;
2823 }
2824
2825 default:
2826 sk->sk_state = BT_DISCONN;
2827 sk->sk_err = ECONNRESET;
2828 l2cap_sock_set_timer(sk, HZ * 5);
2829 l2cap_send_disconn_req(conn, sk);
2830 goto done;
2831 }
2832
2833 if (flags & 0x01)
2834 goto done;
2835
2836 l2cap_pi(sk)->conf_state |= L2CAP_CONF_INPUT_DONE;
2837
2838 if (l2cap_pi(sk)->conf_state & L2CAP_CONF_OUTPUT_DONE) {
2839 if (!(l2cap_pi(sk)->conf_state & L2CAP_CONF_NO_FCS_RECV)
2840 || l2cap_pi(sk)->fcs != L2CAP_FCS_NONE)
2841 l2cap_pi(sk)->fcs = L2CAP_FCS_CRC16;
2842
2843 sk->sk_state = BT_CONNECTED;
2844 l2cap_pi(sk)->expected_tx_seq = 0;
2845 l2cap_pi(sk)->buffer_seq = 0;
2846 l2cap_pi(sk)->num_to_ack = 0;
2847 __skb_queue_head_init(TX_QUEUE(sk));
2848 __skb_queue_head_init(SREJ_QUEUE(sk));
2849 l2cap_chan_ready(sk);
2850 }
2851
2852done:
2853 bh_unlock_sock(sk);
2854 return 0;
2855}
2856
/* Handle a Disconnect Request from the peer: acknowledge with a
 * Disconnect Response, purge ERTM queues and timers, and delete the
 * channel with ECONNRESET.
 *
 * NOTE(review): l2cap_get_chan_by_scid() appears to return the socket
 * locked; the lock is dropped before l2cap_sock_kill().
 */
static inline int l2cap_disconnect_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
{
	struct l2cap_disconn_req *req = (struct l2cap_disconn_req *) data;
	struct l2cap_disconn_rsp rsp;
	u16 dcid, scid;
	struct sock *sk;

	scid = __le16_to_cpu(req->scid);
	dcid = __le16_to_cpu(req->dcid);

	BT_DBG("scid 0x%4.4x dcid 0x%4.4x", scid, dcid);

	/* The peer's dcid is our scid. */
	sk = l2cap_get_chan_by_scid(&conn->chan_list, dcid);
	if (!sk)
		return 0;

	rsp.dcid = cpu_to_le16(l2cap_pi(sk)->scid);
	rsp.scid = cpu_to_le16(l2cap_pi(sk)->dcid);
	l2cap_send_cmd(conn, cmd->ident, L2CAP_DISCONN_RSP, sizeof(rsp), &rsp);

	sk->sk_shutdown = SHUTDOWN_MASK;

	/* Drop any pending ERTM traffic and stop retransmission timers. */
	skb_queue_purge(TX_QUEUE(sk));
	skb_queue_purge(SREJ_QUEUE(sk));
	del_timer(&l2cap_pi(sk)->retrans_timer);
	del_timer(&l2cap_pi(sk)->monitor_timer);

	l2cap_chan_del(sk, ECONNRESET);
	bh_unlock_sock(sk);

	l2cap_sock_kill(sk);
	return 0;
}
2890
/* Handle a Disconnect Response to a disconnect we initiated: purge ERTM
 * queues and timers and delete the channel (no error reported — err 0).
 *
 * NOTE(review): l2cap_get_chan_by_scid() appears to return the socket
 * locked; the lock is dropped before l2cap_sock_kill().
 */
static inline int l2cap_disconnect_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
{
	struct l2cap_disconn_rsp *rsp = (struct l2cap_disconn_rsp *) data;
	u16 dcid, scid;
	struct sock *sk;

	scid = __le16_to_cpu(rsp->scid);
	dcid = __le16_to_cpu(rsp->dcid);

	BT_DBG("dcid 0x%4.4x scid 0x%4.4x", dcid, scid);

	sk = l2cap_get_chan_by_scid(&conn->chan_list, scid);
	if (!sk)
		return 0;

	/* Drop any pending ERTM traffic and stop retransmission timers. */
	skb_queue_purge(TX_QUEUE(sk));
	skb_queue_purge(SREJ_QUEUE(sk));
	del_timer(&l2cap_pi(sk)->retrans_timer);
	del_timer(&l2cap_pi(sk)->monitor_timer);

	l2cap_chan_del(sk, 0);
	bh_unlock_sock(sk);

	l2cap_sock_kill(sk);
	return 0;
}
2917
2918static inline int l2cap_information_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2919{
2920 struct l2cap_info_req *req = (struct l2cap_info_req *) data;
2921 u16 type;
2922
2923 type = __le16_to_cpu(req->type);
2924
2925 BT_DBG("type 0x%4.4x", type);
2926
2927 if (type == L2CAP_IT_FEAT_MASK) {
2928 u8 buf[8];
2929 u32 feat_mask = l2cap_feat_mask;
2930 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
2931 rsp->type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
2932 rsp->result = cpu_to_le16(L2CAP_IR_SUCCESS);
2933 if (enable_ertm)
2934 feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING
2935 | L2CAP_FEAT_FCS;
2936 put_unaligned_le32(feat_mask, rsp->data);
2937 l2cap_send_cmd(conn, cmd->ident,
2938 L2CAP_INFO_RSP, sizeof(buf), buf);
2939 } else if (type == L2CAP_IT_FIXED_CHAN) {
2940 u8 buf[12];
2941 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
2942 rsp->type = cpu_to_le16(L2CAP_IT_FIXED_CHAN);
2943 rsp->result = cpu_to_le16(L2CAP_IR_SUCCESS);
2944 memcpy(buf + 4, l2cap_fixed_chan, 8);
2945 l2cap_send_cmd(conn, cmd->ident,
2946 L2CAP_INFO_RSP, sizeof(buf), buf);
2947 } else {
2948 struct l2cap_info_rsp rsp;
2949 rsp.type = cpu_to_le16(type);
2950 rsp.result = cpu_to_le16(L2CAP_IR_NOTSUPP);
2951 l2cap_send_cmd(conn, cmd->ident,
2952 L2CAP_INFO_RSP, sizeof(rsp), &rsp);
2953 }
2954
2955 return 0;
2956}
2957
2958static inline int l2cap_information_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2959{
2960 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) data;
2961 u16 type, result;
2962
2963 type = __le16_to_cpu(rsp->type);
2964 result = __le16_to_cpu(rsp->result);
2965
2966 BT_DBG("type 0x%4.4x result 0x%2.2x", type, result);
2967
2968 del_timer(&conn->info_timer);
2969
2970 if (type == L2CAP_IT_FEAT_MASK) {
2971 conn->feat_mask = get_unaligned_le32(rsp->data);
2972
2973 if (conn->feat_mask & L2CAP_FEAT_FIXED_CHAN) {
2974 struct l2cap_info_req req;
2975 req.type = cpu_to_le16(L2CAP_IT_FIXED_CHAN);
2976
2977 conn->info_ident = l2cap_get_ident(conn);
2978
2979 l2cap_send_cmd(conn, conn->info_ident,
2980 L2CAP_INFO_REQ, sizeof(req), &req);
2981 } else {
2982 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
2983 conn->info_ident = 0;
2984
2985 l2cap_conn_start(conn);
2986 }
2987 } else if (type == L2CAP_IT_FIXED_CHAN) {
2988 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
2989 conn->info_ident = 0;
2990
2991 l2cap_conn_start(conn);
2992 }
2993
2994 return 0;
2995}
2996
/* Parse and dispatch every signalling command carried in @skb.
 *
 * The raw PDU is first mirrored to any raw sockets via l2cap_raw_recv().
 * Commands are processed back to back; a handler error produces an L2CAP
 * Command Reject for that command and processing continues.  The skb is
 * always consumed.
 */
static inline void l2cap_sig_channel(struct l2cap_conn *conn, struct sk_buff *skb)
{
	u8 *data = skb->data;
	int len = skb->len;
	struct l2cap_cmd_hdr cmd;
	int err = 0;

	l2cap_raw_recv(conn, skb);

	while (len >= L2CAP_CMD_HDR_SIZE) {
		u16 cmd_len;
		memcpy(&cmd, data, L2CAP_CMD_HDR_SIZE);
		data += L2CAP_CMD_HDR_SIZE;
		len -= L2CAP_CMD_HDR_SIZE;

		cmd_len = le16_to_cpu(cmd.len);

		BT_DBG("code 0x%2.2x len %d id 0x%2.2x", cmd.code, cmd_len, cmd.ident);

		/* A command claiming more payload than remains, or with the
		 * reserved identifier 0, is malformed: stop parsing. */
		if (cmd_len > len || !cmd.ident) {
			BT_DBG("corrupted command");
			break;
		}

		switch (cmd.code) {
		case L2CAP_COMMAND_REJ:
			l2cap_command_rej(conn, &cmd, data);
			break;

		case L2CAP_CONN_REQ:
			err = l2cap_connect_req(conn, &cmd, data);
			break;

		case L2CAP_CONN_RSP:
			err = l2cap_connect_rsp(conn, &cmd, data);
			break;

		case L2CAP_CONF_REQ:
			err = l2cap_config_req(conn, &cmd, cmd_len, data);
			break;

		case L2CAP_CONF_RSP:
			err = l2cap_config_rsp(conn, &cmd, data);
			break;

		case L2CAP_DISCONN_REQ:
			err = l2cap_disconnect_req(conn, &cmd, data);
			break;

		case L2CAP_DISCONN_RSP:
			err = l2cap_disconnect_rsp(conn, &cmd, data);
			break;

		case L2CAP_ECHO_REQ:
			/* Echo the payload straight back. */
			l2cap_send_cmd(conn, cmd.ident, L2CAP_ECHO_RSP, cmd_len, data);
			break;

		case L2CAP_ECHO_RSP:
			break;

		case L2CAP_INFO_REQ:
			err = l2cap_information_req(conn, &cmd, data);
			break;

		case L2CAP_INFO_RSP:
			err = l2cap_information_rsp(conn, &cmd, data);
			break;

		default:
			BT_ERR("Unknown signaling command 0x%2.2x", cmd.code);
			err = -EINVAL;
			break;
		}

		if (err) {
			struct l2cap_cmd_rej rej;
			BT_DBG("error %d", err);

			/* FIXME: Map err to a valid reason */
			rej.reason = cpu_to_le16(0);
			l2cap_send_cmd(conn, cmd.ident, L2CAP_COMMAND_REJ, sizeof(rej), &rej);
		}

		data += cmd_len;
		len -= cmd_len;
	}

	kfree_skb(skb);
}
3086
/* Verify the 16-bit FCS trailer of an ERTM/streaming frame.
 *
 * Trims the 2 FCS bytes off @skb, then compares the received value with a
 * CRC-16 computed over the L2CAP header (which sits just before
 * skb->data) plus the remaining payload.  Reading at skb->data + skb->len
 * right after skb_trim() is intentional: trimming only shrinks the length,
 * the FCS bytes are still present in the linear buffer.
 *
 * Returns 0 when the FCS matches or FCS is not in use, -EINVAL otherwise.
 */
static int l2cap_check_fcs(struct l2cap_pinfo *pi, struct sk_buff *skb)
{
	u16 our_fcs, rcv_fcs;
	int hdr_size = L2CAP_HDR_SIZE + 2;	/* basic header + control field */

	if (pi->fcs == L2CAP_FCS_CRC16) {
		skb_trim(skb, skb->len - 2);
		rcv_fcs = get_unaligned_le16(skb->data + skb->len);
		our_fcs = crc16(0, skb->data - hdr_size, skb->len + hdr_size);

		if (our_fcs != rcv_fcs)
			return -EINVAL;
	}
	return 0;
}
3102
/* Insert an out-of-order I-frame into the SREJ queue, keeping the queue
 * sorted by tx_seq (a frame is placed before the first entry with a
 * strictly greater sequence number, so equal sequences append after).
 *
 * NOTE(review): the comparison uses raw tx_seq values and does not account
 * for mod-64 wrap-around — confirm callers never hold frames across a
 * sequence-number wrap.
 */
static void l2cap_add_to_srej_queue(struct sock *sk, struct sk_buff *skb, u8 tx_seq, u8 sar)
{
	struct sk_buff *next_skb;

	/* Stash sequence and SAR bits in the skb control block for later
	 * in-order delivery by l2cap_check_srej_gap(). */
	bt_cb(skb)->tx_seq = tx_seq;
	bt_cb(skb)->sar = sar;

	next_skb = skb_peek(SREJ_QUEUE(sk));
	if (!next_skb) {
		__skb_queue_tail(SREJ_QUEUE(sk), skb);
		return;
	}

	do {
		if (bt_cb(next_skb)->tx_seq > tx_seq) {
			__skb_queue_before(SREJ_QUEUE(sk), next_skb, skb);
			return;
		}

		if (skb_queue_is_last(SREJ_QUEUE(sk), next_skb))
			break;

	} while((next_skb = skb_queue_next(SREJ_QUEUE(sk), next_skb)));

	__skb_queue_tail(SREJ_QUEUE(sk), skb);
}
3129
3130static int l2cap_sar_reassembly_sdu(struct sock *sk, struct sk_buff *skb, u16 control)
3131{
3132 struct l2cap_pinfo *pi = l2cap_pi(sk);
3133 struct sk_buff *_skb;
3134 int err = -EINVAL;
3135
3136 switch (control & L2CAP_CTRL_SAR) {
3137 case L2CAP_SDU_UNSEGMENTED:
3138 if (pi->conn_state & L2CAP_CONN_SAR_SDU) {
3139 kfree_skb(pi->sdu);
3140 break;
3141 }
3142
3143 err = sock_queue_rcv_skb(sk, skb);
3144 if (!err)
3145 return 0;
3146
3147 break;
3148
3149 case L2CAP_SDU_START:
3150 if (pi->conn_state & L2CAP_CONN_SAR_SDU) {
3151 kfree_skb(pi->sdu);
3152 break;
3153 }
3154
3155 pi->sdu_len = get_unaligned_le16(skb->data);
3156 skb_pull(skb, 2);
3157
3158 pi->sdu = bt_skb_alloc(pi->sdu_len, GFP_ATOMIC);
3159 if (!pi->sdu) {
3160 err = -ENOMEM;
3161 break;
3162 }
3163
3164 memcpy(skb_put(pi->sdu, skb->len), skb->data, skb->len);
3165
3166 pi->conn_state |= L2CAP_CONN_SAR_SDU;
3167 pi->partial_sdu_len = skb->len;
3168 err = 0;
3169 break;
3170
3171 case L2CAP_SDU_CONTINUE:
3172 if (!(pi->conn_state & L2CAP_CONN_SAR_SDU))
3173 break;
3174
3175 memcpy(skb_put(pi->sdu, skb->len), skb->data, skb->len);
3176
3177 pi->partial_sdu_len += skb->len;
3178 if (pi->partial_sdu_len > pi->sdu_len)
3179 kfree_skb(pi->sdu);
3180 else
3181 err = 0;
3182
3183 break;
3184
3185 case L2CAP_SDU_END:
3186 if (!(pi->conn_state & L2CAP_CONN_SAR_SDU))
3187 break;
3188
3189 memcpy(skb_put(pi->sdu, skb->len), skb->data, skb->len);
3190
3191 pi->conn_state &= ~L2CAP_CONN_SAR_SDU;
3192 pi->partial_sdu_len += skb->len;
3193
3194 if (pi->partial_sdu_len == pi->sdu_len) {
3195 _skb = skb_clone(pi->sdu, GFP_ATOMIC);
3196 err = sock_queue_rcv_skb(sk, _skb);
3197 if (err < 0)
3198 kfree_skb(_skb);
3199 }
3200 kfree_skb(pi->sdu);
3201 err = 0;
3202
3203 break;
3204 }
3205
3206 kfree_skb(skb);
3207 return err;
3208}
3209
3210static void l2cap_check_srej_gap(struct sock *sk, u8 tx_seq)
3211{
3212 struct sk_buff *skb;
3213 u16 control = 0;
3214
3215 while((skb = skb_peek(SREJ_QUEUE(sk)))) {
3216 if (bt_cb(skb)->tx_seq != tx_seq)
3217 break;
3218
3219 skb = skb_dequeue(SREJ_QUEUE(sk));
3220 control |= bt_cb(skb)->sar << L2CAP_CTRL_SAR_SHIFT;
3221 l2cap_sar_reassembly_sdu(sk, skb, control);
3222 l2cap_pi(sk)->buffer_seq_srej =
3223 (l2cap_pi(sk)->buffer_seq_srej + 1) % 64;
3224 tx_seq++;
3225 }
3226}
3227
/* Walk the pending-SREJ list up to @tx_seq.
 *
 * The entry matching @tx_seq has been satisfied and is removed; every
 * other entry encountered before it is re-SREJed and rotated to the back
 * of the list.
 *
 * NOTE(review): entries are moved to the tail while iterating with the
 * _safe iterator; termination relies on @tx_seq actually being present in
 * the list — confirm callers guarantee that.
 */
static void l2cap_resend_srejframe(struct sock *sk, u8 tx_seq)
{
	struct l2cap_pinfo *pi = l2cap_pi(sk);
	struct srej_list *l, *tmp;
	u16 control;

	list_for_each_entry_safe(l,tmp, SREJ_LIST(sk), list) {
		if (l->tx_seq == tx_seq) {
			/* This gap has been filled: drop its bookkeeping. */
			list_del(&l->list);
			kfree(l);
			return;
		}
		/* Still missing: ask for it again and move it to the tail. */
		control = L2CAP_SUPER_SELECT_REJECT;
		control |= l->tx_seq << L2CAP_CTRL_REQSEQ_SHIFT;
		l2cap_send_sframe(pi, control);
		list_del(&l->list);
		list_add_tail(&l->list, SREJ_LIST(sk));
	}
}
3247
3248static void l2cap_send_srejframe(struct sock *sk, u8 tx_seq)
3249{
3250 struct l2cap_pinfo *pi = l2cap_pi(sk);
3251 struct srej_list *new;
3252 u16 control;
3253
3254 while (tx_seq != pi->expected_tx_seq) {
3255 control = L2CAP_SUPER_SELECT_REJECT;
3256 control |= pi->expected_tx_seq << L2CAP_CTRL_REQSEQ_SHIFT;
3257 if (pi->conn_state & L2CAP_CONN_SEND_PBIT) {
3258 control |= L2CAP_CTRL_POLL;
3259 pi->conn_state &= ~L2CAP_CONN_SEND_PBIT;
3260 }
3261 l2cap_send_sframe(pi, control);
3262
3263 new = kzalloc(sizeof(struct srej_list), GFP_ATOMIC);
3264 new->tx_seq = pi->expected_tx_seq++;
3265 list_add_tail(&new->list, SREJ_LIST(sk));
3266 }
3267 pi->expected_tx_seq++;
3268}
3269
/* Process one incoming ERTM I-frame.
 *
 * In-sequence frames are pushed into SDU reassembly and acknowledged with
 * an RR every L2CAP_DEFAULT_NUM_TO_ACK frames.  Out-of-sequence frames are
 * buffered on the SREJ queue and each missing frame is requested with a
 * SREJ S-frame; once the gap closes, buffered frames are drained via
 * l2cap_check_srej_gap().
 */
static inline int l2cap_data_channel_iframe(struct sock *sk, u16 rx_control, struct sk_buff *skb)
{
	struct l2cap_pinfo *pi = l2cap_pi(sk);
	u8 tx_seq = __get_txseq(rx_control);
	u16 tx_control = 0;
	u8 sar = rx_control >> L2CAP_CTRL_SAR_SHIFT;
	int err = 0;

	BT_DBG("sk %p rx_control 0x%4.4x len %d", sk, rx_control, skb->len);

	if (tx_seq == pi->expected_tx_seq)
		goto expected;

	if (pi->conn_state & L2CAP_CONN_SREJ_SENT) {
		struct srej_list *first;

		first = list_first_entry(SREJ_LIST(sk),
				struct srej_list, list);
		if (tx_seq == first->tx_seq) {
			/* The oldest missing frame arrived: buffer it and
			 * drain everything that is now in sequence. */
			l2cap_add_to_srej_queue(sk, skb, tx_seq, sar);
			l2cap_check_srej_gap(sk, tx_seq);

			list_del(&first->list);
			kfree(first);

			if (list_empty(SREJ_LIST(sk))) {
				/* All gaps closed: leave SREJ recovery. */
				pi->buffer_seq = pi->buffer_seq_srej;
				pi->conn_state &= ~L2CAP_CONN_SREJ_SENT;
			}
		} else {
			struct srej_list *l;
			l2cap_add_to_srej_queue(sk, skb, tx_seq, sar);

			/* A duplicate of an already-requested frame means our
			 * SREJ may have been lost: resend it. */
			list_for_each_entry(l, SREJ_LIST(sk), list) {
				if (l->tx_seq == tx_seq) {
					l2cap_resend_srejframe(sk, tx_seq);
					return 0;
				}
			}
			l2cap_send_srejframe(sk, tx_seq);
		}
	} else {
		/* First out-of-sequence frame: enter SREJ recovery. */
		pi->conn_state |= L2CAP_CONN_SREJ_SENT;

		INIT_LIST_HEAD(SREJ_LIST(sk));
		pi->buffer_seq_srej = pi->buffer_seq;

		__skb_queue_head_init(SREJ_QUEUE(sk));
		l2cap_add_to_srej_queue(sk, skb, tx_seq, sar);

		pi->conn_state |= L2CAP_CONN_SEND_PBIT;

		l2cap_send_srejframe(sk, tx_seq);
	}
	return 0;

expected:
	pi->expected_tx_seq = (pi->expected_tx_seq + 1) % 64;

	if (pi->conn_state & L2CAP_CONN_SREJ_SENT) {
		/* Still recovering: hold the frame until the gap closes. */
		l2cap_add_to_srej_queue(sk, skb, tx_seq, sar);
		return 0;
	}

	pi->buffer_seq = (pi->buffer_seq + 1) % 64;

	err = l2cap_sar_reassembly_sdu(sk, skb, rx_control);
	if (err < 0)
		return err;

	/* Acknowledge a batch of frames with a single RR. */
	pi->num_to_ack = (pi->num_to_ack + 1) % L2CAP_DEFAULT_NUM_TO_ACK;
	if (pi->num_to_ack == L2CAP_DEFAULT_NUM_TO_ACK - 1) {
		tx_control |= L2CAP_SUPER_RCV_READY;
		tx_control |= pi->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT;
		l2cap_send_sframe(pi, tx_control);
	}
	return 0;
}
3348
3349static inline int l2cap_data_channel_sframe(struct sock *sk, u16 rx_control, struct sk_buff *skb)
3350{
3351 struct l2cap_pinfo *pi = l2cap_pi(sk);
3352 u8 tx_seq = __get_reqseq(rx_control);
3353
3354 BT_DBG("sk %p rx_control 0x%4.4x len %d", sk, rx_control, skb->len);
3355
3356 switch (rx_control & L2CAP_CTRL_SUPERVISE) {
3357 case L2CAP_SUPER_RCV_READY:
3358 if (rx_control & L2CAP_CTRL_POLL) {
3359 u16 control = L2CAP_CTRL_FINAL;
3360 control |= L2CAP_SUPER_RCV_READY |
3361 (pi->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT);
3362 l2cap_send_sframe(l2cap_pi(sk), control);
3363 pi->conn_state &= ~L2CAP_CONN_REMOTE_BUSY;
3364
3365 } else if (rx_control & L2CAP_CTRL_FINAL) {
3366 pi->conn_state &= ~L2CAP_CONN_REMOTE_BUSY;
3367 pi->expected_ack_seq = tx_seq;
3368 l2cap_drop_acked_frames(sk);
3369
3370 if (!(pi->conn_state & L2CAP_CONN_WAIT_F))
3371 break;
3372
3373 pi->conn_state &= ~L2CAP_CONN_WAIT_F;
3374 del_timer(&pi->monitor_timer);
3375
3376 if (pi->unacked_frames > 0)
3377 __mod_retrans_timer();
3378 } else {
3379 pi->expected_ack_seq = tx_seq;
3380 l2cap_drop_acked_frames(sk);
3381
3382 if ((pi->conn_state & L2CAP_CONN_REMOTE_BUSY)
3383 && (pi->unacked_frames > 0))
3384 __mod_retrans_timer();
3385
3386 l2cap_ertm_send(sk);
3387 pi->conn_state &= ~L2CAP_CONN_REMOTE_BUSY;
3388 }
3389 break;
3390
3391 case L2CAP_SUPER_REJECT:
3392 pi->conn_state &= ~L2CAP_CONN_REMOTE_BUSY;
3393
3394 pi->expected_ack_seq = __get_reqseq(rx_control);
3395 l2cap_drop_acked_frames(sk);
3396
3397 sk->sk_send_head = TX_QUEUE(sk)->next;
3398 pi->next_tx_seq = pi->expected_ack_seq;
3399
3400 l2cap_ertm_send(sk);
3401
3402 break;
3403
3404 case L2CAP_SUPER_SELECT_REJECT:
3405 pi->conn_state &= ~L2CAP_CONN_REMOTE_BUSY;
3406
3407 if (rx_control & L2CAP_CTRL_POLL) {
3408 l2cap_retransmit_frame(sk, tx_seq);
3409 pi->expected_ack_seq = tx_seq;
3410 l2cap_drop_acked_frames(sk);
3411 l2cap_ertm_send(sk);
3412 if (pi->conn_state & L2CAP_CONN_WAIT_F) {
3413 pi->srej_save_reqseq = tx_seq;
3414 pi->conn_state |= L2CAP_CONN_SREJ_ACT;
3415 }
3416 } else if (rx_control & L2CAP_CTRL_FINAL) {
3417 if ((pi->conn_state & L2CAP_CONN_SREJ_ACT) &&
3418 pi->srej_save_reqseq == tx_seq)
3419 pi->srej_save_reqseq &= ~L2CAP_CONN_SREJ_ACT;
3420 else
3421 l2cap_retransmit_frame(sk, tx_seq);
3422 }
3423 else {
3424 l2cap_retransmit_frame(sk, tx_seq);
3425 if (pi->conn_state & L2CAP_CONN_WAIT_F) {
3426 pi->srej_save_reqseq = tx_seq;
3427 pi->conn_state |= L2CAP_CONN_SREJ_ACT;
3428 }
3429 }
3430 break;
3431
3432 case L2CAP_SUPER_RCV_NOT_READY:
3433 pi->conn_state |= L2CAP_CONN_REMOTE_BUSY;
3434 pi->expected_ack_seq = tx_seq;
3435 l2cap_drop_acked_frames(sk);
3436
3437 del_timer(&l2cap_pi(sk)->retrans_timer);
3438 if (rx_control & L2CAP_CTRL_POLL) {
3439 u16 control = L2CAP_CTRL_FINAL;
3440 l2cap_send_rr_or_rnr(l2cap_pi(sk), control);
3441 }
3442 break;
3443 }
3444
3445 return 0;
3446}
3447
/* Deliver one data PDU to the channel identified by @cid.
 *
 * Dispatches by channel mode: basic mode queues straight to the socket,
 * ERTM strips/validates the control field and FCS then hands off to the
 * I-frame or S-frame handlers, streaming mode tolerates losses and only
 * tracks the expected sequence number.  Note the deliberate fall-through
 * from 'drop' into 'done' — a dropped skb is freed, then the (possibly
 * NULL) socket is unlocked.
 *
 * NOTE(review): l2cap_get_chan_by_scid() appears to return the socket
 * locked; released at 'done'.
 */
static inline int l2cap_data_channel(struct l2cap_conn *conn, u16 cid, struct sk_buff *skb)
{
	struct sock *sk;
	struct l2cap_pinfo *pi;
	u16 control, len;
	u8 tx_seq;
	int err;

	sk = l2cap_get_chan_by_scid(&conn->chan_list, cid);
	if (!sk) {
		BT_DBG("unknown cid 0x%4.4x", cid);
		goto drop;
	}

	pi = l2cap_pi(sk);

	BT_DBG("sk %p, len %d", sk, skb->len);

	if (sk->sk_state != BT_CONNECTED)
		goto drop;

	switch (pi->mode) {
	case L2CAP_MODE_BASIC:
		/* If socket recv buffers overflows we drop data here
		 * which is *bad* because L2CAP has to be reliable.
		 * But we don't have any other choice. L2CAP doesn't
		 * provide flow control mechanism. */

		if (pi->imtu < skb->len)
			goto drop;

		if (!sock_queue_rcv_skb(sk, skb))
			goto done;
		break;

	case L2CAP_MODE_ERTM:
		control = get_unaligned_le16(skb->data);
		skb_pull(skb, 2);
		len = skb->len;

		/* Payload length excludes the SAR length field and FCS. */
		if (__is_sar_start(control))
			len -= 2;

		if (pi->fcs == L2CAP_FCS_CRC16)
			len -= 2;

		/*
		 * We can just drop the corrupted I-frame here.
		 * Receiver will miss it and start proper recovery
		 * procedures and ask retransmission.
		 */
		if (len > L2CAP_DEFAULT_MAX_PDU_SIZE)
			goto drop;

		if (l2cap_check_fcs(pi, skb))
			goto drop;

		if (__is_iframe(control))
			err = l2cap_data_channel_iframe(sk, control, skb);
		else
			err = l2cap_data_channel_sframe(sk, control, skb);

		if (!err)
			goto done;
		break;

	case L2CAP_MODE_STREAMING:
		control = get_unaligned_le16(skb->data);
		skb_pull(skb, 2);
		len = skb->len;

		if (__is_sar_start(control))
			len -= 2;

		if (pi->fcs == L2CAP_FCS_CRC16)
			len -= 2;

		/* S-frames are not used in streaming mode. */
		if (len > L2CAP_DEFAULT_MAX_PDU_SIZE || __is_sframe(control))
			goto drop;

		if (l2cap_check_fcs(pi, skb))
			goto drop;

		tx_seq = __get_txseq(control);

		if (pi->expected_tx_seq == tx_seq)
			pi->expected_tx_seq = (pi->expected_tx_seq + 1) % 64;
		else
			pi->expected_tx_seq = tx_seq + 1;

		/* NOTE(review): the reassembly error code is discarded here;
		 * streaming mode is lossy by design, but confirm this is
		 * intentional. */
		err = l2cap_sar_reassembly_sdu(sk, skb, control);

		goto done;

	default:
		BT_DBG("sk %p: bad mode 0x%2.2x", sk, l2cap_pi(sk)->mode);
		break;
	}

drop:
	kfree_skb(skb);

done:
	if (sk)
		bh_unlock_sock(sk);

	return 0;
}
3556
3557static inline int l2cap_conless_channel(struct l2cap_conn *conn, __le16 psm, struct sk_buff *skb)
3558{
3559 struct sock *sk;
3560
3561 sk = l2cap_get_sock_by_psm(0, psm, conn->src);
3562 if (!sk)
3563 goto drop;
3564
3565 BT_DBG("sk %p, len %d", sk, skb->len);
3566
3567 if (sk->sk_state != BT_BOUND && sk->sk_state != BT_CONNECTED)
3568 goto drop;
3569
3570 if (l2cap_pi(sk)->imtu < skb->len)
3571 goto drop;
3572
3573 if (!sock_queue_rcv_skb(sk, skb))
3574 goto done;
3575
3576drop:
3577 kfree_skb(skb);
3578
3579done:
3580 if (sk)
3581 bh_unlock_sock(sk);
3582 return 0;
3583}
3584
3585static void l2cap_recv_frame(struct l2cap_conn *conn, struct sk_buff *skb)
3586{
3587 struct l2cap_hdr *lh = (void *) skb->data;
3588 u16 cid, len;
3589 __le16 psm;
3590
3591 skb_pull(skb, L2CAP_HDR_SIZE);
3592 cid = __le16_to_cpu(lh->cid);
3593 len = __le16_to_cpu(lh->len);
3594
3595 if (len != skb->len) {
3596 kfree_skb(skb);
3597 return;
3598 }
3599
3600 BT_DBG("len %d, cid 0x%4.4x", len, cid);
3601
3602 switch (cid) {
3603 case L2CAP_CID_SIGNALING:
3604 l2cap_sig_channel(conn, skb);
3605 break;
3606
3607 case L2CAP_CID_CONN_LESS:
3608 psm = get_unaligned_le16(skb->data);
3609 skb_pull(skb, 2);
3610 l2cap_conless_channel(conn, psm, skb);
3611 break;
3612
3613 default:
3614 l2cap_data_channel(conn, cid, skb);
3615 break;
3616 }
3617}
3618
3619/* ---- L2CAP interface with lower layer (HCI) ---- */
3620
3621static int l2cap_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
3622{
3623 int exact = 0, lm1 = 0, lm2 = 0;
3624 register struct sock *sk;
3625 struct hlist_node *node;
3626
3627 if (type != ACL_LINK)
3628 return 0;
3629
3630 BT_DBG("hdev %s, bdaddr %s", hdev->name, batostr(bdaddr));
3631
3632 /* Find listening sockets and check their link_mode */
3633 read_lock(&l2cap_sk_list.lock);
3634 sk_for_each(sk, node, &l2cap_sk_list.head) {
3635 if (sk->sk_state != BT_LISTEN)
3636 continue;
3637
3638 if (!bacmp(&bt_sk(sk)->src, &hdev->bdaddr)) {
3639 lm1 |= HCI_LM_ACCEPT;
3640 if (l2cap_pi(sk)->role_switch)
3641 lm1 |= HCI_LM_MASTER;
3642 exact++;
3643 } else if (!bacmp(&bt_sk(sk)->src, BDADDR_ANY)) {
3644 lm2 |= HCI_LM_ACCEPT;
3645 if (l2cap_pi(sk)->role_switch)
3646 lm2 |= HCI_LM_MASTER;
3647 }
3648 }
3649 read_unlock(&l2cap_sk_list.lock);
3650
3651 return exact ? lm1 : lm2;
3652}
3653
3654static int l2cap_connect_cfm(struct hci_conn *hcon, u8 status)
3655{
3656 struct l2cap_conn *conn;
3657
3658 BT_DBG("hcon %p bdaddr %s status %d", hcon, batostr(&hcon->dst), status);
3659
3660 if (hcon->type != ACL_LINK)
3661 return 0;
3662
3663 if (!status) {
3664 conn = l2cap_conn_add(hcon, status);
3665 if (conn)
3666 l2cap_conn_ready(conn);
3667 } else
3668 l2cap_conn_del(hcon, bt_err(status));
3669
3670 return 0;
3671}
3672
3673static int l2cap_disconn_ind(struct hci_conn *hcon)
3674{
3675 struct l2cap_conn *conn = hcon->l2cap_data;
3676
3677 BT_DBG("hcon %p", hcon);
3678
3679 if (hcon->type != ACL_LINK || !conn)
3680 return 0x13;
3681
3682 return conn->disc_reason;
3683}
3684
3685static int l2cap_disconn_cfm(struct hci_conn *hcon, u8 reason)
3686{
3687 BT_DBG("hcon %p reason %d", hcon, reason);
3688
3689 if (hcon->type != ACL_LINK)
3690 return 0;
3691
3692 l2cap_conn_del(hcon, bt_err(reason));
3693
3694 return 0;
3695}
3696
3697static inline void l2cap_check_encryption(struct sock *sk, u8 encrypt)
3698{
3699 if (sk->sk_type != SOCK_SEQPACKET)
3700 return;
3701
3702 if (encrypt == 0x00) {
3703 if (l2cap_pi(sk)->sec_level == BT_SECURITY_MEDIUM) {
3704 l2cap_sock_clear_timer(sk);
3705 l2cap_sock_set_timer(sk, HZ * 5);
3706 } else if (l2cap_pi(sk)->sec_level == BT_SECURITY_HIGH)
3707 __l2cap_sock_close(sk, ECONNREFUSED);
3708 } else {
3709 if (l2cap_pi(sk)->sec_level == BT_SECURITY_MEDIUM)
3710 l2cap_sock_clear_timer(sk);
3711 }
3712}
3713
/* HCI callback: an authentication/encryption procedure finished.
 *
 * Walks every channel on the connection (channel-list read lock held,
 * each socket bh-locked in turn) and advances its state machine:
 * established channels get an encryption check, BT_CONNECT channels send
 * their pending Connect Request on success, and BT_CONNECT2 channels
 * answer the peer's Connect Request with success or a security block.
 */
static int l2cap_security_cfm(struct hci_conn *hcon, u8 status, u8 encrypt)
{
	struct l2cap_chan_list *l;
	struct l2cap_conn *conn = hcon->l2cap_data;
	struct sock *sk;

	if (!conn)
		return 0;

	l = &conn->chan_list;

	BT_DBG("conn %p", conn);

	read_lock(&l->lock);

	for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
		bh_lock_sock(sk);

		/* Channels still waiting for their Connect Response are
		 * handled via that response, not here. */
		if (l2cap_pi(sk)->conf_state & L2CAP_CONF_CONNECT_PEND) {
			bh_unlock_sock(sk);
			continue;
		}

		if (!status && (sk->sk_state == BT_CONNECTED ||
						sk->sk_state == BT_CONFIG)) {
			l2cap_check_encryption(sk, encrypt);
			bh_unlock_sock(sk);
			continue;
		}

		if (sk->sk_state == BT_CONNECT) {
			if (!status) {
				/* Security passed: send the deferred
				 * Connect Request. */
				struct l2cap_conn_req req;
				req.scid = cpu_to_le16(l2cap_pi(sk)->scid);
				req.psm  = l2cap_pi(sk)->psm;

				l2cap_pi(sk)->ident = l2cap_get_ident(conn);

				l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
					L2CAP_CONN_REQ, sizeof(req), &req);
			} else {
				l2cap_sock_clear_timer(sk);
				l2cap_sock_set_timer(sk, HZ / 10);
			}
		} else if (sk->sk_state == BT_CONNECT2) {
			/* We owe the peer a Connect Response. */
			struct l2cap_conn_rsp rsp;
			__u16 result;

			if (!status) {
				sk->sk_state = BT_CONFIG;
				result = L2CAP_CR_SUCCESS;
			} else {
				sk->sk_state = BT_DISCONN;
				l2cap_sock_set_timer(sk, HZ / 10);
				result = L2CAP_CR_SEC_BLOCK;
			}

			rsp.scid = cpu_to_le16(l2cap_pi(sk)->dcid);
			rsp.dcid = cpu_to_le16(l2cap_pi(sk)->scid);
			rsp.result = cpu_to_le16(result);
			rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
			l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
					L2CAP_CONN_RSP, sizeof(rsp), &rsp);
		}

		bh_unlock_sock(sk);
	}

	read_unlock(&l->lock);

	return 0;
}
3786
/*
 * HCI callback: one ACL data packet (a complete L2CAP PDU or a fragment
 * of one) arrived on @hcon.  Fragments are reassembled into
 * conn->rx_skb; complete frames are handed to l2cap_recv_frame().
 *
 * Runs from HCI RX processing (atomic context) — hence GFP_ATOMIC for
 * the reassembly buffer.  Always returns 0; @skb is consumed on every
 * path (passed on as a complete frame, or freed at "drop", which is
 * also the normal exit after copying a fragment).
 */
static int l2cap_recv_acldata(struct hci_conn *hcon, struct sk_buff *skb, u16 flags)
{
	struct l2cap_conn *conn = hcon->l2cap_data;

	/* Data may arrive before a connect event was seen; create the
	 * connection object on demand. */
	if (!conn && !(conn = l2cap_conn_add(hcon, 0)))
		goto drop;

	BT_DBG("conn %p len %d flags 0x%x", conn, skb->len, flags);

	if (flags & ACL_START) {
		struct l2cap_hdr *hdr;
		int len;

		/* A new start frame while reassembly is still pending means
		 * the previous PDU was truncated: discard its remains. */
		if (conn->rx_len) {
			BT_ERR("Unexpected start frame (len %d)", skb->len);
			kfree_skb(conn->rx_skb);
			conn->rx_skb = NULL;
			conn->rx_len = 0;
			l2cap_conn_unreliable(conn, ECOMM);
		}

		/* Need at least the 16-bit length field of the basic header. */
		if (skb->len < 2) {
			BT_ERR("Frame is too short (len %d)", skb->len);
			l2cap_conn_unreliable(conn, ECOMM);
			goto drop;
		}

		hdr = (struct l2cap_hdr *) skb->data;
		len = __le16_to_cpu(hdr->len) + L2CAP_HDR_SIZE;

		if (len == skb->len) {
			/* Complete frame received */
			l2cap_recv_frame(conn, skb);
			return 0;
		}

		BT_DBG("Start: total len %d, frag len %d", len, skb->len);

		if (skb->len > len) {
			BT_ERR("Frame is too long (len %d, expected len %d)",
				skb->len, len);
			l2cap_conn_unreliable(conn, ECOMM);
			goto drop;
		}

		/* Allocate skb for the complete frame (with header) */
		conn->rx_skb = bt_skb_alloc(len, GFP_ATOMIC);
		if (!conn->rx_skb)
			goto drop;

		skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
				skb->len);
		/* Bytes still outstanding for this PDU. */
		conn->rx_len = len - skb->len;
	} else {
		BT_DBG("Cont: frag len %d (expecting %d)", skb->len, conn->rx_len);

		/* Continuation with no reassembly in progress. */
		if (!conn->rx_len) {
			BT_ERR("Unexpected continuation frame (len %d)", skb->len);
			l2cap_conn_unreliable(conn, ECOMM);
			goto drop;
		}

		/* Fragment would overflow the remaining PDU space: abort
		 * reassembly entirely. */
		if (skb->len > conn->rx_len) {
			BT_ERR("Fragment is too long (len %d, expected %d)",
				skb->len, conn->rx_len);
			kfree_skb(conn->rx_skb);
			conn->rx_skb = NULL;
			conn->rx_len = 0;
			l2cap_conn_unreliable(conn, ECOMM);
			goto drop;
		}

		skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
				skb->len);
		conn->rx_len -= skb->len;

		if (!conn->rx_len) {
			/* Complete frame received */
			l2cap_recv_frame(conn, conn->rx_skb);
			conn->rx_skb = NULL;
		}
	}

drop:
	/* Original skb was either short, bad, or copied into rx_skb;
	 * in all of these cases it is no longer needed. */
	kfree_skb(skb);
	return 0;
}
3874
/*
 * sysfs class attribute: dump one line per L2CAP socket
 * (addresses, state, PSM, CIDs, MTUs, security level).
 *
 * NOTE(review): output is built with unbounded sprintf() into the
 * single page sysfs provides; with very many sockets this could
 * overrun @buf — verify against expected socket counts.
 */
static ssize_t l2cap_sysfs_show(struct class *dev, char *buf)
{
	struct sock *sk;
	struct hlist_node *node;
	char *str = buf;

	read_lock_bh(&l2cap_sk_list.lock);

	sk_for_each(sk, node, &l2cap_sk_list.head) {
		struct l2cap_pinfo *pi = l2cap_pi(sk);

		str += sprintf(str, "%s %s %d %d 0x%4.4x 0x%4.4x %d %d %d\n",
				batostr(&bt_sk(sk)->src), batostr(&bt_sk(sk)->dst),
				sk->sk_state, __le16_to_cpu(pi->psm), pi->scid,
				pi->dcid, pi->imtu, pi->omtu, pi->sec_level);
	}

	read_unlock_bh(&l2cap_sk_list.lock);

	return str - buf;
}
3896
/* Read-only sysfs attribute backed by l2cap_sysfs_show(). */
static CLASS_ATTR(l2cap, S_IRUGO, l2cap_sysfs_show, NULL);
3898
/* Socket-layer entry points for L2CAP sockets; unsupported operations
 * fall back to the sock_no_* stubs. */
static const struct proto_ops l2cap_sock_ops = {
	.family		= PF_BLUETOOTH,
	.owner		= THIS_MODULE,
	.release	= l2cap_sock_release,
	.bind		= l2cap_sock_bind,
	.connect	= l2cap_sock_connect,
	.listen		= l2cap_sock_listen,
	.accept		= l2cap_sock_accept,
	.getname	= l2cap_sock_getname,
	.sendmsg	= l2cap_sock_sendmsg,
	.recvmsg	= l2cap_sock_recvmsg,
	.poll		= bt_sock_poll,
	.ioctl		= bt_sock_ioctl,
	.mmap		= sock_no_mmap,
	.socketpair	= sock_no_socketpair,
	.shutdown	= l2cap_sock_shutdown,
	.setsockopt	= l2cap_sock_setsockopt,
	.getsockopt	= l2cap_sock_getsockopt
};
3918
/* Registered with the Bluetooth socket layer so socket(PF_BLUETOOTH,
 * ..., BTPROTO_L2CAP) is routed to l2cap_sock_create(). */
static struct net_proto_family l2cap_sock_family_ops = {
	.family	= PF_BLUETOOTH,
	.owner	= THIS_MODULE,
	.create	= l2cap_sock_create,
};
3924
/* Hooks registered with the HCI core: these are how connection events,
 * security results and ACL data reach L2CAP. */
static struct hci_proto l2cap_hci_proto = {
	.name		= "L2CAP",
	.id		= HCI_PROTO_L2CAP,
	.connect_ind	= l2cap_connect_ind,
	.connect_cfm	= l2cap_connect_cfm,
	.disconn_ind	= l2cap_disconn_ind,
	.disconn_cfm	= l2cap_disconn_cfm,
	.security_cfm	= l2cap_security_cfm,
	.recv_acldata	= l2cap_recv_acldata
};
3935
3936static int __init l2cap_init(void)
3937{
3938 int err;
3939
3940 err = proto_register(&l2cap_proto, 0);
3941 if (err < 0)
3942 return err;
3943
3944 err = bt_sock_register(BTPROTO_L2CAP, &l2cap_sock_family_ops);
3945 if (err < 0) {
3946 BT_ERR("L2CAP socket registration failed");
3947 goto error;
3948 }
3949
3950 err = hci_register_proto(&l2cap_hci_proto);
3951 if (err < 0) {
3952 BT_ERR("L2CAP protocol registration failed");
3953 bt_sock_unregister(BTPROTO_L2CAP);
3954 goto error;
3955 }
3956
3957 if (class_create_file(bt_class, &class_attr_l2cap) < 0)
3958 BT_ERR("Failed to create L2CAP info file");
3959
3960 BT_INFO("L2CAP ver %s", VERSION);
3961 BT_INFO("L2CAP socket layer initialized");
3962
3963 return 0;
3964
3965error:
3966 proto_unregister(&l2cap_proto);
3967 return err;
3968}
3969
/* Module unload: tear everything down in the reverse order of
 * l2cap_init(); failures can only be logged at this point. */
static void __exit l2cap_exit(void)
{
	class_remove_file(bt_class, &class_attr_l2cap);

	if (bt_sock_unregister(BTPROTO_L2CAP) < 0)
		BT_ERR("L2CAP socket unregistration failed");

	if (hci_unregister_proto(&l2cap_hci_proto) < 0)
		BT_ERR("L2CAP protocol unregistration failed");

	proto_unregister(&l2cap_proto);
}
3982
/* Intentionally empty.  Exported only so that other modules which use
 * L2CAP sockets (but no other symbol from this file) can reference it
 * and thereby trigger automatic loading of this module. */
void l2cap_load(void)
{
}
EXPORT_SYMBOL(l2cap_load);
3991
module_init(l2cap_init);
module_exit(l2cap_exit);

/* enable_ertm is defined earlier in this file; mode 0644 makes it
 * writable at runtime via /sys/module/.../parameters. */
module_param(enable_ertm, bool, 0644);
MODULE_PARM_DESC(enable_ertm, "Enable enhanced retransmission mode");

MODULE_AUTHOR("Marcel Holtmann <marcel@holtmann.org>");
MODULE_DESCRIPTION("Bluetooth L2CAP ver " VERSION);
MODULE_VERSION(VERSION);
MODULE_LICENSE("GPL");
/* Autoload this module when Bluetooth protocol 0 (L2CAP) is requested. */
MODULE_ALIAS("bt-proto-0");