Bluetooth: Select Basic Mode as default for SOCK_SEQPACKET
[GitHub/mt8127/android_kernel_alcatel_ttab.git] / net / bluetooth / l2cap.c
1 /*
2 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (C) 2000-2001 Qualcomm Incorporated
4
5 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
6
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License version 2 as
9 published by the Free Software Foundation;
10
11 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
12 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
13 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
14 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
15 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
16 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
17 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
18 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
19
20 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
21 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
22 SOFTWARE IS DISCLAIMED.
23 */
24
25 /* Bluetooth L2CAP core and sockets. */
26
27 #include <linux/module.h>
28
29 #include <linux/types.h>
30 #include <linux/capability.h>
31 #include <linux/errno.h>
32 #include <linux/kernel.h>
33 #include <linux/sched.h>
34 #include <linux/slab.h>
35 #include <linux/poll.h>
36 #include <linux/fcntl.h>
37 #include <linux/init.h>
38 #include <linux/interrupt.h>
39 #include <linux/socket.h>
40 #include <linux/skbuff.h>
41 #include <linux/list.h>
42 #include <linux/device.h>
43 #include <linux/uaccess.h>
44 #include <linux/crc16.h>
45 #include <net/sock.h>
46
47 #include <asm/system.h>
48 #include <asm/unaligned.h>
49
50 #include <net/bluetooth/bluetooth.h>
51 #include <net/bluetooth/hci_core.h>
52 #include <net/bluetooth/l2cap.h>
53
#define VERSION "2.14"

/* ERTM/streaming modes are compiled in but rejected at connect/listen
 * time unless this switch is set (see the mode checks in
 * l2cap_sock_connect() and l2cap_sock_listen()). */
static int enable_ertm = 0;

/* Feature mask advertised in Information Responses. */
static u32 l2cap_feat_mask = L2CAP_FEAT_FIXED_CHAN;
/* Supported fixed channels bitmap; bit 1 = L2CAP signalling. */
static u8 l2cap_fixed_chan[8] = { 0x02, };

static const struct proto_ops l2cap_sock_ops;

/* Global list of all L2CAP sockets, protected by its rwlock. */
static struct bt_sock_list l2cap_sk_list = {
	.lock = __RW_LOCK_UNLOCKED(l2cap_sk_list.lock)
};

/* Forward declarations for routines used before their definitions. */
static void __l2cap_sock_close(struct sock *sk, int reason);
static void l2cap_sock_close(struct sock *sk);
static void l2cap_sock_kill(struct sock *sk);

static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
				u8 code, u8 ident, u16 dlen, void *data);
73
74 /* ---- L2CAP timers ---- */
/* Per-socket timer callback (armed via l2cap_sock_set_timer()).
 * Runs in timer/softirq context, hence bh_lock_sock().  Picks an error
 * code based on the phase the channel was in, tears the channel down,
 * then drops the reference the timer held on the socket.
 */
static void l2cap_sock_timeout(unsigned long arg)
{
	struct sock *sk = (struct sock *) arg;
	int reason;

	BT_DBG("sock %p state %d", sk, sk->sk_state);

	bh_lock_sock(sk);

	if (sk->sk_state == BT_CONNECTED || sk->sk_state == BT_CONFIG)
		reason = ECONNREFUSED;
	else if (sk->sk_state == BT_CONNECT &&
			l2cap_pi(sk)->sec_level != BT_SECURITY_SDP)
		/* Connect attempt with real security requirements: report
		 * refusal rather than a plain timeout. */
		reason = ECONNREFUSED;
	else
		reason = ETIMEDOUT;

	__l2cap_sock_close(sk, reason);

	bh_unlock_sock(sk);

	/* Socket is zapped now; free it if it is also orphaned. */
	l2cap_sock_kill(sk);
	sock_put(sk);
}
99
100 static void l2cap_sock_set_timer(struct sock *sk, long timeout)
101 {
102 BT_DBG("sk %p state %d timeout %ld", sk, sk->sk_state, timeout);
103 sk_reset_timer(sk, &sk->sk_timer, jiffies + timeout);
104 }
105
106 static void l2cap_sock_clear_timer(struct sock *sk)
107 {
108 BT_DBG("sock %p state %d", sk, sk->sk_state);
109 sk_stop_timer(sk, &sk->sk_timer);
110 }
111
112 /* ---- L2CAP channels ---- */
113 static struct sock *__l2cap_get_chan_by_dcid(struct l2cap_chan_list *l, u16 cid)
114 {
115 struct sock *s;
116 for (s = l->head; s; s = l2cap_pi(s)->next_c) {
117 if (l2cap_pi(s)->dcid == cid)
118 break;
119 }
120 return s;
121 }
122
123 static struct sock *__l2cap_get_chan_by_scid(struct l2cap_chan_list *l, u16 cid)
124 {
125 struct sock *s;
126 for (s = l->head; s; s = l2cap_pi(s)->next_c) {
127 if (l2cap_pi(s)->scid == cid)
128 break;
129 }
130 return s;
131 }
132
133 /* Find channel with given SCID.
134 * Returns locked socket */
135 static inline struct sock *l2cap_get_chan_by_scid(struct l2cap_chan_list *l, u16 cid)
136 {
137 struct sock *s;
138 read_lock(&l->lock);
139 s = __l2cap_get_chan_by_scid(l, cid);
140 if (s)
141 bh_lock_sock(s);
142 read_unlock(&l->lock);
143 return s;
144 }
145
146 static struct sock *__l2cap_get_chan_by_ident(struct l2cap_chan_list *l, u8 ident)
147 {
148 struct sock *s;
149 for (s = l->head; s; s = l2cap_pi(s)->next_c) {
150 if (l2cap_pi(s)->ident == ident)
151 break;
152 }
153 return s;
154 }
155
156 static inline struct sock *l2cap_get_chan_by_ident(struct l2cap_chan_list *l, u8 ident)
157 {
158 struct sock *s;
159 read_lock(&l->lock);
160 s = __l2cap_get_chan_by_ident(l, ident);
161 if (s)
162 bh_lock_sock(s);
163 read_unlock(&l->lock);
164 return s;
165 }
166
167 static u16 l2cap_alloc_cid(struct l2cap_chan_list *l)
168 {
169 u16 cid = L2CAP_CID_DYN_START;
170
171 for (; cid < L2CAP_CID_DYN_END; cid++) {
172 if (!__l2cap_get_chan_by_scid(l, cid))
173 return cid;
174 }
175
176 return 0;
177 }
178
179 static inline void __l2cap_chan_link(struct l2cap_chan_list *l, struct sock *sk)
180 {
181 sock_hold(sk);
182
183 if (l->head)
184 l2cap_pi(l->head)->prev_c = sk;
185
186 l2cap_pi(sk)->next_c = l->head;
187 l2cap_pi(sk)->prev_c = NULL;
188 l->head = sk;
189 }
190
/* Remove @sk from @l's channel list, dropping the reference taken by
 * __l2cap_chan_link().  List pointers are rewired under the list
 * write lock.  NOTE(review): sk's own next_c/prev_c are read before
 * the lock is taken — presumably safe because only the list lock
 * holder mutates them; confirm against other call sites. */
static inline void l2cap_chan_unlink(struct l2cap_chan_list *l, struct sock *sk)
{
	struct sock *next = l2cap_pi(sk)->next_c, *prev = l2cap_pi(sk)->prev_c;

	write_lock_bh(&l->lock);
	if (sk == l->head)
		l->head = next;

	if (next)
		l2cap_pi(next)->prev_c = prev;
	if (prev)
		l2cap_pi(prev)->next_c = next;
	write_unlock_bh(&l->lock);

	/* Drop the list's reference (lock already released). */
	__sock_put(sk);
}
207
/* Attach @sk to @conn's channel list and assign CIDs by socket type.
 * Caller holds the list write lock (see l2cap_chan_add()).  @parent,
 * when non-NULL, is a listening socket that receives @sk on its
 * accept queue (incoming connection case). */
static void __l2cap_chan_add(struct l2cap_conn *conn, struct sock *sk, struct sock *parent)
{
	struct l2cap_chan_list *l = &conn->chan_list;

	BT_DBG("conn %p, psm 0x%2.2x, dcid 0x%4.4x", conn,
			l2cap_pi(sk)->psm, l2cap_pi(sk)->dcid);

	/* Default disconnect reason: 0x13 (remote user terminated). */
	conn->disc_reason = 0x13;

	l2cap_pi(sk)->conn = conn;

	if (sk->sk_type == SOCK_SEQPACKET) {
		/* Alloc CID for connection-oriented socket */
		l2cap_pi(sk)->scid = l2cap_alloc_cid(l);
	} else if (sk->sk_type == SOCK_DGRAM) {
		/* Connectionless socket */
		l2cap_pi(sk)->scid = L2CAP_CID_CONN_LESS;
		l2cap_pi(sk)->dcid = L2CAP_CID_CONN_LESS;
		l2cap_pi(sk)->omtu = L2CAP_DEFAULT_MTU;
	} else {
		/* Raw socket can send/recv signalling messages only */
		l2cap_pi(sk)->scid = L2CAP_CID_SIGNALING;
		l2cap_pi(sk)->dcid = L2CAP_CID_SIGNALING;
		l2cap_pi(sk)->omtu = L2CAP_DEFAULT_MTU;
	}

	__l2cap_chan_link(l, sk);

	if (parent)
		bt_accept_enqueue(parent, sk);
}
239
/* Delete channel.
 * Must be called on the locked socket.
 * Detaches @sk from its connection, marks it closed/zapped, records
 * @err on the socket, and notifies either the accepting parent or the
 * socket's own waiters. */
static void l2cap_chan_del(struct sock *sk, int err)
{
	struct l2cap_conn *conn = l2cap_pi(sk)->conn;
	struct sock *parent = bt_sk(sk)->parent;

	l2cap_sock_clear_timer(sk);

	BT_DBG("sk %p, conn %p, err %d", sk, conn, err);

	if (conn) {
		/* Unlink from channel list */
		l2cap_chan_unlink(&conn->chan_list, sk);
		l2cap_pi(sk)->conn = NULL;
		/* Drop the ACL link reference held by this channel. */
		hci_conn_put(conn->hcon);
	}

	sk->sk_state = BT_CLOSED;
	sock_set_flag(sk, SOCK_ZAPPED);

	if (err)
		sk->sk_err = err;

	if (parent) {
		/* Incoming channel that was never accepted: take it off the
		 * parent's queue and wake the listener. */
		bt_accept_unlink(sk);
		parent->sk_data_ready(parent, 0);
	} else
		sk->sk_state_change(sk);
}
270
271 /* Service level security */
272 static inline int l2cap_check_security(struct sock *sk)
273 {
274 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
275 __u8 auth_type;
276
277 if (l2cap_pi(sk)->psm == cpu_to_le16(0x0001)) {
278 if (l2cap_pi(sk)->sec_level == BT_SECURITY_HIGH)
279 auth_type = HCI_AT_NO_BONDING_MITM;
280 else
281 auth_type = HCI_AT_NO_BONDING;
282
283 if (l2cap_pi(sk)->sec_level == BT_SECURITY_LOW)
284 l2cap_pi(sk)->sec_level = BT_SECURITY_SDP;
285 } else {
286 switch (l2cap_pi(sk)->sec_level) {
287 case BT_SECURITY_HIGH:
288 auth_type = HCI_AT_GENERAL_BONDING_MITM;
289 break;
290 case BT_SECURITY_MEDIUM:
291 auth_type = HCI_AT_GENERAL_BONDING;
292 break;
293 default:
294 auth_type = HCI_AT_NO_BONDING;
295 break;
296 }
297 }
298
299 return hci_conn_security(conn->hcon, l2cap_pi(sk)->sec_level,
300 auth_type);
301 }
302
303 static inline u8 l2cap_get_ident(struct l2cap_conn *conn)
304 {
305 u8 id;
306
307 /* Get next available identificator.
308 * 1 - 128 are used by kernel.
309 * 129 - 199 are reserved.
310 * 200 - 254 are used by utilities like l2ping, etc.
311 */
312
313 spin_lock_bh(&conn->lock);
314
315 if (++conn->tx_ident > 128)
316 conn->tx_ident = 1;
317
318 id = conn->tx_ident;
319
320 spin_unlock_bh(&conn->lock);
321
322 return id;
323 }
324
325 static inline int l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len, void *data)
326 {
327 struct sk_buff *skb = l2cap_build_cmd(conn, code, ident, len, data);
328
329 BT_DBG("code 0x%2.2x", code);
330
331 if (!skb)
332 return -ENOMEM;
333
334 return hci_send_acl(conn->hcon, skb, 0);
335 }
336
/* Build and transmit one ERTM supervisory frame (S-frame) carrying
 * the bits in @control; the S-frame type bit is forced here. */
static inline int l2cap_send_sframe(struct l2cap_pinfo *pi, u16 control)
{
	struct sk_buff *skb;
	struct l2cap_hdr *lh;
	struct l2cap_conn *conn = pi->conn;
	int count, hlen = L2CAP_HDR_SIZE + 2;	/* basic header + control field */

	if (pi->fcs == L2CAP_FCS_CRC16)
		hlen += 2;	/* room for the trailing FCS */

	BT_DBG("pi %p, control 0x%2.2x", pi, control);

	/* NOTE(review): frame is capped at the ACL MTU; if conn->mtu were
	 * ever smaller than hlen the S-frame would be truncated —
	 * presumably never the case, but worth confirming. */
	count = min_t(unsigned int, conn->mtu, hlen);
	control |= L2CAP_CTRL_FRAME_TYPE;

	skb = bt_skb_alloc(count, GFP_ATOMIC);
	if (!skb)
		return -ENOMEM;

	lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
	lh->len = cpu_to_le16(hlen - L2CAP_HDR_SIZE);
	lh->cid = cpu_to_le16(pi->dcid);
	put_unaligned_le16(control, skb_put(skb, 2));

	if (pi->fcs == L2CAP_FCS_CRC16) {
		/* FCS covers everything written so far (count - 2 bytes). */
		u16 fcs = crc16(0, (u8 *)lh, count - 2);
		put_unaligned_le16(fcs, skb_put(skb, 2));
	}

	return hci_send_acl(pi->conn->hcon, skb, 0);
}
368
369 static inline int l2cap_send_rr_or_rnr(struct l2cap_pinfo *pi, u16 control)
370 {
371 if (pi->conn_state & L2CAP_CONN_LOCAL_BUSY)
372 control |= L2CAP_SUPER_RCV_NOT_READY;
373 else
374 control |= L2CAP_SUPER_RCV_READY;
375
376 return l2cap_send_sframe(pi, control);
377 }
378
/* Begin establishment of an outgoing connection-oriented channel.
 * If the remote feature mask exchange has started, wait for it to
 * finish, then issue a Connection Request once security clears.
 * Otherwise kick off the Information Request first; the channel is
 * retried from l2cap_conn_start() when the response (or its timeout)
 * arrives. */
static void l2cap_do_start(struct sock *sk)
{
	struct l2cap_conn *conn = l2cap_pi(sk)->conn;

	if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) {
		/* Feature exchange in flight: wait for completion. */
		if (!(conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE))
			return;

		if (l2cap_check_security(sk)) {
			struct l2cap_conn_req req;
			req.scid = cpu_to_le16(l2cap_pi(sk)->scid);
			req.psm = l2cap_pi(sk)->psm;

			/* Remember the ident so the response can be matched
			 * back to this channel. */
			l2cap_pi(sk)->ident = l2cap_get_ident(conn);

			l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
					L2CAP_CONN_REQ, sizeof(req), &req);
		}
	} else {
		struct l2cap_info_req req;
		req.type = cpu_to_le16(L2CAP_IT_FEAT_MASK);

		conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
		conn->info_ident = l2cap_get_ident(conn);

		/* Bound the wait for the Information Response. */
		mod_timer(&conn->info_timer, jiffies +
					msecs_to_jiffies(L2CAP_INFO_TIMEOUT));

		l2cap_send_cmd(conn, conn->info_ident,
					L2CAP_INFO_REQ, sizeof(req), &req);
	}
}
411
412 static void l2cap_send_disconn_req(struct l2cap_conn *conn, struct sock *sk)
413 {
414 struct l2cap_disconn_req req;
415
416 req.dcid = cpu_to_le16(l2cap_pi(sk)->dcid);
417 req.scid = cpu_to_le16(l2cap_pi(sk)->scid);
418 l2cap_send_cmd(conn, l2cap_get_ident(conn),
419 L2CAP_DISCONN_REQ, sizeof(req), &req);
420 }
421
422 /* ---- L2CAP connections ---- */
/* Drive every connection-oriented channel on @conn forward once the
 * feature-mask exchange (or a security state change) completes:
 * channels in BT_CONNECT send their Connection Request, channels in
 * BT_CONNECT2 answer a pending incoming Connection Request. */
static void l2cap_conn_start(struct l2cap_conn *conn)
{
	struct l2cap_chan_list *l = &conn->chan_list;
	struct sock *sk;

	BT_DBG("conn %p", conn);

	read_lock(&l->lock);

	for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
		bh_lock_sock(sk);

		/* Raw and connectionless sockets need no signalling. */
		if (sk->sk_type != SOCK_SEQPACKET) {
			bh_unlock_sock(sk);
			continue;
		}

		if (sk->sk_state == BT_CONNECT) {
			/* Outgoing channel: send the Connection Request once
			 * security is satisfied. */
			if (l2cap_check_security(sk)) {
				struct l2cap_conn_req req;
				req.scid = cpu_to_le16(l2cap_pi(sk)->scid);
				req.psm = l2cap_pi(sk)->psm;

				l2cap_pi(sk)->ident = l2cap_get_ident(conn);

				l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
					L2CAP_CONN_REQ, sizeof(req), &req);
			}
		} else if (sk->sk_state == BT_CONNECT2) {
			/* Incoming channel awaiting our Connection Response. */
			struct l2cap_conn_rsp rsp;
			rsp.scid = cpu_to_le16(l2cap_pi(sk)->dcid);
			rsp.dcid = cpu_to_le16(l2cap_pi(sk)->scid);

			if (l2cap_check_security(sk)) {
				if (bt_sk(sk)->defer_setup) {
					/* Userspace must accept() first; answer
					 * "pending, authorization" and wake the
					 * listener. */
					struct sock *parent = bt_sk(sk)->parent;
					rsp.result = cpu_to_le16(L2CAP_CR_PEND);
					rsp.status = cpu_to_le16(L2CAP_CS_AUTHOR_PEND);
					parent->sk_data_ready(parent, 0);

				} else {
					sk->sk_state = BT_CONFIG;
					rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
					rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
				}
			} else {
				/* Authentication still in progress. */
				rsp.result = cpu_to_le16(L2CAP_CR_PEND);
				rsp.status = cpu_to_le16(L2CAP_CS_AUTHEN_PEND);
			}

			l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
					L2CAP_CONN_RSP, sizeof(rsp), &rsp);
		}

		bh_unlock_sock(sk);
	}

	read_unlock(&l->lock);
}
482
/* The ACL link came up: raw/connectionless sockets become connected
 * immediately, connection-oriented sockets in BT_CONNECT start the
 * L2CAP channel establishment. */
static void l2cap_conn_ready(struct l2cap_conn *conn)
{
	struct l2cap_chan_list *l = &conn->chan_list;
	struct sock *sk;

	BT_DBG("conn %p", conn);

	read_lock(&l->lock);

	for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
		bh_lock_sock(sk);

		if (sk->sk_type != SOCK_SEQPACKET) {
			/* No channel setup needed: connected as soon as the
			 * link is. */
			l2cap_sock_clear_timer(sk);
			sk->sk_state = BT_CONNECTED;
			sk->sk_state_change(sk);
		} else if (sk->sk_state == BT_CONNECT)
			l2cap_do_start(sk);

		bh_unlock_sock(sk);
	}

	read_unlock(&l->lock);
}
507
508 /* Notify sockets that we cannot guaranty reliability anymore */
509 static void l2cap_conn_unreliable(struct l2cap_conn *conn, int err)
510 {
511 struct l2cap_chan_list *l = &conn->chan_list;
512 struct sock *sk;
513
514 BT_DBG("conn %p", conn);
515
516 read_lock(&l->lock);
517
518 for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
519 if (l2cap_pi(sk)->force_reliable)
520 sk->sk_err = err;
521 }
522
523 read_unlock(&l->lock);
524 }
525
526 static void l2cap_info_timeout(unsigned long arg)
527 {
528 struct l2cap_conn *conn = (void *) arg;
529
530 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
531 conn->info_ident = 0;
532
533 l2cap_conn_start(conn);
534 }
535
/* Create the L2CAP-layer state for an ACL link, or return the
 * existing one.  When @status indicates a failed link (or state
 * already exists) the current value — possibly NULL — is returned
 * unchanged.  Returns NULL on allocation failure. */
static struct l2cap_conn *l2cap_conn_add(struct hci_conn *hcon, u8 status)
{
	struct l2cap_conn *conn = hcon->l2cap_data;

	if (conn || status)
		return conn;

	conn = kzalloc(sizeof(struct l2cap_conn), GFP_ATOMIC);
	if (!conn)
		return NULL;

	hcon->l2cap_data = conn;
	conn->hcon = hcon;

	BT_DBG("hcon %p conn %p", hcon, conn);

	conn->mtu = hcon->hdev->acl_mtu;
	conn->src = &hcon->hdev->bdaddr;
	conn->dst = &hcon->dst;

	conn->feat_mask = 0;

	spin_lock_init(&conn->lock);
	rwlock_init(&conn->chan_list.lock);

	/* Timer bounding the feature-mask Information exchange. */
	setup_timer(&conn->info_timer, l2cap_info_timeout,
						(unsigned long) conn);

	/* 0x13: "remote user terminated connection" default reason. */
	conn->disc_reason = 0x13;

	return conn;
}
568
/* Tear down the L2CAP state of an ACL link: kill every channel with
 * error @err, stop the info timer, and free the connection. */
static void l2cap_conn_del(struct hci_conn *hcon, int err)
{
	struct l2cap_conn *conn = hcon->l2cap_data;
	struct sock *sk;

	if (!conn)
		return;

	BT_DBG("hcon %p conn %p, err %d", hcon, conn, err);

	/* Drop any partially reassembled frame. */
	kfree_skb(conn->rx_skb);

	/* Kill channels */
	while ((sk = conn->chan_list.head)) {
		bh_lock_sock(sk);
		l2cap_chan_del(sk, err);	/* unlinks sk from the list */
		bh_unlock_sock(sk);
		l2cap_sock_kill(sk);
	}

	if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT)
		del_timer_sync(&conn->info_timer);

	hcon->l2cap_data = NULL;
	kfree(conn);
}
595
596 static inline void l2cap_chan_add(struct l2cap_conn *conn, struct sock *sk, struct sock *parent)
597 {
598 struct l2cap_chan_list *l = &conn->chan_list;
599 write_lock_bh(&l->lock);
600 __l2cap_chan_add(conn, sk, parent);
601 write_unlock_bh(&l->lock);
602 }
603
604 /* ---- Socket interface ---- */
605 static struct sock *__l2cap_get_sock_by_addr(__le16 psm, bdaddr_t *src)
606 {
607 struct sock *sk;
608 struct hlist_node *node;
609 sk_for_each(sk, node, &l2cap_sk_list.head)
610 if (l2cap_pi(sk)->sport == psm && !bacmp(&bt_sk(sk)->src, src))
611 goto found;
612 sk = NULL;
613 found:
614 return sk;
615 }
616
/* Find socket with psm and source bdaddr.
 * Returns closest match: an exact source-address match wins, else a
 * socket bound to BDADDR_ANY (wildcard).  @state of 0 matches any
 * socket state.  Caller holds the socket list lock. */
static struct sock *__l2cap_get_sock_by_psm(int state, __le16 psm, bdaddr_t *src)
{
	struct sock *sk = NULL, *sk1 = NULL;
	struct hlist_node *node;

	sk_for_each(sk, node, &l2cap_sk_list.head) {
		if (state && sk->sk_state != state)
			continue;

		if (l2cap_pi(sk)->psm == psm) {
			/* Exact match. */
			if (!bacmp(&bt_sk(sk)->src, src))
				break;

			/* Closest match */
			if (!bacmp(&bt_sk(sk)->src, BDADDR_ANY))
				sk1 = sk;
		}
	}
	/* node is non-NULL only when the loop broke on an exact match;
	 * otherwise fall back to the wildcard candidate (may be NULL). */
	return node ? sk : sk1;
}
641
642 /* Find socket with given address (psm, src).
643 * Returns locked socket */
644 static inline struct sock *l2cap_get_sock_by_psm(int state, __le16 psm, bdaddr_t *src)
645 {
646 struct sock *s;
647 read_lock(&l2cap_sk_list.lock);
648 s = __l2cap_get_sock_by_psm(state, psm, src);
649 if (s)
650 bh_lock_sock(s);
651 read_unlock(&l2cap_sk_list.lock);
652 return s;
653 }
654
655 static void l2cap_sock_destruct(struct sock *sk)
656 {
657 BT_DBG("sk %p", sk);
658
659 skb_queue_purge(&sk->sk_receive_queue);
660 skb_queue_purge(&sk->sk_write_queue);
661 }
662
663 static void l2cap_sock_cleanup_listen(struct sock *parent)
664 {
665 struct sock *sk;
666
667 BT_DBG("parent %p", parent);
668
669 /* Close not yet accepted channels */
670 while ((sk = bt_accept_dequeue(parent, NULL)))
671 l2cap_sock_close(sk);
672
673 parent->sk_state = BT_CLOSED;
674 sock_set_flag(parent, SOCK_ZAPPED);
675 }
676
677 /* Kill socket (only if zapped and orphan)
678 * Must be called on unlocked socket.
679 */
680 static void l2cap_sock_kill(struct sock *sk)
681 {
682 if (!sock_flag(sk, SOCK_ZAPPED) || sk->sk_socket)
683 return;
684
685 BT_DBG("sk %p state %d", sk, sk->sk_state);
686
687 /* Kill poor orphan */
688 bt_sock_unlink(&l2cap_sk_list, sk);
689 sock_set_flag(sk, SOCK_DEAD);
690 sock_put(sk);
691 }
692
/* Close a socket according to its current state.  Caller holds the
 * socket lock.  Connected SEQPACKET channels start a graceful L2CAP
 * disconnect (completed when the response arrives or the timer
 * fires); everything else is torn down immediately. */
static void __l2cap_sock_close(struct sock *sk, int reason)
{
	BT_DBG("sk %p state %d socket %p", sk, sk->sk_state, sk->sk_socket);

	switch (sk->sk_state) {
	case BT_LISTEN:
		l2cap_sock_cleanup_listen(sk);
		break;

	case BT_CONNECTED:
	case BT_CONFIG:
		if (sk->sk_type == SOCK_SEQPACKET) {
			struct l2cap_conn *conn = l2cap_pi(sk)->conn;

			/* Graceful disconnect: wait (bounded by sndtimeo) for
			 * the Disconnection Response. */
			sk->sk_state = BT_DISCONN;
			l2cap_sock_set_timer(sk, sk->sk_sndtimeo);
			l2cap_send_disconn_req(conn, sk);
		} else
			l2cap_chan_del(sk, reason);
		break;

	case BT_CONNECT2:
		if (sk->sk_type == SOCK_SEQPACKET) {
			/* Incoming connection we never answered: reject it. */
			struct l2cap_conn *conn = l2cap_pi(sk)->conn;
			struct l2cap_conn_rsp rsp;
			__u16 result;

			if (bt_sk(sk)->defer_setup)
				result = L2CAP_CR_SEC_BLOCK;
			else
				result = L2CAP_CR_BAD_PSM;

			rsp.scid = cpu_to_le16(l2cap_pi(sk)->dcid);
			rsp.dcid = cpu_to_le16(l2cap_pi(sk)->scid);
			rsp.result = cpu_to_le16(result);
			rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
			l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
					L2CAP_CONN_RSP, sizeof(rsp), &rsp);
		} else
			l2cap_chan_del(sk, reason);
		break;

	case BT_CONNECT:
	case BT_DISCONN:
		l2cap_chan_del(sk, reason);
		break;

	default:
		/* BT_OPEN/BT_BOUND etc.: nothing on the wire to undo. */
		sock_set_flag(sk, SOCK_ZAPPED);
		break;
	}
}
745
746 /* Must be called on unlocked socket. */
747 static void l2cap_sock_close(struct sock *sk)
748 {
749 l2cap_sock_clear_timer(sk);
750 lock_sock(sk);
751 __l2cap_sock_close(sk, ECONNRESET);
752 release_sock(sk);
753 l2cap_sock_kill(sk);
754 }
755
/* Initialise the L2CAP-specific part of a new socket.  A child of a
 * listening socket inherits its parent's settings; a fresh socket
 * gets the defaults (Basic mode, default MTU, low security). */
static void l2cap_sock_init(struct sock *sk, struct sock *parent)
{
	struct l2cap_pinfo *pi = l2cap_pi(sk);

	BT_DBG("sk %p", sk);

	if (parent) {
		sk->sk_type = parent->sk_type;
		bt_sk(sk)->defer_setup = bt_sk(parent)->defer_setup;

		pi->imtu = l2cap_pi(parent)->imtu;
		pi->omtu = l2cap_pi(parent)->omtu;
		pi->mode = l2cap_pi(parent)->mode;
		pi->fcs = l2cap_pi(parent)->fcs;
		pi->sec_level = l2cap_pi(parent)->sec_level;
		pi->role_switch = l2cap_pi(parent)->role_switch;
		pi->force_reliable = l2cap_pi(parent)->force_reliable;
	} else {
		pi->imtu = L2CAP_DEFAULT_MTU;
		pi->omtu = 0;	/* negotiated during configuration */
		pi->mode = L2CAP_MODE_BASIC;
		pi->fcs = L2CAP_FCS_CRC16;
		pi->sec_level = BT_SECURITY_LOW;
		pi->role_switch = 0;
		pi->force_reliable = 0;
	}

	/* Default config options */
	pi->conf_len = 0;
	pi->flush_to = L2CAP_DEFAULT_FLUSH_TO;
	skb_queue_head_init(TX_QUEUE(sk));
	skb_queue_head_init(SREJ_QUEUE(sk));
	INIT_LIST_HEAD(SREJ_LIST(sk));
}
790
/* Protocol descriptor: obj_size makes sk_alloc() reserve room for
 * struct l2cap_pinfo in every L2CAP socket. */
static struct proto l2cap_proto = {
	.name = "L2CAP",
	.owner = THIS_MODULE,
	.obj_size = sizeof(struct l2cap_pinfo)
};
796
/* Allocate and initialise a new L2CAP socket, link it into the global
 * socket list, and arm its timer callback.  Returns NULL on
 * allocation failure. */
static struct sock *l2cap_sock_alloc(struct net *net, struct socket *sock, int proto, gfp_t prio)
{
	struct sock *sk;

	sk = sk_alloc(net, PF_BLUETOOTH, prio, &l2cap_proto);
	if (!sk)
		return NULL;

	sock_init_data(sock, sk);
	INIT_LIST_HEAD(&bt_sk(sk)->accept_q);

	sk->sk_destruct = l2cap_sock_destruct;
	/* Default send timeout doubles as the connect/disconnect timeout
	 * (see l2cap_sock_set_timer() call sites). */
	sk->sk_sndtimeo = msecs_to_jiffies(L2CAP_CONN_TIMEOUT);

	sock_reset_flag(sk, SOCK_ZAPPED);

	sk->sk_protocol = proto;
	sk->sk_state = BT_OPEN;

	setup_timer(&sk->sk_timer, l2cap_sock_timeout, (unsigned long) sk);

	bt_sock_link(&l2cap_sk_list, sk);
	return sk;
}
821
822 static int l2cap_sock_create(struct net *net, struct socket *sock, int protocol)
823 {
824 struct sock *sk;
825
826 BT_DBG("sock %p", sock);
827
828 sock->state = SS_UNCONNECTED;
829
830 if (sock->type != SOCK_SEQPACKET &&
831 sock->type != SOCK_DGRAM && sock->type != SOCK_RAW)
832 return -ESOCKTNOSUPPORT;
833
834 if (sock->type == SOCK_RAW && !capable(CAP_NET_RAW))
835 return -EPERM;
836
837 sock->ops = &l2cap_sock_ops;
838
839 sk = l2cap_sock_alloc(net, sock, protocol, GFP_ATOMIC);
840 if (!sk)
841 return -ENOMEM;
842
843 l2cap_sock_init(sk, NULL);
844 return 0;
845 }
846
/* bind() handler: record the source address and PSM.  PSMs below
 * 0x1001 are reserved and require CAP_NET_BIND_SERVICE; a (psm, src)
 * pair may only be bound once. */
static int l2cap_sock_bind(struct socket *sock, struct sockaddr *addr, int alen)
{
	struct sock *sk = sock->sk;
	struct sockaddr_l2 la;
	int len, err = 0;

	BT_DBG("sk %p", sk);

	if (!addr || addr->sa_family != AF_BLUETOOTH)
		return -EINVAL;

	/* Copy at most sizeof(la); shorter (legacy) sockaddrs leave the
	 * trailing fields zeroed. */
	memset(&la, 0, sizeof(la));
	len = min_t(unsigned int, sizeof(la), alen);
	memcpy(&la, addr, len);

	/* Binding to a fixed CID is not supported here. */
	if (la.l2_cid)
		return -EINVAL;

	lock_sock(sk);

	if (sk->sk_state != BT_OPEN) {
		err = -EBADFD;
		goto done;
	}

	if (la.l2_psm && __le16_to_cpu(la.l2_psm) < 0x1001 &&
				!capable(CAP_NET_BIND_SERVICE)) {
		err = -EACCES;
		goto done;
	}

	write_lock_bh(&l2cap_sk_list.lock);

	if (la.l2_psm && __l2cap_get_sock_by_addr(la.l2_psm, &la.l2_bdaddr)) {
		err = -EADDRINUSE;
	} else {
		/* Save source address */
		bacpy(&bt_sk(sk)->src, &la.l2_bdaddr);
		l2cap_pi(sk)->psm = la.l2_psm;
		l2cap_pi(sk)->sport = la.l2_psm;
		sk->sk_state = BT_BOUND;

		/* SDP (0x0001) and RFCOMM (0x0003) are exempt from the
		 * usual security requirements. */
		if (__le16_to_cpu(la.l2_psm) == 0x0001 ||
					__le16_to_cpu(la.l2_psm) == 0x0003)
			l2cap_pi(sk)->sec_level = BT_SECURITY_SDP;
	}

	write_unlock_bh(&l2cap_sk_list.lock);

done:
	release_sock(sk);
	return err;
}
900
/* Establish (or join) the ACL link towards the socket's destination
 * and attach the channel to it.  The authentication requirement is
 * derived from socket type, PSM and security level.  On return the
 * socket is in BT_CONNECT (or BT_CONNECTED for non-SEQPACKET types
 * when the link was already up). */
static int l2cap_do_connect(struct sock *sk)
{
	bdaddr_t *src = &bt_sk(sk)->src;
	bdaddr_t *dst = &bt_sk(sk)->dst;
	struct l2cap_conn *conn;
	struct hci_conn *hcon;
	struct hci_dev *hdev;
	__u8 auth_type;
	int err;

	BT_DBG("%s -> %s psm 0x%2.2x", batostr(src), batostr(dst),
							l2cap_pi(sk)->psm);

	hdev = hci_get_route(dst, src);
	if (!hdev)
		return -EHOSTUNREACH;

	hci_dev_lock_bh(hdev);

	/* NOTE(review): -ENOMEM is also returned when hci_connect()
	 * fails — not necessarily an allocation failure. */
	err = -ENOMEM;

	if (sk->sk_type == SOCK_RAW) {
		/* Raw sockets drive dedicated bonding (pairing tools). */
		switch (l2cap_pi(sk)->sec_level) {
		case BT_SECURITY_HIGH:
			auth_type = HCI_AT_DEDICATED_BONDING_MITM;
			break;
		case BT_SECURITY_MEDIUM:
			auth_type = HCI_AT_DEDICATED_BONDING;
			break;
		default:
			auth_type = HCI_AT_NO_BONDING;
			break;
		}
	} else if (l2cap_pi(sk)->psm == cpu_to_le16(0x0001)) {
		/* SDP: never bond; MITM protection only for HIGH. */
		if (l2cap_pi(sk)->sec_level == BT_SECURITY_HIGH)
			auth_type = HCI_AT_NO_BONDING_MITM;
		else
			auth_type = HCI_AT_NO_BONDING;

		if (l2cap_pi(sk)->sec_level == BT_SECURITY_LOW)
			l2cap_pi(sk)->sec_level = BT_SECURITY_SDP;
	} else {
		switch (l2cap_pi(sk)->sec_level) {
		case BT_SECURITY_HIGH:
			auth_type = HCI_AT_GENERAL_BONDING_MITM;
			break;
		case BT_SECURITY_MEDIUM:
			auth_type = HCI_AT_GENERAL_BONDING;
			break;
		default:
			auth_type = HCI_AT_NO_BONDING;
			break;
		}
	}

	hcon = hci_connect(hdev, ACL_LINK, dst,
					l2cap_pi(sk)->sec_level, auth_type);
	if (!hcon)
		goto done;

	conn = l2cap_conn_add(hcon, 0);
	if (!conn) {
		hci_conn_put(hcon);
		goto done;
	}

	err = 0;

	/* Update source addr of the socket */
	bacpy(src, conn->src);

	l2cap_chan_add(conn, sk, NULL);

	sk->sk_state = BT_CONNECT;
	l2cap_sock_set_timer(sk, sk->sk_sndtimeo);

	if (hcon->state == BT_CONNECTED) {
		/* Link already up: non-SEQPACKET sockets are done,
		 * SEQPACKET ones start channel signalling now. */
		if (sk->sk_type != SOCK_SEQPACKET) {
			l2cap_sock_clear_timer(sk);
			sk->sk_state = BT_CONNECTED;
		} else
			l2cap_do_start(sk);
	}

done:
	hci_dev_unlock_bh(hdev);
	hci_dev_put(hdev);
	return err;
}
990
/* connect() handler: validate the target address and channel mode,
 * start the connection, then (unless non-blocking) wait for
 * BT_CONNECTED. */
static int l2cap_sock_connect(struct socket *sock, struct sockaddr *addr, int alen, int flags)
{
	struct sock *sk = sock->sk;
	struct sockaddr_l2 la;
	int len, err = 0;

	BT_DBG("sk %p", sk);

	if (!addr || addr->sa_family != AF_BLUETOOTH)
		return -EINVAL;

	memset(&la, 0, sizeof(la));
	len = min_t(unsigned int, sizeof(la), alen);
	memcpy(&la, addr, len);

	/* Connecting to a fixed CID is not supported here. */
	if (la.l2_cid)
		return -EINVAL;

	lock_sock(sk);

	/* Connection-oriented sockets need a PSM to connect to. */
	if (sk->sk_type == SOCK_SEQPACKET && !la.l2_psm) {
		err = -EINVAL;
		goto done;
	}

	/* ERTM/streaming are gated behind the enable_ertm switch. */
	switch (l2cap_pi(sk)->mode) {
	case L2CAP_MODE_BASIC:
		break;
	case L2CAP_MODE_ERTM:
	case L2CAP_MODE_STREAMING:
		if (enable_ertm)
			break;
		/* fall through */
	default:
		err = -ENOTSUPP;
		goto done;
	}

	switch (sk->sk_state) {
	case BT_CONNECT:
	case BT_CONNECT2:
	case BT_CONFIG:
		/* Already connecting */
		goto wait;

	case BT_CONNECTED:
		/* Already connected */
		goto done;

	case BT_OPEN:
	case BT_BOUND:
		/* Can connect */
		break;

	default:
		err = -EBADFD;
		goto done;
	}

	/* Set destination address and psm */
	bacpy(&bt_sk(sk)->dst, &la.l2_bdaddr);
	l2cap_pi(sk)->psm = la.l2_psm;

	err = l2cap_do_connect(sk);
	if (err)
		goto done;

wait:
	/* Block (subject to O_NONBLOCK/sndtimeo) until connected. */
	err = bt_sock_wait_state(sk, BT_CONNECTED,
			sock_sndtimeo(sk, flags & O_NONBLOCK));
done:
	release_sock(sk);
	return err;
}
1065
/* listen() handler: only bound SEQPACKET sockets may listen.  A
 * socket bound without a PSM is auto-assigned the first free odd PSM
 * in the dynamic range 0x1001-0x10ff. */
static int l2cap_sock_listen(struct socket *sock, int backlog)
{
	struct sock *sk = sock->sk;
	int err = 0;

	BT_DBG("sk %p backlog %d", sk, backlog);

	lock_sock(sk);

	if (sk->sk_state != BT_BOUND || sock->type != SOCK_SEQPACKET) {
		err = -EBADFD;
		goto done;
	}

	/* ERTM/streaming are gated behind the enable_ertm switch. */
	switch (l2cap_pi(sk)->mode) {
	case L2CAP_MODE_BASIC:
		break;
	case L2CAP_MODE_ERTM:
	case L2CAP_MODE_STREAMING:
		if (enable_ertm)
			break;
		/* fall through */
	default:
		err = -ENOTSUPP;
		goto done;
	}

	if (!l2cap_pi(sk)->psm) {
		bdaddr_t *src = &bt_sk(sk)->src;
		u16 psm;

		err = -EINVAL;

		write_lock_bh(&l2cap_sk_list.lock);

		/* Valid dynamic PSMs are odd (LSB of the low octet set). */
		for (psm = 0x1001; psm < 0x1100; psm += 2)
			if (!__l2cap_get_sock_by_addr(cpu_to_le16(psm), src)) {
				l2cap_pi(sk)->psm = cpu_to_le16(psm);
				l2cap_pi(sk)->sport = cpu_to_le16(psm);
				err = 0;
				break;
			}

		write_unlock_bh(&l2cap_sk_list.lock);

		if (err < 0)
			goto done;
	}

	sk->sk_max_ack_backlog = backlog;
	sk->sk_ack_backlog = 0;
	sk->sk_state = BT_LISTEN;

done:
	release_sock(sk);
	return err;
}
1123
/* accept() handler: sleep (interruptibly, bounded by rcvtimeo /
 * O_NONBLOCK) until a fully established child socket can be dequeued
 * from the listening socket's accept queue. */
static int l2cap_sock_accept(struct socket *sock, struct socket *newsock, int flags)
{
	DECLARE_WAITQUEUE(wait, current);
	struct sock *sk = sock->sk, *nsk;
	long timeo;
	int err = 0;

	/* Nested class: the child socket is locked while the parent lock
	 * is held elsewhere; avoids false lockdep reports. */
	lock_sock_nested(sk, SINGLE_DEPTH_NESTING);

	if (sk->sk_state != BT_LISTEN) {
		err = -EBADFD;
		goto done;
	}

	timeo = sock_rcvtimeo(sk, flags & O_NONBLOCK);

	BT_DBG("sk %p timeo %ld", sk, timeo);

	/* Wait for an incoming connection. (wake-one). */
	add_wait_queue_exclusive(sk->sk_sleep, &wait);
	while (!(nsk = bt_accept_dequeue(sk, newsock))) {
		set_current_state(TASK_INTERRUPTIBLE);
		if (!timeo) {
			err = -EAGAIN;
			break;
		}

		/* Drop the lock while sleeping so the softirq side can
		 * enqueue new connections. */
		release_sock(sk);
		timeo = schedule_timeout(timeo);
		lock_sock_nested(sk, SINGLE_DEPTH_NESTING);

		/* Listener may have been closed while we slept. */
		if (sk->sk_state != BT_LISTEN) {
			err = -EBADFD;
			break;
		}

		if (signal_pending(current)) {
			err = sock_intr_errno(timeo);
			break;
		}
	}
	set_current_state(TASK_RUNNING);
	remove_wait_queue(sk->sk_sleep, &wait);

	if (err)
		goto done;

	newsock->state = SS_CONNECTED;

	BT_DBG("new socket %p", nsk);

done:
	release_sock(sk);
	return err;
}
1179
1180 static int l2cap_sock_getname(struct socket *sock, struct sockaddr *addr, int *len, int peer)
1181 {
1182 struct sockaddr_l2 *la = (struct sockaddr_l2 *) addr;
1183 struct sock *sk = sock->sk;
1184
1185 BT_DBG("sock %p, sk %p", sock, sk);
1186
1187 addr->sa_family = AF_BLUETOOTH;
1188 *len = sizeof(struct sockaddr_l2);
1189
1190 if (peer) {
1191 la->l2_psm = l2cap_pi(sk)->psm;
1192 bacpy(&la->l2_bdaddr, &bt_sk(sk)->dst);
1193 la->l2_cid = cpu_to_le16(l2cap_pi(sk)->dcid);
1194 } else {
1195 la->l2_psm = l2cap_pi(sk)->sport;
1196 bacpy(&la->l2_bdaddr, &bt_sk(sk)->src);
1197 la->l2_cid = cpu_to_le16(l2cap_pi(sk)->scid);
1198 }
1199
1200 return 0;
1201 }
1202
1203 static void l2cap_monitor_timeout(unsigned long arg)
1204 {
1205 struct sock *sk = (void *) arg;
1206 u16 control;
1207
1208 bh_lock_sock(sk);
1209 if (l2cap_pi(sk)->retry_count >= l2cap_pi(sk)->remote_max_tx) {
1210 l2cap_send_disconn_req(l2cap_pi(sk)->conn, sk);
1211 return;
1212 }
1213
1214 l2cap_pi(sk)->retry_count++;
1215 __mod_monitor_timer();
1216
1217 control = L2CAP_CTRL_POLL;
1218 l2cap_send_rr_or_rnr(l2cap_pi(sk), control);
1219 bh_unlock_sock(sk);
1220 }
1221
1222 static void l2cap_retrans_timeout(unsigned long arg)
1223 {
1224 struct sock *sk = (void *) arg;
1225 u16 control;
1226
1227 bh_lock_sock(sk);
1228 l2cap_pi(sk)->retry_count = 1;
1229 __mod_monitor_timer();
1230
1231 l2cap_pi(sk)->conn_state |= L2CAP_CONN_WAIT_F;
1232
1233 control = L2CAP_CTRL_POLL;
1234 l2cap_send_rr_or_rnr(l2cap_pi(sk), control);
1235 bh_unlock_sock(sk);
1236 }
1237
1238 static void l2cap_drop_acked_frames(struct sock *sk)
1239 {
1240 struct sk_buff *skb;
1241
1242 while ((skb = skb_peek(TX_QUEUE(sk)))) {
1243 if (bt_cb(skb)->tx_seq == l2cap_pi(sk)->expected_ack_seq)
1244 break;
1245
1246 skb = skb_dequeue(TX_QUEUE(sk));
1247 kfree_skb(skb);
1248
1249 l2cap_pi(sk)->unacked_frames--;
1250 }
1251
1252 if (!l2cap_pi(sk)->unacked_frames)
1253 del_timer(&l2cap_pi(sk)->retrans_timer);
1254
1255 return;
1256 }
1257
1258 static inline int l2cap_do_send(struct sock *sk, struct sk_buff *skb)
1259 {
1260 struct l2cap_pinfo *pi = l2cap_pi(sk);
1261 int err;
1262
1263 BT_DBG("sk %p, skb %p len %d", sk, skb, skb->len);
1264
1265 err = hci_send_acl(pi->conn->hcon, skb, 0);
1266 if (err < 0)
1267 kfree_skb(skb);
1268
1269 return err;
1270 }
1271
1272 static int l2cap_streaming_send(struct sock *sk)
1273 {
1274 struct sk_buff *skb, *tx_skb;
1275 struct l2cap_pinfo *pi = l2cap_pi(sk);
1276 u16 control, fcs;
1277 int err;
1278
1279 while ((skb = sk->sk_send_head)) {
1280 tx_skb = skb_clone(skb, GFP_ATOMIC);
1281
1282 control = get_unaligned_le16(tx_skb->data + L2CAP_HDR_SIZE);
1283 control |= pi->next_tx_seq << L2CAP_CTRL_TXSEQ_SHIFT;
1284 put_unaligned_le16(control, tx_skb->data + L2CAP_HDR_SIZE);
1285
1286 if (l2cap_pi(sk)->fcs == L2CAP_FCS_CRC16) {
1287 fcs = crc16(0, (u8 *)tx_skb->data, tx_skb->len - 2);
1288 put_unaligned_le16(fcs, tx_skb->data + tx_skb->len - 2);
1289 }
1290
1291 err = l2cap_do_send(sk, tx_skb);
1292 if (err < 0) {
1293 l2cap_send_disconn_req(pi->conn, sk);
1294 return err;
1295 }
1296
1297 pi->next_tx_seq = (pi->next_tx_seq + 1) % 64;
1298
1299 if (skb_queue_is_last(TX_QUEUE(sk), skb))
1300 sk->sk_send_head = NULL;
1301 else
1302 sk->sk_send_head = skb_queue_next(TX_QUEUE(sk), skb);
1303
1304 skb = skb_dequeue(TX_QUEUE(sk));
1305 kfree_skb(skb);
1306 }
1307 return 0;
1308 }
1309
1310 static int l2cap_retransmit_frame(struct sock *sk, u8 tx_seq)
1311 {
1312 struct l2cap_pinfo *pi = l2cap_pi(sk);
1313 struct sk_buff *skb, *tx_skb;
1314 u16 control, fcs;
1315 int err;
1316
1317 skb = skb_peek(TX_QUEUE(sk));
1318 do {
1319 if (bt_cb(skb)->tx_seq != tx_seq) {
1320 if (skb_queue_is_last(TX_QUEUE(sk), skb))
1321 break;
1322 skb = skb_queue_next(TX_QUEUE(sk), skb);
1323 continue;
1324 }
1325
1326 if (pi->remote_max_tx &&
1327 bt_cb(skb)->retries == pi->remote_max_tx) {
1328 l2cap_send_disconn_req(pi->conn, sk);
1329 break;
1330 }
1331
1332 tx_skb = skb_clone(skb, GFP_ATOMIC);
1333 bt_cb(skb)->retries++;
1334 control = get_unaligned_le16(tx_skb->data + L2CAP_HDR_SIZE);
1335 control |= (pi->req_seq << L2CAP_CTRL_REQSEQ_SHIFT)
1336 | (tx_seq << L2CAP_CTRL_TXSEQ_SHIFT);
1337 put_unaligned_le16(control, tx_skb->data + L2CAP_HDR_SIZE);
1338
1339 if (l2cap_pi(sk)->fcs == L2CAP_FCS_CRC16) {
1340 fcs = crc16(0, (u8 *)tx_skb->data, tx_skb->len - 2);
1341 put_unaligned_le16(fcs, tx_skb->data + tx_skb->len - 2);
1342 }
1343
1344 err = l2cap_do_send(sk, tx_skb);
1345 if (err < 0) {
1346 l2cap_send_disconn_req(pi->conn, sk);
1347 return err;
1348 }
1349 break;
1350 } while(1);
1351 return 0;
1352 }
1353
1354 static int l2cap_ertm_send(struct sock *sk)
1355 {
1356 struct sk_buff *skb, *tx_skb;
1357 struct l2cap_pinfo *pi = l2cap_pi(sk);
1358 u16 control, fcs;
1359 int err;
1360
1361 if (pi->conn_state & L2CAP_CONN_WAIT_F)
1362 return 0;
1363
1364 while ((skb = sk->sk_send_head) && (!l2cap_tx_window_full(sk))
1365 && !(pi->conn_state & L2CAP_CONN_REMOTE_BUSY)) {
1366 tx_skb = skb_clone(skb, GFP_ATOMIC);
1367
1368 if (pi->remote_max_tx &&
1369 bt_cb(skb)->retries == pi->remote_max_tx) {
1370 l2cap_send_disconn_req(pi->conn, sk);
1371 break;
1372 }
1373
1374 bt_cb(skb)->retries++;
1375
1376 control = get_unaligned_le16(tx_skb->data + L2CAP_HDR_SIZE);
1377 control |= (pi->req_seq << L2CAP_CTRL_REQSEQ_SHIFT)
1378 | (pi->next_tx_seq << L2CAP_CTRL_TXSEQ_SHIFT);
1379 put_unaligned_le16(control, tx_skb->data + L2CAP_HDR_SIZE);
1380
1381
1382 if (l2cap_pi(sk)->fcs == L2CAP_FCS_CRC16) {
1383 fcs = crc16(0, (u8 *)skb->data, tx_skb->len - 2);
1384 put_unaligned_le16(fcs, skb->data + tx_skb->len - 2);
1385 }
1386
1387 err = l2cap_do_send(sk, tx_skb);
1388 if (err < 0) {
1389 l2cap_send_disconn_req(pi->conn, sk);
1390 return err;
1391 }
1392 __mod_retrans_timer();
1393
1394 bt_cb(skb)->tx_seq = pi->next_tx_seq;
1395 pi->next_tx_seq = (pi->next_tx_seq + 1) % 64;
1396
1397 pi->unacked_frames++;
1398
1399 if (skb_queue_is_last(TX_QUEUE(sk), skb))
1400 sk->sk_send_head = NULL;
1401 else
1402 sk->sk_send_head = skb_queue_next(TX_QUEUE(sk), skb);
1403 }
1404
1405 return 0;
1406 }
1407
1408 static inline int l2cap_skbuff_fromiovec(struct sock *sk, struct msghdr *msg, int len, int count, struct sk_buff *skb)
1409 {
1410 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
1411 struct sk_buff **frag;
1412 int err, sent = 0;
1413
1414 if (memcpy_fromiovec(skb_put(skb, count), msg->msg_iov, count)) {
1415 return -EFAULT;
1416 }
1417
1418 sent += count;
1419 len -= count;
1420
1421 /* Continuation fragments (no L2CAP header) */
1422 frag = &skb_shinfo(skb)->frag_list;
1423 while (len) {
1424 count = min_t(unsigned int, conn->mtu, len);
1425
1426 *frag = bt_skb_send_alloc(sk, count, msg->msg_flags & MSG_DONTWAIT, &err);
1427 if (!*frag)
1428 return -EFAULT;
1429 if (memcpy_fromiovec(skb_put(*frag, count), msg->msg_iov, count))
1430 return -EFAULT;
1431
1432 sent += count;
1433 len -= count;
1434
1435 frag = &(*frag)->next;
1436 }
1437
1438 return sent;
1439 }
1440
/* Build a connectionless (SOCK_DGRAM) PDU: L2CAP header plus a 2-byte
 * PSM field, then the user payload copied from msg.  Returns the skb or
 * an ERR_PTR on allocation/copy failure.
 */
static struct sk_buff *l2cap_create_connless_pdu(struct sock *sk, struct msghdr *msg, size_t len)
{
	struct l2cap_conn *conn = l2cap_pi(sk)->conn;
	struct sk_buff *skb;
	int err, count, hlen = L2CAP_HDR_SIZE + 2;	/* header + PSM */
	struct l2cap_hdr *lh;

	BT_DBG("sk %p len %d", sk, (int)len);

	/* Head skb carries the header; excess payload is chained as
	 * fragments by l2cap_skbuff_fromiovec(). */
	count = min_t(unsigned int, (conn->mtu - hlen), len);
	skb = bt_skb_send_alloc(sk, count + hlen,
			msg->msg_flags & MSG_DONTWAIT, &err);
	if (!skb)
		return ERR_PTR(-ENOMEM);

	/* Create L2CAP header */
	lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
	lh->cid = cpu_to_le16(l2cap_pi(sk)->dcid);
	/* lh->len counts payload plus the PSM field, not the base header */
	lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
	put_unaligned_le16(l2cap_pi(sk)->psm, skb_put(skb, 2));

	err = l2cap_skbuff_fromiovec(sk, msg, len, count, skb);
	if (unlikely(err < 0)) {
		kfree_skb(skb);
		return ERR_PTR(err);
	}
	return skb;
}
1469
1470 static struct sk_buff *l2cap_create_basic_pdu(struct sock *sk, struct msghdr *msg, size_t len)
1471 {
1472 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
1473 struct sk_buff *skb;
1474 int err, count, hlen = L2CAP_HDR_SIZE;
1475 struct l2cap_hdr *lh;
1476
1477 BT_DBG("sk %p len %d", sk, (int)len);
1478
1479 count = min_t(unsigned int, (conn->mtu - hlen), len);
1480 skb = bt_skb_send_alloc(sk, count + hlen,
1481 msg->msg_flags & MSG_DONTWAIT, &err);
1482 if (!skb)
1483 return ERR_PTR(-ENOMEM);
1484
1485 /* Create L2CAP header */
1486 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
1487 lh->cid = cpu_to_le16(l2cap_pi(sk)->dcid);
1488 lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
1489
1490 err = l2cap_skbuff_fromiovec(sk, msg, len, count, skb);
1491 if (unlikely(err < 0)) {
1492 kfree_skb(skb);
1493 return ERR_PTR(err);
1494 }
1495 return skb;
1496 }
1497
/* Build an ERTM/Streaming I-frame PDU: L2CAP header, 16-bit control
 * field, optional 2-byte SDU length (first PDU of a segmented SDU), the
 * payload, and optionally a 2-byte FCS placeholder (the real FCS is
 * stamped at transmit time).  Returns the skb or an ERR_PTR.
 */
static struct sk_buff *l2cap_create_iframe_pdu(struct sock *sk, struct msghdr *msg, size_t len, u16 control, u16 sdulen)
{
	struct l2cap_conn *conn = l2cap_pi(sk)->conn;
	struct sk_buff *skb;
	int err, count, hlen = L2CAP_HDR_SIZE + 2;	/* header + control */
	struct l2cap_hdr *lh;

	BT_DBG("sk %p len %d", sk, (int)len);

	/* SAR start PDUs carry the total SDU length */
	if (sdulen)
		hlen += 2;

	/* Reserve room for the FCS trailer */
	if (l2cap_pi(sk)->fcs == L2CAP_FCS_CRC16)
		hlen += 2;

	count = min_t(unsigned int, (conn->mtu - hlen), len);
	skb = bt_skb_send_alloc(sk, count + hlen,
			msg->msg_flags & MSG_DONTWAIT, &err);
	if (!skb)
		return ERR_PTR(-ENOMEM);

	/* Create L2CAP header */
	lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
	lh->cid = cpu_to_le16(l2cap_pi(sk)->dcid);
	lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
	put_unaligned_le16(control, skb_put(skb, 2));
	if (sdulen)
		put_unaligned_le16(sdulen, skb_put(skb, 2));

	err = l2cap_skbuff_fromiovec(sk, msg, len, count, skb);
	if (unlikely(err < 0)) {
		kfree_skb(skb);
		return ERR_PTR(err);
	}

	/* Zero placeholder; the send path overwrites it with the CRC */
	if (l2cap_pi(sk)->fcs == L2CAP_FCS_CRC16)
		put_unaligned_le16(0, skb_put(skb, 2));

	bt_cb(skb)->retries = 0;
	return skb;
}
1539
/* Segment an SDU larger than max_pdu_size into a START PDU followed by
 * CONTINUE PDUs and a final END PDU, building them on a private queue
 * first so a mid-stream failure leaves TX_QUEUE untouched.  On success
 * the whole batch is spliced onto TX_QUEUE and the byte count returned.
 * Callers only reach this when len > pi->max_pdu_size.
 */
static inline int l2cap_sar_segment_sdu(struct sock *sk, struct msghdr *msg, size_t len)
{
	struct l2cap_pinfo *pi = l2cap_pi(sk);
	struct sk_buff *skb;
	struct sk_buff_head sar_queue;
	u16 control;
	size_t size = 0;

	__skb_queue_head_init(&sar_queue);
	/* First PDU: SAR=start, carries the total SDU length */
	control = L2CAP_SDU_START;
	skb = l2cap_create_iframe_pdu(sk, msg, pi->max_pdu_size, control, len);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	__skb_queue_tail(&sar_queue, skb);
	len -= pi->max_pdu_size;
	size += pi->max_pdu_size;
	control = 0;

	while (len > 0) {
		size_t buflen;

		if (len > pi->max_pdu_size) {
			control |= L2CAP_SDU_CONTINUE;
			buflen = pi->max_pdu_size;
		} else {
			control |= L2CAP_SDU_END;
			buflen = len;
		}

		skb = l2cap_create_iframe_pdu(sk, msg, buflen, control, 0);
		if (IS_ERR(skb)) {
			/* Drop the partial batch; nothing was queued yet */
			skb_queue_purge(&sar_queue);
			return PTR_ERR(skb);
		}

		__skb_queue_tail(&sar_queue, skb);
		len -= buflen;
		size += buflen;
		control = 0;
	}
	skb_queue_splice_tail(&sar_queue, TX_QUEUE(sk));
	if (sk->sk_send_head == NULL)
		sk->sk_send_head = sar_queue.next;

	return size;
}
1587
1588 static int l2cap_sock_sendmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg, size_t len)
1589 {
1590 struct sock *sk = sock->sk;
1591 struct l2cap_pinfo *pi = l2cap_pi(sk);
1592 struct sk_buff *skb;
1593 u16 control;
1594 int err;
1595
1596 BT_DBG("sock %p, sk %p", sock, sk);
1597
1598 err = sock_error(sk);
1599 if (err)
1600 return err;
1601
1602 if (msg->msg_flags & MSG_OOB)
1603 return -EOPNOTSUPP;
1604
1605 /* Check outgoing MTU */
1606 if (sk->sk_type == SOCK_SEQPACKET && pi->mode == L2CAP_MODE_BASIC
1607 && len > pi->omtu)
1608 return -EINVAL;
1609
1610 lock_sock(sk);
1611
1612 if (sk->sk_state != BT_CONNECTED) {
1613 err = -ENOTCONN;
1614 goto done;
1615 }
1616
1617 /* Connectionless channel */
1618 if (sk->sk_type == SOCK_DGRAM) {
1619 skb = l2cap_create_connless_pdu(sk, msg, len);
1620 err = l2cap_do_send(sk, skb);
1621 goto done;
1622 }
1623
1624 switch (pi->mode) {
1625 case L2CAP_MODE_BASIC:
1626 /* Create a basic PDU */
1627 skb = l2cap_create_basic_pdu(sk, msg, len);
1628 if (IS_ERR(skb)) {
1629 err = PTR_ERR(skb);
1630 goto done;
1631 }
1632
1633 err = l2cap_do_send(sk, skb);
1634 if (!err)
1635 err = len;
1636 break;
1637
1638 case L2CAP_MODE_ERTM:
1639 case L2CAP_MODE_STREAMING:
1640 /* Entire SDU fits into one PDU */
1641 if (len <= pi->max_pdu_size) {
1642 control = L2CAP_SDU_UNSEGMENTED;
1643 skb = l2cap_create_iframe_pdu(sk, msg, len, control, 0);
1644 if (IS_ERR(skb)) {
1645 err = PTR_ERR(skb);
1646 goto done;
1647 }
1648 __skb_queue_tail(TX_QUEUE(sk), skb);
1649 if (sk->sk_send_head == NULL)
1650 sk->sk_send_head = skb;
1651 } else {
1652 /* Segment SDU into multiples PDUs */
1653 err = l2cap_sar_segment_sdu(sk, msg, len);
1654 if (err < 0)
1655 goto done;
1656 }
1657
1658 if (pi->mode == L2CAP_MODE_STREAMING)
1659 err = l2cap_streaming_send(sk);
1660 else
1661 err = l2cap_ertm_send(sk);
1662
1663 if (!err)
1664 err = len;
1665 break;
1666
1667 default:
1668 BT_DBG("bad state %1.1x", pi->mode);
1669 err = -EINVAL;
1670 }
1671
1672 done:
1673 release_sock(sk);
1674 return err;
1675 }
1676
1677 static int l2cap_sock_recvmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg, size_t len, int flags)
1678 {
1679 struct sock *sk = sock->sk;
1680
1681 lock_sock(sk);
1682
1683 if (sk->sk_state == BT_CONNECT2 && bt_sk(sk)->defer_setup) {
1684 struct l2cap_conn_rsp rsp;
1685
1686 sk->sk_state = BT_CONFIG;
1687
1688 rsp.scid = cpu_to_le16(l2cap_pi(sk)->dcid);
1689 rsp.dcid = cpu_to_le16(l2cap_pi(sk)->scid);
1690 rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
1691 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
1692 l2cap_send_cmd(l2cap_pi(sk)->conn, l2cap_pi(sk)->ident,
1693 L2CAP_CONN_RSP, sizeof(rsp), &rsp);
1694
1695 release_sock(sk);
1696 return 0;
1697 }
1698
1699 release_sock(sk);
1700
1701 return bt_sock_recvmsg(iocb, sock, msg, len, flags);
1702 }
1703
/* Legacy SOL_L2CAP setsockopt handler (L2CAP_OPTIONS, L2CAP_LM).
 * Current values are loaded into 'opts' first so a short user buffer
 * only overwrites the leading fields.
 * NOTE(review): opts.flush_to is read back from userspace but never
 * written to l2cap_pi(sk)->flush_to here — confirm this is intentional.
 */
static int l2cap_sock_setsockopt_old(struct socket *sock, int optname, char __user *optval, unsigned int optlen)
{
	struct sock *sk = sock->sk;
	struct l2cap_options opts;
	int len, err = 0;
	u32 opt;

	BT_DBG("sk %p", sk);

	lock_sock(sk);

	switch (optname) {
	case L2CAP_OPTIONS:
		/* Pre-fill with current settings; partial copies keep the
		 * remaining fields unchanged */
		opts.imtu = l2cap_pi(sk)->imtu;
		opts.omtu = l2cap_pi(sk)->omtu;
		opts.flush_to = l2cap_pi(sk)->flush_to;
		opts.mode = l2cap_pi(sk)->mode;
		opts.fcs = l2cap_pi(sk)->fcs;

		len = min_t(unsigned int, sizeof(opts), optlen);
		if (copy_from_user((char *) &opts, optval, len)) {
			err = -EFAULT;
			break;
		}

		l2cap_pi(sk)->imtu = opts.imtu;
		l2cap_pi(sk)->omtu = opts.omtu;
		l2cap_pi(sk)->mode = opts.mode;
		l2cap_pi(sk)->fcs = opts.fcs;
		break;

	case L2CAP_LM:
		if (get_user(opt, (u32 __user *) optval)) {
			err = -EFAULT;
			break;
		}

		/* Map link-mode flags onto the security-level scale;
		 * the highest flag set wins */
		if (opt & L2CAP_LM_AUTH)
			l2cap_pi(sk)->sec_level = BT_SECURITY_LOW;
		if (opt & L2CAP_LM_ENCRYPT)
			l2cap_pi(sk)->sec_level = BT_SECURITY_MEDIUM;
		if (opt & L2CAP_LM_SECURE)
			l2cap_pi(sk)->sec_level = BT_SECURITY_HIGH;

		l2cap_pi(sk)->role_switch = (opt & L2CAP_LM_MASTER);
		l2cap_pi(sk)->force_reliable = (opt & L2CAP_LM_RELIABLE);
		break;

	default:
		err = -ENOPROTOOPT;
		break;
	}

	release_sock(sk);
	return err;
}
1760
1761 static int l2cap_sock_setsockopt(struct socket *sock, int level, int optname, char __user *optval, unsigned int optlen)
1762 {
1763 struct sock *sk = sock->sk;
1764 struct bt_security sec;
1765 int len, err = 0;
1766 u32 opt;
1767
1768 BT_DBG("sk %p", sk);
1769
1770 if (level == SOL_L2CAP)
1771 return l2cap_sock_setsockopt_old(sock, optname, optval, optlen);
1772
1773 if (level != SOL_BLUETOOTH)
1774 return -ENOPROTOOPT;
1775
1776 lock_sock(sk);
1777
1778 switch (optname) {
1779 case BT_SECURITY:
1780 if (sk->sk_type != SOCK_SEQPACKET && sk->sk_type != SOCK_RAW) {
1781 err = -EINVAL;
1782 break;
1783 }
1784
1785 sec.level = BT_SECURITY_LOW;
1786
1787 len = min_t(unsigned int, sizeof(sec), optlen);
1788 if (copy_from_user((char *) &sec, optval, len)) {
1789 err = -EFAULT;
1790 break;
1791 }
1792
1793 if (sec.level < BT_SECURITY_LOW ||
1794 sec.level > BT_SECURITY_HIGH) {
1795 err = -EINVAL;
1796 break;
1797 }
1798
1799 l2cap_pi(sk)->sec_level = sec.level;
1800 break;
1801
1802 case BT_DEFER_SETUP:
1803 if (sk->sk_state != BT_BOUND && sk->sk_state != BT_LISTEN) {
1804 err = -EINVAL;
1805 break;
1806 }
1807
1808 if (get_user(opt, (u32 __user *) optval)) {
1809 err = -EFAULT;
1810 break;
1811 }
1812
1813 bt_sk(sk)->defer_setup = opt;
1814 break;
1815
1816 default:
1817 err = -ENOPROTOOPT;
1818 break;
1819 }
1820
1821 release_sock(sk);
1822 return err;
1823 }
1824
/* Legacy SOL_L2CAP getsockopt handler (L2CAP_OPTIONS, L2CAP_LM,
 * L2CAP_CONNINFO).  Copies at most the user-supplied length back.
 */
static int l2cap_sock_getsockopt_old(struct socket *sock, int optname, char __user *optval, int __user *optlen)
{
	struct sock *sk = sock->sk;
	struct l2cap_options opts;
	struct l2cap_conninfo cinfo;
	int len, err = 0;
	u32 opt;

	BT_DBG("sk %p", sk);

	if (get_user(len, optlen))
		return -EFAULT;

	lock_sock(sk);

	switch (optname) {
	case L2CAP_OPTIONS:
		opts.imtu = l2cap_pi(sk)->imtu;
		opts.omtu = l2cap_pi(sk)->omtu;
		opts.flush_to = l2cap_pi(sk)->flush_to;
		opts.mode = l2cap_pi(sk)->mode;
		opts.fcs = l2cap_pi(sk)->fcs;

		len = min_t(unsigned int, len, sizeof(opts));
		if (copy_to_user(optval, (char *) &opts, len))
			err = -EFAULT;

		break;

	case L2CAP_LM:
		/* Translate the stored security level back into the
		 * cumulative link-mode flag set */
		switch (l2cap_pi(sk)->sec_level) {
		case BT_SECURITY_LOW:
			opt = L2CAP_LM_AUTH;
			break;
		case BT_SECURITY_MEDIUM:
			opt = L2CAP_LM_AUTH | L2CAP_LM_ENCRYPT;
			break;
		case BT_SECURITY_HIGH:
			opt = L2CAP_LM_AUTH | L2CAP_LM_ENCRYPT |
							L2CAP_LM_SECURE;
			break;
		default:
			opt = 0;
			break;
		}

		if (l2cap_pi(sk)->role_switch)
			opt |= L2CAP_LM_MASTER;

		if (l2cap_pi(sk)->force_reliable)
			opt |= L2CAP_LM_RELIABLE;

		if (put_user(opt, (u32 __user *) optval))
			err = -EFAULT;
		break;

	case L2CAP_CONNINFO:
		/* Valid once connected, or in deferred-setup limbo where
		 * the HCI connection already exists */
		if (sk->sk_state != BT_CONNECTED &&
					!(sk->sk_state == BT_CONNECT2 &&
						bt_sk(sk)->defer_setup)) {
			err = -ENOTCONN;
			break;
		}

		cinfo.hci_handle = l2cap_pi(sk)->conn->hcon->handle;
		memcpy(cinfo.dev_class, l2cap_pi(sk)->conn->hcon->dev_class, 3);

		len = min_t(unsigned int, len, sizeof(cinfo));
		if (copy_to_user(optval, (char *) &cinfo, len))
			err = -EFAULT;

		break;

	default:
		err = -ENOPROTOOPT;
		break;
	}

	release_sock(sk);
	return err;
}
1906
1907 static int l2cap_sock_getsockopt(struct socket *sock, int level, int optname, char __user *optval, int __user *optlen)
1908 {
1909 struct sock *sk = sock->sk;
1910 struct bt_security sec;
1911 int len, err = 0;
1912
1913 BT_DBG("sk %p", sk);
1914
1915 if (level == SOL_L2CAP)
1916 return l2cap_sock_getsockopt_old(sock, optname, optval, optlen);
1917
1918 if (level != SOL_BLUETOOTH)
1919 return -ENOPROTOOPT;
1920
1921 if (get_user(len, optlen))
1922 return -EFAULT;
1923
1924 lock_sock(sk);
1925
1926 switch (optname) {
1927 case BT_SECURITY:
1928 if (sk->sk_type != SOCK_SEQPACKET && sk->sk_type != SOCK_RAW) {
1929 err = -EINVAL;
1930 break;
1931 }
1932
1933 sec.level = l2cap_pi(sk)->sec_level;
1934
1935 len = min_t(unsigned int, len, sizeof(sec));
1936 if (copy_to_user(optval, (char *) &sec, len))
1937 err = -EFAULT;
1938
1939 break;
1940
1941 case BT_DEFER_SETUP:
1942 if (sk->sk_state != BT_BOUND && sk->sk_state != BT_LISTEN) {
1943 err = -EINVAL;
1944 break;
1945 }
1946
1947 if (put_user(bt_sk(sk)->defer_setup, (u32 __user *) optval))
1948 err = -EFAULT;
1949
1950 break;
1951
1952 default:
1953 err = -ENOPROTOOPT;
1954 break;
1955 }
1956
1957 release_sock(sk);
1958 return err;
1959 }
1960
1961 static int l2cap_sock_shutdown(struct socket *sock, int how)
1962 {
1963 struct sock *sk = sock->sk;
1964 int err = 0;
1965
1966 BT_DBG("sock %p, sk %p", sock, sk);
1967
1968 if (!sk)
1969 return 0;
1970
1971 lock_sock(sk);
1972 if (!sk->sk_shutdown) {
1973 sk->sk_shutdown = SHUTDOWN_MASK;
1974 l2cap_sock_clear_timer(sk);
1975 __l2cap_sock_close(sk, 0);
1976
1977 if (sock_flag(sk, SOCK_LINGER) && sk->sk_lingertime)
1978 err = bt_sock_wait_state(sk, BT_CLOSED,
1979 sk->sk_lingertime);
1980 }
1981 release_sock(sk);
1982 return err;
1983 }
1984
1985 static int l2cap_sock_release(struct socket *sock)
1986 {
1987 struct sock *sk = sock->sk;
1988 int err;
1989
1990 BT_DBG("sock %p, sk %p", sock, sk);
1991
1992 if (!sk)
1993 return 0;
1994
1995 err = l2cap_sock_shutdown(sock, 2);
1996
1997 sock_orphan(sk);
1998 l2cap_sock_kill(sk);
1999 return err;
2000 }
2001
2002 static void l2cap_chan_ready(struct sock *sk)
2003 {
2004 struct sock *parent = bt_sk(sk)->parent;
2005
2006 BT_DBG("sk %p, parent %p", sk, parent);
2007
2008 l2cap_pi(sk)->conf_state = 0;
2009 l2cap_sock_clear_timer(sk);
2010
2011 if (!parent) {
2012 /* Outgoing channel.
2013 * Wake up socket sleeping on connect.
2014 */
2015 sk->sk_state = BT_CONNECTED;
2016 sk->sk_state_change(sk);
2017 } else {
2018 /* Incoming channel.
2019 * Wake up socket sleeping on accept.
2020 */
2021 parent->sk_data_ready(parent, 0);
2022 }
2023 }
2024
2025 /* Copy frame to all raw sockets on that connection */
2026 static void l2cap_raw_recv(struct l2cap_conn *conn, struct sk_buff *skb)
2027 {
2028 struct l2cap_chan_list *l = &conn->chan_list;
2029 struct sk_buff *nskb;
2030 struct sock *sk;
2031
2032 BT_DBG("conn %p", conn);
2033
2034 read_lock(&l->lock);
2035 for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
2036 if (sk->sk_type != SOCK_RAW)
2037 continue;
2038
2039 /* Don't send frame to the socket it came from */
2040 if (skb->sk == sk)
2041 continue;
2042 nskb = skb_clone(skb, GFP_ATOMIC);
2043 if (!nskb)
2044 continue;
2045
2046 if (sock_queue_rcv_skb(sk, nskb))
2047 kfree_skb(nskb);
2048 }
2049 read_unlock(&l->lock);
2050 }
2051
2052 /* ---- L2CAP signalling commands ---- */
2053 static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
2054 u8 code, u8 ident, u16 dlen, void *data)
2055 {
2056 struct sk_buff *skb, **frag;
2057 struct l2cap_cmd_hdr *cmd;
2058 struct l2cap_hdr *lh;
2059 int len, count;
2060
2061 BT_DBG("conn %p, code 0x%2.2x, ident 0x%2.2x, len %d",
2062 conn, code, ident, dlen);
2063
2064 len = L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE + dlen;
2065 count = min_t(unsigned int, conn->mtu, len);
2066
2067 skb = bt_skb_alloc(count, GFP_ATOMIC);
2068 if (!skb)
2069 return NULL;
2070
2071 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
2072 lh->len = cpu_to_le16(L2CAP_CMD_HDR_SIZE + dlen);
2073 lh->cid = cpu_to_le16(L2CAP_CID_SIGNALING);
2074
2075 cmd = (struct l2cap_cmd_hdr *) skb_put(skb, L2CAP_CMD_HDR_SIZE);
2076 cmd->code = code;
2077 cmd->ident = ident;
2078 cmd->len = cpu_to_le16(dlen);
2079
2080 if (dlen) {
2081 count -= L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE;
2082 memcpy(skb_put(skb, count), data, count);
2083 data += count;
2084 }
2085
2086 len -= skb->len;
2087
2088 /* Continuation fragments (no L2CAP header) */
2089 frag = &skb_shinfo(skb)->frag_list;
2090 while (len) {
2091 count = min_t(unsigned int, conn->mtu, len);
2092
2093 *frag = bt_skb_alloc(count, GFP_ATOMIC);
2094 if (!*frag)
2095 goto fail;
2096
2097 memcpy(skb_put(*frag, count), data, count);
2098
2099 len -= count;
2100 data += count;
2101
2102 frag = &(*frag)->next;
2103 }
2104
2105 return skb;
2106
2107 fail:
2108 kfree_skb(skb);
2109 return NULL;
2110 }
2111
/* Decode one configuration option at *ptr, advancing *ptr past it.
 * Fixed-size values (1/2/4 bytes) are returned by value in *val; any
 * other size returns a pointer to the raw option data instead.
 * Returns the total number of bytes consumed.
 */
static inline int l2cap_get_conf_opt(void **ptr, int *type, int *olen, unsigned long *val)
{
	struct l2cap_conf_opt *opt = *ptr;
	int len;

	len = L2CAP_CONF_OPT_SIZE + opt->len;
	*ptr += len;

	*type = opt->type;
	*olen = opt->len;

	switch (opt->len) {
	case 1:
		*val = *((u8 *) opt->val);
		break;

	case 2:
		*val = __le16_to_cpu(*((__le16 *) opt->val));
		break;

	case 4:
		*val = __le32_to_cpu(*((__le32 *) opt->val));
		break;

	default:
		/* Variable-length option: hand back a pointer, not a value */
		*val = (unsigned long) opt->val;
		break;
	}

	BT_DBG("type 0x%2.2x len %d val 0x%lx", *type, opt->len, *val);
	return len;
}
2144
2145 static void l2cap_add_conf_opt(void **ptr, u8 type, u8 len, unsigned long val)
2146 {
2147 struct l2cap_conf_opt *opt = *ptr;
2148
2149 BT_DBG("type 0x%2.2x len %d val 0x%lx", type, len, val);
2150
2151 opt->type = type;
2152 opt->len = len;
2153
2154 switch (len) {
2155 case 1:
2156 *((u8 *) opt->val) = val;
2157 break;
2158
2159 case 2:
2160 *((__le16 *) opt->val) = cpu_to_le16(val);
2161 break;
2162
2163 case 4:
2164 *((__le32 *) opt->val) = cpu_to_le32(val);
2165 break;
2166
2167 default:
2168 memcpy(opt->val, (void *) val, len);
2169 break;
2170 }
2171
2172 *ptr += L2CAP_CONF_OPT_SIZE + len;
2173 }
2174
2175 static int l2cap_mode_supported(__u8 mode, __u32 feat_mask)
2176 {
2177 u32 local_feat_mask = l2cap_feat_mask;
2178 if (enable_ertm)
2179 local_feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING;
2180
2181 switch (mode) {
2182 case L2CAP_MODE_ERTM:
2183 return L2CAP_FEAT_ERTM & feat_mask & local_feat_mask;
2184 case L2CAP_MODE_STREAMING:
2185 return L2CAP_FEAT_STREAMING & feat_mask & local_feat_mask;
2186 default:
2187 return 0x00;
2188 }
2189 }
2190
2191 static inline __u8 l2cap_select_mode(__u8 mode, __u16 remote_feat_mask)
2192 {
2193 switch (mode) {
2194 case L2CAP_MODE_STREAMING:
2195 case L2CAP_MODE_ERTM:
2196 if (l2cap_mode_supported(mode, remote_feat_mask))
2197 return mode;
2198 /* fall through */
2199 default:
2200 return L2CAP_MODE_BASIC;
2201 }
2202 }
2203
/* Build our Configure Request in 'data' according to the channel mode
 * (selecting/validating the mode on the first request only).  Returns
 * the length of the request written.
 */
static int l2cap_build_conf_req(struct sock *sk, void *data)
{
	struct l2cap_pinfo *pi = l2cap_pi(sk);
	struct l2cap_conf_req *req = data;
	struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
	void *ptr = req->data;

	BT_DBG("sk %p", sk);

	/* Mode negotiation happens only on the very first exchange */
	if (pi->num_conf_req || pi->num_conf_rsp)
		goto done;

	switch (pi->mode) {
	case L2CAP_MODE_STREAMING:
	case L2CAP_MODE_ERTM:
		/* Mode was mandated by the application: refuse to
		 * downgrade, disconnect if unsupported */
		pi->conf_state |= L2CAP_CONF_STATE2_DEVICE;
		if (!l2cap_mode_supported(pi->mode, pi->conn->feat_mask))
			l2cap_send_disconn_req(pi->conn, sk);
		break;
	default:
		pi->mode = l2cap_select_mode(rfc.mode, pi->conn->feat_mask);
		break;
	}

done:
	switch (pi->mode) {
	case L2CAP_MODE_BASIC:
		/* MTU is the only negotiable Basic Mode option */
		if (pi->imtu != L2CAP_DEFAULT_MTU)
			l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, pi->imtu);
		break;

	case L2CAP_MODE_ERTM:
		rfc.mode = L2CAP_MODE_ERTM;
		rfc.txwin_size = L2CAP_DEFAULT_TX_WINDOW;
		rfc.max_transmit = L2CAP_DEFAULT_MAX_TX;
		/* Timeouts are set by the responder, not the requester */
		rfc.retrans_timeout = 0;
		rfc.monitor_timeout = 0;
		rfc.max_pdu_size = cpu_to_le16(L2CAP_DEFAULT_MAX_PDU_SIZE);

		l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
					sizeof(rfc), (unsigned long) &rfc);

		if (!(pi->conn->feat_mask & L2CAP_FEAT_FCS))
			break;

		/* Request "no FCS" when we or the peer opted out of it */
		if (pi->fcs == L2CAP_FCS_NONE ||
				pi->conf_state & L2CAP_CONF_NO_FCS_RECV) {
			pi->fcs = L2CAP_FCS_NONE;
			l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1, pi->fcs);
		}
		break;

	case L2CAP_MODE_STREAMING:
		rfc.mode = L2CAP_MODE_STREAMING;
		/* Window/retry fields are meaningless in streaming mode */
		rfc.txwin_size = 0;
		rfc.max_transmit = 0;
		rfc.retrans_timeout = 0;
		rfc.monitor_timeout = 0;
		rfc.max_pdu_size = cpu_to_le16(L2CAP_DEFAULT_MAX_PDU_SIZE);

		l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
					sizeof(rfc), (unsigned long) &rfc);

		if (!(pi->conn->feat_mask & L2CAP_FEAT_FCS))
			break;

		if (pi->fcs == L2CAP_FCS_NONE ||
				pi->conf_state & L2CAP_CONF_NO_FCS_RECV) {
			pi->fcs = L2CAP_FCS_NONE;
			l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1, pi->fcs);
		}
		break;
	}

	/* FIXME: Need actual value of the flush timeout */
	//if (flush_to != L2CAP_DEFAULT_FLUSH_TO)
	//   l2cap_add_conf_opt(&ptr, L2CAP_CONF_FLUSH_TO, 2, pi->flush_to);

	req->dcid = cpu_to_le16(pi->dcid);
	req->flags = cpu_to_le16(0);

	return ptr - data;
}
2287
/* Parse the peer's Configure Request (stashed in pi->conf_req) and
 * build our Configure Response in 'data'.  Returns the response length,
 * or -ECONNREFUSED when mode negotiation cannot succeed.
 */
static int l2cap_parse_conf_req(struct sock *sk, void *data)
{
	struct l2cap_pinfo *pi = l2cap_pi(sk);
	struct l2cap_conf_rsp *rsp = data;
	void *ptr = rsp->data;
	void *req = pi->conf_req;
	int len = pi->conf_len;
	int type, hint, olen;
	unsigned long val;
	struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
	u16 mtu = L2CAP_DEFAULT_MTU;
	u16 result = L2CAP_CONF_SUCCESS;

	BT_DBG("sk %p", sk);

	/* Walk every option in the request */
	while (len >= L2CAP_CONF_OPT_SIZE) {
		len -= l2cap_get_conf_opt(&req, &type, &olen, &val);

		hint = type & L2CAP_CONF_HINT;
		type &= L2CAP_CONF_MASK;

		switch (type) {
		case L2CAP_CONF_MTU:
			mtu = val;
			break;

		case L2CAP_CONF_FLUSH_TO:
			pi->flush_to = val;
			break;

		case L2CAP_CONF_QOS:
			break;

		case L2CAP_CONF_RFC:
			if (olen == sizeof(rfc))
				memcpy(&rfc, (void *) val, olen);
			break;

		case L2CAP_CONF_FCS:
			if (val == L2CAP_FCS_NONE)
				pi->conf_state |= L2CAP_CONF_NO_FCS_RECV;

			break;

		default:
			if (hint)
				break;

			/* Unknown non-hint option: reject, echo its type */
			result = L2CAP_CONF_UNKNOWN;
			*((u8 *) ptr++) = type;
			break;
		}
	}

	/* Mode selection only on the first exchange */
	if (pi->num_conf_rsp || pi->num_conf_req)
		goto done;

	switch (pi->mode) {
	case L2CAP_MODE_STREAMING:
	case L2CAP_MODE_ERTM:
		pi->conf_state |= L2CAP_CONF_STATE2_DEVICE;
		if (!l2cap_mode_supported(pi->mode, pi->conn->feat_mask))
			return -ECONNREFUSED;
		break;
	default:
		pi->mode = l2cap_select_mode(rfc.mode, pi->conn->feat_mask);
		break;
	}

done:
	if (pi->mode != rfc.mode) {
		/* Propose our mode instead; give up after one retry */
		result = L2CAP_CONF_UNACCEPT;
		rfc.mode = pi->mode;

		if (pi->num_conf_rsp == 1)
			return -ECONNREFUSED;

		l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
					sizeof(rfc), (unsigned long) &rfc);
	}


	if (result == L2CAP_CONF_SUCCESS) {
		/* Configure output options and let the other side know
		 * which ones we don't like. */

		if (mtu < L2CAP_DEFAULT_MIN_MTU)
			result = L2CAP_CONF_UNACCEPT;
		else {
			pi->omtu = mtu;
			pi->conf_state |= L2CAP_CONF_MTU_DONE;
		}
		l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, pi->omtu);

		switch (rfc.mode) {
		case L2CAP_MODE_BASIC:
			pi->fcs = L2CAP_FCS_NONE;
			pi->conf_state |= L2CAP_CONF_MODE_DONE;
			break;

		case L2CAP_MODE_ERTM:
			pi->remote_tx_win = rfc.txwin_size;
			pi->remote_max_tx = rfc.max_transmit;
			pi->max_pdu_size = rfc.max_pdu_size;

			/* Timeouts are ours to dictate as the responder */
			rfc.retrans_timeout = L2CAP_DEFAULT_RETRANS_TO;
			rfc.monitor_timeout = L2CAP_DEFAULT_MONITOR_TO;

			pi->conf_state |= L2CAP_CONF_MODE_DONE;
			break;

		case L2CAP_MODE_STREAMING:
			pi->remote_tx_win = rfc.txwin_size;
			pi->max_pdu_size = rfc.max_pdu_size;

			pi->conf_state |= L2CAP_CONF_MODE_DONE;
			break;

		default:
			result = L2CAP_CONF_UNACCEPT;

			memset(&rfc, 0, sizeof(rfc));
			rfc.mode = pi->mode;
		}

		l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
					sizeof(rfc), (unsigned long) &rfc);

		if (result == L2CAP_CONF_SUCCESS)
			pi->conf_state |= L2CAP_CONF_OUTPUT_DONE;
	}
	rsp->scid = cpu_to_le16(pi->dcid);
	rsp->result = cpu_to_le16(result);
	rsp->flags = cpu_to_le16(0x0000);

	return ptr - data;
}
2425
2426 static int l2cap_parse_conf_rsp(struct sock *sk, void *rsp, int len, void *data, u16 *result)
2427 {
2428 struct l2cap_pinfo *pi = l2cap_pi(sk);
2429 struct l2cap_conf_req *req = data;
2430 void *ptr = req->data;
2431 int type, olen;
2432 unsigned long val;
2433 struct l2cap_conf_rfc rfc;
2434
2435 BT_DBG("sk %p, rsp %p, len %d, req %p", sk, rsp, len, data);
2436
2437 while (len >= L2CAP_CONF_OPT_SIZE) {
2438 len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);
2439
2440 switch (type) {
2441 case L2CAP_CONF_MTU:
2442 if (val < L2CAP_DEFAULT_MIN_MTU) {
2443 *result = L2CAP_CONF_UNACCEPT;
2444 pi->omtu = L2CAP_DEFAULT_MIN_MTU;
2445 } else
2446 pi->omtu = val;
2447 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, pi->omtu);
2448 break;
2449
2450 case L2CAP_CONF_FLUSH_TO:
2451 pi->flush_to = val;
2452 l2cap_add_conf_opt(&ptr, L2CAP_CONF_FLUSH_TO,
2453 2, pi->flush_to);
2454 break;
2455
2456 case L2CAP_CONF_RFC:
2457 if (olen == sizeof(rfc))
2458 memcpy(&rfc, (void *)val, olen);
2459
2460 if ((pi->conf_state & L2CAP_CONF_STATE2_DEVICE) &&
2461 rfc.mode != pi->mode)
2462 return -ECONNREFUSED;
2463
2464 pi->mode = rfc.mode;
2465 pi->fcs = 0;
2466
2467 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
2468 sizeof(rfc), (unsigned long) &rfc);
2469 break;
2470 }
2471 }
2472
2473 if (*result == L2CAP_CONF_SUCCESS) {
2474 switch (rfc.mode) {
2475 case L2CAP_MODE_ERTM:
2476 pi->remote_tx_win = rfc.txwin_size;
2477 pi->retrans_timeout = rfc.retrans_timeout;
2478 pi->monitor_timeout = rfc.monitor_timeout;
2479 pi->max_pdu_size = le16_to_cpu(rfc.max_pdu_size);
2480 break;
2481 case L2CAP_MODE_STREAMING:
2482 pi->max_pdu_size = le16_to_cpu(rfc.max_pdu_size);
2483 break;
2484 }
2485 }
2486
2487 req->dcid = cpu_to_le16(pi->dcid);
2488 req->flags = cpu_to_le16(0x0000);
2489
2490 return ptr - data;
2491 }
2492
2493 static int l2cap_build_conf_rsp(struct sock *sk, void *data, u16 result, u16 flags)
2494 {
2495 struct l2cap_conf_rsp *rsp = data;
2496 void *ptr = rsp->data;
2497
2498 BT_DBG("sk %p", sk);
2499
2500 rsp->scid = cpu_to_le16(l2cap_pi(sk)->dcid);
2501 rsp->result = cpu_to_le16(result);
2502 rsp->flags = cpu_to_le16(flags);
2503
2504 return ptr - data;
2505 }
2506
2507 static inline int l2cap_command_rej(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2508 {
2509 struct l2cap_cmd_rej *rej = (struct l2cap_cmd_rej *) data;
2510
2511 if (rej->reason != 0x0000)
2512 return 0;
2513
2514 if ((conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) &&
2515 cmd->ident == conn->info_ident) {
2516 del_timer(&conn->info_timer);
2517
2518 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
2519 conn->info_ident = 0;
2520
2521 l2cap_conn_start(conn);
2522 }
2523
2524 return 0;
2525 }
2526
/* Handle an incoming L2CAP Connection Request.
 *
 * Looks up a listening socket for the requested PSM, performs security
 * and backlog checks, allocates a child socket, adds it to the
 * connection's channel list and replies with a Connection Response.
 * If the remote's feature mask is not yet known, a feature-mask
 * Information Request is also sent and the connect is left pending.
 *
 * NOTE(review): l2cap_get_sock_by_psm() presumably returns 'parent'
 * with its socket lock held — the bh_unlock_sock(parent) at 'response'
 * has no visible matching lock here; confirm against the helper.
 * Always returns 0; failures are reported to the peer via 'result'.
 */
static inline int l2cap_connect_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
{
	struct l2cap_chan_list *list = &conn->chan_list;
	struct l2cap_conn_req *req = (struct l2cap_conn_req *) data;
	struct l2cap_conn_rsp rsp;
	struct sock *sk, *parent;
	int result, status = L2CAP_CS_NO_INFO;

	/* dcid stays 0 (invalid) unless a channel is actually created. */
	u16 dcid = 0, scid = __le16_to_cpu(req->scid);
	__le16 psm = req->psm;

	BT_DBG("psm 0x%2.2x scid 0x%4.4x", psm, scid);

	/* Check if we have socket listening on psm */
	parent = l2cap_get_sock_by_psm(BT_LISTEN, psm, conn->src);
	if (!parent) {
		result = L2CAP_CR_BAD_PSM;
		goto sendresp;
	}

	/* Check if the ACL is secure enough (if not SDP) */
	/* PSM 0x0001 is SDP, which is exempt from the link-mode check. */
	if (psm != cpu_to_le16(0x0001) &&
			!hci_conn_check_link_mode(conn->hcon)) {
		conn->disc_reason = 0x05;
		result = L2CAP_CR_SEC_BLOCK;
		goto response;
	}

	/* From here on, any failure is reported as "no resources". */
	result = L2CAP_CR_NO_MEM;

	/* Check for backlog size */
	if (sk_acceptq_is_full(parent)) {
		BT_DBG("backlog full %d", parent->sk_ack_backlog);
		goto response;
	}

	/* GFP_ATOMIC: we run in softirq context and may not sleep. */
	sk = l2cap_sock_alloc(sock_net(parent), NULL, BTPROTO_L2CAP, GFP_ATOMIC);
	if (!sk)
		goto response;

	write_lock_bh(&list->lock);

	/* Check if we already have channel with that dcid */
	if (__l2cap_get_chan_by_dcid(list, scid)) {
		write_unlock_bh(&list->lock);
		/* Mark the orphan socket dead so l2cap_sock_kill frees it. */
		sock_set_flag(sk, SOCK_ZAPPED);
		l2cap_sock_kill(sk);
		goto response;
	}

	/* Hold the ACL link for the lifetime of this channel. */
	hci_conn_hold(conn->hcon);

	l2cap_sock_init(sk, parent);
	bacpy(&bt_sk(sk)->src, conn->src);
	bacpy(&bt_sk(sk)->dst, conn->dst);
	l2cap_pi(sk)->psm = psm;
	l2cap_pi(sk)->dcid = scid;

	__l2cap_chan_add(conn, sk, parent);
	/* __l2cap_chan_add allocated our local CID; report it to the peer. */
	dcid = l2cap_pi(sk)->scid;

	l2cap_sock_set_timer(sk, sk->sk_sndtimeo);

	/* Remember the request ident so a deferred response can reuse it. */
	l2cap_pi(sk)->ident = cmd->ident;

	if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE) {
		if (l2cap_check_security(sk)) {
			if (bt_sk(sk)->defer_setup) {
				/* Userspace must authorize via accept(). */
				sk->sk_state = BT_CONNECT2;
				result = L2CAP_CR_PEND;
				status = L2CAP_CS_AUTHOR_PEND;
				parent->sk_data_ready(parent, 0);
			} else {
				sk->sk_state = BT_CONFIG;
				result = L2CAP_CR_SUCCESS;
				status = L2CAP_CS_NO_INFO;
			}
		} else {
			/* Security procedure still in progress. */
			sk->sk_state = BT_CONNECT2;
			result = L2CAP_CR_PEND;
			status = L2CAP_CS_AUTHEN_PEND;
		}
	} else {
		/* Feature mask unknown: defer until info exchange is done. */
		sk->sk_state = BT_CONNECT2;
		result = L2CAP_CR_PEND;
		status = L2CAP_CS_NO_INFO;
	}

	write_unlock_bh(&list->lock);

response:
	bh_unlock_sock(parent);

sendresp:
	rsp.scid = cpu_to_le16(scid);
	rsp.dcid = cpu_to_le16(dcid);
	rsp.result = cpu_to_le16(result);
	rsp.status = cpu_to_le16(status);
	l2cap_send_cmd(conn, cmd->ident, L2CAP_CONN_RSP, sizeof(rsp), &rsp);

	/* First pending connect on this link: ask the remote for its
	 * feature mask so we know which modes it supports. */
	if (result == L2CAP_CR_PEND && status == L2CAP_CS_NO_INFO) {
		struct l2cap_info_req info;
		info.type = cpu_to_le16(L2CAP_IT_FEAT_MASK);

		conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
		conn->info_ident = l2cap_get_ident(conn);

		mod_timer(&conn->info_timer, jiffies +
					msecs_to_jiffies(L2CAP_INFO_TIMEOUT));

		l2cap_send_cmd(conn, conn->info_ident,
					L2CAP_INFO_REQ, sizeof(info), &info);
	}

	return 0;
}
2643
/* Handle an incoming L2CAP Connection Response.
 *
 * On success, moves the channel to BT_CONFIG and immediately sends our
 * first Configure Request. A pending result just marks the channel as
 * waiting; any other result tears the channel down.
 *
 * NOTE(review): l2cap_get_chan_by_scid()/l2cap_get_chan_by_ident()
 * presumably return the socket locked — confirmed by the unconditional
 * bh_unlock_sock(sk) below; verify against the helpers.
 * Always returns 0.
 */
static inline int l2cap_connect_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
{
	struct l2cap_conn_rsp *rsp = (struct l2cap_conn_rsp *) data;
	u16 scid, dcid, result, status;
	struct sock *sk;
	u8 req[128];

	scid = __le16_to_cpu(rsp->scid);
	dcid = __le16_to_cpu(rsp->dcid);
	result = __le16_to_cpu(rsp->result);
	status = __le16_to_cpu(rsp->status);

	BT_DBG("dcid 0x%4.4x scid 0x%4.4x result 0x%2.2x status 0x%2.2x", dcid, scid, result, status);

	if (scid) {
		/* Normal case: the response echoes our source CID. */
		sk = l2cap_get_chan_by_scid(&conn->chan_list, scid);
		if (!sk)
			return 0;
	} else {
		/* No CID yet (e.g. pending): match by command ident. */
		sk = l2cap_get_chan_by_ident(&conn->chan_list, cmd->ident);
		if (!sk)
			return 0;
	}

	switch (result) {
	case L2CAP_CR_SUCCESS:
		sk->sk_state = BT_CONFIG;
		l2cap_pi(sk)->ident = 0;
		l2cap_pi(sk)->dcid = dcid;
		l2cap_pi(sk)->conf_state |= L2CAP_CONF_REQ_SENT;

		l2cap_pi(sk)->conf_state &= ~L2CAP_CONF_CONNECT_PEND;

		/* Kick off configuration right away. */
		l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
					l2cap_build_conf_req(sk, req), req);
		l2cap_pi(sk)->num_conf_req++;
		break;

	case L2CAP_CR_PEND:
		l2cap_pi(sk)->conf_state |= L2CAP_CONF_CONNECT_PEND;
		break;

	default:
		/* Connection refused or unknown result: drop the channel. */
		l2cap_chan_del(sk, ECONNREFUSED);
		break;
	}

	bh_unlock_sock(sk);
	return 0;
}
2694
/* Handle an incoming L2CAP Configure Request.
 *
 * Accumulates (possibly fragmented) configuration options into the
 * channel's conf_req buffer; when the final fragment arrives, parses
 * the whole request, responds, and — if both directions are now
 * configured — brings the channel up (installing the ERTM timers and
 * queues). If we have not yet sent our own Configure Request, one is
 * sent here.
 *
 * NOTE(review): l2cap_get_chan_by_scid() presumably returns the socket
 * locked; the unlock happens at 'unlock'. Returns 0, or -ENOENT when
 * no channel matches the destination CID.
 */
static inline int l2cap_config_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u16 cmd_len, u8 *data)
{
	struct l2cap_conf_req *req = (struct l2cap_conf_req *) data;
	u16 dcid, flags;
	u8 rsp[64];
	struct sock *sk;
	int len;

	dcid = __le16_to_cpu(req->dcid);
	flags = __le16_to_cpu(req->flags);

	BT_DBG("dcid 0x%4.4x flags 0x%2.2x", dcid, flags);

	sk = l2cap_get_chan_by_scid(&conn->chan_list, dcid);
	if (!sk)
		return -ENOENT;

	/* Channel already being torn down: ignore the request. */
	if (sk->sk_state == BT_DISCONN)
		goto unlock;

	/* Reject if config buffer is too small. */
	len = cmd_len - sizeof(*req);
	if (l2cap_pi(sk)->conf_len + len > sizeof(l2cap_pi(sk)->conf_req)) {
		l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
				l2cap_build_conf_rsp(sk, rsp,
					L2CAP_CONF_REJECT, flags), rsp);
		goto unlock;
	}

	/* Store config. */
	memcpy(l2cap_pi(sk)->conf_req + l2cap_pi(sk)->conf_len, req->data, len);
	l2cap_pi(sk)->conf_len += len;

	/* Continuation flag set: more option fragments are coming. */
	if (flags & 0x0001) {
		/* Incomplete config. Send empty response. */
		l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
				l2cap_build_conf_rsp(sk, rsp,
					L2CAP_CONF_SUCCESS, 0x0001), rsp);
		goto unlock;
	}

	/* Complete config. */
	len = l2cap_parse_conf_req(sk, rsp);
	if (len < 0) {
		/* Unrecoverable option mismatch: disconnect the channel. */
		l2cap_send_disconn_req(conn, sk);
		goto unlock;
	}

	l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP, len, rsp);
	l2cap_pi(sk)->num_conf_rsp++;

	/* Reset config buffer. */
	l2cap_pi(sk)->conf_len = 0;

	/* Our accept of the peer's options isn't final yet. */
	if (!(l2cap_pi(sk)->conf_state & L2CAP_CONF_OUTPUT_DONE))
		goto unlock;

	/* Both directions configured: the channel is ready for data. */
	if (l2cap_pi(sk)->conf_state & L2CAP_CONF_INPUT_DONE) {
		/* Enable FCS unless the peer explicitly opted out. */
		if (!(l2cap_pi(sk)->conf_state & L2CAP_CONF_NO_FCS_RECV)
				|| l2cap_pi(sk)->fcs != L2CAP_FCS_NONE)
			l2cap_pi(sk)->fcs = L2CAP_FCS_CRC16;

		sk->sk_state = BT_CONNECTED;
		/* Reset ERTM sequence state for the new connection. */
		l2cap_pi(sk)->next_tx_seq = 0;
		l2cap_pi(sk)->expected_ack_seq = 0;
		l2cap_pi(sk)->unacked_frames = 0;

		setup_timer(&l2cap_pi(sk)->retrans_timer,
				l2cap_retrans_timeout, (unsigned long) sk);
		setup_timer(&l2cap_pi(sk)->monitor_timer,
				l2cap_monitor_timeout, (unsigned long) sk);

		__skb_queue_head_init(TX_QUEUE(sk));
		__skb_queue_head_init(SREJ_QUEUE(sk));
		l2cap_chan_ready(sk);
		goto unlock;
	}

	/* We haven't sent our own Configure Request yet: do it now. */
	if (!(l2cap_pi(sk)->conf_state & L2CAP_CONF_REQ_SENT)) {
		u8 buf[64];
		l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
					l2cap_build_conf_req(sk, buf), buf);
		l2cap_pi(sk)->num_conf_req++;
	}

unlock:
	bh_unlock_sock(sk);
	return 0;
}
2784
2785 static inline int l2cap_config_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2786 {
2787 struct l2cap_conf_rsp *rsp = (struct l2cap_conf_rsp *)data;
2788 u16 scid, flags, result;
2789 struct sock *sk;
2790
2791 scid = __le16_to_cpu(rsp->scid);
2792 flags = __le16_to_cpu(rsp->flags);
2793 result = __le16_to_cpu(rsp->result);
2794
2795 BT_DBG("scid 0x%4.4x flags 0x%2.2x result 0x%2.2x",
2796 scid, flags, result);
2797
2798 sk = l2cap_get_chan_by_scid(&conn->chan_list, scid);
2799 if (!sk)
2800 return 0;
2801
2802 switch (result) {
2803 case L2CAP_CONF_SUCCESS:
2804 break;
2805
2806 case L2CAP_CONF_UNACCEPT:
2807 if (l2cap_pi(sk)->num_conf_rsp <= L2CAP_CONF_MAX_CONF_RSP) {
2808 int len = cmd->len - sizeof(*rsp);
2809 char req[64];
2810
2811 /* throw out any old stored conf requests */
2812 result = L2CAP_CONF_SUCCESS;
2813 len = l2cap_parse_conf_rsp(sk, rsp->data,
2814 len, req, &result);
2815 if (len < 0) {
2816 l2cap_send_disconn_req(conn, sk);
2817 goto done;
2818 }
2819
2820 l2cap_send_cmd(conn, l2cap_get_ident(conn),
2821 L2CAP_CONF_REQ, len, req);
2822 l2cap_pi(sk)->num_conf_req++;
2823 if (result != L2CAP_CONF_SUCCESS)
2824 goto done;
2825 break;
2826 }
2827
2828 default:
2829 sk->sk_state = BT_DISCONN;
2830 sk->sk_err = ECONNRESET;
2831 l2cap_sock_set_timer(sk, HZ * 5);
2832 l2cap_send_disconn_req(conn, sk);
2833 goto done;
2834 }
2835
2836 if (flags & 0x01)
2837 goto done;
2838
2839 l2cap_pi(sk)->conf_state |= L2CAP_CONF_INPUT_DONE;
2840
2841 if (l2cap_pi(sk)->conf_state & L2CAP_CONF_OUTPUT_DONE) {
2842 if (!(l2cap_pi(sk)->conf_state & L2CAP_CONF_NO_FCS_RECV)
2843 || l2cap_pi(sk)->fcs != L2CAP_FCS_NONE)
2844 l2cap_pi(sk)->fcs = L2CAP_FCS_CRC16;
2845
2846 sk->sk_state = BT_CONNECTED;
2847 l2cap_pi(sk)->expected_tx_seq = 0;
2848 l2cap_pi(sk)->buffer_seq = 0;
2849 l2cap_pi(sk)->num_to_ack = 0;
2850 __skb_queue_head_init(TX_QUEUE(sk));
2851 __skb_queue_head_init(SREJ_QUEUE(sk));
2852 l2cap_chan_ready(sk);
2853 }
2854
2855 done:
2856 bh_unlock_sock(sk);
2857 return 0;
2858 }
2859
2860 static inline int l2cap_disconnect_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2861 {
2862 struct l2cap_disconn_req *req = (struct l2cap_disconn_req *) data;
2863 struct l2cap_disconn_rsp rsp;
2864 u16 dcid, scid;
2865 struct sock *sk;
2866
2867 scid = __le16_to_cpu(req->scid);
2868 dcid = __le16_to_cpu(req->dcid);
2869
2870 BT_DBG("scid 0x%4.4x dcid 0x%4.4x", scid, dcid);
2871
2872 sk = l2cap_get_chan_by_scid(&conn->chan_list, dcid);
2873 if (!sk)
2874 return 0;
2875
2876 rsp.dcid = cpu_to_le16(l2cap_pi(sk)->scid);
2877 rsp.scid = cpu_to_le16(l2cap_pi(sk)->dcid);
2878 l2cap_send_cmd(conn, cmd->ident, L2CAP_DISCONN_RSP, sizeof(rsp), &rsp);
2879
2880 sk->sk_shutdown = SHUTDOWN_MASK;
2881
2882 skb_queue_purge(TX_QUEUE(sk));
2883 skb_queue_purge(SREJ_QUEUE(sk));
2884 del_timer(&l2cap_pi(sk)->retrans_timer);
2885 del_timer(&l2cap_pi(sk)->monitor_timer);
2886
2887 l2cap_chan_del(sk, ECONNRESET);
2888 bh_unlock_sock(sk);
2889
2890 l2cap_sock_kill(sk);
2891 return 0;
2892 }
2893
2894 static inline int l2cap_disconnect_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2895 {
2896 struct l2cap_disconn_rsp *rsp = (struct l2cap_disconn_rsp *) data;
2897 u16 dcid, scid;
2898 struct sock *sk;
2899
2900 scid = __le16_to_cpu(rsp->scid);
2901 dcid = __le16_to_cpu(rsp->dcid);
2902
2903 BT_DBG("dcid 0x%4.4x scid 0x%4.4x", dcid, scid);
2904
2905 sk = l2cap_get_chan_by_scid(&conn->chan_list, scid);
2906 if (!sk)
2907 return 0;
2908
2909 skb_queue_purge(TX_QUEUE(sk));
2910 skb_queue_purge(SREJ_QUEUE(sk));
2911 del_timer(&l2cap_pi(sk)->retrans_timer);
2912 del_timer(&l2cap_pi(sk)->monitor_timer);
2913
2914 l2cap_chan_del(sk, 0);
2915 bh_unlock_sock(sk);
2916
2917 l2cap_sock_kill(sk);
2918 return 0;
2919 }
2920
2921 static inline int l2cap_information_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2922 {
2923 struct l2cap_info_req *req = (struct l2cap_info_req *) data;
2924 u16 type;
2925
2926 type = __le16_to_cpu(req->type);
2927
2928 BT_DBG("type 0x%4.4x", type);
2929
2930 if (type == L2CAP_IT_FEAT_MASK) {
2931 u8 buf[8];
2932 u32 feat_mask = l2cap_feat_mask;
2933 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
2934 rsp->type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
2935 rsp->result = cpu_to_le16(L2CAP_IR_SUCCESS);
2936 if (enable_ertm)
2937 feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING
2938 | L2CAP_FEAT_FCS;
2939 put_unaligned_le32(feat_mask, rsp->data);
2940 l2cap_send_cmd(conn, cmd->ident,
2941 L2CAP_INFO_RSP, sizeof(buf), buf);
2942 } else if (type == L2CAP_IT_FIXED_CHAN) {
2943 u8 buf[12];
2944 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
2945 rsp->type = cpu_to_le16(L2CAP_IT_FIXED_CHAN);
2946 rsp->result = cpu_to_le16(L2CAP_IR_SUCCESS);
2947 memcpy(buf + 4, l2cap_fixed_chan, 8);
2948 l2cap_send_cmd(conn, cmd->ident,
2949 L2CAP_INFO_RSP, sizeof(buf), buf);
2950 } else {
2951 struct l2cap_info_rsp rsp;
2952 rsp.type = cpu_to_le16(type);
2953 rsp.result = cpu_to_le16(L2CAP_IR_NOTSUPP);
2954 l2cap_send_cmd(conn, cmd->ident,
2955 L2CAP_INFO_RSP, sizeof(rsp), &rsp);
2956 }
2957
2958 return 0;
2959 }
2960
2961 static inline int l2cap_information_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2962 {
2963 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) data;
2964 u16 type, result;
2965
2966 type = __le16_to_cpu(rsp->type);
2967 result = __le16_to_cpu(rsp->result);
2968
2969 BT_DBG("type 0x%4.4x result 0x%2.2x", type, result);
2970
2971 del_timer(&conn->info_timer);
2972
2973 if (type == L2CAP_IT_FEAT_MASK) {
2974 conn->feat_mask = get_unaligned_le32(rsp->data);
2975
2976 if (conn->feat_mask & L2CAP_FEAT_FIXED_CHAN) {
2977 struct l2cap_info_req req;
2978 req.type = cpu_to_le16(L2CAP_IT_FIXED_CHAN);
2979
2980 conn->info_ident = l2cap_get_ident(conn);
2981
2982 l2cap_send_cmd(conn, conn->info_ident,
2983 L2CAP_INFO_REQ, sizeof(req), &req);
2984 } else {
2985 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
2986 conn->info_ident = 0;
2987
2988 l2cap_conn_start(conn);
2989 }
2990 } else if (type == L2CAP_IT_FIXED_CHAN) {
2991 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
2992 conn->info_ident = 0;
2993
2994 l2cap_conn_start(conn);
2995 }
2996
2997 return 0;
2998 }
2999
/* Process all signalling commands carried in one frame on the L2CAP
 * signalling channel (CID 0x0001).
 *
 * Each command is a 4-byte header (code, ident, little-endian length)
 * followed by its payload; multiple commands may be packed into one
 * frame. Commands with a bad length or a zero ident abort the loop;
 * handler errors trigger a Command Reject back to the peer. The skb is
 * consumed.
 */
static inline void l2cap_sig_channel(struct l2cap_conn *conn, struct sk_buff *skb)
{
	u8 *data = skb->data;
	int len = skb->len;
	struct l2cap_cmd_hdr cmd;
	int err = 0;

	/* Give raw sockets a copy of the signalling traffic first. */
	l2cap_raw_recv(conn, skb);

	while (len >= L2CAP_CMD_HDR_SIZE) {
		u16 cmd_len;
		memcpy(&cmd, data, L2CAP_CMD_HDR_SIZE);
		data += L2CAP_CMD_HDR_SIZE;
		len -= L2CAP_CMD_HDR_SIZE;

		cmd_len = le16_to_cpu(cmd.len);

		BT_DBG("code 0x%2.2x len %d id 0x%2.2x", cmd.code, cmd_len, cmd.ident);

		/* Length beyond frame or reserved ident 0: stop parsing. */
		if (cmd_len > len || !cmd.ident) {
			BT_DBG("corrupted command");
			break;
		}

		switch (cmd.code) {
		case L2CAP_COMMAND_REJ:
			l2cap_command_rej(conn, &cmd, data);
			break;

		case L2CAP_CONN_REQ:
			err = l2cap_connect_req(conn, &cmd, data);
			break;

		case L2CAP_CONN_RSP:
			err = l2cap_connect_rsp(conn, &cmd, data);
			break;

		case L2CAP_CONF_REQ:
			err = l2cap_config_req(conn, &cmd, cmd_len, data);
			break;

		case L2CAP_CONF_RSP:
			err = l2cap_config_rsp(conn, &cmd, data);
			break;

		case L2CAP_DISCONN_REQ:
			err = l2cap_disconnect_req(conn, &cmd, data);
			break;

		case L2CAP_DISCONN_RSP:
			err = l2cap_disconnect_rsp(conn, &cmd, data);
			break;

		case L2CAP_ECHO_REQ:
			/* Echo the request payload straight back. */
			l2cap_send_cmd(conn, cmd.ident, L2CAP_ECHO_RSP, cmd_len, data);
			break;

		case L2CAP_ECHO_RSP:
			break;

		case L2CAP_INFO_REQ:
			err = l2cap_information_req(conn, &cmd, data);
			break;

		case L2CAP_INFO_RSP:
			err = l2cap_information_rsp(conn, &cmd, data);
			break;

		default:
			BT_ERR("Unknown signaling command 0x%2.2x", cmd.code);
			err = -EINVAL;
			break;
		}

		if (err) {
			struct l2cap_cmd_rej rej;
			BT_DBG("error %d", err);

			/* FIXME: Map err to a valid reason */
			rej.reason = cpu_to_le16(0);
			l2cap_send_cmd(conn, cmd.ident, L2CAP_COMMAND_REJ, sizeof(rej), &rej);
		}

		/* Advance to the next packed command. */
		data += cmd_len;
		len -= cmd_len;
	}

	kfree_skb(skb);
}
3089
3090 static int l2cap_check_fcs(struct l2cap_pinfo *pi, struct sk_buff *skb)
3091 {
3092 u16 our_fcs, rcv_fcs;
3093 int hdr_size = L2CAP_HDR_SIZE + 2;
3094
3095 if (pi->fcs == L2CAP_FCS_CRC16) {
3096 skb_trim(skb, skb->len - 2);
3097 rcv_fcs = get_unaligned_le16(skb->data + skb->len);
3098 our_fcs = crc16(0, skb->data - hdr_size, skb->len + hdr_size);
3099
3100 if (our_fcs != rcv_fcs)
3101 return -EINVAL;
3102 }
3103 return 0;
3104 }
3105
3106 static void l2cap_add_to_srej_queue(struct sock *sk, struct sk_buff *skb, u8 tx_seq, u8 sar)
3107 {
3108 struct sk_buff *next_skb;
3109
3110 bt_cb(skb)->tx_seq = tx_seq;
3111 bt_cb(skb)->sar = sar;
3112
3113 next_skb = skb_peek(SREJ_QUEUE(sk));
3114 if (!next_skb) {
3115 __skb_queue_tail(SREJ_QUEUE(sk), skb);
3116 return;
3117 }
3118
3119 do {
3120 if (bt_cb(next_skb)->tx_seq > tx_seq) {
3121 __skb_queue_before(SREJ_QUEUE(sk), next_skb, skb);
3122 return;
3123 }
3124
3125 if (skb_queue_is_last(SREJ_QUEUE(sk), next_skb))
3126 break;
3127
3128 } while((next_skb = skb_queue_next(SREJ_QUEUE(sk), next_skb)));
3129
3130 __skb_queue_tail(SREJ_QUEUE(sk), skb);
3131 }
3132
3133 static int l2cap_sar_reassembly_sdu(struct sock *sk, struct sk_buff *skb, u16 control)
3134 {
3135 struct l2cap_pinfo *pi = l2cap_pi(sk);
3136 struct sk_buff *_skb;
3137 int err = -EINVAL;
3138
3139 switch (control & L2CAP_CTRL_SAR) {
3140 case L2CAP_SDU_UNSEGMENTED:
3141 if (pi->conn_state & L2CAP_CONN_SAR_SDU) {
3142 kfree_skb(pi->sdu);
3143 break;
3144 }
3145
3146 err = sock_queue_rcv_skb(sk, skb);
3147 if (!err)
3148 return 0;
3149
3150 break;
3151
3152 case L2CAP_SDU_START:
3153 if (pi->conn_state & L2CAP_CONN_SAR_SDU) {
3154 kfree_skb(pi->sdu);
3155 break;
3156 }
3157
3158 pi->sdu_len = get_unaligned_le16(skb->data);
3159 skb_pull(skb, 2);
3160
3161 pi->sdu = bt_skb_alloc(pi->sdu_len, GFP_ATOMIC);
3162 if (!pi->sdu) {
3163 err = -ENOMEM;
3164 break;
3165 }
3166
3167 memcpy(skb_put(pi->sdu, skb->len), skb->data, skb->len);
3168
3169 pi->conn_state |= L2CAP_CONN_SAR_SDU;
3170 pi->partial_sdu_len = skb->len;
3171 err = 0;
3172 break;
3173
3174 case L2CAP_SDU_CONTINUE:
3175 if (!(pi->conn_state & L2CAP_CONN_SAR_SDU))
3176 break;
3177
3178 memcpy(skb_put(pi->sdu, skb->len), skb->data, skb->len);
3179
3180 pi->partial_sdu_len += skb->len;
3181 if (pi->partial_sdu_len > pi->sdu_len)
3182 kfree_skb(pi->sdu);
3183 else
3184 err = 0;
3185
3186 break;
3187
3188 case L2CAP_SDU_END:
3189 if (!(pi->conn_state & L2CAP_CONN_SAR_SDU))
3190 break;
3191
3192 memcpy(skb_put(pi->sdu, skb->len), skb->data, skb->len);
3193
3194 pi->conn_state &= ~L2CAP_CONN_SAR_SDU;
3195 pi->partial_sdu_len += skb->len;
3196
3197 if (pi->partial_sdu_len == pi->sdu_len) {
3198 _skb = skb_clone(pi->sdu, GFP_ATOMIC);
3199 err = sock_queue_rcv_skb(sk, _skb);
3200 if (err < 0)
3201 kfree_skb(_skb);
3202 }
3203 kfree_skb(pi->sdu);
3204 err = 0;
3205
3206 break;
3207 }
3208
3209 kfree_skb(skb);
3210 return err;
3211 }
3212
3213 static void l2cap_check_srej_gap(struct sock *sk, u8 tx_seq)
3214 {
3215 struct sk_buff *skb;
3216 u16 control = 0;
3217
3218 while((skb = skb_peek(SREJ_QUEUE(sk)))) {
3219 if (bt_cb(skb)->tx_seq != tx_seq)
3220 break;
3221
3222 skb = skb_dequeue(SREJ_QUEUE(sk));
3223 control |= bt_cb(skb)->sar << L2CAP_CTRL_SAR_SHIFT;
3224 l2cap_sar_reassembly_sdu(sk, skb, control);
3225 l2cap_pi(sk)->buffer_seq_srej =
3226 (l2cap_pi(sk)->buffer_seq_srej + 1) % 64;
3227 tx_seq++;
3228 }
3229 }
3230
3231 static void l2cap_resend_srejframe(struct sock *sk, u8 tx_seq)
3232 {
3233 struct l2cap_pinfo *pi = l2cap_pi(sk);
3234 struct srej_list *l, *tmp;
3235 u16 control;
3236
3237 list_for_each_entry_safe(l,tmp, SREJ_LIST(sk), list) {
3238 if (l->tx_seq == tx_seq) {
3239 list_del(&l->list);
3240 kfree(l);
3241 return;
3242 }
3243 control = L2CAP_SUPER_SELECT_REJECT;
3244 control |= l->tx_seq << L2CAP_CTRL_REQSEQ_SHIFT;
3245 l2cap_send_sframe(pi, control);
3246 list_del(&l->list);
3247 list_add_tail(&l->list, SREJ_LIST(sk));
3248 }
3249 }
3250
3251 static void l2cap_send_srejframe(struct sock *sk, u8 tx_seq)
3252 {
3253 struct l2cap_pinfo *pi = l2cap_pi(sk);
3254 struct srej_list *new;
3255 u16 control;
3256
3257 while (tx_seq != pi->expected_tx_seq) {
3258 control = L2CAP_SUPER_SELECT_REJECT;
3259 control |= pi->expected_tx_seq << L2CAP_CTRL_REQSEQ_SHIFT;
3260 if (pi->conn_state & L2CAP_CONN_SEND_PBIT) {
3261 control |= L2CAP_CTRL_POLL;
3262 pi->conn_state &= ~L2CAP_CONN_SEND_PBIT;
3263 }
3264 l2cap_send_sframe(pi, control);
3265
3266 new = kzalloc(sizeof(struct srej_list), GFP_ATOMIC);
3267 new->tx_seq = pi->expected_tx_seq++;
3268 list_add_tail(&new->list, SREJ_LIST(sk));
3269 }
3270 pi->expected_tx_seq++;
3271 }
3272
/* Handle a received ERTM I-frame.
 *
 * In-sequence frames are reassembled and periodically acknowledged
 * with an RR. Out-of-sequence frames enter SREJ recovery: they are
 * parked in the sorted SREJ queue and SREJ S-frames are sent for each
 * missing sequence number; when the gap closes, the queued run is
 * delivered and normal operation resumes.
 *
 * Returns 0, or a negative error from reassembly.
 */
static inline int l2cap_data_channel_iframe(struct sock *sk, u16 rx_control, struct sk_buff *skb)
{
	struct l2cap_pinfo *pi = l2cap_pi(sk);
	u8 tx_seq = __get_txseq(rx_control);
	u16 tx_control = 0;
	u8 sar = rx_control >> L2CAP_CTRL_SAR_SHIFT;
	int err = 0;

	BT_DBG("sk %p rx_control 0x%4.4x len %d", sk, rx_control, skb->len);

	if (tx_seq == pi->expected_tx_seq)
		goto expected;

	/* Out-of-sequence frame while already in SREJ recovery. */
	if (pi->conn_state & L2CAP_CONN_SREJ_SENT) {
		struct srej_list *first;

		first = list_first_entry(SREJ_LIST(sk),
				struct srej_list, list);
		if (tx_seq == first->tx_seq) {
			/* This fills the oldest gap: flush the run of
			 * now-contiguous frames from the SREJ queue. */
			l2cap_add_to_srej_queue(sk, skb, tx_seq, sar);
			l2cap_check_srej_gap(sk, tx_seq);

			list_del(&first->list);
			kfree(first);

			/* All gaps filled: leave SREJ recovery. */
			if (list_empty(SREJ_LIST(sk))) {
				pi->buffer_seq = pi->buffer_seq_srej;
				pi->conn_state &= ~L2CAP_CONN_SREJ_SENT;
			}
		} else {
			struct srej_list *l;
			l2cap_add_to_srej_queue(sk, skb, tx_seq, sar);

			/* Frame answers a later outstanding SREJ:
			 * re-issue the SREJs still pending before it. */
			list_for_each_entry(l, SREJ_LIST(sk), list) {
				if (l->tx_seq == tx_seq) {
					l2cap_resend_srejframe(sk, tx_seq);
					return 0;
				}
			}
			/* A brand-new gap beyond the known ones. */
			l2cap_send_srejframe(sk, tx_seq);
		}
	} else {
		/* First out-of-sequence frame: enter SREJ recovery. */
		pi->conn_state |= L2CAP_CONN_SREJ_SENT;

		INIT_LIST_HEAD(SREJ_LIST(sk));
		pi->buffer_seq_srej = pi->buffer_seq;

		__skb_queue_head_init(SREJ_QUEUE(sk));
		l2cap_add_to_srej_queue(sk, skb, tx_seq, sar);

		/* First SREJ of the recovery carries the poll bit. */
		pi->conn_state |= L2CAP_CONN_SEND_PBIT;

		l2cap_send_srejframe(sk, tx_seq);
	}
	return 0;

expected:
	pi->expected_tx_seq = (pi->expected_tx_seq + 1) % 64;

	/* While recovering, even in-sequence frames are parked so the
	 * whole run is delivered in order once the gap closes. */
	if (pi->conn_state & L2CAP_CONN_SREJ_SENT) {
		l2cap_add_to_srej_queue(sk, skb, tx_seq, sar);
		return 0;
	}

	pi->buffer_seq = (pi->buffer_seq + 1) % 64;

	err = l2cap_sar_reassembly_sdu(sk, skb, rx_control);
	if (err < 0)
		return err;

	/* Acknowledge every L2CAP_DEFAULT_NUM_TO_ACK-th frame with RR. */
	pi->num_to_ack = (pi->num_to_ack + 1) % L2CAP_DEFAULT_NUM_TO_ACK;
	if (pi->num_to_ack == L2CAP_DEFAULT_NUM_TO_ACK - 1) {
		tx_control |= L2CAP_SUPER_RCV_READY;
		tx_control |= pi->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT;
		l2cap_send_sframe(pi, tx_control);
	}
	return 0;
}
3351
3352 static inline int l2cap_data_channel_sframe(struct sock *sk, u16 rx_control, struct sk_buff *skb)
3353 {
3354 struct l2cap_pinfo *pi = l2cap_pi(sk);
3355 u8 tx_seq = __get_reqseq(rx_control);
3356
3357 BT_DBG("sk %p rx_control 0x%4.4x len %d", sk, rx_control, skb->len);
3358
3359 switch (rx_control & L2CAP_CTRL_SUPERVISE) {
3360 case L2CAP_SUPER_RCV_READY:
3361 if (rx_control & L2CAP_CTRL_POLL) {
3362 u16 control = L2CAP_CTRL_FINAL;
3363 control |= L2CAP_SUPER_RCV_READY |
3364 (pi->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT);
3365 l2cap_send_sframe(l2cap_pi(sk), control);
3366 pi->conn_state &= ~L2CAP_CONN_REMOTE_BUSY;
3367
3368 } else if (rx_control & L2CAP_CTRL_FINAL) {
3369 pi->conn_state &= ~L2CAP_CONN_REMOTE_BUSY;
3370 pi->expected_ack_seq = tx_seq;
3371 l2cap_drop_acked_frames(sk);
3372
3373 if (!(pi->conn_state & L2CAP_CONN_WAIT_F))
3374 break;
3375
3376 pi->conn_state &= ~L2CAP_CONN_WAIT_F;
3377 del_timer(&pi->monitor_timer);
3378
3379 if (pi->unacked_frames > 0)
3380 __mod_retrans_timer();
3381 } else {
3382 pi->expected_ack_seq = tx_seq;
3383 l2cap_drop_acked_frames(sk);
3384
3385 if ((pi->conn_state & L2CAP_CONN_REMOTE_BUSY)
3386 && (pi->unacked_frames > 0))
3387 __mod_retrans_timer();
3388
3389 l2cap_ertm_send(sk);
3390 pi->conn_state &= ~L2CAP_CONN_REMOTE_BUSY;
3391 }
3392 break;
3393
3394 case L2CAP_SUPER_REJECT:
3395 pi->conn_state &= ~L2CAP_CONN_REMOTE_BUSY;
3396
3397 pi->expected_ack_seq = __get_reqseq(rx_control);
3398 l2cap_drop_acked_frames(sk);
3399
3400 sk->sk_send_head = TX_QUEUE(sk)->next;
3401 pi->next_tx_seq = pi->expected_ack_seq;
3402
3403 l2cap_ertm_send(sk);
3404
3405 break;
3406
3407 case L2CAP_SUPER_SELECT_REJECT:
3408 pi->conn_state &= ~L2CAP_CONN_REMOTE_BUSY;
3409
3410 if (rx_control & L2CAP_CTRL_POLL) {
3411 l2cap_retransmit_frame(sk, tx_seq);
3412 pi->expected_ack_seq = tx_seq;
3413 l2cap_drop_acked_frames(sk);
3414 l2cap_ertm_send(sk);
3415 if (pi->conn_state & L2CAP_CONN_WAIT_F) {
3416 pi->srej_save_reqseq = tx_seq;
3417 pi->conn_state |= L2CAP_CONN_SREJ_ACT;
3418 }
3419 } else if (rx_control & L2CAP_CTRL_FINAL) {
3420 if ((pi->conn_state & L2CAP_CONN_SREJ_ACT) &&
3421 pi->srej_save_reqseq == tx_seq)
3422 pi->srej_save_reqseq &= ~L2CAP_CONN_SREJ_ACT;
3423 else
3424 l2cap_retransmit_frame(sk, tx_seq);
3425 }
3426 else {
3427 l2cap_retransmit_frame(sk, tx_seq);
3428 if (pi->conn_state & L2CAP_CONN_WAIT_F) {
3429 pi->srej_save_reqseq = tx_seq;
3430 pi->conn_state |= L2CAP_CONN_SREJ_ACT;
3431 }
3432 }
3433 break;
3434
3435 case L2CAP_SUPER_RCV_NOT_READY:
3436 pi->conn_state |= L2CAP_CONN_REMOTE_BUSY;
3437 pi->expected_ack_seq = tx_seq;
3438 l2cap_drop_acked_frames(sk);
3439
3440 del_timer(&l2cap_pi(sk)->retrans_timer);
3441 if (rx_control & L2CAP_CTRL_POLL) {
3442 u16 control = L2CAP_CTRL_FINAL;
3443 l2cap_send_rr_or_rnr(l2cap_pi(sk), control);
3444 }
3445 break;
3446 }
3447
3448 return 0;
3449 }
3450
/* Deliver a data frame to the channel identified by 'cid'.
 *
 * Dispatches on the channel mode: Basic mode queues the payload
 * directly (dropping oversized frames); ERTM strips and validates the
 * control field and FCS, then routes I-frames and S-frames to their
 * handlers; Streaming mode validates similarly, tolerates sequence
 * jumps, and reassembles without retransmission.
 *
 * NOTE(review): l2cap_get_chan_by_scid() presumably returns the socket
 * locked — the 'done' path unlocks it. Consumes the skb on the drop
 * path; handlers own it otherwise. Always returns 0.
 */
static inline int l2cap_data_channel(struct l2cap_conn *conn, u16 cid, struct sk_buff *skb)
{
	struct sock *sk;
	struct l2cap_pinfo *pi;
	u16 control, len;
	u8 tx_seq;
	int err;

	sk = l2cap_get_chan_by_scid(&conn->chan_list, cid);
	if (!sk) {
		BT_DBG("unknown cid 0x%4.4x", cid);
		goto drop;
	}

	pi = l2cap_pi(sk);

	BT_DBG("sk %p, len %d", sk, skb->len);

	if (sk->sk_state != BT_CONNECTED)
		goto drop;

	switch (pi->mode) {
	case L2CAP_MODE_BASIC:
		/* If socket recv buffers overflows we drop data here
		 * which is *bad* because L2CAP has to be reliable.
		 * But we don't have any other choice. L2CAP doesn't
		 * provide flow control mechanism. */

		if (pi->imtu < skb->len)
			goto drop;

		if (!sock_queue_rcv_skb(sk, skb))
			goto done;
		break;

	case L2CAP_MODE_ERTM:
		control = get_unaligned_le16(skb->data);
		skb_pull(skb, 2);
		len = skb->len;

		/* SAR start frames carry a 2-byte SDU length field. */
		if (__is_sar_start(control))
			len -= 2;

		if (pi->fcs == L2CAP_FCS_CRC16)
			len -= 2;

		/*
		 * We can just drop the corrupted I-frame here.
		 * Receiver will miss it and start proper recovery
		 * procedures and ask retransmission.
		 */
		if (len > L2CAP_DEFAULT_MAX_PDU_SIZE)
			goto drop;

		if (l2cap_check_fcs(pi, skb))
			goto drop;

		if (__is_iframe(control))
			err = l2cap_data_channel_iframe(sk, control, skb);
		else
			err = l2cap_data_channel_sframe(sk, control, skb);

		if (!err)
			goto done;
		break;

	case L2CAP_MODE_STREAMING:
		control = get_unaligned_le16(skb->data);
		skb_pull(skb, 2);
		len = skb->len;

		if (__is_sar_start(control))
			len -= 2;

		if (pi->fcs == L2CAP_FCS_CRC16)
			len -= 2;

		/* Streaming mode carries I-frames only. */
		if (len > L2CAP_DEFAULT_MAX_PDU_SIZE || __is_sframe(control))
			goto drop;

		tx_seq = __get_txseq(control);

		/* No retransmission in streaming mode: accept sequence
		 * jumps and simply resynchronize. */
		if (pi->expected_tx_seq == tx_seq)
			pi->expected_tx_seq = (pi->expected_tx_seq + 1) % 64;
		else
			pi->expected_tx_seq = tx_seq + 1;

		err = l2cap_sar_reassembly_sdu(sk, skb, control);

		goto done;

	default:
		BT_DBG("sk %p: bad mode 0x%2.2x", sk, l2cap_pi(sk)->mode);
		break;
	}

drop:
	kfree_skb(skb);

done:
	if (sk)
		bh_unlock_sock(sk);

	return 0;
}
3559
3560 static inline int l2cap_conless_channel(struct l2cap_conn *conn, __le16 psm, struct sk_buff *skb)
3561 {
3562 struct sock *sk;
3563
3564 sk = l2cap_get_sock_by_psm(0, psm, conn->src);
3565 if (!sk)
3566 goto drop;
3567
3568 BT_DBG("sk %p, len %d", sk, skb->len);
3569
3570 if (sk->sk_state != BT_BOUND && sk->sk_state != BT_CONNECTED)
3571 goto drop;
3572
3573 if (l2cap_pi(sk)->imtu < skb->len)
3574 goto drop;
3575
3576 if (!sock_queue_rcv_skb(sk, skb))
3577 goto done;
3578
3579 drop:
3580 kfree_skb(skb);
3581
3582 done:
3583 if (sk)
3584 bh_unlock_sock(sk);
3585 return 0;
3586 }
3587
3588 static void l2cap_recv_frame(struct l2cap_conn *conn, struct sk_buff *skb)
3589 {
3590 struct l2cap_hdr *lh = (void *) skb->data;
3591 u16 cid, len;
3592 __le16 psm;
3593
3594 skb_pull(skb, L2CAP_HDR_SIZE);
3595 cid = __le16_to_cpu(lh->cid);
3596 len = __le16_to_cpu(lh->len);
3597
3598 if (len != skb->len) {
3599 kfree_skb(skb);
3600 return;
3601 }
3602
3603 BT_DBG("len %d, cid 0x%4.4x", len, cid);
3604
3605 switch (cid) {
3606 case L2CAP_CID_SIGNALING:
3607 l2cap_sig_channel(conn, skb);
3608 break;
3609
3610 case L2CAP_CID_CONN_LESS:
3611 psm = get_unaligned_le16(skb->data);
3612 skb_pull(skb, 2);
3613 l2cap_conless_channel(conn, psm, skb);
3614 break;
3615
3616 default:
3617 l2cap_data_channel(conn, cid, skb);
3618 break;
3619 }
3620 }
3621
3622 /* ---- L2CAP interface with lower layer (HCI) ---- */
3623
3624 static int l2cap_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
3625 {
3626 int exact = 0, lm1 = 0, lm2 = 0;
3627 register struct sock *sk;
3628 struct hlist_node *node;
3629
3630 if (type != ACL_LINK)
3631 return 0;
3632
3633 BT_DBG("hdev %s, bdaddr %s", hdev->name, batostr(bdaddr));
3634
3635 /* Find listening sockets and check their link_mode */
3636 read_lock(&l2cap_sk_list.lock);
3637 sk_for_each(sk, node, &l2cap_sk_list.head) {
3638 if (sk->sk_state != BT_LISTEN)
3639 continue;
3640
3641 if (!bacmp(&bt_sk(sk)->src, &hdev->bdaddr)) {
3642 lm1 |= HCI_LM_ACCEPT;
3643 if (l2cap_pi(sk)->role_switch)
3644 lm1 |= HCI_LM_MASTER;
3645 exact++;
3646 } else if (!bacmp(&bt_sk(sk)->src, BDADDR_ANY)) {
3647 lm2 |= HCI_LM_ACCEPT;
3648 if (l2cap_pi(sk)->role_switch)
3649 lm2 |= HCI_LM_MASTER;
3650 }
3651 }
3652 read_unlock(&l2cap_sk_list.lock);
3653
3654 return exact ? lm1 : lm2;
3655 }
3656
3657 static int l2cap_connect_cfm(struct hci_conn *hcon, u8 status)
3658 {
3659 struct l2cap_conn *conn;
3660
3661 BT_DBG("hcon %p bdaddr %s status %d", hcon, batostr(&hcon->dst), status);
3662
3663 if (hcon->type != ACL_LINK)
3664 return 0;
3665
3666 if (!status) {
3667 conn = l2cap_conn_add(hcon, status);
3668 if (conn)
3669 l2cap_conn_ready(conn);
3670 } else
3671 l2cap_conn_del(hcon, bt_err(status));
3672
3673 return 0;
3674 }
3675
3676 static int l2cap_disconn_ind(struct hci_conn *hcon)
3677 {
3678 struct l2cap_conn *conn = hcon->l2cap_data;
3679
3680 BT_DBG("hcon %p", hcon);
3681
3682 if (hcon->type != ACL_LINK || !conn)
3683 return 0x13;
3684
3685 return conn->disc_reason;
3686 }
3687
3688 static int l2cap_disconn_cfm(struct hci_conn *hcon, u8 reason)
3689 {
3690 BT_DBG("hcon %p reason %d", hcon, reason);
3691
3692 if (hcon->type != ACL_LINK)
3693 return 0;
3694
3695 l2cap_conn_del(hcon, bt_err(reason));
3696
3697 return 0;
3698 }
3699
3700 static inline void l2cap_check_encryption(struct sock *sk, u8 encrypt)
3701 {
3702 if (sk->sk_type != SOCK_SEQPACKET)
3703 return;
3704
3705 if (encrypt == 0x00) {
3706 if (l2cap_pi(sk)->sec_level == BT_SECURITY_MEDIUM) {
3707 l2cap_sock_clear_timer(sk);
3708 l2cap_sock_set_timer(sk, HZ * 5);
3709 } else if (l2cap_pi(sk)->sec_level == BT_SECURITY_HIGH)
3710 __l2cap_sock_close(sk, ECONNREFUSED);
3711 } else {
3712 if (l2cap_pi(sk)->sec_level == BT_SECURITY_MEDIUM)
3713 l2cap_sock_clear_timer(sk);
3714 }
3715 }
3716
/* HCI callback: authentication/encryption procedure on the link finished
 * with the given status. Walk every channel on the connection and advance
 * its state machine accordingly: established channels re-check encryption,
 * outgoing channels waiting on security send their Connect Request, and
 * incoming channels waiting on security answer with a Connect Response.
 *
 * Called in softirq context; takes the channel-list read lock and each
 * socket's bh lock in turn. */
static int l2cap_security_cfm(struct hci_conn *hcon, u8 status, u8 encrypt)
{
	struct l2cap_chan_list *l;
	struct l2cap_conn *conn = hcon->l2cap_data;
	struct sock *sk;

	/* No L2CAP state on this link — nothing to update */
	if (!conn)
		return 0;

	l = &conn->chan_list;

	BT_DBG("conn %p", conn);

	read_lock(&l->lock);

	for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
		bh_lock_sock(sk);

		/* Channel still waiting for the ACL connection itself;
		 * this security result is not for it. */
		if (l2cap_pi(sk)->conf_state & L2CAP_CONF_CONNECT_PEND) {
			bh_unlock_sock(sk);
			continue;
		}

		/* Already up (or configuring): just re-evaluate whether the
		 * new encryption state satisfies the channel's security level. */
		if (!status && (sk->sk_state == BT_CONNECTED ||
						sk->sk_state == BT_CONFIG)) {
			l2cap_check_encryption(sk, encrypt);
			bh_unlock_sock(sk);
			continue;
		}

		if (sk->sk_state == BT_CONNECT) {
			/* Outgoing channel: security done, now send the
			 * deferred L2CAP Connect Request. */
			if (!status) {
				struct l2cap_conn_req req;
				req.scid = cpu_to_le16(l2cap_pi(sk)->scid);
				req.psm = l2cap_pi(sk)->psm;

				l2cap_pi(sk)->ident = l2cap_get_ident(conn);

				l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
					L2CAP_CONN_REQ, sizeof(req), &req);
			} else {
				/* Security failed: arm a short timer so the
				 * channel is torn down from timer context. */
				l2cap_sock_clear_timer(sk);
				l2cap_sock_set_timer(sk, HZ / 10);
			}
		} else if (sk->sk_state == BT_CONNECT2) {
			/* Incoming channel that was held pending security:
			 * answer the remote's Connect Request now. */
			struct l2cap_conn_rsp rsp;
			__u16 result;

			if (!status) {
				sk->sk_state = BT_CONFIG;
				result = L2CAP_CR_SUCCESS;
			} else {
				/* Reject: report "security block" and schedule
				 * disconnect cleanup via a short timer. */
				sk->sk_state = BT_DISCONN;
				l2cap_sock_set_timer(sk, HZ / 10);
				result = L2CAP_CR_SEC_BLOCK;
			}

			rsp.scid = cpu_to_le16(l2cap_pi(sk)->dcid);
			rsp.dcid = cpu_to_le16(l2cap_pi(sk)->scid);
			rsp.result = cpu_to_le16(result);
			rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
			l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
					L2CAP_CONN_RSP, sizeof(rsp), &rsp);
		}

		bh_unlock_sock(sk);
	}

	read_unlock(&l->lock);

	return 0;
}
3789
/* HCI callback: an ACL data packet arrived for this link. Reassemble
 * L2CAP frames that were fragmented across multiple ACL packets
 * (ACL_START carries the L2CAP header; continuation packets carry the
 * rest) and hand each complete frame to l2cap_recv_frame().
 *
 * Ownership: the incoming skb is always consumed here — either passed
 * whole to l2cap_recv_frame(), or copied into conn->rx_skb and freed
 * at the drop label. The fall-through into "drop" after a successful
 * copy is therefore intentional, not a bug. */
static int l2cap_recv_acldata(struct hci_conn *hcon, struct sk_buff *skb, u16 flags)
{
	struct l2cap_conn *conn = hcon->l2cap_data;

	/* Lazily create connection state for links set up elsewhere */
	if (!conn && !(conn = l2cap_conn_add(hcon, 0)))
		goto drop;

	BT_DBG("conn %p len %d flags 0x%x", conn, skb->len, flags);

	if (flags & ACL_START) {
		struct l2cap_hdr *hdr;
		int len;

		/* A start packet while reassembly is in progress means the
		 * previous frame was truncated — discard it and mark the
		 * connection unreliable. */
		if (conn->rx_len) {
			BT_ERR("Unexpected start frame (len %d)", skb->len);
			kfree_skb(conn->rx_skb);
			conn->rx_skb = NULL;
			conn->rx_len = 0;
			l2cap_conn_unreliable(conn, ECOMM);
		}

		/* Need at least the 16-bit length field of the L2CAP header */
		if (skb->len < 2) {
			BT_ERR("Frame is too short (len %d)", skb->len);
			l2cap_conn_unreliable(conn, ECOMM);
			goto drop;
		}

		hdr = (struct l2cap_hdr *) skb->data;
		/* Total frame size = payload length + L2CAP header */
		len = __le16_to_cpu(hdr->len) + L2CAP_HDR_SIZE;

		if (len == skb->len) {
			/* Complete frame received */
			l2cap_recv_frame(conn, skb);
			return 0;
		}

		BT_DBG("Start: total len %d, frag len %d", len, skb->len);

		if (skb->len > len) {
			BT_ERR("Frame is too long (len %d, expected len %d)",
				skb->len, len);
			l2cap_conn_unreliable(conn, ECOMM);
			goto drop;
		}

		/* Allocate skb for the complete frame (with header) */
		conn->rx_skb = bt_skb_alloc(len, GFP_ATOMIC);
		if (!conn->rx_skb)
			goto drop;

		skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
				skb->len);
		/* Bytes still expected in continuation packets */
		conn->rx_len = len - skb->len;
	} else {
		BT_DBG("Cont: frag len %d (expecting %d)", skb->len, conn->rx_len);

		/* Continuation without a pending start frame */
		if (!conn->rx_len) {
			BT_ERR("Unexpected continuation frame (len %d)", skb->len);
			l2cap_conn_unreliable(conn, ECOMM);
			goto drop;
		}

		/* Fragment would overflow the declared frame length —
		 * abandon the whole reassembly. */
		if (skb->len > conn->rx_len) {
			BT_ERR("Fragment is too long (len %d, expected %d)",
				skb->len, conn->rx_len);
			kfree_skb(conn->rx_skb);
			conn->rx_skb = NULL;
			conn->rx_len = 0;
			l2cap_conn_unreliable(conn, ECOMM);
			goto drop;
		}

		skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
				skb->len);
		conn->rx_len -= skb->len;

		if (!conn->rx_len) {
			/* Complete frame received */
			l2cap_recv_frame(conn, conn->rx_skb);
			conn->rx_skb = NULL;
		}
	}

drop:
	/* Fragment data (if any) was copied above; free the original skb */
	kfree_skb(skb);
	return 0;
}
3877
/* sysfs show method: dump one line per L2CAP socket (src/dst address,
 * state, PSM, CIDs, MTUs, security level) into the caller's buffer.
 *
 * NOTE(review): sprintf writes are not bounded against the buffer size.
 * sysfs show buffers are conventionally one page; with many sockets this
 * could overrun — verify, or bound with snprintf/seq_file as later
 * kernels do. */
static ssize_t l2cap_sysfs_show(struct class *dev, char *buf)
{
	struct sock *sk;
	struct hlist_node *node;
	char *str = buf;

	read_lock_bh(&l2cap_sk_list.lock);

	sk_for_each(sk, node, &l2cap_sk_list.head) {
		struct l2cap_pinfo *pi = l2cap_pi(sk);

		str += sprintf(str, "%s %s %d %d 0x%4.4x 0x%4.4x %d %d %d\n",
				batostr(&bt_sk(sk)->src), batostr(&bt_sk(sk)->dst),
				sk->sk_state, __le16_to_cpu(pi->psm), pi->scid,
				pi->dcid, pi->imtu, pi->omtu, pi->sec_level);
	}

	read_unlock_bh(&l2cap_sk_list.lock);

	/* Number of bytes written */
	return str - buf;
}
3899
/* Read-only "l2cap" class attribute backed by l2cap_sysfs_show() */
static CLASS_ATTR(l2cap, S_IRUGO, l2cap_sysfs_show, NULL);
3901
/* Socket operations for PF_BLUETOOTH/BTPROTO_L2CAP sockets; unsupported
 * operations (mmap, socketpair) use the sock_no_* stubs. */
static const struct proto_ops l2cap_sock_ops = {
	.family		= PF_BLUETOOTH,
	.owner		= THIS_MODULE,
	.release	= l2cap_sock_release,
	.bind		= l2cap_sock_bind,
	.connect	= l2cap_sock_connect,
	.listen		= l2cap_sock_listen,
	.accept		= l2cap_sock_accept,
	.getname	= l2cap_sock_getname,
	.sendmsg	= l2cap_sock_sendmsg,
	.recvmsg	= l2cap_sock_recvmsg,
	.poll		= bt_sock_poll,
	.ioctl		= bt_sock_ioctl,
	.mmap		= sock_no_mmap,
	.socketpair	= sock_no_socketpair,
	.shutdown	= l2cap_sock_shutdown,
	.setsockopt	= l2cap_sock_setsockopt,
	.getsockopt	= l2cap_sock_getsockopt
};
3921
/* Registered with the Bluetooth socket layer so socket(PF_BLUETOOTH,
 * ..., BTPROTO_L2CAP) dispatches to l2cap_sock_create(). */
static struct net_proto_family l2cap_sock_family_ops = {
	.family	= PF_BLUETOOTH,
	.owner	= THIS_MODULE,
	.create	= l2cap_sock_create,
};
3927
/* HCI protocol hooks: the HCI core calls these for connection events,
 * security results and inbound ACL data on links carrying L2CAP. */
static struct hci_proto l2cap_hci_proto = {
	.name		= "L2CAP",
	.id		= HCI_PROTO_L2CAP,
	.connect_ind	= l2cap_connect_ind,
	.connect_cfm	= l2cap_connect_cfm,
	.disconn_ind	= l2cap_disconn_ind,
	.disconn_cfm	= l2cap_disconn_cfm,
	.security_cfm	= l2cap_security_cfm,
	.recv_acldata	= l2cap_recv_acldata
};
3938
3939 static int __init l2cap_init(void)
3940 {
3941 int err;
3942
3943 err = proto_register(&l2cap_proto, 0);
3944 if (err < 0)
3945 return err;
3946
3947 err = bt_sock_register(BTPROTO_L2CAP, &l2cap_sock_family_ops);
3948 if (err < 0) {
3949 BT_ERR("L2CAP socket registration failed");
3950 goto error;
3951 }
3952
3953 err = hci_register_proto(&l2cap_hci_proto);
3954 if (err < 0) {
3955 BT_ERR("L2CAP protocol registration failed");
3956 bt_sock_unregister(BTPROTO_L2CAP);
3957 goto error;
3958 }
3959
3960 if (class_create_file(bt_class, &class_attr_l2cap) < 0)
3961 BT_ERR("Failed to create L2CAP info file");
3962
3963 BT_INFO("L2CAP ver %s", VERSION);
3964 BT_INFO("L2CAP socket layer initialized");
3965
3966 return 0;
3967
3968 error:
3969 proto_unregister(&l2cap_proto);
3970 return err;
3971 }
3972
3973 static void __exit l2cap_exit(void)
3974 {
3975 class_remove_file(bt_class, &class_attr_l2cap);
3976
3977 if (bt_sock_unregister(BTPROTO_L2CAP) < 0)
3978 BT_ERR("L2CAP socket unregistration failed");
3979
3980 if (hci_unregister_proto(&l2cap_hci_proto) < 0)
3981 BT_ERR("L2CAP protocol unregistration failed");
3982
3983 proto_unregister(&l2cap_proto);
3984 }
3985
/* Dummy exported symbol: modules that only open L2CAP sockets (and use
 * no other symbol from this module) reference it to get automatic
 * module loading via the symbol dependency mechanism. */
void l2cap_load(void)
{
}
EXPORT_SYMBOL(l2cap_load);
3994
module_init(l2cap_init);
module_exit(l2cap_exit);

/* Runtime-tunable switch for enhanced retransmission mode (0644 allows
 * root to toggle it via /sys/module parameters). */
module_param(enable_ertm, bool, 0644);
MODULE_PARM_DESC(enable_ertm, "Enable enhanced retransmission mode");

MODULE_AUTHOR("Marcel Holtmann <marcel@holtmann.org>");
MODULE_DESCRIPTION("Bluetooth L2CAP ver " VERSION);
MODULE_VERSION(VERSION);
MODULE_LICENSE("GPL");
/* Auto-load when the Bluetooth core requests protocol 0 (BTPROTO_L2CAP) */
MODULE_ALIAS("bt-proto-0");