/*
   BlueZ - Bluetooth protocol stack for Linux
   Copyright (C) 2000-2001 Qualcomm Incorporated

   Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/

/* Bluetooth address family and sockets. */

#include <linux/module.h>
#include <asm/ioctls.h>

#include <net/bluetooth/bluetooth.h>
#include <linux/proc_fs.h>

#define VERSION "2.16"

/* Bluetooth sockets */
#define BT_MAX_PROTO	8
static const struct net_proto_family *bt_proto[BT_MAX_PROTO];
static DEFINE_RWLOCK(bt_proto_lock);

static struct lock_class_key bt_lock_key[BT_MAX_PROTO];
static const char *const bt_key_strings[BT_MAX_PROTO] = {
	"sk_lock-AF_BLUETOOTH-BTPROTO_L2CAP",
	"sk_lock-AF_BLUETOOTH-BTPROTO_HCI",
	"sk_lock-AF_BLUETOOTH-BTPROTO_SCO",
	"sk_lock-AF_BLUETOOTH-BTPROTO_RFCOMM",
	"sk_lock-AF_BLUETOOTH-BTPROTO_BNEP",
	"sk_lock-AF_BLUETOOTH-BTPROTO_CMTP",
	"sk_lock-AF_BLUETOOTH-BTPROTO_HIDP",
	"sk_lock-AF_BLUETOOTH-BTPROTO_AVDTP",
};

static struct lock_class_key bt_slock_key[BT_MAX_PROTO];
static const char *const bt_slock_key_strings[BT_MAX_PROTO] = {
	"slock-AF_BLUETOOTH-BTPROTO_L2CAP",
	"slock-AF_BLUETOOTH-BTPROTO_HCI",
	"slock-AF_BLUETOOTH-BTPROTO_SCO",
	"slock-AF_BLUETOOTH-BTPROTO_RFCOMM",
	"slock-AF_BLUETOOTH-BTPROTO_BNEP",
	"slock-AF_BLUETOOTH-BTPROTO_CMTP",
	"slock-AF_BLUETOOTH-BTPROTO_HIDP",
	"slock-AF_BLUETOOTH-BTPROTO_AVDTP",
};

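/* Re-key the socket's lock and slock classes so that lockdep tracks each
 * Bluetooth protocol separately instead of lumping every AF_BLUETOOTH
 * socket into a single class.
 */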
void bt_sock_reclassify_lock(struct sock *sk, int proto)
{
	BUG_ON(!sk);
	BUG_ON(sock_owned_by_user(sk));

	sock_lock_init_class_and_name(sk,
			bt_slock_key_strings[proto], &bt_slock_key[proto],
			bt_key_strings[proto], &bt_lock_key[proto]);
}
EXPORT_SYMBOL(bt_sock_reclassify_lock);

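/* Register a protocol's net_proto_family ops in the bt_proto table so that
 * bt_sock_create() can dispatch socket(AF_BLUETOOTH, ..., proto) calls to
 * it. bt_sock_unregister() below removes the entry again.
 */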
int bt_sock_register(int proto, const struct net_proto_family *ops)
{
	int err = 0;

	if (proto < 0 || proto >= BT_MAX_PROTO)
		return -EINVAL;

	write_lock(&bt_proto_lock);

	if (bt_proto[proto])
		err = -EEXIST;
	else
		bt_proto[proto] = ops;

	write_unlock(&bt_proto_lock);

	return err;
}
EXPORT_SYMBOL(bt_sock_register);

int bt_sock_unregister(int proto)
{
	int err = 0;

	if (proto < 0 || proto >= BT_MAX_PROTO)
		return -EINVAL;

	write_lock(&bt_proto_lock);

	if (!bt_proto[proto])
		err = -ENOENT;
	else
		bt_proto[proto] = NULL;

	write_unlock(&bt_proto_lock);

	return err;
}
EXPORT_SYMBOL(bt_sock_unregister);

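/* PF_BLUETOOTH socket creation: autoload the protocol module via its
 * "bt-proto-%d" alias if needed, then hand creation off to the registered
 * protocol while holding a reference on its module.
 */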
static int bt_sock_create(struct net *net, struct socket *sock, int proto,
			  int kern)
{
	int err;

	if (net != &init_net)
		return -EAFNOSUPPORT;

	if (proto < 0 || proto >= BT_MAX_PROTO)
		return -EINVAL;

	if (!bt_proto[proto])
		request_module("bt-proto-%d", proto);

	err = -EPROTONOSUPPORT;

	read_lock(&bt_proto_lock);

	if (bt_proto[proto] && try_module_get(bt_proto[proto]->owner)) {
		err = bt_proto[proto]->create(net, sock, proto, kern);
		if (!err)
			bt_sock_reclassify_lock(sock->sk, proto);
		module_put(bt_proto[proto]->owner);
	}

	read_unlock(&bt_proto_lock);

	return err;
}

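/* Add or remove a socket on a protocol's global socket list, used by the
 * procfs dumps below. The list's rwlock protects concurrent readers.
 */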
void bt_sock_link(struct bt_sock_list *l, struct sock *sk)
{
	write_lock(&l->lock);
	sk_add_node(sk, &l->head);
	write_unlock(&l->lock);
}
EXPORT_SYMBOL(bt_sock_link);

void bt_sock_unlink(struct bt_sock_list *l, struct sock *sk)
{
	write_lock(&l->lock);
	sk_del_node_init(sk);
	write_unlock(&l->lock);
}
EXPORT_SYMBOL(bt_sock_unlink);

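/* Accept queue handling: child sockets created for incoming connections
 * are parked on the parent's accept_q (holding a reference) until
 * userspace accepts them or they are torn down.
 */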
void bt_accept_enqueue(struct sock *parent, struct sock *sk)
{
	BT_DBG("parent %p, sk %p", parent, sk);

	sock_hold(sk);
	list_add_tail(&bt_sk(sk)->accept_q, &bt_sk(parent)->accept_q);
	bt_sk(sk)->parent = parent;
	parent->sk_ack_backlog++;
}
EXPORT_SYMBOL(bt_accept_enqueue);

void bt_accept_unlink(struct sock *sk)
{
	BT_DBG("sk %p state %d", sk, sk->sk_state);

	list_del_init(&bt_sk(sk)->accept_q);
	bt_sk(sk)->parent->sk_ack_backlog--;
	bt_sk(sk)->parent = NULL;
	sock_put(sk);
}
EXPORT_SYMBOL(bt_accept_unlink);

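/* Pick the first child socket that is ready to be accepted, grafting it
 * onto newsock. With BT_SK_DEFER_SETUP set on the parent, children that
 * have not yet completed setup are handed out as well; when newsock is
 * NULL every child is unlinked, and closed children are dropped along
 * the way.
 */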
struct sock *bt_accept_dequeue(struct sock *parent, struct socket *newsock)
{
	struct list_head *p, *n;
	struct sock *sk;

	BT_DBG("parent %p", parent);

	list_for_each_safe(p, n, &bt_sk(parent)->accept_q) {
		sk = (struct sock *) list_entry(p, struct bt_sock, accept_q);

		lock_sock(sk);

		/* FIXME: Is this check still needed */
		if (sk->sk_state == BT_CLOSED) {
			release_sock(sk);
			bt_accept_unlink(sk);
			continue;
		}

		if (sk->sk_state == BT_CONNECTED || !newsock ||
		    test_bit(BT_SK_DEFER_SETUP, &bt_sk(parent)->flags)) {
			bt_accept_unlink(sk);
			if (newsock)
				sock_graft(sk, newsock);

			release_sock(sk);
			return sk;
		}

		release_sock(sk);
	}

	return NULL;
}
EXPORT_SYMBOL(bt_accept_dequeue);

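/* Generic datagram recvmsg: pull one skb off the receive queue, copy at
 * most len bytes to userspace (setting MSG_TRUNC if the datagram was
 * larger) and attach timestamp/drop control data.
 */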
int bt_sock_recvmsg(struct kiocb *iocb, struct socket *sock,
		    struct msghdr *msg, size_t len, int flags)
{
	int noblock = flags & MSG_DONTWAIT;
	struct sock *sk = sock->sk;
	struct sk_buff *skb;
	size_t copied;
	int err;

	BT_DBG("sock %p sk %p len %zu", sock, sk, len);

	if (flags & (MSG_OOB))
		return -EOPNOTSUPP;

	msg->msg_namelen = 0;

	skb = skb_recv_datagram(sk, flags, noblock, &err);
	if (!skb) {
		if (sk->sk_shutdown & RCV_SHUTDOWN)
			return 0;
		return err;
	}

	copied = skb->len;
	if (len < copied) {
		msg->msg_flags |= MSG_TRUNC;
		copied = len;
	}

	skb_reset_transport_header(skb);
	err = skb_copy_datagram_iovec(skb, 0, msg->msg_iov, copied);
	if (err == 0)
		sock_recv_ts_and_drops(msg, sk, skb);

	skb_free_datagram(sk, skb);

	return err ? : copied;
}
EXPORT_SYMBOL(bt_sock_recvmsg);

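/* Sleep until the receive queue is non-empty, an error or shutdown is
 * signalled, a signal arrives or the timeout expires. Called with the
 * socket locked; the lock is dropped across the schedule.
 */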
static long bt_sock_data_wait(struct sock *sk, long timeo)
{
	DECLARE_WAITQUEUE(wait, current);

	add_wait_queue(sk_sleep(sk), &wait);
	for (;;) {
		set_current_state(TASK_INTERRUPTIBLE);

		if (!skb_queue_empty(&sk->sk_receive_queue))
			break;

		if (sk->sk_err || (sk->sk_shutdown & RCV_SHUTDOWN))
			break;

		if (signal_pending(current) || !timeo)
			break;

		set_bit(SOCK_ASYNC_WAITDATA, &sk->sk_socket->flags);
		release_sock(sk);
		timeo = schedule_timeout(timeo);
		lock_sock(sk);
		clear_bit(SOCK_ASYNC_WAITDATA, &sk->sk_socket->flags);
	}

	__set_current_state(TASK_RUNNING);
	remove_wait_queue(sk_sleep(sk), &wait);
	return timeo;
}

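/* Stream-style recvmsg: keep copying from queued skbs until size bytes,
 * the SO_RCVLOWAT target or an error/timeout is reached. Unless MSG_PEEK
 * is set, consumed bytes are pulled out of the skb (walking fragments if
 * necessary) and fully drained skbs are freed; otherwise the skb is put
 * back on the queue.
 */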
int bt_sock_stream_recvmsg(struct kiocb *iocb, struct socket *sock,
			   struct msghdr *msg, size_t size, int flags)
{
	struct sock *sk = sock->sk;
	int err = 0;
	size_t target, copied = 0;
	long timeo;

	if (flags & MSG_OOB)
		return -EOPNOTSUPP;

	msg->msg_namelen = 0;

	BT_DBG("sk %p size %zu", sk, size);

	lock_sock(sk);

	target = sock_rcvlowat(sk, flags & MSG_WAITALL, size);
	timeo = sock_rcvtimeo(sk, flags & MSG_DONTWAIT);

	do {
		struct sk_buff *skb;
		int chunk;

		skb = skb_dequeue(&sk->sk_receive_queue);
		if (!skb) {
			if (copied >= target)
				break;

			err = sock_error(sk);
			if (err)
				break;
			if (sk->sk_shutdown & RCV_SHUTDOWN)
				break;

			err = -EAGAIN;
			if (!timeo)
				break;

			timeo = bt_sock_data_wait(sk, timeo);

			if (signal_pending(current)) {
				err = sock_intr_errno(timeo);
				goto out;
			}
			continue;
		}

		chunk = min_t(unsigned int, skb->len, size);
		if (skb_copy_datagram_iovec(skb, 0, msg->msg_iov, chunk)) {
			skb_queue_head(&sk->sk_receive_queue, skb);
			if (!copied)
				copied = -EFAULT;
			break;
		}
		copied += chunk;
		size -= chunk;

		sock_recv_ts_and_drops(msg, sk, skb);

		if (!(flags & MSG_PEEK)) {
			int skb_len = skb_headlen(skb);

			if (chunk <= skb_len) {
				__skb_pull(skb, chunk);
			} else {
				struct sk_buff *frag;

				__skb_pull(skb, skb_len);
				chunk -= skb_len;

				skb_walk_frags(skb, frag) {
					if (chunk <= frag->len) {
						/* Pulling partial data */
						skb->len -= chunk;
						skb->data_len -= chunk;
						__skb_pull(frag, chunk);
						break;
					} else if (frag->len) {
						/* Pulling all frag data */
						chunk -= frag->len;
						skb->len -= frag->len;
						skb->data_len -= frag->len;
						__skb_pull(frag, frag->len);
					}
				}
			}

			if (skb->len) {
				skb_queue_head(&sk->sk_receive_queue, skb);
				break;
			}
			kfree_skb(skb);

		} else {
			/* put message back and return */
			skb_queue_head(&sk->sk_receive_queue, skb);
			break;
		}
	} while (size);

out:
	release_sock(sk);
	return copied ? : err;
}
EXPORT_SYMBOL(bt_sock_stream_recvmsg);

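/* poll() support. For listening sockets, readability means a child on the
 * accept queue is ready (BT_CONNECTED, or BT_CONNECT2 when
 * BT_SK_DEFER_SETUP is set); otherwise the usual receive-queue, shutdown
 * and writability checks apply.
 */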
static inline unsigned int bt_accept_poll(struct sock *parent)
{
	struct list_head *p, *n;
	struct sock *sk;

	list_for_each_safe(p, n, &bt_sk(parent)->accept_q) {
		sk = (struct sock *) list_entry(p, struct bt_sock, accept_q);
		if (sk->sk_state == BT_CONNECTED ||
		    (test_bit(BT_SK_DEFER_SETUP, &bt_sk(parent)->flags) &&
		     sk->sk_state == BT_CONNECT2))
			return POLLIN | POLLRDNORM;
	}

	return 0;
}

unsigned int bt_sock_poll(struct file *file, struct socket *sock,
			  poll_table *wait)
{
	struct sock *sk = sock->sk;
	unsigned int mask = 0;

	BT_DBG("sock %p, sk %p", sock, sk);

	poll_wait(file, sk_sleep(sk), wait);

	if (sk->sk_state == BT_LISTEN)
		return bt_accept_poll(sk);

	if (sk->sk_err || !skb_queue_empty(&sk->sk_error_queue))
		mask |= POLLERR |
			(sock_flag(sk, SOCK_SELECT_ERR_QUEUE) ? POLLPRI : 0);

	if (sk->sk_shutdown & RCV_SHUTDOWN)
		mask |= POLLRDHUP | POLLIN | POLLRDNORM;

	if (sk->sk_shutdown == SHUTDOWN_MASK)
		mask |= POLLHUP;

	if (!skb_queue_empty(&sk->sk_receive_queue))
		mask |= POLLIN | POLLRDNORM;

	if (sk->sk_state == BT_CLOSED)
		mask |= POLLHUP;

	if (sk->sk_state == BT_CONNECT ||
	    sk->sk_state == BT_CONNECT2 ||
	    sk->sk_state == BT_CONFIG)
		return mask;

	if (!test_bit(BT_SK_SUSPEND, &bt_sk(sk)->flags) && sock_writeable(sk))
		mask |= POLLOUT | POLLWRNORM | POLLWRBAND;
	else
		set_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags);

	return mask;
}
EXPORT_SYMBOL(bt_sock_poll);

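/* Common socket ioctls: TIOCOUTQ reports the free space left in the send
 * buffer, TIOCINQ the size of the next packet on the receive queue, and
 * SIOCGSTAMP/SIOCGSTAMPNS the last packet timestamp. Everything else is
 * left to the protocol.
 */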
int bt_sock_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
{
	struct sock *sk = sock->sk;
	struct sk_buff *skb;
	long amount;
	int err;

	BT_DBG("sk %p cmd %x arg %lx", sk, cmd, arg);

	switch (cmd) {
	case TIOCOUTQ:
		if (sk->sk_state == BT_LISTEN)
			return -EINVAL;

		amount = sk->sk_sndbuf - sk_wmem_alloc_get(sk);
		if (amount < 0)
			amount = 0;
		err = put_user(amount, (int __user *) arg);
		break;

	case TIOCINQ:
		if (sk->sk_state == BT_LISTEN)
			return -EINVAL;

		lock_sock(sk);
		skb = skb_peek(&sk->sk_receive_queue);
		amount = skb ? skb->len : 0;
		release_sock(sk);
		err = put_user(amount, (int __user *) arg);
		break;

	case SIOCGSTAMP:
		err = sock_get_timestamp(sk, (struct timeval __user *) arg);
		break;

	case SIOCGSTAMPNS:
		err = sock_get_timestampns(sk, (struct timespec __user *) arg);
		break;

	default:
		err = -ENOIOCTLCMD;
		break;
	}

	return err;
}
EXPORT_SYMBOL(bt_sock_ioctl);

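/* Sleep, with the socket lock dropped around each schedule, until
 * sk->sk_state reaches the requested state. Returns -EINPROGRESS on
 * timeout, the signal errno if interrupted, or any pending socket error.
 */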
int bt_sock_wait_state(struct sock *sk, int state, unsigned long timeo)
{
	DECLARE_WAITQUEUE(wait, current);
	int err = 0;

	BT_DBG("sk %p", sk);

	add_wait_queue(sk_sleep(sk), &wait);
	set_current_state(TASK_INTERRUPTIBLE);
	while (sk->sk_state != state) {
		if (!timeo) {
			err = -EINPROGRESS;
			break;
		}

		if (signal_pending(current)) {
			err = sock_intr_errno(timeo);
			break;
		}

		release_sock(sk);
		timeo = schedule_timeout(timeo);
		lock_sock(sk);
		set_current_state(TASK_INTERRUPTIBLE);

		err = sock_error(sk);
		if (err)
			break;
	}
	__set_current_state(TASK_RUNNING);
	remove_wait_queue(sk_sleep(sk), &wait);
	return err;
}
EXPORT_SYMBOL(bt_sock_wait_state);

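/* /proc/net support: each protocol can register a seq_file that walks its
 * bt_sock_list under the list's read lock and prints one line per socket,
 * optionally extended by a protocol-specific show callback.
 */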
#ifdef CONFIG_PROC_FS
struct bt_seq_state {
	struct bt_sock_list *l;
};

static void *bt_seq_start(struct seq_file *seq, loff_t *pos)
	__acquires(seq->private->l->lock)
{
	struct bt_seq_state *s = seq->private;
	struct bt_sock_list *l = s->l;

	read_lock(&l->lock);
	return seq_hlist_start_head(&l->head, *pos);
}

static void *bt_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	struct bt_seq_state *s = seq->private;
	struct bt_sock_list *l = s->l;

	return seq_hlist_next(v, &l->head, pos);
}

static void bt_seq_stop(struct seq_file *seq, void *v)
	__releases(seq->private->l->lock)
{
	struct bt_seq_state *s = seq->private;
	struct bt_sock_list *l = s->l;

	read_unlock(&l->lock);
}

static int bt_seq_show(struct seq_file *seq, void *v)
{
	struct bt_seq_state *s = seq->private;
	struct bt_sock_list *l = s->l;

	if (v == SEQ_START_TOKEN) {
		seq_puts(seq, "sk               RefCnt Rmem   Wmem   User   Inode  Src Dst Parent");

		if (l->custom_seq_show) {
			seq_putc(seq, ' ');
			l->custom_seq_show(seq, v);
		}

		seq_putc(seq, '\n');
	} else {
		struct sock *sk = sk_entry(v);
		struct bt_sock *bt = bt_sk(sk);

		seq_printf(seq,
			   "%pK %-6d %-6u %-6u %-6u %-6lu %pMR %pMR %-6lu",
			   sk,
			   atomic_read(&sk->sk_refcnt),
			   sk_rmem_alloc_get(sk),
			   sk_wmem_alloc_get(sk),
			   from_kuid(seq_user_ns(seq), sock_i_uid(sk)),
			   sock_i_ino(sk),
			   &bt->src,
			   &bt->dst,
			   bt->parent ? sock_i_ino(bt->parent) : 0LU);

		if (l->custom_seq_show) {
			seq_putc(seq, ' ');
			l->custom_seq_show(seq, v);
		}

		seq_putc(seq, '\n');
	}
	return 0;
}

static struct seq_operations bt_seq_ops = {
	.start = bt_seq_start,
	.next  = bt_seq_next,
	.stop  = bt_seq_stop,
	.show  = bt_seq_show,
};

static int bt_seq_open(struct inode *inode, struct file *file)
{
	struct bt_sock_list *sk_list;
	struct bt_seq_state *s;

	sk_list = PDE(inode)->data;
	s = __seq_open_private(file, &bt_seq_ops,
			       sizeof(struct bt_seq_state));
	if (!s)
		return -ENOMEM;

	s->l = sk_list;
	return 0;
}

int bt_procfs_init(struct module *module, struct net *net, const char *name,
		   struct bt_sock_list *sk_list,
		   int (*seq_show)(struct seq_file *, void *))
{
	struct proc_dir_entry *pde;

	sk_list->custom_seq_show = seq_show;

	sk_list->fops.owner   = module;
	sk_list->fops.open    = bt_seq_open;
	sk_list->fops.read    = seq_read;
	sk_list->fops.llseek  = seq_lseek;
	sk_list->fops.release = seq_release_private;

	pde = proc_create(name, 0, net->proc_net, &sk_list->fops);
	if (!pde)
		return -ENOMEM;

	pde->data = sk_list;

	return 0;
}

void bt_procfs_cleanup(struct net *net, const char *name)
{
	remove_proc_entry(name, net->proc_net);
}
#else
int bt_procfs_init(struct module *module, struct net *net, const char *name,
		   struct bt_sock_list *sk_list,
		   int (*seq_show)(struct seq_file *, void *))
{
	return 0;
}

void bt_procfs_cleanup(struct net *net, const char *name)
{
}
#endif
EXPORT_SYMBOL(bt_procfs_init);
EXPORT_SYMBOL(bt_procfs_cleanup);

static struct net_proto_family bt_sock_family_ops = {
	.owner	= THIS_MODULE,
	.family	= PF_BLUETOOTH,
	.create	= bt_sock_create,
};

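/* Module init/exit: bring up sysfs, register the PF_BLUETOOTH socket
 * family and then the built-in protocols (HCI sockets, L2CAP, SCO),
 * unwinding in reverse order on failure and in bt_exit().
 */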
static int __init bt_init(void)
{
	int err;

	BT_INFO("Core ver %s", VERSION);

	err = bt_sysfs_init();
	if (err < 0)
		return err;

	err = sock_register(&bt_sock_family_ops);
	if (err < 0) {
		bt_sysfs_cleanup();
		return err;
	}

	BT_INFO("HCI device and connection manager initialized");

	err = hci_sock_init();
	if (err < 0)
		goto error;

	err = l2cap_init();
	if (err < 0)
		goto sock_err;

	err = sco_init();
	if (err < 0) {
		l2cap_exit();
		goto sock_err;
	}

	return 0;

sock_err:
	hci_sock_cleanup();

error:
	sock_unregister(PF_BLUETOOTH);
	bt_sysfs_cleanup();

	return err;
}

static void __exit bt_exit(void)
{
	sco_exit();

	l2cap_exit();

	hci_sock_cleanup();

	sock_unregister(PF_BLUETOOTH);

	bt_sysfs_cleanup();
}

subsys_initcall(bt_init);
module_exit(bt_exit);

MODULE_AUTHOR("Marcel Holtmann <marcel@holtmann.org>");
MODULE_DESCRIPTION("Bluetooth Core ver " VERSION);
MODULE_VERSION(VERSION);
MODULE_LICENSE("GPL");
MODULE_ALIAS_NETPROTO(PF_BLUETOOTH);