/*
 * NETLINK	Kernel-user communication protocol.
 *
 * 		Authors:	Alan Cox <alan@redhat.com>
 * 				Alexey Kuznetsov <kuznet@ms2.inr.ac.ru>
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
 *
 * Tue Jun 26 14:36:48 MEST 2001 Herbert "herp" Rosmanith
 *                               added netlink_proto_exit
 * Tue Jan 22 18:32:44 BRST 2002 Arnaldo C. de Melo <acme@conectiva.com.br>
 *                               use nlk_sk, as sk->protinfo is on a diet 8)
 * Fri Jul 22 19:51:12 MEST 2005 Harald Welte <laforge@gnumonks.org>
 * 				- inc module use count of module that owns
 * 				  the kernel socket in case userspace opens
 * 				  socket of same protocol
 * 				- remove all module support, since netlink is
 * 				  mandatory if CONFIG_NET=y these days
 */
23
24 #include <linux/module.h>
25
26 #include <linux/capability.h>
27 #include <linux/kernel.h>
28 #include <linux/init.h>
29 #include <linux/signal.h>
30 #include <linux/sched.h>
31 #include <linux/errno.h>
32 #include <linux/string.h>
33 #include <linux/stat.h>
34 #include <linux/socket.h>
35 #include <linux/un.h>
36 #include <linux/fcntl.h>
37 #include <linux/termios.h>
38 #include <linux/sockios.h>
39 #include <linux/net.h>
40 #include <linux/fs.h>
41 #include <linux/slab.h>
42 #include <asm/uaccess.h>
43 #include <linux/skbuff.h>
44 #include <linux/netdevice.h>
45 #include <linux/rtnetlink.h>
46 #include <linux/proc_fs.h>
47 #include <linux/seq_file.h>
48 #include <linux/smp_lock.h>
49 #include <linux/notifier.h>
50 #include <linux/security.h>
51 #include <linux/jhash.h>
52 #include <linux/jiffies.h>
53 #include <linux/random.h>
54 #include <linux/bitops.h>
55 #include <linux/mm.h>
56 #include <linux/types.h>
57 #include <linux/audit.h>
58 #include <linux/selinux.h>
59
60 #include <net/sock.h>
61 #include <net/scm.h>
62 #include <net/netlink.h>
63
64 #define NLGRPSZ(x) (ALIGN(x, sizeof(unsigned long) * 8) / 8)
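/*
 * Editorial note (not from the original source): NLGRPSZ() rounds a group
 * count up to a whole number of unsigned longs and returns the size in
 * bytes, e.g. NLGRPSZ(32) is 4 on 32-bit and 8 on 64-bit.
 */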

struct netlink_sock {
	/* struct sock has to be the first member of netlink_sock */
	struct sock		sk;
	u32			pid;
	u32			dst_pid;
	u32			dst_group;
	u32			flags;
	u32			subscriptions;
	u32			ngroups;
	unsigned long		*groups;
	unsigned long		state;
	wait_queue_head_t	wait;
	struct netlink_callback	*cb;
	spinlock_t		cb_lock;
	void			(*data_ready)(struct sock *sk, int bytes);
	struct module		*module;
};

#define NETLINK_KERNEL_SOCKET	0x1
#define NETLINK_RECV_PKTINFO	0x2

static inline struct netlink_sock *nlk_sk(struct sock *sk)
{
	return (struct netlink_sock *)sk;
}

struct nl_pid_hash {
	struct hlist_head *table;
	unsigned long rehash_time;

	unsigned int mask;
	unsigned int shift;

	unsigned int entries;
	unsigned int max_shift;

	u32 rnd;
};

struct netlink_table {
	struct nl_pid_hash hash;
	struct hlist_head mc_list;
	unsigned long *listeners;
	unsigned int nl_nonroot;
	unsigned int groups;
	struct module *module;
	int registered;
};

static struct netlink_table *nl_table;

static DECLARE_WAIT_QUEUE_HEAD(nl_table_wait);

static int netlink_dump(struct sock *sk);
static void netlink_destroy_callback(struct netlink_callback *cb);

static DEFINE_RWLOCK(nl_table_lock);
static atomic_t nl_table_users = ATOMIC_INIT(0);

static ATOMIC_NOTIFIER_HEAD(netlink_chain);

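/*
 * Editorial note (not from the original source): group numbers on the wire
 * are 1-based; group N maps to bit N-1 in the membership bitmaps below, and
 * group 0 means "no group", hence the mask of 0.
 */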
static u32 netlink_group_mask(u32 group)
{
	return group ? 1 << (group - 1) : 0;
}

static struct hlist_head *nl_pid_hashfn(struct nl_pid_hash *hash, u32 pid)
{
	return &hash->table[jhash_1word(pid, hash->rnd) & hash->mask];
}

static void netlink_sock_destruct(struct sock *sk)
{
	skb_queue_purge(&sk->sk_receive_queue);

	if (!sock_flag(sk, SOCK_DEAD)) {
		printk(KERN_ERR "Freeing alive netlink socket %p\n", sk);
		return;
	}
	BUG_TRAP(!atomic_read(&sk->sk_rmem_alloc));
	BUG_TRAP(!atomic_read(&sk->sk_wmem_alloc));
	BUG_TRAP(!nlk_sk(sk)->cb);
	BUG_TRAP(!nlk_sk(sk)->groups);
}

/* This lock without WQ_FLAG_EXCLUSIVE is good on UP and it is _very_ bad on
 * SMP: when several writers sleep and a reader wakes them up, all but one
 * immediately hit the write lock and grab all the cpus. Exclusive sleep
 * solves this, _but_ remember, it adds useless work on UP machines.
 */

static void netlink_table_grab(void)
{
	write_lock_irq(&nl_table_lock);

	if (atomic_read(&nl_table_users)) {
		DECLARE_WAITQUEUE(wait, current);

		add_wait_queue_exclusive(&nl_table_wait, &wait);
		for (;;) {
			set_current_state(TASK_UNINTERRUPTIBLE);
			if (atomic_read(&nl_table_users) == 0)
				break;
			write_unlock_irq(&nl_table_lock);
			schedule();
			write_lock_irq(&nl_table_lock);
		}

		__set_current_state(TASK_RUNNING);
		remove_wait_queue(&nl_table_wait, &wait);
	}
}

static __inline__ void netlink_table_ungrab(void)
{
	write_unlock_irq(&nl_table_lock);
	wake_up(&nl_table_wait);
}

static __inline__ void
netlink_lock_table(void)
{
	/* read_lock() synchronizes us with netlink_table_grab() */

	read_lock(&nl_table_lock);
	atomic_inc(&nl_table_users);
	read_unlock(&nl_table_lock);
}

static __inline__ void
netlink_unlock_table(void)
{
	if (atomic_dec_and_test(&nl_table_users))
		wake_up(&nl_table_wait);
}

static __inline__ struct sock *netlink_lookup(int protocol, u32 pid)
{
	struct nl_pid_hash *hash = &nl_table[protocol].hash;
	struct hlist_head *head;
	struct sock *sk;
	struct hlist_node *node;

	read_lock(&nl_table_lock);
	head = nl_pid_hashfn(hash, pid);
	sk_for_each(sk, node, head) {
		if (nlk_sk(sk)->pid == pid) {
			sock_hold(sk);
			goto found;
		}
	}
	sk = NULL;
found:
	read_unlock(&nl_table_lock);
	return sk;
}

static inline struct hlist_head *nl_pid_hash_alloc(size_t size)
{
	if (size <= PAGE_SIZE)
		return kmalloc(size, GFP_ATOMIC);
	else
		return (struct hlist_head *)
			__get_free_pages(GFP_ATOMIC, get_order(size));
}

static inline void nl_pid_hash_free(struct hlist_head *table, size_t size)
{
	if (size <= PAGE_SIZE)
		kfree(table);
	else
		free_pages((unsigned long)table, get_order(size));
}

static int nl_pid_hash_rehash(struct nl_pid_hash *hash, int grow)
{
	unsigned int omask, mask, shift;
	size_t osize, size;
	struct hlist_head *otable, *table;
	int i;

	omask = mask = hash->mask;
	osize = size = (mask + 1) * sizeof(*table);
	shift = hash->shift;

	if (grow) {
		if (++shift > hash->max_shift)
			return 0;
		mask = mask * 2 + 1;
		size *= 2;
	}

	table = nl_pid_hash_alloc(size);
	if (!table)
		return 0;

	memset(table, 0, size);
	otable = hash->table;
	hash->table = table;
	hash->mask = mask;
	hash->shift = shift;
	get_random_bytes(&hash->rnd, sizeof(hash->rnd));

	for (i = 0; i <= omask; i++) {
		struct sock *sk;
		struct hlist_node *node, *tmp;

		sk_for_each_safe(sk, node, tmp, &otable[i])
			__sk_add_node(sk, nl_pid_hashfn(hash, nlk_sk(sk)->pid));
	}

	nl_pid_hash_free(otable, osize);
	hash->rehash_time = jiffies + 10 * 60 * HZ;
	return 1;
}

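/*
 * Editorial note, inferred from the code below: grow the table when the
 * average chain length exceeds one; otherwise re-randomize the hash
 * (without growing) when an unusually long chain is seen, at most once
 * per the ten-minute window set by nl_pid_hash_rehash() above.
 */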
static inline int nl_pid_hash_dilute(struct nl_pid_hash *hash, int len)
{
	int avg = hash->entries >> hash->shift;

	if (unlikely(avg > 1) && nl_pid_hash_rehash(hash, 1))
		return 1;

	if (unlikely(len > avg) && time_after(jiffies, hash->rehash_time)) {
		nl_pid_hash_rehash(hash, 0);
		return 1;
	}

	return 0;
}

static const struct proto_ops netlink_ops;

static void
netlink_update_listeners(struct sock *sk)
{
	struct netlink_table *tbl = &nl_table[sk->sk_protocol];
	struct hlist_node *node;
	unsigned long mask;
	unsigned int i;

	for (i = 0; i < NLGRPSZ(tbl->groups) / sizeof(unsigned long); i++) {
		mask = 0;
		sk_for_each_bound(sk, node, &tbl->mc_list)
			mask |= nlk_sk(sk)->groups[i];
		tbl->listeners[i] = mask;
	}
	/* this function is only called with the netlink table "grabbed", which
	 * makes sure updates are visible before bind or setsockopt return. */
}

static int netlink_insert(struct sock *sk, u32 pid)
{
	struct nl_pid_hash *hash = &nl_table[sk->sk_protocol].hash;
	struct hlist_head *head;
	int err = -EADDRINUSE;
	struct sock *osk;
	struct hlist_node *node;
	int len;

	netlink_table_grab();
	head = nl_pid_hashfn(hash, pid);
	len = 0;
	sk_for_each(osk, node, head) {
		if (nlk_sk(osk)->pid == pid)
			break;
		len++;
	}
	if (node)
		goto err;

	err = -EBUSY;
	if (nlk_sk(sk)->pid)
		goto err;

	err = -ENOMEM;
	if (BITS_PER_LONG > 32 && unlikely(hash->entries >= UINT_MAX))
		goto err;

	if (len && nl_pid_hash_dilute(hash, len))
		head = nl_pid_hashfn(hash, pid);
	hash->entries++;
	nlk_sk(sk)->pid = pid;
	sk_add_node(sk, head);
	err = 0;

err:
	netlink_table_ungrab();
	return err;
}

static void netlink_remove(struct sock *sk)
{
	netlink_table_grab();
	if (sk_del_node_init(sk))
		nl_table[sk->sk_protocol].hash.entries--;
	if (nlk_sk(sk)->subscriptions)
		__sk_del_bind_node(sk);
	netlink_table_ungrab();
}

static struct proto netlink_proto = {
	.name	  = "NETLINK",
	.owner	  = THIS_MODULE,
	.obj_size = sizeof(struct netlink_sock),
};

static int __netlink_create(struct socket *sock, int protocol)
{
	struct sock *sk;
	struct netlink_sock *nlk;

	sock->ops = &netlink_ops;

	sk = sk_alloc(PF_NETLINK, GFP_KERNEL, &netlink_proto, 1);
	if (!sk)
		return -ENOMEM;

	sock_init_data(sock, sk);

	nlk = nlk_sk(sk);
	spin_lock_init(&nlk->cb_lock);
	init_waitqueue_head(&nlk->wait);

	sk->sk_destruct = netlink_sock_destruct;
	sk->sk_protocol = protocol;
	return 0;
}

static int netlink_create(struct socket *sock, int protocol)
{
	struct module *module = NULL;
	struct netlink_sock *nlk;
	unsigned int groups;
	int err = 0;

	sock->state = SS_UNCONNECTED;

	if (sock->type != SOCK_RAW && sock->type != SOCK_DGRAM)
		return -ESOCKTNOSUPPORT;

	if (protocol < 0 || protocol >= MAX_LINKS)
		return -EPROTONOSUPPORT;

	netlink_lock_table();
#ifdef CONFIG_KMOD
	if (!nl_table[protocol].registered) {
		netlink_unlock_table();
		request_module("net-pf-%d-proto-%d", PF_NETLINK, protocol);
		netlink_lock_table();
	}
#endif
	if (nl_table[protocol].registered &&
	    try_module_get(nl_table[protocol].module))
		module = nl_table[protocol].module;
	groups = nl_table[protocol].groups;
	netlink_unlock_table();

	if ((err = __netlink_create(sock, protocol)) < 0)
		goto out_module;

	nlk = nlk_sk(sock->sk);
	nlk->module = module;
out:
	return err;

out_module:
	module_put(module);
	goto out;
}

static int netlink_release(struct socket *sock)
{
	struct sock *sk = sock->sk;
	struct netlink_sock *nlk;

	if (!sk)
		return 0;

	netlink_remove(sk);
	nlk = nlk_sk(sk);

	spin_lock(&nlk->cb_lock);
	if (nlk->cb) {
		if (nlk->cb->done)
			nlk->cb->done(nlk->cb);
		netlink_destroy_callback(nlk->cb);
		nlk->cb = NULL;
	}
	spin_unlock(&nlk->cb_lock);

	/* OK. Socket is unlinked, and therefore no new packets will
	 * arrive. */

	sock_orphan(sk);
	sock->sk = NULL;
	wake_up_interruptible_all(&nlk->wait);

	skb_queue_purge(&sk->sk_write_queue);

	if (nlk->pid && !nlk->subscriptions) {
		struct netlink_notify n = {
			.protocol = sk->sk_protocol,
			.pid = nlk->pid,
		};
		atomic_notifier_call_chain(&netlink_chain,
					   NETLINK_URELEASE, &n);
	}

	if (nlk->module)
		module_put(nlk->module);

	netlink_table_grab();
	if (nlk->flags & NETLINK_KERNEL_SOCKET) {
		kfree(nl_table[sk->sk_protocol].listeners);
		nl_table[sk->sk_protocol].module = NULL;
		nl_table[sk->sk_protocol].registered = 0;
	} else if (nlk->subscriptions)
		netlink_update_listeners(sk);
	netlink_table_ungrab();

	kfree(nlk->groups);
	nlk->groups = NULL;

	sock_put(sk);
	return 0;
}

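/*
 * Editorial note (not from the original source): autobind first tries the
 * caller's tgid as the netlink pid; on a collision it hands out values from
 * a static rover that walks downward through the negative pid space
 * starting at -4097, retrying until netlink_insert() succeeds.
 */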
static int netlink_autobind(struct socket *sock)
{
	struct sock *sk = sock->sk;
	struct nl_pid_hash *hash = &nl_table[sk->sk_protocol].hash;
	struct hlist_head *head;
	struct sock *osk;
	struct hlist_node *node;
	s32 pid = current->tgid;
	int err;
	static s32 rover = -4097;

retry:
	cond_resched();
	netlink_table_grab();
	head = nl_pid_hashfn(hash, pid);
	sk_for_each(osk, node, head) {
		if (nlk_sk(osk)->pid == pid) {
			/* Bind collision, search negative pid values. */
			pid = rover--;
			if (rover > -4097)
				rover = -4097;
			netlink_table_ungrab();
			goto retry;
		}
	}
	netlink_table_ungrab();

	err = netlink_insert(sk, pid);
	if (err == -EADDRINUSE)
		goto retry;

	/* If 2 threads race to autobind, that is fine.  */
	if (err == -EBUSY)
		err = 0;

	return err;
}

static inline int netlink_capable(struct socket *sock, unsigned int flag)
{
	return (nl_table[sock->sk->sk_protocol].nl_nonroot & flag) ||
	       capable(CAP_NET_ADMIN);
}

static void
netlink_update_subscriptions(struct sock *sk, unsigned int subscriptions)
{
	struct netlink_sock *nlk = nlk_sk(sk);

	if (nlk->subscriptions && !subscriptions)
		__sk_del_bind_node(sk);
	else if (!nlk->subscriptions && subscriptions)
		sk_add_bind_node(sk, &nl_table[sk->sk_protocol].mc_list);
	nlk->subscriptions = subscriptions;
}

static int netlink_alloc_groups(struct sock *sk)
{
	struct netlink_sock *nlk = nlk_sk(sk);
	unsigned int groups;
	int err = 0;

	netlink_lock_table();
	groups = nl_table[sk->sk_protocol].groups;
	if (!nl_table[sk->sk_protocol].registered)
		err = -ENOENT;
	netlink_unlock_table();

	if (err)
		return err;

	nlk->groups = kzalloc(NLGRPSZ(groups), GFP_KERNEL);
	if (nlk->groups == NULL)
		return -ENOMEM;
	nlk->ngroups = groups;
	return 0;
}

static int netlink_bind(struct socket *sock, struct sockaddr *addr, int addr_len)
{
	struct sock *sk = sock->sk;
	struct netlink_sock *nlk = nlk_sk(sk);
	struct sockaddr_nl *nladdr = (struct sockaddr_nl *)addr;
	int err;

	if (nladdr->nl_family != AF_NETLINK)
		return -EINVAL;

	/* Only the superuser is allowed to listen to multicasts */
	if (nladdr->nl_groups) {
		if (!netlink_capable(sock, NL_NONROOT_RECV))
			return -EPERM;
		if (nlk->groups == NULL) {
			err = netlink_alloc_groups(sk);
			if (err)
				return err;
		}
	}

	if (nlk->pid) {
		if (nladdr->nl_pid != nlk->pid)
			return -EINVAL;
	} else {
		err = nladdr->nl_pid ?
			netlink_insert(sk, nladdr->nl_pid) :
			netlink_autobind(sock);
		if (err)
			return err;
	}

	if (!nladdr->nl_groups && (nlk->groups == NULL || !(u32)nlk->groups[0]))
		return 0;

	netlink_table_grab();
	netlink_update_subscriptions(sk, nlk->subscriptions +
					 hweight32(nladdr->nl_groups) -
					 hweight32(nlk->groups[0]));
	nlk->groups[0] = (nlk->groups[0] & ~0xffffffffUL) | nladdr->nl_groups;
	netlink_update_listeners(sk);
	netlink_table_ungrab();

	return 0;
}

static int netlink_connect(struct socket *sock, struct sockaddr *addr,
			   int alen, int flags)
{
	int err = 0;
	struct sock *sk = sock->sk;
	struct netlink_sock *nlk = nlk_sk(sk);
	struct sockaddr_nl *nladdr = (struct sockaddr_nl *)addr;

	if (addr->sa_family == AF_UNSPEC) {
		sk->sk_state = NETLINK_UNCONNECTED;
		nlk->dst_pid = 0;
		nlk->dst_group = 0;
		return 0;
	}
	if (addr->sa_family != AF_NETLINK)
		return -EINVAL;

	/* Only the superuser is allowed to send multicasts */
	if (nladdr->nl_groups && !netlink_capable(sock, NL_NONROOT_SEND))
		return -EPERM;

	if (!nlk->pid)
		err = netlink_autobind(sock);

	if (err == 0) {
		sk->sk_state = NETLINK_CONNECTED;
		nlk->dst_pid = nladdr->nl_pid;
		nlk->dst_group = ffs(nladdr->nl_groups);
	}

	return err;
}

static int netlink_getname(struct socket *sock, struct sockaddr *addr, int *addr_len, int peer)
{
	struct sock *sk = sock->sk;
	struct netlink_sock *nlk = nlk_sk(sk);
	struct sockaddr_nl *nladdr = (struct sockaddr_nl *)addr;

	nladdr->nl_family = AF_NETLINK;
	nladdr->nl_pad = 0;
	*addr_len = sizeof(*nladdr);

	if (peer) {
		nladdr->nl_pid = nlk->dst_pid;
		nladdr->nl_groups = netlink_group_mask(nlk->dst_group);
	} else {
		nladdr->nl_pid = nlk->pid;
		nladdr->nl_groups = nlk->groups ? nlk->groups[0] : 0;
	}
	return 0;
}

static void netlink_overrun(struct sock *sk)
{
	if (!test_and_set_bit(0, &nlk_sk(sk)->state)) {
		sk->sk_err = ENOBUFS;
		sk->sk_error_report(sk);
	}
}

static struct sock *netlink_getsockbypid(struct sock *ssk, u32 pid)
{
	int protocol = ssk->sk_protocol;
	struct sock *sock;
	struct netlink_sock *nlk;

	sock = netlink_lookup(protocol, pid);
	if (!sock)
		return ERR_PTR(-ECONNREFUSED);

	/* Don't bother queuing the skb if the kernel socket has no input function */
	nlk = nlk_sk(sock);
	if ((nlk->pid == 0 && !nlk->data_ready) ||
	    (sock->sk_state == NETLINK_CONNECTED &&
	     nlk->dst_pid != nlk_sk(ssk)->pid)) {
		sock_put(sock);
		return ERR_PTR(-ECONNREFUSED);
	}
	return sock;
}

struct sock *netlink_getsockbyfilp(struct file *filp)
{
	struct inode *inode = filp->f_path.dentry->d_inode;
	struct sock *sock;

	if (!S_ISSOCK(inode->i_mode))
		return ERR_PTR(-ENOTSOCK);

	sock = SOCKET_I(inode)->sk;
	if (sock->sk_family != AF_NETLINK)
		return ERR_PTR(-EINVAL);

	sock_hold(sock);
	return sock;
}

/*
 * Attach an skb to a netlink socket.
 * The caller must hold a reference to the destination socket. On error, the
 * reference is dropped. The skb is not sent to the destination; only the
 * error checks are performed and memory in the queue is reserved.
 * Return values:
 * < 0: error. skb freed, reference to sock dropped.
 * 0: continue
 * 1: repeat lookup - reference dropped while waiting for socket memory.
 */
int netlink_attachskb(struct sock *sk, struct sk_buff *skb, int nonblock,
		      long timeo, struct sock *ssk)
{
	struct netlink_sock *nlk;

	nlk = nlk_sk(sk);

	if (atomic_read(&sk->sk_rmem_alloc) > sk->sk_rcvbuf ||
	    test_bit(0, &nlk->state)) {
		DECLARE_WAITQUEUE(wait, current);
		if (!timeo) {
			if (!ssk || nlk_sk(ssk)->pid == 0)
				netlink_overrun(sk);
			sock_put(sk);
			kfree_skb(skb);
			return -EAGAIN;
		}

		__set_current_state(TASK_INTERRUPTIBLE);
		add_wait_queue(&nlk->wait, &wait);

		if ((atomic_read(&sk->sk_rmem_alloc) > sk->sk_rcvbuf ||
		     test_bit(0, &nlk->state)) &&
		    !sock_flag(sk, SOCK_DEAD))
			timeo = schedule_timeout(timeo);

		__set_current_state(TASK_RUNNING);
		remove_wait_queue(&nlk->wait, &wait);
		sock_put(sk);

		if (signal_pending(current)) {
			kfree_skb(skb);
			return sock_intr_errno(timeo);
		}
		return 1;
	}
	skb_set_owner_r(skb, sk);
	return 0;
}
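
/*
 * Editorial note (not from the original source): the intended caller
 * pattern for the return codes above is a retry loop, as netlink_unicast()
 * below demonstrates -- redo the socket lookup on 1, give up on < 0, and
 * queue the skb on 0.
 */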

int netlink_sendskb(struct sock *sk, struct sk_buff *skb, int protocol)
{
	int len = skb->len;

	skb_queue_tail(&sk->sk_receive_queue, skb);
	sk->sk_data_ready(sk, len);
	sock_put(sk);
	return len;
}

void netlink_detachskb(struct sock *sk, struct sk_buff *skb)
{
	kfree_skb(skb);
	sock_put(sk);
}

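/*
 * Editorial note (not from the original source): netlink_trim() reclaims
 * tail room when more than half of an skb's truesize is unused slack,
 * cloning first if the buffer is shared, so queued messages charge less
 * against the destination's receive buffer.
 */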
static inline struct sk_buff *netlink_trim(struct sk_buff *skb,
					   gfp_t allocation)
{
	int delta;

	skb_orphan(skb);

	delta = skb->end - skb->tail;
	if (delta * 2 < skb->truesize)
		return skb;

	if (skb_shared(skb)) {
		struct sk_buff *nskb = skb_clone(skb, allocation);
		if (!nskb)
			return skb;
		kfree_skb(skb);
		skb = nskb;
	}

	if (!pskb_expand_head(skb, 0, -delta, allocation))
		skb->truesize -= delta;

	return skb;
}

int netlink_unicast(struct sock *ssk, struct sk_buff *skb, u32 pid, int nonblock)
{
	struct sock *sk;
	int err;
	long timeo;

	skb = netlink_trim(skb, gfp_any());

	timeo = sock_sndtimeo(ssk, nonblock);
retry:
	sk = netlink_getsockbypid(ssk, pid);
	if (IS_ERR(sk)) {
		kfree_skb(skb);
		return PTR_ERR(sk);
	}
	err = netlink_attachskb(sk, skb, nonblock, timeo, ssk);
	if (err == 1)
		goto retry;
	if (err)
		return err;

	return netlink_sendskb(sk, skb, ssk->sk_protocol);
}

int netlink_has_listeners(struct sock *sk, unsigned int group)
{
	int res = 0;

	BUG_ON(!(nlk_sk(sk)->flags & NETLINK_KERNEL_SOCKET));
	if (group - 1 < nl_table[sk->sk_protocol].groups)
		res = test_bit(group - 1, nl_table[sk->sk_protocol].listeners);
	return res;
}
EXPORT_SYMBOL_GPL(netlink_has_listeners);

static __inline__ int netlink_broadcast_deliver(struct sock *sk, struct sk_buff *skb)
{
	struct netlink_sock *nlk = nlk_sk(sk);

	if (atomic_read(&sk->sk_rmem_alloc) <= sk->sk_rcvbuf &&
	    !test_bit(0, &nlk->state)) {
		skb_set_owner_r(skb, sk);
		skb_queue_tail(&sk->sk_receive_queue, skb);
		sk->sk_data_ready(sk, skb->len);
		return atomic_read(&sk->sk_rmem_alloc) > sk->sk_rcvbuf;
	}
	return -1;
}

struct netlink_broadcast_data {
	struct sock *exclude_sk;
	u32 pid;
	u32 group;
	int failure;
	int congested;
	int delivered;
	gfp_t allocation;
	struct sk_buff *skb, *skb2;
};

static inline int do_one_broadcast(struct sock *sk,
				   struct netlink_broadcast_data *p)
{
	struct netlink_sock *nlk = nlk_sk(sk);
	int val;

	if (p->exclude_sk == sk)
		goto out;

	if (nlk->pid == p->pid || p->group - 1 >= nlk->ngroups ||
	    !test_bit(p->group - 1, nlk->groups))
		goto out;

	if (p->failure) {
		netlink_overrun(sk);
		goto out;
	}

	sock_hold(sk);
	if (p->skb2 == NULL) {
		if (skb_shared(p->skb)) {
			p->skb2 = skb_clone(p->skb, p->allocation);
		} else {
			p->skb2 = skb_get(p->skb);
			/*
			 * skb ownership may have been set when
			 * delivered to a previous socket.
			 */
			skb_orphan(p->skb2);
		}
	}
	if (p->skb2 == NULL) {
		netlink_overrun(sk);
		/* Clone failed. Notify ALL listeners. */
		p->failure = 1;
	} else if ((val = netlink_broadcast_deliver(sk, p->skb2)) < 0) {
		netlink_overrun(sk);
	} else {
		p->congested |= val;
		p->delivered = 1;
		p->skb2 = NULL;
	}
	sock_put(sk);

out:
	return 0;
}

int netlink_broadcast(struct sock *ssk, struct sk_buff *skb, u32 pid,
		      u32 group, gfp_t allocation)
{
	struct netlink_broadcast_data info;
	struct hlist_node *node;
	struct sock *sk;

	skb = netlink_trim(skb, allocation);

	info.exclude_sk = ssk;
	info.pid = pid;
	info.group = group;
	info.failure = 0;
	info.congested = 0;
	info.delivered = 0;
	info.allocation = allocation;
	info.skb = skb;
	info.skb2 = NULL;

	/* While we sleep in clone, do not allow the socket list to change */

	netlink_lock_table();

	sk_for_each_bound(sk, node, &nl_table[ssk->sk_protocol].mc_list)
		do_one_broadcast(sk, &info);

	kfree_skb(skb);

	netlink_unlock_table();

	if (info.skb2)
		kfree_skb(info.skb2);

	if (info.delivered) {
		if (info.congested && (allocation & __GFP_WAIT))
			yield();
		return 0;
	}
	if (info.failure)
		return -ENOBUFS;
	return -ESRCH;
}

struct netlink_set_err_data {
	struct sock *exclude_sk;
	u32 pid;
	u32 group;
	int code;
};

static inline int do_one_set_err(struct sock *sk,
				 struct netlink_set_err_data *p)
{
	struct netlink_sock *nlk = nlk_sk(sk);

	if (sk == p->exclude_sk)
		goto out;

	if (nlk->pid == p->pid || p->group - 1 >= nlk->ngroups ||
	    !test_bit(p->group - 1, nlk->groups))
		goto out;

	sk->sk_err = p->code;
	sk->sk_error_report(sk);
out:
	return 0;
}

void netlink_set_err(struct sock *ssk, u32 pid, u32 group, int code)
{
	struct netlink_set_err_data info;
	struct hlist_node *node;
	struct sock *sk;

	info.exclude_sk = ssk;
	info.pid = pid;
	info.group = group;
	info.code = code;

	read_lock(&nl_table_lock);

	sk_for_each_bound(sk, node, &nl_table[ssk->sk_protocol].mc_list)
		do_one_set_err(sk, &info);

	read_unlock(&nl_table_lock);
}

static int netlink_setsockopt(struct socket *sock, int level, int optname,
			      char __user *optval, int optlen)
{
	struct sock *sk = sock->sk;
	struct netlink_sock *nlk = nlk_sk(sk);
	int val = 0, err;

	if (level != SOL_NETLINK)
		return -ENOPROTOOPT;

	if (optlen >= sizeof(int) &&
	    get_user(val, (int __user *)optval))
		return -EFAULT;

	switch (optname) {
	case NETLINK_PKTINFO:
		if (val)
			nlk->flags |= NETLINK_RECV_PKTINFO;
		else
			nlk->flags &= ~NETLINK_RECV_PKTINFO;
		err = 0;
		break;
	case NETLINK_ADD_MEMBERSHIP:
	case NETLINK_DROP_MEMBERSHIP: {
		unsigned int subscriptions;
		int old, new = optname == NETLINK_ADD_MEMBERSHIP ? 1 : 0;

		if (!netlink_capable(sock, NL_NONROOT_RECV))
			return -EPERM;
		if (nlk->groups == NULL) {
			err = netlink_alloc_groups(sk);
			if (err)
				return err;
		}
		if (!val || val - 1 >= nlk->ngroups)
			return -EINVAL;
		netlink_table_grab();
		old = test_bit(val - 1, nlk->groups);
		subscriptions = nlk->subscriptions - old + new;
		if (new)
			__set_bit(val - 1, nlk->groups);
		else
			__clear_bit(val - 1, nlk->groups);
		netlink_update_subscriptions(sk, subscriptions);
		netlink_update_listeners(sk);
		netlink_table_ungrab();
		err = 0;
		break;
	}
	default:
		err = -ENOPROTOOPT;
	}
	return err;
}
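
/*
 * Editorial sketch (not part of the original source): joining a multicast
 * group from userspace via the option handled above. The group number
 * (RTNLGRP_LINK here) is 1-based and purely illustrative.
 *
 *	int fd = socket(AF_NETLINK, SOCK_RAW, NETLINK_ROUTE);
 *	unsigned int grp = RTNLGRP_LINK;
 *
 *	setsockopt(fd, SOL_NETLINK, NETLINK_ADD_MEMBERSHIP,
 *		   &grp, sizeof(grp));
 */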

static int netlink_getsockopt(struct socket *sock, int level, int optname,
			      char __user *optval, int __user *optlen)
{
	struct sock *sk = sock->sk;
	struct netlink_sock *nlk = nlk_sk(sk);
	int len, val, err;

	if (level != SOL_NETLINK)
		return -ENOPROTOOPT;

	if (get_user(len, optlen))
		return -EFAULT;
	if (len < 0)
		return -EINVAL;

	switch (optname) {
	case NETLINK_PKTINFO:
		if (len < sizeof(int))
			return -EINVAL;
		len = sizeof(int);
		val = nlk->flags & NETLINK_RECV_PKTINFO ? 1 : 0;
		if (put_user(len, optlen) ||
		    put_user(val, (int __user *)optval))
			return -EFAULT;
		err = 0;
		break;
	default:
		err = -ENOPROTOOPT;
	}
	return err;
}

static void netlink_cmsg_recv_pktinfo(struct msghdr *msg, struct sk_buff *skb)
{
	struct nl_pktinfo info;

	info.group = NETLINK_CB(skb).dst_group;
	put_cmsg(msg, SOL_NETLINK, NETLINK_PKTINFO, sizeof(info), &info);
}

static inline void netlink_rcv_wake(struct sock *sk)
{
	struct netlink_sock *nlk = nlk_sk(sk);

	if (skb_queue_empty(&sk->sk_receive_queue))
		clear_bit(0, &nlk->state);
	if (!test_bit(0, &nlk->state))
		wake_up_interruptible(&nlk->wait);
}

static int netlink_sendmsg(struct kiocb *kiocb, struct socket *sock,
			   struct msghdr *msg, size_t len)
{
	struct sock_iocb *siocb = kiocb_to_siocb(kiocb);
	struct sock *sk = sock->sk;
	struct netlink_sock *nlk = nlk_sk(sk);
	struct sockaddr_nl *addr = msg->msg_name;
	u32 dst_pid;
	u32 dst_group;
	struct sk_buff *skb;
	int err;
	struct scm_cookie scm;

	if (msg->msg_flags & MSG_OOB)
		return -EOPNOTSUPP;

	if (NULL == siocb->scm)
		siocb->scm = &scm;
	err = scm_send(sock, msg, siocb->scm);
	if (err < 0)
		return err;

	if (msg->msg_namelen) {
		if (addr->nl_family != AF_NETLINK)
			return -EINVAL;
		dst_pid = addr->nl_pid;
		dst_group = ffs(addr->nl_groups);
		if (dst_group && !netlink_capable(sock, NL_NONROOT_SEND))
			return -EPERM;
	} else {
		dst_pid = nlk->dst_pid;
		dst_group = nlk->dst_group;
	}

	if (!nlk->pid) {
		err = netlink_autobind(sock);
		if (err)
			goto out;
	}

	err = -EMSGSIZE;
	if (len > sk->sk_sndbuf - 32)
		goto out;
	err = -ENOBUFS;
	skb = alloc_skb(len, GFP_KERNEL);
	if (skb == NULL)
		goto out;

	NETLINK_CB(skb).pid = nlk->pid;
	NETLINK_CB(skb).dst_group = dst_group;
	NETLINK_CB(skb).loginuid = audit_get_loginuid(current->audit_context);
	selinux_get_task_sid(current, &(NETLINK_CB(skb).sid));
	memcpy(NETLINK_CREDS(skb), &siocb->scm->creds, sizeof(struct ucred));

	/* What can I do? Netlink is asynchronous, so we have to save the
	 * sender's current capabilities here and check them when the
	 * message is delivered to the corresponding kernel module.
	 * --ANK (980802)
	 */

	err = -EFAULT;
	if (memcpy_fromiovec(skb_put(skb, len), msg->msg_iov, len)) {
		kfree_skb(skb);
		goto out;
	}

	err = security_netlink_send(sk, skb);
	if (err) {
		kfree_skb(skb);
		goto out;
	}

	if (dst_group) {
		atomic_inc(&skb->users);
		netlink_broadcast(sk, skb, dst_pid, dst_group, GFP_KERNEL);
	}
	err = netlink_unicast(sk, skb, dst_pid, msg->msg_flags & MSG_DONTWAIT);

out:
	return err;
}

static int netlink_recvmsg(struct kiocb *kiocb, struct socket *sock,
			   struct msghdr *msg, size_t len,
			   int flags)
{
	struct sock_iocb *siocb = kiocb_to_siocb(kiocb);
	struct scm_cookie scm;
	struct sock *sk = sock->sk;
	struct netlink_sock *nlk = nlk_sk(sk);
	int noblock = flags & MSG_DONTWAIT;
	size_t copied;
	struct sk_buff *skb;
	int err;

	if (flags & MSG_OOB)
		return -EOPNOTSUPP;

	copied = 0;

	skb = skb_recv_datagram(sk, flags, noblock, &err);
	if (skb == NULL)
		goto out;

	msg->msg_namelen = 0;

	copied = skb->len;
	if (len < copied) {
		msg->msg_flags |= MSG_TRUNC;
		copied = len;
	}

	skb->h.raw = skb->data;
	err = skb_copy_datagram_iovec(skb, 0, msg->msg_iov, copied);

	if (msg->msg_name) {
		struct sockaddr_nl *addr = (struct sockaddr_nl *)msg->msg_name;
		addr->nl_family = AF_NETLINK;
		addr->nl_pad = 0;
		addr->nl_pid = NETLINK_CB(skb).pid;
		addr->nl_groups = netlink_group_mask(NETLINK_CB(skb).dst_group);
		msg->msg_namelen = sizeof(*addr);
	}

	if (nlk->flags & NETLINK_RECV_PKTINFO)
		netlink_cmsg_recv_pktinfo(msg, skb);

	if (NULL == siocb->scm) {
		memset(&scm, 0, sizeof(scm));
		siocb->scm = &scm;
	}
	siocb->scm->creds = *NETLINK_CREDS(skb);
	skb_free_datagram(sk, skb);

	if (nlk->cb && atomic_read(&sk->sk_rmem_alloc) <= sk->sk_rcvbuf / 2)
		netlink_dump(sk);

	scm_recv(sock, msg, siocb->scm, flags);

out:
	netlink_rcv_wake(sk);
	return err ? : copied;
}

static void netlink_data_ready(struct sock *sk, int len)
{
	struct netlink_sock *nlk = nlk_sk(sk);

	if (nlk->data_ready)
		nlk->data_ready(sk, len);
	netlink_rcv_wake(sk);
}

/*
 * We export these functions to other modules. They provide a
 * complete set of kernel non-blocking support for message
 * queueing.
 */

struct sock *
netlink_kernel_create(int unit, unsigned int groups,
		      void (*input)(struct sock *sk, int len),
		      struct module *module)
{
	struct socket *sock;
	struct sock *sk;
	struct netlink_sock *nlk;
	unsigned long *listeners = NULL;

	BUG_ON(!nl_table);

	if (unit < 0 || unit >= MAX_LINKS)
		return NULL;

	if (sock_create_lite(PF_NETLINK, SOCK_DGRAM, unit, &sock))
		return NULL;

	if (__netlink_create(sock, unit) < 0)
		goto out_sock_release;

	if (groups < 32)
		groups = 32;

	listeners = kzalloc(NLGRPSZ(groups), GFP_KERNEL);
	if (!listeners)
		goto out_sock_release;

	sk = sock->sk;
	sk->sk_data_ready = netlink_data_ready;
	if (input)
		nlk_sk(sk)->data_ready = input;

	if (netlink_insert(sk, 0))
		goto out_sock_release;

	nlk = nlk_sk(sk);
	nlk->flags |= NETLINK_KERNEL_SOCKET;

	netlink_table_grab();
	nl_table[unit].groups = groups;
	nl_table[unit].listeners = listeners;
	nl_table[unit].module = module;
	nl_table[unit].registered = 1;
	netlink_table_ungrab();

	return sk;

out_sock_release:
	kfree(listeners);
	sock_release(sock);
	return NULL;
}
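
/*
 * Editorial sketch (not from the original source): a minimal in-kernel
 * caller of the API above. NETLINK_TEST and my_input() are hypothetical
 * names used only for illustration.
 *
 *	nl_sk = netlink_kernel_create(NETLINK_TEST, 0, my_input,
 *				      THIS_MODULE);
 *	if (nl_sk == NULL)
 *		return -ENOMEM;
 */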

void netlink_set_nonroot(int protocol, unsigned int flags)
{
	if ((unsigned int)protocol < MAX_LINKS)
		nl_table[protocol].nl_nonroot = flags;
}

static void netlink_destroy_callback(struct netlink_callback *cb)
{
	if (cb->skb)
		kfree_skb(cb->skb);
	kfree(cb);
}

/*
 * This looks a bit ugly; it would be cleaner to use a kernel thread.
 */

static int netlink_dump(struct sock *sk)
{
	struct netlink_sock *nlk = nlk_sk(sk);
	struct netlink_callback *cb;
	struct sk_buff *skb;
	struct nlmsghdr *nlh;
	int len, err = -ENOBUFS;

	skb = sock_rmalloc(sk, NLMSG_GOODSIZE, 0, GFP_KERNEL);
	if (!skb)
		goto errout;

	spin_lock(&nlk->cb_lock);

	cb = nlk->cb;
	if (cb == NULL) {
		err = -EINVAL;
		goto errout_skb;
	}

	len = cb->dump(skb, cb);

	if (len > 0) {
		spin_unlock(&nlk->cb_lock);
		skb_queue_tail(&sk->sk_receive_queue, skb);
		sk->sk_data_ready(sk, len);
		return 0;
	}

	nlh = nlmsg_put_answer(skb, cb, NLMSG_DONE, sizeof(len), NLM_F_MULTI);
	if (!nlh)
		goto errout_skb;

	memcpy(nlmsg_data(nlh), &len, sizeof(len));

	skb_queue_tail(&sk->sk_receive_queue, skb);
	sk->sk_data_ready(sk, skb->len);

	if (cb->done)
		cb->done(cb);
	nlk->cb = NULL;
	spin_unlock(&nlk->cb_lock);

	netlink_destroy_callback(cb);
	return 0;

errout_skb:
	spin_unlock(&nlk->cb_lock);
	kfree_skb(skb);
errout:
	return err;
}

int netlink_dump_start(struct sock *ssk, struct sk_buff *skb,
		       struct nlmsghdr *nlh,
		       int (*dump)(struct sk_buff *skb, struct netlink_callback *),
		       int (*done)(struct netlink_callback *))
{
	struct netlink_callback *cb;
	struct sock *sk;
	struct netlink_sock *nlk;

	cb = kzalloc(sizeof(*cb), GFP_KERNEL);
	if (cb == NULL)
		return -ENOBUFS;

	cb->dump = dump;
	cb->done = done;
	cb->nlh = nlh;
	atomic_inc(&skb->users);
	cb->skb = skb;

	sk = netlink_lookup(ssk->sk_protocol, NETLINK_CB(skb).pid);
	if (sk == NULL) {
		netlink_destroy_callback(cb);
		return -ECONNREFUSED;
	}
	nlk = nlk_sk(sk);
	/* A dump is in progress... */
	spin_lock(&nlk->cb_lock);
	if (nlk->cb) {
		spin_unlock(&nlk->cb_lock);
		netlink_destroy_callback(cb);
		sock_put(sk);
		return -EBUSY;
	}
	nlk->cb = cb;
	spin_unlock(&nlk->cb_lock);

	netlink_dump(sk);
	sock_put(sk);
	return 0;
}

void netlink_ack(struct sk_buff *in_skb, struct nlmsghdr *nlh, int err)
{
	struct sk_buff *skb;
	struct nlmsghdr *rep;
	struct nlmsgerr *errmsg;
	size_t payload = sizeof(*errmsg);

	/* error messages get the original request appended */
	if (err)
		payload += nlmsg_len(nlh);

	skb = nlmsg_new(payload, GFP_KERNEL);
	if (!skb) {
		struct sock *sk;

		sk = netlink_lookup(in_skb->sk->sk_protocol,
				    NETLINK_CB(in_skb).pid);
		if (sk) {
			sk->sk_err = ENOBUFS;
			sk->sk_error_report(sk);
			sock_put(sk);
		}
		return;
	}

	rep = __nlmsg_put(skb, NETLINK_CB(in_skb).pid, nlh->nlmsg_seq,
			  NLMSG_ERROR, sizeof(struct nlmsgerr), 0);
	errmsg = nlmsg_data(rep);
	errmsg->error = err;
	memcpy(&errmsg->msg, nlh, err ? nlh->nlmsg_len : sizeof(*nlh));
	netlink_unicast(in_skb->sk, skb, NETLINK_CB(in_skb).pid, MSG_DONTWAIT);
}

static int netlink_rcv_skb(struct sk_buff *skb, int (*cb)(struct sk_buff *,
						     struct nlmsghdr *, int *))
{
	struct nlmsghdr *nlh;
	int err;

	while (skb->len >= nlmsg_total_size(0)) {
		nlh = (struct nlmsghdr *) skb->data;

		if (nlh->nlmsg_len < NLMSG_HDRLEN || skb->len < nlh->nlmsg_len)
			return 0;

		if (cb(skb, nlh, &err) < 0) {
			/* Not an error, but we have to interrupt processing
			 * here. Note that in this case we do not pull the
			 * message from the skb; it will be processed later.
			 */
			if (err == 0)
				return -1;
			netlink_ack(skb, nlh, err);
		} else if (nlh->nlmsg_flags & NLM_F_ACK)
			netlink_ack(skb, nlh, 0);

		netlink_queue_skip(nlh, skb);
	}

	return 0;
}

/**
 * netlink_run_queue - Process netlink receive queue.
 * @sk: Netlink socket containing the queue
 * @qlen: Place to store queue length upon entry
 * @cb: Callback function invoked for each netlink message found
 *
 * Processes as much as there was in the queue upon entry and invokes
 * a callback function for each netlink message found. The callback
 * function may refuse a message by returning a negative error code
 * but setting the error pointer to 0, in which case this function
 * returns with a qlen != 0.
 *
 * qlen must be initialized to 0 before the initial entry; afterwards
 * the function may be called repeatedly until qlen reaches 0.
 */
void netlink_run_queue(struct sock *sk, unsigned int *qlen,
		       int (*cb)(struct sk_buff *, struct nlmsghdr *, int *))
{
	struct sk_buff *skb;

	if (!*qlen || *qlen > skb_queue_len(&sk->sk_receive_queue))
		*qlen = skb_queue_len(&sk->sk_receive_queue);

	for (; *qlen; (*qlen)--) {
		skb = skb_dequeue(&sk->sk_receive_queue);
		if (netlink_rcv_skb(skb, cb)) {
			if (skb->len)
				skb_queue_head(&sk->sk_receive_queue, skb);
			else {
				kfree_skb(skb);
				(*qlen)--;
			}
			break;
		}

		kfree_skb(skb);
	}
}
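
/*
 * Editorial sketch (not from the original source): the caller loop the
 * kernel-doc above describes, as an input callback for
 * netlink_kernel_create(); my_rcv_msg() is a hypothetical message handler.
 *
 *	static void my_input(struct sock *sk, int len)
 *	{
 *		unsigned int qlen = 0;
 *
 *		do
 *			netlink_run_queue(sk, &qlen, &my_rcv_msg);
 *		while (qlen);
 *	}
 */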

/**
 * netlink_queue_skip - Skip netlink message while processing queue.
 * @nlh: Netlink message to be skipped
 * @skb: Socket buffer containing the netlink messages.
 *
 * Pulls the given netlink message off the socket buffer so the next
 * call to netlink_run_queue() will not reconsider the message.
 */
void netlink_queue_skip(struct nlmsghdr *nlh, struct sk_buff *skb)
{
	int msglen = NLMSG_ALIGN(nlh->nlmsg_len);

	if (msglen > skb->len)
		msglen = skb->len;

	skb_pull(skb, msglen);
}

/**
 * nlmsg_notify - send a notification netlink message
 * @sk: netlink socket to use
 * @skb: notification message
 * @pid: destination netlink pid for reports or 0
 * @group: destination multicast group or 0
 * @report: 1 to report back, 0 to disable
 * @flags: allocation flags
 */
int nlmsg_notify(struct sock *sk, struct sk_buff *skb, u32 pid,
		 unsigned int group, int report, gfp_t flags)
{
	int err = 0;

	if (group) {
		int exclude_pid = 0;

		if (report) {
			atomic_inc(&skb->users);
			exclude_pid = pid;
		}

		/* errors reported via destination sk->sk_err */
		nlmsg_multicast(sk, skb, exclude_pid, group, flags);
	}

	if (report)
		err = nlmsg_unicast(sk, skb, pid);

	return err;
}
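
/*
 * Editorial note (not from the original source): nlmsg_notify() multicasts
 * to @group when one is given, and additionally unicasts to @pid when
 * @report is set, taking an extra skb reference so the same buffer can
 * serve both deliveries.
 */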

#ifdef CONFIG_PROC_FS
struct nl_seq_iter {
	int link;
	int hash_idx;
};

static struct sock *netlink_seq_socket_idx(struct seq_file *seq, loff_t pos)
{
	struct nl_seq_iter *iter = seq->private;
	int i, j;
	struct sock *s;
	struct hlist_node *node;
	loff_t off = 0;

	for (i = 0; i < MAX_LINKS; i++) {
		struct nl_pid_hash *hash = &nl_table[i].hash;

		for (j = 0; j <= hash->mask; j++) {
			sk_for_each(s, node, &hash->table[j]) {
				if (off == pos) {
					iter->link = i;
					iter->hash_idx = j;
					return s;
				}
				++off;
			}
		}
	}
	return NULL;
}

static void *netlink_seq_start(struct seq_file *seq, loff_t *pos)
{
	read_lock(&nl_table_lock);
	return *pos ? netlink_seq_socket_idx(seq, *pos - 1) : SEQ_START_TOKEN;
}

static void *netlink_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	struct sock *s;
	struct nl_seq_iter *iter;
	int i, j;

	++*pos;

	if (v == SEQ_START_TOKEN)
		return netlink_seq_socket_idx(seq, 0);

	s = sk_next(v);
	if (s)
		return s;

	iter = seq->private;
	i = iter->link;
	j = iter->hash_idx + 1;

	do {
		struct nl_pid_hash *hash = &nl_table[i].hash;

		for (; j <= hash->mask; j++) {
			s = sk_head(&hash->table[j]);
			if (s) {
				iter->link = i;
				iter->hash_idx = j;
				return s;
			}
		}

		j = 0;
	} while (++i < MAX_LINKS);

	return NULL;
}

static void netlink_seq_stop(struct seq_file *seq, void *v)
{
	read_unlock(&nl_table_lock);
}


static int netlink_seq_show(struct seq_file *seq, void *v)
{
	if (v == SEQ_START_TOKEN)
		seq_puts(seq,
			 "sk       Eth Pid    Groups   "
			 "Rmem     Wmem     Dump     Locks\n");
	else {
		struct sock *s = v;
		struct netlink_sock *nlk = nlk_sk(s);

		seq_printf(seq, "%p %-3d %-6d %08x %-8d %-8d %p %d\n",
			   s,
			   s->sk_protocol,
			   nlk->pid,
			   nlk->groups ? (u32)nlk->groups[0] : 0,
			   atomic_read(&s->sk_rmem_alloc),
			   atomic_read(&s->sk_wmem_alloc),
			   nlk->cb,
			   atomic_read(&s->sk_refcnt)
			);

	}
	return 0;
}

static struct seq_operations netlink_seq_ops = {
	.start  = netlink_seq_start,
	.next   = netlink_seq_next,
	.stop   = netlink_seq_stop,
	.show   = netlink_seq_show,
};


static int netlink_seq_open(struct inode *inode, struct file *file)
{
	struct seq_file *seq;
	struct nl_seq_iter *iter;
	int err;

	iter = kzalloc(sizeof(*iter), GFP_KERNEL);
	if (!iter)
		return -ENOMEM;

	err = seq_open(file, &netlink_seq_ops);
	if (err) {
		kfree(iter);
		return err;
	}

	seq = file->private_data;
	seq->private = iter;
	return 0;
}

static struct file_operations netlink_seq_fops = {
	.owner		= THIS_MODULE,
	.open		= netlink_seq_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= seq_release_private,
};

#endif

int netlink_register_notifier(struct notifier_block *nb)
{
	return atomic_notifier_chain_register(&netlink_chain, nb);
}

int netlink_unregister_notifier(struct notifier_block *nb)
{
	return atomic_notifier_chain_unregister(&netlink_chain, nb);
}

static const struct proto_ops netlink_ops = {
	.family =	PF_NETLINK,
	.owner =	THIS_MODULE,
	.release =	netlink_release,
	.bind =		netlink_bind,
	.connect =	netlink_connect,
	.socketpair =	sock_no_socketpair,
	.accept =	sock_no_accept,
	.getname =	netlink_getname,
	.poll =		datagram_poll,
	.ioctl =	sock_no_ioctl,
	.listen =	sock_no_listen,
	.shutdown =	sock_no_shutdown,
	.setsockopt =	netlink_setsockopt,
	.getsockopt =	netlink_getsockopt,
	.sendmsg =	netlink_sendmsg,
	.recvmsg =	netlink_recvmsg,
	.mmap =		sock_no_mmap,
	.sendpage =	sock_no_sendpage,
};

static struct net_proto_family netlink_family_ops = {
	.family = PF_NETLINK,
	.create = netlink_create,
	.owner	= THIS_MODULE,	/* for consistency 8) */
};

static int __init netlink_proto_init(void)
{
	struct sk_buff *dummy_skb;
	int i;
	unsigned long max;
	unsigned int order;
	int err = proto_register(&netlink_proto, 0);

	if (err != 0)
		goto out;

	BUILD_BUG_ON(sizeof(struct netlink_skb_parms) > sizeof(dummy_skb->cb));

	nl_table = kcalloc(MAX_LINKS, sizeof(*nl_table), GFP_KERNEL);
	if (!nl_table)
		goto panic;

	if (num_physpages >= (128 * 1024))
		max = num_physpages >> (21 - PAGE_SHIFT);
	else
		max = num_physpages >> (23 - PAGE_SHIFT);

	order = get_bitmask_order(max) - 1 + PAGE_SHIFT;
	max = (1UL << order) / sizeof(struct hlist_head);
	order = get_bitmask_order(max > UINT_MAX ? UINT_MAX : max) - 1;

	for (i = 0; i < MAX_LINKS; i++) {
		struct nl_pid_hash *hash = &nl_table[i].hash;

		hash->table = nl_pid_hash_alloc(1 * sizeof(*hash->table));
		if (!hash->table) {
			while (i-- > 0)
				nl_pid_hash_free(nl_table[i].hash.table,
						 1 * sizeof(*hash->table));
			kfree(nl_table);
			goto panic;
		}
		memset(hash->table, 0, 1 * sizeof(*hash->table));
		hash->max_shift = order;
		hash->shift = 0;
		hash->mask = 0;
		hash->rehash_time = jiffies;
	}

	sock_register(&netlink_family_ops);
#ifdef CONFIG_PROC_FS
	proc_net_fops_create("netlink", 0, &netlink_seq_fops);
#endif
	/* The netlink device handler may be needed early. */
	rtnetlink_init();
out:
	return err;
panic:
	panic("netlink_init: Cannot allocate nl_table\n");
}

core_initcall(netlink_proto_init);

EXPORT_SYMBOL(netlink_ack);
EXPORT_SYMBOL(netlink_run_queue);
EXPORT_SYMBOL(netlink_queue_skip);
EXPORT_SYMBOL(netlink_broadcast);
EXPORT_SYMBOL(netlink_dump_start);
EXPORT_SYMBOL(netlink_kernel_create);
EXPORT_SYMBOL(netlink_register_notifier);
EXPORT_SYMBOL(netlink_set_err);
EXPORT_SYMBOL(netlink_set_nonroot);
EXPORT_SYMBOL(netlink_unicast);
EXPORT_SYMBOL(netlink_unregister_notifier);
EXPORT_SYMBOL(nlmsg_notify);