[NETLINK]: Return -EPROTONOSUPPORT in netlink_create() if no kernel socket is registered
net/netlink/af_netlink.c
/*
 * NETLINK      Kernel-user communication protocol.
 *
 *              Authors:        Alan Cox <alan@redhat.com>
 *                              Alexey Kuznetsov <kuznet@ms2.inr.ac.ru>
 *
 *              This program is free software; you can redistribute it and/or
 *              modify it under the terms of the GNU General Public License
 *              as published by the Free Software Foundation; either version
 *              2 of the License, or (at your option) any later version.
 *
 * Tue Jun 26 14:36:48 MEST 2001 Herbert "herp" Rosmanith
 *                               added netlink_proto_exit
 * Tue Jan 22 18:32:44 BRST 2002 Arnaldo C. de Melo <acme@conectiva.com.br>
 *                               use nlk_sk, as sk->protinfo is on a diet 8)
 * Fri Jul 22 19:51:12 MEST 2005 Harald Welte <laforge@gnumonks.org>
 *                               - inc module use count of module that owns
 *                                 the kernel socket in case userspace opens
 *                                 socket of same protocol
 *                               - remove all module support, since netlink is
 *                                 mandatory if CONFIG_NET=y these days
 */

#include <linux/config.h>
#include <linux/module.h>

#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/stat.h>
#include <linux/socket.h>
#include <linux/un.h>
#include <linux/fcntl.h>
#include <linux/termios.h>
#include <linux/sockios.h>
#include <linux/net.h>
#include <linux/fs.h>
#include <linux/slab.h>
#include <asm/uaccess.h>
#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include <linux/rtnetlink.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/smp_lock.h>
#include <linux/notifier.h>
#include <linux/security.h>
#include <linux/jhash.h>
#include <linux/jiffies.h>
#include <linux/random.h>
#include <linux/bitops.h>
#include <linux/mm.h>
#include <linux/types.h>
#include <linux/audit.h>

#include <net/sock.h>
#include <net/scm.h>

#define Nprintk(a...)

struct netlink_sock {
        /* struct sock has to be the first member of netlink_sock */
        struct sock             sk;
        u32                     pid;
        unsigned int            groups;
        u32                     dst_pid;
        u32                     dst_group;
        unsigned long           state;
        wait_queue_head_t       wait;
        struct netlink_callback *cb;
        spinlock_t              cb_lock;
        void                    (*data_ready)(struct sock *sk, int bytes);
        struct module           *module;
        u32                     flags;
};

#define NETLINK_KERNEL_SOCKET   0x1

static inline struct netlink_sock *nlk_sk(struct sock *sk)
{
        return (struct netlink_sock *)sk;
}

struct nl_pid_hash {
        struct hlist_head *table;
        unsigned long rehash_time;

        unsigned int mask;
        unsigned int shift;

        unsigned int entries;
        unsigned int max_shift;

        u32 rnd;
};

struct netlink_table {
        struct nl_pid_hash hash;
        struct hlist_head mc_list;
        unsigned int nl_nonroot;
        struct module *module;
        int registered;
};

static struct netlink_table *nl_table;

static DECLARE_WAIT_QUEUE_HEAD(nl_table_wait);

static int netlink_dump(struct sock *sk);
static void netlink_destroy_callback(struct netlink_callback *cb);

static DEFINE_RWLOCK(nl_table_lock);
static atomic_t nl_table_users = ATOMIC_INIT(0);

static struct notifier_block *netlink_chain;

static u32 netlink_group_mask(u32 group)
{
        return group ? 1 << (group - 1) : 0;
}

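/*
 * Usage note (added annotation, not in the original file): group numbers
 * are 1-based, so netlink_group_mask(1) == 0x1, netlink_group_mask(5) ==
 * 0x10, and netlink_group_mask(0) == 0 for "no group".
 */
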
static struct hlist_head *nl_pid_hashfn(struct nl_pid_hash *hash, u32 pid)
{
        return &hash->table[jhash_1word(pid, hash->rnd) & hash->mask];
}

static void netlink_sock_destruct(struct sock *sk)
{
        skb_queue_purge(&sk->sk_receive_queue);

        if (!sock_flag(sk, SOCK_DEAD)) {
                printk("Freeing alive netlink socket %p\n", sk);
                return;
        }
        BUG_TRAP(!atomic_read(&sk->sk_rmem_alloc));
        BUG_TRAP(!atomic_read(&sk->sk_wmem_alloc));
        BUG_TRAP(!nlk_sk(sk)->cb);
}

/* This lock without WQ_FLAG_EXCLUSIVE is good on UP and it is _very_ bad on SMP.
 * Look, when several writers sleep and reader wakes them up, all but one
 * immediately hit write lock and grab all the cpus. Exclusive sleep solves
 * this, _but_ remember, it adds useless work on UP machines.
 */

static void netlink_table_grab(void)
{
        write_lock_bh(&nl_table_lock);

        if (atomic_read(&nl_table_users)) {
                DECLARE_WAITQUEUE(wait, current);

                add_wait_queue_exclusive(&nl_table_wait, &wait);
                for(;;) {
                        set_current_state(TASK_UNINTERRUPTIBLE);
                        if (atomic_read(&nl_table_users) == 0)
                                break;
                        write_unlock_bh(&nl_table_lock);
                        schedule();
                        write_lock_bh(&nl_table_lock);
                }

                __set_current_state(TASK_RUNNING);
                remove_wait_queue(&nl_table_wait, &wait);
        }
}

static __inline__ void netlink_table_ungrab(void)
{
        write_unlock_bh(&nl_table_lock);
        wake_up(&nl_table_wait);
}

static __inline__ void
netlink_lock_table(void)
{
        /* read_lock() synchronizes us to netlink_table_grab */

        read_lock(&nl_table_lock);
        atomic_inc(&nl_table_users);
        read_unlock(&nl_table_lock);
}

static __inline__ void
netlink_unlock_table(void)
{
        if (atomic_dec_and_test(&nl_table_users))
                wake_up(&nl_table_wait);
}

static __inline__ struct sock *netlink_lookup(int protocol, u32 pid)
{
        struct nl_pid_hash *hash = &nl_table[protocol].hash;
        struct hlist_head *head;
        struct sock *sk;
        struct hlist_node *node;

        read_lock(&nl_table_lock);
        head = nl_pid_hashfn(hash, pid);
        sk_for_each(sk, node, head) {
                if (nlk_sk(sk)->pid == pid) {
                        sock_hold(sk);
                        goto found;
                }
        }
        sk = NULL;
found:
        read_unlock(&nl_table_lock);
        return sk;
}

static inline struct hlist_head *nl_pid_hash_alloc(size_t size)
{
        if (size <= PAGE_SIZE)
                return kmalloc(size, GFP_ATOMIC);
        else
                return (struct hlist_head *)
                        __get_free_pages(GFP_ATOMIC, get_order(size));
}

static inline void nl_pid_hash_free(struct hlist_head *table, size_t size)
{
        if (size <= PAGE_SIZE)
                kfree(table);
        else
                free_pages((unsigned long)table, get_order(size));
}

static int nl_pid_hash_rehash(struct nl_pid_hash *hash, int grow)
{
        unsigned int omask, mask, shift;
        size_t osize, size;
        struct hlist_head *otable, *table;
        int i;

        omask = mask = hash->mask;
        osize = size = (mask + 1) * sizeof(*table);
        shift = hash->shift;

        if (grow) {
                if (++shift > hash->max_shift)
                        return 0;
                mask = mask * 2 + 1;
                size *= 2;
        }

        table = nl_pid_hash_alloc(size);
        if (!table)
                return 0;

        memset(table, 0, size);
        otable = hash->table;
        hash->table = table;
        hash->mask = mask;
        hash->shift = shift;
        get_random_bytes(&hash->rnd, sizeof(hash->rnd));

        for (i = 0; i <= omask; i++) {
                struct sock *sk;
                struct hlist_node *node, *tmp;

                sk_for_each_safe(sk, node, tmp, &otable[i])
                        __sk_add_node(sk, nl_pid_hashfn(hash, nlk_sk(sk)->pid));
        }

        nl_pid_hash_free(otable, osize);
        hash->rehash_time = jiffies + 10 * 60 * HZ;
        return 1;
}

static inline int nl_pid_hash_dilute(struct nl_pid_hash *hash, int len)
{
        int avg = hash->entries >> hash->shift;

        if (unlikely(avg > 1) && nl_pid_hash_rehash(hash, 1))
                return 1;

        if (unlikely(len > avg) && time_after(jiffies, hash->rehash_time)) {
                nl_pid_hash_rehash(hash, 0);
                return 1;
        }

        return 0;
}

static struct proto_ops netlink_ops;

static int netlink_insert(struct sock *sk, u32 pid)
{
        struct nl_pid_hash *hash = &nl_table[sk->sk_protocol].hash;
        struct hlist_head *head;
        int err = -EADDRINUSE;
        struct sock *osk;
        struct hlist_node *node;
        int len;

        netlink_table_grab();
        head = nl_pid_hashfn(hash, pid);
        len = 0;
        sk_for_each(osk, node, head) {
                if (nlk_sk(osk)->pid == pid)
                        break;
                len++;
        }
        if (node)
                goto err;

        err = -EBUSY;
        if (nlk_sk(sk)->pid)
                goto err;

        err = -ENOMEM;
        if (BITS_PER_LONG > 32 && unlikely(hash->entries >= UINT_MAX))
                goto err;

        if (len && nl_pid_hash_dilute(hash, len))
                head = nl_pid_hashfn(hash, pid);
        hash->entries++;
        nlk_sk(sk)->pid = pid;
        sk_add_node(sk, head);
        err = 0;

err:
        netlink_table_ungrab();
        return err;
}

static void netlink_remove(struct sock *sk)
{
        netlink_table_grab();
        if (sk_del_node_init(sk))
                nl_table[sk->sk_protocol].hash.entries--;
        if (nlk_sk(sk)->groups)
                __sk_del_bind_node(sk);
        netlink_table_ungrab();
}

static struct proto netlink_proto = {
        .name     = "NETLINK",
        .owner    = THIS_MODULE,
        .obj_size = sizeof(struct netlink_sock),
};

static int __netlink_create(struct socket *sock, int protocol)
{
        struct sock *sk;
        struct netlink_sock *nlk;

        sock->ops = &netlink_ops;

        sk = sk_alloc(PF_NETLINK, GFP_KERNEL, &netlink_proto, 1);
        if (!sk)
                return -ENOMEM;

        sock_init_data(sock, sk);

        nlk = nlk_sk(sk);
        spin_lock_init(&nlk->cb_lock);
        init_waitqueue_head(&nlk->wait);

        sk->sk_destruct = netlink_sock_destruct;
        sk->sk_protocol = protocol;
        return 0;
}

static int netlink_create(struct socket *sock, int protocol)
{
        struct module *module = NULL;
        int err = 0;

        sock->state = SS_UNCONNECTED;

        if (sock->type != SOCK_RAW && sock->type != SOCK_DGRAM)
                return -ESOCKTNOSUPPORT;

        if (protocol < 0 || protocol >= MAX_LINKS)
                return -EPROTONOSUPPORT;

        netlink_lock_table();
#ifdef CONFIG_KMOD
        if (!nl_table[protocol].registered) {
                netlink_unlock_table();
                request_module("net-pf-%d-proto-%d", PF_NETLINK, protocol);
                netlink_lock_table();
        }
#endif
        if (nl_table[protocol].registered &&
            try_module_get(nl_table[protocol].module))
                module = nl_table[protocol].module;
        else
                err = -EPROTONOSUPPORT;
        netlink_unlock_table();

        if (err)
                goto out;

        if ((err = __netlink_create(sock, protocol)) < 0)
                goto out_module;

        nlk_sk(sock->sk)->module = module;
out:
        return err;

out_module:
        module_put(module);
        goto out;
}

static int netlink_release(struct socket *sock)
{
        struct sock *sk = sock->sk;
        struct netlink_sock *nlk;

        if (!sk)
                return 0;

        netlink_remove(sk);
        nlk = nlk_sk(sk);

        spin_lock(&nlk->cb_lock);
        if (nlk->cb) {
                nlk->cb->done(nlk->cb);
                netlink_destroy_callback(nlk->cb);
                nlk->cb = NULL;
        }
        spin_unlock(&nlk->cb_lock);

        /* OK. Socket is unlinked, and, therefore,
           no new packets will arrive */

        sock_orphan(sk);
        sock->sk = NULL;
        wake_up_interruptible_all(&nlk->wait);

        skb_queue_purge(&sk->sk_write_queue);

        if (nlk->pid && !nlk->groups) {
                struct netlink_notify n = {
                        .protocol = sk->sk_protocol,
                        .pid = nlk->pid,
                };
                notifier_call_chain(&netlink_chain, NETLINK_URELEASE, &n);
        }

        if (nlk->module)
                module_put(nlk->module);

        if (nlk->flags & NETLINK_KERNEL_SOCKET) {
                netlink_table_grab();
                nl_table[sk->sk_protocol].module = NULL;
                nl_table[sk->sk_protocol].registered = 0;
                netlink_table_ungrab();
        }

        sock_put(sk);
        return 0;
}

static int netlink_autobind(struct socket *sock)
{
        struct sock *sk = sock->sk;
        struct nl_pid_hash *hash = &nl_table[sk->sk_protocol].hash;
        struct hlist_head *head;
        struct sock *osk;
        struct hlist_node *node;
        s32 pid = current->pid;
        int err;
        static s32 rover = -4097;

retry:
        cond_resched();
        netlink_table_grab();
        head = nl_pid_hashfn(hash, pid);
        sk_for_each(osk, node, head) {
                if (nlk_sk(osk)->pid == pid) {
                        /* Bind collision, search negative pid values. */
                        pid = rover--;
                        if (rover > -4097)
                                rover = -4097;
                        netlink_table_ungrab();
                        goto retry;
                }
        }
        netlink_table_ungrab();

        err = netlink_insert(sk, pid);
        if (err == -EADDRINUSE)
                goto retry;

        /* If 2 threads race to autobind, that is fine.  */
        if (err == -EBUSY)
                err = 0;

        return err;
}

static inline int netlink_capable(struct socket *sock, unsigned int flag)
{
        return (nl_table[sock->sk->sk_protocol].nl_nonroot & flag) ||
               capable(CAP_NET_ADMIN);
}

static int netlink_bind(struct socket *sock, struct sockaddr *addr, int addr_len)
{
        struct sock *sk = sock->sk;
        struct netlink_sock *nlk = nlk_sk(sk);
        struct sockaddr_nl *nladdr = (struct sockaddr_nl *)addr;
        int err;

        if (nladdr->nl_family != AF_NETLINK)
                return -EINVAL;

        /* Only superuser is allowed to listen to multicasts */
        if (nladdr->nl_groups && !netlink_capable(sock, NL_NONROOT_RECV))
                return -EPERM;

        if (nlk->pid) {
                if (nladdr->nl_pid != nlk->pid)
                        return -EINVAL;
        } else {
                err = nladdr->nl_pid ?
                        netlink_insert(sk, nladdr->nl_pid) :
                        netlink_autobind(sock);
                if (err)
                        return err;
        }

        if (!nladdr->nl_groups && !nlk->groups)
                return 0;

        netlink_table_grab();
        if (nlk->groups && !nladdr->nl_groups)
                __sk_del_bind_node(sk);
        else if (!nlk->groups && nladdr->nl_groups)
                sk_add_bind_node(sk, &nl_table[sk->sk_protocol].mc_list);
        nlk->groups = nladdr->nl_groups;
        netlink_table_ungrab();

        return 0;
}

static int netlink_connect(struct socket *sock, struct sockaddr *addr,
                           int alen, int flags)
{
        int err = 0;
        struct sock *sk = sock->sk;
        struct netlink_sock *nlk = nlk_sk(sk);
        struct sockaddr_nl *nladdr = (struct sockaddr_nl *)addr;

        if (addr->sa_family == AF_UNSPEC) {
                sk->sk_state = NETLINK_UNCONNECTED;
                nlk->dst_pid = 0;
                nlk->dst_group = 0;
                return 0;
        }
        if (addr->sa_family != AF_NETLINK)
                return -EINVAL;

        /* Only superuser is allowed to send multicasts */
        if (nladdr->nl_groups && !netlink_capable(sock, NL_NONROOT_SEND))
                return -EPERM;

        if (!nlk->pid)
                err = netlink_autobind(sock);

        if (err == 0) {
                sk->sk_state = NETLINK_CONNECTED;
                nlk->dst_pid = nladdr->nl_pid;
                nlk->dst_group = ffs(nladdr->nl_groups);
        }

        return err;
}

static int netlink_getname(struct socket *sock, struct sockaddr *addr, int *addr_len, int peer)
{
        struct sock *sk = sock->sk;
        struct netlink_sock *nlk = nlk_sk(sk);
        struct sockaddr_nl *nladdr = (struct sockaddr_nl *)addr;

        nladdr->nl_family = AF_NETLINK;
        nladdr->nl_pad = 0;
        *addr_len = sizeof(*nladdr);

        if (peer) {
                nladdr->nl_pid = nlk->dst_pid;
                nladdr->nl_groups = netlink_group_mask(nlk->dst_group);
        } else {
                nladdr->nl_pid = nlk->pid;
                nladdr->nl_groups = nlk->groups;
        }
        return 0;
}

static void netlink_overrun(struct sock *sk)
{
        if (!test_and_set_bit(0, &nlk_sk(sk)->state)) {
                sk->sk_err = ENOBUFS;
                sk->sk_error_report(sk);
        }
}

static struct sock *netlink_getsockbypid(struct sock *ssk, u32 pid)
{
        int protocol = ssk->sk_protocol;
        struct sock *sock;
        struct netlink_sock *nlk;

        sock = netlink_lookup(protocol, pid);
        if (!sock)
                return ERR_PTR(-ECONNREFUSED);

        /* Don't bother queuing skb if kernel socket has no input function */
        nlk = nlk_sk(sock);
        if ((nlk->pid == 0 && !nlk->data_ready) ||
            (sock->sk_state == NETLINK_CONNECTED &&
             nlk->dst_pid != nlk_sk(ssk)->pid)) {
                sock_put(sock);
                return ERR_PTR(-ECONNREFUSED);
        }
        return sock;
}

struct sock *netlink_getsockbyfilp(struct file *filp)
{
        struct inode *inode = filp->f_dentry->d_inode;
        struct sock *sock;

        if (!S_ISSOCK(inode->i_mode))
                return ERR_PTR(-ENOTSOCK);

        sock = SOCKET_I(inode)->sk;
        if (sock->sk_family != AF_NETLINK)
                return ERR_PTR(-EINVAL);

        sock_hold(sock);
        return sock;
}

/*
 * Attach a skb to a netlink socket.
 * The caller must hold a reference to the destination socket. On error, the
 * reference is dropped. The skb is not sent to the destination, just all
 * error checks are performed and memory in the queue is reserved.
 * Return values:
 * < 0: error. skb freed, reference to sock dropped.
 * 0: continue
 * 1: repeat lookup - reference dropped while waiting for socket memory.
 */
int netlink_attachskb(struct sock *sk, struct sk_buff *skb, int nonblock, long timeo)
{
        struct netlink_sock *nlk;

        nlk = nlk_sk(sk);

        if (atomic_read(&sk->sk_rmem_alloc) > sk->sk_rcvbuf ||
            test_bit(0, &nlk->state)) {
                DECLARE_WAITQUEUE(wait, current);
                if (!timeo) {
                        if (!nlk->pid)
                                netlink_overrun(sk);
                        sock_put(sk);
                        kfree_skb(skb);
                        return -EAGAIN;
                }

                __set_current_state(TASK_INTERRUPTIBLE);
                add_wait_queue(&nlk->wait, &wait);

                if ((atomic_read(&sk->sk_rmem_alloc) > sk->sk_rcvbuf ||
                     test_bit(0, &nlk->state)) &&
                    !sock_flag(sk, SOCK_DEAD))
                        timeo = schedule_timeout(timeo);

                __set_current_state(TASK_RUNNING);
                remove_wait_queue(&nlk->wait, &wait);
                sock_put(sk);

                if (signal_pending(current)) {
                        kfree_skb(skb);
                        return sock_intr_errno(timeo);
                }
                return 1;
        }
        skb_set_owner_r(skb, sk);
        return 0;
}

int netlink_sendskb(struct sock *sk, struct sk_buff *skb, int protocol)
{
        struct netlink_sock *nlk;
        int len = skb->len;

        nlk = nlk_sk(sk);

        skb_queue_tail(&sk->sk_receive_queue, skb);
        sk->sk_data_ready(sk, len);
        sock_put(sk);
        return len;
}

void netlink_detachskb(struct sock *sk, struct sk_buff *skb)
{
        kfree_skb(skb);
        sock_put(sk);
}

static inline struct sk_buff *netlink_trim(struct sk_buff *skb,
                                           unsigned int __nocast allocation)
{
        int delta;

        skb_orphan(skb);

        delta = skb->end - skb->tail;
        if (delta * 2 < skb->truesize)
                return skb;

        if (skb_shared(skb)) {
                struct sk_buff *nskb = skb_clone(skb, allocation);
                if (!nskb)
                        return skb;
                kfree_skb(skb);
                skb = nskb;
        }

        if (!pskb_expand_head(skb, 0, -delta, allocation))
                skb->truesize -= delta;

        return skb;
}

int netlink_unicast(struct sock *ssk, struct sk_buff *skb, u32 pid, int nonblock)
{
        struct sock *sk;
        int err;
        long timeo;

        skb = netlink_trim(skb, gfp_any());

        timeo = sock_sndtimeo(ssk, nonblock);
retry:
        sk = netlink_getsockbypid(ssk, pid);
        if (IS_ERR(sk)) {
                kfree_skb(skb);
                return PTR_ERR(sk);
        }
        err = netlink_attachskb(sk, skb, nonblock, timeo);
        if (err == 1)
                goto retry;
        if (err)
                return err;

        return netlink_sendskb(sk, skb, ssk->sk_protocol);
}

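/*
 * Illustrative sketch (not part of the original file): how a kernel-side
 * caller might unicast a message from its kernel socket to a userspace
 * listener. "my_sk", "dst_pid" and the payload are hypothetical; the
 * NLMSG_* helpers and __nlmsg_put() are the <linux/netlink.h> ones of
 * this era.
 */
#if 0
static int example_unicast(struct sock *my_sk, u32 dst_pid, int type)
{
        struct sk_buff *skb;
        struct nlmsghdr *nlh;
        u32 data = 42;                  /* hypothetical payload */

        skb = alloc_skb(NLMSG_SPACE(sizeof(data)), GFP_KERNEL);
        if (!skb)
                return -ENOBUFS;

        nlh = __nlmsg_put(skb, 0, 0, type, sizeof(data), 0);
        memcpy(NLMSG_DATA(nlh), &data, sizeof(data));

        /* may block (up to the sndtimeo) if the receiver's queue is full */
        return netlink_unicast(my_sk, skb, dst_pid, 0);
}
#endif
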
static __inline__ int netlink_broadcast_deliver(struct sock *sk, struct sk_buff *skb)
{
        struct netlink_sock *nlk = nlk_sk(sk);

        if (atomic_read(&sk->sk_rmem_alloc) <= sk->sk_rcvbuf &&
            !test_bit(0, &nlk->state)) {
                skb_set_owner_r(skb, sk);
                skb_queue_tail(&sk->sk_receive_queue, skb);
                sk->sk_data_ready(sk, skb->len);
                return atomic_read(&sk->sk_rmem_alloc) > sk->sk_rcvbuf;
        }
        return -1;
}

struct netlink_broadcast_data {
        struct sock *exclude_sk;
        u32 pid;
        u32 group;
        int failure;
        int congested;
        int delivered;
        unsigned int allocation;
        struct sk_buff *skb, *skb2;
};

static inline int do_one_broadcast(struct sock *sk,
                                   struct netlink_broadcast_data *p)
{
        struct netlink_sock *nlk = nlk_sk(sk);
        int val;

        if (p->exclude_sk == sk)
                goto out;

        if (nlk->pid == p->pid || !(nlk->groups & netlink_group_mask(p->group)))
                goto out;

        if (p->failure) {
                netlink_overrun(sk);
                goto out;
        }

        sock_hold(sk);
        if (p->skb2 == NULL) {
                if (skb_shared(p->skb)) {
                        p->skb2 = skb_clone(p->skb, p->allocation);
                } else {
                        p->skb2 = skb_get(p->skb);
                        /*
                         * skb ownership may have been set when
                         * delivered to a previous socket.
                         */
                        skb_orphan(p->skb2);
                }
        }
        if (p->skb2 == NULL) {
                netlink_overrun(sk);
                /* Clone failed. Notify ALL listeners. */
                p->failure = 1;
        } else if ((val = netlink_broadcast_deliver(sk, p->skb2)) < 0) {
                netlink_overrun(sk);
        } else {
                p->congested |= val;
                p->delivered = 1;
                p->skb2 = NULL;
        }
        sock_put(sk);

out:
        return 0;
}

int netlink_broadcast(struct sock *ssk, struct sk_buff *skb, u32 pid,
                      u32 group, int allocation)
{
        struct netlink_broadcast_data info;
        struct hlist_node *node;
        struct sock *sk;

        skb = netlink_trim(skb, allocation);

        info.exclude_sk = ssk;
        info.pid = pid;
        info.group = group;
        info.failure = 0;
        info.congested = 0;
        info.delivered = 0;
        info.allocation = allocation;
        info.skb = skb;
        info.skb2 = NULL;

        /* While we sleep in clone, do not allow to change socket list */

        netlink_lock_table();

        sk_for_each_bound(sk, node, &nl_table[ssk->sk_protocol].mc_list)
                do_one_broadcast(sk, &info);

        kfree_skb(skb);

        netlink_unlock_table();

        if (info.skb2)
                kfree_skb(info.skb2);

        if (info.delivered) {
                if (info.congested && (allocation & __GFP_WAIT))
                        yield();
                return 0;
        }
        if (info.failure)
                return -ENOBUFS;
        return -ESRCH;
}

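/*
 * Illustrative sketch (not part of the original file): broadcasting one
 * message to every member of multicast group 1 on a kernel socket.
 * "my_sk" and the message type are hypothetical. netlink_broadcast()
 * consumes the skb, so no cleanup is needed here.
 */
#if 0
static void example_broadcast(struct sock *my_sk, int type)
{
        struct sk_buff *skb;

        skb = alloc_skb(NLMSG_SPACE(0), GFP_KERNEL);
        if (!skb)
                return;

        __nlmsg_put(skb, 0, 0, type, 0, 0);

        /* pid 0: exclude no particular listener; groups are 1-based */
        netlink_broadcast(my_sk, skb, 0, 1, GFP_KERNEL);
}
#endif
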
struct netlink_set_err_data {
        struct sock *exclude_sk;
        u32 pid;
        u32 group;
        int code;
};

static inline int do_one_set_err(struct sock *sk,
                                 struct netlink_set_err_data *p)
{
        struct netlink_sock *nlk = nlk_sk(sk);

        if (sk == p->exclude_sk)
                goto out;

        if (nlk->pid == p->pid || !(nlk->groups & netlink_group_mask(p->group)))
                goto out;

        sk->sk_err = p->code;
        sk->sk_error_report(sk);
out:
        return 0;
}

void netlink_set_err(struct sock *ssk, u32 pid, u32 group, int code)
{
        struct netlink_set_err_data info;
        struct hlist_node *node;
        struct sock *sk;

        info.exclude_sk = ssk;
        info.pid = pid;
        info.group = group;
        info.code = code;

        read_lock(&nl_table_lock);

        sk_for_each_bound(sk, node, &nl_table[ssk->sk_protocol].mc_list)
                do_one_set_err(sk, &info);

        read_unlock(&nl_table_lock);
}

static inline void netlink_rcv_wake(struct sock *sk)
{
        struct netlink_sock *nlk = nlk_sk(sk);

        if (skb_queue_empty(&sk->sk_receive_queue))
                clear_bit(0, &nlk->state);
        if (!test_bit(0, &nlk->state))
                wake_up_interruptible(&nlk->wait);
}

static int netlink_sendmsg(struct kiocb *kiocb, struct socket *sock,
                           struct msghdr *msg, size_t len)
{
        struct sock_iocb *siocb = kiocb_to_siocb(kiocb);
        struct sock *sk = sock->sk;
        struct netlink_sock *nlk = nlk_sk(sk);
        struct sockaddr_nl *addr = msg->msg_name;
        u32 dst_pid;
        u32 dst_group;
        struct sk_buff *skb;
        int err;
        struct scm_cookie scm;

        if (msg->msg_flags&MSG_OOB)
                return -EOPNOTSUPP;

        if (NULL == siocb->scm)
                siocb->scm = &scm;
        err = scm_send(sock, msg, siocb->scm);
        if (err < 0)
                return err;

        if (msg->msg_namelen) {
                if (addr->nl_family != AF_NETLINK)
                        return -EINVAL;
                dst_pid = addr->nl_pid;
                dst_group = ffs(addr->nl_groups);
                if (dst_group && !netlink_capable(sock, NL_NONROOT_SEND))
                        return -EPERM;
        } else {
                dst_pid = nlk->dst_pid;
                dst_group = nlk->dst_group;
        }

        if (!nlk->pid) {
                err = netlink_autobind(sock);
                if (err)
                        goto out;
        }

        err = -EMSGSIZE;
        if (len > sk->sk_sndbuf - 32)
                goto out;
        err = -ENOBUFS;
        skb = alloc_skb(len, GFP_KERNEL);
        if (skb == NULL)
                goto out;

        NETLINK_CB(skb).pid       = nlk->pid;
        NETLINK_CB(skb).dst_pid   = dst_pid;
        NETLINK_CB(skb).dst_group = dst_group;
        NETLINK_CB(skb).loginuid  = audit_get_loginuid(current->audit_context);
        memcpy(NETLINK_CREDS(skb), &siocb->scm->creds, sizeof(struct ucred));

        /* What can I do? Netlink is asynchronous, so that
           we will have to save current capabilities to
           check them, when this message will be delivered
           to corresponding kernel module.   --ANK (980802)
         */

        err = -EFAULT;
        if (memcpy_fromiovec(skb_put(skb, len), msg->msg_iov, len)) {
                kfree_skb(skb);
                goto out;
        }

        err = security_netlink_send(sk, skb);
        if (err) {
                kfree_skb(skb);
                goto out;
        }

        if (dst_group) {
                atomic_inc(&skb->users);
                netlink_broadcast(sk, skb, dst_pid, dst_group, GFP_KERNEL);
        }
        err = netlink_unicast(sk, skb, dst_pid, msg->msg_flags&MSG_DONTWAIT);

out:
        return err;
}

static int netlink_recvmsg(struct kiocb *kiocb, struct socket *sock,
                           struct msghdr *msg, size_t len,
                           int flags)
{
        struct sock_iocb *siocb = kiocb_to_siocb(kiocb);
        struct scm_cookie scm;
        struct sock *sk = sock->sk;
        struct netlink_sock *nlk = nlk_sk(sk);
        int noblock = flags&MSG_DONTWAIT;
        size_t copied;
        struct sk_buff *skb;
        int err;

        if (flags&MSG_OOB)
                return -EOPNOTSUPP;

        copied = 0;

        skb = skb_recv_datagram(sk, flags, noblock, &err);
        if (skb == NULL)
                goto out;

        msg->msg_namelen = 0;

        copied = skb->len;
        if (len < copied) {
                msg->msg_flags |= MSG_TRUNC;
                copied = len;
        }

        skb->h.raw = skb->data;
        err = skb_copy_datagram_iovec(skb, 0, msg->msg_iov, copied);

        if (msg->msg_name) {
                struct sockaddr_nl *addr = (struct sockaddr_nl *)msg->msg_name;
                addr->nl_family = AF_NETLINK;
                addr->nl_pad    = 0;
                addr->nl_pid    = NETLINK_CB(skb).pid;
                addr->nl_groups = netlink_group_mask(NETLINK_CB(skb).dst_group);
                msg->msg_namelen = sizeof(*addr);
        }

        if (NULL == siocb->scm) {
                memset(&scm, 0, sizeof(scm));
                siocb->scm = &scm;
        }
        siocb->scm->creds = *NETLINK_CREDS(skb);
        skb_free_datagram(sk, skb);

        if (nlk->cb && atomic_read(&sk->sk_rmem_alloc) <= sk->sk_rcvbuf / 2)
                netlink_dump(sk);

        scm_recv(sock, msg, siocb->scm, flags);

out:
        netlink_rcv_wake(sk);
        return err ? : copied;
}

static void netlink_data_ready(struct sock *sk, int len)
{
        struct netlink_sock *nlk = nlk_sk(sk);

        if (nlk->data_ready)
                nlk->data_ready(sk, len);
        netlink_rcv_wake(sk);
}

/*
 * We export these functions to other modules. They provide a
 * complete set of kernel non-blocking support for message
 * queueing.
 */

struct sock *
netlink_kernel_create(int unit, void (*input)(struct sock *sk, int len), struct module *module)
{
        struct socket *sock;
        struct sock *sk;
        struct netlink_sock *nlk;

        if (!nl_table)
                return NULL;

        if (unit < 0 || unit >= MAX_LINKS)
                return NULL;

        if (sock_create_lite(PF_NETLINK, SOCK_DGRAM, unit, &sock))
                return NULL;

        if (__netlink_create(sock, unit) < 0)
                goto out_sock_release;

        sk = sock->sk;
        sk->sk_data_ready = netlink_data_ready;
        if (input)
                nlk_sk(sk)->data_ready = input;

        if (netlink_insert(sk, 0))
                goto out_sock_release;

        nlk = nlk_sk(sk);
        nlk->flags |= NETLINK_KERNEL_SOCKET;

        netlink_table_grab();
        nl_table[unit].module = module;
        nl_table[unit].registered = 1;
        netlink_table_ungrab();

        return sk;

out_sock_release:
        sock_release(sock);
        return NULL;
}

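/*
 * Illustrative sketch (not part of the original file): a module registering
 * its own kernel netlink socket. MY_NETLINK_PROTO is a hypothetical protocol
 * number below MAX_LINKS; the input callback drains the receive queue.
 */
#if 0
static struct sock *my_nl_sk;

static void my_input(struct sock *sk, int len)
{
        struct sk_buff *skb;

        while ((skb = skb_dequeue(&sk->sk_receive_queue)) != NULL) {
                struct nlmsghdr *nlh = (struct nlmsghdr *)skb->data;
                /* ... validate nlh and dispatch ... */
                kfree_skb(skb);
        }
}

static int __init my_module_init(void)
{
        my_nl_sk = netlink_kernel_create(MY_NETLINK_PROTO, my_input, THIS_MODULE);
        if (my_nl_sk == NULL)
                return -ENOMEM;
        return 0;
}

static void __exit my_module_exit(void)
{
        /* releasing the underlying socket tears down the kernel socket */
        sock_release(my_nl_sk->sk_socket);
}
#endif
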
void netlink_set_nonroot(int protocol, unsigned int flags)
{
        if ((unsigned int)protocol < MAX_LINKS)
                nl_table[protocol].nl_nonroot = flags;
}

static void netlink_destroy_callback(struct netlink_callback *cb)
{
        if (cb->skb)
                kfree_skb(cb->skb);
        kfree(cb);
}

/*
 * It looks a bit ugly.
 * It would be better to create kernel thread.
 */

static int netlink_dump(struct sock *sk)
{
        struct netlink_sock *nlk = nlk_sk(sk);
        struct netlink_callback *cb;
        struct sk_buff *skb;
        struct nlmsghdr *nlh;
        int len;

        skb = sock_rmalloc(sk, NLMSG_GOODSIZE, 0, GFP_KERNEL);
        if (!skb)
                return -ENOBUFS;

        spin_lock(&nlk->cb_lock);

        cb = nlk->cb;
        if (cb == NULL) {
                spin_unlock(&nlk->cb_lock);
                kfree_skb(skb);
                return -EINVAL;
        }

        len = cb->dump(skb, cb);

        if (len > 0) {
                spin_unlock(&nlk->cb_lock);
                skb_queue_tail(&sk->sk_receive_queue, skb);
                sk->sk_data_ready(sk, len);
                return 0;
        }

        nlh = NLMSG_NEW_ANSWER(skb, cb, NLMSG_DONE, sizeof(len), NLM_F_MULTI);
        memcpy(NLMSG_DATA(nlh), &len, sizeof(len));
        skb_queue_tail(&sk->sk_receive_queue, skb);
        sk->sk_data_ready(sk, skb->len);

        cb->done(cb);
        nlk->cb = NULL;
        spin_unlock(&nlk->cb_lock);

        netlink_destroy_callback(cb);
        return 0;

nlmsg_failure:
        return -ENOBUFS;
}

int netlink_dump_start(struct sock *ssk, struct sk_buff *skb,
                       struct nlmsghdr *nlh,
                       int (*dump)(struct sk_buff *skb, struct netlink_callback*),
                       int (*done)(struct netlink_callback*))
{
        struct netlink_callback *cb;
        struct sock *sk;
        struct netlink_sock *nlk;

        cb = kmalloc(sizeof(*cb), GFP_KERNEL);
        if (cb == NULL)
                return -ENOBUFS;

        memset(cb, 0, sizeof(*cb));
        cb->dump = dump;
        cb->done = done;
        cb->nlh = nlh;
        atomic_inc(&skb->users);
        cb->skb = skb;

        sk = netlink_lookup(ssk->sk_protocol, NETLINK_CB(skb).pid);
        if (sk == NULL) {
                netlink_destroy_callback(cb);
                return -ECONNREFUSED;
        }
        nlk = nlk_sk(sk);
        /* A dump is in progress... */
        spin_lock(&nlk->cb_lock);
        if (nlk->cb) {
                spin_unlock(&nlk->cb_lock);
                netlink_destroy_callback(cb);
                sock_put(sk);
                return -EBUSY;
        }
        nlk->cb = cb;
        spin_unlock(&nlk->cb_lock);

        netlink_dump(sk);
        sock_put(sk);
        return 0;
}

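/*
 * Illustrative sketch (not part of the original file): the dump()/done()
 * pair a caller might hand to netlink_dump_start(). dump() returns skb->len
 * while more records remain (cb->args[] carries the cursor) and 0 once
 * finished; NLMSG_PUT jumps to nlmsg_failure when the skb fills up.
 * "my_record_count" and MY_MSG_TYPE are hypothetical.
 */
#if 0
static int my_dump(struct sk_buff *skb, struct netlink_callback *cb)
{
        int idx;

        for (idx = cb->args[0]; idx < my_record_count; idx++) {
                struct nlmsghdr *nlh;

                nlh = NLMSG_PUT(skb, NETLINK_CB(cb->skb).pid,
                                cb->nlh->nlmsg_seq, MY_MSG_TYPE, sizeof(u32));
                nlh->nlmsg_flags |= NLM_F_MULTI;
                *(u32 *)NLMSG_DATA(nlh) = idx;
        }

nlmsg_failure:
        cb->args[0] = idx;
        return idx < my_record_count ? skb->len : 0;
}

static int my_done(struct netlink_callback *cb)
{
        return 0;
}
#endif
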
void netlink_ack(struct sk_buff *in_skb, struct nlmsghdr *nlh, int err)
{
        struct sk_buff *skb;
        struct nlmsghdr *rep;
        struct nlmsgerr *errmsg;
        int size;

        if (err == 0)
                size = NLMSG_SPACE(sizeof(struct nlmsgerr));
        else
                size = NLMSG_SPACE(4 + NLMSG_ALIGN(nlh->nlmsg_len));

        skb = alloc_skb(size, GFP_KERNEL);
        if (!skb) {
                struct sock *sk;

                sk = netlink_lookup(in_skb->sk->sk_protocol,
                                    NETLINK_CB(in_skb).pid);
                if (sk) {
                        sk->sk_err = ENOBUFS;
                        sk->sk_error_report(sk);
                        sock_put(sk);
                }
                return;
        }

        rep = __nlmsg_put(skb, NETLINK_CB(in_skb).pid, nlh->nlmsg_seq,
                          NLMSG_ERROR, sizeof(struct nlmsgerr), 0);
        errmsg = NLMSG_DATA(rep);
        errmsg->error = err;
        memcpy(&errmsg->msg, nlh, err ? nlh->nlmsg_len : sizeof(struct nlmsghdr));
        netlink_unicast(in_skb->sk, skb, NETLINK_CB(in_skb).pid, MSG_DONTWAIT);
}

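/*
 * Illustrative sketch (not part of the original file): a protocol's receive
 * handler acking each request, in the style rtnetlink uses when NLM_F_ACK
 * is set. "my_process_msg" is hypothetical.
 */
#if 0
static void my_handle_skb(struct sk_buff *skb)
{
        struct nlmsghdr *nlh = (struct nlmsghdr *)skb->data;
        int err = my_process_msg(skb, nlh);

        /* reply with an error, or an explicit ack if one was requested */
        if (err || (nlh->nlmsg_flags & NLM_F_ACK))
                netlink_ack(skb, nlh, err);
}
#endif
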
#ifdef CONFIG_PROC_FS
struct nl_seq_iter {
        int link;
        int hash_idx;
};

static struct sock *netlink_seq_socket_idx(struct seq_file *seq, loff_t pos)
{
        struct nl_seq_iter *iter = seq->private;
        int i, j;
        struct sock *s;
        struct hlist_node *node;
        loff_t off = 0;

        for (i = 0; i < MAX_LINKS; i++) {
                struct nl_pid_hash *hash = &nl_table[i].hash;

                for (j = 0; j <= hash->mask; j++) {
                        sk_for_each(s, node, &hash->table[j]) {
                                if (off == pos) {
                                        iter->link = i;
                                        iter->hash_idx = j;
                                        return s;
                                }
                                ++off;
                        }
                }
        }
        return NULL;
}

static void *netlink_seq_start(struct seq_file *seq, loff_t *pos)
{
        read_lock(&nl_table_lock);
        return *pos ? netlink_seq_socket_idx(seq, *pos - 1) : SEQ_START_TOKEN;
}

static void *netlink_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
        struct sock *s;
        struct nl_seq_iter *iter;
        int i, j;

        ++*pos;

        if (v == SEQ_START_TOKEN)
                return netlink_seq_socket_idx(seq, 0);

        s = sk_next(v);
        if (s)
                return s;

        iter = seq->private;
        i = iter->link;
        j = iter->hash_idx + 1;

        do {
                struct nl_pid_hash *hash = &nl_table[i].hash;

                for (; j <= hash->mask; j++) {
                        s = sk_head(&hash->table[j]);
                        if (s) {
                                iter->link = i;
                                iter->hash_idx = j;
                                return s;
                        }
                }

                j = 0;
        } while (++i < MAX_LINKS);

        return NULL;
}

static void netlink_seq_stop(struct seq_file *seq, void *v)
{
        read_unlock(&nl_table_lock);
}


static int netlink_seq_show(struct seq_file *seq, void *v)
{
        if (v == SEQ_START_TOKEN)
                seq_puts(seq,
                         "sk       Eth Pid    Groups   "
                         "Rmem     Wmem     Dump     Locks\n");
        else {
                struct sock *s = v;
                struct netlink_sock *nlk = nlk_sk(s);

                seq_printf(seq, "%p %-3d %-6d %08x %-8d %-8d %p %d\n",
                           s,
                           s->sk_protocol,
                           nlk->pid,
                           nlk->groups,
                           atomic_read(&s->sk_rmem_alloc),
                           atomic_read(&s->sk_wmem_alloc),
                           nlk->cb,
                           atomic_read(&s->sk_refcnt)
                        );

        }
        return 0;
}

static struct seq_operations netlink_seq_ops = {
        .start  = netlink_seq_start,
        .next   = netlink_seq_next,
        .stop   = netlink_seq_stop,
        .show   = netlink_seq_show,
};


static int netlink_seq_open(struct inode *inode, struct file *file)
{
        struct seq_file *seq;
        struct nl_seq_iter *iter;
        int err;

        iter = kmalloc(sizeof(*iter), GFP_KERNEL);
        if (!iter)
                return -ENOMEM;

        err = seq_open(file, &netlink_seq_ops);
        if (err) {
                kfree(iter);
                return err;
        }

        memset(iter, 0, sizeof(*iter));
        seq = file->private_data;
        seq->private = iter;
        return 0;
}

static struct file_operations netlink_seq_fops = {
        .owner          = THIS_MODULE,
        .open           = netlink_seq_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = seq_release_private,
};

#endif

int netlink_register_notifier(struct notifier_block *nb)
{
        return notifier_chain_register(&netlink_chain, nb);
}

int netlink_unregister_notifier(struct notifier_block *nb)
{
        return notifier_chain_unregister(&netlink_chain, nb);
}

static struct proto_ops netlink_ops = {
        .family =       PF_NETLINK,
        .owner =        THIS_MODULE,
        .release =      netlink_release,
        .bind =         netlink_bind,
        .connect =      netlink_connect,
        .socketpair =   sock_no_socketpair,
        .accept =       sock_no_accept,
        .getname =      netlink_getname,
        .poll =         datagram_poll,
        .ioctl =        sock_no_ioctl,
        .listen =       sock_no_listen,
        .shutdown =     sock_no_shutdown,
        .setsockopt =   sock_no_setsockopt,
        .getsockopt =   sock_no_getsockopt,
        .sendmsg =      netlink_sendmsg,
        .recvmsg =      netlink_recvmsg,
        .mmap =         sock_no_mmap,
        .sendpage =     sock_no_sendpage,
};

static struct net_proto_family netlink_family_ops = {
        .family = PF_NETLINK,
        .create = netlink_create,
        .owner  = THIS_MODULE,  /* for consistency 8) */
};

extern void netlink_skb_parms_too_large(void);

static int __init netlink_proto_init(void)
{
        struct sk_buff *dummy_skb;
        int i;
        unsigned long max;
        unsigned int order;
        int err = proto_register(&netlink_proto, 0);

        if (err != 0)
                goto out;

        if (sizeof(struct netlink_skb_parms) > sizeof(dummy_skb->cb))
                netlink_skb_parms_too_large();

        nl_table = kmalloc(sizeof(*nl_table) * MAX_LINKS, GFP_KERNEL);
        if (!nl_table) {
enomem:
                printk(KERN_CRIT "netlink_init: Cannot allocate nl_table\n");
                return -ENOMEM;
        }

        memset(nl_table, 0, sizeof(*nl_table) * MAX_LINKS);

        if (num_physpages >= (128 * 1024))
                max = num_physpages >> (21 - PAGE_SHIFT);
        else
                max = num_physpages >> (23 - PAGE_SHIFT);

        order = get_bitmask_order(max) - 1 + PAGE_SHIFT;
        max = (1UL << order) / sizeof(struct hlist_head);
        order = get_bitmask_order(max > UINT_MAX ? UINT_MAX : max) - 1;

        for (i = 0; i < MAX_LINKS; i++) {
                struct nl_pid_hash *hash = &nl_table[i].hash;

                hash->table = nl_pid_hash_alloc(1 * sizeof(*hash->table));
                if (!hash->table) {
                        while (i-- > 0)
                                nl_pid_hash_free(nl_table[i].hash.table,
                                                 1 * sizeof(*hash->table));
                        kfree(nl_table);
                        goto enomem;
                }
                memset(hash->table, 0, 1 * sizeof(*hash->table));
                hash->max_shift = order;
                hash->shift = 0;
                hash->mask = 0;
                hash->rehash_time = jiffies;
        }

        sock_register(&netlink_family_ops);
#ifdef CONFIG_PROC_FS
        proc_net_fops_create("netlink", 0, &netlink_seq_fops);
#endif
        /* The netlink device handler may be needed early. */
        rtnetlink_init();
out:
        return err;
}

core_initcall(netlink_proto_init);

EXPORT_SYMBOL(netlink_ack);
EXPORT_SYMBOL(netlink_broadcast);
EXPORT_SYMBOL(netlink_dump_start);
EXPORT_SYMBOL(netlink_kernel_create);
EXPORT_SYMBOL(netlink_register_notifier);
EXPORT_SYMBOL(netlink_set_err);
EXPORT_SYMBOL(netlink_set_nonroot);
EXPORT_SYMBOL(netlink_unicast);
EXPORT_SYMBOL(netlink_unregister_notifier);