[NETLINK]: allocate group bitmaps dynamically
net/netlink/af_netlink.c

/*
 * NETLINK	Kernel-user communication protocol.
 *
 * 		Authors:	Alan Cox <alan@redhat.com>
 * 				Alexey Kuznetsov <kuznet@ms2.inr.ac.ru>
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
 *
 * Tue Jun 26 14:36:48 MEST 2001 Herbert "herp" Rosmanith
 *                               added netlink_proto_exit
 * Tue Jan 22 18:32:44 BRST 2002 Arnaldo C. de Melo <acme@conectiva.com.br>
 *                               use nlk_sk, as sk->protinfo is on a diet 8)
 * Fri Jul 22 19:51:12 MEST 2005 Harald Welte <laforge@gnumonks.org>
 * 				- inc module use count of module that owns
 * 				  the kernel socket in case userspace opens
 * 				  socket of same protocol
 * 				- remove all module support, since netlink is
 * 				  mandatory if CONFIG_NET=y these days
 */

#include <linux/module.h>

#include <linux/capability.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/stat.h>
#include <linux/socket.h>
#include <linux/un.h>
#include <linux/fcntl.h>
#include <linux/termios.h>
#include <linux/sockios.h>
#include <linux/net.h>
#include <linux/fs.h>
#include <linux/slab.h>
#include <asm/uaccess.h>
#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include <linux/rtnetlink.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/notifier.h>
#include <linux/security.h>
#include <linux/jhash.h>
#include <linux/jiffies.h>
#include <linux/random.h>
#include <linux/bitops.h>
#include <linux/mm.h>
#include <linux/types.h>
#include <linux/audit.h>
#include <linux/selinux.h>
#include <linux/mutex.h>

#include <net/sock.h>
#include <net/scm.h>
#include <net/netlink.h>

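/*
 * NLGRPSZ(x) rounds a count of x multicast groups up to a whole number
 * of unsigned longs and yields the bitmap size in bytes; NLGRPLONGS(x)
 * is the same quantity expressed in longs, i.e. the number of array
 * slots a group bitmap needs.
 */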
#define NLGRPSZ(x)	(ALIGN(x, sizeof(unsigned long) * 8) / 8)
#define NLGRPLONGS(x)	(NLGRPSZ(x)/sizeof(unsigned long))

struct netlink_sock {
	/* struct sock has to be the first member of netlink_sock */
	struct sock		sk;
	u32			pid;
	u32			dst_pid;
	u32			dst_group;
	u32			flags;
	u32			subscriptions;
	u32			ngroups;
	unsigned long		*groups;
	unsigned long		state;
	wait_queue_head_t	wait;
	struct netlink_callback	*cb;
	struct mutex		*cb_mutex;
	struct mutex		cb_def_mutex;
	void			(*data_ready)(struct sock *sk, int bytes);
	struct module		*module;
};

#define NETLINK_KERNEL_SOCKET	0x1
#define NETLINK_RECV_PKTINFO	0x2

static inline struct netlink_sock *nlk_sk(struct sock *sk)
{
	return (struct netlink_sock *)sk;
}

struct nl_pid_hash {
	struct hlist_head *table;
	unsigned long rehash_time;

	unsigned int mask;
	unsigned int shift;

	unsigned int entries;
	unsigned int max_shift;

	u32 rnd;
};

struct netlink_table {
	struct nl_pid_hash hash;
	struct hlist_head mc_list;
	unsigned long *listeners;
	unsigned int nl_nonroot;
	unsigned int groups;
	struct mutex *cb_mutex;
	struct module *module;
	int registered;
};

static struct netlink_table *nl_table;

static DECLARE_WAIT_QUEUE_HEAD(nl_table_wait);

static int netlink_dump(struct sock *sk);
static void netlink_destroy_callback(struct netlink_callback *cb);
static void netlink_queue_skip(struct nlmsghdr *nlh, struct sk_buff *skb);

static DEFINE_RWLOCK(nl_table_lock);
static atomic_t nl_table_users = ATOMIC_INIT(0);

static ATOMIC_NOTIFIER_HEAD(netlink_chain);

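/*
 * Map a 1-based multicast group number onto the 32-bit group mask used
 * in struct sockaddr_nl; group 0 means "no group" and yields an empty
 * mask.
 */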
static u32 netlink_group_mask(u32 group)
{
	return group ? 1 << (group - 1) : 0;
}

static struct hlist_head *nl_pid_hashfn(struct nl_pid_hash *hash, u32 pid)
{
	return &hash->table[jhash_1word(pid, hash->rnd) & hash->mask];
}

static void netlink_sock_destruct(struct sock *sk)
{
	struct netlink_sock *nlk = nlk_sk(sk);

	if (nlk->cb) {
		if (nlk->cb->done)
			nlk->cb->done(nlk->cb);
		netlink_destroy_callback(nlk->cb);
	}

	skb_queue_purge(&sk->sk_receive_queue);

	if (!sock_flag(sk, SOCK_DEAD)) {
		printk("Freeing alive netlink socket %p\n", sk);
		return;
	}
	BUG_TRAP(!atomic_read(&sk->sk_rmem_alloc));
	BUG_TRAP(!atomic_read(&sk->sk_wmem_alloc));
	BUG_TRAP(!nlk_sk(sk)->groups);
}

/* This lock without WQ_FLAG_EXCLUSIVE is good on UP and it is _very_ bad on SMP.
 * Look, when several writers sleep and reader wakes them up, all but one
 * immediately hit write lock and grab all the cpus. Exclusive sleep solves
 * this, _but_ remember, it adds useless work on UP machines.
 */

static void netlink_table_grab(void)
{
	write_lock_irq(&nl_table_lock);

	if (atomic_read(&nl_table_users)) {
		DECLARE_WAITQUEUE(wait, current);

		add_wait_queue_exclusive(&nl_table_wait, &wait);
		for (;;) {
			set_current_state(TASK_UNINTERRUPTIBLE);
			if (atomic_read(&nl_table_users) == 0)
				break;
			write_unlock_irq(&nl_table_lock);
			schedule();
			write_lock_irq(&nl_table_lock);
		}

		__set_current_state(TASK_RUNNING);
		remove_wait_queue(&nl_table_wait, &wait);
	}
}

static __inline__ void netlink_table_ungrab(void)
{
	write_unlock_irq(&nl_table_lock);
	wake_up(&nl_table_wait);
}

static __inline__ void
netlink_lock_table(void)
{
	/* read_lock() synchronizes us to netlink_table_grab */

	read_lock(&nl_table_lock);
	atomic_inc(&nl_table_users);
	read_unlock(&nl_table_lock);
}

static __inline__ void
netlink_unlock_table(void)
{
	if (atomic_dec_and_test(&nl_table_users))
		wake_up(&nl_table_wait);
}

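/*
 * Look up the socket bound to @pid in @protocol's pid hash and return
 * it with a reference held, or NULL if no such socket exists.
 */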
static __inline__ struct sock *netlink_lookup(int protocol, u32 pid)
{
	struct nl_pid_hash *hash = &nl_table[protocol].hash;
	struct hlist_head *head;
	struct sock *sk;
	struct hlist_node *node;

	read_lock(&nl_table_lock);
	head = nl_pid_hashfn(hash, pid);
	sk_for_each(sk, node, head) {
		if (nlk_sk(sk)->pid == pid) {
			sock_hold(sk);
			goto found;
		}
	}
	sk = NULL;
found:
	read_unlock(&nl_table_lock);
	return sk;
}

static inline struct hlist_head *nl_pid_hash_alloc(size_t size)
{
	if (size <= PAGE_SIZE)
		return kmalloc(size, GFP_ATOMIC);
	else
		return (struct hlist_head *)
			__get_free_pages(GFP_ATOMIC, get_order(size));
}

static inline void nl_pid_hash_free(struct hlist_head *table, size_t size)
{
	if (size <= PAGE_SIZE)
		kfree(table);
	else
		free_pages((unsigned long)table, get_order(size));
}

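/*
 * Rebuild the pid hash: with @grow set the table doubles (up to
 * max_shift); otherwise it is rebuilt at the same size with a fresh
 * random hash seed. All sockets are re-inserted under the new seed.
 * Returns 1 if the table was rebuilt, 0 otherwise.
 */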
static int nl_pid_hash_rehash(struct nl_pid_hash *hash, int grow)
{
	unsigned int omask, mask, shift;
	size_t osize, size;
	struct hlist_head *otable, *table;
	int i;

	omask = mask = hash->mask;
	osize = size = (mask + 1) * sizeof(*table);
	shift = hash->shift;

	if (grow) {
		if (++shift > hash->max_shift)
			return 0;
		mask = mask * 2 + 1;
		size *= 2;
	}

	table = nl_pid_hash_alloc(size);
	if (!table)
		return 0;

	memset(table, 0, size);
	otable = hash->table;
	hash->table = table;
	hash->mask = mask;
	hash->shift = shift;
	get_random_bytes(&hash->rnd, sizeof(hash->rnd));

	for (i = 0; i <= omask; i++) {
		struct sock *sk;
		struct hlist_node *node, *tmp;

		sk_for_each_safe(sk, node, tmp, &otable[i])
			__sk_add_node(sk, nl_pid_hashfn(hash, nlk_sk(sk)->pid));
	}

	nl_pid_hash_free(otable, osize);
	hash->rehash_time = jiffies + 10 * 60 * HZ;
	return 1;
}

static inline int nl_pid_hash_dilute(struct nl_pid_hash *hash, int len)
{
	int avg = hash->entries >> hash->shift;

	if (unlikely(avg > 1) && nl_pid_hash_rehash(hash, 1))
		return 1;

	if (unlikely(len > avg) && time_after(jiffies, hash->rehash_time)) {
		nl_pid_hash_rehash(hash, 0);
		return 1;
	}

	return 0;
}

static const struct proto_ops netlink_ops;

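/*
 * Recompute the per-protocol listeners bitmap by OR-ing together the
 * group bitmaps of every socket bound to the protocol's mc_list.
 */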
static void
netlink_update_listeners(struct sock *sk)
{
	struct netlink_table *tbl = &nl_table[sk->sk_protocol];
	struct hlist_node *node;
	unsigned long mask;
	unsigned int i;

	for (i = 0; i < NLGRPLONGS(tbl->groups); i++) {
		mask = 0;
		sk_for_each_bound(sk, node, &tbl->mc_list) {
			if (i < NLGRPLONGS(nlk_sk(sk)->ngroups))
				mask |= nlk_sk(sk)->groups[i];
		}
		tbl->listeners[i] = mask;
	}
	/* this function is only called with the netlink table "grabbed", which
	 * makes sure updates are visible before bind or setsockopt return. */
}

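/*
 * Bind @sk to @pid in the protocol's pid hash: -EADDRINUSE if the pid
 * is already taken, -EBUSY if the socket is already bound. Long chains
 * trigger a dilute/rehash of the table on the way in.
 */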
static int netlink_insert(struct sock *sk, u32 pid)
{
	struct nl_pid_hash *hash = &nl_table[sk->sk_protocol].hash;
	struct hlist_head *head;
	int err = -EADDRINUSE;
	struct sock *osk;
	struct hlist_node *node;
	int len;

	netlink_table_grab();
	head = nl_pid_hashfn(hash, pid);
	len = 0;
	sk_for_each(osk, node, head) {
		if (nlk_sk(osk)->pid == pid)
			break;
		len++;
	}
	if (node)
		goto err;

	err = -EBUSY;
	if (nlk_sk(sk)->pid)
		goto err;

	err = -ENOMEM;
	if (BITS_PER_LONG > 32 && unlikely(hash->entries >= UINT_MAX))
		goto err;

	if (len && nl_pid_hash_dilute(hash, len))
		head = nl_pid_hashfn(hash, pid);
	hash->entries++;
	nlk_sk(sk)->pid = pid;
	sk_add_node(sk, head);
	err = 0;

err:
	netlink_table_ungrab();
	return err;
}

static void netlink_remove(struct sock *sk)
{
	netlink_table_grab();
	if (sk_del_node_init(sk))
		nl_table[sk->sk_protocol].hash.entries--;
	if (nlk_sk(sk)->subscriptions)
		__sk_del_bind_node(sk);
	netlink_table_ungrab();
}

static struct proto netlink_proto = {
	.name	  = "NETLINK",
	.owner	  = THIS_MODULE,
	.obj_size = sizeof(struct netlink_sock),
};

static int __netlink_create(struct socket *sock, struct mutex *cb_mutex,
			    int protocol)
{
	struct sock *sk;
	struct netlink_sock *nlk;

	sock->ops = &netlink_ops;

	sk = sk_alloc(PF_NETLINK, GFP_KERNEL, &netlink_proto, 1);
	if (!sk)
		return -ENOMEM;

	sock_init_data(sock, sk);

	nlk = nlk_sk(sk);
	if (cb_mutex)
		nlk->cb_mutex = cb_mutex;
	else {
		nlk->cb_mutex = &nlk->cb_def_mutex;
		mutex_init(nlk->cb_mutex);
	}
	init_waitqueue_head(&nlk->wait);

	sk->sk_destruct = netlink_sock_destruct;
	sk->sk_protocol = protocol;
	return 0;
}

static int netlink_create(struct socket *sock, int protocol)
{
	struct module *module = NULL;
	struct mutex *cb_mutex;
	struct netlink_sock *nlk;
	int err = 0;

	sock->state = SS_UNCONNECTED;

	if (sock->type != SOCK_RAW && sock->type != SOCK_DGRAM)
		return -ESOCKTNOSUPPORT;

	if (protocol < 0 || protocol >= MAX_LINKS)
		return -EPROTONOSUPPORT;

	netlink_lock_table();
#ifdef CONFIG_KMOD
	if (!nl_table[protocol].registered) {
		netlink_unlock_table();
		request_module("net-pf-%d-proto-%d", PF_NETLINK, protocol);
		netlink_lock_table();
	}
#endif
	if (nl_table[protocol].registered &&
	    try_module_get(nl_table[protocol].module))
		module = nl_table[protocol].module;
	cb_mutex = nl_table[protocol].cb_mutex;
	netlink_unlock_table();

	if ((err = __netlink_create(sock, cb_mutex, protocol)) < 0)
		goto out_module;

	nlk = nlk_sk(sock->sk);
	nlk->module = module;
out:
	return err;

out_module:
	module_put(module);
	goto out;
}

static int netlink_release(struct socket *sock)
{
	struct sock *sk = sock->sk;
	struct netlink_sock *nlk;

	if (!sk)
		return 0;

	netlink_remove(sk);
	sock_orphan(sk);
	nlk = nlk_sk(sk);

	/*
	 * OK. Socket is unlinked, any packets that arrive now
	 * will be purged.
	 */

	sock->sk = NULL;
	wake_up_interruptible_all(&nlk->wait);

	skb_queue_purge(&sk->sk_write_queue);

	if (nlk->pid && !nlk->subscriptions) {
		struct netlink_notify n = {
			.protocol = sk->sk_protocol,
			.pid = nlk->pid,
		};
		atomic_notifier_call_chain(&netlink_chain,
					   NETLINK_URELEASE, &n);
	}

	module_put(nlk->module);

	netlink_table_grab();
	if (nlk->flags & NETLINK_KERNEL_SOCKET) {
		kfree(nl_table[sk->sk_protocol].listeners);
		nl_table[sk->sk_protocol].module = NULL;
		nl_table[sk->sk_protocol].registered = 0;
	} else if (nlk->subscriptions)
		netlink_update_listeners(sk);
	netlink_table_ungrab();

	kfree(nlk->groups);
	nlk->groups = NULL;

	sock_put(sk);
	return 0;
}

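/*
 * Pick a local pid automatically: try the current thread group id
 * first, then fall back to a static rover walking the negative pid
 * space until a free slot is found.
 */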
static int netlink_autobind(struct socket *sock)
{
	struct sock *sk = sock->sk;
	struct nl_pid_hash *hash = &nl_table[sk->sk_protocol].hash;
	struct hlist_head *head;
	struct sock *osk;
	struct hlist_node *node;
	s32 pid = current->tgid;
	int err;
	static s32 rover = -4097;

retry:
	cond_resched();
	netlink_table_grab();
	head = nl_pid_hashfn(hash, pid);
	sk_for_each(osk, node, head) {
		if (nlk_sk(osk)->pid == pid) {
			/* Bind collision, search negative pid values. */
			pid = rover--;
			if (rover > -4097)
				rover = -4097;
			netlink_table_ungrab();
			goto retry;
		}
	}
	netlink_table_ungrab();

	err = netlink_insert(sk, pid);
	if (err == -EADDRINUSE)
		goto retry;

	/* If 2 threads race to autobind, that is fine. */
	if (err == -EBUSY)
		err = 0;

	return err;
}

static inline int netlink_capable(struct socket *sock, unsigned int flag)
{
	return (nl_table[sock->sk->sk_protocol].nl_nonroot & flag) ||
	       capable(CAP_NET_ADMIN);
}

static void
netlink_update_subscriptions(struct sock *sk, unsigned int subscriptions)
{
	struct netlink_sock *nlk = nlk_sk(sk);

	if (nlk->subscriptions && !subscriptions)
		__sk_del_bind_node(sk);
	else if (!nlk->subscriptions && subscriptions)
		sk_add_bind_node(sk, &nl_table[sk->sk_protocol].mc_list);
	nlk->subscriptions = subscriptions;
}

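/*
 * Grow the socket's group bitmap to match the number of groups the
 * protocol currently has registered, zeroing the new tail so no stale
 * bits leak in. This is the piece that lets group bitmaps be allocated
 * dynamically instead of being capped at 32 groups.
 */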
static int netlink_realloc_groups(struct sock *sk)
{
	struct netlink_sock *nlk = nlk_sk(sk);
	unsigned int groups;
	unsigned long *new_groups;
	int err = 0;

	netlink_table_grab();

	groups = nl_table[sk->sk_protocol].groups;
	if (!nl_table[sk->sk_protocol].registered) {
		err = -ENOENT;
		goto out_unlock;
	}

	if (nlk->ngroups >= groups)
		goto out_unlock;

	new_groups = krealloc(nlk->groups, NLGRPSZ(groups), GFP_ATOMIC);
	if (new_groups == NULL) {
		err = -ENOMEM;
		goto out_unlock;
	}
	memset((char *)new_groups + NLGRPSZ(nlk->ngroups), 0,
	       NLGRPSZ(groups) - NLGRPSZ(nlk->ngroups));

	nlk->groups = new_groups;
	nlk->ngroups = groups;
 out_unlock:
	netlink_table_ungrab();
	return err;
}

static int netlink_bind(struct socket *sock, struct sockaddr *addr, int addr_len)
{
	struct sock *sk = sock->sk;
	struct netlink_sock *nlk = nlk_sk(sk);
	struct sockaddr_nl *nladdr = (struct sockaddr_nl *)addr;
	int err;

	if (nladdr->nl_family != AF_NETLINK)
		return -EINVAL;

	/* Only superuser is allowed to listen multicasts */
	if (nladdr->nl_groups) {
		if (!netlink_capable(sock, NL_NONROOT_RECV))
			return -EPERM;
		err = netlink_realloc_groups(sk);
		if (err)
			return err;
	}

	if (nlk->pid) {
		if (nladdr->nl_pid != nlk->pid)
			return -EINVAL;
	} else {
		err = nladdr->nl_pid ?
			netlink_insert(sk, nladdr->nl_pid) :
			netlink_autobind(sock);
		if (err)
			return err;
	}

	if (!nladdr->nl_groups && (nlk->groups == NULL || !(u32)nlk->groups[0]))
		return 0;

	netlink_table_grab();
	netlink_update_subscriptions(sk, nlk->subscriptions +
					 hweight32(nladdr->nl_groups) -
					 hweight32(nlk->groups[0]));
	nlk->groups[0] = (nlk->groups[0] & ~0xffffffffUL) | nladdr->nl_groups;
	netlink_update_listeners(sk);
	netlink_table_ungrab();

	return 0;
}

static int netlink_connect(struct socket *sock, struct sockaddr *addr,
			   int alen, int flags)
{
	int err = 0;
	struct sock *sk = sock->sk;
	struct netlink_sock *nlk = nlk_sk(sk);
	struct sockaddr_nl *nladdr = (struct sockaddr_nl *)addr;

	if (addr->sa_family == AF_UNSPEC) {
		sk->sk_state = NETLINK_UNCONNECTED;
		nlk->dst_pid = 0;
		nlk->dst_group = 0;
		return 0;
	}
	if (addr->sa_family != AF_NETLINK)
		return -EINVAL;

	/* Only superuser is allowed to send multicasts */
	if (nladdr->nl_groups && !netlink_capable(sock, NL_NONROOT_SEND))
		return -EPERM;

	if (!nlk->pid)
		err = netlink_autobind(sock);

	if (err == 0) {
		sk->sk_state = NETLINK_CONNECTED;
		nlk->dst_pid = nladdr->nl_pid;
		nlk->dst_group = ffs(nladdr->nl_groups);
	}

	return err;
}

static int netlink_getname(struct socket *sock, struct sockaddr *addr, int *addr_len, int peer)
{
	struct sock *sk = sock->sk;
	struct netlink_sock *nlk = nlk_sk(sk);
	struct sockaddr_nl *nladdr = (struct sockaddr_nl *)addr;

	nladdr->nl_family = AF_NETLINK;
	nladdr->nl_pad = 0;
	*addr_len = sizeof(*nladdr);

	if (peer) {
		nladdr->nl_pid = nlk->dst_pid;
		nladdr->nl_groups = netlink_group_mask(nlk->dst_group);
	} else {
		nladdr->nl_pid = nlk->pid;
		nladdr->nl_groups = nlk->groups ? nlk->groups[0] : 0;
	}
	return 0;
}

static void netlink_overrun(struct sock *sk)
{
	if (!test_and_set_bit(0, &nlk_sk(sk)->state)) {
		sk->sk_err = ENOBUFS;
		sk->sk_error_report(sk);
	}
}

static struct sock *netlink_getsockbypid(struct sock *ssk, u32 pid)
{
	int protocol = ssk->sk_protocol;
	struct sock *sock;
	struct netlink_sock *nlk;

	sock = netlink_lookup(protocol, pid);
	if (!sock)
		return ERR_PTR(-ECONNREFUSED);

	/* Don't bother queuing skb if kernel socket has no input function */
	nlk = nlk_sk(sock);
	if ((nlk->pid == 0 && !nlk->data_ready) ||
	    (sock->sk_state == NETLINK_CONNECTED &&
	     nlk->dst_pid != nlk_sk(ssk)->pid)) {
		sock_put(sock);
		return ERR_PTR(-ECONNREFUSED);
	}
	return sock;
}

struct sock *netlink_getsockbyfilp(struct file *filp)
{
	struct inode *inode = filp->f_path.dentry->d_inode;
	struct sock *sock;

	if (!S_ISSOCK(inode->i_mode))
		return ERR_PTR(-ENOTSOCK);

	sock = SOCKET_I(inode)->sk;
	if (sock->sk_family != AF_NETLINK)
		return ERR_PTR(-EINVAL);

	sock_hold(sock);
	return sock;
}

/*
 * Attach a skb to a netlink socket.
 * The caller must hold a reference to the destination socket. On error, the
 * reference is dropped. The skb is not sent to the destination; only the
 * error checks are performed and memory in the queue is reserved.
 * Return values:
 * < 0: error. skb freed, reference to sock dropped.
 * 0: continue
 * 1: repeat lookup - reference dropped while waiting for socket memory.
 */
int netlink_attachskb(struct sock *sk, struct sk_buff *skb, int nonblock,
		      long timeo, struct sock *ssk)
{
	struct netlink_sock *nlk;

	nlk = nlk_sk(sk);

	if (atomic_read(&sk->sk_rmem_alloc) > sk->sk_rcvbuf ||
	    test_bit(0, &nlk->state)) {
		DECLARE_WAITQUEUE(wait, current);
		if (!timeo) {
			if (!ssk || nlk_sk(ssk)->pid == 0)
				netlink_overrun(sk);
			sock_put(sk);
			kfree_skb(skb);
			return -EAGAIN;
		}

		__set_current_state(TASK_INTERRUPTIBLE);
		add_wait_queue(&nlk->wait, &wait);

		if ((atomic_read(&sk->sk_rmem_alloc) > sk->sk_rcvbuf ||
		     test_bit(0, &nlk->state)) &&
		    !sock_flag(sk, SOCK_DEAD))
			timeo = schedule_timeout(timeo);

		__set_current_state(TASK_RUNNING);
		remove_wait_queue(&nlk->wait, &wait);
		sock_put(sk);

		if (signal_pending(current)) {
			kfree_skb(skb);
			return sock_intr_errno(timeo);
		}
		return 1;
	}
	skb_set_owner_r(skb, sk);
	return 0;
}

int netlink_sendskb(struct sock *sk, struct sk_buff *skb, int protocol)
{
	int len = skb->len;

	skb_queue_tail(&sk->sk_receive_queue, skb);
	sk->sk_data_ready(sk, len);
	sock_put(sk);
	return len;
}

void netlink_detachskb(struct sock *sk, struct sk_buff *skb)
{
	kfree_skb(skb);
	sock_put(sk);
}

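/*
 * Shrink an over-sized skb before it is queued: when more than half of
 * the buffer is unused tail room, clone shared skbs and release the
 * excess so receive-buffer accounting reflects the data actually
 * carried.
 */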
static inline struct sk_buff *netlink_trim(struct sk_buff *skb,
					   gfp_t allocation)
{
	int delta;

	skb_orphan(skb);

	delta = skb->end - skb->tail;
	if (delta * 2 < skb->truesize)
		return skb;

	if (skb_shared(skb)) {
		struct sk_buff *nskb = skb_clone(skb, allocation);
		if (!nskb)
			return skb;
		kfree_skb(skb);
		skb = nskb;
	}

	if (!pskb_expand_head(skb, 0, -delta, allocation))
		skb->truesize -= delta;

	return skb;
}

int netlink_unicast(struct sock *ssk, struct sk_buff *skb, u32 pid, int nonblock)
{
	struct sock *sk;
	int err;
	long timeo;

	skb = netlink_trim(skb, gfp_any());

	timeo = sock_sndtimeo(ssk, nonblock);
retry:
	sk = netlink_getsockbypid(ssk, pid);
	if (IS_ERR(sk)) {
		kfree_skb(skb);
		return PTR_ERR(sk);
	}
	err = netlink_attachskb(sk, skb, nonblock, timeo, ssk);
	if (err == 1)
		goto retry;
	if (err)
		return err;

	return netlink_sendskb(sk, skb, ssk->sk_protocol);
}

int netlink_has_listeners(struct sock *sk, unsigned int group)
{
	int res = 0;
	unsigned long *listeners;

	BUG_ON(!(nlk_sk(sk)->flags & NETLINK_KERNEL_SOCKET));

	rcu_read_lock();
	listeners = rcu_dereference(nl_table[sk->sk_protocol].listeners);

	if (group - 1 < nl_table[sk->sk_protocol].groups)
		res = test_bit(group - 1, listeners);

	rcu_read_unlock();

	return res;
}
EXPORT_SYMBOL_GPL(netlink_has_listeners);

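/*
 * Queue a broadcast skb onto one receiving socket if it has room.
 * Returns a congestion hint (1 when the rcvbuf limit was crossed,
 * 0 otherwise) or -1 when delivery was refused outright.
 */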
static __inline__ int netlink_broadcast_deliver(struct sock *sk, struct sk_buff *skb)
{
	struct netlink_sock *nlk = nlk_sk(sk);

	if (atomic_read(&sk->sk_rmem_alloc) <= sk->sk_rcvbuf &&
	    !test_bit(0, &nlk->state)) {
		skb_set_owner_r(skb, sk);
		skb_queue_tail(&sk->sk_receive_queue, skb);
		sk->sk_data_ready(sk, skb->len);
		return atomic_read(&sk->sk_rmem_alloc) > sk->sk_rcvbuf;
	}
	return -1;
}

struct netlink_broadcast_data {
	struct sock *exclude_sk;
	u32 pid;
	u32 group;
	int failure;
	int congested;
	int delivered;
	gfp_t allocation;
	struct sk_buff *skb, *skb2;
};

static inline int do_one_broadcast(struct sock *sk,
				   struct netlink_broadcast_data *p)
{
	struct netlink_sock *nlk = nlk_sk(sk);
	int val;

	if (p->exclude_sk == sk)
		goto out;

	if (nlk->pid == p->pid || p->group - 1 >= nlk->ngroups ||
	    !test_bit(p->group - 1, nlk->groups))
		goto out;

	if (p->failure) {
		netlink_overrun(sk);
		goto out;
	}

	sock_hold(sk);
	if (p->skb2 == NULL) {
		if (skb_shared(p->skb)) {
			p->skb2 = skb_clone(p->skb, p->allocation);
		} else {
			p->skb2 = skb_get(p->skb);
			/*
			 * skb ownership may have been set when
			 * delivered to a previous socket.
			 */
			skb_orphan(p->skb2);
		}
	}
	if (p->skb2 == NULL) {
		netlink_overrun(sk);
		/* Clone failed. Notify ALL listeners. */
		p->failure = 1;
	} else if ((val = netlink_broadcast_deliver(sk, p->skb2)) < 0) {
		netlink_overrun(sk);
	} else {
		p->congested |= val;
		p->delivered = 1;
		p->skb2 = NULL;
	}
	sock_put(sk);

out:
	return 0;
}

int netlink_broadcast(struct sock *ssk, struct sk_buff *skb, u32 pid,
		      u32 group, gfp_t allocation)
{
	struct netlink_broadcast_data info;
	struct hlist_node *node;
	struct sock *sk;

	skb = netlink_trim(skb, allocation);

	info.exclude_sk = ssk;
	info.pid = pid;
	info.group = group;
	info.failure = 0;
	info.congested = 0;
	info.delivered = 0;
	info.allocation = allocation;
	info.skb = skb;
	info.skb2 = NULL;

	/* While we sleep in clone, do not allow to change socket list */

	netlink_lock_table();

	sk_for_each_bound(sk, node, &nl_table[ssk->sk_protocol].mc_list)
		do_one_broadcast(sk, &info);

	kfree_skb(skb);

	netlink_unlock_table();

	if (info.skb2)
		kfree_skb(info.skb2);

	if (info.delivered) {
		if (info.congested && (allocation & __GFP_WAIT))
			yield();
		return 0;
	}
	if (info.failure)
		return -ENOBUFS;
	return -ESRCH;
}

struct netlink_set_err_data {
	struct sock *exclude_sk;
	u32 pid;
	u32 group;
	int code;
};

static inline int do_one_set_err(struct sock *sk,
				 struct netlink_set_err_data *p)
{
	struct netlink_sock *nlk = nlk_sk(sk);

	if (sk == p->exclude_sk)
		goto out;

	if (nlk->pid == p->pid || p->group - 1 >= nlk->ngroups ||
	    !test_bit(p->group - 1, nlk->groups))
		goto out;

	sk->sk_err = p->code;
	sk->sk_error_report(sk);
out:
	return 0;
}

void netlink_set_err(struct sock *ssk, u32 pid, u32 group, int code)
{
	struct netlink_set_err_data info;
	struct hlist_node *node;
	struct sock *sk;

	info.exclude_sk = ssk;
	info.pid = pid;
	info.group = group;
	info.code = code;

	read_lock(&nl_table_lock);

	sk_for_each_bound(sk, node, &nl_table[ssk->sk_protocol].mc_list)
		do_one_set_err(sk, &info);

	read_unlock(&nl_table_lock);
}

static int netlink_setsockopt(struct socket *sock, int level, int optname,
			      char __user *optval, int optlen)
{
	struct sock *sk = sock->sk;
	struct netlink_sock *nlk = nlk_sk(sk);
	unsigned int val = 0;
	int err;

	if (level != SOL_NETLINK)
		return -ENOPROTOOPT;

	if (optlen >= sizeof(int) &&
	    get_user(val, (unsigned int __user *)optval))
		return -EFAULT;

	switch (optname) {
	case NETLINK_PKTINFO:
		if (val)
			nlk->flags |= NETLINK_RECV_PKTINFO;
		else
			nlk->flags &= ~NETLINK_RECV_PKTINFO;
		err = 0;
		break;
	case NETLINK_ADD_MEMBERSHIP:
	case NETLINK_DROP_MEMBERSHIP: {
		unsigned int subscriptions;
		int old, new = optname == NETLINK_ADD_MEMBERSHIP ? 1 : 0;

		if (!netlink_capable(sock, NL_NONROOT_RECV))
			return -EPERM;
		err = netlink_realloc_groups(sk);
		if (err)
			return err;
		if (!val || val - 1 >= nlk->ngroups)
			return -EINVAL;
		netlink_table_grab();
		old = test_bit(val - 1, nlk->groups);
		subscriptions = nlk->subscriptions - old + new;
		if (new)
			__set_bit(val - 1, nlk->groups);
		else
			__clear_bit(val - 1, nlk->groups);
		netlink_update_subscriptions(sk, subscriptions);
		netlink_update_listeners(sk);
		netlink_table_ungrab();
		err = 0;
		break;
	}
	default:
		err = -ENOPROTOOPT;
	}
	return err;
}

static int netlink_getsockopt(struct socket *sock, int level, int optname,
			      char __user *optval, int __user *optlen)
{
	struct sock *sk = sock->sk;
	struct netlink_sock *nlk = nlk_sk(sk);
	int len, val, err;

	if (level != SOL_NETLINK)
		return -ENOPROTOOPT;

	if (get_user(len, optlen))
		return -EFAULT;
	if (len < 0)
		return -EINVAL;

	switch (optname) {
	case NETLINK_PKTINFO:
		if (len < sizeof(int))
			return -EINVAL;
		len = sizeof(int);
		val = nlk->flags & NETLINK_RECV_PKTINFO ? 1 : 0;
		if (put_user(len, optlen) ||
		    put_user(val, optval))
			return -EFAULT;
		err = 0;
		break;
	default:
		err = -ENOPROTOOPT;
	}
	return err;
}

static void netlink_cmsg_recv_pktinfo(struct msghdr *msg, struct sk_buff *skb)
{
	struct nl_pktinfo info;

	info.group = NETLINK_CB(skb).dst_group;
	put_cmsg(msg, SOL_NETLINK, NETLINK_PKTINFO, sizeof(info), &info);
}

static inline void netlink_rcv_wake(struct sock *sk)
{
	struct netlink_sock *nlk = nlk_sk(sk);

	if (skb_queue_empty(&sk->sk_receive_queue))
		clear_bit(0, &nlk->state);
	if (!test_bit(0, &nlk->state))
		wake_up_interruptible(&nlk->wait);
}

static int netlink_sendmsg(struct kiocb *kiocb, struct socket *sock,
			   struct msghdr *msg, size_t len)
{
	struct sock_iocb *siocb = kiocb_to_siocb(kiocb);
	struct sock *sk = sock->sk;
	struct netlink_sock *nlk = nlk_sk(sk);
	struct sockaddr_nl *addr = msg->msg_name;
	u32 dst_pid;
	u32 dst_group;
	struct sk_buff *skb;
	int err;
	struct scm_cookie scm;

	if (msg->msg_flags & MSG_OOB)
		return -EOPNOTSUPP;

	if (NULL == siocb->scm)
		siocb->scm = &scm;
	err = scm_send(sock, msg, siocb->scm);
	if (err < 0)
		return err;

	if (msg->msg_namelen) {
		if (addr->nl_family != AF_NETLINK)
			return -EINVAL;
		dst_pid = addr->nl_pid;
		dst_group = ffs(addr->nl_groups);
		if (dst_group && !netlink_capable(sock, NL_NONROOT_SEND))
			return -EPERM;
	} else {
		dst_pid = nlk->dst_pid;
		dst_group = nlk->dst_group;
	}

	if (!nlk->pid) {
		err = netlink_autobind(sock);
		if (err)
			goto out;
	}

	err = -EMSGSIZE;
	if (len > sk->sk_sndbuf - 32)
		goto out;
	err = -ENOBUFS;
	skb = alloc_skb(len, GFP_KERNEL);
	if (skb == NULL)
		goto out;

	NETLINK_CB(skb).pid = nlk->pid;
	NETLINK_CB(skb).dst_group = dst_group;
	NETLINK_CB(skb).loginuid = audit_get_loginuid(current->audit_context);
	selinux_get_task_sid(current, &(NETLINK_CB(skb).sid));
	memcpy(NETLINK_CREDS(skb), &siocb->scm->creds, sizeof(struct ucred));

	/* What can I do? Netlink is asynchronous, so that
	   we will have to save current capabilities to
	   check them, when this message will be delivered
	   to corresponding kernel module.   --ANK (980802)
	 */

	err = -EFAULT;
	if (memcpy_fromiovec(skb_put(skb, len), msg->msg_iov, len)) {
		kfree_skb(skb);
		goto out;
	}

	err = security_netlink_send(sk, skb);
	if (err) {
		kfree_skb(skb);
		goto out;
	}

	if (dst_group) {
		atomic_inc(&skb->users);
		netlink_broadcast(sk, skb, dst_pid, dst_group, GFP_KERNEL);
	}
	err = netlink_unicast(sk, skb, dst_pid, msg->msg_flags & MSG_DONTWAIT);

out:
	return err;
}

static int netlink_recvmsg(struct kiocb *kiocb, struct socket *sock,
			   struct msghdr *msg, size_t len,
			   int flags)
{
	struct sock_iocb *siocb = kiocb_to_siocb(kiocb);
	struct scm_cookie scm;
	struct sock *sk = sock->sk;
	struct netlink_sock *nlk = nlk_sk(sk);
	int noblock = flags & MSG_DONTWAIT;
	size_t copied;
	struct sk_buff *skb;
	int err;

	if (flags & MSG_OOB)
		return -EOPNOTSUPP;

	copied = 0;

	skb = skb_recv_datagram(sk, flags, noblock, &err);
	if (skb == NULL)
		goto out;

	msg->msg_namelen = 0;

	copied = skb->len;
	if (len < copied) {
		msg->msg_flags |= MSG_TRUNC;
		copied = len;
	}

	skb_reset_transport_header(skb);
	err = skb_copy_datagram_iovec(skb, 0, msg->msg_iov, copied);

	if (msg->msg_name) {
		struct sockaddr_nl *addr = (struct sockaddr_nl *)msg->msg_name;
		addr->nl_family = AF_NETLINK;
		addr->nl_pad = 0;
		addr->nl_pid = NETLINK_CB(skb).pid;
		addr->nl_groups = netlink_group_mask(NETLINK_CB(skb).dst_group);
		msg->msg_namelen = sizeof(*addr);
	}

	if (nlk->flags & NETLINK_RECV_PKTINFO)
		netlink_cmsg_recv_pktinfo(msg, skb);

	if (NULL == siocb->scm) {
		memset(&scm, 0, sizeof(scm));
		siocb->scm = &scm;
	}
	siocb->scm->creds = *NETLINK_CREDS(skb);
	if (flags & MSG_TRUNC)
		copied = skb->len;
	skb_free_datagram(sk, skb);

	if (nlk->cb && atomic_read(&sk->sk_rmem_alloc) <= sk->sk_rcvbuf / 2)
		netlink_dump(sk);

	scm_recv(sock, msg, siocb->scm, flags);
out:
	netlink_rcv_wake(sk);
	return err ? : copied;
}

static void netlink_data_ready(struct sock *sk, int len)
{
	struct netlink_sock *nlk = nlk_sk(sk);

	if (nlk->data_ready)
		nlk->data_ready(sk, len);
	netlink_rcv_wake(sk);
}

/*
 *	We export these functions to other modules. They provide a
 *	complete set of kernel non-blocking support for message
 *	queueing.
 */

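/*
 * A minimal usage sketch (hypothetical names, not part of this file):
 * a subsystem creates its kernel socket and drains the queue from its
 * input callback, e.g.
 *
 *	static int my_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh);
 *
 *	static void my_input(struct sock *sk, int len)
 *	{
 *		unsigned int qlen = 0;
 *
 *		do {
 *			netlink_run_queue(sk, &qlen, my_rcv_msg);
 *		} while (qlen);
 *	}
 *
 *	sk = netlink_kernel_create(NETLINK_USERSOCK, 0, my_input,
 *				   NULL, THIS_MODULE);
 *
 * Replies go back with netlink_unicast() or netlink_broadcast().
 */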
struct sock *
netlink_kernel_create(int unit, unsigned int groups,
		      void (*input)(struct sock *sk, int len),
		      struct mutex *cb_mutex, struct module *module)
{
	struct socket *sock;
	struct sock *sk;
	struct netlink_sock *nlk;
	unsigned long *listeners = NULL;

	BUG_ON(!nl_table);

	if (unit < 0 || unit >= MAX_LINKS)
		return NULL;

	if (sock_create_lite(PF_NETLINK, SOCK_DGRAM, unit, &sock))
		return NULL;

	if (__netlink_create(sock, cb_mutex, unit) < 0)
		goto out_sock_release;

	if (groups < 32)
		groups = 32;

	listeners = kzalloc(NLGRPSZ(groups), GFP_KERNEL);
	if (!listeners)
		goto out_sock_release;

	sk = sock->sk;
	sk->sk_data_ready = netlink_data_ready;
	if (input)
		nlk_sk(sk)->data_ready = input;

	if (netlink_insert(sk, 0))
		goto out_sock_release;

	nlk = nlk_sk(sk);
	nlk->flags |= NETLINK_KERNEL_SOCKET;

	netlink_table_grab();
	nl_table[unit].groups = groups;
	nl_table[unit].listeners = listeners;
	nl_table[unit].cb_mutex = cb_mutex;
	nl_table[unit].module = module;
	nl_table[unit].registered = 1;
	netlink_table_ungrab();

	return sk;

out_sock_release:
	kfree(listeners);
	sock_release(sock);
	return NULL;
}

/**
 * netlink_change_ngroups - change number of multicast groups
 *
 * This changes the number of multicast groups that are available
 * on a certain netlink family. Note that it is not possible to
 * change the number of groups to below 32.
 *
 * @sk: The kernel netlink socket, as returned by netlink_kernel_create().
 * @groups: The new number of groups.
 */
int netlink_change_ngroups(struct sock *sk, unsigned int groups)
{
	unsigned long *listeners, *old = NULL;
	struct netlink_table *tbl = &nl_table[sk->sk_protocol];
	int err = 0;

	if (groups < 32)
		groups = 32;

	netlink_table_grab();
	if (NLGRPSZ(tbl->groups) < NLGRPSZ(groups)) {
		listeners = kzalloc(NLGRPSZ(groups), GFP_ATOMIC);
		if (!listeners) {
			err = -ENOMEM;
			goto out_ungrab;
		}
		old = tbl->listeners;
		memcpy(listeners, old, NLGRPSZ(tbl->groups));
		rcu_assign_pointer(tbl->listeners, listeners);
	}
	tbl->groups = groups;

 out_ungrab:
	netlink_table_ungrab();
	synchronize_rcu();
	kfree(old);
	return err;
}
EXPORT_SYMBOL(netlink_change_ngroups);

void netlink_set_nonroot(int protocol, unsigned int flags)
{
	if ((unsigned int)protocol < MAX_LINKS)
		nl_table[protocol].nl_nonroot = flags;
}

static void netlink_destroy_callback(struct netlink_callback *cb)
{
	if (cb->skb)
		kfree_skb(cb->skb);
	kfree(cb);
}

/*
 * It looks a bit ugly.
 * It would be better to create a kernel thread.
 */

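/*
 * Feed one chunk of the pending dump into a freshly allocated skb and
 * queue it on the socket. Once cb->dump() reports no more data, an
 * NLMSG_DONE message carrying the final return value is appended and
 * the callback is torn down.
 */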
static int netlink_dump(struct sock *sk)
{
	struct netlink_sock *nlk = nlk_sk(sk);
	struct netlink_callback *cb;
	struct sk_buff *skb;
	struct nlmsghdr *nlh;
	int len, err = -ENOBUFS;

	skb = sock_rmalloc(sk, NLMSG_GOODSIZE, 0, GFP_KERNEL);
	if (!skb)
		goto errout;

	mutex_lock(nlk->cb_mutex);

	cb = nlk->cb;
	if (cb == NULL) {
		err = -EINVAL;
		goto errout_skb;
	}

	len = cb->dump(skb, cb);

	if (len > 0) {
		mutex_unlock(nlk->cb_mutex);
		skb_queue_tail(&sk->sk_receive_queue, skb);
		sk->sk_data_ready(sk, len);
		return 0;
	}

	nlh = nlmsg_put_answer(skb, cb, NLMSG_DONE, sizeof(len), NLM_F_MULTI);
	if (!nlh)
		goto errout_skb;

	memcpy(nlmsg_data(nlh), &len, sizeof(len));

	skb_queue_tail(&sk->sk_receive_queue, skb);
	sk->sk_data_ready(sk, skb->len);

	if (cb->done)
		cb->done(cb);
	nlk->cb = NULL;
	mutex_unlock(nlk->cb_mutex);

	netlink_destroy_callback(cb);
	return 0;

errout_skb:
	mutex_unlock(nlk->cb_mutex);
	kfree_skb(skb);
errout:
	return err;
}

int netlink_dump_start(struct sock *ssk, struct sk_buff *skb,
		       struct nlmsghdr *nlh,
		       int (*dump)(struct sk_buff *skb, struct netlink_callback *),
		       int (*done)(struct netlink_callback *))
{
	struct netlink_callback *cb;
	struct sock *sk;
	struct netlink_sock *nlk;

	cb = kzalloc(sizeof(*cb), GFP_KERNEL);
	if (cb == NULL)
		return -ENOBUFS;

	cb->dump = dump;
	cb->done = done;
	cb->nlh = nlh;
	atomic_inc(&skb->users);
	cb->skb = skb;

	sk = netlink_lookup(ssk->sk_protocol, NETLINK_CB(skb).pid);
	if (sk == NULL) {
		netlink_destroy_callback(cb);
		return -ECONNREFUSED;
	}
	nlk = nlk_sk(sk);
	/* A dump is in progress... */
	mutex_lock(nlk->cb_mutex);
	if (nlk->cb) {
		mutex_unlock(nlk->cb_mutex);
		netlink_destroy_callback(cb);
		sock_put(sk);
		return -EBUSY;
	}
	nlk->cb = cb;
	mutex_unlock(nlk->cb_mutex);

	netlink_dump(sk);
	sock_put(sk);

	/* We successfully started a dump, by returning -EINTR we
	 * signal the queue management to interrupt processing of
	 * any netlink messages so userspace gets a chance to read
	 * the results. */
	return -EINTR;
}

void netlink_ack(struct sk_buff *in_skb, struct nlmsghdr *nlh, int err)
{
	struct sk_buff *skb;
	struct nlmsghdr *rep;
	struct nlmsgerr *errmsg;
	size_t payload = sizeof(*errmsg);

	/* error messages get the original request appended */
	if (err)
		payload += nlmsg_len(nlh);

	skb = nlmsg_new(payload, GFP_KERNEL);
	if (!skb) {
		struct sock *sk;

		sk = netlink_lookup(in_skb->sk->sk_protocol,
				    NETLINK_CB(in_skb).pid);
		if (sk) {
			sk->sk_err = ENOBUFS;
			sk->sk_error_report(sk);
			sock_put(sk);
		}
		return;
	}

	rep = __nlmsg_put(skb, NETLINK_CB(in_skb).pid, nlh->nlmsg_seq,
			  NLMSG_ERROR, sizeof(struct nlmsgerr), 0);
	errmsg = nlmsg_data(rep);
	errmsg->error = err;
	memcpy(&errmsg->msg, nlh, err ? nlh->nlmsg_len : sizeof(*nlh));
	netlink_unicast(in_skb->sk, skb, NETLINK_CB(in_skb).pid, MSG_DONTWAIT);
}

static int netlink_rcv_skb(struct sk_buff *skb, int (*cb)(struct sk_buff *,
							  struct nlmsghdr *))
{
	struct nlmsghdr *nlh;
	int err;

	while (skb->len >= nlmsg_total_size(0)) {
		nlh = nlmsg_hdr(skb);
		err = 0;

		if (nlh->nlmsg_len < NLMSG_HDRLEN || skb->len < nlh->nlmsg_len)
			return 0;

		/* Only requests are handled by the kernel */
		if (!(nlh->nlmsg_flags & NLM_F_REQUEST))
			goto skip;

		/* Skip control messages */
		if (nlh->nlmsg_type < NLMSG_MIN_TYPE)
			goto skip;

		err = cb(skb, nlh);
		if (err == -EINTR) {
			/* Not an error, but we interrupt processing */
			netlink_queue_skip(nlh, skb);
			return err;
		}
skip:
		if (nlh->nlmsg_flags & NLM_F_ACK || err)
			netlink_ack(skb, nlh, err);

		netlink_queue_skip(nlh, skb);
	}

	return 0;
}

/**
 * netlink_run_queue - Process netlink receive queue.
 * @sk: Netlink socket containing the queue
 * @qlen: Place to store queue length upon entry
 * @cb: Callback function invoked for each netlink message found
 *
 * Processes as much as there was in the queue upon entry and invokes
 * a callback function for each netlink message found. The callback
 * function may refuse a message by returning a negative error code
 * but setting the error pointer to 0 in which case this function
 * returns with a qlen != 0.
 *
 * qlen must be initialized to 0 before the initial entry, afterwards
 * the function may be called repeatedly until qlen reaches 0.
 *
 * The callback function may return -EINTR to signal that processing
 * of netlink messages shall be interrupted. In this case the message
 * currently being processed will NOT be requeued onto the receive
 * queue.
 */
void netlink_run_queue(struct sock *sk, unsigned int *qlen,
		       int (*cb)(struct sk_buff *, struct nlmsghdr *))
{
	struct sk_buff *skb;

	if (!*qlen || *qlen > skb_queue_len(&sk->sk_receive_queue))
		*qlen = skb_queue_len(&sk->sk_receive_queue);

	for (; *qlen; (*qlen)--) {
		skb = skb_dequeue(&sk->sk_receive_queue);
		if (netlink_rcv_skb(skb, cb)) {
			if (skb->len)
				skb_queue_head(&sk->sk_receive_queue, skb);
			else {
				kfree_skb(skb);
				(*qlen)--;
			}
			break;
		}

		kfree_skb(skb);
	}
}

/**
 * netlink_queue_skip - Skip netlink message while processing queue.
 * @nlh: Netlink message to be skipped
 * @skb: Socket buffer containing the netlink messages.
 *
 * Pulls the given netlink message off the socket buffer so the next
 * call to netlink_run_queue() will not reconsider the message.
 */
static void netlink_queue_skip(struct nlmsghdr *nlh, struct sk_buff *skb)
{
	int msglen = NLMSG_ALIGN(nlh->nlmsg_len);

	if (msglen > skb->len)
		msglen = skb->len;

	skb_pull(skb, msglen);
}

/**
 * nlmsg_notify - send a notification netlink message
 * @sk: netlink socket to use
 * @skb: notification message
 * @pid: destination netlink pid for reports or 0
 * @group: destination multicast group or 0
 * @report: 1 to report back, 0 to disable
 * @flags: allocation flags
 */
int nlmsg_notify(struct sock *sk, struct sk_buff *skb, u32 pid,
		 unsigned int group, int report, gfp_t flags)
{
	int err = 0;

	if (group) {
		int exclude_pid = 0;

		if (report) {
			atomic_inc(&skb->users);
			exclude_pid = pid;
		}

		/* errors reported via destination sk->sk_err */
		nlmsg_multicast(sk, skb, exclude_pid, group, flags);
	}

	if (report)
		err = nlmsg_unicast(sk, skb, pid);

	return err;
}

#ifdef CONFIG_PROC_FS
struct nl_seq_iter {
	int link;
	int hash_idx;
};

static struct sock *netlink_seq_socket_idx(struct seq_file *seq, loff_t pos)
{
	struct nl_seq_iter *iter = seq->private;
	int i, j;
	struct sock *s;
	struct hlist_node *node;
	loff_t off = 0;

	for (i = 0; i < MAX_LINKS; i++) {
		struct nl_pid_hash *hash = &nl_table[i].hash;

		for (j = 0; j <= hash->mask; j++) {
			sk_for_each(s, node, &hash->table[j]) {
				if (off == pos) {
					iter->link = i;
					iter->hash_idx = j;
					return s;
				}
				++off;
			}
		}
	}
	return NULL;
}

static void *netlink_seq_start(struct seq_file *seq, loff_t *pos)
{
	read_lock(&nl_table_lock);
	return *pos ? netlink_seq_socket_idx(seq, *pos - 1) : SEQ_START_TOKEN;
}

static void *netlink_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	struct sock *s;
	struct nl_seq_iter *iter;
	int i, j;

	++*pos;

	if (v == SEQ_START_TOKEN)
		return netlink_seq_socket_idx(seq, 0);

	s = sk_next(v);
	if (s)
		return s;

	iter = seq->private;
	i = iter->link;
	j = iter->hash_idx + 1;

	do {
		struct nl_pid_hash *hash = &nl_table[i].hash;

		for (; j <= hash->mask; j++) {
			s = sk_head(&hash->table[j]);
			if (s) {
				iter->link = i;
				iter->hash_idx = j;
				return s;
			}
		}

		j = 0;
	} while (++i < MAX_LINKS);

	return NULL;
}

static void netlink_seq_stop(struct seq_file *seq, void *v)
{
	read_unlock(&nl_table_lock);
}


static int netlink_seq_show(struct seq_file *seq, void *v)
{
	if (v == SEQ_START_TOKEN)
		seq_puts(seq,
			 "sk       Eth Pid    Groups   "
			 "Rmem     Wmem     Dump     Locks\n");
	else {
		struct sock *s = v;
		struct netlink_sock *nlk = nlk_sk(s);

		seq_printf(seq, "%p %-3d %-6d %08x %-8d %-8d %p %d\n",
			   s,
			   s->sk_protocol,
			   nlk->pid,
			   nlk->groups ? (u32)nlk->groups[0] : 0,
			   atomic_read(&s->sk_rmem_alloc),
			   atomic_read(&s->sk_wmem_alloc),
			   nlk->cb,
			   atomic_read(&s->sk_refcnt)
			);

	}
	return 0;
}

static const struct seq_operations netlink_seq_ops = {
	.start  = netlink_seq_start,
	.next   = netlink_seq_next,
	.stop   = netlink_seq_stop,
	.show   = netlink_seq_show,
};


static int netlink_seq_open(struct inode *inode, struct file *file)
{
	struct seq_file *seq;
	struct nl_seq_iter *iter;
	int err;

	iter = kzalloc(sizeof(*iter), GFP_KERNEL);
	if (!iter)
		return -ENOMEM;

	err = seq_open(file, &netlink_seq_ops);
	if (err) {
		kfree(iter);
		return err;
	}

	seq = file->private_data;
	seq->private = iter;
	return 0;
}

static const struct file_operations netlink_seq_fops = {
	.owner		= THIS_MODULE,
	.open		= netlink_seq_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= seq_release_private,
};

#endif

int netlink_register_notifier(struct notifier_block *nb)
{
	return atomic_notifier_chain_register(&netlink_chain, nb);
}

int netlink_unregister_notifier(struct notifier_block *nb)
{
	return atomic_notifier_chain_unregister(&netlink_chain, nb);
}

static const struct proto_ops netlink_ops = {
	.family =	PF_NETLINK,
	.owner =	THIS_MODULE,
	.release =	netlink_release,
	.bind =		netlink_bind,
	.connect =	netlink_connect,
	.socketpair =	sock_no_socketpair,
	.accept =	sock_no_accept,
	.getname =	netlink_getname,
	.poll =		datagram_poll,
	.ioctl =	sock_no_ioctl,
	.listen =	sock_no_listen,
	.shutdown =	sock_no_shutdown,
	.setsockopt =	netlink_setsockopt,
	.getsockopt =	netlink_getsockopt,
	.sendmsg =	netlink_sendmsg,
	.recvmsg =	netlink_recvmsg,
	.mmap =		sock_no_mmap,
	.sendpage =	sock_no_sendpage,
};

static struct net_proto_family netlink_family_ops = {
	.family = PF_NETLINK,
	.create = netlink_create,
	.owner	= THIS_MODULE,	/* for consistency 8) */
};

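/*
 * The pid hash starts at a single bucket and grows on demand; the
 * max_shift computed below caps the table at roughly 1/512th of RAM on
 * machines with 512MB or more, and roughly 1/2048th below that, as
 * derived from num_physpages.
 */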
static int __init netlink_proto_init(void)
{
	struct sk_buff *dummy_skb;
	int i;
	unsigned long max;
	unsigned int order;
	int err = proto_register(&netlink_proto, 0);

	if (err != 0)
		goto out;

	BUILD_BUG_ON(sizeof(struct netlink_skb_parms) > sizeof(dummy_skb->cb));

	nl_table = kcalloc(MAX_LINKS, sizeof(*nl_table), GFP_KERNEL);
	if (!nl_table)
		goto panic;

	if (num_physpages >= (128 * 1024))
		max = num_physpages >> (21 - PAGE_SHIFT);
	else
		max = num_physpages >> (23 - PAGE_SHIFT);

	order = get_bitmask_order(max) - 1 + PAGE_SHIFT;
	max = (1UL << order) / sizeof(struct hlist_head);
	order = get_bitmask_order(max > UINT_MAX ? UINT_MAX : max) - 1;

	for (i = 0; i < MAX_LINKS; i++) {
		struct nl_pid_hash *hash = &nl_table[i].hash;

		hash->table = nl_pid_hash_alloc(1 * sizeof(*hash->table));
		if (!hash->table) {
			while (i-- > 0)
				nl_pid_hash_free(nl_table[i].hash.table,
						 1 * sizeof(*hash->table));
			kfree(nl_table);
			goto panic;
		}
		memset(hash->table, 0, 1 * sizeof(*hash->table));
		hash->max_shift = order;
		hash->shift = 0;
		hash->mask = 0;
		hash->rehash_time = jiffies;
	}

	sock_register(&netlink_family_ops);
#ifdef CONFIG_PROC_FS
	proc_net_fops_create("netlink", 0, &netlink_seq_fops);
#endif
	/* The netlink device handler may be needed early. */
	rtnetlink_init();
out:
	return err;
panic:
	panic("netlink_init: Cannot allocate nl_table\n");
}

core_initcall(netlink_proto_init);

EXPORT_SYMBOL(netlink_ack);
EXPORT_SYMBOL(netlink_run_queue);
EXPORT_SYMBOL(netlink_broadcast);
EXPORT_SYMBOL(netlink_dump_start);
EXPORT_SYMBOL(netlink_kernel_create);
EXPORT_SYMBOL(netlink_register_notifier);
EXPORT_SYMBOL(netlink_set_nonroot);
EXPORT_SYMBOL(netlink_unicast);
EXPORT_SYMBOL(netlink_unregister_notifier);
EXPORT_SYMBOL(nlmsg_notify);