net/netlink/af_netlink.c
1/*
2 * NETLINK Kernel-user communication protocol.
3 *
4 * Authors: Alan Cox <alan@redhat.com>
5 * Alexey Kuznetsov <kuznet@ms2.inr.ac.ru>
6 *
7 * This program is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU General Public License
9 * as published by the Free Software Foundation; either version
10 * 2 of the License, or (at your option) any later version.
11 *
12 * Tue Jun 26 14:36:48 MEST 2001 Herbert "herp" Rosmanith
13 * added netlink_proto_exit
14 * Tue Jan 22 18:32:44 BRST 2002 Arnaldo C. de Melo <acme@conectiva.com.br>
15 * use nlk_sk, as sk->protinfo is on a diet 8)
16 * Fri Jul 22 19:51:12 MEST 2005 Harald Welte <laforge@gnumonks.org>
17 * - inc module use count of module that owns
18 * the kernel socket in case userspace opens
19 * socket of same protocol
20 * - remove all module support, since netlink is
21 * mandatory if CONFIG_NET=y these days
22 */
23
24#include <linux/module.h>
25
4fc268d2 26#include <linux/capability.h>
27#include <linux/kernel.h>
28#include <linux/init.h>
29#include <linux/signal.h>
30#include <linux/sched.h>
31#include <linux/errno.h>
32#include <linux/string.h>
33#include <linux/stat.h>
34#include <linux/socket.h>
35#include <linux/un.h>
36#include <linux/fcntl.h>
37#include <linux/termios.h>
38#include <linux/sockios.h>
39#include <linux/net.h>
40#include <linux/fs.h>
41#include <linux/slab.h>
42#include <asm/uaccess.h>
43#include <linux/skbuff.h>
44#include <linux/netdevice.h>
45#include <linux/rtnetlink.h>
46#include <linux/proc_fs.h>
47#include <linux/seq_file.h>
48#include <linux/smp_lock.h>
49#include <linux/notifier.h>
50#include <linux/security.h>
51#include <linux/jhash.h>
52#include <linux/jiffies.h>
53#include <linux/random.h>
54#include <linux/bitops.h>
55#include <linux/mm.h>
56#include <linux/types.h>
54e0f520 57#include <linux/audit.h>
e7c34970 58#include <linux/selinux.h>
54e0f520 59
60#include <net/sock.h>
61#include <net/scm.h>
82ace47a 62#include <net/netlink.h>
1da177e4 63
f7fa9b10 64#define NLGRPSZ(x) (ALIGN(x, sizeof(unsigned long) * 8) / 8)
65
66struct netlink_sock {
67 /* struct sock has to be the first member of netlink_sock */
68 struct sock sk;
69 u32 pid;
1da177e4 70 u32 dst_pid;
d629b836 71 u32 dst_group;
72 u32 flags;
73 u32 subscriptions;
74 u32 ngroups;
75 unsigned long *groups;
76 unsigned long state;
77 wait_queue_head_t wait;
78 struct netlink_callback *cb;
79 spinlock_t cb_lock;
80 void (*data_ready)(struct sock *sk, int bytes);
77247bbb 81 struct module *module;
82};
83
77247bbb 84#define NETLINK_KERNEL_SOCKET 0x1
9a4595bc 85#define NETLINK_RECV_PKTINFO 0x2
77247bbb 86
87static inline struct netlink_sock *nlk_sk(struct sock *sk)
88{
89 return (struct netlink_sock *)sk;
90}
91
92struct nl_pid_hash {
93 struct hlist_head *table;
94 unsigned long rehash_time;
95
96 unsigned int mask;
97 unsigned int shift;
98
99 unsigned int entries;
100 unsigned int max_shift;
101
102 u32 rnd;
103};
104
105struct netlink_table {
106 struct nl_pid_hash hash;
107 struct hlist_head mc_list;
4277a083 108 unsigned long *listeners;
1da177e4 109 unsigned int nl_nonroot;
f7fa9b10 110 unsigned int groups;
77247bbb 111 struct module *module;
ab33a171 112 int registered;
113};
114
115static struct netlink_table *nl_table;
116
117static DECLARE_WAIT_QUEUE_HEAD(nl_table_wait);
118
119static int netlink_dump(struct sock *sk);
120static void netlink_destroy_callback(struct netlink_callback *cb);
121
122static DEFINE_RWLOCK(nl_table_lock);
123static atomic_t nl_table_users = ATOMIC_INIT(0);
124
e041c683 125static ATOMIC_NOTIFIER_HEAD(netlink_chain);
1da177e4 126
127static u32 netlink_group_mask(u32 group)
128{
129 return group ? 1 << (group - 1) : 0;
130}
131
132static struct hlist_head *nl_pid_hashfn(struct nl_pid_hash *hash, u32 pid)
133{
134 return &hash->table[jhash_1word(pid, hash->rnd) & hash->mask];
135}
136
137static void netlink_sock_destruct(struct sock *sk)
138{
139 skb_queue_purge(&sk->sk_receive_queue);
140
141 if (!sock_flag(sk, SOCK_DEAD)) {
142 printk("Freeing alive netlink socket %p\n", sk);
143 return;
144 }
145 BUG_TRAP(!atomic_read(&sk->sk_rmem_alloc));
146 BUG_TRAP(!atomic_read(&sk->sk_wmem_alloc));
147 BUG_TRAP(!nlk_sk(sk)->cb);
f7fa9b10 148 BUG_TRAP(!nlk_sk(sk)->groups);
149}
150
 151/* This lock without WQ_FLAG_EXCLUSIVE is fine on UP but _very_ bad on SMP.
 152 * When several writers sleep and a reader wakes them up, all but one
 153 * immediately hit the write lock and grab all the cpus. Exclusive sleep
 154 * solves this, _but_ remember that it adds useless work on UP machines.
155 */
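/*
 * Editorial sketch of the resulting locking pattern (only helpers defined
 * further down in this file are used):
 *
 *	netlink_table_grab();		writer side, sleeps until readers drain
 *	...modify nl_table...
 *	netlink_table_ungrab();
 *
 *	netlink_lock_table();		reader side, cheap: just bumps nl_table_users
 *	...look up sockets...
 *	netlink_unlock_table();
 */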
156
157static void netlink_table_grab(void)
158{
6abd219c 159 write_lock_irq(&nl_table_lock);
160
161 if (atomic_read(&nl_table_users)) {
162 DECLARE_WAITQUEUE(wait, current);
163
164 add_wait_queue_exclusive(&nl_table_wait, &wait);
165 for(;;) {
166 set_current_state(TASK_UNINTERRUPTIBLE);
167 if (atomic_read(&nl_table_users) == 0)
168 break;
6abd219c 169 write_unlock_irq(&nl_table_lock);
1da177e4 170 schedule();
6abd219c 171 write_lock_irq(&nl_table_lock);
172 }
173
174 __set_current_state(TASK_RUNNING);
175 remove_wait_queue(&nl_table_wait, &wait);
176 }
177}
178
179static __inline__ void netlink_table_ungrab(void)
180{
6abd219c 181 write_unlock_irq(&nl_table_lock);
182 wake_up(&nl_table_wait);
183}
184
185static __inline__ void
186netlink_lock_table(void)
187{
188 /* read_lock() synchronizes us to netlink_table_grab */
189
190 read_lock(&nl_table_lock);
191 atomic_inc(&nl_table_users);
192 read_unlock(&nl_table_lock);
193}
194
195static __inline__ void
196netlink_unlock_table(void)
197{
198 if (atomic_dec_and_test(&nl_table_users))
199 wake_up(&nl_table_wait);
200}
201
202static __inline__ struct sock *netlink_lookup(int protocol, u32 pid)
203{
204 struct nl_pid_hash *hash = &nl_table[protocol].hash;
205 struct hlist_head *head;
206 struct sock *sk;
207 struct hlist_node *node;
208
209 read_lock(&nl_table_lock);
210 head = nl_pid_hashfn(hash, pid);
211 sk_for_each(sk, node, head) {
212 if (nlk_sk(sk)->pid == pid) {
213 sock_hold(sk);
214 goto found;
215 }
216 }
217 sk = NULL;
218found:
219 read_unlock(&nl_table_lock);
220 return sk;
221}
222
223static inline struct hlist_head *nl_pid_hash_alloc(size_t size)
224{
225 if (size <= PAGE_SIZE)
226 return kmalloc(size, GFP_ATOMIC);
227 else
228 return (struct hlist_head *)
229 __get_free_pages(GFP_ATOMIC, get_order(size));
230}
231
232static inline void nl_pid_hash_free(struct hlist_head *table, size_t size)
233{
234 if (size <= PAGE_SIZE)
235 kfree(table);
236 else
237 free_pages((unsigned long)table, get_order(size));
238}
239
240static int nl_pid_hash_rehash(struct nl_pid_hash *hash, int grow)
241{
242 unsigned int omask, mask, shift;
243 size_t osize, size;
244 struct hlist_head *otable, *table;
245 int i;
246
247 omask = mask = hash->mask;
248 osize = size = (mask + 1) * sizeof(*table);
249 shift = hash->shift;
250
251 if (grow) {
252 if (++shift > hash->max_shift)
253 return 0;
254 mask = mask * 2 + 1;
255 size *= 2;
256 }
257
258 table = nl_pid_hash_alloc(size);
259 if (!table)
260 return 0;
261
262 memset(table, 0, size);
263 otable = hash->table;
264 hash->table = table;
265 hash->mask = mask;
266 hash->shift = shift;
267 get_random_bytes(&hash->rnd, sizeof(hash->rnd));
268
269 for (i = 0; i <= omask; i++) {
270 struct sock *sk;
271 struct hlist_node *node, *tmp;
272
273 sk_for_each_safe(sk, node, tmp, &otable[i])
274 __sk_add_node(sk, nl_pid_hashfn(hash, nlk_sk(sk)->pid));
275 }
276
277 nl_pid_hash_free(otable, osize);
278 hash->rehash_time = jiffies + 10 * 60 * HZ;
279 return 1;
280}
281
282static inline int nl_pid_hash_dilute(struct nl_pid_hash *hash, int len)
283{
284 int avg = hash->entries >> hash->shift;
285
286 if (unlikely(avg > 1) && nl_pid_hash_rehash(hash, 1))
287 return 1;
288
289 if (unlikely(len > avg) && time_after(jiffies, hash->rehash_time)) {
290 nl_pid_hash_rehash(hash, 0);
291 return 1;
292 }
293
294 return 0;
295}
296
90ddc4f0 297static const struct proto_ops netlink_ops;
1da177e4 298
299static void
300netlink_update_listeners(struct sock *sk)
301{
302 struct netlink_table *tbl = &nl_table[sk->sk_protocol];
303 struct hlist_node *node;
304 unsigned long mask;
305 unsigned int i;
306
307 for (i = 0; i < NLGRPSZ(tbl->groups)/sizeof(unsigned long); i++) {
308 mask = 0;
309 sk_for_each_bound(sk, node, &tbl->mc_list)
310 mask |= nlk_sk(sk)->groups[i];
311 tbl->listeners[i] = mask;
312 }
313 /* this function is only called with the netlink table "grabbed", which
314 * makes sure updates are visible before bind or setsockopt return. */
315}
316
317static int netlink_insert(struct sock *sk, u32 pid)
318{
319 struct nl_pid_hash *hash = &nl_table[sk->sk_protocol].hash;
320 struct hlist_head *head;
321 int err = -EADDRINUSE;
322 struct sock *osk;
323 struct hlist_node *node;
324 int len;
325
326 netlink_table_grab();
327 head = nl_pid_hashfn(hash, pid);
328 len = 0;
329 sk_for_each(osk, node, head) {
330 if (nlk_sk(osk)->pid == pid)
331 break;
332 len++;
333 }
334 if (node)
335 goto err;
336
337 err = -EBUSY;
338 if (nlk_sk(sk)->pid)
339 goto err;
340
341 err = -ENOMEM;
342 if (BITS_PER_LONG > 32 && unlikely(hash->entries >= UINT_MAX))
343 goto err;
344
345 if (len && nl_pid_hash_dilute(hash, len))
346 head = nl_pid_hashfn(hash, pid);
347 hash->entries++;
348 nlk_sk(sk)->pid = pid;
349 sk_add_node(sk, head);
350 err = 0;
351
352err:
353 netlink_table_ungrab();
354 return err;
355}
356
357static void netlink_remove(struct sock *sk)
358{
359 netlink_table_grab();
360 if (sk_del_node_init(sk))
361 nl_table[sk->sk_protocol].hash.entries--;
f7fa9b10 362 if (nlk_sk(sk)->subscriptions)
363 __sk_del_bind_node(sk);
364 netlink_table_ungrab();
365}
366
367static struct proto netlink_proto = {
368 .name = "NETLINK",
369 .owner = THIS_MODULE,
370 .obj_size = sizeof(struct netlink_sock),
371};
372
ab33a171 373static int __netlink_create(struct socket *sock, int protocol)
374{
375 struct sock *sk;
376 struct netlink_sock *nlk;
377
378 sock->ops = &netlink_ops;
379
380 sk = sk_alloc(PF_NETLINK, GFP_KERNEL, &netlink_proto, 1);
381 if (!sk)
382 return -ENOMEM;
383
384 sock_init_data(sock, sk);
385
386 nlk = nlk_sk(sk);
387 spin_lock_init(&nlk->cb_lock);
388 init_waitqueue_head(&nlk->wait);
389
390 sk->sk_destruct = netlink_sock_destruct;
391 sk->sk_protocol = protocol;
392 return 0;
393}
394
395static int netlink_create(struct socket *sock, int protocol)
396{
397 struct module *module = NULL;
398 struct netlink_sock *nlk;
399 unsigned int groups;
ab33a171 400 int err = 0;
401
402 sock->state = SS_UNCONNECTED;
403
404 if (sock->type != SOCK_RAW && sock->type != SOCK_DGRAM)
405 return -ESOCKTNOSUPPORT;
406
407 if (protocol<0 || protocol >= MAX_LINKS)
408 return -EPROTONOSUPPORT;
409
77247bbb 410 netlink_lock_table();
4fdb3bb7 411#ifdef CONFIG_KMOD
ab33a171 412 if (!nl_table[protocol].registered) {
77247bbb 413 netlink_unlock_table();
4fdb3bb7 414 request_module("net-pf-%d-proto-%d", PF_NETLINK, protocol);
77247bbb 415 netlink_lock_table();
4fdb3bb7 416 }
417#endif
418 if (nl_table[protocol].registered &&
419 try_module_get(nl_table[protocol].module))
420 module = nl_table[protocol].module;
f7fa9b10 421 groups = nl_table[protocol].groups;
77247bbb 422 netlink_unlock_table();
4fdb3bb7 423
14591de1 424 if ((err = __netlink_create(sock, protocol)) < 0)
425 goto out_module;
426
427 nlk = nlk_sk(sock->sk);
f7fa9b10 428 nlk->module = module;
429out:
430 return err;
1da177e4 431
432out_module:
433 module_put(module);
434 goto out;
435}
436
437static int netlink_release(struct socket *sock)
438{
439 struct sock *sk = sock->sk;
440 struct netlink_sock *nlk;
441
442 if (!sk)
443 return 0;
444
445 netlink_remove(sk);
446 nlk = nlk_sk(sk);
447
448 spin_lock(&nlk->cb_lock);
449 if (nlk->cb) {
450 if (nlk->cb->done)
451 nlk->cb->done(nlk->cb);
452 netlink_destroy_callback(nlk->cb);
453 nlk->cb = NULL;
454 }
455 spin_unlock(&nlk->cb_lock);
456
457 /* OK. Socket is unlinked, and, therefore,
458 no new packets will arrive */
459
460 sock_orphan(sk);
461 sock->sk = NULL;
462 wake_up_interruptible_all(&nlk->wait);
463
464 skb_queue_purge(&sk->sk_write_queue);
465
f7fa9b10 466 if (nlk->pid && !nlk->subscriptions) {
467 struct netlink_notify n = {
468 .protocol = sk->sk_protocol,
469 .pid = nlk->pid,
470 };
471 atomic_notifier_call_chain(&netlink_chain,
472 NETLINK_URELEASE, &n);
1da177e4 473 }
4fdb3bb7 474
475 if (nlk->module)
476 module_put(nlk->module);
4fdb3bb7 477
4277a083 478 netlink_table_grab();
77247bbb 479 if (nlk->flags & NETLINK_KERNEL_SOCKET) {
4277a083 480 kfree(nl_table[sk->sk_protocol].listeners);
77247bbb 481 nl_table[sk->sk_protocol].module = NULL;
ab33a171 482 nl_table[sk->sk_protocol].registered = 0;
483 } else if (nlk->subscriptions)
484 netlink_update_listeners(sk);
485 netlink_table_ungrab();
77247bbb 486
487 kfree(nlk->groups);
488 nlk->groups = NULL;
489
490 sock_put(sk);
491 return 0;
492}
493
494static int netlink_autobind(struct socket *sock)
495{
496 struct sock *sk = sock->sk;
497 struct nl_pid_hash *hash = &nl_table[sk->sk_protocol].hash;
498 struct hlist_head *head;
499 struct sock *osk;
500 struct hlist_node *node;
c27bd492 501 s32 pid = current->tgid;
502 int err;
503 static s32 rover = -4097;
504
505retry:
506 cond_resched();
507 netlink_table_grab();
508 head = nl_pid_hashfn(hash, pid);
509 sk_for_each(osk, node, head) {
510 if (nlk_sk(osk)->pid == pid) {
511 /* Bind collision, search negative pid values. */
512 pid = rover--;
513 if (rover > -4097)
514 rover = -4097;
515 netlink_table_ungrab();
516 goto retry;
517 }
518 }
519 netlink_table_ungrab();
520
521 err = netlink_insert(sk, pid);
522 if (err == -EADDRINUSE)
523 goto retry;
524
525 /* If 2 threads race to autobind, that is fine. */
526 if (err == -EBUSY)
527 err = 0;
528
529 return err;
530}
531
532static inline int netlink_capable(struct socket *sock, unsigned int flag)
533{
534 return (nl_table[sock->sk->sk_protocol].nl_nonroot & flag) ||
535 capable(CAP_NET_ADMIN);
536}
537
538static void
539netlink_update_subscriptions(struct sock *sk, unsigned int subscriptions)
540{
541 struct netlink_sock *nlk = nlk_sk(sk);
542
543 if (nlk->subscriptions && !subscriptions)
544 __sk_del_bind_node(sk);
545 else if (!nlk->subscriptions && subscriptions)
546 sk_add_bind_node(sk, &nl_table[sk->sk_protocol].mc_list);
547 nlk->subscriptions = subscriptions;
548}
549
550static int netlink_alloc_groups(struct sock *sk)
551{
552 struct netlink_sock *nlk = nlk_sk(sk);
553 unsigned int groups;
554 int err = 0;
555
556 netlink_lock_table();
557 groups = nl_table[sk->sk_protocol].groups;
558 if (!nl_table[sk->sk_protocol].registered)
559 err = -ENOENT;
560 netlink_unlock_table();
561
562 if (err)
563 return err;
564
0da974f4 565 nlk->groups = kzalloc(NLGRPSZ(groups), GFP_KERNEL);
566 if (nlk->groups == NULL)
567 return -ENOMEM;
568 nlk->ngroups = groups;
569 return 0;
570}
571
572static int netlink_bind(struct socket *sock, struct sockaddr *addr, int addr_len)
573{
574 struct sock *sk = sock->sk;
575 struct netlink_sock *nlk = nlk_sk(sk);
576 struct sockaddr_nl *nladdr = (struct sockaddr_nl *)addr;
577 int err;
578
579 if (nladdr->nl_family != AF_NETLINK)
580 return -EINVAL;
581
 582 /* Only superuser is allowed to listen to multicasts */
583 if (nladdr->nl_groups) {
584 if (!netlink_capable(sock, NL_NONROOT_RECV))
585 return -EPERM;
586 if (nlk->groups == NULL) {
587 err = netlink_alloc_groups(sk);
588 if (err)
589 return err;
590 }
591 }
592
593 if (nlk->pid) {
594 if (nladdr->nl_pid != nlk->pid)
595 return -EINVAL;
596 } else {
597 err = nladdr->nl_pid ?
598 netlink_insert(sk, nladdr->nl_pid) :
599 netlink_autobind(sock);
600 if (err)
601 return err;
602 }
603
513c2500 604 if (!nladdr->nl_groups && (nlk->groups == NULL || !(u32)nlk->groups[0]))
605 return 0;
606
607 netlink_table_grab();
608 netlink_update_subscriptions(sk, nlk->subscriptions +
609 hweight32(nladdr->nl_groups) -
610 hweight32(nlk->groups[0]));
611 nlk->groups[0] = (nlk->groups[0] & ~0xffffffffUL) | nladdr->nl_groups;
4277a083 612 netlink_update_listeners(sk);
613 netlink_table_ungrab();
614
615 return 0;
616}
617
618static int netlink_connect(struct socket *sock, struct sockaddr *addr,
619 int alen, int flags)
620{
621 int err = 0;
622 struct sock *sk = sock->sk;
623 struct netlink_sock *nlk = nlk_sk(sk);
624 struct sockaddr_nl *nladdr=(struct sockaddr_nl*)addr;
625
626 if (addr->sa_family == AF_UNSPEC) {
627 sk->sk_state = NETLINK_UNCONNECTED;
628 nlk->dst_pid = 0;
d629b836 629 nlk->dst_group = 0;
630 return 0;
631 }
632 if (addr->sa_family != AF_NETLINK)
633 return -EINVAL;
634
635 /* Only superuser is allowed to send multicasts */
636 if (nladdr->nl_groups && !netlink_capable(sock, NL_NONROOT_SEND))
637 return -EPERM;
638
639 if (!nlk->pid)
640 err = netlink_autobind(sock);
641
642 if (err == 0) {
643 sk->sk_state = NETLINK_CONNECTED;
644 nlk->dst_pid = nladdr->nl_pid;
d629b836 645 nlk->dst_group = ffs(nladdr->nl_groups);
646 }
647
648 return err;
649}
650
651static int netlink_getname(struct socket *sock, struct sockaddr *addr, int *addr_len, int peer)
652{
653 struct sock *sk = sock->sk;
654 struct netlink_sock *nlk = nlk_sk(sk);
655 struct sockaddr_nl *nladdr=(struct sockaddr_nl *)addr;
656
657 nladdr->nl_family = AF_NETLINK;
658 nladdr->nl_pad = 0;
659 *addr_len = sizeof(*nladdr);
660
661 if (peer) {
662 nladdr->nl_pid = nlk->dst_pid;
d629b836 663 nladdr->nl_groups = netlink_group_mask(nlk->dst_group);
664 } else {
665 nladdr->nl_pid = nlk->pid;
513c2500 666 nladdr->nl_groups = nlk->groups ? nlk->groups[0] : 0;
667 }
668 return 0;
669}
670
671static void netlink_overrun(struct sock *sk)
672{
673 if (!test_and_set_bit(0, &nlk_sk(sk)->state)) {
674 sk->sk_err = ENOBUFS;
675 sk->sk_error_report(sk);
676 }
677}
678
679static struct sock *netlink_getsockbypid(struct sock *ssk, u32 pid)
680{
681 int protocol = ssk->sk_protocol;
682 struct sock *sock;
683 struct netlink_sock *nlk;
684
685 sock = netlink_lookup(protocol, pid);
686 if (!sock)
687 return ERR_PTR(-ECONNREFUSED);
688
689 /* Don't bother queuing skb if kernel socket has no input function */
690 nlk = nlk_sk(sock);
691 if ((nlk->pid == 0 && !nlk->data_ready) ||
692 (sock->sk_state == NETLINK_CONNECTED &&
693 nlk->dst_pid != nlk_sk(ssk)->pid)) {
694 sock_put(sock);
695 return ERR_PTR(-ECONNREFUSED);
696 }
697 return sock;
698}
699
700struct sock *netlink_getsockbyfilp(struct file *filp)
701{
702 struct inode *inode = filp->f_dentry->d_inode;
703 struct sock *sock;
704
705 if (!S_ISSOCK(inode->i_mode))
706 return ERR_PTR(-ENOTSOCK);
707
708 sock = SOCKET_I(inode)->sk;
709 if (sock->sk_family != AF_NETLINK)
710 return ERR_PTR(-EINVAL);
711
712 sock_hold(sock);
713 return sock;
714}
715
716/*
717 * Attach a skb to a netlink socket.
718 * The caller must hold a reference to the destination socket. On error, the
 719 * reference is dropped. The skb is not sent to the destination; only the
 720 * error checks are performed and memory in the queue is reserved.
721 * Return values:
722 * < 0: error. skb freed, reference to sock dropped.
723 * 0: continue
724 * 1: repeat lookup - reference dropped while waiting for socket memory.
725 */
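/*
 * Editorial sketch of the retry pattern a caller is expected to use
 * (netlink_unicast() below is the real in-tree example):
 *
 *	retry:
 *		sk = netlink_getsockbypid(ssk, pid);
 *		if (IS_ERR(sk)) {
 *			kfree_skb(skb);
 *			return PTR_ERR(sk);
 *		}
 *		err = netlink_attachskb(sk, skb, nonblock, timeo, ssk);
 *		if (err == 1)
 *			goto retry;
 *		if (err)
 *			return err;
 *		return netlink_sendskb(sk, skb, ssk->sk_protocol);
 */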
726int netlink_attachskb(struct sock *sk, struct sk_buff *skb, int nonblock,
727 long timeo, struct sock *ssk)
728{
729 struct netlink_sock *nlk;
730
731 nlk = nlk_sk(sk);
732
733 if (atomic_read(&sk->sk_rmem_alloc) > sk->sk_rcvbuf ||
734 test_bit(0, &nlk->state)) {
735 DECLARE_WAITQUEUE(wait, current);
736 if (!timeo) {
a70ea994 737 if (!ssk || nlk_sk(ssk)->pid == 0)
738 netlink_overrun(sk);
739 sock_put(sk);
740 kfree_skb(skb);
741 return -EAGAIN;
742 }
743
744 __set_current_state(TASK_INTERRUPTIBLE);
745 add_wait_queue(&nlk->wait, &wait);
746
747 if ((atomic_read(&sk->sk_rmem_alloc) > sk->sk_rcvbuf ||
748 test_bit(0, &nlk->state)) &&
749 !sock_flag(sk, SOCK_DEAD))
750 timeo = schedule_timeout(timeo);
751
752 __set_current_state(TASK_RUNNING);
753 remove_wait_queue(&nlk->wait, &wait);
754 sock_put(sk);
755
756 if (signal_pending(current)) {
757 kfree_skb(skb);
758 return sock_intr_errno(timeo);
759 }
760 return 1;
761 }
762 skb_set_owner_r(skb, sk);
763 return 0;
764}
765
766int netlink_sendskb(struct sock *sk, struct sk_buff *skb, int protocol)
767{
768 int len = skb->len;
769
770 skb_queue_tail(&sk->sk_receive_queue, skb);
771 sk->sk_data_ready(sk, len);
772 sock_put(sk);
773 return len;
774}
775
776void netlink_detachskb(struct sock *sk, struct sk_buff *skb)
777{
778 kfree_skb(skb);
779 sock_put(sk);
780}
781
37da647d 782static inline struct sk_buff *netlink_trim(struct sk_buff *skb,
dd0fc66f 783 gfp_t allocation)
784{
785 int delta;
786
787 skb_orphan(skb);
788
789 delta = skb->end - skb->tail;
790 if (delta * 2 < skb->truesize)
791 return skb;
792
793 if (skb_shared(skb)) {
794 struct sk_buff *nskb = skb_clone(skb, allocation);
795 if (!nskb)
796 return skb;
797 kfree_skb(skb);
798 skb = nskb;
799 }
800
801 if (!pskb_expand_head(skb, 0, -delta, allocation))
802 skb->truesize -= delta;
803
804 return skb;
805}
806
807int netlink_unicast(struct sock *ssk, struct sk_buff *skb, u32 pid, int nonblock)
808{
809 struct sock *sk;
810 int err;
811 long timeo;
812
813 skb = netlink_trim(skb, gfp_any());
814
815 timeo = sock_sndtimeo(ssk, nonblock);
816retry:
817 sk = netlink_getsockbypid(ssk, pid);
818 if (IS_ERR(sk)) {
819 kfree_skb(skb);
820 return PTR_ERR(sk);
821 }
a70ea994 822 err = netlink_attachskb(sk, skb, nonblock, timeo, ssk);
823 if (err == 1)
824 goto retry;
825 if (err)
826 return err;
827
828 return netlink_sendskb(sk, skb, ssk->sk_protocol);
829}
830
831int netlink_has_listeners(struct sock *sk, unsigned int group)
832{
833 int res = 0;
834
835 BUG_ON(!(nlk_sk(sk)->flags & NETLINK_KERNEL_SOCKET));
836 if (group - 1 < nl_table[sk->sk_protocol].groups)
837 res = test_bit(group - 1, nl_table[sk->sk_protocol].listeners);
838 return res;
839}
840EXPORT_SYMBOL_GPL(netlink_has_listeners);
841
842static __inline__ int netlink_broadcast_deliver(struct sock *sk, struct sk_buff *skb)
843{
844 struct netlink_sock *nlk = nlk_sk(sk);
845
846 if (atomic_read(&sk->sk_rmem_alloc) <= sk->sk_rcvbuf &&
847 !test_bit(0, &nlk->state)) {
848 skb_set_owner_r(skb, sk);
849 skb_queue_tail(&sk->sk_receive_queue, skb);
850 sk->sk_data_ready(sk, skb->len);
851 return atomic_read(&sk->sk_rmem_alloc) > sk->sk_rcvbuf;
852 }
853 return -1;
854}
855
856struct netlink_broadcast_data {
857 struct sock *exclude_sk;
858 u32 pid;
859 u32 group;
860 int failure;
861 int congested;
862 int delivered;
7d877f3b 863 gfp_t allocation;
864 struct sk_buff *skb, *skb2;
865};
866
867static inline int do_one_broadcast(struct sock *sk,
868 struct netlink_broadcast_data *p)
869{
870 struct netlink_sock *nlk = nlk_sk(sk);
871 int val;
872
873 if (p->exclude_sk == sk)
874 goto out;
875
876 if (nlk->pid == p->pid || p->group - 1 >= nlk->ngroups ||
877 !test_bit(p->group - 1, nlk->groups))
878 goto out;
879
880 if (p->failure) {
881 netlink_overrun(sk);
882 goto out;
883 }
884
885 sock_hold(sk);
886 if (p->skb2 == NULL) {
68acc024 887 if (skb_shared(p->skb)) {
888 p->skb2 = skb_clone(p->skb, p->allocation);
889 } else {
890 p->skb2 = skb_get(p->skb);
891 /*
892 * skb ownership may have been set when
893 * delivered to a previous socket.
894 */
895 skb_orphan(p->skb2);
896 }
897 }
898 if (p->skb2 == NULL) {
899 netlink_overrun(sk);
900 /* Clone failed. Notify ALL listeners. */
901 p->failure = 1;
902 } else if ((val = netlink_broadcast_deliver(sk, p->skb2)) < 0) {
903 netlink_overrun(sk);
904 } else {
905 p->congested |= val;
906 p->delivered = 1;
907 p->skb2 = NULL;
908 }
909 sock_put(sk);
910
911out:
912 return 0;
913}
914
915int netlink_broadcast(struct sock *ssk, struct sk_buff *skb, u32 pid,
dd0fc66f 916 u32 group, gfp_t allocation)
917{
918 struct netlink_broadcast_data info;
919 struct hlist_node *node;
920 struct sock *sk;
921
922 skb = netlink_trim(skb, allocation);
923
924 info.exclude_sk = ssk;
925 info.pid = pid;
926 info.group = group;
927 info.failure = 0;
928 info.congested = 0;
929 info.delivered = 0;
930 info.allocation = allocation;
931 info.skb = skb;
932 info.skb2 = NULL;
933
 934 /* While we sleep in clone, do not allow the socket list to change */
935
936 netlink_lock_table();
937
938 sk_for_each_bound(sk, node, &nl_table[ssk->sk_protocol].mc_list)
939 do_one_broadcast(sk, &info);
940
941 kfree_skb(skb);
942
943 netlink_unlock_table();
944
945 if (info.skb2)
946 kfree_skb(info.skb2);
947
948 if (info.delivered) {
949 if (info.congested && (allocation & __GFP_WAIT))
950 yield();
951 return 0;
952 }
953 if (info.failure)
954 return -ENOBUFS;
955 return -ESRCH;
956}
957
958struct netlink_set_err_data {
959 struct sock *exclude_sk;
960 u32 pid;
961 u32 group;
962 int code;
963};
964
965static inline int do_one_set_err(struct sock *sk,
966 struct netlink_set_err_data *p)
967{
968 struct netlink_sock *nlk = nlk_sk(sk);
969
970 if (sk == p->exclude_sk)
971 goto out;
972
973 if (nlk->pid == p->pid || p->group - 1 >= nlk->ngroups ||
974 !test_bit(p->group - 1, nlk->groups))
975 goto out;
976
977 sk->sk_err = p->code;
978 sk->sk_error_report(sk);
979out:
980 return 0;
981}
982
983void netlink_set_err(struct sock *ssk, u32 pid, u32 group, int code)
984{
985 struct netlink_set_err_data info;
986 struct hlist_node *node;
987 struct sock *sk;
988
989 info.exclude_sk = ssk;
990 info.pid = pid;
991 info.group = group;
992 info.code = code;
993
994 read_lock(&nl_table_lock);
995
996 sk_for_each_bound(sk, node, &nl_table[ssk->sk_protocol].mc_list)
997 do_one_set_err(sk, &info);
998
999 read_unlock(&nl_table_lock);
1000}
1001
1002static int netlink_setsockopt(struct socket *sock, int level, int optname,
1003 char __user *optval, int optlen)
1004{
1005 struct sock *sk = sock->sk;
1006 struct netlink_sock *nlk = nlk_sk(sk);
1007 int val = 0, err;
1008
1009 if (level != SOL_NETLINK)
1010 return -ENOPROTOOPT;
1011
1012 if (optlen >= sizeof(int) &&
1013 get_user(val, (int __user *)optval))
1014 return -EFAULT;
1015
1016 switch (optname) {
1017 case NETLINK_PKTINFO:
1018 if (val)
1019 nlk->flags |= NETLINK_RECV_PKTINFO;
1020 else
1021 nlk->flags &= ~NETLINK_RECV_PKTINFO;
1022 err = 0;
1023 break;
1024 case NETLINK_ADD_MEMBERSHIP:
1025 case NETLINK_DROP_MEMBERSHIP: {
1026 unsigned int subscriptions;
1027 int old, new = optname == NETLINK_ADD_MEMBERSHIP ? 1 : 0;
1028
1029 if (!netlink_capable(sock, NL_NONROOT_RECV))
1030 return -EPERM;
1031 if (nlk->groups == NULL) {
1032 err = netlink_alloc_groups(sk);
1033 if (err)
1034 return err;
1035 }
1036 if (!val || val - 1 >= nlk->ngroups)
1037 return -EINVAL;
1038 netlink_table_grab();
1039 old = test_bit(val - 1, nlk->groups);
1040 subscriptions = nlk->subscriptions - old + new;
1041 if (new)
1042 __set_bit(val - 1, nlk->groups);
1043 else
1044 __clear_bit(val - 1, nlk->groups);
1045 netlink_update_subscriptions(sk, subscriptions);
4277a083 1046 netlink_update_listeners(sk);
1047 netlink_table_ungrab();
1048 err = 0;
1049 break;
1050 }
1051 default:
1052 err = -ENOPROTOOPT;
1053 }
1054 return err;
1055}
1056
1057static int netlink_getsockopt(struct socket *sock, int level, int optname,
1058 char __user *optval, int __user *optlen)
1059{
1060 struct sock *sk = sock->sk;
1061 struct netlink_sock *nlk = nlk_sk(sk);
1062 int len, val, err;
1063
1064 if (level != SOL_NETLINK)
1065 return -ENOPROTOOPT;
1066
1067 if (get_user(len, optlen))
1068 return -EFAULT;
1069 if (len < 0)
1070 return -EINVAL;
1071
1072 switch (optname) {
1073 case NETLINK_PKTINFO:
1074 if (len < sizeof(int))
1075 return -EINVAL;
1076 len = sizeof(int);
1077 val = nlk->flags & NETLINK_RECV_PKTINFO ? 1 : 0;
1078 put_user(len, optlen);
1079 put_user(val, optval);
1080 err = 0;
1081 break;
1082 default:
1083 err = -ENOPROTOOPT;
1084 }
1085 return err;
1086}
1087
1088static void netlink_cmsg_recv_pktinfo(struct msghdr *msg, struct sk_buff *skb)
1089{
1090 struct nl_pktinfo info;
1091
1092 info.group = NETLINK_CB(skb).dst_group;
1093 put_cmsg(msg, SOL_NETLINK, NETLINK_PKTINFO, sizeof(info), &info);
1094}
1095
1096static inline void netlink_rcv_wake(struct sock *sk)
1097{
1098 struct netlink_sock *nlk = nlk_sk(sk);
1099
b03efcfb 1100 if (skb_queue_empty(&sk->sk_receive_queue))
1101 clear_bit(0, &nlk->state);
1102 if (!test_bit(0, &nlk->state))
1103 wake_up_interruptible(&nlk->wait);
1104}
1105
1106static int netlink_sendmsg(struct kiocb *kiocb, struct socket *sock,
1107 struct msghdr *msg, size_t len)
1108{
1109 struct sock_iocb *siocb = kiocb_to_siocb(kiocb);
1110 struct sock *sk = sock->sk;
1111 struct netlink_sock *nlk = nlk_sk(sk);
1112 struct sockaddr_nl *addr=msg->msg_name;
1113 u32 dst_pid;
d629b836 1114 u32 dst_group;
1115 struct sk_buff *skb;
1116 int err;
1117 struct scm_cookie scm;
1118
1119 if (msg->msg_flags&MSG_OOB)
1120 return -EOPNOTSUPP;
1121
1122 if (NULL == siocb->scm)
1123 siocb->scm = &scm;
1124 err = scm_send(sock, msg, siocb->scm);
1125 if (err < 0)
1126 return err;
1127
1128 if (msg->msg_namelen) {
1129 if (addr->nl_family != AF_NETLINK)
1130 return -EINVAL;
1131 dst_pid = addr->nl_pid;
1132 dst_group = ffs(addr->nl_groups);
1133 if (dst_group && !netlink_capable(sock, NL_NONROOT_SEND))
1134 return -EPERM;
1135 } else {
1136 dst_pid = nlk->dst_pid;
d629b836 1137 dst_group = nlk->dst_group;
1138 }
1139
1140 if (!nlk->pid) {
1141 err = netlink_autobind(sock);
1142 if (err)
1143 goto out;
1144 }
1145
1146 err = -EMSGSIZE;
1147 if (len > sk->sk_sndbuf - 32)
1148 goto out;
1149 err = -ENOBUFS;
1150 skb = alloc_skb(len, GFP_KERNEL);
1151 if (skb==NULL)
1152 goto out;
1153
1154 NETLINK_CB(skb).pid = nlk->pid;
1da177e4 1155 NETLINK_CB(skb).dst_pid = dst_pid;
d629b836 1156 NETLINK_CB(skb).dst_group = dst_group;
c94c257c 1157 NETLINK_CB(skb).loginuid = audit_get_loginuid(current->audit_context);
e7c34970 1158 selinux_get_task_sid(current, &(NETLINK_CB(skb).sid));
1159 memcpy(NETLINK_CREDS(skb), &siocb->scm->creds, sizeof(struct ucred));
1160
 1161 /* What can I do? Netlink is asynchronous, so we have to
 1162 save the current capabilities here and check them when
 1163 the message is delivered to the corresponding kernel
 1164 module. --ANK (980802)
 1165 */
1166
1167 err = -EFAULT;
1168 if (memcpy_fromiovec(skb_put(skb,len), msg->msg_iov, len)) {
1169 kfree_skb(skb);
1170 goto out;
1171 }
1172
1173 err = security_netlink_send(sk, skb);
1174 if (err) {
1175 kfree_skb(skb);
1176 goto out;
1177 }
1178
d629b836 1179 if (dst_group) {
1da177e4 1180 atomic_inc(&skb->users);
d629b836 1181 netlink_broadcast(sk, skb, dst_pid, dst_group, GFP_KERNEL);
1182 }
1183 err = netlink_unicast(sk, skb, dst_pid, msg->msg_flags&MSG_DONTWAIT);
1184
1185out:
1186 return err;
1187}
1188
1189static int netlink_recvmsg(struct kiocb *kiocb, struct socket *sock,
1190 struct msghdr *msg, size_t len,
1191 int flags)
1192{
1193 struct sock_iocb *siocb = kiocb_to_siocb(kiocb);
1194 struct scm_cookie scm;
1195 struct sock *sk = sock->sk;
1196 struct netlink_sock *nlk = nlk_sk(sk);
1197 int noblock = flags&MSG_DONTWAIT;
1198 size_t copied;
1199 struct sk_buff *skb;
1200 int err;
1201
1202 if (flags&MSG_OOB)
1203 return -EOPNOTSUPP;
1204
1205 copied = 0;
1206
1207 skb = skb_recv_datagram(sk,flags,noblock,&err);
1208 if (skb==NULL)
1209 goto out;
1210
1211 msg->msg_namelen = 0;
1212
1213 copied = skb->len;
1214 if (len < copied) {
1215 msg->msg_flags |= MSG_TRUNC;
1216 copied = len;
1217 }
1218
1219 skb->h.raw = skb->data;
1220 err = skb_copy_datagram_iovec(skb, 0, msg->msg_iov, copied);
1221
1222 if (msg->msg_name) {
1223 struct sockaddr_nl *addr = (struct sockaddr_nl*)msg->msg_name;
1224 addr->nl_family = AF_NETLINK;
1225 addr->nl_pad = 0;
1226 addr->nl_pid = NETLINK_CB(skb).pid;
d629b836 1227 addr->nl_groups = netlink_group_mask(NETLINK_CB(skb).dst_group);
1228 msg->msg_namelen = sizeof(*addr);
1229 }
1230
1231 if (nlk->flags & NETLINK_RECV_PKTINFO)
1232 netlink_cmsg_recv_pktinfo(msg, skb);
1233
1234 if (NULL == siocb->scm) {
1235 memset(&scm, 0, sizeof(scm));
1236 siocb->scm = &scm;
1237 }
1238 siocb->scm->creds = *NETLINK_CREDS(skb);
1239 skb_free_datagram(sk, skb);
1240
1241 if (nlk->cb && atomic_read(&sk->sk_rmem_alloc) <= sk->sk_rcvbuf / 2)
1242 netlink_dump(sk);
1243
1244 scm_recv(sock, msg, siocb->scm, flags);
1245
1246out:
1247 netlink_rcv_wake(sk);
1248 return err ? : copied;
1249}
1250
1251static void netlink_data_ready(struct sock *sk, int len)
1252{
1253 struct netlink_sock *nlk = nlk_sk(sk);
1254
1255 if (nlk->data_ready)
1256 nlk->data_ready(sk, len);
1257 netlink_rcv_wake(sk);
1258}
1259
1260/*
1261 * We export these functions to other modules. They provide a
1262 * complete set of kernel non-blocking support for message
1263 * queueing.
1264 */
1265
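/*
 * Editorial usage sketch; NETLINK_TEST, MY_GROUP, nl_sk and my_input() are
 * hypothetical names, not defined in this file. A subsystem typically
 * creates its kernel-side socket once at init time and then talks to
 * userspace via netlink_unicast()/netlink_broadcast():
 *
 *	nl_sk = netlink_kernel_create(NETLINK_TEST, 0, my_input, THIS_MODULE);
 *	if (nl_sk == NULL)
 *		return -ENOMEM;
 *	...
 *	netlink_broadcast(nl_sk, skb, 0, MY_GROUP, GFP_KERNEL);
 */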
1266struct sock *
1267netlink_kernel_create(int unit, unsigned int groups,
1268 void (*input)(struct sock *sk, int len),
1269 struct module *module)
1270{
1271 struct socket *sock;
1272 struct sock *sk;
77247bbb 1273 struct netlink_sock *nlk;
4277a083 1274 unsigned long *listeners = NULL;
1275
1276 if (!nl_table)
1277 return NULL;
1278
1279 if (unit<0 || unit>=MAX_LINKS)
1280 return NULL;
1281
1282 if (sock_create_lite(PF_NETLINK, SOCK_DGRAM, unit, &sock))
1283 return NULL;
1284
ab33a171 1285 if (__netlink_create(sock, unit) < 0)
77247bbb 1286 goto out_sock_release;
4fdb3bb7 1287
1288 if (groups < 32)
1289 groups = 32;
1290
1291 listeners = kzalloc(NLGRPSZ(groups), GFP_KERNEL);
1292 if (!listeners)
1293 goto out_sock_release;
1294
1295 sk = sock->sk;
1296 sk->sk_data_ready = netlink_data_ready;
1297 if (input)
1298 nlk_sk(sk)->data_ready = input;
1299
1300 if (netlink_insert(sk, 0))
1301 goto out_sock_release;
4fdb3bb7 1302
1303 nlk = nlk_sk(sk);
1304 nlk->flags |= NETLINK_KERNEL_SOCKET;
4fdb3bb7 1305
4fdb3bb7 1306 netlink_table_grab();
1307 nl_table[unit].groups = groups;
1308 nl_table[unit].listeners = listeners;
77247bbb 1309 nl_table[unit].module = module;
ab33a171 1310 nl_table[unit].registered = 1;
4fdb3bb7 1311 netlink_table_ungrab();
1312
1313 return sk;
1314
4fdb3bb7 1315out_sock_release:
4277a083 1316 kfree(listeners);
4fdb3bb7 1317 sock_release(sock);
77247bbb 1318 return NULL;
1319}
1320
1321void netlink_set_nonroot(int protocol, unsigned int flags)
1322{
1323 if ((unsigned int)protocol < MAX_LINKS)
1324 nl_table[protocol].nl_nonroot = flags;
1325}
1326
1327static void netlink_destroy_callback(struct netlink_callback *cb)
1328{
1329 if (cb->skb)
1330 kfree_skb(cb->skb);
1331 kfree(cb);
1332}
1333
1334/*
1335 * It looks a bit ugly.
 1336 * It would be better to create a kernel thread.
1337 */
1338
1339static int netlink_dump(struct sock *sk)
1340{
1341 struct netlink_sock *nlk = nlk_sk(sk);
1342 struct netlink_callback *cb;
1343 struct sk_buff *skb;
1344 struct nlmsghdr *nlh;
1345 int len;
1346
1347 skb = sock_rmalloc(sk, NLMSG_GOODSIZE, 0, GFP_KERNEL);
1348 if (!skb)
1349 return -ENOBUFS;
1350
1351 spin_lock(&nlk->cb_lock);
1352
1353 cb = nlk->cb;
1354 if (cb == NULL) {
1355 spin_unlock(&nlk->cb_lock);
1356 kfree_skb(skb);
1357 return -EINVAL;
1358 }
1359
1360 len = cb->dump(skb, cb);
1361
1362 if (len > 0) {
1363 spin_unlock(&nlk->cb_lock);
1364 skb_queue_tail(&sk->sk_receive_queue, skb);
1365 sk->sk_data_ready(sk, len);
1366 return 0;
1367 }
1368
1797754e 1369 nlh = NLMSG_NEW_ANSWER(skb, cb, NLMSG_DONE, sizeof(len), NLM_F_MULTI);
1370 memcpy(NLMSG_DATA(nlh), &len, sizeof(len));
1371 skb_queue_tail(&sk->sk_receive_queue, skb);
1372 sk->sk_data_ready(sk, skb->len);
1373
1374 if (cb->done)
1375 cb->done(cb);
1376 nlk->cb = NULL;
1377 spin_unlock(&nlk->cb_lock);
1378
1379 netlink_destroy_callback(cb);
1da177e4 1380 return 0;
1381
1382nlmsg_failure:
1383 return -ENOBUFS;
1384}
1385
1386int netlink_dump_start(struct sock *ssk, struct sk_buff *skb,
1387 struct nlmsghdr *nlh,
1388 int (*dump)(struct sk_buff *skb, struct netlink_callback*),
1389 int (*done)(struct netlink_callback*))
1390{
1391 struct netlink_callback *cb;
1392 struct sock *sk;
1393 struct netlink_sock *nlk;
1394
0da974f4 1395 cb = kzalloc(sizeof(*cb), GFP_KERNEL);
1396 if (cb == NULL)
1397 return -ENOBUFS;
1398
1399 cb->dump = dump;
1400 cb->done = done;
1401 cb->nlh = nlh;
1402 atomic_inc(&skb->users);
1403 cb->skb = skb;
1404
1405 sk = netlink_lookup(ssk->sk_protocol, NETLINK_CB(skb).pid);
1406 if (sk == NULL) {
1407 netlink_destroy_callback(cb);
1408 return -ECONNREFUSED;
1409 }
1410 nlk = nlk_sk(sk);
1411 /* A dump is in progress... */
1412 spin_lock(&nlk->cb_lock);
1413 if (nlk->cb) {
1414 spin_unlock(&nlk->cb_lock);
1415 netlink_destroy_callback(cb);
1416 sock_put(sk);
1417 return -EBUSY;
1418 }
1419 nlk->cb = cb;
1420 spin_unlock(&nlk->cb_lock);
1421
1422 netlink_dump(sk);
1423 sock_put(sk);
1424 return 0;
1425}
1426
1427void netlink_ack(struct sk_buff *in_skb, struct nlmsghdr *nlh, int err)
1428{
1429 struct sk_buff *skb;
1430 struct nlmsghdr *rep;
1431 struct nlmsgerr *errmsg;
1432 int size;
1433
1434 if (err == 0)
1435 size = NLMSG_SPACE(sizeof(struct nlmsgerr));
1436 else
1437 size = NLMSG_SPACE(4 + NLMSG_ALIGN(nlh->nlmsg_len));
1438
1439 skb = alloc_skb(size, GFP_KERNEL);
1440 if (!skb) {
1441 struct sock *sk;
1442
1443 sk = netlink_lookup(in_skb->sk->sk_protocol,
1444 NETLINK_CB(in_skb).pid);
1445 if (sk) {
1446 sk->sk_err = ENOBUFS;
1447 sk->sk_error_report(sk);
1448 sock_put(sk);
1449 }
1450 return;
1451 }
1452
1453 rep = __nlmsg_put(skb, NETLINK_CB(in_skb).pid, nlh->nlmsg_seq,
1797754e 1454 NLMSG_ERROR, sizeof(struct nlmsgerr), 0);
1455 errmsg = NLMSG_DATA(rep);
1456 errmsg->error = err;
1457 memcpy(&errmsg->msg, nlh, err ? nlh->nlmsg_len : sizeof(struct nlmsghdr));
1458 netlink_unicast(in_skb->sk, skb, NETLINK_CB(in_skb).pid, MSG_DONTWAIT);
1459}
1460
1461static int netlink_rcv_skb(struct sk_buff *skb, int (*cb)(struct sk_buff *,
1462 struct nlmsghdr *, int *))
1463{
1464 unsigned int total_len;
1465 struct nlmsghdr *nlh;
1466 int err;
1467
1468 while (skb->len >= nlmsg_total_size(0)) {
1469 nlh = (struct nlmsghdr *) skb->data;
1470
ad8e4b75 1471 if (nlh->nlmsg_len < NLMSG_HDRLEN || skb->len < nlh->nlmsg_len)
1472 return 0;
1473
1474 total_len = min(NLMSG_ALIGN(nlh->nlmsg_len), skb->len);
1475
1476 if (cb(skb, nlh, &err) < 0) {
1477 /* Not an error, but we have to interrupt processing
 1478 * here. Note that in this case we do not pull the
 1479 * message from the skb; it will be processed later.
1480 */
1481 if (err == 0)
1482 return -1;
1483 netlink_ack(skb, nlh, err);
1484 } else if (nlh->nlmsg_flags & NLM_F_ACK)
1485 netlink_ack(skb, nlh, 0);
1486
1487 skb_pull(skb, total_len);
1488 }
1489
1490 return 0;
1491}
1492
1493/**
 1494 * netlink_run_queue - Process netlink receive queue.
1495 * @sk: Netlink socket containing the queue
1496 * @qlen: Place to store queue length upon entry
1497 * @cb: Callback function invoked for each netlink message found
1498 *
1499 * Processes as much as there was in the queue upon entry and invokes
1500 * a callback function for each netlink message found. The callback
1501 * function may refuse a message by returning a negative error code
1502 * but setting the error pointer to 0 in which case this function
1503 * returns with a qlen != 0.
1504 *
1505 * qlen must be initialized to 0 before the initial entry, afterwards
1506 * the function may be called repeatedly until qlen reaches 0.
1507 */
1508void netlink_run_queue(struct sock *sk, unsigned int *qlen,
1509 int (*cb)(struct sk_buff *, struct nlmsghdr *, int *))
1510{
1511 struct sk_buff *skb;
1512
1513 if (!*qlen || *qlen > skb_queue_len(&sk->sk_receive_queue))
1514 *qlen = skb_queue_len(&sk->sk_receive_queue);
1515
1516 for (; *qlen; (*qlen)--) {
1517 skb = skb_dequeue(&sk->sk_receive_queue);
1518 if (netlink_rcv_skb(skb, cb)) {
1519 if (skb->len)
1520 skb_queue_head(&sk->sk_receive_queue, skb);
1521 else {
1522 kfree_skb(skb);
1523 (*qlen)--;
1524 }
1525 break;
1526 }
1527
1528 kfree_skb(skb);
1529 }
1530}
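/*
 * Editorial usage sketch for netlink_run_queue(); my_data_ready() and
 * my_rcv_msg() are hypothetical names. qlen starts at zero and, if the
 * callback refuses a message, processing simply resumes on a later call:
 *
 *	static void my_data_ready(struct sock *sk, int len)
 *	{
 *		unsigned int qlen = 0;
 *
 *		netlink_run_queue(sk, &qlen, my_rcv_msg);
 *	}
 */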
1531
1532/**
1533 * netlink_queue_skip - Skip netlink message while processing queue.
1534 * @nlh: Netlink message to be skipped
1535 * @skb: Socket buffer containing the netlink messages.
1536 *
1537 * Pulls the given netlink message off the socket buffer so the next
 1538 * call to netlink_run_queue() will not reconsider the message.
1539 */
1540void netlink_queue_skip(struct nlmsghdr *nlh, struct sk_buff *skb)
1541{
1542 int msglen = NLMSG_ALIGN(nlh->nlmsg_len);
1543
1544 if (msglen > skb->len)
1545 msglen = skb->len;
1546
1547 skb_pull(skb, msglen);
1548}
1549
1550#ifdef CONFIG_PROC_FS
1551struct nl_seq_iter {
1552 int link;
1553 int hash_idx;
1554};
1555
1556static struct sock *netlink_seq_socket_idx(struct seq_file *seq, loff_t pos)
1557{
1558 struct nl_seq_iter *iter = seq->private;
1559 int i, j;
1560 struct sock *s;
1561 struct hlist_node *node;
1562 loff_t off = 0;
1563
1564 for (i=0; i<MAX_LINKS; i++) {
1565 struct nl_pid_hash *hash = &nl_table[i].hash;
1566
1567 for (j = 0; j <= hash->mask; j++) {
1568 sk_for_each(s, node, &hash->table[j]) {
1569 if (off == pos) {
1570 iter->link = i;
1571 iter->hash_idx = j;
1572 return s;
1573 }
1574 ++off;
1575 }
1576 }
1577 }
1578 return NULL;
1579}
1580
1581static void *netlink_seq_start(struct seq_file *seq, loff_t *pos)
1582{
1583 read_lock(&nl_table_lock);
1584 return *pos ? netlink_seq_socket_idx(seq, *pos - 1) : SEQ_START_TOKEN;
1585}
1586
1587static void *netlink_seq_next(struct seq_file *seq, void *v, loff_t *pos)
1588{
1589 struct sock *s;
1590 struct nl_seq_iter *iter;
1591 int i, j;
1592
1593 ++*pos;
1594
1595 if (v == SEQ_START_TOKEN)
1596 return netlink_seq_socket_idx(seq, 0);
1597
1598 s = sk_next(v);
1599 if (s)
1600 return s;
1601
1602 iter = seq->private;
1603 i = iter->link;
1604 j = iter->hash_idx + 1;
1605
1606 do {
1607 struct nl_pid_hash *hash = &nl_table[i].hash;
1608
1609 for (; j <= hash->mask; j++) {
1610 s = sk_head(&hash->table[j]);
1611 if (s) {
1612 iter->link = i;
1613 iter->hash_idx = j;
1614 return s;
1615 }
1616 }
1617
1618 j = 0;
1619 } while (++i < MAX_LINKS);
1620
1621 return NULL;
1622}
1623
1624static void netlink_seq_stop(struct seq_file *seq, void *v)
1625{
1626 read_unlock(&nl_table_lock);
1627}
1628
1629
1630static int netlink_seq_show(struct seq_file *seq, void *v)
1631{
1632 if (v == SEQ_START_TOKEN)
1633 seq_puts(seq,
1634 "sk Eth Pid Groups "
1635 "Rmem Wmem Dump Locks\n");
1636 else {
1637 struct sock *s = v;
1638 struct netlink_sock *nlk = nlk_sk(s);
1639
1640 seq_printf(seq, "%p %-3d %-6d %08x %-8d %-8d %p %d\n",
1641 s,
1642 s->sk_protocol,
1643 nlk->pid,
513c2500 1644 nlk->groups ? (u32)nlk->groups[0] : 0,
1da177e4
LT
1645 atomic_read(&s->sk_rmem_alloc),
1646 atomic_read(&s->sk_wmem_alloc),
1647 nlk->cb,
1648 atomic_read(&s->sk_refcnt)
1649 );
1650
1651 }
1652 return 0;
1653}
1654
1655static struct seq_operations netlink_seq_ops = {
1656 .start = netlink_seq_start,
1657 .next = netlink_seq_next,
1658 .stop = netlink_seq_stop,
1659 .show = netlink_seq_show,
1660};
1661
1662
1663static int netlink_seq_open(struct inode *inode, struct file *file)
1664{
1665 struct seq_file *seq;
1666 struct nl_seq_iter *iter;
1667 int err;
1668
0da974f4 1669 iter = kzalloc(sizeof(*iter), GFP_KERNEL);
1670 if (!iter)
1671 return -ENOMEM;
1672
1673 err = seq_open(file, &netlink_seq_ops);
1674 if (err) {
1675 kfree(iter);
1676 return err;
1677 }
1678
1679 seq = file->private_data;
1680 seq->private = iter;
1681 return 0;
1682}
1683
1684static struct file_operations netlink_seq_fops = {
1685 .owner = THIS_MODULE,
1686 .open = netlink_seq_open,
1687 .read = seq_read,
1688 .llseek = seq_lseek,
1689 .release = seq_release_private,
1690};
1691
1692#endif
1693
1694int netlink_register_notifier(struct notifier_block *nb)
1695{
e041c683 1696 return atomic_notifier_chain_register(&netlink_chain, nb);
1697}
1698
1699int netlink_unregister_notifier(struct notifier_block *nb)
1700{
e041c683 1701 return atomic_notifier_chain_unregister(&netlink_chain, nb);
1702}
1703
90ddc4f0 1704static const struct proto_ops netlink_ops = {
1705 .family = PF_NETLINK,
1706 .owner = THIS_MODULE,
1707 .release = netlink_release,
1708 .bind = netlink_bind,
1709 .connect = netlink_connect,
1710 .socketpair = sock_no_socketpair,
1711 .accept = sock_no_accept,
1712 .getname = netlink_getname,
1713 .poll = datagram_poll,
1714 .ioctl = sock_no_ioctl,
1715 .listen = sock_no_listen,
1716 .shutdown = sock_no_shutdown,
1717 .setsockopt = netlink_setsockopt,
1718 .getsockopt = netlink_getsockopt,
1719 .sendmsg = netlink_sendmsg,
1720 .recvmsg = netlink_recvmsg,
1721 .mmap = sock_no_mmap,
1722 .sendpage = sock_no_sendpage,
1723};
1724
1725static struct net_proto_family netlink_family_ops = {
1726 .family = PF_NETLINK,
1727 .create = netlink_create,
1728 .owner = THIS_MODULE, /* for consistency 8) */
1729};
1730
1731extern void netlink_skb_parms_too_large(void);
1732
1733static int __init netlink_proto_init(void)
1734{
1735 struct sk_buff *dummy_skb;
1736 int i;
1737 unsigned long max;
1738 unsigned int order;
1739 int err = proto_register(&netlink_proto, 0);
1740
1741 if (err != 0)
1742 goto out;
1743
1744 if (sizeof(struct netlink_skb_parms) > sizeof(dummy_skb->cb))
1745 netlink_skb_parms_too_large();
1746
0da974f4 1747 nl_table = kcalloc(MAX_LINKS, sizeof(*nl_table), GFP_KERNEL);
1748 if (!nl_table) {
1749enomem:
1750 printk(KERN_CRIT "netlink_init: Cannot allocate nl_table\n");
1751 return -ENOMEM;
1752 }
1753
1754 if (num_physpages >= (128 * 1024))
1755 max = num_physpages >> (21 - PAGE_SHIFT);
1756 else
1757 max = num_physpages >> (23 - PAGE_SHIFT);
1758
1759 order = get_bitmask_order(max) - 1 + PAGE_SHIFT;
1760 max = (1UL << order) / sizeof(struct hlist_head);
1761 order = get_bitmask_order(max > UINT_MAX ? UINT_MAX : max) - 1;
1762
1763 for (i = 0; i < MAX_LINKS; i++) {
1764 struct nl_pid_hash *hash = &nl_table[i].hash;
1765
1766 hash->table = nl_pid_hash_alloc(1 * sizeof(*hash->table));
1767 if (!hash->table) {
1768 while (i-- > 0)
1769 nl_pid_hash_free(nl_table[i].hash.table,
1770 1 * sizeof(*hash->table));
1771 kfree(nl_table);
1772 goto enomem;
1773 }
1774 memset(hash->table, 0, 1 * sizeof(*hash->table));
1775 hash->max_shift = order;
1776 hash->shift = 0;
1777 hash->mask = 0;
1778 hash->rehash_time = jiffies;
1779 }
1780
1781 sock_register(&netlink_family_ops);
1782#ifdef CONFIG_PROC_FS
1783 proc_net_fops_create("netlink", 0, &netlink_seq_fops);
1784#endif
1785 /* The netlink device handler may be needed early. */
1786 rtnetlink_init();
1787out:
1788 return err;
1789}
1790
1da177e4 1791core_initcall(netlink_proto_init);
1792
1793EXPORT_SYMBOL(netlink_ack);
1794EXPORT_SYMBOL(netlink_run_queue);
1795EXPORT_SYMBOL(netlink_queue_skip);
1796EXPORT_SYMBOL(netlink_broadcast);
1797EXPORT_SYMBOL(netlink_dump_start);
1798EXPORT_SYMBOL(netlink_kernel_create);
1799EXPORT_SYMBOL(netlink_register_notifier);
1800EXPORT_SYMBOL(netlink_set_err);
1801EXPORT_SYMBOL(netlink_set_nonroot);
1802EXPORT_SYMBOL(netlink_unicast);
1803EXPORT_SYMBOL(netlink_unregister_notifier);
1804