/*
 * NETLINK	Kernel-user communication protocol.
 *
 * 		Authors:	Alan Cox <alan@lxorguk.ukuu.org.uk>
 * 				Alexey Kuznetsov <kuznet@ms2.inr.ac.ru>
 * 				Patrick McHardy <kaber@trash.net>
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
 *
 * Tue Jun 26 14:36:48 MEST 2001 Herbert "herp" Rosmanith
 *                               added netlink_proto_exit
 * Tue Jan 22 18:32:44 BRST 2002 Arnaldo C. de Melo <acme@conectiva.com.br>
 * 				 use nlk_sk, as sk->protinfo is on a diet 8)
 * Fri Jul 22 19:51:12 MEST 2005 Harald Welte <laforge@gnumonks.org>
 * 				- inc module use count of module that owns
 * 				  the kernel socket in case userspace opens
 * 				  socket of same protocol
 * 				- remove all module support, since netlink is
 * 				  mandatory if CONFIG_NET=y these days
 */

#include <linux/module.h>

#include <linux/capability.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/stat.h>
#include <linux/socket.h>
#include <linux/un.h>
#include <linux/fcntl.h>
#include <linux/termios.h>
#include <linux/sockios.h>
#include <linux/net.h>
#include <linux/fs.h>
#include <linux/slab.h>
#include <asm/uaccess.h>
#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include <linux/rtnetlink.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/notifier.h>
#include <linux/security.h>
#include <linux/jhash.h>
#include <linux/jiffies.h>
#include <linux/random.h>
#include <linux/bitops.h>
#include <linux/mm.h>
#include <linux/types.h>
#include <linux/audit.h>
#include <linux/mutex.h>
#include <linux/vmalloc.h>
#include <asm/cacheflush.h>

#include <net/net_namespace.h>
#include <net/sock.h>
#include <net/scm.h>
#include <net/netlink.h>

#include "af_netlink.h"

struct listeners {
	struct rcu_head		rcu;
	unsigned long		masks[0];
};

/* state bits */
#define NETLINK_CONGESTED	0x0

/* flags */
#define NETLINK_KERNEL_SOCKET	0x1
#define NETLINK_RECV_PKTINFO	0x2
#define NETLINK_BROADCAST_SEND_ERROR	0x4
#define NETLINK_RECV_NO_ENOBUFS	0x8

static inline int netlink_is_kernel(struct sock *sk)
{
	return nlk_sk(sk)->flags & NETLINK_KERNEL_SOCKET;
}

struct netlink_table *nl_table;
EXPORT_SYMBOL_GPL(nl_table);

static DECLARE_WAIT_QUEUE_HEAD(nl_table_wait);

static int netlink_dump(struct sock *sk);
static void netlink_skb_destructor(struct sk_buff *skb);

DEFINE_RWLOCK(nl_table_lock);
EXPORT_SYMBOL_GPL(nl_table_lock);
static atomic_t nl_table_users = ATOMIC_INIT(0);

#define nl_deref_protected(X) rcu_dereference_protected(X, lockdep_is_held(&nl_table_lock))

static ATOMIC_NOTIFIER_HEAD(netlink_chain);

static inline u32 netlink_group_mask(u32 group)
{
	return group ? 1 << (group - 1) : 0;
}

static inline struct hlist_head *nl_portid_hashfn(struct nl_portid_hash *hash, u32 portid)
{
	return &hash->table[jhash_1word(portid, hash->rnd) & hash->mask];
}

static void netlink_overrun(struct sock *sk)
{
	struct netlink_sock *nlk = nlk_sk(sk);

	if (!(nlk->flags & NETLINK_RECV_NO_ENOBUFS)) {
		if (!test_and_set_bit(NETLINK_CONGESTED, &nlk_sk(sk)->state)) {
			sk->sk_err = ENOBUFS;
			sk->sk_error_report(sk);
		}
	}
	atomic_inc(&sk->sk_drops);
}

static void netlink_rcv_wake(struct sock *sk)
{
	struct netlink_sock *nlk = nlk_sk(sk);

	if (skb_queue_empty(&sk->sk_receive_queue))
		clear_bit(NETLINK_CONGESTED, &nlk->state);
	if (!test_bit(NETLINK_CONGESTED, &nlk->state))
		wake_up_interruptible(&nlk->wait);
}

#ifdef CONFIG_NETLINK_MMAP
static bool netlink_skb_is_mmaped(const struct sk_buff *skb)
{
	return NETLINK_CB(skb).flags & NETLINK_SKB_MMAPED;
}

static bool netlink_rx_is_mmaped(struct sock *sk)
{
	return nlk_sk(sk)->rx_ring.pg_vec != NULL;
}

static bool netlink_tx_is_mmaped(struct sock *sk)
{
	return nlk_sk(sk)->tx_ring.pg_vec != NULL;
}

static __pure struct page *pgvec_to_page(const void *addr)
{
	if (is_vmalloc_addr(addr))
		return vmalloc_to_page(addr);
	else
		return virt_to_page(addr);
}

static void free_pg_vec(void **pg_vec, unsigned int order, unsigned int len)
{
	unsigned int i;

	for (i = 0; i < len; i++) {
		if (pg_vec[i] != NULL) {
			if (is_vmalloc_addr(pg_vec[i]))
				vfree(pg_vec[i]);
			else
				free_pages((unsigned long)pg_vec[i], order);
		}
	}
	kfree(pg_vec);
}

static void *alloc_one_pg_vec_page(unsigned long order)
{
	void *buffer;
	gfp_t gfp_flags = GFP_KERNEL | __GFP_COMP | __GFP_ZERO |
			  __GFP_NOWARN | __GFP_NORETRY;

	buffer = (void *)__get_free_pages(gfp_flags, order);
	if (buffer != NULL)
		return buffer;

	buffer = vzalloc((1 << order) * PAGE_SIZE);
	if (buffer != NULL)
		return buffer;

	gfp_flags &= ~__GFP_NORETRY;
	return (void *)__get_free_pages(gfp_flags, order);
}

static void **alloc_pg_vec(struct netlink_sock *nlk,
			   struct nl_mmap_req *req, unsigned int order)
{
	unsigned int block_nr = req->nm_block_nr;
	unsigned int i;
	void **pg_vec, *ptr;

	pg_vec = kcalloc(block_nr, sizeof(void *), GFP_KERNEL);
	if (pg_vec == NULL)
		return NULL;

	for (i = 0; i < block_nr; i++) {
		pg_vec[i] = ptr = alloc_one_pg_vec_page(order);
		if (pg_vec[i] == NULL)
			goto err1;
	}

	return pg_vec;

err1:
	free_pg_vec(pg_vec, order, block_nr);
	return NULL;
}

static void
__netlink_set_ring(struct sock *sk, struct nl_mmap_req *req, bool tx_ring, void **pg_vec,
		   unsigned int order)
{
	struct netlink_sock *nlk = nlk_sk(sk);
	struct sk_buff_head *queue;
	struct netlink_ring *ring;

	queue = tx_ring ? &sk->sk_write_queue : &sk->sk_receive_queue;
	ring  = tx_ring ? &nlk->tx_ring : &nlk->rx_ring;

	spin_lock_bh(&queue->lock);

	ring->frame_max		= req->nm_frame_nr - 1;
	ring->head		= 0;
	ring->frame_size	= req->nm_frame_size;
	ring->pg_vec_pages	= req->nm_block_size / PAGE_SIZE;

	swap(ring->pg_vec_len, req->nm_block_nr);
	swap(ring->pg_vec_order, order);
	swap(ring->pg_vec, pg_vec);

	__skb_queue_purge(queue);
	spin_unlock_bh(&queue->lock);

	WARN_ON(atomic_read(&nlk->mapped));

	if (pg_vec)
		free_pg_vec(pg_vec, order, req->nm_block_nr);
}

static int netlink_set_ring(struct sock *sk, struct nl_mmap_req *req,
			    bool tx_ring)
{
	struct netlink_sock *nlk = nlk_sk(sk);
	struct netlink_ring *ring;
	void **pg_vec = NULL;
	unsigned int order = 0;

	ring  = tx_ring ? &nlk->tx_ring : &nlk->rx_ring;

	if (atomic_read(&nlk->mapped))
		return -EBUSY;
	if (atomic_read(&ring->pending))
		return -EBUSY;

	if (req->nm_block_nr) {
		if (ring->pg_vec != NULL)
			return -EBUSY;

		if ((int)req->nm_block_size <= 0)
			return -EINVAL;
		if (!IS_ALIGNED(req->nm_block_size, PAGE_SIZE))
			return -EINVAL;
		if (req->nm_frame_size < NL_MMAP_HDRLEN)
			return -EINVAL;
		if (!IS_ALIGNED(req->nm_frame_size, NL_MMAP_MSG_ALIGNMENT))
			return -EINVAL;

		ring->frames_per_block = req->nm_block_size /
					 req->nm_frame_size;
		if (ring->frames_per_block == 0)
			return -EINVAL;
		if (ring->frames_per_block * req->nm_block_nr !=
		    req->nm_frame_nr)
			return -EINVAL;

		order = get_order(req->nm_block_size);
		pg_vec = alloc_pg_vec(nlk, req, order);
		if (pg_vec == NULL)
			return -ENOMEM;
	} else {
		if (req->nm_frame_nr)
			return -EINVAL;
	}

	mutex_lock(&nlk->pg_vec_lock);
	if (atomic_read(&nlk->mapped) == 0) {
		__netlink_set_ring(sk, req, tx_ring, pg_vec, order);
		mutex_unlock(&nlk->pg_vec_lock);
		return 0;
	}

	mutex_unlock(&nlk->pg_vec_lock);

	if (pg_vec)
		free_pg_vec(pg_vec, order, req->nm_block_nr);

	return -EBUSY;
}

static void netlink_mm_open(struct vm_area_struct *vma)
{
	struct file *file = vma->vm_file;
	struct socket *sock = file->private_data;
	struct sock *sk = sock->sk;

	if (sk)
		atomic_inc(&nlk_sk(sk)->mapped);
}

static void netlink_mm_close(struct vm_area_struct *vma)
{
	struct file *file = vma->vm_file;
	struct socket *sock = file->private_data;
	struct sock *sk = sock->sk;

	if (sk)
		atomic_dec(&nlk_sk(sk)->mapped);
}

static const struct vm_operations_struct netlink_mmap_ops = {
	.open	= netlink_mm_open,
	.close	= netlink_mm_close,
};

static int netlink_mmap(struct file *file, struct socket *sock,
			struct vm_area_struct *vma)
{
	struct sock *sk = sock->sk;
	struct netlink_sock *nlk = nlk_sk(sk);
	struct netlink_ring *ring;
	unsigned long start, size, expected;
	unsigned int i;
	int err = -EINVAL;

	if (vma->vm_pgoff)
		return -EINVAL;

	mutex_lock(&nlk->pg_vec_lock);

	expected = 0;
	for (ring = &nlk->rx_ring; ring <= &nlk->tx_ring; ring++) {
		if (ring->pg_vec == NULL)
			continue;
		expected += ring->pg_vec_len * ring->pg_vec_pages * PAGE_SIZE;
	}

	if (expected == 0)
		goto out;

	size = vma->vm_end - vma->vm_start;
	if (size != expected)
		goto out;

	start = vma->vm_start;
	for (ring = &nlk->rx_ring; ring <= &nlk->tx_ring; ring++) {
		if (ring->pg_vec == NULL)
			continue;

		for (i = 0; i < ring->pg_vec_len; i++) {
			struct page *page;
			void *kaddr = ring->pg_vec[i];
			unsigned int pg_num;

			for (pg_num = 0; pg_num < ring->pg_vec_pages; pg_num++) {
				page = pgvec_to_page(kaddr);
				err = vm_insert_page(vma, start, page);
				if (err < 0)
					goto out;
				start += PAGE_SIZE;
				kaddr += PAGE_SIZE;
			}
		}
	}

	atomic_inc(&nlk->mapped);
	vma->vm_ops = &netlink_mmap_ops;
	err = 0;

out:
	mutex_unlock(&nlk->pg_vec_lock);
	return err;
}

static void netlink_frame_flush_dcache(const struct nl_mmap_hdr *hdr, unsigned int nm_len)
{
#if ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE == 1
	struct page *p_start, *p_end;

	/* First page is flushed through netlink_{get,set}_status */
	p_start = pgvec_to_page(hdr + PAGE_SIZE);
	p_end   = pgvec_to_page((void *)hdr + NL_MMAP_HDRLEN + nm_len - 1);
	while (p_start <= p_end) {
		flush_dcache_page(p_start);
		p_start++;
	}
#endif
}

static enum nl_mmap_status netlink_get_status(const struct nl_mmap_hdr *hdr)
{
	flush_dcache_page(pgvec_to_page(hdr));
	return hdr->nm_status;
}

static void netlink_set_status(struct nl_mmap_hdr *hdr,
			       enum nl_mmap_status status)
{
	hdr->nm_status = status;
	flush_dcache_page(pgvec_to_page(hdr));
}

static struct nl_mmap_hdr *
__netlink_lookup_frame(const struct netlink_ring *ring, unsigned int pos)
{
	unsigned int pg_vec_pos, frame_off;

	pg_vec_pos = pos / ring->frames_per_block;
	frame_off  = pos % ring->frames_per_block;

	return ring->pg_vec[pg_vec_pos] + (frame_off * ring->frame_size);
}

static struct nl_mmap_hdr *
netlink_lookup_frame(const struct netlink_ring *ring, unsigned int pos,
		     enum nl_mmap_status status)
{
	struct nl_mmap_hdr *hdr;

	hdr = __netlink_lookup_frame(ring, pos);
	if (netlink_get_status(hdr) != status)
		return NULL;

	return hdr;
}

static struct nl_mmap_hdr *
netlink_current_frame(const struct netlink_ring *ring,
		      enum nl_mmap_status status)
{
	return netlink_lookup_frame(ring, ring->head, status);
}

static struct nl_mmap_hdr *
netlink_previous_frame(const struct netlink_ring *ring,
		       enum nl_mmap_status status)
{
	unsigned int prev;

	prev = ring->head ? ring->head - 1 : ring->frame_max;
	return netlink_lookup_frame(ring, prev, status);
}

static void netlink_increment_head(struct netlink_ring *ring)
{
	ring->head = ring->head != ring->frame_max ? ring->head + 1 : 0;
}

static void netlink_forward_ring(struct netlink_ring *ring)
{
	unsigned int head = ring->head, pos = head;
	const struct nl_mmap_hdr *hdr;

	do {
		hdr = __netlink_lookup_frame(ring, pos);
		if (hdr->nm_status == NL_MMAP_STATUS_UNUSED)
			break;
		if (hdr->nm_status != NL_MMAP_STATUS_SKIP)
			break;
		netlink_increment_head(ring);
	} while (ring->head != head);
}

static bool netlink_dump_space(struct netlink_sock *nlk)
{
	struct netlink_ring *ring = &nlk->rx_ring;
	struct nl_mmap_hdr *hdr;
	unsigned int n;

	hdr = netlink_current_frame(ring, NL_MMAP_STATUS_UNUSED);
	if (hdr == NULL)
		return false;

	n = ring->head + ring->frame_max / 2;
	if (n > ring->frame_max)
		n -= ring->frame_max;

	hdr = __netlink_lookup_frame(ring, n);

	return hdr->nm_status == NL_MMAP_STATUS_UNUSED;
}

static unsigned int netlink_poll(struct file *file, struct socket *sock,
				 poll_table *wait)
{
	struct sock *sk = sock->sk;
	struct netlink_sock *nlk = nlk_sk(sk);
	unsigned int mask;
	int err;

	if (nlk->rx_ring.pg_vec != NULL) {
		/* Memory mapped sockets don't call recvmsg(), so flow control
		 * for dumps is performed here. A dump is allowed to continue
		 * if at least half the ring is unused.
		 */
		while (nlk->cb != NULL && netlink_dump_space(nlk)) {
			err = netlink_dump(sk);
			if (err < 0) {
				sk->sk_err = err;
				sk->sk_error_report(sk);
				break;
			}
		}
		netlink_rcv_wake(sk);
	}

	mask = datagram_poll(file, sock, wait);

	spin_lock_bh(&sk->sk_receive_queue.lock);
	if (nlk->rx_ring.pg_vec) {
		netlink_forward_ring(&nlk->rx_ring);
		if (!netlink_previous_frame(&nlk->rx_ring, NL_MMAP_STATUS_UNUSED))
			mask |= POLLIN | POLLRDNORM;
	}
	spin_unlock_bh(&sk->sk_receive_queue.lock);

	spin_lock_bh(&sk->sk_write_queue.lock);
	if (nlk->tx_ring.pg_vec) {
		if (netlink_current_frame(&nlk->tx_ring, NL_MMAP_STATUS_UNUSED))
			mask |= POLLOUT | POLLWRNORM;
	}
	spin_unlock_bh(&sk->sk_write_queue.lock);

	return mask;
}

static struct nl_mmap_hdr *netlink_mmap_hdr(struct sk_buff *skb)
{
	return (struct nl_mmap_hdr *)(skb->head - NL_MMAP_HDRLEN);
}

static void netlink_ring_setup_skb(struct sk_buff *skb, struct sock *sk,
				   struct netlink_ring *ring,
				   struct nl_mmap_hdr *hdr)
{
	unsigned int size;
	void *data;

	size = ring->frame_size - NL_MMAP_HDRLEN;
	data = (void *)hdr + NL_MMAP_HDRLEN;

	skb->head	= data;
	skb->data	= data;
	skb_reset_tail_pointer(skb);
	skb->end	= skb->tail + size;
	skb->len	= 0;

	skb->destructor	= netlink_skb_destructor;
	NETLINK_CB(skb).flags |= NETLINK_SKB_MMAPED;
	NETLINK_CB(skb).sk = sk;
}

static int netlink_mmap_sendmsg(struct sock *sk, struct msghdr *msg,
				u32 dst_portid, u32 dst_group,
				struct sock_iocb *siocb)
{
	struct netlink_sock *nlk = nlk_sk(sk);
	struct netlink_ring *ring;
	struct nl_mmap_hdr *hdr;
	struct sk_buff *skb;
	unsigned int maxlen;
	int err = 0, len = 0;

	mutex_lock(&nlk->pg_vec_lock);

	ring   = &nlk->tx_ring;
	maxlen = ring->frame_size - NL_MMAP_HDRLEN;

	do {
		unsigned int nm_len;

		hdr = netlink_current_frame(ring, NL_MMAP_STATUS_VALID);
		if (hdr == NULL) {
			if (!(msg->msg_flags & MSG_DONTWAIT) &&
			    atomic_read(&nlk->tx_ring.pending))
				schedule();
			continue;
		}

		nm_len = ACCESS_ONCE(hdr->nm_len);
		if (nm_len > maxlen) {
			err = -EINVAL;
			goto out;
		}

		netlink_frame_flush_dcache(hdr, nm_len);

		skb = alloc_skb(nm_len, GFP_KERNEL);
		if (skb == NULL) {
			err = -ENOBUFS;
			goto out;
		}
		__skb_put(skb, nm_len);
		memcpy(skb->data, (void *)hdr + NL_MMAP_HDRLEN, nm_len);
		netlink_set_status(hdr, NL_MMAP_STATUS_UNUSED);

		netlink_increment_head(ring);

		NETLINK_CB(skb).portid	  = nlk->portid;
		NETLINK_CB(skb).dst_group = dst_group;
		NETLINK_CB(skb).creds	  = siocb->scm->creds;

		err = security_netlink_send(sk, skb);
		if (err) {
			kfree_skb(skb);
			goto out;
		}

		if (unlikely(dst_group)) {
			atomic_inc(&skb->users);
			netlink_broadcast(sk, skb, dst_portid, dst_group,
					  GFP_KERNEL);
		}
		err = netlink_unicast(sk, skb, dst_portid,
				      msg->msg_flags & MSG_DONTWAIT);
		if (err < 0)
			goto out;
		len += err;

	} while (hdr != NULL ||
		 (!(msg->msg_flags & MSG_DONTWAIT) &&
		  atomic_read(&nlk->tx_ring.pending)));

	if (len > 0)
		err = len;

out:
	mutex_unlock(&nlk->pg_vec_lock);
	return err;
}

static void netlink_queue_mmaped_skb(struct sock *sk, struct sk_buff *skb)
{
	struct nl_mmap_hdr *hdr;

	hdr = netlink_mmap_hdr(skb);
	hdr->nm_len	= skb->len;
	hdr->nm_group	= NETLINK_CB(skb).dst_group;
	hdr->nm_pid	= NETLINK_CB(skb).creds.pid;
	hdr->nm_uid	= from_kuid(sk_user_ns(sk), NETLINK_CB(skb).creds.uid);
	hdr->nm_gid	= from_kgid(sk_user_ns(sk), NETLINK_CB(skb).creds.gid);
	netlink_frame_flush_dcache(hdr, hdr->nm_len);
	netlink_set_status(hdr, NL_MMAP_STATUS_VALID);

	NETLINK_CB(skb).flags |= NETLINK_SKB_DELIVERED;
	kfree_skb(skb);
}

static void netlink_ring_set_copied(struct sock *sk, struct sk_buff *skb)
{
	struct netlink_sock *nlk = nlk_sk(sk);
	struct netlink_ring *ring = &nlk->rx_ring;
	struct nl_mmap_hdr *hdr;

	spin_lock_bh(&sk->sk_receive_queue.lock);
	hdr = netlink_current_frame(ring, NL_MMAP_STATUS_UNUSED);
	if (hdr == NULL) {
		spin_unlock_bh(&sk->sk_receive_queue.lock);
		kfree_skb(skb);
		netlink_overrun(sk);
		return;
	}

	netlink_increment_head(ring);
	__skb_queue_tail(&sk->sk_receive_queue, skb);
	spin_unlock_bh(&sk->sk_receive_queue.lock);

	hdr->nm_len	= skb->len;
	hdr->nm_group	= NETLINK_CB(skb).dst_group;
	hdr->nm_pid	= NETLINK_CB(skb).creds.pid;
	hdr->nm_uid	= from_kuid(sk_user_ns(sk), NETLINK_CB(skb).creds.uid);
	hdr->nm_gid	= from_kgid(sk_user_ns(sk), NETLINK_CB(skb).creds.gid);
	netlink_set_status(hdr, NL_MMAP_STATUS_COPY);
}

#else /* CONFIG_NETLINK_MMAP */
#define netlink_skb_is_mmaped(skb)	false
#define netlink_rx_is_mmaped(sk)	false
#define netlink_tx_is_mmaped(sk)	false
#define netlink_mmap			sock_no_mmap
#define netlink_poll			datagram_poll
#define netlink_mmap_sendmsg(sk, msg, dst_portid, dst_group, siocb)	0
#endif /* CONFIG_NETLINK_MMAP */

static void netlink_destroy_callback(struct netlink_callback *cb)
{
	kfree_skb(cb->skb);
	kfree(cb);
}

static void netlink_consume_callback(struct netlink_callback *cb)
{
	consume_skb(cb->skb);
	kfree(cb);
}

static void netlink_skb_destructor(struct sk_buff *skb)
{
#ifdef CONFIG_NETLINK_MMAP
	struct nl_mmap_hdr *hdr;
	struct netlink_ring *ring;
	struct sock *sk;

	/* If a packet from the kernel to userspace was freed because of an
	 * error without being delivered to userspace, the kernel must reset
	 * the status. In the direction userspace to kernel, the status is
	 * always reset here after the packet was processed and freed.
	 */
	if (netlink_skb_is_mmaped(skb)) {
		hdr = netlink_mmap_hdr(skb);
		sk = NETLINK_CB(skb).sk;

		if (NETLINK_CB(skb).flags & NETLINK_SKB_TX) {
			netlink_set_status(hdr, NL_MMAP_STATUS_UNUSED);
			ring = &nlk_sk(sk)->tx_ring;
		} else {
			if (!(NETLINK_CB(skb).flags & NETLINK_SKB_DELIVERED)) {
				hdr->nm_len = 0;
				netlink_set_status(hdr, NL_MMAP_STATUS_VALID);
			}
			ring = &nlk_sk(sk)->rx_ring;
		}

		WARN_ON(atomic_read(&ring->pending) == 0);
		atomic_dec(&ring->pending);
		sock_put(sk);

		skb->head = NULL;
	}
#endif
	if (skb->sk != NULL)
		sock_rfree(skb);
}

static void netlink_skb_set_owner_r(struct sk_buff *skb, struct sock *sk)
{
	WARN_ON(skb->sk != NULL);
	skb->sk = sk;
	skb->destructor = netlink_skb_destructor;
	atomic_add(skb->truesize, &sk->sk_rmem_alloc);
	sk_mem_charge(sk, skb->truesize);
}

static void netlink_sock_destruct(struct sock *sk)
{
	struct netlink_sock *nlk = nlk_sk(sk);

	if (nlk->cb) {
		if (nlk->cb->done)
			nlk->cb->done(nlk->cb);

		module_put(nlk->cb->module);
		netlink_destroy_callback(nlk->cb);
	}

	skb_queue_purge(&sk->sk_receive_queue);
#ifdef CONFIG_NETLINK_MMAP
	if (1) {
		struct nl_mmap_req req;

		memset(&req, 0, sizeof(req));
		if (nlk->rx_ring.pg_vec)
			__netlink_set_ring(sk, &req, false, NULL, 0);
		memset(&req, 0, sizeof(req));
		if (nlk->tx_ring.pg_vec)
			__netlink_set_ring(sk, &req, true, NULL, 0);
	}
#endif /* CONFIG_NETLINK_MMAP */

	if (!sock_flag(sk, SOCK_DEAD)) {
		printk(KERN_ERR "Freeing alive netlink socket %p\n", sk);
		return;
	}

	WARN_ON(atomic_read(&sk->sk_rmem_alloc));
	WARN_ON(atomic_read(&sk->sk_wmem_alloc));
	WARN_ON(nlk_sk(sk)->groups);
}

/* This lock without WQ_FLAG_EXCLUSIVE is good on UP and it is _very_ bad on
 * SMP. Look, when several writers sleep and reader wakes them up, all but one
 * immediately hit write lock and grab all the cpus. Exclusive sleep solves
 * this, _but_ remember, it adds useless work on UP machines.
 */

void netlink_table_grab(void)
	__acquires(nl_table_lock)
{
	might_sleep();

	write_lock_irq(&nl_table_lock);

	if (atomic_read(&nl_table_users)) {
		DECLARE_WAITQUEUE(wait, current);

		add_wait_queue_exclusive(&nl_table_wait, &wait);
		for (;;) {
			set_current_state(TASK_UNINTERRUPTIBLE);
			if (atomic_read(&nl_table_users) == 0)
				break;
			write_unlock_irq(&nl_table_lock);
			schedule();
			write_lock_irq(&nl_table_lock);
		}

		__set_current_state(TASK_RUNNING);
		remove_wait_queue(&nl_table_wait, &wait);
	}
}

void netlink_table_ungrab(void)
	__releases(nl_table_lock)
{
	write_unlock_irq(&nl_table_lock);
	wake_up(&nl_table_wait);
}

static inline void
netlink_lock_table(void)
{
	/* read_lock() synchronizes us to netlink_table_grab */

	read_lock(&nl_table_lock);
	atomic_inc(&nl_table_users);
	read_unlock(&nl_table_lock);
}

static inline void
netlink_unlock_table(void)
{
	if (atomic_dec_and_test(&nl_table_users))
		wake_up(&nl_table_wait);
}

static struct sock *netlink_lookup(struct net *net, int protocol, u32 portid)
{
	struct nl_portid_hash *hash = &nl_table[protocol].hash;
	struct hlist_head *head;
	struct sock *sk;

	read_lock(&nl_table_lock);
	head = nl_portid_hashfn(hash, portid);
	sk_for_each(sk, head) {
		if (net_eq(sock_net(sk), net) && (nlk_sk(sk)->portid == portid)) {
			sock_hold(sk);
			goto found;
		}
	}
	sk = NULL;
found:
	read_unlock(&nl_table_lock);
	return sk;
}

static struct hlist_head *nl_portid_hash_zalloc(size_t size)
{
	if (size <= PAGE_SIZE)
		return kzalloc(size, GFP_ATOMIC);
	else
		return (struct hlist_head *)
			__get_free_pages(GFP_ATOMIC | __GFP_ZERO,
					 get_order(size));
}

static void nl_portid_hash_free(struct hlist_head *table, size_t size)
{
	if (size <= PAGE_SIZE)
		kfree(table);
	else
		free_pages((unsigned long)table, get_order(size));
}

static int nl_portid_hash_rehash(struct nl_portid_hash *hash, int grow)
{
	unsigned int omask, mask, shift;
	size_t osize, size;
	struct hlist_head *otable, *table;
	int i;

	omask = mask = hash->mask;
	osize = size = (mask + 1) * sizeof(*table);
	shift = hash->shift;

	if (grow) {
		if (++shift > hash->max_shift)
			return 0;
		mask = mask * 2 + 1;
		size *= 2;
	}

	table = nl_portid_hash_zalloc(size);
	if (!table)
		return 0;

	otable = hash->table;
	hash->table = table;
	hash->mask = mask;
	hash->shift = shift;
	get_random_bytes(&hash->rnd, sizeof(hash->rnd));

	for (i = 0; i <= omask; i++) {
		struct sock *sk;
		struct hlist_node *tmp;

		sk_for_each_safe(sk, tmp, &otable[i])
			__sk_add_node(sk, nl_portid_hashfn(hash, nlk_sk(sk)->portid));
	}

	nl_portid_hash_free(otable, osize);
	hash->rehash_time = jiffies + 10 * 60 * HZ;
	return 1;
}

static inline int nl_portid_hash_dilute(struct nl_portid_hash *hash, int len)
{
	int avg = hash->entries >> hash->shift;

	if (unlikely(avg > 1) && nl_portid_hash_rehash(hash, 1))
		return 1;

	if (unlikely(len > avg) && time_after(jiffies, hash->rehash_time)) {
		nl_portid_hash_rehash(hash, 0);
		return 1;
	}

	return 0;
}

static const struct proto_ops netlink_ops;

static void
netlink_update_listeners(struct sock *sk)
{
	struct netlink_table *tbl = &nl_table[sk->sk_protocol];
	unsigned long mask;
	unsigned int i;
	struct listeners *listeners;

	listeners = nl_deref_protected(tbl->listeners);
	if (!listeners)
		return;

	for (i = 0; i < NLGRPLONGS(tbl->groups); i++) {
		mask = 0;
		sk_for_each_bound(sk, &tbl->mc_list) {
			if (i < NLGRPLONGS(nlk_sk(sk)->ngroups))
				mask |= nlk_sk(sk)->groups[i];
		}
		listeners->masks[i] = mask;
	}
	/* this function is only called with the netlink table "grabbed", which
	 * makes sure updates are visible before bind or setsockopt return. */
}

static int netlink_insert(struct sock *sk, struct net *net, u32 portid)
{
	struct nl_portid_hash *hash = &nl_table[sk->sk_protocol].hash;
	struct hlist_head *head;
	int err = -EADDRINUSE;
	struct sock *osk;
	int len;

	netlink_table_grab();
	head = nl_portid_hashfn(hash, portid);
	len = 0;
	sk_for_each(osk, head) {
		if (net_eq(sock_net(osk), net) && (nlk_sk(osk)->portid == portid))
			break;
		len++;
	}
	if (osk)
		goto err;

	err = -EBUSY;
	if (nlk_sk(sk)->portid)
		goto err;

	err = -ENOMEM;
	if (BITS_PER_LONG > 32 && unlikely(hash->entries >= UINT_MAX))
		goto err;

	if (len && nl_portid_hash_dilute(hash, len))
		head = nl_portid_hashfn(hash, portid);
	hash->entries++;
	nlk_sk(sk)->portid = portid;
	sk_add_node(sk, head);
	err = 0;

err:
	netlink_table_ungrab();
	return err;
}

static void netlink_remove(struct sock *sk)
{
	netlink_table_grab();
	if (sk_del_node_init(sk))
		nl_table[sk->sk_protocol].hash.entries--;
	if (nlk_sk(sk)->subscriptions)
		__sk_del_bind_node(sk);
	netlink_table_ungrab();
}

static struct proto netlink_proto = {
	.name	  = "NETLINK",
	.owner	  = THIS_MODULE,
	.obj_size = sizeof(struct netlink_sock),
};

static int __netlink_create(struct net *net, struct socket *sock,
			    struct mutex *cb_mutex, int protocol)
{
	struct sock *sk;
	struct netlink_sock *nlk;

	sock->ops = &netlink_ops;

	sk = sk_alloc(net, PF_NETLINK, GFP_KERNEL, &netlink_proto);
	if (!sk)
		return -ENOMEM;

	sock_init_data(sock, sk);

	nlk = nlk_sk(sk);
	if (cb_mutex) {
		nlk->cb_mutex = cb_mutex;
	} else {
		nlk->cb_mutex = &nlk->cb_def_mutex;
		mutex_init(nlk->cb_mutex);
	}
	init_waitqueue_head(&nlk->wait);
#ifdef CONFIG_NETLINK_MMAP
	mutex_init(&nlk->pg_vec_lock);
#endif

	sk->sk_destruct = netlink_sock_destruct;
	sk->sk_protocol = protocol;
	return 0;
}

static int netlink_create(struct net *net, struct socket *sock, int protocol,
			  int kern)
{
	struct module *module = NULL;
	struct mutex *cb_mutex;
	struct netlink_sock *nlk;
	void (*bind)(int group);
	int err = 0;

	sock->state = SS_UNCONNECTED;

	if (sock->type != SOCK_RAW && sock->type != SOCK_DGRAM)
		return -ESOCKTNOSUPPORT;

	if (protocol < 0 || protocol >= MAX_LINKS)
		return -EPROTONOSUPPORT;

	netlink_lock_table();
#ifdef CONFIG_MODULES
	if (!nl_table[protocol].registered) {
		netlink_unlock_table();
		request_module("net-pf-%d-proto-%d", PF_NETLINK, protocol);
		netlink_lock_table();
	}
#endif
	if (nl_table[protocol].registered &&
	    try_module_get(nl_table[protocol].module))
		module = nl_table[protocol].module;
	else
		err = -EPROTONOSUPPORT;
	cb_mutex = nl_table[protocol].cb_mutex;
	bind = nl_table[protocol].bind;
	netlink_unlock_table();

	if (err < 0)
		goto out;

	err = __netlink_create(net, sock, cb_mutex, protocol);
	if (err < 0)
		goto out_module;

	local_bh_disable();
	sock_prot_inuse_add(net, &netlink_proto, 1);
	local_bh_enable();

	nlk = nlk_sk(sock->sk);
	nlk->module = module;
	nlk->netlink_bind = bind;
out:
	return err;

out_module:
	module_put(module);
	goto out;
}

static int netlink_release(struct socket *sock)
{
	struct sock *sk = sock->sk;
	struct netlink_sock *nlk;

	if (!sk)
		return 0;

	netlink_remove(sk);
	sock_orphan(sk);
	nlk = nlk_sk(sk);

	/*
	 * OK. Socket is unlinked, any packets that arrive now
	 * will be purged.
	 */

	sock->sk = NULL;
	wake_up_interruptible_all(&nlk->wait);

	skb_queue_purge(&sk->sk_write_queue);

	if (nlk->portid) {
		struct netlink_notify n = {
						.net = sock_net(sk),
						.protocol = sk->sk_protocol,
						.portid = nlk->portid,
					  };
		atomic_notifier_call_chain(&netlink_chain,
				NETLINK_URELEASE, &n);
	}

	module_put(nlk->module);

	netlink_table_grab();
	if (netlink_is_kernel(sk)) {
		BUG_ON(nl_table[sk->sk_protocol].registered == 0);
		if (--nl_table[sk->sk_protocol].registered == 0) {
			struct listeners *old;

			old = nl_deref_protected(nl_table[sk->sk_protocol].listeners);
			RCU_INIT_POINTER(nl_table[sk->sk_protocol].listeners, NULL);
			kfree_rcu(old, rcu);
			nl_table[sk->sk_protocol].module = NULL;
			nl_table[sk->sk_protocol].bind = NULL;
			nl_table[sk->sk_protocol].flags = 0;
			nl_table[sk->sk_protocol].registered = 0;
		}
	} else if (nlk->subscriptions) {
		netlink_update_listeners(sk);
	}
	netlink_table_ungrab();

	kfree(nlk->groups);
	nlk->groups = NULL;

	local_bh_disable();
	sock_prot_inuse_add(sock_net(sk), &netlink_proto, -1);
	local_bh_enable();
	sock_put(sk);
	return 0;
}

*sock
)
1179 struct sock
*sk
= sock
->sk
;
1180 struct net
*net
= sock_net(sk
);
1181 struct nl_portid_hash
*hash
= &nl_table
[sk
->sk_protocol
].hash
;
1182 struct hlist_head
*head
;
1184 s32 portid
= task_tgid_vnr(current
);
1186 static s32 rover
= -4097;
1190 netlink_table_grab();
1191 head
= nl_portid_hashfn(hash
, portid
);
1192 sk_for_each(osk
, head
) {
1193 if (!net_eq(sock_net(osk
), net
))
1195 if (nlk_sk(osk
)->portid
== portid
) {
1196 /* Bind collision, search negative portid values. */
1200 netlink_table_ungrab();
1204 netlink_table_ungrab();
1206 err
= netlink_insert(sk
, net
, portid
);
1207 if (err
== -EADDRINUSE
)
1210 /* If 2 threads race to autobind, that is fine. */
/**
 * __netlink_ns_capable - General netlink message capability test
 * @nsp: NETLINK_CB of the socket buffer holding a netlink command from userspace
 * @user_ns: The user namespace of the capability to use
 * @cap: The capability to use
 *
 * Test to see if the opener of the socket we received the message
 * from had, when the netlink socket was created, and the sender of the
 * message has, the capability @cap in the user namespace @user_ns.
 */
bool __netlink_ns_capable(const struct netlink_skb_parms *nsp,
			struct user_namespace *user_ns, int cap)
{
	return ((nsp->flags & NETLINK_SKB_DST) ||
		file_ns_capable(nsp->sk->sk_socket->file, user_ns, cap)) &&
		ns_capable(user_ns, cap);
}
EXPORT_SYMBOL(__netlink_ns_capable);

/**
 * netlink_ns_capable - General netlink message capability test
 * @skb: socket buffer holding a netlink command from userspace
 * @user_ns: The user namespace of the capability to use
 * @cap: The capability to use
 *
 * Test to see if the opener of the socket we received the message
 * from had, when the netlink socket was created, and the sender of the
 * message has, the capability @cap in the user namespace @user_ns.
 */
bool netlink_ns_capable(const struct sk_buff *skb,
			struct user_namespace *user_ns, int cap)
{
	return __netlink_ns_capable(&NETLINK_CB(skb), user_ns, cap);
}
EXPORT_SYMBOL(netlink_ns_capable);

/**
 * netlink_capable - Netlink global message capability test
 * @skb: socket buffer holding a netlink command from userspace
 * @cap: The capability to use
 *
 * Test to see if the opener of the socket we received the message
 * from had, when the netlink socket was created, and the sender of the
 * message has, the capability @cap in all user namespaces.
 */
bool netlink_capable(const struct sk_buff *skb, int cap)
{
	return netlink_ns_capable(skb, &init_user_ns, cap);
}
EXPORT_SYMBOL(netlink_capable);

/**
 * netlink_net_capable - Netlink network namespace message capability test
 * @skb: socket buffer holding a netlink command from userspace
 * @cap: The capability to use
 *
 * Test to see if the opener of the socket we received the message
 * from had, when the netlink socket was created, and the sender of the
 * message has, the capability @cap over the network namespace of
 * the socket we received the message from.
 */
bool netlink_net_capable(const struct sk_buff *skb, int cap)
{
	return netlink_ns_capable(skb, sock_net(skb->sk)->user_ns, cap);
}
EXPORT_SYMBOL(netlink_net_capable);

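/*
 * Example: the typical use of netlink_net_capable() is a permission check
 * at the top of a request handler before any state is touched; a sketch
 * for illustration only, with a hypothetical handler name.
 *
 *	static int example_nl_doit(struct sk_buff *skb, struct nlmsghdr *nlh)
 *	{
 *		if (!netlink_net_capable(skb, CAP_NET_ADMIN))
 *			return -EPERM;
 *		...
 *		return 0;
 *	}
 */
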
static inline int netlink_allowed(const struct socket *sock, unsigned int flag)
{
	return (nl_table[sock->sk->sk_protocol].flags & flag) ||
		ns_capable(sock_net(sock->sk)->user_ns, CAP_NET_ADMIN);
}

static void
netlink_update_subscriptions(struct sock *sk, unsigned int subscriptions)
{
	struct netlink_sock *nlk = nlk_sk(sk);

	if (nlk->subscriptions && !subscriptions)
		__sk_del_bind_node(sk);
	else if (!nlk->subscriptions && subscriptions)
		sk_add_bind_node(sk, &nl_table[sk->sk_protocol].mc_list);
	nlk->subscriptions = subscriptions;
}

static int netlink_realloc_groups(struct sock *sk)
{
	struct netlink_sock *nlk = nlk_sk(sk);
	unsigned int groups;
	unsigned long *new_groups;
	int err = 0;

	netlink_table_grab();

	groups = nl_table[sk->sk_protocol].groups;
	if (!nl_table[sk->sk_protocol].registered) {
		err = -ENOENT;
		goto out_unlock;
	}

	if (nlk->ngroups >= groups)
		goto out_unlock;

	new_groups = krealloc(nlk->groups, NLGRPSZ(groups), GFP_ATOMIC);
	if (new_groups == NULL) {
		err = -ENOMEM;
		goto out_unlock;
	}
	memset((char *)new_groups + NLGRPSZ(nlk->ngroups), 0,
	       NLGRPSZ(groups) - NLGRPSZ(nlk->ngroups));

	nlk->groups = new_groups;
	nlk->ngroups = groups;
out_unlock:
	netlink_table_ungrab();
	return err;
}

static int netlink_bind(struct socket *sock, struct sockaddr *addr,
			int addr_len)
{
	struct sock *sk = sock->sk;
	struct net *net = sock_net(sk);
	struct netlink_sock *nlk = nlk_sk(sk);
	struct sockaddr_nl *nladdr = (struct sockaddr_nl *)addr;
	int err;

	if (addr_len < sizeof(struct sockaddr_nl))
		return -EINVAL;

	if (nladdr->nl_family != AF_NETLINK)
		return -EINVAL;

	/* Only superuser is allowed to listen multicasts */
	if (nladdr->nl_groups) {
		if (!netlink_allowed(sock, NL_CFG_F_NONROOT_RECV))
			return -EPERM;
		err = netlink_realloc_groups(sk);
		if (err)
			return err;
	}

	if (nlk->portid) {
		if (nladdr->nl_pid != nlk->portid)
			return -EINVAL;
	} else {
		err = nladdr->nl_pid ?
			netlink_insert(sk, net, nladdr->nl_pid) :
			netlink_autobind(sock);
		if (err)
			return err;
	}

	if (!nladdr->nl_groups && (nlk->groups == NULL || !(u32)nlk->groups[0]))
		return 0;

	netlink_table_grab();
	netlink_update_subscriptions(sk, nlk->subscriptions +
					 hweight32(nladdr->nl_groups) -
					 hweight32(nlk->groups[0]));
	nlk->groups[0] = (nlk->groups[0] & ~0xffffffffUL) | nladdr->nl_groups;
	netlink_update_listeners(sk);
	netlink_table_ungrab();

	if (nlk->netlink_bind && nlk->groups[0]) {
		int i;

		for (i = 0; i < nlk->ngroups; i++) {
			if (test_bit(i, nlk->groups))
				nlk->netlink_bind(i);
		}
	}

	return 0;
}

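/*
 * Example: the userspace side of the bind path above; a sketch for
 * illustration, assuming the RTMGRP_* group masks from
 * <linux/rtnetlink.h>.
 *
 *	struct sockaddr_nl snl = {
 *		.nl_family = AF_NETLINK,
 *		.nl_groups = RTMGRP_LINK | RTMGRP_IPV4_IFADDR,
 *	};
 *	int fd = socket(AF_NETLINK, SOCK_RAW, NETLINK_ROUTE);
 *
 *	bind(fd, (struct sockaddr *)&snl, sizeof(snl));
 */
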
static int netlink_connect(struct socket *sock, struct sockaddr *addr,
			   int alen, int flags)
{
	int err = 0;
	struct sock *sk = sock->sk;
	struct netlink_sock *nlk = nlk_sk(sk);
	struct sockaddr_nl *nladdr = (struct sockaddr_nl *)addr;

	if (alen < sizeof(addr->sa_family))
		return -EINVAL;

	if (addr->sa_family == AF_UNSPEC) {
		sk->sk_state	= NETLINK_UNCONNECTED;
		nlk->dst_portid	= 0;
		nlk->dst_group	= 0;
		return 0;
	}
	if (addr->sa_family != AF_NETLINK)
		return -EINVAL;

	/* Only superuser is allowed to send multicasts */
	if (nladdr->nl_groups && !netlink_allowed(sock, NL_CFG_F_NONROOT_SEND))
		return -EPERM;

	if (!nlk->portid)
		err = netlink_autobind(sock);

	if (err == 0) {
		sk->sk_state	= NETLINK_CONNECTED;
		nlk->dst_portid	= nladdr->nl_pid;
		nlk->dst_group	= ffs(nladdr->nl_groups);
	}

	return err;
}

static int netlink_getname(struct socket *sock, struct sockaddr *addr,
			   int *addr_len, int peer)
{
	struct sock *sk = sock->sk;
	struct netlink_sock *nlk = nlk_sk(sk);
	DECLARE_SOCKADDR(struct sockaddr_nl *, nladdr, addr);

	nladdr->nl_family = AF_NETLINK;
	nladdr->nl_pad = 0;
	*addr_len = sizeof(*nladdr);

	if (peer) {
		nladdr->nl_pid = nlk->dst_portid;
		nladdr->nl_groups = netlink_group_mask(nlk->dst_group);
	} else {
		nladdr->nl_pid = nlk->portid;
		nladdr->nl_groups = nlk->groups ? nlk->groups[0] : 0;
	}
	return 0;
}

static struct sock *netlink_getsockbyportid(struct sock *ssk, u32 portid)
{
	struct sock *sock;
	struct netlink_sock *nlk;

	sock = netlink_lookup(sock_net(ssk), ssk->sk_protocol, portid);
	if (!sock)
		return ERR_PTR(-ECONNREFUSED);

	/* Don't bother queuing skb if kernel socket has no input function */
	nlk = nlk_sk(sock);
	if (sock->sk_state == NETLINK_CONNECTED &&
	    nlk->dst_portid != nlk_sk(ssk)->portid) {
		sock_put(sock);
		return ERR_PTR(-ECONNREFUSED);
	}
	return sock;
}

struct sock *netlink_getsockbyfilp(struct file *filp)
{
	struct inode *inode = file_inode(filp);
	struct sock *sock;

	if (!S_ISSOCK(inode->i_mode))
		return ERR_PTR(-ENOTSOCK);

	sock = SOCKET_I(inode)->sk;
	if (sock->sk_family != AF_NETLINK)
		return ERR_PTR(-EINVAL);

	sock_hold(sock);
	return sock;
}

/*
 * Attach a skb to a netlink socket.
 * The caller must hold a reference to the destination socket. On error, the
 * reference is dropped. The skb is not sent to the destination, just all
 * error checks are performed and memory in the queue is reserved.
 * Return values:
 * < 0: error. skb freed, reference to sock dropped.
 * 0: continue
 * 1: repeat lookup - reference dropped while waiting for socket memory.
 */
int netlink_attachskb(struct sock *sk, struct sk_buff *skb,
		      long *timeo, struct sock *ssk)
{
	struct netlink_sock *nlk;

	nlk = nlk_sk(sk);

	if ((atomic_read(&sk->sk_rmem_alloc) > sk->sk_rcvbuf ||
	     test_bit(NETLINK_CONGESTED, &nlk->state)) &&
	    !netlink_skb_is_mmaped(skb)) {
		DECLARE_WAITQUEUE(wait, current);
		if (!*timeo) {
			if (!ssk || netlink_is_kernel(ssk))
				netlink_overrun(sk);
			sock_put(sk);
			kfree_skb(skb);
			return -EAGAIN;
		}

		__set_current_state(TASK_INTERRUPTIBLE);
		add_wait_queue(&nlk->wait, &wait);

		if ((atomic_read(&sk->sk_rmem_alloc) > sk->sk_rcvbuf ||
		     test_bit(NETLINK_CONGESTED, &nlk->state)) &&
		    !sock_flag(sk, SOCK_DEAD))
			*timeo = schedule_timeout(*timeo);

		__set_current_state(TASK_RUNNING);
		remove_wait_queue(&nlk->wait, &wait);
		sock_put(sk);

		if (signal_pending(current)) {
			kfree_skb(skb);
			return sock_intr_errno(*timeo);
		}
		return 1;
	}
	netlink_skb_set_owner_r(skb, sk);
	return 0;
}

static int __netlink_sendskb(struct sock *sk, struct sk_buff *skb)
{
	int len = skb->len;

#ifdef CONFIG_NETLINK_MMAP
	if (netlink_skb_is_mmaped(skb))
		netlink_queue_mmaped_skb(sk, skb);
	else if (netlink_rx_is_mmaped(sk))
		netlink_ring_set_copied(sk, skb);
	else
#endif /* CONFIG_NETLINK_MMAP */
		skb_queue_tail(&sk->sk_receive_queue, skb);
	sk->sk_data_ready(sk, len);
	return len;
}

int netlink_sendskb(struct sock *sk, struct sk_buff *skb)
{
	int len = __netlink_sendskb(sk, skb);

	sock_put(sk);
	return len;
}

void netlink_detachskb(struct sock *sk, struct sk_buff *skb)
{
	kfree_skb(skb);
	sock_put(sk);
}

static struct sk_buff *netlink_trim(struct sk_buff *skb, gfp_t allocation)
{
	int delta;

	WARN_ON(skb->sk != NULL);
	if (netlink_skb_is_mmaped(skb))
		return skb;

	delta = skb->end - skb->tail;
	if (delta * 2 < skb->truesize)
		return skb;

	if (skb_shared(skb)) {
		struct sk_buff *nskb = skb_clone(skb, allocation);
		if (!nskb)
			return skb;
		consume_skb(skb);
		skb = nskb;
	}

	if (!pskb_expand_head(skb, 0, -delta, allocation))
		skb->truesize -= delta;

	return skb;
}

static int netlink_unicast_kernel(struct sock *sk, struct sk_buff *skb,
				  struct sock *ssk)
{
	int ret;
	struct netlink_sock *nlk = nlk_sk(sk);

	ret = -ECONNREFUSED;
	if (nlk->netlink_rcv != NULL) {
		ret = skb->len;
		netlink_skb_set_owner_r(skb, sk);
		NETLINK_CB(skb).sk = ssk;
		nlk->netlink_rcv(skb);
		consume_skb(skb);
	} else {
		kfree_skb(skb);
	}
	sock_put(sk);
	return ret;
}

int netlink_unicast(struct sock *ssk, struct sk_buff *skb,
		    u32 portid, int nonblock)
{
	struct sock *sk;
	int err;
	long timeo;

	skb = netlink_trim(skb, gfp_any());

	timeo = sock_sndtimeo(ssk, nonblock);
retry:
	sk = netlink_getsockbyportid(ssk, portid);
	if (IS_ERR(sk)) {
		kfree_skb(skb);
		return PTR_ERR(sk);
	}
	if (netlink_is_kernel(sk))
		return netlink_unicast_kernel(sk, skb, ssk);

	if (sk_filter(sk, skb)) {
		err = skb->len;
		kfree_skb(skb);
		sock_put(sk);
		return err;
	}

	err = netlink_attachskb(sk, skb, &timeo, ssk);
	if (err == 1)
		goto retry;
	if (err)
		return err;

	return netlink_sendskb(sk, skb);
}
EXPORT_SYMBOL(netlink_unicast);

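/*
 * Example: a kernel-side reply to a single userspace socket; a sketch for
 * illustration, assuming "sk" is the kernel socket of this family and
 * "skb" holds one complete netlink message. netlink_unicast() consumes
 * the skb reference on both success and failure.
 *
 *	err = netlink_unicast(sk, skb, NETLINK_CB(in_skb).portid,
 *			      MSG_DONTWAIT);
 *	if (err < 0)
 *		return err;
 */
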
struct sk_buff *netlink_alloc_skb(struct sock *ssk, unsigned int size,
				  u32 dst_portid, gfp_t gfp_mask)
{
#ifdef CONFIG_NETLINK_MMAP
	struct sock *sk = NULL;
	struct sk_buff *skb;
	struct netlink_ring *ring;
	struct nl_mmap_hdr *hdr;
	unsigned int maxlen;

	sk = netlink_getsockbyportid(ssk, dst_portid);
	if (IS_ERR(sk))
		goto out;

	ring = &nlk_sk(sk)->rx_ring;
	/* fast-path without atomic ops for common case: non-mmaped receiver */
	if (ring->pg_vec == NULL)
		goto out_put;

	skb = alloc_skb_head(gfp_mask);
	if (skb == NULL)
		goto err1;

	spin_lock_bh(&sk->sk_receive_queue.lock);
	/* check again under lock */
	if (ring->pg_vec == NULL)
		goto out_free;

	maxlen = ring->frame_size - NL_MMAP_HDRLEN;
	if (maxlen < size)
		goto out_free;

	netlink_forward_ring(ring);
	hdr = netlink_current_frame(ring, NL_MMAP_STATUS_UNUSED);
	if (hdr == NULL)
		goto err2;
	netlink_ring_setup_skb(skb, sk, ring, hdr);
	netlink_set_status(hdr, NL_MMAP_STATUS_RESERVED);
	atomic_inc(&ring->pending);
	netlink_increment_head(ring);

	spin_unlock_bh(&sk->sk_receive_queue.lock);
	return skb;

err2:
	kfree_skb(skb);
	spin_unlock_bh(&sk->sk_receive_queue.lock);
	netlink_overrun(sk);
err1:
	sock_put(sk);
	return NULL;

out_free:
	kfree_skb(skb);
	spin_unlock_bh(&sk->sk_receive_queue.lock);
out_put:
	sock_put(sk);
out:
#endif
	return alloc_skb(size, gfp_mask);
}
EXPORT_SYMBOL_GPL(netlink_alloc_skb);

int netlink_has_listeners(struct sock *sk, unsigned int group)
{
	int res = 0;
	struct listeners *listeners;

	BUG_ON(!netlink_is_kernel(sk));

	rcu_read_lock();
	listeners = rcu_dereference(nl_table[sk->sk_protocol].listeners);

	if (listeners && group - 1 < nl_table[sk->sk_protocol].groups)
		res = test_bit(group - 1, listeners->masks);

	rcu_read_unlock();

	return res;
}
EXPORT_SYMBOL_GPL(netlink_has_listeners);

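/*
 * Example: callers typically use this to skip building a notification
 * when nobody is subscribed; a sketch for illustration, with a
 * hypothetical group constant.
 *
 *	if (!netlink_has_listeners(sk, EXAMPLE_NLGRP_NOTIFY))
 *		return 0;
 *	skb = nlmsg_new(payload, GFP_KERNEL);
 *	...
 */
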
static int netlink_broadcast_deliver(struct sock *sk, struct sk_buff *skb)
{
	struct netlink_sock *nlk = nlk_sk(sk);

	if (atomic_read(&sk->sk_rmem_alloc) <= sk->sk_rcvbuf &&
	    !test_bit(NETLINK_CONGESTED, &nlk->state)) {
		netlink_skb_set_owner_r(skb, sk);
		__netlink_sendskb(sk, skb);
		return atomic_read(&sk->sk_rmem_alloc) > (sk->sk_rcvbuf >> 1);
	}
	return -1;
}

struct netlink_broadcast_data {
	struct sock *exclude_sk;
	struct net *net;
	u32 portid;
	u32 group;
	int failure;
	int delivery_failure;
	int congested;
	int delivered;
	gfp_t allocation;
	struct sk_buff *skb, *skb2;
	int (*tx_filter)(struct sock *dsk, struct sk_buff *skb, void *data);
	void *tx_data;
};

static int do_one_broadcast(struct sock *sk,
			    struct netlink_broadcast_data *p)
{
	struct netlink_sock *nlk = nlk_sk(sk);
	int val;

	if (p->exclude_sk == sk)
		goto out;

	if (nlk->portid == p->portid || p->group - 1 >= nlk->ngroups ||
	    !test_bit(p->group - 1, nlk->groups))
		goto out;

	if (!net_eq(sock_net(sk), p->net))
		goto out;

	if (p->failure) {
		netlink_overrun(sk);
		goto out;
	}

	sock_hold(sk);
	if (p->skb2 == NULL) {
		if (skb_shared(p->skb)) {
			p->skb2 = skb_clone(p->skb, p->allocation);
		} else {
			p->skb2 = skb_get(p->skb);
			/*
			 * skb ownership may have been set when
			 * delivered to a previous socket.
			 */
			skb_orphan(p->skb2);
		}
	}
	if (p->skb2 == NULL) {
		netlink_overrun(sk);
		/* Clone failed. Notify ALL listeners. */
		p->failure = 1;
		if (nlk->flags & NETLINK_BROADCAST_SEND_ERROR)
			p->delivery_failure = 1;
	} else if (p->tx_filter && p->tx_filter(sk, p->skb2, p->tx_data)) {
		kfree_skb(p->skb2);
		p->skb2 = NULL;
	} else if (sk_filter(sk, p->skb2)) {
		kfree_skb(p->skb2);
		p->skb2 = NULL;
	} else if ((val = netlink_broadcast_deliver(sk, p->skb2)) < 0) {
		netlink_overrun(sk);
		if (nlk->flags & NETLINK_BROADCAST_SEND_ERROR)
			p->delivery_failure = 1;
	} else {
		p->congested |= val;
		p->delivered = 1;
		p->skb2 = NULL;
	}
	sock_put(sk);

out:
	return 0;
}

int netlink_broadcast_filtered(struct sock *ssk, struct sk_buff *skb, u32 portid,
	u32 group, gfp_t allocation,
	int (*filter)(struct sock *dsk, struct sk_buff *skb, void *data),
	void *filter_data)
{
	struct net *net = sock_net(ssk);
	struct netlink_broadcast_data info;
	struct sock *sk;

	skb = netlink_trim(skb, allocation);

	info.exclude_sk = ssk;
	info.net = net;
	info.portid = portid;
	info.group = group;
	info.failure = 0;
	info.delivery_failure = 0;
	info.congested = 0;
	info.delivered = 0;
	info.allocation = allocation;
	info.skb = skb;
	info.skb2 = NULL;
	info.tx_filter = filter;
	info.tx_data = filter_data;

	/* While we sleep in clone, do not allow to change socket list */

	netlink_lock_table();

	sk_for_each_bound(sk, &nl_table[ssk->sk_protocol].mc_list)
		do_one_broadcast(sk, &info);

	consume_skb(skb);

	netlink_unlock_table();

	if (info.delivery_failure) {
		kfree_skb(info.skb2);
		return -ENOBUFS;
	}
	consume_skb(info.skb2);

	if (info.delivered) {
		if (info.congested && (allocation & __GFP_WAIT))
			yield();
		return 0;
	}
	return -ESRCH;
}
EXPORT_SYMBOL(netlink_broadcast_filtered);

int netlink_broadcast(struct sock *ssk, struct sk_buff *skb, u32 portid,
		      u32 group, gfp_t allocation)
{
	return netlink_broadcast_filtered(ssk, skb, portid, group, allocation,
		NULL, NULL);
}
EXPORT_SYMBOL(netlink_broadcast);

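/*
 * Example: delivering one message to every member of a multicast group;
 * a sketch for illustration, with a hypothetical group constant. The
 * -ESRCH return (no listeners) is usually not treated as a hard error.
 *
 *	err = netlink_broadcast(sk, skb, 0, EXAMPLE_NLGRP_NOTIFY,
 *				GFP_KERNEL);
 *	if (err && err != -ESRCH)
 *		return err;
 */
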
struct netlink_set_err_data {
	struct sock *exclude_sk;
	u32 portid;
	u32 group;
	int code;
};

static int do_one_set_err(struct sock *sk, struct netlink_set_err_data *p)
{
	struct netlink_sock *nlk = nlk_sk(sk);
	int ret = 0;

	if (sk == p->exclude_sk)
		goto out;

	if (!net_eq(sock_net(sk), sock_net(p->exclude_sk)))
		goto out;

	if (nlk->portid == p->portid || p->group - 1 >= nlk->ngroups ||
	    !test_bit(p->group - 1, nlk->groups))
		goto out;

	if (p->code == ENOBUFS && nlk->flags & NETLINK_RECV_NO_ENOBUFS) {
		ret = 1;
		goto out;
	}

	sk->sk_err = p->code;
	sk->sk_error_report(sk);
out:
	return ret;
}

/**
 * netlink_set_err - report error to broadcast listeners
 * @ssk: the kernel netlink socket, as returned by netlink_kernel_create()
 * @portid: the PORTID of a process that we want to skip (if any)
 * @group: the broadcast group that will notice the error
 * @code: error code, must be negative (as usual in kernelspace)
 *
 * This function returns the number of broadcast listeners that have set the
 * NETLINK_RECV_NO_ENOBUFS socket option.
 */
int netlink_set_err(struct sock *ssk, u32 portid, u32 group, int code)
{
	struct netlink_set_err_data info;
	struct sock *sk;
	int ret = 0;

	info.exclude_sk = ssk;
	info.portid = portid;
	info.group = group;
	/* sk->sk_err wants a positive error value */
	info.code = -code;

	read_lock(&nl_table_lock);

	sk_for_each_bound(sk, &nl_table[ssk->sk_protocol].mc_list)
		ret += do_one_set_err(sk, &info);

	read_unlock(&nl_table_lock);
	return ret;
}
EXPORT_SYMBOL(netlink_set_err);

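/*
 * Example: telling every group listener that events were lost because a
 * notification could not be allocated; a sketch for illustration, with a
 * hypothetical group constant.
 *
 *	netlink_set_err(sk, 0, EXAMPLE_NLGRP_NOTIFY, -ENOBUFS);
 */
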
/* must be called with netlink table grabbed */
static void netlink_update_socket_mc(struct netlink_sock *nlk,
				     unsigned int group,
				     int is_new)
{
	int old, new = !!is_new, subscriptions;

	old = test_bit(group - 1, nlk->groups);
	subscriptions = nlk->subscriptions - old + new;
	if (new)
		__set_bit(group - 1, nlk->groups);
	else
		__clear_bit(group - 1, nlk->groups);
	netlink_update_subscriptions(&nlk->sk, subscriptions);
	netlink_update_listeners(&nlk->sk);
}

static int netlink_setsockopt(struct socket *sock, int level, int optname,
			      char __user *optval, unsigned int optlen)
{
	struct sock *sk = sock->sk;
	struct netlink_sock *nlk = nlk_sk(sk);
	unsigned int val = 0;
	int err;

	if (level != SOL_NETLINK)
		return -ENOPROTOOPT;

	if (optname != NETLINK_RX_RING && optname != NETLINK_TX_RING &&
	    optlen >= sizeof(int) &&
	    get_user(val, (unsigned int __user *)optval))
		return -EFAULT;

	switch (optname) {
	case NETLINK_PKTINFO:
		if (val)
			nlk->flags |= NETLINK_RECV_PKTINFO;
		else
			nlk->flags &= ~NETLINK_RECV_PKTINFO;
		err = 0;
		break;
	case NETLINK_ADD_MEMBERSHIP:
	case NETLINK_DROP_MEMBERSHIP: {
		if (!netlink_allowed(sock, NL_CFG_F_NONROOT_RECV))
			return -EPERM;
		err = netlink_realloc_groups(sk);
		if (err)
			return err;
		if (!val || val - 1 >= nlk->ngroups)
			return -EINVAL;
		netlink_table_grab();
		netlink_update_socket_mc(nlk, val,
					 optname == NETLINK_ADD_MEMBERSHIP);
		netlink_table_ungrab();

		if (nlk->netlink_bind)
			nlk->netlink_bind(val);

		err = 0;
		break;
	}
	case NETLINK_BROADCAST_ERROR:
		if (val)
			nlk->flags |= NETLINK_BROADCAST_SEND_ERROR;
		else
			nlk->flags &= ~NETLINK_BROADCAST_SEND_ERROR;
		err = 0;
		break;
	case NETLINK_NO_ENOBUFS:
		if (val) {
			nlk->flags |= NETLINK_RECV_NO_ENOBUFS;
			clear_bit(NETLINK_CONGESTED, &nlk->state);
			wake_up_interruptible(&nlk->wait);
		} else {
			nlk->flags &= ~NETLINK_RECV_NO_ENOBUFS;
		}
		err = 0;
		break;
#ifdef CONFIG_NETLINK_MMAP
	case NETLINK_RX_RING:
	case NETLINK_TX_RING: {
		struct nl_mmap_req req;

		/* Rings might consume more memory than queue limits, require
		 * CAP_NET_ADMIN.
		 */
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		if (optlen < sizeof(req))
			return -EINVAL;
		if (copy_from_user(&req, optval, sizeof(req)))
			return -EFAULT;
		err = netlink_set_ring(sk, &req,
				       optname == NETLINK_TX_RING);
		break;
	}
#endif /* CONFIG_NETLINK_MMAP */
	default:
		err = -ENOPROTOOPT;
	}
	return err;
}

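/*
 * Example: how userspace joins a multicast group at runtime instead of at
 * bind() time, through the NETLINK_ADD_MEMBERSHIP case above; a sketch
 * for illustration, with a hypothetical group id.
 *
 *	unsigned int group = 1;
 *
 *	setsockopt(fd, SOL_NETLINK, NETLINK_ADD_MEMBERSHIP,
 *		   &group, sizeof(group));
 */
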
static int netlink_getsockopt(struct socket *sock, int level, int optname,
			      char __user *optval, int __user *optlen)
{
	struct sock *sk = sock->sk;
	struct netlink_sock *nlk = nlk_sk(sk);
	int len, val, err;

	if (level != SOL_NETLINK)
		return -ENOPROTOOPT;

	if (get_user(len, optlen))
		return -EFAULT;
	if (len < 0)
		return -EINVAL;

	switch (optname) {
	case NETLINK_PKTINFO:
		if (len < sizeof(int))
			return -EINVAL;
		len = sizeof(int);
		val = nlk->flags & NETLINK_RECV_PKTINFO ? 1 : 0;
		if (put_user(len, optlen) ||
		    put_user(val, (int __user *)optval))
			return -EFAULT;
		err = 0;
		break;
	case NETLINK_BROADCAST_ERROR:
		if (len < sizeof(int))
			return -EINVAL;
		len = sizeof(int);
		val = nlk->flags & NETLINK_BROADCAST_SEND_ERROR ? 1 : 0;
		if (put_user(len, optlen) ||
		    put_user(val, (int __user *)optval))
			return -EFAULT;
		err = 0;
		break;
	case NETLINK_NO_ENOBUFS:
		if (len < sizeof(int))
			return -EINVAL;
		len = sizeof(int);
		val = nlk->flags & NETLINK_RECV_NO_ENOBUFS ? 1 : 0;
		if (put_user(len, optlen) ||
		    put_user(val, (int __user *)optval))
			return -EFAULT;
		err = 0;
		break;
	default:
		err = -ENOPROTOOPT;
	}
	return err;
}

*msg
, struct sk_buff
*skb
)
2100 struct nl_pktinfo info
;
2102 info
.group
= NETLINK_CB(skb
).dst_group
;
2103 put_cmsg(msg
, SOL_NETLINK
, NETLINK_PKTINFO
, sizeof(info
), &info
);
static int netlink_sendmsg(struct kiocb *kiocb, struct socket *sock,
			   struct msghdr *msg, size_t len)
{
	struct sock_iocb *siocb = kiocb_to_siocb(kiocb);
	struct sock *sk = sock->sk;
	struct netlink_sock *nlk = nlk_sk(sk);
	struct sockaddr_nl *addr = msg->msg_name;
	u32 dst_portid;
	u32 dst_group;
	struct sk_buff *skb;
	int err;
	struct scm_cookie scm;
	u32 netlink_skb_flags = 0;

	if (msg->msg_flags&MSG_OOB)
		return -EOPNOTSUPP;

	if (NULL == siocb->scm)
		siocb->scm = &scm;

	err = scm_send(sock, msg, siocb->scm, true);
	if (err < 0)
		return err;

	if (msg->msg_namelen) {
		err = -EINVAL;
		if (addr->nl_family != AF_NETLINK)
			goto out;
		dst_portid = addr->nl_pid;
		dst_group = ffs(addr->nl_groups);
		err = -EPERM;
		if ((dst_group || dst_portid) &&
		    !netlink_allowed(sock, NL_CFG_F_NONROOT_SEND))
			goto out;
		netlink_skb_flags |= NETLINK_SKB_DST;
	} else {
		dst_portid = nlk->dst_portid;
		dst_group = nlk->dst_group;
	}

	if (!nlk->portid) {
		err = netlink_autobind(sock);
		if (err)
			goto out;
	}

	if (netlink_tx_is_mmaped(sk) &&
	    msg->msg_iov->iov_base == NULL) {
		err = netlink_mmap_sendmsg(sk, msg, dst_portid, dst_group,
					   siocb);
		goto out;
	}

	err = -EMSGSIZE;
	if (len > sk->sk_sndbuf - 32)
		goto out;
	err = -ENOBUFS;
	skb = alloc_skb(len, GFP_KERNEL);
	if (skb == NULL)
		goto out;

	NETLINK_CB(skb).portid	= nlk->portid;
	NETLINK_CB(skb).dst_group = dst_group;
	NETLINK_CB(skb).creds	= siocb->scm->creds;
	NETLINK_CB(skb).flags	= netlink_skb_flags;

	err = -EFAULT;
	if (memcpy_fromiovec(skb_put(skb, len), msg->msg_iov, len)) {
		kfree_skb(skb);
		goto out;
	}

	err = security_netlink_send(sk, skb);
	if (err) {
		kfree_skb(skb);
		goto out;
	}

	if (dst_group) {
		atomic_inc(&skb->users);
		netlink_broadcast(sk, skb, dst_portid, dst_group, GFP_KERNEL);
	}
	err = netlink_unicast(sk, skb, dst_portid, msg->msg_flags&MSG_DONTWAIT);

out:
	scm_destroy(siocb->scm);
	return err;
}

*kiocb
, struct socket
*sock
,
2196 struct msghdr
*msg
, size_t len
,
2199 struct sock_iocb
*siocb
= kiocb_to_siocb(kiocb
);
2200 struct scm_cookie scm
;
2201 struct sock
*sk
= sock
->sk
;
2202 struct netlink_sock
*nlk
= nlk_sk(sk
);
2203 int noblock
= flags
&MSG_DONTWAIT
;
2205 struct sk_buff
*skb
, *data_skb
;
2213 skb
= skb_recv_datagram(sk
, flags
, noblock
, &err
);
2219 #ifdef CONFIG_COMPAT_NETLINK_MESSAGES
2220 if (unlikely(skb_shinfo(skb
)->frag_list
)) {
2222 * If this skb has a frag_list, then here that means that we
2223 * will have to use the frag_list skb's data for compat tasks
2224 * and the regular skb's data for normal (non-compat) tasks.
2226 * If we need to send the compat skb, assign it to the
2227 * 'data_skb' variable so that it will be used below for data
2228 * copying. We keep 'skb' for everything else, including
2229 * freeing both later.
2231 if (flags
& MSG_CMSG_COMPAT
)
2232 data_skb
= skb_shinfo(skb
)->frag_list
;
2236 copied
= data_skb
->len
;
2238 msg
->msg_flags
|= MSG_TRUNC
;
2242 skb_reset_transport_header(data_skb
);
2243 err
= skb_copy_datagram_iovec(data_skb
, 0, msg
->msg_iov
, copied
);
2245 if (msg
->msg_name
) {
2246 struct sockaddr_nl
*addr
= (struct sockaddr_nl
*)msg
->msg_name
;
2247 addr
->nl_family
= AF_NETLINK
;
2249 addr
->nl_pid
= NETLINK_CB(skb
).portid
;
2250 addr
->nl_groups
= netlink_group_mask(NETLINK_CB(skb
).dst_group
);
2251 msg
->msg_namelen
= sizeof(*addr
);
2254 if (nlk
->flags
& NETLINK_RECV_PKTINFO
)
2255 netlink_cmsg_recv_pktinfo(msg
, skb
);
2257 if (NULL
== siocb
->scm
) {
2258 memset(&scm
, 0, sizeof(scm
));
2261 siocb
->scm
->creds
= *NETLINK_CREDS(skb
);
2262 if (flags
& MSG_TRUNC
)
2263 copied
= data_skb
->len
;
2265 skb_free_datagram(sk
, skb
);
2267 if (nlk
->cb
&& atomic_read(&sk
->sk_rmem_alloc
) <= sk
->sk_rcvbuf
/ 2) {
2268 ret
= netlink_dump(sk
);
2271 sk
->sk_error_report(sk
);
2275 scm_recv(sock
, msg
, siocb
->scm
, flags
);
2277 netlink_rcv_wake(sk
);
2278 return err
? : copied
;
static void netlink_data_ready(struct sock *sk, int len)
{
	BUG();
}

/*
 *	We export these functions to other modules. They provide a
 *	complete set of kernel non-blocking support for message
 *	queueing.
 */

struct sock *
__netlink_kernel_create(struct net *net, int unit, struct module *module,
			struct netlink_kernel_cfg *cfg)
{
	struct socket *sock;
	struct sock *sk;
	struct netlink_sock *nlk;
	struct listeners *listeners = NULL;
	struct mutex *cb_mutex = cfg ? cfg->cb_mutex : NULL;
	unsigned int groups;

	BUG_ON(!nl_table);

	if (unit < 0 || unit >= MAX_LINKS)
		return NULL;

	if (sock_create_lite(PF_NETLINK, SOCK_DGRAM, unit, &sock))
		return NULL;

	/*
	 * We have to just have a reference on the net from sk, but don't
	 * get_net it. Besides, we cannot get and then put the net here.
	 * So we create one inside init_net and then move it to net.
	 */

	if (__netlink_create(&init_net, sock, cb_mutex, unit) < 0)
		goto out_sock_release_nosk;

	sk = sock->sk;
	sk_change_net(sk, net);

	if (!cfg || cfg->groups < 32)
		groups = 32;
	else
		groups = cfg->groups;

	listeners = kzalloc(sizeof(*listeners) + NLGRPSZ(groups), GFP_KERNEL);
	if (!listeners)
		goto out_sock_release;

	sk->sk_data_ready = netlink_data_ready;
	if (cfg && cfg->input)
		nlk_sk(sk)->netlink_rcv = cfg->input;

	if (netlink_insert(sk, net, 0))
		goto out_sock_release;

	nlk = nlk_sk(sk);
	nlk->flags |= NETLINK_KERNEL_SOCKET;

	netlink_table_grab();
	if (!nl_table[unit].registered) {
		nl_table[unit].groups = groups;
		rcu_assign_pointer(nl_table[unit].listeners, listeners);
		nl_table[unit].cb_mutex = cb_mutex;
		nl_table[unit].module = module;
		if (cfg) {
			nl_table[unit].bind = cfg->bind;
			nl_table[unit].flags = cfg->flags;
		}
		nl_table[unit].registered = 1;
	} else {
		kfree(listeners);
		nl_table[unit].registered++;
	}
	netlink_table_ungrab();
	return sk;

out_sock_release:
	kfree(listeners);
	netlink_kernel_release(sk);
	return NULL;

out_sock_release_nosk:
	sock_release(sock);
	return NULL;
}
EXPORT_SYMBOL(__netlink_kernel_create);

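/*
 * Example: subsystems normally call this through the netlink_kernel_create()
 * wrapper; a sketch for illustration, with a hypothetical protocol number
 * and input callback.
 *
 *	struct netlink_kernel_cfg cfg = {
 *		.groups	= 32,
 *		.input	= example_nl_rcv,
 *	};
 *	struct sock *sk;
 *
 *	sk = netlink_kernel_create(&init_net, NETLINK_EXAMPLE, &cfg);
 *	if (sk == NULL)
 *		return -ENOMEM;
 */
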
void
netlink_kernel_release(struct sock *sk)
{
	sk_release_kernel(sk);
}
EXPORT_SYMBOL(netlink_kernel_release);

int __netlink_change_ngroups(struct sock *sk, unsigned int groups)
{
	struct listeners *new, *old;
	struct netlink_table *tbl = &nl_table[sk->sk_protocol];

	if (groups < 32)
		groups = 32;

	if (NLGRPSZ(tbl->groups) < NLGRPSZ(groups)) {
		new = kzalloc(sizeof(*new) + NLGRPSZ(groups), GFP_ATOMIC);
		if (!new)
			return -ENOMEM;
		old = nl_deref_protected(tbl->listeners);
		memcpy(new->masks, old->masks, NLGRPSZ(tbl->groups));
		rcu_assign_pointer(tbl->listeners, new);

		kfree_rcu(old, rcu);
	}
	tbl->groups = groups;

	return 0;
}

/**
 * netlink_change_ngroups - change number of multicast groups
 *
 * This changes the number of multicast groups that are available
 * on a certain netlink family. Note that it is not possible to
 * change the number of groups to below 32. Also note that it does
 * not implicitly call netlink_clear_multicast_users() when the
 * number of groups is reduced.
 *
 * @sk: The kernel netlink socket, as returned by netlink_kernel_create().
 * @groups: The new number of groups.
 */
int netlink_change_ngroups(struct sock *sk, unsigned int groups)
{
	int err;

	netlink_table_grab();
	err = __netlink_change_ngroups(sk, groups);
	netlink_table_ungrab();

	return err;
}

void __netlink_clear_multicast_users(struct sock *ksk, unsigned int group)
{
	struct sock *sk;
	struct netlink_table *tbl = &nl_table[ksk->sk_protocol];

	sk_for_each_bound(sk, &tbl->mc_list)
		netlink_update_socket_mc(nlk_sk(sk), group, 0);
}

/**
 * netlink_clear_multicast_users - kick off multicast listeners
 *
 * This function removes all listeners from the given group.
 * @ksk: The kernel netlink socket, as returned by
 *	netlink_kernel_create().
 * @group: The multicast group to clear.
 */
void netlink_clear_multicast_users(struct sock *ksk, unsigned int group)
{
	netlink_table_grab();
	__netlink_clear_multicast_users(ksk, group);
	netlink_table_ungrab();
}

struct nlmsghdr *
__nlmsg_put(struct sk_buff *skb, u32 portid, u32 seq, int type, int len, int flags)
{
	struct nlmsghdr *nlh;
	int size = nlmsg_msg_size(len);

	nlh = (struct nlmsghdr *)skb_put(skb, NLMSG_ALIGN(size));
	nlh->nlmsg_type = type;
	nlh->nlmsg_len = size;
	nlh->nlmsg_flags = flags;
	nlh->nlmsg_pid = portid;
	nlh->nlmsg_seq = seq;
	if (!__builtin_constant_p(size) || NLMSG_ALIGN(size) - size != 0)
		memset(nlmsg_data(nlh) + len, 0, NLMSG_ALIGN(size) - size);
	return nlh;
}
EXPORT_SYMBOL(__nlmsg_put);

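/*
 * Example: most callers do not use __nlmsg_put() directly; they build
 * messages with nlmsg_put() and the nla_put_*() helpers from
 * <net/netlink.h>. A sketch for illustration, with hypothetical message
 * type and attribute constants.
 *
 *	nlh = nlmsg_put(skb, portid, seq, EXAMPLE_CMD_NEW, 0, 0);
 *	if (nlh == NULL)
 *		return -EMSGSIZE;
 *	if (nla_put_u32(skb, EXAMPLE_ATTR_ID, id))
 *		return -EMSGSIZE;
 *	nlmsg_end(skb, nlh);
 */
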
/*
 * It looks a bit ugly.
 * It would be better to create kernel thread.
 */

static int netlink_dump(struct sock *sk)
{
	struct netlink_sock *nlk = nlk_sk(sk);
	struct netlink_callback *cb;
	struct sk_buff *skb = NULL;
	struct nlmsghdr *nlh;
	int len, err = -ENOBUFS;
	int alloc_size;

	mutex_lock(nlk->cb_mutex);

	cb = nlk->cb;
	if (cb == NULL) {
		err = -EINVAL;
		goto errout_skb;
	}

	alloc_size = max_t(int, cb->min_dump_alloc, NLMSG_GOODSIZE);

	if (!netlink_rx_is_mmaped(sk) &&
	    atomic_read(&sk->sk_rmem_alloc) >= sk->sk_rcvbuf)
		goto errout_skb;
	skb = netlink_alloc_skb(sk, alloc_size, nlk->portid, GFP_KERNEL);
	if (!skb)
		goto errout_skb;
	netlink_skb_set_owner_r(skb, sk);

	len = cb->dump(skb, cb);

	if (len > 0) {
		mutex_unlock(nlk->cb_mutex);

		if (sk_filter(sk, skb))
			kfree_skb(skb);
		else
			__netlink_sendskb(sk, skb);
		return 0;
	}

	nlh = nlmsg_put_answer(skb, cb, NLMSG_DONE, sizeof(len), NLM_F_MULTI);
	if (!nlh)
		goto errout_skb;

	nl_dump_check_consistent(cb, nlh);

	memcpy(nlmsg_data(nlh), &len, sizeof(len));

	if (sk_filter(sk, skb))
		kfree_skb(skb);
	else
		__netlink_sendskb(sk, skb);

	if (cb->done)
		cb->done(cb);
	nlk->cb = NULL;
	mutex_unlock(nlk->cb_mutex);

	module_put(cb->module);
	netlink_consume_callback(cb);
	return 0;

errout_skb:
	mutex_unlock(nlk->cb_mutex);
	kfree_skb(skb);
	return err;
}

int __netlink_dump_start(struct sock *ssk, struct sk_buff *skb,
			 const struct nlmsghdr *nlh,
			 struct netlink_dump_control *control)
{
	struct netlink_callback *cb;
	struct sock *sk;
	struct netlink_sock *nlk;
	int ret;

	cb = kzalloc(sizeof(*cb), GFP_KERNEL);
	if (cb == NULL)
		return -ENOBUFS;

	/* Memory mapped dump requests need to be copied to avoid looping
	 * on the pending state in netlink_mmap_sendmsg() while the CB hold
	 * a reference to the skb.
	 */
	if (netlink_skb_is_mmaped(skb)) {
		skb = skb_copy(skb, GFP_KERNEL);
		if (skb == NULL) {
			kfree(cb);
			return -ENOBUFS;
		}
	} else
		atomic_inc(&skb->users);

	cb->dump = control->dump;
	cb->done = control->done;
	cb->nlh = nlh;
	cb->data = control->data;
	cb->module = control->module;
	cb->min_dump_alloc = control->min_dump_alloc;
	cb->skb = skb;

	sk = netlink_lookup(sock_net(ssk), ssk->sk_protocol, NETLINK_CB(skb).portid);
	if (sk == NULL) {
		netlink_destroy_callback(cb);
		return -ECONNREFUSED;
	}
	nlk = nlk_sk(sk);

	mutex_lock(nlk->cb_mutex);
	/* A dump is in progress... */
	if (nlk->cb) {
		mutex_unlock(nlk->cb_mutex);
		netlink_destroy_callback(cb);
		ret = -EBUSY;
		goto out;
	}
	/* add reference of module which cb->dump belongs to */
	if (!try_module_get(cb->module)) {
		mutex_unlock(nlk->cb_mutex);
		netlink_destroy_callback(cb);
		ret = -EPROTONOSUPPORT;
		goto out;
	}

	nlk->cb = cb;
	mutex_unlock(nlk->cb_mutex);

	ret = netlink_dump(sk);
out:
	sock_put(sk);

	if (ret)
		return ret;

	/* We successfully started a dump, by returning -EINTR we
	 * signal not to send ACK even if it was requested.
	 */
	return -EINTR;
}
EXPORT_SYMBOL(__netlink_dump_start);
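/*
 * Usage sketch (hypothetical): families normally start a dump from
 * their receive handler, via the netlink_dump_start() wrapper, when a
 * request carries NLM_F_DUMP. "example_dump_cb" is a made-up callback.
 */
static int __maybe_unused example_dump_cb(struct sk_buff *skb,
					  struct netlink_callback *cb)
{
	/* Append zero or more messages to @skb; return a positive value
	 * to be invoked again, or 0 once the dump is complete. */
	return 0;
}

static int __maybe_unused example_start_dump(struct sock *nlsk,
					     struct sk_buff *skb,
					     const struct nlmsghdr *nlh)
{
	struct netlink_dump_control c = {
		.dump = example_dump_cb,
	};

	return netlink_dump_start(nlsk, skb, nlh, &c);
}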
void netlink_ack(struct sk_buff *in_skb, struct nlmsghdr *nlh, int err)
{
	struct sk_buff *skb;
	struct nlmsghdr *rep;
	struct nlmsgerr *errmsg;
	size_t payload = sizeof(*errmsg);

	/* Error messages get the original request appended. */
	if (err)
		payload += nlmsg_len(nlh);

	skb = netlink_alloc_skb(in_skb->sk, nlmsg_total_size(payload),
				NETLINK_CB(in_skb).portid, GFP_KERNEL);
	if (!skb) {
		struct sock *sk;

		sk = netlink_lookup(sock_net(in_skb->sk),
				    in_skb->sk->sk_protocol,
				    NETLINK_CB(in_skb).portid);
		if (sk) {
			sk->sk_err = ENOBUFS;
			sk->sk_error_report(sk);
			sock_put(sk);
		}
		return;
	}

	rep = __nlmsg_put(skb, NETLINK_CB(in_skb).portid, nlh->nlmsg_seq,
			  NLMSG_ERROR, payload, 0);
	errmsg = nlmsg_data(rep);
	errmsg->error = err;
	memcpy(&errmsg->msg, nlh, err ? nlh->nlmsg_len : sizeof(*nlh));
	netlink_unicast(in_skb->sk, skb, NETLINK_CB(in_skb).portid, MSG_DONTWAIT);
}
EXPORT_SYMBOL(netlink_ack);
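/*
 * Usage sketch (hypothetical): a handler that rejects a malformed
 * request explicitly, instead of relying on the generic ack path in
 * netlink_rcv_skb() below.
 */
static void __maybe_unused example_reject_request(struct sk_buff *in_skb,
						  struct nlmsghdr *nlh)
{
	/* Sends an NLMSG_ERROR message carrying -EINVAL plus the
	 * offending request header back to the sender. */
	netlink_ack(in_skb, nlh, -EINVAL);
}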
int netlink_rcv_skb(struct sk_buff *skb, int (*cb)(struct sk_buff *,
						   struct nlmsghdr *))
{
	struct nlmsghdr *nlh;
	int err;

	while (skb->len >= nlmsg_total_size(0)) {
		int msglen;

		nlh = nlmsg_hdr(skb);
		err = 0;

		if (nlh->nlmsg_len < NLMSG_HDRLEN || skb->len < nlh->nlmsg_len)
			return 0;

		/* Only requests are handled by the kernel */
		if (!(nlh->nlmsg_flags & NLM_F_REQUEST))
			goto ack;

		/* Skip control messages */
		if (nlh->nlmsg_type < NLMSG_MIN_TYPE)
			goto ack;

		err = cb(skb, nlh);
		if (err == -EINTR)
			goto skip;

ack:
		if (nlh->nlmsg_flags & NLM_F_ACK || err)
			netlink_ack(skb, nlh, err);

skip:
		msglen = NLMSG_ALIGN(nlh->nlmsg_len);
		if (msglen > skb->len)
			msglen = skb->len;
		skb_pull(skb, msglen);
	}

	return 0;
}
EXPORT_SYMBOL(netlink_rcv_skb);
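/*
 * Usage sketch (hypothetical): the input function of a kernel netlink
 * socket typically just feeds each queued skb to netlink_rcv_skb()
 * with a per-message callback; "example_rcv_msg" is a made-up handler.
 */
static int __maybe_unused example_rcv_msg(struct sk_buff *skb,
					  struct nlmsghdr *nlh)
{
	/* Dispatch on nlh->nlmsg_type here. The return value becomes
	 * the error code carried in the ack; 0 means success. */
	return 0;
}

static void __maybe_unused example_input(struct sk_buff *skb)
{
	netlink_rcv_skb(skb, &example_rcv_msg);
}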
/**
 * nlmsg_notify - send a notification netlink message
 * @sk: netlink socket to use
 * @skb: notification message
 * @portid: destination netlink portid for reports or 0
 * @group: destination multicast group or 0
 * @report: 1 to report back, 0 to disable
 * @flags: allocation flags
 */
int nlmsg_notify(struct sock *sk, struct sk_buff *skb, u32 portid,
		 unsigned int group, int report, gfp_t flags)
{
	int err = 0;

	if (group) {
		int exclude_portid = 0;

		if (report) {
			atomic_inc(&skb->users);
			exclude_portid = portid;
		}

		/* Errors are reported via the destination sk->sk_err, but
		 * delivery errors are propagated if the
		 * NETLINK_BROADCAST_ERROR flag is set. */
		err = nlmsg_multicast(sk, skb, exclude_portid, group, flags);
	}

	if (report) {
		int err2;

		err2 = nlmsg_unicast(sk, skb, portid);
		if (!err || err == -ESRCH)
			err = err2;
	}

	return err;
}
EXPORT_SYMBOL(nlmsg_notify);
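/*
 * Usage sketch (hypothetical), modelled on how rtnetlink reports
 * events: echo the message back to the requester when NLM_F_ECHO was
 * set on the triggering request. The multicast group number 1 is an
 * arbitrary example.
 */
static int __maybe_unused example_notify(struct sock *nlsk,
					 struct sk_buff *skb, u32 portid,
					 struct nlmsghdr *nlh)
{
	int report = nlh ? !!(nlh->nlmsg_flags & NLM_F_ECHO) : 0;

	return nlmsg_notify(nlsk, skb, portid, 1, report, GFP_KERNEL);
}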
#ifdef CONFIG_PROC_FS
struct nl_seq_iter {
	struct seq_net_private p;
	int link;
	int hash_idx;
};

static struct sock *netlink_seq_socket_idx(struct seq_file *seq, loff_t pos)
{
	struct nl_seq_iter *iter = seq->private;
	int i, j;
	struct sock *s;
	loff_t off = 0;

	for (i = 0; i < MAX_LINKS; i++) {
		struct nl_portid_hash *hash = &nl_table[i].hash;

		for (j = 0; j <= hash->mask; j++) {
			sk_for_each(s, &hash->table[j]) {
				if (sock_net(s) != seq_file_net(seq))
					continue;
				if (off == pos) {
					iter->link = i;
					iter->hash_idx = j;
					return s;
				}
				++off;
			}
		}
	}
	return NULL;
}
static void *netlink_seq_start(struct seq_file *seq, loff_t *pos)
	__acquires(nl_table_lock)
{
	read_lock(&nl_table_lock);
	return *pos ? netlink_seq_socket_idx(seq, *pos - 1) : SEQ_START_TOKEN;
}
static void *netlink_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	struct sock *s;
	struct nl_seq_iter *iter;
	int i, j;

	++*pos;

	if (v == SEQ_START_TOKEN)
		return netlink_seq_socket_idx(seq, 0);

	iter = seq->private;
	s = v;
	do {
		s = sk_next(s);
	} while (s && sock_net(s) != seq_file_net(seq));
	if (s)
		return s;

	i = iter->link;
	j = iter->hash_idx + 1;

	do {
		struct nl_portid_hash *hash = &nl_table[i].hash;

		for (; j <= hash->mask; j++) {
			s = sk_head(&hash->table[j]);
			while (s && sock_net(s) != seq_file_net(seq))
				s = sk_next(s);
			if (s) {
				iter->link = i;
				iter->hash_idx = j;
				return s;
			}
		}

		j = 0;
	} while (++i < MAX_LINKS);

	return NULL;
}
static void netlink_seq_stop(struct seq_file *seq, void *v)
	__releases(nl_table_lock)
{
	read_unlock(&nl_table_lock);
}
static int netlink_seq_show(struct seq_file *seq, void *v)
{
	if (v == SEQ_START_TOKEN) {
		seq_puts(seq,
			 "sk       Eth Pid    Groups   "
			 "Rmem     Wmem     Dump     Locks     Drops     Inode\n");
	} else {
		struct sock *s = v;
		struct netlink_sock *nlk = nlk_sk(s);

		seq_printf(seq, "%pK %-3d %-6d %08x %-8d %-8d %pK %-8d %-8d %-8lu\n",
			   s,
			   s->sk_protocol,
			   nlk->portid,
			   nlk->groups ? (u32)nlk->groups[0] : 0,
			   sk_rmem_alloc_get(s),
			   sk_wmem_alloc_get(s),
			   nlk->cb,
			   atomic_read(&s->sk_refcnt),
			   atomic_read(&s->sk_drops),
			   sock_i_ino(s)
			);
	}
	return 0;
}
static const struct seq_operations netlink_seq_ops = {
	.start  = netlink_seq_start,
	.next   = netlink_seq_next,
	.stop   = netlink_seq_stop,
	.show   = netlink_seq_show,
};
static int netlink_seq_open(struct inode *inode, struct file *file)
{
	return seq_open_net(inode, file, &netlink_seq_ops,
			    sizeof(struct nl_seq_iter));
}
static const struct file_operations netlink_seq_fops = {
	.owner		= THIS_MODULE,
	.open		= netlink_seq_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= seq_release_net,
};

#endif
int netlink_register_notifier(struct notifier_block *nb)
{
	return atomic_notifier_chain_register(&netlink_chain, nb);
}
EXPORT_SYMBOL(netlink_register_notifier);

int netlink_unregister_notifier(struct notifier_block *nb)
{
	return atomic_notifier_chain_unregister(&netlink_chain, nb);
}
EXPORT_SYMBOL(netlink_unregister_notifier);
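/*
 * Usage sketch (hypothetical): subscribers on this chain receive
 * NETLINK_URELEASE events when a bound user socket is released.
 * "example_netlink_event" and "example_netlink_nb" are made-up names.
 */
static int __maybe_unused example_netlink_event(struct notifier_block *nb,
						unsigned long event,
						void *ptr)
{
	struct netlink_notify *n = ptr;

	if (event == NETLINK_URELEASE)
		pr_debug("netlink socket released: proto %d portid %d\n",
			 n->protocol, n->portid);
	return NOTIFY_DONE;
}

static struct notifier_block __maybe_unused example_netlink_nb = {
	.notifier_call = example_netlink_event,
};
/* Registered via netlink_register_notifier(&example_netlink_nb); */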
static const struct proto_ops netlink_ops = {
	.family =	PF_NETLINK,
	.owner =	THIS_MODULE,
	.release =	netlink_release,
	.bind =		netlink_bind,
	.connect =	netlink_connect,
	.socketpair =	sock_no_socketpair,
	.accept =	sock_no_accept,
	.getname =	netlink_getname,
	.poll =		netlink_poll,
	.ioctl =	sock_no_ioctl,
	.listen =	sock_no_listen,
	.shutdown =	sock_no_shutdown,
	.setsockopt =	netlink_setsockopt,
	.getsockopt =	netlink_getsockopt,
	.sendmsg =	netlink_sendmsg,
	.recvmsg =	netlink_recvmsg,
	.mmap =		netlink_mmap,
	.sendpage =	sock_no_sendpage,
};
static const struct net_proto_family netlink_family_ops = {
	.family = PF_NETLINK,
	.create = netlink_create,
	.owner	= THIS_MODULE,	/* for consistency 8) */
};
static int __net_init netlink_net_init(struct net *net)
{
#ifdef CONFIG_PROC_FS
	if (!proc_create("netlink", 0, net->proc_net, &netlink_seq_fops))
		return -ENOMEM;
#endif
	return 0;
}
static void __net_exit netlink_net_exit(struct net *net)
{
#ifdef CONFIG_PROC_FS
	remove_proc_entry("netlink", net->proc_net);
#endif
}
static void __init netlink_add_usersock_entry(void)
{
	struct listeners *listeners;
	int groups = 32;

	listeners = kzalloc(sizeof(*listeners) + NLGRPSZ(groups), GFP_KERNEL);
	if (!listeners)
		panic("netlink_add_usersock_entry: Cannot allocate listeners\n");

	netlink_table_grab();

	nl_table[NETLINK_USERSOCK].groups = groups;
	rcu_assign_pointer(nl_table[NETLINK_USERSOCK].listeners, listeners);
	nl_table[NETLINK_USERSOCK].module = THIS_MODULE;
	nl_table[NETLINK_USERSOCK].registered = 1;
	nl_table[NETLINK_USERSOCK].flags = NL_CFG_F_NONROOT_SEND;

	netlink_table_ungrab();
}
static struct pernet_operations __net_initdata netlink_net_ops = {
	.init = netlink_net_init,
	.exit = netlink_net_exit,
};
static int __init netlink_proto_init(void)
{
	int i;
	unsigned long limit;
	unsigned int order;
	int err = proto_register(&netlink_proto, 0);

	if (err != 0)
		goto out;

	BUILD_BUG_ON(sizeof(struct netlink_skb_parms) > FIELD_SIZEOF(struct sk_buff, cb));

	nl_table = kcalloc(MAX_LINKS, sizeof(*nl_table), GFP_KERNEL);
	if (nl_table == NULL)
		goto panic;

	if (totalram_pages >= (128 * 1024))
		limit = totalram_pages >> (21 - PAGE_SHIFT);
	else
		limit = totalram_pages >> (23 - PAGE_SHIFT);

	order = get_bitmask_order(limit) - 1 + PAGE_SHIFT;
	limit = (1UL << order) / sizeof(struct hlist_head);
	order = get_bitmask_order(min(limit, (unsigned long)UINT_MAX)) - 1;

	for (i = 0; i < MAX_LINKS; i++) {
		struct nl_portid_hash *hash = &nl_table[i].hash;

		hash->table = nl_portid_hash_zalloc(1 * sizeof(*hash->table));
		if (!hash->table) {
			while (i-- > 0)
				nl_portid_hash_free(nl_table[i].hash.table,
						    1 * sizeof(*hash->table));
			kfree(nl_table);
			goto panic;
		}
		hash->max_shift = order;
		hash->shift = 0;
		hash->mask = 0;
		hash->rehash_time = jiffies;
	}

	netlink_add_usersock_entry();

	sock_register(&netlink_family_ops);
	register_pernet_subsys(&netlink_net_ops);
	/* The netlink device handler may be needed early. */
	rtnetlink_init();
out:
	return err;
panic:
	panic("netlink_init: Cannot allocate nl_table\n");
}

core_initcall(netlink_proto_init);
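/*
 * Worked example for the hash sizing in netlink_proto_init() above
 * (an illustration only, assuming a 64-bit build with 4 KiB pages,
 * PAGE_SHIFT == 12, and 1 GiB of RAM, i.e. totalram_pages == 256 * 1024):
 *
 *   limit = (256 * 1024) >> (21 - 12)               = 512 pages
 *   order = get_bitmask_order(512) - 1 + 12         = 21  (2 MiB cap)
 *   limit = (1UL << 21) / sizeof(struct hlist_head) = 262144 buckets
 *   order = get_bitmask_order(262144) - 1           = 18
 *
 * so each per-protocol portid hash may grow to at most 2^18 buckets.
 */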