/*
 *	Generic address resolution entity
 *
 *	Authors:
 *	Pedro Roque		<roque@di.fc.ul.pt>
 *	Alexey Kuznetsov	<kuznet@ms2.inr.ac.ru>
 *
 *	This program is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License
 *	as published by the Free Software Foundation; either version
 *	2 of the License, or (at your option) any later version.
 *
 *	Fixes:
 *	Vitaly E. Lavrov	releasing NULL neighbor in neigh_add.
 *	Harald Welte		Add neighbour cache statistics like rtstat
 */
#include <linux/config.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/socket.h>
#include <linux/sched.h>
#include <linux/netdevice.h>
#include <linux/proc_fs.h>
#include <linux/sysctl.h>
#include <linux/times.h>
#include <net/neighbour.h>
#include <linux/rtnetlink.h>
#include <linux/random.h>
#include <linux/string.h>
#define NEIGH_DEBUG 1

#define NEIGH_PRINTK(x...) printk(x)
#define NEIGH_NOPRINTK(x...) do { ; } while(0)
#define NEIGH_PRINTK0 NEIGH_PRINTK
#define NEIGH_PRINTK1 NEIGH_NOPRINTK
#define NEIGH_PRINTK2 NEIGH_NOPRINTK

#if NEIGH_DEBUG >= 1
#undef NEIGH_PRINTK1
#define NEIGH_PRINTK1 NEIGH_PRINTK
#endif
#if NEIGH_DEBUG >= 2
#undef NEIGH_PRINTK2
#define NEIGH_PRINTK2 NEIGH_PRINTK
#endif
#define PNEIGH_HASHMASK		0xF

static void neigh_timer_handler(unsigned long arg);
static void neigh_app_notify(struct neighbour *n);
static int pneigh_ifdown(struct neigh_table *tbl, struct net_device *dev);
void neigh_changeaddr(struct neigh_table *tbl, struct net_device *dev);

static struct neigh_table *neigh_tables;
static struct file_operations neigh_stat_seq_fops;
/*
   Neighbour hash table buckets are protected with rwlock tbl->lock.

   - All the scans/updates to hash buckets MUST be made under this lock.
   - NOTHING clever should be made under this lock: no callbacks
     to protocol backends, no attempts to send something to the network.
     It will result in deadlocks if the backend/driver wants to use the
     neighbour cache.
   - If the entry requires some non-trivial actions, increase
     its reference count and release the table lock.  (See the
     illustrative sketch below.)

   Neighbour entries are protected:
   - with a reference count.
   - with rwlock neigh->lock

   The reference count prevents destruction.

   neigh->lock mainly serializes ll address data and its validity state.
   However, the same lock is used to protect other entry fields:
    - timer
    - resolution queue

   Again, nothing clever shall be done under neigh->lock; the most
   complicated procedure we allow is dev->hard_header.  dev->hard_header
   is assumed to be simplistic and not to make callbacks into the
   neighbour tables.

   The last lock is neigh_tbl_lock.  It is a pure SMP lock, protecting
   the list of neighbour tables.  This list is used only in process
   context.
 */
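/* Illustrative sketch (not part of the original file): the intended way to
 * do non-trivial work on an entry found during a bucket scan is to pin it
 * with a reference and drop tbl->lock before calling into any backend:
 *
 *	read_lock_bh(&tbl->lock);
 *	n = tbl->hash_buckets[hash_val];	...scan under the lock...
 *	neigh_hold(n);				...pin the entry...
 *	read_unlock_bh(&tbl->lock);
 *	n->ops->solicit(n, skb);		...callback only after unlock...
 *	neigh_release(n);
 */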
static DEFINE_RWLOCK(neigh_tbl_lock);

static int neigh_blackhole(struct sk_buff *skb)
{
	kfree_skb(skb);
	return -ENETDOWN;
}
/*
 * It is a random distribution in the interval (1/2)*base...(3/2)*base.
 * It corresponds to the default IPv6 settings and is not overridable,
 * because it is a really reasonable choice.
 */

unsigned long neigh_rand_reach_time(unsigned long base)
{
	return (base ? (net_random() % base) + (base >> 1) : 0);
}
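/* Worked example (illustrative): with base = 30 * HZ, net_random() % base
 * is uniform over [0, 30*HZ) and adding base >> 1 = 15*HZ shifts it into
 * [15*HZ, 45*HZ) -- exactly the (1/2)*base ... (3/2)*base interval above.
 */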
static int neigh_forced_gc(struct neigh_table *tbl)
{
	int shrunk = 0;
	int i;

	NEIGH_CACHE_STAT_INC(tbl, forced_gc_runs);

	write_lock_bh(&tbl->lock);
	for (i = 0; i <= tbl->hash_mask; i++) {
		struct neighbour *n, **np;

		np = &tbl->hash_buckets[i];
		while ((n = *np) != NULL) {
			/* Neighbour record may be discarded if:
			 * - nobody refers to it.
			 * - it is not permanent
			 */
			write_lock(&n->lock);
			if (atomic_read(&n->refcnt) == 1 &&
			    !(n->nud_state & NUD_PERMANENT)) {
				*np	= n->next;
				n->dead = 1;
				shrunk	= 1;
				write_unlock(&n->lock);
				neigh_release(n);
				continue;
			}
			write_unlock(&n->lock);
			np = &n->next;
		}
	}

	tbl->last_flush = jiffies;

	write_unlock_bh(&tbl->lock);

	return shrunk;
}
static int neigh_del_timer(struct neighbour *n)
{
	if ((n->nud_state & NUD_IN_TIMER) &&
	    del_timer(&n->timer)) {
		neigh_release(n);
		return 1;
	}
	return 0;
}

static void pneigh_queue_purge(struct sk_buff_head *list)
{
	struct sk_buff *skb;

	while ((skb = skb_dequeue(list)) != NULL) {
		dev_put(skb->dev);
		kfree_skb(skb);
	}
}
void neigh_changeaddr(struct neigh_table *tbl, struct net_device *dev)
{
	int i;

	write_lock_bh(&tbl->lock);

	for (i = 0; i <= tbl->hash_mask; i++) {
		struct neighbour *n, **np;

		np = &tbl->hash_buckets[i];
		while ((n = *np) != NULL) {
			if (dev && n->dev != dev) {
				np = &n->next;
				continue;
			}
			*np = n->next;
			write_lock_bh(&n->lock);
			n->dead = 1;
			neigh_del_timer(n);
			write_unlock_bh(&n->lock);
			neigh_release(n);
		}
	}

	write_unlock_bh(&tbl->lock);
}
int neigh_ifdown(struct neigh_table *tbl, struct net_device *dev)
{
	int i;

	write_lock_bh(&tbl->lock);

	for (i = 0; i <= tbl->hash_mask; i++) {
		struct neighbour *n, **np = &tbl->hash_buckets[i];

		while ((n = *np) != NULL) {
			if (dev && n->dev != dev) {
				np = &n->next;
				continue;
			}
			*np = n->next;
			write_lock(&n->lock);
			neigh_del_timer(n);
			n->dead = 1;

			if (atomic_read(&n->refcnt) != 1) {
				/* The most unpleasant situation.
				   We must destroy the neighbour entry,
				   but someone still uses it.

				   The destroy will be delayed until
				   the last user releases us, but
				   we must kill timers etc. and move
				   it to a safe state.
				 */
				skb_queue_purge(&n->arp_queue);
				n->output = neigh_blackhole;
				if (n->nud_state & NUD_VALID)
					n->nud_state = NUD_NOARP;
				else
					n->nud_state = NUD_NONE;
				NEIGH_PRINTK2("neigh %p is stray.\n", n);
			}
			write_unlock(&n->lock);
			neigh_release(n);
		}
	}

	pneigh_ifdown(tbl, dev);
	write_unlock_bh(&tbl->lock);

	del_timer_sync(&tbl->proxy_timer);
	pneigh_queue_purge(&tbl->proxy_queue);
	return 0;
}
static struct neighbour *neigh_alloc(struct neigh_table *tbl)
{
	struct neighbour *n = NULL;
	unsigned long now = jiffies;
	int entries;

	entries = atomic_inc_return(&tbl->entries) - 1;
	if (entries >= tbl->gc_thresh3 ||
	    (entries >= tbl->gc_thresh2 &&
	     time_after(now, tbl->last_flush + 5 * HZ))) {
		if (!neigh_forced_gc(tbl) &&
		    entries >= tbl->gc_thresh3)
			goto out_entries;
	}

	n = kmem_cache_alloc(tbl->kmem_cachep, SLAB_ATOMIC);
	if (!n)
		goto out_entries;

	memset(n, 0, tbl->entry_size);

	skb_queue_head_init(&n->arp_queue);
	rwlock_init(&n->lock);
	n->updated	  = n->used = now;
	n->nud_state	  = NUD_NONE;
	n->output	  = neigh_blackhole;
	n->parms	  = neigh_parms_clone(&tbl->parms);
	init_timer(&n->timer);
	n->timer.function = neigh_timer_handler;
	n->timer.data	  = (unsigned long)n;

	NEIGH_CACHE_STAT_INC(tbl, allocs);

	atomic_set(&n->refcnt, 1);
	n->dead		  = 1;
out:
	return n;

out_entries:
	atomic_dec(&tbl->entries);
	goto out;
}
static struct neighbour **neigh_hash_alloc(unsigned int entries)
{
	unsigned long size = entries * sizeof(struct neighbour *);
	struct neighbour **ret;

	if (size <= PAGE_SIZE) {
		ret = kmalloc(size, GFP_ATOMIC);
	} else {
		ret = (struct neighbour **)
		      __get_free_pages(GFP_ATOMIC, get_order(size));
	}
	if (ret)
		memset(ret, 0, size);

	return ret;
}

static void neigh_hash_free(struct neighbour **hash, unsigned int entries)
{
	unsigned long size = entries * sizeof(struct neighbour *);

	if (size <= PAGE_SIZE)
		kfree(hash);
	else
		free_pages((unsigned long)hash, get_order(size));
}
static void neigh_hash_grow(struct neigh_table *tbl, unsigned long new_entries)
{
	struct neighbour **new_hash, **old_hash;
	unsigned int i, new_hash_mask, old_entries;

	NEIGH_CACHE_STAT_INC(tbl, hash_grows);

	BUG_ON(new_entries & (new_entries - 1));
	new_hash = neigh_hash_alloc(new_entries);
	if (!new_hash)
		return;

	old_entries = tbl->hash_mask + 1;
	new_hash_mask = new_entries - 1;
	old_hash = tbl->hash_buckets;

	get_random_bytes(&tbl->hash_rnd, sizeof(tbl->hash_rnd));
	for (i = 0; i < old_entries; i++) {
		struct neighbour *n, *next;

		for (n = old_hash[i]; n; n = next) {
			unsigned int hash_val = tbl->hash(n->primary_key, n->dev);

			hash_val &= new_hash_mask;
			next = n->next;

			n->next = new_hash[hash_val];
			new_hash[hash_val] = n;
		}
	}
	tbl->hash_buckets = new_hash;
	tbl->hash_mask = new_hash_mask;

	neigh_hash_free(old_hash, old_entries);
}
struct neighbour *neigh_lookup(struct neigh_table *tbl, const void *pkey,
			       struct net_device *dev)
{
	struct neighbour *n;
	int key_len = tbl->key_len;
	u32 hash_val = tbl->hash(pkey, dev) & tbl->hash_mask;

	NEIGH_CACHE_STAT_INC(tbl, lookups);

	read_lock_bh(&tbl->lock);
	for (n = tbl->hash_buckets[hash_val]; n; n = n->next) {
		if (dev == n->dev && !memcmp(n->primary_key, pkey, key_len)) {
			neigh_hold(n);
			NEIGH_CACHE_STAT_INC(tbl, hits);
			break;
		}
	}
	read_unlock_bh(&tbl->lock);
	return n;
}
struct neighbour *neigh_lookup_nodev(struct neigh_table *tbl, const void *pkey)
{
	struct neighbour *n;
	int key_len = tbl->key_len;
	u32 hash_val = tbl->hash(pkey, NULL) & tbl->hash_mask;

	NEIGH_CACHE_STAT_INC(tbl, lookups);

	read_lock_bh(&tbl->lock);
	for (n = tbl->hash_buckets[hash_val]; n; n = n->next) {
		if (!memcmp(n->primary_key, pkey, key_len)) {
			neigh_hold(n);
			NEIGH_CACHE_STAT_INC(tbl, hits);
			break;
		}
	}
	read_unlock_bh(&tbl->lock);
	return n;
}
struct neighbour *neigh_create(struct neigh_table *tbl, const void *pkey,
			       struct net_device *dev)
{
	u32 hash_val;
	int key_len = tbl->key_len;
	int error;
	struct neighbour *n1, *rc, *n = neigh_alloc(tbl);

	if (!n) {
		rc = ERR_PTR(-ENOBUFS);
		goto out;
	}

	memcpy(n->primary_key, pkey, key_len);
	n->dev = dev;
	dev_hold(dev);

	/* Protocol specific setup. */
	if (tbl->constructor && (error = tbl->constructor(n)) < 0) {
		rc = ERR_PTR(error);
		goto out_neigh_release;
	}

	/* Device specific setup. */
	if (n->parms->neigh_setup &&
	    (error = n->parms->neigh_setup(n)) < 0) {
		rc = ERR_PTR(error);
		goto out_neigh_release;
	}

	n->confirmed = jiffies - (n->parms->base_reachable_time << 1);

	write_lock_bh(&tbl->lock);

	if (atomic_read(&tbl->entries) > (tbl->hash_mask + 1))
		neigh_hash_grow(tbl, (tbl->hash_mask + 1) << 1);

	hash_val = tbl->hash(pkey, dev) & tbl->hash_mask;

	if (n->parms->dead) {
		rc = ERR_PTR(-EINVAL);
		goto out_tbl_unlock;
	}

	for (n1 = tbl->hash_buckets[hash_val]; n1; n1 = n1->next) {
		if (dev == n1->dev && !memcmp(n1->primary_key, pkey, key_len)) {
			neigh_hold(n1);
			rc = n1;
			goto out_tbl_unlock;
		}
	}

	n->next = tbl->hash_buckets[hash_val];
	tbl->hash_buckets[hash_val] = n;
	n->dead = 0;
	neigh_hold(n);
	write_unlock_bh(&tbl->lock);
	NEIGH_PRINTK2("neigh %p is created.\n", n);
	rc = n;
out:
	return rc;
out_tbl_unlock:
	write_unlock_bh(&tbl->lock);
out_neigh_release:
	neigh_release(n);
	goto out;
}
struct pneigh_entry * pneigh_lookup(struct neigh_table *tbl, const void *pkey,
				    struct net_device *dev, int creat)
{
	struct pneigh_entry *n;
	int key_len = tbl->key_len;
	u32 hash_val = *(u32 *)(pkey + key_len - 4);

	hash_val ^= (hash_val >> 16);
	hash_val ^= hash_val >> 8;
	hash_val ^= hash_val >> 4;
	hash_val &= PNEIGH_HASHMASK;
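	/* Illustrative note (not in the original source): the three xor
	 * shifts fold all 32 bits of the last four key bytes into the low
	 * nibble, so the final mask with PNEIGH_HASHMASK (0xF) picks one
	 * of 16 proxy hash buckets with every key byte contributing.
	 */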
	read_lock_bh(&tbl->lock);

	for (n = tbl->phash_buckets[hash_val]; n; n = n->next) {
		if (!memcmp(n->key, pkey, key_len) &&
		    (n->dev == dev || !n->dev)) {
			read_unlock_bh(&tbl->lock);
			goto out;
		}
	}
	read_unlock_bh(&tbl->lock);
	n = NULL;
	if (!creat)
		goto out;

	n = kmalloc(sizeof(*n) + key_len, GFP_KERNEL);
	if (!n)
		goto out;

	memcpy(n->key, pkey, key_len);
	n->dev = dev;
	if (dev)
		dev_hold(dev);

	if (tbl->pconstructor && tbl->pconstructor(n)) {
		if (dev)
			dev_put(dev);
		kfree(n);
		n = NULL;
		goto out;
	}

	write_lock_bh(&tbl->lock);
	n->next = tbl->phash_buckets[hash_val];
	tbl->phash_buckets[hash_val] = n;
	write_unlock_bh(&tbl->lock);
out:
	return n;
}
int pneigh_delete(struct neigh_table *tbl, const void *pkey,
		  struct net_device *dev)
{
	struct pneigh_entry *n, **np;
	int key_len = tbl->key_len;
	u32 hash_val = *(u32 *)(pkey + key_len - 4);

	hash_val ^= (hash_val >> 16);
	hash_val ^= hash_val >> 8;
	hash_val ^= hash_val >> 4;
	hash_val &= PNEIGH_HASHMASK;

	write_lock_bh(&tbl->lock);
	for (np = &tbl->phash_buckets[hash_val]; (n = *np) != NULL;
	     np = &n->next) {
		if (!memcmp(n->key, pkey, key_len) && n->dev == dev) {
			*np = n->next;
			write_unlock_bh(&tbl->lock);
			if (tbl->pdestructor)
				tbl->pdestructor(n);
			if (n->dev)
				dev_put(n->dev);
			kfree(n);
			return 0;
		}
	}
	write_unlock_bh(&tbl->lock);
	return -ENOENT;
}
static int pneigh_ifdown(struct neigh_table *tbl, struct net_device *dev)
{
	struct pneigh_entry *n, **np;
	u32 h;

	for (h = 0; h <= PNEIGH_HASHMASK; h++) {
		np = &tbl->phash_buckets[h];
		while ((n = *np) != NULL) {
			if (!dev || n->dev == dev) {
				*np = n->next;
				if (tbl->pdestructor)
					tbl->pdestructor(n);
				if (n->dev)
					dev_put(n->dev);
				kfree(n);
				continue;
			}
			np = &n->next;
		}
	}
	return -ENOENT;
}
/*
 *	neighbour must already be out of the table;
 */
void neigh_destroy(struct neighbour *neigh)
{
	struct hh_cache *hh;

	NEIGH_CACHE_STAT_INC(neigh->tbl, destroys);

	if (!neigh->dead) {
		printk(KERN_WARNING
		       "Destroying alive neighbour %p\n", neigh);
		dump_stack();
		return;
	}

	if (neigh_del_timer(neigh))
		printk(KERN_WARNING "Impossible event.\n");

	while ((hh = neigh->hh) != NULL) {
		neigh->hh = hh->hh_next;
		hh->hh_next = NULL;
		write_lock_bh(&hh->hh_lock);
		hh->hh_output = neigh_blackhole;
		write_unlock_bh(&hh->hh_lock);
		if (atomic_dec_and_test(&hh->hh_refcnt))
			kfree(hh);
	}

	if (neigh->ops && neigh->ops->destructor)
		(neigh->ops->destructor)(neigh);

	skb_queue_purge(&neigh->arp_queue);

	dev_put(neigh->dev);
	neigh_parms_put(neigh->parms);

	NEIGH_PRINTK2("neigh %p is destroyed.\n", neigh);

	atomic_dec(&neigh->tbl->entries);
	kmem_cache_free(neigh->tbl->kmem_cachep, neigh);
}
/* Neighbour state is suspicious;
   disable fast path.

   Called with write_locked neigh.
 */
static void neigh_suspect(struct neighbour *neigh)
{
	struct hh_cache *hh;

	NEIGH_PRINTK2("neigh %p is suspected.\n", neigh);

	neigh->output = neigh->ops->output;

	for (hh = neigh->hh; hh; hh = hh->hh_next)
		hh->hh_output = neigh->ops->output;
}

/* Neighbour state is OK;
   enable fast path.

   Called with write_locked neigh.
 */
static void neigh_connect(struct neighbour *neigh)
{
	struct hh_cache *hh;

	NEIGH_PRINTK2("neigh %p is connected.\n", neigh);

	neigh->output = neigh->ops->connected_output;

	for (hh = neigh->hh; hh; hh = hh->hh_next)
		hh->hh_output = neigh->ops->hh_output;
}
static void neigh_periodic_timer(unsigned long arg)
{
	struct neigh_table *tbl = (struct neigh_table *)arg;
	struct neighbour *n, **np;
	unsigned long expire, now = jiffies;

	NEIGH_CACHE_STAT_INC(tbl, periodic_gc_runs);

	write_lock(&tbl->lock);

	/*
	 *	periodically recompute ReachableTime from random function
	 */

	if (time_after(now, tbl->last_rand + 300 * HZ)) {
		struct neigh_parms *p;
		tbl->last_rand = now;
		for (p = &tbl->parms; p; p = p->next)
			p->reachable_time =
				neigh_rand_reach_time(p->base_reachable_time);
	}

	np = &tbl->hash_buckets[tbl->hash_chain_gc];
	tbl->hash_chain_gc = ((tbl->hash_chain_gc + 1) & tbl->hash_mask);

	while ((n = *np) != NULL) {
		unsigned int state;

		write_lock(&n->lock);

		state = n->nud_state;
		if (state & (NUD_PERMANENT | NUD_IN_TIMER)) {
			write_unlock(&n->lock);
			goto next_elt;
		}

		if (time_before(n->used, n->confirmed))
			n->used = n->confirmed;

		if (atomic_read(&n->refcnt) == 1 &&
		    (state == NUD_FAILED ||
		     time_after(now, n->used + n->parms->gc_staletime))) {
			*np = n->next;
			n->dead = 1;
			write_unlock(&n->lock);
			neigh_release(n);
			continue;
		}
		write_unlock(&n->lock);

next_elt:
		np = &n->next;
	}

	/* Cycle through all hash buckets every base_reachable_time/2 ticks.
	 * ARP entry timeouts range from 1/2 base_reachable_time to 3/2
	 * base_reachable_time.
	 */
	expire = tbl->parms.base_reachable_time >> 1;
	expire /= (tbl->hash_mask + 1);
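	/* Worked example (illustrative): with base_reachable_time = 30 * HZ
	 * and 128 hash buckets, expire = (30 * HZ / 2) / 128, i.e. the timer
	 * fires roughly every 117 ms at HZ=1000 and a full sweep of the
	 * table takes about 15 seconds.
	 */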
	mod_timer(&tbl->gc_timer, now + expire);

	write_unlock(&tbl->lock);
}
static __inline__ int neigh_max_probes(struct neighbour *n)
{
	struct neigh_parms *p = n->parms;
	return (n->nud_state & NUD_PROBE ?
		p->ucast_probes :
		p->ucast_probes + p->app_probes + p->mcast_probes);
}
static inline void neigh_add_timer(struct neighbour *n, unsigned long when)
{
	if (unlikely(mod_timer(&n->timer, when))) {
		printk("NEIGH: BUG, double timer add, state is %x\n",
		       n->nud_state);
		dump_stack();
	}
}
/* Called when a timer expires for a neighbour entry. */

static void neigh_timer_handler(unsigned long arg)
{
	unsigned long now, next;
	struct neighbour *neigh = (struct neighbour *)arg;
	unsigned state;
	int notify = 0;

	write_lock(&neigh->lock);

	state = neigh->nud_state;
	now = jiffies;
	next = now + HZ;

	if (!(state & NUD_IN_TIMER)) {
		printk(KERN_WARNING "neigh: timer & !nud_in_timer\n");
		goto out;
	}

	if (state & NUD_REACHABLE) {
		if (time_before_eq(now,
				   neigh->confirmed + neigh->parms->reachable_time)) {
			NEIGH_PRINTK2("neigh %p is still alive.\n", neigh);
			next = neigh->confirmed + neigh->parms->reachable_time;
		} else if (time_before_eq(now,
					  neigh->used + neigh->parms->delay_probe_time)) {
			NEIGH_PRINTK2("neigh %p is delayed.\n", neigh);
			neigh->nud_state = NUD_DELAY;
			neigh_suspect(neigh);
			next = now + neigh->parms->delay_probe_time;
		} else {
			NEIGH_PRINTK2("neigh %p is suspected.\n", neigh);
			neigh->nud_state = NUD_STALE;
			neigh_suspect(neigh);
		}
	} else if (state & NUD_DELAY) {
		if (time_before_eq(now,
				   neigh->confirmed + neigh->parms->delay_probe_time)) {
			NEIGH_PRINTK2("neigh %p is now reachable.\n", neigh);
			neigh->nud_state = NUD_REACHABLE;
			neigh_connect(neigh);
			next = neigh->confirmed + neigh->parms->reachable_time;
		} else {
			NEIGH_PRINTK2("neigh %p is probed.\n", neigh);
			neigh->nud_state = NUD_PROBE;
			atomic_set(&neigh->probes, 0);
			next = now + neigh->parms->retrans_time;
		}
	} else {
		/* NUD_PROBE|NUD_INCOMPLETE */
		next = now + neigh->parms->retrans_time;
	}

	if ((neigh->nud_state & (NUD_INCOMPLETE | NUD_PROBE)) &&
	    atomic_read(&neigh->probes) >= neigh_max_probes(neigh)) {
		struct sk_buff *skb;

		neigh->nud_state = NUD_FAILED;
		notify = 1;
		NEIGH_CACHE_STAT_INC(neigh->tbl, res_failed);
		NEIGH_PRINTK2("neigh %p is failed.\n", neigh);

		/* This is a very thin place.  report_unreachable is a very
		   complicated routine; in particular, it can hit the same
		   neighbour entry!

		   So we try to be careful and avoid a dead loop. --ANK
		 */
		while (neigh->nud_state == NUD_FAILED &&
		       (skb = __skb_dequeue(&neigh->arp_queue)) != NULL) {
			write_unlock(&neigh->lock);
			neigh->ops->error_report(neigh, skb);
			write_lock(&neigh->lock);
		}
		skb_queue_purge(&neigh->arp_queue);
	}

	if (neigh->nud_state & NUD_IN_TIMER) {
		neigh_hold(neigh);
		if (time_before(next, jiffies + HZ/2))
			next = jiffies + HZ/2;
		neigh_add_timer(neigh, next);
	}
	if (neigh->nud_state & (NUD_INCOMPLETE | NUD_PROBE)) {
		struct sk_buff *skb = skb_peek(&neigh->arp_queue);
		/* keep skb alive even if arp_queue overflows */
		if (skb)
			skb_get(skb);
		write_unlock(&neigh->lock);
		neigh->ops->solicit(neigh, skb);
		atomic_inc(&neigh->probes);
		if (skb)
			kfree_skb(skb);
	} else {
out:
		write_unlock(&neigh->lock);
	}

	if (notify && neigh->parms->app_probes)
		neigh_app_notify(neigh);

	neigh_release(neigh);
}
int __neigh_event_send(struct neighbour *neigh, struct sk_buff *skb)
{
	int rc;
	unsigned long now;

	write_lock_bh(&neigh->lock);

	rc = 0;
	if (neigh->nud_state & (NUD_CONNECTED | NUD_DELAY | NUD_PROBE))
		goto out_unlock_bh;

	now = jiffies;

	if (!(neigh->nud_state & (NUD_STALE | NUD_INCOMPLETE))) {
		if (neigh->parms->mcast_probes + neigh->parms->app_probes) {
			atomic_set(&neigh->probes, neigh->parms->ucast_probes);
			neigh->nud_state = NUD_INCOMPLETE;
			neigh_hold(neigh);
			neigh_add_timer(neigh, now + 1);
		} else {
			neigh->nud_state = NUD_FAILED;
			write_unlock_bh(&neigh->lock);

			if (skb)
				kfree_skb(skb);
			return 1;
		}
	} else if (neigh->nud_state & NUD_STALE) {
		NEIGH_PRINTK2("neigh %p is delayed.\n", neigh);
		neigh_hold(neigh);
		neigh->nud_state = NUD_DELAY;
		neigh_add_timer(neigh,
				jiffies + neigh->parms->delay_probe_time);
	}

	if (neigh->nud_state == NUD_INCOMPLETE) {
		if (skb) {
			if (skb_queue_len(&neigh->arp_queue) >=
			    neigh->parms->queue_len) {
				struct sk_buff *buff;
				buff = neigh->arp_queue.next;
				__skb_unlink(buff, &neigh->arp_queue);
				kfree_skb(buff);
			}
			__skb_queue_tail(&neigh->arp_queue, skb);
		}
		rc = 1;
	}
out_unlock_bh:
	write_unlock_bh(&neigh->lock);
	return rc;
}
static __inline__ void neigh_update_hhs(struct neighbour *neigh)
{
	struct hh_cache *hh;
	void (*update)(struct hh_cache *, struct net_device *, unsigned char *) =
		neigh->dev->header_cache_update;

	if (update) {
		for (hh = neigh->hh; hh; hh = hh->hh_next) {
			write_lock_bh(&hh->hh_lock);
			update(hh, neigh->dev, neigh->ha);
			write_unlock_bh(&hh->hh_lock);
		}
	}
}
/* Generic update routine.
   -- lladdr is the new lladdr, or NULL if it is not supplied.
   -- new    is the new state.
   -- flags
	NEIGH_UPDATE_F_OVERRIDE allows overriding an existing lladdr
				if it is different.
	NEIGH_UPDATE_F_WEAK_OVERRIDE will suspect an existing "connected"
				lladdr instead of overriding it
				if it is different.
				It also allows the current state to be
				retained if lladdr is unchanged.
	NEIGH_UPDATE_F_ADMIN	means that the change is administrative.
	NEIGH_UPDATE_F_OVERRIDE_ISROUTER allows overriding an existing
				NTF_ROUTER flag.
	NEIGH_UPDATE_F_ISROUTER	indicates whether the neighbour is known
				to be a router.

   Caller MUST hold a reference count on the entry.
 */
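/* Illustrative usage (assumed, not from the original file): a received ARP
 * reply for a known entry would typically be fed in as
 *
 *	neigh_update(neigh, lladdr, NUD_REACHABLE,
 *		     NEIGH_UPDATE_F_WEAK_OVERRIDE | NEIGH_UPDATE_F_OVERRIDE);
 *
 * while an administrative change (e.g. via netlink) additionally passes
 * NEIGH_UPDATE_F_ADMIN so that NUD_PERMANENT/NUD_NOARP entries may be
 * modified as well.
 */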
int neigh_update(struct neighbour *neigh, const u8 *lladdr, u8 new,
		 u32 flags)
{
	u8 old;
	int err;
	int notify = 0;
	struct net_device *dev;
	int update_isrouter = 0;

	write_lock_bh(&neigh->lock);

	dev    = neigh->dev;
	old    = neigh->nud_state;
	err    = -EPERM;

	if (!(flags & NEIGH_UPDATE_F_ADMIN) &&
	    (old & (NUD_NOARP | NUD_PERMANENT)))
		goto out;

	if (!(new & NUD_VALID)) {
		neigh_del_timer(neigh);
		if (old & NUD_CONNECTED)
			neigh_suspect(neigh);
		neigh->nud_state = new;
		err = 0;
		notify = old & NUD_VALID;
		goto out;
	}

	/* Compare new lladdr with cached one */
	if (!dev->addr_len) {
		/* First case: device needs no address. */
		lladdr = neigh->ha;
	} else if (lladdr) {
		/* The second case: if something is already cached
		   and a new address is proposed:
		   - compare new & old
		   - if they are different, check override flag
		 */
		if ((old & NUD_VALID) &&
		    !memcmp(lladdr, neigh->ha, dev->addr_len))
			lladdr = neigh->ha;
	} else {
		/* No address is supplied; if we know something,
		   use it, otherwise discard the request.
		 */
		err = -EINVAL;
		if (!(old & NUD_VALID))
			goto out;
		lladdr = neigh->ha;
	}

	if (new & NUD_CONNECTED)
		neigh->confirmed = jiffies;
	neigh->updated = jiffies;

	/* If entry was valid and address is not changed,
	   do not change entry state, if new one is STALE.
	 */
	err = 0;
	update_isrouter = flags & NEIGH_UPDATE_F_OVERRIDE_ISROUTER;
	if (old & NUD_VALID) {
		if (lladdr != neigh->ha && !(flags & NEIGH_UPDATE_F_OVERRIDE)) {
			update_isrouter = 0;
			if ((flags & NEIGH_UPDATE_F_WEAK_OVERRIDE) &&
			    (old & NUD_CONNECTED)) {
				lladdr = neigh->ha;
				new = NUD_STALE;
			} else
				goto out;
		} else {
			if (lladdr == neigh->ha && new == NUD_STALE &&
			    ((flags & NEIGH_UPDATE_F_WEAK_OVERRIDE) ||
			     (old & NUD_CONNECTED)))
				new = old;
		}
	}

	if (new != old) {
		neigh_del_timer(neigh);
		if (new & NUD_IN_TIMER) {
			neigh_hold(neigh);
			neigh_add_timer(neigh, (jiffies +
						((new & NUD_REACHABLE) ?
						 neigh->parms->reachable_time :
						 0)));
		}
		neigh->nud_state = new;
	}

	if (lladdr != neigh->ha) {
		memcpy(&neigh->ha, lladdr, dev->addr_len);
		neigh_update_hhs(neigh);
		if (!(new & NUD_CONNECTED))
			neigh->confirmed = jiffies -
				      (neigh->parms->base_reachable_time << 1);
		notify = 1;
	}
	if (new == old)
		goto out;
	if (new & NUD_CONNECTED)
		neigh_connect(neigh);
	else
		neigh_suspect(neigh);
	if (!(old & NUD_VALID)) {
		struct sk_buff *skb;

		/* Again: avoid dead loop if something went wrong */

		while (neigh->nud_state & NUD_VALID &&
		       (skb = __skb_dequeue(&neigh->arp_queue)) != NULL) {
			struct neighbour *n1 = neigh;
			write_unlock_bh(&neigh->lock);
			/* On shaper/eql skb->dst->neighbour != neigh :( */
			if (skb->dst && skb->dst->neighbour)
				n1 = skb->dst->neighbour;
			n1->output(skb);
			write_lock_bh(&neigh->lock);
		}
		skb_queue_purge(&neigh->arp_queue);
	}
out:
	if (update_isrouter) {
		neigh->flags = (flags & NEIGH_UPDATE_F_ISROUTER) ?
			(neigh->flags | NTF_ROUTER) :
			(neigh->flags & ~NTF_ROUTER);
	}
	write_unlock_bh(&neigh->lock);

	if (notify && neigh->parms->app_probes)
		neigh_app_notify(neigh);

	return err;
}
struct neighbour *neigh_event_ns(struct neigh_table *tbl,
				 u8 *lladdr, void *saddr,
				 struct net_device *dev)
{
	struct neighbour *neigh = __neigh_lookup(tbl, saddr, dev,
						 lladdr || !dev->addr_len);
	if (neigh)
		neigh_update(neigh, lladdr, NUD_STALE,
			     NEIGH_UPDATE_F_OVERRIDE);
	return neigh;
}
static void neigh_hh_init(struct neighbour *n, struct dst_entry *dst,
			  u16 protocol)
{
	struct hh_cache	*hh;
	struct net_device *dev = dst->dev;

	for (hh = n->hh; hh; hh = hh->hh_next)
		if (hh->hh_type == protocol)
			break;

	if (!hh && (hh = kmalloc(sizeof(*hh), GFP_ATOMIC)) != NULL) {
		memset(hh, 0, sizeof(struct hh_cache));
		rwlock_init(&hh->hh_lock);
		hh->hh_type = protocol;
		atomic_set(&hh->hh_refcnt, 0);
		hh->hh_next = NULL;
		if (dev->hard_header_cache(n, hh)) {
			kfree(hh);
			hh = NULL;
		} else {
			atomic_inc(&hh->hh_refcnt);
			hh->hh_next = n->hh;
			n->hh	    = hh;
			if (n->nud_state & NUD_CONNECTED)
				hh->hh_output = n->ops->hh_output;
			else
				hh->hh_output = n->ops->output;
		}
	}
	if (hh) {
		atomic_inc(&hh->hh_refcnt);
		dst->hh = hh;
	}
}
/* This function can be used in contexts where only the old dev_queue_xmit
   worked, e.g. if you want to override the normal output path (eql, shaper),
   but resolution has not been made yet.
 */
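/* Illustrative usage (assumed, not from the original file):
 *
 *	dst->output = neigh_compat_output;
 *
 * frames then get their link-layer header built via dev->hard_header(),
 * or repaired via dev->rebuild_header(), before the real dev_queue_xmit().
 */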
int neigh_compat_output(struct sk_buff *skb)
{
	struct net_device *dev = skb->dev;

	__skb_pull(skb, skb->nh.raw - skb->data);

	if (dev->hard_header &&
	    dev->hard_header(skb, dev, ntohs(skb->protocol), NULL, NULL,
			     skb->len) < 0 &&
	    dev->rebuild_header(skb))
		return 0;

	return dev_queue_xmit(skb);
}
/* Slow and careful. */

int neigh_resolve_output(struct sk_buff *skb)
{
	struct dst_entry *dst = skb->dst;
	struct neighbour *neigh;
	int rc = 0;

	if (!dst || !(neigh = dst->neighbour))
		goto discard;

	__skb_pull(skb, skb->nh.raw - skb->data);

	if (!neigh_event_send(neigh, skb)) {
		int err;
		struct net_device *dev = neigh->dev;
		if (dev->hard_header_cache && !dst->hh) {
			write_lock_bh(&neigh->lock);
			if (!dst->hh)
				neigh_hh_init(neigh, dst, dst->ops->protocol);
			err = dev->hard_header(skb, dev, ntohs(skb->protocol),
					       neigh->ha, NULL, skb->len);
			write_unlock_bh(&neigh->lock);
		} else {
			read_lock_bh(&neigh->lock);
			err = dev->hard_header(skb, dev, ntohs(skb->protocol),
					       neigh->ha, NULL, skb->len);
			read_unlock_bh(&neigh->lock);
		}
		if (err >= 0)
			rc = neigh->ops->queue_xmit(skb);
		else
			goto out_kfree_skb;
	}
out:
	return rc;
discard:
	NEIGH_PRINTK1("neigh_resolve_output: dst=%p neigh=%p\n",
		      dst, dst ? dst->neighbour : NULL);
out_kfree_skb:
	rc = -EINVAL;
	kfree_skb(skb);
	goto out;
}
/* As fast as possible without hh cache */

int neigh_connected_output(struct sk_buff *skb)
{
	int err;
	struct dst_entry *dst = skb->dst;
	struct neighbour *neigh = dst->neighbour;
	struct net_device *dev = neigh->dev;

	__skb_pull(skb, skb->nh.raw - skb->data);

	read_lock_bh(&neigh->lock);
	err = dev->hard_header(skb, dev, ntohs(skb->protocol),
			       neigh->ha, NULL, skb->len);
	read_unlock_bh(&neigh->lock);
	if (err >= 0)
		err = neigh->ops->queue_xmit(skb);
	else {
		err = -EINVAL;
		kfree_skb(skb);
	}
	return err;
}
static void neigh_proxy_process(unsigned long arg)
{
	struct neigh_table *tbl = (struct neigh_table *)arg;
	long sched_next = 0;
	unsigned long now = jiffies;
	struct sk_buff *skb;

	spin_lock(&tbl->proxy_queue.lock);

	skb = tbl->proxy_queue.next;

	while (skb != (struct sk_buff *)&tbl->proxy_queue) {
		struct sk_buff *back = skb;
		long tdif = NEIGH_CB(back)->sched_next - now;

		skb = skb->next;
		if (tdif <= 0) {
			struct net_device *dev = back->dev;
			__skb_unlink(back, &tbl->proxy_queue);
			if (tbl->proxy_redo && netif_running(dev))
				tbl->proxy_redo(back);
			else
				kfree_skb(back);

			dev_put(dev);
		} else if (!sched_next || tdif < sched_next)
			sched_next = tdif;
	}
	del_timer(&tbl->proxy_timer);
	if (sched_next)
		mod_timer(&tbl->proxy_timer, jiffies + sched_next);
	spin_unlock(&tbl->proxy_queue.lock);
}
void pneigh_enqueue(struct neigh_table *tbl, struct neigh_parms *p,
		    struct sk_buff *skb)
{
	unsigned long now = jiffies;
	unsigned long sched_next = now + (net_random() % p->proxy_delay);

	if (tbl->proxy_queue.qlen > p->proxy_qlen) {
		kfree_skb(skb);
		return;
	}

	NEIGH_CB(skb)->sched_next = sched_next;
	NEIGH_CB(skb)->flags |= LOCALLY_ENQUEUED;

	spin_lock(&tbl->proxy_queue.lock);
	if (del_timer(&tbl->proxy_timer)) {
		if (time_before(tbl->proxy_timer.expires, sched_next))
			sched_next = tbl->proxy_timer.expires;
	}
	dst_release(skb->dst);
	skb->dst = NULL;
	dev_hold(skb->dev);
	__skb_queue_tail(&tbl->proxy_queue, skb);
	mod_timer(&tbl->proxy_timer, sched_next);
	spin_unlock(&tbl->proxy_queue.lock);
}
struct neigh_parms *neigh_parms_alloc(struct net_device *dev,
				      struct neigh_table *tbl)
{
	struct neigh_parms *p = kmalloc(sizeof(*p), GFP_KERNEL);

	if (p) {
		memcpy(p, &tbl->parms, sizeof(*p));
		p->tbl = tbl;
		atomic_set(&p->refcnt, 1);
		INIT_RCU_HEAD(&p->rcu_head);
		p->reachable_time =
				neigh_rand_reach_time(p->base_reachable_time);
		if (dev) {
			if (dev->neigh_setup && dev->neigh_setup(dev, p)) {
				kfree(p);
				return NULL;
			}

			dev_hold(dev);
			p->dev = dev;
		}
		p->sysctl_table = NULL;
		write_lock_bh(&tbl->lock);
		p->next		= tbl->parms.next;
		tbl->parms.next = p;
		write_unlock_bh(&tbl->lock);
	}
	return p;
}
static void neigh_rcu_free_parms(struct rcu_head *head)
{
	struct neigh_parms *parms =
		container_of(head, struct neigh_parms, rcu_head);

	neigh_parms_put(parms);
}

void neigh_parms_release(struct neigh_table *tbl, struct neigh_parms *parms)
{
	struct neigh_parms **p;

	if (!parms || parms == &tbl->parms)
		return;
	write_lock_bh(&tbl->lock);
	for (p = &tbl->parms.next; *p; p = &(*p)->next) {
		if (*p == parms) {
			*p = parms->next;
			parms->dead = 1;
			write_unlock_bh(&tbl->lock);
			if (parms->dev)
				dev_put(parms->dev);
			call_rcu(&parms->rcu_head, neigh_rcu_free_parms);
			return;
		}
	}
	write_unlock_bh(&tbl->lock);
	NEIGH_PRINTK1("neigh_parms_release: not found\n");
}

void neigh_parms_destroy(struct neigh_parms *parms)
{
	kfree(parms);
}
void neigh_table_init(struct neigh_table *tbl)
{
	unsigned long now = jiffies;
	unsigned long phsize;

	atomic_set(&tbl->parms.refcnt, 1);
	INIT_RCU_HEAD(&tbl->parms.rcu_head);
	tbl->parms.reachable_time =
			  neigh_rand_reach_time(tbl->parms.base_reachable_time);

	if (!tbl->kmem_cachep)
		tbl->kmem_cachep = kmem_cache_create(tbl->id,
						     tbl->entry_size,
						     0, SLAB_HWCACHE_ALIGN,
						     NULL, NULL);

	if (!tbl->kmem_cachep)
		panic("cannot create neighbour cache");

	tbl->stats = alloc_percpu(struct neigh_statistics);
	if (!tbl->stats)
		panic("cannot create neighbour cache statistics");

#ifdef CONFIG_PROC_FS
	tbl->pde = create_proc_entry(tbl->id, 0, proc_net_stat);
	if (!tbl->pde)
		panic("cannot create neighbour proc dir entry");
	tbl->pde->proc_fops = &neigh_stat_seq_fops;
	tbl->pde->data = tbl;
#endif

	tbl->hash_mask = 1;
	tbl->hash_buckets = neigh_hash_alloc(tbl->hash_mask + 1);

	phsize = (PNEIGH_HASHMASK + 1) * sizeof(struct pneigh_entry *);
	tbl->phash_buckets = kmalloc(phsize, GFP_KERNEL);

	if (!tbl->hash_buckets || !tbl->phash_buckets)
		panic("cannot allocate neighbour cache hashes");

	memset(tbl->phash_buckets, 0, phsize);

	get_random_bytes(&tbl->hash_rnd, sizeof(tbl->hash_rnd));

	rwlock_init(&tbl->lock);
	init_timer(&tbl->gc_timer);
	tbl->gc_timer.data     = (unsigned long)tbl;
	tbl->gc_timer.function = neigh_periodic_timer;
	tbl->gc_timer.expires  = now + 1;
	add_timer(&tbl->gc_timer);

	init_timer(&tbl->proxy_timer);
	tbl->proxy_timer.data	  = (unsigned long)tbl;
	tbl->proxy_timer.function = neigh_proxy_process;
	skb_queue_head_init(&tbl->proxy_queue);

	tbl->last_flush = now;
	tbl->last_rand	= now + tbl->parms.reachable_time * 20;
	write_lock(&neigh_tbl_lock);
	tbl->next	= neigh_tables;
	neigh_tables	= tbl;
	write_unlock(&neigh_tbl_lock);
}
int neigh_table_clear(struct neigh_table *tbl)
{
	struct neigh_table **tp;

	/* It is not clean... Fix it to unload IPv6 module safely */
	del_timer_sync(&tbl->gc_timer);
	del_timer_sync(&tbl->proxy_timer);
	pneigh_queue_purge(&tbl->proxy_queue);
	neigh_ifdown(tbl, NULL);
	if (atomic_read(&tbl->entries))
		printk(KERN_CRIT "neighbour leakage\n");
	write_lock(&neigh_tbl_lock);
	for (tp = &neigh_tables; *tp; tp = &(*tp)->next) {
		if (*tp == tbl) {
			*tp = tbl->next;
			break;
		}
	}
	write_unlock(&neigh_tbl_lock);

	neigh_hash_free(tbl->hash_buckets, tbl->hash_mask + 1);
	tbl->hash_buckets = NULL;

	kfree(tbl->phash_buckets);
	tbl->phash_buckets = NULL;

	return 0;
}
int neigh_delete(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg)
{
	struct ndmsg *ndm = NLMSG_DATA(nlh);
	struct rtattr **nda = arg;
	struct neigh_table *tbl;
	struct net_device *dev = NULL;
	int err = -ENODEV;

	if (ndm->ndm_ifindex &&
	    (dev = dev_get_by_index(ndm->ndm_ifindex)) == NULL)
		goto out;

	read_lock(&neigh_tbl_lock);
	for (tbl = neigh_tables; tbl; tbl = tbl->next) {
		struct rtattr *dst_attr = nda[NDA_DST - 1];
		struct neighbour *n;

		if (tbl->family != ndm->ndm_family)
			continue;
		read_unlock(&neigh_tbl_lock);

		err = -EINVAL;
		if (!dst_attr || RTA_PAYLOAD(dst_attr) < tbl->key_len)
			goto out_dev_put;

		if (ndm->ndm_flags & NTF_PROXY) {
			err = pneigh_delete(tbl, RTA_DATA(dst_attr), dev);
			goto out_dev_put;
		}

		if (!dev)
			goto out;

		n = neigh_lookup(tbl, RTA_DATA(dst_attr), dev);
		if (n) {
			err = neigh_update(n, NULL, NUD_FAILED,
					   NEIGH_UPDATE_F_OVERRIDE |
					   NEIGH_UPDATE_F_ADMIN);
			neigh_release(n);
		}
		goto out_dev_put;
	}
	read_unlock(&neigh_tbl_lock);
	err = -EADDRNOTAVAIL;
out_dev_put:
	if (dev)
		dev_put(dev);
out:
	return err;
}
int neigh_add(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg)
{
	struct ndmsg *ndm = NLMSG_DATA(nlh);
	struct rtattr **nda = arg;
	struct neigh_table *tbl;
	struct net_device *dev = NULL;
	int err = -ENODEV;

	if (ndm->ndm_ifindex &&
	    (dev = dev_get_by_index(ndm->ndm_ifindex)) == NULL)
		goto out;

	read_lock(&neigh_tbl_lock);
	for (tbl = neigh_tables; tbl; tbl = tbl->next) {
		struct rtattr *lladdr_attr = nda[NDA_LLADDR - 1];
		struct rtattr *dst_attr = nda[NDA_DST - 1];
		int override = 1;
		struct neighbour *n;

		if (tbl->family != ndm->ndm_family)
			continue;
		read_unlock(&neigh_tbl_lock);

		err = -EINVAL;
		if (!dst_attr || RTA_PAYLOAD(dst_attr) < tbl->key_len)
			goto out_dev_put;

		if (ndm->ndm_flags & NTF_PROXY) {
			err = -ENOBUFS;
			if (pneigh_lookup(tbl, RTA_DATA(dst_attr), dev, 1))
				err = 0;
			goto out_dev_put;
		}

		if (!dev)
			goto out;

		if (lladdr_attr && RTA_PAYLOAD(lladdr_attr) < dev->addr_len)
			goto out_dev_put;

		n = neigh_lookup(tbl, RTA_DATA(dst_attr), dev);
		if (n) {
			if (nlh->nlmsg_flags & NLM_F_EXCL) {
				err = -EEXIST;
				neigh_release(n);
				goto out_dev_put;
			}

			override = nlh->nlmsg_flags & NLM_F_REPLACE;
		} else if (!(nlh->nlmsg_flags & NLM_F_CREATE)) {
			err = -ENOENT;
			goto out_dev_put;
		} else {
			n = __neigh_lookup_errno(tbl, RTA_DATA(dst_attr), dev);
			if (IS_ERR(n)) {
				err = PTR_ERR(n);
				goto out_dev_put;
			}
		}

		err = neigh_update(n,
				   lladdr_attr ? RTA_DATA(lladdr_attr) : NULL,
				   ndm->ndm_state,
				   (override ? NEIGH_UPDATE_F_OVERRIDE : 0) |
				   NEIGH_UPDATE_F_ADMIN);

		neigh_release(n);
		goto out_dev_put;
	}

	read_unlock(&neigh_tbl_lock);
	err = -EADDRNOTAVAIL;
out_dev_put:
	if (dev)
		dev_put(dev);
out:
	return err;
}
static int neightbl_fill_parms(struct sk_buff *skb, struct neigh_parms *parms)
{
	struct rtattr *nest = NULL;

	nest = RTA_NEST(skb, NDTA_PARMS);

	if (parms->dev)
		RTA_PUT_U32(skb, NDTPA_IFINDEX, parms->dev->ifindex);

	RTA_PUT_U32(skb, NDTPA_REFCNT, atomic_read(&parms->refcnt));
	RTA_PUT_U32(skb, NDTPA_QUEUE_LEN, parms->queue_len);
	RTA_PUT_U32(skb, NDTPA_PROXY_QLEN, parms->proxy_qlen);
	RTA_PUT_U32(skb, NDTPA_APP_PROBES, parms->app_probes);
	RTA_PUT_U32(skb, NDTPA_UCAST_PROBES, parms->ucast_probes);
	RTA_PUT_U32(skb, NDTPA_MCAST_PROBES, parms->mcast_probes);
	RTA_PUT_MSECS(skb, NDTPA_REACHABLE_TIME, parms->reachable_time);
	RTA_PUT_MSECS(skb, NDTPA_BASE_REACHABLE_TIME,
		      parms->base_reachable_time);
	RTA_PUT_MSECS(skb, NDTPA_GC_STALETIME, parms->gc_staletime);
	RTA_PUT_MSECS(skb, NDTPA_DELAY_PROBE_TIME, parms->delay_probe_time);
	RTA_PUT_MSECS(skb, NDTPA_RETRANS_TIME, parms->retrans_time);
	RTA_PUT_MSECS(skb, NDTPA_ANYCAST_DELAY, parms->anycast_delay);
	RTA_PUT_MSECS(skb, NDTPA_PROXY_DELAY, parms->proxy_delay);
	RTA_PUT_MSECS(skb, NDTPA_LOCKTIME, parms->locktime);

	return RTA_NEST_END(skb, nest);

rtattr_failure:
	return RTA_NEST_CANCEL(skb, nest);
}
static int neightbl_fill_info(struct neigh_table *tbl, struct sk_buff *skb,
			      struct netlink_callback *cb)
{
	struct nlmsghdr *nlh;
	struct ndtmsg *ndtmsg;

	nlh = NLMSG_NEW_ANSWER(skb, cb, RTM_NEWNEIGHTBL, sizeof(struct ndtmsg),
			       NLM_F_MULTI);

	ndtmsg = NLMSG_DATA(nlh);

	read_lock_bh(&tbl->lock);
	ndtmsg->ndtm_family = tbl->family;
	ndtmsg->ndtm_pad1   = 0;
	ndtmsg->ndtm_pad2   = 0;

	RTA_PUT_STRING(skb, NDTA_NAME, tbl->id);
	RTA_PUT_MSECS(skb, NDTA_GC_INTERVAL, tbl->gc_interval);
	RTA_PUT_U32(skb, NDTA_THRESH1, tbl->gc_thresh1);
	RTA_PUT_U32(skb, NDTA_THRESH2, tbl->gc_thresh2);
	RTA_PUT_U32(skb, NDTA_THRESH3, tbl->gc_thresh3);

	{
		unsigned long now = jiffies;
		unsigned int flush_delta = now - tbl->last_flush;
		unsigned int rand_delta = now - tbl->last_rand;

		struct ndt_config ndc = {
			.ndtc_key_len		= tbl->key_len,
			.ndtc_entry_size	= tbl->entry_size,
			.ndtc_entries		= atomic_read(&tbl->entries),
			.ndtc_last_flush	= jiffies_to_msecs(flush_delta),
			.ndtc_last_rand		= jiffies_to_msecs(rand_delta),
			.ndtc_hash_rnd		= tbl->hash_rnd,
			.ndtc_hash_mask		= tbl->hash_mask,
			.ndtc_hash_chain_gc	= tbl->hash_chain_gc,
			.ndtc_proxy_qlen	= tbl->proxy_queue.qlen,
		};

		RTA_PUT(skb, NDTA_CONFIG, sizeof(ndc), &ndc);
	}

	{
		int cpu;
		struct ndt_stats ndst;

		memset(&ndst, 0, sizeof(ndst));

		for (cpu = 0; cpu < NR_CPUS; cpu++) {
			struct neigh_statistics	*st;

			if (!cpu_possible(cpu))
				continue;

			st = per_cpu_ptr(tbl->stats, cpu);
			ndst.ndts_allocs		+= st->allocs;
			ndst.ndts_destroys		+= st->destroys;
			ndst.ndts_hash_grows		+= st->hash_grows;
			ndst.ndts_res_failed		+= st->res_failed;
			ndst.ndts_lookups		+= st->lookups;
			ndst.ndts_hits			+= st->hits;
			ndst.ndts_rcv_probes_mcast	+= st->rcv_probes_mcast;
			ndst.ndts_rcv_probes_ucast	+= st->rcv_probes_ucast;
			ndst.ndts_periodic_gc_runs	+= st->periodic_gc_runs;
			ndst.ndts_forced_gc_runs	+= st->forced_gc_runs;
		}

		RTA_PUT(skb, NDTA_STATS, sizeof(ndst), &ndst);
	}

	BUG_ON(tbl->parms.dev);
	if (neightbl_fill_parms(skb, &tbl->parms) < 0)
		goto rtattr_failure;

	read_unlock_bh(&tbl->lock);
	return NLMSG_END(skb, nlh);

rtattr_failure:
	read_unlock_bh(&tbl->lock);
	return NLMSG_CANCEL(skb, nlh);

nlmsg_failure:
	return -1;
}
static int neightbl_fill_param_info(struct neigh_table *tbl,
				    struct neigh_parms *parms,
				    struct sk_buff *skb,
				    struct netlink_callback *cb)
{
	struct ndtmsg *ndtmsg;
	struct nlmsghdr *nlh;

	nlh = NLMSG_NEW_ANSWER(skb, cb, RTM_NEWNEIGHTBL, sizeof(struct ndtmsg),
			       NLM_F_MULTI);

	ndtmsg = NLMSG_DATA(nlh);

	read_lock_bh(&tbl->lock);
	ndtmsg->ndtm_family = tbl->family;
	ndtmsg->ndtm_pad1   = 0;
	ndtmsg->ndtm_pad2   = 0;
	RTA_PUT_STRING(skb, NDTA_NAME, tbl->id);

	if (neightbl_fill_parms(skb, parms) < 0)
		goto rtattr_failure;

	read_unlock_bh(&tbl->lock);
	return NLMSG_END(skb, nlh);

rtattr_failure:
	read_unlock_bh(&tbl->lock);
	return NLMSG_CANCEL(skb, nlh);

nlmsg_failure:
	return -1;
}
static inline struct neigh_parms *lookup_neigh_params(struct neigh_table *tbl,
						      int ifindex)
{
	struct neigh_parms *p;

	for (p = &tbl->parms; p; p = p->next)
		if ((p->dev && p->dev->ifindex == ifindex) ||
		    (!p->dev && !ifindex))
			return p;

	return NULL;
}
int neightbl_set(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg)
{
	struct neigh_table *tbl;
	struct ndtmsg *ndtmsg = NLMSG_DATA(nlh);
	struct rtattr **tb = arg;
	int err = -EINVAL;

	if (!tb[NDTA_NAME - 1] || !RTA_PAYLOAD(tb[NDTA_NAME - 1]))
		return -EINVAL;

	read_lock(&neigh_tbl_lock);
	for (tbl = neigh_tables; tbl; tbl = tbl->next) {
		if (ndtmsg->ndtm_family && tbl->family != ndtmsg->ndtm_family)
			continue;

		if (!rtattr_strcmp(tb[NDTA_NAME - 1], tbl->id))
			break;
	}

	if (tbl == NULL) {
		err = -ENOENT;
		goto errout;
	}

	/*
	 * We acquire tbl->lock to be nice to the periodic timers and
	 * make sure they always see a consistent set of values.
	 */
	write_lock_bh(&tbl->lock);

	if (tb[NDTA_THRESH1 - 1])
		tbl->gc_thresh1 = RTA_GET_U32(tb[NDTA_THRESH1 - 1]);

	if (tb[NDTA_THRESH2 - 1])
		tbl->gc_thresh2 = RTA_GET_U32(tb[NDTA_THRESH2 - 1]);

	if (tb[NDTA_THRESH3 - 1])
		tbl->gc_thresh3 = RTA_GET_U32(tb[NDTA_THRESH3 - 1]);

	if (tb[NDTA_GC_INTERVAL - 1])
		tbl->gc_interval = RTA_GET_MSECS(tb[NDTA_GC_INTERVAL - 1]);

	if (tb[NDTA_PARMS - 1]) {
		struct rtattr *tbp[NDTPA_MAX];
		struct neigh_parms *p;
		u32 ifindex = 0;

		if (rtattr_parse_nested(tbp, NDTPA_MAX, tb[NDTA_PARMS - 1]) < 0)
			goto rtattr_failure;

		if (tbp[NDTPA_IFINDEX - 1])
			ifindex = RTA_GET_U32(tbp[NDTPA_IFINDEX - 1]);

		p = lookup_neigh_params(tbl, ifindex);
		if (p == NULL) {
			err = -ENOENT;
			goto rtattr_failure;
		}

		if (tbp[NDTPA_QUEUE_LEN - 1])
			p->queue_len = RTA_GET_U32(tbp[NDTPA_QUEUE_LEN - 1]);

		if (tbp[NDTPA_PROXY_QLEN - 1])
			p->proxy_qlen = RTA_GET_U32(tbp[NDTPA_PROXY_QLEN - 1]);

		if (tbp[NDTPA_APP_PROBES - 1])
			p->app_probes = RTA_GET_U32(tbp[NDTPA_APP_PROBES - 1]);

		if (tbp[NDTPA_UCAST_PROBES - 1])
			p->ucast_probes =
				RTA_GET_U32(tbp[NDTPA_UCAST_PROBES - 1]);

		if (tbp[NDTPA_MCAST_PROBES - 1])
			p->mcast_probes =
				RTA_GET_U32(tbp[NDTPA_MCAST_PROBES - 1]);

		if (tbp[NDTPA_BASE_REACHABLE_TIME - 1])
			p->base_reachable_time =
				RTA_GET_MSECS(tbp[NDTPA_BASE_REACHABLE_TIME - 1]);

		if (tbp[NDTPA_GC_STALETIME - 1])
			p->gc_staletime =
				RTA_GET_MSECS(tbp[NDTPA_GC_STALETIME - 1]);

		if (tbp[NDTPA_DELAY_PROBE_TIME - 1])
			p->delay_probe_time =
				RTA_GET_MSECS(tbp[NDTPA_DELAY_PROBE_TIME - 1]);

		if (tbp[NDTPA_RETRANS_TIME - 1])
			p->retrans_time =
				RTA_GET_MSECS(tbp[NDTPA_RETRANS_TIME - 1]);

		if (tbp[NDTPA_ANYCAST_DELAY - 1])
			p->anycast_delay =
				RTA_GET_MSECS(tbp[NDTPA_ANYCAST_DELAY - 1]);

		if (tbp[NDTPA_PROXY_DELAY - 1])
			p->proxy_delay =
				RTA_GET_MSECS(tbp[NDTPA_PROXY_DELAY - 1]);

		if (tbp[NDTPA_LOCKTIME - 1])
			p->locktime = RTA_GET_MSECS(tbp[NDTPA_LOCKTIME - 1]);
	}

	err = 0;

rtattr_failure:
	write_unlock_bh(&tbl->lock);
errout:
	read_unlock(&neigh_tbl_lock);
	return err;
}
int neightbl_dump_info(struct sk_buff *skb, struct netlink_callback *cb)
{
	int idx, family;
	int s_idx = cb->args[0];
	struct neigh_table *tbl;

	family = ((struct rtgenmsg *)NLMSG_DATA(cb->nlh))->rtgen_family;

	read_lock(&neigh_tbl_lock);
	for (tbl = neigh_tables, idx = 0; tbl; tbl = tbl->next) {
		struct neigh_parms *p;

		if (idx < s_idx || (family && tbl->family != family))
			continue;

		if (neightbl_fill_info(tbl, skb, cb) <= 0)
			break;

		for (++idx, p = tbl->parms.next; p; p = p->next, idx++) {
			if (idx < s_idx)
				continue;

			if (neightbl_fill_param_info(tbl, p, skb, cb) <= 0)
				goto out;
		}
	}
out:
	read_unlock(&neigh_tbl_lock);
	cb->args[0] = idx;

	return skb->len;
}
static int neigh_fill_info(struct sk_buff *skb, struct neighbour *n,
			   u32 pid, u32 seq, int event, unsigned int flags)
{
	unsigned long now = jiffies;
	unsigned char *b = skb->tail;
	struct nda_cacheinfo ci;
	int locked = 0;
	u32 probes;
	struct nlmsghdr *nlh = NLMSG_NEW(skb, pid, seq, event,
					 sizeof(struct ndmsg), flags);
	struct ndmsg *ndm = NLMSG_DATA(nlh);

	ndm->ndm_family	 = n->ops->family;
	ndm->ndm_pad1    = 0;
	ndm->ndm_pad2    = 0;
	ndm->ndm_flags	 = n->flags;
	ndm->ndm_type	 = n->type;
	ndm->ndm_ifindex = n->dev->ifindex;
	RTA_PUT(skb, NDA_DST, n->tbl->key_len, n->primary_key);
	read_lock_bh(&n->lock);
	locked		 = 1;
	ndm->ndm_state	 = n->nud_state;
	if (n->nud_state & NUD_VALID)
		RTA_PUT(skb, NDA_LLADDR, n->dev->addr_len, n->ha);
	ci.ndm_used	 = now - n->used;
	ci.ndm_confirmed = now - n->confirmed;
	ci.ndm_updated	 = now - n->updated;
	ci.ndm_refcnt	 = atomic_read(&n->refcnt) - 1;
	probes = atomic_read(&n->probes);
	read_unlock_bh(&n->lock);
	locked		 = 0;
	RTA_PUT(skb, NDA_CACHEINFO, sizeof(ci), &ci);
	RTA_PUT(skb, NDA_PROBES, sizeof(probes), &probes);
	nlh->nlmsg_len	 = skb->tail - b;
	return skb->len;

nlmsg_failure:
rtattr_failure:
	if (locked)
		read_unlock_bh(&n->lock);
	skb_trim(skb, b - skb->data);
	return -1;
}
static int neigh_dump_table(struct neigh_table *tbl, struct sk_buff *skb,
			    struct netlink_callback *cb)
{
	struct neighbour *n;
	int rc, h, s_h = cb->args[1];
	int idx, s_idx = idx = cb->args[2];

	for (h = 0; h <= tbl->hash_mask; h++) {
		if (h < s_h)
			continue;
		if (h > s_h)
			s_idx = 0;
		read_lock_bh(&tbl->lock);
		for (n = tbl->hash_buckets[h], idx = 0; n; n = n->next, idx++) {
			if (idx < s_idx)
				continue;
			if (neigh_fill_info(skb, n, NETLINK_CB(cb->skb).pid,
					    cb->nlh->nlmsg_seq,
					    RTM_NEWNEIGH,
					    NLM_F_MULTI) <= 0) {
				read_unlock_bh(&tbl->lock);
				rc = -1;
				goto out;
			}
		}
		read_unlock_bh(&tbl->lock);
	}
	rc = skb->len;
out:
	cb->args[1] = h;
	cb->args[2] = idx;
	return rc;
}
int neigh_dump_info(struct sk_buff *skb, struct netlink_callback *cb)
{
	struct neigh_table *tbl;
	int t, family, s_t;

	read_lock(&neigh_tbl_lock);
	family = ((struct rtgenmsg *)NLMSG_DATA(cb->nlh))->rtgen_family;
	s_t = cb->args[0];

	for (tbl = neigh_tables, t = 0; tbl; tbl = tbl->next, t++) {
		if (t < s_t || (family && tbl->family != family))
			continue;
		if (t > s_t)
			memset(&cb->args[1], 0, sizeof(cb->args) -
						sizeof(cb->args[0]));
		if (neigh_dump_table(tbl, skb, cb) < 0)
			break;
	}
	read_unlock(&neigh_tbl_lock);

	cb->args[0] = t;
	return skb->len;
}
void neigh_for_each(struct neigh_table *tbl, void (*cb)(struct neighbour *, void *), void *cookie)
{
	int chain;

	read_lock_bh(&tbl->lock);
	for (chain = 0; chain <= tbl->hash_mask; chain++) {
		struct neighbour *n;

		for (n = tbl->hash_buckets[chain]; n; n = n->next)
			cb(n, cookie);
	}
	read_unlock_bh(&tbl->lock);
}
EXPORT_SYMBOL(neigh_for_each);
/* The tbl->lock must be held as a writer and BH disabled. */
void __neigh_for_each_release(struct neigh_table *tbl,
			      int (*cb)(struct neighbour *))
{
	int chain;

	for (chain = 0; chain <= tbl->hash_mask; chain++) {
		struct neighbour *n, **np;

		np = &tbl->hash_buckets[chain];
		while ((n = *np) != NULL) {
			int release;

			write_lock(&n->lock);
			release = cb(n);
			if (release) {
				*np = n->next;
				n->dead = 1;
			} else
				np = &n->next;
			write_unlock(&n->lock);
			if (release)
				neigh_release(n);
		}
	}
}
EXPORT_SYMBOL(__neigh_for_each_release);
#ifdef CONFIG_PROC_FS

static struct neighbour *neigh_get_first(struct seq_file *seq)
{
	struct neigh_seq_state *state = seq->private;
	struct neigh_table *tbl = state->tbl;
	struct neighbour *n = NULL;
	int bucket = state->bucket;

	state->flags &= ~NEIGH_SEQ_IS_PNEIGH;
	for (bucket = 0; bucket <= tbl->hash_mask; bucket++) {
		n = tbl->hash_buckets[bucket];

		while (n) {
			if (state->neigh_sub_iter) {
				loff_t fakep = 0;
				void *v;

				v = state->neigh_sub_iter(state, n, &fakep);
				if (!v)
					goto next;
			}
			if (!(state->flags & NEIGH_SEQ_SKIP_NOARP))
				break;
			if (n->nud_state & ~NUD_NOARP)
				break;
		next:
			n = n->next;
		}

		if (n)
			break;
	}
	state->bucket = bucket;

	return n;
}
static struct neighbour *neigh_get_next(struct seq_file *seq,
					struct neighbour *n,
					loff_t *pos)
{
	struct neigh_seq_state *state = seq->private;
	struct neigh_table *tbl = state->tbl;

	if (state->neigh_sub_iter) {
		void *v = state->neigh_sub_iter(state, n, pos);
		if (v)
			return n;
	}
	n = n->next;

	while (1) {
		while (n) {
			if (state->neigh_sub_iter) {
				void *v = state->neigh_sub_iter(state, n, pos);
				if (v)
					return n;
				goto next;
			}
			if (!(state->flags & NEIGH_SEQ_SKIP_NOARP))
				break;
			if (n->nud_state & ~NUD_NOARP)
				break;
		next:
			n = n->next;
		}

		if (n)
			break;

		if (++state->bucket > tbl->hash_mask)
			break;

		n = tbl->hash_buckets[state->bucket];
	}

	if (n && pos)
		--(*pos);
	return n;
}
static struct neighbour *neigh_get_idx(struct seq_file *seq, loff_t *pos)
{
	struct neighbour *n = neigh_get_first(seq);

	if (n) {
		while (*pos) {
			n = neigh_get_next(seq, n, pos);
			if (!n)
				break;
		}
	}
	return *pos ? NULL : n;
}
static struct pneigh_entry *pneigh_get_first(struct seq_file *seq)
{
	struct neigh_seq_state *state = seq->private;
	struct neigh_table *tbl = state->tbl;
	struct pneigh_entry *pn = NULL;
	int bucket = state->bucket;

	state->flags |= NEIGH_SEQ_IS_PNEIGH;
	for (bucket = 0; bucket <= PNEIGH_HASHMASK; bucket++) {
		pn = tbl->phash_buckets[bucket];
		if (pn)
			break;
	}
	state->bucket = bucket;

	return pn;
}
static struct pneigh_entry *pneigh_get_next(struct seq_file *seq,
					    struct pneigh_entry *pn,
					    loff_t *pos)
{
	struct neigh_seq_state *state = seq->private;
	struct neigh_table *tbl = state->tbl;

	pn = pn->next;
	while (!pn) {
		if (++state->bucket > PNEIGH_HASHMASK)
			break;
		pn = tbl->phash_buckets[state->bucket];
	}

	if (pn && pos)
		--(*pos);

	return pn;
}
static struct pneigh_entry *pneigh_get_idx(struct seq_file *seq, loff_t *pos)
{
	struct pneigh_entry *pn = pneigh_get_first(seq);

	if (pn) {
		while (*pos) {
			pn = pneigh_get_next(seq, pn, pos);
			if (!pn)
				break;
		}
	}
	return *pos ? NULL : pn;
}
static void *neigh_get_idx_any(struct seq_file *seq, loff_t *pos)
{
	struct neigh_seq_state *state = seq->private;
	void *rc;

	rc = neigh_get_idx(seq, pos);
	if (!rc && !(state->flags & NEIGH_SEQ_NEIGH_ONLY))
		rc = pneigh_get_idx(seq, pos);

	return rc;
}
void *neigh_seq_start(struct seq_file *seq, loff_t *pos, struct neigh_table *tbl, unsigned int neigh_seq_flags)
{
	struct neigh_seq_state *state = seq->private;
	loff_t pos_minus_one;

	state->tbl = tbl;
	state->bucket = 0;
	state->flags = (neigh_seq_flags & ~NEIGH_SEQ_IS_PNEIGH);

	read_lock_bh(&tbl->lock);

	pos_minus_one = *pos - 1;
	return *pos ? neigh_get_idx_any(seq, &pos_minus_one) : SEQ_START_TOKEN;
}
EXPORT_SYMBOL(neigh_seq_start);
void *neigh_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	struct neigh_seq_state *state;
	void *rc;

	if (v == SEQ_START_TOKEN) {
		rc = neigh_get_idx(seq, pos);
		goto out;
	}

	state = seq->private;
	if (!(state->flags & NEIGH_SEQ_IS_PNEIGH)) {
		rc = neigh_get_next(seq, v, NULL);
		if (rc)
			goto out;
		if (!(state->flags & NEIGH_SEQ_NEIGH_ONLY))
			rc = pneigh_get_first(seq);
	} else {
		BUG_ON(state->flags & NEIGH_SEQ_NEIGH_ONLY);
		rc = pneigh_get_next(seq, v, NULL);
	}
out:
	++(*pos);
	return rc;
}
EXPORT_SYMBOL(neigh_seq_next);
void neigh_seq_stop(struct seq_file *seq, void *v)
{
	struct neigh_seq_state *state = seq->private;
	struct neigh_table *tbl = state->tbl;

	read_unlock_bh(&tbl->lock);
}
EXPORT_SYMBOL(neigh_seq_stop);
/* statistics via seq_file */

static void *neigh_stat_seq_start(struct seq_file *seq, loff_t *pos)
{
	struct proc_dir_entry *pde = seq->private;
	struct neigh_table *tbl = pde->data;
	int cpu;

	if (*pos == 0)
		return SEQ_START_TOKEN;

	for (cpu = *pos-1; cpu < NR_CPUS; ++cpu) {
		if (!cpu_possible(cpu))
			continue;
		*pos = cpu+1;
		return per_cpu_ptr(tbl->stats, cpu);
	}
	return NULL;
}
static void *neigh_stat_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	struct proc_dir_entry *pde = seq->private;
	struct neigh_table *tbl = pde->data;
	int cpu;

	for (cpu = *pos; cpu < NR_CPUS; ++cpu) {
		if (!cpu_possible(cpu))
			continue;
		*pos = cpu+1;
		return per_cpu_ptr(tbl->stats, cpu);
	}
	return NULL;
}
static void neigh_stat_seq_stop(struct seq_file *seq, void *v)
{

}

static int neigh_stat_seq_show(struct seq_file *seq, void *v)
{
	struct proc_dir_entry *pde = seq->private;
	struct neigh_table *tbl = pde->data;
	struct neigh_statistics *st = v;

	if (v == SEQ_START_TOKEN) {
		seq_printf(seq, "entries allocs destroys hash_grows lookups hits res_failed rcv_probes_mcast rcv_probes_ucast periodic_gc_runs forced_gc_runs\n");
		return 0;
	}

	seq_printf(seq, "%08x %08lx %08lx %08lx %08lx %08lx %08lx "
			"%08lx %08lx %08lx %08lx\n",
		   atomic_read(&tbl->entries),

		   st->allocs,
		   st->destroys,
		   st->hash_grows,

		   st->lookups,
		   st->hits,

		   st->res_failed,

		   st->rcv_probes_mcast,
		   st->rcv_probes_ucast,

		   st->periodic_gc_runs,
		   st->forced_gc_runs
		   );

	return 0;
}
static struct seq_operations neigh_stat_seq_ops = {
	.start	= neigh_stat_seq_start,
	.next	= neigh_stat_seq_next,
	.stop	= neigh_stat_seq_stop,
	.show	= neigh_stat_seq_show,
};

static int neigh_stat_seq_open(struct inode *inode, struct file *file)
{
	int ret = seq_open(file, &neigh_stat_seq_ops);

	if (!ret) {
		struct seq_file *sf = file->private_data;
		sf->private = PDE(inode);
	}
	return ret;
}

static struct file_operations neigh_stat_seq_fops = {
	.owner	 = THIS_MODULE,
	.open	 = neigh_stat_seq_open,
	.read	 = seq_read,
	.llseek  = seq_lseek,
	.release = seq_release,
};

#endif /* CONFIG_PROC_FS */
#ifdef CONFIG_ARPD
void neigh_app_ns(struct neighbour *n)
{
	struct nlmsghdr *nlh;
	int size = NLMSG_SPACE(sizeof(struct ndmsg) + 256);
	struct sk_buff *skb = alloc_skb(size, GFP_ATOMIC);

	if (!skb)
		return;

	if (neigh_fill_info(skb, n, 0, 0, RTM_GETNEIGH, 0) < 0) {
		kfree_skb(skb);
		return;
	}
	nlh			  = (struct nlmsghdr *)skb->data;
	nlh->nlmsg_flags	  = NLM_F_REQUEST;
	NETLINK_CB(skb).dst_group = RTNLGRP_NEIGH;
	netlink_broadcast(rtnl, skb, 0, RTNLGRP_NEIGH, GFP_ATOMIC);
}

static void neigh_app_notify(struct neighbour *n)
{
	struct nlmsghdr *nlh;
	int size = NLMSG_SPACE(sizeof(struct ndmsg) + 256);
	struct sk_buff *skb = alloc_skb(size, GFP_ATOMIC);

	if (!skb)
		return;

	if (neigh_fill_info(skb, n, 0, 0, RTM_NEWNEIGH, 0) < 0) {
		kfree_skb(skb);
		return;
	}
	nlh			  = (struct nlmsghdr *)skb->data;
	NETLINK_CB(skb).dst_group = RTNLGRP_NEIGH;
	netlink_broadcast(rtnl, skb, 0, RTNLGRP_NEIGH, GFP_ATOMIC);
}

#endif /* CONFIG_ARPD */
#ifdef CONFIG_SYSCTL

static struct neigh_sysctl_table {
	struct ctl_table_header *sysctl_header;
	ctl_table		neigh_vars[__NET_NEIGH_MAX];
	ctl_table		neigh_dev[2];
	ctl_table		neigh_neigh_dir[2];
	ctl_table		neigh_proto_dir[2];
	ctl_table		neigh_root_dir[2];
} neigh_sysctl_template = {
	.neigh_vars = {
		{
			.ctl_name	= NET_NEIGH_MCAST_SOLICIT,
			.procname	= "mcast_solicit",
			.maxlen		= sizeof(int),
			.proc_handler	= &proc_dointvec,
		},
		{
			.ctl_name	= NET_NEIGH_UCAST_SOLICIT,
			.procname	= "ucast_solicit",
			.maxlen		= sizeof(int),
			.proc_handler	= &proc_dointvec,
		},
		{
			.ctl_name	= NET_NEIGH_APP_SOLICIT,
			.procname	= "app_solicit",
			.maxlen		= sizeof(int),
			.proc_handler	= &proc_dointvec,
		},
		{
			.ctl_name	= NET_NEIGH_RETRANS_TIME,
			.procname	= "retrans_time",
			.maxlen		= sizeof(int),
			.proc_handler	= &proc_dointvec_userhz_jiffies,
		},
		{
			.ctl_name	= NET_NEIGH_REACHABLE_TIME,
			.procname	= "base_reachable_time",
			.maxlen		= sizeof(int),
			.proc_handler	= &proc_dointvec_jiffies,
			.strategy	= &sysctl_jiffies,
		},
		{
			.ctl_name	= NET_NEIGH_DELAY_PROBE_TIME,
			.procname	= "delay_first_probe_time",
			.maxlen		= sizeof(int),
			.proc_handler	= &proc_dointvec_jiffies,
			.strategy	= &sysctl_jiffies,
		},
		{
			.ctl_name	= NET_NEIGH_GC_STALE_TIME,
			.procname	= "gc_stale_time",
			.maxlen		= sizeof(int),
			.proc_handler	= &proc_dointvec_jiffies,
			.strategy	= &sysctl_jiffies,
		},
		{
			.ctl_name	= NET_NEIGH_UNRES_QLEN,
			.procname	= "unres_qlen",
			.maxlen		= sizeof(int),
			.proc_handler	= &proc_dointvec,
		},
		{
			.ctl_name	= NET_NEIGH_PROXY_QLEN,
			.procname	= "proxy_qlen",
			.maxlen		= sizeof(int),
			.proc_handler	= &proc_dointvec,
		},
		{
			.ctl_name	= NET_NEIGH_ANYCAST_DELAY,
			.procname	= "anycast_delay",
			.maxlen		= sizeof(int),
			.proc_handler	= &proc_dointvec_userhz_jiffies,
		},
		{
			.ctl_name	= NET_NEIGH_PROXY_DELAY,
			.procname	= "proxy_delay",
			.maxlen		= sizeof(int),
			.proc_handler	= &proc_dointvec_userhz_jiffies,
		},
		{
			.ctl_name	= NET_NEIGH_LOCKTIME,
			.procname	= "locktime",
			.maxlen		= sizeof(int),
			.proc_handler	= &proc_dointvec_userhz_jiffies,
		},
		{
			.ctl_name	= NET_NEIGH_GC_INTERVAL,
			.procname	= "gc_interval",
			.maxlen		= sizeof(int),
			.proc_handler	= &proc_dointvec_jiffies,
			.strategy	= &sysctl_jiffies,
		},
		{
			.ctl_name	= NET_NEIGH_GC_THRESH1,
			.procname	= "gc_thresh1",
			.maxlen		= sizeof(int),
			.proc_handler	= &proc_dointvec,
		},
		{
			.ctl_name	= NET_NEIGH_GC_THRESH2,
			.procname	= "gc_thresh2",
			.maxlen		= sizeof(int),
			.proc_handler	= &proc_dointvec,
		},
		{
			.ctl_name	= NET_NEIGH_GC_THRESH3,
			.procname	= "gc_thresh3",
			.maxlen		= sizeof(int),
			.proc_handler	= &proc_dointvec,
		},
		{
			.ctl_name	= NET_NEIGH_RETRANS_TIME_MS,
			.procname	= "retrans_time_ms",
			.maxlen		= sizeof(int),
			.proc_handler	= &proc_dointvec_ms_jiffies,
			.strategy	= &sysctl_ms_jiffies,
		},
		{
			.ctl_name	= NET_NEIGH_REACHABLE_TIME_MS,
			.procname	= "base_reachable_time_ms",
			.maxlen		= sizeof(int),
			.proc_handler	= &proc_dointvec_ms_jiffies,
			.strategy	= &sysctl_ms_jiffies,
		},
	},
	.neigh_dev = {
		{
			.ctl_name	= NET_PROTO_CONF_DEFAULT,
			.procname	= "default",
			.mode		= 0555,
		},
	},
	.neigh_neigh_dir = {
		{
			.procname	= "neigh",
			.mode		= 0555,
		},
	},
	.neigh_proto_dir = {
		{
			.mode		= 0555,
		},
	},
	.neigh_root_dir = {
		{
			.ctl_name	= CTL_NET,
			.procname	= "net",
			.mode		= 0555,
		},
	},
};
int neigh_sysctl_register(struct net_device *dev, struct neigh_parms *p,
			  int p_id, int pdev_id, char *p_name,
			  proc_handler *handler, ctl_handler *strategy)
{
	struct neigh_sysctl_table *t = kmalloc(sizeof(*t), GFP_KERNEL);
	const char *dev_name_source = NULL;
	char *dev_name = NULL;
	int err = 0;

	if (!t)
		return -ENOBUFS;
	memcpy(t, &neigh_sysctl_template, sizeof(*t));
	t->neigh_vars[0].data  = &p->mcast_probes;
	t->neigh_vars[1].data  = &p->ucast_probes;
	t->neigh_vars[2].data  = &p->app_probes;
	t->neigh_vars[3].data  = &p->retrans_time;
	t->neigh_vars[4].data  = &p->base_reachable_time;
	t->neigh_vars[5].data  = &p->delay_probe_time;
	t->neigh_vars[6].data  = &p->gc_staletime;
	t->neigh_vars[7].data  = &p->queue_len;
	t->neigh_vars[8].data  = &p->proxy_qlen;
	t->neigh_vars[9].data  = &p->anycast_delay;
	t->neigh_vars[10].data = &p->proxy_delay;
	t->neigh_vars[11].data = &p->locktime;

	if (dev) {
		dev_name_source = dev->name;
		t->neigh_dev[0].ctl_name = dev->ifindex;
		t->neigh_vars[12].procname = NULL;
		t->neigh_vars[13].procname = NULL;
		t->neigh_vars[14].procname = NULL;
		t->neigh_vars[15].procname = NULL;
	} else {
		dev_name_source = t->neigh_dev[0].procname;
		t->neigh_vars[12].data = (int *)(p + 1);
		t->neigh_vars[13].data = (int *)(p + 1) + 1;
		t->neigh_vars[14].data = (int *)(p + 1) + 2;
		t->neigh_vars[15].data = (int *)(p + 1) + 3;
	}

	t->neigh_vars[16].data = &p->retrans_time;
	t->neigh_vars[17].data = &p->base_reachable_time;

	if (handler || strategy) {
		/* RetransTime */
		t->neigh_vars[3].proc_handler = handler;
		t->neigh_vars[3].strategy = strategy;
		t->neigh_vars[3].extra1 = dev;
		/* ReachableTime */
		t->neigh_vars[4].proc_handler = handler;
		t->neigh_vars[4].strategy = strategy;
		t->neigh_vars[4].extra1 = dev;
		/* RetransTime (in milliseconds)*/
		t->neigh_vars[16].proc_handler = handler;
		t->neigh_vars[16].strategy = strategy;
		t->neigh_vars[16].extra1 = dev;
		/* ReachableTime (in milliseconds) */
		t->neigh_vars[17].proc_handler = handler;
		t->neigh_vars[17].strategy = strategy;
		t->neigh_vars[17].extra1 = dev;
	}

	dev_name = kstrdup(dev_name_source, GFP_KERNEL);
	if (!dev_name) {
		err = -ENOBUFS;
		goto free;
	}

	t->neigh_dev[0].procname = dev_name;

	t->neigh_neigh_dir[0].ctl_name = pdev_id;

	t->neigh_proto_dir[0].procname = p_name;
	t->neigh_proto_dir[0].ctl_name = p_id;

	t->neigh_dev[0].child	    = t->neigh_vars;
	t->neigh_neigh_dir[0].child = t->neigh_dev;
	t->neigh_proto_dir[0].child = t->neigh_neigh_dir;
	t->neigh_root_dir[0].child  = t->neigh_proto_dir;

	t->sysctl_header = register_sysctl_table(t->neigh_root_dir, 0);
	if (!t->sysctl_header) {
		err = -ENOBUFS;
		goto free_procname;
	}
	p->sysctl_table = t;
	return 0;

	/* error path */
free_procname:
	kfree(dev_name);
free:
	kfree(t);

	return err;
}

void neigh_sysctl_unregister(struct neigh_parms *p)
{
	if (p->sysctl_table) {
		struct neigh_sysctl_table *t = p->sysctl_table;
		p->sysctl_table = NULL;
		unregister_sysctl_table(t->sysctl_header);
		kfree(t->neigh_dev[0].procname);
		kfree(t);
	}
}

#endif	/* CONFIG_SYSCTL */
EXPORT_SYMBOL(__neigh_event_send);
EXPORT_SYMBOL(neigh_add);
EXPORT_SYMBOL(neigh_changeaddr);
EXPORT_SYMBOL(neigh_compat_output);
EXPORT_SYMBOL(neigh_connected_output);
EXPORT_SYMBOL(neigh_create);
EXPORT_SYMBOL(neigh_delete);
EXPORT_SYMBOL(neigh_destroy);
EXPORT_SYMBOL(neigh_dump_info);
EXPORT_SYMBOL(neigh_event_ns);
EXPORT_SYMBOL(neigh_ifdown);
EXPORT_SYMBOL(neigh_lookup);
EXPORT_SYMBOL(neigh_lookup_nodev);
EXPORT_SYMBOL(neigh_parms_alloc);
EXPORT_SYMBOL(neigh_parms_release);
EXPORT_SYMBOL(neigh_rand_reach_time);
EXPORT_SYMBOL(neigh_resolve_output);
EXPORT_SYMBOL(neigh_table_clear);
EXPORT_SYMBOL(neigh_table_init);
EXPORT_SYMBOL(neigh_update);
EXPORT_SYMBOL(neigh_update_hhs);
EXPORT_SYMBOL(pneigh_enqueue);
EXPORT_SYMBOL(pneigh_lookup);
EXPORT_SYMBOL(neightbl_dump_info);
EXPORT_SYMBOL(neightbl_set);

#ifdef CONFIG_ARPD
EXPORT_SYMBOL(neigh_app_ns);
#endif
#ifdef CONFIG_SYSCTL
EXPORT_SYMBOL(neigh_sysctl_register);
EXPORT_SYMBOL(neigh_sysctl_unregister);
#endif