/*
 * net/core/fib_rules.c		Generic Routing Rules
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation, version 2.
 *
 * Authors:	Thomas Graf <tgraf@suug.ch>
 */

#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/list.h>
#include <linux/module.h>
#include <net/net_namespace.h>
#include <net/sock.h>
#include <net/fib_rules.h>

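/*
 * fib_default_rule_add - install an initial catch-all rule
 *
 * Allocates a rule that sends lookups to @table and appends it to the
 * ops' rule list. Callers use this at registration time, before the
 * list is visible to anyone else. For illustration (roughly what the
 * IPv4 code does; exact call sites may differ per family):
 *
 *	fib_default_rule_add(ops, 0, RT_TABLE_LOCAL, 0);
 *	fib_default_rule_add(ops, 0x7FFE, RT_TABLE_MAIN, 0);
 */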
int fib_default_rule_add(struct fib_rules_ops *ops,
                         u32 pref, u32 table, u32 flags)
{
        struct fib_rule *r;

        r = kzalloc(ops->rule_size, GFP_KERNEL);
        if (r == NULL)
                return -ENOMEM;

        atomic_set(&r->refcnt, 1);
        r->action = FR_ACT_TO_TBL;
        r->pref = pref;
        r->table = table;
        r->flags = flags;
        r->uid_start = INVALID_UID;
        r->uid_end = INVALID_UID;
        r->fr_net = hold_net(ops->fro_net);

        /* The lock is not required here, the list is unreachable
         * at the moment this function is called */
        list_add_tail(&r->list, &ops->rules_list);
        return 0;
}
EXPORT_SYMBOL(fib_default_rule_add);

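/*
 * fib_default_rule_pref - pick a preference for a new rule
 *
 * Returns one less than the preference of the second rule in the list
 * (the first rule after the default one), so that a newly added rule
 * sorts directly in front of it, or 0 if no such slot can be derived.
 */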
u32 fib_default_rule_pref(struct fib_rules_ops *ops)
{
        struct list_head *pos;
        struct fib_rule *rule;

        if (!list_empty(&ops->rules_list)) {
                pos = ops->rules_list.next;
                if (pos->next != &ops->rules_list) {
                        rule = list_entry(pos->next, struct fib_rule, list);
                        if (rule->pref)
                                return rule->pref - 1;
                }
        }

        return 0;
}
EXPORT_SYMBOL(fib_default_rule_pref);

static void notify_rule_change(int event, struct fib_rule *rule,
                               struct fib_rules_ops *ops, struct nlmsghdr *nlh,
                               u32 pid);

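/*
 * lookup_rules_ops - find the rules_ops registered for an address family
 *
 * Walks the per-namespace ops list under RCU and takes a module
 * reference on a match, so the caller must drop it again via
 * rules_ops_put(). Returns NULL if the family is unknown or its
 * module is going away.
 */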
static struct fib_rules_ops *lookup_rules_ops(struct net *net, int family)
{
        struct fib_rules_ops *ops;

        rcu_read_lock();
        list_for_each_entry_rcu(ops, &net->rules_ops, list) {
                if (ops->family == family) {
                        if (!try_module_get(ops->owner))
                                ops = NULL;
                        rcu_read_unlock();
                        return ops;
                }
        }
        rcu_read_unlock();

        return NULL;
}

static void rules_ops_put(struct fib_rules_ops *ops)
{
        if (ops)
                module_put(ops->owner);
}

static void flush_route_cache(struct fib_rules_ops *ops)
{
        if (ops->flush_cache)
                ops->flush_cache(ops);
}

static int __fib_rules_register(struct fib_rules_ops *ops)
{
        int err = -EEXIST;
        struct fib_rules_ops *o;
        struct net *net;

        net = ops->fro_net;

        if (ops->rule_size < sizeof(struct fib_rule))
                return -EINVAL;

        if (ops->match == NULL || ops->configure == NULL ||
            ops->compare == NULL || ops->fill == NULL ||
            ops->action == NULL)
                return -EINVAL;

        spin_lock(&net->rules_mod_lock);
        list_for_each_entry(o, &net->rules_ops, list)
                if (ops->family == o->family)
                        goto errout;

        hold_net(net);
        list_add_tail_rcu(&ops->list, &net->rules_ops);
        err = 0;
errout:
        spin_unlock(&net->rules_mod_lock);

        return err;
}

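/*
 * fib_rules_register - register a family's rule ops in a namespace
 *
 * Duplicates @tmpl so each namespace gets its own writable copy, then
 * links it into the namespace. A protocol typically calls this from
 * its pernet init, e.g. (sketch; template name as used by IPv4):
 *
 *	ops = fib_rules_register(&fib4_rules_ops_template, net);
 *	if (IS_ERR(ops))
 *		return PTR_ERR(ops);
 */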
struct fib_rules_ops *
fib_rules_register(const struct fib_rules_ops *tmpl, struct net *net)
{
        struct fib_rules_ops *ops;
        int err;

        ops = kmemdup(tmpl, sizeof(*ops), GFP_KERNEL);
        if (ops == NULL)
                return ERR_PTR(-ENOMEM);

        INIT_LIST_HEAD(&ops->rules_list);
        ops->fro_net = net;

        err = __fib_rules_register(ops);
        if (err) {
                kfree(ops);
                ops = ERR_PTR(err);
        }

        return ops;
}
EXPORT_SYMBOL_GPL(fib_rules_register);

static void fib_rules_cleanup_ops(struct fib_rules_ops *ops)
{
        struct fib_rule *rule, *tmp;

        list_for_each_entry_safe(rule, tmp, &ops->rules_list, list) {
                list_del_rcu(&rule->list);
                if (ops->delete)
                        ops->delete(rule);
                fib_rule_put(rule);
        }
}

static void fib_rules_put_rcu(struct rcu_head *head)
{
        struct fib_rules_ops *ops = container_of(head, struct fib_rules_ops, rcu);
        struct net *net = ops->fro_net;

        release_net(net);
        kfree(ops);
}

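/*
 * fib_rules_unregister - tear down a family's rule ops
 *
 * Unlinks the ops and flushes its remaining rules under the mod lock;
 * the ops structure itself is freed after an RCU grace period so
 * concurrent lookups can finish.
 */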
void fib_rules_unregister(struct fib_rules_ops *ops)
{
        struct net *net = ops->fro_net;

        spin_lock(&net->rules_mod_lock);
        list_del_rcu(&ops->list);
        fib_rules_cleanup_ops(ops);
        spin_unlock(&net->rules_mod_lock);

        call_rcu(&ops->rcu, fib_rules_put_rcu);
}
EXPORT_SYMBOL_GPL(fib_rules_unregister);

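/*
 * UID-range helpers (an Android extension carried in this tree): a rule
 * may be restricted to flows whose socket UID falls within
 * [uid_start, uid_end]. A rule with both ends left at INVALID_UID
 * matches every UID.
 */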
static inline kuid_t fib_nl_uid(struct nlattr *nla)
{
        return make_kuid(current_user_ns(), nla_get_u32(nla));
}

static int nla_put_uid(struct sk_buff *skb, int idx, kuid_t uid)
{
        return nla_put_u32(skb, idx, from_kuid_munged(current_user_ns(), uid));
}

static int fib_uid_range_match(struct flowi *fl, struct fib_rule *rule)
{
        return (!uid_valid(rule->uid_start) && !uid_valid(rule->uid_end)) ||
               (uid_gte(fl->flowi_uid, rule->uid_start) &&
                uid_lte(fl->flowi_uid, rule->uid_end));
}

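/*
 * fib_rule_match - generic match step for one rule
 *
 * Checks the family-independent selectors (iif, oif, fwmark, UID range)
 * and then defers to the family's ->match() hook. The result is
 * inverted for rules carrying FIB_RULE_INVERT ("not" rules).
 */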
static int fib_rule_match(struct fib_rule *rule, struct fib_rules_ops *ops,
                          struct flowi *fl, int flags)
{
        int ret = 0;

        if (rule->iifindex && (rule->iifindex != fl->flowi_iif))
                goto out;

        if (rule->oifindex && (rule->oifindex != fl->flowi_oif))
                goto out;

        if ((rule->mark ^ fl->flowi_mark) & rule->mark_mask)
                goto out;

        if (!fib_uid_range_match(fl, rule))
                goto out;

        ret = ops->match(rule, fl, flags);
out:
        return (rule->flags & FIB_RULE_INVERT) ? !ret : ret;
}

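/*
 * fib_rules_lookup - run a flow through the rule list
 *
 * Walks the rules in preference order under RCU. Goto rules jump
 * forward to their resolved target, NOP rules are skipped, and any
 * other action is handed to ->action(); a return other than -EAGAIN
 * ends the walk and the matched rule is reported in @arg (with a
 * reference taken unless FIB_LOOKUP_NOREF is set). -ESRCH means no
 * rule accepted the flow.
 */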
int fib_rules_lookup(struct fib_rules_ops *ops, struct flowi *fl,
                     int flags, struct fib_lookup_arg *arg)
{
        struct fib_rule *rule;
        int err;

        rcu_read_lock();

        list_for_each_entry_rcu(rule, &ops->rules_list, list) {
jumped:
                if (!fib_rule_match(rule, ops, fl, flags))
                        continue;

                if (rule->action == FR_ACT_GOTO) {
                        struct fib_rule *target;

                        target = rcu_dereference(rule->ctarget);
                        if (target == NULL) {
                                continue;
                        } else {
                                rule = target;
                                goto jumped;
                        }
                } else if (rule->action == FR_ACT_NOP)
                        continue;
                else
                        err = ops->action(rule, fl, flags, arg);

                if (err != -EAGAIN) {
                        if ((arg->flags & FIB_LOOKUP_NOREF) ||
                            likely(atomic_inc_not_zero(&rule->refcnt))) {
                                arg->rule = rule;
                                goto out;
                        }
                        break;
                }
        }

        err = -ESRCH;
out:
        rcu_read_unlock();

        return err;
}
EXPORT_SYMBOL_GPL(fib_rules_lookup);

static int validate_rulemsg(struct fib_rule_hdr *frh, struct nlattr **tb,
                            struct fib_rules_ops *ops)
{
        int err = -EINVAL;

        if (frh->src_len)
                if (tb[FRA_SRC] == NULL ||
                    frh->src_len > (ops->addr_size * 8) ||
                    nla_len(tb[FRA_SRC]) != ops->addr_size)
                        goto errout;

        if (frh->dst_len)
                if (tb[FRA_DST] == NULL ||
                    frh->dst_len > (ops->addr_size * 8) ||
                    nla_len(tb[FRA_DST]) != ops->addr_size)
                        goto errout;

        err = 0;
errout:
        return err;
}

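/*
 * fib_nl_newrule - RTM_NEWRULE handler
 *
 * Parses and validates the netlink request, builds the rule (interface
 * names, fwmark/mask, goto target, UID range, then the family's
 * ->configure() hook), inserts it in preference order, resolves any
 * goto rules that were waiting for this preference, and notifies
 * listeners. This is the path taken by e.g. "ip rule add".
 */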
static int fib_nl_newrule(struct sk_buff *skb, struct nlmsghdr *nlh)
{
        struct net *net = sock_net(skb->sk);
        struct fib_rule_hdr *frh = nlmsg_data(nlh);
        struct fib_rules_ops *ops = NULL;
        struct fib_rule *rule, *r, *last = NULL;
        struct nlattr *tb[FRA_MAX+1];
        int err = -EINVAL, unresolved = 0;

        if (nlh->nlmsg_len < nlmsg_msg_size(sizeof(*frh)))
                goto errout;

        ops = lookup_rules_ops(net, frh->family);
        if (ops == NULL) {
                err = -EAFNOSUPPORT;
                goto errout;
        }

        err = nlmsg_parse(nlh, sizeof(*frh), tb, FRA_MAX, ops->policy);
        if (err < 0)
                goto errout;

        err = validate_rulemsg(frh, tb, ops);
        if (err < 0)
                goto errout;

        rule = kzalloc(ops->rule_size, GFP_KERNEL);
        if (rule == NULL) {
                err = -ENOMEM;
                goto errout;
        }
        rule->fr_net = hold_net(net);

        if (tb[FRA_PRIORITY])
                rule->pref = nla_get_u32(tb[FRA_PRIORITY]);

        if (tb[FRA_IIFNAME]) {
                struct net_device *dev;

                rule->iifindex = -1;
                nla_strlcpy(rule->iifname, tb[FRA_IIFNAME], IFNAMSIZ);
                dev = __dev_get_by_name(net, rule->iifname);
                if (dev)
                        rule->iifindex = dev->ifindex;
        }

        if (tb[FRA_OIFNAME]) {
                struct net_device *dev;

                rule->oifindex = -1;
                nla_strlcpy(rule->oifname, tb[FRA_OIFNAME], IFNAMSIZ);
                dev = __dev_get_by_name(net, rule->oifname);
                if (dev)
                        rule->oifindex = dev->ifindex;
        }

        if (tb[FRA_FWMARK]) {
                rule->mark = nla_get_u32(tb[FRA_FWMARK]);
                if (rule->mark)
                        /* compatibility: if the mark value is non-zero all bits
                         * are compared unless a mask is explicitly specified.
                         */
                        rule->mark_mask = 0xFFFFFFFF;
        }

        if (tb[FRA_FWMASK])
                rule->mark_mask = nla_get_u32(tb[FRA_FWMASK]);

        rule->action = frh->action;
        rule->flags = frh->flags;
        rule->table = frh_get_table(frh, tb);

        if (!tb[FRA_PRIORITY] && ops->default_pref)
                rule->pref = ops->default_pref(ops);

        err = -EINVAL;
        if (tb[FRA_GOTO]) {
                if (rule->action != FR_ACT_GOTO)
                        goto errout_free;

                rule->target = nla_get_u32(tb[FRA_GOTO]);
                /* Backward jumps are prohibited to avoid endless loops */
                if (rule->target <= rule->pref)
                        goto errout_free;

                list_for_each_entry(r, &ops->rules_list, list) {
                        if (r->pref == rule->target) {
                                RCU_INIT_POINTER(rule->ctarget, r);
                                break;
                        }
                }

                if (rcu_dereference_protected(rule->ctarget, 1) == NULL)
                        unresolved = 1;
        } else if (rule->action == FR_ACT_GOTO)
                goto errout_free;

        /* UID start and end must either both be valid or both unspecified. */
        rule->uid_start = rule->uid_end = INVALID_UID;
        if (tb[FRA_UID_START] || tb[FRA_UID_END]) {
                if (tb[FRA_UID_START] && tb[FRA_UID_END]) {
                        rule->uid_start = fib_nl_uid(tb[FRA_UID_START]);
                        rule->uid_end = fib_nl_uid(tb[FRA_UID_END]);
                }
                if (!uid_valid(rule->uid_start) ||
                    !uid_valid(rule->uid_end) ||
                    !uid_lte(rule->uid_start, rule->uid_end))
                        goto errout_free;
        }

        err = ops->configure(rule, skb, frh, tb);
        if (err < 0)
                goto errout_free;

        list_for_each_entry(r, &ops->rules_list, list) {
                if (r->pref > rule->pref)
                        break;
                last = r;
        }

        fib_rule_get(rule);

        if (last)
                list_add_rcu(&rule->list, &last->list);
        else
                list_add_rcu(&rule->list, &ops->rules_list);

        if (ops->unresolved_rules) {
                /*
                 * There are unresolved goto rules in the list, check if
                 * any of them are pointing to this new rule.
                 */
                list_for_each_entry(r, &ops->rules_list, list) {
                        if (r->action == FR_ACT_GOTO &&
                            r->target == rule->pref &&
                            rtnl_dereference(r->ctarget) == NULL) {
                                rcu_assign_pointer(r->ctarget, rule);
                                if (--ops->unresolved_rules == 0)
                                        break;
                        }
                }
        }

        if (rule->action == FR_ACT_GOTO)
                ops->nr_goto_rules++;

        if (unresolved)
                ops->unresolved_rules++;

        notify_rule_change(RTM_NEWRULE, rule, ops, nlh, NETLINK_CB(skb).portid);
        flush_route_cache(ops);
        rules_ops_put(ops);
        return 0;

errout_free:
        release_net(rule->fr_net);
        kfree(rule);
errout:
        rules_ops_put(ops);
        return err;
}

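/*
 * fib_nl_delrule - RTM_DELRULE handler
 *
 * Finds the first rule matching every attribute given in the request
 * (action, table, priority, interfaces, fwmark/mask, UID range, plus
 * the family's ->compare()), unlinks it, fixes up goto bookkeeping,
 * and notifies listeners. Permanent rules cannot be deleted (-EPERM).
 */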
static int fib_nl_delrule(struct sk_buff *skb, struct nlmsghdr *nlh)
{
        struct net *net = sock_net(skb->sk);
        struct fib_rule_hdr *frh = nlmsg_data(nlh);
        struct fib_rules_ops *ops = NULL;
        struct fib_rule *rule, *tmp;
        struct nlattr *tb[FRA_MAX+1];
        int err = -EINVAL;

        if (nlh->nlmsg_len < nlmsg_msg_size(sizeof(*frh)))
                goto errout;

        ops = lookup_rules_ops(net, frh->family);
        if (ops == NULL) {
                err = -EAFNOSUPPORT;
                goto errout;
        }

        err = nlmsg_parse(nlh, sizeof(*frh), tb, FRA_MAX, ops->policy);
        if (err < 0)
                goto errout;

        err = validate_rulemsg(frh, tb, ops);
        if (err < 0)
                goto errout;

        list_for_each_entry(rule, &ops->rules_list, list) {
                if (frh->action && (frh->action != rule->action))
                        continue;

                if (frh_get_table(frh, tb) &&
                    (frh_get_table(frh, tb) != rule->table))
                        continue;

                if (tb[FRA_PRIORITY] &&
                    (rule->pref != nla_get_u32(tb[FRA_PRIORITY])))
                        continue;

                if (tb[FRA_IIFNAME] &&
                    nla_strcmp(tb[FRA_IIFNAME], rule->iifname))
                        continue;

                if (tb[FRA_OIFNAME] &&
                    nla_strcmp(tb[FRA_OIFNAME], rule->oifname))
                        continue;

                if (tb[FRA_FWMARK] &&
                    (rule->mark != nla_get_u32(tb[FRA_FWMARK])))
                        continue;

                if (tb[FRA_FWMASK] &&
                    (rule->mark_mask != nla_get_u32(tb[FRA_FWMASK])))
                        continue;

                if (tb[FRA_UID_START] &&
                    !uid_eq(rule->uid_start, fib_nl_uid(tb[FRA_UID_START])))
                        continue;

                if (tb[FRA_UID_END] &&
                    !uid_eq(rule->uid_end, fib_nl_uid(tb[FRA_UID_END])))
                        continue;

                if (!ops->compare(rule, frh, tb))
                        continue;

                if (rule->flags & FIB_RULE_PERMANENT) {
                        err = -EPERM;
                        goto errout;
                }

                list_del_rcu(&rule->list);

                if (rule->action == FR_ACT_GOTO) {
                        ops->nr_goto_rules--;
                        if (rtnl_dereference(rule->ctarget) == NULL)
                                ops->unresolved_rules--;
                }

                /*
                 * Check if the deleted rule is the target of any goto rule
                 * and, if so, mark those rules unresolved again. As this
                 * operation is potentially very expensive, it is only
                 * performed if goto rules have actually been added.
                 */
                if (ops->nr_goto_rules > 0) {
                        list_for_each_entry(tmp, &ops->rules_list, list) {
                                if (rtnl_dereference(tmp->ctarget) == rule) {
                                        RCU_INIT_POINTER(tmp->ctarget, NULL);
                                        ops->unresolved_rules++;
                                }
                        }
                }

                notify_rule_change(RTM_DELRULE, rule, ops, nlh,
                                   NETLINK_CB(skb).portid);
                if (ops->delete)
                        ops->delete(rule);
                fib_rule_put(rule);
                flush_route_cache(ops);
                rules_ops_put(ops);
                return 0;
        }

        err = -ENOENT;
errout:
        rules_ops_put(ops);
        return err;
}

static inline size_t fib_rule_nlmsg_size(struct fib_rules_ops *ops,
                                         struct fib_rule *rule)
{
        size_t payload = NLMSG_ALIGN(sizeof(struct fib_rule_hdr))
                         + nla_total_size(IFNAMSIZ) /* FRA_IIFNAME */
                         + nla_total_size(IFNAMSIZ) /* FRA_OIFNAME */
                         + nla_total_size(4) /* FRA_PRIORITY */
                         + nla_total_size(4) /* FRA_TABLE */
                         + nla_total_size(4) /* FRA_FWMARK */
                         + nla_total_size(4) /* FRA_FWMASK */
                         + nla_total_size(4) /* FRA_UID_START */
                         + nla_total_size(4); /* FRA_UID_END */

        if (ops->nlmsg_payload)
                payload += ops->nlmsg_payload(rule);

        return payload;
}

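/*
 * fib_nl_fill_rule - encode one rule as an RTM_{NEW,DEL}RULE message
 *
 * Fills the fib_rule_hdr and appends the generic attributes (table,
 * interface names, priority, fwmark/mask, goto target, UID range),
 * flagging unresolved gotos and detached interfaces, then lets the
 * family's ->fill() hook add its own attributes.
 */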
static int fib_nl_fill_rule(struct sk_buff *skb, struct fib_rule *rule,
                            u32 pid, u32 seq, int type, int flags,
                            struct fib_rules_ops *ops)
{
        struct nlmsghdr *nlh;
        struct fib_rule_hdr *frh;

        nlh = nlmsg_put(skb, pid, seq, type, sizeof(*frh), flags);
        if (nlh == NULL)
                return -EMSGSIZE;

        frh = nlmsg_data(nlh);
        frh->family = ops->family;
        frh->table = rule->table;
        if (nla_put_u32(skb, FRA_TABLE, rule->table))
                goto nla_put_failure;
        frh->res1 = 0;
        frh->res2 = 0;
        frh->action = rule->action;
        frh->flags = rule->flags;

        if (rule->action == FR_ACT_GOTO &&
            rcu_access_pointer(rule->ctarget) == NULL)
                frh->flags |= FIB_RULE_UNRESOLVED;

        if (rule->iifname[0]) {
                if (nla_put_string(skb, FRA_IIFNAME, rule->iifname))
                        goto nla_put_failure;
                if (rule->iifindex == -1)
                        frh->flags |= FIB_RULE_IIF_DETACHED;
        }

        if (rule->oifname[0]) {
                if (nla_put_string(skb, FRA_OIFNAME, rule->oifname))
                        goto nla_put_failure;
                if (rule->oifindex == -1)
                        frh->flags |= FIB_RULE_OIF_DETACHED;
        }

        if ((rule->pref &&
             nla_put_u32(skb, FRA_PRIORITY, rule->pref)) ||
            (rule->mark &&
             nla_put_u32(skb, FRA_FWMARK, rule->mark)) ||
            ((rule->mark_mask || rule->mark) &&
             nla_put_u32(skb, FRA_FWMASK, rule->mark_mask)) ||
            (rule->target &&
             nla_put_u32(skb, FRA_GOTO, rule->target)) ||
            (uid_valid(rule->uid_start) &&
             nla_put_uid(skb, FRA_UID_START, rule->uid_start)) ||
            (uid_valid(rule->uid_end) &&
             nla_put_uid(skb, FRA_UID_END, rule->uid_end)))
                goto nla_put_failure;
        if (ops->fill(rule, skb, frh) < 0)
                goto nla_put_failure;

        return nlmsg_end(skb, nlh);

nla_put_failure:
        nlmsg_cancel(skb, nlh);
        return -EMSGSIZE;
}

static int dump_rules(struct sk_buff *skb, struct netlink_callback *cb,
                      struct fib_rules_ops *ops)
{
        int idx = 0;
        struct fib_rule *rule;

        rcu_read_lock();
        list_for_each_entry_rcu(rule, &ops->rules_list, list) {
                if (idx < cb->args[1])
                        goto skip;

                if (fib_nl_fill_rule(skb, rule, NETLINK_CB(cb->skb).portid,
                                     cb->nlh->nlmsg_seq, RTM_NEWRULE,
                                     NLM_F_MULTI, ops) < 0)
                        break;
skip:
                idx++;
        }
        rcu_read_unlock();
        cb->args[1] = idx;
        rules_ops_put(ops);

        return skb->len;
}

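/*
 * fib_nl_dumprule - RTM_GETRULE dump handler
 *
 * For a family-specific request, dumps just that family's rules; for
 * AF_UNSPEC it iterates over all registered families, using
 * cb->args[0]/args[1] to resume an interrupted dump. This is what
 * e.g. "ip rule show" requests.
 */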
static int fib_nl_dumprule(struct sk_buff *skb, struct netlink_callback *cb)
{
        struct net *net = sock_net(skb->sk);
        struct fib_rules_ops *ops;
        int idx = 0, family;

        family = rtnl_msg_family(cb->nlh);
        if (family != AF_UNSPEC) {
                /* Protocol specific dump request */
                ops = lookup_rules_ops(net, family);
                if (ops == NULL)
                        return -EAFNOSUPPORT;

                return dump_rules(skb, cb, ops);
        }

        rcu_read_lock();
        list_for_each_entry_rcu(ops, &net->rules_ops, list) {
                if (idx < cb->args[0] || !try_module_get(ops->owner))
                        goto skip;

                if (dump_rules(skb, cb, ops) < 0)
                        break;

                cb->args[1] = 0;
skip:
                idx++;
        }
        rcu_read_unlock();
        cb->args[0] = idx;

        return skb->len;
}

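/*
 * notify_rule_change - multicast a rule add/delete to rtnetlink listeners
 *
 * Allocates a message sized by fib_rule_nlmsg_size(), fills it, and
 * sends it to the ops' netlink group; on allocation or fill failure
 * the error is recorded on the rtnl socket instead.
 */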
static void notify_rule_change(int event, struct fib_rule *rule,
                               struct fib_rules_ops *ops, struct nlmsghdr *nlh,
                               u32 pid)
{
        struct net *net;
        struct sk_buff *skb;
        int err = -ENOBUFS;

        net = ops->fro_net;
        skb = nlmsg_new(fib_rule_nlmsg_size(ops, rule), GFP_KERNEL);
        if (skb == NULL)
                goto errout;

        err = fib_nl_fill_rule(skb, rule, pid, nlh->nlmsg_seq, event, 0, ops);
        if (err < 0) {
                /* -EMSGSIZE implies BUG in fib_rule_nlmsg_size() */
                WARN_ON(err == -EMSGSIZE);
                kfree_skb(skb);
                goto errout;
        }

        rtnl_notify(skb, net, pid, ops->nlgroup, nlh, GFP_KERNEL);
        return;
errout:
        if (err < 0)
                rtnl_set_sk_err(net, ops->nlgroup, err);
}

static void attach_rules(struct list_head *rules, struct net_device *dev)
{
        struct fib_rule *rule;

        list_for_each_entry(rule, rules, list) {
                if (rule->iifindex == -1 &&
                    strcmp(dev->name, rule->iifname) == 0)
                        rule->iifindex = dev->ifindex;
                if (rule->oifindex == -1 &&
                    strcmp(dev->name, rule->oifname) == 0)
                        rule->oifindex = dev->ifindex;
        }
}

static void detach_rules(struct list_head *rules, struct net_device *dev)
{
        struct fib_rule *rule;

        list_for_each_entry(rule, rules, list) {
                if (rule->iifindex == dev->ifindex)
                        rule->iifindex = -1;
                if (rule->oifindex == dev->ifindex)
                        rule->oifindex = -1;
        }
}

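/*
 * fib_rules_event - netdevice notifier
 *
 * Keeps the cached ifindex in interface-bound rules in sync: resolves
 * names when a device (re)appears or is renamed, and detaches rules
 * when it goes away. Runs under RTNL.
 */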
static int fib_rules_event(struct notifier_block *this, unsigned long event,
                           void *ptr)
{
        struct net_device *dev = ptr;
        struct net *net = dev_net(dev);
        struct fib_rules_ops *ops;

        ASSERT_RTNL();

        switch (event) {
        case NETDEV_REGISTER:
                list_for_each_entry(ops, &net->rules_ops, list)
                        attach_rules(&ops->rules_list, dev);
                break;

        case NETDEV_CHANGENAME:
                list_for_each_entry(ops, &net->rules_ops, list) {
                        detach_rules(&ops->rules_list, dev);
                        attach_rules(&ops->rules_list, dev);
                }
                break;

        case NETDEV_UNREGISTER:
                list_for_each_entry(ops, &net->rules_ops, list)
                        detach_rules(&ops->rules_list, dev);
                break;
        }

        return NOTIFY_DONE;
}

static struct notifier_block fib_rules_notifier = {
        .notifier_call = fib_rules_event,
};

static int __net_init fib_rules_net_init(struct net *net)
{
        INIT_LIST_HEAD(&net->rules_ops);
        spin_lock_init(&net->rules_mod_lock);
        return 0;
}

static struct pernet_operations fib_rules_net_ops = {
        .init = fib_rules_net_init,
};

static int __init fib_rules_init(void)
{
        int err;
        rtnl_register(PF_UNSPEC, RTM_NEWRULE, fib_nl_newrule, NULL, NULL);
        rtnl_register(PF_UNSPEC, RTM_DELRULE, fib_nl_delrule, NULL, NULL);
        rtnl_register(PF_UNSPEC, RTM_GETRULE, NULL, fib_nl_dumprule, NULL);

        err = register_pernet_subsys(&fib_rules_net_ops);
        if (err < 0)
                goto fail;

        err = register_netdevice_notifier(&fib_rules_notifier);
        if (err < 0)
                goto fail_unregister;

        return 0;

fail_unregister:
        unregister_pernet_subsys(&fib_rules_net_ops);
fail:
        rtnl_unregister(PF_UNSPEC, RTM_NEWRULE);
        rtnl_unregister(PF_UNSPEC, RTM_DELRULE);
        rtnl_unregister(PF_UNSPEC, RTM_GETRULE);
        return err;
}

subsys_initcall(fib_rules_init);