/*
 * net/sched/cls_flower.c		Flower classifier
 *
 * Copyright (c) 2015 Jiri Pirko <jiri@resnulli.us>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 */
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/rhashtable.h>
#include <linux/workqueue.h>

#include <linux/if_ether.h>
#include <linux/in6.h>
#include <linux/ip.h>

#include <net/sch_generic.h>
#include <net/pkt_cls.h>
#include <net/ip.h>
#include <net/flow_dissector.h>
29 struct flow_dissector_key_control control
;
30 struct flow_dissector_key_basic basic
;
31 struct flow_dissector_key_eth_addrs eth
;
32 struct flow_dissector_key_addrs ipaddrs
;
34 struct flow_dissector_key_ipv4_addrs ipv4
;
35 struct flow_dissector_key_ipv6_addrs ipv6
;
37 struct flow_dissector_key_ports tp
;
38 } __aligned(BITS_PER_LONG
/ 8); /* Ensure that we can do comparisons as longs. */
/* Byte range [start, end) of the mask that contains set bits; both
 * bounds are rounded to long-size boundaries by fl_mask_update_range()
 * so that masked compares can be done a long at a time.
 */
struct fl_flow_mask_range {
	unsigned short int start;
	unsigned short int end;
};
46 struct fl_flow_key key
;
47 struct fl_flow_mask_range range
;
53 struct fl_flow_mask mask
;
54 struct flow_dissector dissector
;
57 struct list_head filters
;
58 struct rhashtable_params ht_params
;
60 struct work_struct work
;
65 struct cls_fl_filter
{
66 struct rhash_head ht_node
;
67 struct fl_flow_key mkey
;
69 struct tcf_result res
;
70 struct fl_flow_key key
;
71 struct list_head list
;
76 static unsigned short int fl_mask_range(const struct fl_flow_mask
*mask
)
78 return mask
->range
.end
- mask
->range
.start
;
81 static void fl_mask_update_range(struct fl_flow_mask
*mask
)
83 const u8
*bytes
= (const u8
*) &mask
->key
;
84 size_t size
= sizeof(mask
->key
);
85 size_t i
, first
= 0, last
= size
- 1;
87 for (i
= 0; i
< sizeof(mask
->key
); i
++) {
94 mask
->range
.start
= rounddown(first
, sizeof(long));
95 mask
->range
.end
= roundup(last
+ 1, sizeof(long));
98 static void *fl_key_get_start(struct fl_flow_key
*key
,
99 const struct fl_flow_mask
*mask
)
101 return (u8
*) key
+ mask
->range
.start
;
104 static void fl_set_masked_key(struct fl_flow_key
*mkey
, struct fl_flow_key
*key
,
105 struct fl_flow_mask
*mask
)
107 const long *lkey
= fl_key_get_start(key
, mask
);
108 const long *lmask
= fl_key_get_start(&mask
->key
, mask
);
109 long *lmkey
= fl_key_get_start(mkey
, mask
);
112 for (i
= 0; i
< fl_mask_range(mask
); i
+= sizeof(long))
113 *lmkey
++ = *lkey
++ & *lmask
++;
/* Zero the populated range of @key before dissecting into it. */
static void fl_clear_masked_range(struct fl_flow_key *key,
				  struct fl_flow_mask *mask)
{
	memset(fl_key_get_start(key, mask), 0, fl_mask_range(mask));
}
122 static int fl_classify(struct sk_buff
*skb
, const struct tcf_proto
*tp
,
123 struct tcf_result
*res
)
125 struct cls_fl_head
*head
= rcu_dereference_bh(tp
->root
);
126 struct cls_fl_filter
*f
;
127 struct fl_flow_key skb_key
;
128 struct fl_flow_key skb_mkey
;
130 fl_clear_masked_range(&skb_key
, &head
->mask
);
131 skb_key
.indev_ifindex
= skb
->skb_iif
;
132 /* skb_flow_dissect() does not set n_proto in case an unknown protocol,
133 * so do it rather here.
135 skb_key
.basic
.n_proto
= skb
->protocol
;
136 skb_flow_dissect(skb
, &head
->dissector
, &skb_key
, 0);
138 fl_set_masked_key(&skb_mkey
, &skb_key
, &head
->mask
);
140 f
= rhashtable_lookup_fast(&head
->ht
,
141 fl_key_get_start(&skb_mkey
, &head
->mask
),
145 return tcf_exts_exec(skb
, &f
->exts
, res
);
150 static int fl_init(struct tcf_proto
*tp
)
152 struct cls_fl_head
*head
;
154 head
= kzalloc(sizeof(*head
), GFP_KERNEL
);
158 INIT_LIST_HEAD_RCU(&head
->filters
);
159 rcu_assign_pointer(tp
->root
, head
);
164 static void fl_destroy_filter(struct rcu_head
*head
)
166 struct cls_fl_filter
*f
= container_of(head
, struct cls_fl_filter
, rcu
);
168 tcf_exts_destroy(&f
->exts
);
172 static void fl_destroy_sleepable(struct work_struct
*work
)
174 struct cls_fl_head
*head
= container_of(work
, struct cls_fl_head
,
176 if (head
->mask_assigned
)
177 rhashtable_destroy(&head
->ht
);
179 module_put(THIS_MODULE
);
182 static void fl_destroy_rcu(struct rcu_head
*rcu
)
184 struct cls_fl_head
*head
= container_of(rcu
, struct cls_fl_head
, rcu
);
186 INIT_WORK(&head
->work
, fl_destroy_sleepable
);
187 schedule_work(&head
->work
);
190 static bool fl_destroy(struct tcf_proto
*tp
, bool force
)
192 struct cls_fl_head
*head
= rtnl_dereference(tp
->root
);
193 struct cls_fl_filter
*f
, *next
;
195 if (!force
&& !list_empty(&head
->filters
))
198 list_for_each_entry_safe(f
, next
, &head
->filters
, list
) {
199 list_del_rcu(&f
->list
);
200 call_rcu(&f
->rcu
, fl_destroy_filter
);
203 __module_get(THIS_MODULE
);
204 call_rcu(&head
->rcu
, fl_destroy_rcu
);
208 static unsigned long fl_get(struct tcf_proto
*tp
, u32 handle
)
210 struct cls_fl_head
*head
= rtnl_dereference(tp
->root
);
211 struct cls_fl_filter
*f
;
213 list_for_each_entry(f
, &head
->filters
, list
)
214 if (f
->handle
== handle
)
215 return (unsigned long) f
;
219 static const struct nla_policy fl_policy
[TCA_FLOWER_MAX
+ 1] = {
220 [TCA_FLOWER_UNSPEC
] = { .type
= NLA_UNSPEC
},
221 [TCA_FLOWER_CLASSID
] = { .type
= NLA_U32
},
222 [TCA_FLOWER_INDEV
] = { .type
= NLA_STRING
,
224 [TCA_FLOWER_KEY_ETH_DST
] = { .len
= ETH_ALEN
},
225 [TCA_FLOWER_KEY_ETH_DST_MASK
] = { .len
= ETH_ALEN
},
226 [TCA_FLOWER_KEY_ETH_SRC
] = { .len
= ETH_ALEN
},
227 [TCA_FLOWER_KEY_ETH_SRC_MASK
] = { .len
= ETH_ALEN
},
228 [TCA_FLOWER_KEY_ETH_TYPE
] = { .type
= NLA_U16
},
229 [TCA_FLOWER_KEY_IP_PROTO
] = { .type
= NLA_U8
},
230 [TCA_FLOWER_KEY_IPV4_SRC
] = { .type
= NLA_U32
},
231 [TCA_FLOWER_KEY_IPV4_SRC_MASK
] = { .type
= NLA_U32
},
232 [TCA_FLOWER_KEY_IPV4_DST
] = { .type
= NLA_U32
},
233 [TCA_FLOWER_KEY_IPV4_DST_MASK
] = { .type
= NLA_U32
},
234 [TCA_FLOWER_KEY_IPV6_SRC
] = { .len
= sizeof(struct in6_addr
) },
235 [TCA_FLOWER_KEY_IPV6_SRC_MASK
] = { .len
= sizeof(struct in6_addr
) },
236 [TCA_FLOWER_KEY_IPV6_DST
] = { .len
= sizeof(struct in6_addr
) },
237 [TCA_FLOWER_KEY_IPV6_DST_MASK
] = { .len
= sizeof(struct in6_addr
) },
238 [TCA_FLOWER_KEY_TCP_SRC
] = { .type
= NLA_U16
},
239 [TCA_FLOWER_KEY_TCP_DST
] = { .type
= NLA_U16
},
240 [TCA_FLOWER_KEY_UDP_SRC
] = { .type
= NLA_U16
},
241 [TCA_FLOWER_KEY_UDP_DST
] = { .type
= NLA_U16
},
244 static void fl_set_key_val(struct nlattr
**tb
,
245 void *val
, int val_type
,
246 void *mask
, int mask_type
, int len
)
250 memcpy(val
, nla_data(tb
[val_type
]), len
);
251 if (mask_type
== TCA_FLOWER_UNSPEC
|| !tb
[mask_type
])
252 memset(mask
, 0xff, len
);
254 memcpy(mask
, nla_data(tb
[mask_type
]), len
);
257 static int fl_set_key(struct net
*net
, struct nlattr
**tb
,
258 struct fl_flow_key
*key
, struct fl_flow_key
*mask
)
260 #ifdef CONFIG_NET_CLS_IND
261 if (tb
[TCA_FLOWER_INDEV
]) {
262 int err
= tcf_change_indev(net
, tb
[TCA_FLOWER_INDEV
]);
265 key
->indev_ifindex
= err
;
266 mask
->indev_ifindex
= 0xffffffff;
270 fl_set_key_val(tb
, key
->eth
.dst
, TCA_FLOWER_KEY_ETH_DST
,
271 mask
->eth
.dst
, TCA_FLOWER_KEY_ETH_DST_MASK
,
272 sizeof(key
->eth
.dst
));
273 fl_set_key_val(tb
, key
->eth
.src
, TCA_FLOWER_KEY_ETH_SRC
,
274 mask
->eth
.src
, TCA_FLOWER_KEY_ETH_SRC_MASK
,
275 sizeof(key
->eth
.src
));
277 fl_set_key_val(tb
, &key
->basic
.n_proto
, TCA_FLOWER_KEY_ETH_TYPE
,
278 &mask
->basic
.n_proto
, TCA_FLOWER_UNSPEC
,
279 sizeof(key
->basic
.n_proto
));
281 if (key
->basic
.n_proto
== htons(ETH_P_IP
) ||
282 key
->basic
.n_proto
== htons(ETH_P_IPV6
)) {
283 fl_set_key_val(tb
, &key
->basic
.ip_proto
, TCA_FLOWER_KEY_IP_PROTO
,
284 &mask
->basic
.ip_proto
, TCA_FLOWER_UNSPEC
,
285 sizeof(key
->basic
.ip_proto
));
288 if (tb
[TCA_FLOWER_KEY_IPV4_SRC
] || tb
[TCA_FLOWER_KEY_IPV4_DST
]) {
289 key
->control
.addr_type
= FLOW_DISSECTOR_KEY_IPV4_ADDRS
;
290 fl_set_key_val(tb
, &key
->ipv4
.src
, TCA_FLOWER_KEY_IPV4_SRC
,
291 &mask
->ipv4
.src
, TCA_FLOWER_KEY_IPV4_SRC_MASK
,
292 sizeof(key
->ipv4
.src
));
293 fl_set_key_val(tb
, &key
->ipv4
.dst
, TCA_FLOWER_KEY_IPV4_DST
,
294 &mask
->ipv4
.dst
, TCA_FLOWER_KEY_IPV4_DST_MASK
,
295 sizeof(key
->ipv4
.dst
));
296 } else if (tb
[TCA_FLOWER_KEY_IPV6_SRC
] || tb
[TCA_FLOWER_KEY_IPV6_DST
]) {
297 key
->control
.addr_type
= FLOW_DISSECTOR_KEY_IPV6_ADDRS
;
298 fl_set_key_val(tb
, &key
->ipv6
.src
, TCA_FLOWER_KEY_IPV6_SRC
,
299 &mask
->ipv6
.src
, TCA_FLOWER_KEY_IPV6_SRC_MASK
,
300 sizeof(key
->ipv6
.src
));
301 fl_set_key_val(tb
, &key
->ipv6
.dst
, TCA_FLOWER_KEY_IPV6_DST
,
302 &mask
->ipv6
.dst
, TCA_FLOWER_KEY_IPV6_DST_MASK
,
303 sizeof(key
->ipv6
.dst
));
306 if (key
->basic
.ip_proto
== IPPROTO_TCP
) {
307 fl_set_key_val(tb
, &key
->tp
.src
, TCA_FLOWER_KEY_TCP_SRC
,
308 &mask
->tp
.src
, TCA_FLOWER_UNSPEC
,
309 sizeof(key
->tp
.src
));
310 fl_set_key_val(tb
, &key
->tp
.dst
, TCA_FLOWER_KEY_TCP_DST
,
311 &mask
->tp
.dst
, TCA_FLOWER_UNSPEC
,
312 sizeof(key
->tp
.dst
));
313 } else if (key
->basic
.ip_proto
== IPPROTO_UDP
) {
314 fl_set_key_val(tb
, &key
->tp
.src
, TCA_FLOWER_KEY_UDP_SRC
,
315 &mask
->tp
.src
, TCA_FLOWER_UNSPEC
,
316 sizeof(key
->tp
.src
));
317 fl_set_key_val(tb
, &key
->tp
.dst
, TCA_FLOWER_KEY_UDP_DST
,
318 &mask
->tp
.dst
, TCA_FLOWER_UNSPEC
,
319 sizeof(key
->tp
.dst
));
325 static bool fl_mask_eq(struct fl_flow_mask
*mask1
,
326 struct fl_flow_mask
*mask2
)
328 const long *lmask1
= fl_key_get_start(&mask1
->key
, mask1
);
329 const long *lmask2
= fl_key_get_start(&mask2
->key
, mask2
);
331 return !memcmp(&mask1
->range
, &mask2
->range
, sizeof(mask1
->range
)) &&
332 !memcmp(lmask1
, lmask2
, fl_mask_range(mask1
));
335 static const struct rhashtable_params fl_ht_params
= {
336 .key_offset
= offsetof(struct cls_fl_filter
, mkey
), /* base offset */
337 .head_offset
= offsetof(struct cls_fl_filter
, ht_node
),
338 .automatic_shrinking
= true,
341 static int fl_init_hashtable(struct cls_fl_head
*head
,
342 struct fl_flow_mask
*mask
)
344 head
->ht_params
= fl_ht_params
;
345 head
->ht_params
.key_len
= fl_mask_range(mask
);
346 head
->ht_params
.key_offset
+= mask
->range
.start
;
348 return rhashtable_init(&head
->ht
, &head
->ht_params
);
/* Helpers for building the flow_dissector key list from the mask's
 * populated byte range.
 * NOTE(review): the do/while bodies and cnt++ of FL_KEY_SET were
 * truncated in the extracted source; reconstructed from the visible
 * fragments and the use of cnt in fl_init_dissector().
 */
#define FL_KEY_MEMBER_OFFSET(member) offsetof(struct fl_flow_key, member)
#define FL_KEY_MEMBER_SIZE(member) (sizeof(((struct fl_flow_key *) 0)->member))
#define FL_KEY_MEMBER_END_OFFSET(member) \
	(FL_KEY_MEMBER_OFFSET(member) + FL_KEY_MEMBER_SIZE(member))

#define FL_KEY_IN_RANGE(mask, member)					\
	(FL_KEY_MEMBER_OFFSET(member) <= (mask)->range.end &&		\
	 FL_KEY_MEMBER_END_OFFSET(member) >= (mask)->range.start)

#define FL_KEY_SET(keys, cnt, id, member)				\
	do {								\
		keys[cnt].key_id = id;					\
		keys[cnt].offset = FL_KEY_MEMBER_OFFSET(member);	\
		cnt++;							\
	} while(0);

#define FL_KEY_SET_IF_IN_RANGE(mask, keys, cnt, id, member)		\
	do {								\
		if (FL_KEY_IN_RANGE(mask, member))			\
			FL_KEY_SET(keys, cnt, id, member);		\
	} while(0);
373 static void fl_init_dissector(struct cls_fl_head
*head
,
374 struct fl_flow_mask
*mask
)
376 struct flow_dissector_key keys
[FLOW_DISSECTOR_KEY_MAX
];
379 FL_KEY_SET(keys
, cnt
, FLOW_DISSECTOR_KEY_CONTROL
, control
);
380 FL_KEY_SET(keys
, cnt
, FLOW_DISSECTOR_KEY_BASIC
, basic
);
381 FL_KEY_SET_IF_IN_RANGE(mask
, keys
, cnt
,
382 FLOW_DISSECTOR_KEY_ETH_ADDRS
, eth
);
383 FL_KEY_SET_IF_IN_RANGE(mask
, keys
, cnt
,
384 FLOW_DISSECTOR_KEY_IPV4_ADDRS
, ipv4
);
385 FL_KEY_SET_IF_IN_RANGE(mask
, keys
, cnt
,
386 FLOW_DISSECTOR_KEY_IPV6_ADDRS
, ipv6
);
387 FL_KEY_SET_IF_IN_RANGE(mask
, keys
, cnt
,
388 FLOW_DISSECTOR_KEY_PORTS
, tp
);
390 skb_flow_dissector_init(&head
->dissector
, keys
, cnt
);
393 static int fl_check_assign_mask(struct cls_fl_head
*head
,
394 struct fl_flow_mask
*mask
)
398 if (head
->mask_assigned
) {
399 if (!fl_mask_eq(&head
->mask
, mask
))
405 /* Mask is not assigned yet. So assign it and init hashtable
408 err
= fl_init_hashtable(head
, mask
);
411 memcpy(&head
->mask
, mask
, sizeof(head
->mask
));
412 head
->mask_assigned
= true;
414 fl_init_dissector(head
, mask
);
419 static int fl_set_parms(struct net
*net
, struct tcf_proto
*tp
,
420 struct cls_fl_filter
*f
, struct fl_flow_mask
*mask
,
421 unsigned long base
, struct nlattr
**tb
,
422 struct nlattr
*est
, bool ovr
)
427 tcf_exts_init(&e
, TCA_FLOWER_ACT
, 0);
428 err
= tcf_exts_validate(net
, tp
, tb
, est
, &e
, ovr
);
432 if (tb
[TCA_FLOWER_CLASSID
]) {
433 f
->res
.classid
= nla_get_u32(tb
[TCA_FLOWER_CLASSID
]);
434 tcf_bind_filter(tp
, &f
->res
, base
);
437 err
= fl_set_key(net
, tb
, &f
->key
, &mask
->key
);
441 fl_mask_update_range(mask
);
442 fl_set_masked_key(&f
->mkey
, &f
->key
, mask
);
444 tcf_exts_change(tp
, &f
->exts
, &e
);
448 tcf_exts_destroy(&e
);
452 static u32
fl_grab_new_handle(struct tcf_proto
*tp
,
453 struct cls_fl_head
*head
)
455 unsigned int i
= 0x80000000;
459 if (++head
->hgen
== 0x7FFFFFFF)
461 } while (--i
> 0 && fl_get(tp
, head
->hgen
));
463 if (unlikely(i
== 0)) {
464 pr_err("Insufficient number of handles\n");
473 static int fl_change(struct net
*net
, struct sk_buff
*in_skb
,
474 struct tcf_proto
*tp
, unsigned long base
,
475 u32 handle
, struct nlattr
**tca
,
476 unsigned long *arg
, bool ovr
)
478 struct cls_fl_head
*head
= rtnl_dereference(tp
->root
);
479 struct cls_fl_filter
*fold
= (struct cls_fl_filter
*) *arg
;
480 struct cls_fl_filter
*fnew
;
481 struct nlattr
*tb
[TCA_FLOWER_MAX
+ 1];
482 struct fl_flow_mask mask
= {};
485 if (!tca
[TCA_OPTIONS
])
488 err
= nla_parse_nested(tb
, TCA_FLOWER_MAX
, tca
[TCA_OPTIONS
], fl_policy
);
492 if (fold
&& handle
&& fold
->handle
!= handle
)
495 fnew
= kzalloc(sizeof(*fnew
), GFP_KERNEL
);
499 tcf_exts_init(&fnew
->exts
, TCA_FLOWER_ACT
, 0);
502 handle
= fl_grab_new_handle(tp
, head
);
508 fnew
->handle
= handle
;
510 err
= fl_set_parms(net
, tp
, fnew
, &mask
, base
, tb
, tca
[TCA_RATE
], ovr
);
514 err
= fl_check_assign_mask(head
, &mask
);
518 err
= rhashtable_insert_fast(&head
->ht
, &fnew
->ht_node
,
523 rhashtable_remove_fast(&head
->ht
, &fold
->ht_node
,
526 *arg
= (unsigned long) fnew
;
529 list_replace_rcu(&fold
->list
, &fnew
->list
);
530 tcf_unbind_filter(tp
, &fold
->res
);
531 call_rcu(&fold
->rcu
, fl_destroy_filter
);
533 list_add_tail_rcu(&fnew
->list
, &head
->filters
);
543 static int fl_delete(struct tcf_proto
*tp
, unsigned long arg
)
545 struct cls_fl_head
*head
= rtnl_dereference(tp
->root
);
546 struct cls_fl_filter
*f
= (struct cls_fl_filter
*) arg
;
548 rhashtable_remove_fast(&head
->ht
, &f
->ht_node
,
550 list_del_rcu(&f
->list
);
551 tcf_unbind_filter(tp
, &f
->res
);
552 call_rcu(&f
->rcu
, fl_destroy_filter
);
556 static void fl_walk(struct tcf_proto
*tp
, struct tcf_walker
*arg
)
558 struct cls_fl_head
*head
= rtnl_dereference(tp
->root
);
559 struct cls_fl_filter
*f
;
561 list_for_each_entry_rcu(f
, &head
->filters
, list
) {
562 if (arg
->count
< arg
->skip
)
564 if (arg
->fn(tp
, (unsigned long) f
, arg
) < 0) {
573 static int fl_dump_key_val(struct sk_buff
*skb
,
574 void *val
, int val_type
,
575 void *mask
, int mask_type
, int len
)
579 if (!memchr_inv(mask
, 0, len
))
581 err
= nla_put(skb
, val_type
, len
, val
);
584 if (mask_type
!= TCA_FLOWER_UNSPEC
) {
585 err
= nla_put(skb
, mask_type
, len
, mask
);
592 static int fl_dump(struct net
*net
, struct tcf_proto
*tp
, unsigned long fh
,
593 struct sk_buff
*skb
, struct tcmsg
*t
)
595 struct cls_fl_head
*head
= rtnl_dereference(tp
->root
);
596 struct cls_fl_filter
*f
= (struct cls_fl_filter
*) fh
;
598 struct fl_flow_key
*key
, *mask
;
603 t
->tcm_handle
= f
->handle
;
605 nest
= nla_nest_start(skb
, TCA_OPTIONS
);
607 goto nla_put_failure
;
609 if (f
->res
.classid
&&
610 nla_put_u32(skb
, TCA_FLOWER_CLASSID
, f
->res
.classid
))
611 goto nla_put_failure
;
614 mask
= &head
->mask
.key
;
616 if (mask
->indev_ifindex
) {
617 struct net_device
*dev
;
619 dev
= __dev_get_by_index(net
, key
->indev_ifindex
);
620 if (dev
&& nla_put_string(skb
, TCA_FLOWER_INDEV
, dev
->name
))
621 goto nla_put_failure
;
624 if (fl_dump_key_val(skb
, key
->eth
.dst
, TCA_FLOWER_KEY_ETH_DST
,
625 mask
->eth
.dst
, TCA_FLOWER_KEY_ETH_DST_MASK
,
626 sizeof(key
->eth
.dst
)) ||
627 fl_dump_key_val(skb
, key
->eth
.src
, TCA_FLOWER_KEY_ETH_SRC
,
628 mask
->eth
.src
, TCA_FLOWER_KEY_ETH_SRC_MASK
,
629 sizeof(key
->eth
.src
)) ||
630 fl_dump_key_val(skb
, &key
->basic
.n_proto
, TCA_FLOWER_KEY_ETH_TYPE
,
631 &mask
->basic
.n_proto
, TCA_FLOWER_UNSPEC
,
632 sizeof(key
->basic
.n_proto
)))
633 goto nla_put_failure
;
634 if ((key
->basic
.n_proto
== htons(ETH_P_IP
) ||
635 key
->basic
.n_proto
== htons(ETH_P_IPV6
)) &&
636 fl_dump_key_val(skb
, &key
->basic
.ip_proto
, TCA_FLOWER_KEY_IP_PROTO
,
637 &mask
->basic
.ip_proto
, TCA_FLOWER_UNSPEC
,
638 sizeof(key
->basic
.ip_proto
)))
639 goto nla_put_failure
;
641 if (key
->control
.addr_type
== FLOW_DISSECTOR_KEY_IPV4_ADDRS
&&
642 (fl_dump_key_val(skb
, &key
->ipv4
.src
, TCA_FLOWER_KEY_IPV4_SRC
,
643 &mask
->ipv4
.src
, TCA_FLOWER_KEY_IPV4_SRC_MASK
,
644 sizeof(key
->ipv4
.src
)) ||
645 fl_dump_key_val(skb
, &key
->ipv4
.dst
, TCA_FLOWER_KEY_IPV4_DST
,
646 &mask
->ipv4
.dst
, TCA_FLOWER_KEY_IPV4_DST_MASK
,
647 sizeof(key
->ipv4
.dst
))))
648 goto nla_put_failure
;
649 else if (key
->control
.addr_type
== FLOW_DISSECTOR_KEY_IPV6_ADDRS
&&
650 (fl_dump_key_val(skb
, &key
->ipv6
.src
, TCA_FLOWER_KEY_IPV6_SRC
,
651 &mask
->ipv6
.src
, TCA_FLOWER_KEY_IPV6_SRC_MASK
,
652 sizeof(key
->ipv6
.src
)) ||
653 fl_dump_key_val(skb
, &key
->ipv6
.dst
, TCA_FLOWER_KEY_IPV6_DST
,
654 &mask
->ipv6
.dst
, TCA_FLOWER_KEY_IPV6_DST_MASK
,
655 sizeof(key
->ipv6
.dst
))))
656 goto nla_put_failure
;
658 if (key
->basic
.ip_proto
== IPPROTO_TCP
&&
659 (fl_dump_key_val(skb
, &key
->tp
.src
, TCA_FLOWER_KEY_TCP_SRC
,
660 &mask
->tp
.src
, TCA_FLOWER_UNSPEC
,
661 sizeof(key
->tp
.src
)) ||
662 fl_dump_key_val(skb
, &key
->tp
.dst
, TCA_FLOWER_KEY_TCP_DST
,
663 &mask
->tp
.dst
, TCA_FLOWER_UNSPEC
,
664 sizeof(key
->tp
.dst
))))
665 goto nla_put_failure
;
666 else if (key
->basic
.ip_proto
== IPPROTO_UDP
&&
667 (fl_dump_key_val(skb
, &key
->tp
.src
, TCA_FLOWER_KEY_UDP_SRC
,
668 &mask
->tp
.src
, TCA_FLOWER_UNSPEC
,
669 sizeof(key
->tp
.src
)) ||
670 fl_dump_key_val(skb
, &key
->tp
.dst
, TCA_FLOWER_KEY_UDP_DST
,
671 &mask
->tp
.dst
, TCA_FLOWER_UNSPEC
,
672 sizeof(key
->tp
.dst
))))
673 goto nla_put_failure
;
675 if (tcf_exts_dump(skb
, &f
->exts
))
676 goto nla_put_failure
;
678 nla_nest_end(skb
, nest
);
680 if (tcf_exts_dump_stats(skb
, &f
->exts
) < 0)
681 goto nla_put_failure
;
686 nla_nest_cancel(skb
, nest
);
690 static struct tcf_proto_ops cls_fl_ops __read_mostly
= {
692 .classify
= fl_classify
,
694 .destroy
= fl_destroy
,
700 .owner
= THIS_MODULE
,
703 static int __init
cls_fl_init(void)
705 return register_tcf_proto_ops(&cls_fl_ops
);
708 static void __exit
cls_fl_exit(void)
710 unregister_tcf_proto_ops(&cls_fl_ops
);
713 module_init(cls_fl_init
);
714 module_exit(cls_fl_exit
);
716 MODULE_AUTHOR("Jiri Pirko <jiri@resnulli.us>");
717 MODULE_DESCRIPTION("Flower classifier");
718 MODULE_LICENSE("GPL v2");