/*
 * net/sched/cls_flower.c		Flower classifier
 *
 * Copyright (c) 2015 Jiri Pirko <jiri@resnulli.us>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 */

#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/rhashtable.h>
#include <linux/workqueue.h>

#include <linux/if_ether.h>
#include <linux/in6.h>
#include <linux/ip.h>

#include <net/sch_generic.h>
#include <net/pkt_cls.h>
#include <net/ip.h>
#include <net/flow_dissector.h>

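/* Dissected flow key. Matching masks out the bits a filter does not
 * care about and compares the result as an array of longs, hence the
 * alignment requirement below.
 */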
struct fl_flow_key {
	int indev_ifindex;
	struct flow_dissector_key_control control;
	struct flow_dissector_key_basic basic;
	struct flow_dissector_key_eth_addrs eth;
	struct flow_dissector_key_addrs ipaddrs;
	union {
		struct flow_dissector_key_ipv4_addrs ipv4;
		struct flow_dissector_key_ipv6_addrs ipv6;
	};
	struct flow_dissector_key_ports tp;
} __aligned(BITS_PER_LONG / 8); /* Ensure that we can do comparisons as longs. */

struct fl_flow_mask_range {
	unsigned short int start;
	unsigned short int end;
};

struct fl_flow_mask {
	struct fl_flow_key key;
	struct fl_flow_mask_range range;
	struct rcu_head rcu;
};

struct cls_fl_head {
	struct rhashtable ht;
	struct fl_flow_mask mask;
	struct flow_dissector dissector;
	u32 hgen;
	bool mask_assigned;
	struct list_head filters;
	struct rhashtable_params ht_params;
	union {
		struct work_struct work;
		struct rcu_head rcu;
	};
};

struct cls_fl_filter {
	struct rhash_head ht_node;
	struct fl_flow_key mkey;
	struct tcf_exts exts;
	struct tcf_result res;
	struct fl_flow_key key;
	struct list_head list;
	u32 handle;
	struct rcu_head rcu;
};

static unsigned short int fl_mask_range(const struct fl_flow_mask *mask)
{
	return mask->range.end - mask->range.start;
}

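/* Find the first and last non-zero bytes of the mask and round them
 * out to long boundaries, so masked operations only touch the
 * relevant part of the key and can work a long at a time.
 */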
static void fl_mask_update_range(struct fl_flow_mask *mask)
{
	const u8 *bytes = (const u8 *) &mask->key;
	size_t size = sizeof(mask->key);
	size_t i, first = 0, last = size - 1;

	for (i = 0; i < sizeof(mask->key); i++) {
		if (bytes[i]) {
			if (!first && i)
				first = i;
			last = i;
		}
	}
	mask->range.start = rounddown(first, sizeof(long));
	mask->range.end = roundup(last + 1, sizeof(long));
}

static void *fl_key_get_start(struct fl_flow_key *key,
			      const struct fl_flow_mask *mask)
{
	return (u8 *) key + mask->range.start;
}

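/* mkey = key & mask, computed long-by-long over the masked range only */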
static void fl_set_masked_key(struct fl_flow_key *mkey, struct fl_flow_key *key,
			      struct fl_flow_mask *mask)
{
	const long *lkey = fl_key_get_start(key, mask);
	const long *lmask = fl_key_get_start(&mask->key, mask);
	long *lmkey = fl_key_get_start(mkey, mask);
	int i;

	for (i = 0; i < fl_mask_range(mask); i += sizeof(long))
		*lmkey++ = *lkey++ & *lmask++;
}

static void fl_clear_masked_range(struct fl_flow_key *key,
				  struct fl_flow_mask *mask)
{
	memset(fl_key_get_start(key, mask), 0, fl_mask_range(mask));
}

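/* Fast path: dissect the skb into a flow key, apply the shared mask
 * and resolve the matching filter with a single hash table lookup.
 */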
static int fl_classify(struct sk_buff *skb, const struct tcf_proto *tp,
		       struct tcf_result *res)
{
	struct cls_fl_head *head = rcu_dereference_bh(tp->root);
	struct cls_fl_filter *f;
	struct fl_flow_key skb_key;
	struct fl_flow_key skb_mkey;

	fl_clear_masked_range(&skb_key, &head->mask);
	skb_key.indev_ifindex = skb->skb_iif;
	/* skb_flow_dissect() does not set n_proto in case of an unknown
	 * protocol, so set it here instead.
	 */
	skb_key.basic.n_proto = skb->protocol;
	skb_flow_dissect(skb, &head->dissector, &skb_key, 0);

	fl_set_masked_key(&skb_mkey, &skb_key, &head->mask);

	f = rhashtable_lookup_fast(&head->ht,
				   fl_key_get_start(&skb_mkey, &head->mask),
				   head->ht_params);
	if (f) {
		*res = f->res;
		return tcf_exts_exec(skb, &f->exts, res);
	}
	return -1;
}

static int fl_init(struct tcf_proto *tp)
{
	struct cls_fl_head *head;

	head = kzalloc(sizeof(*head), GFP_KERNEL);
	if (!head)
		return -ENOBUFS;

	INIT_LIST_HEAD_RCU(&head->filters);
	rcu_assign_pointer(tp->root, head);

	return 0;
}

static void fl_destroy_filter(struct rcu_head *head)
{
	struct cls_fl_filter *f = container_of(head, struct cls_fl_filter, rcu);

	tcf_exts_destroy(&f->exts);
	kfree(f);
}

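/* rhashtable_destroy() may sleep, so head teardown is deferred to a
 * work item scheduled from the RCU callback; the module reference
 * taken in fl_destroy() keeps this code around until the work runs.
 */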
static void fl_destroy_sleepable(struct work_struct *work)
{
	struct cls_fl_head *head = container_of(work, struct cls_fl_head,
						work);
	if (head->mask_assigned)
		rhashtable_destroy(&head->ht);
	kfree(head);
	module_put(THIS_MODULE);
}

static void fl_destroy_rcu(struct rcu_head *rcu)
{
	struct cls_fl_head *head = container_of(rcu, struct cls_fl_head, rcu);

	INIT_WORK(&head->work, fl_destroy_sleepable);
	schedule_work(&head->work);
}

static bool fl_destroy(struct tcf_proto *tp, bool force)
{
	struct cls_fl_head *head = rtnl_dereference(tp->root);
	struct cls_fl_filter *f, *next;

	if (!force && !list_empty(&head->filters))
		return false;

	list_for_each_entry_safe(f, next, &head->filters, list) {
		list_del_rcu(&f->list);
		call_rcu(&f->rcu, fl_destroy_filter);
	}

	__module_get(THIS_MODULE);
	call_rcu(&head->rcu, fl_destroy_rcu);
	return true;
}

static unsigned long fl_get(struct tcf_proto *tp, u32 handle)
{
	struct cls_fl_head *head = rtnl_dereference(tp->root);
	struct cls_fl_filter *f;

	list_for_each_entry(f, &head->filters, list)
		if (f->handle == handle)
			return (unsigned long) f;
	return 0;
}

static const struct nla_policy fl_policy[TCA_FLOWER_MAX + 1] = {
	[TCA_FLOWER_UNSPEC]		= { .type = NLA_UNSPEC },
	[TCA_FLOWER_CLASSID]		= { .type = NLA_U32 },
	[TCA_FLOWER_INDEV]		= { .type = NLA_STRING,
					    .len = IFNAMSIZ },
	[TCA_FLOWER_KEY_ETH_DST]	= { .len = ETH_ALEN },
	[TCA_FLOWER_KEY_ETH_DST_MASK]	= { .len = ETH_ALEN },
	[TCA_FLOWER_KEY_ETH_SRC]	= { .len = ETH_ALEN },
	[TCA_FLOWER_KEY_ETH_SRC_MASK]	= { .len = ETH_ALEN },
	[TCA_FLOWER_KEY_ETH_TYPE]	= { .type = NLA_U16 },
	[TCA_FLOWER_KEY_IP_PROTO]	= { .type = NLA_U8 },
	[TCA_FLOWER_KEY_IPV4_SRC]	= { .type = NLA_U32 },
	[TCA_FLOWER_KEY_IPV4_SRC_MASK]	= { .type = NLA_U32 },
	[TCA_FLOWER_KEY_IPV4_DST]	= { .type = NLA_U32 },
	[TCA_FLOWER_KEY_IPV4_DST_MASK]	= { .type = NLA_U32 },
	[TCA_FLOWER_KEY_IPV6_SRC]	= { .len = sizeof(struct in6_addr) },
	[TCA_FLOWER_KEY_IPV6_SRC_MASK]	= { .len = sizeof(struct in6_addr) },
	[TCA_FLOWER_KEY_IPV6_DST]	= { .len = sizeof(struct in6_addr) },
	[TCA_FLOWER_KEY_IPV6_DST_MASK]	= { .len = sizeof(struct in6_addr) },
	[TCA_FLOWER_KEY_TCP_SRC]	= { .type = NLA_U16 },
	[TCA_FLOWER_KEY_TCP_DST]	= { .type = NLA_U16 },
	[TCA_FLOWER_KEY_UDP_SRC]	= { .type = NLA_U16 },
	[TCA_FLOWER_KEY_UDP_DST]	= { .type = NLA_U16 },
};

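/* Copy a value/mask pair from netlink attributes. A missing mask
 * attribute (or TCA_FLOWER_UNSPEC as the mask type) means exact
 * match, i.e. an all-ones mask.
 */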
static void fl_set_key_val(struct nlattr **tb,
			   void *val, int val_type,
			   void *mask, int mask_type, int len)
{
	if (!tb[val_type])
		return;
	memcpy(val, nla_data(tb[val_type]), len);
	if (mask_type == TCA_FLOWER_UNSPEC || !tb[mask_type])
		memset(mask, 0xff, len);
	else
		memcpy(mask, nla_data(tb[mask_type]), len);
}

static int fl_set_key(struct net *net, struct nlattr **tb,
		      struct fl_flow_key *key, struct fl_flow_key *mask)
{
#ifdef CONFIG_NET_CLS_IND
	if (tb[TCA_FLOWER_INDEV]) {
		int err = tcf_change_indev(net, tb[TCA_FLOWER_INDEV]);

		if (err < 0)
			return err;
		key->indev_ifindex = err;
		mask->indev_ifindex = 0xffffffff;
	}
#endif

	fl_set_key_val(tb, key->eth.dst, TCA_FLOWER_KEY_ETH_DST,
		       mask->eth.dst, TCA_FLOWER_KEY_ETH_DST_MASK,
		       sizeof(key->eth.dst));
	fl_set_key_val(tb, key->eth.src, TCA_FLOWER_KEY_ETH_SRC,
		       mask->eth.src, TCA_FLOWER_KEY_ETH_SRC_MASK,
		       sizeof(key->eth.src));

	fl_set_key_val(tb, &key->basic.n_proto, TCA_FLOWER_KEY_ETH_TYPE,
		       &mask->basic.n_proto, TCA_FLOWER_UNSPEC,
		       sizeof(key->basic.n_proto));

	if (key->basic.n_proto == htons(ETH_P_IP) ||
	    key->basic.n_proto == htons(ETH_P_IPV6)) {
		fl_set_key_val(tb, &key->basic.ip_proto, TCA_FLOWER_KEY_IP_PROTO,
			       &mask->basic.ip_proto, TCA_FLOWER_UNSPEC,
			       sizeof(key->basic.ip_proto));
	}

	if (tb[TCA_FLOWER_KEY_IPV4_SRC] || tb[TCA_FLOWER_KEY_IPV4_DST]) {
		key->control.addr_type = FLOW_DISSECTOR_KEY_IPV4_ADDRS;
		fl_set_key_val(tb, &key->ipv4.src, TCA_FLOWER_KEY_IPV4_SRC,
			       &mask->ipv4.src, TCA_FLOWER_KEY_IPV4_SRC_MASK,
			       sizeof(key->ipv4.src));
		fl_set_key_val(tb, &key->ipv4.dst, TCA_FLOWER_KEY_IPV4_DST,
			       &mask->ipv4.dst, TCA_FLOWER_KEY_IPV4_DST_MASK,
			       sizeof(key->ipv4.dst));
	} else if (tb[TCA_FLOWER_KEY_IPV6_SRC] || tb[TCA_FLOWER_KEY_IPV6_DST]) {
		key->control.addr_type = FLOW_DISSECTOR_KEY_IPV6_ADDRS;
		fl_set_key_val(tb, &key->ipv6.src, TCA_FLOWER_KEY_IPV6_SRC,
			       &mask->ipv6.src, TCA_FLOWER_KEY_IPV6_SRC_MASK,
			       sizeof(key->ipv6.src));
		fl_set_key_val(tb, &key->ipv6.dst, TCA_FLOWER_KEY_IPV6_DST,
			       &mask->ipv6.dst, TCA_FLOWER_KEY_IPV6_DST_MASK,
			       sizeof(key->ipv6.dst));
	}

	if (key->basic.ip_proto == IPPROTO_TCP) {
		fl_set_key_val(tb, &key->tp.src, TCA_FLOWER_KEY_TCP_SRC,
			       &mask->tp.src, TCA_FLOWER_UNSPEC,
			       sizeof(key->tp.src));
		fl_set_key_val(tb, &key->tp.dst, TCA_FLOWER_KEY_TCP_DST,
			       &mask->tp.dst, TCA_FLOWER_UNSPEC,
			       sizeof(key->tp.dst));
	} else if (key->basic.ip_proto == IPPROTO_UDP) {
		fl_set_key_val(tb, &key->tp.src, TCA_FLOWER_KEY_UDP_SRC,
			       &mask->tp.src, TCA_FLOWER_UNSPEC,
			       sizeof(key->tp.src));
		fl_set_key_val(tb, &key->tp.dst, TCA_FLOWER_KEY_UDP_DST,
			       &mask->tp.dst, TCA_FLOWER_UNSPEC,
			       sizeof(key->tp.dst));
	}

	return 0;
}

static bool fl_mask_eq(struct fl_flow_mask *mask1,
		       struct fl_flow_mask *mask2)
{
	const long *lmask1 = fl_key_get_start(&mask1->key, mask1);
	const long *lmask2 = fl_key_get_start(&mask2->key, mask2);

	return !memcmp(&mask1->range, &mask2->range, sizeof(mask1->range)) &&
	       !memcmp(lmask1, lmask2, fl_mask_range(mask1));
}

static const struct rhashtable_params fl_ht_params = {
	.key_offset = offsetof(struct cls_fl_filter, mkey), /* base offset */
	.head_offset = offsetof(struct cls_fl_filter, ht_node),
	.automatic_shrinking = true,
};

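/* Hash only the bytes covered by the mask: shift the key offset to
 * the start of the masked range and limit the key length to it.
 */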
static int fl_init_hashtable(struct cls_fl_head *head,
			     struct fl_flow_mask *mask)
{
	head->ht_params = fl_ht_params;
	head->ht_params.key_len = fl_mask_range(mask);
	head->ht_params.key_offset += mask->range.start;

	return rhashtable_init(&head->ht, &head->ht_params);
}

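/* Helpers to register flow dissector keys, optionally only when the
 * corresponding key member overlaps the masked byte range.
 */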
#define FL_KEY_MEMBER_OFFSET(member) offsetof(struct fl_flow_key, member)
#define FL_KEY_MEMBER_SIZE(member) (sizeof(((struct fl_flow_key *) 0)->member))
#define FL_KEY_MEMBER_END_OFFSET(member)				\
	(FL_KEY_MEMBER_OFFSET(member) + FL_KEY_MEMBER_SIZE(member))

#define FL_KEY_IN_RANGE(mask, member)					\
	(FL_KEY_MEMBER_OFFSET(member) <= (mask)->range.end &&		\
	 FL_KEY_MEMBER_END_OFFSET(member) >= (mask)->range.start)

#define FL_KEY_SET(keys, cnt, id, member)				\
	do {								\
		keys[cnt].key_id = id;					\
		keys[cnt].offset = FL_KEY_MEMBER_OFFSET(member);	\
		cnt++;							\
	} while (0)

#define FL_KEY_SET_IF_IN_RANGE(mask, keys, cnt, id, member)		\
	do {								\
		if (FL_KEY_IN_RANGE(mask, member))			\
			FL_KEY_SET(keys, cnt, id, member);		\
	} while (0)

static void fl_init_dissector(struct cls_fl_head *head,
			      struct fl_flow_mask *mask)
{
	struct flow_dissector_key keys[FLOW_DISSECTOR_KEY_MAX];
	size_t cnt = 0;

	FL_KEY_SET(keys, cnt, FLOW_DISSECTOR_KEY_CONTROL, control);
	FL_KEY_SET(keys, cnt, FLOW_DISSECTOR_KEY_BASIC, basic);
	FL_KEY_SET_IF_IN_RANGE(mask, keys, cnt,
			       FLOW_DISSECTOR_KEY_ETH_ADDRS, eth);
	FL_KEY_SET_IF_IN_RANGE(mask, keys, cnt,
			       FLOW_DISSECTOR_KEY_IPV4_ADDRS, ipv4);
	FL_KEY_SET_IF_IN_RANGE(mask, keys, cnt,
			       FLOW_DISSECTOR_KEY_IPV6_ADDRS, ipv6);
	FL_KEY_SET_IF_IN_RANGE(mask, keys, cnt,
			       FLOW_DISSECTOR_KEY_PORTS, tp);

	skb_flow_dissector_init(&head->dissector, keys, cnt);
}

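/* Flower supports a single mask per classifier instance: the first
 * filter installs it and all later filters must use an identical
 * mask, otherwise -EINVAL is returned.
 */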
static int fl_check_assign_mask(struct cls_fl_head *head,
				struct fl_flow_mask *mask)
{
	int err;

	if (head->mask_assigned) {
		if (!fl_mask_eq(&head->mask, mask))
			return -EINVAL;
		else
			return 0;
	}

	/* Mask is not assigned yet. Assign it and initialize the
	 * hashtable accordingly.
	 */
	err = fl_init_hashtable(head, mask);
	if (err)
		return err;
	memcpy(&head->mask, mask, sizeof(head->mask));
	head->mask_assigned = true;

	fl_init_dissector(head, mask);

	return 0;
}

static int fl_set_parms(struct net *net, struct tcf_proto *tp,
			struct cls_fl_filter *f, struct fl_flow_mask *mask,
			unsigned long base, struct nlattr **tb,
			struct nlattr *est, bool ovr)
{
	struct tcf_exts e;
	int err;

	tcf_exts_init(&e, TCA_FLOWER_ACT, 0);
	err = tcf_exts_validate(net, tp, tb, est, &e, ovr);
	if (err < 0)
		return err;

	if (tb[TCA_FLOWER_CLASSID]) {
		f->res.classid = nla_get_u32(tb[TCA_FLOWER_CLASSID]);
		tcf_bind_filter(tp, &f->res, base);
	}

	err = fl_set_key(net, tb, &f->key, &mask->key);
	if (err)
		goto errout;

	fl_mask_update_range(mask);
	fl_set_masked_key(&f->mkey, &f->key, mask);

	tcf_exts_change(tp, &f->exts, &e);

	return 0;
errout:
	tcf_exts_destroy(&e);
	return err;
}

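/* Allocate an unused handle by advancing the per-head generation
 * counter, wrapping within 1..0x7FFFFFFE and giving up (returning 0)
 * after 0x80000000 attempts.
 */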
static u32 fl_grab_new_handle(struct tcf_proto *tp,
			      struct cls_fl_head *head)
{
	unsigned int i = 0x80000000;
	u32 handle;

	do {
		if (++head->hgen == 0x7FFFFFFF)
			head->hgen = 1;
	} while (--i > 0 && fl_get(tp, head->hgen));

	if (unlikely(i == 0)) {
		pr_err("Insufficient number of handles\n");
		handle = 0;
	} else {
		handle = head->hgen;
	}

	return handle;
}

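/* Create a new filter or replace an existing one (*arg). On replace,
 * the new entry is inserted into the hash table before the old one is
 * removed, so concurrent readers always find a match; the old filter
 * is freed after an RCU grace period.
 */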
static int fl_change(struct net *net, struct sk_buff *in_skb,
		     struct tcf_proto *tp, unsigned long base,
		     u32 handle, struct nlattr **tca,
		     unsigned long *arg, bool ovr)
{
	struct cls_fl_head *head = rtnl_dereference(tp->root);
	struct cls_fl_filter *fold = (struct cls_fl_filter *) *arg;
	struct cls_fl_filter *fnew;
	struct nlattr *tb[TCA_FLOWER_MAX + 1];
	struct fl_flow_mask mask = {};
	int err;

	if (!tca[TCA_OPTIONS])
		return -EINVAL;

	err = nla_parse_nested(tb, TCA_FLOWER_MAX, tca[TCA_OPTIONS], fl_policy);
	if (err < 0)
		return err;

	if (fold && handle && fold->handle != handle)
		return -EINVAL;

	fnew = kzalloc(sizeof(*fnew), GFP_KERNEL);
	if (!fnew)
		return -ENOBUFS;

	tcf_exts_init(&fnew->exts, TCA_FLOWER_ACT, 0);

	if (!handle) {
		handle = fl_grab_new_handle(tp, head);
		if (!handle) {
			err = -EINVAL;
			goto errout;
		}
	}
	fnew->handle = handle;

	err = fl_set_parms(net, tp, fnew, &mask, base, tb, tca[TCA_RATE], ovr);
	if (err)
		goto errout;

	err = fl_check_assign_mask(head, &mask);
	if (err)
		goto errout;

	err = rhashtable_insert_fast(&head->ht, &fnew->ht_node,
				     head->ht_params);
	if (err)
		goto errout;
	if (fold)
		rhashtable_remove_fast(&head->ht, &fold->ht_node,
				       head->ht_params);

	*arg = (unsigned long) fnew;

	if (fold) {
		list_replace_rcu(&fold->list, &fnew->list);
		tcf_unbind_filter(tp, &fold->res);
		call_rcu(&fold->rcu, fl_destroy_filter);
	} else {
		list_add_tail_rcu(&fnew->list, &head->filters);
	}

	return 0;

errout:
	kfree(fnew);
	return err;
}

static int fl_delete(struct tcf_proto *tp, unsigned long arg)
{
	struct cls_fl_head *head = rtnl_dereference(tp->root);
	struct cls_fl_filter *f = (struct cls_fl_filter *) arg;

	rhashtable_remove_fast(&head->ht, &f->ht_node,
			       head->ht_params);
	list_del_rcu(&f->list);
	tcf_unbind_filter(tp, &f->res);
	call_rcu(&f->rcu, fl_destroy_filter);
	return 0;
}

static void fl_walk(struct tcf_proto *tp, struct tcf_walker *arg)
{
	struct cls_fl_head *head = rtnl_dereference(tp->root);
	struct cls_fl_filter *f;

	list_for_each_entry_rcu(f, &head->filters, list) {
		if (arg->count < arg->skip)
			goto skip;
		if (arg->fn(tp, (unsigned long) f, arg) < 0) {
			arg->stop = 1;
			break;
		}
skip:
		arg->count++;
	}
}

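/* Dump a value/mask pair; keys whose mask is all zeroes were never
 * set by the user and are skipped entirely.
 */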
static int fl_dump_key_val(struct sk_buff *skb,
			   void *val, int val_type,
			   void *mask, int mask_type, int len)
{
	int err;

	if (!memchr_inv(mask, 0, len))
		return 0;
	err = nla_put(skb, val_type, len, val);
	if (err)
		return err;
	if (mask_type != TCA_FLOWER_UNSPEC) {
		err = nla_put(skb, mask_type, len, mask);
		if (err)
			return err;
	}
	return 0;
}

static int fl_dump(struct net *net, struct tcf_proto *tp, unsigned long fh,
		   struct sk_buff *skb, struct tcmsg *t)
{
	struct cls_fl_head *head = rtnl_dereference(tp->root);
	struct cls_fl_filter *f = (struct cls_fl_filter *) fh;
	struct nlattr *nest;
	struct fl_flow_key *key, *mask;

	if (!f)
		return skb->len;

	t->tcm_handle = f->handle;

	nest = nla_nest_start(skb, TCA_OPTIONS);
	if (!nest)
		goto nla_put_failure;

	if (f->res.classid &&
	    nla_put_u32(skb, TCA_FLOWER_CLASSID, f->res.classid))
		goto nla_put_failure;

	key = &f->key;
	mask = &head->mask.key;

	if (mask->indev_ifindex) {
		struct net_device *dev;

		dev = __dev_get_by_index(net, key->indev_ifindex);
		if (dev && nla_put_string(skb, TCA_FLOWER_INDEV, dev->name))
			goto nla_put_failure;
	}

	if (fl_dump_key_val(skb, key->eth.dst, TCA_FLOWER_KEY_ETH_DST,
			    mask->eth.dst, TCA_FLOWER_KEY_ETH_DST_MASK,
			    sizeof(key->eth.dst)) ||
	    fl_dump_key_val(skb, key->eth.src, TCA_FLOWER_KEY_ETH_SRC,
			    mask->eth.src, TCA_FLOWER_KEY_ETH_SRC_MASK,
			    sizeof(key->eth.src)) ||
	    fl_dump_key_val(skb, &key->basic.n_proto, TCA_FLOWER_KEY_ETH_TYPE,
			    &mask->basic.n_proto, TCA_FLOWER_UNSPEC,
			    sizeof(key->basic.n_proto)))
		goto nla_put_failure;
	if ((key->basic.n_proto == htons(ETH_P_IP) ||
	     key->basic.n_proto == htons(ETH_P_IPV6)) &&
	    fl_dump_key_val(skb, &key->basic.ip_proto, TCA_FLOWER_KEY_IP_PROTO,
			    &mask->basic.ip_proto, TCA_FLOWER_UNSPEC,
			    sizeof(key->basic.ip_proto)))
		goto nla_put_failure;

	if (key->control.addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS &&
	    (fl_dump_key_val(skb, &key->ipv4.src, TCA_FLOWER_KEY_IPV4_SRC,
			     &mask->ipv4.src, TCA_FLOWER_KEY_IPV4_SRC_MASK,
			     sizeof(key->ipv4.src)) ||
	     fl_dump_key_val(skb, &key->ipv4.dst, TCA_FLOWER_KEY_IPV4_DST,
			     &mask->ipv4.dst, TCA_FLOWER_KEY_IPV4_DST_MASK,
			     sizeof(key->ipv4.dst))))
		goto nla_put_failure;
	else if (key->control.addr_type == FLOW_DISSECTOR_KEY_IPV6_ADDRS &&
		 (fl_dump_key_val(skb, &key->ipv6.src, TCA_FLOWER_KEY_IPV6_SRC,
				  &mask->ipv6.src, TCA_FLOWER_KEY_IPV6_SRC_MASK,
				  sizeof(key->ipv6.src)) ||
		  fl_dump_key_val(skb, &key->ipv6.dst, TCA_FLOWER_KEY_IPV6_DST,
				  &mask->ipv6.dst, TCA_FLOWER_KEY_IPV6_DST_MASK,
				  sizeof(key->ipv6.dst))))
		goto nla_put_failure;

	if (key->basic.ip_proto == IPPROTO_TCP &&
	    (fl_dump_key_val(skb, &key->tp.src, TCA_FLOWER_KEY_TCP_SRC,
			     &mask->tp.src, TCA_FLOWER_UNSPEC,
			     sizeof(key->tp.src)) ||
	     fl_dump_key_val(skb, &key->tp.dst, TCA_FLOWER_KEY_TCP_DST,
			     &mask->tp.dst, TCA_FLOWER_UNSPEC,
			     sizeof(key->tp.dst))))
		goto nla_put_failure;
	else if (key->basic.ip_proto == IPPROTO_UDP &&
		 (fl_dump_key_val(skb, &key->tp.src, TCA_FLOWER_KEY_UDP_SRC,
				  &mask->tp.src, TCA_FLOWER_UNSPEC,
				  sizeof(key->tp.src)) ||
		  fl_dump_key_val(skb, &key->tp.dst, TCA_FLOWER_KEY_UDP_DST,
				  &mask->tp.dst, TCA_FLOWER_UNSPEC,
				  sizeof(key->tp.dst))))
		goto nla_put_failure;

	if (tcf_exts_dump(skb, &f->exts))
		goto nla_put_failure;

	nla_nest_end(skb, nest);

	if (tcf_exts_dump_stats(skb, &f->exts) < 0)
		goto nla_put_failure;

	return skb->len;

nla_put_failure:
	nla_nest_cancel(skb, nest);
	return -1;
}

static struct tcf_proto_ops cls_fl_ops __read_mostly = {
	.kind		= "flower",
	.classify	= fl_classify,
	.init		= fl_init,
	.destroy	= fl_destroy,
	.get		= fl_get,
	.change		= fl_change,
	.delete		= fl_delete,
	.walk		= fl_walk,
	.dump		= fl_dump,
	.owner		= THIS_MODULE,
};

static int __init cls_fl_init(void)
{
	return register_tcf_proto_ops(&cls_fl_ops);
}

static void __exit cls_fl_exit(void)
{
	unregister_tcf_proto_ops(&cls_fl_ops);
}

module_init(cls_fl_init);
module_exit(cls_fl_exit);

MODULE_AUTHOR("Jiri Pirko <jiri@resnulli.us>");
MODULE_DESCRIPTION("Flower classifier");
MODULE_LICENSE("GPL v2");