/*
 * net/sched/cls_flower.c		Flower classifier
 *
 * Copyright (c) 2015 Jiri Pirko <jiri@resnulli.us>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 */
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/rhashtable.h>

#include <linux/if_ether.h>
#include <linux/in6.h>
#include <linux/ip.h>

#include <net/sch_generic.h>
#include <net/pkt_cls.h>
#include <net/ip.h>
#include <net/flow_dissector.h>

#include <net/dst.h>
#include <net/dst_metadata.h>
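/* All fields a flower filter can match on.  The same layout is used for
 * the filter key, the mask and the masked lookup key, which is why the
 * struct is aligned so that all three can be compared as longs.
 */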
struct fl_flow_key {
	int	indev_ifindex;
	struct flow_dissector_key_control control;
	struct flow_dissector_key_control enc_control;
	struct flow_dissector_key_basic basic;
	struct flow_dissector_key_eth_addrs eth;
	struct flow_dissector_key_vlan vlan;
	union {
		struct flow_dissector_key_ipv4_addrs ipv4;
		struct flow_dissector_key_ipv6_addrs ipv6;
	};
	struct flow_dissector_key_ports tp;
	struct flow_dissector_key_keyid enc_key_id;
	union {
		struct flow_dissector_key_ipv4_addrs enc_ipv4;
		struct flow_dissector_key_ipv6_addrs enc_ipv6;
	};
} __aligned(BITS_PER_LONG / 8); /* Ensure that we can do comparisons as longs. */
struct fl_flow_mask_range {
	unsigned short int start;
	unsigned short int end;
};

struct fl_flow_mask {
	struct fl_flow_key key;
	struct fl_flow_mask_range range;
	struct rcu_head rcu;
};

struct cls_fl_head {
	struct rhashtable ht;
	struct fl_flow_mask mask;
	struct flow_dissector dissector;
	u32 hgen;
	bool mask_assigned;
	struct list_head filters;
	struct rhashtable_params ht_params;
	struct rcu_head rcu;
};

struct cls_fl_filter {
	struct rhash_head ht_node;
	struct fl_flow_key mkey;
	struct tcf_exts exts;
	struct tcf_result res;
	struct fl_flow_key key;
	struct list_head list;
	u32 handle;
	u32 flags;
	struct rcu_head rcu;
};
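/* The helpers below operate only on the byte range of the key that the
 * mask actually covers.  fl_mask_update_range() computes that range; the
 * other helpers use it to mask, copy and compare keys one long at a time.
 */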
static unsigned short int fl_mask_range(const struct fl_flow_mask *mask)
{
	return mask->range.end - mask->range.start;
}
static void fl_mask_update_range(struct fl_flow_mask *mask)
{
	const u8 *bytes = (const u8 *) &mask->key;
	size_t size = sizeof(mask->key);
	size_t i, first = 0, last = size - 1;

	for (i = 0; i < sizeof(mask->key); i++) {
		if (bytes[i]) {
			if (!first && i)
				first = i;
			last = i;
		}
	}
	mask->range.start = rounddown(first, sizeof(long));
	mask->range.end = roundup(last + 1, sizeof(long));
}
static void *fl_key_get_start(struct fl_flow_key *key,
			      const struct fl_flow_mask *mask)
{
	return (u8 *) key + mask->range.start;
}

static void fl_set_masked_key(struct fl_flow_key *mkey, struct fl_flow_key *key,
			      struct fl_flow_mask *mask)
{
	const long *lkey = fl_key_get_start(key, mask);
	const long *lmask = fl_key_get_start(&mask->key, mask);
	long *lmkey = fl_key_get_start(mkey, mask);
	int i;

	for (i = 0; i < fl_mask_range(mask); i += sizeof(long))
		*lmkey++ = *lkey++ & *lmask++;
}

static void fl_clear_masked_range(struct fl_flow_key *key,
				  struct fl_flow_mask *mask)
{
	memset(fl_key_get_start(key, mask), 0, fl_mask_range(mask));
}
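/* Fast path: build a key from the skb (including tunnel metadata when
 * present), mask it with the single mask configured for this instance and
 * look the masked key up in the rhashtable.
 */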
static int fl_classify(struct sk_buff *skb, const struct tcf_proto *tp,
		       struct tcf_result *res)
{
	struct cls_fl_head *head = rcu_dereference_bh(tp->root);
	struct cls_fl_filter *f;
	struct fl_flow_key skb_key;
	struct fl_flow_key skb_mkey;
	struct ip_tunnel_info *info;

	if (!atomic_read(&head->ht.nelems))
		return -1;

	fl_clear_masked_range(&skb_key, &head->mask);

	info = skb_tunnel_info(skb);
	if (info) {
		struct ip_tunnel_key *key = &info->key;

		switch (ip_tunnel_info_af(info)) {
		case AF_INET:
			skb_key.enc_ipv4.src = key->u.ipv4.src;
			skb_key.enc_ipv4.dst = key->u.ipv4.dst;
			break;
		case AF_INET6:
			skb_key.enc_ipv6.src = key->u.ipv6.src;
			skb_key.enc_ipv6.dst = key->u.ipv6.dst;
			break;
		}

		skb_key.enc_key_id.keyid = tunnel_id_to_key32(key->tun_id);
	}

	skb_key.indev_ifindex = skb->skb_iif;
	/* skb_flow_dissect() does not set n_proto in case of an unknown
	 * protocol, so do it here.
	 */
	skb_key.basic.n_proto = skb->protocol;
	skb_flow_dissect(skb, &head->dissector, &skb_key, 0);

	fl_set_masked_key(&skb_mkey, &skb_key, &head->mask);

	f = rhashtable_lookup_fast(&head->ht,
				   fl_key_get_start(&skb_mkey, &head->mask),
				   head->ht_params);
	if (f && !tc_skip_sw(f->flags)) {
		*res = f->res;
		return tcf_exts_exec(skb, &f->exts, res);
	}
	return -1;
}
179 static int fl_init(struct tcf_proto *tp)
181 struct cls_fl_head *head;
183 head = kzalloc(sizeof(*head), GFP_KERNEL);
187 INIT_LIST_HEAD_RCU(&head->filters);
188 rcu_assign_pointer(tp->root, head);
static void fl_destroy_filter(struct rcu_head *head)
{
	struct cls_fl_filter *f = container_of(head, struct cls_fl_filter, rcu);

	tcf_exts_destroy(&f->exts);
	kfree(f);
}
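/* Hardware offload helpers: relay replace/destroy/stats commands to the
 * device through ndo_setup_tc() when the device is capable and the filter
 * flags allow offloading.
 */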
201 static void fl_hw_destroy_filter(struct tcf_proto *tp, unsigned long cookie)
203 struct net_device *dev = tp->q->dev_queue->dev;
204 struct tc_cls_flower_offload offload = {0};
205 struct tc_to_netdev tc;
207 if (!tc_should_offload(dev, tp, 0))
210 offload.command = TC_CLSFLOWER_DESTROY;
211 offload.cookie = cookie;
213 tc.type = TC_SETUP_CLSFLOWER;
214 tc.cls_flower = &offload;
216 dev->netdev_ops->ndo_setup_tc(dev, tp->q->handle, tp->protocol, &tc);
static int fl_hw_replace_filter(struct tcf_proto *tp,
				struct flow_dissector *dissector,
				struct fl_flow_key *mask,
				struct fl_flow_key *key,
				struct tcf_exts *actions,
				unsigned long cookie, u32 flags)
{
	struct net_device *dev = tp->q->dev_queue->dev;
	struct tc_cls_flower_offload offload = {0};
	struct tc_to_netdev tc;
	int err;

	if (!tc_should_offload(dev, tp, flags))
		return tc_skip_sw(flags) ? -EINVAL : 0;

	offload.command = TC_CLSFLOWER_REPLACE;
	offload.cookie = cookie;
	offload.dissector = dissector;
	offload.mask = mask;
	offload.key = key;
	offload.exts = actions;

	tc.type = TC_SETUP_CLSFLOWER;
	tc.cls_flower = &offload;

	err = dev->netdev_ops->ndo_setup_tc(dev, tp->q->handle, tp->protocol, &tc);

	if (tc_skip_sw(flags))
		return err;

	return 0;
}
static void fl_hw_update_stats(struct tcf_proto *tp, struct cls_fl_filter *f)
{
	struct net_device *dev = tp->q->dev_queue->dev;
	struct tc_cls_flower_offload offload = {0};
	struct tc_to_netdev tc;

	if (!tc_should_offload(dev, tp, 0))
		return;

	offload.command = TC_CLSFLOWER_STATS;
	offload.cookie = (unsigned long)f;
	offload.exts = &f->exts;

	tc.type = TC_SETUP_CLSFLOWER;
	tc.cls_flower = &offload;

	dev->netdev_ops->ndo_setup_tc(dev, tp->q->handle, tp->protocol, &tc);
}
static bool fl_destroy(struct tcf_proto *tp, bool force)
{
	struct cls_fl_head *head = rtnl_dereference(tp->root);
	struct cls_fl_filter *f, *next;

	if (!force && !list_empty(&head->filters))
		return false;

	list_for_each_entry_safe(f, next, &head->filters, list) {
		fl_hw_destroy_filter(tp, (unsigned long)f);
		list_del_rcu(&f->list);
		call_rcu(&f->rcu, fl_destroy_filter);
	}
	RCU_INIT_POINTER(tp->root, NULL);
	if (head->mask_assigned)
		rhashtable_destroy(&head->ht);
	kfree_rcu(head, rcu);
	return true;
}
static unsigned long fl_get(struct tcf_proto *tp, u32 handle)
{
	struct cls_fl_head *head = rtnl_dereference(tp->root);
	struct cls_fl_filter *f;

	list_for_each_entry(f, &head->filters, list)
		if (f->handle == handle)
			return (unsigned long) f;
	return 0;
}
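/* Netlink policy for the TCA_FLOWER_* attributes accepted by this
 * classifier.
 */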
static const struct nla_policy fl_policy[TCA_FLOWER_MAX + 1] = {
	[TCA_FLOWER_UNSPEC]		= { .type = NLA_UNSPEC },
	[TCA_FLOWER_CLASSID]		= { .type = NLA_U32 },
	[TCA_FLOWER_INDEV]		= { .type = NLA_STRING,
					    .len = IFNAMSIZ },
	[TCA_FLOWER_KEY_ETH_DST]	= { .len = ETH_ALEN },
	[TCA_FLOWER_KEY_ETH_DST_MASK]	= { .len = ETH_ALEN },
	[TCA_FLOWER_KEY_ETH_SRC]	= { .len = ETH_ALEN },
	[TCA_FLOWER_KEY_ETH_SRC_MASK]	= { .len = ETH_ALEN },
	[TCA_FLOWER_KEY_ETH_TYPE]	= { .type = NLA_U16 },
	[TCA_FLOWER_KEY_IP_PROTO]	= { .type = NLA_U8 },
	[TCA_FLOWER_KEY_IPV4_SRC]	= { .type = NLA_U32 },
	[TCA_FLOWER_KEY_IPV4_SRC_MASK]	= { .type = NLA_U32 },
	[TCA_FLOWER_KEY_IPV4_DST]	= { .type = NLA_U32 },
	[TCA_FLOWER_KEY_IPV4_DST_MASK]	= { .type = NLA_U32 },
	[TCA_FLOWER_KEY_IPV6_SRC]	= { .len = sizeof(struct in6_addr) },
	[TCA_FLOWER_KEY_IPV6_SRC_MASK]	= { .len = sizeof(struct in6_addr) },
	[TCA_FLOWER_KEY_IPV6_DST]	= { .len = sizeof(struct in6_addr) },
	[TCA_FLOWER_KEY_IPV6_DST_MASK]	= { .len = sizeof(struct in6_addr) },
	[TCA_FLOWER_KEY_TCP_SRC]	= { .type = NLA_U16 },
	[TCA_FLOWER_KEY_TCP_DST]	= { .type = NLA_U16 },
	[TCA_FLOWER_KEY_UDP_SRC]	= { .type = NLA_U16 },
	[TCA_FLOWER_KEY_UDP_DST]	= { .type = NLA_U16 },
	[TCA_FLOWER_KEY_VLAN_ID]	= { .type = NLA_U16 },
	[TCA_FLOWER_KEY_VLAN_PRIO]	= { .type = NLA_U8 },
	[TCA_FLOWER_KEY_VLAN_ETH_TYPE]	= { .type = NLA_U16 },
	[TCA_FLOWER_KEY_ENC_KEY_ID]	= { .type = NLA_U32 },
	[TCA_FLOWER_KEY_ENC_IPV4_SRC]	= { .type = NLA_U32 },
	[TCA_FLOWER_KEY_ENC_IPV4_SRC_MASK] = { .type = NLA_U32 },
	[TCA_FLOWER_KEY_ENC_IPV4_DST]	= { .type = NLA_U32 },
	[TCA_FLOWER_KEY_ENC_IPV4_DST_MASK] = { .type = NLA_U32 },
	[TCA_FLOWER_KEY_ENC_IPV6_SRC]	= { .len = sizeof(struct in6_addr) },
	[TCA_FLOWER_KEY_ENC_IPV6_SRC_MASK] = { .len = sizeof(struct in6_addr) },
	[TCA_FLOWER_KEY_ENC_IPV6_DST]	= { .len = sizeof(struct in6_addr) },
	[TCA_FLOWER_KEY_ENC_IPV6_DST_MASK] = { .len = sizeof(struct in6_addr) },
	[TCA_FLOWER_KEY_TCP_SRC_MASK]	= { .type = NLA_U16 },
	[TCA_FLOWER_KEY_TCP_DST_MASK]	= { .type = NLA_U16 },
	[TCA_FLOWER_KEY_UDP_SRC_MASK]	= { .type = NLA_U16 },
	[TCA_FLOWER_KEY_UDP_DST_MASK]	= { .type = NLA_U16 },
};
static void fl_set_key_val(struct nlattr **tb,
			   void *val, int val_type,
			   void *mask, int mask_type, int len)
{
	if (!tb[val_type])
		return;
	memcpy(val, nla_data(tb[val_type]), len);
	if (mask_type == TCA_FLOWER_UNSPEC || !tb[mask_type])
		memset(mask, 0xff, len);
	else
		memcpy(mask, nla_data(tb[mask_type]), len);
}
static void fl_set_key_vlan(struct nlattr **tb,
			    struct flow_dissector_key_vlan *key_val,
			    struct flow_dissector_key_vlan *key_mask)
{
#define VLAN_PRIORITY_MASK	0x7

	if (tb[TCA_FLOWER_KEY_VLAN_ID]) {
		key_val->vlan_id =
			nla_get_u16(tb[TCA_FLOWER_KEY_VLAN_ID]) & VLAN_VID_MASK;
		key_mask->vlan_id = VLAN_VID_MASK;
	}
	if (tb[TCA_FLOWER_KEY_VLAN_PRIO]) {
		key_val->vlan_priority =
			nla_get_u8(tb[TCA_FLOWER_KEY_VLAN_PRIO]) &
			VLAN_PRIORITY_MASK;
		key_mask->vlan_priority = VLAN_PRIORITY_MASK;
	}
}
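/* Parse the TCA_FLOWER_KEY_* attributes into the filter key and its mask.
 * An attribute supplied without an explicit *_MASK counterpart gets an
 * all-ones mask (see fl_set_key_val() above).
 */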
static int fl_set_key(struct net *net, struct nlattr **tb,
		      struct fl_flow_key *key, struct fl_flow_key *mask)
{
	__be16 ethertype;
#ifdef CONFIG_NET_CLS_IND
	if (tb[TCA_FLOWER_INDEV]) {
		int err = tcf_change_indev(net, tb[TCA_FLOWER_INDEV]);
		if (err < 0)
			return err;
		key->indev_ifindex = err;
		mask->indev_ifindex = 0xffffffff;
	}
#endif

	fl_set_key_val(tb, key->eth.dst, TCA_FLOWER_KEY_ETH_DST,
		       mask->eth.dst, TCA_FLOWER_KEY_ETH_DST_MASK,
		       sizeof(key->eth.dst));
	fl_set_key_val(tb, key->eth.src, TCA_FLOWER_KEY_ETH_SRC,
		       mask->eth.src, TCA_FLOWER_KEY_ETH_SRC_MASK,
		       sizeof(key->eth.src));

	if (tb[TCA_FLOWER_KEY_ETH_TYPE]) {
		ethertype = nla_get_be16(tb[TCA_FLOWER_KEY_ETH_TYPE]);

		if (ethertype == htons(ETH_P_8021Q)) {
			fl_set_key_vlan(tb, &key->vlan, &mask->vlan);
			fl_set_key_val(tb, &key->basic.n_proto,
				       TCA_FLOWER_KEY_VLAN_ETH_TYPE,
				       &mask->basic.n_proto, TCA_FLOWER_UNSPEC,
				       sizeof(key->basic.n_proto));
		} else {
			key->basic.n_proto = ethertype;
			mask->basic.n_proto = cpu_to_be16(~0);
		}
	}

	if (key->basic.n_proto == htons(ETH_P_IP) ||
	    key->basic.n_proto == htons(ETH_P_IPV6)) {
		fl_set_key_val(tb, &key->basic.ip_proto, TCA_FLOWER_KEY_IP_PROTO,
			       &mask->basic.ip_proto, TCA_FLOWER_UNSPEC,
			       sizeof(key->basic.ip_proto));
	}
	if (tb[TCA_FLOWER_KEY_IPV4_SRC] || tb[TCA_FLOWER_KEY_IPV4_DST]) {
		key->control.addr_type = FLOW_DISSECTOR_KEY_IPV4_ADDRS;
		fl_set_key_val(tb, &key->ipv4.src, TCA_FLOWER_KEY_IPV4_SRC,
			       &mask->ipv4.src, TCA_FLOWER_KEY_IPV4_SRC_MASK,
			       sizeof(key->ipv4.src));
		fl_set_key_val(tb, &key->ipv4.dst, TCA_FLOWER_KEY_IPV4_DST,
			       &mask->ipv4.dst, TCA_FLOWER_KEY_IPV4_DST_MASK,
			       sizeof(key->ipv4.dst));
	} else if (tb[TCA_FLOWER_KEY_IPV6_SRC] || tb[TCA_FLOWER_KEY_IPV6_DST]) {
		key->control.addr_type = FLOW_DISSECTOR_KEY_IPV6_ADDRS;
		fl_set_key_val(tb, &key->ipv6.src, TCA_FLOWER_KEY_IPV6_SRC,
			       &mask->ipv6.src, TCA_FLOWER_KEY_IPV6_SRC_MASK,
			       sizeof(key->ipv6.src));
		fl_set_key_val(tb, &key->ipv6.dst, TCA_FLOWER_KEY_IPV6_DST,
			       &mask->ipv6.dst, TCA_FLOWER_KEY_IPV6_DST_MASK,
			       sizeof(key->ipv6.dst));
	}

	if (key->basic.ip_proto == IPPROTO_TCP) {
		fl_set_key_val(tb, &key->tp.src, TCA_FLOWER_KEY_TCP_SRC,
			       &mask->tp.src, TCA_FLOWER_KEY_TCP_SRC_MASK,
			       sizeof(key->tp.src));
		fl_set_key_val(tb, &key->tp.dst, TCA_FLOWER_KEY_TCP_DST,
			       &mask->tp.dst, TCA_FLOWER_KEY_TCP_DST_MASK,
			       sizeof(key->tp.dst));
	} else if (key->basic.ip_proto == IPPROTO_UDP) {
		fl_set_key_val(tb, &key->tp.src, TCA_FLOWER_KEY_UDP_SRC,
			       &mask->tp.src, TCA_FLOWER_KEY_UDP_SRC_MASK,
			       sizeof(key->tp.src));
		fl_set_key_val(tb, &key->tp.dst, TCA_FLOWER_KEY_UDP_DST,
			       &mask->tp.dst, TCA_FLOWER_KEY_UDP_DST_MASK,
			       sizeof(key->tp.dst));
	}
	if (tb[TCA_FLOWER_KEY_ENC_IPV4_SRC] ||
	    tb[TCA_FLOWER_KEY_ENC_IPV4_DST]) {
		key->enc_control.addr_type = FLOW_DISSECTOR_KEY_IPV4_ADDRS;
		fl_set_key_val(tb, &key->enc_ipv4.src,
			       TCA_FLOWER_KEY_ENC_IPV4_SRC,
			       &mask->enc_ipv4.src,
			       TCA_FLOWER_KEY_ENC_IPV4_SRC_MASK,
			       sizeof(key->enc_ipv4.src));
		fl_set_key_val(tb, &key->enc_ipv4.dst,
			       TCA_FLOWER_KEY_ENC_IPV4_DST,
			       &mask->enc_ipv4.dst,
			       TCA_FLOWER_KEY_ENC_IPV4_DST_MASK,
			       sizeof(key->enc_ipv4.dst));
	}

	if (tb[TCA_FLOWER_KEY_ENC_IPV6_SRC] ||
	    tb[TCA_FLOWER_KEY_ENC_IPV6_DST]) {
		key->enc_control.addr_type = FLOW_DISSECTOR_KEY_IPV6_ADDRS;
		fl_set_key_val(tb, &key->enc_ipv6.src,
			       TCA_FLOWER_KEY_ENC_IPV6_SRC,
			       &mask->enc_ipv6.src,
			       TCA_FLOWER_KEY_ENC_IPV6_SRC_MASK,
			       sizeof(key->enc_ipv6.src));
		fl_set_key_val(tb, &key->enc_ipv6.dst,
			       TCA_FLOWER_KEY_ENC_IPV6_DST,
			       &mask->enc_ipv6.dst,
			       TCA_FLOWER_KEY_ENC_IPV6_DST_MASK,
			       sizeof(key->enc_ipv6.dst));
	}

	fl_set_key_val(tb, &key->enc_key_id.keyid, TCA_FLOWER_KEY_ENC_KEY_ID,
		       &mask->enc_key_id.keyid, TCA_FLOWER_KEY_ENC_KEY_ID,
		       sizeof(key->enc_key_id.keyid));

	return 0;
}
static bool fl_mask_eq(struct fl_flow_mask *mask1,
		       struct fl_flow_mask *mask2)
{
	const long *lmask1 = fl_key_get_start(&mask1->key, mask1);
	const long *lmask2 = fl_key_get_start(&mask2->key, mask2);

	return !memcmp(&mask1->range, &mask2->range, sizeof(mask1->range)) &&
	       !memcmp(lmask1, lmask2, fl_mask_range(mask1));
}
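/* Base hashtable parameters.  fl_init_hashtable() narrows key_offset and
 * key_len to the byte range actually covered by the mask.
 */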
static const struct rhashtable_params fl_ht_params = {
	.key_offset = offsetof(struct cls_fl_filter, mkey), /* base offset */
	.head_offset = offsetof(struct cls_fl_filter, ht_node),
	.automatic_shrinking = true,
};
static int fl_init_hashtable(struct cls_fl_head *head,
			     struct fl_flow_mask *mask)
{
	head->ht_params = fl_ht_params;
	head->ht_params.key_len = fl_mask_range(mask);
	head->ht_params.key_offset += mask->range.start;

	return rhashtable_init(&head->ht, &head->ht_params);
}
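/* Helpers for registering with the flow dissector only the keys that the
 * mask actually uses.
 */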
#define FL_KEY_MEMBER_OFFSET(member) offsetof(struct fl_flow_key, member)
#define FL_KEY_MEMBER_SIZE(member) (sizeof(((struct fl_flow_key *) 0)->member))

#define FL_KEY_IS_MASKED(mask, member)					\
	memchr_inv(((char *)mask) + FL_KEY_MEMBER_OFFSET(member),	\
		   0, FL_KEY_MEMBER_SIZE(member))

#define FL_KEY_SET(keys, cnt, id, member)				\
	do {								\
		keys[cnt].key_id = id;					\
		keys[cnt].offset = FL_KEY_MEMBER_OFFSET(member);	\
		cnt++;							\
	} while (0)

#define FL_KEY_SET_IF_MASKED(mask, keys, cnt, id, member)		\
	do {								\
		if (FL_KEY_IS_MASKED(mask, member))			\
			FL_KEY_SET(keys, cnt, id, member);		\
	} while (0)
static void fl_init_dissector(struct cls_fl_head *head,
			      struct fl_flow_mask *mask)
{
	struct flow_dissector_key keys[FLOW_DISSECTOR_KEY_MAX];
	size_t cnt = 0;

	FL_KEY_SET(keys, cnt, FLOW_DISSECTOR_KEY_CONTROL, control);
	FL_KEY_SET(keys, cnt, FLOW_DISSECTOR_KEY_BASIC, basic);
	FL_KEY_SET_IF_MASKED(&mask->key, keys, cnt,
			     FLOW_DISSECTOR_KEY_ETH_ADDRS, eth);
	FL_KEY_SET_IF_MASKED(&mask->key, keys, cnt,
			     FLOW_DISSECTOR_KEY_IPV4_ADDRS, ipv4);
	FL_KEY_SET_IF_MASKED(&mask->key, keys, cnt,
			     FLOW_DISSECTOR_KEY_IPV6_ADDRS, ipv6);
	FL_KEY_SET_IF_MASKED(&mask->key, keys, cnt,
			     FLOW_DISSECTOR_KEY_PORTS, tp);
	FL_KEY_SET_IF_MASKED(&mask->key, keys, cnt,
			     FLOW_DISSECTOR_KEY_VLAN, vlan);

	skb_flow_dissector_init(&head->dissector, keys, cnt);
}
static int fl_check_assign_mask(struct cls_fl_head *head,
				struct fl_flow_mask *mask)
{
	int err;

	if (head->mask_assigned) {
		if (!fl_mask_eq(&head->mask, mask))
			return -EINVAL;
		else
			return 0;
	}

	/* Mask is not assigned yet. So assign it and init hashtable
	 * according to that.
	 */
	err = fl_init_hashtable(head, mask);
	if (err)
		return err;
	memcpy(&head->mask, mask, sizeof(head->mask));
	head->mask_assigned = true;

	fl_init_dissector(head, mask);

	return 0;
}
static int fl_set_parms(struct net *net, struct tcf_proto *tp,
			struct cls_fl_filter *f, struct fl_flow_mask *mask,
			unsigned long base, struct nlattr **tb,
			struct nlattr *est, bool ovr)
{
	struct tcf_exts e;
	int err;

	err = tcf_exts_init(&e, TCA_FLOWER_ACT, 0);
	if (err < 0)
		return err;
	err = tcf_exts_validate(net, tp, tb, est, &e, ovr);
	if (err < 0)
		goto errout;

	if (tb[TCA_FLOWER_CLASSID]) {
		f->res.classid = nla_get_u32(tb[TCA_FLOWER_CLASSID]);
		tcf_bind_filter(tp, &f->res, base);
	}

	err = fl_set_key(net, tb, &f->key, &mask->key);
	if (err)
		goto errout;

	fl_mask_update_range(mask);
	fl_set_masked_key(&f->mkey, &f->key, mask);

	tcf_exts_change(tp, &f->exts, &e);

	return 0;
errout:
	tcf_exts_destroy(&e);
	return err;
}
static u32 fl_grab_new_handle(struct tcf_proto *tp,
			      struct cls_fl_head *head)
{
	unsigned int i = 0x80000000;
	u32 handle;

	do {
		if (++head->hgen == 0x7FFFFFFF)
			head->hgen = 1;
	} while (--i > 0 && fl_get(tp, head->hgen));

	if (unlikely(i == 0)) {
		pr_err("Insufficient number of handles\n");
		handle = 0;
	} else {
		handle = head->hgen;
	}

	return handle;
}
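/* Create or replace a filter: parse the attributes, check the mask against
 * the one already assigned to this instance, insert the filter into the
 * software hash table unless skip_sw was requested, and offer it to the
 * hardware.
 */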
static int fl_change(struct net *net, struct sk_buff *in_skb,
		     struct tcf_proto *tp, unsigned long base,
		     u32 handle, struct nlattr **tca,
		     unsigned long *arg, bool ovr)
{
	struct cls_fl_head *head = rtnl_dereference(tp->root);
	struct cls_fl_filter *fold = (struct cls_fl_filter *) *arg;
	struct cls_fl_filter *fnew;
	struct nlattr *tb[TCA_FLOWER_MAX + 1];
	struct fl_flow_mask mask = {};
	int err;

	if (!tca[TCA_OPTIONS])
		return -EINVAL;

	err = nla_parse_nested(tb, TCA_FLOWER_MAX, tca[TCA_OPTIONS], fl_policy);
	if (err < 0)
		return err;

	if (fold && handle && fold->handle != handle)
		return -EINVAL;

	fnew = kzalloc(sizeof(*fnew), GFP_KERNEL);
	if (!fnew)
		return -ENOBUFS;

	err = tcf_exts_init(&fnew->exts, TCA_FLOWER_ACT, 0);
	if (err < 0)
		goto errout;

	if (!handle) {
		handle = fl_grab_new_handle(tp, head);
		if (!handle) {
			err = -EINVAL;
			goto errout;
		}
	}
	fnew->handle = handle;

	if (tb[TCA_FLOWER_FLAGS]) {
		fnew->flags = nla_get_u32(tb[TCA_FLOWER_FLAGS]);

		if (!tc_flags_valid(fnew->flags)) {
			err = -EINVAL;
			goto errout;
		}
	}

	err = fl_set_parms(net, tp, fnew, &mask, base, tb, tca[TCA_RATE], ovr);
	if (err)
		goto errout;

	err = fl_check_assign_mask(head, &mask);
	if (err)
		goto errout;

	if (!tc_skip_sw(fnew->flags)) {
		err = rhashtable_insert_fast(&head->ht, &fnew->ht_node,
					     head->ht_params);
		if (err)
			goto errout;
	}

	err = fl_hw_replace_filter(tp,
				   &head->dissector,
				   &mask.key,
				   &fnew->key,
				   &fnew->exts,
				   (unsigned long)fnew,
				   fnew->flags);
	if (err)
		goto errout;

	if (fold) {
		rhashtable_remove_fast(&head->ht, &fold->ht_node,
				       head->ht_params);
		fl_hw_destroy_filter(tp, (unsigned long)fold);
	}

	*arg = (unsigned long) fnew;

	if (fold) {
		list_replace_rcu(&fold->list, &fnew->list);
		tcf_unbind_filter(tp, &fold->res);
		call_rcu(&fold->rcu, fl_destroy_filter);
	} else {
		list_add_tail_rcu(&fnew->list, &head->filters);
	}

	return 0;

errout:
	tcf_exts_destroy(&fnew->exts);
	kfree(fnew);
	return err;
}
static int fl_delete(struct tcf_proto *tp, unsigned long arg)
{
	struct cls_fl_head *head = rtnl_dereference(tp->root);
	struct cls_fl_filter *f = (struct cls_fl_filter *) arg;

	rhashtable_remove_fast(&head->ht, &f->ht_node,
			       head->ht_params);
	list_del_rcu(&f->list);
	fl_hw_destroy_filter(tp, (unsigned long)f);
	tcf_unbind_filter(tp, &f->res);
	call_rcu(&f->rcu, fl_destroy_filter);
	return 0;
}
static void fl_walk(struct tcf_proto *tp, struct tcf_walker *arg)
{
	struct cls_fl_head *head = rtnl_dereference(tp->root);
	struct cls_fl_filter *f;

	list_for_each_entry_rcu(f, &head->filters, list) {
		if (arg->count < arg->skip)
			goto skip;
		if (arg->fn(tp, (unsigned long) f, arg) < 0) {
			arg->stop = 1;
			break;
		}
skip:
		arg->count++;
	}
}
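/* Dump helpers: a key/mask attribute pair is emitted only when the mask is
 * non-zero, so the dump reflects what was actually configured.
 */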
static int fl_dump_key_val(struct sk_buff *skb,
			   void *val, int val_type,
			   void *mask, int mask_type, int len)
{
	int err;

	if (!memchr_inv(mask, 0, len))
		return 0;
	err = nla_put(skb, val_type, len, val);
	if (err)
		return err;
	if (mask_type != TCA_FLOWER_UNSPEC) {
		err = nla_put(skb, mask_type, len, mask);
		if (err)
			return err;
	}
	return 0;
}
static int fl_dump_key_vlan(struct sk_buff *skb,
			    struct flow_dissector_key_vlan *vlan_key,
			    struct flow_dissector_key_vlan *vlan_mask)
{
	int err;

	if (!memchr_inv(vlan_mask, 0, sizeof(*vlan_mask)))
		return 0;
	if (vlan_mask->vlan_id) {
		err = nla_put_u16(skb, TCA_FLOWER_KEY_VLAN_ID,
				  vlan_key->vlan_id);
		if (err)
			return err;
	}
	if (vlan_mask->vlan_priority) {
		err = nla_put_u8(skb, TCA_FLOWER_KEY_VLAN_PRIO,
				 vlan_key->vlan_priority);
		if (err)
			return err;
	}
	return 0;
}
static int fl_dump(struct net *net, struct tcf_proto *tp, unsigned long fh,
		   struct sk_buff *skb, struct tcmsg *t)
{
	struct cls_fl_head *head = rtnl_dereference(tp->root);
	struct cls_fl_filter *f = (struct cls_fl_filter *) fh;
	struct nlattr *nest;
	struct fl_flow_key *key, *mask;

	if (!f)
		return skb->len;

	t->tcm_handle = f->handle;

	nest = nla_nest_start(skb, TCA_OPTIONS);
	if (!nest)
		goto nla_put_failure;

	if (f->res.classid &&
	    nla_put_u32(skb, TCA_FLOWER_CLASSID, f->res.classid))
		goto nla_put_failure;

	key = &f->key;
	mask = &head->mask.key;

	if (mask->indev_ifindex) {
		struct net_device *dev;

		dev = __dev_get_by_index(net, key->indev_ifindex);
		if (dev && nla_put_string(skb, TCA_FLOWER_INDEV, dev->name))
			goto nla_put_failure;
	}

	fl_hw_update_stats(tp, f);
	if (fl_dump_key_val(skb, key->eth.dst, TCA_FLOWER_KEY_ETH_DST,
			    mask->eth.dst, TCA_FLOWER_KEY_ETH_DST_MASK,
			    sizeof(key->eth.dst)) ||
	    fl_dump_key_val(skb, key->eth.src, TCA_FLOWER_KEY_ETH_SRC,
			    mask->eth.src, TCA_FLOWER_KEY_ETH_SRC_MASK,
			    sizeof(key->eth.src)) ||
	    fl_dump_key_val(skb, &key->basic.n_proto, TCA_FLOWER_KEY_ETH_TYPE,
			    &mask->basic.n_proto, TCA_FLOWER_UNSPEC,
			    sizeof(key->basic.n_proto)))
		goto nla_put_failure;

	if (fl_dump_key_vlan(skb, &key->vlan, &mask->vlan))
		goto nla_put_failure;

	if ((key->basic.n_proto == htons(ETH_P_IP) ||
	     key->basic.n_proto == htons(ETH_P_IPV6)) &&
	    fl_dump_key_val(skb, &key->basic.ip_proto, TCA_FLOWER_KEY_IP_PROTO,
			    &mask->basic.ip_proto, TCA_FLOWER_UNSPEC,
			    sizeof(key->basic.ip_proto)))
		goto nla_put_failure;

	if (key->control.addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS &&
	    (fl_dump_key_val(skb, &key->ipv4.src, TCA_FLOWER_KEY_IPV4_SRC,
			     &mask->ipv4.src, TCA_FLOWER_KEY_IPV4_SRC_MASK,
			     sizeof(key->ipv4.src)) ||
	     fl_dump_key_val(skb, &key->ipv4.dst, TCA_FLOWER_KEY_IPV4_DST,
			     &mask->ipv4.dst, TCA_FLOWER_KEY_IPV4_DST_MASK,
			     sizeof(key->ipv4.dst))))
		goto nla_put_failure;
	else if (key->control.addr_type == FLOW_DISSECTOR_KEY_IPV6_ADDRS &&
		 (fl_dump_key_val(skb, &key->ipv6.src, TCA_FLOWER_KEY_IPV6_SRC,
				  &mask->ipv6.src, TCA_FLOWER_KEY_IPV6_SRC_MASK,
				  sizeof(key->ipv6.src)) ||
		  fl_dump_key_val(skb, &key->ipv6.dst, TCA_FLOWER_KEY_IPV6_DST,
				  &mask->ipv6.dst, TCA_FLOWER_KEY_IPV6_DST_MASK,
				  sizeof(key->ipv6.dst))))
		goto nla_put_failure;

	if (key->basic.ip_proto == IPPROTO_TCP &&
	    (fl_dump_key_val(skb, &key->tp.src, TCA_FLOWER_KEY_TCP_SRC,
			     &mask->tp.src, TCA_FLOWER_KEY_TCP_SRC_MASK,
			     sizeof(key->tp.src)) ||
	     fl_dump_key_val(skb, &key->tp.dst, TCA_FLOWER_KEY_TCP_DST,
			     &mask->tp.dst, TCA_FLOWER_KEY_TCP_DST_MASK,
			     sizeof(key->tp.dst))))
		goto nla_put_failure;
	else if (key->basic.ip_proto == IPPROTO_UDP &&
		 (fl_dump_key_val(skb, &key->tp.src, TCA_FLOWER_KEY_UDP_SRC,
				  &mask->tp.src, TCA_FLOWER_KEY_UDP_SRC_MASK,
				  sizeof(key->tp.src)) ||
		  fl_dump_key_val(skb, &key->tp.dst, TCA_FLOWER_KEY_UDP_DST,
				  &mask->tp.dst, TCA_FLOWER_KEY_UDP_DST_MASK,
				  sizeof(key->tp.dst))))
		goto nla_put_failure;
	if (key->enc_control.addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS &&
	    (fl_dump_key_val(skb, &key->enc_ipv4.src,
			     TCA_FLOWER_KEY_ENC_IPV4_SRC, &mask->enc_ipv4.src,
			     TCA_FLOWER_KEY_ENC_IPV4_SRC_MASK,
			     sizeof(key->enc_ipv4.src)) ||
	     fl_dump_key_val(skb, &key->enc_ipv4.dst,
			     TCA_FLOWER_KEY_ENC_IPV4_DST, &mask->enc_ipv4.dst,
			     TCA_FLOWER_KEY_ENC_IPV4_DST_MASK,
			     sizeof(key->enc_ipv4.dst))))
		goto nla_put_failure;
	else if (key->enc_control.addr_type == FLOW_DISSECTOR_KEY_IPV6_ADDRS &&
		 (fl_dump_key_val(skb, &key->enc_ipv6.src,
				  TCA_FLOWER_KEY_ENC_IPV6_SRC, &mask->enc_ipv6.src,
				  TCA_FLOWER_KEY_ENC_IPV6_SRC_MASK,
				  sizeof(key->enc_ipv6.src)) ||
		  fl_dump_key_val(skb, &key->enc_ipv6.dst,
				  TCA_FLOWER_KEY_ENC_IPV6_DST,
				  &mask->enc_ipv6.dst,
				  TCA_FLOWER_KEY_ENC_IPV6_DST_MASK,
				  sizeof(key->enc_ipv6.dst))))
		goto nla_put_failure;

	if (fl_dump_key_val(skb, &key->enc_key_id, TCA_FLOWER_KEY_ENC_KEY_ID,
			    &mask->enc_key_id, TCA_FLOWER_KEY_ENC_KEY_ID,
			    sizeof(key->enc_key_id)))
		goto nla_put_failure;
	nla_put_u32(skb, TCA_FLOWER_FLAGS, f->flags);

	if (tcf_exts_dump(skb, &f->exts))
		goto nla_put_failure;

	nla_nest_end(skb, nest);

	if (tcf_exts_dump_stats(skb, &f->exts) < 0)
		goto nla_put_failure;

	return skb->len;

nla_put_failure:
	nla_nest_cancel(skb, nest);
	return -1;
}
static struct tcf_proto_ops cls_fl_ops __read_mostly = {
	.kind		= "flower",
	.classify	= fl_classify,
	.init		= fl_init,
	.destroy	= fl_destroy,
	.get		= fl_get,
	.change		= fl_change,
	.delete		= fl_delete,
	.walk		= fl_walk,
	.dump		= fl_dump,
	.owner		= THIS_MODULE,
};
static int __init cls_fl_init(void)
{
	return register_tcf_proto_ops(&cls_fl_ops);
}

static void __exit cls_fl_exit(void)
{
	unregister_tcf_proto_ops(&cls_fl_ops);
}

module_init(cls_fl_init);
module_exit(cls_fl_exit);
MODULE_AUTHOR("Jiri Pirko <jiri@resnulli.us>");
MODULE_DESCRIPTION("Flower classifier");
MODULE_LICENSE("GPL v2");