/*
 * Copyright (c) 2015, Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
33 #include <linux/mutex.h>
34 #include <linux/mlx5/driver.h>
36 #include "mlx5_core.h"
/* Node-removal callbacks, invoked when a tree node's reference count
 * drops to zero; defined later in this file.
 */
static void del_rule(struct fs_node *node);
static void del_flow_table(struct fs_node *node);
static void del_flow_group(struct fs_node *node);
static void del_fte(struct fs_node *node);
45 static void tree_init_node(struct fs_node *node,
46 unsigned int refcount,
47 void (*remove_func)(struct fs_node *))
49 atomic_set(&node->refcount, refcount);
50 INIT_LIST_HEAD(&node->list);
51 INIT_LIST_HEAD(&node->children);
52 mutex_init(&node->lock);
53 node->remove_func = remove_func;
56 static void tree_add_node(struct fs_node *node, struct fs_node *parent)
59 atomic_inc(&parent->refcount);
60 node->parent = parent;
62 /* Parent is the root */
66 node->root = parent->root;
69 static void tree_get_node(struct fs_node *node)
71 atomic_inc(&node->refcount);
74 static void nested_lock_ref_node(struct fs_node *node)
77 mutex_lock_nested(&node->lock, SINGLE_DEPTH_NESTING);
78 atomic_inc(&node->refcount);
82 static void lock_ref_node(struct fs_node *node)
85 mutex_lock(&node->lock);
86 atomic_inc(&node->refcount);
90 static void unlock_ref_node(struct fs_node *node)
93 atomic_dec(&node->refcount);
94 mutex_unlock(&node->lock);
98 static void tree_put_node(struct fs_node *node)
100 struct fs_node *parent_node = node->parent;
102 lock_ref_node(parent_node);
103 if (atomic_dec_and_test(&node->refcount)) {
105 list_del_init(&node->list);
106 if (node->remove_func)
107 node->remove_func(node);
111 unlock_ref_node(parent_node);
112 if (!node && parent_node)
113 tree_put_node(parent_node);
116 static int tree_remove_node(struct fs_node *node)
118 if (atomic_read(&node->refcount) > 1)
124 static struct fs_prio *find_prio(struct mlx5_flow_namespace *ns,
127 struct fs_prio *iter_prio;
129 fs_for_each_prio(iter_prio, ns) {
130 if (iter_prio->prio == prio)
137 static unsigned int find_next_free_level(struct fs_prio *prio)
139 if (!list_empty(&prio->node.children)) {
140 struct mlx5_flow_table *ft;
142 ft = list_last_entry(&prio->node.children,
143 struct mlx5_flow_table,
145 return ft->level + 1;
147 return prio->start_level;
150 static bool masked_memcmp(void *mask, void *val1, void *val2, size_t size)
154 for (i = 0; i < size; i++, mask++, val1++, val2++)
155 if ((*((u8 *)val1) & (*(u8 *)mask)) !=
156 ((*(u8 *)val2) & (*(u8 *)mask)))
162 static bool compare_match_value(struct mlx5_flow_group_mask *mask,
163 void *fte_param1, void *fte_param2)
165 if (mask->match_criteria_enable &
166 1 << MLX5_CREATE_FLOW_GROUP_IN_MATCH_CRITERIA_ENABLE_OUTER_HEADERS) {
167 void *fte_match1 = MLX5_ADDR_OF(fte_match_param,
168 fte_param1, outer_headers);
169 void *fte_match2 = MLX5_ADDR_OF(fte_match_param,
170 fte_param2, outer_headers);
171 void *fte_mask = MLX5_ADDR_OF(fte_match_param,
172 mask->match_criteria, outer_headers);
174 if (!masked_memcmp(fte_mask, fte_match1, fte_match2,
175 MLX5_ST_SZ_BYTES(fte_match_set_lyr_2_4)))
179 if (mask->match_criteria_enable &
180 1 << MLX5_CREATE_FLOW_GROUP_IN_MATCH_CRITERIA_ENABLE_MISC_PARAMETERS) {
181 void *fte_match1 = MLX5_ADDR_OF(fte_match_param,
182 fte_param1, misc_parameters);
183 void *fte_match2 = MLX5_ADDR_OF(fte_match_param,
184 fte_param2, misc_parameters);
185 void *fte_mask = MLX5_ADDR_OF(fte_match_param,
186 mask->match_criteria, misc_parameters);
188 if (!masked_memcmp(fte_mask, fte_match1, fte_match2,
189 MLX5_ST_SZ_BYTES(fte_match_set_misc)))
193 if (mask->match_criteria_enable &
194 1 << MLX5_CREATE_FLOW_GROUP_IN_MATCH_CRITERIA_ENABLE_INNER_HEADERS) {
195 void *fte_match1 = MLX5_ADDR_OF(fte_match_param,
196 fte_param1, inner_headers);
197 void *fte_match2 = MLX5_ADDR_OF(fte_match_param,
198 fte_param2, inner_headers);
199 void *fte_mask = MLX5_ADDR_OF(fte_match_param,
200 mask->match_criteria, inner_headers);
202 if (!masked_memcmp(fte_mask, fte_match1, fte_match2,
203 MLX5_ST_SZ_BYTES(fte_match_set_lyr_2_4)))
209 static bool compare_match_criteria(u8 match_criteria_enable1,
210 u8 match_criteria_enable2,
211 void *mask1, void *mask2)
213 return match_criteria_enable1 == match_criteria_enable2 &&
214 !memcmp(mask1, mask2, MLX5_ST_SZ_BYTES(fte_match_param));
217 static struct mlx5_flow_root_namespace *find_root(struct fs_node *node)
219 struct fs_node *root;
220 struct mlx5_flow_namespace *ns;
224 if (WARN_ON(root->type != FS_TYPE_NAMESPACE)) {
225 pr_warn("mlx5: flow steering node is not in tree or garbaged\n");
229 ns = container_of(root, struct mlx5_flow_namespace, node);
230 return container_of(ns, struct mlx5_flow_root_namespace, ns);
233 static inline struct mlx5_core_dev *get_dev(struct fs_node *node)
235 struct mlx5_flow_root_namespace *root = find_root(node);
242 static void del_flow_table(struct fs_node *node)
244 struct mlx5_flow_table *ft;
245 struct mlx5_core_dev *dev;
246 struct fs_prio *prio;
249 fs_get_obj(ft, node);
250 dev = get_dev(&ft->node);
252 err = mlx5_cmd_destroy_flow_table(dev, ft);
254 pr_warn("flow steering can't destroy ft\n");
255 fs_get_obj(prio, ft->node.parent);
259 static void del_rule(struct fs_node *node)
261 struct mlx5_flow_rule *rule;
262 struct mlx5_flow_table *ft;
263 struct mlx5_flow_group *fg;
266 struct mlx5_core_dev *dev = get_dev(node);
267 int match_len = MLX5_ST_SZ_BYTES(fte_match_param);
270 match_value = mlx5_vzalloc(match_len);
272 pr_warn("failed to allocate inbox\n");
276 fs_get_obj(rule, node);
277 fs_get_obj(fte, rule->node.parent);
278 fs_get_obj(fg, fte->node.parent);
279 memcpy(match_value, fte->val, sizeof(fte->val));
280 fs_get_obj(ft, fg->node.parent);
281 list_del(&rule->node.list);
283 if (fte->dests_size) {
284 err = mlx5_cmd_update_fte(dev, ft,
287 pr_warn("%s can't del rule fg id=%d fte_index=%d\n",
288 __func__, fg->id, fte->index);
293 static void del_fte(struct fs_node *node)
295 struct mlx5_flow_table *ft;
296 struct mlx5_flow_group *fg;
297 struct mlx5_core_dev *dev;
301 fs_get_obj(fte, node);
302 fs_get_obj(fg, fte->node.parent);
303 fs_get_obj(ft, fg->node.parent);
305 dev = get_dev(&ft->node);
306 err = mlx5_cmd_delete_fte(dev, ft,
309 pr_warn("flow steering can't delete fte in index %d of flow group id %d\n",
316 static void del_flow_group(struct fs_node *node)
318 struct mlx5_flow_group *fg;
319 struct mlx5_flow_table *ft;
320 struct mlx5_core_dev *dev;
322 fs_get_obj(fg, node);
323 fs_get_obj(ft, fg->node.parent);
324 dev = get_dev(&ft->node);
326 if (mlx5_cmd_destroy_flow_group(dev, ft, fg->id))
327 pr_warn("flow steering can't destroy fg %d of ft %d\n",
331 static struct fs_fte *alloc_fte(u8 action,
338 fte = kzalloc(sizeof(*fte), GFP_KERNEL);
340 return ERR_PTR(-ENOMEM);
342 memcpy(fte->val, match_value, sizeof(fte->val));
343 fte->node.type = FS_TYPE_FLOW_ENTRY;
344 fte->flow_tag = flow_tag;
346 fte->action = action;
351 static struct mlx5_flow_group *alloc_flow_group(u32 *create_fg_in)
353 struct mlx5_flow_group *fg;
354 void *match_criteria = MLX5_ADDR_OF(create_flow_group_in,
355 create_fg_in, match_criteria);
356 u8 match_criteria_enable = MLX5_GET(create_flow_group_in,
358 match_criteria_enable);
359 fg = kzalloc(sizeof(*fg), GFP_KERNEL);
361 return ERR_PTR(-ENOMEM);
363 fg->mask.match_criteria_enable = match_criteria_enable;
364 memcpy(&fg->mask.match_criteria, match_criteria,
365 sizeof(fg->mask.match_criteria));
366 fg->node.type = FS_TYPE_FLOW_GROUP;
367 fg->start_index = MLX5_GET(create_flow_group_in, create_fg_in,
369 fg->max_ftes = MLX5_GET(create_flow_group_in, create_fg_in,
370 end_flow_index) - fg->start_index + 1;
374 static struct mlx5_flow_table *alloc_flow_table(int level, int max_fte,
375 enum fs_flow_table_type table_type)
377 struct mlx5_flow_table *ft;
379 ft = kzalloc(sizeof(*ft), GFP_KERNEL);
384 ft->node.type = FS_TYPE_FLOW_TABLE;
385 ft->type = table_type;
386 ft->max_fte = max_fte;
391 static struct mlx5_flow_table *mlx5_create_flow_table(struct mlx5_flow_namespace *ns,
395 struct mlx5_flow_table *ft;
398 struct mlx5_flow_root_namespace *root =
399 find_root(&ns->node);
400 struct fs_prio *fs_prio = NULL;
403 pr_err("mlx5: flow steering failed to find root of namespace\n");
404 return ERR_PTR(-ENODEV);
407 fs_prio = find_prio(ns, prio);
409 return ERR_PTR(-EINVAL);
411 lock_ref_node(&fs_prio->node);
412 if (fs_prio->num_ft == fs_prio->max_ft) {
417 ft = alloc_flow_table(find_next_free_level(fs_prio),
418 roundup_pow_of_two(max_fte),
425 tree_init_node(&ft->node, 1, del_flow_table);
426 log_table_sz = ilog2(ft->max_fte);
427 err = mlx5_cmd_create_flow_table(root->dev, ft->type, ft->level,
428 log_table_sz, &ft->id);
432 tree_add_node(&ft->node, &fs_prio->node);
433 list_add_tail(&ft->node.list, &fs_prio->node.children);
435 unlock_ref_node(&fs_prio->node);
442 unlock_ref_node(&fs_prio->node);
446 static struct mlx5_flow_group *mlx5_create_flow_group(struct mlx5_flow_table *ft,
449 struct mlx5_flow_group *fg;
450 struct mlx5_core_dev *dev = get_dev(&ft->node);
454 return ERR_PTR(-ENODEV);
456 fg = alloc_flow_group(fg_in);
460 lock_ref_node(&ft->node);
461 err = mlx5_cmd_create_flow_group(dev, ft, fg_in, &fg->id);
464 unlock_ref_node(&ft->node);
467 /* Add node to tree */
468 tree_init_node(&fg->node, 1, del_flow_group);
469 tree_add_node(&fg->node, &ft->node);
470 /* Add node to group list */
471 list_add(&fg->node.list, ft->node.children.prev);
472 unlock_ref_node(&ft->node);
477 static struct mlx5_flow_rule *alloc_rule(struct mlx5_flow_destination *dest)
479 struct mlx5_flow_rule *rule;
481 rule = kzalloc(sizeof(*rule), GFP_KERNEL);
485 rule->node.type = FS_TYPE_FLOW_DEST;
486 memcpy(&rule->dest_attr, dest, sizeof(*dest));
491 /* fte should not be deleted while calling this function */
492 static struct mlx5_flow_rule *add_rule_fte(struct fs_fte *fte,
493 struct mlx5_flow_group *fg,
494 struct mlx5_flow_destination *dest)
496 struct mlx5_flow_table *ft;
497 struct mlx5_flow_rule *rule;
500 rule = alloc_rule(dest);
502 return ERR_PTR(-ENOMEM);
504 fs_get_obj(ft, fg->node.parent);
505 /* Add dest to dests list- added as first element after the head */
506 tree_init_node(&rule->node, 1, del_rule);
507 list_add_tail(&rule->node.list, &fte->node.children);
509 if (fte->dests_size == 1)
510 err = mlx5_cmd_create_fte(get_dev(&ft->node),
513 err = mlx5_cmd_update_fte(get_dev(&ft->node),
518 fte->status |= FS_FTE_STATUS_EXISTING;
523 list_del(&rule->node.list);
529 /* Assumed fg is locked */
530 static unsigned int get_free_fte_index(struct mlx5_flow_group *fg,
531 struct list_head **prev)
534 unsigned int start = fg->start_index;
537 *prev = &fg->node.children;
539 /* assumed list is sorted by index */
540 fs_for_each_fte(fte, fg) {
541 if (fte->index != start)
545 *prev = &fte->node.list;
551 /* prev is output, prev->next = new_fte */
552 static struct fs_fte *create_fte(struct mlx5_flow_group *fg,
556 struct list_head **prev)
561 index = get_free_fte_index(fg, prev);
562 fte = alloc_fte(action, flow_tag, match_value, index);
569 /* Assuming parent fg(flow table) is locked */
570 static struct mlx5_flow_rule *add_rule_fg(struct mlx5_flow_group *fg,
574 struct mlx5_flow_destination *dest)
577 struct mlx5_flow_rule *rule;
578 struct mlx5_flow_table *ft;
579 struct list_head *prev;
581 lock_ref_node(&fg->node);
582 fs_for_each_fte(fte, fg) {
583 nested_lock_ref_node(&fte->node);
584 if (compare_match_value(&fg->mask, match_value, &fte->val) &&
585 action == fte->action && flow_tag == fte->flow_tag) {
586 rule = add_rule_fte(fte, fg, dest);
587 unlock_ref_node(&fte->node);
593 unlock_ref_node(&fte->node);
595 fs_get_obj(ft, fg->node.parent);
596 if (fg->num_ftes >= fg->max_ftes) {
597 rule = ERR_PTR(-ENOSPC);
601 fte = create_fte(fg, match_value, action, flow_tag, &prev);
606 tree_init_node(&fte->node, 0, del_fte);
607 rule = add_rule_fte(fte, fg, dest);
615 tree_add_node(&fte->node, &fg->node);
616 list_add(&fte->node.list, prev);
618 tree_add_node(&rule->node, &fte->node);
620 unlock_ref_node(&fg->node);
624 static struct mlx5_flow_rule *
625 mlx5_add_flow_rule(struct mlx5_flow_table *ft,
626 u8 match_criteria_enable,
631 struct mlx5_flow_destination *dest)
633 struct mlx5_flow_group *g;
634 struct mlx5_flow_rule *rule = ERR_PTR(-EINVAL);
636 tree_get_node(&ft->node);
637 lock_ref_node(&ft->node);
638 fs_for_each_fg(g, ft)
639 if (compare_match_criteria(g->mask.match_criteria_enable,
640 match_criteria_enable,
641 g->mask.match_criteria,
643 unlock_ref_node(&ft->node);
644 rule = add_rule_fg(g, match_value,
645 action, flow_tag, dest);
648 unlock_ref_node(&ft->node);
650 tree_put_node(&ft->node);
654 static void mlx5_del_flow_rule(struct mlx5_flow_rule *rule)
656 tree_remove_node(&rule->node);
659 static int mlx5_destroy_flow_table(struct mlx5_flow_table *ft)
661 if (tree_remove_node(&ft->node))
662 mlx5_core_warn(get_dev(&ft->node), "Flow table %d wasn't destroyed, refcount > 1\n",
668 static void mlx5_destroy_flow_group(struct mlx5_flow_group *fg)
670 if (tree_remove_node(&fg->node))
671 mlx5_core_warn(get_dev(&fg->node), "Flow group %d wasn't destroyed, refcount > 1\n",