/*
 * Copyright (c) 2015, Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/mutex.h>
#include <linux/slab.h>
#include <linux/mlx5/driver.h>

#include "mlx5_core.h"
39 static void tree_init_node(struct fs_node *node,
40 unsigned int refcount,
41 void (*remove_func)(struct fs_node *))
43 atomic_set(&node->refcount, refcount);
44 INIT_LIST_HEAD(&node->list);
45 INIT_LIST_HEAD(&node->children);
46 mutex_init(&node->lock);
47 node->remove_func = remove_func;
50 static void tree_add_node(struct fs_node *node, struct fs_node *parent)
53 atomic_inc(&parent->refcount);
54 node->parent = parent;
56 /* Parent is the root */
60 node->root = parent->root;
63 static void tree_get_node(struct fs_node *node)
65 atomic_inc(&node->refcount);
68 static void nested_lock_ref_node(struct fs_node *node)
71 mutex_lock_nested(&node->lock, SINGLE_DEPTH_NESTING);
72 atomic_inc(&node->refcount);
76 static void lock_ref_node(struct fs_node *node)
79 mutex_lock(&node->lock);
80 atomic_inc(&node->refcount);
84 static void unlock_ref_node(struct fs_node *node)
87 atomic_dec(&node->refcount);
88 mutex_unlock(&node->lock);
92 static void tree_put_node(struct fs_node *node)
94 struct fs_node *parent_node = node->parent;
96 lock_ref_node(parent_node);
97 if (atomic_dec_and_test(&node->refcount)) {
99 list_del_init(&node->list);
100 if (node->remove_func)
101 node->remove_func(node);
105 unlock_ref_node(parent_node);
106 if (!node && parent_node)
107 tree_put_node(parent_node);
110 static int tree_remove_node(struct fs_node *node)
112 if (atomic_read(&node->refcount) > 1)
118 static struct fs_prio *find_prio(struct mlx5_flow_namespace *ns,
121 struct fs_prio *iter_prio;
123 fs_for_each_prio(iter_prio, ns) {
124 if (iter_prio->prio == prio)
131 static unsigned int find_next_free_level(struct fs_prio *prio)
133 if (!list_empty(&prio->node.children)) {
134 struct mlx5_flow_table *ft;
136 ft = list_last_entry(&prio->node.children,
137 struct mlx5_flow_table,
139 return ft->level + 1;
141 return prio->start_level;
144 static bool masked_memcmp(void *mask, void *val1, void *val2, size_t size)
148 for (i = 0; i < size; i++, mask++, val1++, val2++)
149 if ((*((u8 *)val1) & (*(u8 *)mask)) !=
150 ((*(u8 *)val2) & (*(u8 *)mask)))
156 static bool compare_match_value(struct mlx5_flow_group_mask *mask,
157 void *fte_param1, void *fte_param2)
159 if (mask->match_criteria_enable &
160 1 << MLX5_CREATE_FLOW_GROUP_IN_MATCH_CRITERIA_ENABLE_OUTER_HEADERS) {
161 void *fte_match1 = MLX5_ADDR_OF(fte_match_param,
162 fte_param1, outer_headers);
163 void *fte_match2 = MLX5_ADDR_OF(fte_match_param,
164 fte_param2, outer_headers);
165 void *fte_mask = MLX5_ADDR_OF(fte_match_param,
166 mask->match_criteria, outer_headers);
168 if (!masked_memcmp(fte_mask, fte_match1, fte_match2,
169 MLX5_ST_SZ_BYTES(fte_match_set_lyr_2_4)))
173 if (mask->match_criteria_enable &
174 1 << MLX5_CREATE_FLOW_GROUP_IN_MATCH_CRITERIA_ENABLE_MISC_PARAMETERS) {
175 void *fte_match1 = MLX5_ADDR_OF(fte_match_param,
176 fte_param1, misc_parameters);
177 void *fte_match2 = MLX5_ADDR_OF(fte_match_param,
178 fte_param2, misc_parameters);
179 void *fte_mask = MLX5_ADDR_OF(fte_match_param,
180 mask->match_criteria, misc_parameters);
182 if (!masked_memcmp(fte_mask, fte_match1, fte_match2,
183 MLX5_ST_SZ_BYTES(fte_match_set_misc)))
187 if (mask->match_criteria_enable &
188 1 << MLX5_CREATE_FLOW_GROUP_IN_MATCH_CRITERIA_ENABLE_INNER_HEADERS) {
189 void *fte_match1 = MLX5_ADDR_OF(fte_match_param,
190 fte_param1, inner_headers);
191 void *fte_match2 = MLX5_ADDR_OF(fte_match_param,
192 fte_param2, inner_headers);
193 void *fte_mask = MLX5_ADDR_OF(fte_match_param,
194 mask->match_criteria, inner_headers);
196 if (!masked_memcmp(fte_mask, fte_match1, fte_match2,
197 MLX5_ST_SZ_BYTES(fte_match_set_lyr_2_4)))
203 static bool compare_match_criteria(u8 match_criteria_enable1,
204 u8 match_criteria_enable2,
205 void *mask1, void *mask2)
207 return match_criteria_enable1 == match_criteria_enable2 &&
208 !memcmp(mask1, mask2, MLX5_ST_SZ_BYTES(fte_match_param));