f7d62fe595f6373574a414d9b9b68f49c5d92779
[cascardo/linux.git] / drivers / net / ethernet / mellanox / mlx5 / core / fs_core.c
1 /*
2  * Copyright (c) 2015, Mellanox Technologies. All rights reserved.
3  *
4  * This software is available to you under a choice of one of two
5  * licenses.  You may choose to be licensed under the terms of the GNU
6  * General Public License (GPL) Version 2, available from the file
7  * COPYING in the main directory of this source tree, or the
8  * OpenIB.org BSD license below:
9  *
10  *     Redistribution and use in source and binary forms, with or
11  *     without modification, are permitted provided that the following
12  *     conditions are met:
13  *
14  *      - Redistributions of source code must retain the above
15  *        copyright notice, this list of conditions and the following
16  *        disclaimer.
17  *
18  *      - Redistributions in binary form must reproduce the above
19  *        copyright notice, this list of conditions and the following
20  *        disclaimer in the documentation and/or other materials
21  *        provided with the distribution.
22  *
23  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24  * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25  * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26  * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27  * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28  * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29  * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30  * SOFTWARE.
31  */
32
33 #include <linux/mutex.h>
34 #include <linux/mlx5/driver.h>
35
36 #include "mlx5_core.h"
37 #include "fs_core.h"
38 #include "fs_cmd.h"
39
/* Count the elements of a compound-literal array of init_tree_node
 * built from the variadic arguments.
 */
#define INIT_TREE_NODE_ARRAY_SIZE(...)	(sizeof((struct init_tree_node[]){__VA_ARGS__}) /\
					 sizeof(struct init_tree_node))

/* Initializer for a FS_TYPE_PRIO init_tree_node; the variadic
 * arguments become its children array.
 */
#define INIT_PRIO(min_level_val, max_ft_val,\
		  start_level_val, ...) {.type = FS_TYPE_PRIO,\
	.min_ft_level = min_level_val,\
	.start_level = start_level_val,\
	.max_ft = max_ft_val,\
	.children = (struct init_tree_node[]) {__VA_ARGS__},\
	.ar_size = INIT_TREE_NODE_ARRAY_SIZE(__VA_ARGS__) \
}

/* Priority that requires the device to support at least min_level_val
 * flow table levels.
 */
#define ADD_PRIO(min_level_val, max_ft_val, start_level_val, ...)\
	INIT_PRIO(min_level_val, max_ft_val, start_level_val,\
		  __VA_ARGS__)\

/* Priority with no minimum flow table level requirement. */
#define ADD_FT_PRIO(max_ft_val, start_level_val, ...)\
	INIT_PRIO(0, max_ft_val, start_level_val,\
		  __VA_ARGS__)\

/* Initializer for a FS_TYPE_NAMESPACE init_tree_node. */
#define ADD_NS(...) {.type = FS_TYPE_NAMESPACE,\
	.children = (struct init_tree_node[]) {__VA_ARGS__},\
	.ar_size = INIT_TREE_NODE_ARRAY_SIZE(__VA_ARGS__) \
}
65 #define KERNEL_START_LEVEL 0
66 #define KERNEL_P0_START_LEVEL KERNEL_START_LEVEL
67 #define KERNEL_MAX_FT 2
68 #define KENREL_MIN_LEVEL 2
69 static struct init_tree_node {
70         enum fs_node_type       type;
71         struct init_tree_node *children;
72         int ar_size;
73         int min_ft_level;
74         int prio;
75         int max_ft;
76         int start_level;
77 } root_fs = {
78         .type = FS_TYPE_NAMESPACE,
79         .ar_size = 1,
80         .children = (struct init_tree_node[]) {
81                 ADD_PRIO(KENREL_MIN_LEVEL, KERNEL_MAX_FT,
82                          KERNEL_START_LEVEL,
83                          ADD_NS(ADD_FT_PRIO(KERNEL_MAX_FT,
84                                             KERNEL_P0_START_LEVEL))),
85         }
86 };
87
/* Node removal callbacks installed via tree_init_node(); each tears
 * down the object behind its node type when the refcount hits zero.
 */
static void del_rule(struct fs_node *node);
static void del_flow_table(struct fs_node *node);
static void del_flow_group(struct fs_node *node);
static void del_fte(struct fs_node *node);
92
93 static void tree_init_node(struct fs_node *node,
94                            unsigned int refcount,
95                            void (*remove_func)(struct fs_node *))
96 {
97         atomic_set(&node->refcount, refcount);
98         INIT_LIST_HEAD(&node->list);
99         INIT_LIST_HEAD(&node->children);
100         mutex_init(&node->lock);
101         node->remove_func = remove_func;
102 }
103
104 static void tree_add_node(struct fs_node *node, struct fs_node *parent)
105 {
106         if (parent)
107                 atomic_inc(&parent->refcount);
108         node->parent = parent;
109
110         /* Parent is the root */
111         if (!parent)
112                 node->root = node;
113         else
114                 node->root = parent->root;
115 }
116
/* Take an additional reference on @node. */
static void tree_get_node(struct fs_node *node)
{
	atomic_inc(&node->refcount);
}
121
122 static void nested_lock_ref_node(struct fs_node *node)
123 {
124         if (node) {
125                 mutex_lock_nested(&node->lock, SINGLE_DEPTH_NESTING);
126                 atomic_inc(&node->refcount);
127         }
128 }
129
130 static void lock_ref_node(struct fs_node *node)
131 {
132         if (node) {
133                 mutex_lock(&node->lock);
134                 atomic_inc(&node->refcount);
135         }
136 }
137
138 static void unlock_ref_node(struct fs_node *node)
139 {
140         if (node) {
141                 atomic_dec(&node->refcount);
142                 mutex_unlock(&node->lock);
143         }
144 }
145
/* Drop one reference on @node.  When the count reaches zero the node is
 * unlinked from its parent (under the parent's lock), its removal
 * callback runs and it is freed; the reference it held on its parent is
 * then released, which may cascade the free up the tree.
 */
static void tree_put_node(struct fs_node *node)
{
	struct fs_node *parent_node = node->parent;

	lock_ref_node(parent_node);
	if (atomic_dec_and_test(&node->refcount)) {
		if (parent_node)
			list_del_init(&node->list);
		if (node->remove_func)
			node->remove_func(node);
		kfree(node);
		/* NULLed as a "node was freed" marker for the check below */
		node = NULL;
	}
	unlock_ref_node(parent_node);
	if (!node && parent_node)
		tree_put_node(parent_node);
}
163
164 static int tree_remove_node(struct fs_node *node)
165 {
166         if (atomic_read(&node->refcount) > 1)
167                 return -EPERM;
168         tree_put_node(node);
169         return 0;
170 }
171
172 static struct fs_prio *find_prio(struct mlx5_flow_namespace *ns,
173                                  unsigned int prio)
174 {
175         struct fs_prio *iter_prio;
176
177         fs_for_each_prio(iter_prio, ns) {
178                 if (iter_prio->prio == prio)
179                         return iter_prio;
180         }
181
182         return NULL;
183 }
184
185 static unsigned int find_next_free_level(struct fs_prio *prio)
186 {
187         if (!list_empty(&prio->node.children)) {
188                 struct mlx5_flow_table *ft;
189
190                 ft = list_last_entry(&prio->node.children,
191                                      struct mlx5_flow_table,
192                                      node.list);
193                 return ft->level + 1;
194         }
195         return prio->start_level;
196 }
197
198 static bool masked_memcmp(void *mask, void *val1, void *val2, size_t size)
199 {
200         unsigned int i;
201
202         for (i = 0; i < size; i++, mask++, val1++, val2++)
203                 if ((*((u8 *)val1) & (*(u8 *)mask)) !=
204                     ((*(u8 *)val2) & (*(u8 *)mask)))
205                         return false;
206
207         return true;
208 }
209
/* Check whether two flow entry match values are equal on all fields the
 * group's mask cares about.  Each criteria class (outer headers, misc
 * parameters, inner headers) is compared only when enabled in
 * match_criteria_enable, using the group mask for that class.
 */
static bool compare_match_value(struct mlx5_flow_group_mask *mask,
				void *fte_param1, void *fte_param2)
{
	if (mask->match_criteria_enable &
	    1 << MLX5_CREATE_FLOW_GROUP_IN_MATCH_CRITERIA_ENABLE_OUTER_HEADERS) {
		void *fte_match1 = MLX5_ADDR_OF(fte_match_param,
						fte_param1, outer_headers);
		void *fte_match2 = MLX5_ADDR_OF(fte_match_param,
						fte_param2, outer_headers);
		void *fte_mask = MLX5_ADDR_OF(fte_match_param,
					      mask->match_criteria, outer_headers);

		if (!masked_memcmp(fte_mask, fte_match1, fte_match2,
				   MLX5_ST_SZ_BYTES(fte_match_set_lyr_2_4)))
			return false;
	}

	if (mask->match_criteria_enable &
	    1 << MLX5_CREATE_FLOW_GROUP_IN_MATCH_CRITERIA_ENABLE_MISC_PARAMETERS) {
		void *fte_match1 = MLX5_ADDR_OF(fte_match_param,
						fte_param1, misc_parameters);
		void *fte_match2 = MLX5_ADDR_OF(fte_match_param,
						fte_param2, misc_parameters);
		void *fte_mask = MLX5_ADDR_OF(fte_match_param,
					  mask->match_criteria, misc_parameters);

		if (!masked_memcmp(fte_mask, fte_match1, fte_match2,
				   MLX5_ST_SZ_BYTES(fte_match_set_misc)))
			return false;
	}

	if (mask->match_criteria_enable &
	    1 << MLX5_CREATE_FLOW_GROUP_IN_MATCH_CRITERIA_ENABLE_INNER_HEADERS) {
		void *fte_match1 = MLX5_ADDR_OF(fte_match_param,
						fte_param1, inner_headers);
		void *fte_match2 = MLX5_ADDR_OF(fte_match_param,
						fte_param2, inner_headers);
		void *fte_mask = MLX5_ADDR_OF(fte_match_param,
					  mask->match_criteria, inner_headers);

		if (!masked_memcmp(fte_mask, fte_match1, fte_match2,
				   MLX5_ST_SZ_BYTES(fte_match_set_lyr_2_4)))
			return false;
	}
	return true;
}
256
257 static bool compare_match_criteria(u8 match_criteria_enable1,
258                                    u8 match_criteria_enable2,
259                                    void *mask1, void *mask2)
260 {
261         return match_criteria_enable1 == match_criteria_enable2 &&
262                 !memcmp(mask1, mask2, MLX5_ST_SZ_BYTES(fte_match_param));
263 }
264
265 static struct mlx5_flow_root_namespace *find_root(struct fs_node *node)
266 {
267         struct fs_node *root;
268         struct mlx5_flow_namespace *ns;
269
270         root = node->root;
271
272         if (WARN_ON(root->type != FS_TYPE_NAMESPACE)) {
273                 pr_warn("mlx5: flow steering node is not in tree or garbaged\n");
274                 return NULL;
275         }
276
277         ns = container_of(root, struct mlx5_flow_namespace, node);
278         return container_of(ns, struct mlx5_flow_root_namespace, ns);
279 }
280
281 static inline struct mlx5_core_dev *get_dev(struct fs_node *node)
282 {
283         struct mlx5_flow_root_namespace *root = find_root(node);
284
285         if (root)
286                 return root->dev;
287         return NULL;
288 }
289
/* Node removal callback for a flow table: destroy the table in
 * firmware (best effort - a failure is only logged) and decrement the
 * owning priority's table count.
 */
static void del_flow_table(struct fs_node *node)
{
	struct mlx5_flow_table *ft;
	struct mlx5_core_dev *dev;
	struct fs_prio *prio;
	int err;

	fs_get_obj(ft, node);
	dev = get_dev(&ft->node);

	err = mlx5_cmd_destroy_flow_table(dev, ft);
	if (err)
		pr_warn("flow steering can't destroy ft\n");
	fs_get_obj(prio, ft->node.parent);
	prio->num_ft--;
}
306
307 static void del_rule(struct fs_node *node)
308 {
309         struct mlx5_flow_rule *rule;
310         struct mlx5_flow_table *ft;
311         struct mlx5_flow_group *fg;
312         struct fs_fte *fte;
313         u32     *match_value;
314         struct mlx5_core_dev *dev = get_dev(node);
315         int match_len = MLX5_ST_SZ_BYTES(fte_match_param);
316         int err;
317
318         match_value = mlx5_vzalloc(match_len);
319         if (!match_value) {
320                 pr_warn("failed to allocate inbox\n");
321                 return;
322         }
323
324         fs_get_obj(rule, node);
325         fs_get_obj(fte, rule->node.parent);
326         fs_get_obj(fg, fte->node.parent);
327         memcpy(match_value, fte->val, sizeof(fte->val));
328         fs_get_obj(ft, fg->node.parent);
329         list_del(&rule->node.list);
330         fte->dests_size--;
331         if (fte->dests_size) {
332                 err = mlx5_cmd_update_fte(dev, ft,
333                                           fg->id, fte);
334                 if (err)
335                         pr_warn("%s can't del rule fg id=%d fte_index=%d\n",
336                                 __func__, fg->id, fte->index);
337         }
338         kvfree(match_value);
339 }
340
/* Node removal callback for a flow table entry: delete the FTE at its
 * index in firmware (failure only logged), clear its status and drop
 * the group's entry count.
 */
static void del_fte(struct fs_node *node)
{
	struct mlx5_flow_table *ft;
	struct mlx5_flow_group *fg;
	struct mlx5_core_dev *dev;
	struct fs_fte *fte;
	int err;

	fs_get_obj(fte, node);
	fs_get_obj(fg, fte->node.parent);
	fs_get_obj(ft, fg->node.parent);

	dev = get_dev(&ft->node);
	err = mlx5_cmd_delete_fte(dev, ft,
				  fte->index);
	if (err)
		pr_warn("flow steering can't delete fte in index %d of flow group id %d\n",
			fte->index, fg->id);

	fte->status = 0;
	fg->num_ftes--;
}
363
364 static void del_flow_group(struct fs_node *node)
365 {
366         struct mlx5_flow_group *fg;
367         struct mlx5_flow_table *ft;
368         struct mlx5_core_dev *dev;
369
370         fs_get_obj(fg, node);
371         fs_get_obj(ft, fg->node.parent);
372         dev = get_dev(&ft->node);
373
374         if (mlx5_cmd_destroy_flow_group(dev, ft, fg->id))
375                 pr_warn("flow steering can't destroy fg %d of ft %d\n",
376                         fg->id, ft->id);
377 }
378
379 static struct fs_fte *alloc_fte(u8 action,
380                                 u32 flow_tag,
381                                 u32 *match_value,
382                                 unsigned int index)
383 {
384         struct fs_fte *fte;
385
386         fte = kzalloc(sizeof(*fte), GFP_KERNEL);
387         if (!fte)
388                 return ERR_PTR(-ENOMEM);
389
390         memcpy(fte->val, match_value, sizeof(fte->val));
391         fte->node.type =  FS_TYPE_FLOW_ENTRY;
392         fte->flow_tag = flow_tag;
393         fte->index = index;
394         fte->action = action;
395
396         return fte;
397 }
398
399 static struct mlx5_flow_group *alloc_flow_group(u32 *create_fg_in)
400 {
401         struct mlx5_flow_group *fg;
402         void *match_criteria = MLX5_ADDR_OF(create_flow_group_in,
403                                             create_fg_in, match_criteria);
404         u8 match_criteria_enable = MLX5_GET(create_flow_group_in,
405                                             create_fg_in,
406                                             match_criteria_enable);
407         fg = kzalloc(sizeof(*fg), GFP_KERNEL);
408         if (!fg)
409                 return ERR_PTR(-ENOMEM);
410
411         fg->mask.match_criteria_enable = match_criteria_enable;
412         memcpy(&fg->mask.match_criteria, match_criteria,
413                sizeof(fg->mask.match_criteria));
414         fg->node.type =  FS_TYPE_FLOW_GROUP;
415         fg->start_index = MLX5_GET(create_flow_group_in, create_fg_in,
416                                    start_flow_index);
417         fg->max_ftes = MLX5_GET(create_flow_group_in, create_fg_in,
418                                 end_flow_index) - fg->start_index + 1;
419         return fg;
420 }
421
422 static struct mlx5_flow_table *alloc_flow_table(int level, int max_fte,
423                                                 enum fs_flow_table_type table_type)
424 {
425         struct mlx5_flow_table *ft;
426
427         ft  = kzalloc(sizeof(*ft), GFP_KERNEL);
428         if (!ft)
429                 return NULL;
430
431         ft->level = level;
432         ft->node.type = FS_TYPE_FLOW_TABLE;
433         ft->type = table_type;
434         ft->max_fte = max_fte;
435
436         return ft;
437 }
438
/* Create a flow table in priority @prio of namespace @ns.
 *
 * Returns ERR_PTR(-ENODEV) when the namespace has no root,
 * ERR_PTR(-EINVAL) when the priority does not exist and
 * ERR_PTR(-ENOSPC) when the priority already holds max_ft tables.
 * max_fte is rounded up to a power of two before the firmware call.
 * On success the table is linked as the last child of the priority.
 */
struct mlx5_flow_table *mlx5_create_flow_table(struct mlx5_flow_namespace *ns,
					       int prio,
					       int max_fte)
{
	struct mlx5_flow_table *ft;
	int err;
	int log_table_sz;
	struct mlx5_flow_root_namespace *root =
		find_root(&ns->node);
	struct fs_prio *fs_prio = NULL;

	if (!root) {
		pr_err("mlx5: flow steering failed to find root of namespace\n");
		return ERR_PTR(-ENODEV);
	}

	fs_prio = find_prio(ns, prio);
	if (!fs_prio)
		return ERR_PTR(-EINVAL);

	lock_ref_node(&fs_prio->node);
	if (fs_prio->num_ft == fs_prio->max_ft) {
		err = -ENOSPC;
		goto unlock_prio;
	}

	/* New tables go at the level after the priority's last table. */
	ft = alloc_flow_table(find_next_free_level(fs_prio),
			      roundup_pow_of_two(max_fte),
			      root->table_type);
	if (!ft) {
		err = -ENOMEM;
		goto unlock_prio;
	}

	tree_init_node(&ft->node, 1, del_flow_table);
	log_table_sz = ilog2(ft->max_fte);
	err = mlx5_cmd_create_flow_table(root->dev, ft->type, ft->level,
					 log_table_sz, &ft->id);
	if (err)
		goto free_ft;

	tree_add_node(&ft->node, &fs_prio->node);
	list_add_tail(&ft->node.list, &fs_prio->node.children);
	fs_prio->num_ft++;
	unlock_ref_node(&fs_prio->node);

	return ft;

free_ft:
	kfree(ft);
unlock_prio:
	unlock_ref_node(&fs_prio->node);
	return ERR_PTR(err);
}
493
494 struct mlx5_flow_group *mlx5_create_flow_group(struct mlx5_flow_table *ft,
495                                                u32 *fg_in)
496 {
497         struct mlx5_flow_group *fg;
498         struct mlx5_core_dev *dev = get_dev(&ft->node);
499         int err;
500
501         if (!dev)
502                 return ERR_PTR(-ENODEV);
503
504         fg = alloc_flow_group(fg_in);
505         if (IS_ERR(fg))
506                 return fg;
507
508         lock_ref_node(&ft->node);
509         err = mlx5_cmd_create_flow_group(dev, ft, fg_in, &fg->id);
510         if (err) {
511                 kfree(fg);
512                 unlock_ref_node(&ft->node);
513                 return ERR_PTR(err);
514         }
515         /* Add node to tree */
516         tree_init_node(&fg->node, 1, del_flow_group);
517         tree_add_node(&fg->node, &ft->node);
518         /* Add node to group list */
519         list_add(&fg->node.list, ft->node.children.prev);
520         unlock_ref_node(&ft->node);
521
522         return fg;
523 }
524
525 static struct mlx5_flow_rule *alloc_rule(struct mlx5_flow_destination *dest)
526 {
527         struct mlx5_flow_rule *rule;
528
529         rule = kzalloc(sizeof(*rule), GFP_KERNEL);
530         if (!rule)
531                 return NULL;
532
533         rule->node.type = FS_TYPE_FLOW_DEST;
534         memcpy(&rule->dest_attr, dest, sizeof(*dest));
535
536         return rule;
537 }
538
/* Attach a new destination rule to @fte and push it to firmware: the
 * first destination creates the FTE on the device, subsequent ones
 * update it.  On firmware failure the rule is unlinked and freed.
 * fte should not be deleted while calling this function.
 */
static struct mlx5_flow_rule *add_rule_fte(struct fs_fte *fte,
					   struct mlx5_flow_group *fg,
					   struct mlx5_flow_destination *dest)
{
	struct mlx5_flow_table *ft;
	struct mlx5_flow_rule *rule;
	int err;

	rule = alloc_rule(dest);
	if (!rule)
		return ERR_PTR(-ENOMEM);

	fs_get_obj(ft, fg->node.parent);
	/* Append the rule to the fte's destination list. */
	tree_init_node(&rule->node, 1, del_rule);
	list_add_tail(&rule->node.list, &fte->node.children);
	fte->dests_size++;
	if (fte->dests_size == 1)
		err = mlx5_cmd_create_fte(get_dev(&ft->node),
					  ft, fg->id, fte);
	else
		err = mlx5_cmd_update_fte(get_dev(&ft->node),
					  ft, fg->id, fte);
	if (err)
		goto free_rule;

	fte->status |= FS_FTE_STATUS_EXISTING;

	return rule;

free_rule:
	list_del(&rule->node.list);
	kfree(rule);
	fte->dests_size--;
	return ERR_PTR(err);
}
576
577 /* Assumed fg is locked */
578 static unsigned int get_free_fte_index(struct mlx5_flow_group *fg,
579                                        struct list_head **prev)
580 {
581         struct fs_fte *fte;
582         unsigned int start = fg->start_index;
583
584         if (prev)
585                 *prev = &fg->node.children;
586
587         /* assumed list is sorted by index */
588         fs_for_each_fte(fte, fg) {
589                 if (fte->index != start)
590                         return start;
591                 start++;
592                 if (prev)
593                         *prev = &fte->node.list;
594         }
595
596         return start;
597 }
598
599 /* prev is output, prev->next = new_fte */
600 static struct fs_fte *create_fte(struct mlx5_flow_group *fg,
601                                  u32 *match_value,
602                                  u8 action,
603                                  u32 flow_tag,
604                                  struct list_head **prev)
605 {
606         struct fs_fte *fte;
607         int index;
608
609         index = get_free_fte_index(fg, prev);
610         fte = alloc_fte(action, flow_tag, match_value, index);
611         if (IS_ERR(fte))
612                 return fte;
613
614         return fte;
615 }
616
/* Assuming parent fg(flow table) is locked.
 * Add a rule to flow group @fg: reuse an existing FTE whose match
 * value, action and flow tag all match, otherwise create a new FTE at
 * the first free index.  Holds the group lock; FTE locks are taken
 * nested under it.
 */
static struct mlx5_flow_rule *add_rule_fg(struct mlx5_flow_group *fg,
					  u32 *match_value,
					  u8 action,
					  u32 flow_tag,
					  struct mlx5_flow_destination *dest)
{
	struct fs_fte *fte;
	struct mlx5_flow_rule *rule;
	struct mlx5_flow_table *ft;
	struct list_head *prev;

	lock_ref_node(&fg->node);
	/* First pass: look for an FTE with identical value/action/tag. */
	fs_for_each_fte(fte, fg) {
		nested_lock_ref_node(&fte->node);
		if (compare_match_value(&fg->mask, match_value, &fte->val) &&
		    action == fte->action && flow_tag == fte->flow_tag) {
			rule = add_rule_fte(fte, fg, dest);
			unlock_ref_node(&fte->node);
			if (IS_ERR(rule))
				goto unlock_fg;
			else
				goto add_rule;
		}
		unlock_ref_node(&fte->node);
	}
	/* NOTE(review): ft is fetched but not used below - confirm. */
	fs_get_obj(ft, fg->node.parent);
	if (fg->num_ftes >= fg->max_ftes) {
		rule = ERR_PTR(-ENOSPC);
		goto unlock_fg;
	}

	/* No match found: build a fresh FTE and add the rule to it. */
	fte = create_fte(fg, match_value, action, flow_tag, &prev);
	if (IS_ERR(fte)) {
		rule = (void *)fte;
		goto unlock_fg;
	}
	tree_init_node(&fte->node, 0, del_fte);
	rule = add_rule_fte(fte, fg, dest);
	if (IS_ERR(rule)) {
		kfree(fte);
		goto unlock_fg;
	}

	fg->num_ftes++;

	tree_add_node(&fte->node, &fg->node);
	/* Insert after *prev to keep the FTE list sorted by index. */
	list_add(&fte->node.list, prev);
add_rule:
	tree_add_node(&rule->node, &fte->node);
unlock_fg:
	unlock_ref_node(&fg->node);
	return rule;
}
671
/* Add a flow rule to @ft: find a flow group whose match criteria are
 * identical to the given ones and add the rule there.  Returns
 * ERR_PTR(-EINVAL) when no such group exists.
 */
struct mlx5_flow_rule *
mlx5_add_flow_rule(struct mlx5_flow_table *ft,
		   u8 match_criteria_enable,
		   u32 *match_criteria,
		   u32 *match_value,
		   u32 action,
		   u32 flow_tag,
		   struct mlx5_flow_destination *dest)
{
	struct mlx5_flow_group *g;
	struct mlx5_flow_rule *rule = ERR_PTR(-EINVAL);

	/* Pin the table so it cannot disappear while the rule is added. */
	tree_get_node(&ft->node);
	lock_ref_node(&ft->node);
	fs_for_each_fg(g, ft)
		if (compare_match_criteria(g->mask.match_criteria_enable,
					   match_criteria_enable,
					   g->mask.match_criteria,
					   match_criteria)) {
			/* The table lock is dropped before add_rule_fg()
			 * takes the group lock.
			 */
			unlock_ref_node(&ft->node);
			rule = add_rule_fg(g, match_value,
					   action, flow_tag, dest);
			goto put;
		}
	unlock_ref_node(&ft->node);
put:
	tree_put_node(&ft->node);
	return rule;
}
701
/* Delete a flow rule.  tree_remove_node() silently skips the removal
 * (returning -EPERM, ignored here) if something else still holds a
 * reference on the rule.
 */
void mlx5_del_flow_rule(struct mlx5_flow_rule *rule)
{
	tree_remove_node(&rule->node);
}
706
/* Destroy flow table @ft.  If other references are still held the
 * table is kept and a warning is logged; 0 is returned either way.
 */
int mlx5_destroy_flow_table(struct mlx5_flow_table *ft)
{
	if (tree_remove_node(&ft->node))
		mlx5_core_warn(get_dev(&ft->node), "Flow table %d wasn't destroyed, refcount > 1\n",
			       ft->id);

	return 0;
}
715
/* Destroy flow group @fg.  If other references are still held the
 * group is kept and a warning is logged.
 */
void mlx5_destroy_flow_group(struct mlx5_flow_group *fg)
{
	if (tree_remove_node(&fg->node))
		mlx5_core_warn(get_dev(&fg->node), "Flow group %d wasn't destroyed, refcount > 1\n",
			       fg->id);
}
722
723 struct mlx5_flow_namespace *mlx5_get_flow_namespace(struct mlx5_core_dev *dev,
724                                                     enum mlx5_flow_namespace_type type)
725 {
726         struct mlx5_flow_root_namespace *root_ns = dev->priv.root_ns;
727         int prio;
728         static struct fs_prio *fs_prio;
729         struct mlx5_flow_namespace *ns;
730
731         if (!root_ns)
732                 return NULL;
733
734         switch (type) {
735         case MLX5_FLOW_NAMESPACE_KERNEL:
736                 prio = 0;
737                 break;
738         case MLX5_FLOW_NAMESPACE_FDB:
739                 if (dev->priv.fdb_root_ns)
740                         return &dev->priv.fdb_root_ns->ns;
741                 else
742                         return NULL;
743         default:
744                 return NULL;
745         }
746
747         fs_prio = find_prio(&root_ns->ns, prio);
748         if (!fs_prio)
749                 return NULL;
750
751         ns = list_first_entry(&fs_prio->node.children,
752                               typeof(*ns),
753                               node.list);
754
755         return ns;
756 }
757
758 static struct fs_prio *fs_create_prio(struct mlx5_flow_namespace *ns,
759                                       unsigned prio, int max_ft,
760                                       int start_level)
761 {
762         struct fs_prio *fs_prio;
763
764         fs_prio = kzalloc(sizeof(*fs_prio), GFP_KERNEL);
765         if (!fs_prio)
766                 return ERR_PTR(-ENOMEM);
767
768         fs_prio->node.type = FS_TYPE_PRIO;
769         tree_init_node(&fs_prio->node, 1, NULL);
770         tree_add_node(&fs_prio->node, &ns->node);
771         fs_prio->max_ft = max_ft;
772         fs_prio->prio = prio;
773         fs_prio->start_level = start_level;
774         list_add_tail(&fs_prio->node.list, &ns->node.children);
775
776         return fs_prio;
777 }
778
/* Mark @ns as a namespace-type node; returns @ns for chaining. */
static struct mlx5_flow_namespace *fs_init_namespace(struct mlx5_flow_namespace
						     *ns)
{
	ns->node.type = FS_TYPE_NAMESPACE;

	return ns;
}
786
787 static struct mlx5_flow_namespace *fs_create_namespace(struct fs_prio *prio)
788 {
789         struct mlx5_flow_namespace      *ns;
790
791         ns = kzalloc(sizeof(*ns), GFP_KERNEL);
792         if (!ns)
793                 return ERR_PTR(-ENOMEM);
794
795         fs_init_namespace(ns);
796         tree_init_node(&ns->node, 1, NULL);
797         tree_add_node(&ns->node, &prio->node);
798         list_add_tail(&ns->node.list, &prio->node.children);
799
800         return ns;
801 }
802
/* Recursively materialize the static tree described by @init_node
 * under @fs_parent_node: a FS_TYPE_PRIO entry becomes an fs_prio
 * (using @index as its priority), a FS_TYPE_NAMESPACE entry becomes a
 * child namespace.  Returns -ENOTSUPP when the device's max_ft_level
 * cannot satisfy a priority's min_ft_level requirement.
 */
static int init_root_tree_recursive(int max_ft_level, struct init_tree_node *init_node,
				    struct fs_node *fs_parent_node,
				    struct init_tree_node *init_parent_node,
				    int index)
{
	struct mlx5_flow_namespace *fs_ns;
	struct fs_prio *fs_prio;
	struct fs_node *base;
	int i;
	int err;

	if (init_node->type == FS_TYPE_PRIO) {
		if (init_node->min_ft_level > max_ft_level)
			return -ENOTSUPP;

		fs_get_obj(fs_ns, fs_parent_node);
		fs_prio = fs_create_prio(fs_ns, index, init_node->max_ft,
					 init_node->start_level);
		if (IS_ERR(fs_prio))
			return PTR_ERR(fs_prio);
		base = &fs_prio->node;
	} else if (init_node->type == FS_TYPE_NAMESPACE) {
		fs_get_obj(fs_prio, fs_parent_node);
		fs_ns = fs_create_namespace(fs_prio);
		if (IS_ERR(fs_ns))
			return PTR_ERR(fs_ns);
		base = &fs_ns->node;
	} else {
		return -EINVAL;
	}
	/* Recurse with the freshly created node as the new parent. */
	for (i = 0; i < init_node->ar_size; i++) {
		err = init_root_tree_recursive(max_ft_level,
					       &init_node->children[i], base,
					       init_node, i);
		if (err)
			return err;
	}

	return 0;
}
843
/* Build the steering tree from @init_node's top-level children under
 * the namespace node @fs_parent_node; stops at the first error.
 */
static int init_root_tree(int max_ft_level, struct init_tree_node *init_node,
			  struct fs_node *fs_parent_node)
{
	int i;
	struct mlx5_flow_namespace *fs_ns;
	int err;

	fs_get_obj(fs_ns, fs_parent_node);
	for (i = 0; i < init_node->ar_size; i++) {
		err = init_root_tree_recursive(max_ft_level,
					       &init_node->children[i],
					       &fs_ns->node,
					       init_node, i);
		if (err)
			return err;
	}
	return 0;
}
862
/* Allocate and initialize a root namespace for @table_type; the
 * embedded namespace node becomes the root of its reference tree.
 * Returns NULL on allocation failure.
 */
static struct mlx5_flow_root_namespace *create_root_ns(struct mlx5_core_dev *dev,
						       enum fs_flow_table_type
						       table_type)
{
	struct mlx5_flow_root_namespace *root_ns;
	struct mlx5_flow_namespace *ns;

	/* Create the root namespace */
	root_ns = mlx5_vzalloc(sizeof(*root_ns));
	if (!root_ns)
		return NULL;

	root_ns->dev = dev;
	root_ns->table_type = table_type;

	ns = &root_ns->ns;
	fs_init_namespace(ns);
	tree_init_node(&ns->node, 1, NULL);
	/* A NULL parent marks this node as its own tree root. */
	tree_add_node(&ns->node, NULL);

	return root_ns;
}
885
886 static int init_root_ns(struct mlx5_core_dev *dev)
887 {
888         int max_ft_level = MLX5_CAP_FLOWTABLE(dev,
889                                               flow_table_properties_nic_receive.
890                                               max_ft_level);
891
892         dev->priv.root_ns = create_root_ns(dev, FS_FT_NIC_RX);
893         if (IS_ERR_OR_NULL(dev->priv.root_ns))
894                 goto cleanup;
895
896         if (init_root_tree(max_ft_level, &root_fs, &dev->priv.root_ns->ns.node))
897                 goto cleanup;
898
899         return 0;
900
901 cleanup:
902         mlx5_cleanup_fs(dev);
903         return -ENOMEM;
904 }
905
906 static void cleanup_single_prio_root_ns(struct mlx5_core_dev *dev,
907                                         struct mlx5_flow_root_namespace *root_ns)
908 {
909         struct fs_node *prio;
910
911         if (!root_ns)
912                 return;
913
914         if (!list_empty(&root_ns->ns.node.children)) {
915                 prio = list_first_entry(&root_ns->ns.node.children,
916                                         struct fs_node,
917                                  list);
918                 if (tree_remove_node(prio))
919                         mlx5_core_warn(dev,
920                                        "Flow steering priority wasn't destroyed, refcount > 1\n");
921         }
922         if (tree_remove_node(&root_ns->ns.node))
923                 mlx5_core_warn(dev,
924                                "Flow steering namespace wasn't destroyed, refcount > 1\n");
925         root_ns = NULL;
926 }
927
/* Tear down the NIC RX root namespace bottom-up in three stages:
 * leaf priorities first, then the namespaces that held them, then the
 * top-level priorities, and finally the root namespace node itself.
 * If any node is still referenced (tree_remove_node() returns non-zero)
 * the teardown aborts with a warning, leaving dev->priv.root_ns set.
 */
static void cleanup_root_ns(struct mlx5_core_dev *dev)
{
        struct mlx5_flow_root_namespace *root_ns = dev->priv.root_ns;
        struct fs_prio *iter_prio;

        if (!MLX5_CAP_GEN(dev, nic_flow_table))
                return;

        if (!root_ns)
                return;

        /* stage 1 */
        /* Remove the priorities nested inside each second-level namespace
         * (flow tables hanging directly off a top-level priority are
         * skipped; they are not destroyed here).
         */
        fs_for_each_prio(iter_prio, &root_ns->ns) {
                struct fs_node *node;
                struct mlx5_flow_namespace *iter_ns;

                fs_for_each_ns_or_ft(node, iter_prio) {
                        if (node->type == FS_TYPE_FLOW_TABLE)
                                continue;
                        fs_get_obj(iter_ns, node);
                        /* Pop children one at a time; each removal unlinks
                         * the entry, so list_first_entry() always yields a
                         * fresh node.
                         */
                        while (!list_empty(&iter_ns->node.children)) {
                                struct fs_prio *obj_iter_prio2;
                                struct fs_node *iter_prio2 =
                                        list_first_entry(&iter_ns->node.children,
                                                         struct fs_node,
                                                         list);

                                fs_get_obj(obj_iter_prio2, iter_prio2);
                                if (tree_remove_node(iter_prio2)) {
                                        mlx5_core_warn(dev,
                                                       "Priority %d wasn't destroyed, refcount > 1\n",
                                                       obj_iter_prio2->prio);
                                        return;
                                }
                        }
                }
        }

        /* stage 2 */
        /* Remove the (now childless) namespaces under each top-level
         * priority.
         */
        fs_for_each_prio(iter_prio, &root_ns->ns) {
                while (!list_empty(&iter_prio->node.children)) {
                        struct fs_node *iter_ns =
                                list_first_entry(&iter_prio->node.children,
                                                 struct fs_node,
                                                 list);
                        if (tree_remove_node(iter_ns)) {
                                mlx5_core_warn(dev,
                                               "Namespace wasn't destroyed, refcount > 1\n");
                                return;
                        }
                }
        }

        /* stage 3 */
        /* Remove the top-level priorities directly under the root
         * namespace.
         */
        while (!list_empty(&root_ns->ns.node.children)) {
                struct fs_prio *obj_prio_node;
                struct fs_node *prio_node =
                        list_first_entry(&root_ns->ns.node.children,
                                         struct fs_node,
                                         list);

                fs_get_obj(obj_prio_node, prio_node);
                if (tree_remove_node(prio_node)) {
                        mlx5_core_warn(dev,
                                       "Priority %d wasn't destroyed, refcount > 1\n",
                                       obj_prio_node->prio);
                        return;
                }
        }

        /* Finally drop the root namespace node itself. */
        if (tree_remove_node(&root_ns->ns.node)) {
                mlx5_core_warn(dev,
                               "root namespace wasn't destroyed, refcount > 1\n");
                return;
        }

        /* Only cleared on a fully successful teardown. */
        dev->priv.root_ns = NULL;
}
1006
1007 void mlx5_cleanup_fs(struct mlx5_core_dev *dev)
1008 {
1009         cleanup_root_ns(dev);
1010         cleanup_single_prio_root_ns(dev, dev->priv.fdb_root_ns);
1011 }
1012
1013 static int init_fdb_root_ns(struct mlx5_core_dev *dev)
1014 {
1015         struct fs_prio *prio;
1016
1017         dev->priv.fdb_root_ns = create_root_ns(dev, FS_FT_FDB);
1018         if (!dev->priv.fdb_root_ns)
1019                 return -ENOMEM;
1020
1021         /* Create single prio */
1022         prio = fs_create_prio(&dev->priv.fdb_root_ns->ns, 0, 1, 0);
1023         if (IS_ERR(prio)) {
1024                 cleanup_single_prio_root_ns(dev, dev->priv.fdb_root_ns);
1025                 return PTR_ERR(prio);
1026         } else {
1027                 return 0;
1028         }
1029 }
1030
1031 int mlx5_init_fs(struct mlx5_core_dev *dev)
1032 {
1033         int err = 0;
1034
1035         if (MLX5_CAP_GEN(dev, nic_flow_table)) {
1036                 err = init_root_ns(dev);
1037                 if (err)
1038                         return err;
1039         }
1040         if (MLX5_CAP_GEN(dev, eswitch_flow_table)) {
1041                 err = init_fdb_root_ns(dev);
1042                 if (err)
1043                         cleanup_root_ns(dev);
1044         }
1045
1046         return err;
1047 }