X-Git-Url: http://git.cascardo.info/?a=blobdiff_plain;f=drivers%2Fnet%2Fethernet%2Fmellanox%2Fmlx5%2Fcore%2Ffs_core.c;h=f5571a5a57c5c29f36e34de27655ae8300a06152;hb=87d22483ce68e609818d61e3a65361f5634c6cd6;hp=75bb8c8645575058324c4807356586055f13da3b;hpb=884316deb4c9fdf9becfa31831a9e40717e3026c;p=cascardo%2Flinux.git

diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
index 75bb8c864557..f5571a5a57c5 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
@@ -96,6 +96,10 @@
 #define OFFLOADS_NUM_PRIOS 1
 #define OFFLOADS_MIN_LEVEL (ANCHOR_MIN_LEVEL + 1)
 
+#define LAG_PRIO_NUM_LEVELS 1
+#define LAG_NUM_PRIOS 1
+#define LAG_MIN_LEVEL (OFFLOADS_MIN_LEVEL + 1)
+
 struct node_caps {
 	size_t	arr_sz;
 	long	*caps;
@@ -111,12 +115,16 @@ static struct init_tree_node {
 	int num_levels;
 } root_fs = {
 	.type = FS_TYPE_NAMESPACE,
-	.ar_size = 6,
+	.ar_size = 7,
 	.children = (struct init_tree_node[]) {
 		ADD_PRIO(0, BY_PASS_MIN_LEVEL, 0,
 			 FS_CHAINING_CAPS,
 			 ADD_NS(ADD_MULTIPLE_PRIO(MLX5_BY_PASS_NUM_PRIOS,
 						  BY_PASS_PRIO_NUM_LEVELS))),
+		ADD_PRIO(0, LAG_MIN_LEVEL, 0,
+			 FS_CHAINING_CAPS,
+			 ADD_NS(ADD_MULTIPLE_PRIO(LAG_NUM_PRIOS,
+						  LAG_PRIO_NUM_LEVELS))),
 		ADD_PRIO(0, OFFLOADS_MIN_LEVEL, 0, {},
 			 ADD_NS(ADD_MULTIPLE_PRIO(OFFLOADS_NUM_PRIOS, OFFLOADS_MAX_FT))),
 		ADD_PRIO(0, ETHTOOL_MIN_LEVEL, 0,
@@ -345,7 +353,7 @@ static void del_flow_table(struct fs_node *node)
 
 	err = mlx5_cmd_destroy_flow_table(dev, ft);
 	if (err)
-		pr_warn("flow steering can't destroy ft\n");
+		mlx5_core_warn(dev, "flow steering can't destroy ft\n");
 	fs_get_obj(prio, ft->node.parent);
 	prio->num_ft--;
 }
@@ -364,7 +372,7 @@ static void del_rule(struct fs_node *node)
 
 	match_value = mlx5_vzalloc(match_len);
 	if (!match_value) {
-		pr_warn("failed to allocate inbox\n");
+		mlx5_core_warn(dev, "failed to allocate inbox\n");
 		return;
 	}
 
@@ -387,8 +395,9 @@ static void del_rule(struct fs_node *node)
 					  modify_mask,
 					  fte);
 		if (err)
-			pr_warn("%s can't del rule fg id=%d fte_index=%d\n",
-				__func__, fg->id, fte->index);
+			mlx5_core_warn(dev,
+				       "%s can't del rule fg id=%d fte_index=%d\n",
+				       __func__, fg->id, fte->index);
 	}
 	kvfree(match_value);
 }
@@ -409,8 +418,9 @@ static void del_fte(struct fs_node *node)
 	err = mlx5_cmd_delete_fte(dev, ft,
 				  fte->index);
 	if (err)
-		pr_warn("flow steering can't delete fte in index %d of flow group id %d\n",
-			fte->index, fg->id);
+		mlx5_core_warn(dev,
+			       "flow steering can't delete fte in index %d of flow group id %d\n",
+			       fte->index, fg->id);
 	fte->status = 0;
 	fg->num_ftes--;
 }
@@ -427,8 +437,8 @@ static void del_flow_group(struct fs_node *node)
 	dev = get_dev(&ft->node);
 
 	if (mlx5_cmd_destroy_flow_group(dev, ft, fg->id))
-		pr_warn("flow steering can't destroy fg %d of ft %d\n",
-			fg->id, ft->id);
+		mlx5_core_warn(dev, "flow steering can't destroy fg %d of ft %d\n",
+			       fg->id, ft->id);
 }
 
 static struct fs_fte *alloc_fte(u8 action,
@@ -475,7 +485,8 @@ static struct mlx5_flow_group *alloc_flow_group(u32 *create_fg_in)
 }
 
 static struct mlx5_flow_table *alloc_flow_table(int level, u16 vport, int max_fte,
-						enum fs_flow_table_type table_type)
+						enum fs_flow_table_type table_type,
+						enum fs_flow_table_op_mod op_mod)
 {
 	struct mlx5_flow_table *ft;
 
@@ -485,6 +496,7 @@ static struct mlx5_flow_table *alloc_flow_table(int level, u16 vport, int max_ft
 
 	ft->level = level;
 	ft->node.type = FS_TYPE_FLOW_TABLE;
+	ft->op_mod = op_mod;
 	ft->type = table_type;
 	ft->vport = vport;
 	ft->max_fte = max_fte;
@@ -722,6 +734,7 @@ static void list_add_flow_table(struct mlx5_flow_table *ft,
 }
 
 static struct mlx5_flow_table *__mlx5_create_flow_table(struct mlx5_flow_namespace *ns,
+							enum fs_flow_table_op_mod op_mod,
 							u16 vport, int prio,
 							int max_fte, u32 level)
 {
@@ -754,18 +767,19 @@ static struct mlx5_flow_table *__mlx5_create_flow_table(struct mlx5_flow_namespa
 
 	level += fs_prio->start_level;
 	ft = alloc_flow_table(level, vport,
-			      roundup_pow_of_two(max_fte),
-			      root->table_type);
+			      max_fte ? roundup_pow_of_two(max_fte) : 0,
+			      root->table_type,
+			      op_mod);
 	if (!ft) {
 		err = -ENOMEM;
 		goto unlock_root;
 	}
 
 	tree_init_node(&ft->node, 1, del_flow_table);
-	log_table_sz = ilog2(ft->max_fte);
+	log_table_sz = ft->max_fte ? ilog2(ft->max_fte) : 0;
 	next_ft = find_next_chained_ft(fs_prio);
-	err = mlx5_cmd_create_flow_table(root->dev, ft->vport, ft->type, ft->level,
-					 log_table_sz, next_ft, &ft->id);
+	err = mlx5_cmd_create_flow_table(root->dev, ft->vport, ft->op_mod, ft->type,
+					 ft->level, log_table_sz, next_ft, &ft->id);
 	if (err)
 		goto free_ft;
 
@@ -792,15 +806,26 @@ struct mlx5_flow_table *mlx5_create_flow_table(struct mlx5_flow_namespace *ns,
 					       int prio, int max_fte,
 					       u32 level)
 {
-	return __mlx5_create_flow_table(ns, 0, prio, max_fte, level);
+	return __mlx5_create_flow_table(ns, FS_FT_OP_MOD_NORMAL, 0, prio,
+					max_fte, level);
 }
 
 struct mlx5_flow_table *mlx5_create_vport_flow_table(struct mlx5_flow_namespace *ns,
 						     int prio, int max_fte,
 						     u32 level, u16 vport)
 {
-	return __mlx5_create_flow_table(ns, vport, prio, max_fte, level);
+	return __mlx5_create_flow_table(ns, FS_FT_OP_MOD_NORMAL, vport, prio,
+					max_fte, level);
+}
+
+struct mlx5_flow_table *mlx5_create_lag_demux_flow_table(
+					       struct mlx5_flow_namespace *ns,
+					       int prio, u32 level)
+{
+	return __mlx5_create_flow_table(ns, FS_FT_OP_MOD_LAG_DEMUX, 0, prio, 0,
+					level);
 }
+EXPORT_SYMBOL(mlx5_create_lag_demux_flow_table);
 
 struct mlx5_flow_table *mlx5_create_auto_grouped_flow_table(struct mlx5_flow_namespace *ns,
 							    int prio,
@@ -1379,6 +1404,7 @@ struct mlx5_flow_namespace *mlx5_get_flow_namespace(struct mlx5_core_dev *dev,
 
 	switch (type) {
 	case MLX5_FLOW_NAMESPACE_BYPASS:
+	case MLX5_FLOW_NAMESPACE_LAG:
 	case MLX5_FLOW_NAMESPACE_OFFLOADS:
 	case MLX5_FLOW_NAMESPACE_ETHTOOL:
 	case MLX5_FLOW_NAMESPACE_KERNEL:
@@ -1401,6 +1427,16 @@ struct mlx5_flow_namespace *mlx5_get_flow_namespace(struct mlx5_core_dev *dev,
 			return &steering->esw_ingress_root_ns->ns;
 		else
 			return NULL;
+	case MLX5_FLOW_NAMESPACE_SNIFFER_RX:
+		if (steering->sniffer_rx_root_ns)
+			return &steering->sniffer_rx_root_ns->ns;
+		else
+			return NULL;
+	case MLX5_FLOW_NAMESPACE_SNIFFER_TX:
+		if (steering->sniffer_tx_root_ns)
+			return &steering->sniffer_tx_root_ns->ns;
+		else
+			return NULL;
 	default:
 		return NULL;
 	}
@@ -1700,10 +1736,46 @@ void mlx5_cleanup_fs(struct mlx5_core_dev *dev)
 	cleanup_root_ns(steering->esw_egress_root_ns);
 	cleanup_root_ns(steering->esw_ingress_root_ns);
 	cleanup_root_ns(steering->fdb_root_ns);
+	cleanup_root_ns(steering->sniffer_rx_root_ns);
+	cleanup_root_ns(steering->sniffer_tx_root_ns);
 	mlx5_cleanup_fc_stats(dev);
 	kfree(steering);
 }
 
+static int init_sniffer_tx_root_ns(struct mlx5_flow_steering *steering)
+{
+	struct fs_prio *prio;
+
+	steering->sniffer_tx_root_ns = create_root_ns(steering, FS_FT_SNIFFER_TX);
+	if (!steering->sniffer_tx_root_ns)
+		return -ENOMEM;
+
+	/* Create single prio */
+	prio = fs_create_prio(&steering->sniffer_tx_root_ns->ns, 0, 1);
+	if (IS_ERR(prio)) {
+		cleanup_root_ns(steering->sniffer_tx_root_ns);
+		return PTR_ERR(prio);
+	}
+	return 0;
+}
+
+static int init_sniffer_rx_root_ns(struct mlx5_flow_steering *steering)
+{
+	struct fs_prio *prio;
+
+	steering->sniffer_rx_root_ns = create_root_ns(steering, FS_FT_SNIFFER_RX);
+	if (!steering->sniffer_rx_root_ns)
+		return -ENOMEM;
+
+	/* Create single prio */
+	prio = fs_create_prio(&steering->sniffer_rx_root_ns->ns, 0, 1);
+	if (IS_ERR(prio)) {
+		cleanup_root_ns(steering->sniffer_rx_root_ns);
+		return PTR_ERR(prio);
+	}
+	return 0;
+}
+
 static int init_fdb_root_ns(struct mlx5_flow_steering *steering)
 {
 	struct fs_prio *prio;
@@ -1800,6 +1872,18 @@ int mlx5_init_fs(struct mlx5_core_dev *dev)
 		}
 	}
 
+	if (MLX5_CAP_FLOWTABLE_SNIFFER_RX(dev, ft_support)) {
+		err = init_sniffer_rx_root_ns(steering);
+		if (err)
+			goto err;
+	}
+
+	if (MLX5_CAP_FLOWTABLE_SNIFFER_TX(dev, ft_support)) {
+		err = init_sniffer_tx_root_ns(steering);
+		if (err)
+			goto err;
+	}
+
 	return 0;
 err:
 	mlx5_cleanup_fs(dev);
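
For reference, a minimal caller-side sketch of how the LAG namespace and the newly exported mlx5_create_lag_demux_flow_table() might be used. This is not part of the diff above: the wrapper function name, the includes, and the error handling are illustrative assumptions; only the two mlx5 flow-steering calls and the prio/level values mirror what the patch adds (a single LAG prio with one level).

/*
 * Illustrative sketch only: create a LAG demux flow table in the LAG
 * namespace introduced by this patch.  example_create_lag_demux_ft()
 * is a hypothetical helper, not a function added by the patch.
 */
#include <linux/err.h>
#include <linux/mlx5/driver.h>
#include <linux/mlx5/fs.h>

static struct mlx5_flow_table *
example_create_lag_demux_ft(struct mlx5_core_dev *dev)
{
	struct mlx5_flow_namespace *ns;

	/* Resolved through the new MLX5_FLOW_NAMESPACE_LAG switch case. */
	ns = mlx5_get_flow_namespace(dev, MLX5_FLOW_NAMESPACE_LAG);
	if (!ns)
		return ERR_PTR(-EOPNOTSUPP);

	/* prio 0, level 0: matches LAG_NUM_PRIOS/LAG_PRIO_NUM_LEVELS. */
	return mlx5_create_lag_demux_flow_table(ns, 0, 0);
}

Note that the demux table is created with max_fte = 0 and FS_FT_OP_MOD_LAG_DEMUX internally, which is why __mlx5_create_flow_table() now guards roundup_pow_of_two() and ilog2() against a zero table size.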