1 // SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
2 // Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
3 
4 #include "en/tc_priv.h"
5 #include "en_tc.h"
6 #include "post_act.h"
7 #include "mlx5_core.h"
8 #include "fs_core.h"
9 
/* Per-namespace post-action state: wraps a global flow table in which the
 * remainder of a flow's actions can be executed, keyed by an fte_id match.
 */
struct mlx5e_post_act {
	enum mlx5_flow_namespace_type ns_type;	/* FDB or NIC-RX namespace */
	struct mlx5_fs_chains *chains;		/* chains object the table was created from */
	struct mlx5_flow_table *ft;		/* the post-action flow table */
	struct mlx5e_priv *priv;		/* owning device private data */
	struct xarray ids;			/* fte_id -> post attr; ids allocated from 1 */
};
17 
/* One entry in the post-action table: the copied flow attributes, the
 * offloaded rule (NULL while not offloaded), and the fte_id that the
 * rule matches on.
 */
struct mlx5e_post_act_handle {
	enum mlx5_flow_namespace_type ns_type;	/* namespace inherited from the post_act instance */
	struct mlx5_flow_attr *attr;		/* private copy of the original rule's attributes */
	struct mlx5_flow_handle *rule;		/* installed rule, or NULL/ERR_PTR when absent */
	u32 id;					/* fte_id allocated from post_act->ids */
};
24 
/* Width, mask and maximum value of the fte_id carried in FTEID_TO_REG;
 * MLX5_POST_ACTION_MAX bounds the xarray id allocation below.
 */
#define MLX5_POST_ACTION_BITS MLX5_REG_MAPPING_MBITS(FTEID_TO_REG)
#define MLX5_POST_ACTION_MASK MLX5_REG_MAPPING_MASK(FTEID_TO_REG)
#define MLX5_POST_ACTION_MAX MLX5_POST_ACTION_MASK
28 
29 struct mlx5e_post_act *
30 mlx5e_tc_post_act_init(struct mlx5e_priv *priv, struct mlx5_fs_chains *chains,
31 		       enum mlx5_flow_namespace_type ns_type)
32 {
33 	enum fs_flow_table_type table_type = ns_type == MLX5_FLOW_NAMESPACE_FDB ?
34 					     FS_FT_FDB : FS_FT_NIC_RX;
35 	struct mlx5e_post_act *post_act;
36 	int err;
37 
38 	if (!MLX5_CAP_FLOWTABLE_TYPE(priv->mdev, ignore_flow_level, table_type)) {
39 		if (priv->mdev->coredev_type == MLX5_COREDEV_PF)
40 			mlx5_core_warn(priv->mdev, "firmware level support is missing\n");
41 		err = -EOPNOTSUPP;
42 		goto err_check;
43 	}
44 
45 	post_act = kzalloc(sizeof(*post_act), GFP_KERNEL);
46 	if (!post_act) {
47 		err = -ENOMEM;
48 		goto err_check;
49 	}
50 	post_act->ft = mlx5_chains_create_global_table(chains);
51 	if (IS_ERR(post_act->ft)) {
52 		err = PTR_ERR(post_act->ft);
53 		mlx5_core_warn(priv->mdev, "failed to create post action table, err: %d\n", err);
54 		goto err_ft;
55 	}
56 	post_act->chains = chains;
57 	post_act->ns_type = ns_type;
58 	post_act->priv = priv;
59 	xa_init_flags(&post_act->ids, XA_FLAGS_ALLOC1);
60 	return post_act;
61 
62 err_ft:
63 	kfree(post_act);
64 err_check:
65 	return ERR_PTR(err);
66 }
67 
68 void
69 mlx5e_tc_post_act_destroy(struct mlx5e_post_act *post_act)
70 {
71 	if (IS_ERR_OR_NULL(post_act))
72 		return;
73 
74 	xa_destroy(&post_act->ids);
75 	mlx5_chains_destroy_global_table(post_act->chains, post_act->ft);
76 	kfree(post_act);
77 }
78 
79 int
80 mlx5e_tc_post_act_offload(struct mlx5e_post_act *post_act,
81 			  struct mlx5e_post_act_handle *handle)
82 {
83 	struct mlx5_flow_spec *spec;
84 	int err;
85 
86 	spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
87 	if (!spec)
88 		return -ENOMEM;
89 
90 	/* Post action rule matches on fte_id and executes original rule's tc rule action */
91 	mlx5e_tc_match_to_reg_match(spec, FTEID_TO_REG, handle->id, MLX5_POST_ACTION_MASK);
92 
93 	handle->rule = mlx5e_tc_rule_offload(post_act->priv, spec, handle->attr);
94 	if (IS_ERR(handle->rule)) {
95 		err = PTR_ERR(handle->rule);
96 		netdev_warn(post_act->priv->netdev, "Failed to add post action rule");
97 		goto err_rule;
98 	}
99 
100 	kvfree(spec);
101 	return 0;
102 
103 err_rule:
104 	kvfree(spec);
105 	return err;
106 }
107 
108 struct mlx5e_post_act_handle *
109 mlx5e_tc_post_act_add(struct mlx5e_post_act *post_act, struct mlx5_flow_attr *attr)
110 {
111 	u32 attr_sz = ns_to_attr_sz(post_act->ns_type);
112 	struct mlx5e_post_act_handle *handle;
113 	struct mlx5_flow_attr *post_attr;
114 	int err;
115 
116 	handle = kzalloc(sizeof(*handle), GFP_KERNEL);
117 	post_attr = mlx5_alloc_flow_attr(post_act->ns_type);
118 	if (!handle || !post_attr) {
119 		kfree(post_attr);
120 		kfree(handle);
121 		return ERR_PTR(-ENOMEM);
122 	}
123 
124 	memcpy(post_attr, attr, attr_sz);
125 	post_attr->chain = 0;
126 	post_attr->prio = 0;
127 	post_attr->ft = post_act->ft;
128 	post_attr->inner_match_level = MLX5_MATCH_NONE;
129 	post_attr->outer_match_level = MLX5_MATCH_NONE;
130 	post_attr->action &= ~MLX5_FLOW_CONTEXT_ACTION_DECAP;
131 	post_attr->flags |= MLX5_ATTR_FLAG_NO_IN_PORT;
132 
133 	handle->ns_type = post_act->ns_type;
134 	/* Splits were handled before post action */
135 	if (handle->ns_type == MLX5_FLOW_NAMESPACE_FDB)
136 		post_attr->esw_attr->split_count = 0;
137 
138 	err = xa_alloc(&post_act->ids, &handle->id, post_attr,
139 		       XA_LIMIT(1, MLX5_POST_ACTION_MAX), GFP_KERNEL);
140 	if (err)
141 		goto err_xarray;
142 
143 	handle->attr = post_attr;
144 
145 	return handle;
146 
147 err_xarray:
148 	kfree(post_attr);
149 	kfree(handle);
150 	return ERR_PTR(err);
151 }
152 
153 void
154 mlx5e_tc_post_act_unoffload(struct mlx5e_post_act *post_act,
155 			    struct mlx5e_post_act_handle *handle)
156 {
157 	mlx5e_tc_rule_unoffload(post_act->priv, handle->rule, handle->attr);
158 	handle->rule = NULL;
159 }
160 
161 void
162 mlx5e_tc_post_act_del(struct mlx5e_post_act *post_act, struct mlx5e_post_act_handle *handle)
163 {
164 	if (!IS_ERR_OR_NULL(handle->rule))
165 		mlx5e_tc_post_act_unoffload(post_act, handle);
166 	xa_erase(&post_act->ids, handle->id);
167 	kfree(handle->attr);
168 	kfree(handle);
169 }
170 
171 struct mlx5_flow_table *
172 mlx5e_tc_post_act_get_ft(struct mlx5e_post_act *post_act)
173 {
174 	return post_act->ft;
175 }
176 
177 /* Allocate a header modify action to write the post action handle fte id to a register. */
178 int
179 mlx5e_tc_post_act_set_handle(struct mlx5_core_dev *dev,
180 			     struct mlx5e_post_act_handle *handle,
181 			     struct mlx5e_tc_mod_hdr_acts *acts)
182 {
183 	return mlx5e_tc_match_to_reg_set(dev, acts, handle->ns_type, FTEID_TO_REG, handle->id);
184 }
185