1 /*
2 * Copyright (c) 2015, Mellanox Technologies. All rights reserved.
3 *
4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU
6 * General Public License (GPL) Version 2, available from the file
7 * COPYING in the main directory of this source tree, or the
8 * OpenIB.org BSD license below:
9 *
10 * Redistribution and use in source and binary forms, with or
11 * without modification, are permitted provided that the following
12 * conditions are met:
13 *
14 * - Redistributions of source code must retain the above
15 * copyright notice, this list of conditions and the following
16 * disclaimer.
17 *
18 * - Redistributions in binary form must reproduce the above
19 * copyright notice, this list of conditions and the following
20 * disclaimer in the documentation and/or other materials
21 * provided with the distribution.
22 *
23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30 * SOFTWARE.
31 */
32
33 #include <dev/mlx5/driver.h>
34 #include <dev/mlx5/device.h>
35 #include <dev/mlx5/mlx5_ifc.h>
36
37 #include "fs_core.h"
38 #include "fs_cmd.h"
39 #include "fs_ft_pool.h"
40 #include "mlx5_core.h"
41 #include "eswitch.h"
42
/*
 * Stub update_root_ft: namespaces without HW command backing have no root
 * flow table to (dis)connect, so always report success.
 */
static int mlx5_cmd_stub_update_root_ft(struct mlx5_flow_root_namespace *ns,
					struct mlx5_flow_table *ft,
					u32 underlay_qpn,
					bool disconnect)
{
	return 0;
}
50
mlx5_cmd_stub_create_flow_table(struct mlx5_flow_root_namespace * ns,struct mlx5_flow_table * ft,struct mlx5_flow_table_attr * ft_attr,struct mlx5_flow_table * next_ft)51 static int mlx5_cmd_stub_create_flow_table(struct mlx5_flow_root_namespace *ns,
52 struct mlx5_flow_table *ft,
53 struct mlx5_flow_table_attr *ft_attr,
54 struct mlx5_flow_table *next_ft)
55 {
56 int max_fte = ft_attr->max_fte;
57
58 ft->max_fte = max_fte ? roundup_pow_of_two(max_fte) : 1;
59
60 return 0;
61 }
62
/* Stub destroy_flow_table: nothing was created in HW, nothing to tear down. */
static int mlx5_cmd_stub_destroy_flow_table(struct mlx5_flow_root_namespace *ns,
					    struct mlx5_flow_table *ft)
{
	return 0;
}
68
/* Stub modify_flow_table: miss-table chaining is a no-op without HW. */
static int mlx5_cmd_stub_modify_flow_table(struct mlx5_flow_root_namespace *ns,
					   struct mlx5_flow_table *ft,
					   struct mlx5_flow_table *next_ft)
{
	return 0;
}
75
/* Stub create_flow_group: accept any group definition without FW involvement. */
static int mlx5_cmd_stub_create_flow_group(struct mlx5_flow_root_namespace *ns,
					   struct mlx5_flow_table *ft,
					   u32 *in,
					   struct mlx5_flow_group *fg)
{
	return 0;
}
83
/* Stub destroy_flow_group: counterpart of the create stub; always succeeds. */
static int mlx5_cmd_stub_destroy_flow_group(struct mlx5_flow_root_namespace *ns,
					    struct mlx5_flow_table *ft,
					    struct mlx5_flow_group *fg)
{
	return 0;
}
90
/* Stub create_fte: flow table entries are tracked in SW only. */
static int mlx5_cmd_stub_create_fte(struct mlx5_flow_root_namespace *ns,
				    struct mlx5_flow_table *ft,
				    struct mlx5_flow_group *group,
				    struct fs_fte *fte)
{
	return 0;
}
98
mlx5_cmd_stub_update_fte(struct mlx5_flow_root_namespace * ns,struct mlx5_flow_table * ft,struct mlx5_flow_group * group,int modify_mask,struct fs_fte * fte)99 static int mlx5_cmd_stub_update_fte(struct mlx5_flow_root_namespace *ns,
100 struct mlx5_flow_table *ft,
101 struct mlx5_flow_group *group,
102 int modify_mask,
103 struct fs_fte *fte)
104 {
105 return -EOPNOTSUPP;
106 }
107
/* Stub delete_fte: nothing to remove from HW; always succeeds. */
static int mlx5_cmd_stub_delete_fte(struct mlx5_flow_root_namespace *ns,
				    struct mlx5_flow_table *ft,
				    struct fs_fte *fte)
{
	return 0;
}
114
/* Stub packet_reformat_alloc: no reformat context is allocated in FW. */
static int mlx5_cmd_stub_packet_reformat_alloc(struct mlx5_flow_root_namespace *ns,
					       struct mlx5_pkt_reformat_params *params,
					       enum mlx5_flow_namespace_type namespace,
					       struct mlx5_pkt_reformat *pkt_reformat)
{
	return 0;
}
122
/* Stub packet_reformat_dealloc: counterpart of the alloc stub; no-op. */
static void mlx5_cmd_stub_packet_reformat_dealloc(struct mlx5_flow_root_namespace *ns,
						  struct mlx5_pkt_reformat *pkt_reformat)
{
}
127
/* Stub modify_header_alloc: no modify-header context is allocated in FW. */
static int mlx5_cmd_stub_modify_header_alloc(struct mlx5_flow_root_namespace *ns,
					     u8 namespace, u8 num_actions,
					     void *modify_actions,
					     struct mlx5_modify_hdr *modify_hdr)
{
	return 0;
}
135
/* Stub modify_header_dealloc: counterpart of the alloc stub; no-op. */
static void mlx5_cmd_stub_modify_header_dealloc(struct mlx5_flow_root_namespace *ns,
						struct mlx5_modify_hdr *modify_hdr)
{
}
140
/* Stub set_peer: peer-namespace pairing needs no HW action here. */
static int mlx5_cmd_stub_set_peer(struct mlx5_flow_root_namespace *ns,
				  struct mlx5_flow_root_namespace *peer_ns)
{
	return 0;
}
146
/* Stub create_ns: no per-namespace HW resources to set up. */
static int mlx5_cmd_stub_create_ns(struct mlx5_flow_root_namespace *ns)
{
	return 0;
}
151
/* Stub destroy_ns: counterpart of create_ns stub; no-op. */
static int mlx5_cmd_stub_destroy_ns(struct mlx5_flow_root_namespace *ns)
{
	return 0;
}
156
mlx5_cmd_stub_get_capabilities(struct mlx5_flow_root_namespace * ns,enum fs_flow_table_type ft_type)157 static u32 mlx5_cmd_stub_get_capabilities(struct mlx5_flow_root_namespace *ns,
158 enum fs_flow_table_type ft_type)
159 {
160 return 0;
161 }
162
/*
 * Program (or disconnect) the root flow table of a namespace via the
 * SET_FLOW_TABLE_ROOT command.
 *
 * With op_mod == 1 the root is disconnected; otherwise table_id selects the
 * new root. Returns 0 on success or a negative errno from command execution.
 */
static int mlx5_cmd_update_root_ft(struct mlx5_flow_root_namespace *ns,
				   struct mlx5_flow_table *ft, u32 underlay_qpn,
				   bool disconnect)
{
	u32 in[MLX5_ST_SZ_DW(set_flow_table_root_in)] = {};
	struct mlx5_core_dev *dev = ns->dev;

	/* On IB ports the root table hangs off the underlay QP; skip the
	 * command until a valid (non-zero) underlay QPN is available.
	 */
	if ((MLX5_CAP_GEN(dev, port_type) == MLX5_CAP_PORT_TYPE_IB) &&
	    underlay_qpn == 0)
		return 0;

	MLX5_SET(set_flow_table_root_in, in, opcode,
		 MLX5_CMD_OP_SET_FLOW_TABLE_ROOT);
	MLX5_SET(set_flow_table_root_in, in, table_type, ft->type);

	/* op_mod 1 == disconnect; only a connect carries a table id. */
	if (disconnect)
		MLX5_SET(set_flow_table_root_in, in, op_mod, 1);
	else
		MLX5_SET(set_flow_table_root_in, in, table_id, ft->id);

	MLX5_SET(set_flow_table_root_in, in, underlay_qpn, underlay_qpn);
	MLX5_SET(set_flow_table_root_in, in, vport_number, ft->vport);
	MLX5_SET(set_flow_table_root_in, in, other_vport,
		 !!(ft->flags & MLX5_FLOW_TABLE_OTHER_VPORT));

	return mlx5_cmd_exec_in(dev, set_flow_table_root, in);
}
190
/*
 * Create a flow table in FW.
 *
 * The table size is reserved from the flow-table pool first; on success the
 * FW-assigned table id and the actual size are recorded in @ft, on failure
 * the reservation is returned to the pool. Encap/decap/termination flags
 * and the miss-path chaining (to @next_ft, if any) are encoded into the
 * command per the table's op_mod.
 *
 * Returns 0 on success, -ENOSPC if the pool has no space, or a negative
 * errno from command execution.
 */
static int mlx5_cmd_create_flow_table(struct mlx5_flow_root_namespace *ns,
				      struct mlx5_flow_table *ft,
				      struct mlx5_flow_table_attr *ft_attr,
				      struct mlx5_flow_table *next_ft)
{
	int en_encap = !!(ft->flags & MLX5_FLOW_TABLE_TUNNEL_EN_REFORMAT);
	int en_decap = !!(ft->flags & MLX5_FLOW_TABLE_TUNNEL_EN_DECAP);
	int term = !!(ft->flags & MLX5_FLOW_TABLE_TERMINATION);
	u32 out[MLX5_ST_SZ_DW(create_flow_table_out)] = {};
	u32 in[MLX5_ST_SZ_DW(create_flow_table_in)] = {};
	struct mlx5_core_dev *dev = ns->dev;
	unsigned int size;
	int err;

	/* Reserve capacity from the pool; 0 means nothing available. */
	size = mlx5_ft_pool_get_avail_sz(dev, ft->type, ft_attr->max_fte);
	if (!size)
		return -ENOSPC;

	MLX5_SET(create_flow_table_in, in, opcode,
		 MLX5_CMD_OP_CREATE_FLOW_TABLE);

	MLX5_SET(create_flow_table_in, in, uid, ft_attr->uid);
	MLX5_SET(create_flow_table_in, in, table_type, ft->type);
	MLX5_SET(create_flow_table_in, in, flow_table_context.level, ft->level);
	/* FW takes log2 of the entry count; pool sizes are powers of two. */
	MLX5_SET(create_flow_table_in, in, flow_table_context.log_size, size ? ilog2(size) : 0);
	MLX5_SET(create_flow_table_in, in, vport_number, ft->vport);
	MLX5_SET(create_flow_table_in, in, other_vport,
		 !!(ft->flags & MLX5_FLOW_TABLE_OTHER_VPORT));

	MLX5_SET(create_flow_table_in, in, flow_table_context.decap_en,
		 en_decap);
	MLX5_SET(create_flow_table_in, in, flow_table_context.reformat_en,
		 en_encap);
	MLX5_SET(create_flow_table_in, in, flow_table_context.termination_table,
		 term);

	switch (ft->op_mod) {
	case FS_FT_OP_MOD_NORMAL:
		/* Normal tables forward misses to the next table in the
		 * chain, or use the table's default miss action when the
		 * chain ends here.
		 */
		if (next_ft) {
			MLX5_SET(create_flow_table_in, in,
				 flow_table_context.table_miss_action,
				 MLX5_FLOW_TABLE_MISS_ACTION_FWD);
			MLX5_SET(create_flow_table_in, in,
				 flow_table_context.table_miss_id, next_ft->id);
		} else {
			MLX5_SET(create_flow_table_in, in,
				 flow_table_context.table_miss_action,
				 ft->def_miss_action);
		}
		break;

	case FS_FT_OP_MOD_LAG_DEMUX:
		/* op_mod 1 selects LAG demux table creation. */
		MLX5_SET(create_flow_table_in, in, op_mod, 0x1);
		if (next_ft)
			MLX5_SET(create_flow_table_in, in,
				 flow_table_context.lag_master_next_table_id,
				 next_ft->id);
		break;
	}

	err = mlx5_cmd_exec_inout(dev, create_flow_table, in, out);
	if (!err) {
		ft->id = MLX5_GET(create_flow_table_out, out,
				  table_id);
		ft->max_fte = size;
	} else {
		/* Creation failed: release the pool reservation. */
		mlx5_ft_pool_put_sz(ns->dev, size);
	}

	return err;
}
262
/*
 * Destroy a flow table in FW and, on success, return its capacity to the
 * flow-table pool. Returns 0 or a negative errno from command execution.
 */
static int mlx5_cmd_destroy_flow_table(struct mlx5_flow_root_namespace *ns,
				       struct mlx5_flow_table *ft)
{
	u32 in[MLX5_ST_SZ_DW(destroy_flow_table_in)] = {};
	struct mlx5_core_dev *dev = ns->dev;
	int err;

	MLX5_SET(destroy_flow_table_in, in, opcode,
		 MLX5_CMD_OP_DESTROY_FLOW_TABLE);
	MLX5_SET(destroy_flow_table_in, in, table_type, ft->type);
	MLX5_SET(destroy_flow_table_in, in, table_id, ft->id);
	MLX5_SET(destroy_flow_table_in, in, vport_number, ft->vport);
	MLX5_SET(destroy_flow_table_in, in, other_vport,
		 !!(ft->flags & MLX5_FLOW_TABLE_OTHER_VPORT));

	err = mlx5_cmd_exec_in(dev, destroy_flow_table, in);
	/* Only release the pool accounting once FW has freed the table. */
	if (!err)
		mlx5_ft_pool_put_sz(ns->dev, ft->max_fte);

	return err;
}
284
/*
 * Re-chain an existing flow table's miss path via MODIFY_FLOW_TABLE.
 *
 * LAG demux tables update lag_master_next_table_id (0 when unchained);
 * all other tables update the miss action/id: forward to @next_ft when
 * given, otherwise fall back to the table's default miss action.
 */
static int mlx5_cmd_modify_flow_table(struct mlx5_flow_root_namespace *ns,
				      struct mlx5_flow_table *ft,
				      struct mlx5_flow_table *next_ft)
{
	u32 in[MLX5_ST_SZ_DW(modify_flow_table_in)] = {};
	struct mlx5_core_dev *dev = ns->dev;

	MLX5_SET(modify_flow_table_in, in, opcode,
		 MLX5_CMD_OP_MODIFY_FLOW_TABLE);
	MLX5_SET(modify_flow_table_in, in, table_type, ft->type);
	MLX5_SET(modify_flow_table_in, in, table_id, ft->id);

	if (ft->op_mod == FS_FT_OP_MOD_LAG_DEMUX) {
		MLX5_SET(modify_flow_table_in, in, modify_field_select,
			 MLX5_MODIFY_FLOW_TABLE_LAG_NEXT_TABLE_ID);
		if (next_ft) {
			MLX5_SET(modify_flow_table_in, in,
				 flow_table_context.lag_master_next_table_id, next_ft->id);
		} else {
			/* id 0 == no next table. */
			MLX5_SET(modify_flow_table_in, in,
				 flow_table_context.lag_master_next_table_id, 0);
		}
	} else {
		MLX5_SET(modify_flow_table_in, in, vport_number, ft->vport);
		MLX5_SET(modify_flow_table_in, in, other_vport,
			 !!(ft->flags & MLX5_FLOW_TABLE_OTHER_VPORT));
		MLX5_SET(modify_flow_table_in, in, modify_field_select,
			 MLX5_MODIFY_FLOW_TABLE_MISS_TABLE_ID);
		if (next_ft) {
			MLX5_SET(modify_flow_table_in, in,
				 flow_table_context.table_miss_action,
				 MLX5_FLOW_TABLE_MISS_ACTION_FWD);
			MLX5_SET(modify_flow_table_in, in,
				 flow_table_context.table_miss_id,
				 next_ft->id);
		} else {
			MLX5_SET(modify_flow_table_in, in,
				 flow_table_context.table_miss_action,
				 ft->def_miss_action);
		}
	}

	return mlx5_cmd_exec_in(dev, modify_flow_table, in);
}
329
/*
 * Create a flow group in FW. @in is the caller-built create_flow_group_in
 * mailbox (match criteria etc.); this routine fills in the table coordinates
 * and, on success, stores the FW-assigned group id in @fg.
 */
static int mlx5_cmd_create_flow_group(struct mlx5_flow_root_namespace *ns,
				      struct mlx5_flow_table *ft,
				      u32 *in,
				      struct mlx5_flow_group *fg)
{
	u32 out[MLX5_ST_SZ_DW(create_flow_group_out)] = {};
	struct mlx5_core_dev *dev = ns->dev;
	int err;

	MLX5_SET(create_flow_group_in, in, opcode,
		 MLX5_CMD_OP_CREATE_FLOW_GROUP);
	MLX5_SET(create_flow_group_in, in, table_type, ft->type);
	MLX5_SET(create_flow_group_in, in, table_id, ft->id);
	MLX5_SET(create_flow_group_in, in, vport_number, ft->vport);
	MLX5_SET(create_flow_group_in, in, other_vport,
		 !!(ft->flags & MLX5_FLOW_TABLE_OTHER_VPORT));
	err = mlx5_cmd_exec_inout(dev, create_flow_group, in, out);
	if (!err)
		fg->id = MLX5_GET(create_flow_group_out, out,
				  group_id);
	return err;
}
352
/*
 * Destroy a flow group in FW. Returns 0 or a negative errno from command
 * execution.
 */
static int mlx5_cmd_destroy_flow_group(struct mlx5_flow_root_namespace *ns,
				       struct mlx5_flow_table *ft,
				       struct mlx5_flow_group *fg)
{
	u32 in[MLX5_ST_SZ_DW(destroy_flow_group_in)] = {};
	struct mlx5_core_dev *dev = ns->dev;

	MLX5_SET(destroy_flow_group_in, in, opcode,
		 MLX5_CMD_OP_DESTROY_FLOW_GROUP);
	MLX5_SET(destroy_flow_group_in, in, table_type, ft->type);
	MLX5_SET(destroy_flow_group_in, in, table_id, ft->id);
	MLX5_SET(destroy_flow_group_in, in, group_id, fg->id);
	MLX5_SET(destroy_flow_group_in, in, vport_number, ft->vport);
	MLX5_SET(destroy_flow_group_in, in, other_vport,
		 !!(ft->flags & MLX5_FLOW_TABLE_OTHER_VPORT));
	return mlx5_cmd_exec_in(dev, destroy_flow_group, in);
}
370
/*
 * Decide whether an FTE needs the extended destination entry format.
 *
 * Extended format is required when the entry forwards to more than one
 * destination and at least one of them carries a per-destination packet
 * reformat (encap). Validates the request against the FW's
 * log_max_fdb_encap_uplink capability.
 *
 * Returns 0 and sets *extended_dest, or -EOPNOTSUPP when FW cannot honor
 * the requested combination.
 */
static int mlx5_set_extended_dest(struct mlx5_core_dev *dev,
				  struct fs_fte *fte, bool *extended_dest)
{
	int encap_log_max =
		MLX5_CAP_ESW(dev, log_max_fdb_encap_uplink);
	struct mlx5_flow_rule *dst;
	int encap_count = 0;
	int fwd_count = 0;

	*extended_dest = false;
	if (!(fte->action.action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST))
		return 0;

	list_for_each_entry(dst, &fte->node.children, node.list) {
		enum mlx5_flow_destination_type dtype = dst->dest_attr.type;

		/* Counter and "none" children are not forwarding targets. */
		if (dtype == MLX5_FLOW_DESTINATION_TYPE_COUNTER ||
		    dtype == MLX5_FLOW_DESTINATION_TYPE_NONE)
			continue;
		if ((dtype == MLX5_FLOW_DESTINATION_TYPE_VPORT ||
		     dtype == MLX5_FLOW_DESTINATION_TYPE_UPLINK) &&
		    (dst->dest_attr.vport.flags & MLX5_FLOW_DEST_VPORT_REFORMAT_ID))
			encap_count++;
		fwd_count++;
	}

	if (fwd_count > 1 && encap_count > 0)
		*extended_dest = true;

	if (*extended_dest && !encap_log_max) {
		mlx5_core_warn(dev, "FW does not support extended destination");
		return -EOPNOTSUPP;
	}
	if (encap_count > (1 << encap_log_max)) {
		mlx5_core_warn(dev, "FW does not support more than %d encaps",
			       1 << encap_log_max);
		return -EOPNOTSUPP;
	}

	return 0;
}
409
/*
 * Encode a flow-meter EXECUTE_ASO action into the flow context of a
 * SET_FLOW_TABLE_ENTRY command being built in @in_flow_context.
 */
static void
mlx5_cmd_set_fte_flow_meter(struct fs_fte *fte, void *in_flow_context)
{
	void *exe_aso_ctrl;
	void *execute_aso;

	/* Only the first execute_aso slot is used. */
	execute_aso = MLX5_ADDR_OF(flow_context, in_flow_context,
				   execute_aso[0]);
	MLX5_SET(execute_aso, execute_aso, valid, 1);
	MLX5_SET(execute_aso, execute_aso, aso_object_id,
		 fte->action.exe_aso.object_id);

	exe_aso_ctrl = MLX5_ADDR_OF(execute_aso, execute_aso, exe_aso_ctrl);
	MLX5_SET(exe_aso_ctrl_flow_meter, exe_aso_ctrl, return_reg_id,
		 fte->action.exe_aso.return_reg_id);
	MLX5_SET(exe_aso_ctrl_flow_meter, exe_aso_ctrl, aso_type,
		 fte->action.exe_aso.type);
	MLX5_SET(exe_aso_ctrl_flow_meter, exe_aso_ctrl, init_color,
		 fte->action.exe_aso.flow_meter.init_color);
	MLX5_SET(exe_aso_ctrl_flow_meter, exe_aso_ctrl, meter_id,
		 fte->action.exe_aso.flow_meter.meter_idx);
}
432
/*
 * Build and execute a SET_FLOW_TABLE_ENTRY command for @fte.
 *
 * @opmod 0 creates a new entry; @opmod 1 with @modify_mask modifies an
 * existing one. The variable-length destination list is appended after the
 * fixed flow context: forwarding destinations first, then flow counters,
 * each entry dst_cnt_size bytes wide (extended format when multi-dest +
 * per-destination encap is in play).
 *
 * Returns 0 on success, -EOPNOTSUPP / -EINVAL on unsupported action
 * combinations, -ENOMEM on allocation failure, or the command status.
 */
static int mlx5_cmd_set_fte(struct mlx5_core_dev *dev,
			    int opmod, int modify_mask,
			    struct mlx5_flow_table *ft,
			    unsigned group_id,
			    struct fs_fte *fte)
{
	u32 out[MLX5_ST_SZ_DW(set_fte_out)] = {0};
	bool extended_dest = false;
	struct mlx5_flow_rule *dst;
	void *in_flow_context, *vlan;
	void *in_match_value;
	unsigned int inlen;
	int dst_cnt_size;
	void *in_dests;
	u32 *in;
	int err;

	if (mlx5_set_extended_dest(dev, fte, &extended_dest))
		return -EOPNOTSUPP;

	/* Per-entry size of the trailing destination array. */
	if (!extended_dest)
		dst_cnt_size = MLX5_ST_SZ_BYTES(dest_format_struct);
	else
		dst_cnt_size = MLX5_ST_SZ_BYTES(extended_dest_format);

	inlen = MLX5_ST_SZ_BYTES(set_fte_in) + fte->dests_size * dst_cnt_size;
	in = kvzalloc(inlen, GFP_KERNEL);
	if (!in)
		return -ENOMEM;

	MLX5_SET(set_fte_in, in, opcode, MLX5_CMD_OP_SET_FLOW_TABLE_ENTRY);
	MLX5_SET(set_fte_in, in, op_mod, opmod);
	MLX5_SET(set_fte_in, in, modify_enable_mask, modify_mask);
	MLX5_SET(set_fte_in, in, table_type, ft->type);
	MLX5_SET(set_fte_in, in, table_id, ft->id);
	MLX5_SET(set_fte_in, in, flow_index, fte->index);
	MLX5_SET(set_fte_in, in, ignore_flow_level,
		 !!(fte->action.flags & FLOW_ACT_IGNORE_FLOW_LEVEL));

	MLX5_SET(set_fte_in, in, vport_number, ft->vport);
	MLX5_SET(set_fte_in, in, other_vport,
		 !!(ft->flags & MLX5_FLOW_TABLE_OTHER_VPORT));

	in_flow_context = MLX5_ADDR_OF(set_fte_in, in, flow_context);
	MLX5_SET(flow_context, in_flow_context, group_id, group_id);

	MLX5_SET(flow_context, in_flow_context, flow_tag,
		 fte->flow_context.flow_tag);
	MLX5_SET(flow_context, in_flow_context, flow_source,
		 fte->flow_context.flow_source);

	MLX5_SET(flow_context, in_flow_context, extended_destination,
		 extended_dest);
	if (extended_dest) {
		u32 action;

		/* In extended format, packet reformat is expressed per
		 * destination, so strip the context-level action bit.
		 */
		action = fte->action.action &
			~MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT;
		MLX5_SET(flow_context, in_flow_context, action, action);
	} else {
		MLX5_SET(flow_context, in_flow_context, action,
			 fte->action.action);
		if (fte->action.pkt_reformat)
			MLX5_SET(flow_context, in_flow_context, packet_reformat_id,
				 fte->action.pkt_reformat->id);
	}
	if (fte->action.modify_hdr)
		MLX5_SET(flow_context, in_flow_context, modify_header_id,
			 fte->action.modify_hdr->id);

	MLX5_SET(flow_context, in_flow_context, encrypt_decrypt_type,
		 fte->action.crypto.type);
	MLX5_SET(flow_context, in_flow_context, encrypt_decrypt_obj_id,
		 fte->action.crypto.obj_id);

	/* Up to two pushed VLAN headers. */
	vlan = MLX5_ADDR_OF(flow_context, in_flow_context, push_vlan);

	MLX5_SET(vlan, vlan, ethtype, fte->action.vlan[0].ethtype);
	MLX5_SET(vlan, vlan, vid, fte->action.vlan[0].vid);
	MLX5_SET(vlan, vlan, prio, fte->action.vlan[0].prio);

	vlan = MLX5_ADDR_OF(flow_context, in_flow_context, push_vlan_2);

	MLX5_SET(vlan, vlan, ethtype, fte->action.vlan[1].ethtype);
	MLX5_SET(vlan, vlan, vid, fte->action.vlan[1].vid);
	MLX5_SET(vlan, vlan, prio, fte->action.vlan[1].prio);

	in_match_value = MLX5_ADDR_OF(flow_context, in_flow_context,
				      match_value);
	memcpy(in_match_value, &fte->val, sizeof(fte->val));

	in_dests = MLX5_ADDR_OF(flow_context, in_flow_context, destination);
	if (fte->action.action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST) {
		int list_size = 0;

		/* First pass: forwarding destinations (counters handled in
		 * the second pass below).
		 */
		list_for_each_entry(dst, &fte->node.children, node.list) {
			enum mlx5_flow_destination_type type = dst->dest_attr.type;
			enum mlx5_ifc_flow_destination_type ifc_type;
			unsigned int id;

			if (type == MLX5_FLOW_DESTINATION_TYPE_COUNTER)
				continue;

			switch (type) {
			case MLX5_FLOW_DESTINATION_TYPE_NONE:
				continue;
			case MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE_NUM:
				id = dst->dest_attr.ft_num;
				ifc_type = MLX5_IFC_FLOW_DESTINATION_TYPE_FLOW_TABLE;
				break;
			case MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE:
				id = dst->dest_attr.ft->id;
				ifc_type = MLX5_IFC_FLOW_DESTINATION_TYPE_FLOW_TABLE;
				break;
			case MLX5_FLOW_DESTINATION_TYPE_UPLINK:
			case MLX5_FLOW_DESTINATION_TYPE_VPORT:
				MLX5_SET(dest_format_struct, in_dests,
					 destination_eswitch_owner_vhca_id_valid,
					 !!(dst->dest_attr.vport.flags &
					    MLX5_FLOW_DEST_VPORT_VHCA_ID));
				MLX5_SET(dest_format_struct, in_dests,
					 destination_eswitch_owner_vhca_id,
					 dst->dest_attr.vport.vhca_id);
				if (type == MLX5_FLOW_DESTINATION_TYPE_UPLINK) {
					/* destination_id is reserved */
					id = 0;
					ifc_type = MLX5_IFC_FLOW_DESTINATION_TYPE_UPLINK;
					break;
				}
				ifc_type = MLX5_IFC_FLOW_DESTINATION_TYPE_VPORT;
				id = dst->dest_attr.vport.num;
				if (extended_dest &&
				    dst->dest_attr.vport.pkt_reformat) {
					MLX5_SET(dest_format_struct, in_dests,
						 packet_reformat,
						 !!(dst->dest_attr.vport.flags &
						    MLX5_FLOW_DEST_VPORT_REFORMAT_ID));
					MLX5_SET(extended_dest_format, in_dests,
						 packet_reformat_id,
						 dst->dest_attr.vport.pkt_reformat->id);
				}
				break;
			case MLX5_FLOW_DESTINATION_TYPE_FLOW_SAMPLER:
				id = dst->dest_attr.sampler_id;
				ifc_type = MLX5_IFC_FLOW_DESTINATION_TYPE_FLOW_SAMPLER;
				break;
			case MLX5_FLOW_DESTINATION_TYPE_TABLE_TYPE:
				MLX5_SET(dest_format_struct, in_dests,
					 destination_table_type, dst->dest_attr.ft->type);
				id = dst->dest_attr.ft->id;
				ifc_type = MLX5_IFC_FLOW_DESTINATION_TYPE_TABLE_TYPE;
				break;
			default:
				id = dst->dest_attr.tir_num;
				ifc_type = MLX5_IFC_FLOW_DESTINATION_TYPE_TIR;
			}

			MLX5_SET(dest_format_struct, in_dests, destination_type,
				 ifc_type);
			MLX5_SET(dest_format_struct, in_dests, destination_id, id);
			in_dests += dst_cnt_size;
			list_size++;
		}

		MLX5_SET(flow_context, in_flow_context, destination_list_size,
			 list_size);
	}

	if (fte->action.action & MLX5_FLOW_CONTEXT_ACTION_COUNT) {
		int max_list_size = BIT(MLX5_CAP_FLOWTABLE_TYPE(dev,
					log_max_flow_counter,
					ft->type));
		int list_size = 0;

		/* Second pass: append the flow counters after the
		 * forwarding destinations.
		 */
		list_for_each_entry(dst, &fte->node.children, node.list) {
			if (dst->dest_attr.type !=
			    MLX5_FLOW_DESTINATION_TYPE_COUNTER)
				continue;

			MLX5_SET(flow_counter_list, in_dests, flow_counter_id,
				 dst->dest_attr.counter_id);
			in_dests += dst_cnt_size;
			list_size++;
		}
		if (list_size > max_list_size) {
			err = -EINVAL;
			goto err_out;
		}

		MLX5_SET(flow_context, in_flow_context, flow_counter_list_size,
			 list_size);
	}

	if (fte->action.action & MLX5_FLOW_CONTEXT_ACTION_EXECUTE_ASO) {
		/* Only the flow-meter flavor of EXECUTE_ASO is supported. */
		if (fte->action.exe_aso.type == MLX5_EXE_ASO_FLOW_METER) {
			mlx5_cmd_set_fte_flow_meter(fte, in_flow_context);
		} else {
			err = -EOPNOTSUPP;
			goto err_out;
		}
	}

	err = mlx5_cmd_exec(dev, in, inlen, out, sizeof(out));
err_out:
	kvfree(in);
	return err;
}
640
mlx5_cmd_create_fte(struct mlx5_flow_root_namespace * ns,struct mlx5_flow_table * ft,struct mlx5_flow_group * group,struct fs_fte * fte)641 static int mlx5_cmd_create_fte(struct mlx5_flow_root_namespace *ns,
642 struct mlx5_flow_table *ft,
643 struct mlx5_flow_group *group,
644 struct fs_fte *fte)
645 {
646 struct mlx5_core_dev *dev = ns->dev;
647 unsigned int group_id = group->id;
648
649 return mlx5_cmd_set_fte(dev, 0, 0, ft, group_id, fte);
650 }
651
mlx5_cmd_update_fte(struct mlx5_flow_root_namespace * ns,struct mlx5_flow_table * ft,struct mlx5_flow_group * fg,int modify_mask,struct fs_fte * fte)652 static int mlx5_cmd_update_fte(struct mlx5_flow_root_namespace *ns,
653 struct mlx5_flow_table *ft,
654 struct mlx5_flow_group *fg,
655 int modify_mask,
656 struct fs_fte *fte)
657 {
658 int opmod;
659 struct mlx5_core_dev *dev = ns->dev;
660 int atomic_mod_cap = MLX5_CAP_FLOWTABLE(dev,
661 flow_table_properties_nic_receive.
662 flow_modify_en);
663 if (!atomic_mod_cap)
664 return -EOPNOTSUPP;
665 opmod = 1;
666
667 return mlx5_cmd_set_fte(dev, opmod, modify_mask, ft, fg->id, fte);
668 }
669
/*
 * Remove a flow table entry from FW via DELETE_FLOW_TABLE_ENTRY, addressed
 * by table coordinates and the entry's flow index.
 */
static int mlx5_cmd_delete_fte(struct mlx5_flow_root_namespace *ns,
			       struct mlx5_flow_table *ft,
			       struct fs_fte *fte)
{
	u32 in[MLX5_ST_SZ_DW(delete_fte_in)] = {};
	struct mlx5_core_dev *dev = ns->dev;

	MLX5_SET(delete_fte_in, in, opcode, MLX5_CMD_OP_DELETE_FLOW_TABLE_ENTRY);
	MLX5_SET(delete_fte_in, in, table_type, ft->type);
	MLX5_SET(delete_fte_in, in, table_id, ft->id);
	MLX5_SET(delete_fte_in, in, flow_index, fte->index);
	MLX5_SET(delete_fte_in, in, vport_number, ft->vport);
	MLX5_SET(delete_fte_in, in, other_vport,
		 !!(ft->flags & MLX5_FLOW_TABLE_OTHER_VPORT));

	return mlx5_cmd_exec_in(dev, delete_fte, in);
}
687
/*
 * Allocate a bulk of flow counters. @alloc_bitmask selects the bulk size
 * (0 == a single counter); on success *id holds the base counter id of
 * the allocated range.
 */
int mlx5_cmd_fc_bulk_alloc(struct mlx5_core_dev *dev,
			   enum mlx5_fc_bulk_alloc_bitmask alloc_bitmask,
			   u32 *id)
{
	u32 out[MLX5_ST_SZ_DW(alloc_flow_counter_out)] = {};
	u32 in[MLX5_ST_SZ_DW(alloc_flow_counter_in)] = {};
	int err;

	MLX5_SET(alloc_flow_counter_in, in, opcode,
		 MLX5_CMD_OP_ALLOC_FLOW_COUNTER);
	MLX5_SET(alloc_flow_counter_in, in, flow_counter_bulk, alloc_bitmask);

	err = mlx5_cmd_exec_inout(dev, alloc_flow_counter, in, out);
	if (!err)
		*id = MLX5_GET(alloc_flow_counter_out, out, flow_counter_id);
	return err;
}
705
/* Allocate a single flow counter: a bulk allocation with an empty bitmask. */
int mlx5_cmd_fc_alloc(struct mlx5_core_dev *dev, u32 *id)
{
	return mlx5_cmd_fc_bulk_alloc(dev, 0, id);
}
710
/* Free flow counter @id back to FW. */
int mlx5_cmd_fc_free(struct mlx5_core_dev *dev, u32 id)
{
	u32 in[MLX5_ST_SZ_DW(dealloc_flow_counter_in)] = {};

	MLX5_SET(dealloc_flow_counter_in, in, opcode,
		 MLX5_CMD_OP_DEALLOC_FLOW_COUNTER);
	MLX5_SET(dealloc_flow_counter_in, in, flow_counter_id, id);
	return mlx5_cmd_exec_in(dev, dealloc_flow_counter, in);
}
720
/*
 * Query a single flow counter. On success *packets and *bytes receive the
 * counter's accumulated traffic statistics.
 */
int mlx5_cmd_fc_query(struct mlx5_core_dev *dev, u32 id,
		      u64 *packets, u64 *bytes)
{
	/* Output buffer holds the command header plus one traffic_counter. */
	u32 out[MLX5_ST_SZ_BYTES(query_flow_counter_out) +
		MLX5_ST_SZ_BYTES(traffic_counter)] = {};
	u32 in[MLX5_ST_SZ_DW(query_flow_counter_in)] = {};
	void *stats;
	int err = 0;

	MLX5_SET(query_flow_counter_in, in, opcode,
		 MLX5_CMD_OP_QUERY_FLOW_COUNTER);
	MLX5_SET(query_flow_counter_in, in, op_mod, 0);
	MLX5_SET(query_flow_counter_in, in, flow_counter_id, id);
	err = mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
	if (err)
		return err;

	stats = MLX5_ADDR_OF(query_flow_counter_out, out, flow_statistics);
	*packets = MLX5_GET64(traffic_counter, stats, packets);
	*bytes = MLX5_GET64(traffic_counter, stats, octets);
	return 0;
}
743
mlx5_cmd_fc_get_bulk_query_out_len(int bulk_len)744 int mlx5_cmd_fc_get_bulk_query_out_len(int bulk_len)
745 {
746 return MLX5_ST_SZ_BYTES(query_flow_counter_out) +
747 MLX5_ST_SZ_BYTES(traffic_counter) * bulk_len;
748 }
749
/*
 * Query @bulk_len consecutive flow counters starting at @base_id. @out must
 * be at least mlx5_cmd_fc_get_bulk_query_out_len(bulk_len) bytes.
 */
int mlx5_cmd_fc_bulk_query(struct mlx5_core_dev *dev, u32 base_id, int bulk_len,
			   u32 *out)
{
	int outlen = mlx5_cmd_fc_get_bulk_query_out_len(bulk_len);
	u32 in[MLX5_ST_SZ_DW(query_flow_counter_in)] = {};

	MLX5_SET(query_flow_counter_in, in, opcode,
		 MLX5_CMD_OP_QUERY_FLOW_COUNTER);
	MLX5_SET(query_flow_counter_in, in, flow_counter_id, base_id);
	MLX5_SET(query_flow_counter_in, in, num_of_counters, bulk_len);
	return mlx5_cmd_exec(dev, in, sizeof(in), out, outlen);
}
762
mlx5_cmd_packet_reformat_alloc(struct mlx5_flow_root_namespace * ns,struct mlx5_pkt_reformat_params * params,enum mlx5_flow_namespace_type namespace,struct mlx5_pkt_reformat * pkt_reformat)763 static int mlx5_cmd_packet_reformat_alloc(struct mlx5_flow_root_namespace *ns,
764 struct mlx5_pkt_reformat_params *params,
765 enum mlx5_flow_namespace_type namespace,
766 struct mlx5_pkt_reformat *pkt_reformat)
767 {
768 u32 out[MLX5_ST_SZ_DW(alloc_packet_reformat_context_out)] = {};
769 struct mlx5_core_dev *dev = ns->dev;
770 void *packet_reformat_context_in;
771 int max_encap_size;
772 void *reformat;
773 int inlen;
774 int err;
775 u32 *in;
776
777 if (namespace == MLX5_FLOW_NAMESPACE_FDB ||
778 namespace == MLX5_FLOW_NAMESPACE_FDB_BYPASS)
779 max_encap_size = MLX5_CAP_ESW(dev, max_encap_header_size);
780 else
781 max_encap_size = MLX5_CAP_FLOWTABLE(dev, max_encap_header_size);
782
783 if (params->size > max_encap_size) {
784 mlx5_core_warn(dev, "encap size %zd too big, max supported is %d\n",
785 params->size, max_encap_size);
786 return -EINVAL;
787 }
788
789 in = kzalloc(MLX5_ST_SZ_BYTES(alloc_packet_reformat_context_in) +
790 params->size, GFP_KERNEL);
791 if (!in)
792 return -ENOMEM;
793
794 packet_reformat_context_in = MLX5_ADDR_OF(alloc_packet_reformat_context_in,
795 in, packet_reformat_context);
796 reformat = MLX5_ADDR_OF(packet_reformat_context_in,
797 packet_reformat_context_in,
798 reformat_data);
799 inlen = reformat - (void *)in + params->size;
800
801 MLX5_SET(alloc_packet_reformat_context_in, in, opcode,
802 MLX5_CMD_OP_ALLOC_PACKET_REFORMAT_CONTEXT);
803 MLX5_SET(packet_reformat_context_in, packet_reformat_context_in,
804 reformat_data_size, params->size);
805 MLX5_SET(packet_reformat_context_in, packet_reformat_context_in,
806 reformat_type, params->type);
807 MLX5_SET(packet_reformat_context_in, packet_reformat_context_in,
808 reformat_param_0, params->param_0);
809 MLX5_SET(packet_reformat_context_in, packet_reformat_context_in,
810 reformat_param_1, params->param_1);
811 if (params->data && params->size)
812 memcpy(reformat, params->data, params->size);
813
814 err = mlx5_cmd_exec(dev, in, inlen, out, sizeof(out));
815
816 pkt_reformat->id = MLX5_GET(alloc_packet_reformat_context_out,
817 out, packet_reformat_id);
818 kfree(in);
819 return err;
820 }
821
/*
 * Free a packet reformat context in FW. The command status is ignored as
 * callers have no recovery path on dealloc failure.
 */
static void mlx5_cmd_packet_reformat_dealloc(struct mlx5_flow_root_namespace *ns,
					     struct mlx5_pkt_reformat *pkt_reformat)
{
	u32 in[MLX5_ST_SZ_DW(dealloc_packet_reformat_context_in)] = {};
	struct mlx5_core_dev *dev = ns->dev;

	MLX5_SET(dealloc_packet_reformat_context_in, in, opcode,
		 MLX5_CMD_OP_DEALLOC_PACKET_REFORMAT_CONTEXT);
	MLX5_SET(dealloc_packet_reformat_context_in, in, packet_reformat_id,
		 pkt_reformat->id);

	mlx5_cmd_exec_in(dev, dealloc_packet_reformat_context, in);
}
835
/* Allocate a firmware modify-header context holding @num_actions
 * packet-editing actions copied from @modify_actions.  On success the
 * firmware-assigned id is stored in @modify_hdr->id.
 *
 * Returns 0 on success, -EOPNOTSUPP for an unsupported namespace or an
 * action count above the device cap, -ENOMEM on allocation failure, or
 * the mlx5_cmd_exec() error.
 */
static int mlx5_cmd_modify_header_alloc(struct mlx5_flow_root_namespace *ns,
					u8 namespace, u8 num_actions,
					void *modify_actions,
					struct mlx5_modify_hdr *modify_hdr)
{
	u32 out[MLX5_ST_SZ_DW(alloc_modify_header_context_out)] = {};
	struct mlx5_core_dev *dev = ns->dev;
	int max_hdr_actions, actions_sz, cmd_len, err;
	void *actions_dst;
	u8 ft_type;
	u32 *cmd;

	/* Each namespace maps to one flow-table type, which carries its
	 * own cap on the number of modify-header actions. */
	switch (namespace) {
	case MLX5_FLOW_NAMESPACE_FDB:
	case MLX5_FLOW_NAMESPACE_FDB_BYPASS:
		max_hdr_actions = MLX5_CAP_ESW_FLOWTABLE_FDB(dev, max_modify_header_actions);
		ft_type = FS_FT_FDB;
		break;
	case MLX5_FLOW_NAMESPACE_KERNEL_RX_MACSEC:
	case MLX5_FLOW_NAMESPACE_KERNEL:
	case MLX5_FLOW_NAMESPACE_BYPASS:
		max_hdr_actions = MLX5_CAP_FLOWTABLE_NIC_RX(dev, max_modify_header_actions);
		ft_type = FS_FT_NIC_RX;
		break;
	case MLX5_FLOW_NAMESPACE_EGRESS:
	case MLX5_FLOW_NAMESPACE_EGRESS_IPSEC:
	case MLX5_FLOW_NAMESPACE_EGRESS_MACSEC:
		max_hdr_actions = MLX5_CAP_FLOWTABLE_NIC_TX(dev, max_modify_header_actions);
		ft_type = FS_FT_NIC_TX;
		break;
	case MLX5_FLOW_NAMESPACE_ESW_INGRESS:
		max_hdr_actions = MLX5_CAP_ESW_INGRESS_ACL(dev, max_modify_header_actions);
		ft_type = FS_FT_ESW_INGRESS_ACL;
		break;
	case MLX5_FLOW_NAMESPACE_RDMA_TX:
		max_hdr_actions = MLX5_CAP_FLOWTABLE_RDMA_TX(dev, max_modify_header_actions);
		ft_type = FS_FT_RDMA_TX;
		break;
	default:
		return -EOPNOTSUPP;
	}

	if (num_actions > max_hdr_actions) {
		mlx5_core_warn(dev, "too many modify header actions %d, max supported %d\n",
			       num_actions, max_hdr_actions);
		return -EOPNOTSUPP;
	}

	/* The command layout is a fixed header followed by the
	 * variable-length action array. */
	actions_sz = MLX5_UN_SZ_BYTES(set_add_copy_action_in_auto) * num_actions;
	cmd_len = MLX5_ST_SZ_BYTES(alloc_modify_header_context_in) + actions_sz;

	cmd = kzalloc(cmd_len, GFP_KERNEL);
	if (!cmd)
		return -ENOMEM;

	MLX5_SET(alloc_modify_header_context_in, cmd, opcode,
		 MLX5_CMD_OP_ALLOC_MODIFY_HEADER_CONTEXT);
	MLX5_SET(alloc_modify_header_context_in, cmd, table_type, ft_type);
	MLX5_SET(alloc_modify_header_context_in, cmd, num_of_actions, num_actions);

	actions_dst = MLX5_ADDR_OF(alloc_modify_header_context_in, cmd, actions);
	memcpy(actions_dst, modify_actions, actions_sz);

	err = mlx5_cmd_exec(dev, cmd, cmd_len, out, sizeof(out));

	/* On error @out stays zeroed, so the id read below is harmless. */
	modify_hdr->id = MLX5_GET(alloc_modify_header_context_out, out, modify_header_id);
	kfree(cmd);
	return err;
}
905
mlx5_cmd_modify_header_dealloc(struct mlx5_flow_root_namespace * ns,struct mlx5_modify_hdr * modify_hdr)906 static void mlx5_cmd_modify_header_dealloc(struct mlx5_flow_root_namespace *ns,
907 struct mlx5_modify_hdr *modify_hdr)
908 {
909 u32 in[MLX5_ST_SZ_DW(dealloc_modify_header_context_in)] = {};
910 struct mlx5_core_dev *dev = ns->dev;
911
912 MLX5_SET(dealloc_modify_header_context_in, in, opcode,
913 MLX5_CMD_OP_DEALLOC_MODIFY_HEADER_CONTEXT);
914 MLX5_SET(dealloc_modify_header_context_in, in, modify_header_id,
915 modify_hdr->id);
916
917 mlx5_cmd_exec_in(dev, dealloc_modify_header_context, in);
918 }
919
/* Report no extra steering capability flags for the firmware command
 * interface (the get_capabilities hook of mlx5_flow_cmds below). */
static u32 mlx5_cmd_get_capabilities(struct mlx5_flow_root_namespace *ns,
				     enum fs_flow_table_type ft_type)
{
	return 0;
}
925
/* Firmware-backed flow-steering command set: table/group/FTE
 * manipulation and packet-reformat / modify-header allocation are
 * issued to device firmware.  set_peer and sub-namespace create/
 * destroy use the no-op stubs. */
static const struct mlx5_flow_cmds mlx5_flow_cmds = {
	.create_flow_table = mlx5_cmd_create_flow_table,
	.destroy_flow_table = mlx5_cmd_destroy_flow_table,
	.modify_flow_table = mlx5_cmd_modify_flow_table,
	.create_flow_group = mlx5_cmd_create_flow_group,
	.destroy_flow_group = mlx5_cmd_destroy_flow_group,
	.create_fte = mlx5_cmd_create_fte,
	.update_fte = mlx5_cmd_update_fte,
	.delete_fte = mlx5_cmd_delete_fte,
	.update_root_ft = mlx5_cmd_update_root_ft,
	.packet_reformat_alloc = mlx5_cmd_packet_reformat_alloc,
	.packet_reformat_dealloc = mlx5_cmd_packet_reformat_dealloc,
	.modify_header_alloc = mlx5_cmd_modify_header_alloc,
	.modify_header_dealloc = mlx5_cmd_modify_header_dealloc,
	.set_peer = mlx5_cmd_stub_set_peer,
	.create_ns = mlx5_cmd_stub_create_ns,
	.destroy_ns = mlx5_cmd_stub_destroy_ns,
	.get_capabilities = mlx5_cmd_get_capabilities,
};
945
/* All-stub command set: every hook is a no-op, used for flow table
 * types that have no firmware backing (see mlx5_fs_cmd_get_default). */
static const struct mlx5_flow_cmds mlx5_flow_cmd_stubs = {
	.create_flow_table = mlx5_cmd_stub_create_flow_table,
	.destroy_flow_table = mlx5_cmd_stub_destroy_flow_table,
	.modify_flow_table = mlx5_cmd_stub_modify_flow_table,
	.create_flow_group = mlx5_cmd_stub_create_flow_group,
	.destroy_flow_group = mlx5_cmd_stub_destroy_flow_group,
	.create_fte = mlx5_cmd_stub_create_fte,
	.update_fte = mlx5_cmd_stub_update_fte,
	.delete_fte = mlx5_cmd_stub_delete_fte,
	.update_root_ft = mlx5_cmd_stub_update_root_ft,
	.packet_reformat_alloc = mlx5_cmd_stub_packet_reformat_alloc,
	.packet_reformat_dealloc = mlx5_cmd_stub_packet_reformat_dealloc,
	.modify_header_alloc = mlx5_cmd_stub_modify_header_alloc,
	.modify_header_dealloc = mlx5_cmd_stub_modify_header_dealloc,
	.set_peer = mlx5_cmd_stub_set_peer,
	.create_ns = mlx5_cmd_stub_create_ns,
	.destroy_ns = mlx5_cmd_stub_destroy_ns,
	.get_capabilities = mlx5_cmd_stub_get_capabilities,
};
965
mlx5_fs_cmd_get_fw_cmds(void)966 const struct mlx5_flow_cmds *mlx5_fs_cmd_get_fw_cmds(void)
967 {
968 return &mlx5_flow_cmds;
969 }
970
mlx5_fs_cmd_get_stub_cmds(void)971 static const struct mlx5_flow_cmds *mlx5_fs_cmd_get_stub_cmds(void)
972 {
973 return &mlx5_flow_cmd_stubs;
974 }
975
mlx5_fs_cmd_get_default(enum fs_flow_table_type type)976 const struct mlx5_flow_cmds *mlx5_fs_cmd_get_default(enum fs_flow_table_type type)
977 {
978 switch (type) {
979 case FS_FT_NIC_RX:
980 case FS_FT_ESW_EGRESS_ACL:
981 case FS_FT_ESW_INGRESS_ACL:
982 case FS_FT_FDB:
983 case FS_FT_SNIFFER_RX:
984 case FS_FT_SNIFFER_TX:
985 case FS_FT_NIC_TX:
986 case FS_FT_RDMA_RX:
987 case FS_FT_RDMA_TX:
988 case FS_FT_PORT_SEL:
989 return mlx5_fs_cmd_get_fw_cmds();
990 default:
991 return mlx5_fs_cmd_get_stub_cmds();
992 }
993 }
994