1 /* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
2 /* Copyright (c) 2019, Mellanox Technologies */
3 
4 #ifndef	_DR_TYPES_
5 #define	_DR_TYPES_
6 
7 #include <linux/mlx5/driver.h>
8 #include <linux/refcount.h>
9 #include "fs_core.h"
10 #include "wq.h"
11 #include "lib/mlx5.h"
12 #include "mlx5_ifc_dr.h"
13 #include "mlx5dr.h"
14 
15 #define DR_RULE_MAX_STES 17
16 #define DR_ACTION_MAX_STES 5
17 #define WIRE_PORT 0xFFFF
18 #define DR_STE_SVLAN 0x1
19 #define DR_STE_CVLAN 0x2
20 
21 #define mlx5dr_err(dmn, arg...) mlx5_core_err((dmn)->mdev, ##arg)
22 #define mlx5dr_info(dmn, arg...) mlx5_core_info((dmn)->mdev, ##arg)
23 #define mlx5dr_dbg(dmn, arg...) mlx5_core_dbg((dmn)->mdev, ##arg)
24 
/* ICM chunk sizes, expressed as log2 of the number of entries in the
 * chunk (see mlx5dr_icm_pool_chunk_size_to_entries()): DR_CHUNK_SIZE_1
 * is a single entry, DR_CHUNK_SIZE_2K is 2048 entries, etc.
 */
enum mlx5dr_icm_chunk_size {
	DR_CHUNK_SIZE_1,
	DR_CHUNK_SIZE_MIN = DR_CHUNK_SIZE_1, /* keep updated when changing */
	DR_CHUNK_SIZE_2,
	DR_CHUNK_SIZE_4,
	DR_CHUNK_SIZE_8,
	DR_CHUNK_SIZE_16,
	DR_CHUNK_SIZE_32,
	DR_CHUNK_SIZE_64,
	DR_CHUNK_SIZE_128,
	DR_CHUNK_SIZE_256,
	DR_CHUNK_SIZE_512,
	DR_CHUNK_SIZE_1K,
	DR_CHUNK_SIZE_2K,
	DR_CHUNK_SIZE_4K,
	DR_CHUNK_SIZE_8K,
	DR_CHUNK_SIZE_16K,
	DR_CHUNK_SIZE_32K,
	DR_CHUNK_SIZE_64K,
	DR_CHUNK_SIZE_128K,
	DR_CHUNK_SIZE_256K,
	DR_CHUNK_SIZE_512K,
	DR_CHUNK_SIZE_1024K,
	DR_CHUNK_SIZE_2048K,
	DR_CHUNK_SIZE_MAX,
};
51 
/* Kind of device ICM memory an ICM pool serves */
enum mlx5dr_icm_type {
	DR_ICM_TYPE_STE,           /* steering table entries */
	DR_ICM_TYPE_MODIFY_ACTION, /* modify-header actions */
};
56 
57 static inline enum mlx5dr_icm_chunk_size
58 mlx5dr_icm_next_higher_chunk(enum mlx5dr_icm_chunk_size chunk)
59 {
60 	chunk += 2;
61 	if (chunk < DR_CHUNK_SIZE_MAX)
62 		return chunk;
63 
64 	return DR_CHUNK_SIZE_MAX;
65 }
66 
/* STE layout sizes, in bytes (DR_STE_SIZE also sizes data buffers,
 * see struct mlx5dr_ste_send_info)
 */
enum {
	DR_STE_SIZE = 64,
	DR_STE_SIZE_CTRL = 32,
	DR_STE_SIZE_TAG = 16,
	DR_STE_SIZE_MASK = 16,
};

enum {
	/* STE size without the trailing mask portion */
	DR_STE_SIZE_REDUCED = DR_STE_SIZE - DR_STE_SIZE_MASK,
};

/* Size of a single modify-header action, in bytes */
enum {
	DR_MODIFY_ACTION_SIZE = 8,
};

/* Bitmask of the match-parameter sections a matcher uses,
 * one bit per member of struct mlx5dr_match_param.
 */
enum mlx5dr_matcher_criteria {
	DR_MATCHER_CRITERIA_EMPTY = 0,
	DR_MATCHER_CRITERIA_OUTER = 1 << 0,
	DR_MATCHER_CRITERIA_MISC = 1 << 1,
	DR_MATCHER_CRITERIA_INNER = 1 << 2,
	DR_MATCHER_CRITERIA_MISC2 = 1 << 3,
	DR_MATCHER_CRITERIA_MISC3 = 1 << 4,
	DR_MATCHER_CRITERIA_MAX = 1 << 5,
};
91 
/* Types of SW steering actions that can be attached to a rule */
enum mlx5dr_action_type {
	DR_ACTION_TYP_TNL_L2_TO_L2,
	DR_ACTION_TYP_L2_TO_TNL_L2,
	DR_ACTION_TYP_TNL_L3_TO_L2,
	DR_ACTION_TYP_L2_TO_TNL_L3,
	DR_ACTION_TYP_DROP,
	DR_ACTION_TYP_QP,
	DR_ACTION_TYP_FT,
	DR_ACTION_TYP_CTR,
	DR_ACTION_TYP_TAG,
	DR_ACTION_TYP_MODIFY_HDR,
	DR_ACTION_TYP_VPORT,
	DR_ACTION_TYP_POP_VLAN,
	DR_ACTION_TYP_PUSH_VLAN,
	DR_ACTION_TYP_MAX,
};

/* IP version selector, used to index the per-[outer][inner] IP version
 * STE builder arrays (see struct mlx5dr_matcher_rx_tx)
 */
enum mlx5dr_ipv {
	DR_RULE_IPV4,
	DR_RULE_IPV6,
	DR_RULE_IPV_MAX,
};
114 
115 struct mlx5dr_icm_pool;
116 struct mlx5dr_icm_chunk;
117 struct mlx5dr_icm_bucket;
118 struct mlx5dr_ste_htbl;
119 struct mlx5dr_match_param;
120 struct mlx5dr_cmd_caps;
121 struct mlx5dr_matcher_rx_tx;
122 
/* SW representation of a single steering table entry (STE) */
struct mlx5dr_ste {
	/* HW-format data of this entry; presumably points into the
	 * owning table's hw_ste_arr — TODO confirm
	 */
	u8 *hw_ste;
	/* refcount: indicates the num of rules that using this ste */
	u32 refcount;

	/* attached to the miss_list head at each htbl entry */
	struct list_head miss_list_node;

	/* each rule member that uses this ste attached here */
	struct list_head rule_list;

	/* this ste is member of htbl */
	struct mlx5dr_ste_htbl *htbl;

	/* next hash table this STE chains to, if any */
	struct mlx5dr_ste_htbl *next_htbl;

	/* this ste is part of a rule, located in ste's chain */
	u8 ste_chain_location;
};
142 
/* Occupancy statistics used to decide whether a hash table should grow */
struct mlx5dr_ste_htbl_ctrl {
	/* total number of valid entries belonging to this hash table. This
	 * includes the non collision and collision entries
	 */
	unsigned int num_of_valid_entries;

	/* total number of collisions entries attached to this table */
	unsigned int num_of_collisions;
	/* growth trigger level; exact heuristic lives in the .c files —
	 * TODO confirm
	 */
	unsigned int increase_threshold;
	u8 may_grow:1;
};

/* A hash table of STEs backed by one ICM chunk */
struct mlx5dr_ste_htbl {
	u8 lu_type;    /* HW lookup type */
	u16 byte_mask;
	u32 refcount;  /* managed via mlx5dr_htbl_get()/mlx5dr_htbl_put() */
	struct mlx5dr_icm_chunk *chunk; /* ICM memory backing this table */
	struct mlx5dr_ste *ste_arr;     /* SW shadow of the entries */
	u8 *hw_ste_arr;                 /* HW-format entries */

	/* per-entry collision (miss) list heads */
	struct list_head *miss_list;

	enum mlx5dr_icm_chunk_size chunk_size;
	struct mlx5dr_ste *pointing_ste;

	struct mlx5dr_ste_htbl_ctrl ctrl;
};

/* A pending partial/full STE write, queued on a send list */
struct mlx5dr_ste_send_info {
	struct mlx5dr_ste *ste;
	struct list_head send_list;
	u16 size;   /* bytes to send */
	u16 offset; /* byte offset within the STE */
	/* inline copy of the data, used per the copy_data parameter of
	 * mlx5dr_send_fill_and_append_ste_send_info()
	 */
	u8 data_cont[DR_STE_SIZE];
	u8 *data;
};
179 
180 void mlx5dr_send_fill_and_append_ste_send_info(struct mlx5dr_ste *ste, u16 size,
181 					       u16 offset, u8 *data,
182 					       struct mlx5dr_ste_send_info *ste_info,
183 					       struct list_head *send_list,
184 					       bool copy_data);
185 
/* Builder for one STE in a matcher chain: carries the match bit mask
 * and the callback that formats the per-rule match tag.
 */
struct mlx5dr_ste_build {
	u8 inner:1; /* build against inner (tunneled) headers */
	u8 rx:1;    /* RX direction */
	u8 vhca_id_valid:1;
	struct mlx5dr_domain *dmn;
	struct mlx5dr_cmd_caps *caps;
	u8 lu_type;    /* HW lookup type of the produced STE */
	u16 byte_mask;
	u8 bit_mask[DR_STE_SIZE_MASK];
	/* writes the match tag derived from @spec into @hw_ste_p */
	int (*ste_build_tag_func)(struct mlx5dr_match_param *spec,
				  struct mlx5dr_ste_build *sb,
				  u8 *hw_ste_p);
};
199 
200 struct mlx5dr_ste_htbl *
201 mlx5dr_ste_htbl_alloc(struct mlx5dr_icm_pool *pool,
202 		      enum mlx5dr_icm_chunk_size chunk_size,
203 		      u8 lu_type, u16 byte_mask);
204 
205 int mlx5dr_ste_htbl_free(struct mlx5dr_ste_htbl *htbl);
206 
207 static inline void mlx5dr_htbl_put(struct mlx5dr_ste_htbl *htbl)
208 {
209 	htbl->refcount--;
210 	if (!htbl->refcount)
211 		mlx5dr_ste_htbl_free(htbl);
212 }
213 
214 static inline void mlx5dr_htbl_get(struct mlx5dr_ste_htbl *htbl)
215 {
216 	htbl->refcount++;
217 }
218 
219 /* STE utils */
220 u32 mlx5dr_ste_calc_hash_index(u8 *hw_ste_p, struct mlx5dr_ste_htbl *htbl);
221 void mlx5dr_ste_init(u8 *hw_ste_p, u8 lu_type, u8 entry_type, u16 gvmi);
222 void mlx5dr_ste_always_hit_htbl(struct mlx5dr_ste *ste,
223 				struct mlx5dr_ste_htbl *next_htbl);
224 void mlx5dr_ste_set_miss_addr(u8 *hw_ste, u64 miss_addr);
225 u64 mlx5dr_ste_get_miss_addr(u8 *hw_ste);
226 void mlx5dr_ste_set_hit_gvmi(u8 *hw_ste_p, u16 gvmi);
227 void mlx5dr_ste_set_hit_addr(u8 *hw_ste, u64 icm_addr, u32 ht_size);
228 void mlx5dr_ste_always_miss_addr(struct mlx5dr_ste *ste, u64 miss_addr);
229 void mlx5dr_ste_set_bit_mask(u8 *hw_ste_p, u8 *bit_mask);
230 bool mlx5dr_ste_not_used_ste(struct mlx5dr_ste *ste);
231 bool mlx5dr_ste_is_last_in_rule(struct mlx5dr_matcher_rx_tx *nic_matcher,
232 				u8 ste_location);
233 void mlx5dr_ste_rx_set_flow_tag(u8 *hw_ste_p, u32 flow_tag);
234 void mlx5dr_ste_set_counter_id(u8 *hw_ste_p, u32 ctr_id);
235 void mlx5dr_ste_set_tx_encap(void *hw_ste_p, u32 reformat_id,
236 			     int size, bool encap_l3);
237 void mlx5dr_ste_set_rx_decap(u8 *hw_ste_p);
238 void mlx5dr_ste_set_rx_decap_l3(u8 *hw_ste_p, bool vlan);
239 void mlx5dr_ste_set_rx_pop_vlan(u8 *hw_ste_p);
240 void mlx5dr_ste_set_tx_push_vlan(u8 *hw_ste_p, u32 vlan_tpid_pcp_dei_vid,
241 				 bool go_back);
242 void mlx5dr_ste_set_entry_type(u8 *hw_ste_p, u8 entry_type);
243 u8 mlx5dr_ste_get_entry_type(u8 *hw_ste_p);
244 void mlx5dr_ste_set_rewrite_actions(u8 *hw_ste_p, u16 num_of_actions,
245 				    u32 re_write_index);
246 void mlx5dr_ste_set_go_back_bit(u8 *hw_ste_p);
247 u64 mlx5dr_ste_get_icm_addr(struct mlx5dr_ste *ste);
248 u64 mlx5dr_ste_get_mr_addr(struct mlx5dr_ste *ste);
249 struct list_head *mlx5dr_ste_get_miss_list(struct mlx5dr_ste *ste);
250 
251 void mlx5dr_ste_free(struct mlx5dr_ste *ste,
252 		     struct mlx5dr_matcher *matcher,
253 		     struct mlx5dr_matcher_rx_tx *nic_matcher);
254 static inline void mlx5dr_ste_put(struct mlx5dr_ste *ste,
255 				  struct mlx5dr_matcher *matcher,
256 				  struct mlx5dr_matcher_rx_tx *nic_matcher)
257 {
258 	ste->refcount--;
259 	if (!ste->refcount)
260 		mlx5dr_ste_free(ste, matcher, nic_matcher);
261 }
262 
263 /* initial as 0, increased only when ste appears in a new rule */
264 static inline void mlx5dr_ste_get(struct mlx5dr_ste *ste)
265 {
266 	ste->refcount++;
267 }
268 
269 void mlx5dr_ste_set_hit_addr_by_next_htbl(u8 *hw_ste,
270 					  struct mlx5dr_ste_htbl *next_htbl);
271 bool mlx5dr_ste_equal_tag(void *src, void *dst);
272 int mlx5dr_ste_create_next_htbl(struct mlx5dr_matcher *matcher,
273 				struct mlx5dr_matcher_rx_tx *nic_matcher,
274 				struct mlx5dr_ste *ste,
275 				u8 *cur_hw_ste,
276 				enum mlx5dr_icm_chunk_size log_table_size);
277 
278 /* STE build functions */
279 int mlx5dr_ste_build_pre_check(struct mlx5dr_domain *dmn,
280 			       u8 match_criteria,
281 			       struct mlx5dr_match_param *mask,
282 			       struct mlx5dr_match_param *value);
283 int mlx5dr_ste_build_ste_arr(struct mlx5dr_matcher *matcher,
284 			     struct mlx5dr_matcher_rx_tx *nic_matcher,
285 			     struct mlx5dr_match_param *value,
286 			     u8 *ste_arr);
287 int mlx5dr_ste_build_eth_l2_src_des(struct mlx5dr_ste_build *builder,
288 				    struct mlx5dr_match_param *mask,
289 				    bool inner, bool rx);
290 void mlx5dr_ste_build_eth_l3_ipv4_5_tuple(struct mlx5dr_ste_build *sb,
291 					  struct mlx5dr_match_param *mask,
292 					  bool inner, bool rx);
293 void mlx5dr_ste_build_eth_l3_ipv4_misc(struct mlx5dr_ste_build *sb,
294 				       struct mlx5dr_match_param *mask,
295 				       bool inner, bool rx);
296 void mlx5dr_ste_build_eth_l3_ipv6_dst(struct mlx5dr_ste_build *sb,
297 				      struct mlx5dr_match_param *mask,
298 				      bool inner, bool rx);
299 void mlx5dr_ste_build_eth_l3_ipv6_src(struct mlx5dr_ste_build *sb,
300 				      struct mlx5dr_match_param *mask,
301 				      bool inner, bool rx);
302 void mlx5dr_ste_build_eth_l2_src(struct mlx5dr_ste_build *sb,
303 				 struct mlx5dr_match_param *mask,
304 				 bool inner, bool rx);
305 void mlx5dr_ste_build_eth_l2_dst(struct mlx5dr_ste_build *sb,
306 				 struct mlx5dr_match_param *mask,
307 				 bool inner, bool rx);
308 void mlx5dr_ste_build_eth_l2_tnl(struct mlx5dr_ste_build *sb,
309 				 struct mlx5dr_match_param *mask,
310 				 bool inner, bool rx);
311 void mlx5dr_ste_build_ipv6_l3_l4(struct mlx5dr_ste_build *sb,
312 				 struct mlx5dr_match_param *mask,
313 				 bool inner, bool rx);
314 void mlx5dr_ste_build_eth_l4_misc(struct mlx5dr_ste_build *sb,
315 				  struct mlx5dr_match_param *mask,
316 				  bool inner, bool rx);
317 void mlx5dr_ste_build_gre(struct mlx5dr_ste_build *sb,
318 			  struct mlx5dr_match_param *mask,
319 			  bool inner, bool rx);
320 void mlx5dr_ste_build_mpls(struct mlx5dr_ste_build *sb,
321 			   struct mlx5dr_match_param *mask,
322 			   bool inner, bool rx);
323 void mlx5dr_ste_build_flex_parser_0(struct mlx5dr_ste_build *sb,
324 				    struct mlx5dr_match_param *mask,
325 				    bool inner, bool rx);
326 int mlx5dr_ste_build_flex_parser_1(struct mlx5dr_ste_build *sb,
327 				   struct mlx5dr_match_param *mask,
328 				   struct mlx5dr_cmd_caps *caps,
329 				   bool inner, bool rx);
330 void mlx5dr_ste_build_flex_parser_tnl_vxlan_gpe(struct mlx5dr_ste_build *sb,
331 						struct mlx5dr_match_param *mask,
332 						bool inner, bool rx);
333 void mlx5dr_ste_build_flex_parser_tnl_geneve(struct mlx5dr_ste_build *sb,
334 					     struct mlx5dr_match_param *mask,
335 					     bool inner, bool rx);
336 void mlx5dr_ste_build_general_purpose(struct mlx5dr_ste_build *sb,
337 				      struct mlx5dr_match_param *mask,
338 				      bool inner, bool rx);
339 void mlx5dr_ste_build_register_0(struct mlx5dr_ste_build *sb,
340 				 struct mlx5dr_match_param *mask,
341 				 bool inner, bool rx);
342 void mlx5dr_ste_build_register_1(struct mlx5dr_ste_build *sb,
343 				 struct mlx5dr_match_param *mask,
344 				 bool inner, bool rx);
345 int mlx5dr_ste_build_src_gvmi_qpn(struct mlx5dr_ste_build *sb,
346 				  struct mlx5dr_match_param *mask,
347 				  struct mlx5dr_domain *dmn,
348 				  bool inner, bool rx);
349 void mlx5dr_ste_build_empty_always_hit(struct mlx5dr_ste_build *sb, bool rx);
350 
351 /* Actions utils */
352 int mlx5dr_actions_build_ste_arr(struct mlx5dr_matcher *matcher,
353 				 struct mlx5dr_matcher_rx_tx *nic_matcher,
354 				 struct mlx5dr_action *actions[],
355 				 u32 num_actions,
356 				 u8 *ste_arr,
357 				 u32 *new_hw_ste_arr_sz);
358 
/* Match values/masks for packet header fields (outer or inner headers) */
struct mlx5dr_match_spec {
	u32 smac_47_16;		/* Source MAC address of incoming packet */
	/* Incoming packet Ethertype - this is the Ethertype
	 * following the last VLAN tag of the packet
	 */
	u32 ethertype:16;
	u32 smac_15_0:16;	/* Source MAC address of incoming packet */
	u32 dmac_47_16;		/* Destination MAC address of incoming packet */
	/* VLAN ID of first VLAN tag in the incoming packet.
	 * Valid only when cvlan_tag==1 or svlan_tag==1
	 */
	u32 first_vid:12;
	/* CFI bit of first VLAN tag in the incoming packet.
	 * Valid only when cvlan_tag==1 or svlan_tag==1
	 */
	u32 first_cfi:1;
	/* Priority of first VLAN tag in the incoming packet.
	 * Valid only when cvlan_tag==1 or svlan_tag==1
	 */
	u32 first_prio:3;
	u32 dmac_15_0:16;	/* Destination MAC address of incoming packet */
	/* TCP flags. ;Bit 0: FIN;Bit 1: SYN;Bit 2: RST;Bit 3: PSH;Bit 4: ACK;
	 *             Bit 5: URG;Bit 6: ECE;Bit 7: CWR;Bit 8: NS
	 */
	u32 tcp_flags:9;
	u32 ip_version:4;	/* IP version */
	u32 frag:1;		/* Packet is an IP fragment */
	/* The first vlan in the packet is s-vlan (0x88a8).
	 * cvlan_tag and svlan_tag cannot be set together
	 */
	u32 svlan_tag:1;
	/* The first vlan in the packet is c-vlan (0x8100).
	 * cvlan_tag and svlan_tag cannot be set together
	 */
	u32 cvlan_tag:1;
	/* Explicit Congestion Notification derived from
	 * Traffic Class/TOS field of IPv6/v4
	 */
	u32 ip_ecn:2;
	/* Differentiated Services Code Point derived from
	 * Traffic Class/TOS field of IPv6/v4
	 */
	u32 ip_dscp:6;
	u32 ip_protocol:8;	/* IP protocol */
	/* TCP destination port.
	 * tcp and udp sport/dport are mutually exclusive
	 */
	u32 tcp_dport:16;
	/* TCP source port.;tcp and udp sport/dport are mutually exclusive */
	u32 tcp_sport:16;
	u32 ttl_hoplimit:8;
	u32 reserved:24;
	/* UDP destination port.;tcp and udp sport/dport are mutually exclusive */
	u32 udp_dport:16;
	/* UDP source port.;tcp and udp sport/dport are mutually exclusive */
	u32 udp_sport:16;
	/* IPv6 source address of incoming packets
	 * For IPv4 address use bits 31:0 (rest of the bits are reserved)
	 * This field should be qualified by an appropriate ethertype
	 */
	u32 src_ip_127_96;
	/* IPv6 source address of incoming packets
	 * For IPv4 address use bits 31:0 (rest of the bits are reserved)
	 * This field should be qualified by an appropriate ethertype
	 */
	u32 src_ip_95_64;
	/* IPv6 source address of incoming packets
	 * For IPv4 address use bits 31:0 (rest of the bits are reserved)
	 * This field should be qualified by an appropriate ethertype
	 */
	u32 src_ip_63_32;
	/* IPv6 source address of incoming packets
	 * For IPv4 address use bits 31:0 (rest of the bits are reserved)
	 * This field should be qualified by an appropriate ethertype
	 */
	u32 src_ip_31_0;
	/* IPv6 destination address of incoming packets
	 * For IPv4 address use bits 31:0 (rest of the bits are reserved)
	 * This field should be qualified by an appropriate ethertype
	 */
	u32 dst_ip_127_96;
	/* IPv6 destination address of incoming packets
	 * For IPv4 address use bits 31:0 (rest of the bits are reserved)
	 * This field should be qualified by an appropriate ethertype
	 */
	u32 dst_ip_95_64;
	/* IPv6 destination address of incoming packets
	 * For IPv4 address use bits 31:0 (rest of the bits are reserved)
	 * This field should be qualified by an appropriate ethertype
	 */
	u32 dst_ip_63_32;
	/* IPv6 destination address of incoming packets
	 * For IPv4 address use bits 31:0 (rest of the bits are reserved)
	 * This field should be qualified by an appropriate ethertype
	 */
	u32 dst_ip_31_0;
};
456 
/* Miscellaneous match fields: source port/SQN, second VLAN tags,
 * GRE/VXLAN/GENEVE tunnel headers, IPv6 flow labels, BTH.
 */
struct mlx5dr_match_misc {
	u32 source_sqn:24;		/* Source SQN */
	u32 source_vhca_port:4;
	/* used with GRE, sequence number exist when gre_s_present == 1 */
	u32 gre_s_present:1;
	/* used with GRE, key exist when gre_k_present == 1 */
	u32 gre_k_present:1;
	u32 reserved_auto1:1;
	/* used with GRE, checksum exist when gre_c_present == 1 */
	u32 gre_c_present:1;
	/* Source port.;0xffff determines wire port */
	u32 source_port:16;
	u32 source_eswitch_owner_vhca_id:16;
	/* VLAN ID of first VLAN tag the inner header of the incoming packet.
	 * Valid only when inner_second_cvlan_tag ==1 or inner_second_svlan_tag ==1
	 */
	u32 inner_second_vid:12;
	/* CFI bit of first VLAN tag in the inner header of the incoming packet.
	 * Valid only when inner_second_cvlan_tag ==1 or inner_second_svlan_tag ==1
	 */
	u32 inner_second_cfi:1;
	/* Priority of second VLAN tag in the inner header of the incoming packet.
	 * Valid only when inner_second_cvlan_tag ==1 or inner_second_svlan_tag ==1
	 */
	u32 inner_second_prio:3;
	/* VLAN ID of first VLAN tag the outer header of the incoming packet.
	 * Valid only when outer_second_cvlan_tag ==1 or outer_second_svlan_tag ==1
	 */
	u32 outer_second_vid:12;
	/* CFI bit of first VLAN tag in the outer header of the incoming packet.
	 * Valid only when outer_second_cvlan_tag ==1 or outer_second_svlan_tag ==1
	 */
	u32 outer_second_cfi:1;
	/* Priority of second VLAN tag in the outer header of the incoming packet.
	 * Valid only when outer_second_cvlan_tag ==1 or outer_second_svlan_tag ==1
	 */
	u32 outer_second_prio:3;
	u32 gre_protocol:16;		/* GRE Protocol (outer) */
	u32 reserved_auto3:12;
	/* The second vlan in the inner header of the packet is s-vlan (0x88a8).
	 * inner_second_cvlan_tag and inner_second_svlan_tag cannot be set together
	 */
	u32 inner_second_svlan_tag:1;
	/* The second vlan in the outer header of the packet is s-vlan (0x88a8).
	 * outer_second_cvlan_tag and outer_second_svlan_tag cannot be set together
	 */
	u32 outer_second_svlan_tag:1;
	/* The second vlan in the inner header of the packet is c-vlan (0x8100).
	 * inner_second_cvlan_tag and inner_second_svlan_tag cannot be set together
	 */
	u32 inner_second_cvlan_tag:1;
	/* The second vlan in the outer header of the packet is c-vlan (0x8100).
	 * outer_second_cvlan_tag and outer_second_svlan_tag cannot be set together
	 */
	u32 outer_second_cvlan_tag:1;
	u32 gre_key_l:8;		/* GRE Key [7:0] (outer) */
	u32 gre_key_h:24;		/* GRE Key[31:8] (outer) */
	u32 reserved_auto4:8;
	u32 vxlan_vni:24;		/* VXLAN VNI (outer) */
	u32 geneve_oam:1;		/* GENEVE OAM field (outer) */
	u32 reserved_auto5:7;
	u32 geneve_vni:24;		/* GENEVE VNI field (outer) */
	u32 outer_ipv6_flow_label:20;	/* Flow label of incoming IPv6 packet (outer) */
	u32 reserved_auto6:12;
	u32 inner_ipv6_flow_label:20;	/* Flow label of incoming IPv6 packet (inner) */
	u32 reserved_auto7:12;
	u32 geneve_protocol_type:16;	/* GENEVE protocol type (outer) */
	u32 geneve_opt_len:6;		/* GENEVE OptLen (outer) */
	u32 reserved_auto8:10;
	u32 bth_dst_qp:24;		/* Destination QP in BTH header */
	u32 reserved_auto9:8;
	u8 reserved_auto10[20];
};
530 
/* Misc2 match fields: MPLS labels (plain and over GRE/UDP) and the
 * metadata registers.
 */
struct mlx5dr_match_misc2 {
	u32 outer_first_mpls_ttl:8;		/* First MPLS TTL (outer) */
	u32 outer_first_mpls_s_bos:1;		/* First MPLS S_BOS (outer) */
	u32 outer_first_mpls_exp:3;		/* First MPLS EXP (outer) */
	u32 outer_first_mpls_label:20;		/* First MPLS LABEL (outer) */
	u32 inner_first_mpls_ttl:8;		/* First MPLS TTL (inner) */
	u32 inner_first_mpls_s_bos:1;		/* First MPLS S_BOS (inner) */
	u32 inner_first_mpls_exp:3;		/* First MPLS EXP (inner) */
	u32 inner_first_mpls_label:20;		/* First MPLS LABEL (inner) */
	u32 outer_first_mpls_over_gre_ttl:8;	/* last MPLS TTL (outer) */
	u32 outer_first_mpls_over_gre_s_bos:1;	/* last MPLS S_BOS (outer) */
	u32 outer_first_mpls_over_gre_exp:3;	/* last MPLS EXP (outer) */
	u32 outer_first_mpls_over_gre_label:20;	/* last MPLS LABEL (outer) */
	u32 outer_first_mpls_over_udp_ttl:8;	/* last MPLS TTL (outer) */
	u32 outer_first_mpls_over_udp_s_bos:1;	/* last MPLS S_BOS (outer) */
	u32 outer_first_mpls_over_udp_exp:3;	/* last MPLS EXP (outer) */
	u32 outer_first_mpls_over_udp_label:20;	/* last MPLS LABEL (outer) */
	u32 metadata_reg_c_7;			/* metadata_reg_c_7 */
	u32 metadata_reg_c_6;			/* metadata_reg_c_6 */
	u32 metadata_reg_c_5;			/* metadata_reg_c_5 */
	u32 metadata_reg_c_4;			/* metadata_reg_c_4 */
	u32 metadata_reg_c_3;			/* metadata_reg_c_3 */
	u32 metadata_reg_c_2;			/* metadata_reg_c_2 */
	u32 metadata_reg_c_1;			/* metadata_reg_c_1 */
	u32 metadata_reg_c_0;			/* metadata_reg_c_0 */
	u32 metadata_reg_a;			/* metadata_reg_a */
	u32 metadata_reg_b;			/* metadata_reg_b */
	u8 reserved_auto2[8];
};
560 
/* Misc3 match fields: TCP seq/ack numbers, VXLAN-GPE and ICMP headers */
struct mlx5dr_match_misc3 {
	u32 inner_tcp_seq_num;
	u32 outer_tcp_seq_num;
	u32 inner_tcp_ack_num;
	u32 outer_tcp_ack_num;
	u32 outer_vxlan_gpe_vni:24;
	u32 reserved_auto1:8;
	u32 reserved_auto2:16;
	u32 outer_vxlan_gpe_flags:8;
	u32 outer_vxlan_gpe_next_protocol:8;
	u32 icmpv4_header_data;
	u32 icmpv6_header_data;
	u32 icmpv6_code:8;
	u32 icmpv6_type:8;
	u32 icmpv4_code:8;
	u32 icmpv4_type:8;
	u8 reserved_auto3[0x1c];
};
579 
/* Full match parameter set; the sections used by a matcher are selected
 * by its enum mlx5dr_matcher_criteria bits.
 */
struct mlx5dr_match_param {
	struct mlx5dr_match_spec outer;
	struct mlx5dr_match_misc misc;
	struct mlx5dr_match_spec inner;
	struct mlx5dr_match_misc2 misc2;
	struct mlx5dr_match_misc3 misc3;
};

/* True when any ICMPv4 field is set in the given misc3 section */
#define DR_MASK_IS_FLEX_PARSER_ICMPV4_SET(_misc3) ((_misc3)->icmpv4_type || \
						   (_misc3)->icmpv4_code || \
						   (_misc3)->icmpv4_header_data)

/* E-switch steering addresses/capabilities queried from FW */
struct mlx5dr_esw_caps {
	u64 drop_icm_address_rx;
	u64 drop_icm_address_tx;
	u64 uplink_icm_address_rx;
	u64 uplink_icm_address_tx;
	bool sw_owner;
};

/* Per-vport steering capabilities (see mlx5dr_get_vport_cap()) */
struct mlx5dr_cmd_vport_cap {
	u16 vport_gvmi;
	u16 vhca_gvmi;
	u64 icm_address_rx;
	u64 icm_address_tx;
	u32 num;
};
607 
/* Device/FW capabilities relevant to SW steering, filled by
 * mlx5dr_cmd_query_device()
 */
struct mlx5dr_cmd_caps {
	u16 gvmi;
	u64 nic_rx_drop_address;
	u64 nic_tx_drop_address;
	u64 nic_tx_allow_address;
	u64 esw_rx_drop_address;
	u64 esw_tx_drop_address;
	u32 log_icm_size;
	u64 hdr_modify_icm_addr;
	/* bitmap of supported flex parsers, see
	 * mlx5dr_matcher_supp_flex_parser_icmp_v4()/_v6()
	 */
	u32 flex_protocols;
	u8 flex_parser_id_icmp_dw0;
	u8 flex_parser_id_icmp_dw1;
	u8 flex_parser_id_icmpv6_dw0;
	u8 flex_parser_id_icmpv6_dw1;
	u8 max_ft_level;
	u16 roce_min_src_udp;
	u8 num_esw_ports;
	bool eswitch_manager;
	bool rx_sw_owner;
	bool tx_sw_owner;
	bool fdb_sw_owner;
	u32 num_vports;
	struct mlx5dr_esw_caps esw_caps;
	/* array of num_vports + 1 entries; last slot is the wire port,
	 * see mlx5dr_get_vport_cap()
	 */
	struct mlx5dr_cmd_vport_cap *vports_caps;
	bool prio_tag_required;
};
634 
/* Per-direction (RX or TX) domain defaults */
struct mlx5dr_domain_rx_tx {
	u64 drop_icm_addr;
	u64 default_icm_addr;
	enum mlx5dr_ste_entry_type ste_type;
};

/* Domain-wide limits and capabilities */
struct mlx5dr_domain_info {
	bool supp_sw_steering; /* SW steering is supported for this domain */
	u32 max_inline_size;
	u32 max_send_wr;
	u32 max_log_sw_icm_sz;
	u32 max_log_action_icm_sz;
	struct mlx5dr_domain_rx_tx rx;
	struct mlx5dr_domain_rx_tx tx;
	struct mlx5dr_cmd_caps caps;
};

/* FW objects cached by the domain */
struct mlx5dr_domain_cache {
	struct mlx5dr_fw_recalc_cs_ft **recalc_cs_ft;
};
655 
/* Top-level SW steering domain: owns the ICM pools, the send ring and
 * all tables/matchers/rules created under it.
 */
struct mlx5dr_domain {
	struct mlx5dr_domain *peer_dmn;
	struct mlx5_core_dev *mdev;
	u32 pdn; /* protection domain number */
	struct mlx5_uars_page *uar;
	enum mlx5dr_domain_type type;
	refcount_t refcount;
	struct mutex mutex; /* protect domain */
	struct mlx5dr_icm_pool *ste_icm_pool;    /* DR_ICM_TYPE_STE pool */
	struct mlx5dr_icm_pool *action_icm_pool; /* DR_ICM_TYPE_MODIFY_ACTION pool */
	struct mlx5dr_send_ring *send_ring;
	struct mlx5dr_domain_info info;
	struct mlx5dr_domain_cache cache;
};
670 
/* Per-direction table state */
struct mlx5dr_table_rx_tx {
	struct mlx5dr_ste_htbl *s_anchor; /* table's anchor hash table */
	struct mlx5dr_domain_rx_tx *nic_dmn;
	u64 default_icm_addr;
};

/* A SW steering flow table, holding an ordered list of matchers */
struct mlx5dr_table {
	struct mlx5dr_domain *dmn;
	struct mlx5dr_table_rx_tx rx;
	struct mlx5dr_table_rx_tx tx;
	u32 level;
	u32 table_type;
	u32 table_id;
	u32 flags;
	struct list_head matcher_list;
	struct mlx5dr_action *miss_action;
	refcount_t refcount;
};
689 
/* Per-direction matcher state: start/end hash tables and the STE
 * builders selected per outer/inner IP version.
 */
struct mlx5dr_matcher_rx_tx {
	struct mlx5dr_ste_htbl *s_htbl;   /* start hash table */
	struct mlx5dr_ste_htbl *e_anchor; /* end anchor hash table */
	/* builders for the currently selected [outer][inner] combination,
	 * see mlx5dr_matcher_select_builders()
	 */
	struct mlx5dr_ste_build *ste_builder;
	struct mlx5dr_ste_build ste_builder_arr[DR_RULE_IPV_MAX]
					       [DR_RULE_IPV_MAX]
					       [DR_RULE_MAX_STES];
	u8 num_of_builders;
	u8 num_of_builders_arr[DR_RULE_IPV_MAX][DR_RULE_IPV_MAX];
	u64 default_icm_addr;
	struct mlx5dr_table_rx_tx *nic_tbl;
};

/* A matcher: a set of rules sharing the same match criteria and mask */
struct mlx5dr_matcher {
	struct mlx5dr_table *tbl;
	struct mlx5dr_matcher_rx_tx rx;
	struct mlx5dr_matcher_rx_tx tx;
	struct list_head matcher_list; /* entry in tbl->matcher_list */
	u16 prio;
	struct mlx5dr_match_param mask;
	u8 match_criteria; /* enum mlx5dr_matcher_criteria bits */
	refcount_t refcount;
	struct mlx5dv_flow_matcher *dv_matcher;
};

/* Links one STE to one rule (a rule references a chain of STEs) */
struct mlx5dr_rule_member {
	struct mlx5dr_ste *ste;
	/* attached to mlx5dr_rule via this */
	struct list_head list;
	/* attached to mlx5dr_ste via this */
	struct list_head use_ste_list;
};
722 
/* A steering action; the union member in use is selected by action_type */
struct mlx5dr_action {
	enum mlx5dr_action_type action_type;
	refcount_t refcount;
	union {
		/* DR_ACTION_TYP_MODIFY_HDR */
		struct {
			struct mlx5dr_domain *dmn;
			struct mlx5dr_icm_chunk *chunk;
			u8 *data;
			u32 data_size;
			u16 num_of_actions;
			u32 index;
			u8 allow_rx:1;
			u8 allow_tx:1;
			u8 modify_ttl:1;
		} rewrite;
		/* packet reformat (encap/decap) actions */
		struct {
			struct mlx5dr_domain *dmn;
			u32 reformat_id;
			u32 reformat_size;
		} reformat;
		/* DR_ACTION_TYP_FT */
		struct {
			u8 is_fw_tbl:1;
			union {
				struct mlx5dr_table *tbl;
				struct {
					struct mlx5dr_domain *dmn;
					u32 id;
					u32 group_id;
					enum fs_flow_table_type type;
					u64 rx_icm_addr;
					u64 tx_icm_addr;
					struct mlx5dr_action **ref_actions;
					u32 num_of_ref_actions;
				} fw_tbl;
			};
		} dest_tbl;
		/* DR_ACTION_TYP_CTR */
		struct {
			u32 ctr_id;
			/* NOTE(review): field name is a typo for "offset";
			 * kept as-is since callers reference it by name
			 */
			u32 offeset;
		} ctr;
		/* DR_ACTION_TYP_VPORT */
		struct {
			struct mlx5dr_domain *dmn;
			struct mlx5dr_cmd_vport_cap *caps;
		} vport;
		/* DR_ACTION_TYP_PUSH_VLAN */
		struct {
			u32 vlan_hdr; /* tpid_pcp_dei_vid */
		} push_vlan;
		u32 flow_tag; /* DR_ACTION_TYP_TAG */
	};
};
773 
/* How a hash table is connected to what follows it */
enum mlx5dr_connect_type {
	CONNECT_HIT	= 1,
	CONNECT_MISS	= 2,
};

/* Connection target: a next hash table on hit, or an ICM miss address */
struct mlx5dr_htbl_connect_info {
	enum mlx5dr_connect_type type;
	union {
		struct mlx5dr_ste_htbl *hit_next_htbl;
		u64 miss_icm_addr;
	};
};

/* Per-direction rule state */
struct mlx5dr_rule_rx_tx {
	struct list_head rule_members_list; /* list of mlx5dr_rule_member */
	struct mlx5dr_matcher_rx_tx *nic_matcher;
};

/* A single steering rule installed through a matcher */
struct mlx5dr_rule {
	struct mlx5dr_matcher *matcher;
	struct mlx5dr_rule_rx_tx rx;
	struct mlx5dr_rule_rx_tx tx;
	struct list_head rule_actions_list;
};
798 
799 void mlx5dr_rule_update_rule_member(struct mlx5dr_ste *new_ste,
800 				    struct mlx5dr_ste *ste);
801 
/* A chunk of device ICM memory handed out by an ICM pool */
struct mlx5dr_icm_chunk {
	struct mlx5dr_icm_bucket *bucket; /* bucket this chunk came from */
	struct list_head chunk_list;
	u32 rkey;       /* remote key for RDMA writes — TODO confirm */
	u32 num_of_entries;
	u32 byte_size;
	u64 icm_addr;   /* device ICM address */
	u64 mr_addr;    /* MR-mapped address — TODO confirm */

	/* Memory optimisation */
	struct mlx5dr_ste *ste_arr;
	u8 *hw_ste_arr;
	struct list_head *miss_list;
};
816 
817 static inline int
818 mlx5dr_matcher_supp_flex_parser_icmp_v4(struct mlx5dr_cmd_caps *caps)
819 {
820 	return caps->flex_protocols & MLX5_FLEX_PARSER_ICMP_V4_ENABLED;
821 }
822 
823 static inline int
824 mlx5dr_matcher_supp_flex_parser_icmp_v6(struct mlx5dr_cmd_caps *caps)
825 {
826 	return caps->flex_protocols & MLX5_FLEX_PARSER_ICMP_V6_ENABLED;
827 }
828 
829 int mlx5dr_matcher_select_builders(struct mlx5dr_matcher *matcher,
830 				   struct mlx5dr_matcher_rx_tx *nic_matcher,
831 				   enum mlx5dr_ipv outer_ipv,
832 				   enum mlx5dr_ipv inner_ipv);
833 
834 static inline u32
835 mlx5dr_icm_pool_chunk_size_to_entries(enum mlx5dr_icm_chunk_size chunk_size)
836 {
837 	return 1 << chunk_size;
838 }
839 
840 static inline int
841 mlx5dr_icm_pool_chunk_size_to_byte(enum mlx5dr_icm_chunk_size chunk_size,
842 				   enum mlx5dr_icm_type icm_type)
843 {
844 	int num_of_entries;
845 	int entry_size;
846 
847 	if (icm_type == DR_ICM_TYPE_STE)
848 		entry_size = DR_STE_SIZE;
849 	else
850 		entry_size = DR_MODIFY_ACTION_SIZE;
851 
852 	num_of_entries = mlx5dr_icm_pool_chunk_size_to_entries(chunk_size);
853 
854 	return entry_size * num_of_entries;
855 }
856 
857 static inline struct mlx5dr_cmd_vport_cap *
858 mlx5dr_get_vport_cap(struct mlx5dr_cmd_caps *caps, u32 vport)
859 {
860 	if (!caps->vports_caps ||
861 	    (vport >= caps->num_vports && vport != WIRE_PORT))
862 		return NULL;
863 
864 	if (vport == WIRE_PORT)
865 		vport = caps->num_vports;
866 
867 	return &caps->vports_caps[vport];
868 }
869 
/* Output of mlx5dr_cmd_query_flow_table() */
struct mlx5dr_cmd_query_flow_table_details {
	u8 status;
	u8 level;
	u64 sw_owner_icm_root_1;
	u64 sw_owner_icm_root_0;
};

/* Parameters for mlx5dr_cmd_create_flow_table() */
struct mlx5dr_cmd_create_flow_table_attr {
	u32 table_type;
	u64 icm_addr_rx;
	u64 icm_addr_tx;
	u8 level;
	bool sw_owner;
	bool term_tbl;
	bool decap_en;
	bool reformat_en;
};
887 
888 /* internal API functions */
889 int mlx5dr_cmd_query_device(struct mlx5_core_dev *mdev,
890 			    struct mlx5dr_cmd_caps *caps);
891 int mlx5dr_cmd_query_esw_vport_context(struct mlx5_core_dev *mdev,
892 				       bool other_vport, u16 vport_number,
893 				       u64 *icm_address_rx,
894 				       u64 *icm_address_tx);
895 int mlx5dr_cmd_query_gvmi(struct mlx5_core_dev *mdev,
896 			  bool other_vport, u16 vport_number, u16 *gvmi);
897 int mlx5dr_cmd_query_esw_caps(struct mlx5_core_dev *mdev,
898 			      struct mlx5dr_esw_caps *caps);
899 int mlx5dr_cmd_sync_steering(struct mlx5_core_dev *mdev);
900 int mlx5dr_cmd_set_fte_modify_and_vport(struct mlx5_core_dev *mdev,
901 					u32 table_type,
902 					u32 table_id,
903 					u32 group_id,
904 					u32 modify_header_id,
905 					u32 vport_id);
906 int mlx5dr_cmd_del_flow_table_entry(struct mlx5_core_dev *mdev,
907 				    u32 table_type,
908 				    u32 table_id);
909 int mlx5dr_cmd_alloc_modify_header(struct mlx5_core_dev *mdev,
910 				   u32 table_type,
911 				   u8 num_of_actions,
912 				   u64 *actions,
913 				   u32 *modify_header_id);
914 int mlx5dr_cmd_dealloc_modify_header(struct mlx5_core_dev *mdev,
915 				     u32 modify_header_id);
916 int mlx5dr_cmd_create_empty_flow_group(struct mlx5_core_dev *mdev,
917 				       u32 table_type,
918 				       u32 table_id,
919 				       u32 *group_id);
920 int mlx5dr_cmd_destroy_flow_group(struct mlx5_core_dev *mdev,
921 				  u32 table_type,
922 				  u32 table_id,
923 				  u32 group_id);
924 int mlx5dr_cmd_create_flow_table(struct mlx5_core_dev *mdev,
925 				 struct mlx5dr_cmd_create_flow_table_attr *attr,
926 				 u64 *fdb_rx_icm_addr,
927 				 u32 *table_id);
928 int mlx5dr_cmd_destroy_flow_table(struct mlx5_core_dev *mdev,
929 				  u32 table_id,
930 				  u32 table_type);
931 int mlx5dr_cmd_query_flow_table(struct mlx5_core_dev *dev,
932 				enum fs_flow_table_type type,
933 				u32 table_id,
934 				struct mlx5dr_cmd_query_flow_table_details *output);
935 int mlx5dr_cmd_create_reformat_ctx(struct mlx5_core_dev *mdev,
936 				   enum mlx5_reformat_ctx_type rt,
937 				   size_t reformat_size,
938 				   void *reformat_data,
939 				   u32 *reformat_id);
940 void mlx5dr_cmd_destroy_reformat_ctx(struct mlx5_core_dev *mdev,
941 				     u32 reformat_id);
942 
/* GID table entry attributes, filled in by mlx5dr_cmd_query_gid() */
struct mlx5dr_cmd_gid_attr {
	u8 gid[16];	/* 128-bit GID value */
	u8 mac[6];	/* MAC address associated with this GID entry */
	u32 roce_ver;	/* RoCE version of this GID entry */
};
948 
/* Parameters for the FW command that creates the SW steering QP */
struct mlx5dr_cmd_qp_create_attr {
	u32 page_id;		/* UAR page index — NOTE(review): confirm against FW layout */
	u32 pdn;		/* protection domain number */
	u32 cqn;		/* completion queue number */
	u32 pm_state;		/* path migration state */
	u32 service_type;	/* QP transport service type */
	u32 buff_umem_id;	/* umem id backing the WQ buffer */
	u32 db_umem_id;		/* umem id backing the doorbell record */
	u32 sq_wqe_cnt;		/* send queue WQE count */
	u32 rq_wqe_cnt;		/* receive queue WQE count */
	u32 rq_wqe_shift;	/* log2 of the receive WQE stride */
};
961 
int mlx5dr_cmd_query_gid(struct mlx5_core_dev *mdev, u8 vhca_port_num,
			 u16 index, struct mlx5dr_cmd_gid_attr *attr);

/* ICM pool: chunk allocator for device ICM memory; @icm_type selects
 * STE vs. modify-action memory (see enum mlx5dr_icm_type).
 */
struct mlx5dr_icm_pool *mlx5dr_icm_pool_create(struct mlx5dr_domain *dmn,
					       enum mlx5dr_icm_type icm_type);
void mlx5dr_icm_pool_destroy(struct mlx5dr_icm_pool *pool);

struct mlx5dr_icm_chunk *
mlx5dr_icm_alloc_chunk(struct mlx5dr_icm_pool *pool,
		       enum mlx5dr_icm_chunk_size chunk_size);
void mlx5dr_icm_free_chunk(struct mlx5dr_icm_chunk *chunk);

/* STE (steering table entry) and STE hash-table helpers */
bool mlx5dr_ste_is_not_valid_entry(u8 *p_hw_ste);
int mlx5dr_ste_htbl_init_and_postsend(struct mlx5dr_domain *dmn,
				      struct mlx5dr_domain_rx_tx *nic_dmn,
				      struct mlx5dr_ste_htbl *htbl,
				      struct mlx5dr_htbl_connect_info *connect_info,
				      bool update_hw_ste);
void mlx5dr_ste_set_formatted_ste(u16 gvmi,
				  struct mlx5dr_domain_rx_tx *nic_dmn,
				  struct mlx5dr_ste_htbl *htbl,
				  u8 *formatted_ste,
				  struct mlx5dr_htbl_connect_info *connect_info);
/* Copy the fields selected by @match_criteria from @mask into @set_param */
void mlx5dr_ste_copy_param(u8 match_criteria,
			   struct mlx5dr_match_param *set_param,
			   struct mlx5dr_match_parameters *mask);
987 
/* QP owned by SW steering; the send ring posts WQEs on it to write
 * STEs and actions to device ICM memory (see mlx5dr_send_*).
 */
struct mlx5dr_qp {
	struct mlx5_core_dev *mdev;
	struct mlx5_wq_qp wq;
	struct mlx5_uars_page *uar;
	struct mlx5_wq_ctrl wq_ctrl;
	struct mlx5_core_qp mqp;
	struct {
		unsigned int pc;	/* producer counter */
		unsigned int cc;	/* consumer counter */
		unsigned int size;
		unsigned int *wqe_head;	/* per-slot WQE head index */
		unsigned int wqe_cnt;
	} sq;	/* send queue bookkeeping */
	struct {
		unsigned int pc;	/* producer counter */
		unsigned int cc;	/* consumer counter */
		unsigned int size;
		unsigned int wqe_cnt;
	} rq;	/* receive queue bookkeeping */
	int max_inline_data;	/* max bytes that fit inline in a WQE */
};
1009 
/* CQ polled for completions of WQEs posted on the paired steering QP */
struct mlx5dr_cq {
	struct mlx5_core_dev *mdev;
	struct mlx5_cqwq wq;
	struct mlx5_wq_ctrl wq_ctrl;
	struct mlx5_core_cq mcq;
	struct mlx5dr_qp *qp;	/* QP whose completions arrive on this CQ */
};
1017 
/* DMA-mapped memory region registered with the device */
struct mlx5dr_mr {
	struct mlx5_core_dev *mdev;
	struct mlx5_core_mkey mkey;	/* memory key for this region */
	dma_addr_t dma_addr;		/* DMA address of @addr */
	void *addr;			/* CPU virtual address of the region */
	size_t size;			/* region size in bytes */
};
1025 
#define MAX_SEND_CQE		64	/* capacity of the work-completion array */
#define MIN_READ_SYNC		64	/* size of the buffer used for sync reads */

/* Per-domain send ring used to post STE/action writes to ICM memory */
struct mlx5dr_send_ring {
	struct mlx5dr_cq *cq;
	struct mlx5dr_qp *qp;
	struct mlx5dr_mr *mr;	/* MR for send-ring data — presumably covers @buf, verify */
	/* Number of WQEs posted but not yet completed */
	u32 pending_wqe;
	/* Request a signaled completion every this many WQEs (signalling threshold) */
	u16 signal_th;
	/* Each post_send transfers at most max_post_send_size bytes */
	u32 max_post_send_size;
	/* manage the send queue */
	u32 tx_head;
	void *buf;		/* staging buffer for outgoing data */
	u32 buf_size;		/* size of @buf in bytes */
	struct ib_wc wc[MAX_SEND_CQE];	/* polled work completions */
	u8 sync_buff[MIN_READ_SYNC];	/* scratch target for sync reads */
	struct mlx5dr_mr *sync_mr;	/* MR for sync reads — presumably covers sync_buff */
};
1047 
/* Send-ring lifecycle and post-send helpers */
int mlx5dr_send_ring_alloc(struct mlx5dr_domain *dmn);
void mlx5dr_send_ring_free(struct mlx5dr_domain *dmn,
			   struct mlx5dr_send_ring *send_ring);
int mlx5dr_send_ring_force_drain(struct mlx5dr_domain *dmn);
/* Write @size bytes of @data at byte @offset within @ste's HW entry */
int mlx5dr_send_postsend_ste(struct mlx5dr_domain *dmn,
			     struct mlx5dr_ste *ste,
			     u8 *data,
			     u16 size,
			     u16 offset);
int mlx5dr_send_postsend_htbl(struct mlx5dr_domain *dmn,
			      struct mlx5dr_ste_htbl *htbl,
			      u8 *formatted_ste, u8 *mask);
int mlx5dr_send_postsend_formatted_htbl(struct mlx5dr_domain *dmn,
					struct mlx5dr_ste_htbl *htbl,
					u8 *ste_init_data,
					bool update_hw_ste);
int mlx5dr_send_postsend_action(struct mlx5dr_domain *dmn,
				struct mlx5dr_action *action);
1066 
/* Identifies a FW flow table for FTE commands (see mlx5dr_cmd_set_fte()) */
struct mlx5dr_cmd_ft_info {
	u32 id;				/* flow table id */
	u16 vport;			/* vport the table belongs to */
	enum fs_flow_table_type type;	/* flow table type, from fs_core.h */
};
1072 
/* Flow destination in HW terms; the active union member is selected
 * by @type (enum mlx5_flow_destination_type).
 */
struct mlx5dr_cmd_flow_destination_hw_info {
	enum mlx5_flow_destination_type type;
	union {
		u32 tir_num;		/* TIR destination */
		u32 ft_num;		/* flow table number */
		u32 ft_id;		/* flow table id */
		u32 counter_id;		/* flow counter id */
		struct {
			u16 num;		/* vport number */
			u16 vhca_id;		/* vhca id owning the vport */
			u32 reformat_id;	/* packet-reformat context to apply */
			u8 flags;
		} vport;	/* vport destination */
	};
};
1088 
/* Flow table entry parameters passed to mlx5dr_cmd_set_fte() */
struct mlx5dr_cmd_fte_info {
	u32 dests_size;		/* number of entries in @dest_arr */
	u32 index;		/* FTE index — presumably within the group, verify */
	struct mlx5_flow_context flow_context;
	u32 *val;		/* match values */
	struct mlx5_flow_act action;	/* actions to perform on match */
	struct mlx5dr_cmd_flow_destination_hw_info *dest_arr;	/* destinations */
};
1097 
/* Program a flow table entry in @ft via FW command.
 * Returns 0 on success or a negative errno.
 */
int mlx5dr_cmd_set_fte(struct mlx5_core_dev *dev,
		       int opmod, int modify_mask,
		       struct mlx5dr_cmd_ft_info *ft,
		       u32 group_id,
		       struct mlx5dr_cmd_fte_info *fte);
1103 
/* FW-created flow table used for checksum recalculation, created
 * per vport (see mlx5dr_fw_create_recalc_cs_ft()).
 */
struct mlx5dr_fw_recalc_cs_ft {
	u64 rx_icm_addr;	/* RX ICM address of the table */
	u32 table_id;		/* FW flow table id */
	u32 group_id;		/* flow group id within the table */
	u32 modify_hdr_id;	/* modify-header action used by the table */
};
1110 
/* Checksum-recalc flow table helpers and FW-created multi-destination
 * table helpers.
 */
struct mlx5dr_fw_recalc_cs_ft *
mlx5dr_fw_create_recalc_cs_ft(struct mlx5dr_domain *dmn, u32 vport_num);
void mlx5dr_fw_destroy_recalc_cs_ft(struct mlx5dr_domain *dmn,
				    struct mlx5dr_fw_recalc_cs_ft *recalc_cs_ft);
/* Return in @rx_icm_addr the recalc-cs table RX ICM address for
 * @vport_num — presumably created/cached on the domain on first use;
 * verify against the implementation.
 */
int mlx5dr_domain_cache_get_recalc_cs_ft_addr(struct mlx5dr_domain *dmn,
					      u32 vport_num,
					      u64 *rx_icm_addr);
/* Create a FW table holding @num_dest destinations; returns the new
 * table and group ids via @tbl_id/@group_id.
 */
int mlx5dr_fw_create_md_tbl(struct mlx5dr_domain *dmn,
			    struct mlx5dr_cmd_flow_destination_hw_info *dest,
			    int num_dest,
			    bool reformat_req,
			    u32 *tbl_id,
			    u32 *group_id);
void mlx5dr_fw_destroy_md_tbl(struct mlx5dr_domain *dmn, u32 tbl_id,
			      u32 group_id);
#endif  /* _DR_TYPES_ */
1127