/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
/* Copyright (c) 2021 Mellanox Technologies. */

#ifndef __MLX5_EN_TC_PRIV_H__
#define __MLX5_EN_TC_PRIV_H__

#include "en_tc.h"
#include "en/tc/act/act.h"

#define MLX5E_TC_FLOW_BASE (MLX5E_TC_FLAG_LAST_EXPORTED_BIT + 1)

#define MLX5E_TC_MAX_SPLITS 1

#define mlx5e_nic_chains(priv) ((priv)->fs.tc.chains)

enum {
	MLX5E_TC_FLOW_FLAG_INGRESS               = MLX5E_TC_FLAG_INGRESS_BIT,
	MLX5E_TC_FLOW_FLAG_EGRESS                = MLX5E_TC_FLAG_EGRESS_BIT,
	MLX5E_TC_FLOW_FLAG_ESWITCH               = MLX5E_TC_FLAG_ESW_OFFLOAD_BIT,
	MLX5E_TC_FLOW_FLAG_FT                    = MLX5E_TC_FLAG_FT_OFFLOAD_BIT,
	MLX5E_TC_FLOW_FLAG_NIC                   = MLX5E_TC_FLAG_NIC_OFFLOAD_BIT,
	MLX5E_TC_FLOW_FLAG_OFFLOADED             = MLX5E_TC_FLOW_BASE,
	MLX5E_TC_FLOW_FLAG_HAIRPIN               = MLX5E_TC_FLOW_BASE + 1,
	MLX5E_TC_FLOW_FLAG_HAIRPIN_RSS           = MLX5E_TC_FLOW_BASE + 2,
	MLX5E_TC_FLOW_FLAG_SLOW                  = MLX5E_TC_FLOW_BASE + 3,
	MLX5E_TC_FLOW_FLAG_DUP                   = MLX5E_TC_FLOW_BASE + 4,
	MLX5E_TC_FLOW_FLAG_NOT_READY             = MLX5E_TC_FLOW_BASE + 5,
	MLX5E_TC_FLOW_FLAG_DELETED               = MLX5E_TC_FLOW_BASE + 6,
	MLX5E_TC_FLOW_FLAG_CT                    = MLX5E_TC_FLOW_BASE + 7,
	MLX5E_TC_FLOW_FLAG_L3_TO_L2_DECAP        = MLX5E_TC_FLOW_BASE + 8,
	MLX5E_TC_FLOW_FLAG_TUN_RX                = MLX5E_TC_FLOW_BASE + 9,
	MLX5E_TC_FLOW_FLAG_FAILED                = MLX5E_TC_FLOW_BASE + 10,
	MLX5E_TC_FLOW_FLAG_SAMPLE                = MLX5E_TC_FLOW_BASE + 11,
};

struct mlx5e_tc_flow_parse_attr {
	const struct ip_tunnel_info *tun_info[MLX5_MAX_FLOW_FWD_VPORTS];
	struct mlx5e_mpls_info mpls_info[MLX5_MAX_FLOW_FWD_VPORTS];
	struct net_device *filter_dev;
	struct mlx5_flow_spec spec;
	struct pedit_headers_action hdrs[__PEDIT_CMD_MAX];
	struct mlx5e_tc_mod_hdr_acts mod_hdr_acts;
	int mirred_ifindex[MLX5_MAX_FLOW_FWD_VPORTS];
	struct mlx5e_tc_act_parse_state parse_state;
};

/* Helper struct for accessing a struct containing list_head array.
 * Containing struct
 *   |- Helper array
 *      [0] Helper item 0
 *          |- list_head item 0
 *          |- index (0)
 *      [1] Helper item 1
 *          |- list_head item 1
 *          |- index (1)
 * To access the containing struct from one of the list_head items:
 * 1. Get the helper item from the list_head item using
 *    helper item =
 *        container_of(list_head item, helper struct type, list_head field)
 * 2. Get the containing struct from the helper item and its index in the array:
 *    containing struct =
 *        container_of(helper item, containing struct type, helper field[index])
 */
struct encap_flow_item {
	struct mlx5e_encap_entry *e; /* attached encap instance */
	struct list_head list;
	int index;
};

struct encap_route_flow_item {
	struct mlx5e_route_entry *r; /* attached route instance */
	int index;
};
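
/* Illustrative sketch of the two-step container_of() lookup described above,
 * applied to the encaps[] array of struct mlx5e_tc_flow (defined below).
 * Variable names here are hypothetical; the actual lookups live in the TC
 * offload code, not in this header.
 *
 *	struct encap_flow_item *efi;
 *	struct mlx5e_tc_flow *flow;
 *
 *	efi = container_of(encap_list_entry, struct encap_flow_item, list);
 *	flow = container_of(efi, struct mlx5e_tc_flow, encaps[efi->index]);
 *
 * The index stored in the helper item selects the matching array slot, so
 * the second container_of() resolves to the start of the containing flow.
 */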

struct mlx5e_tc_flow {
	struct rhash_head node;
	struct mlx5e_priv *priv;
	u64 cookie;
	unsigned long flags;
	struct mlx5_flow_handle *rule[MLX5E_TC_MAX_SPLITS + 1];

	/* flows sharing the same reformat object - currently mpls decap */
	struct list_head l3_to_l2_reformat;
	struct mlx5e_decap_entry *decap_reformat;

	/* flows sharing same route entry */
	struct list_head decap_routes;
	struct mlx5e_route_entry *decap_route;
	struct encap_route_flow_item encap_routes[MLX5_MAX_FLOW_FWD_VPORTS];

	/* Flow can be associated with multiple encap IDs.
	 * The number of encaps is bounded by the number of supported
	 * destinations.
	 */
	struct encap_flow_item encaps[MLX5_MAX_FLOW_FWD_VPORTS];
	struct mlx5e_tc_flow *peer_flow;
	struct mlx5e_mod_hdr_handle *mh; /* attached mod header instance */
	struct mlx5e_hairpin_entry *hpe; /* attached hairpin instance */
	struct list_head hairpin; /* flows sharing the same hairpin */
	struct list_head peer;    /* flows with peer flow */
	struct list_head unready; /* flows not ready to be offloaded (e.g.
				   * due to missing route)
				   */
	struct net_device *orig_dev; /* netdev adding flow first */
	int tmp_entry_index;
	struct list_head tmp_list; /* temporary flow list used by neigh update */
	refcount_t refcnt;
	struct rcu_head rcu_head;
	struct completion init_done;
	struct completion del_hw_done;
	struct mlx5_flow_attr *attr;
	struct list_head attrs;
};

struct mlx5_flow_handle *
mlx5e_tc_rule_offload(struct mlx5e_priv *priv,
		      struct mlx5_flow_spec *spec,
		      struct mlx5_flow_attr *attr);

void
mlx5e_tc_rule_unoffload(struct mlx5e_priv *priv,
			struct mlx5_flow_handle *rule,
			struct mlx5_flow_attr *attr);
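
/* Hedged call-site sketch for the pair above (hypothetical variable names;
 * the ERR_PTR error convention is assumed, not defined in this header): the
 * handle returned by mlx5e_tc_rule_offload() is what a caller later passes
 * back to mlx5e_tc_rule_unoffload() together with the same attr.
 *
 *	struct mlx5_flow_handle *rule;
 *
 *	rule = mlx5e_tc_rule_offload(priv, &parse_attr->spec, attr);
 *	if (IS_ERR(rule))
 *		return PTR_ERR(rule);
 *	...
 *	mlx5e_tc_rule_unoffload(priv, rule, attr);
 */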

u8 mlx5e_tc_get_ip_version(struct mlx5_flow_spec *spec, bool outer);

struct mlx5_flow_handle *
mlx5e_tc_offload_fdb_rules(struct mlx5_eswitch *esw,
			   struct mlx5e_tc_flow *flow,
			   struct mlx5_flow_spec *spec,
			   struct mlx5_flow_attr *attr);

struct mlx5_flow_attr *
mlx5e_tc_get_encap_attr(struct mlx5e_tc_flow *flow);

void mlx5e_tc_unoffload_flow_post_acts(struct mlx5e_tc_flow *flow);
int mlx5e_tc_offload_flow_post_acts(struct mlx5e_tc_flow *flow);

bool mlx5e_is_eswitch_flow(struct mlx5e_tc_flow *flow);
bool mlx5e_is_ft_flow(struct mlx5e_tc_flow *flow);
bool mlx5e_is_offloaded_flow(struct mlx5e_tc_flow *flow);
int mlx5e_get_flow_namespace(struct mlx5e_tc_flow *flow);
bool mlx5e_same_hw_devs(struct mlx5e_priv *priv, struct mlx5e_priv *peer_priv);

static inline void __flow_flag_set(struct mlx5e_tc_flow *flow, unsigned long flag)
{
	/* Complete all memory stores before setting bit. */
	smp_mb__before_atomic();
	set_bit(flag, &flow->flags);
}

#define flow_flag_set(flow, flag) __flow_flag_set(flow, MLX5E_TC_FLOW_FLAG_##flag)

static inline bool __flow_flag_test_and_set(struct mlx5e_tc_flow *flow,
					    unsigned long flag)
{
	/* test_and_set_bit() provides all necessary barriers */
	return test_and_set_bit(flag, &flow->flags);
}

#define flow_flag_test_and_set(flow, flag)			\
	__flow_flag_test_and_set(flow,				\
				 MLX5E_TC_FLOW_FLAG_##flag)

static inline void __flow_flag_clear(struct mlx5e_tc_flow *flow, unsigned long flag)
{
	/* Complete all memory stores before clearing bit. */
	smp_mb__before_atomic();
	clear_bit(flag, &flow->flags);
}

#define flow_flag_clear(flow, flag) __flow_flag_clear(flow,		\
						      MLX5E_TC_FLOW_FLAG_##flag)

static inline bool __flow_flag_test(struct mlx5e_tc_flow *flow, unsigned long flag)
{
	bool ret = test_bit(flag, &flow->flags);

	/* Read fields of flow structure only after checking flags. */
	smp_mb__after_atomic();
	return ret;
}

#define flow_flag_test(flow, flag) __flow_flag_test(flow,		\
						    MLX5E_TC_FLOW_FLAG_##flag)
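
/* Usage sketch (hypothetical call sites): the wrappers paste the short flag
 * name onto the MLX5E_TC_FLOW_FLAG_ prefix, so callers write e.g.
 *
 *	flow_flag_set(flow, OFFLOADED);
 *	if (flow_flag_test(flow, SLOW))
 *		...;
 *	flow_flag_clear(flow, NOT_READY);
 *
 * The barrier comments above describe the intended pairing: stores made
 * before flow_flag_set() are meant to be visible to a reader that observes
 * the flag via flow_flag_test() before touching the flow's fields.
 */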

void mlx5e_tc_unoffload_from_slow_path(struct mlx5_eswitch *esw,
				       struct mlx5e_tc_flow *flow);
struct mlx5_flow_handle *
mlx5e_tc_offload_to_slow_path(struct mlx5_eswitch *esw,
			      struct mlx5e_tc_flow *flow,
			      struct mlx5_flow_spec *spec);

void mlx5e_tc_unoffload_fdb_rules(struct mlx5_eswitch *esw,
				  struct mlx5e_tc_flow *flow,
				  struct mlx5_flow_attr *attr);

struct mlx5e_tc_flow *mlx5e_flow_get(struct mlx5e_tc_flow *flow);
void mlx5e_flow_put(struct mlx5e_priv *priv, struct mlx5e_tc_flow *flow);
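
/* Reference-counting sketch (hypothetical call site; the exact return/error
 * convention of mlx5e_flow_get() is defined in en_tc.c, not here): take a
 * reference before handing the flow to deferred work, and release it with
 * mlx5e_flow_put() once done.
 *
 *	flow = mlx5e_flow_get(flow);
 *	if (!IS_ERR(flow)) {
 *		... use flow ...
 *		mlx5e_flow_put(priv, flow);
 *	}
 */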

struct mlx5_fc *mlx5e_tc_get_counter(struct mlx5e_tc_flow *flow);

struct mlx5e_tc_int_port_priv *
mlx5e_get_int_port_priv(struct mlx5e_priv *priv);

void *mlx5e_get_match_headers_value(u32 flags, struct mlx5_flow_spec *spec);
void *mlx5e_get_match_headers_criteria(u32 flags, struct mlx5_flow_spec *spec);

#endif /* __MLX5_EN_TC_PRIV_H__ */