/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
/* Copyright (c) 2021 Mellanox Technologies. */

#ifndef __MLX5_EN_TC_PRIV_H__
#define __MLX5_EN_TC_PRIV_H__

#include "en_tc.h"
#include "en/tc/act/act.h"

/* First bit index available for driver-private flow flags; placed right
 * after the last flag bit exported through en_tc.h so the two ranges
 * never collide within the same flags word.
 */
#define MLX5E_TC_FLOW_BASE (MLX5E_TC_FLAG_LAST_EXPORTED_BIT + 1)

/* Maximum number of extra (split) rules per flow; flow->rule[] therefore
 * holds MLX5E_TC_MAX_SPLITS + 1 handles.
 */
#define MLX5E_TC_MAX_SPLITS 1

/* Bit positions used in mlx5e_tc_flow->flags. The first five alias the
 * exported MLX5E_TC_FLAG_* bits; the remainder are private to TC offload
 * and start at MLX5E_TC_FLOW_BASE.
 */
enum {
	MLX5E_TC_FLOW_FLAG_INGRESS = MLX5E_TC_FLAG_INGRESS_BIT,
	MLX5E_TC_FLOW_FLAG_EGRESS = MLX5E_TC_FLAG_EGRESS_BIT,
	MLX5E_TC_FLOW_FLAG_ESWITCH = MLX5E_TC_FLAG_ESW_OFFLOAD_BIT,
	MLX5E_TC_FLOW_FLAG_FT = MLX5E_TC_FLAG_FT_OFFLOAD_BIT,
	MLX5E_TC_FLOW_FLAG_NIC = MLX5E_TC_FLAG_NIC_OFFLOAD_BIT,
	MLX5E_TC_FLOW_FLAG_OFFLOADED = MLX5E_TC_FLOW_BASE,
	MLX5E_TC_FLOW_FLAG_HAIRPIN = MLX5E_TC_FLOW_BASE + 1,
	MLX5E_TC_FLOW_FLAG_HAIRPIN_RSS = MLX5E_TC_FLOW_BASE + 2,
	MLX5E_TC_FLOW_FLAG_SLOW = MLX5E_TC_FLOW_BASE + 3,
	MLX5E_TC_FLOW_FLAG_DUP = MLX5E_TC_FLOW_BASE + 4,
	MLX5E_TC_FLOW_FLAG_NOT_READY = MLX5E_TC_FLOW_BASE + 5,
	MLX5E_TC_FLOW_FLAG_DELETED = MLX5E_TC_FLOW_BASE + 6,
	MLX5E_TC_FLOW_FLAG_L3_TO_L2_DECAP = MLX5E_TC_FLOW_BASE + 7,
	MLX5E_TC_FLOW_FLAG_TUN_RX = MLX5E_TC_FLOW_BASE + 8,
	MLX5E_TC_FLOW_FLAG_FAILED = MLX5E_TC_FLOW_BASE + 9,
	MLX5E_TC_FLOW_FLAG_SAMPLE = MLX5E_TC_FLOW_BASE + 10,
	MLX5E_TC_FLOW_FLAG_USE_ACT_STATS = MLX5E_TC_FLOW_BASE + 11,
};

/* Intermediate state accumulated while a TC rule is parsed, before it is
 * turned into hardware flow attributes. The per-VPORT arrays are indexed
 * per forward destination (presumably mirred/encap destination index —
 * TODO confirm against the parse code).
 */
struct mlx5e_tc_flow_parse_attr {
	const struct ip_tunnel_info *tun_info[MLX5_MAX_FLOW_FWD_VPORTS];
	struct mlx5e_mpls_info mpls_info[MLX5_MAX_FLOW_FWD_VPORTS];
	struct net_device *filter_dev;
	struct mlx5_flow_spec spec;
	struct pedit_headers_action hdrs[__PEDIT_CMD_MAX];
	struct mlx5e_tc_mod_hdr_acts mod_hdr_acts;
	int mirred_ifindex[MLX5_MAX_FLOW_FWD_VPORTS];
	struct mlx5e_tc_act_parse_state parse_state;
};

struct mlx5_fs_chains *mlx5e_nic_chains(struct mlx5e_tc_table *tc);

/* Helper struct for accessing a struct containing list_head array.
 * Containing struct
 *       |- Helper array
 *       [0] Helper item 0
 *           |- list_head item 0
 *           |- index (0)
 *       [1] Helper item 1
 *           |- list_head item 1
 *           |- index (1)
 * To access the containing struct from one of the list_head items:
 * 1. Get the helper item from the list_head item using
 *    helper item =
 *        container_of(list_head item, helper struct type, list_head field)
 * 2. Get the containing struct from the helper item and its index in the array:
 *    containing struct =
 *        container_of(helper item, containing struct type, helper field[index])
 */
struct encap_flow_item {
	struct mlx5e_encap_entry *e; /* attached encap instance */
	struct list_head list;
	int index;
};

struct encap_route_flow_item {
	struct mlx5e_route_entry *r; /* attached route instance */
	int index;
};

/* Driver representation of one offloaded TC flow. Lifetime is managed by
 * refcnt (see mlx5e_flow_get()/mlx5e_flow_put()); state transitions are
 * tracked through the MLX5E_TC_FLOW_FLAG_* bits in 'flags' via the
 * flow_flag_*() accessors below.
 */
struct mlx5e_tc_flow {
	struct rhash_head node; /* membership in the TC flow rhashtable */
	struct mlx5e_priv *priv;
	u64 cookie;             /* TC-assigned flow identifier */
	unsigned long flags;    /* MLX5E_TC_FLOW_FLAG_* bits */
	struct mlx5_flow_handle *rule[MLX5E_TC_MAX_SPLITS + 1];

	/* flows sharing the same reformat object - currently mpls decap */
	struct list_head l3_to_l2_reformat;
	struct mlx5e_decap_entry *decap_reformat;

	/* flows sharing same route entry */
	struct list_head decap_routes;
	struct mlx5e_route_entry *decap_route;
	struct encap_route_flow_item encap_routes[MLX5_MAX_FLOW_FWD_VPORTS];

	/* Flow can be associated with multiple encap IDs.
	 * The number of encaps is bounded by the number of supported
	 * destinations.
	 */
	struct encap_flow_item encaps[MLX5_MAX_FLOW_FWD_VPORTS];
	struct mlx5e_hairpin_entry *hpe; /* attached hairpin instance */
	struct list_head hairpin; /* flows sharing the same hairpin */
	struct list_head peer[MLX5_MAX_PORTS]; /* flows with peer flow */
	struct list_head unready; /* flows not ready to be offloaded (e.g.
				   * due to missing route)
				   */
	struct list_head peer_flows; /* flows on peer */
	struct net_device *orig_dev; /* netdev adding flow first */
	int tmp_entry_index;
	struct list_head tmp_list; /* temporary flow list used by neigh update */
	refcount_t refcnt;         /* released via mlx5e_flow_put() */
	struct rcu_head rcu_head;  /* deferred free after RCU grace period */
	struct completion init_done;
	struct completion del_hw_done;
	struct mlx5_flow_attr *attr;
	struct list_head attrs;
	u32 chain_mapping;
};

struct mlx5_flow_handle *
mlx5e_tc_rule_offload(struct mlx5e_priv *priv,
		      struct mlx5_flow_spec *spec,
		      struct mlx5_flow_attr *attr);

void
mlx5e_tc_rule_unoffload(struct mlx5e_priv *priv,
			struct mlx5_flow_handle *rule,
			struct mlx5_flow_attr *attr);

u8 mlx5e_tc_get_ip_version(struct mlx5_flow_spec *spec, bool outer);

struct mlx5_flow_handle *
mlx5e_tc_offload_fdb_rules(struct mlx5_eswitch *esw,
			   struct mlx5e_tc_flow *flow,
			   struct mlx5_flow_spec *spec,
			   struct mlx5_flow_attr *attr);

struct mlx5_flow_attr *
mlx5e_tc_get_encap_attr(struct mlx5e_tc_flow *flow);

void mlx5e_tc_unoffload_flow_post_acts(struct mlx5e_tc_flow *flow);
int mlx5e_tc_offload_flow_post_acts(struct mlx5e_tc_flow *flow);

bool mlx5e_is_eswitch_flow(struct mlx5e_tc_flow *flow);
bool mlx5e_is_ft_flow(struct mlx5e_tc_flow *flow);
bool mlx5e_is_offloaded_flow(struct mlx5e_tc_flow *flow);
int mlx5e_get_flow_namespace(struct mlx5e_tc_flow *flow);
bool mlx5e_same_hw_devs(struct mlx5e_priv *priv, struct mlx5e_priv *peer_priv);

/* Set a MLX5E_TC_FLOW_FLAG_* bit in flow->flags.
 * 'flag' is a raw bit index; callers normally use the flow_flag_set()
 * macro below, which maps a short name to its MLX5E_TC_FLOW_FLAG_* value.
 */
static inline void __flow_flag_set(struct mlx5e_tc_flow *flow,
				   unsigned long flag)
{
	/* Complete all memory stores before setting bit. */
	smp_mb__before_atomic();
	set_bit(flag, &flow->flags);
}

#define flow_flag_set(flow, flag) __flow_flag_set(flow, MLX5E_TC_FLOW_FLAG_##flag)

/* Atomically set a flag bit and return its previous value. */
static inline bool __flow_flag_test_and_set(struct mlx5e_tc_flow *flow,
					    unsigned long flag)
{
	/* test_and_set_bit() provides all necessary barriers */
	return test_and_set_bit(flag, &flow->flags);
}

#define flow_flag_test_and_set(flow, flag)			\
	__flow_flag_test_and_set(flow,				\
				 MLX5E_TC_FLOW_FLAG_##flag)

/* Clear a flag bit, ordering prior stores before the clear. */
static inline void __flow_flag_clear(struct mlx5e_tc_flow *flow, unsigned long flag)
{
	/* Complete all memory stores before clearing bit. */
	smp_mb__before_atomic();
	clear_bit(flag, &flow->flags);
}

#define flow_flag_clear(flow, flag) __flow_flag_clear(flow,		\
						      MLX5E_TC_FLOW_FLAG_##flag)

/* Test a flag bit; the trailing barrier pairs with the store barriers in
 * __flow_flag_set()/__flow_flag_clear() so that flow fields published
 * before the flag was set are visible after a successful test.
 */
static inline bool __flow_flag_test(struct mlx5e_tc_flow *flow, unsigned long flag)
{
	bool ret = test_bit(flag, &flow->flags);

	/* Read fields of flow structure only after checking flags. */
	smp_mb__after_atomic();
	return ret;
}

#define flow_flag_test(flow, flag) __flow_flag_test(flow,		\
						    MLX5E_TC_FLOW_FLAG_##flag)

void mlx5e_tc_unoffload_from_slow_path(struct mlx5_eswitch *esw,
				       struct mlx5e_tc_flow *flow);
struct mlx5_flow_handle *
mlx5e_tc_offload_to_slow_path(struct mlx5_eswitch *esw,
			      struct mlx5e_tc_flow *flow,
			      struct mlx5_flow_spec *spec);

void mlx5e_tc_unoffload_fdb_rules(struct mlx5_eswitch *esw,
				  struct mlx5e_tc_flow *flow,
				  struct mlx5_flow_attr *attr);

struct mlx5e_tc_flow *mlx5e_flow_get(struct mlx5e_tc_flow *flow);
void mlx5e_flow_put(struct mlx5e_priv *priv, struct mlx5e_tc_flow *flow);

struct mlx5_fc *mlx5e_tc_get_counter(struct mlx5e_tc_flow *flow);

struct mlx5e_tc_int_port_priv *
mlx5e_get_int_port_priv(struct mlx5e_priv *priv);

struct mlx5e_flow_meters *mlx5e_get_flow_meters(struct mlx5_core_dev *dev);

void *mlx5e_get_match_headers_value(u32 flags, struct mlx5_flow_spec *spec);
void *mlx5e_get_match_headers_criteria(u32 flags, struct mlx5_flow_spec *spec);

#endif /* __MLX5_EN_TC_PRIV_H__ */