// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
/* Copyright (c) 2020, Mellanox Technologies inc. All rights reserved. */

/*
 * Receive-side flow-steering tables for accelerated TCP connections.
 *
 * One flow table is created per IP version. The TTC (traffic type
 * classifier) rules for TCP/IPv4 and TCP/IPv6 are redirected to point at
 * these tables (accel_fs_tcp_enable()); each table holds per-socket
 * 5-tuple rules (mlx5e_accel_fs_add_sk()) that steer a single connection
 * to a caller-supplied TIR, plus a lowest-priority catch-all rule that
 * forwards unmatched traffic back to the TTC's original default
 * destination (accel_fs_tcp_add_default_rule()).
 */

#include <mlx5_core.h>
#include "en_accel/fs_tcp.h"
#include "fs_core.h"

/* Index of the per-IP-version accel flow table. */
enum accel_fs_tcp_type {
	ACCEL_FS_IPV4_TCP,
	ACCEL_FS_IPV6_TCP,
	ACCEL_FS_TCP_NUM_TYPES,
};

struct mlx5e_accel_fs_tcp {
	/* One flow table per accel_fs_tcp_type. */
	struct mlx5e_flow_table tables[ACCEL_FS_TCP_NUM_TYPES];
	/* Per-table catch-all rule forwarding back to the TTC default dest. */
	struct mlx5_flow_handle *default_rules[ACCEL_FS_TCP_NUM_TYPES];
};

/* Map an accel table index to the corresponding TTC traffic type. */
static enum mlx5_traffic_types fs_accel2tt(enum accel_fs_tcp_type i)
{
	switch (i) {
	case ACCEL_FS_IPV4_TCP:
		return MLX5_TT_IPV4_TCP;
	default: /* ACCEL_FS_IPV6_TCP */
		return MLX5_TT_IPV6_TCP;
	}
}

/*
 * Fill @spec with an exact-match on the IPv4 addresses of @sk.
 *
 * Note the swap relative to the socket: the match is applied to packets
 * arriving FROM the peer, so the packet's source address is the socket's
 * destination address (inet_daddr) and the packet's destination address
 * is the socket's bound local address (inet_rcv_saddr).
 */
static void accel_fs_tcp_set_ipv4_flow(struct mlx5_flow_spec *spec, struct sock *sk)
{
	MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, outer_headers.ip_protocol);
	MLX5_SET(fte_match_param, spec->match_value, outer_headers.ip_protocol, IPPROTO_TCP);
	MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, outer_headers.ip_version);
	MLX5_SET(fte_match_param, spec->match_value, outer_headers.ip_version, 4);
	memcpy(MLX5_ADDR_OF(fte_match_param, spec->match_value,
			    outer_headers.src_ipv4_src_ipv6.ipv4_layout.ipv4),
	       &inet_sk(sk)->inet_daddr, 4);
	memcpy(MLX5_ADDR_OF(fte_match_param, spec->match_value,
			    outer_headers.dst_ipv4_dst_ipv6.ipv4_layout.ipv4),
	       &inet_sk(sk)->inet_rcv_saddr, 4);
	MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria,
			 outer_headers.src_ipv4_src_ipv6.ipv4_layout.ipv4);
	MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria,
			 outer_headers.dst_ipv4_dst_ipv6.ipv4_layout.ipv4);
}

#if IS_ENABLED(CONFIG_IPV6)
/*
 * IPv6 counterpart of accel_fs_tcp_set_ipv4_flow(): exact-match on the
 * full 128-bit addresses, with the same ingress src/dst swap.
 */
static void accel_fs_tcp_set_ipv6_flow(struct mlx5_flow_spec *spec, struct sock *sk)
{
	MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, outer_headers.ip_protocol);
	MLX5_SET(fte_match_param, spec->match_value, outer_headers.ip_protocol, IPPROTO_TCP);
	MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, outer_headers.ip_version);
	MLX5_SET(fte_match_param, spec->match_value, outer_headers.ip_version, 6);
	memcpy(MLX5_ADDR_OF(fte_match_param, spec->match_value,
			    outer_headers.src_ipv4_src_ipv6.ipv6_layout.ipv6),
	       &sk->sk_v6_daddr, 16);
	memcpy(MLX5_ADDR_OF(fte_match_param, spec->match_value,
			    outer_headers.dst_ipv4_dst_ipv6.ipv6_layout.ipv6),
	       &inet6_sk(sk)->saddr, 16);
	/* Full /128 mask on both addresses. */
	memset(MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
			    outer_headers.src_ipv4_src_ipv6.ipv6_layout.ipv6),
	       0xff, 16);
	memset(MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
			    outer_headers.dst_ipv4_dst_ipv6.ipv6_layout.ipv6),
	       0xff, 16);
}
#endif

/* Remove a per-socket steering rule created by mlx5e_accel_fs_add_sk(). */
void mlx5e_accel_fs_del_sk(struct mlx5_flow_handle *rule)
{
	mlx5_del_flow_rules(rule);
}

/*
 * Add a 5-tuple steering rule for @sk that forwards its ingress traffic
 * to TIR @tirn, optionally tagging matching packets with @flow_tag.
 *
 * Supports AF_INET sockets, and AF_INET6 sockets (including v4-mapped
 * addresses, which are steered through the IPv4 table). Returns the rule
 * handle on success (caller releases it via mlx5e_accel_fs_del_sk()), or
 * an ERR_PTR on failure: -ENOMEM on allocation failure, -EINVAL for an
 * unsupported address family.
 */
struct mlx5_flow_handle *mlx5e_accel_fs_add_sk(struct mlx5e_flow_steering *fs,
					       struct sock *sk, u32 tirn,
					       uint32_t flow_tag)
{
	struct mlx5e_accel_fs_tcp *fs_tcp = mlx5e_fs_get_accel_tcp(fs);
	struct mlx5_flow_destination dest = {};
	struct mlx5e_flow_table *ft = NULL;
	MLX5_DECLARE_FLOW_ACT(flow_act);
	struct mlx5_flow_handle *flow;
	struct mlx5_flow_spec *spec;

	spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
	if (!spec)
		return ERR_PTR(-ENOMEM);

	spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;

	switch (sk->sk_family) {
	case AF_INET:
		accel_fs_tcp_set_ipv4_flow(spec, sk);
		ft = &fs_tcp->tables[ACCEL_FS_IPV4_TCP];
		/* NOTE(review): ports below are printed in network byte order
		 * (no ntohs) — debug output only.
		 */
		fs_dbg(fs, "%s flow is %pI4:%d -> %pI4:%d\n", __func__,
		       &inet_sk(sk)->inet_rcv_saddr,
		       inet_sk(sk)->inet_sport,
		       &inet_sk(sk)->inet_daddr,
		       inet_sk(sk)->inet_dport);
		break;
#if IS_ENABLED(CONFIG_IPV6)
	case AF_INET6:
		/* v4-mapped IPv6 sockets carry IPv4 traffic: use the v4 table. */
		if (!ipv6_only_sock(sk) &&
		    ipv6_addr_type(&sk->sk_v6_daddr) == IPV6_ADDR_MAPPED) {
			accel_fs_tcp_set_ipv4_flow(spec, sk);
			ft = &fs_tcp->tables[ACCEL_FS_IPV4_TCP];
		} else {
			accel_fs_tcp_set_ipv6_flow(spec, sk);
			ft = &fs_tcp->tables[ACCEL_FS_IPV6_TCP];
		}
		break;
#endif
	default:
		break;
	}

	/* ft is still NULL for unsupported families (or AF_INET6 w/o CONFIG_IPV6). */
	if (!ft) {
		flow = ERR_PTR(-EINVAL);
		goto out;
	}

	/* Match the ports of the ingress packet: its dport is the socket's
	 * local port (inet_sport) and its sport is the peer's port
	 * (inet_dport) — same swap as the addresses above.
	 */
	MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria,
			 outer_headers.tcp_dport);
	MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria,
			 outer_headers.tcp_sport);
	MLX5_SET(fte_match_param, spec->match_value, outer_headers.tcp_dport,
		 ntohs(inet_sk(sk)->inet_sport));
	MLX5_SET(fte_match_param, spec->match_value, outer_headers.tcp_sport,
		 ntohs(inet_sk(sk)->inet_dport));

	dest.type = MLX5_FLOW_DESTINATION_TYPE_TIR;
	dest.tir_num = tirn;
	if (flow_tag != MLX5_FS_DEFAULT_FLOW_TAG) {
		spec->flow_context.flow_tag = flow_tag;
		spec->flow_context.flags = FLOW_CONTEXT_HAS_TAG;
	}

	flow = mlx5_add_flow_rules(ft->t, spec, &flow_act, &dest, 1);

	if (IS_ERR(flow))
		fs_err(fs, "mlx5_add_flow_rules() failed, flow is %ld\n", PTR_ERR(flow));

out:
	kvfree(spec);
	return flow;
}

/*
 * Install the table's catch-all rule (NULL spec == match everything),
 * forwarding unmatched traffic to the TTC's original default destination
 * so non-accelerated flows keep working.
 */
static int accel_fs_tcp_add_default_rule(struct mlx5e_flow_steering *fs,
					 enum accel_fs_tcp_type type)
{
	struct mlx5e_accel_fs_tcp *fs_tcp = mlx5e_fs_get_accel_tcp(fs);
	struct mlx5_ttc_table *ttc = mlx5e_fs_get_ttc(fs, false);
	struct mlx5e_flow_table *accel_fs_t;
	struct mlx5_flow_destination dest;
	MLX5_DECLARE_FLOW_ACT(flow_act);
	struct mlx5_flow_handle *rule;
	int err = 0;

	accel_fs_t = &fs_tcp->tables[type];

	dest = mlx5_ttc_get_default_dest(ttc, fs_accel2tt(type));
	rule = mlx5_add_flow_rules(accel_fs_t->t, NULL, &flow_act, &dest, 1);
	if (IS_ERR(rule)) {
		err = PTR_ERR(rule);
		fs_err(fs, "%s: add default rule failed, accel_fs type=%d, err %d\n",
		       __func__, type, err);
		return err;
	}

	fs_tcp->default_rules[type] = rule;
	return 0;
}

/* Group 1: 5-tuple socket rules; group 2: single catch-all entry. */
#define MLX5E_ACCEL_FS_TCP_NUM_GROUPS		(2)
#define MLX5E_ACCEL_FS_TCP_GROUP1_SIZE		(BIT(16) - 1)
#define MLX5E_ACCEL_FS_TCP_GROUP2_SIZE		(BIT(0))
#define MLX5E_ACCEL_FS_TCP_TABLE_SIZE		(MLX5E_ACCEL_FS_TCP_GROUP1_SIZE +\
						 MLX5E_ACCEL_FS_TCP_GROUP2_SIZE)

/*
 * Create the two flow groups of an accel table: a large group whose match
 * criteria cover ip_protocol/ip_version, src/dst address (per @type) and
 * TCP ports, followed by a one-entry criteria-less group for the default
 * rule. On group-creation failure the partially created state is left for
 * the caller to release (presumably via mlx5e_destroy_flow_table() — see
 * accel_fs_tcp_create_table()).
 */
static int accel_fs_tcp_create_groups(struct mlx5e_flow_table *ft,
				      enum accel_fs_tcp_type type)
{
	int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
	void *outer_headers_c;
	int ix = 0;
	u32 *in;
	int err;
	u8 *mc;

	ft->g = kcalloc(MLX5E_ACCEL_FS_TCP_NUM_GROUPS, sizeof(*ft->g), GFP_KERNEL);
	in = kvzalloc(inlen, GFP_KERNEL);
	if (!in || !ft->g) {
		kfree(ft->g);
		kvfree(in);
		return -ENOMEM;
	}

	mc = MLX5_ADDR_OF(create_flow_group_in, in, match_criteria);
	outer_headers_c = MLX5_ADDR_OF(fte_match_param, mc, outer_headers);
	MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, outer_headers_c, ip_protocol);
	MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, outer_headers_c, ip_version);

	switch (type) {
	case ACCEL_FS_IPV4_TCP:
	case ACCEL_FS_IPV6_TCP:
		MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, outer_headers_c, tcp_dport);
		MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, outer_headers_c, tcp_sport);
		break;
	default:
		err = -EINVAL;
		goto out;
	}

	switch (type) {
	case ACCEL_FS_IPV4_TCP:
		MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, outer_headers_c,
				 src_ipv4_src_ipv6.ipv4_layout.ipv4);
		MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, outer_headers_c,
				 dst_ipv4_dst_ipv6.ipv4_layout.ipv4);
		break;
	case ACCEL_FS_IPV6_TCP:
		memset(MLX5_ADDR_OF(fte_match_set_lyr_2_4, outer_headers_c,
				    src_ipv4_src_ipv6.ipv6_layout.ipv6),
		       0xff, 16);
		memset(MLX5_ADDR_OF(fte_match_set_lyr_2_4, outer_headers_c,
				    dst_ipv4_dst_ipv6.ipv6_layout.ipv6),
		       0xff, 16);
		break;
	default:
		err = -EINVAL;
		goto out;
	}

	MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
	MLX5_SET_CFG(in, start_flow_index, ix);
	ix += MLX5E_ACCEL_FS_TCP_GROUP1_SIZE;
	MLX5_SET_CFG(in, end_flow_index, ix - 1);
	ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
	if (IS_ERR(ft->g[ft->num_groups]))
		goto err;
	ft->num_groups++;

	/* Default Flow Group */
	memset(in, 0, inlen);
	MLX5_SET_CFG(in, start_flow_index, ix);
	ix += MLX5E_ACCEL_FS_TCP_GROUP2_SIZE;
	MLX5_SET_CFG(in, end_flow_index, ix - 1);
	ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
	if (IS_ERR(ft->g[ft->num_groups]))
		goto err;
	ft->num_groups++;

	kvfree(in);
	return 0;

err:
	err = PTR_ERR(ft->g[ft->num_groups]);
	ft->g[ft->num_groups] = NULL;	/* don't leave an ERR_PTR in the array */
out:
	kvfree(in);

	return err;
}

/*
 * Create one accel flow table (table + groups + default rule) for @type.
 * On any failure past table creation, mlx5e_destroy_flow_table() tears
 * down whatever was created.
 */
static int accel_fs_tcp_create_table(struct mlx5e_flow_steering *fs, enum accel_fs_tcp_type type)
{
	struct mlx5e_accel_fs_tcp *accel_tcp = mlx5e_fs_get_accel_tcp(fs);
	struct mlx5_flow_namespace *ns = mlx5e_fs_get_ns(fs, false);
	struct mlx5e_flow_table *ft = &accel_tcp->tables[type];
	struct mlx5_flow_table_attr ft_attr = {};
	int err;

	ft->num_groups = 0;

	ft_attr.max_fte = MLX5E_ACCEL_FS_TCP_TABLE_SIZE;
	ft_attr.level = MLX5E_ACCEL_FS_TCP_FT_LEVEL;
	ft_attr.prio = MLX5E_NIC_PRIO;

	ft->t = mlx5_create_flow_table(ns, &ft_attr);
	if (IS_ERR(ft->t)) {
		err = PTR_ERR(ft->t);
		ft->t = NULL;	/* keep destroy paths (IS_ERR_OR_NULL checks) safe */
		return err;
	}

	fs_dbg(fs, "Created fs accel table id %u level %u\n",
	       ft->t->id, ft->t->level);

	err = accel_fs_tcp_create_groups(ft, type);
	if (err)
		goto err;

	err = accel_fs_tcp_add_default_rule(fs, type);
	if (err)
		goto err;

	return 0;
err:
	mlx5e_destroy_flow_table(ft);
	return err;
}

/*
 * Detach the accel tables from the datapath by restoring the TTC rules'
 * original destinations (the indirection TIRs). Returns the first error,
 * leaving any remaining TTC entries still pointing at the accel tables.
 */
static int accel_fs_tcp_disable(struct mlx5e_flow_steering *fs)
{
	struct mlx5_ttc_table *ttc = mlx5e_fs_get_ttc(fs, false);
	int err, i;

	for (i = 0; i < ACCEL_FS_TCP_NUM_TYPES; i++) {
		/* Modify ttc rules destination to point back to the indir TIRs */
		err = mlx5_ttc_fwd_default_dest(ttc, fs_accel2tt(i));
		if (err) {
			fs_err(fs,
			       "%s: modify ttc[%d] default destination failed, err(%d)\n",
			       __func__, fs_accel2tt(i), err);
			return err;
		}
	}

	return 0;
}

/*
 * Attach the accel tables to the datapath: repoint the TCP TTC rules at
 * the per-IP-version accel tables. Returns the first error; entries
 * already redirected are not rolled back here.
 */
static int accel_fs_tcp_enable(struct mlx5e_flow_steering *fs)
{
	struct mlx5e_accel_fs_tcp *accel_tcp = mlx5e_fs_get_accel_tcp(fs);
	struct mlx5_ttc_table *ttc = mlx5e_fs_get_ttc(fs, false);
	struct mlx5_flow_destination dest = {};
	int err, i;

	dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
	for (i = 0; i < ACCEL_FS_TCP_NUM_TYPES; i++) {
		dest.ft = accel_tcp->tables[i].t;

		/* Modify ttc rules destination to point on the accel_fs FTs */
		err = mlx5_ttc_fwd_dest(ttc, fs_accel2tt(i), &dest);
		if (err) {
			fs_err(fs, "%s: modify ttc[%d] destination to accel failed, err(%d)\n",
			       __func__, fs_accel2tt(i), err);
			return err;
		}
	}
	return 0;
}

/*
 * Tear down table @i: delete its default rule, destroy the table, and
 * mark the slot empty. Safe to call on a never-created slot (NULL t).
 */
static void accel_fs_tcp_destroy_table(struct mlx5e_flow_steering *fs, int i)
{
	struct mlx5e_accel_fs_tcp *fs_tcp = mlx5e_fs_get_accel_tcp(fs);

	if (IS_ERR_OR_NULL(fs_tcp->tables[i].t))
		return;

	mlx5_del_flow_rules(fs_tcp->default_rules[i]);
	mlx5e_destroy_flow_table(&fs_tcp->tables[i]);
	fs_tcp->tables[i].t = NULL;
}

/*
 * Full teardown: detach from the TTC first, then destroy all tables and
 * free the context. No-op if mlx5e_accel_fs_tcp_create() never ran.
 */
void mlx5e_accel_fs_tcp_destroy(struct mlx5e_flow_steering *fs)
{
	struct mlx5e_accel_fs_tcp *accel_tcp = mlx5e_fs_get_accel_tcp(fs);
	int i;

	if (!accel_tcp)
		return;

	accel_fs_tcp_disable(fs);

	for (i = 0; i < ACCEL_FS_TCP_NUM_TYPES; i++)
		accel_fs_tcp_destroy_table(fs, i);

	kvfree(accel_tcp);
	mlx5e_fs_set_accel_tcp(fs, NULL);
}

/*
 * Allocate the accel-TCP context, create both tables, then redirect the
 * TTC rules at them. Requires the device to support matching on
 * outer_ip_version (-EOPNOTSUPP otherwise). On failure, tables created so
 * far are destroyed and the context is freed.
 */
int mlx5e_accel_fs_tcp_create(struct mlx5e_flow_steering *fs)
{
	struct mlx5e_accel_fs_tcp *accel_tcp;
	int i, err;

	if (!MLX5_CAP_FLOWTABLE_NIC_RX(mlx5e_fs_get_mdev(fs), ft_field_support.outer_ip_version))
		return -EOPNOTSUPP;

	accel_tcp = kvzalloc(sizeof(*accel_tcp), GFP_KERNEL);
	if (!accel_tcp)
		return -ENOMEM;
	mlx5e_fs_set_accel_tcp(fs, accel_tcp);

	for (i = 0; i < ACCEL_FS_TCP_NUM_TYPES; i++) {
		err = accel_fs_tcp_create_table(fs, i);
		if (err)
			goto err_destroy_tables;
	}

	err = accel_fs_tcp_enable(fs);
	if (err)
		goto err_destroy_tables;

	return 0;

err_destroy_tables:
	/* i is one past the last successfully created table (or the index of
	 * the failed one); walk back over the tables that were created.
	 */
	while (--i >= 0)
		accel_fs_tcp_destroy_table(fs, i);
	kvfree(accel_tcp);
	mlx5e_fs_set_accel_tcp(fs, NULL);
	return err;
}