1 // SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
2 /* Copyright (c) 2020, Mellanox Technologies inc. All rights reserved. */
3 
4 #include <linux/netdevice.h>
5 #include "en.h"
6 #include "en/fs.h"
7 #include "ipsec.h"
8 #include "fs_core.h"
9 
10 #define NUM_IPSEC_FTE BIT(15)
11 
/* Flow-counter pair for one direction (RX or TX). */
struct mlx5e_ipsec_fc {
	struct mlx5_fc *cnt;	/* packets/bytes that hit an IPsec rule */
	struct mlx5_fc *drop;	/* packets/bytes dropped by a BLOCK policy */
};
16 
/* Per-direction flow-table set, created lazily on first use and
 * refcounted (see rx_ft_get()/tx_ft_get()).
 */
struct mlx5e_ipsec_ft {
	struct mutex mutex; /* Protect changes to this struct */
	struct mlx5_flow_table *pol;	/* policy lookup table */
	struct mlx5_flow_table *sa;	/* SA (state) lookup table */
	struct mlx5_flow_table *status;	/* RX only: post-decrypt status table */
	u32 refcnt;	/* tables exist while refcnt > 0; protected by mutex */
};
24 
/* Catch-all group + rule occupying the last (reserved) FTE of a table;
 * forwards traffic that matched no other rule (see ipsec_miss_create()).
 */
struct mlx5e_ipsec_miss {
	struct mlx5_flow_group *group;
	struct mlx5_flow_handle *rule;
};
29 
/* RX steering state for one address family (IPv4 or IPv6). */
struct mlx5e_ipsec_rx {
	struct mlx5e_ipsec_ft ft;
	struct mlx5e_ipsec_miss pol;	/* policy-table miss -> SA table */
	struct mlx5e_ipsec_miss sa;	/* SA-table miss -> TTC default */
	struct mlx5e_ipsec_rule status;	/* syndrome-copy rule + modify hdr */
	struct mlx5e_ipsec_fc *fc;	/* shared between IPv4 and IPv6 */
};
37 
/* TX steering state (single instance; the tables are family-agnostic). */
struct mlx5e_ipsec_tx {
	struct mlx5e_ipsec_ft ft;
	struct mlx5e_ipsec_miss pol;	/* policy-table miss -> SA table */
	struct mlx5_flow_namespace *ns;	/* egress IPsec namespace */
	struct mlx5e_ipsec_fc *fc;
};
44 
45 /* IPsec RX flow steering */
46 static enum mlx5_traffic_types family2tt(u32 family)
47 {
48 	if (family == AF_INET)
49 		return MLX5_TT_IPV4_IPSEC_ESP;
50 	return MLX5_TT_IPV6_IPSEC_ESP;
51 }
52 
53 static struct mlx5_flow_table *ipsec_ft_create(struct mlx5_flow_namespace *ns,
54 					       int level, int prio,
55 					       int max_num_groups)
56 {
57 	struct mlx5_flow_table_attr ft_attr = {};
58 
59 	ft_attr.autogroup.num_reserved_entries = 1;
60 	ft_attr.autogroup.max_num_groups = max_num_groups;
61 	ft_attr.max_fte = NUM_IPSEC_FTE;
62 	ft_attr.level = level;
63 	ft_attr.prio = prio;
64 
65 	return mlx5_create_auto_grouped_flow_table(ns, &ft_attr);
66 }
67 
/* Install the fixed catch-all rule of the RX status table: copy the
 * 7-bit IPsec decrypt syndrome into metadata reg B bits [24:30], count
 * the packet, and forward to @dest (two destinations: TTC default and a
 * flow counter, filled in by rx_create()).
 *
 * On success the rule and its modify header are stored in rx->status so
 * rx_destroy() can release them. Returns 0 or a negative errno.
 */
static int ipsec_status_rule(struct mlx5_core_dev *mdev,
			     struct mlx5e_ipsec_rx *rx,
			     struct mlx5_flow_destination *dest)
{
	u8 action[MLX5_UN_SZ_BYTES(set_add_copy_action_in_auto)] = {};
	struct mlx5_flow_act flow_act = {};
	struct mlx5_modify_hdr *modify_hdr;
	struct mlx5_flow_handle *fte;
	struct mlx5_flow_spec *spec;
	int err;

	spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
	if (!spec)
		return -ENOMEM;

	/* Action to copy 7 bit ipsec_syndrome to regB[24:30] */
	MLX5_SET(copy_action_in, action, action_type, MLX5_ACTION_TYPE_COPY);
	MLX5_SET(copy_action_in, action, src_field, MLX5_ACTION_IN_FIELD_IPSEC_SYNDROME);
	MLX5_SET(copy_action_in, action, src_offset, 0);
	MLX5_SET(copy_action_in, action, length, 7);
	MLX5_SET(copy_action_in, action, dst_field, MLX5_ACTION_IN_FIELD_METADATA_REG_B);
	MLX5_SET(copy_action_in, action, dst_offset, 24);

	modify_hdr = mlx5_modify_header_alloc(mdev, MLX5_FLOW_NAMESPACE_KERNEL,
					      1, action);

	if (IS_ERR(modify_hdr)) {
		err = PTR_ERR(modify_hdr);
		mlx5_core_err(mdev,
			      "fail to alloc ipsec copy modify_header_id err=%d\n", err);
		goto out_spec;
	}

	/* create fte: empty match (spec is zeroed), so every packet
	 * reaching this table hits the rule.
	 */
	flow_act.action = MLX5_FLOW_CONTEXT_ACTION_MOD_HDR |
			  MLX5_FLOW_CONTEXT_ACTION_FWD_DEST |
			  MLX5_FLOW_CONTEXT_ACTION_COUNT;
	flow_act.modify_hdr = modify_hdr;
	fte = mlx5_add_flow_rules(rx->ft.status, spec, &flow_act, dest, 2);
	if (IS_ERR(fte)) {
		err = PTR_ERR(fte);
		mlx5_core_err(mdev, "fail to add ipsec rx err copy rule err=%d\n", err);
		goto out;
	}

	kvfree(spec);
	rx->status.rule = fte;
	rx->status.modify_hdr = modify_hdr;
	return 0;

out:
	mlx5_modify_header_dealloc(mdev, modify_hdr);
out_spec:
	kvfree(spec);
	return err;
}
124 
/* Add a lowest-priority catch-all to @ft: a one-entry group on the
 * table's last (reserved) FTE plus a match-all rule forwarding to
 * @dest. Handles are returned through @miss for later teardown.
 * Returns 0 or a negative errno.
 */
static int ipsec_miss_create(struct mlx5_core_dev *mdev,
			     struct mlx5_flow_table *ft,
			     struct mlx5e_ipsec_miss *miss,
			     struct mlx5_flow_destination *dest)
{
	int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
	MLX5_DECLARE_FLOW_ACT(flow_act);
	struct mlx5_flow_spec *spec;
	u32 *flow_group_in;
	int err = 0;

	flow_group_in = kvzalloc(inlen, GFP_KERNEL);
	spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
	if (!flow_group_in || !spec) {
		err = -ENOMEM;
		goto out;
	}

	/* Create miss_group spanning only the last flow entry */
	MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, ft->max_fte - 1);
	MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, ft->max_fte - 1);
	miss->group = mlx5_create_flow_group(ft, flow_group_in);
	if (IS_ERR(miss->group)) {
		err = PTR_ERR(miss->group);
		mlx5_core_err(mdev, "fail to create IPsec miss_group err=%d\n",
			      err);
		goto out;
	}

	/* Create miss rule (spec is zeroed, so it matches everything) */
	miss->rule = mlx5_add_flow_rules(ft, spec, &flow_act, dest, 1);
	if (IS_ERR(miss->rule)) {
		mlx5_destroy_flow_group(miss->group);
		err = PTR_ERR(miss->rule);
		mlx5_core_err(mdev, "fail to create IPsec miss_rule err=%d\n",
			      err);
		goto out;
	}
out:
	kvfree(flow_group_in);
	kvfree(spec);
	return err;
}
168 
/* Tear down the RX pipeline in reverse creation order
 * (pol -> sa -> status). The caller must hold rx->ft.mutex and have
 * already restored the TTC default destination (see rx_ft_put()).
 */
static void rx_destroy(struct mlx5_core_dev *mdev, struct mlx5e_ipsec_rx *rx)
{
	mlx5_del_flow_rules(rx->pol.rule);
	mlx5_destroy_flow_group(rx->pol.group);
	mlx5_destroy_flow_table(rx->ft.pol);

	mlx5_del_flow_rules(rx->sa.rule);
	mlx5_destroy_flow_group(rx->sa.group);
	mlx5_destroy_flow_table(rx->ft.sa);

	mlx5_del_flow_rules(rx->status.rule);
	mlx5_modify_header_dealloc(mdev, rx->status.modify_hdr);
	mlx5_destroy_flow_table(rx->ft.status);
}
183 
/* Build the RX steering pipeline for one address family:
 *   policy table -> SA table -> status table -> TTC default dest.
 * Tables are created deepest-first so each level's destination exists
 * before rules pointing at it are added. On failure everything created
 * here is unwound. Caller must hold rx->ft.mutex.
 * Returns 0 or a negative errno.
 */
static int rx_create(struct mlx5_core_dev *mdev, struct mlx5e_ipsec *ipsec,
		     struct mlx5e_ipsec_rx *rx, u32 family)
{
	struct mlx5_flow_namespace *ns = mlx5e_fs_get_ns(ipsec->fs, false);
	struct mlx5_ttc_table *ttc = mlx5e_fs_get_ttc(ipsec->fs, false);
	struct mlx5_flow_destination dest[2];
	struct mlx5_flow_table *ft;
	int err;

	ft = ipsec_ft_create(ns, MLX5E_ACCEL_FS_ESP_FT_ERR_LEVEL,
			     MLX5E_NIC_PRIO, 1);
	if (IS_ERR(ft))
		return PTR_ERR(ft);

	rx->ft.status = ft;

	/* Status rule forwards to the TTC default dest and counts. */
	dest[0] = mlx5_ttc_get_default_dest(ttc, family2tt(family));
	dest[1].type = MLX5_FLOW_DESTINATION_TYPE_COUNTER;
	dest[1].counter_id = mlx5_fc_id(rx->fc->cnt);
	err = ipsec_status_rule(mdev, rx, dest);
	if (err)
		goto err_add;

	/* Create FT */
	ft = ipsec_ft_create(ns, MLX5E_ACCEL_FS_ESP_FT_LEVEL, MLX5E_NIC_PRIO,
			     2);
	if (IS_ERR(ft)) {
		err = PTR_ERR(ft);
		goto err_fs_ft;
	}
	rx->ft.sa = ft;

	/* SA miss reuses dest[] (TTC default + counter) from above. */
	err = ipsec_miss_create(mdev, rx->ft.sa, &rx->sa, dest);
	if (err)
		goto err_fs;

	ft = ipsec_ft_create(ns, MLX5E_ACCEL_FS_POL_FT_LEVEL, MLX5E_NIC_PRIO,
			     2);
	if (IS_ERR(ft)) {
		err = PTR_ERR(ft);
		goto err_pol_ft;
	}
	rx->ft.pol = ft;
	memset(dest, 0x00, 2 * sizeof(*dest));
	/* Policy-table miss falls through to the SA table. */
	dest[0].type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
	dest[0].ft = rx->ft.sa;
	err = ipsec_miss_create(mdev, rx->ft.pol, &rx->pol, dest);
	if (err)
		goto err_pol_miss;

	return 0;

err_pol_miss:
	mlx5_destroy_flow_table(rx->ft.pol);
err_pol_ft:
	mlx5_del_flow_rules(rx->sa.rule);
	mlx5_destroy_flow_group(rx->sa.group);
err_fs:
	mlx5_destroy_flow_table(rx->ft.sa);
err_fs_ft:
	mlx5_del_flow_rules(rx->status.rule);
	mlx5_modify_header_dealloc(mdev, rx->status.modify_hdr);
err_add:
	mlx5_destroy_flow_table(rx->ft.status);
	return err;
}
250 
251 static struct mlx5e_ipsec_rx *rx_ft_get(struct mlx5_core_dev *mdev,
252 					struct mlx5e_ipsec *ipsec, u32 family)
253 {
254 	struct mlx5_ttc_table *ttc = mlx5e_fs_get_ttc(ipsec->fs, false);
255 	struct mlx5_flow_destination dest = {};
256 	struct mlx5e_ipsec_rx *rx;
257 	int err = 0;
258 
259 	if (family == AF_INET)
260 		rx = ipsec->rx_ipv4;
261 	else
262 		rx = ipsec->rx_ipv6;
263 
264 	mutex_lock(&rx->ft.mutex);
265 	if (rx->ft.refcnt)
266 		goto skip;
267 
268 	/* create FT */
269 	err = rx_create(mdev, ipsec, rx, family);
270 	if (err)
271 		goto out;
272 
273 	/* connect */
274 	dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
275 	dest.ft = rx->ft.pol;
276 	mlx5_ttc_fwd_dest(ttc, family2tt(family), &dest);
277 
278 skip:
279 	rx->ft.refcnt++;
280 out:
281 	mutex_unlock(&rx->ft.mutex);
282 	if (err)
283 		return ERR_PTR(err);
284 	return rx;
285 }
286 
287 static void rx_ft_put(struct mlx5_core_dev *mdev, struct mlx5e_ipsec *ipsec,
288 		      u32 family)
289 {
290 	struct mlx5_ttc_table *ttc = mlx5e_fs_get_ttc(ipsec->fs, false);
291 	struct mlx5e_ipsec_rx *rx;
292 
293 	if (family == AF_INET)
294 		rx = ipsec->rx_ipv4;
295 	else
296 		rx = ipsec->rx_ipv6;
297 
298 	mutex_lock(&rx->ft.mutex);
299 	rx->ft.refcnt--;
300 	if (rx->ft.refcnt)
301 		goto out;
302 
303 	/* disconnect */
304 	mlx5_ttc_fwd_default_dest(ttc, family2tt(family));
305 
306 	/* remove FT */
307 	rx_destroy(mdev, rx);
308 
309 out:
310 	mutex_unlock(&rx->ft.mutex);
311 }
312 
313 /* IPsec TX flow steering */
/* IPsec TX flow steering */
/* Build the TX pipeline in tx->ns: policy table -> SA table, with the
 * policy-table miss forwarding to the SA table. Caller must hold
 * tx->ft.mutex. Returns 0 or a negative errno.
 */
static int tx_create(struct mlx5_core_dev *mdev, struct mlx5e_ipsec_tx *tx)
{
	struct mlx5_flow_destination dest = {};
	struct mlx5_flow_table *ft;
	int err;

	/* SA table: level 1, prio 0, up to 4 autogroups. */
	ft = ipsec_ft_create(tx->ns, 1, 0, 4);
	if (IS_ERR(ft))
		return PTR_ERR(ft);

	tx->ft.sa = ft;

	/* Policy table sits above it: level 0, up to 2 autogroups. */
	ft = ipsec_ft_create(tx->ns, 0, 0, 2);
	if (IS_ERR(ft)) {
		err = PTR_ERR(ft);
		goto err_pol_ft;
	}
	tx->ft.pol = ft;
	dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
	dest.ft = tx->ft.sa;
	err = ipsec_miss_create(mdev, tx->ft.pol, &tx->pol, &dest);
	if (err)
		goto err_pol_miss;
	return 0;

err_pol_miss:
	mlx5_destroy_flow_table(tx->ft.pol);
err_pol_ft:
	mlx5_destroy_flow_table(tx->ft.sa);
	return err;
}
345 
346 static struct mlx5e_ipsec_tx *tx_ft_get(struct mlx5_core_dev *mdev,
347 					struct mlx5e_ipsec *ipsec)
348 {
349 	struct mlx5e_ipsec_tx *tx = ipsec->tx;
350 	int err = 0;
351 
352 	mutex_lock(&tx->ft.mutex);
353 	if (tx->ft.refcnt)
354 		goto skip;
355 
356 	err = tx_create(mdev, tx);
357 	if (err)
358 		goto out;
359 skip:
360 	tx->ft.refcnt++;
361 out:
362 	mutex_unlock(&tx->ft.mutex);
363 	if (err)
364 		return ERR_PTR(err);
365 	return tx;
366 }
367 
368 static void tx_ft_put(struct mlx5e_ipsec *ipsec)
369 {
370 	struct mlx5e_ipsec_tx *tx = ipsec->tx;
371 
372 	mutex_lock(&tx->ft.mutex);
373 	tx->ft.refcnt--;
374 	if (tx->ft.refcnt)
375 		goto out;
376 
377 	mlx5_del_flow_rules(tx->pol.rule);
378 	mlx5_destroy_flow_group(tx->pol.group);
379 	mlx5_destroy_flow_table(tx->ft.pol);
380 	mlx5_destroy_flow_table(tx->ft.sa);
381 out:
382 	mutex_unlock(&tx->ft.mutex);
383 }
384 
/* Match exactly on outer IPv4 source/destination addresses
 * (and ip_version == 4). @saddr/@daddr are in network byte order.
 */
static void setup_fte_addr4(struct mlx5_flow_spec *spec, __be32 *saddr,
			    __be32 *daddr)
{
	spec->match_criteria_enable |= MLX5_MATCH_OUTER_HEADERS;

	MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, outer_headers.ip_version);
	MLX5_SET(fte_match_param, spec->match_value, outer_headers.ip_version, 4);

	memcpy(MLX5_ADDR_OF(fte_match_param, spec->match_value,
			    outer_headers.src_ipv4_src_ipv6.ipv4_layout.ipv4), saddr, 4);
	memcpy(MLX5_ADDR_OF(fte_match_param, spec->match_value,
			    outer_headers.dst_ipv4_dst_ipv6.ipv4_layout.ipv4), daddr, 4);
	MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria,
			 outer_headers.src_ipv4_src_ipv6.ipv4_layout.ipv4);
	MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria,
			 outer_headers.dst_ipv4_dst_ipv6.ipv4_layout.ipv4);
}
402 
/* Match exactly on outer IPv6 source/destination addresses
 * (and ip_version == 6). @saddr/@daddr point to 16-byte addresses.
 */
static void setup_fte_addr6(struct mlx5_flow_spec *spec, __be32 *saddr,
			    __be32 *daddr)
{
	spec->match_criteria_enable |= MLX5_MATCH_OUTER_HEADERS;

	MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, outer_headers.ip_version);
	MLX5_SET(fte_match_param, spec->match_value, outer_headers.ip_version, 6);

	memcpy(MLX5_ADDR_OF(fte_match_param, spec->match_value,
			    outer_headers.src_ipv4_src_ipv6.ipv6_layout.ipv6), saddr, 16);
	memcpy(MLX5_ADDR_OF(fte_match_param, spec->match_value,
			    outer_headers.dst_ipv4_dst_ipv6.ipv6_layout.ipv6), daddr, 16);
	/* Full /128 mask for both addresses. */
	memset(MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
			    outer_headers.src_ipv4_src_ipv6.ipv6_layout.ipv6), 0xff, 16);
	memset(MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
			    outer_headers.dst_ipv4_dst_ipv6.ipv6_layout.ipv6), 0xff, 16);
}
420 
421 static void setup_fte_esp(struct mlx5_flow_spec *spec)
422 {
423 	/* ESP header */
424 	spec->match_criteria_enable |= MLX5_MATCH_MISC_PARAMETERS;
425 
426 	MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, outer_headers.ip_protocol);
427 	MLX5_SET(fte_match_param, spec->match_value, outer_headers.ip_protocol, IPPROTO_ESP);
428 }
429 
/* Match exactly on the ESP SPI (a misc-parameters field). */
static void setup_fte_spi(struct mlx5_flow_spec *spec, u32 spi)
{
	/* SPI number */
	spec->match_criteria_enable |= MLX5_MATCH_MISC_PARAMETERS;

	MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, misc_parameters.outer_esp_spi);
	MLX5_SET(fte_match_param, spec->match_value, misc_parameters.outer_esp_spi, spi);
}
438 
/* Match only non-fragmented packets (outer frag bit must be clear). */
static void setup_fte_no_frags(struct mlx5_flow_spec *spec)
{
	/* Non fragmented */
	spec->match_criteria_enable |= MLX5_MATCH_OUTER_HEADERS;

	MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, outer_headers.frag);
	MLX5_SET(fte_match_param, spec->match_value, outer_headers.frag, 0);
}
447 
/* Match the IPsec marker in metadata reg A — presumably written by the
 * TX datapath into the send WQE (see MLX5_ETH_WQE_FT_META_IPSEC);
 * TODO(review): confirm against the WQE-building code.
 */
static void setup_fte_reg_a(struct mlx5_flow_spec *spec)
{
	/* Add IPsec indicator in metadata_reg_a */
	spec->match_criteria_enable |= MLX5_MATCH_MISC_PARAMETERS_2;

	MLX5_SET(fte_match_param, spec->match_criteria,
		 misc_parameters_2.metadata_reg_a, MLX5_ETH_WQE_FT_META_IPSEC);
	MLX5_SET(fte_match_param, spec->match_value,
		 misc_parameters_2.metadata_reg_a, MLX5_ETH_WQE_FT_META_IPSEC);
}
458 
/* Match the policy reqid written into reg C0 by tx_add_policy()'s
 * modify header (see setup_modify_header()).
 * NOTE(review): the match mask is set to @reqid itself rather than an
 * all-ones mask — confirm this is intended.
 */
static void setup_fte_reg_c0(struct mlx5_flow_spec *spec, u32 reqid)
{
	/* Pass policy check before choosing this SA */
	spec->match_criteria_enable |= MLX5_MATCH_MISC_PARAMETERS_2;

	MLX5_SET(fte_match_param, spec->match_criteria,
		 misc_parameters_2.metadata_reg_c_0, reqid);
	MLX5_SET(fte_match_param, spec->match_value,
		 misc_parameters_2.metadata_reg_c_0, reqid);
}
469 
/* Attach a SET modify-header action to @flow_act that writes all 32
 * bits of @val into metadata reg B (RX/ingress) or reg C0 (TX/egress),
 * selected by @dir. The caller owns the allocated modify header and
 * must dealloc it after deleting the rule.
 * Returns 0, -EINVAL for an unknown direction, or a negative errno.
 */
static int setup_modify_header(struct mlx5_core_dev *mdev, u32 val, u8 dir,
			       struct mlx5_flow_act *flow_act)
{
	u8 action[MLX5_UN_SZ_BYTES(set_add_copy_action_in_auto)] = {};
	enum mlx5_flow_namespace_type ns_type;
	struct mlx5_modify_hdr *modify_hdr;

	MLX5_SET(set_action_in, action, action_type, MLX5_ACTION_TYPE_SET);
	switch (dir) {
	case XFRM_DEV_OFFLOAD_IN:
		MLX5_SET(set_action_in, action, field,
			 MLX5_ACTION_IN_FIELD_METADATA_REG_B);
		ns_type = MLX5_FLOW_NAMESPACE_KERNEL;
		break;
	case XFRM_DEV_OFFLOAD_OUT:
		MLX5_SET(set_action_in, action, field,
			 MLX5_ACTION_IN_FIELD_METADATA_REG_C_0);
		ns_type = MLX5_FLOW_NAMESPACE_EGRESS;
		break;
	default:
		return -EINVAL;
	}

	MLX5_SET(set_action_in, action, data, val);
	MLX5_SET(set_action_in, action, offset, 0);
	MLX5_SET(set_action_in, action, length, 32);

	modify_hdr = mlx5_modify_header_alloc(mdev, ns_type, 1, action);
	if (IS_ERR(modify_hdr)) {
		mlx5_core_err(mdev, "Failed to allocate modify_header %ld\n",
			      PTR_ERR(modify_hdr));
		return PTR_ERR(modify_hdr);
	}

	flow_act->modify_hdr = modify_hdr;
	flow_act->action |= MLX5_FLOW_CONTEXT_ACTION_MOD_HDR;
	return 0;
}
508 
/* Attach an ESP packet-reformat action to @flow_act:
 *  - RX (XFRM_DEV_OFFLOAD_IN): delete the ESP transport header;
 *  - TX: insert an ESP transport header over IPv4/IPv6 carrying the
 *    SPI (network byte order) with the ICV size in param_0.
 * The caller owns the reformat object and must dealloc it after
 * deleting the rule. Returns 0 or a negative errno.
 */
static int setup_pkt_reformat(struct mlx5_core_dev *mdev,
			      struct mlx5_accel_esp_xfrm_attrs *attrs,
			      struct mlx5_flow_act *flow_act)
{
	enum mlx5_flow_namespace_type ns_type = MLX5_FLOW_NAMESPACE_EGRESS;
	struct mlx5_pkt_reformat_params reformat_params = {};
	struct mlx5_pkt_reformat *pkt_reformat;
	u8 reformatbf[16] = {};
	__be32 spi;

	if (attrs->dir == XFRM_DEV_OFFLOAD_IN) {
		reformat_params.type = MLX5_REFORMAT_TYPE_DEL_ESP_TRANSPORT;
		ns_type = MLX5_FLOW_NAMESPACE_KERNEL;
		goto cmd;
	}

	if (attrs->family == AF_INET)
		reformat_params.type =
			MLX5_REFORMAT_TYPE_ADD_ESP_TRANSPORT_OVER_IPV4;
	else
		reformat_params.type =
			MLX5_REFORMAT_TYPE_ADD_ESP_TRANSPORT_OVER_IPV6;

	/* convert to network format */
	spi = htonl(attrs->spi);
	memcpy(reformatbf, &spi, 4);

	reformat_params.param_0 = attrs->authsize;
	reformat_params.size = sizeof(reformatbf);
	reformat_params.data = &reformatbf;

cmd:
	pkt_reformat =
		mlx5_packet_reformat_alloc(mdev, &reformat_params, ns_type);
	if (IS_ERR(pkt_reformat))
		return PTR_ERR(pkt_reformat);

	flow_act->pkt_reformat = pkt_reformat;
	flow_act->action |= MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT;
	return 0;
}
550 
/* Install the RX rule for one SA in the family's SA table: match on
 * addresses + SPI + ESP (non-fragmented), decrypt with the SA's IPsec
 * object, write the object id (with BIT(31) set as a validity marker)
 * into reg B, and forward to the status table. Takes an RX pipeline
 * reference that is dropped on any failure.
 * Returns 0 or a negative errno.
 */
static int rx_add_rule(struct mlx5e_ipsec_sa_entry *sa_entry)
{
	struct mlx5_accel_esp_xfrm_attrs *attrs = &sa_entry->attrs;
	struct mlx5_core_dev *mdev = mlx5e_ipsec_sa2dev(sa_entry);
	struct mlx5e_ipsec *ipsec = sa_entry->ipsec;
	struct mlx5_flow_destination dest = {};
	struct mlx5_flow_act flow_act = {};
	struct mlx5_flow_handle *rule;
	struct mlx5_flow_spec *spec;
	struct mlx5e_ipsec_rx *rx;
	int err;

	rx = rx_ft_get(mdev, ipsec, attrs->family);
	if (IS_ERR(rx))
		return PTR_ERR(rx);

	spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
	if (!spec) {
		err = -ENOMEM;
		goto err_alloc;
	}

	if (attrs->family == AF_INET)
		setup_fte_addr4(spec, &attrs->saddr.a4, &attrs->daddr.a4);
	else
		setup_fte_addr6(spec, attrs->saddr.a6, attrs->daddr.a6);

	setup_fte_spi(spec, attrs->spi);
	setup_fte_esp(spec);
	setup_fte_no_frags(spec);

	err = setup_modify_header(mdev, sa_entry->ipsec_obj_id | BIT(31),
				  XFRM_DEV_OFFLOAD_IN, &flow_act);
	if (err)
		goto err_mod_header;

	switch (attrs->type) {
	case XFRM_DEV_OFFLOAD_PACKET:
		/* Full offload also strips the ESP transport header. */
		err = setup_pkt_reformat(mdev, attrs, &flow_act);
		if (err)
			goto err_pkt_reformat;
		break;
	default:
		break;
	}

	flow_act.crypto.type = MLX5_FLOW_CONTEXT_ENCRYPT_DECRYPT_TYPE_IPSEC;
	flow_act.crypto.obj_id = sa_entry->ipsec_obj_id;
	flow_act.flags |= FLOW_ACT_NO_APPEND;
	flow_act.action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST |
			   MLX5_FLOW_CONTEXT_ACTION_CRYPTO_DECRYPT;
	dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
	dest.ft = rx->ft.status;
	rule = mlx5_add_flow_rules(rx->ft.sa, spec, &flow_act, &dest, 1);
	if (IS_ERR(rule)) {
		err = PTR_ERR(rule);
		mlx5_core_err(mdev, "fail to add RX ipsec rule err=%d\n", err);
		goto err_add_flow;
	}
	kvfree(spec);

	sa_entry->ipsec_rule.rule = rule;
	sa_entry->ipsec_rule.modify_hdr = flow_act.modify_hdr;
	sa_entry->ipsec_rule.pkt_reformat = flow_act.pkt_reformat;
	return 0;

err_add_flow:
	/* pkt_reformat is only set for packet-offload SAs. */
	if (flow_act.pkt_reformat)
		mlx5_packet_reformat_dealloc(mdev, flow_act.pkt_reformat);
err_pkt_reformat:
	mlx5_modify_header_dealloc(mdev, flow_act.modify_hdr);
err_mod_header:
	kvfree(spec);
err_alloc:
	rx_ft_put(mdev, ipsec, attrs->family);
	return err;
}
628 
/* Install the TX rule for one SA in the SA table: match on addresses
 * (plus SPI/ESP/reg A for crypto offload, or the policy reqid in reg C0
 * for packet offload), encrypt with the SA's IPsec object, and count.
 * Takes a TX pipeline reference that is dropped on any failure.
 * Returns 0 or a negative errno.
 */
static int tx_add_rule(struct mlx5e_ipsec_sa_entry *sa_entry)
{
	struct mlx5_accel_esp_xfrm_attrs *attrs = &sa_entry->attrs;
	struct mlx5_core_dev *mdev = mlx5e_ipsec_sa2dev(sa_entry);
	struct mlx5e_ipsec *ipsec = sa_entry->ipsec;
	struct mlx5_flow_destination dest = {};
	struct mlx5_flow_act flow_act = {};
	struct mlx5_flow_handle *rule;
	struct mlx5_flow_spec *spec;
	struct mlx5e_ipsec_tx *tx;
	int err = 0;

	tx = tx_ft_get(mdev, ipsec);
	if (IS_ERR(tx))
		return PTR_ERR(tx);

	spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
	if (!spec) {
		err = -ENOMEM;
		goto err_alloc;
	}

	if (attrs->family == AF_INET)
		setup_fte_addr4(spec, &attrs->saddr.a4, &attrs->daddr.a4);
	else
		setup_fte_addr6(spec, attrs->saddr.a6, attrs->daddr.a6);

	setup_fte_no_frags(spec);

	switch (attrs->type) {
	case XFRM_DEV_OFFLOAD_CRYPTO:
		setup_fte_spi(spec, attrs->spi);
		setup_fte_esp(spec);
		setup_fte_reg_a(spec);
		break;
	case XFRM_DEV_OFFLOAD_PACKET:
		/* Packet offload also builds the ESP header in HW. */
		setup_fte_reg_c0(spec, attrs->reqid);
		err = setup_pkt_reformat(mdev, attrs, &flow_act);
		if (err)
			goto err_pkt_reformat;
		break;
	default:
		break;
	}

	flow_act.crypto.type = MLX5_FLOW_CONTEXT_ENCRYPT_DECRYPT_TYPE_IPSEC;
	flow_act.crypto.obj_id = sa_entry->ipsec_obj_id;
	flow_act.flags |= FLOW_ACT_NO_APPEND;
	flow_act.action |= MLX5_FLOW_CONTEXT_ACTION_ALLOW |
			   MLX5_FLOW_CONTEXT_ACTION_CRYPTO_ENCRYPT |
			   MLX5_FLOW_CONTEXT_ACTION_COUNT;
	dest.type = MLX5_FLOW_DESTINATION_TYPE_COUNTER;
	dest.counter_id = mlx5_fc_id(tx->fc->cnt);
	rule = mlx5_add_flow_rules(tx->ft.sa, spec, &flow_act, &dest, 1);
	if (IS_ERR(rule)) {
		err = PTR_ERR(rule);
		mlx5_core_err(mdev, "fail to add TX ipsec rule err=%d\n", err);
		goto err_add_flow;
	}

	kvfree(spec);
	sa_entry->ipsec_rule.rule = rule;
	sa_entry->ipsec_rule.pkt_reformat = flow_act.pkt_reformat;
	return 0;

err_add_flow:
	/* pkt_reformat is only set for packet-offload SAs. */
	if (flow_act.pkt_reformat)
		mlx5_packet_reformat_dealloc(mdev, flow_act.pkt_reformat);
err_pkt_reformat:
	kvfree(spec);
err_alloc:
	tx_ft_put(ipsec);
	return err;
}
703 
/* Install a TX policy rule in the policy table: match on addresses,
 * write the policy reqid into reg C0 (SA lookup verifies it via
 * setup_fte_reg_c0()), then either forward to the SA table (ALLOW) or
 * drop and count (BLOCK). Takes a TX pipeline reference that is
 * dropped on any failure. Returns 0 or a negative errno.
 */
static int tx_add_policy(struct mlx5e_ipsec_pol_entry *pol_entry)
{
	struct mlx5_accel_pol_xfrm_attrs *attrs = &pol_entry->attrs;
	struct mlx5_core_dev *mdev = mlx5e_ipsec_pol2dev(pol_entry);
	struct mlx5_flow_destination dest[2] = {};
	struct mlx5_flow_act flow_act = {};
	struct mlx5_flow_handle *rule;
	struct mlx5_flow_spec *spec;
	struct mlx5e_ipsec_tx *tx;
	int err, dstn = 0;

	tx = tx_ft_get(mdev, pol_entry->ipsec);
	if (IS_ERR(tx))
		return PTR_ERR(tx);

	spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
	if (!spec) {
		err = -ENOMEM;
		goto err_alloc;
	}

	if (attrs->family == AF_INET)
		setup_fte_addr4(spec, &attrs->saddr.a4, &attrs->daddr.a4);
	else
		setup_fte_addr6(spec, attrs->saddr.a6, attrs->daddr.a6);

	setup_fte_no_frags(spec);

	err = setup_modify_header(mdev, attrs->reqid, XFRM_DEV_OFFLOAD_OUT,
				  &flow_act);
	if (err)
		goto err_mod_header;

	switch (attrs->action) {
	case XFRM_POLICY_ALLOW:
		flow_act.action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
		break;
	case XFRM_POLICY_BLOCK:
		/* Blocked packets are dropped and accounted. */
		flow_act.action |= MLX5_FLOW_CONTEXT_ACTION_DROP |
				   MLX5_FLOW_CONTEXT_ACTION_COUNT;
		dest[dstn].type = MLX5_FLOW_DESTINATION_TYPE_COUNTER;
		dest[dstn].counter_id = mlx5_fc_id(tx->fc->drop);
		dstn++;
		break;
	default:
		WARN_ON(true);
		err = -EINVAL;
		goto err_action;
	}

	flow_act.flags |= FLOW_ACT_NO_APPEND;
	dest[dstn].ft = tx->ft.sa;
	dest[dstn].type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
	dstn++;
	rule = mlx5_add_flow_rules(tx->ft.pol, spec, &flow_act, dest, dstn);
	if (IS_ERR(rule)) {
		err = PTR_ERR(rule);
		mlx5_core_err(mdev, "fail to add TX ipsec rule err=%d\n", err);
		goto err_action;
	}

	kvfree(spec);
	pol_entry->ipsec_rule.rule = rule;
	pol_entry->ipsec_rule.modify_hdr = flow_act.modify_hdr;
	return 0;

err_action:
	mlx5_modify_header_dealloc(mdev, flow_act.modify_hdr);
err_mod_header:
	kvfree(spec);
err_alloc:
	tx_ft_put(pol_entry->ipsec);
	return err;
}
778 
/* Install an RX policy rule in the family's policy table: match on
 * addresses, then forward to the SA table (ALLOW) or drop and count
 * (BLOCK). Unlike TX, no modify header is needed. Takes an RX pipeline
 * reference that is dropped on any failure.
 * Returns 0 or a negative errno.
 */
static int rx_add_policy(struct mlx5e_ipsec_pol_entry *pol_entry)
{
	struct mlx5_accel_pol_xfrm_attrs *attrs = &pol_entry->attrs;
	struct mlx5_core_dev *mdev = mlx5e_ipsec_pol2dev(pol_entry);
	struct mlx5_flow_destination dest[2];
	struct mlx5_flow_act flow_act = {};
	struct mlx5_flow_handle *rule;
	struct mlx5_flow_spec *spec;
	struct mlx5e_ipsec_rx *rx;
	int err, dstn = 0;

	rx = rx_ft_get(mdev, pol_entry->ipsec, attrs->family);
	if (IS_ERR(rx))
		return PTR_ERR(rx);

	spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
	if (!spec) {
		err = -ENOMEM;
		goto err_alloc;
	}

	if (attrs->family == AF_INET)
		setup_fte_addr4(spec, &attrs->saddr.a4, &attrs->daddr.a4);
	else
		setup_fte_addr6(spec, attrs->saddr.a6, attrs->daddr.a6);

	setup_fte_no_frags(spec);

	switch (attrs->action) {
	case XFRM_POLICY_ALLOW:
		flow_act.action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
		break;
	case XFRM_POLICY_BLOCK:
		/* Blocked packets are dropped and accounted. */
		flow_act.action |= MLX5_FLOW_CONTEXT_ACTION_DROP | MLX5_FLOW_CONTEXT_ACTION_COUNT;
		dest[dstn].type = MLX5_FLOW_DESTINATION_TYPE_COUNTER;
		dest[dstn].counter_id = mlx5_fc_id(rx->fc->drop);
		dstn++;
		break;
	default:
		WARN_ON(true);
		err = -EINVAL;
		goto err_action;
	}

	flow_act.flags |= FLOW_ACT_NO_APPEND;
	dest[dstn].type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
	dest[dstn].ft = rx->ft.sa;
	dstn++;
	rule = mlx5_add_flow_rules(rx->ft.pol, spec, &flow_act, dest, dstn);
	if (IS_ERR(rule)) {
		err = PTR_ERR(rule);
		mlx5_core_err(mdev, "Fail to add RX IPsec policy rule err=%d\n", err);
		goto err_action;
	}

	kvfree(spec);
	pol_entry->ipsec_rule.rule = rule;
	return 0;

err_action:
	kvfree(spec);
err_alloc:
	rx_ft_put(mdev, pol_entry->ipsec, attrs->family);
	return err;
}
844 
845 static void ipsec_fs_destroy_counters(struct mlx5e_ipsec *ipsec)
846 {
847 	struct mlx5e_ipsec_rx *rx_ipv4 = ipsec->rx_ipv4;
848 	struct mlx5_core_dev *mdev = ipsec->mdev;
849 	struct mlx5e_ipsec_tx *tx = ipsec->tx;
850 
851 	mlx5_fc_destroy(mdev, tx->fc->drop);
852 	mlx5_fc_destroy(mdev, tx->fc->cnt);
853 	kfree(tx->fc);
854 	mlx5_fc_destroy(mdev, rx_ipv4->fc->drop);
855 	mlx5_fc_destroy(mdev, rx_ipv4->fc->cnt);
856 	kfree(rx_ipv4->fc);
857 }
858 
/* Allocate the RX counter pair (shared between IPv4 and IPv6) and the
 * TX counter pair. On failure, everything allocated so far is released
 * via the fall-through goto ladder. Returns 0 or a negative errno.
 */
static int ipsec_fs_init_counters(struct mlx5e_ipsec *ipsec)
{
	struct mlx5e_ipsec_rx *rx_ipv4 = ipsec->rx_ipv4;
	struct mlx5e_ipsec_rx *rx_ipv6 = ipsec->rx_ipv6;
	struct mlx5_core_dev *mdev = ipsec->mdev;
	struct mlx5e_ipsec_tx *tx = ipsec->tx;
	struct mlx5e_ipsec_fc *fc;
	struct mlx5_fc *counter;
	int err;

	fc = kzalloc(sizeof(*rx_ipv4->fc), GFP_KERNEL);
	if (!fc)
		return -ENOMEM;

	/* Both IPv4 and IPv6 point to same flow counters struct. */
	rx_ipv4->fc = fc;
	rx_ipv6->fc = fc;
	counter = mlx5_fc_create(mdev, false);
	if (IS_ERR(counter)) {
		err = PTR_ERR(counter);
		goto err_rx_cnt;
	}

	fc->cnt = counter;
	counter = mlx5_fc_create(mdev, false);
	if (IS_ERR(counter)) {
		err = PTR_ERR(counter);
		goto err_rx_drop;
	}

	fc->drop = counter;
	/* Now the TX pair; fc is re-used for the TX struct. */
	fc = kzalloc(sizeof(*tx->fc), GFP_KERNEL);
	if (!fc) {
		err = -ENOMEM;
		goto err_tx_fc;
	}

	tx->fc = fc;
	counter = mlx5_fc_create(mdev, false);
	if (IS_ERR(counter)) {
		err = PTR_ERR(counter);
		goto err_tx_cnt;
	}

	fc->cnt = counter;
	counter = mlx5_fc_create(mdev, false);
	if (IS_ERR(counter)) {
		err = PTR_ERR(counter);
		goto err_tx_drop;
	}

	fc->drop = counter;
	return 0;

	/* Labels fall through: each entry point undoes one more step. */
err_tx_drop:
	mlx5_fc_destroy(mdev, tx->fc->cnt);
err_tx_cnt:
	kfree(tx->fc);
err_tx_fc:
	mlx5_fc_destroy(mdev, rx_ipv4->fc->drop);
err_rx_drop:
	mlx5_fc_destroy(mdev, rx_ipv4->fc->cnt);
err_rx_cnt:
	kfree(rx_ipv4->fc);
	return err;
}
925 
926 void mlx5e_accel_ipsec_fs_read_stats(struct mlx5e_priv *priv, void *ipsec_stats)
927 {
928 	struct mlx5_core_dev *mdev = priv->mdev;
929 	struct mlx5e_ipsec *ipsec = priv->ipsec;
930 	struct mlx5e_ipsec_hw_stats *stats;
931 	struct mlx5e_ipsec_fc *fc;
932 
933 	stats = (struct mlx5e_ipsec_hw_stats *)ipsec_stats;
934 
935 	stats->ipsec_rx_pkts = 0;
936 	stats->ipsec_rx_bytes = 0;
937 	stats->ipsec_rx_drop_pkts = 0;
938 	stats->ipsec_rx_drop_bytes = 0;
939 	stats->ipsec_tx_pkts = 0;
940 	stats->ipsec_tx_bytes = 0;
941 	stats->ipsec_tx_drop_pkts = 0;
942 	stats->ipsec_tx_drop_bytes = 0;
943 
944 	fc = ipsec->rx_ipv4->fc;
945 	mlx5_fc_query(mdev, fc->cnt, &stats->ipsec_rx_pkts, &stats->ipsec_rx_bytes);
946 	mlx5_fc_query(mdev, fc->drop, &stats->ipsec_rx_drop_pkts,
947 		      &stats->ipsec_rx_drop_bytes);
948 
949 	fc = ipsec->tx->fc;
950 	mlx5_fc_query(mdev, fc->cnt, &stats->ipsec_tx_pkts, &stats->ipsec_tx_bytes);
951 	mlx5_fc_query(mdev, fc->drop, &stats->ipsec_tx_drop_pkts,
952 		      &stats->ipsec_tx_drop_bytes);
953 }
954 
955 int mlx5e_accel_ipsec_fs_add_rule(struct mlx5e_ipsec_sa_entry *sa_entry)
956 {
957 	if (sa_entry->attrs.dir == XFRM_DEV_OFFLOAD_OUT)
958 		return tx_add_rule(sa_entry);
959 
960 	return rx_add_rule(sa_entry);
961 }
962 
963 void mlx5e_accel_ipsec_fs_del_rule(struct mlx5e_ipsec_sa_entry *sa_entry)
964 {
965 	struct mlx5e_ipsec_rule *ipsec_rule = &sa_entry->ipsec_rule;
966 	struct mlx5_core_dev *mdev = mlx5e_ipsec_sa2dev(sa_entry);
967 
968 	mlx5_del_flow_rules(ipsec_rule->rule);
969 
970 	if (ipsec_rule->pkt_reformat)
971 		mlx5_packet_reformat_dealloc(mdev, ipsec_rule->pkt_reformat);
972 
973 	if (sa_entry->attrs.dir == XFRM_DEV_OFFLOAD_OUT) {
974 		tx_ft_put(sa_entry->ipsec);
975 		return;
976 	}
977 
978 	mlx5_modify_header_dealloc(mdev, ipsec_rule->modify_hdr);
979 	rx_ft_put(mdev, sa_entry->ipsec, sa_entry->attrs.family);
980 }
981 
982 int mlx5e_accel_ipsec_fs_add_pol(struct mlx5e_ipsec_pol_entry *pol_entry)
983 {
984 	if (pol_entry->attrs.dir == XFRM_DEV_OFFLOAD_OUT)
985 		return tx_add_policy(pol_entry);
986 
987 	return rx_add_policy(pol_entry);
988 }
989 
990 void mlx5e_accel_ipsec_fs_del_pol(struct mlx5e_ipsec_pol_entry *pol_entry)
991 {
992 	struct mlx5e_ipsec_rule *ipsec_rule = &pol_entry->ipsec_rule;
993 	struct mlx5_core_dev *mdev = mlx5e_ipsec_pol2dev(pol_entry);
994 
995 	mlx5_del_flow_rules(ipsec_rule->rule);
996 
997 	if (pol_entry->attrs.dir == XFRM_DEV_OFFLOAD_IN) {
998 		rx_ft_put(mdev, pol_entry->ipsec, pol_entry->attrs.family);
999 		return;
1000 	}
1001 
1002 	mlx5_modify_header_dealloc(mdev, ipsec_rule->modify_hdr);
1003 	tx_ft_put(pol_entry->ipsec);
1004 }
1005 
1006 void mlx5e_accel_ipsec_fs_cleanup(struct mlx5e_ipsec *ipsec)
1007 {
1008 	if (!ipsec->tx)
1009 		return;
1010 
1011 	ipsec_fs_destroy_counters(ipsec);
1012 	mutex_destroy(&ipsec->tx->ft.mutex);
1013 	WARN_ON(ipsec->tx->ft.refcnt);
1014 	kfree(ipsec->tx);
1015 
1016 	mutex_destroy(&ipsec->rx_ipv4->ft.mutex);
1017 	WARN_ON(ipsec->rx_ipv4->ft.refcnt);
1018 	kfree(ipsec->rx_ipv4);
1019 
1020 	mutex_destroy(&ipsec->rx_ipv6->ft.mutex);
1021 	WARN_ON(ipsec->rx_ipv6->ft.refcnt);
1022 	kfree(ipsec->rx_ipv6);
1023 }
1024 
/* One-time setup of the IPsec steering context: check that the egress
 * IPsec namespace is supported, then allocate the TX and RX (v4/v6)
 * state and their flow counters. Flow tables themselves are created
 * lazily on first rule/policy add (see rx_ft_get()/tx_ft_get()).
 * Returns 0, -EOPNOTSUPP when the namespace is missing, or -errno.
 */
int mlx5e_accel_ipsec_fs_init(struct mlx5e_ipsec *ipsec)
{
	struct mlx5_flow_namespace *ns;
	int err = -ENOMEM;	/* default for the kzalloc failure gotos */

	ns = mlx5_get_flow_namespace(ipsec->mdev,
				     MLX5_FLOW_NAMESPACE_EGRESS_IPSEC);
	if (!ns)
		return -EOPNOTSUPP;

	ipsec->tx = kzalloc(sizeof(*ipsec->tx), GFP_KERNEL);
	if (!ipsec->tx)
		return -ENOMEM;

	ipsec->rx_ipv4 = kzalloc(sizeof(*ipsec->rx_ipv4), GFP_KERNEL);
	if (!ipsec->rx_ipv4)
		goto err_rx_ipv4;

	ipsec->rx_ipv6 = kzalloc(sizeof(*ipsec->rx_ipv6), GFP_KERNEL);
	if (!ipsec->rx_ipv6)
		goto err_rx_ipv6;

	err = ipsec_fs_init_counters(ipsec);
	if (err)
		goto err_counters;

	mutex_init(&ipsec->tx->ft.mutex);
	mutex_init(&ipsec->rx_ipv4->ft.mutex);
	mutex_init(&ipsec->rx_ipv6->ft.mutex);
	ipsec->tx->ns = ns;

	return 0;

err_counters:
	kfree(ipsec->rx_ipv6);
err_rx_ipv6:
	kfree(ipsec->rx_ipv4);
err_rx_ipv4:
	kfree(ipsec->tx);
	return err;
}
1066