// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
/* Copyright (c) 2017, Mellanox Technologies inc. All rights reserved. */

#include "mlx5_core.h"
#include "en.h"
#include "ipsec.h"
#include "lib/crypto.h"

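/* Offsets of the hard (remove_flow_pkt_cnt) and soft (remove_flow_soft_lft)
 * lifetime counters inside the IPsec ASO context; they are used as the
 * data_offset of the bitwise ASO operations built below.
 */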
enum {
	MLX5_IPSEC_ASO_REMOVE_FLOW_PKT_CNT_OFFSET,
	MLX5_IPSEC_ASO_REMOVE_FLOW_SOFT_LFT_OFFSET,
};

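/* Report the IPsec offload capabilities of @mdev as a bitmask of
 * MLX5_IPSEC_CAP_* flags. Returns 0 when IPsec offload is not possible at
 * all: missing general object support, flow table encrypt/decrypt actions
 * or AES-GCM-128 crypto support.
 */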
u32 mlx5_ipsec_device_caps(struct mlx5_core_dev *mdev)
{
	u32 caps = 0;

	if (!MLX5_CAP_GEN(mdev, ipsec_offload))
		return 0;

	if (!MLX5_CAP_GEN(mdev, log_max_dek))
		return 0;

	if (!(MLX5_CAP_GEN_64(mdev, general_obj_types) &
	    MLX5_HCA_CAP_GENERAL_OBJECT_TYPES_IPSEC))
		return 0;

	if (!MLX5_CAP_FLOWTABLE_NIC_TX(mdev, ipsec_encrypt) ||
	    !MLX5_CAP_FLOWTABLE_NIC_RX(mdev, ipsec_decrypt))
		return 0;

	if (!MLX5_CAP_IPSEC(mdev, ipsec_crypto_esp_aes_gcm_128_encrypt) ||
	    !MLX5_CAP_IPSEC(mdev, ipsec_crypto_esp_aes_gcm_128_decrypt))
		return 0;

	if (MLX5_CAP_IPSEC(mdev, ipsec_crypto_offload) &&
	    MLX5_CAP_ETH(mdev, insert_trailer) && MLX5_CAP_ETH(mdev, swp))
		caps |= MLX5_IPSEC_CAP_CRYPTO;

	if (MLX5_CAP_IPSEC(mdev, ipsec_full_offload)) {
		if (MLX5_CAP_FLOWTABLE_NIC_TX(mdev,
					      reformat_add_esp_trasport) &&
		    MLX5_CAP_FLOWTABLE_NIC_RX(mdev,
					      reformat_del_esp_trasport) &&
		    MLX5_CAP_FLOWTABLE_NIC_RX(mdev, decap))
			caps |= MLX5_IPSEC_CAP_PACKET_OFFLOAD;

		if (MLX5_CAP_FLOWTABLE_NIC_TX(mdev, ignore_flow_level) &&
		    MLX5_CAP_FLOWTABLE_NIC_RX(mdev, ignore_flow_level))
			caps |= MLX5_IPSEC_CAP_PRIO;

		if (MLX5_CAP_FLOWTABLE_NIC_TX(mdev,
					      reformat_l2_to_l3_esp_tunnel) &&
		    MLX5_CAP_FLOWTABLE_NIC_RX(mdev,
					      reformat_l3_esp_tunnel_to_l2))
			caps |= MLX5_IPSEC_CAP_TUNNEL;
	}

	if (mlx5_get_roce_state(mdev) &&
	    MLX5_CAP_GEN_2(mdev, flow_table_type_2_type) & MLX5_FT_NIC_RX_2_NIC_RX_RDMA &&
	    MLX5_CAP_GEN_2(mdev, flow_table_type_2_type) & MLX5_FT_NIC_TX_RDMA_2_NIC_TX)
		caps |= MLX5_IPSEC_CAP_ROCE;

	if (!caps)
		return 0;

	if (MLX5_CAP_IPSEC(mdev, ipsec_esn))
		caps |= MLX5_IPSEC_CAP_ESN;

	/* We can accommodate up to 2^24 different IPsec objects
	 * because we use up to 24 bits of flow table metadata
	 * to hold the IPsec object's unique handle.
	 */
	WARN_ON_ONCE(MLX5_CAP_IPSEC(mdev, log_max_ipsec_offload) > 24);
	return caps;
}
EXPORT_SYMBOL_GPL(mlx5_ipsec_device_caps);

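/* Fill the ASO part of the IPsec object for packet offload: arm the ESN
 * event when extended sequence numbers are in use, enable anti-replay
 * protection on RX and sequence number increment on TX, and arm the
 * soft/hard lifetime events when finite packet limits were requested.
 */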
static void mlx5e_ipsec_packet_setup(void *obj, u32 pdn,
				     struct mlx5_accel_esp_xfrm_attrs *attrs)
{
	void *aso_ctx;

	aso_ctx = MLX5_ADDR_OF(ipsec_obj, obj, ipsec_aso);
	if (attrs->replay_esn.trigger) {
		MLX5_SET(ipsec_aso, aso_ctx, esn_event_arm, 1);

		if (attrs->dir == XFRM_DEV_OFFLOAD_IN) {
			MLX5_SET(ipsec_aso, aso_ctx, window_sz,
				 attrs->replay_esn.replay_window / 64);
			MLX5_SET(ipsec_aso, aso_ctx, mode,
				 MLX5_IPSEC_ASO_REPLAY_PROTECTION);
		}
		MLX5_SET(ipsec_aso, aso_ctx, mode_parameter,
			 attrs->replay_esn.esn);
	}

	/* ASO context */
	MLX5_SET(ipsec_obj, obj, ipsec_aso_access_pd, pdn);
	MLX5_SET(ipsec_obj, obj, full_offload, 1);
	MLX5_SET(ipsec_aso, aso_ctx, valid, 1);
	/* MLX5_IPSEC_ASO_REG_C_4_5 is a type C register that is used
	 * in flow steering to perform matching against. Please be
	 * aware that this register was chosen arbitrarily and can't
	 * be used in other places as long as IPsec packet offload
	 * is active.
	 */
	MLX5_SET(ipsec_obj, obj, aso_return_reg, MLX5_IPSEC_ASO_REG_C_4_5);
	if (attrs->dir == XFRM_DEV_OFFLOAD_OUT)
		MLX5_SET(ipsec_aso, aso_ctx, mode, MLX5_IPSEC_ASO_INC_SN);

	if (attrs->lft.hard_packet_limit != XFRM_INF) {
		MLX5_SET(ipsec_aso, aso_ctx, remove_flow_pkt_cnt,
			 attrs->lft.hard_packet_limit);
		MLX5_SET(ipsec_aso, aso_ctx, hard_lft_arm, 1);
	}

	if (attrs->lft.soft_packet_limit != XFRM_INF) {
		MLX5_SET(ipsec_aso, aso_ctx, remove_flow_soft_lft,
			 attrs->lft.soft_packet_limit);

		MLX5_SET(ipsec_aso, aso_ctx, soft_lft_arm, 1);
	}
}

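/* Build and execute a CREATE_GENERAL_OBJECT command for an IPSEC object:
 * program the AES-GCM salt and implicit IV, the ESN state and the DEK
 * number, then store the returned object ID for later modify/destroy
 * commands and flow steering.
 */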
static int mlx5_create_ipsec_obj(struct mlx5e_ipsec_sa_entry *sa_entry)
{
	struct mlx5_accel_esp_xfrm_attrs *attrs = &sa_entry->attrs;
	struct mlx5_core_dev *mdev = mlx5e_ipsec_sa2dev(sa_entry);
	struct aes_gcm_keymat *aes_gcm = &attrs->aes_gcm;
	u32 out[MLX5_ST_SZ_DW(general_obj_out_cmd_hdr)];
	u32 in[MLX5_ST_SZ_DW(create_ipsec_obj_in)] = {};
	void *obj, *salt_p, *salt_iv_p;
	struct mlx5e_hw_objs *res;
	int err;

	obj = MLX5_ADDR_OF(create_ipsec_obj_in, in, ipsec_object);

	/* salt and seq_iv */
	salt_p = MLX5_ADDR_OF(ipsec_obj, obj, salt);
	memcpy(salt_p, &aes_gcm->salt, sizeof(aes_gcm->salt));

	MLX5_SET(ipsec_obj, obj, icv_length, MLX5_IPSEC_OBJECT_ICV_LEN_16B);
	salt_iv_p = MLX5_ADDR_OF(ipsec_obj, obj, implicit_iv);
	memcpy(salt_iv_p, &aes_gcm->seq_iv, sizeof(aes_gcm->seq_iv));
	/* esn */
	if (attrs->replay_esn.trigger) {
		MLX5_SET(ipsec_obj, obj, esn_en, 1);
		MLX5_SET(ipsec_obj, obj, esn_msb, attrs->replay_esn.esn_msb);
		MLX5_SET(ipsec_obj, obj, esn_overlap, attrs->replay_esn.overlap);
	}

	MLX5_SET(ipsec_obj, obj, dekn, sa_entry->enc_key_id);

	/* general object fields set */
	MLX5_SET(general_obj_in_cmd_hdr, in, opcode,
		 MLX5_CMD_OP_CREATE_GENERAL_OBJECT);
	MLX5_SET(general_obj_in_cmd_hdr, in, obj_type,
		 MLX5_GENERAL_OBJECT_TYPES_IPSEC);

	res = &mdev->mlx5e_res.hw_objs;
	if (attrs->type == XFRM_DEV_OFFLOAD_PACKET)
		mlx5e_ipsec_packet_setup(obj, res->pdn, attrs);

	err = mlx5_cmd_exec(mdev, in, sizeof(in), out, sizeof(out));
	if (!err)
		sa_entry->ipsec_obj_id =
			MLX5_GET(general_obj_out_cmd_hdr, out, obj_id);

	return err;
}

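/* Destroy the IPsec object created by mlx5_create_ipsec_obj(). */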
static void mlx5_destroy_ipsec_obj(struct mlx5e_ipsec_sa_entry *sa_entry)
{
	struct mlx5_core_dev *mdev = mlx5e_ipsec_sa2dev(sa_entry);
	u32 in[MLX5_ST_SZ_DW(general_obj_in_cmd_hdr)] = {};
	u32 out[MLX5_ST_SZ_DW(general_obj_out_cmd_hdr)];

	MLX5_SET(general_obj_in_cmd_hdr, in, opcode,
		 MLX5_CMD_OP_DESTROY_GENERAL_OBJECT);
	MLX5_SET(general_obj_in_cmd_hdr, in, obj_type,
		 MLX5_GENERAL_OBJECT_TYPES_IPSEC);
	MLX5_SET(general_obj_in_cmd_hdr, in, obj_id, sa_entry->ipsec_obj_id);

	mlx5_cmd_exec(mdev, in, sizeof(in), out, sizeof(out));
}

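/* Create the HW SA context in two steps: load the AES-GCM key as a DEK,
 * then create the IPsec object referencing it. The DEK is destroyed if
 * the object creation fails.
 */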
int mlx5_ipsec_create_sa_ctx(struct mlx5e_ipsec_sa_entry *sa_entry)
{
	struct aes_gcm_keymat *aes_gcm = &sa_entry->attrs.aes_gcm;
	struct mlx5_core_dev *mdev = mlx5e_ipsec_sa2dev(sa_entry);
	int err;

	/* key */
	err = mlx5_create_encryption_key(mdev, aes_gcm->aes_key,
					 aes_gcm->key_len / BITS_PER_BYTE,
					 MLX5_ACCEL_OBJ_IPSEC_KEY,
					 &sa_entry->enc_key_id);
	if (err) {
		mlx5_core_dbg(mdev, "Failed to create encryption key (err = %d)\n", err);
		return err;
	}

	err = mlx5_create_ipsec_obj(sa_entry);
	if (err) {
		mlx5_core_dbg(mdev, "Failed to create IPsec object (err = %d)\n", err);
		goto err_enc_key;
	}

	return 0;

err_enc_key:
	mlx5_destroy_encryption_key(mdev, sa_entry->enc_key_id);
	return err;
}

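/* Release the HW SA context: the IPsec object first, then its DEK. */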
void mlx5_ipsec_free_sa_ctx(struct mlx5e_ipsec_sa_entry *sa_entry)
{
	struct mlx5_core_dev *mdev = mlx5e_ipsec_sa2dev(sa_entry);

	mlx5_destroy_ipsec_obj(sa_entry);
	mlx5_destroy_encryption_key(mdev, sa_entry->enc_key_id);
}

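/* Update the ESN state (esn_msb and esn_overlap) of an existing IPsec
 * object. The object is queried first because firmware advertises in
 * modify_field_select which fields it allows to be modified.
 */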
static int mlx5_modify_ipsec_obj(struct mlx5e_ipsec_sa_entry *sa_entry,
				 const struct mlx5_accel_esp_xfrm_attrs *attrs)
{
	struct mlx5_core_dev *mdev = mlx5e_ipsec_sa2dev(sa_entry);
	u32 in[MLX5_ST_SZ_DW(modify_ipsec_obj_in)] = {};
	u32 out[MLX5_ST_SZ_DW(query_ipsec_obj_out)];
	u64 modify_field_select = 0;
	u64 general_obj_types;
	void *obj;
	int err;

	general_obj_types = MLX5_CAP_GEN_64(mdev, general_obj_types);
	if (!(general_obj_types & MLX5_HCA_CAP_GENERAL_OBJECT_TYPES_IPSEC))
		return -EINVAL;

	/* general object fields set */
	MLX5_SET(general_obj_in_cmd_hdr, in, opcode, MLX5_CMD_OP_QUERY_GENERAL_OBJECT);
	MLX5_SET(general_obj_in_cmd_hdr, in, obj_type, MLX5_GENERAL_OBJECT_TYPES_IPSEC);
	MLX5_SET(general_obj_in_cmd_hdr, in, obj_id, sa_entry->ipsec_obj_id);
	err = mlx5_cmd_exec(mdev, in, sizeof(in), out, sizeof(out));
	if (err) {
		mlx5_core_err(mdev, "Query IPsec object failed (Object id %d), err = %d\n",
			      sa_entry->ipsec_obj_id, err);
		return err;
	}

	obj = MLX5_ADDR_OF(query_ipsec_obj_out, out, ipsec_object);
	modify_field_select = MLX5_GET64(ipsec_obj, obj, modify_field_select);

	/* esn */
	if (!(modify_field_select & MLX5_MODIFY_IPSEC_BITMASK_ESN_OVERLAP) ||
	    !(modify_field_select & MLX5_MODIFY_IPSEC_BITMASK_ESN_MSB))
		return -EOPNOTSUPP;

	obj = MLX5_ADDR_OF(modify_ipsec_obj_in, in, ipsec_object);
	MLX5_SET64(ipsec_obj, obj, modify_field_select,
		   MLX5_MODIFY_IPSEC_BITMASK_ESN_OVERLAP |
			   MLX5_MODIFY_IPSEC_BITMASK_ESN_MSB);
	MLX5_SET(ipsec_obj, obj, esn_msb, attrs->replay_esn.esn_msb);
	MLX5_SET(ipsec_obj, obj, esn_overlap, attrs->replay_esn.overlap);

	/* general object fields set */
	MLX5_SET(general_obj_in_cmd_hdr, in, opcode, MLX5_CMD_OP_MODIFY_GENERAL_OBJECT);

	return mlx5_cmd_exec(mdev, in, sizeof(in), out, sizeof(out));
}

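/* Push the new ESN state to HW; the SA attributes are updated only when
 * the modify command succeeds, so SW and HW state stay in sync.
 */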
void mlx5_accel_esp_modify_xfrm(struct mlx5e_ipsec_sa_entry *sa_entry,
				const struct mlx5_accel_esp_xfrm_attrs *attrs)
{
	int err;

	err = mlx5_modify_ipsec_obj(sa_entry, attrs);
	if (err)
		return;

	memcpy(&sa_entry->attrs, attrs, sizeof(sa_entry->attrs));
}

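/* Post a modifying ASO WQE: use the 64-bit bitwise data/mask mode and make
 * both conditions always true so the write is applied unconditionally.
 */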
static void mlx5e_ipsec_aso_update(struct mlx5e_ipsec_sa_entry *sa_entry,
				   struct mlx5_wqe_aso_ctrl_seg *data)
{
	data->data_mask_mode = MLX5_ASO_DATA_MASK_MODE_BITWISE_64BIT << 6;
	data->condition_1_0_operand = MLX5_ASO_ALWAYS_TRUE |
				      MLX5_ASO_ALWAYS_TRUE << 4;

	mlx5e_ipsec_aso_query(sa_entry, data);
}

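/* Handle an ESN event. A mode_parameter below MLX5E_IPSEC_ESN_SCOPE_MID
 * means the low 32 bits of the sequence number wrapped around, so advance
 * esn_msb and clear the overlap bit; otherwise only mark the overlap.
 * The bitwise write below is assumed to re-arm the esn_event_arm bit
 * (bit 54) in the ASO context.
 */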
static void mlx5e_ipsec_update_esn_state(struct mlx5e_ipsec_sa_entry *sa_entry,
					 u32 mode_param)
{
	struct mlx5_accel_esp_xfrm_attrs attrs = {};
	struct mlx5_wqe_aso_ctrl_seg data = {};

	if (mode_param < MLX5E_IPSEC_ESN_SCOPE_MID) {
		sa_entry->esn_state.esn_msb++;
		sa_entry->esn_state.overlap = 0;
	} else {
		sa_entry->esn_state.overlap = 1;
	}

	mlx5e_ipsec_build_accel_xfrm_attrs(sa_entry, &attrs);
	mlx5_accel_esp_modify_xfrm(sa_entry, &attrs);

	data.data_offset_condition_operand =
		MLX5_IPSEC_ASO_REMOVE_FLOW_PKT_CNT_OFFSET;
	data.bitwise_data = cpu_to_be64(BIT_ULL(54));
	data.data_mask = data.bitwise_data;

	mlx5e_ipsec_aso_update(sa_entry, &data);
}

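/* Start another hard lifetime round; the bits written here (57 and 31)
 * are assumed to re-arm the hard lifetime event and reset the high bit
 * of the packet counter.
 */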
static void mlx5e_ipsec_aso_update_hard(struct mlx5e_ipsec_sa_entry *sa_entry)
{
	struct mlx5_wqe_aso_ctrl_seg data = {};

	data.data_offset_condition_operand =
		MLX5_IPSEC_ASO_REMOVE_FLOW_PKT_CNT_OFFSET;
	data.bitwise_data = cpu_to_be64(BIT_ULL(57) + BIT_ULL(31));
	data.data_mask = data.bitwise_data;
	mlx5e_ipsec_aso_update(sa_entry, &data);
}

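/* Overwrite the 32-bit soft lifetime counter with @val (the mask covers
 * the full 32 bits).
 */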
static void mlx5e_ipsec_aso_update_soft(struct mlx5e_ipsec_sa_entry *sa_entry,
					u32 val)
{
	struct mlx5_wqe_aso_ctrl_seg data = {};

	data.data_offset_condition_operand =
		MLX5_IPSEC_ASO_REMOVE_FLOW_SOFT_LFT_OFFSET;
	data.bitwise_data = cpu_to_be64(val);
	data.data_mask = cpu_to_be64(U32_MAX);
	mlx5e_ipsec_aso_update(sa_entry, &data);
}

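/* The lifetime fields in the ASO context are narrower than the 64-bit
 * XFRM packet limits, so large limits are emulated in "rounds": each
 * hard limit event re-arms the counter until the configured number of
 * rounds has elapsed, and xfrm_state_check_expire() signals the soft and
 * hard expirations to the stack.
 */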
static void mlx5e_ipsec_handle_limits(struct mlx5e_ipsec_sa_entry *sa_entry)
{
	struct mlx5_accel_esp_xfrm_attrs *attrs = &sa_entry->attrs;
	struct mlx5e_ipsec *ipsec = sa_entry->ipsec;
	struct mlx5e_ipsec_aso *aso = ipsec->aso;
	bool soft_arm, hard_arm;
	u64 hard_cnt;

	lockdep_assert_held(&sa_entry->x->lock);

	soft_arm = !MLX5_GET(ipsec_aso, aso->ctx, soft_lft_arm);
	hard_arm = !MLX5_GET(ipsec_aso, aso->ctx, hard_lft_arm);
	if (!soft_arm && !hard_arm)
		/* It is not a lifetime event */
		return;

	hard_cnt = MLX5_GET(ipsec_aso, aso->ctx, remove_flow_pkt_cnt);
	if (!hard_cnt || hard_arm) {
		/* It is possible to see the packet counter equal to zero
		 * without the hard limit event armed. Such a situation can
		 * occur if the counter decreased while we were handling the
		 * soft limit event.
		 *
		 * However, it would be a HW/FW bug if the hard limit event
		 * is raised and the packet counter is not zero.
		 */
		WARN_ON_ONCE(hard_arm && hard_cnt);

		/* Notify about hard limit */
		xfrm_state_check_expire(sa_entry->x);
		return;
	}

	/* We are handling a soft limit event. */
	if (!sa_entry->limits.soft_limit_hit &&
	    sa_entry->limits.round == attrs->lft.numb_rounds_soft) {
		sa_entry->limits.soft_limit_hit = true;
		/* Notify about soft limit */
		xfrm_state_check_expire(sa_entry->x);

		if (sa_entry->limits.round == attrs->lft.numb_rounds_hard)
			goto hard;

		if (attrs->lft.soft_packet_limit > BIT_ULL(31)) {
			/* We cannot avoid a soft_value that might have the high
			 * bit set. For instance soft_value=2^31+1 cannot be
			 * adjusted to the low bit clear version of soft_value=1
			 * because it is too close to 0.
			 *
			 * Thus we have this corner case where we can hit the
			 * soft_limit with the high bit set, but cannot adjust
			 * the counter. Thus we set a temporary interrupt_value
			 * at least 2^30 away from here and do the adjustment
			 * then.
			 */
			mlx5e_ipsec_aso_update_soft(sa_entry,
						    BIT_ULL(31) - BIT_ULL(30));
			sa_entry->limits.fix_limit = true;
			return;
		}

		sa_entry->limits.fix_limit = true;
	}

hard:
	if (sa_entry->limits.round == attrs->lft.numb_rounds_hard) {
		mlx5e_ipsec_aso_update_soft(sa_entry, 0);
		attrs->lft.soft_packet_limit = XFRM_INF;
		return;
	}

	mlx5e_ipsec_aso_update_hard(sa_entry);
	sa_entry->limits.round++;
	if (sa_entry->limits.round == attrs->lft.numb_rounds_soft)
		mlx5e_ipsec_aso_update_soft(sa_entry,
					    attrs->lft.soft_packet_limit);
	if (sa_entry->limits.fix_limit) {
		sa_entry->limits.fix_limit = false;
		mlx5e_ipsec_aso_update_soft(sa_entry, BIT_ULL(31) - 1);
	}
}

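/* Work handler for an IPsec object change event: re-read the ASO context
 * and, under the xfrm state lock, process pending ESN and lifetime events.
 */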
static void mlx5e_ipsec_handle_event(struct work_struct *_work)
{
	struct mlx5e_ipsec_work *work =
		container_of(_work, struct mlx5e_ipsec_work, work);
	struct mlx5e_ipsec_sa_entry *sa_entry = work->data;
	struct mlx5_accel_esp_xfrm_attrs *attrs;
	struct mlx5e_ipsec_aso *aso;
	int ret;

	aso = sa_entry->ipsec->aso;
	attrs = &sa_entry->attrs;

	spin_lock(&sa_entry->x->lock);
	ret = mlx5e_ipsec_aso_query(sa_entry, NULL);
	if (ret)
		goto unlock;

	if (attrs->replay_esn.trigger &&
	    !MLX5_GET(ipsec_aso, aso->ctx, esn_event_arm)) {
		u32 mode_param = MLX5_GET(ipsec_aso, aso->ctx, mode_parameter);

		mlx5e_ipsec_update_esn_state(sa_entry, mode_param);
	}

	if (attrs->lft.soft_packet_limit != XFRM_INF)
		mlx5e_ipsec_handle_limits(sa_entry);

unlock:
	spin_unlock(&sa_entry->x->lock);
	kfree(work);
}

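/* Notifier callback for MLX5_EVENT_TYPE_OBJECT_CHANGE EQEs. It runs in
 * atomic notifier context, so after looking up the SA by the object ID
 * carried in the EQE, the actual handling is deferred to the IPsec
 * workqueue.
 */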
static int mlx5e_ipsec_event(struct notifier_block *nb, unsigned long event,
			     void *data)
{
	struct mlx5e_ipsec *ipsec = container_of(nb, struct mlx5e_ipsec, nb);
	struct mlx5e_ipsec_sa_entry *sa_entry;
	struct mlx5_eqe_obj_change *object;
	struct mlx5e_ipsec_work *work;
	struct mlx5_eqe *eqe = data;
	u16 type;

	if (event != MLX5_EVENT_TYPE_OBJECT_CHANGE)
		return NOTIFY_DONE;

	object = &eqe->data.obj_change;
	type = be16_to_cpu(object->obj_type);

	if (type != MLX5_GENERAL_OBJECT_TYPES_IPSEC)
		return NOTIFY_DONE;

	sa_entry = xa_load(&ipsec->sadb, be32_to_cpu(object->obj_id));
	if (!sa_entry)
		return NOTIFY_DONE;

	work = kmalloc(sizeof(*work), GFP_ATOMIC);
	if (!work)
		return NOTIFY_DONE;

	INIT_WORK(&work->work, mlx5e_ipsec_handle_event);
	work->data = sa_entry;

	queue_work(ipsec->wq, &work->work);
	return NOTIFY_OK;
}

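/* Allocate the ASO context buffer, DMA-map it for bidirectional access,
 * create the ASO SQ and register the object-change event notifier.
 */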
int mlx5e_ipsec_aso_init(struct mlx5e_ipsec *ipsec)
{
	struct mlx5_core_dev *mdev = ipsec->mdev;
	struct mlx5e_ipsec_aso *aso;
	struct mlx5e_hw_objs *res;
	struct device *pdev;
	int err;

	aso = kzalloc(sizeof(*ipsec->aso), GFP_KERNEL);
	if (!aso)
		return -ENOMEM;

	res = &mdev->mlx5e_res.hw_objs;

	pdev = mlx5_core_dma_dev(mdev);
	aso->dma_addr = dma_map_single(pdev, aso->ctx, sizeof(aso->ctx),
				       DMA_BIDIRECTIONAL);
	err = dma_mapping_error(pdev, aso->dma_addr);
	if (err)
		goto err_dma;

	aso->aso = mlx5_aso_create(mdev, res->pdn);
	if (IS_ERR(aso->aso)) {
		err = PTR_ERR(aso->aso);
		goto err_aso_create;
	}

	spin_lock_init(&aso->lock);
	ipsec->nb.notifier_call = mlx5e_ipsec_event;
	mlx5_notifier_register(mdev, &ipsec->nb);

	ipsec->aso = aso;
	return 0;

err_aso_create:
	dma_unmap_single(pdev, aso->dma_addr, sizeof(aso->ctx),
			 DMA_BIDIRECTIONAL);
err_dma:
	kfree(aso);
	return err;
}

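/* Mirror of mlx5e_ipsec_aso_init(): unregister the notifier, destroy the
 * ASO SQ and unmap and free the context buffer.
 */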
void mlx5e_ipsec_aso_cleanup(struct mlx5e_ipsec *ipsec)
{
	struct mlx5_core_dev *mdev = ipsec->mdev;
	struct mlx5e_ipsec_aso *aso;
	struct device *pdev;

	aso = ipsec->aso;
	pdev = mlx5_core_dma_dev(mdev);

	mlx5_notifier_unregister(mdev, &ipsec->nb);
	mlx5_aso_destroy(aso->aso);
	dma_unmap_single(pdev, aso->dma_addr, sizeof(aso->ctx),
			 DMA_BIDIRECTIONAL);
	kfree(aso);
}

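/* Copy the caller-provided modification fields into the control segment
 * of the WQE being built; the DMA address and lkey are set by the caller.
 */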
static void mlx5e_ipsec_aso_copy(struct mlx5_wqe_aso_ctrl_seg *ctrl,
				 struct mlx5_wqe_aso_ctrl_seg *data)
{
	if (!data)
		return;

	ctrl->data_mask_mode = data->data_mask_mode;
	ctrl->condition_1_0_operand = data->condition_1_0_operand;
	ctrl->condition_1_0_offset = data->condition_1_0_offset;
	ctrl->data_offset_condition_operand = data->data_offset_condition_operand;
	ctrl->condition_0_data = data->condition_0_data;
	ctrl->condition_0_mask = data->condition_0_mask;
	ctrl->condition_1_data = data->condition_1_data;
	ctrl->condition_1_mask = data->condition_1_mask;
	ctrl->bitwise_data = data->bitwise_data;
	ctrl->data_mask = data->data_mask;
}

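/* Post an ASO WQE for the SA's IPsec object with read enabled, so that
 * the current ASO context lands in aso->ctx, and busy-poll the CQ for up
 * to 10ms waiting for the completion. When @data is provided, the same
 * WQE also applies the requested modification.
 */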
int mlx5e_ipsec_aso_query(struct mlx5e_ipsec_sa_entry *sa_entry,
			  struct mlx5_wqe_aso_ctrl_seg *data)
{
	struct mlx5e_ipsec *ipsec = sa_entry->ipsec;
	struct mlx5e_ipsec_aso *aso = ipsec->aso;
	struct mlx5_core_dev *mdev = ipsec->mdev;
	struct mlx5_wqe_aso_ctrl_seg *ctrl;
	struct mlx5e_hw_objs *res;
	struct mlx5_aso_wqe *wqe;
	unsigned long expires;
	u8 ds_cnt;
	int ret;

	lockdep_assert_held(&sa_entry->x->lock);
	res = &mdev->mlx5e_res.hw_objs;

	spin_lock_bh(&aso->lock);
	memset(aso->ctx, 0, sizeof(aso->ctx));
	wqe = mlx5_aso_get_wqe(aso->aso);
	ds_cnt = DIV_ROUND_UP(sizeof(*wqe), MLX5_SEND_WQE_DS);
	mlx5_aso_build_wqe(aso->aso, ds_cnt, wqe, sa_entry->ipsec_obj_id,
			   MLX5_ACCESS_ASO_OPC_MOD_IPSEC);

	ctrl = &wqe->aso_ctrl;
	ctrl->va_l =
		cpu_to_be32(lower_32_bits(aso->dma_addr) | ASO_CTRL_READ_EN);
	ctrl->va_h = cpu_to_be32(upper_32_bits(aso->dma_addr));
	ctrl->l_key = cpu_to_be32(res->mkey);
	mlx5e_ipsec_aso_copy(ctrl, data);

	mlx5_aso_post_wqe(aso->aso, false, &wqe->ctrl);
	expires = jiffies + msecs_to_jiffies(10);
	do {
		ret = mlx5_aso_poll_cq(aso->aso, false);
		if (ret)
			usleep_range(2, 10);
	} while (ret && time_is_after_jiffies(expires));
	spin_unlock_bh(&aso->lock);
	return ret;
}