// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
// Copyright (c) 2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved.

#include <linux/mlx5/device.h>
#include <linux/mlx5/transobj.h>
#include "aso.h"
#include "wq.h"

struct mlx5_aso_cq {
	/* data path - accessed per cqe */
	struct mlx5_cqwq           wq;

	/* data path - accessed per napi poll */
	struct mlx5_core_cq        mcq;

	/* control */
	struct mlx5_core_dev      *mdev;
	struct mlx5_wq_ctrl        wq_ctrl;
} ____cacheline_aligned_in_smp;

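/* ASO SQ context: 'cc'/'pc' are the consumer/producer WQEBB counters, 'wq'
 * is the cyclic send work queue and 'uar_map' is the doorbell UAR mapping
 * used when ringing the SQ (see mlx5_aso_post_wqe() below).
 */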
struct mlx5_aso {
	/* data path */
	u16                        cc;
	u16                        pc;

	struct mlx5_wqe_ctrl_seg  *doorbell_cseg;
	struct mlx5_aso_cq         cq;

	/* read only */
	struct mlx5_wq_cyc         wq;
	void __iomem              *uar_map;
	u32                        sqn;

	/* control path */
	struct mlx5_wq_ctrl        wq_ctrl;

} ____cacheline_aligned_in_smp;

static void mlx5_aso_free_cq(struct mlx5_aso_cq *cq)
{
	mlx5_wq_destroy(&cq->wq_ctrl);
}

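/* Allocate the CQ buffer and doorbell record in host memory and initialize
 * every CQE's op_own byte to an invalid opcode with the ownership bit set,
 * so the CQ starts out empty from the driver's point of view (the same
 * convention used by the mlx5e data-path CQs).
 */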
static int mlx5_aso_alloc_cq(struct mlx5_core_dev *mdev, int numa_node,
			     void *cqc_data, struct mlx5_aso_cq *cq)
{
	struct mlx5_core_cq *mcq = &cq->mcq;
	struct mlx5_wq_param param;
	int err;
	u32 i;

	param.buf_numa_node = numa_node;
	param.db_numa_node = numa_node;

	err = mlx5_cqwq_create(mdev, &param, cqc_data, &cq->wq, &cq->wq_ctrl);
	if (err)
		return err;

	mcq->cqe_sz     = 64;
	mcq->set_ci_db  = cq->wq_ctrl.db.db;
	mcq->arm_db     = cq->wq_ctrl.db.db + 1;

	for (i = 0; i < mlx5_cqwq_get_size(&cq->wq); i++) {
		struct mlx5_cqe64 *cqe = mlx5_cqwq_get_wqe(&cq->wq, i);

		cqe->op_own = 0xf1;
	}

	cq->mdev = mdev;

	return 0;
}

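/* Issue the CREATE_CQ command: point the firmware at the pages and doorbell
 * record allocated by mlx5_aso_alloc_cq() and attach the CQ to EQ vector 0.
 */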
static int create_aso_cq(struct mlx5_aso_cq *cq, void *cqc_data)
{
	u32 out[MLX5_ST_SZ_DW(create_cq_out)];
	struct mlx5_core_dev *mdev = cq->mdev;
	struct mlx5_core_cq *mcq = &cq->mcq;
	void *in, *cqc;
	int inlen, eqn;
	int err;

	err = mlx5_vector2eqn(mdev, 0, &eqn);
	if (err)
		return err;

	inlen = MLX5_ST_SZ_BYTES(create_cq_in) +
		sizeof(u64) * cq->wq_ctrl.buf.npages;
	in = kvzalloc(inlen, GFP_KERNEL);
	if (!in)
		return -ENOMEM;

	cqc = MLX5_ADDR_OF(create_cq_in, in, cq_context);

	memcpy(cqc, cqc_data, MLX5_ST_SZ_BYTES(cqc));

	mlx5_fill_page_frag_array(&cq->wq_ctrl.buf,
				  (__be64 *)MLX5_ADDR_OF(create_cq_in, in, pas));

	MLX5_SET(cqc,   cqc, cq_period_mode, DIM_CQ_PERIOD_MODE_START_FROM_EQE);
	MLX5_SET(cqc,   cqc, c_eqn_or_apu_element, eqn);
	MLX5_SET(cqc,   cqc, uar_page,      mdev->priv.uar->index);
	MLX5_SET(cqc,   cqc, log_page_size, cq->wq_ctrl.buf.page_shift -
					    MLX5_ADAPTER_PAGE_SHIFT);
	MLX5_SET64(cqc, cqc, dbr_addr,      cq->wq_ctrl.db.dma);

	err = mlx5_core_create_cq(mdev, mcq, in, inlen, out, sizeof(out));

	kvfree(in);

	return err;
}

static void mlx5_aso_destroy_cq(struct mlx5_aso_cq *cq)
{
	mlx5_core_destroy_cq(cq->mdev, &cq->mcq);
	mlx5_wq_destroy(&cq->wq_ctrl);
}

static int mlx5_aso_create_cq(struct mlx5_core_dev *mdev, int numa_node,
			      struct mlx5_aso_cq *cq)
{
	void *cqc_data;
	int err;

	cqc_data = kvzalloc(MLX5_ST_SZ_BYTES(cqc), GFP_KERNEL);
	if (!cqc_data)
		return -ENOMEM;

	MLX5_SET(cqc, cqc_data, log_cq_size, 1);
	MLX5_SET(cqc, cqc_data, uar_page, mdev->priv.uar->index);
	if (MLX5_CAP_GEN(mdev, cqe_128_always) && cache_line_size() >= 128)
		MLX5_SET(cqc, cqc_data, cqe_sz, CQE_STRIDE_128_PAD);

	err = mlx5_aso_alloc_cq(mdev, numa_node, cqc_data, cq);
	if (err) {
		mlx5_core_err(mdev, "Failed to alloc aso wq cq, err=%d\n", err);
		goto err_out;
	}

	err = create_aso_cq(cq, cqc_data);
	if (err) {
		mlx5_core_err(mdev, "Failed to create aso wq cq, err=%d\n", err);
		goto err_free_cq;
	}

	kvfree(cqc_data);
	return 0;

err_free_cq:
	mlx5_aso_free_cq(cq);
err_out:
	kvfree(cqc_data);
	return err;
}

static int mlx5_aso_alloc_sq(struct mlx5_core_dev *mdev, int numa_node,
			     void *sqc_data, struct mlx5_aso *sq)
{
	void *sqc_wq = MLX5_ADDR_OF(sqc, sqc_data, wq);
	struct mlx5_wq_cyc *wq = &sq->wq;
	struct mlx5_wq_param param;
	int err;

	sq->uar_map = mdev->mlx5e_res.hw_objs.bfreg.map;

	param.db_numa_node = numa_node;
	param.buf_numa_node = numa_node;
	err = mlx5_wq_cyc_create(mdev, &param, sqc_wq, wq, &sq->wq_ctrl);
	if (err)
		return err;
	wq->db = &wq->db[MLX5_SND_DBR];

	return 0;
}

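/* Issue the CREATE_SQ command for the ASO SQ: a cyclic WQ bound to the ASO
 * CQ, created in RST state (mlx5_aso_set_sq_rdy() moves it to RDY).
 */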
static int create_aso_sq(struct mlx5_core_dev *mdev, int pdn,
			 void *sqc_data, struct mlx5_aso *sq)
{
	void *in, *sqc, *wq;
	int inlen, err;

	inlen = MLX5_ST_SZ_BYTES(create_sq_in) +
		sizeof(u64) * sq->wq_ctrl.buf.npages;
	in = kvzalloc(inlen, GFP_KERNEL);
	if (!in)
		return -ENOMEM;

	sqc = MLX5_ADDR_OF(create_sq_in, in, ctx);
	wq = MLX5_ADDR_OF(sqc, sqc, wq);

	memcpy(sqc, sqc_data, MLX5_ST_SZ_BYTES(sqc));
	MLX5_SET(sqc,  sqc, cqn, sq->cq.mcq.cqn);

	MLX5_SET(sqc,  sqc, state, MLX5_SQC_STATE_RST);
	MLX5_SET(sqc,  sqc, flush_in_error_en, 1);

	MLX5_SET(wq,   wq, wq_type,       MLX5_WQ_TYPE_CYCLIC);
	MLX5_SET(wq,   wq, uar_page,      mdev->mlx5e_res.hw_objs.bfreg.index);
	MLX5_SET(wq,   wq, log_wq_pg_sz,  sq->wq_ctrl.buf.page_shift -
					  MLX5_ADAPTER_PAGE_SHIFT);
	MLX5_SET64(wq, wq, dbr_addr,      sq->wq_ctrl.db.dma);

	mlx5_fill_page_frag_array(&sq->wq_ctrl.buf,
				  (__be64 *)MLX5_ADDR_OF(wq, wq, pas));

	err = mlx5_core_create_sq(mdev, in, inlen, &sq->sqn);

	kvfree(in);

	return err;
}

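/* Transition the SQ from RST to RDY via MODIFY_SQ so it starts processing
 * posted WQEs.
 */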
static int mlx5_aso_set_sq_rdy(struct mlx5_core_dev *mdev, u32 sqn)
{
	void *in, *sqc;
	int inlen, err;

	inlen = MLX5_ST_SZ_BYTES(modify_sq_in);
	in = kvzalloc(inlen, GFP_KERNEL);
	if (!in)
		return -ENOMEM;

	MLX5_SET(modify_sq_in, in, sq_state, MLX5_SQC_STATE_RST);
	sqc = MLX5_ADDR_OF(modify_sq_in, in, ctx);
	MLX5_SET(sqc, sqc, state, MLX5_SQC_STATE_RDY);

	err = mlx5_core_modify_sq(mdev, sqn, in);

	kvfree(in);

	return err;
}

static int mlx5_aso_create_sq_rdy(struct mlx5_core_dev *mdev, u32 pdn,
				  void *sqc_data, struct mlx5_aso *sq)
{
	int err;

	err = create_aso_sq(mdev, pdn, sqc_data, sq);
	if (err)
		return err;

	err = mlx5_aso_set_sq_rdy(mdev, sq->sqn);
	if (err)
		mlx5_core_destroy_sq(mdev, sq->sqn);

	return err;
}

static void mlx5_aso_free_sq(struct mlx5_aso *sq)
{
	mlx5_wq_destroy(&sq->wq_ctrl);
}

static void mlx5_aso_destroy_sq(struct mlx5_aso *sq)
{
	mlx5_core_destroy_sq(sq->cq.mdev, sq->sqn);
	mlx5_aso_free_sq(sq);
}

static int mlx5_aso_create_sq(struct mlx5_core_dev *mdev, int numa_node,
			      u32 pdn, struct mlx5_aso *sq)
{
	void *sqc_data, *wq;
	int err;

	sqc_data = kvzalloc(MLX5_ST_SZ_BYTES(sqc), GFP_KERNEL);
	if (!sqc_data)
		return -ENOMEM;

	wq = MLX5_ADDR_OF(sqc, sqc_data, wq);
	MLX5_SET(wq, wq, log_wq_stride, ilog2(MLX5_SEND_WQE_BB));
	MLX5_SET(wq, wq, pd, pdn);
	MLX5_SET(wq, wq, log_wq_sz, 1);

	err = mlx5_aso_alloc_sq(mdev, numa_node, sqc_data, sq);
	if (err) {
		mlx5_core_err(mdev, "Failed to alloc aso wq sq, err=%d\n", err);
		goto err_out;
	}

	err = mlx5_aso_create_sq_rdy(mdev, pdn, sqc_data, sq);
	if (err) {
		mlx5_core_err(mdev, "Failed to open aso wq sq, err=%d\n", err);
		goto err_free_asosq;
	}

	mlx5_core_dbg(mdev, "aso sq->sqn = 0x%x\n", sq->sqn);

	kvfree(sqc_data);
	return 0;

err_free_asosq:
	mlx5_aso_free_sq(sq);
err_out:
	kvfree(sqc_data);
	return err;
}

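/* mlx5_aso_create - create an ASO SQ/CQ pair on @mdev under protection
 * domain @pdn.  Returns a valid pointer or ERR_PTR() on failure; release
 * with mlx5_aso_destroy().
 *
 * A minimal usage sketch (illustrative only, not taken from this file):
 * callers such as the flow-meter/IPsec/MACsec code pair mlx5_aso_get_wqe(),
 * mlx5_aso_build_wqe(), mlx5_aso_post_wqe() and mlx5_aso_poll_cq().  The
 * ds count, object id and opcode modifier below are placeholders; real
 * callers also serialize access to the SQ and fill the WQE's aso_ctrl and
 * optional data segment before posting.
 *
 *	struct mlx5_aso_wqe *wqe;
 *	struct mlx5_aso *aso;
 *	int err;
 *
 *	aso = mlx5_aso_create(mdev, pdn);
 *	if (IS_ERR(aso))
 *		return PTR_ERR(aso);
 *
 *	wqe = mlx5_aso_get_wqe(aso);
 *	mlx5_aso_build_wqe(aso, DIV_ROUND_UP(sizeof(*wqe), MLX5_SEND_WQE_DS),
 *			   wqe, obj_id, opc_mode);
 *	// ... fill wqe->aso_ctrl (and optional data segment) ...
 *	mlx5_aso_post_wqe(aso, false, &wqe->ctrl);
 *	err = mlx5_aso_poll_cq(aso, false);
 *
 *	mlx5_aso_destroy(aso);
 */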
struct mlx5_aso *mlx5_aso_create(struct mlx5_core_dev *mdev, u32 pdn)
{
	int numa_node = dev_to_node(mlx5_core_dma_dev(mdev));
	struct mlx5_aso *aso;
	int err;

	aso = kzalloc(sizeof(*aso), GFP_KERNEL);
	if (!aso)
		return ERR_PTR(-ENOMEM);

	err = mlx5_aso_create_cq(mdev, numa_node, &aso->cq);
	if (err)
		goto err_cq;

	err = mlx5_aso_create_sq(mdev, numa_node, pdn, aso);
	if (err)
		goto err_sq;

	return aso;

err_sq:
	mlx5_aso_destroy_cq(&aso->cq);
err_cq:
	kfree(aso);
	return ERR_PTR(err);
}

void mlx5_aso_destroy(struct mlx5_aso *aso)
{
	if (IS_ERR_OR_NULL(aso))
		return;

	mlx5_aso_destroy_sq(aso);
	mlx5_aso_destroy_cq(&aso->cq);
	kfree(aso);
}

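/* Fill the WQE control segment: the ACCESS_ASO opcode with @opc_mode as the
 * opcode modifier, the current producer counter as the WQE index, the ASO
 * SQ number, @ds_cnt data segments and the target ASO object id.  The
 * caller still owns the rest of the WQE (aso_ctrl and optional data).
 */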
void mlx5_aso_build_wqe(struct mlx5_aso *aso, u8 ds_cnt,
			struct mlx5_aso_wqe *aso_wqe,
			u32 obj_id, u32 opc_mode)
{
	struct mlx5_wqe_ctrl_seg *cseg = &aso_wqe->ctrl;

	cseg->opmod_idx_opcode = cpu_to_be32((opc_mode << MLX5_WQE_CTRL_WQE_OPC_MOD_SHIFT) |
					     (aso->pc << MLX5_WQE_CTRL_WQE_INDEX_SHIFT) |
					     MLX5_OPCODE_ACCESS_ASO);
	cseg->qpn_ds     = cpu_to_be32((aso->sqn << MLX5_WQE_CTRL_QPN_SHIFT) | ds_cnt);
	cseg->fm_ce_se   = MLX5_WQE_CTRL_CQ_UPDATE;
	cseg->general_id = cpu_to_be32(obj_id);
}

void *mlx5_aso_get_wqe(struct mlx5_aso *aso)
{
	u16 pi;

	pi = mlx5_wq_cyc_ctr2ix(&aso->wq, aso->pc);
	return mlx5_wq_cyc_get_wqe(&aso->wq, pi);
}

void mlx5_aso_post_wqe(struct mlx5_aso *aso, bool with_data,
		       struct mlx5_wqe_ctrl_seg *doorbell_cseg)
{
	doorbell_cseg->fm_ce_se |= MLX5_WQE_CTRL_CQ_UPDATE;
	/* ensure wqe is visible to device before updating doorbell record */
	dma_wmb();

	if (with_data)
		aso->pc += MLX5_ASO_WQEBBS_DATA;
	else
		aso->pc += MLX5_ASO_WQEBBS;
	*aso->wq.db = cpu_to_be32(aso->pc);

	/* ensure doorbell record is visible to device before ringing the
	 * doorbell
	 */
	wmb();

	mlx5_write64((__be32 *)doorbell_cseg, aso->uar_map);

	/* Ensure doorbell is written on uar_page before poll_cq */
	WRITE_ONCE(doorbell_cseg, NULL);
}

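/* Poll a single completion for the most recently posted WQE.  Returns 0 when
 * a CQE was consumed (error CQEs are only logged) and -ETIMEDOUT when no
 * CQE is available yet.  Callers typically retry in a bounded loop, sketched
 * here with a hypothetical 10ms budget:
 *
 *	unsigned long expires = jiffies + msecs_to_jiffies(10);
 *
 *	do {
 *		err = mlx5_aso_poll_cq(aso, false);
 *	} while (err == -ETIMEDOUT && time_is_after_jiffies(expires));
 */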
int mlx5_aso_poll_cq(struct mlx5_aso *aso, bool with_data)
{
	struct mlx5_aso_cq *cq = &aso->cq;
	struct mlx5_cqe64 *cqe;

	cqe = mlx5_cqwq_get_cqe(&cq->wq);
	if (!cqe)
		return -ETIMEDOUT;

	/* sq->cc must be updated only after mlx5_cqwq_update_db_record(),
	 * otherwise a cq overrun may occur
	 */
	mlx5_cqwq_pop(&cq->wq);

	if (unlikely(get_cqe_opcode(cqe) != MLX5_CQE_REQ)) {
		struct mlx5_err_cqe *err_cqe;

		mlx5_core_err(cq->mdev, "Bad OP in ASOSQ CQE: 0x%x\n",
			      get_cqe_opcode(cqe));

		err_cqe = (struct mlx5_err_cqe *)cqe;
		mlx5_core_err(cq->mdev, "vendor_err_synd=%x\n",
			      err_cqe->vendor_err_synd);
		mlx5_core_err(cq->mdev, "syndrome=%x\n",
			      err_cqe->syndrome);
		print_hex_dump(KERN_WARNING, "", DUMP_PREFIX_OFFSET,
			       16, 1, err_cqe,
			       sizeof(*err_cqe), false);
	}

	mlx5_cqwq_update_db_record(&cq->wq);

	/* ensure cq space is freed before enabling more cqes */
	wmb();

	if (with_data)
		aso->cc += MLX5_ASO_WQEBBS_DATA;
	else
		aso->cc += MLX5_ASO_WQEBBS;

	return 0;
}