xref: /freebsd/sys/dev/ice/ice_controlq.c (revision 9e54973f)
1 /* SPDX-License-Identifier: BSD-3-Clause */
2 /*  Copyright (c) 2024, Intel Corporation
3  *  All rights reserved.
4  *
5  *  Redistribution and use in source and binary forms, with or without
6  *  modification, are permitted provided that the following conditions are met:
7  *
8  *   1. Redistributions of source code must retain the above copyright notice,
9  *      this list of conditions and the following disclaimer.
10  *
11  *   2. Redistributions in binary form must reproduce the above copyright
12  *      notice, this list of conditions and the following disclaimer in the
13  *      documentation and/or other materials provided with the distribution.
14  *
15  *   3. Neither the name of the Intel Corporation nor the names of its
16  *      contributors may be used to endorse or promote products derived from
17  *      this software without specific prior written permission.
18  *
19  *  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
20  *  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
21  *  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
22  *  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
23  *  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
24  *  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
25  *  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
26  *  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
27  *  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
28  *  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
29  *  POSSIBILITY OF SUCH DAMAGE.
30  */
31 
32 #include "ice_common.h"
33 
34 #define ICE_CQ_INIT_REGS(qinfo, prefix)				\
35 do {								\
36 	(qinfo)->sq.head = prefix##_ATQH;			\
37 	(qinfo)->sq.tail = prefix##_ATQT;			\
38 	(qinfo)->sq.len = prefix##_ATQLEN;			\
39 	(qinfo)->sq.bah = prefix##_ATQBAH;			\
40 	(qinfo)->sq.bal = prefix##_ATQBAL;			\
41 	(qinfo)->sq.len_mask = prefix##_ATQLEN_ATQLEN_M;	\
42 	(qinfo)->sq.len_ena_mask = prefix##_ATQLEN_ATQENABLE_M;	\
43 	(qinfo)->sq.len_crit_mask = prefix##_ATQLEN_ATQCRIT_M;	\
44 	(qinfo)->sq.head_mask = prefix##_ATQH_ATQH_M;		\
45 	(qinfo)->rq.head = prefix##_ARQH;			\
46 	(qinfo)->rq.tail = prefix##_ARQT;			\
47 	(qinfo)->rq.len = prefix##_ARQLEN;			\
48 	(qinfo)->rq.bah = prefix##_ARQBAH;			\
49 	(qinfo)->rq.bal = prefix##_ARQBAL;			\
50 	(qinfo)->rq.len_mask = prefix##_ARQLEN_ARQLEN_M;	\
51 	(qinfo)->rq.len_ena_mask = prefix##_ARQLEN_ARQENABLE_M;	\
52 	(qinfo)->rq.len_crit_mask = prefix##_ARQLEN_ARQCRIT_M;	\
53 	(qinfo)->rq.head_mask = prefix##_ARQH_ARQH_M;		\
54 } while (0)
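/*
 * Note: the ## token pasting above means that, for example,
 * ICE_CQ_INIT_REGS(cq, PF_FW) resolves cq->sq.head to PF_FW_ATQH,
 * cq->rq.tail to PF_FW_ARQT, and so on for each register offset and mask.
 */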
55 
56 /**
57  * ice_adminq_init_regs - Initialize AdminQ registers
58  * @hw: pointer to the hardware structure
59  *
60  * This assumes the alloc_sq and alloc_rq functions have already been called
61  */
62 static void ice_adminq_init_regs(struct ice_hw *hw)
63 {
64 	struct ice_ctl_q_info *cq = &hw->adminq;
65 
66 	ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
67 
68 	ICE_CQ_INIT_REGS(cq, PF_FW);
69 }
70 
71 /**
72  * ice_mailbox_init_regs - Initialize Mailbox registers
73  * @hw: pointer to the hardware structure
74  *
75  * This assumes the alloc_sq and alloc_rq functions have already been called
76  */
77 static void ice_mailbox_init_regs(struct ice_hw *hw)
78 {
79 	struct ice_ctl_q_info *cq = &hw->mailboxq;
80 
81 	ICE_CQ_INIT_REGS(cq, PF_MBX);
82 }
83 
84 /**
85  * ice_check_sq_alive
86  * @hw: pointer to the HW struct
87  * @cq: pointer to the specific Control queue
88  *
89  * Returns true if the queue is enabled, else false.
90  */
91 bool ice_check_sq_alive(struct ice_hw *hw, struct ice_ctl_q_info *cq)
92 {
93 	/* check both queue-length and queue-enable fields */
94 	if (cq->sq.len && cq->sq.len_mask && cq->sq.len_ena_mask)
95 		return (rd32(hw, cq->sq.len) & (cq->sq.len_mask |
96 						cq->sq.len_ena_mask)) ==
97 			(cq->num_sq_entries | cq->sq.len_ena_mask);
98 
99 	return false;
100 }
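/*
 * Note: for the admin queue the check above reads PF_FW_ATQLEN and reports
 * the send queue alive only while the length field still equals
 * num_sq_entries and the enable bit is set; both are cleared when the queue
 * is shut down.
 */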
101 
102 /**
103  * ice_alloc_ctrlq_sq_ring - Allocate Control Transmit Queue (ATQ) rings
104  * @hw: pointer to the hardware structure
105  * @cq: pointer to the specific Control queue
106  */
107 static enum ice_status
108 ice_alloc_ctrlq_sq_ring(struct ice_hw *hw, struct ice_ctl_q_info *cq)
109 {
110 	size_t size = cq->num_sq_entries * sizeof(struct ice_aq_desc);
111 
112 	cq->sq.desc_buf.va = ice_alloc_dma_mem(hw, &cq->sq.desc_buf, size);
113 	if (!cq->sq.desc_buf.va)
114 		return ICE_ERR_NO_MEMORY;
115 
116 	return ICE_SUCCESS;
117 }
118 
119 /**
120  * ice_alloc_ctrlq_rq_ring - Allocate Control Receive Queue (ARQ) rings
121  * @hw: pointer to the hardware structure
122  * @cq: pointer to the specific Control queue
123  */
124 static enum ice_status
125 ice_alloc_ctrlq_rq_ring(struct ice_hw *hw, struct ice_ctl_q_info *cq)
126 {
127 	size_t size = cq->num_rq_entries * sizeof(struct ice_aq_desc);
128 
129 	cq->rq.desc_buf.va = ice_alloc_dma_mem(hw, &cq->rq.desc_buf, size);
130 	if (!cq->rq.desc_buf.va)
131 		return ICE_ERR_NO_MEMORY;
132 	return ICE_SUCCESS;
133 }
134 
135 /**
136  * ice_free_cq_ring - Free control queue ring
137  * @hw: pointer to the hardware structure
138  * @ring: pointer to the specific control queue ring
139  *
140  * This assumes the posted buffers have already been cleaned
141  * and de-allocated
142  */
143 static void ice_free_cq_ring(struct ice_hw *hw, struct ice_ctl_q_ring *ring)
144 {
145 	ice_free_dma_mem(hw, &ring->desc_buf);
146 }
147 
148 /**
149  * ice_alloc_rq_bufs - Allocate pre-posted buffers for the ARQ
150  * @hw: pointer to the hardware structure
151  * @cq: pointer to the specific Control queue
152  */
153 static enum ice_status
154 ice_alloc_rq_bufs(struct ice_hw *hw, struct ice_ctl_q_info *cq)
155 {
156 	int i;
157 
158 	/* We'll be allocating the buffer info memory first, then we can
159 	 * allocate the mapped buffers for the event processing
160 	 */
161 	cq->rq.dma_head = ice_calloc(hw, cq->num_rq_entries,
162 				     sizeof(cq->rq.desc_buf));
163 	if (!cq->rq.dma_head)
164 		return ICE_ERR_NO_MEMORY;
165 	cq->rq.r.rq_bi = (struct ice_dma_mem *)cq->rq.dma_head;
166 
167 	/* allocate the mapped buffers */
168 	for (i = 0; i < cq->num_rq_entries; i++) {
169 		struct ice_aq_desc *desc;
170 		struct ice_dma_mem *bi;
171 
172 		bi = &cq->rq.r.rq_bi[i];
173 		bi->va = ice_alloc_dma_mem(hw, bi, cq->rq_buf_size);
174 		if (!bi->va)
175 			goto unwind_alloc_rq_bufs;
176 
177 		/* now configure the descriptors for use */
178 		desc = ICE_CTL_Q_DESC(cq->rq, i);
179 
180 		desc->flags = CPU_TO_LE16(ICE_AQ_FLAG_BUF);
181 		if (cq->rq_buf_size > ICE_AQ_LG_BUF)
182 			desc->flags |= CPU_TO_LE16(ICE_AQ_FLAG_LB);
183 		desc->opcode = 0;
184 		/* This is in accordance with the control queue design; there is no
185 		 * register for buffer size configuration
186 		 */
187 		desc->datalen = CPU_TO_LE16(bi->size);
188 		desc->retval = 0;
189 		desc->cookie_high = 0;
190 		desc->cookie_low = 0;
191 		desc->params.generic.addr_high =
192 			CPU_TO_LE32(ICE_HI_DWORD(bi->pa));
193 		desc->params.generic.addr_low =
194 			CPU_TO_LE32(ICE_LO_DWORD(bi->pa));
195 		desc->params.generic.param0 = 0;
196 		desc->params.generic.param1 = 0;
197 	}
198 	return ICE_SUCCESS;
199 
200 unwind_alloc_rq_bufs:
201 	/* don't try to free the one that failed... */
202 	i--;
203 	for (; i >= 0; i--)
204 		ice_free_dma_mem(hw, &cq->rq.r.rq_bi[i]);
205 	cq->rq.r.rq_bi = NULL;
206 	ice_free(hw, cq->rq.dma_head);
207 	cq->rq.dma_head = NULL;
208 
209 	return ICE_ERR_NO_MEMORY;
210 }
211 
212 /**
213  * ice_alloc_sq_bufs - Allocate empty buffer structs for the ATQ
214  * @hw: pointer to the hardware structure
215  * @cq: pointer to the specific Control queue
216  */
217 static enum ice_status
218 ice_alloc_sq_bufs(struct ice_hw *hw, struct ice_ctl_q_info *cq)
219 {
220 	int i;
221 
222 	/* No mapped memory needed yet, just the buffer info structures */
223 	cq->sq.dma_head = ice_calloc(hw, cq->num_sq_entries,
224 				     sizeof(cq->sq.desc_buf));
225 	if (!cq->sq.dma_head)
226 		return ICE_ERR_NO_MEMORY;
227 	cq->sq.r.sq_bi = (struct ice_dma_mem *)cq->sq.dma_head;
228 
229 	/* allocate the mapped buffers */
230 	for (i = 0; i < cq->num_sq_entries; i++) {
231 		struct ice_dma_mem *bi;
232 
233 		bi = &cq->sq.r.sq_bi[i];
234 		bi->va = ice_alloc_dma_mem(hw, bi, cq->sq_buf_size);
235 		if (!bi->va)
236 			goto unwind_alloc_sq_bufs;
237 	}
238 	return ICE_SUCCESS;
239 
240 unwind_alloc_sq_bufs:
241 	/* don't try to free the one that failed... */
242 	i--;
243 	for (; i >= 0; i--)
244 		ice_free_dma_mem(hw, &cq->sq.r.sq_bi[i]);
245 	cq->sq.r.sq_bi = NULL;
246 	ice_free(hw, cq->sq.dma_head);
247 	cq->sq.dma_head = NULL;
248 
249 	return ICE_ERR_NO_MEMORY;
250 }
251 
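/**
 * ice_cfg_cq_regs - configure base address and length registers for a queue
 * @hw: pointer to the hardware structure
 * @ring: pointer to the specific control queue ring
 * @num_entries: number of descriptors in the ring
 *
 * Clears the head and tail registers, programs the length register (with the
 * enable bit set) and the base address registers, then reads one register
 * back to verify the configuration reached the hardware.
 */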
252 static enum ice_status
253 ice_cfg_cq_regs(struct ice_hw *hw, struct ice_ctl_q_ring *ring, u16 num_entries)
254 {
255 	/* Clear Head and Tail */
256 	wr32(hw, ring->head, 0);
257 	wr32(hw, ring->tail, 0);
258 
259 	/* set starting point */
260 	wr32(hw, ring->len, (num_entries | ring->len_ena_mask));
261 	wr32(hw, ring->bal, ICE_LO_DWORD(ring->desc_buf.pa));
262 	wr32(hw, ring->bah, ICE_HI_DWORD(ring->desc_buf.pa));
263 
264 	/* Check one register to verify that config was applied */
265 	if (rd32(hw, ring->bal) != ICE_LO_DWORD(ring->desc_buf.pa))
266 		return ICE_ERR_AQ_ERROR;
267 
268 	return ICE_SUCCESS;
269 }
270 
271 /**
272  * ice_cfg_sq_regs - configure Control ATQ registers
273  * @hw: pointer to the hardware structure
274  * @cq: pointer to the specific Control queue
275  *
276  * Configure base address and length registers for the transmit queue
277  */
278 static enum ice_status
279 ice_cfg_sq_regs(struct ice_hw *hw, struct ice_ctl_q_info *cq)
280 {
281 	return ice_cfg_cq_regs(hw, &cq->sq, cq->num_sq_entries);
282 }
283 
284 /**
285  * ice_cfg_rq_regs - configure Control ARQ registers
286  * @hw: pointer to the hardware structure
287  * @cq: pointer to the specific Control queue
288  *
289  * Configure base address and length registers for the receive (event) queue
290  */
291 static enum ice_status
292 ice_cfg_rq_regs(struct ice_hw *hw, struct ice_ctl_q_info *cq)
293 {
294 	enum ice_status status;
295 
296 	status = ice_cfg_cq_regs(hw, &cq->rq, cq->num_rq_entries);
297 	if (status)
298 		return status;
299 
300 	/* Update tail in the HW to post pre-allocated buffers */
301 	wr32(hw, cq->rq.tail, (u32)(cq->num_rq_entries - 1));
302 
303 	return ICE_SUCCESS;
304 }
305 
306 #define ICE_FREE_CQ_BUFS(hw, qi, ring)					\
307 do {									\
308 	/* free descriptors */						\
309 	if ((qi)->ring.r.ring##_bi) {					\
310 		int i;							\
311 									\
312 		for (i = 0; i < (qi)->num_##ring##_entries; i++)	\
313 			if ((qi)->ring.r.ring##_bi[i].pa)		\
314 				ice_free_dma_mem((hw),			\
315 					&(qi)->ring.r.ring##_bi[i]);	\
316 	}								\
317 	/* free DMA head */						\
318 	ice_free(hw, (qi)->ring.dma_head);				\
319 } while (0)
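/*
 * As with ICE_CQ_INIT_REGS, the ring argument is token-pasted:
 * ICE_FREE_CQ_BUFS(hw, cq, sq) frees every posted cq->sq.r.sq_bi[] buffer
 * (up to num_sq_entries) and then the cq->sq.dma_head array; the rq variant
 * does the same for the receive side.
 */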
320 
321 /**
322  * ice_init_sq - main initialization routine for Control ATQ
323  * @hw: pointer to the hardware structure
324  * @cq: pointer to the specific Control queue
325  *
326  * This is the main initialization routine for the Control Send Queue.
327  * Prior to calling this function, the driver *MUST* set the following fields
328  * in the cq structure:
329  *     - cq->num_sq_entries
330  *     - cq->sq_buf_size
331  *
332  * Do *NOT* hold the lock when calling this as the memory allocation routines
333  * called are not going to be atomic context safe
334  */
335 static enum ice_status ice_init_sq(struct ice_hw *hw, struct ice_ctl_q_info *cq)
336 {
337 	enum ice_status ret_code;
338 
339 	ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
340 
341 	if (cq->sq.count > 0) {
342 		/* queue already initialized */
343 		ret_code = ICE_ERR_NOT_READY;
344 		goto init_ctrlq_exit;
345 	}
346 
347 	/* verify input for valid configuration */
348 	if (!cq->num_sq_entries || !cq->sq_buf_size) {
349 		ret_code = ICE_ERR_CFG;
350 		goto init_ctrlq_exit;
351 	}
352 
353 	cq->sq.next_to_use = 0;
354 	cq->sq.next_to_clean = 0;
355 
356 	/* allocate the ring memory */
357 	ret_code = ice_alloc_ctrlq_sq_ring(hw, cq);
358 	if (ret_code)
359 		goto init_ctrlq_exit;
360 
361 	/* allocate buffers in the rings */
362 	ret_code = ice_alloc_sq_bufs(hw, cq);
363 	if (ret_code)
364 		goto init_ctrlq_free_rings;
365 
366 	/* initialize base registers */
367 	ret_code = ice_cfg_sq_regs(hw, cq);
368 	if (ret_code)
369 		goto init_ctrlq_free_rings;
370 
371 	/* success! */
372 	cq->sq.count = cq->num_sq_entries;
373 	goto init_ctrlq_exit;
374 
375 init_ctrlq_free_rings:
376 	ICE_FREE_CQ_BUFS(hw, cq, sq);
377 	ice_free_cq_ring(hw, &cq->sq);
378 
379 init_ctrlq_exit:
380 	return ret_code;
381 }
382 
383 /**
384  * ice_init_rq - initialize receive side of a control queue
385  * @hw: pointer to the hardware structure
386  * @cq: pointer to the specific Control queue
387  *
388  * The main initialization routine for the receive side of a control queue.
389  * Prior to calling this function, the driver *MUST* set the following fields
390  * in the cq structure:
391  *     - cq->num_rq_entries
392  *     - cq->rq_buf_size
393  *
394  * Do *NOT* hold the lock when calling this as the memory allocation routines
395  * called are not going to be atomic context safe
396  */
397 static enum ice_status ice_init_rq(struct ice_hw *hw, struct ice_ctl_q_info *cq)
398 {
399 	enum ice_status ret_code;
400 
401 	ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
402 
403 	if (cq->rq.count > 0) {
404 		/* queue already initialized */
405 		ret_code = ICE_ERR_NOT_READY;
406 		goto init_ctrlq_exit;
407 	}
408 
409 	/* verify input for valid configuration */
410 	if (!cq->num_rq_entries || !cq->rq_buf_size) {
411 		ret_code = ICE_ERR_CFG;
412 		goto init_ctrlq_exit;
413 	}
414 
415 	cq->rq.next_to_use = 0;
416 	cq->rq.next_to_clean = 0;
417 
418 	/* allocate the ring memory */
419 	ret_code = ice_alloc_ctrlq_rq_ring(hw, cq);
420 	if (ret_code)
421 		goto init_ctrlq_exit;
422 
423 	/* allocate buffers in the rings */
424 	ret_code = ice_alloc_rq_bufs(hw, cq);
425 	if (ret_code)
426 		goto init_ctrlq_free_rings;
427 
428 	/* initialize base registers */
429 	ret_code = ice_cfg_rq_regs(hw, cq);
430 	if (ret_code)
431 		goto init_ctrlq_free_rings;
432 
433 	/* success! */
434 	cq->rq.count = cq->num_rq_entries;
435 	goto init_ctrlq_exit;
436 
437 init_ctrlq_free_rings:
438 	ICE_FREE_CQ_BUFS(hw, cq, rq);
439 	ice_free_cq_ring(hw, &cq->rq);
440 
441 init_ctrlq_exit:
442 	return ret_code;
443 }
444 
445 /**
446  * ice_shutdown_sq - shutdown the transmit side of a control queue
447  * @hw: pointer to the hardware structure
448  * @cq: pointer to the specific Control queue
449  *
450  * The main shutdown routine for the Control Transmit Queue
451  */
452 static enum ice_status
453 ice_shutdown_sq(struct ice_hw *hw, struct ice_ctl_q_info *cq)
454 {
455 	enum ice_status ret_code = ICE_SUCCESS;
456 
457 	ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
458 
459 	ice_acquire_lock(&cq->sq_lock);
460 
461 	if (!cq->sq.count) {
462 		ret_code = ICE_ERR_NOT_READY;
463 		goto shutdown_sq_out;
464 	}
465 
466 	/* Stop processing of the control queue */
467 	wr32(hw, cq->sq.head, 0);
468 	wr32(hw, cq->sq.tail, 0);
469 	wr32(hw, cq->sq.len, 0);
470 	wr32(hw, cq->sq.bal, 0);
471 	wr32(hw, cq->sq.bah, 0);
472 
473 	cq->sq.count = 0;	/* to indicate uninitialized queue */
474 
475 	/* free ring buffers and the ring itself */
476 	ICE_FREE_CQ_BUFS(hw, cq, sq);
477 	ice_free_cq_ring(hw, &cq->sq);
478 
479 shutdown_sq_out:
480 	ice_release_lock(&cq->sq_lock);
481 	return ret_code;
482 }
483 
484 /**
485  * ice_aq_ver_check - Check the reported AQ API version
486  * @hw: pointer to the hardware structure
487  *
488  * Checks if the driver should load on a given AQ API version.
489  *
490  * Return: 'true' iff the driver should attempt to load, 'false' otherwise.
491  */
492 static bool ice_aq_ver_check(struct ice_hw *hw)
493 {
494 	if (hw->api_maj_ver > EXP_FW_API_VER_MAJOR) {
495 		/* Major API version is newer than expected, don't load */
496 		ice_warn(hw, "The driver for the device stopped because the NVM image is newer than expected. You must install the most recent version of the network driver.\n");
497 		return false;
498 	} else if (hw->api_maj_ver == EXP_FW_API_VER_MAJOR) {
499 		if (hw->api_min_ver > (EXP_FW_API_VER_MINOR + 2))
500 			ice_info(hw, "The driver for the device detected a newer version (%u.%u) of the NVM image than expected (%u.%u). Please install the most recent version of the network driver.\n",
501 				 hw->api_maj_ver, hw->api_min_ver,
502 				 EXP_FW_API_VER_MAJOR, EXP_FW_API_VER_MINOR);
503 		else if ((hw->api_min_ver + 2) < EXP_FW_API_VER_MINOR)
504 			ice_info(hw, "The driver for the device detected an older version (%u.%u) of the NVM image than expected (%u.%u). Please update the NVM image.\n",
505 				 hw->api_maj_ver, hw->api_min_ver,
506 				 EXP_FW_API_VER_MAJOR, EXP_FW_API_VER_MINOR);
507 	} else {
508 		/* Major API version is older than expected, log a warning */
509 		ice_info(hw, "The driver for the device detected an older version (%u.%u) of the NVM image than expected (%u.%u). Please update the NVM image.\n",
510 			 hw->api_maj_ver, hw->api_min_ver,
511 			 EXP_FW_API_VER_MAJOR, EXP_FW_API_VER_MINOR);
512 	}
513 	return true;
514 }
515 
516 /**
517  * ice_shutdown_rq - shutdown Control ARQ
518  * @hw: pointer to the hardware structure
519  * @cq: pointer to the specific Control queue
520  *
521  * The main shutdown routine for the Control Receive Queue
522  */
523 static enum ice_status
524 ice_shutdown_rq(struct ice_hw *hw, struct ice_ctl_q_info *cq)
525 {
526 	enum ice_status ret_code = ICE_SUCCESS;
527 
528 	ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
529 
530 	ice_acquire_lock(&cq->rq_lock);
531 
532 	if (!cq->rq.count) {
533 		ret_code = ICE_ERR_NOT_READY;
534 		goto shutdown_rq_out;
535 	}
536 
537 	/* Stop Control Queue processing */
538 	wr32(hw, cq->rq.head, 0);
539 	wr32(hw, cq->rq.tail, 0);
540 	wr32(hw, cq->rq.len, 0);
541 	wr32(hw, cq->rq.bal, 0);
542 	wr32(hw, cq->rq.bah, 0);
543 
544 	/* set rq.count to 0 to indicate uninitialized queue */
545 	cq->rq.count = 0;
546 
547 	/* free ring buffers and the ring itself */
548 	ICE_FREE_CQ_BUFS(hw, cq, rq);
549 	ice_free_cq_ring(hw, &cq->rq);
550 
551 shutdown_rq_out:
552 	ice_release_lock(&cq->rq_lock);
553 	return ret_code;
554 }
555 
556 /**
557  * ice_idle_aq - stop ARQ/ATQ processing momentarily
558  * @hw: pointer to the hardware structure
559  * @cq: pointer to the specific Control queue
560  */
561 void ice_idle_aq(struct ice_hw *hw, struct ice_ctl_q_info *cq)
562 {
563 	wr32(hw, cq->sq.len, 0);
564 	wr32(hw, cq->rq.len, 0);
565 
566 	ice_msec_delay(2, false);
567 }
568 
569 /**
570  * ice_init_check_adminq - Check version for Admin Queue to know if it's alive
571  * @hw: pointer to the hardware structure
572  */
573 static enum ice_status ice_init_check_adminq(struct ice_hw *hw)
574 {
575 	struct ice_ctl_q_info *cq = &hw->adminq;
576 	enum ice_status status;
577 
578 	ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
579 
580 	status = ice_aq_get_fw_ver(hw, NULL);
581 	if (status)
582 		goto init_ctrlq_free_rq;
583 
584 	if (!ice_aq_ver_check(hw)) {
585 		status = ICE_ERR_FW_API_VER;
586 		goto init_ctrlq_free_rq;
587 	}
588 
589 	return ICE_SUCCESS;
590 
591 init_ctrlq_free_rq:
592 	ice_shutdown_rq(hw, cq);
593 	ice_shutdown_sq(hw, cq);
594 	return status;
595 }
596 
597 /**
598  * ice_init_ctrlq - main initialization routine for any control Queue
599  * @hw: pointer to the hardware structure
600  * @q_type: specific Control queue type
601  *
602  * Prior to calling this function, the driver *MUST* set the following fields
603  * in the cq structure:
604  *     - cq->num_sq_entries
605  *     - cq->num_rq_entries
606  *     - cq->rq_buf_size
607  *     - cq->sq_buf_size
608  *
609  * NOTE: this function does not initialize the controlq locks
610  */
611 static enum ice_status ice_init_ctrlq(struct ice_hw *hw, enum ice_ctl_q q_type)
612 {
613 	struct ice_ctl_q_info *cq;
614 	enum ice_status ret_code;
615 
616 	ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
617 
618 	switch (q_type) {
619 	case ICE_CTL_Q_ADMIN:
620 		ice_adminq_init_regs(hw);
621 		cq = &hw->adminq;
622 		break;
623 	case ICE_CTL_Q_MAILBOX:
624 		ice_mailbox_init_regs(hw);
625 		cq = &hw->mailboxq;
626 		break;
627 	default:
628 		return ICE_ERR_PARAM;
629 	}
630 	cq->qtype = q_type;
631 
632 	/* verify input for valid configuration */
633 	if (!cq->num_rq_entries || !cq->num_sq_entries ||
634 	    !cq->rq_buf_size || !cq->sq_buf_size) {
635 		return ICE_ERR_CFG;
636 	}
637 
638 	/* setup SQ command write back timeout */
639 	cq->sq_cmd_timeout = ICE_CTL_Q_SQ_CMD_TIMEOUT;
640 
641 	/* allocate the ATQ */
642 	ret_code = ice_init_sq(hw, cq);
643 	if (ret_code)
644 		return ret_code;
645 
646 	/* allocate the ARQ */
647 	ret_code = ice_init_rq(hw, cq);
648 	if (ret_code)
649 		goto init_ctrlq_free_sq;
650 
651 	/* success! */
652 	return ICE_SUCCESS;
653 
654 init_ctrlq_free_sq:
655 	ice_shutdown_sq(hw, cq);
656 	return ret_code;
657 }
658 
659 /**
660  * ice_shutdown_ctrlq - shutdown routine for any control queue
661  * @hw: pointer to the hardware structure
662  * @q_type: specific Control queue type
663  * @unloading: is the driver unloading itself
664  *
665  * NOTE: this function does not destroy the control queue locks.
666  */
667 static void
668 ice_shutdown_ctrlq(struct ice_hw *hw, enum ice_ctl_q q_type,
669 		   bool unloading)
670 {
671 	struct ice_ctl_q_info *cq;
672 
673 	ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
674 
675 	switch (q_type) {
676 	case ICE_CTL_Q_ADMIN:
677 		cq = &hw->adminq;
678 		if (ice_check_sq_alive(hw, cq))
679 			ice_aq_q_shutdown(hw, unloading);
680 		break;
681 	case ICE_CTL_Q_MAILBOX:
682 		cq = &hw->mailboxq;
683 		break;
684 	default:
685 		return;
686 	}
687 
688 	ice_shutdown_sq(hw, cq);
689 	ice_shutdown_rq(hw, cq);
690 }
691 
692 /**
693  * ice_shutdown_all_ctrlq - shutdown routine for all control queues
694  * @hw: pointer to the hardware structure
695  * @unloading: is the driver unloading itself
696  *
697  * NOTE: this function does not destroy the control queue locks. The driver
698  * may call this at runtime to shutdown and later restart control queues, such
699  * as in response to a reset event.
700  */
701 void ice_shutdown_all_ctrlq(struct ice_hw *hw, bool unloading)
702 {
703 	ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
704 	/* Shutdown FW admin queue */
705 	ice_shutdown_ctrlq(hw, ICE_CTL_Q_ADMIN, unloading);
706 	/* Shutdown PF-VF Mailbox */
707 	ice_shutdown_ctrlq(hw, ICE_CTL_Q_MAILBOX, unloading);
708 }
709 
710 /**
711  * ice_init_all_ctrlq - main initialization routine for all control queues
712  * @hw: pointer to the hardware structure
713  *
714  * Prior to calling this function, the driver *MUST* set the following fields
715  * in the cq structure for all control queues:
716  *     - cq->num_sq_entries
717  *     - cq->num_rq_entries
718  *     - cq->rq_buf_size
719  *     - cq->sq_buf_size
720  *
721  * NOTE: this function does not initialize the controlq locks.
722  */
723 enum ice_status ice_init_all_ctrlq(struct ice_hw *hw)
724 {
725 	enum ice_status status;
726 	u32 retry = 0;
727 
728 	ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
729 
730 	/* Init FW admin queue */
731 	do {
732 		status = ice_init_ctrlq(hw, ICE_CTL_Q_ADMIN);
733 		if (status)
734 			return status;
735 
736 		status = ice_init_check_adminq(hw);
737 		if (status != ICE_ERR_AQ_FW_CRITICAL)
738 			break;
739 
740 		ice_debug(hw, ICE_DBG_AQ_MSG, "Retry Admin Queue init due to FW critical error\n");
741 		ice_shutdown_ctrlq(hw, ICE_CTL_Q_ADMIN, true);
742 		ice_msec_delay(ICE_CTL_Q_ADMIN_INIT_MSEC, true);
743 	} while (retry++ < ICE_CTL_Q_ADMIN_INIT_TIMEOUT);
744 
745 	if (status)
746 		return status;
747 	/* Init Mailbox queue */
748 	return ice_init_ctrlq(hw, ICE_CTL_Q_MAILBOX);
749 }
750 
751 /**
752  * ice_init_ctrlq_locks - Initialize locks for a control queue
753  * @cq: pointer to the control queue
754  *
755  * Initializes the send and receive queue locks for a given control queue.
756  */
757 static void ice_init_ctrlq_locks(struct ice_ctl_q_info *cq)
758 {
759 	ice_init_lock(&cq->sq_lock);
760 	ice_init_lock(&cq->rq_lock);
761 }
762 
763 /**
764  * ice_create_all_ctrlq - main initialization routine for all control queues
765  * @hw: pointer to the hardware structure
766  *
767  * Prior to calling this function, the driver *MUST* set the following fields
768  * in the cq structure for all control queues:
769  *     - cq->num_sq_entries
770  *     - cq->num_rq_entries
771  *     - cq->rq_buf_size
772  *     - cq->sq_buf_size
773  *
774  * This function creates all the control queue locks and then calls
775  * ice_init_all_ctrlq. It should be called once during driver load. If the
776  * driver needs to re-initialize control queues at run time it should call
777  * ice_init_all_ctrlq instead.
778  */
779 enum ice_status ice_create_all_ctrlq(struct ice_hw *hw)
780 {
781 	ice_init_ctrlq_locks(&hw->adminq);
782 	ice_init_ctrlq_locks(&hw->mailboxq);
783 
784 	return ice_init_all_ctrlq(hw);
785 }
786 
787 /**
788  * ice_destroy_ctrlq_locks - Destroy locks for a control queue
789  * @cq: pointer to the control queue
790  *
791  * Destroys the send and receive queue locks for a given control queue.
792  */
793 static void ice_destroy_ctrlq_locks(struct ice_ctl_q_info *cq)
794 {
795 	ice_destroy_lock(&cq->sq_lock);
796 	ice_destroy_lock(&cq->rq_lock);
797 }
798 
799 /**
800  * ice_destroy_all_ctrlq - exit routine for all control queues
801  * @hw: pointer to the hardware structure
802  *
803  * This function shuts down all the control queues and then destroys the
804  * control queue locks. It should be called once during driver unload. The
805  * driver should call ice_shutdown_all_ctrlq if it needs to shut down and
806  * reinitialize control queues, such as in response to a reset event.
807  */
808 void ice_destroy_all_ctrlq(struct ice_hw *hw)
809 {
810 	/* shut down all the control queues first */
811 	ice_shutdown_all_ctrlq(hw, true);
812 
813 	ice_destroy_ctrlq_locks(&hw->adminq);
814 	ice_destroy_ctrlq_locks(&hw->mailboxq);
815 }
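
/*
 * Illustrative usage sketch (not part of the driver): the expected lifecycle
 * of the control queue entry points above. The queue and buffer sizes shown
 * are hypothetical; the driver must fill them in before creating the queues.
 *
 *	hw->adminq.num_sq_entries = 256;
 *	hw->adminq.num_rq_entries = 512;
 *	hw->adminq.sq_buf_size = 4096;
 *	hw->adminq.rq_buf_size = 4096;
 *	(same for hw->mailboxq)
 *
 *	if (ice_create_all_ctrlq(hw))		(once, at driver load)
 *		return error;
 *	...
 *	ice_shutdown_all_ctrlq(hw, false);	(e.g. in response to a reset)
 *	ice_init_all_ctrlq(hw);			(re-initialize after the reset)
 *	...
 *	ice_destroy_all_ctrlq(hw);		(once, at driver unload)
 */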
816 
817 /**
818  * ice_clean_sq - cleans send side of a control queue
819  * @hw: pointer to the hardware structure
820  * @cq: pointer to the specific Control queue
821  *
822  * Returns the number of free descriptors.
823  */
824 static u16 ice_clean_sq(struct ice_hw *hw, struct ice_ctl_q_info *cq)
825 {
826 	struct ice_ctl_q_ring *sq = &cq->sq;
827 	u16 ntc = sq->next_to_clean;
828 	struct ice_aq_desc *desc;
829 
830 	desc = ICE_CTL_Q_DESC(*sq, ntc);
831 
832 	while (rd32(hw, cq->sq.head) != ntc) {
833 		ice_debug(hw, ICE_DBG_AQ_MSG, "ntc %d head %d.\n", ntc, rd32(hw, cq->sq.head));
834 		ice_memset(desc, 0, sizeof(*desc), ICE_DMA_MEM);
835 		ntc++;
836 		if (ntc == sq->count)
837 			ntc = 0;
838 		desc = ICE_CTL_Q_DESC(*sq, ntc);
839 	}
840 
841 	sq->next_to_clean = ntc;
842 
843 	return ICE_CTL_Q_DESC_UNUSED(sq);
844 }
845 
846 /**
847  * ice_ctl_q_str - Convert control queue type to string
848  * @qtype: the control queue type
849  *
850  * Returns: A string name for the given control queue type.
851  */
852 static const char *ice_ctl_q_str(enum ice_ctl_q qtype)
853 {
854 	switch (qtype) {
855 	case ICE_CTL_Q_UNKNOWN:
856 		return "Unknown CQ";
857 	case ICE_CTL_Q_ADMIN:
858 		return "AQ";
859 	case ICE_CTL_Q_MAILBOX:
860 		return "MBXQ";
861 	default:
862 		return "Unrecognized CQ";
863 	}
864 }
865 
866 /**
867  * ice_debug_cq
868  * @hw: pointer to the hardware structure
869  * @cq: pointer to the specific Control queue
870  * @desc: pointer to control queue descriptor
871  * @buf: pointer to command buffer
872  * @buf_len: max length of buf
873  * @response: true if this is the writeback response
874  *
875  * Dumps debug log about control command with descriptor contents.
876  */
877 static void
878 ice_debug_cq(struct ice_hw *hw, struct ice_ctl_q_info *cq,
879 	     void *desc, void *buf, u16 buf_len, bool response)
880 {
881 	struct ice_aq_desc *cq_desc = (struct ice_aq_desc *)desc;
882 	u16 datalen, flags;
883 
884 	if (!((ICE_DBG_AQ_DESC | ICE_DBG_AQ_DESC_BUF) & hw->debug_mask))
885 		return;
886 
887 	if (!desc)
888 		return;
889 
890 	datalen = LE16_TO_CPU(cq_desc->datalen);
891 	flags = LE16_TO_CPU(cq_desc->flags);
892 
893 	ice_debug(hw, ICE_DBG_AQ_DESC, "%s %s: opcode 0x%04X, flags 0x%04X, datalen 0x%04X, retval 0x%04X\n",
894 		  ice_ctl_q_str(cq->qtype), response ? "Response" : "Command",
895 		  LE16_TO_CPU(cq_desc->opcode), flags, datalen,
896 		  LE16_TO_CPU(cq_desc->retval));
897 	ice_debug(hw, ICE_DBG_AQ_DESC, "\tcookie (h,l) 0x%08X 0x%08X\n",
898 		  LE32_TO_CPU(cq_desc->cookie_high),
899 		  LE32_TO_CPU(cq_desc->cookie_low));
900 	ice_debug(hw, ICE_DBG_AQ_DESC, "\tparam (0,1)  0x%08X 0x%08X\n",
901 		  LE32_TO_CPU(cq_desc->params.generic.param0),
902 		  LE32_TO_CPU(cq_desc->params.generic.param1));
903 	ice_debug(hw, ICE_DBG_AQ_DESC, "\taddr (h,l)   0x%08X 0x%08X\n",
904 		  LE32_TO_CPU(cq_desc->params.generic.addr_high),
905 		  LE32_TO_CPU(cq_desc->params.generic.addr_low));
906 	/* Dump the buffer iff 1) one exists and 2) it is either a response
907 	 * indicated by the DD and/or CMP flag being set, or a command with the RD flag set.
908 	 */
909 	if (buf && cq_desc->datalen != 0 &&
910 	    (flags & (ICE_AQ_FLAG_DD | ICE_AQ_FLAG_CMP) ||
911 	     flags & ICE_AQ_FLAG_RD)) {
912 		ice_debug(hw, ICE_DBG_AQ_DESC_BUF, "Buffer:\n");
913 		ice_debug_array(hw, ICE_DBG_AQ_DESC_BUF, 16, 1, (u8 *)buf,
914 				MIN_T(u16, buf_len, datalen));
915 	}
916 }
917 
918 /**
919  * ice_sq_done - check if the last send on a control queue has completed
920  * @hw: pointer to the HW struct
921  * @cq: pointer to the specific Control queue
922  *
923  * Returns: true if all the descriptors on the send side of a control queue
924  *          are finished processing, false otherwise.
925  */
926 bool ice_sq_done(struct ice_hw *hw, struct ice_ctl_q_info *cq)
927 {
928 	/* the control queue designers suggest using the head register for better
929 	 * timing reliability than the DD bit
930 	 */
931 	return rd32(hw, cq->sq.head) == cq->sq.next_to_use;
932 }
933 
934 /**
935  * ice_sq_send_cmd_nolock - send command to a control queue
936  * @hw: pointer to the HW struct
937  * @cq: pointer to the specific Control queue
938  * @desc: prefilled descriptor describing the command (non DMA mem)
939  * @buf: buffer to use for indirect commands (or NULL for direct commands)
940  * @buf_size: size of buffer for indirect commands (or 0 for direct commands)
941  * @cd: pointer to command details structure
942  *
943  * This is the main send command routine for a control queue. It prepares the
944  * command into a descriptor, bumps the send queue tail, waits for the command
945  * to complete, captures status and data for the command, etc.
946  */
947 static enum ice_status
948 ice_sq_send_cmd_nolock(struct ice_hw *hw, struct ice_ctl_q_info *cq,
949 		       struct ice_aq_desc *desc, void *buf, u16 buf_size,
950 		       struct ice_sq_cd *cd)
951 {
952 	struct ice_dma_mem *dma_buf = NULL;
953 	struct ice_aq_desc *desc_on_ring;
954 	bool cmd_completed = false;
955 	enum ice_status status = ICE_SUCCESS;
956 	u32 total_delay = 0;
957 	u16 retval = 0;
958 	u32 val = 0;
959 
960 	/* if reset is in progress return a soft error */
961 	if (hw->reset_ongoing)
962 		return ICE_ERR_RESET_ONGOING;
963 
964 	cq->sq_last_status = ICE_AQ_RC_OK;
965 
966 	if (!cq->sq.count) {
967 		ice_debug(hw, ICE_DBG_AQ_MSG, "Control Send queue not initialized.\n");
968 		status = ICE_ERR_AQ_EMPTY;
969 		goto sq_send_command_error;
970 	}
971 
972 	if ((buf && !buf_size) || (!buf && buf_size)) {
973 		status = ICE_ERR_PARAM;
974 		goto sq_send_command_error;
975 	}
976 
977 	if (buf) {
978 		if (buf_size > cq->sq_buf_size) {
979 			ice_debug(hw, ICE_DBG_AQ_MSG, "Invalid buffer size for Control Send queue: %d.\n",
980 				  buf_size);
981 			status = ICE_ERR_INVAL_SIZE;
982 			goto sq_send_command_error;
983 		}
984 
985 		desc->flags |= CPU_TO_LE16(ICE_AQ_FLAG_BUF);
986 		if (buf_size > ICE_AQ_LG_BUF)
987 			desc->flags |= CPU_TO_LE16(ICE_AQ_FLAG_LB);
988 	}
989 
990 	val = rd32(hw, cq->sq.head);
991 	if (val >= cq->num_sq_entries) {
992 		ice_debug(hw, ICE_DBG_AQ_MSG, "head overrun at %d in the Control Send Queue ring\n",
993 			  val);
994 		status = ICE_ERR_AQ_EMPTY;
995 		goto sq_send_command_error;
996 	}
997 
998 	/* Call the clean-and-check-queue-available function to reclaim the
999 	 * descriptors that were processed by FW/MBX; it returns the number of
1000 	 * descriptors available. The clean function called here could be
1001 	 * called in a separate thread in case of asynchronous completions.
1002 	 */
1003 	if (ice_clean_sq(hw, cq) == 0) {
1004 		ice_debug(hw, ICE_DBG_AQ_MSG, "Error: Control Send Queue is full.\n");
1005 		status = ICE_ERR_AQ_FULL;
1006 		goto sq_send_command_error;
1007 	}
1008 
1009 	/* initialize the temp desc pointer with the right desc */
1010 	desc_on_ring = ICE_CTL_Q_DESC(cq->sq, cq->sq.next_to_use);
1011 
1012 	/* if the desc is available copy the temp desc to the right place */
1013 	ice_memcpy(desc_on_ring, desc, sizeof(*desc_on_ring),
1014 		   ICE_NONDMA_TO_DMA);
1015 
1016 	/* if buf is not NULL assume indirect command */
1017 	if (buf) {
1018 		dma_buf = &cq->sq.r.sq_bi[cq->sq.next_to_use];
1019 		/* copy the user buf into the respective DMA buf */
1020 		ice_memcpy(dma_buf->va, buf, buf_size, ICE_NONDMA_TO_DMA);
1021 		desc_on_ring->datalen = CPU_TO_LE16(buf_size);
1022 
1023 		/* Update the address values in the desc with the pa value
1024 		 * for respective buffer
1025 		 */
1026 		desc_on_ring->params.generic.addr_high =
1027 			CPU_TO_LE32(ICE_HI_DWORD(dma_buf->pa));
1028 		desc_on_ring->params.generic.addr_low =
1029 			CPU_TO_LE32(ICE_LO_DWORD(dma_buf->pa));
1030 	}
1031 
1032 	/* Debug desc and buffer */
1033 	ice_debug(hw, ICE_DBG_AQ_DESC, "ATQ: Control Send queue desc and buffer:\n");
1034 	ice_debug_cq(hw, cq, (void *)desc_on_ring, buf, buf_size, false);
1035 
1036 	(cq->sq.next_to_use)++;
1037 	if (cq->sq.next_to_use == cq->sq.count)
1038 		cq->sq.next_to_use = 0;
1039 	wr32(hw, cq->sq.tail, cq->sq.next_to_use);
1040 	ice_flush(hw);
1041 
1042 	/* Wait a short time before initial ice_sq_done() check, to allow
1043 	 * hardware time for completion.
1044 	 */
1045 	ice_usec_delay(5, false);
1046 
1047 	do {
1048 		if (ice_sq_done(hw, cq))
1049 			break;
1050 
1051 		ice_usec_delay(10, false);
1052 		total_delay++;
1053 	} while (total_delay < cq->sq_cmd_timeout);
1054 
1055 	/* if ready, copy the desc back to temp */
1056 	if (ice_sq_done(hw, cq)) {
1057 		ice_memcpy(desc, desc_on_ring, sizeof(*desc),
1058 			   ICE_DMA_TO_NONDMA);
1059 		if (buf) {
1060 			/* get returned length to copy */
1061 			u16 copy_size = LE16_TO_CPU(desc->datalen);
1062 
1063 			if (copy_size > buf_size) {
1064 				ice_debug(hw, ICE_DBG_AQ_MSG, "Return len %d > than buf len %d\n",
1065 					  copy_size, buf_size);
1066 				status = ICE_ERR_AQ_ERROR;
1067 			} else {
1068 				ice_memcpy(buf, dma_buf->va, copy_size,
1069 					   ICE_DMA_TO_NONDMA);
1070 			}
1071 		}
1072 		retval = LE16_TO_CPU(desc->retval);
1073 		if (retval) {
1074 			ice_debug(hw, ICE_DBG_AQ_MSG, "Control Send Queue command 0x%04X completed with error 0x%X\n",
1075 				  LE16_TO_CPU(desc->opcode),
1076 				  retval);
1077 
1078 			/* strip off FW internal code */
1079 			retval &= 0xff;
1080 		}
1081 		cmd_completed = true;
1082 		if (!status && retval != ICE_AQ_RC_OK)
1083 			status = ICE_ERR_AQ_ERROR;
1084 		cq->sq_last_status = (enum ice_aq_err)retval;
1085 	}
1086 
1087 	ice_debug(hw, ICE_DBG_AQ_MSG, "ATQ: desc and buffer writeback:\n");
1088 	ice_debug_cq(hw, cq, (void *)desc, buf, buf_size, true);
1089 
1090 	/* save writeback AQ if requested */
1091 	if (cd && cd->wb_desc)
1092 		ice_memcpy(cd->wb_desc, desc_on_ring,
1093 			   sizeof(*cd->wb_desc), ICE_DMA_TO_NONDMA);
1094 
1095 	/* update the error if time out occurred */
1096 	if (!cmd_completed) {
1097 		if (rd32(hw, cq->rq.len) & cq->rq.len_crit_mask ||
1098 		    rd32(hw, cq->sq.len) & cq->sq.len_crit_mask) {
1099 			ice_debug(hw, ICE_DBG_AQ_MSG, "Critical FW error.\n");
1100 			status = ICE_ERR_AQ_FW_CRITICAL;
1101 		} else {
1102 			ice_debug(hw, ICE_DBG_AQ_MSG, "Control Send Queue Writeback timeout.\n");
1103 			status = ICE_ERR_AQ_TIMEOUT;
1104 		}
1105 	}
1106 
1107 sq_send_command_error:
1108 	return status;
1109 }
1110 
1111 /**
1112  * ice_sq_send_cmd - send command to a control queue
1113  * @hw: pointer to the HW struct
1114  * @cq: pointer to the specific Control queue
1115  * @desc: prefilled descriptor describing the command
1116  * @buf: buffer to use for indirect commands (or NULL for direct commands)
1117  * @buf_size: size of buffer for indirect commands (or 0 for direct commands)
1118  * @cd: pointer to command details structure
1119  *
1120  * Main command for the transmit side of a control queue. It puts the command
1121  * on the queue, bumps the tail, waits for processing of the command, captures
1122  * command status and results, etc.
1123  */
1124 enum ice_status
1125 ice_sq_send_cmd(struct ice_hw *hw, struct ice_ctl_q_info *cq,
1126 		struct ice_aq_desc *desc, void *buf, u16 buf_size,
1127 		struct ice_sq_cd *cd)
1128 {
1129 	enum ice_status status = ICE_SUCCESS;
1130 
1131 	/* if reset is in progress return a soft error */
1132 	if (hw->reset_ongoing)
1133 		return ICE_ERR_RESET_ONGOING;
1134 
1135 	ice_acquire_lock(&cq->sq_lock);
1136 	status = ice_sq_send_cmd_nolock(hw, cq, desc, buf, buf_size, cd);
1137 	ice_release_lock(&cq->sq_lock);
1138 
1139 	return status;
1140 }
1141 
1142 /**
1143  * ice_fill_dflt_direct_cmd_desc - AQ descriptor helper function
1144  * @desc: pointer to the temp descriptor (non DMA mem)
1145  * @opcode: the opcode can be used to decide which flags to turn off or on
1146  *
1147  * Fill the desc with default values
1148  */
1149 void ice_fill_dflt_direct_cmd_desc(struct ice_aq_desc *desc, u16 opcode)
1150 {
1151 	/* zero out the desc */
1152 	ice_memset(desc, 0, sizeof(*desc), ICE_NONDMA_MEM);
1153 	desc->opcode = CPU_TO_LE16(opcode);
1154 	desc->flags = CPU_TO_LE16(ICE_AQ_FLAG_SI);
1155 }
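
/*
 * Illustrative usage sketch (not part of the driver): issuing a direct
 * (buffer-less) command with the helpers above. The opcode value 0x0001 is
 * hypothetical; real callers pass an admin queue opcode constant.
 *
 *	struct ice_aq_desc desc;
 *	enum ice_status status;
 *
 *	ice_fill_dflt_direct_cmd_desc(&desc, 0x0001);
 *	status = ice_sq_send_cmd(hw, &hw->adminq, &desc, NULL, 0, NULL);
 *	if (status)
 *		(on failure, hw->adminq.sq_last_status holds the AQ return code)
 */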
1156 
1157 /**
1158  * ice_clean_rq_elem
1159  * @hw: pointer to the HW struct
1160  * @cq: pointer to the specific Control queue
1161  * @e: event info from the receive descriptor, includes any buffers
1162  * @pending: number of events that could be left to process
1163  *
1164  * Clean one element from the receive side of a control queue. On return 'e'
1165  * contains contents of the message, and 'pending' contains the number of
1166  * events left to process.
1167  */
1168 enum ice_status
1169 ice_clean_rq_elem(struct ice_hw *hw, struct ice_ctl_q_info *cq,
1170 		  struct ice_rq_event_info *e, u16 *pending)
1171 {
1172 	u16 ntc = cq->rq.next_to_clean;
1173 	enum ice_aq_err rq_last_status;
1174 	enum ice_status ret_code = ICE_SUCCESS;
1175 	struct ice_aq_desc *desc;
1176 	struct ice_dma_mem *bi;
1177 	u16 desc_idx;
1178 	u16 datalen;
1179 	u16 flags;
1180 	u16 ntu;
1181 
1182 	/* pre-clean the event info */
1183 	ice_memset(&e->desc, 0, sizeof(e->desc), ICE_NONDMA_MEM);
1184 
1185 	/* take the lock before we start messing with the ring */
1186 	ice_acquire_lock(&cq->rq_lock);
1187 
1188 	if (!cq->rq.count) {
1189 		ice_debug(hw, ICE_DBG_AQ_MSG, "Control Receive queue not initialized.\n");
1190 		ret_code = ICE_ERR_AQ_EMPTY;
1191 		goto clean_rq_elem_err;
1192 	}
1193 
1194 	/* set next_to_use to head */
1195 	ntu = (u16)(rd32(hw, cq->rq.head) & cq->rq.head_mask);
1196 
1197 	if (ntu == ntc) {
1198 		/* nothing to do - shouldn't need to update ring's values */
1199 		ret_code = ICE_ERR_AQ_NO_WORK;
1200 		goto clean_rq_elem_out;
1201 	}
1202 
1203 	/* now clean the next descriptor */
1204 	desc = ICE_CTL_Q_DESC(cq->rq, ntc);
1205 	desc_idx = ntc;
1206 
1207 	rq_last_status = (enum ice_aq_err)LE16_TO_CPU(desc->retval);
1208 	flags = LE16_TO_CPU(desc->flags);
1209 	if (flags & ICE_AQ_FLAG_ERR) {
1210 		ret_code = ICE_ERR_AQ_ERROR;
1211 		ice_debug(hw, ICE_DBG_AQ_MSG, "Control Receive Queue Event 0x%04X received with error 0x%X\n",
1212 			  LE16_TO_CPU(desc->opcode), rq_last_status);
1213 	}
1214 	ice_memcpy(&e->desc, desc, sizeof(e->desc), ICE_DMA_TO_NONDMA);
1215 	datalen = LE16_TO_CPU(desc->datalen);
1216 	e->msg_len = MIN_T(u16, datalen, e->buf_len);
1217 	if (e->msg_buf && e->msg_len)
1218 		ice_memcpy(e->msg_buf, cq->rq.r.rq_bi[desc_idx].va,
1219 			   e->msg_len, ICE_DMA_TO_NONDMA);
1220 
1221 	ice_debug(hw, ICE_DBG_AQ_DESC, "ARQ: desc and buffer:\n");
1222 	ice_debug_cq(hw, cq, (void *)desc, e->msg_buf, cq->rq_buf_size, true);
1223 
1224 	/* Restore the original datalen and buffer address in the desc,
1225 	 * FW updates datalen to indicate the event message size
1226 	 */
1227 	bi = &cq->rq.r.rq_bi[ntc];
1228 	ice_memset(desc, 0, sizeof(*desc), ICE_DMA_MEM);
1229 
1230 	desc->flags = CPU_TO_LE16(ICE_AQ_FLAG_BUF);
1231 	if (cq->rq_buf_size > ICE_AQ_LG_BUF)
1232 		desc->flags |= CPU_TO_LE16(ICE_AQ_FLAG_LB);
1233 	desc->datalen = CPU_TO_LE16(bi->size);
1234 	desc->params.generic.addr_high = CPU_TO_LE32(ICE_HI_DWORD(bi->pa));
1235 	desc->params.generic.addr_low = CPU_TO_LE32(ICE_LO_DWORD(bi->pa));
1236 
1237 	/* set tail = the last cleaned desc index. */
1238 	wr32(hw, cq->rq.tail, ntc);
1239 	/* ntc is updated to tail + 1 */
1240 	ntc++;
1241 	if (ntc == cq->num_rq_entries)
1242 		ntc = 0;
1243 	cq->rq.next_to_clean = ntc;
1244 	cq->rq.next_to_use = ntu;
1245 
1246 clean_rq_elem_out:
1247 	/* Set pending if needed, unlock and return */
1248 	if (pending) {
1249 		/* re-read HW head to calculate actual pending messages */
1250 		ntu = (u16)(rd32(hw, cq->rq.head) & cq->rq.head_mask);
1251 		*pending = (u16)((ntc > ntu ? cq->rq.count : 0) + (ntu - ntc));
1252 	}
1253 clean_rq_elem_err:
1254 	ice_release_lock(&cq->rq_lock);
1255 
1256 	return ret_code;
1257 }
1258
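
/*
 * Illustrative usage sketch (not part of the driver): draining the receive
 * side of the admin queue with ice_clean_rq_elem(). The 4096-byte buffer
 * size is hypothetical; the caller owns event.msg_buf.
 *
 *	struct ice_rq_event_info event;
 *	u16 pending = 0;
 *
 *	ice_memset(&event, 0, sizeof(event), ICE_NONDMA_MEM);
 *	event.buf_len = 4096;
 *	event.msg_buf = (u8 *)ice_malloc(hw, event.buf_len);
 *
 *	do {
 *		if (ice_clean_rq_elem(hw, &hw->adminq, &event, &pending))
 *			break;		(ICE_ERR_AQ_NO_WORK once the queue is empty)
 *		(dispatch on LE16_TO_CPU(event.desc.opcode) here)
 *	} while (pending);
 *
 *	ice_free(hw, event.msg_buf);
 */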