xref: /freebsd/sys/dev/ice/ice_controlq.c (revision 535af610)
/* SPDX-License-Identifier: BSD-3-Clause */
/*  Copyright (c) 2023, Intel Corporation
 *  All rights reserved.
 *
 *  Redistribution and use in source and binary forms, with or without
 *  modification, are permitted provided that the following conditions are met:
 *
 *   1. Redistributions of source code must retain the above copyright notice,
 *      this list of conditions and the following disclaimer.
 *
 *   2. Redistributions in binary form must reproduce the above copyright
 *      notice, this list of conditions and the following disclaimer in the
 *      documentation and/or other materials provided with the distribution.
 *
 *   3. Neither the name of the Intel Corporation nor the names of its
 *      contributors may be used to endorse or promote products derived from
 *      this software without specific prior written permission.
 *
 *  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 *  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 *  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 *  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 *  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 *  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 *  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 *  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 *  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 *  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 *  POSSIBILITY OF SUCH DAMAGE.
 */
/*$FreeBSD$*/

#include "ice_common.h"

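/*
 * ICE_CQ_INIT_REGS - fill in the register offsets and masks for one control
 * queue pair. The send queue (ATQ) and receive queue (ARQ) register names
 * share a common prefix per queue type (e.g. PF_FW for the AdminQ, PF_MBX
 * for the PF-VF Mailbox), which this macro pastes onto each register token.
 */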
#define ICE_CQ_INIT_REGS(qinfo, prefix)				\
do {								\
	(qinfo)->sq.head = prefix##_ATQH;			\
	(qinfo)->sq.tail = prefix##_ATQT;			\
	(qinfo)->sq.len = prefix##_ATQLEN;			\
	(qinfo)->sq.bah = prefix##_ATQBAH;			\
	(qinfo)->sq.bal = prefix##_ATQBAL;			\
	(qinfo)->sq.len_mask = prefix##_ATQLEN_ATQLEN_M;	\
	(qinfo)->sq.len_ena_mask = prefix##_ATQLEN_ATQENABLE_M;	\
	(qinfo)->sq.len_crit_mask = prefix##_ATQLEN_ATQCRIT_M;	\
	(qinfo)->sq.head_mask = prefix##_ATQH_ATQH_M;		\
	(qinfo)->rq.head = prefix##_ARQH;			\
	(qinfo)->rq.tail = prefix##_ARQT;			\
	(qinfo)->rq.len = prefix##_ARQLEN;			\
	(qinfo)->rq.bah = prefix##_ARQBAH;			\
	(qinfo)->rq.bal = prefix##_ARQBAL;			\
	(qinfo)->rq.len_mask = prefix##_ARQLEN_ARQLEN_M;	\
	(qinfo)->rq.len_ena_mask = prefix##_ARQLEN_ARQENABLE_M;	\
	(qinfo)->rq.len_crit_mask = prefix##_ARQLEN_ARQCRIT_M;	\
	(qinfo)->rq.head_mask = prefix##_ARQH_ARQH_M;		\
} while (0)

/**
 * ice_adminq_init_regs - Initialize AdminQ registers
 * @hw: pointer to the hardware structure
 *
 * This assumes the alloc_sq and alloc_rq functions have already been called
 */
static void ice_adminq_init_regs(struct ice_hw *hw)
{
	struct ice_ctl_q_info *cq = &hw->adminq;

	ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);

	ICE_CQ_INIT_REGS(cq, PF_FW);
}

/**
 * ice_mailbox_init_regs - Initialize Mailbox registers
 * @hw: pointer to the hardware structure
 *
 * This assumes the alloc_sq and alloc_rq functions have already been called
 */
static void ice_mailbox_init_regs(struct ice_hw *hw)
{
	struct ice_ctl_q_info *cq = &hw->mailboxq;

	ICE_CQ_INIT_REGS(cq, PF_MBX);
}

/**
 * ice_check_sq_alive
 * @hw: pointer to the HW struct
 * @cq: pointer to the specific Control queue
 *
 * Returns true if the queue is enabled, else false.
 */
bool ice_check_sq_alive(struct ice_hw *hw, struct ice_ctl_q_info *cq)
{
	/* check both queue-length and queue-enable fields */
	if (cq->sq.len && cq->sq.len_mask && cq->sq.len_ena_mask)
		return (rd32(hw, cq->sq.len) & (cq->sq.len_mask |
						cq->sq.len_ena_mask)) ==
			(cq->num_sq_entries | cq->sq.len_ena_mask);

	return false;
}

/**
 * ice_alloc_ctrlq_sq_ring - Allocate Control Transmit Queue (ATQ) rings
 * @hw: pointer to the hardware structure
 * @cq: pointer to the specific Control queue
 */
static enum ice_status
ice_alloc_ctrlq_sq_ring(struct ice_hw *hw, struct ice_ctl_q_info *cq)
{
	size_t size = cq->num_sq_entries * sizeof(struct ice_aq_desc);

	cq->sq.desc_buf.va = ice_alloc_dma_mem(hw, &cq->sq.desc_buf, size);
	if (!cq->sq.desc_buf.va)
		return ICE_ERR_NO_MEMORY;

	cq->sq.cmd_buf = ice_calloc(hw, cq->num_sq_entries,
				    sizeof(struct ice_sq_cd));
	if (!cq->sq.cmd_buf) {
		ice_free_dma_mem(hw, &cq->sq.desc_buf);
		return ICE_ERR_NO_MEMORY;
	}

	return ICE_SUCCESS;
}

/**
 * ice_alloc_ctrlq_rq_ring - Allocate Control Receive Queue (ARQ) rings
 * @hw: pointer to the hardware structure
 * @cq: pointer to the specific Control queue
 */
static enum ice_status
ice_alloc_ctrlq_rq_ring(struct ice_hw *hw, struct ice_ctl_q_info *cq)
{
	size_t size = cq->num_rq_entries * sizeof(struct ice_aq_desc);

	cq->rq.desc_buf.va = ice_alloc_dma_mem(hw, &cq->rq.desc_buf, size);
	if (!cq->rq.desc_buf.va)
		return ICE_ERR_NO_MEMORY;
	return ICE_SUCCESS;
}

/**
 * ice_free_cq_ring - Free control queue ring
 * @hw: pointer to the hardware structure
 * @ring: pointer to the specific control queue ring
 *
 * This assumes the posted buffers have already been cleaned
 * and de-allocated
 */
static void ice_free_cq_ring(struct ice_hw *hw, struct ice_ctl_q_ring *ring)
{
	ice_free_dma_mem(hw, &ring->desc_buf);
}

/**
 * ice_alloc_rq_bufs - Allocate pre-posted buffers for the ARQ
 * @hw: pointer to the hardware structure
 * @cq: pointer to the specific Control queue
 */
static enum ice_status
ice_alloc_rq_bufs(struct ice_hw *hw, struct ice_ctl_q_info *cq)
{
	int i;

	/* Allocate the buffer info memory first, then the DMA-mapped
	 * buffers used for event processing
	 */
	cq->rq.dma_head = ice_calloc(hw, cq->num_rq_entries,
				     sizeof(cq->rq.desc_buf));
	if (!cq->rq.dma_head)
		return ICE_ERR_NO_MEMORY;
	cq->rq.r.rq_bi = (struct ice_dma_mem *)cq->rq.dma_head;

	/* allocate the mapped buffers */
	for (i = 0; i < cq->num_rq_entries; i++) {
		struct ice_aq_desc *desc;
		struct ice_dma_mem *bi;

		bi = &cq->rq.r.rq_bi[i];
		bi->va = ice_alloc_dma_mem(hw, bi, cq->rq_buf_size);
		if (!bi->va)
			goto unwind_alloc_rq_bufs;

		/* now configure the descriptors for use */
		desc = ICE_CTL_Q_DESC(cq->rq, i);

		desc->flags = CPU_TO_LE16(ICE_AQ_FLAG_BUF);
		if (cq->rq_buf_size > ICE_AQ_LG_BUF)
			desc->flags |= CPU_TO_LE16(ICE_AQ_FLAG_LB);
		desc->opcode = 0;
		/* Per the Admin Queue design there is no register for buffer
		 * size configuration; the size is conveyed in each
		 * descriptor's datalen field instead
		 */
		desc->datalen = CPU_TO_LE16(bi->size);
		desc->retval = 0;
		desc->cookie_high = 0;
		desc->cookie_low = 0;
		desc->params.generic.addr_high =
			CPU_TO_LE32(ICE_HI_DWORD(bi->pa));
		desc->params.generic.addr_low =
			CPU_TO_LE32(ICE_LO_DWORD(bi->pa));
		desc->params.generic.param0 = 0;
		desc->params.generic.param1 = 0;
	}
	return ICE_SUCCESS;

unwind_alloc_rq_bufs:
	/* don't try to free the one that failed... */
	i--;
	for (; i >= 0; i--)
		ice_free_dma_mem(hw, &cq->rq.r.rq_bi[i]);
	cq->rq.r.rq_bi = NULL;
	ice_free(hw, cq->rq.dma_head);
	cq->rq.dma_head = NULL;

	return ICE_ERR_NO_MEMORY;
}

/**
 * ice_alloc_sq_bufs - Allocate empty buffer structs for the ATQ
 * @hw: pointer to the hardware structure
 * @cq: pointer to the specific Control queue
 */
static enum ice_status
ice_alloc_sq_bufs(struct ice_hw *hw, struct ice_ctl_q_info *cq)
{
	int i;

	/* No mapped memory needed yet, just the buffer info structures */
	cq->sq.dma_head = ice_calloc(hw, cq->num_sq_entries,
				     sizeof(cq->sq.desc_buf));
	if (!cq->sq.dma_head)
		return ICE_ERR_NO_MEMORY;
	cq->sq.r.sq_bi = (struct ice_dma_mem *)cq->sq.dma_head;

	/* allocate the mapped buffers */
	for (i = 0; i < cq->num_sq_entries; i++) {
		struct ice_dma_mem *bi;

		bi = &cq->sq.r.sq_bi[i];
		bi->va = ice_alloc_dma_mem(hw, bi, cq->sq_buf_size);
		if (!bi->va)
			goto unwind_alloc_sq_bufs;
	}
	return ICE_SUCCESS;

unwind_alloc_sq_bufs:
	/* don't try to free the one that failed... */
	i--;
	for (; i >= 0; i--)
		ice_free_dma_mem(hw, &cq->sq.r.sq_bi[i]);
	cq->sq.r.sq_bi = NULL;
	ice_free(hw, cq->sq.dma_head);
	cq->sq.dma_head = NULL;

	return ICE_ERR_NO_MEMORY;
}

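/**
 * ice_cfg_cq_regs - setup the base and length registers for a control queue
 * @hw: pointer to the hardware structure
 * @ring: pointer to the specific control queue ring
 * @num_entries: number of entries to program into the length register
 *
 * Configure the base address and length registers for the given control
 * queue ring, then read one register back to verify the writes took effect.
 */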
static enum ice_status
ice_cfg_cq_regs(struct ice_hw *hw, struct ice_ctl_q_ring *ring, u16 num_entries)
{
	/* Clear Head and Tail */
	wr32(hw, ring->head, 0);
	wr32(hw, ring->tail, 0);

	/* set starting point */
	wr32(hw, ring->len, (num_entries | ring->len_ena_mask));
	wr32(hw, ring->bal, ICE_LO_DWORD(ring->desc_buf.pa));
	wr32(hw, ring->bah, ICE_HI_DWORD(ring->desc_buf.pa));

	/* Check one register to verify that config was applied */
	if (rd32(hw, ring->bal) != ICE_LO_DWORD(ring->desc_buf.pa))
		return ICE_ERR_AQ_ERROR;

	return ICE_SUCCESS;
}

/**
 * ice_cfg_sq_regs - configure Control ATQ registers
 * @hw: pointer to the hardware structure
 * @cq: pointer to the specific Control queue
 *
 * Configure base address and length registers for the transmit queue
 */
static enum ice_status
ice_cfg_sq_regs(struct ice_hw *hw, struct ice_ctl_q_info *cq)
{
	return ice_cfg_cq_regs(hw, &cq->sq, cq->num_sq_entries);
}

/**
 * ice_cfg_rq_regs - configure Control ARQ registers
 * @hw: pointer to the hardware structure
 * @cq: pointer to the specific Control queue
 *
 * Configure base address and length registers for the receive queue (the
 * event queue)
 */
static enum ice_status
ice_cfg_rq_regs(struct ice_hw *hw, struct ice_ctl_q_info *cq)
{
	enum ice_status status;

	status = ice_cfg_cq_regs(hw, &cq->rq, cq->num_rq_entries);
	if (status)
		return status;

	/* Update tail in the HW to post pre-allocated buffers */
	wr32(hw, cq->rq.tail, (u32)(cq->num_rq_entries - 1));

	return ICE_SUCCESS;
}

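/*
 * ICE_FREE_CQ_BUFS - free the DMA buffers, command details array, and DMA
 * head storage associated with the sq or rq ring of a control queue. The
 * ring's descriptor memory itself is released separately by
 * ice_free_cq_ring().
 */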
#define ICE_FREE_CQ_BUFS(hw, qi, ring)					\
do {									\
	/* free the DMA buffers */					\
	if ((qi)->ring.r.ring##_bi) {					\
		int i;							\
									\
		for (i = 0; i < (qi)->num_##ring##_entries; i++)	\
			if ((qi)->ring.r.ring##_bi[i].pa)		\
				ice_free_dma_mem((hw),			\
					&(qi)->ring.r.ring##_bi[i]);	\
	}								\
	/* free the command details array */				\
	if ((qi)->ring.cmd_buf)						\
		ice_free(hw, (qi)->ring.cmd_buf);			\
	/* free DMA head */						\
	ice_free(hw, (qi)->ring.dma_head);				\
} while (0)

/**
 * ice_init_sq - main initialization routine for Control ATQ
 * @hw: pointer to the hardware structure
 * @cq: pointer to the specific Control queue
 *
 * This is the main initialization routine for the Control Send Queue.
 * Prior to calling this function, the driver *MUST* set the following fields
 * in the cq structure:
 *     - cq->num_sq_entries
 *     - cq->sq_buf_size
 *
 * Do *NOT* hold the lock when calling this as the memory allocation routines
 * called are not going to be atomic context safe
 */
static enum ice_status ice_init_sq(struct ice_hw *hw, struct ice_ctl_q_info *cq)
{
	enum ice_status ret_code;

	ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);

	if (cq->sq.count > 0) {
		/* queue already initialized */
		ret_code = ICE_ERR_NOT_READY;
		goto init_ctrlq_exit;
	}

	/* verify input for valid configuration */
	if (!cq->num_sq_entries || !cq->sq_buf_size) {
		ret_code = ICE_ERR_CFG;
		goto init_ctrlq_exit;
	}

	cq->sq.next_to_use = 0;
	cq->sq.next_to_clean = 0;

	/* allocate the ring memory */
	ret_code = ice_alloc_ctrlq_sq_ring(hw, cq);
	if (ret_code)
		goto init_ctrlq_exit;

	/* allocate buffers in the rings */
	ret_code = ice_alloc_sq_bufs(hw, cq);
	if (ret_code)
		goto init_ctrlq_free_rings;

	/* initialize base registers */
	ret_code = ice_cfg_sq_regs(hw, cq);
	if (ret_code)
		goto init_ctrlq_free_rings;

	/* success! */
	cq->sq.count = cq->num_sq_entries;
	goto init_ctrlq_exit;

init_ctrlq_free_rings:
	ICE_FREE_CQ_BUFS(hw, cq, sq);
	ice_free_cq_ring(hw, &cq->sq);

init_ctrlq_exit:
	return ret_code;
}

/**
 * ice_init_rq - initialize ARQ
 * @hw: pointer to the hardware structure
 * @cq: pointer to the specific Control queue
 *
 * The main initialization routine for the Admin Receive (Event) Queue.
 * Prior to calling this function, the driver *MUST* set the following fields
 * in the cq structure:
 *     - cq->num_rq_entries
 *     - cq->rq_buf_size
 *
 * Do *NOT* hold the lock when calling this as the memory allocation routines
 * called are not going to be atomic context safe
 */
static enum ice_status ice_init_rq(struct ice_hw *hw, struct ice_ctl_q_info *cq)
{
	enum ice_status ret_code;

	ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);

	if (cq->rq.count > 0) {
		/* queue already initialized */
		ret_code = ICE_ERR_NOT_READY;
		goto init_ctrlq_exit;
	}

	/* verify input for valid configuration */
	if (!cq->num_rq_entries || !cq->rq_buf_size) {
		ret_code = ICE_ERR_CFG;
		goto init_ctrlq_exit;
	}

	cq->rq.next_to_use = 0;
	cq->rq.next_to_clean = 0;

	/* allocate the ring memory */
	ret_code = ice_alloc_ctrlq_rq_ring(hw, cq);
	if (ret_code)
		goto init_ctrlq_exit;

	/* allocate buffers in the rings */
	ret_code = ice_alloc_rq_bufs(hw, cq);
	if (ret_code)
		goto init_ctrlq_free_rings;

	/* initialize base registers */
	ret_code = ice_cfg_rq_regs(hw, cq);
	if (ret_code)
		goto init_ctrlq_free_rings;

	/* success! */
	cq->rq.count = cq->num_rq_entries;
	goto init_ctrlq_exit;

init_ctrlq_free_rings:
	ICE_FREE_CQ_BUFS(hw, cq, rq);
	ice_free_cq_ring(hw, &cq->rq);

init_ctrlq_exit:
	return ret_code;
}

/**
 * ice_shutdown_sq - shutdown the Control ATQ
 * @hw: pointer to the hardware structure
 * @cq: pointer to the specific Control queue
 *
 * The main shutdown routine for the Control Transmit Queue
 */
static enum ice_status
ice_shutdown_sq(struct ice_hw *hw, struct ice_ctl_q_info *cq)
{
	enum ice_status ret_code = ICE_SUCCESS;

	ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);

	ice_acquire_lock(&cq->sq_lock);

	if (!cq->sq.count) {
		ret_code = ICE_ERR_NOT_READY;
		goto shutdown_sq_out;
	}

	/* Stop processing of the control send queue */
	wr32(hw, cq->sq.head, 0);
	wr32(hw, cq->sq.tail, 0);
	wr32(hw, cq->sq.len, 0);
	wr32(hw, cq->sq.bal, 0);
	wr32(hw, cq->sq.bah, 0);

	cq->sq.count = 0;	/* to indicate uninitialized queue */

	/* free ring buffers and the ring itself */
	ICE_FREE_CQ_BUFS(hw, cq, sq);
	ice_free_cq_ring(hw, &cq->sq);

shutdown_sq_out:
	ice_release_lock(&cq->sq_lock);
	return ret_code;
}

/**
 * ice_aq_ver_check - Check the reported AQ API version.
 * @hw: pointer to the hardware structure
 *
 * Checks if the driver should load on a given AQ API version.
 *
 * Return: 'true' if the driver should attempt to load, 'false' otherwise.
 */
static bool ice_aq_ver_check(struct ice_hw *hw)
{
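	/* Refuse to load only when the firmware's major API version is newer
	 * than the driver expects; a minor version skew of more than two in
	 * either direction is reported but does not block loading.
	 */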
	if (hw->api_maj_ver > EXP_FW_API_VER_MAJOR) {
		/* Major API version is newer than expected, don't load */
		ice_warn(hw, "The driver for the device stopped because the NVM image is newer than expected. You must install the most recent version of the network driver.\n");
		return false;
	} else if (hw->api_maj_ver == EXP_FW_API_VER_MAJOR) {
		if (hw->api_min_ver > (EXP_FW_API_VER_MINOR + 2))
			ice_info(hw, "The driver for the device detected a newer version (%u.%u) of the NVM image than expected (%u.%u). Please install the most recent version of the network driver.\n",
				 hw->api_maj_ver, hw->api_min_ver,
				 EXP_FW_API_VER_MAJOR, EXP_FW_API_VER_MINOR);
		else if ((hw->api_min_ver + 2) < EXP_FW_API_VER_MINOR)
			ice_info(hw, "The driver for the device detected an older version (%u.%u) of the NVM image than expected (%u.%u). Please update the NVM image.\n",
				 hw->api_maj_ver, hw->api_min_ver,
				 EXP_FW_API_VER_MAJOR, EXP_FW_API_VER_MINOR);
	} else {
		/* Major API version is older than expected, log a warning */
		ice_info(hw, "The driver for the device detected an older version (%u.%u) of the NVM image than expected (%u.%u). Please update the NVM image.\n",
			 hw->api_maj_ver, hw->api_min_ver,
			 EXP_FW_API_VER_MAJOR, EXP_FW_API_VER_MINOR);
	}
	return true;
}

/**
 * ice_shutdown_rq - shutdown Control ARQ
 * @hw: pointer to the hardware structure
 * @cq: pointer to the specific Control queue
 *
 * The main shutdown routine for the Control Receive Queue
 */
static enum ice_status
ice_shutdown_rq(struct ice_hw *hw, struct ice_ctl_q_info *cq)
{
	enum ice_status ret_code = ICE_SUCCESS;

	ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);

	ice_acquire_lock(&cq->rq_lock);

	if (!cq->rq.count) {
		ret_code = ICE_ERR_NOT_READY;
		goto shutdown_rq_out;
	}

	/* Stop Control Queue processing */
	wr32(hw, cq->rq.head, 0);
	wr32(hw, cq->rq.tail, 0);
	wr32(hw, cq->rq.len, 0);
	wr32(hw, cq->rq.bal, 0);
	wr32(hw, cq->rq.bah, 0);

	/* set rq.count to 0 to indicate uninitialized queue */
	cq->rq.count = 0;

	/* free ring buffers and the ring itself */
	ICE_FREE_CQ_BUFS(hw, cq, rq);
	ice_free_cq_ring(hw, &cq->rq);

shutdown_rq_out:
	ice_release_lock(&cq->rq_lock);
	return ret_code;
}

/**
 * ice_idle_aq - stop ARQ/ATQ processing momentarily
 * @hw: pointer to the hardware structure
 * @cq: pointer to the specific Control queue
 */
void ice_idle_aq(struct ice_hw *hw, struct ice_ctl_q_info *cq)
{
	wr32(hw, cq->sq.len, 0);
	wr32(hw, cq->rq.len, 0);

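	/* allow the hardware a brief window to quiesce queue processing
	 * after the length registers are zeroed
	 */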
	ice_msec_delay(2, false);
}

/**
 * ice_init_check_adminq - Check version for Admin Queue to know if it's alive
 * @hw: pointer to the hardware structure
 */
static enum ice_status ice_init_check_adminq(struct ice_hw *hw)
{
	struct ice_ctl_q_info *cq = &hw->adminq;
	enum ice_status status;

	ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);

	status = ice_aq_get_fw_ver(hw, NULL);
	if (status)
		goto init_ctrlq_free_rq;

	if (!ice_aq_ver_check(hw)) {
		status = ICE_ERR_FW_API_VER;
		goto init_ctrlq_free_rq;
	}

	return ICE_SUCCESS;

init_ctrlq_free_rq:
	ice_shutdown_rq(hw, cq);
	ice_shutdown_sq(hw, cq);
	return status;
}

609  * ice_init_ctrlq - main initialization routine for any control Queue
610  * @hw: pointer to the hardware structure
611  * @q_type: specific Control queue type
612  *
613  * Prior to calling this function, the driver *MUST* set the following fields
614  * in the cq->structure:
615  *     - cq->num_sq_entries
616  *     - cq->num_rq_entries
617  *     - cq->rq_buf_size
618  *     - cq->sq_buf_size
619  *
620  * NOTE: this function does not initialize the controlq locks
621  */
static enum ice_status ice_init_ctrlq(struct ice_hw *hw, enum ice_ctl_q q_type)
{
	struct ice_ctl_q_info *cq;
	enum ice_status ret_code;

	ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);

	switch (q_type) {
	case ICE_CTL_Q_ADMIN:
		ice_adminq_init_regs(hw);
		cq = &hw->adminq;
		break;
	case ICE_CTL_Q_MAILBOX:
		ice_mailbox_init_regs(hw);
		cq = &hw->mailboxq;
		break;
	default:
		return ICE_ERR_PARAM;
	}
	cq->qtype = q_type;

	/* verify input for valid configuration */
	if (!cq->num_rq_entries || !cq->num_sq_entries ||
	    !cq->rq_buf_size || !cq->sq_buf_size) {
		return ICE_ERR_CFG;
	}

	/* setup SQ command write back timeout */
	cq->sq_cmd_timeout = ICE_CTL_Q_SQ_CMD_TIMEOUT;

	/* allocate the ATQ */
	ret_code = ice_init_sq(hw, cq);
	if (ret_code)
		return ret_code;

	/* allocate the ARQ */
	ret_code = ice_init_rq(hw, cq);
	if (ret_code)
		goto init_ctrlq_free_sq;

	/* success! */
	return ICE_SUCCESS;

init_ctrlq_free_sq:
	ice_shutdown_sq(hw, cq);
	return ret_code;
}

/**
 * ice_shutdown_ctrlq - shutdown routine for any control queue
 * @hw: pointer to the hardware structure
 * @q_type: specific Control queue type
 * @unloading: is the driver unloading itself
 *
 * NOTE: this function does not destroy the control queue locks.
 */
static void ice_shutdown_ctrlq(struct ice_hw *hw, enum ice_ctl_q q_type,
			       bool unloading)
{
	struct ice_ctl_q_info *cq;

	ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);

	switch (q_type) {
	case ICE_CTL_Q_ADMIN:
		cq = &hw->adminq;
		if (ice_check_sq_alive(hw, cq))
			ice_aq_q_shutdown(hw, unloading);
		break;
	case ICE_CTL_Q_MAILBOX:
		cq = &hw->mailboxq;
		break;
	default:
		return;
	}

	ice_shutdown_sq(hw, cq);
	ice_shutdown_rq(hw, cq);
}

/**
 * ice_shutdown_all_ctrlq - shutdown routine for all control queues
 * @hw: pointer to the hardware structure
 * @unloading: is the driver unloading itself
 *
 * NOTE: this function does not destroy the control queue locks. The driver
 * may call this at runtime to shutdown and later restart control queues, such
 * as in response to a reset event.
 */
void ice_shutdown_all_ctrlq(struct ice_hw *hw, bool unloading)
{
	ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
	/* Shutdown FW admin queue */
	ice_shutdown_ctrlq(hw, ICE_CTL_Q_ADMIN, unloading);
	/* Shutdown PF-VF Mailbox */
	ice_shutdown_ctrlq(hw, ICE_CTL_Q_MAILBOX, unloading);
}

/**
 * ice_init_all_ctrlq - main initialization routine for all control queues
 * @hw: pointer to the hardware structure
 *
 * Prior to calling this function, the driver *MUST* set the following fields
 * in the cq structure for all control queues:
 *     - cq->num_sq_entries
 *     - cq->num_rq_entries
 *     - cq->rq_buf_size
 *     - cq->sq_buf_size
 *
 * NOTE: this function does not initialize the controlq locks.
 */
enum ice_status ice_init_all_ctrlq(struct ice_hw *hw)
{
	enum ice_status status;
	u32 retry = 0;

	ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);

	/* Init FW admin queue */
	do {
		status = ice_init_ctrlq(hw, ICE_CTL_Q_ADMIN);
		if (status)
			return status;

		status = ice_init_check_adminq(hw);
		if (status != ICE_ERR_AQ_FW_CRITICAL)
			break;

		ice_debug(hw, ICE_DBG_AQ_MSG, "Retry Admin Queue init due to FW critical error\n");
		ice_shutdown_ctrlq(hw, ICE_CTL_Q_ADMIN, true);
		ice_msec_delay(ICE_CTL_Q_ADMIN_INIT_MSEC, true);
	} while (retry++ < ICE_CTL_Q_ADMIN_INIT_TIMEOUT);

	if (status)
		return status;
	/* Init Mailbox queue */
	return ice_init_ctrlq(hw, ICE_CTL_Q_MAILBOX);
}

/**
 * ice_init_ctrlq_locks - Initialize locks for a control queue
 * @cq: pointer to the control queue
 *
 * Initializes the send and receive queue locks for a given control queue.
 */
static void ice_init_ctrlq_locks(struct ice_ctl_q_info *cq)
{
	ice_init_lock(&cq->sq_lock);
	ice_init_lock(&cq->rq_lock);
}

/**
 * ice_create_all_ctrlq - main initialization routine for all control queues
 * @hw: pointer to the hardware structure
 *
 * Prior to calling this function, the driver *MUST* set the following fields
 * in the cq structure for all control queues:
 *     - cq->num_sq_entries
 *     - cq->num_rq_entries
 *     - cq->rq_buf_size
 *     - cq->sq_buf_size
 *
 * This function creates all the control queue locks and then calls
 * ice_init_all_ctrlq. It should be called once during driver load. If the
 * driver needs to re-initialize control queues at run time it should call
 * ice_init_all_ctrlq instead.
 */
enum ice_status ice_create_all_ctrlq(struct ice_hw *hw)
{
	ice_init_ctrlq_locks(&hw->adminq);
	ice_init_ctrlq_locks(&hw->mailboxq);

	return ice_init_all_ctrlq(hw);
}

/**
 * ice_destroy_ctrlq_locks - Destroy locks for a control queue
 * @cq: pointer to the control queue
 *
 * Destroys the send and receive queue locks for a given control queue.
 */
static void ice_destroy_ctrlq_locks(struct ice_ctl_q_info *cq)
{
	ice_destroy_lock(&cq->sq_lock);
	ice_destroy_lock(&cq->rq_lock);
}

/**
 * ice_destroy_all_ctrlq - exit routine for all control queues
 * @hw: pointer to the hardware structure
 *
 * This function shuts down all the control queues and then destroys the
 * control queue locks. It should be called once during driver unload. The
 * driver should call ice_shutdown_all_ctrlq if it needs to shut down and
 * reinitialize control queues, such as in response to a reset event.
 */
void ice_destroy_all_ctrlq(struct ice_hw *hw)
{
	/* shut down all the control queues first */
	ice_shutdown_all_ctrlq(hw, true);

	ice_destroy_ctrlq_locks(&hw->adminq);
	ice_destroy_ctrlq_locks(&hw->mailboxq);
}

/**
 * ice_clean_sq - cleans Admin send queue (ATQ)
 * @hw: pointer to the hardware structure
 * @cq: pointer to the specific Control queue
 *
 * Returns the number of free descriptors.
 */
static u16 ice_clean_sq(struct ice_hw *hw, struct ice_ctl_q_info *cq)
{
	struct ice_ctl_q_ring *sq = &cq->sq;
	u16 ntc = sq->next_to_clean;
	struct ice_sq_cd *details;
	struct ice_aq_desc *desc;

	desc = ICE_CTL_Q_DESC(*sq, ntc);
	details = ICE_CTL_Q_DETAILS(*sq, ntc);

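	/* walk from next_to_clean up to the hardware head, zeroing each
	 * descriptor and its details now that firmware has consumed it
	 */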
	while (rd32(hw, cq->sq.head) != ntc) {
		ice_debug(hw, ICE_DBG_AQ_MSG, "ntc %d head %d.\n", ntc, rd32(hw, cq->sq.head));
		ice_memset(desc, 0, sizeof(*desc), ICE_DMA_MEM);
		ice_memset(details, 0, sizeof(*details), ICE_NONDMA_MEM);
		ntc++;
		if (ntc == sq->count)
			ntc = 0;
		desc = ICE_CTL_Q_DESC(*sq, ntc);
		details = ICE_CTL_Q_DETAILS(*sq, ntc);
	}

	sq->next_to_clean = ntc;

	return ICE_CTL_Q_DESC_UNUSED(sq);
}

/**
 * ice_debug_cq
 * @hw: pointer to the hardware structure
 * @desc: pointer to control queue descriptor
 * @buf: pointer to command buffer
 * @buf_len: max length of buf
 *
 * Dumps debug log about control command with descriptor contents.
 */
static void ice_debug_cq(struct ice_hw *hw, void *desc, void *buf, u16 buf_len)
{
	struct ice_aq_desc *cq_desc = (struct ice_aq_desc *)desc;
	u16 datalen, flags;

	if (!((ICE_DBG_AQ_DESC | ICE_DBG_AQ_DESC_BUF) & hw->debug_mask))
		return;

	if (!desc)
		return;

	datalen = LE16_TO_CPU(cq_desc->datalen);
	flags = LE16_TO_CPU(cq_desc->flags);

	ice_debug(hw, ICE_DBG_AQ_DESC, "CQ CMD: opcode 0x%04X, flags 0x%04X, datalen 0x%04X, retval 0x%04X\n",
		  LE16_TO_CPU(cq_desc->opcode), flags, datalen,
		  LE16_TO_CPU(cq_desc->retval));
	ice_debug(hw, ICE_DBG_AQ_DESC, "\tcookie (h,l) 0x%08X 0x%08X\n",
		  LE32_TO_CPU(cq_desc->cookie_high),
		  LE32_TO_CPU(cq_desc->cookie_low));
	ice_debug(hw, ICE_DBG_AQ_DESC, "\tparam (0,1)  0x%08X 0x%08X\n",
		  LE32_TO_CPU(cq_desc->params.generic.param0),
		  LE32_TO_CPU(cq_desc->params.generic.param1));
	ice_debug(hw, ICE_DBG_AQ_DESC, "\taddr (h,l)   0x%08X 0x%08X\n",
		  LE32_TO_CPU(cq_desc->params.generic.addr_high),
		  LE32_TO_CPU(cq_desc->params.generic.addr_low));
	/* Dump the buffer iff 1) one exists and 2) it is either a response
	 * indicated by the DD and/or CMP flags being set, or a command with
	 * the RD flag set.
	 */
	if (buf && cq_desc->datalen != 0 &&
	    (flags & (ICE_AQ_FLAG_DD | ICE_AQ_FLAG_CMP) ||
	     flags & ICE_AQ_FLAG_RD)) {
		ice_debug(hw, ICE_DBG_AQ_DESC_BUF, "Buffer:\n");
		ice_debug_array(hw, ICE_DBG_AQ_DESC_BUF, 16, 1, (u8 *)buf,
				MIN_T(u16, buf_len, datalen));
	}
}

/**
 * ice_sq_done - check if FW has processed the Admin Send Queue (ATQ)
 * @hw: pointer to the HW struct
 * @cq: pointer to the specific Control queue
 *
 * Returns true if the firmware has processed all descriptors on the
 * admin send queue. Returns false if there are still requests pending.
 */
bool ice_sq_done(struct ice_hw *hw, struct ice_ctl_q_info *cq)
{
	/* AQ designers suggest use of head for better
	 * timing reliability than DD bit
	 */
	return rd32(hw, cq->sq.head) == cq->sq.next_to_use;
}

/**
 * ice_sq_send_cmd_nolock - send command to Control Queue (ATQ)
 * @hw: pointer to the HW struct
 * @cq: pointer to the specific Control queue
 * @desc: prefilled descriptor describing the command (non DMA mem)
 * @buf: buffer to use for indirect commands (or NULL for direct commands)
 * @buf_size: size of buffer for indirect commands (or 0 for direct commands)
 * @cd: pointer to command details structure
 *
 * This is the main send command routine for the ATQ. It runs the queue,
 * cleans the queue, etc.
 */
static enum ice_status
ice_sq_send_cmd_nolock(struct ice_hw *hw, struct ice_ctl_q_info *cq,
		       struct ice_aq_desc *desc, void *buf, u16 buf_size,
		       struct ice_sq_cd *cd)
{
	struct ice_dma_mem *dma_buf = NULL;
	struct ice_aq_desc *desc_on_ring;
	bool cmd_completed = false;
	enum ice_status status = ICE_SUCCESS;
	struct ice_sq_cd *details;
	u32 total_delay = 0;
	u16 retval = 0;
	u32 val = 0;

	/* if reset is in progress return a soft error */
	if (hw->reset_ongoing)
		return ICE_ERR_RESET_ONGOING;

	cq->sq_last_status = ICE_AQ_RC_OK;

	if (!cq->sq.count) {
		ice_debug(hw, ICE_DBG_AQ_MSG, "Control Send queue not initialized.\n");
		status = ICE_ERR_AQ_EMPTY;
		goto sq_send_command_error;
	}

	if ((buf && !buf_size) || (!buf && buf_size)) {
		status = ICE_ERR_PARAM;
		goto sq_send_command_error;
	}

	if (buf) {
		if (buf_size > cq->sq_buf_size) {
			ice_debug(hw, ICE_DBG_AQ_MSG, "Invalid buffer size for Control Send queue: %d.\n",
				  buf_size);
			status = ICE_ERR_INVAL_SIZE;
			goto sq_send_command_error;
		}

		desc->flags |= CPU_TO_LE16(ICE_AQ_FLAG_BUF);
		if (buf_size > ICE_AQ_LG_BUF)
			desc->flags |= CPU_TO_LE16(ICE_AQ_FLAG_LB);
	}

	val = rd32(hw, cq->sq.head);
	if (val >= cq->num_sq_entries) {
		ice_debug(hw, ICE_DBG_AQ_MSG, "head overrun at %d in the Control Send Queue ring\n",
			  val);
		status = ICE_ERR_AQ_EMPTY;
		goto sq_send_command_error;
	}

	details = ICE_CTL_Q_DETAILS(cq->sq, cq->sq.next_to_use);
	if (cd)
		*details = *cd;
	else
		ice_memset(details, 0, sizeof(*details), ICE_NONDMA_MEM);

	/* Call clean and check queue available function to reclaim the
	 * descriptors that were processed by FW/MBX; the function returns the
	 * number of desc available. The clean function called here could be
	 * called in a separate thread in case of asynchronous completions.
	 */
	if (ice_clean_sq(hw, cq) == 0) {
		ice_debug(hw, ICE_DBG_AQ_MSG, "Error: Control Send Queue is full.\n");
		status = ICE_ERR_AQ_FULL;
		goto sq_send_command_error;
	}

	/* initialize the temp desc pointer with the right desc */
	desc_on_ring = ICE_CTL_Q_DESC(cq->sq, cq->sq.next_to_use);

	/* if the desc is available copy the temp desc to the right place */
	ice_memcpy(desc_on_ring, desc, sizeof(*desc_on_ring),
		   ICE_NONDMA_TO_DMA);

	/* if buf is not NULL assume indirect command */
	if (buf) {
		dma_buf = &cq->sq.r.sq_bi[cq->sq.next_to_use];
		/* copy the user buf into the respective DMA buf */
		ice_memcpy(dma_buf->va, buf, buf_size, ICE_NONDMA_TO_DMA);
		desc_on_ring->datalen = CPU_TO_LE16(buf_size);

		/* Update the address values in the desc with the pa value
		 * for respective buffer
		 */
		desc_on_ring->params.generic.addr_high =
			CPU_TO_LE32(ICE_HI_DWORD(dma_buf->pa));
		desc_on_ring->params.generic.addr_low =
			CPU_TO_LE32(ICE_LO_DWORD(dma_buf->pa));
	}

	/* Debug desc and buffer */
	ice_debug(hw, ICE_DBG_AQ_DESC, "ATQ: Control Send queue desc and buffer:\n");

	ice_debug_cq(hw, (void *)desc_on_ring, buf, buf_size);

	(cq->sq.next_to_use)++;
	if (cq->sq.next_to_use == cq->sq.count)
		cq->sq.next_to_use = 0;
	wr32(hw, cq->sq.tail, cq->sq.next_to_use);

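	/* poll until the firmware has processed the descriptor (the head
	 * register catches up to next_to_use) or the timeout expires
	 */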
	do {
		if (ice_sq_done(hw, cq))
			break;

		ice_usec_delay(ICE_CTL_Q_SQ_CMD_USEC, false);
		total_delay++;
	} while (total_delay < cq->sq_cmd_timeout);

	/* if ready, copy the desc back to temp */
	if (ice_sq_done(hw, cq)) {
		ice_memcpy(desc, desc_on_ring, sizeof(*desc),
			   ICE_DMA_TO_NONDMA);
		if (buf) {
			/* get returned length to copy */
			u16 copy_size = LE16_TO_CPU(desc->datalen);

			if (copy_size > buf_size) {
				ice_debug(hw, ICE_DBG_AQ_MSG, "Return len %d > than buf len %d\n",
					  copy_size, buf_size);
				status = ICE_ERR_AQ_ERROR;
			} else {
				ice_memcpy(buf, dma_buf->va, copy_size,
					   ICE_DMA_TO_NONDMA);
			}
		}
		retval = LE16_TO_CPU(desc->retval);
		if (retval) {
			ice_debug(hw, ICE_DBG_AQ_MSG, "Control Send Queue command 0x%04X completed with error 0x%X\n",
				  LE16_TO_CPU(desc->opcode),
				  retval);

			/* strip off FW internal code */
			retval &= 0xff;
		}
		cmd_completed = true;
		if (!status && retval != ICE_AQ_RC_OK)
			status = ICE_ERR_AQ_ERROR;
		cq->sq_last_status = (enum ice_aq_err)retval;
	}

	ice_debug(hw, ICE_DBG_AQ_MSG, "ATQ: desc and buffer writeback:\n");

	ice_debug_cq(hw, (void *)desc, buf, buf_size);

	/* save writeback AQ if requested */
	if (details->wb_desc)
		ice_memcpy(details->wb_desc, desc_on_ring,
			   sizeof(*details->wb_desc), ICE_DMA_TO_NONDMA);

	/* update the error if a timeout occurred */
	if (!cmd_completed) {
		if (rd32(hw, cq->rq.len) & cq->rq.len_crit_mask ||
		    rd32(hw, cq->sq.len) & cq->sq.len_crit_mask) {
			ice_debug(hw, ICE_DBG_AQ_MSG, "Critical FW error.\n");
			status = ICE_ERR_AQ_FW_CRITICAL;
		} else {
			ice_debug(hw, ICE_DBG_AQ_MSG, "Control Send Queue Writeback timeout.\n");
			status = ICE_ERR_AQ_TIMEOUT;
		}
	}

sq_send_command_error:
	return status;
}

/**
 * ice_sq_send_cmd - send command to Control Queue (ATQ)
 * @hw: pointer to the HW struct
 * @cq: pointer to the specific Control queue
 * @desc: prefilled descriptor describing the command
 * @buf: buffer to use for indirect commands (or NULL for direct commands)
 * @buf_size: size of buffer for indirect commands (or 0 for direct commands)
 * @cd: pointer to command details structure
 *
 * This is the main send command routine for the ATQ. It runs the queue,
 * cleans the queue, etc.
 */
enum ice_status
ice_sq_send_cmd(struct ice_hw *hw, struct ice_ctl_q_info *cq,
		struct ice_aq_desc *desc, void *buf, u16 buf_size,
		struct ice_sq_cd *cd)
{
	enum ice_status status = ICE_SUCCESS;

	/* if reset is in progress return a soft error */
	if (hw->reset_ongoing)
		return ICE_ERR_RESET_ONGOING;

	ice_acquire_lock(&cq->sq_lock);
	status = ice_sq_send_cmd_nolock(hw, cq, desc, buf, buf_size, cd);
	ice_release_lock(&cq->sq_lock);

	return status;
}

/**
 * ice_fill_dflt_direct_cmd_desc - AQ descriptor helper function
 * @desc: pointer to the temp descriptor (non DMA mem)
 * @opcode: the opcode can be used to decide which flags to turn off or on
 *
 * Fill the desc with default values
 */
void ice_fill_dflt_direct_cmd_desc(struct ice_aq_desc *desc, u16 opcode)
{
	/* zero out the desc */
	ice_memset(desc, 0, sizeof(*desc), ICE_NONDMA_MEM);
	desc->opcode = CPU_TO_LE16(opcode);
	desc->flags = CPU_TO_LE16(ICE_AQ_FLAG_SI);
}

/**
 * ice_clean_rq_elem
 * @hw: pointer to the HW struct
 * @cq: pointer to the specific Control queue
 * @e: event info from the receive descriptor, includes any buffers
 * @pending: number of events that could be left to process
 *
 * This function cleans one Admin Receive Queue element and returns
 * the contents through e. It can also return how many events are
 * left to process through 'pending'.
 */
enum ice_status
ice_clean_rq_elem(struct ice_hw *hw, struct ice_ctl_q_info *cq,
		  struct ice_rq_event_info *e, u16 *pending)
{
	u16 ntc = cq->rq.next_to_clean;
	enum ice_aq_err rq_last_status;
	enum ice_status ret_code = ICE_SUCCESS;
	struct ice_aq_desc *desc;
	struct ice_dma_mem *bi;
	u16 desc_idx;
	u16 datalen;
	u16 flags;
	u16 ntu;

	/* pre-clean the event info */
	ice_memset(&e->desc, 0, sizeof(e->desc), ICE_NONDMA_MEM);

	/* take the lock before we start messing with the ring */
	ice_acquire_lock(&cq->rq_lock);

	if (!cq->rq.count) {
		ice_debug(hw, ICE_DBG_AQ_MSG, "Control Receive queue not initialized.\n");
		ret_code = ICE_ERR_AQ_EMPTY;
		goto clean_rq_elem_err;
	}

	/* set next_to_use to head */
	ntu = (u16)(rd32(hw, cq->rq.head) & cq->rq.head_mask);

	if (ntu == ntc) {
		/* nothing to do - shouldn't need to update ring's values */
		ret_code = ICE_ERR_AQ_NO_WORK;
		goto clean_rq_elem_out;
	}

	/* now clean the next descriptor */
	desc = ICE_CTL_Q_DESC(cq->rq, ntc);
	desc_idx = ntc;

	rq_last_status = (enum ice_aq_err)LE16_TO_CPU(desc->retval);
	flags = LE16_TO_CPU(desc->flags);
	if (flags & ICE_AQ_FLAG_ERR) {
		ret_code = ICE_ERR_AQ_ERROR;
		ice_debug(hw, ICE_DBG_AQ_MSG, "Control Receive Queue Event 0x%04X received with error 0x%X\n",
			  LE16_TO_CPU(desc->opcode), rq_last_status);
	}
	ice_memcpy(&e->desc, desc, sizeof(e->desc), ICE_DMA_TO_NONDMA);
	datalen = LE16_TO_CPU(desc->datalen);
	e->msg_len = MIN_T(u16, datalen, e->buf_len);
	if (e->msg_buf && e->msg_len)
		ice_memcpy(e->msg_buf, cq->rq.r.rq_bi[desc_idx].va,
			   e->msg_len, ICE_DMA_TO_NONDMA);

	ice_debug(hw, ICE_DBG_AQ_DESC, "ARQ: desc and buffer:\n");

	ice_debug_cq(hw, (void *)desc, e->msg_buf, cq->rq_buf_size);

	/* Restore the original datalen and buffer address in the desc,
	 * FW updates datalen to indicate the event message size
	 */
	bi = &cq->rq.r.rq_bi[ntc];
	ice_memset(desc, 0, sizeof(*desc), ICE_DMA_MEM);

	desc->flags = CPU_TO_LE16(ICE_AQ_FLAG_BUF);
	if (cq->rq_buf_size > ICE_AQ_LG_BUF)
		desc->flags |= CPU_TO_LE16(ICE_AQ_FLAG_LB);
	desc->datalen = CPU_TO_LE16(bi->size);
	desc->params.generic.addr_high = CPU_TO_LE32(ICE_HI_DWORD(bi->pa));
	desc->params.generic.addr_low = CPU_TO_LE32(ICE_LO_DWORD(bi->pa));

	/* set tail = the last cleaned desc index. */
	wr32(hw, cq->rq.tail, ntc);
	/* ntc is updated to tail + 1 */
	ntc++;
	if (ntc == cq->num_rq_entries)
		ntc = 0;
	cq->rq.next_to_clean = ntc;
	cq->rq.next_to_use = ntu;

clean_rq_elem_out:
	/* Set pending if needed, unlock and return */
	if (pending) {
		/* re-read HW head to calculate actual pending messages */
		ntu = (u16)(rd32(hw, cq->rq.head) & cq->rq.head_mask);
		*pending = (u16)((ntc > ntu ? cq->rq.count : 0) + (ntu - ntc));
	}
clean_rq_elem_err:
	ice_release_lock(&cq->rq_lock);

	return ret_code;
}
1249