/* SPDX-License-Identifier: BSD-3-Clause */
/*  Copyright (c) 2023, Intel Corporation
 *  All rights reserved.
 *
 *  Redistribution and use in source and binary forms, with or without
 *  modification, are permitted provided that the following conditions are met:
 *
 *   1. Redistributions of source code must retain the above copyright notice,
 *      this list of conditions and the following disclaimer.
 *
 *   2. Redistributions in binary form must reproduce the above copyright
 *      notice, this list of conditions and the following disclaimer in the
 *      documentation and/or other materials provided with the distribution.
 *
 *   3. Neither the name of the Intel Corporation nor the names of its
 *      contributors may be used to endorse or promote products derived from
 *      this software without specific prior written permission.
 *
 *  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 *  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 *  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 *  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 *  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 *  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 *  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 *  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 *  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 *  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 *  POSSIBILITY OF SUCH DAMAGE.
 */

#include "ice_common.h"

#define ICE_CQ_INIT_REGS(qinfo, prefix)				\
do {								\
	(qinfo)->sq.head = prefix##_ATQH;			\
	(qinfo)->sq.tail = prefix##_ATQT;			\
	(qinfo)->sq.len = prefix##_ATQLEN;			\
	(qinfo)->sq.bah = prefix##_ATQBAH;			\
	(qinfo)->sq.bal = prefix##_ATQBAL;			\
	(qinfo)->sq.len_mask = prefix##_ATQLEN_ATQLEN_M;	\
	(qinfo)->sq.len_ena_mask = prefix##_ATQLEN_ATQENABLE_M;	\
	(qinfo)->sq.len_crit_mask = prefix##_ATQLEN_ATQCRIT_M;	\
	(qinfo)->sq.head_mask = prefix##_ATQH_ATQH_M;		\
	(qinfo)->rq.head = prefix##_ARQH;			\
	(qinfo)->rq.tail = prefix##_ARQT;			\
	(qinfo)->rq.len = prefix##_ARQLEN;			\
	(qinfo)->rq.bah = prefix##_ARQBAH;			\
	(qinfo)->rq.bal = prefix##_ARQBAL;			\
	(qinfo)->rq.len_mask = prefix##_ARQLEN_ARQLEN_M;	\
	(qinfo)->rq.len_ena_mask = prefix##_ARQLEN_ARQENABLE_M;	\
	(qinfo)->rq.len_crit_mask = prefix##_ARQLEN_ARQCRIT_M;	\
	(qinfo)->rq.head_mask = prefix##_ARQH_ARQH_M;		\
} while (0)

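/*
 * For illustration: the token pasting above means each control queue reuses
 * the same layout logic while pointing at its own register bank. A sketch of
 * the AdminQ expansion, ICE_CQ_INIT_REGS(cq, PF_FW) (first few assignments
 * only):
 *
 *	cq->sq.head = PF_FW_ATQH;
 *	cq->sq.tail = PF_FW_ATQT;
 *	cq->sq.len = PF_FW_ATQLEN;
 *	...
 *	cq->rq.head = PF_FW_ARQH;
 *
 * The mailbox queue uses the same macro with the PF_MBX prefix instead.
 */
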
/**
 * ice_adminq_init_regs - Initialize AdminQ registers
 * @hw: pointer to the hardware structure
 *
 * This assumes the alloc_sq and alloc_rq functions have already been called
 */
static void ice_adminq_init_regs(struct ice_hw *hw)
{
	struct ice_ctl_q_info *cq = &hw->adminq;

	ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);

	ICE_CQ_INIT_REGS(cq, PF_FW);
}

/**
 * ice_mailbox_init_regs - Initialize Mailbox registers
 * @hw: pointer to the hardware structure
 *
 * This assumes the alloc_sq and alloc_rq functions have already been called
 */
static void ice_mailbox_init_regs(struct ice_hw *hw)
{
	struct ice_ctl_q_info *cq = &hw->mailboxq;

	ICE_CQ_INIT_REGS(cq, PF_MBX);
}

/**
 * ice_check_sq_alive - check if the send queue is alive
 * @hw: pointer to the HW struct
 * @cq: pointer to the specific Control queue
 *
 * Returns true if the send queue is enabled, else false.
 */
bool ice_check_sq_alive(struct ice_hw *hw, struct ice_ctl_q_info *cq)
{
	/* check both queue-length and queue-enable fields */
	if (cq->sq.len && cq->sq.len_mask && cq->sq.len_ena_mask)
		return (rd32(hw, cq->sq.len) & (cq->sq.len_mask |
						cq->sq.len_ena_mask)) ==
			(cq->num_sq_entries | cq->sq.len_ena_mask);

	return false;
}

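/*
 * A worked example of ice_check_sq_alive() (a sketch with symbolic masks):
 * an enabled send queue reads back its length register with the length
 * field equal to num_sq_entries and the enable bit set, so with
 * num_sq_entries == 256 the masked read must equal (256 | len_ena_mask).
 * A register that was zeroed by a reset, or one holding a different length,
 * fails the comparison and the queue is reported dead.
 */
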
/**
 * ice_alloc_ctrlq_sq_ring - Allocate Control Transmit Queue (ATQ) ring
 * @hw: pointer to the hardware structure
 * @cq: pointer to the specific Control queue
 */
static enum ice_status
ice_alloc_ctrlq_sq_ring(struct ice_hw *hw, struct ice_ctl_q_info *cq)
{
	size_t size = cq->num_sq_entries * sizeof(struct ice_aq_desc);

	cq->sq.desc_buf.va = ice_alloc_dma_mem(hw, &cq->sq.desc_buf, size);
	if (!cq->sq.desc_buf.va)
		return ICE_ERR_NO_MEMORY;

	cq->sq.cmd_buf = ice_calloc(hw, cq->num_sq_entries,
				    sizeof(struct ice_sq_cd));
	if (!cq->sq.cmd_buf) {
		ice_free_dma_mem(hw, &cq->sq.desc_buf);
		return ICE_ERR_NO_MEMORY;
	}

	return ICE_SUCCESS;
}

/**
 * ice_alloc_ctrlq_rq_ring - Allocate Control Receive Queue (ARQ) ring
 * @hw: pointer to the hardware structure
 * @cq: pointer to the specific Control queue
 */
static enum ice_status
ice_alloc_ctrlq_rq_ring(struct ice_hw *hw, struct ice_ctl_q_info *cq)
{
	size_t size = cq->num_rq_entries * sizeof(struct ice_aq_desc);

	cq->rq.desc_buf.va = ice_alloc_dma_mem(hw, &cq->rq.desc_buf, size);
	if (!cq->rq.desc_buf.va)
		return ICE_ERR_NO_MEMORY;
	return ICE_SUCCESS;
}

/**
 * ice_free_cq_ring - Free control queue ring
 * @hw: pointer to the hardware structure
 * @ring: pointer to the specific control queue ring
 *
 * This assumes the posted buffers have already been cleaned
 * and de-allocated
 */
static void ice_free_cq_ring(struct ice_hw *hw, struct ice_ctl_q_ring *ring)
{
	ice_free_dma_mem(hw, &ring->desc_buf);
}

/**
 * ice_alloc_rq_bufs - Allocate pre-posted buffers for the ARQ
 * @hw: pointer to the hardware structure
 * @cq: pointer to the specific Control queue
 */
static enum ice_status
ice_alloc_rq_bufs(struct ice_hw *hw, struct ice_ctl_q_info *cq)
{
	int i;

	/* We'll be allocating the buffer info memory first, then we can
	 * allocate the mapped buffers for the event processing
	 */
	cq->rq.dma_head = ice_calloc(hw, cq->num_rq_entries,
				     sizeof(cq->rq.desc_buf));
	if (!cq->rq.dma_head)
		return ICE_ERR_NO_MEMORY;
	cq->rq.r.rq_bi = (struct ice_dma_mem *)cq->rq.dma_head;

	/* allocate the mapped buffers */
	for (i = 0; i < cq->num_rq_entries; i++) {
		struct ice_aq_desc *desc;
		struct ice_dma_mem *bi;

		bi = &cq->rq.r.rq_bi[i];
		bi->va = ice_alloc_dma_mem(hw, bi, cq->rq_buf_size);
		if (!bi->va)
			goto unwind_alloc_rq_bufs;

		/* now configure the descriptors for use */
		desc = ICE_CTL_Q_DESC(cq->rq, i);

		desc->flags = CPU_TO_LE16(ICE_AQ_FLAG_BUF);
		if (cq->rq_buf_size > ICE_AQ_LG_BUF)
			desc->flags |= CPU_TO_LE16(ICE_AQ_FLAG_LB);
		desc->opcode = 0;
		/* This is in accordance with Admin queue design, there is no
		 * register for buffer size configuration
		 */
		desc->datalen = CPU_TO_LE16(bi->size);
		desc->retval = 0;
		desc->cookie_high = 0;
		desc->cookie_low = 0;
		desc->params.generic.addr_high =
			CPU_TO_LE32(ICE_HI_DWORD(bi->pa));
		desc->params.generic.addr_low =
			CPU_TO_LE32(ICE_LO_DWORD(bi->pa));
		desc->params.generic.param0 = 0;
		desc->params.generic.param1 = 0;
	}
	return ICE_SUCCESS;

unwind_alloc_rq_bufs:
	/* don't try to free the one that failed... */
	i--;
	for (; i >= 0; i--)
		ice_free_dma_mem(hw, &cq->rq.r.rq_bi[i]);
	cq->rq.r.rq_bi = NULL;
	ice_free(hw, cq->rq.dma_head);
	cq->rq.dma_head = NULL;

	return ICE_ERR_NO_MEMORY;
}

/**
 * ice_alloc_sq_bufs - Allocate empty buffer structs for the ATQ
 * @hw: pointer to the hardware structure
 * @cq: pointer to the specific Control queue
 */
static enum ice_status
ice_alloc_sq_bufs(struct ice_hw *hw, struct ice_ctl_q_info *cq)
{
	int i;

	/* No mapped memory needed yet, just the buffer info structures */
	cq->sq.dma_head = ice_calloc(hw, cq->num_sq_entries,
				     sizeof(cq->sq.desc_buf));
	if (!cq->sq.dma_head)
		return ICE_ERR_NO_MEMORY;
	cq->sq.r.sq_bi = (struct ice_dma_mem *)cq->sq.dma_head;

	/* allocate the mapped buffers */
	for (i = 0; i < cq->num_sq_entries; i++) {
		struct ice_dma_mem *bi;

		bi = &cq->sq.r.sq_bi[i];
		bi->va = ice_alloc_dma_mem(hw, bi, cq->sq_buf_size);
		if (!bi->va)
			goto unwind_alloc_sq_bufs;
	}
	return ICE_SUCCESS;

unwind_alloc_sq_bufs:
	/* don't try to free the one that failed... */
	i--;
	for (; i >= 0; i--)
		ice_free_dma_mem(hw, &cq->sq.r.sq_bi[i]);
	cq->sq.r.sq_bi = NULL;
	ice_free(hw, cq->sq.dma_head);
	cq->sq.dma_head = NULL;

	return ICE_ERR_NO_MEMORY;
}

/**
 * ice_cfg_cq_regs - configure control queue registers
 * @hw: pointer to the hardware structure
 * @ring: pointer to the specific control queue ring
 * @num_entries: number of descriptors in the ring
 *
 * Program the base address and length registers for a control queue ring,
 * then read back one register to verify that the configuration took effect.
 */
static enum ice_status
ice_cfg_cq_regs(struct ice_hw *hw, struct ice_ctl_q_ring *ring, u16 num_entries)
{
	/* Clear Head and Tail */
	wr32(hw, ring->head, 0);
	wr32(hw, ring->tail, 0);

	/* set starting point */
	wr32(hw, ring->len, (num_entries | ring->len_ena_mask));
	wr32(hw, ring->bal, ICE_LO_DWORD(ring->desc_buf.pa));
	wr32(hw, ring->bah, ICE_HI_DWORD(ring->desc_buf.pa));

	/* Check one register to verify that config was applied */
	if (rd32(hw, ring->bal) != ICE_LO_DWORD(ring->desc_buf.pa))
		return ICE_ERR_AQ_ERROR;

	return ICE_SUCCESS;
}

/**
 * ice_cfg_sq_regs - configure Control ATQ registers
 * @hw: pointer to the hardware structure
 * @cq: pointer to the specific Control queue
 *
 * Configure base address and length registers for the transmit queue
 */
static enum ice_status
ice_cfg_sq_regs(struct ice_hw *hw, struct ice_ctl_q_info *cq)
{
	return ice_cfg_cq_regs(hw, &cq->sq, cq->num_sq_entries);
}

/**
 * ice_cfg_rq_regs - configure Control ARQ registers
 * @hw: pointer to the hardware structure
 * @cq: pointer to the specific Control queue
 *
 * Configure base address and length registers for the receive (event) queue
 */
static enum ice_status
ice_cfg_rq_regs(struct ice_hw *hw, struct ice_ctl_q_info *cq)
{
	enum ice_status status;

	status = ice_cfg_cq_regs(hw, &cq->rq, cq->num_rq_entries);
	if (status)
		return status;

	/* Update tail in the HW to post pre-allocated buffers */
	wr32(hw, cq->rq.tail, (u32)(cq->num_rq_entries - 1));

	return ICE_SUCCESS;
}

#define ICE_FREE_CQ_BUFS(hw, qi, ring)					\
do {									\
	/* free the posted DMA buffers */				\
	if ((qi)->ring.r.ring##_bi) {					\
		int i;							\
									\
		for (i = 0; i < (qi)->num_##ring##_entries; i++)	\
			if ((qi)->ring.r.ring##_bi[i].pa)		\
				ice_free_dma_mem((hw),			\
					&(qi)->ring.r.ring##_bi[i]);	\
	}								\
	/* free the command details list, if allocated */		\
	if ((qi)->ring.cmd_buf)						\
		ice_free(hw, (qi)->ring.cmd_buf);			\
	/* free DMA head */						\
	ice_free(hw, (qi)->ring.dma_head);				\
} while (0)

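/*
 * As with ICE_CQ_INIT_REGS(), the ring name is pasted into field names: a
 * sketch of ICE_FREE_CQ_BUFS(hw, cq, sq) walks cq->sq.r.sq_bi[0] through
 * cq->sq.r.sq_bi[num_sq_entries - 1] and frees each posted DMA buffer, then
 * frees cq->sq.cmd_buf and cq->sq.dma_head. The rq variant is analogous;
 * its cmd_buf is never allocated, so that free is skipped.
 */
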
/**
 * ice_init_sq - main initialization routine for Control ATQ
 * @hw: pointer to the hardware structure
 * @cq: pointer to the specific Control queue
 *
 * This is the main initialization routine for the Control Send Queue.
 * Prior to calling this function, the driver *MUST* set the following fields
 * in the cq structure:
 *     - cq->num_sq_entries
 *     - cq->sq_buf_size
 *
 * Do *NOT* hold the lock when calling this as the memory allocation routines
 * called are not going to be atomic context safe
 */
static enum ice_status ice_init_sq(struct ice_hw *hw, struct ice_ctl_q_info *cq)
{
	enum ice_status ret_code;

	ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);

	if (cq->sq.count > 0) {
		/* queue already initialized */
		ret_code = ICE_ERR_NOT_READY;
		goto init_ctrlq_exit;
	}

	/* verify input for valid configuration */
	if (!cq->num_sq_entries || !cq->sq_buf_size) {
		ret_code = ICE_ERR_CFG;
		goto init_ctrlq_exit;
	}

	cq->sq.next_to_use = 0;
	cq->sq.next_to_clean = 0;

	/* allocate the ring memory */
	ret_code = ice_alloc_ctrlq_sq_ring(hw, cq);
	if (ret_code)
		goto init_ctrlq_exit;

	/* allocate buffers in the rings */
	ret_code = ice_alloc_sq_bufs(hw, cq);
	if (ret_code)
		goto init_ctrlq_free_rings;

	/* initialize base registers */
	ret_code = ice_cfg_sq_regs(hw, cq);
	if (ret_code)
		goto init_ctrlq_free_rings;

	/* success! */
	cq->sq.count = cq->num_sq_entries;
	goto init_ctrlq_exit;

init_ctrlq_free_rings:
	ICE_FREE_CQ_BUFS(hw, cq, sq);
	ice_free_cq_ring(hw, &cq->sq);

init_ctrlq_exit:
	return ret_code;
}

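/*
 * A minimal caller sketch for ice_init_sq(); the values here are
 * hypothetical and the real ones come from the driver's control queue
 * setup code:
 *
 *	cq->num_sq_entries = 256;		// hypothetical count
 *	cq->sq_buf_size = ICE_AQ_MAX_BUF_LEN;	// hypothetical size
 *	ret_code = ice_init_sq(hw, cq);
 *
 * Both fields must be non-zero or the call fails with ICE_ERR_CFG, and
 * calling it on an already-initialized queue returns ICE_ERR_NOT_READY.
 */
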
/**
 * ice_init_rq - initialize ARQ
 * @hw: pointer to the hardware structure
 * @cq: pointer to the specific Control queue
 *
 * The main initialization routine for the Admin Receive (Event) Queue.
 * Prior to calling this function, the driver *MUST* set the following fields
 * in the cq structure:
 *     - cq->num_rq_entries
 *     - cq->rq_buf_size
 *
 * Do *NOT* hold the lock when calling this as the memory allocation routines
 * called are not going to be atomic context safe
 */
static enum ice_status ice_init_rq(struct ice_hw *hw, struct ice_ctl_q_info *cq)
{
	enum ice_status ret_code;

	ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);

	if (cq->rq.count > 0) {
		/* queue already initialized */
		ret_code = ICE_ERR_NOT_READY;
		goto init_ctrlq_exit;
	}

	/* verify input for valid configuration */
	if (!cq->num_rq_entries || !cq->rq_buf_size) {
		ret_code = ICE_ERR_CFG;
		goto init_ctrlq_exit;
	}

	cq->rq.next_to_use = 0;
	cq->rq.next_to_clean = 0;

	/* allocate the ring memory */
	ret_code = ice_alloc_ctrlq_rq_ring(hw, cq);
	if (ret_code)
		goto init_ctrlq_exit;

	/* allocate buffers in the rings */
	ret_code = ice_alloc_rq_bufs(hw, cq);
	if (ret_code)
		goto init_ctrlq_free_rings;

	/* initialize base registers */
	ret_code = ice_cfg_rq_regs(hw, cq);
	if (ret_code)
		goto init_ctrlq_free_rings;

	/* success! */
	cq->rq.count = cq->num_rq_entries;
	goto init_ctrlq_exit;

init_ctrlq_free_rings:
	ICE_FREE_CQ_BUFS(hw, cq, rq);
	ice_free_cq_ring(hw, &cq->rq);

init_ctrlq_exit:
	return ret_code;
}

/**
 * ice_shutdown_sq - shutdown the Control ATQ
 * @hw: pointer to the hardware structure
 * @cq: pointer to the specific Control queue
 *
 * The main shutdown routine for the Control Transmit Queue
 */
static enum ice_status
ice_shutdown_sq(struct ice_hw *hw, struct ice_ctl_q_info *cq)
{
	enum ice_status ret_code = ICE_SUCCESS;

	ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);

	ice_acquire_lock(&cq->sq_lock);

	if (!cq->sq.count) {
		ret_code = ICE_ERR_NOT_READY;
		goto shutdown_sq_out;
	}

	/* Stop firmware AdminQ processing */
	wr32(hw, cq->sq.head, 0);
	wr32(hw, cq->sq.tail, 0);
	wr32(hw, cq->sq.len, 0);
	wr32(hw, cq->sq.bal, 0);
	wr32(hw, cq->sq.bah, 0);

	cq->sq.count = 0;	/* to indicate uninitialized queue */

	/* free ring buffers and the ring itself */
	ICE_FREE_CQ_BUFS(hw, cq, sq);
	ice_free_cq_ring(hw, &cq->sq);

shutdown_sq_out:
	ice_release_lock(&cq->sq_lock);
	return ret_code;
}

/**
 * ice_aq_ver_check - Check the reported AQ API version.
 * @hw: pointer to the hardware structure
 *
 * Checks if the driver should load on a given AQ API version.
 *
 * Return: 'true' if the driver should attempt to load, 'false' otherwise.
 */
static bool ice_aq_ver_check(struct ice_hw *hw)
{
	if (hw->api_maj_ver > EXP_FW_API_VER_MAJOR) {
		/* Major API version is newer than expected, don't load */
		ice_warn(hw, "The driver for the device stopped because the NVM image is newer than expected. You must install the most recent version of the network driver.\n");
		return false;
	} else if (hw->api_maj_ver == EXP_FW_API_VER_MAJOR) {
		if (hw->api_min_ver > (EXP_FW_API_VER_MINOR + 2))
			ice_info(hw, "The driver for the device detected a newer version (%u.%u) of the NVM image than expected (%u.%u). Please install the most recent version of the network driver.\n",
				 hw->api_maj_ver, hw->api_min_ver,
				 EXP_FW_API_VER_MAJOR, EXP_FW_API_VER_MINOR);
		else if ((hw->api_min_ver + 2) < EXP_FW_API_VER_MINOR)
			ice_info(hw, "The driver for the device detected an older version (%u.%u) of the NVM image than expected (%u.%u). Please update the NVM image.\n",
				 hw->api_maj_ver, hw->api_min_ver,
				 EXP_FW_API_VER_MAJOR, EXP_FW_API_VER_MINOR);
	} else {
		/* Major API version is older than expected, log a warning */
		ice_info(hw, "The driver for the device detected an older version (%u.%u) of the NVM image than expected (%u.%u). Please update the NVM image.\n",
			 hw->api_maj_ver, hw->api_min_ver,
			 EXP_FW_API_VER_MAJOR, EXP_FW_API_VER_MINOR);
	}
	return true;
}

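/*
 * A worked example of the version window above, using hypothetical expected
 * values EXP_FW_API_VER_MAJOR.EXP_FW_API_VER_MINOR == 1.5: reported
 * versions 1.3 through 1.7 load silently, 1.8 and above log the "newer than
 * expected" message, 1.2 and below (and any 0.x version) log the "older
 * than expected" message, and any 2.x version refuses to load. Only a newer
 * major version blocks the driver; everything else loads with at most an
 * informational message.
 */
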
/**
 * ice_shutdown_rq - shutdown Control ARQ
 * @hw: pointer to the hardware structure
 * @cq: pointer to the specific Control queue
 *
 * The main shutdown routine for the Control Receive Queue
 */
static enum ice_status
ice_shutdown_rq(struct ice_hw *hw, struct ice_ctl_q_info *cq)
{
	enum ice_status ret_code = ICE_SUCCESS;

	ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);

	ice_acquire_lock(&cq->rq_lock);

	if (!cq->rq.count) {
		ret_code = ICE_ERR_NOT_READY;
		goto shutdown_rq_out;
	}

	/* Stop Control Queue processing */
	wr32(hw, cq->rq.head, 0);
	wr32(hw, cq->rq.tail, 0);
	wr32(hw, cq->rq.len, 0);
	wr32(hw, cq->rq.bal, 0);
	wr32(hw, cq->rq.bah, 0);

	/* set rq.count to 0 to indicate uninitialized queue */
	cq->rq.count = 0;

	/* free ring buffers and the ring itself */
	ICE_FREE_CQ_BUFS(hw, cq, rq);
	ice_free_cq_ring(hw, &cq->rq);

shutdown_rq_out:
	ice_release_lock(&cq->rq_lock);
	return ret_code;
}

/**
 * ice_idle_aq - stop ARQ/ATQ processing momentarily
 * @hw: pointer to the hardware structure
 * @cq: pointer to the specific Control queue
 */
void ice_idle_aq(struct ice_hw *hw, struct ice_ctl_q_info *cq)
{
	wr32(hw, cq->sq.len, 0);
	wr32(hw, cq->rq.len, 0);

	ice_msec_delay(2, false);
}

/**
 * ice_init_check_adminq - Check version for Admin Queue to know if it's alive
 * @hw: pointer to the hardware structure
 */
static enum ice_status ice_init_check_adminq(struct ice_hw *hw)
{
	struct ice_ctl_q_info *cq = &hw->adminq;
	enum ice_status status;

	ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);

	status = ice_aq_get_fw_ver(hw, NULL);
	if (status)
		goto init_ctrlq_free_rq;

	if (!ice_aq_ver_check(hw)) {
		status = ICE_ERR_FW_API_VER;
		goto init_ctrlq_free_rq;
	}

	return ICE_SUCCESS;

init_ctrlq_free_rq:
	ice_shutdown_rq(hw, cq);
	ice_shutdown_sq(hw, cq);
	return status;
}

/**
 * ice_init_ctrlq - main initialization routine for any control queue
 * @hw: pointer to the hardware structure
 * @q_type: specific Control queue type
 *
 * Prior to calling this function, the driver *MUST* set the following fields
 * in the cq structure:
 *     - cq->num_sq_entries
 *     - cq->num_rq_entries
 *     - cq->rq_buf_size
 *     - cq->sq_buf_size
 *
 * NOTE: this function does not initialize the controlq locks
 */
static enum ice_status ice_init_ctrlq(struct ice_hw *hw, enum ice_ctl_q q_type)
{
	struct ice_ctl_q_info *cq;
	enum ice_status ret_code;

	ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);

	switch (q_type) {
	case ICE_CTL_Q_ADMIN:
		ice_adminq_init_regs(hw);
		cq = &hw->adminq;
		break;
	case ICE_CTL_Q_MAILBOX:
		ice_mailbox_init_regs(hw);
		cq = &hw->mailboxq;
		break;
	default:
		return ICE_ERR_PARAM;
	}
	cq->qtype = q_type;

	/* verify input for valid configuration */
	if (!cq->num_rq_entries || !cq->num_sq_entries ||
	    !cq->rq_buf_size || !cq->sq_buf_size) {
		return ICE_ERR_CFG;
	}

	/* setup SQ command write back timeout */
	cq->sq_cmd_timeout = ICE_CTL_Q_SQ_CMD_TIMEOUT;

	/* allocate the ATQ */
	ret_code = ice_init_sq(hw, cq);
	if (ret_code)
		return ret_code;

	/* allocate the ARQ */
	ret_code = ice_init_rq(hw, cq);
	if (ret_code)
		goto init_ctrlq_free_sq;

	/* success! */
	return ICE_SUCCESS;

init_ctrlq_free_sq:
	ice_shutdown_sq(hw, cq);
	return ret_code;
}

/**
 * ice_shutdown_ctrlq - shutdown routine for any control queue
 * @hw: pointer to the hardware structure
 * @q_type: specific Control queue type
 * @unloading: is the driver unloading itself
 *
 * NOTE: this function does not destroy the control queue locks.
 */
static void ice_shutdown_ctrlq(struct ice_hw *hw, enum ice_ctl_q q_type,
			       bool unloading)
{
	struct ice_ctl_q_info *cq;

	ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);

	switch (q_type) {
	case ICE_CTL_Q_ADMIN:
		cq = &hw->adminq;
		if (ice_check_sq_alive(hw, cq))
			ice_aq_q_shutdown(hw, unloading);
		break;
	case ICE_CTL_Q_MAILBOX:
		cq = &hw->mailboxq;
		break;
	default:
		return;
	}

	ice_shutdown_sq(hw, cq);
	ice_shutdown_rq(hw, cq);
}

/**
 * ice_shutdown_all_ctrlq - shutdown routine for all control queues
 * @hw: pointer to the hardware structure
 * @unloading: is the driver unloading itself
 *
 * NOTE: this function does not destroy the control queue locks. The driver
 * may call this at runtime to shutdown and later restart control queues, such
 * as in response to a reset event.
 */
void ice_shutdown_all_ctrlq(struct ice_hw *hw, bool unloading)
{
	ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
	/* Shutdown FW admin queue */
	ice_shutdown_ctrlq(hw, ICE_CTL_Q_ADMIN, unloading);
	/* Shutdown PF-VF Mailbox */
	ice_shutdown_ctrlq(hw, ICE_CTL_Q_MAILBOX, unloading);
}

/**
 * ice_init_all_ctrlq - main initialization routine for all control queues
 * @hw: pointer to the hardware structure
 *
 * Prior to calling this function, the driver *MUST* set the following fields
 * in the cq structure for all control queues:
 *     - cq->num_sq_entries
 *     - cq->num_rq_entries
 *     - cq->rq_buf_size
 *     - cq->sq_buf_size
 *
 * NOTE: this function does not initialize the controlq locks.
 */
enum ice_status ice_init_all_ctrlq(struct ice_hw *hw)
{
	enum ice_status status;
	u32 retry = 0;

	ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);

	/* Init FW admin queue */
	do {
		status = ice_init_ctrlq(hw, ICE_CTL_Q_ADMIN);
		if (status)
			return status;

		status = ice_init_check_adminq(hw);
		if (status != ICE_ERR_AQ_FW_CRITICAL)
			break;

		ice_debug(hw, ICE_DBG_AQ_MSG, "Retry Admin Queue init due to FW critical error\n");
		ice_shutdown_ctrlq(hw, ICE_CTL_Q_ADMIN, true);
		ice_msec_delay(ICE_CTL_Q_ADMIN_INIT_MSEC, true);
	} while (retry++ < ICE_CTL_Q_ADMIN_INIT_TIMEOUT);

	if (status)
		return status;
	/* Init Mailbox queue */
	return ice_init_ctrlq(hw, ICE_CTL_Q_MAILBOX);
}

/**
 * ice_init_ctrlq_locks - Initialize locks for a control queue
 * @cq: pointer to the control queue
 *
 * Initializes the send and receive queue locks for a given control queue.
 */
static void ice_init_ctrlq_locks(struct ice_ctl_q_info *cq)
{
	ice_init_lock(&cq->sq_lock);
	ice_init_lock(&cq->rq_lock);
}

/**
 * ice_create_all_ctrlq - main initialization routine for all control queues
 * @hw: pointer to the hardware structure
 *
 * Prior to calling this function, the driver *MUST* set the following fields
 * in the cq structure for all control queues:
 *     - cq->num_sq_entries
 *     - cq->num_rq_entries
 *     - cq->rq_buf_size
 *     - cq->sq_buf_size
 *
 * This function creates all the control queue locks and then calls
 * ice_init_all_ctrlq. It should be called once during driver load. If the
 * driver needs to re-initialize control queues at run time it should call
 * ice_init_all_ctrlq instead.
 */
enum ice_status ice_create_all_ctrlq(struct ice_hw *hw)
{
	ice_init_ctrlq_locks(&hw->adminq);
	ice_init_ctrlq_locks(&hw->mailboxq);

	return ice_init_all_ctrlq(hw);
}

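/*
 * Taken together, the expected lifecycle pairing is (a sketch):
 *
 *	ice_create_all_ctrlq(hw);		// driver load: locks + queues
 *	...
 *	ice_shutdown_all_ctrlq(hw, false);	// e.g. before a reset
 *	ice_init_all_ctrlq(hw);			// re-init after the reset
 *	...
 *	ice_destroy_all_ctrlq(hw);		// driver unload: queues + locks
 *
 * The create/destroy pair owns the locks; the init/shutdown pair may be
 * called repeatedly in between without touching them.
 */
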
/**
 * ice_destroy_ctrlq_locks - Destroy locks for a control queue
 * @cq: pointer to the control queue
 *
 * Destroys the send and receive queue locks for a given control queue.
 */
static void ice_destroy_ctrlq_locks(struct ice_ctl_q_info *cq)
{
	ice_destroy_lock(&cq->sq_lock);
	ice_destroy_lock(&cq->rq_lock);
}

/**
 * ice_destroy_all_ctrlq - exit routine for all control queues
 * @hw: pointer to the hardware structure
 *
 * This function shuts down all the control queues and then destroys the
 * control queue locks. It should be called once during driver unload. The
 * driver should call ice_shutdown_all_ctrlq if it needs to shut down and
 * reinitialize control queues, such as in response to a reset event.
 */
void ice_destroy_all_ctrlq(struct ice_hw *hw)
{
	/* shut down all the control queues first */
	ice_shutdown_all_ctrlq(hw, true);

	ice_destroy_ctrlq_locks(&hw->adminq);
	ice_destroy_ctrlq_locks(&hw->mailboxq);
}

/**
 * ice_clean_sq - cleans the Admin send queue (ATQ)
 * @hw: pointer to the hardware structure
 * @cq: pointer to the specific Control queue
 *
 * Returns the number of free descriptors.
 */
static u16 ice_clean_sq(struct ice_hw *hw, struct ice_ctl_q_info *cq)
{
	struct ice_ctl_q_ring *sq = &cq->sq;
	u16 ntc = sq->next_to_clean;
	struct ice_sq_cd *details;
	struct ice_aq_desc *desc;

	desc = ICE_CTL_Q_DESC(*sq, ntc);
	details = ICE_CTL_Q_DETAILS(*sq, ntc);

	while (rd32(hw, cq->sq.head) != ntc) {
		ice_debug(hw, ICE_DBG_AQ_MSG, "ntc %d head %d.\n", ntc, rd32(hw, cq->sq.head));
		ice_memset(desc, 0, sizeof(*desc), ICE_DMA_MEM);
		ice_memset(details, 0, sizeof(*details), ICE_NONDMA_MEM);
		ntc++;
		if (ntc == sq->count)
			ntc = 0;
		desc = ICE_CTL_Q_DESC(*sq, ntc);
		details = ICE_CTL_Q_DETAILS(*sq, ntc);
	}

	sq->next_to_clean = ntc;

	return ICE_CTL_Q_DESC_UNUSED(sq);
}

/**
 * ice_debug_cq - dump control queue descriptor contents
 * @hw: pointer to the hardware structure
 * @desc: pointer to control queue descriptor
 * @buf: pointer to command buffer
 * @buf_len: max length of buf
 *
 * Dumps debug log about control command with descriptor contents.
 */
static void ice_debug_cq(struct ice_hw *hw, void *desc, void *buf, u16 buf_len)
{
	struct ice_aq_desc *cq_desc = (struct ice_aq_desc *)desc;
	u16 datalen, flags;

	if (!((ICE_DBG_AQ_DESC | ICE_DBG_AQ_DESC_BUF) & hw->debug_mask))
		return;

	if (!desc)
		return;

	datalen = LE16_TO_CPU(cq_desc->datalen);
	flags = LE16_TO_CPU(cq_desc->flags);

	ice_debug(hw, ICE_DBG_AQ_DESC, "CQ CMD: opcode 0x%04X, flags 0x%04X, datalen 0x%04X, retval 0x%04X\n",
		  LE16_TO_CPU(cq_desc->opcode), flags, datalen,
		  LE16_TO_CPU(cq_desc->retval));
	ice_debug(hw, ICE_DBG_AQ_DESC, "\tcookie (h,l) 0x%08X 0x%08X\n",
		  LE32_TO_CPU(cq_desc->cookie_high),
		  LE32_TO_CPU(cq_desc->cookie_low));
	ice_debug(hw, ICE_DBG_AQ_DESC, "\tparam (0,1)  0x%08X 0x%08X\n",
		  LE32_TO_CPU(cq_desc->params.generic.param0),
		  LE32_TO_CPU(cq_desc->params.generic.param1));
	ice_debug(hw, ICE_DBG_AQ_DESC, "\taddr (h,l)   0x%08X 0x%08X\n",
		  LE32_TO_CPU(cq_desc->params.generic.addr_high),
		  LE32_TO_CPU(cq_desc->params.generic.addr_low));
	/* Dump the buffer only if one exists and the descriptor is either a
	 * response (indicated by the DD and/or CMP flag being set) or a
	 * command with the RD flag set.
	 */
	if (buf && cq_desc->datalen != 0 &&
	    (flags & (ICE_AQ_FLAG_DD | ICE_AQ_FLAG_CMP) ||
	     flags & ICE_AQ_FLAG_RD)) {
		ice_debug(hw, ICE_DBG_AQ_DESC_BUF, "Buffer:\n");
		ice_debug_array(hw, ICE_DBG_AQ_DESC_BUF, 16, 1, (u8 *)buf,
				MIN_T(u16, buf_len, datalen));
	}
}

/**
 * ice_sq_done - check if FW has processed the Admin Send Queue (ATQ)
 * @hw: pointer to the HW struct
 * @cq: pointer to the specific Control queue
 *
 * Returns true if the firmware has processed all descriptors on the
 * admin send queue. Returns false if there are still requests pending.
 */
bool ice_sq_done(struct ice_hw *hw, struct ice_ctl_q_info *cq)
{
	/* AQ designers suggest use of head for better
	 * timing reliability than DD bit
	 */
	return rd32(hw, cq->sq.head) == cq->sq.next_to_use;
}

/**
 * ice_sq_send_cmd_nolock - send command to Control Queue (ATQ)
 * @hw: pointer to the HW struct
 * @cq: pointer to the specific Control queue
 * @desc: prefilled descriptor describing the command (non DMA mem)
 * @buf: buffer to use for indirect commands (or NULL for direct commands)
 * @buf_size: size of buffer for indirect commands (or 0 for direct commands)
 * @cd: pointer to command details structure
 *
 * This is the main send command routine for the ATQ. It runs the queue,
 * cleans the queue, etc.
 */
static enum ice_status
ice_sq_send_cmd_nolock(struct ice_hw *hw, struct ice_ctl_q_info *cq,
		       struct ice_aq_desc *desc, void *buf, u16 buf_size,
		       struct ice_sq_cd *cd)
{
	struct ice_dma_mem *dma_buf = NULL;
	struct ice_aq_desc *desc_on_ring;
	bool cmd_completed = false;
	enum ice_status status = ICE_SUCCESS;
	struct ice_sq_cd *details;
	u32 total_delay = 0;
	u16 retval = 0;
	u32 val = 0;

	/* if reset is in progress return a soft error */
	if (hw->reset_ongoing)
		return ICE_ERR_RESET_ONGOING;

	cq->sq_last_status = ICE_AQ_RC_OK;

	if (!cq->sq.count) {
		ice_debug(hw, ICE_DBG_AQ_MSG, "Control Send queue not initialized.\n");
		status = ICE_ERR_AQ_EMPTY;
		goto sq_send_command_error;
	}

	if ((buf && !buf_size) || (!buf && buf_size)) {
		status = ICE_ERR_PARAM;
		goto sq_send_command_error;
	}

	if (buf) {
		if (buf_size > cq->sq_buf_size) {
			ice_debug(hw, ICE_DBG_AQ_MSG, "Invalid buffer size for Control Send queue: %d.\n",
				  buf_size);
			status = ICE_ERR_INVAL_SIZE;
			goto sq_send_command_error;
		}

		desc->flags |= CPU_TO_LE16(ICE_AQ_FLAG_BUF);
		if (buf_size > ICE_AQ_LG_BUF)
			desc->flags |= CPU_TO_LE16(ICE_AQ_FLAG_LB);
	}

	val = rd32(hw, cq->sq.head);
	if (val >= cq->num_sq_entries) {
		ice_debug(hw, ICE_DBG_AQ_MSG, "head overrun at %d in the Control Send Queue ring\n",
			  val);
		status = ICE_ERR_AQ_EMPTY;
		goto sq_send_command_error;
	}

	details = ICE_CTL_Q_DETAILS(cq->sq, cq->sq.next_to_use);
	if (cd)
		*details = *cd;
	else
		ice_memset(details, 0, sizeof(*details), ICE_NONDMA_MEM);

	/* Call clean and check queue available function to reclaim the
	 * descriptors that were processed by FW/MBX; the function returns the
	 * number of desc available. The clean function called here could be
	 * called in a separate thread in case of asynchronous completions.
	 */
	if (ice_clean_sq(hw, cq) == 0) {
		ice_debug(hw, ICE_DBG_AQ_MSG, "Error: Control Send Queue is full.\n");
		status = ICE_ERR_AQ_FULL;
		goto sq_send_command_error;
	}

	/* initialize the temp desc pointer with the right desc */
	desc_on_ring = ICE_CTL_Q_DESC(cq->sq, cq->sq.next_to_use);

	/* if the desc is available copy the temp desc to the right place */
	ice_memcpy(desc_on_ring, desc, sizeof(*desc_on_ring),
		   ICE_NONDMA_TO_DMA);

	/* if buf is not NULL assume indirect command */
	if (buf) {
		dma_buf = &cq->sq.r.sq_bi[cq->sq.next_to_use];
		/* copy the user buf into the respective DMA buf */
		ice_memcpy(dma_buf->va, buf, buf_size, ICE_NONDMA_TO_DMA);
		desc_on_ring->datalen = CPU_TO_LE16(buf_size);

		/* Update the address values in the desc with the pa value
		 * for respective buffer
		 */
		desc_on_ring->params.generic.addr_high =
			CPU_TO_LE32(ICE_HI_DWORD(dma_buf->pa));
		desc_on_ring->params.generic.addr_low =
			CPU_TO_LE32(ICE_LO_DWORD(dma_buf->pa));
	}

	/* Debug desc and buffer */
	ice_debug(hw, ICE_DBG_AQ_DESC, "ATQ: Control Send queue desc and buffer:\n");

	ice_debug_cq(hw, (void *)desc_on_ring, buf, buf_size);

	(cq->sq.next_to_use)++;
	if (cq->sq.next_to_use == cq->sq.count)
		cq->sq.next_to_use = 0;
	wr32(hw, cq->sq.tail, cq->sq.next_to_use);

	do {
		if (ice_sq_done(hw, cq))
			break;

		ice_usec_delay(ICE_CTL_Q_SQ_CMD_USEC, false);
		total_delay++;
	} while (total_delay < cq->sq_cmd_timeout);

	/* if ready, copy the desc back to temp */
	if (ice_sq_done(hw, cq)) {
		ice_memcpy(desc, desc_on_ring, sizeof(*desc),
			   ICE_DMA_TO_NONDMA);
		if (buf) {
			/* get returned length to copy */
			u16 copy_size = LE16_TO_CPU(desc->datalen);

			if (copy_size > buf_size) {
				ice_debug(hw, ICE_DBG_AQ_MSG, "Return len %d > than buf len %d\n",
					  copy_size, buf_size);
				status = ICE_ERR_AQ_ERROR;
			} else {
				ice_memcpy(buf, dma_buf->va, copy_size,
					   ICE_DMA_TO_NONDMA);
			}
		}
		retval = LE16_TO_CPU(desc->retval);
		if (retval) {
			ice_debug(hw, ICE_DBG_AQ_MSG, "Control Send Queue command 0x%04X completed with error 0x%X\n",
				  LE16_TO_CPU(desc->opcode),
				  retval);

			/* strip off FW internal code */
			retval &= 0xff;
		}
		cmd_completed = true;
		if (!status && retval != ICE_AQ_RC_OK)
			status = ICE_ERR_AQ_ERROR;
		cq->sq_last_status = (enum ice_aq_err)retval;
	}

	ice_debug(hw, ICE_DBG_AQ_MSG, "ATQ: desc and buffer writeback:\n");

	ice_debug_cq(hw, (void *)desc, buf, buf_size);

	/* save writeback AQ if requested */
	if (details->wb_desc)
		ice_memcpy(details->wb_desc, desc_on_ring,
			   sizeof(*details->wb_desc), ICE_DMA_TO_NONDMA);

	/* update the error if a timeout occurred */
	if (!cmd_completed) {
		if (rd32(hw, cq->rq.len) & cq->rq.len_crit_mask ||
		    rd32(hw, cq->sq.len) & cq->sq.len_crit_mask) {
			ice_debug(hw, ICE_DBG_AQ_MSG, "Critical FW error.\n");
			status = ICE_ERR_AQ_FW_CRITICAL;
		} else {
			ice_debug(hw, ICE_DBG_AQ_MSG, "Control Send Queue Writeback timeout.\n");
			status = ICE_ERR_AQ_TIMEOUT;
		}
	}

sq_send_command_error:
	return status;
}

/**
 * ice_sq_send_cmd - send command to Control Queue (ATQ)
 * @hw: pointer to the HW struct
 * @cq: pointer to the specific Control queue
 * @desc: prefilled descriptor describing the command
 * @buf: buffer to use for indirect commands (or NULL for direct commands)
 * @buf_size: size of buffer for indirect commands (or 0 for direct commands)
 * @cd: pointer to command details structure
 *
 * This is the main send command routine for the ATQ. It runs the queue,
 * cleans the queue, etc.
 */
enum ice_status
ice_sq_send_cmd(struct ice_hw *hw, struct ice_ctl_q_info *cq,
		struct ice_aq_desc *desc, void *buf, u16 buf_size,
		struct ice_sq_cd *cd)
{
	enum ice_status status = ICE_SUCCESS;

	/* if reset is in progress return a soft error */
	if (hw->reset_ongoing)
		return ICE_ERR_RESET_ONGOING;

	ice_acquire_lock(&cq->sq_lock);
	status = ice_sq_send_cmd_nolock(hw, cq, desc, buf, buf_size, cd);
	ice_release_lock(&cq->sq_lock);

	return status;
}

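/*
 * A typical direct (buffer-less) command, sketched with a hypothetical
 * opcode constant:
 *
 *	struct ice_aq_desc desc;
 *
 *	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_example);
 *	status = ice_sq_send_cmd(hw, &hw->adminq, &desc, NULL, 0, NULL);
 *
 * Indirect commands pass a buffer and its size instead; the routine copies
 * the buffer into the ring's DMA memory and copies any response back out on
 * completion. A non-NULL buf with a zero buf_size (or the reverse) is
 * rejected with ICE_ERR_PARAM.
 */
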
/**
 * ice_fill_dflt_direct_cmd_desc - AQ descriptor helper function
 * @desc: pointer to the temp descriptor (non DMA mem)
 * @opcode: the opcode can be used to decide which flags to turn off or on
 *
 * Fill the desc with default values
 */
void ice_fill_dflt_direct_cmd_desc(struct ice_aq_desc *desc, u16 opcode)
{
	/* zero out the desc */
	ice_memset(desc, 0, sizeof(*desc), ICE_NONDMA_MEM);
	desc->opcode = CPU_TO_LE16(opcode);
	desc->flags = CPU_TO_LE16(ICE_AQ_FLAG_SI);
}

/**
 * ice_clean_rq_elem - clean one element from the Admin Receive Queue
 * @hw: pointer to the HW struct
 * @cq: pointer to the specific Control queue
 * @e: event info from the receive descriptor, includes any buffers
 * @pending: number of events that could be left to process
 *
 * This function cleans one Admin Receive Queue element and returns
 * the contents through e. It can also return how many events are
 * left to process through 'pending'.
 */
enum ice_status
ice_clean_rq_elem(struct ice_hw *hw, struct ice_ctl_q_info *cq,
		  struct ice_rq_event_info *e, u16 *pending)
{
	u16 ntc = cq->rq.next_to_clean;
	enum ice_aq_err rq_last_status;
	enum ice_status ret_code = ICE_SUCCESS;
	struct ice_aq_desc *desc;
	struct ice_dma_mem *bi;
	u16 desc_idx;
	u16 datalen;
	u16 flags;
	u16 ntu;

	/* pre-clean the event info */
	ice_memset(&e->desc, 0, sizeof(e->desc), ICE_NONDMA_MEM);

	/* take the lock before we start messing with the ring */
	ice_acquire_lock(&cq->rq_lock);

	if (!cq->rq.count) {
		ice_debug(hw, ICE_DBG_AQ_MSG, "Control Receive queue not initialized.\n");
		ret_code = ICE_ERR_AQ_EMPTY;
		goto clean_rq_elem_err;
	}

	/* set next_to_use to head */
	ntu = (u16)(rd32(hw, cq->rq.head) & cq->rq.head_mask);

	if (ntu == ntc) {
		/* nothing to do - shouldn't need to update ring's values */
		ret_code = ICE_ERR_AQ_NO_WORK;
		goto clean_rq_elem_out;
	}

	/* now clean the next descriptor */
	desc = ICE_CTL_Q_DESC(cq->rq, ntc);
	desc_idx = ntc;

	rq_last_status = (enum ice_aq_err)LE16_TO_CPU(desc->retval);
	flags = LE16_TO_CPU(desc->flags);
	if (flags & ICE_AQ_FLAG_ERR) {
		ret_code = ICE_ERR_AQ_ERROR;
		ice_debug(hw, ICE_DBG_AQ_MSG, "Control Receive Queue Event 0x%04X received with error 0x%X\n",
			  LE16_TO_CPU(desc->opcode), rq_last_status);
	}
	ice_memcpy(&e->desc, desc, sizeof(e->desc), ICE_DMA_TO_NONDMA);
	datalen = LE16_TO_CPU(desc->datalen);
	e->msg_len = MIN_T(u16, datalen, e->buf_len);
	if (e->msg_buf && e->msg_len)
		ice_memcpy(e->msg_buf, cq->rq.r.rq_bi[desc_idx].va,
			   e->msg_len, ICE_DMA_TO_NONDMA);

	ice_debug(hw, ICE_DBG_AQ_DESC, "ARQ: desc and buffer:\n");

	ice_debug_cq(hw, (void *)desc, e->msg_buf, cq->rq_buf_size);

	/* Restore the original datalen and buffer address in the desc,
	 * FW updates datalen to indicate the event message size
	 */
	bi = &cq->rq.r.rq_bi[ntc];
	ice_memset(desc, 0, sizeof(*desc), ICE_DMA_MEM);

	desc->flags = CPU_TO_LE16(ICE_AQ_FLAG_BUF);
	if (cq->rq_buf_size > ICE_AQ_LG_BUF)
		desc->flags |= CPU_TO_LE16(ICE_AQ_FLAG_LB);
	desc->datalen = CPU_TO_LE16(bi->size);
	desc->params.generic.addr_high = CPU_TO_LE32(ICE_HI_DWORD(bi->pa));
	desc->params.generic.addr_low = CPU_TO_LE32(ICE_LO_DWORD(bi->pa));

	/* set tail = the last cleaned desc index. */
	wr32(hw, cq->rq.tail, ntc);
	/* ntc is updated to tail + 1 */
	ntc++;
	if (ntc == cq->num_rq_entries)
		ntc = 0;
	cq->rq.next_to_clean = ntc;
	cq->rq.next_to_use = ntu;

clean_rq_elem_out:
	/* Set pending if needed, unlock and return */
	if (pending) {
		/* re-read HW head to calculate actual pending messages */
		ntu = (u16)(rd32(hw, cq->rq.head) & cq->rq.head_mask);
		*pending = (u16)((ntc > ntu ? cq->rq.count : 0) + (ntu - ntc));
	}
clean_rq_elem_err:
	ice_release_lock(&cq->rq_lock);

	return ret_code;
}
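
/*
 * A minimal event-polling sketch built on ice_clean_rq_elem(); it assumes
 * the caller provides msg_buf storage of cq->rq_buf_size bytes:
 *
 *	struct ice_rq_event_info event = { 0 };
 *	u16 pending = 0;
 *
 *	event.buf_len = cq->rq_buf_size;
 *	event.msg_buf = buf;	// caller-provided storage (hypothetical)
 *	do {
 *		if (ice_clean_rq_elem(hw, cq, &event, &pending))
 *			break;	// ICE_ERR_AQ_NO_WORK once drained
 *		// dispatch on LE16_TO_CPU(event.desc.opcode) here
 *	} while (pending);
 */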
1248