xref: /freebsd/sys/dev/ice/ice_controlq.c (revision 9cf1841c)
171d10453SEric Joyner /* SPDX-License-Identifier: BSD-3-Clause */
2d08b8680SEric Joyner /*  Copyright (c) 2021, Intel Corporation
371d10453SEric Joyner  *  All rights reserved.
471d10453SEric Joyner  *
571d10453SEric Joyner  *  Redistribution and use in source and binary forms, with or without
671d10453SEric Joyner  *  modification, are permitted provided that the following conditions are met:
771d10453SEric Joyner  *
871d10453SEric Joyner  *   1. Redistributions of source code must retain the above copyright notice,
971d10453SEric Joyner  *      this list of conditions and the following disclaimer.
1071d10453SEric Joyner  *
1171d10453SEric Joyner  *   2. Redistributions in binary form must reproduce the above copyright
1271d10453SEric Joyner  *      notice, this list of conditions and the following disclaimer in the
1371d10453SEric Joyner  *      documentation and/or other materials provided with the distribution.
1471d10453SEric Joyner  *
1571d10453SEric Joyner  *   3. Neither the name of the Intel Corporation nor the names of its
1671d10453SEric Joyner  *      contributors may be used to endorse or promote products derived from
1771d10453SEric Joyner  *      this software without specific prior written permission.
1871d10453SEric Joyner  *
1971d10453SEric Joyner  *  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
2071d10453SEric Joyner  *  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
2171d10453SEric Joyner  *  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
2271d10453SEric Joyner  *  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
2371d10453SEric Joyner  *  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
2471d10453SEric Joyner  *  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
2571d10453SEric Joyner  *  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
2671d10453SEric Joyner  *  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
2771d10453SEric Joyner  *  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
2871d10453SEric Joyner  *  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
2971d10453SEric Joyner  *  POSSIBILITY OF SUCH DAMAGE.
3071d10453SEric Joyner  */
3171d10453SEric Joyner /*$FreeBSD$*/
3271d10453SEric Joyner 
3371d10453SEric Joyner #include "ice_common.h"
3471d10453SEric Joyner 
3571d10453SEric Joyner #define ICE_CQ_INIT_REGS(qinfo, prefix)				\
3671d10453SEric Joyner do {								\
3771d10453SEric Joyner 	(qinfo)->sq.head = prefix##_ATQH;			\
3871d10453SEric Joyner 	(qinfo)->sq.tail = prefix##_ATQT;			\
3971d10453SEric Joyner 	(qinfo)->sq.len = prefix##_ATQLEN;			\
4071d10453SEric Joyner 	(qinfo)->sq.bah = prefix##_ATQBAH;			\
4171d10453SEric Joyner 	(qinfo)->sq.bal = prefix##_ATQBAL;			\
4271d10453SEric Joyner 	(qinfo)->sq.len_mask = prefix##_ATQLEN_ATQLEN_M;	\
4371d10453SEric Joyner 	(qinfo)->sq.len_ena_mask = prefix##_ATQLEN_ATQENABLE_M;	\
447d7af7f8SEric Joyner 	(qinfo)->sq.len_crit_mask = prefix##_ATQLEN_ATQCRIT_M;	\
4571d10453SEric Joyner 	(qinfo)->sq.head_mask = prefix##_ATQH_ATQH_M;		\
4671d10453SEric Joyner 	(qinfo)->rq.head = prefix##_ARQH;			\
4771d10453SEric Joyner 	(qinfo)->rq.tail = prefix##_ARQT;			\
4871d10453SEric Joyner 	(qinfo)->rq.len = prefix##_ARQLEN;			\
4971d10453SEric Joyner 	(qinfo)->rq.bah = prefix##_ARQBAH;			\
5071d10453SEric Joyner 	(qinfo)->rq.bal = prefix##_ARQBAL;			\
5171d10453SEric Joyner 	(qinfo)->rq.len_mask = prefix##_ARQLEN_ARQLEN_M;	\
5271d10453SEric Joyner 	(qinfo)->rq.len_ena_mask = prefix##_ARQLEN_ARQENABLE_M;	\
537d7af7f8SEric Joyner 	(qinfo)->rq.len_crit_mask = prefix##_ARQLEN_ARQCRIT_M;	\
5471d10453SEric Joyner 	(qinfo)->rq.head_mask = prefix##_ARQH_ARQH_M;		\
5571d10453SEric Joyner } while (0)
5671d10453SEric Joyner 
5771d10453SEric Joyner /**
5871d10453SEric Joyner  * ice_adminq_init_regs - Initialize AdminQ registers
5971d10453SEric Joyner  * @hw: pointer to the hardware structure
6071d10453SEric Joyner  *
6171d10453SEric Joyner  * This assumes the alloc_sq and alloc_rq functions have already been called
6271d10453SEric Joyner  */
6371d10453SEric Joyner static void ice_adminq_init_regs(struct ice_hw *hw)
6471d10453SEric Joyner {
6571d10453SEric Joyner 	struct ice_ctl_q_info *cq = &hw->adminq;
6671d10453SEric Joyner 
6771d10453SEric Joyner 	ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
6871d10453SEric Joyner 
6971d10453SEric Joyner 	ICE_CQ_INIT_REGS(cq, PF_FW);
7071d10453SEric Joyner }
7171d10453SEric Joyner 
7271d10453SEric Joyner /**
7371d10453SEric Joyner  * ice_mailbox_init_regs - Initialize Mailbox registers
7471d10453SEric Joyner  * @hw: pointer to the hardware structure
7571d10453SEric Joyner  *
7671d10453SEric Joyner  * This assumes the alloc_sq and alloc_rq functions have already been called
7771d10453SEric Joyner  */
7871d10453SEric Joyner static void ice_mailbox_init_regs(struct ice_hw *hw)
7971d10453SEric Joyner {
8071d10453SEric Joyner 	struct ice_ctl_q_info *cq = &hw->mailboxq;
8171d10453SEric Joyner 
8271d10453SEric Joyner 	ICE_CQ_INIT_REGS(cq, PF_MBX);
8371d10453SEric Joyner }
8471d10453SEric Joyner 
8571d10453SEric Joyner /**
8671d10453SEric Joyner  * ice_check_sq_alive
8771d10453SEric Joyner  * @hw: pointer to the HW struct
8871d10453SEric Joyner  * @cq: pointer to the specific Control queue
8971d10453SEric Joyner  *
9071d10453SEric Joyner  * Returns true if Queue is enabled else false.
9171d10453SEric Joyner  */
9271d10453SEric Joyner bool ice_check_sq_alive(struct ice_hw *hw, struct ice_ctl_q_info *cq)
9371d10453SEric Joyner {
9471d10453SEric Joyner 	/* check both queue-length and queue-enable fields */
9571d10453SEric Joyner 	if (cq->sq.len && cq->sq.len_mask && cq->sq.len_ena_mask)
9671d10453SEric Joyner 		return (rd32(hw, cq->sq.len) & (cq->sq.len_mask |
9771d10453SEric Joyner 						cq->sq.len_ena_mask)) ==
9871d10453SEric Joyner 			(cq->num_sq_entries | cq->sq.len_ena_mask);
9971d10453SEric Joyner 
10071d10453SEric Joyner 	return false;
10171d10453SEric Joyner }
10271d10453SEric Joyner 
10371d10453SEric Joyner /**
10471d10453SEric Joyner  * ice_alloc_ctrlq_sq_ring - Allocate Control Transmit Queue (ATQ) rings
10571d10453SEric Joyner  * @hw: pointer to the hardware structure
10671d10453SEric Joyner  * @cq: pointer to the specific Control queue
10771d10453SEric Joyner  */
10871d10453SEric Joyner static enum ice_status
10971d10453SEric Joyner ice_alloc_ctrlq_sq_ring(struct ice_hw *hw, struct ice_ctl_q_info *cq)
11071d10453SEric Joyner {
11171d10453SEric Joyner 	size_t size = cq->num_sq_entries * sizeof(struct ice_aq_desc);
11271d10453SEric Joyner 
11371d10453SEric Joyner 	cq->sq.desc_buf.va = ice_alloc_dma_mem(hw, &cq->sq.desc_buf, size);
11471d10453SEric Joyner 	if (!cq->sq.desc_buf.va)
11571d10453SEric Joyner 		return ICE_ERR_NO_MEMORY;
11671d10453SEric Joyner 
11771d10453SEric Joyner 	cq->sq.cmd_buf = ice_calloc(hw, cq->num_sq_entries,
11871d10453SEric Joyner 				    sizeof(struct ice_sq_cd));
11971d10453SEric Joyner 	if (!cq->sq.cmd_buf) {
12071d10453SEric Joyner 		ice_free_dma_mem(hw, &cq->sq.desc_buf);
12171d10453SEric Joyner 		return ICE_ERR_NO_MEMORY;
12271d10453SEric Joyner 	}
12371d10453SEric Joyner 
12471d10453SEric Joyner 	return ICE_SUCCESS;
12571d10453SEric Joyner }
12671d10453SEric Joyner 
12771d10453SEric Joyner /**
12871d10453SEric Joyner  * ice_alloc_ctrlq_rq_ring - Allocate Control Receive Queue (ARQ) rings
12971d10453SEric Joyner  * @hw: pointer to the hardware structure
13071d10453SEric Joyner  * @cq: pointer to the specific Control queue
13171d10453SEric Joyner  */
13271d10453SEric Joyner static enum ice_status
13371d10453SEric Joyner ice_alloc_ctrlq_rq_ring(struct ice_hw *hw, struct ice_ctl_q_info *cq)
13471d10453SEric Joyner {
13571d10453SEric Joyner 	size_t size = cq->num_rq_entries * sizeof(struct ice_aq_desc);
13671d10453SEric Joyner 
13771d10453SEric Joyner 	cq->rq.desc_buf.va = ice_alloc_dma_mem(hw, &cq->rq.desc_buf, size);
13871d10453SEric Joyner 	if (!cq->rq.desc_buf.va)
13971d10453SEric Joyner 		return ICE_ERR_NO_MEMORY;
14071d10453SEric Joyner 	return ICE_SUCCESS;
14171d10453SEric Joyner }
14271d10453SEric Joyner 
14371d10453SEric Joyner /**
14471d10453SEric Joyner  * ice_free_cq_ring - Free control queue ring
14571d10453SEric Joyner  * @hw: pointer to the hardware structure
14671d10453SEric Joyner  * @ring: pointer to the specific control queue ring
14771d10453SEric Joyner  *
14871d10453SEric Joyner  * This assumes the posted buffers have already been cleaned
14971d10453SEric Joyner  * and de-allocated
15071d10453SEric Joyner  */
15171d10453SEric Joyner static void ice_free_cq_ring(struct ice_hw *hw, struct ice_ctl_q_ring *ring)
15271d10453SEric Joyner {
15371d10453SEric Joyner 	ice_free_dma_mem(hw, &ring->desc_buf);
15471d10453SEric Joyner }
15571d10453SEric Joyner 
15671d10453SEric Joyner /**
15771d10453SEric Joyner  * ice_alloc_rq_bufs - Allocate pre-posted buffers for the ARQ
15871d10453SEric Joyner  * @hw: pointer to the hardware structure
15971d10453SEric Joyner  * @cq: pointer to the specific Control queue
16071d10453SEric Joyner  */
16171d10453SEric Joyner static enum ice_status
16271d10453SEric Joyner ice_alloc_rq_bufs(struct ice_hw *hw, struct ice_ctl_q_info *cq)
16371d10453SEric Joyner {
16471d10453SEric Joyner 	int i;
16571d10453SEric Joyner 
16671d10453SEric Joyner 	/* We'll be allocating the buffer info memory first, then we can
16771d10453SEric Joyner 	 * allocate the mapped buffers for the event processing
16871d10453SEric Joyner 	 */
16971d10453SEric Joyner 	cq->rq.dma_head = ice_calloc(hw, cq->num_rq_entries,
17071d10453SEric Joyner 				     sizeof(cq->rq.desc_buf));
17171d10453SEric Joyner 	if (!cq->rq.dma_head)
17271d10453SEric Joyner 		return ICE_ERR_NO_MEMORY;
17371d10453SEric Joyner 	cq->rq.r.rq_bi = (struct ice_dma_mem *)cq->rq.dma_head;
17471d10453SEric Joyner 
17571d10453SEric Joyner 	/* allocate the mapped buffers */
17671d10453SEric Joyner 	for (i = 0; i < cq->num_rq_entries; i++) {
17771d10453SEric Joyner 		struct ice_aq_desc *desc;
17871d10453SEric Joyner 		struct ice_dma_mem *bi;
17971d10453SEric Joyner 
18071d10453SEric Joyner 		bi = &cq->rq.r.rq_bi[i];
18171d10453SEric Joyner 		bi->va = ice_alloc_dma_mem(hw, bi, cq->rq_buf_size);
18271d10453SEric Joyner 		if (!bi->va)
18371d10453SEric Joyner 			goto unwind_alloc_rq_bufs;
18471d10453SEric Joyner 
18571d10453SEric Joyner 		/* now configure the descriptors for use */
18671d10453SEric Joyner 		desc = ICE_CTL_Q_DESC(cq->rq, i);
18771d10453SEric Joyner 
18871d10453SEric Joyner 		desc->flags = CPU_TO_LE16(ICE_AQ_FLAG_BUF);
18971d10453SEric Joyner 		if (cq->rq_buf_size > ICE_AQ_LG_BUF)
19071d10453SEric Joyner 			desc->flags |= CPU_TO_LE16(ICE_AQ_FLAG_LB);
19171d10453SEric Joyner 		desc->opcode = 0;
19271d10453SEric Joyner 		/* This is in accordance with Admin queue design, there is no
19371d10453SEric Joyner 		 * register for buffer size configuration
19471d10453SEric Joyner 		 */
19571d10453SEric Joyner 		desc->datalen = CPU_TO_LE16(bi->size);
19671d10453SEric Joyner 		desc->retval = 0;
19771d10453SEric Joyner 		desc->cookie_high = 0;
19871d10453SEric Joyner 		desc->cookie_low = 0;
19971d10453SEric Joyner 		desc->params.generic.addr_high =
20071d10453SEric Joyner 			CPU_TO_LE32(ICE_HI_DWORD(bi->pa));
20171d10453SEric Joyner 		desc->params.generic.addr_low =
20271d10453SEric Joyner 			CPU_TO_LE32(ICE_LO_DWORD(bi->pa));
20371d10453SEric Joyner 		desc->params.generic.param0 = 0;
20471d10453SEric Joyner 		desc->params.generic.param1 = 0;
20571d10453SEric Joyner 	}
20671d10453SEric Joyner 	return ICE_SUCCESS;
20771d10453SEric Joyner 
20871d10453SEric Joyner unwind_alloc_rq_bufs:
20971d10453SEric Joyner 	/* don't try to free the one that failed... */
21071d10453SEric Joyner 	i--;
21171d10453SEric Joyner 	for (; i >= 0; i--)
21271d10453SEric Joyner 		ice_free_dma_mem(hw, &cq->rq.r.rq_bi[i]);
2137d7af7f8SEric Joyner 	cq->rq.r.rq_bi = NULL;
21471d10453SEric Joyner 	ice_free(hw, cq->rq.dma_head);
2157d7af7f8SEric Joyner 	cq->rq.dma_head = NULL;
21671d10453SEric Joyner 
21771d10453SEric Joyner 	return ICE_ERR_NO_MEMORY;
21871d10453SEric Joyner }
21971d10453SEric Joyner 
22071d10453SEric Joyner /**
22171d10453SEric Joyner  * ice_alloc_sq_bufs - Allocate empty buffer structs for the ATQ
22271d10453SEric Joyner  * @hw: pointer to the hardware structure
22371d10453SEric Joyner  * @cq: pointer to the specific Control queue
22471d10453SEric Joyner  */
22571d10453SEric Joyner static enum ice_status
22671d10453SEric Joyner ice_alloc_sq_bufs(struct ice_hw *hw, struct ice_ctl_q_info *cq)
22771d10453SEric Joyner {
22871d10453SEric Joyner 	int i;
22971d10453SEric Joyner 
23071d10453SEric Joyner 	/* No mapped memory needed yet, just the buffer info structures */
23171d10453SEric Joyner 	cq->sq.dma_head = ice_calloc(hw, cq->num_sq_entries,
23271d10453SEric Joyner 				     sizeof(cq->sq.desc_buf));
23371d10453SEric Joyner 	if (!cq->sq.dma_head)
23471d10453SEric Joyner 		return ICE_ERR_NO_MEMORY;
23571d10453SEric Joyner 	cq->sq.r.sq_bi = (struct ice_dma_mem *)cq->sq.dma_head;
23671d10453SEric Joyner 
23771d10453SEric Joyner 	/* allocate the mapped buffers */
23871d10453SEric Joyner 	for (i = 0; i < cq->num_sq_entries; i++) {
23971d10453SEric Joyner 		struct ice_dma_mem *bi;
24071d10453SEric Joyner 
24171d10453SEric Joyner 		bi = &cq->sq.r.sq_bi[i];
24271d10453SEric Joyner 		bi->va = ice_alloc_dma_mem(hw, bi, cq->sq_buf_size);
24371d10453SEric Joyner 		if (!bi->va)
24471d10453SEric Joyner 			goto unwind_alloc_sq_bufs;
24571d10453SEric Joyner 	}
24671d10453SEric Joyner 	return ICE_SUCCESS;
24771d10453SEric Joyner 
24871d10453SEric Joyner unwind_alloc_sq_bufs:
24971d10453SEric Joyner 	/* don't try to free the one that failed... */
25071d10453SEric Joyner 	i--;
25171d10453SEric Joyner 	for (; i >= 0; i--)
25271d10453SEric Joyner 		ice_free_dma_mem(hw, &cq->sq.r.sq_bi[i]);
2537d7af7f8SEric Joyner 	cq->sq.r.sq_bi = NULL;
25471d10453SEric Joyner 	ice_free(hw, cq->sq.dma_head);
2557d7af7f8SEric Joyner 	cq->sq.dma_head = NULL;
25671d10453SEric Joyner 
25771d10453SEric Joyner 	return ICE_ERR_NO_MEMORY;
25871d10453SEric Joyner }
25971d10453SEric Joyner 
26071d10453SEric Joyner static enum ice_status
26171d10453SEric Joyner ice_cfg_cq_regs(struct ice_hw *hw, struct ice_ctl_q_ring *ring, u16 num_entries)
26271d10453SEric Joyner {
26371d10453SEric Joyner 	/* Clear Head and Tail */
26471d10453SEric Joyner 	wr32(hw, ring->head, 0);
26571d10453SEric Joyner 	wr32(hw, ring->tail, 0);
26671d10453SEric Joyner 
26771d10453SEric Joyner 	/* set starting point */
26871d10453SEric Joyner 	wr32(hw, ring->len, (num_entries | ring->len_ena_mask));
26971d10453SEric Joyner 	wr32(hw, ring->bal, ICE_LO_DWORD(ring->desc_buf.pa));
27071d10453SEric Joyner 	wr32(hw, ring->bah, ICE_HI_DWORD(ring->desc_buf.pa));
27171d10453SEric Joyner 
27271d10453SEric Joyner 	/* Check one register to verify that config was applied */
27371d10453SEric Joyner 	if (rd32(hw, ring->bal) != ICE_LO_DWORD(ring->desc_buf.pa))
27471d10453SEric Joyner 		return ICE_ERR_AQ_ERROR;
27571d10453SEric Joyner 
27671d10453SEric Joyner 	return ICE_SUCCESS;
27771d10453SEric Joyner }
27871d10453SEric Joyner 
27971d10453SEric Joyner /**
28071d10453SEric Joyner  * ice_cfg_sq_regs - configure Control ATQ registers
28171d10453SEric Joyner  * @hw: pointer to the hardware structure
28271d10453SEric Joyner  * @cq: pointer to the specific Control queue
28371d10453SEric Joyner  *
28471d10453SEric Joyner  * Configure base address and length registers for the transmit queue
28571d10453SEric Joyner  */
28671d10453SEric Joyner static enum ice_status
28771d10453SEric Joyner ice_cfg_sq_regs(struct ice_hw *hw, struct ice_ctl_q_info *cq)
28871d10453SEric Joyner {
28971d10453SEric Joyner 	return ice_cfg_cq_regs(hw, &cq->sq, cq->num_sq_entries);
29071d10453SEric Joyner }
29171d10453SEric Joyner 
29271d10453SEric Joyner /**
29371d10453SEric Joyner  * ice_cfg_rq_regs - configure Control ARQ register
29471d10453SEric Joyner  * @hw: pointer to the hardware structure
29571d10453SEric Joyner  * @cq: pointer to the specific Control queue
29671d10453SEric Joyner  *
29771d10453SEric Joyner  * Configure base address and length registers for the receive (event queue)
29871d10453SEric Joyner  */
29971d10453SEric Joyner static enum ice_status
30071d10453SEric Joyner ice_cfg_rq_regs(struct ice_hw *hw, struct ice_ctl_q_info *cq)
30171d10453SEric Joyner {
30271d10453SEric Joyner 	enum ice_status status;
30371d10453SEric Joyner 
30471d10453SEric Joyner 	status = ice_cfg_cq_regs(hw, &cq->rq, cq->num_rq_entries);
30571d10453SEric Joyner 	if (status)
30671d10453SEric Joyner 		return status;
30771d10453SEric Joyner 
30871d10453SEric Joyner 	/* Update tail in the HW to post pre-allocated buffers */
30971d10453SEric Joyner 	wr32(hw, cq->rq.tail, (u32)(cq->num_rq_entries - 1));
31071d10453SEric Joyner 
31171d10453SEric Joyner 	return ICE_SUCCESS;
31271d10453SEric Joyner }
31371d10453SEric Joyner 
3147d7af7f8SEric Joyner #define ICE_FREE_CQ_BUFS(hw, qi, ring)					\
3157d7af7f8SEric Joyner do {									\
3167d7af7f8SEric Joyner 	/* free descriptors */						\
3177d7af7f8SEric Joyner 	if ((qi)->ring.r.ring##_bi) {					\
3187d7af7f8SEric Joyner 		int i;							\
3197d7af7f8SEric Joyner 									\
3207d7af7f8SEric Joyner 		for (i = 0; i < (qi)->num_##ring##_entries; i++)	\
3217d7af7f8SEric Joyner 			if ((qi)->ring.r.ring##_bi[i].pa)		\
3227d7af7f8SEric Joyner 				ice_free_dma_mem((hw),			\
3237d7af7f8SEric Joyner 					&(qi)->ring.r.ring##_bi[i]);	\
3247d7af7f8SEric Joyner 	}								\
3257d7af7f8SEric Joyner 	/* free the buffer info list */					\
3267d7af7f8SEric Joyner 	if ((qi)->ring.cmd_buf)						\
3277d7af7f8SEric Joyner 		ice_free(hw, (qi)->ring.cmd_buf);			\
3287d7af7f8SEric Joyner 	/* free DMA head */						\
3297d7af7f8SEric Joyner 	ice_free(hw, (qi)->ring.dma_head);				\
3307d7af7f8SEric Joyner } while (0)
3317d7af7f8SEric Joyner 
33271d10453SEric Joyner /**
33371d10453SEric Joyner  * ice_init_sq - main initialization routine for Control ATQ
33471d10453SEric Joyner  * @hw: pointer to the hardware structure
33571d10453SEric Joyner  * @cq: pointer to the specific Control queue
33671d10453SEric Joyner  *
33771d10453SEric Joyner  * This is the main initialization routine for the Control Send Queue
33871d10453SEric Joyner  * Prior to calling this function, the driver *MUST* set the following fields
33971d10453SEric Joyner  * in the cq->structure:
34071d10453SEric Joyner  *     - cq->num_sq_entries
34171d10453SEric Joyner  *     - cq->sq_buf_size
34271d10453SEric Joyner  *
34371d10453SEric Joyner  * Do *NOT* hold the lock when calling this as the memory allocation routines
34471d10453SEric Joyner  * called are not going to be atomic context safe
34571d10453SEric Joyner  */
34671d10453SEric Joyner static enum ice_status ice_init_sq(struct ice_hw *hw, struct ice_ctl_q_info *cq)
34771d10453SEric Joyner {
34871d10453SEric Joyner 	enum ice_status ret_code;
34971d10453SEric Joyner 
35071d10453SEric Joyner 	ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
35171d10453SEric Joyner 
35271d10453SEric Joyner 	if (cq->sq.count > 0) {
35371d10453SEric Joyner 		/* queue already initialized */
35471d10453SEric Joyner 		ret_code = ICE_ERR_NOT_READY;
35571d10453SEric Joyner 		goto init_ctrlq_exit;
35671d10453SEric Joyner 	}
35771d10453SEric Joyner 
35871d10453SEric Joyner 	/* verify input for valid configuration */
35971d10453SEric Joyner 	if (!cq->num_sq_entries || !cq->sq_buf_size) {
36071d10453SEric Joyner 		ret_code = ICE_ERR_CFG;
36171d10453SEric Joyner 		goto init_ctrlq_exit;
36271d10453SEric Joyner 	}
36371d10453SEric Joyner 
36471d10453SEric Joyner 	cq->sq.next_to_use = 0;
36571d10453SEric Joyner 	cq->sq.next_to_clean = 0;
36671d10453SEric Joyner 
36771d10453SEric Joyner 	/* allocate the ring memory */
36871d10453SEric Joyner 	ret_code = ice_alloc_ctrlq_sq_ring(hw, cq);
36971d10453SEric Joyner 	if (ret_code)
37071d10453SEric Joyner 		goto init_ctrlq_exit;
37171d10453SEric Joyner 
37271d10453SEric Joyner 	/* allocate buffers in the rings */
37371d10453SEric Joyner 	ret_code = ice_alloc_sq_bufs(hw, cq);
37471d10453SEric Joyner 	if (ret_code)
37571d10453SEric Joyner 		goto init_ctrlq_free_rings;
37671d10453SEric Joyner 
37771d10453SEric Joyner 	/* initialize base registers */
37871d10453SEric Joyner 	ret_code = ice_cfg_sq_regs(hw, cq);
37971d10453SEric Joyner 	if (ret_code)
38071d10453SEric Joyner 		goto init_ctrlq_free_rings;
38171d10453SEric Joyner 
38271d10453SEric Joyner 	/* success! */
38371d10453SEric Joyner 	cq->sq.count = cq->num_sq_entries;
38471d10453SEric Joyner 	goto init_ctrlq_exit;
38571d10453SEric Joyner 
38671d10453SEric Joyner init_ctrlq_free_rings:
3877d7af7f8SEric Joyner 	ICE_FREE_CQ_BUFS(hw, cq, sq);
38871d10453SEric Joyner 	ice_free_cq_ring(hw, &cq->sq);
38971d10453SEric Joyner 
39071d10453SEric Joyner init_ctrlq_exit:
39171d10453SEric Joyner 	return ret_code;
39271d10453SEric Joyner }
39371d10453SEric Joyner 
39471d10453SEric Joyner /**
39571d10453SEric Joyner  * ice_init_rq - initialize ARQ
39671d10453SEric Joyner  * @hw: pointer to the hardware structure
39771d10453SEric Joyner  * @cq: pointer to the specific Control queue
39871d10453SEric Joyner  *
39971d10453SEric Joyner  * The main initialization routine for the Admin Receive (Event) Queue.
40071d10453SEric Joyner  * Prior to calling this function, the driver *MUST* set the following fields
40171d10453SEric Joyner  * in the cq->structure:
40271d10453SEric Joyner  *     - cq->num_rq_entries
40371d10453SEric Joyner  *     - cq->rq_buf_size
40471d10453SEric Joyner  *
40571d10453SEric Joyner  * Do *NOT* hold the lock when calling this as the memory allocation routines
40671d10453SEric Joyner  * called are not going to be atomic context safe
40771d10453SEric Joyner  */
40871d10453SEric Joyner static enum ice_status ice_init_rq(struct ice_hw *hw, struct ice_ctl_q_info *cq)
40971d10453SEric Joyner {
41071d10453SEric Joyner 	enum ice_status ret_code;
41171d10453SEric Joyner 
41271d10453SEric Joyner 	ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
41371d10453SEric Joyner 
41471d10453SEric Joyner 	if (cq->rq.count > 0) {
41571d10453SEric Joyner 		/* queue already initialized */
41671d10453SEric Joyner 		ret_code = ICE_ERR_NOT_READY;
41771d10453SEric Joyner 		goto init_ctrlq_exit;
41871d10453SEric Joyner 	}
41971d10453SEric Joyner 
42071d10453SEric Joyner 	/* verify input for valid configuration */
42171d10453SEric Joyner 	if (!cq->num_rq_entries || !cq->rq_buf_size) {
42271d10453SEric Joyner 		ret_code = ICE_ERR_CFG;
42371d10453SEric Joyner 		goto init_ctrlq_exit;
42471d10453SEric Joyner 	}
42571d10453SEric Joyner 
42671d10453SEric Joyner 	cq->rq.next_to_use = 0;
42771d10453SEric Joyner 	cq->rq.next_to_clean = 0;
42871d10453SEric Joyner 
42971d10453SEric Joyner 	/* allocate the ring memory */
43071d10453SEric Joyner 	ret_code = ice_alloc_ctrlq_rq_ring(hw, cq);
43171d10453SEric Joyner 	if (ret_code)
43271d10453SEric Joyner 		goto init_ctrlq_exit;
43371d10453SEric Joyner 
43471d10453SEric Joyner 	/* allocate buffers in the rings */
43571d10453SEric Joyner 	ret_code = ice_alloc_rq_bufs(hw, cq);
43671d10453SEric Joyner 	if (ret_code)
43771d10453SEric Joyner 		goto init_ctrlq_free_rings;
43871d10453SEric Joyner 
43971d10453SEric Joyner 	/* initialize base registers */
44071d10453SEric Joyner 	ret_code = ice_cfg_rq_regs(hw, cq);
44171d10453SEric Joyner 	if (ret_code)
44271d10453SEric Joyner 		goto init_ctrlq_free_rings;
44371d10453SEric Joyner 
44471d10453SEric Joyner 	/* success! */
44571d10453SEric Joyner 	cq->rq.count = cq->num_rq_entries;
44671d10453SEric Joyner 	goto init_ctrlq_exit;
44771d10453SEric Joyner 
44871d10453SEric Joyner init_ctrlq_free_rings:
4497d7af7f8SEric Joyner 	ICE_FREE_CQ_BUFS(hw, cq, rq);
45071d10453SEric Joyner 	ice_free_cq_ring(hw, &cq->rq);
45171d10453SEric Joyner 
45271d10453SEric Joyner init_ctrlq_exit:
45371d10453SEric Joyner 	return ret_code;
45471d10453SEric Joyner }
45571d10453SEric Joyner 
45671d10453SEric Joyner /**
45771d10453SEric Joyner  * ice_shutdown_sq - shutdown the Control ATQ
45871d10453SEric Joyner  * @hw: pointer to the hardware structure
45971d10453SEric Joyner  * @cq: pointer to the specific Control queue
46071d10453SEric Joyner  *
46171d10453SEric Joyner  * The main shutdown routine for the Control Transmit Queue
46271d10453SEric Joyner  */
46371d10453SEric Joyner static enum ice_status
46471d10453SEric Joyner ice_shutdown_sq(struct ice_hw *hw, struct ice_ctl_q_info *cq)
46571d10453SEric Joyner {
46671d10453SEric Joyner 	enum ice_status ret_code = ICE_SUCCESS;
46771d10453SEric Joyner 
46871d10453SEric Joyner 	ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
46971d10453SEric Joyner 
47071d10453SEric Joyner 	ice_acquire_lock(&cq->sq_lock);
47171d10453SEric Joyner 
47271d10453SEric Joyner 	if (!cq->sq.count) {
47371d10453SEric Joyner 		ret_code = ICE_ERR_NOT_READY;
47471d10453SEric Joyner 		goto shutdown_sq_out;
47571d10453SEric Joyner 	}
47671d10453SEric Joyner 
47771d10453SEric Joyner 	/* Stop firmware AdminQ processing */
47871d10453SEric Joyner 	wr32(hw, cq->sq.head, 0);
47971d10453SEric Joyner 	wr32(hw, cq->sq.tail, 0);
48071d10453SEric Joyner 	wr32(hw, cq->sq.len, 0);
48171d10453SEric Joyner 	wr32(hw, cq->sq.bal, 0);
48271d10453SEric Joyner 	wr32(hw, cq->sq.bah, 0);
48371d10453SEric Joyner 
48471d10453SEric Joyner 	cq->sq.count = 0;	/* to indicate uninitialized queue */
48571d10453SEric Joyner 
48671d10453SEric Joyner 	/* free ring buffers and the ring itself */
48771d10453SEric Joyner 	ICE_FREE_CQ_BUFS(hw, cq, sq);
48871d10453SEric Joyner 	ice_free_cq_ring(hw, &cq->sq);
48971d10453SEric Joyner 
49071d10453SEric Joyner shutdown_sq_out:
49171d10453SEric Joyner 	ice_release_lock(&cq->sq_lock);
49271d10453SEric Joyner 	return ret_code;
49371d10453SEric Joyner }
49471d10453SEric Joyner 
49571d10453SEric Joyner /**
49671d10453SEric Joyner  * ice_aq_ver_check - Check the reported AQ API version.
49771d10453SEric Joyner  * @hw: pointer to the hardware structure
49871d10453SEric Joyner  *
49971d10453SEric Joyner  * Checks if the driver should load on a given AQ API version.
50071d10453SEric Joyner  *
50171d10453SEric Joyner  * Return: 'true' iff the driver should attempt to load. 'false' otherwise.
50271d10453SEric Joyner  */
50371d10453SEric Joyner static bool ice_aq_ver_check(struct ice_hw *hw)
50471d10453SEric Joyner {
50571d10453SEric Joyner 	if (hw->api_maj_ver > EXP_FW_API_VER_MAJOR) {
50671d10453SEric Joyner 		/* Major API version is newer than expected, don't load */
50771d10453SEric Joyner 		ice_warn(hw, "The driver for the device stopped because the NVM image is newer than expected. You must install the most recent version of the network driver.\n");
50871d10453SEric Joyner 		return false;
50971d10453SEric Joyner 	} else if (hw->api_maj_ver == EXP_FW_API_VER_MAJOR) {
51071d10453SEric Joyner 		if (hw->api_min_ver > (EXP_FW_API_VER_MINOR + 2))
51171d10453SEric Joyner 			ice_info(hw, "The driver for the device detected a newer version of the NVM image than expected. Please install the most recent version of the network driver.\n");
51271d10453SEric Joyner 		else if ((hw->api_min_ver + 2) < EXP_FW_API_VER_MINOR)
51371d10453SEric Joyner 			ice_info(hw, "The driver for the device detected an older version of the NVM image than expected. Please update the NVM image.\n");
51471d10453SEric Joyner 	} else {
51571d10453SEric Joyner 		/* Major API version is older than expected, log a warning */
51671d10453SEric Joyner 		ice_info(hw, "The driver for the device detected an older version of the NVM image than expected. Please update the NVM image.\n");
51771d10453SEric Joyner 	}
51871d10453SEric Joyner 	return true;
51971d10453SEric Joyner }
52071d10453SEric Joyner 
52171d10453SEric Joyner /**
52271d10453SEric Joyner  * ice_shutdown_rq - shutdown Control ARQ
52371d10453SEric Joyner  * @hw: pointer to the hardware structure
52471d10453SEric Joyner  * @cq: pointer to the specific Control queue
52571d10453SEric Joyner  *
52671d10453SEric Joyner  * The main shutdown routine for the Control Receive Queue
52771d10453SEric Joyner  */
52871d10453SEric Joyner static enum ice_status
52971d10453SEric Joyner ice_shutdown_rq(struct ice_hw *hw, struct ice_ctl_q_info *cq)
53071d10453SEric Joyner {
53171d10453SEric Joyner 	enum ice_status ret_code = ICE_SUCCESS;
53271d10453SEric Joyner 
53371d10453SEric Joyner 	ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
53471d10453SEric Joyner 
53571d10453SEric Joyner 	ice_acquire_lock(&cq->rq_lock);
53671d10453SEric Joyner 
53771d10453SEric Joyner 	if (!cq->rq.count) {
53871d10453SEric Joyner 		ret_code = ICE_ERR_NOT_READY;
53971d10453SEric Joyner 		goto shutdown_rq_out;
54071d10453SEric Joyner 	}
54171d10453SEric Joyner 
54271d10453SEric Joyner 	/* Stop Control Queue processing */
54371d10453SEric Joyner 	wr32(hw, cq->rq.head, 0);
54471d10453SEric Joyner 	wr32(hw, cq->rq.tail, 0);
54571d10453SEric Joyner 	wr32(hw, cq->rq.len, 0);
54671d10453SEric Joyner 	wr32(hw, cq->rq.bal, 0);
54771d10453SEric Joyner 	wr32(hw, cq->rq.bah, 0);
54871d10453SEric Joyner 
54971d10453SEric Joyner 	/* set rq.count to 0 to indicate uninitialized queue */
55071d10453SEric Joyner 	cq->rq.count = 0;
55171d10453SEric Joyner 
55271d10453SEric Joyner 	/* free ring buffers and the ring itself */
55371d10453SEric Joyner 	ICE_FREE_CQ_BUFS(hw, cq, rq);
55471d10453SEric Joyner 	ice_free_cq_ring(hw, &cq->rq);
55571d10453SEric Joyner 
55671d10453SEric Joyner shutdown_rq_out:
55771d10453SEric Joyner 	ice_release_lock(&cq->rq_lock);
55871d10453SEric Joyner 	return ret_code;
55971d10453SEric Joyner }
56071d10453SEric Joyner 
56171d10453SEric Joyner /**
56271d10453SEric Joyner  * ice_idle_aq - stop ARQ/ATQ processing momentarily
56371d10453SEric Joyner  * @hw: pointer to the hardware structure
56471d10453SEric Joyner  * @cq: pointer to the specific Control queue
56571d10453SEric Joyner  */
56671d10453SEric Joyner void ice_idle_aq(struct ice_hw *hw, struct ice_ctl_q_info *cq)
56771d10453SEric Joyner {
56871d10453SEric Joyner 	wr32(hw, cq->sq.len, 0);
56971d10453SEric Joyner 	wr32(hw, cq->rq.len, 0);
57071d10453SEric Joyner 
57171d10453SEric Joyner 	ice_msec_delay(2, false);
57271d10453SEric Joyner }
57371d10453SEric Joyner 
57471d10453SEric Joyner /**
57571d10453SEric Joyner  * ice_init_check_adminq - Check version for Admin Queue to know if its alive
57671d10453SEric Joyner  * @hw: pointer to the hardware structure
57771d10453SEric Joyner  */
57871d10453SEric Joyner static enum ice_status ice_init_check_adminq(struct ice_hw *hw)
57971d10453SEric Joyner {
58071d10453SEric Joyner 	struct ice_ctl_q_info *cq = &hw->adminq;
58171d10453SEric Joyner 	enum ice_status status;
58271d10453SEric Joyner 
58371d10453SEric Joyner 	ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
58471d10453SEric Joyner 
58571d10453SEric Joyner 	status = ice_aq_get_fw_ver(hw, NULL);
58671d10453SEric Joyner 	if (status)
58771d10453SEric Joyner 		goto init_ctrlq_free_rq;
58871d10453SEric Joyner 
58971d10453SEric Joyner 	if (!ice_aq_ver_check(hw)) {
59071d10453SEric Joyner 		status = ICE_ERR_FW_API_VER;
59171d10453SEric Joyner 		goto init_ctrlq_free_rq;
59271d10453SEric Joyner 	}
59371d10453SEric Joyner 
59471d10453SEric Joyner 	return ICE_SUCCESS;
59571d10453SEric Joyner 
59671d10453SEric Joyner init_ctrlq_free_rq:
59771d10453SEric Joyner 	ice_shutdown_rq(hw, cq);
59871d10453SEric Joyner 	ice_shutdown_sq(hw, cq);
59971d10453SEric Joyner 	return status;
60071d10453SEric Joyner }
60171d10453SEric Joyner 
60271d10453SEric Joyner /**
60371d10453SEric Joyner  * ice_init_ctrlq - main initialization routine for any control Queue
60471d10453SEric Joyner  * @hw: pointer to the hardware structure
60571d10453SEric Joyner  * @q_type: specific Control queue type
60671d10453SEric Joyner  *
60771d10453SEric Joyner  * Prior to calling this function, the driver *MUST* set the following fields
60871d10453SEric Joyner  * in the cq->structure:
60971d10453SEric Joyner  *     - cq->num_sq_entries
61071d10453SEric Joyner  *     - cq->num_rq_entries
61171d10453SEric Joyner  *     - cq->rq_buf_size
61271d10453SEric Joyner  *     - cq->sq_buf_size
61371d10453SEric Joyner  *
61471d10453SEric Joyner  * NOTE: this function does not initialize the controlq locks
61571d10453SEric Joyner  */
61671d10453SEric Joyner static enum ice_status ice_init_ctrlq(struct ice_hw *hw, enum ice_ctl_q q_type)
61771d10453SEric Joyner {
61871d10453SEric Joyner 	struct ice_ctl_q_info *cq;
61971d10453SEric Joyner 	enum ice_status ret_code;
62071d10453SEric Joyner 
62171d10453SEric Joyner 	ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
62271d10453SEric Joyner 
62371d10453SEric Joyner 	switch (q_type) {
62471d10453SEric Joyner 	case ICE_CTL_Q_ADMIN:
62571d10453SEric Joyner 		ice_adminq_init_regs(hw);
62671d10453SEric Joyner 		cq = &hw->adminq;
62771d10453SEric Joyner 		break;
62871d10453SEric Joyner 	case ICE_CTL_Q_MAILBOX:
62971d10453SEric Joyner 		ice_mailbox_init_regs(hw);
63071d10453SEric Joyner 		cq = &hw->mailboxq;
63171d10453SEric Joyner 		break;
63271d10453SEric Joyner 	default:
63371d10453SEric Joyner 		return ICE_ERR_PARAM;
63471d10453SEric Joyner 	}
63571d10453SEric Joyner 	cq->qtype = q_type;
63671d10453SEric Joyner 
63771d10453SEric Joyner 	/* verify input for valid configuration */
63871d10453SEric Joyner 	if (!cq->num_rq_entries || !cq->num_sq_entries ||
63971d10453SEric Joyner 	    !cq->rq_buf_size || !cq->sq_buf_size) {
64071d10453SEric Joyner 		return ICE_ERR_CFG;
64171d10453SEric Joyner 	}
64271d10453SEric Joyner 
64371d10453SEric Joyner 	/* setup SQ command write back timeout */
64471d10453SEric Joyner 	cq->sq_cmd_timeout = ICE_CTL_Q_SQ_CMD_TIMEOUT;
64571d10453SEric Joyner 
64671d10453SEric Joyner 	/* allocate the ATQ */
64771d10453SEric Joyner 	ret_code = ice_init_sq(hw, cq);
64871d10453SEric Joyner 	if (ret_code)
64971d10453SEric Joyner 		return ret_code;
65071d10453SEric Joyner 
65171d10453SEric Joyner 	/* allocate the ARQ */
65271d10453SEric Joyner 	ret_code = ice_init_rq(hw, cq);
65371d10453SEric Joyner 	if (ret_code)
65471d10453SEric Joyner 		goto init_ctrlq_free_sq;
65571d10453SEric Joyner 
65671d10453SEric Joyner 	/* success! */
65771d10453SEric Joyner 	return ICE_SUCCESS;
65871d10453SEric Joyner 
65971d10453SEric Joyner init_ctrlq_free_sq:
66071d10453SEric Joyner 	ice_shutdown_sq(hw, cq);
66171d10453SEric Joyner 	return ret_code;
66271d10453SEric Joyner }
66371d10453SEric Joyner 
66471d10453SEric Joyner /**
66571d10453SEric Joyner  * ice_shutdown_ctrlq - shutdown routine for any control queue
66671d10453SEric Joyner  * @hw: pointer to the hardware structure
66771d10453SEric Joyner  * @q_type: specific Control queue type
66871d10453SEric Joyner  *
66971d10453SEric Joyner  * NOTE: this function does not destroy the control queue locks.
67071d10453SEric Joyner  */
67171d10453SEric Joyner static void ice_shutdown_ctrlq(struct ice_hw *hw, enum ice_ctl_q q_type)
67271d10453SEric Joyner {
67371d10453SEric Joyner 	struct ice_ctl_q_info *cq;
67471d10453SEric Joyner 
67571d10453SEric Joyner 	ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
67671d10453SEric Joyner 
67771d10453SEric Joyner 	switch (q_type) {
67871d10453SEric Joyner 	case ICE_CTL_Q_ADMIN:
67971d10453SEric Joyner 		cq = &hw->adminq;
68071d10453SEric Joyner 		if (ice_check_sq_alive(hw, cq))
68171d10453SEric Joyner 			ice_aq_q_shutdown(hw, true);
68271d10453SEric Joyner 		break;
68371d10453SEric Joyner 	case ICE_CTL_Q_MAILBOX:
68471d10453SEric Joyner 		cq = &hw->mailboxq;
68571d10453SEric Joyner 		break;
68671d10453SEric Joyner 	default:
68771d10453SEric Joyner 		return;
68871d10453SEric Joyner 	}
68971d10453SEric Joyner 
69071d10453SEric Joyner 	ice_shutdown_sq(hw, cq);
69171d10453SEric Joyner 	ice_shutdown_rq(hw, cq);
69271d10453SEric Joyner }
69371d10453SEric Joyner 
69471d10453SEric Joyner /**
69571d10453SEric Joyner  * ice_shutdown_all_ctrlq - shutdown routine for all control queues
69671d10453SEric Joyner  * @hw: pointer to the hardware structure
69771d10453SEric Joyner  *
69871d10453SEric Joyner  * NOTE: this function does not destroy the control queue locks. The driver
69971d10453SEric Joyner  * may call this at runtime to shutdown and later restart control queues, such
70071d10453SEric Joyner  * as in response to a reset event.
70171d10453SEric Joyner  */
void ice_shutdown_all_ctrlq(struct ice_hw *hw)
{
	ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
	/* Shutdown FW admin queue first (this also notifies firmware while
	 * the admin SQ is still alive)
	 */
	ice_shutdown_ctrlq(hw, ICE_CTL_Q_ADMIN);
	/* Shutdown PF-VF Mailbox */
	ice_shutdown_ctrlq(hw, ICE_CTL_Q_MAILBOX);
}
71071d10453SEric Joyner 
71171d10453SEric Joyner /**
7127d7af7f8SEric Joyner  * ice_init_all_ctrlq - main initialization routine for all control queues
7137d7af7f8SEric Joyner  * @hw: pointer to the hardware structure
7147d7af7f8SEric Joyner  *
 * Prior to calling this function, the driver *MUST* set the following fields
7167d7af7f8SEric Joyner  * in the cq->structure for all control queues:
7177d7af7f8SEric Joyner  *     - cq->num_sq_entries
7187d7af7f8SEric Joyner  *     - cq->num_rq_entries
7197d7af7f8SEric Joyner  *     - cq->rq_buf_size
7207d7af7f8SEric Joyner  *     - cq->sq_buf_size
7217d7af7f8SEric Joyner  *
7227d7af7f8SEric Joyner  * NOTE: this function does not initialize the controlq locks.
7237d7af7f8SEric Joyner  */
7247d7af7f8SEric Joyner enum ice_status ice_init_all_ctrlq(struct ice_hw *hw)
7257d7af7f8SEric Joyner {
7267d7af7f8SEric Joyner 	enum ice_status status;
7277d7af7f8SEric Joyner 	u32 retry = 0;
7287d7af7f8SEric Joyner 
7297d7af7f8SEric Joyner 	ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
7307d7af7f8SEric Joyner 
7317d7af7f8SEric Joyner 	/* Init FW admin queue */
7327d7af7f8SEric Joyner 	do {
7337d7af7f8SEric Joyner 		status = ice_init_ctrlq(hw, ICE_CTL_Q_ADMIN);
7347d7af7f8SEric Joyner 		if (status)
7357d7af7f8SEric Joyner 			return status;
7367d7af7f8SEric Joyner 
7377d7af7f8SEric Joyner 		status = ice_init_check_adminq(hw);
7387d7af7f8SEric Joyner 		if (status != ICE_ERR_AQ_FW_CRITICAL)
7397d7af7f8SEric Joyner 			break;
7407d7af7f8SEric Joyner 
7417d7af7f8SEric Joyner 		ice_debug(hw, ICE_DBG_AQ_MSG, "Retry Admin Queue init due to FW critical error\n");
7427d7af7f8SEric Joyner 		ice_shutdown_ctrlq(hw, ICE_CTL_Q_ADMIN);
7437d7af7f8SEric Joyner 		ice_msec_delay(ICE_CTL_Q_ADMIN_INIT_MSEC, true);
7447d7af7f8SEric Joyner 	} while (retry++ < ICE_CTL_Q_ADMIN_INIT_TIMEOUT);
7457d7af7f8SEric Joyner 
7467d7af7f8SEric Joyner 	if (status)
7477d7af7f8SEric Joyner 		return status;
7487d7af7f8SEric Joyner 	/* Init Mailbox queue */
7497d7af7f8SEric Joyner 	return ice_init_ctrlq(hw, ICE_CTL_Q_MAILBOX);
7507d7af7f8SEric Joyner }
7517d7af7f8SEric Joyner 
7527d7af7f8SEric Joyner /**
7537d7af7f8SEric Joyner  * ice_init_ctrlq_locks - Initialize locks for a control queue
7547d7af7f8SEric Joyner  * @cq: pointer to the control queue
7557d7af7f8SEric Joyner  *
7567d7af7f8SEric Joyner  * Initializes the send and receive queue locks for a given control queue.
7577d7af7f8SEric Joyner  */
static void ice_init_ctrlq_locks(struct ice_ctl_q_info *cq)
{
	/* separate locks for the send and receive rings of this queue */
	ice_init_lock(&cq->sq_lock);
	ice_init_lock(&cq->rq_lock);
}
7637d7af7f8SEric Joyner 
7647d7af7f8SEric Joyner /**
7657d7af7f8SEric Joyner  * ice_create_all_ctrlq - main initialization routine for all control queues
7667d7af7f8SEric Joyner  * @hw: pointer to the hardware structure
7677d7af7f8SEric Joyner  *
7687d7af7f8SEric Joyner  * Prior to calling this function, the driver *MUST* set the following fields
7697d7af7f8SEric Joyner  * in the cq->structure for all control queues:
7707d7af7f8SEric Joyner  *     - cq->num_sq_entries
7717d7af7f8SEric Joyner  *     - cq->num_rq_entries
7727d7af7f8SEric Joyner  *     - cq->rq_buf_size
7737d7af7f8SEric Joyner  *     - cq->sq_buf_size
7747d7af7f8SEric Joyner  *
7757d7af7f8SEric Joyner  * This function creates all the control queue locks and then calls
7767d7af7f8SEric Joyner  * ice_init_all_ctrlq. It should be called once during driver load. If the
7777d7af7f8SEric Joyner  * driver needs to re-initialize control queues at run time it should call
7787d7af7f8SEric Joyner  * ice_init_all_ctrlq instead.
7797d7af7f8SEric Joyner  */
enum ice_status ice_create_all_ctrlq(struct ice_hw *hw)
{
	/* create the locks once (driver load); re-init at runtime goes
	 * through ice_init_all_ctrlq directly
	 */
	ice_init_ctrlq_locks(&hw->adminq);
	ice_init_ctrlq_locks(&hw->mailboxq);

	return ice_init_all_ctrlq(hw);
}
7877d7af7f8SEric Joyner 
7887d7af7f8SEric Joyner /**
78971d10453SEric Joyner  * ice_destroy_ctrlq_locks - Destroy locks for a control queue
79071d10453SEric Joyner  * @cq: pointer to the control queue
79171d10453SEric Joyner  *
79271d10453SEric Joyner  * Destroys the send and receive queue locks for a given control queue.
79371d10453SEric Joyner  */
static void ice_destroy_ctrlq_locks(struct ice_ctl_q_info *cq)
{
	/* counterpart of ice_init_ctrlq_locks */
	ice_destroy_lock(&cq->sq_lock);
	ice_destroy_lock(&cq->rq_lock);
}
79971d10453SEric Joyner 
80071d10453SEric Joyner /**
80171d10453SEric Joyner  * ice_destroy_all_ctrlq - exit routine for all control queues
80271d10453SEric Joyner  * @hw: pointer to the hardware structure
80371d10453SEric Joyner  *
80471d10453SEric Joyner  * This function shuts down all the control queues and then destroys the
80571d10453SEric Joyner  * control queue locks. It should be called once during driver unload. The
80671d10453SEric Joyner  * driver should call ice_shutdown_all_ctrlq if it needs to shut down and
80771d10453SEric Joyner  * reinitialize control queues, such as in response to a reset event.
80871d10453SEric Joyner  */
void ice_destroy_all_ctrlq(struct ice_hw *hw)
{
	/* shut down all the control queues first, then it is safe to
	 * destroy their locks
	 */
	ice_shutdown_all_ctrlq(hw);

	ice_destroy_ctrlq_locks(&hw->adminq);
	ice_destroy_ctrlq_locks(&hw->mailboxq);
}
81771d10453SEric Joyner 
81871d10453SEric Joyner /**
81971d10453SEric Joyner  * ice_clean_sq - cleans Admin send queue (ATQ)
82071d10453SEric Joyner  * @hw: pointer to the hardware structure
82171d10453SEric Joyner  * @cq: pointer to the specific Control queue
82271d10453SEric Joyner  *
82371d10453SEric Joyner  * returns the number of free desc
82471d10453SEric Joyner  */
82571d10453SEric Joyner static u16 ice_clean_sq(struct ice_hw *hw, struct ice_ctl_q_info *cq)
82671d10453SEric Joyner {
82771d10453SEric Joyner 	struct ice_ctl_q_ring *sq = &cq->sq;
82871d10453SEric Joyner 	u16 ntc = sq->next_to_clean;
82971d10453SEric Joyner 	struct ice_sq_cd *details;
83071d10453SEric Joyner 	struct ice_aq_desc *desc;
83171d10453SEric Joyner 
83271d10453SEric Joyner 	desc = ICE_CTL_Q_DESC(*sq, ntc);
83371d10453SEric Joyner 	details = ICE_CTL_Q_DETAILS(*sq, ntc);
83471d10453SEric Joyner 
83571d10453SEric Joyner 	while (rd32(hw, cq->sq.head) != ntc) {
8367d7af7f8SEric Joyner 		ice_debug(hw, ICE_DBG_AQ_MSG, "ntc %d head %d.\n", ntc, rd32(hw, cq->sq.head));
83771d10453SEric Joyner 		ice_memset(desc, 0, sizeof(*desc), ICE_DMA_MEM);
83871d10453SEric Joyner 		ice_memset(details, 0, sizeof(*details), ICE_NONDMA_MEM);
83971d10453SEric Joyner 		ntc++;
84071d10453SEric Joyner 		if (ntc == sq->count)
84171d10453SEric Joyner 			ntc = 0;
84271d10453SEric Joyner 		desc = ICE_CTL_Q_DESC(*sq, ntc);
84371d10453SEric Joyner 		details = ICE_CTL_Q_DETAILS(*sq, ntc);
84471d10453SEric Joyner 	}
84571d10453SEric Joyner 
84671d10453SEric Joyner 	sq->next_to_clean = ntc;
84771d10453SEric Joyner 
84871d10453SEric Joyner 	return ICE_CTL_Q_DESC_UNUSED(sq);
84971d10453SEric Joyner }
85071d10453SEric Joyner 
85171d10453SEric Joyner /**
85271d10453SEric Joyner  * ice_debug_cq
85371d10453SEric Joyner  * @hw: pointer to the hardware structure
85471d10453SEric Joyner  * @desc: pointer to control queue descriptor
85571d10453SEric Joyner  * @buf: pointer to command buffer
85671d10453SEric Joyner  * @buf_len: max length of buf
85771d10453SEric Joyner  *
85871d10453SEric Joyner  * Dumps debug log about control command with descriptor contents.
85971d10453SEric Joyner  */
86071d10453SEric Joyner static void ice_debug_cq(struct ice_hw *hw, void *desc, void *buf, u16 buf_len)
86171d10453SEric Joyner {
86271d10453SEric Joyner 	struct ice_aq_desc *cq_desc = (struct ice_aq_desc *)desc;
86371d10453SEric Joyner 	u16 datalen, flags;
86471d10453SEric Joyner 
86571d10453SEric Joyner 	if (!((ICE_DBG_AQ_DESC | ICE_DBG_AQ_DESC_BUF) & hw->debug_mask))
86671d10453SEric Joyner 		return;
86771d10453SEric Joyner 
86871d10453SEric Joyner 	if (!desc)
86971d10453SEric Joyner 		return;
87071d10453SEric Joyner 
87171d10453SEric Joyner 	datalen = LE16_TO_CPU(cq_desc->datalen);
87271d10453SEric Joyner 	flags = LE16_TO_CPU(cq_desc->flags);
87371d10453SEric Joyner 
8747d7af7f8SEric Joyner 	ice_debug(hw, ICE_DBG_AQ_DESC, "CQ CMD: opcode 0x%04X, flags 0x%04X, datalen 0x%04X, retval 0x%04X\n",
87571d10453SEric Joyner 		  LE16_TO_CPU(cq_desc->opcode), flags, datalen,
87671d10453SEric Joyner 		  LE16_TO_CPU(cq_desc->retval));
87771d10453SEric Joyner 	ice_debug(hw, ICE_DBG_AQ_DESC, "\tcookie (h,l) 0x%08X 0x%08X\n",
87871d10453SEric Joyner 		  LE32_TO_CPU(cq_desc->cookie_high),
87971d10453SEric Joyner 		  LE32_TO_CPU(cq_desc->cookie_low));
88071d10453SEric Joyner 	ice_debug(hw, ICE_DBG_AQ_DESC, "\tparam (0,1)  0x%08X 0x%08X\n",
88171d10453SEric Joyner 		  LE32_TO_CPU(cq_desc->params.generic.param0),
88271d10453SEric Joyner 		  LE32_TO_CPU(cq_desc->params.generic.param1));
88371d10453SEric Joyner 	ice_debug(hw, ICE_DBG_AQ_DESC, "\taddr (h,l)   0x%08X 0x%08X\n",
88471d10453SEric Joyner 		  LE32_TO_CPU(cq_desc->params.generic.addr_high),
88571d10453SEric Joyner 		  LE32_TO_CPU(cq_desc->params.generic.addr_low));
88671d10453SEric Joyner 	/* Dump buffer iff 1) one exists and 2) is either a response indicated
88771d10453SEric Joyner 	 * by the DD and/or CMP flag set or a command with the RD flag set.
88871d10453SEric Joyner 	 */
88971d10453SEric Joyner 	if (buf && cq_desc->datalen != 0 &&
89071d10453SEric Joyner 	    (flags & (ICE_AQ_FLAG_DD | ICE_AQ_FLAG_CMP) ||
89171d10453SEric Joyner 	     flags & ICE_AQ_FLAG_RD)) {
89271d10453SEric Joyner 		ice_debug(hw, ICE_DBG_AQ_DESC_BUF, "Buffer:\n");
89371d10453SEric Joyner 		ice_debug_array(hw, ICE_DBG_AQ_DESC_BUF, 16, 1, (u8 *)buf,
89471d10453SEric Joyner 				MIN_T(u16, buf_len, datalen));
89571d10453SEric Joyner 	}
89671d10453SEric Joyner }
89771d10453SEric Joyner 
89871d10453SEric Joyner /**
89971d10453SEric Joyner  * ice_sq_done - check if FW has processed the Admin Send Queue (ATQ)
90071d10453SEric Joyner  * @hw: pointer to the HW struct
90171d10453SEric Joyner  * @cq: pointer to the specific Control queue
90271d10453SEric Joyner  *
90371d10453SEric Joyner  * Returns true if the firmware has processed all descriptors on the
90471d10453SEric Joyner  * admin send queue. Returns false if there are still requests pending.
90571d10453SEric Joyner  */
bool ice_sq_done(struct ice_hw *hw, struct ice_ctl_q_info *cq)
{
	/* AQ designers suggest use of head for better
	 * timing reliability than DD bit: once the hardware head index has
	 * caught up with next_to_use, every submitted descriptor has been
	 * consumed.
	 */
	return rd32(hw, cq->sq.head) == cq->sq.next_to_use;
}
91371d10453SEric Joyner 
91471d10453SEric Joyner /**
91571d10453SEric Joyner  * ice_sq_send_cmd_nolock - send command to Control Queue (ATQ)
91671d10453SEric Joyner  * @hw: pointer to the HW struct
91771d10453SEric Joyner  * @cq: pointer to the specific Control queue
91871d10453SEric Joyner  * @desc: prefilled descriptor describing the command (non DMA mem)
91971d10453SEric Joyner  * @buf: buffer to use for indirect commands (or NULL for direct commands)
92071d10453SEric Joyner  * @buf_size: size of buffer for indirect commands (or 0 for direct commands)
92171d10453SEric Joyner  * @cd: pointer to command details structure
92271d10453SEric Joyner  *
92371d10453SEric Joyner  * This is the main send command routine for the ATQ. It runs the queue,
92471d10453SEric Joyner  * cleans the queue, etc.
92571d10453SEric Joyner  */
static enum ice_status
ice_sq_send_cmd_nolock(struct ice_hw *hw, struct ice_ctl_q_info *cq,
		       struct ice_aq_desc *desc, void *buf, u16 buf_size,
		       struct ice_sq_cd *cd)
{
	struct ice_dma_mem *dma_buf = NULL;
	struct ice_aq_desc *desc_on_ring;
	bool cmd_completed = false;
	enum ice_status status = ICE_SUCCESS;
	struct ice_sq_cd *details;
	u32 total_delay = 0;
	u16 retval = 0;
	u32 val = 0;

	/* if reset is in progress return a soft error */
	if (hw->reset_ongoing)
		return ICE_ERR_RESET_ONGOING;

	cq->sq_last_status = ICE_AQ_RC_OK;

	if (!cq->sq.count) {
		ice_debug(hw, ICE_DBG_AQ_MSG, "Control Send queue not initialized.\n");
		status = ICE_ERR_AQ_EMPTY;
		goto sq_send_command_error;
	}

	/* an indirect command needs both buf and buf_size; a direct command
	 * must supply neither
	 */
	if ((buf && !buf_size) || (!buf && buf_size)) {
		status = ICE_ERR_PARAM;
		goto sq_send_command_error;
	}

	if (buf) {
		if (buf_size > cq->sq_buf_size) {
			ice_debug(hw, ICE_DBG_AQ_MSG, "Invalid buffer size for Control Send queue: %d.\n",
				  buf_size);
			status = ICE_ERR_INVAL_SIZE;
			goto sq_send_command_error;
		}

		/* mark the descriptor as indirect; LB additionally flags a
		 * buffer larger than ICE_AQ_LG_BUF
		 */
		desc->flags |= CPU_TO_LE16(ICE_AQ_FLAG_BUF);
		if (buf_size > ICE_AQ_LG_BUF)
			desc->flags |= CPU_TO_LE16(ICE_AQ_FLAG_LB);
	}

	/* sanity-check the hardware head index before touching the ring */
	val = rd32(hw, cq->sq.head);
	if (val >= cq->num_sq_entries) {
		ice_debug(hw, ICE_DBG_AQ_MSG, "head overrun at %d in the Control Send Queue ring\n",
			  val);
		status = ICE_ERR_AQ_EMPTY;
		goto sq_send_command_error;
	}

	/* record caller-supplied command details for this slot (or clear
	 * any stale ones)
	 */
	details = ICE_CTL_Q_DETAILS(cq->sq, cq->sq.next_to_use);
	if (cd)
		*details = *cd;
	else
		ice_memset(details, 0, sizeof(*details), ICE_NONDMA_MEM);

	/* Call clean and check queue available function to reclaim the
	 * descriptors that were processed by FW/MBX; the function returns the
	 * number of desc available. The clean function called here could be
	 * called in a separate thread in case of asynchronous completions.
	 */
	if (ice_clean_sq(hw, cq) == 0) {
		ice_debug(hw, ICE_DBG_AQ_MSG, "Error: Control Send Queue is full.\n");
		status = ICE_ERR_AQ_FULL;
		goto sq_send_command_error;
	}

	/* initialize the temp desc pointer with the right desc */
	desc_on_ring = ICE_CTL_Q_DESC(cq->sq, cq->sq.next_to_use);

	/* if the desc is available copy the temp desc to the right place */
	ice_memcpy(desc_on_ring, desc, sizeof(*desc_on_ring),
		   ICE_NONDMA_TO_DMA);

	/* if buf is not NULL assume indirect command */
	if (buf) {
		dma_buf = &cq->sq.r.sq_bi[cq->sq.next_to_use];
		/* copy the user buf into the respective DMA buf */
		ice_memcpy(dma_buf->va, buf, buf_size, ICE_NONDMA_TO_DMA);
		desc_on_ring->datalen = CPU_TO_LE16(buf_size);

		/* Update the address values in the desc with the pa value
		 * for respective buffer
		 */
		desc_on_ring->params.generic.addr_high =
			CPU_TO_LE32(ICE_HI_DWORD(dma_buf->pa));
		desc_on_ring->params.generic.addr_low =
			CPU_TO_LE32(ICE_LO_DWORD(dma_buf->pa));
	}

	/* Debug desc and buffer */
	ice_debug(hw, ICE_DBG_AQ_DESC, "ATQ: Control Send queue desc and buffer:\n");

	ice_debug_cq(hw, (void *)desc_on_ring, buf, buf_size);

	/* advance next_to_use (with wrap) and ring the doorbell by writing
	 * the tail register
	 */
	(cq->sq.next_to_use)++;
	if (cq->sq.next_to_use == cq->sq.count)
		cq->sq.next_to_use = 0;
	wr32(hw, cq->sq.tail, cq->sq.next_to_use);

	/* poll until the firmware has consumed the descriptor or the
	 * configured timeout expires
	 */
	do {
		if (ice_sq_done(hw, cq))
			break;

		ice_usec_delay(ICE_CTL_Q_SQ_CMD_USEC, false);
		total_delay++;
	} while (total_delay < cq->sq_cmd_timeout);

	/* if ready, copy the desc back to temp */
	if (ice_sq_done(hw, cq)) {
		ice_memcpy(desc, desc_on_ring, sizeof(*desc),
			   ICE_DMA_TO_NONDMA);
		if (buf) {
			/* get returned length to copy */
			u16 copy_size = LE16_TO_CPU(desc->datalen);

			if (copy_size > buf_size) {
				ice_debug(hw, ICE_DBG_AQ_MSG, "Return len %d > than buf len %d\n",
					  copy_size, buf_size);
				status = ICE_ERR_AQ_ERROR;
			} else {
				ice_memcpy(buf, dma_buf->va, copy_size,
					   ICE_DMA_TO_NONDMA);
			}
		}
		retval = LE16_TO_CPU(desc->retval);
		if (retval) {
			ice_debug(hw, ICE_DBG_AQ_MSG, "Control Send Queue command 0x%04X completed with error 0x%X\n",
				  LE16_TO_CPU(desc->opcode),
				  retval);

			/* strip off FW internal code */
			retval &= 0xff;
		}
		cmd_completed = true;
		if (!status && retval != ICE_AQ_RC_OK)
			status = ICE_ERR_AQ_ERROR;
		cq->sq_last_status = (enum ice_aq_err)retval;
	}

	ice_debug(hw, ICE_DBG_AQ_MSG, "ATQ: desc and buffer writeback:\n");

	ice_debug_cq(hw, (void *)desc, buf, buf_size);

	/* save writeback AQ if requested */
	if (details->wb_desc)
		ice_memcpy(details->wb_desc, desc_on_ring,
			   sizeof(*details->wb_desc), ICE_DMA_TO_NONDMA);

	/* update the error if time out occurred; a critical-mask bit set in
	 * either len register distinguishes a firmware-critical condition
	 * from a plain writeback timeout
	 */
	if (!cmd_completed) {
		if (rd32(hw, cq->rq.len) & cq->rq.len_crit_mask ||
		    rd32(hw, cq->sq.len) & cq->sq.len_crit_mask) {
			ice_debug(hw, ICE_DBG_AQ_MSG, "Critical FW error.\n");
			status = ICE_ERR_AQ_FW_CRITICAL;
		} else {
			ice_debug(hw, ICE_DBG_AQ_MSG, "Control Send Queue Writeback timeout.\n");
			status = ICE_ERR_AQ_TIMEOUT;
		}
	}

sq_send_command_error:
	return status;
}
109271d10453SEric Joyner 
109371d10453SEric Joyner /**
109471d10453SEric Joyner  * ice_sq_send_cmd - send command to Control Queue (ATQ)
109571d10453SEric Joyner  * @hw: pointer to the HW struct
109671d10453SEric Joyner  * @cq: pointer to the specific Control queue
10979cf1841cSEric Joyner  * @desc: prefilled descriptor describing the command
109871d10453SEric Joyner  * @buf: buffer to use for indirect commands (or NULL for direct commands)
109971d10453SEric Joyner  * @buf_size: size of buffer for indirect commands (or 0 for direct commands)
110071d10453SEric Joyner  * @cd: pointer to command details structure
110171d10453SEric Joyner  *
110271d10453SEric Joyner  * This is the main send command routine for the ATQ. It runs the queue,
110371d10453SEric Joyner  * cleans the queue, etc.
110471d10453SEric Joyner  */
110571d10453SEric Joyner enum ice_status
110671d10453SEric Joyner ice_sq_send_cmd(struct ice_hw *hw, struct ice_ctl_q_info *cq,
110771d10453SEric Joyner 		struct ice_aq_desc *desc, void *buf, u16 buf_size,
110871d10453SEric Joyner 		struct ice_sq_cd *cd)
110971d10453SEric Joyner {
111071d10453SEric Joyner 	enum ice_status status = ICE_SUCCESS;
111171d10453SEric Joyner 
111271d10453SEric Joyner 	/* if reset is in progress return a soft error */
111371d10453SEric Joyner 	if (hw->reset_ongoing)
111471d10453SEric Joyner 		return ICE_ERR_RESET_ONGOING;
111571d10453SEric Joyner 
111671d10453SEric Joyner 	ice_acquire_lock(&cq->sq_lock);
111771d10453SEric Joyner 	status = ice_sq_send_cmd_nolock(hw, cq, desc, buf, buf_size, cd);
111871d10453SEric Joyner 	ice_release_lock(&cq->sq_lock);
111971d10453SEric Joyner 
112071d10453SEric Joyner 	return status;
112171d10453SEric Joyner }
112271d10453SEric Joyner 
112371d10453SEric Joyner /**
112471d10453SEric Joyner  * ice_fill_dflt_direct_cmd_desc - AQ descriptor helper function
112571d10453SEric Joyner  * @desc: pointer to the temp descriptor (non DMA mem)
112671d10453SEric Joyner  * @opcode: the opcode can be used to decide which flags to turn off or on
112771d10453SEric Joyner  *
112871d10453SEric Joyner  * Fill the desc with default values
112971d10453SEric Joyner  */
void ice_fill_dflt_direct_cmd_desc(struct ice_aq_desc *desc, u16 opcode)
{
	/* zero out the desc so only the fields set below are populated */
	ice_memset(desc, 0, sizeof(*desc), ICE_NONDMA_MEM);
	desc->opcode = CPU_TO_LE16(opcode);
	/* ICE_AQ_FLAG_SI is the only flag set by default */
	desc->flags = CPU_TO_LE16(ICE_AQ_FLAG_SI);
}
113771d10453SEric Joyner 
113871d10453SEric Joyner /**
113971d10453SEric Joyner  * ice_clean_rq_elem
114071d10453SEric Joyner  * @hw: pointer to the HW struct
114171d10453SEric Joyner  * @cq: pointer to the specific Control queue
114271d10453SEric Joyner  * @e: event info from the receive descriptor, includes any buffers
114371d10453SEric Joyner  * @pending: number of events that could be left to process
114471d10453SEric Joyner  *
114571d10453SEric Joyner  * This function cleans one Admin Receive Queue element and returns
114671d10453SEric Joyner  * the contents through e. It can also return how many events are
114771d10453SEric Joyner  * left to process through 'pending'.
114871d10453SEric Joyner  */
enum ice_status
ice_clean_rq_elem(struct ice_hw *hw, struct ice_ctl_q_info *cq,
		  struct ice_rq_event_info *e, u16 *pending)
{
	u16 ntc = cq->rq.next_to_clean;
	enum ice_aq_err rq_last_status;
	enum ice_status ret_code = ICE_SUCCESS;
	struct ice_aq_desc *desc;
	struct ice_dma_mem *bi;
	u16 desc_idx;
	u16 datalen;
	u16 flags;
	u16 ntu;

	/* pre-clean the event info */
	ice_memset(&e->desc, 0, sizeof(e->desc), ICE_NONDMA_MEM);

	/* take the lock before we start messing with the ring */
	ice_acquire_lock(&cq->rq_lock);

	if (!cq->rq.count) {
		ice_debug(hw, ICE_DBG_AQ_MSG, "Control Receive queue not initialized.\n");
		ret_code = ICE_ERR_AQ_EMPTY;
		goto clean_rq_elem_err;
	}

	/* set next_to_use to head */
	ntu = (u16)(rd32(hw, cq->rq.head) & cq->rq.head_mask);

	if (ntu == ntc) {
		/* nothing to do - shouldn't need to update ring's values */
		ret_code = ICE_ERR_AQ_NO_WORK;
		goto clean_rq_elem_out;
	}

	/* now clean the next descriptor */
	desc = ICE_CTL_Q_DESC(cq->rq, ntc);
	desc_idx = ntc;

	/* an ERR flag means firmware reported an error for this event; the
	 * retval field carries its error code
	 */
	rq_last_status = (enum ice_aq_err)LE16_TO_CPU(desc->retval);
	flags = LE16_TO_CPU(desc->flags);
	if (flags & ICE_AQ_FLAG_ERR) {
		ret_code = ICE_ERR_AQ_ERROR;
		ice_debug(hw, ICE_DBG_AQ_MSG, "Control Receive Queue Event 0x%04X received with error 0x%X\n",
			  LE16_TO_CPU(desc->opcode), rq_last_status);
	}
	/* copy the descriptor and as much of the message as fits in the
	 * caller-provided buffer
	 */
	ice_memcpy(&e->desc, desc, sizeof(e->desc), ICE_DMA_TO_NONDMA);
	datalen = LE16_TO_CPU(desc->datalen);
	e->msg_len = MIN_T(u16, datalen, e->buf_len);
	if (e->msg_buf && e->msg_len)
		ice_memcpy(e->msg_buf, cq->rq.r.rq_bi[desc_idx].va,
			   e->msg_len, ICE_DMA_TO_NONDMA);

	ice_debug(hw, ICE_DBG_AQ_DESC, "ARQ: desc and buffer:\n");

	ice_debug_cq(hw, (void *)desc, e->msg_buf, cq->rq_buf_size);

	/* Restore the original datalen and buffer address in the desc,
	 * FW updates datalen to indicate the event message size
	 */
	bi = &cq->rq.r.rq_bi[ntc];
	ice_memset(desc, 0, sizeof(*desc), ICE_DMA_MEM);

	desc->flags = CPU_TO_LE16(ICE_AQ_FLAG_BUF);
	if (cq->rq_buf_size > ICE_AQ_LG_BUF)
		desc->flags |= CPU_TO_LE16(ICE_AQ_FLAG_LB);
	desc->datalen = CPU_TO_LE16(bi->size);
	desc->params.generic.addr_high = CPU_TO_LE32(ICE_HI_DWORD(bi->pa));
	desc->params.generic.addr_low = CPU_TO_LE32(ICE_LO_DWORD(bi->pa));

	/* set tail = the last cleaned desc index. */
	wr32(hw, cq->rq.tail, ntc);
	/* ntc is updated to tail + 1 (with wrap at num_rq_entries) */
	ntc++;
	if (ntc == cq->num_rq_entries)
		ntc = 0;
	cq->rq.next_to_clean = ntc;
	cq->rq.next_to_use = ntu;

clean_rq_elem_out:
	/* Set pending if needed, unlock and return */
	if (pending) {
		/* re-read HW head to calculate actual pending messages;
		 * account for ring wrap when ntc is ahead of ntu
		 */
		ntu = (u16)(rd32(hw, cq->rq.head) & cq->rq.head_mask);
		*pending = (u16)((ntc > ntu ? cq->rq.count : 0) + (ntu - ntc));
	}
clean_rq_elem_err:
	ice_release_lock(&cq->rq_lock);

	return ret_code;
}
1240