171d10453SEric Joyner /* SPDX-License-Identifier: BSD-3-Clause */
2015f8cc5SEric Joyner /* Copyright (c) 2024, Intel Corporation
371d10453SEric Joyner * All rights reserved.
471d10453SEric Joyner *
571d10453SEric Joyner * Redistribution and use in source and binary forms, with or without
671d10453SEric Joyner * modification, are permitted provided that the following conditions are met:
771d10453SEric Joyner *
871d10453SEric Joyner * 1. Redistributions of source code must retain the above copyright notice,
971d10453SEric Joyner * this list of conditions and the following disclaimer.
1071d10453SEric Joyner *
1171d10453SEric Joyner * 2. Redistributions in binary form must reproduce the above copyright
1271d10453SEric Joyner * notice, this list of conditions and the following disclaimer in the
1371d10453SEric Joyner * documentation and/or other materials provided with the distribution.
1471d10453SEric Joyner *
1571d10453SEric Joyner * 3. Neither the name of the Intel Corporation nor the names of its
1671d10453SEric Joyner * contributors may be used to endorse or promote products derived from
1771d10453SEric Joyner * this software without specific prior written permission.
1871d10453SEric Joyner *
1971d10453SEric Joyner * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
2071d10453SEric Joyner * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
2171d10453SEric Joyner * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
2271d10453SEric Joyner * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
2371d10453SEric Joyner * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
2471d10453SEric Joyner * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
2571d10453SEric Joyner * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
2671d10453SEric Joyner * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
2771d10453SEric Joyner * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
2871d10453SEric Joyner * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
2971d10453SEric Joyner * POSSIBILITY OF SUCH DAMAGE.
3071d10453SEric Joyner */
3171d10453SEric Joyner
3271d10453SEric Joyner #include "ice_common.h"
3371d10453SEric Joyner
/* ICE_CQ_INIT_REGS - fill a control queue's send (sq/ATQ) and receive
 * (rq/ARQ) register offsets and bit masks from a register-name prefix
 * (e.g. PF_FW for the AdminQ, PF_MBX for the mailbox). Token pasting lets
 * one macro serve every control queue instance.
 */
#define ICE_CQ_INIT_REGS(qinfo, prefix)				\
do {								\
	(qinfo)->sq.head = prefix##_ATQH;			\
	(qinfo)->sq.tail = prefix##_ATQT;			\
	(qinfo)->sq.len = prefix##_ATQLEN;			\
	(qinfo)->sq.bah = prefix##_ATQBAH;			\
	(qinfo)->sq.bal = prefix##_ATQBAL;			\
	(qinfo)->sq.len_mask = prefix##_ATQLEN_ATQLEN_M;	\
	(qinfo)->sq.len_ena_mask = prefix##_ATQLEN_ATQENABLE_M;	\
	(qinfo)->sq.len_crit_mask = prefix##_ATQLEN_ATQCRIT_M;	\
	(qinfo)->sq.head_mask = prefix##_ATQH_ATQH_M;		\
	(qinfo)->rq.head = prefix##_ARQH;			\
	(qinfo)->rq.tail = prefix##_ARQT;			\
	(qinfo)->rq.len = prefix##_ARQLEN;			\
	(qinfo)->rq.bah = prefix##_ARQBAH;			\
	(qinfo)->rq.bal = prefix##_ARQBAL;			\
	(qinfo)->rq.len_mask = prefix##_ARQLEN_ARQLEN_M;	\
	(qinfo)->rq.len_ena_mask = prefix##_ARQLEN_ARQENABLE_M;	\
	(qinfo)->rq.len_crit_mask = prefix##_ARQLEN_ARQCRIT_M;	\
	(qinfo)->rq.head_mask = prefix##_ARQH_ARQH_M;		\
} while (0)
5571d10453SEric Joyner
5671d10453SEric Joyner /**
5771d10453SEric Joyner * ice_adminq_init_regs - Initialize AdminQ registers
5871d10453SEric Joyner * @hw: pointer to the hardware structure
5971d10453SEric Joyner *
6071d10453SEric Joyner * This assumes the alloc_sq and alloc_rq functions have already been called
6171d10453SEric Joyner */
static void ice_adminq_init_regs(struct ice_hw *hw)
{
	struct ice_ctl_q_info *cq = &hw->adminq;

	ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);

	/* PF_FW is the register prefix for the PF <-> firmware AdminQ */
	ICE_CQ_INIT_REGS(cq, PF_FW);
}
7071d10453SEric Joyner
7171d10453SEric Joyner /**
7271d10453SEric Joyner * ice_mailbox_init_regs - Initialize Mailbox registers
7371d10453SEric Joyner * @hw: pointer to the hardware structure
7471d10453SEric Joyner *
7571d10453SEric Joyner * This assumes the alloc_sq and alloc_rq functions have already been called
7671d10453SEric Joyner */
static void ice_mailbox_init_regs(struct ice_hw *hw)
{
	struct ice_ctl_q_info *cq = &hw->mailboxq;

	/* PF_MBX is the register prefix for the mailbox control queue */
	ICE_CQ_INIT_REGS(cq, PF_MBX);
}
8371d10453SEric Joyner
8471d10453SEric Joyner /**
8571d10453SEric Joyner * ice_check_sq_alive
8671d10453SEric Joyner * @hw: pointer to the HW struct
8771d10453SEric Joyner * @cq: pointer to the specific Control queue
8871d10453SEric Joyner *
8971d10453SEric Joyner * Returns true if Queue is enabled else false.
9071d10453SEric Joyner */
ice_check_sq_alive(struct ice_hw * hw,struct ice_ctl_q_info * cq)9171d10453SEric Joyner bool ice_check_sq_alive(struct ice_hw *hw, struct ice_ctl_q_info *cq)
9271d10453SEric Joyner {
9371d10453SEric Joyner /* check both queue-length and queue-enable fields */
9471d10453SEric Joyner if (cq->sq.len && cq->sq.len_mask && cq->sq.len_ena_mask)
9571d10453SEric Joyner return (rd32(hw, cq->sq.len) & (cq->sq.len_mask |
9671d10453SEric Joyner cq->sq.len_ena_mask)) ==
9771d10453SEric Joyner (cq->num_sq_entries | cq->sq.len_ena_mask);
9871d10453SEric Joyner
9971d10453SEric Joyner return false;
10071d10453SEric Joyner }
10171d10453SEric Joyner
10271d10453SEric Joyner /**
10371d10453SEric Joyner * ice_alloc_ctrlq_sq_ring - Allocate Control Transmit Queue (ATQ) rings
10471d10453SEric Joyner * @hw: pointer to the hardware structure
10571d10453SEric Joyner * @cq: pointer to the specific Control queue
10671d10453SEric Joyner */
10771d10453SEric Joyner static enum ice_status
ice_alloc_ctrlq_sq_ring(struct ice_hw * hw,struct ice_ctl_q_info * cq)10871d10453SEric Joyner ice_alloc_ctrlq_sq_ring(struct ice_hw *hw, struct ice_ctl_q_info *cq)
10971d10453SEric Joyner {
11071d10453SEric Joyner size_t size = cq->num_sq_entries * sizeof(struct ice_aq_desc);
11171d10453SEric Joyner
11271d10453SEric Joyner cq->sq.desc_buf.va = ice_alloc_dma_mem(hw, &cq->sq.desc_buf, size);
11371d10453SEric Joyner if (!cq->sq.desc_buf.va)
11471d10453SEric Joyner return ICE_ERR_NO_MEMORY;
11571d10453SEric Joyner
11671d10453SEric Joyner return ICE_SUCCESS;
11771d10453SEric Joyner }
11871d10453SEric Joyner
11971d10453SEric Joyner /**
12071d10453SEric Joyner * ice_alloc_ctrlq_rq_ring - Allocate Control Receive Queue (ARQ) rings
12171d10453SEric Joyner * @hw: pointer to the hardware structure
12271d10453SEric Joyner * @cq: pointer to the specific Control queue
12371d10453SEric Joyner */
12471d10453SEric Joyner static enum ice_status
ice_alloc_ctrlq_rq_ring(struct ice_hw * hw,struct ice_ctl_q_info * cq)12571d10453SEric Joyner ice_alloc_ctrlq_rq_ring(struct ice_hw *hw, struct ice_ctl_q_info *cq)
12671d10453SEric Joyner {
12771d10453SEric Joyner size_t size = cq->num_rq_entries * sizeof(struct ice_aq_desc);
12871d10453SEric Joyner
12971d10453SEric Joyner cq->rq.desc_buf.va = ice_alloc_dma_mem(hw, &cq->rq.desc_buf, size);
13071d10453SEric Joyner if (!cq->rq.desc_buf.va)
13171d10453SEric Joyner return ICE_ERR_NO_MEMORY;
13271d10453SEric Joyner return ICE_SUCCESS;
13371d10453SEric Joyner }
13471d10453SEric Joyner
13571d10453SEric Joyner /**
13671d10453SEric Joyner * ice_free_cq_ring - Free control queue ring
13771d10453SEric Joyner * @hw: pointer to the hardware structure
13871d10453SEric Joyner * @ring: pointer to the specific control queue ring
13971d10453SEric Joyner *
14071d10453SEric Joyner * This assumes the posted buffers have already been cleaned
14171d10453SEric Joyner * and de-allocated
14271d10453SEric Joyner */
static void ice_free_cq_ring(struct ice_hw *hw, struct ice_ctl_q_ring *ring)
{
	/* Releases only the descriptor ring's DMA memory; the posted
	 * buffers must already have been freed (see ICE_FREE_CQ_BUFS).
	 */
	ice_free_dma_mem(hw, &ring->desc_buf);
}
14771d10453SEric Joyner
14871d10453SEric Joyner /**
14971d10453SEric Joyner * ice_alloc_rq_bufs - Allocate pre-posted buffers for the ARQ
15071d10453SEric Joyner * @hw: pointer to the hardware structure
15171d10453SEric Joyner * @cq: pointer to the specific Control queue
15271d10453SEric Joyner */
15371d10453SEric Joyner static enum ice_status
ice_alloc_rq_bufs(struct ice_hw * hw,struct ice_ctl_q_info * cq)15471d10453SEric Joyner ice_alloc_rq_bufs(struct ice_hw *hw, struct ice_ctl_q_info *cq)
15571d10453SEric Joyner {
15671d10453SEric Joyner int i;
15771d10453SEric Joyner
15871d10453SEric Joyner /* We'll be allocating the buffer info memory first, then we can
15971d10453SEric Joyner * allocate the mapped buffers for the event processing
16071d10453SEric Joyner */
16171d10453SEric Joyner cq->rq.dma_head = ice_calloc(hw, cq->num_rq_entries,
16271d10453SEric Joyner sizeof(cq->rq.desc_buf));
16371d10453SEric Joyner if (!cq->rq.dma_head)
16471d10453SEric Joyner return ICE_ERR_NO_MEMORY;
16571d10453SEric Joyner cq->rq.r.rq_bi = (struct ice_dma_mem *)cq->rq.dma_head;
16671d10453SEric Joyner
16771d10453SEric Joyner /* allocate the mapped buffers */
16871d10453SEric Joyner for (i = 0; i < cq->num_rq_entries; i++) {
16971d10453SEric Joyner struct ice_aq_desc *desc;
17071d10453SEric Joyner struct ice_dma_mem *bi;
17171d10453SEric Joyner
17271d10453SEric Joyner bi = &cq->rq.r.rq_bi[i];
17371d10453SEric Joyner bi->va = ice_alloc_dma_mem(hw, bi, cq->rq_buf_size);
17471d10453SEric Joyner if (!bi->va)
17571d10453SEric Joyner goto unwind_alloc_rq_bufs;
17671d10453SEric Joyner
17771d10453SEric Joyner /* now configure the descriptors for use */
17871d10453SEric Joyner desc = ICE_CTL_Q_DESC(cq->rq, i);
17971d10453SEric Joyner
18071d10453SEric Joyner desc->flags = CPU_TO_LE16(ICE_AQ_FLAG_BUF);
18171d10453SEric Joyner if (cq->rq_buf_size > ICE_AQ_LG_BUF)
18271d10453SEric Joyner desc->flags |= CPU_TO_LE16(ICE_AQ_FLAG_LB);
18371d10453SEric Joyner desc->opcode = 0;
1849c30461dSEric Joyner /* This is in accordance with control queue design, there is no
18571d10453SEric Joyner * register for buffer size configuration
18671d10453SEric Joyner */
18771d10453SEric Joyner desc->datalen = CPU_TO_LE16(bi->size);
18871d10453SEric Joyner desc->retval = 0;
18971d10453SEric Joyner desc->cookie_high = 0;
19071d10453SEric Joyner desc->cookie_low = 0;
19171d10453SEric Joyner desc->params.generic.addr_high =
19271d10453SEric Joyner CPU_TO_LE32(ICE_HI_DWORD(bi->pa));
19371d10453SEric Joyner desc->params.generic.addr_low =
19471d10453SEric Joyner CPU_TO_LE32(ICE_LO_DWORD(bi->pa));
19571d10453SEric Joyner desc->params.generic.param0 = 0;
19671d10453SEric Joyner desc->params.generic.param1 = 0;
19771d10453SEric Joyner }
19871d10453SEric Joyner return ICE_SUCCESS;
19971d10453SEric Joyner
20071d10453SEric Joyner unwind_alloc_rq_bufs:
20171d10453SEric Joyner /* don't try to free the one that failed... */
20271d10453SEric Joyner i--;
20371d10453SEric Joyner for (; i >= 0; i--)
20471d10453SEric Joyner ice_free_dma_mem(hw, &cq->rq.r.rq_bi[i]);
2057d7af7f8SEric Joyner cq->rq.r.rq_bi = NULL;
20671d10453SEric Joyner ice_free(hw, cq->rq.dma_head);
2077d7af7f8SEric Joyner cq->rq.dma_head = NULL;
20871d10453SEric Joyner
20971d10453SEric Joyner return ICE_ERR_NO_MEMORY;
21071d10453SEric Joyner }
21171d10453SEric Joyner
21271d10453SEric Joyner /**
21371d10453SEric Joyner * ice_alloc_sq_bufs - Allocate empty buffer structs for the ATQ
21471d10453SEric Joyner * @hw: pointer to the hardware structure
21571d10453SEric Joyner * @cq: pointer to the specific Control queue
21671d10453SEric Joyner */
21771d10453SEric Joyner static enum ice_status
ice_alloc_sq_bufs(struct ice_hw * hw,struct ice_ctl_q_info * cq)21871d10453SEric Joyner ice_alloc_sq_bufs(struct ice_hw *hw, struct ice_ctl_q_info *cq)
21971d10453SEric Joyner {
22071d10453SEric Joyner int i;
22171d10453SEric Joyner
22271d10453SEric Joyner /* No mapped memory needed yet, just the buffer info structures */
22371d10453SEric Joyner cq->sq.dma_head = ice_calloc(hw, cq->num_sq_entries,
22471d10453SEric Joyner sizeof(cq->sq.desc_buf));
22571d10453SEric Joyner if (!cq->sq.dma_head)
22671d10453SEric Joyner return ICE_ERR_NO_MEMORY;
22771d10453SEric Joyner cq->sq.r.sq_bi = (struct ice_dma_mem *)cq->sq.dma_head;
22871d10453SEric Joyner
22971d10453SEric Joyner /* allocate the mapped buffers */
23071d10453SEric Joyner for (i = 0; i < cq->num_sq_entries; i++) {
23171d10453SEric Joyner struct ice_dma_mem *bi;
23271d10453SEric Joyner
23371d10453SEric Joyner bi = &cq->sq.r.sq_bi[i];
23471d10453SEric Joyner bi->va = ice_alloc_dma_mem(hw, bi, cq->sq_buf_size);
23571d10453SEric Joyner if (!bi->va)
23671d10453SEric Joyner goto unwind_alloc_sq_bufs;
23771d10453SEric Joyner }
23871d10453SEric Joyner return ICE_SUCCESS;
23971d10453SEric Joyner
24071d10453SEric Joyner unwind_alloc_sq_bufs:
24171d10453SEric Joyner /* don't try to free the one that failed... */
24271d10453SEric Joyner i--;
24371d10453SEric Joyner for (; i >= 0; i--)
24471d10453SEric Joyner ice_free_dma_mem(hw, &cq->sq.r.sq_bi[i]);
2457d7af7f8SEric Joyner cq->sq.r.sq_bi = NULL;
24671d10453SEric Joyner ice_free(hw, cq->sq.dma_head);
2477d7af7f8SEric Joyner cq->sq.dma_head = NULL;
24871d10453SEric Joyner
24971d10453SEric Joyner return ICE_ERR_NO_MEMORY;
25071d10453SEric Joyner }
25171d10453SEric Joyner
/**
 * ice_cfg_cq_regs - configure the registers of one control queue ring
 * @hw: pointer to the hardware structure
 * @ring: pointer to the specific control queue ring (sq or rq)
 * @num_entries: number of descriptors in the ring
 *
 * Programs head/tail, the length register (with the enable bit set) and
 * the ring base address, then reads one register back to confirm the
 * configuration reached hardware.
 */
static enum ice_status
ice_cfg_cq_regs(struct ice_hw *hw, struct ice_ctl_q_ring *ring, u16 num_entries)
{
	/* Clear Head and Tail */
	wr32(hw, ring->head, 0);
	wr32(hw, ring->tail, 0);

	/* set starting point: entry count plus the queue-enable bit */
	wr32(hw, ring->len, (num_entries | ring->len_ena_mask));
	wr32(hw, ring->bal, ICE_LO_DWORD(ring->desc_buf.pa));
	wr32(hw, ring->bah, ICE_HI_DWORD(ring->desc_buf.pa));

	/* Check one register to verify that config was applied */
	if (rd32(hw, ring->bal) != ICE_LO_DWORD(ring->desc_buf.pa))
		return ICE_ERR_AQ_ERROR;

	return ICE_SUCCESS;
}
27071d10453SEric Joyner
27171d10453SEric Joyner /**
27271d10453SEric Joyner * ice_cfg_sq_regs - configure Control ATQ registers
27371d10453SEric Joyner * @hw: pointer to the hardware structure
27471d10453SEric Joyner * @cq: pointer to the specific Control queue
27571d10453SEric Joyner *
27671d10453SEric Joyner * Configure base address and length registers for the transmit queue
27771d10453SEric Joyner */
static enum ice_status
ice_cfg_sq_regs(struct ice_hw *hw, struct ice_ctl_q_info *cq)
{
	/* the send queue uses the common ring register setup unchanged */
	return ice_cfg_cq_regs(hw, &cq->sq, cq->num_sq_entries);
}
28371d10453SEric Joyner
28471d10453SEric Joyner /**
28571d10453SEric Joyner * ice_cfg_rq_regs - configure Control ARQ register
28671d10453SEric Joyner * @hw: pointer to the hardware structure
28771d10453SEric Joyner * @cq: pointer to the specific Control queue
28871d10453SEric Joyner *
28971d10453SEric Joyner * Configure base address and length registers for the receive (event queue)
29071d10453SEric Joyner */
static enum ice_status
ice_cfg_rq_regs(struct ice_hw *hw, struct ice_ctl_q_info *cq)
{
	enum ice_status status;

	status = ice_cfg_cq_regs(hw, &cq->rq, cq->num_rq_entries);
	if (status)
		return status;

	/* Update tail in the HW to post pre-allocated buffers; tail is set
	 * to the last descriptor so the whole ring is handed to firmware
	 */
	wr32(hw, cq->rq.tail, (u32)(cq->num_rq_entries - 1));

	return ICE_SUCCESS;
}
30571d10453SEric Joyner
/* ICE_FREE_CQ_BUFS - free the posted buffers and DMA head of one ring;
 * "ring" is the literal token sq or rq. Buffers with a zero physical
 * address were never allocated and are skipped.
 * NOTE(review): this intentionally does not NULL r.ring##_bi or dma_head;
 * callers that need that (e.g. the alloc unwind paths) do it themselves.
 */
#define ICE_FREE_CQ_BUFS(hw, qi, ring)				\
do {								\
	/* free descriptors */					\
	if ((qi)->ring.r.ring##_bi) {				\
		int i;						\
								\
		for (i = 0; i < (qi)->num_##ring##_entries; i++)\
			if ((qi)->ring.r.ring##_bi[i].pa)	\
				ice_free_dma_mem((hw),		\
					&(qi)->ring.r.ring##_bi[i]);\
	}							\
	/* free DMA head */					\
	ice_free(hw, (qi)->ring.dma_head);			\
} while (0)
3207d7af7f8SEric Joyner
32171d10453SEric Joyner /**
32271d10453SEric Joyner * ice_init_sq - main initialization routine for Control ATQ
32371d10453SEric Joyner * @hw: pointer to the hardware structure
32471d10453SEric Joyner * @cq: pointer to the specific Control queue
32571d10453SEric Joyner *
32671d10453SEric Joyner * This is the main initialization routine for the Control Send Queue
32771d10453SEric Joyner * Prior to calling this function, the driver *MUST* set the following fields
32871d10453SEric Joyner * in the cq->structure:
32971d10453SEric Joyner * - cq->num_sq_entries
33071d10453SEric Joyner * - cq->sq_buf_size
33171d10453SEric Joyner *
33271d10453SEric Joyner * Do *NOT* hold the lock when calling this as the memory allocation routines
33371d10453SEric Joyner * called are not going to be atomic context safe
33471d10453SEric Joyner */
static enum ice_status ice_init_sq(struct ice_hw *hw, struct ice_ctl_q_info *cq)
{
	enum ice_status ret_code;

	ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);

	if (cq->sq.count > 0) {
		/* queue already initialized; non-zero count marks it live */
		ret_code = ICE_ERR_NOT_READY;
		goto init_ctrlq_exit;
	}

	/* verify input for valid configuration */
	if (!cq->num_sq_entries || !cq->sq_buf_size) {
		ret_code = ICE_ERR_CFG;
		goto init_ctrlq_exit;
	}

	cq->sq.next_to_use = 0;
	cq->sq.next_to_clean = 0;

	/* allocate the ring memory */
	ret_code = ice_alloc_ctrlq_sq_ring(hw, cq);
	if (ret_code)
		goto init_ctrlq_exit;

	/* allocate buffers in the rings */
	ret_code = ice_alloc_sq_bufs(hw, cq);
	if (ret_code)
		goto init_ctrlq_free_rings;

	/* initialize base registers */
	ret_code = ice_cfg_sq_regs(hw, cq);
	if (ret_code)
		goto init_ctrlq_free_rings;

	/* success! count is set last so the queue is only marked usable
	 * once fully configured
	 */
	cq->sq.count = cq->num_sq_entries;
	goto init_ctrlq_exit;

init_ctrlq_free_rings:
	/* unwind: release buffers first, then the descriptor ring */
	ICE_FREE_CQ_BUFS(hw, cq, sq);
	ice_free_cq_ring(hw, &cq->sq);

init_ctrlq_exit:
	return ret_code;
}
38271d10453SEric Joyner
38371d10453SEric Joyner /**
3849c30461dSEric Joyner * ice_init_rq - initialize receive side of a control queue
38571d10453SEric Joyner * @hw: pointer to the hardware structure
38671d10453SEric Joyner * @cq: pointer to the specific Control queue
38771d10453SEric Joyner *
3889c30461dSEric Joyner * The main initialization routine for Receive side of a control queue.
38971d10453SEric Joyner * Prior to calling this function, the driver *MUST* set the following fields
39071d10453SEric Joyner * in the cq->structure:
39171d10453SEric Joyner * - cq->num_rq_entries
39271d10453SEric Joyner * - cq->rq_buf_size
39371d10453SEric Joyner *
39471d10453SEric Joyner * Do *NOT* hold the lock when calling this as the memory allocation routines
39571d10453SEric Joyner * called are not going to be atomic context safe
39671d10453SEric Joyner */
static enum ice_status ice_init_rq(struct ice_hw *hw, struct ice_ctl_q_info *cq)
{
	enum ice_status ret_code;

	ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);

	if (cq->rq.count > 0) {
		/* queue already initialized; non-zero count marks it live */
		ret_code = ICE_ERR_NOT_READY;
		goto init_ctrlq_exit;
	}

	/* verify input for valid configuration */
	if (!cq->num_rq_entries || !cq->rq_buf_size) {
		ret_code = ICE_ERR_CFG;
		goto init_ctrlq_exit;
	}

	cq->rq.next_to_use = 0;
	cq->rq.next_to_clean = 0;

	/* allocate the ring memory */
	ret_code = ice_alloc_ctrlq_rq_ring(hw, cq);
	if (ret_code)
		goto init_ctrlq_exit;

	/* allocate buffers in the rings */
	ret_code = ice_alloc_rq_bufs(hw, cq);
	if (ret_code)
		goto init_ctrlq_free_rings;

	/* initialize base registers (this also posts the buffers) */
	ret_code = ice_cfg_rq_regs(hw, cq);
	if (ret_code)
		goto init_ctrlq_free_rings;

	/* success! count is set last so the queue is only marked usable
	 * once fully configured
	 */
	cq->rq.count = cq->num_rq_entries;
	goto init_ctrlq_exit;

init_ctrlq_free_rings:
	/* unwind: release buffers first, then the descriptor ring */
	ICE_FREE_CQ_BUFS(hw, cq, rq);
	ice_free_cq_ring(hw, &cq->rq);

init_ctrlq_exit:
	return ret_code;
}
44471d10453SEric Joyner
44571d10453SEric Joyner /**
4469c30461dSEric Joyner * ice_shutdown_sq - shutdown the transmit side of a control queue
44771d10453SEric Joyner * @hw: pointer to the hardware structure
44871d10453SEric Joyner * @cq: pointer to the specific Control queue
44971d10453SEric Joyner *
45071d10453SEric Joyner * The main shutdown routine for the Control Transmit Queue
45171d10453SEric Joyner */
static enum ice_status
ice_shutdown_sq(struct ice_hw *hw, struct ice_ctl_q_info *cq)
{
	enum ice_status ret_code = ICE_SUCCESS;

	ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);

	ice_acquire_lock(&cq->sq_lock);

	if (!cq->sq.count) {
		/* already shut down (or never initialized) */
		ret_code = ICE_ERR_NOT_READY;
		goto shutdown_sq_out;
	}

	/* Stop processing of the control queue; zeroing the registers
	 * (including the length register's enable bit) disables it
	 */
	wr32(hw, cq->sq.head, 0);
	wr32(hw, cq->sq.tail, 0);
	wr32(hw, cq->sq.len, 0);
	wr32(hw, cq->sq.bal, 0);
	wr32(hw, cq->sq.bah, 0);

	cq->sq.count = 0; /* to indicate uninitialized queue */

	/* free ring buffers and the ring itself */
	ICE_FREE_CQ_BUFS(hw, cq, sq);
	ice_free_cq_ring(hw, &cq->sq);

shutdown_sq_out:
	ice_release_lock(&cq->sq_lock);
	return ret_code;
}
48371d10453SEric Joyner
48471d10453SEric Joyner /**
4859e54973fSEric Joyner * ice_aq_ver_check - Check the reported AQ API version
48671d10453SEric Joyner * @hw: pointer to the hardware structure
48771d10453SEric Joyner *
48871d10453SEric Joyner * Checks if the driver should load on a given AQ API version.
48971d10453SEric Joyner *
49071d10453SEric Joyner * Return: 'true' iff the driver should attempt to load. 'false' otherwise.
49171d10453SEric Joyner */
ice_aq_ver_check(struct ice_hw * hw)49271d10453SEric Joyner static bool ice_aq_ver_check(struct ice_hw *hw)
49371d10453SEric Joyner {
49471d10453SEric Joyner if (hw->api_maj_ver > EXP_FW_API_VER_MAJOR) {
49571d10453SEric Joyner /* Major API version is newer than expected, don't load */
49671d10453SEric Joyner ice_warn(hw, "The driver for the device stopped because the NVM image is newer than expected. You must install the most recent version of the network driver.\n");
49771d10453SEric Joyner return false;
49871d10453SEric Joyner } else if (hw->api_maj_ver == EXP_FW_API_VER_MAJOR) {
49971d10453SEric Joyner if (hw->api_min_ver > (EXP_FW_API_VER_MINOR + 2))
5008923de59SPiotr Kubaj ice_info(hw, "The driver for the device detected a newer version (%u.%u) of the NVM image than expected (%u.%u). Please install the most recent version of the network driver.\n",
5018923de59SPiotr Kubaj hw->api_maj_ver, hw->api_min_ver,
5028923de59SPiotr Kubaj EXP_FW_API_VER_MAJOR, EXP_FW_API_VER_MINOR);
50371d10453SEric Joyner else if ((hw->api_min_ver + 2) < EXP_FW_API_VER_MINOR)
5048923de59SPiotr Kubaj ice_info(hw, "The driver for the device detected an older version (%u.%u) of the NVM image than expected (%u.%u). Please update the NVM image.\n",
5058923de59SPiotr Kubaj hw->api_maj_ver, hw->api_min_ver,
5068923de59SPiotr Kubaj EXP_FW_API_VER_MAJOR, EXP_FW_API_VER_MINOR);
50771d10453SEric Joyner } else {
50871d10453SEric Joyner /* Major API version is older than expected, log a warning */
5098923de59SPiotr Kubaj ice_info(hw, "The driver for the device detected an older version (%u.%u) of the NVM image than expected (%u.%u). Please update the NVM image.\n",
5108923de59SPiotr Kubaj hw->api_maj_ver, hw->api_min_ver,
5118923de59SPiotr Kubaj EXP_FW_API_VER_MAJOR, EXP_FW_API_VER_MINOR);
51271d10453SEric Joyner }
51371d10453SEric Joyner return true;
51471d10453SEric Joyner }
51571d10453SEric Joyner
51671d10453SEric Joyner /**
51771d10453SEric Joyner * ice_shutdown_rq - shutdown Control ARQ
51871d10453SEric Joyner * @hw: pointer to the hardware structure
51971d10453SEric Joyner * @cq: pointer to the specific Control queue
52071d10453SEric Joyner *
52171d10453SEric Joyner * The main shutdown routine for the Control Receive Queue
52271d10453SEric Joyner */
static enum ice_status
ice_shutdown_rq(struct ice_hw *hw, struct ice_ctl_q_info *cq)
{
	enum ice_status ret_code = ICE_SUCCESS;

	ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);

	ice_acquire_lock(&cq->rq_lock);

	if (!cq->rq.count) {
		/* already shut down (or never initialized) */
		ret_code = ICE_ERR_NOT_READY;
		goto shutdown_rq_out;
	}

	/* Stop Control Queue processing; zeroing the registers
	 * (including the length register's enable bit) disables it
	 */
	wr32(hw, cq->rq.head, 0);
	wr32(hw, cq->rq.tail, 0);
	wr32(hw, cq->rq.len, 0);
	wr32(hw, cq->rq.bal, 0);
	wr32(hw, cq->rq.bah, 0);

	/* set rq.count to 0 to indicate uninitialized queue */
	cq->rq.count = 0;

	/* free ring buffers and the ring itself */
	ICE_FREE_CQ_BUFS(hw, cq, rq);
	ice_free_cq_ring(hw, &cq->rq);

shutdown_rq_out:
	ice_release_lock(&cq->rq_lock);
	return ret_code;
}
55571d10453SEric Joyner
55671d10453SEric Joyner /**
55771d10453SEric Joyner * ice_idle_aq - stop ARQ/ATQ processing momentarily
55871d10453SEric Joyner * @hw: pointer to the hardware structure
55971d10453SEric Joyner * @cq: pointer to the specific Control queue
56071d10453SEric Joyner */
void ice_idle_aq(struct ice_hw *hw, struct ice_ctl_q_info *cq)
{
	/* clearing the length registers disables both queues in hardware */
	wr32(hw, cq->sq.len, 0);
	wr32(hw, cq->rq.len, 0);

	/* brief delay to let in-flight processing settle */
	ice_msec_delay(2, false);
}
56871d10453SEric Joyner
56971d10453SEric Joyner /**
57071d10453SEric Joyner * ice_init_check_adminq - Check version for Admin Queue to know if its alive
57171d10453SEric Joyner * @hw: pointer to the hardware structure
57271d10453SEric Joyner */
static enum ice_status ice_init_check_adminq(struct ice_hw *hw)
{
	struct ice_ctl_q_info *cq = &hw->adminq;
	enum ice_status status;

	ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);

	/* the get-version command doubles as a liveness check: if it
	 * fails, the AdminQ is unusable and both rings are torn down
	 */
	status = ice_aq_get_fw_ver(hw, NULL);
	if (status)
		goto init_ctrlq_free_rq;

	if (!ice_aq_ver_check(hw)) {
		status = ICE_ERR_FW_API_VER;
		goto init_ctrlq_free_rq;
	}

	return ICE_SUCCESS;

init_ctrlq_free_rq:
	ice_shutdown_rq(hw, cq);
	ice_shutdown_sq(hw, cq);
	return status;
}
59671d10453SEric Joyner
59771d10453SEric Joyner /**
59871d10453SEric Joyner * ice_init_ctrlq - main initialization routine for any control Queue
59971d10453SEric Joyner * @hw: pointer to the hardware structure
60071d10453SEric Joyner * @q_type: specific Control queue type
60171d10453SEric Joyner *
60271d10453SEric Joyner * Prior to calling this function, the driver *MUST* set the following fields
60371d10453SEric Joyner * in the cq->structure:
60471d10453SEric Joyner * - cq->num_sq_entries
60571d10453SEric Joyner * - cq->num_rq_entries
60671d10453SEric Joyner * - cq->rq_buf_size
60771d10453SEric Joyner * - cq->sq_buf_size
60871d10453SEric Joyner *
60971d10453SEric Joyner * NOTE: this function does not initialize the controlq locks
61071d10453SEric Joyner */
ice_init_ctrlq(struct ice_hw * hw,enum ice_ctl_q q_type)61171d10453SEric Joyner static enum ice_status ice_init_ctrlq(struct ice_hw *hw, enum ice_ctl_q q_type)
61271d10453SEric Joyner {
61371d10453SEric Joyner struct ice_ctl_q_info *cq;
61471d10453SEric Joyner enum ice_status ret_code;
61571d10453SEric Joyner
61671d10453SEric Joyner ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
61771d10453SEric Joyner
61871d10453SEric Joyner switch (q_type) {
61971d10453SEric Joyner case ICE_CTL_Q_ADMIN:
62071d10453SEric Joyner ice_adminq_init_regs(hw);
62171d10453SEric Joyner cq = &hw->adminq;
62271d10453SEric Joyner break;
62371d10453SEric Joyner case ICE_CTL_Q_MAILBOX:
62471d10453SEric Joyner ice_mailbox_init_regs(hw);
62571d10453SEric Joyner cq = &hw->mailboxq;
62671d10453SEric Joyner break;
62771d10453SEric Joyner default:
62871d10453SEric Joyner return ICE_ERR_PARAM;
62971d10453SEric Joyner }
63071d10453SEric Joyner cq->qtype = q_type;
63171d10453SEric Joyner
63271d10453SEric Joyner /* verify input for valid configuration */
63371d10453SEric Joyner if (!cq->num_rq_entries || !cq->num_sq_entries ||
63471d10453SEric Joyner !cq->rq_buf_size || !cq->sq_buf_size) {
63571d10453SEric Joyner return ICE_ERR_CFG;
63671d10453SEric Joyner }
63771d10453SEric Joyner
63871d10453SEric Joyner /* setup SQ command write back timeout */
63971d10453SEric Joyner cq->sq_cmd_timeout = ICE_CTL_Q_SQ_CMD_TIMEOUT;
64071d10453SEric Joyner
64171d10453SEric Joyner /* allocate the ATQ */
64271d10453SEric Joyner ret_code = ice_init_sq(hw, cq);
64371d10453SEric Joyner if (ret_code)
64471d10453SEric Joyner return ret_code;
64571d10453SEric Joyner
64671d10453SEric Joyner /* allocate the ARQ */
64771d10453SEric Joyner ret_code = ice_init_rq(hw, cq);
64871d10453SEric Joyner if (ret_code)
64971d10453SEric Joyner goto init_ctrlq_free_sq;
65071d10453SEric Joyner
65171d10453SEric Joyner /* success! */
65271d10453SEric Joyner return ICE_SUCCESS;
65371d10453SEric Joyner
65471d10453SEric Joyner init_ctrlq_free_sq:
65571d10453SEric Joyner ice_shutdown_sq(hw, cq);
65671d10453SEric Joyner return ret_code;
65771d10453SEric Joyner }
65871d10453SEric Joyner
65971d10453SEric Joyner /**
66071d10453SEric Joyner * ice_shutdown_ctrlq - shutdown routine for any control queue
66171d10453SEric Joyner * @hw: pointer to the hardware structure
66271d10453SEric Joyner * @q_type: specific Control queue type
6638923de59SPiotr Kubaj * @unloading: is the driver unloading itself
66471d10453SEric Joyner *
66571d10453SEric Joyner * NOTE: this function does not destroy the control queue locks.
66671d10453SEric Joyner */
6679c30461dSEric Joyner static void
ice_shutdown_ctrlq(struct ice_hw * hw,enum ice_ctl_q q_type,bool unloading)6689c30461dSEric Joyner ice_shutdown_ctrlq(struct ice_hw *hw, enum ice_ctl_q q_type,
6698923de59SPiotr Kubaj bool unloading)
67071d10453SEric Joyner {
67171d10453SEric Joyner struct ice_ctl_q_info *cq;
67271d10453SEric Joyner
67371d10453SEric Joyner ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
67471d10453SEric Joyner
67571d10453SEric Joyner switch (q_type) {
67671d10453SEric Joyner case ICE_CTL_Q_ADMIN:
67771d10453SEric Joyner cq = &hw->adminq;
67871d10453SEric Joyner if (ice_check_sq_alive(hw, cq))
6798923de59SPiotr Kubaj ice_aq_q_shutdown(hw, unloading);
68071d10453SEric Joyner break;
68171d10453SEric Joyner case ICE_CTL_Q_MAILBOX:
68271d10453SEric Joyner cq = &hw->mailboxq;
68371d10453SEric Joyner break;
68471d10453SEric Joyner default:
68571d10453SEric Joyner return;
68671d10453SEric Joyner }
68771d10453SEric Joyner
68871d10453SEric Joyner ice_shutdown_sq(hw, cq);
68971d10453SEric Joyner ice_shutdown_rq(hw, cq);
69071d10453SEric Joyner }
69171d10453SEric Joyner
69271d10453SEric Joyner /**
69371d10453SEric Joyner * ice_shutdown_all_ctrlq - shutdown routine for all control queues
69471d10453SEric Joyner * @hw: pointer to the hardware structure
6958923de59SPiotr Kubaj * @unloading: is the driver unloading itself
69671d10453SEric Joyner *
69771d10453SEric Joyner * NOTE: this function does not destroy the control queue locks. The driver
69871d10453SEric Joyner * may call this at runtime to shutdown and later restart control queues, such
69971d10453SEric Joyner * as in response to a reset event.
70071d10453SEric Joyner */
void ice_shutdown_all_ctrlq(struct ice_hw *hw, bool unloading)
{
	ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
	/* Shutdown FW admin queue first; for the admin queue this also sends
	 * the shutdown AQ command to firmware (see ice_shutdown_ctrlq)
	 */
	ice_shutdown_ctrlq(hw, ICE_CTL_Q_ADMIN, unloading);
	/* Shutdown PF-VF Mailbox */
	ice_shutdown_ctrlq(hw, ICE_CTL_Q_MAILBOX, unloading);
}
70971d10453SEric Joyner
71071d10453SEric Joyner /**
7117d7af7f8SEric Joyner * ice_init_all_ctrlq - main initialization routine for all control queues
7127d7af7f8SEric Joyner * @hw: pointer to the hardware structure
7137d7af7f8SEric Joyner *
7147d7af7f8SEric Joyner * Prior to calling this function, the driver MUST* set the following fields
7157d7af7f8SEric Joyner * in the cq->structure for all control queues:
7167d7af7f8SEric Joyner * - cq->num_sq_entries
7177d7af7f8SEric Joyner * - cq->num_rq_entries
7187d7af7f8SEric Joyner * - cq->rq_buf_size
7197d7af7f8SEric Joyner * - cq->sq_buf_size
7207d7af7f8SEric Joyner *
7217d7af7f8SEric Joyner * NOTE: this function does not initialize the controlq locks.
7227d7af7f8SEric Joyner */
ice_init_all_ctrlq(struct ice_hw * hw)7237d7af7f8SEric Joyner enum ice_status ice_init_all_ctrlq(struct ice_hw *hw)
7247d7af7f8SEric Joyner {
7257d7af7f8SEric Joyner enum ice_status status;
7267d7af7f8SEric Joyner u32 retry = 0;
7277d7af7f8SEric Joyner
7287d7af7f8SEric Joyner ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
7297d7af7f8SEric Joyner
7307d7af7f8SEric Joyner /* Init FW admin queue */
7317d7af7f8SEric Joyner do {
7327d7af7f8SEric Joyner status = ice_init_ctrlq(hw, ICE_CTL_Q_ADMIN);
7337d7af7f8SEric Joyner if (status)
7347d7af7f8SEric Joyner return status;
7357d7af7f8SEric Joyner
7367d7af7f8SEric Joyner status = ice_init_check_adminq(hw);
7377d7af7f8SEric Joyner if (status != ICE_ERR_AQ_FW_CRITICAL)
7387d7af7f8SEric Joyner break;
7397d7af7f8SEric Joyner
7407d7af7f8SEric Joyner ice_debug(hw, ICE_DBG_AQ_MSG, "Retry Admin Queue init due to FW critical error\n");
7418923de59SPiotr Kubaj ice_shutdown_ctrlq(hw, ICE_CTL_Q_ADMIN, true);
7427d7af7f8SEric Joyner ice_msec_delay(ICE_CTL_Q_ADMIN_INIT_MSEC, true);
7437d7af7f8SEric Joyner } while (retry++ < ICE_CTL_Q_ADMIN_INIT_TIMEOUT);
7447d7af7f8SEric Joyner
7457d7af7f8SEric Joyner if (status)
7467d7af7f8SEric Joyner return status;
7477d7af7f8SEric Joyner /* Init Mailbox queue */
7487d7af7f8SEric Joyner return ice_init_ctrlq(hw, ICE_CTL_Q_MAILBOX);
7497d7af7f8SEric Joyner }
7507d7af7f8SEric Joyner
7517d7af7f8SEric Joyner /**
7527d7af7f8SEric Joyner * ice_init_ctrlq_locks - Initialize locks for a control queue
7537d7af7f8SEric Joyner * @cq: pointer to the control queue
7547d7af7f8SEric Joyner *
7557d7af7f8SEric Joyner * Initializes the send and receive queue locks for a given control queue.
7567d7af7f8SEric Joyner */
static void ice_init_ctrlq_locks(struct ice_ctl_q_info *cq)
{
	/* one lock each for the send and receive sides of the queue */
	ice_init_lock(&cq->sq_lock);
	ice_init_lock(&cq->rq_lock);
}
7627d7af7f8SEric Joyner
7637d7af7f8SEric Joyner /**
7647d7af7f8SEric Joyner * ice_create_all_ctrlq - main initialization routine for all control queues
7657d7af7f8SEric Joyner * @hw: pointer to the hardware structure
7667d7af7f8SEric Joyner *
7677d7af7f8SEric Joyner * Prior to calling this function, the driver *MUST* set the following fields
7687d7af7f8SEric Joyner * in the cq->structure for all control queues:
7697d7af7f8SEric Joyner * - cq->num_sq_entries
7707d7af7f8SEric Joyner * - cq->num_rq_entries
7717d7af7f8SEric Joyner * - cq->rq_buf_size
7727d7af7f8SEric Joyner * - cq->sq_buf_size
7737d7af7f8SEric Joyner *
7747d7af7f8SEric Joyner * This function creates all the control queue locks and then calls
7757d7af7f8SEric Joyner * ice_init_all_ctrlq. It should be called once during driver load. If the
7767d7af7f8SEric Joyner * driver needs to re-initialize control queues at run time it should call
7777d7af7f8SEric Joyner * ice_init_all_ctrlq instead.
7787d7af7f8SEric Joyner */
enum ice_status ice_create_all_ctrlq(struct ice_hw *hw)
{
	/* locks must exist before the queues they protect are initialized */
	ice_init_ctrlq_locks(&hw->adminq);
	ice_init_ctrlq_locks(&hw->mailboxq);

	return ice_init_all_ctrlq(hw);
}
7867d7af7f8SEric Joyner
7877d7af7f8SEric Joyner /**
78871d10453SEric Joyner * ice_destroy_ctrlq_locks - Destroy locks for a control queue
78971d10453SEric Joyner * @cq: pointer to the control queue
79071d10453SEric Joyner *
79171d10453SEric Joyner * Destroys the send and receive queue locks for a given control queue.
79271d10453SEric Joyner */
static void ice_destroy_ctrlq_locks(struct ice_ctl_q_info *cq)
{
	/* counterpart of ice_init_ctrlq_locks */
	ice_destroy_lock(&cq->sq_lock);
	ice_destroy_lock(&cq->rq_lock);
}
79871d10453SEric Joyner
79971d10453SEric Joyner /**
80071d10453SEric Joyner * ice_destroy_all_ctrlq - exit routine for all control queues
80171d10453SEric Joyner * @hw: pointer to the hardware structure
80271d10453SEric Joyner *
80371d10453SEric Joyner * This function shuts down all the control queues and then destroys the
80471d10453SEric Joyner * control queue locks. It should be called once during driver unload. The
80571d10453SEric Joyner * driver should call ice_shutdown_all_ctrlq if it needs to shut down and
80671d10453SEric Joyner * reinitialize control queues, such as in response to a reset event.
80771d10453SEric Joyner */
void ice_destroy_all_ctrlq(struct ice_hw *hw)
{
	/* shut down all the control queues first; unloading=true since this
	 * path is only taken during driver unload
	 */
	ice_shutdown_all_ctrlq(hw, true);

	ice_destroy_ctrlq_locks(&hw->adminq);
	ice_destroy_ctrlq_locks(&hw->mailboxq);
}
81671d10453SEric Joyner
81771d10453SEric Joyner /**
8189c30461dSEric Joyner * ice_clean_sq - cleans send side of a control queue
81971d10453SEric Joyner * @hw: pointer to the hardware structure
82071d10453SEric Joyner * @cq: pointer to the specific Control queue
82171d10453SEric Joyner *
82271d10453SEric Joyner * returns the number of free desc
82371d10453SEric Joyner */
ice_clean_sq(struct ice_hw * hw,struct ice_ctl_q_info * cq)82471d10453SEric Joyner static u16 ice_clean_sq(struct ice_hw *hw, struct ice_ctl_q_info *cq)
82571d10453SEric Joyner {
82671d10453SEric Joyner struct ice_ctl_q_ring *sq = &cq->sq;
82771d10453SEric Joyner u16 ntc = sq->next_to_clean;
82871d10453SEric Joyner struct ice_aq_desc *desc;
82971d10453SEric Joyner
83071d10453SEric Joyner desc = ICE_CTL_Q_DESC(*sq, ntc);
83171d10453SEric Joyner
83271d10453SEric Joyner while (rd32(hw, cq->sq.head) != ntc) {
8337d7af7f8SEric Joyner ice_debug(hw, ICE_DBG_AQ_MSG, "ntc %d head %d.\n", ntc, rd32(hw, cq->sq.head));
83471d10453SEric Joyner ice_memset(desc, 0, sizeof(*desc), ICE_DMA_MEM);
83571d10453SEric Joyner ntc++;
83671d10453SEric Joyner if (ntc == sq->count)
83771d10453SEric Joyner ntc = 0;
83871d10453SEric Joyner desc = ICE_CTL_Q_DESC(*sq, ntc);
83971d10453SEric Joyner }
84071d10453SEric Joyner
84171d10453SEric Joyner sq->next_to_clean = ntc;
84271d10453SEric Joyner
84371d10453SEric Joyner return ICE_CTL_Q_DESC_UNUSED(sq);
84471d10453SEric Joyner }
84571d10453SEric Joyner
84671d10453SEric Joyner /**
8479c30461dSEric Joyner * ice_ctl_q_str - Convert control queue type to string
8489c30461dSEric Joyner * @qtype: the control queue type
8499c30461dSEric Joyner *
8509c30461dSEric Joyner * Returns: A string name for the given control queue type.
8519c30461dSEric Joyner */
ice_ctl_q_str(enum ice_ctl_q qtype)8529c30461dSEric Joyner static const char *ice_ctl_q_str(enum ice_ctl_q qtype)
8539c30461dSEric Joyner {
8549c30461dSEric Joyner switch (qtype) {
8559c30461dSEric Joyner case ICE_CTL_Q_UNKNOWN:
8569c30461dSEric Joyner return "Unknown CQ";
8579c30461dSEric Joyner case ICE_CTL_Q_ADMIN:
8589c30461dSEric Joyner return "AQ";
8599c30461dSEric Joyner case ICE_CTL_Q_MAILBOX:
8609c30461dSEric Joyner return "MBXQ";
8619c30461dSEric Joyner default:
8629c30461dSEric Joyner return "Unrecognized CQ";
8639c30461dSEric Joyner }
8649c30461dSEric Joyner }
8659c30461dSEric Joyner
8669c30461dSEric Joyner /**
86771d10453SEric Joyner * ice_debug_cq
86871d10453SEric Joyner * @hw: pointer to the hardware structure
8699c30461dSEric Joyner * @cq: pointer to the specific Control queue
87071d10453SEric Joyner * @desc: pointer to control queue descriptor
87171d10453SEric Joyner * @buf: pointer to command buffer
87271d10453SEric Joyner * @buf_len: max length of buf
8739c30461dSEric Joyner * @response: true if this is the writeback response
87471d10453SEric Joyner *
87571d10453SEric Joyner * Dumps debug log about control command with descriptor contents.
87671d10453SEric Joyner */
8779c30461dSEric Joyner static void
ice_debug_cq(struct ice_hw * hw,struct ice_ctl_q_info * cq,void * desc,void * buf,u16 buf_len,bool response)8789c30461dSEric Joyner ice_debug_cq(struct ice_hw *hw, struct ice_ctl_q_info *cq,
8799c30461dSEric Joyner void *desc, void *buf, u16 buf_len, bool response)
88071d10453SEric Joyner {
88171d10453SEric Joyner struct ice_aq_desc *cq_desc = (struct ice_aq_desc *)desc;
88271d10453SEric Joyner u16 datalen, flags;
88371d10453SEric Joyner
88471d10453SEric Joyner if (!((ICE_DBG_AQ_DESC | ICE_DBG_AQ_DESC_BUF) & hw->debug_mask))
88571d10453SEric Joyner return;
88671d10453SEric Joyner
88771d10453SEric Joyner if (!desc)
88871d10453SEric Joyner return;
88971d10453SEric Joyner
89071d10453SEric Joyner datalen = LE16_TO_CPU(cq_desc->datalen);
89171d10453SEric Joyner flags = LE16_TO_CPU(cq_desc->flags);
89271d10453SEric Joyner
8939c30461dSEric Joyner ice_debug(hw, ICE_DBG_AQ_DESC, "%s %s: opcode 0x%04X, flags 0x%04X, datalen 0x%04X, retval 0x%04X\n",
8949c30461dSEric Joyner ice_ctl_q_str(cq->qtype), response ? "Response" : "Command",
89571d10453SEric Joyner LE16_TO_CPU(cq_desc->opcode), flags, datalen,
89671d10453SEric Joyner LE16_TO_CPU(cq_desc->retval));
89771d10453SEric Joyner ice_debug(hw, ICE_DBG_AQ_DESC, "\tcookie (h,l) 0x%08X 0x%08X\n",
89871d10453SEric Joyner LE32_TO_CPU(cq_desc->cookie_high),
89971d10453SEric Joyner LE32_TO_CPU(cq_desc->cookie_low));
90071d10453SEric Joyner ice_debug(hw, ICE_DBG_AQ_DESC, "\tparam (0,1) 0x%08X 0x%08X\n",
90171d10453SEric Joyner LE32_TO_CPU(cq_desc->params.generic.param0),
90271d10453SEric Joyner LE32_TO_CPU(cq_desc->params.generic.param1));
90371d10453SEric Joyner ice_debug(hw, ICE_DBG_AQ_DESC, "\taddr (h,l) 0x%08X 0x%08X\n",
90471d10453SEric Joyner LE32_TO_CPU(cq_desc->params.generic.addr_high),
90571d10453SEric Joyner LE32_TO_CPU(cq_desc->params.generic.addr_low));
90671d10453SEric Joyner /* Dump buffer iff 1) one exists and 2) is either a response indicated
90771d10453SEric Joyner * by the DD and/or CMP flag set or a command with the RD flag set.
90871d10453SEric Joyner */
90971d10453SEric Joyner if (buf && cq_desc->datalen != 0 &&
91071d10453SEric Joyner (flags & (ICE_AQ_FLAG_DD | ICE_AQ_FLAG_CMP) ||
91171d10453SEric Joyner flags & ICE_AQ_FLAG_RD)) {
91271d10453SEric Joyner ice_debug(hw, ICE_DBG_AQ_DESC_BUF, "Buffer:\n");
91371d10453SEric Joyner ice_debug_array(hw, ICE_DBG_AQ_DESC_BUF, 16, 1, (u8 *)buf,
91471d10453SEric Joyner MIN_T(u16, buf_len, datalen));
91571d10453SEric Joyner }
91671d10453SEric Joyner }
91771d10453SEric Joyner
91871d10453SEric Joyner /**
9199c30461dSEric Joyner * ice_sq_done - check if the last send on a control queue has completed
92071d10453SEric Joyner * @hw: pointer to the HW struct
92171d10453SEric Joyner * @cq: pointer to the specific Control queue
92271d10453SEric Joyner *
9239c30461dSEric Joyner * Returns: true if all the descriptors on the send side of a control queue
9249c30461dSEric Joyner * are finished processing, false otherwise.
92571d10453SEric Joyner */
bool ice_sq_done(struct ice_hw *hw, struct ice_ctl_q_info *cq)
{
	/* control queue designers suggest use of head for better
	 * timing reliability than DD bit; when the hardware head register
	 * has caught up to next_to_use, every posted descriptor has been
	 * consumed
	 */
	return rd32(hw, cq->sq.head) == cq->sq.next_to_use;
}
93371d10453SEric Joyner
93471d10453SEric Joyner /**
9359c30461dSEric Joyner * ice_sq_send_cmd_nolock - send command to a control queue
93671d10453SEric Joyner * @hw: pointer to the HW struct
93771d10453SEric Joyner * @cq: pointer to the specific Control queue
93871d10453SEric Joyner * @desc: prefilled descriptor describing the command (non DMA mem)
93971d10453SEric Joyner * @buf: buffer to use for indirect commands (or NULL for direct commands)
94071d10453SEric Joyner * @buf_size: size of buffer for indirect commands (or 0 for direct commands)
94171d10453SEric Joyner * @cd: pointer to command details structure
94271d10453SEric Joyner *
9439c30461dSEric Joyner * This is the main send command routine for a control queue. It prepares the
9449c30461dSEric Joyner * command into a descriptor, bumps the send queue tail, waits for the command
9459c30461dSEric Joyner * to complete, captures status and data for the command, etc.
94671d10453SEric Joyner */
static enum ice_status
ice_sq_send_cmd_nolock(struct ice_hw *hw, struct ice_ctl_q_info *cq,
		       struct ice_aq_desc *desc, void *buf, u16 buf_size,
		       struct ice_sq_cd *cd)
{
	struct ice_dma_mem *dma_buf = NULL;
	struct ice_aq_desc *desc_on_ring;
	bool cmd_completed = false;
	enum ice_status status = ICE_SUCCESS;
	u32 total_delay = 0;
	u16 retval = 0;
	u32 val = 0;

	/* if reset is in progress return a soft error */
	if (hw->reset_ongoing)
		return ICE_ERR_RESET_ONGOING;

	cq->sq_last_status = ICE_AQ_RC_OK;

	if (!cq->sq.count) {
		ice_debug(hw, ICE_DBG_AQ_MSG, "Control Send queue not initialized.\n");
		status = ICE_ERR_AQ_EMPTY;
		goto sq_send_command_error;
	}

	/* buf and buf_size must be supplied together or not at all */
	if ((buf && !buf_size) || (!buf && buf_size)) {
		status = ICE_ERR_PARAM;
		goto sq_send_command_error;
	}

	if (buf) {
		if (buf_size > cq->sq_buf_size) {
			ice_debug(hw, ICE_DBG_AQ_MSG, "Invalid buffer size for Control Send queue: %d.\n",
				  buf_size);
			status = ICE_ERR_INVAL_SIZE;
			goto sq_send_command_error;
		}

		/* mark the descriptor as indirect (and large, if needed) */
		desc->flags |= CPU_TO_LE16(ICE_AQ_FLAG_BUF);
		if (buf_size > ICE_AQ_LG_BUF)
			desc->flags |= CPU_TO_LE16(ICE_AQ_FLAG_LB);
	}

	/* sanity check the hardware head pointer against the ring size */
	val = rd32(hw, cq->sq.head);
	if (val >= cq->num_sq_entries) {
		ice_debug(hw, ICE_DBG_AQ_MSG, "head overrun at %d in the Control Send Queue ring\n",
			  val);
		status = ICE_ERR_AQ_EMPTY;
		goto sq_send_command_error;
	}

	/* Call clean and check queue available function to reclaim the
	 * descriptors that were processed by FW/MBX; the function returns the
	 * number of desc available. The clean function called here could be
	 * called in a separate thread in case of asynchronous completions.
	 */
	if (ice_clean_sq(hw, cq) == 0) {
		ice_debug(hw, ICE_DBG_AQ_MSG, "Error: Control Send Queue is full.\n");
		status = ICE_ERR_AQ_FULL;
		goto sq_send_command_error;
	}

	/* initialize the temp desc pointer with the right desc */
	desc_on_ring = ICE_CTL_Q_DESC(cq->sq, cq->sq.next_to_use);

	/* if the desc is available copy the temp desc to the right place */
	ice_memcpy(desc_on_ring, desc, sizeof(*desc_on_ring),
		   ICE_NONDMA_TO_DMA);

	/* if buf is not NULL assume indirect command */
	if (buf) {
		dma_buf = &cq->sq.r.sq_bi[cq->sq.next_to_use];
		/* copy the user buf into the respective DMA buf */
		ice_memcpy(dma_buf->va, buf, buf_size, ICE_NONDMA_TO_DMA);
		desc_on_ring->datalen = CPU_TO_LE16(buf_size);

		/* Update the address values in the desc with the pa value
		 * for respective buffer
		 */
		desc_on_ring->params.generic.addr_high =
			CPU_TO_LE32(ICE_HI_DWORD(dma_buf->pa));
		desc_on_ring->params.generic.addr_low =
			CPU_TO_LE32(ICE_LO_DWORD(dma_buf->pa));
	}

	/* Debug desc and buffer */
	ice_debug(hw, ICE_DBG_AQ_DESC, "ATQ: Control Send queue desc and buffer:\n");
	ice_debug_cq(hw, cq, (void *)desc_on_ring, buf, buf_size, false);

	/* bump next_to_use (wrapping at ring size) and notify hardware by
	 * writing the tail register
	 */
	(cq->sq.next_to_use)++;
	if (cq->sq.next_to_use == cq->sq.count)
		cq->sq.next_to_use = 0;
	wr32(hw, cq->sq.tail, cq->sq.next_to_use);
	ice_flush(hw);

	/* Wait a short time before initial ice_sq_done() check, to allow
	 * hardware time for completion.
	 */
	ice_usec_delay(5, false);

	/* poll for completion, 10us per iteration up to sq_cmd_timeout */
	do {
		if (ice_sq_done(hw, cq))
			break;

		ice_usec_delay(10, false);
		total_delay++;
	} while (total_delay < cq->sq_cmd_timeout);

	/* if ready, copy the desc back to temp */
	if (ice_sq_done(hw, cq)) {
		ice_memcpy(desc, desc_on_ring, sizeof(*desc),
			   ICE_DMA_TO_NONDMA);
		if (buf) {
			/* get returned length to copy */
			u16 copy_size = LE16_TO_CPU(desc->datalen);

			if (copy_size > buf_size) {
				ice_debug(hw, ICE_DBG_AQ_MSG, "Return len %d > than buf len %d\n",
					  copy_size, buf_size);
				status = ICE_ERR_AQ_ERROR;
			} else {
				ice_memcpy(buf, dma_buf->va, copy_size,
					   ICE_DMA_TO_NONDMA);
			}
		}
		retval = LE16_TO_CPU(desc->retval);
		if (retval) {
			ice_debug(hw, ICE_DBG_AQ_MSG, "Control Send Queue command 0x%04X completed with error 0x%X\n",
				  LE16_TO_CPU(desc->opcode),
				  retval);

			/* strip off FW internal code */
			retval &= 0xff;
		}
		cmd_completed = true;
		if (!status && retval != ICE_AQ_RC_OK)
			status = ICE_ERR_AQ_ERROR;
		cq->sq_last_status = (enum ice_aq_err)retval;
	}

	ice_debug(hw, ICE_DBG_AQ_MSG, "ATQ: desc and buffer writeback:\n");
	ice_debug_cq(hw, cq, (void *)desc, buf, buf_size, true);

	/* save writeback AQ if requested */
	if (cd && cd->wb_desc)
		ice_memcpy(cd->wb_desc, desc_on_ring,
			   sizeof(*cd->wb_desc), ICE_DMA_TO_NONDMA);

	/* update the error if time out occurred; distinguish a critical FW
	 * error (len register critical bits set) from a plain timeout
	 */
	if (!cmd_completed) {
		if (rd32(hw, cq->rq.len) & cq->rq.len_crit_mask ||
		    rd32(hw, cq->sq.len) & cq->sq.len_crit_mask) {
			ice_debug(hw, ICE_DBG_AQ_MSG, "Critical FW error.\n");
			status = ICE_ERR_AQ_FW_CRITICAL;
		} else {
			ice_debug(hw, ICE_DBG_AQ_MSG, "Control Send Queue Writeback timeout.\n");
			status = ICE_ERR_AQ_TIMEOUT;
		}
	}

sq_send_command_error:
	return status;
}
111071d10453SEric Joyner
111171d10453SEric Joyner /**
11129c30461dSEric Joyner * ice_sq_send_cmd - send command to a control queue
111371d10453SEric Joyner * @hw: pointer to the HW struct
111471d10453SEric Joyner * @cq: pointer to the specific Control queue
11159cf1841cSEric Joyner * @desc: prefilled descriptor describing the command
111671d10453SEric Joyner * @buf: buffer to use for indirect commands (or NULL for direct commands)
111771d10453SEric Joyner * @buf_size: size of buffer for indirect commands (or 0 for direct commands)
111871d10453SEric Joyner * @cd: pointer to command details structure
111971d10453SEric Joyner *
11209c30461dSEric Joyner * Main command for the transmit side of a control queue. It puts the command
11219c30461dSEric Joyner * on the queue, bumps the tail, waits for processing of the command, captures
11229c30461dSEric Joyner * command status and results, etc.
112371d10453SEric Joyner */
112471d10453SEric Joyner enum ice_status
ice_sq_send_cmd(struct ice_hw * hw,struct ice_ctl_q_info * cq,struct ice_aq_desc * desc,void * buf,u16 buf_size,struct ice_sq_cd * cd)112571d10453SEric Joyner ice_sq_send_cmd(struct ice_hw *hw, struct ice_ctl_q_info *cq,
112671d10453SEric Joyner struct ice_aq_desc *desc, void *buf, u16 buf_size,
112771d10453SEric Joyner struct ice_sq_cd *cd)
112871d10453SEric Joyner {
112971d10453SEric Joyner enum ice_status status = ICE_SUCCESS;
113071d10453SEric Joyner
113171d10453SEric Joyner /* if reset is in progress return a soft error */
113271d10453SEric Joyner if (hw->reset_ongoing)
113371d10453SEric Joyner return ICE_ERR_RESET_ONGOING;
113471d10453SEric Joyner
113571d10453SEric Joyner ice_acquire_lock(&cq->sq_lock);
113671d10453SEric Joyner status = ice_sq_send_cmd_nolock(hw, cq, desc, buf, buf_size, cd);
113771d10453SEric Joyner ice_release_lock(&cq->sq_lock);
113871d10453SEric Joyner
113971d10453SEric Joyner return status;
114071d10453SEric Joyner }
114171d10453SEric Joyner
114271d10453SEric Joyner /**
114371d10453SEric Joyner * ice_fill_dflt_direct_cmd_desc - AQ descriptor helper function
114471d10453SEric Joyner * @desc: pointer to the temp descriptor (non DMA mem)
114571d10453SEric Joyner * @opcode: the opcode can be used to decide which flags to turn off or on
114671d10453SEric Joyner *
114771d10453SEric Joyner * Fill the desc with default values
114871d10453SEric Joyner */
void ice_fill_dflt_direct_cmd_desc(struct ice_aq_desc *desc, u16 opcode)
{
	/* zero out the desc */
	ice_memset(desc, 0, sizeof(*desc), ICE_NONDMA_MEM);
	/* descriptor fields are little-endian on the ring */
	desc->opcode = CPU_TO_LE16(opcode);
	/* SI is the only flag set by default for direct commands */
	desc->flags = CPU_TO_LE16(ICE_AQ_FLAG_SI);
}
115671d10453SEric Joyner
115771d10453SEric Joyner /**
115871d10453SEric Joyner * ice_clean_rq_elem
115971d10453SEric Joyner * @hw: pointer to the HW struct
116071d10453SEric Joyner * @cq: pointer to the specific Control queue
116171d10453SEric Joyner * @e: event info from the receive descriptor, includes any buffers
116271d10453SEric Joyner * @pending: number of events that could be left to process
116371d10453SEric Joyner *
11649c30461dSEric Joyner * Clean one element from the receive side of a control queue. On return 'e'
11659c30461dSEric Joyner * contains contents of the message, and 'pending' contains the number of
11669c30461dSEric Joyner * events left to process.
116771d10453SEric Joyner */
enum ice_status
ice_clean_rq_elem(struct ice_hw *hw, struct ice_ctl_q_info *cq,
		  struct ice_rq_event_info *e, u16 *pending)
{
	u16 ntc = cq->rq.next_to_clean;
	enum ice_aq_err rq_last_status;
	enum ice_status ret_code = ICE_SUCCESS;
	struct ice_aq_desc *desc;
	struct ice_dma_mem *bi;
	u16 desc_idx;
	u16 datalen;
	u16 flags;
	u16 ntu;

	/* pre-clean the event info so stale descriptor contents are never
	 * reported to the caller if we bail out early
	 */
	ice_memset(&e->desc, 0, sizeof(e->desc), ICE_NONDMA_MEM);

	/* take the lock before we start messing with the ring */
	ice_acquire_lock(&cq->rq_lock);

	/* rq.count is zero until the queue is initialized (and after it is
	 * shut down), so this guards against use of an unconfigured ring
	 */
	if (!cq->rq.count) {
		ice_debug(hw, ICE_DBG_AQ_MSG, "Control Receive queue not initialized.\n");
		ret_code = ICE_ERR_AQ_EMPTY;
		goto clean_rq_elem_err;
	}

	/* set next_to_use to head */
	ntu = (u16)(rd32(hw, cq->rq.head) & cq->rq.head_mask);

	if (ntu == ntc) {
		/* nothing to do - shouldn't need to update ring's values */
		ret_code = ICE_ERR_AQ_NO_WORK;
		goto clean_rq_elem_out;
	}

	/* now clean the next descriptor */
	desc = ICE_CTL_Q_DESC(cq->rq, ntc);
	desc_idx = ntc;

	rq_last_status = (enum ice_aq_err)LE16_TO_CPU(desc->retval);
	flags = LE16_TO_CPU(desc->flags);
	if (flags & ICE_AQ_FLAG_ERR) {
		/* FW flagged an error; report it via ret_code but still
		 * deliver the descriptor/message contents to the caller below
		 */
		ret_code = ICE_ERR_AQ_ERROR;
		ice_debug(hw, ICE_DBG_AQ_MSG, "Control Receive Queue Event 0x%04X received with error 0x%X\n",
			  LE16_TO_CPU(desc->opcode), rq_last_status);
	}
	/* copy the descriptor out of DMA memory into the caller's event */
	ice_memcpy(&e->desc, desc, sizeof(e->desc), ICE_DMA_TO_NONDMA);
	datalen = LE16_TO_CPU(desc->datalen);
	/* FW overwrote datalen with the actual message size; never copy more
	 * than the caller's buffer can hold
	 */
	e->msg_len = MIN_T(u16, datalen, e->buf_len);
	if (e->msg_buf && e->msg_len)
		ice_memcpy(e->msg_buf, cq->rq.r.rq_bi[desc_idx].va,
			   e->msg_len, ICE_DMA_TO_NONDMA);

	ice_debug(hw, ICE_DBG_AQ_DESC, "ARQ: desc and buffer:\n");
	ice_debug_cq(hw, cq, (void *)desc, e->msg_buf, cq->rq_buf_size, true);

	/* Restore the original datalen and buffer address in the desc,
	 * FW updates datalen to indicate the event message size
	 */
	bi = &cq->rq.r.rq_bi[ntc];
	ice_memset(desc, 0, sizeof(*desc), ICE_DMA_MEM);

	desc->flags = CPU_TO_LE16(ICE_AQ_FLAG_BUF);
	if (cq->rq_buf_size > ICE_AQ_LG_BUF)
		desc->flags |= CPU_TO_LE16(ICE_AQ_FLAG_LB);
	desc->datalen = CPU_TO_LE16(bi->size);
	desc->params.generic.addr_high = CPU_TO_LE32(ICE_HI_DWORD(bi->pa));
	desc->params.generic.addr_low = CPU_TO_LE32(ICE_LO_DWORD(bi->pa));

	/* set tail = the last cleaned desc index. */
	wr32(hw, cq->rq.tail, ntc);
	/* ntc is updated to tail + 1 */
	ntc++;
	if (ntc == cq->num_rq_entries)
		ntc = 0;
	cq->rq.next_to_clean = ntc;
	cq->rq.next_to_use = ntu;

clean_rq_elem_out:
	/* Set pending if needed, unlock and return */
	if (pending) {
		/* re-read HW head to calculate actual pending messages */
		ntu = (u16)(rd32(hw, cq->rq.head) & cq->rq.head_mask);
		/* if ntc has wrapped past the head, add one full ring's worth
		 * so the unsigned subtraction below yields the true count
		 */
		*pending = (u16)((ntc > ntu ? cq->rq.count : 0) + (ntu - ntc));
	}
clean_rq_elem_err:
	ice_release_lock(&cq->rq_lock);

	return ret_code;
}
1258