12af541bfSCheng Xu // SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
22af541bfSCheng Xu
32af541bfSCheng Xu /* Authors: Cheng Xu <chengyou@linux.alibaba.com> */
42af541bfSCheng Xu /* Kai Shen <kaishen@linux.alibaba.com> */
52af541bfSCheng Xu /* Copyright (c) 2020-2022, Alibaba Group. */
62af541bfSCheng Xu
72af541bfSCheng Xu #include "erdma.h"
82af541bfSCheng Xu
/* Ring the command CQ doorbell with the ARM bit set so that the device
 * raises an EQ event when new command CQEs arrive (event mode).
 */
static void arm_cmdq_cq(struct erdma_cmdq *cmdq)
{
	struct erdma_dev *dev = container_of(cmdq, struct erdma_dev, cmdq);
	u64 db_data = FIELD_PREP(ERDMA_CQDB_CI_MASK, cmdq->cq.ci) |
		      FIELD_PREP(ERDMA_CQDB_ARM_MASK, 1) |
		      FIELD_PREP(ERDMA_CQDB_CMDSN_MASK, cmdq->cq.cmdsn) |
		      FIELD_PREP(ERDMA_CQDB_IDX_MASK, cmdq->cq.cmdsn);

	/* Publish the doorbell value to the host-memory doorbell record
	 * before ringing the MMIO doorbell register.
	 */
	*cmdq->cq.dbrec = db_data;
	writeq(db_data, dev->func_bar + ERDMA_CMDQ_CQDB_REG);

	atomic64_inc(&cmdq->cq.armed_num);
}
222af541bfSCheng Xu
/* Notify the device of the new SQ producer index: update the host-memory
 * doorbell record first, then ring the MMIO SQ doorbell.
 */
static void kick_cmdq_db(struct erdma_cmdq *cmdq)
{
	struct erdma_dev *dev = container_of(cmdq, struct erdma_dev, cmdq);
	u64 db_data = FIELD_PREP(ERDMA_CMD_HDR_WQEBB_INDEX_MASK, cmdq->sq.pi);

	*cmdq->sq.dbrec = db_data;
	writeq(db_data, dev->func_bar + ERDMA_CMDQ_SQDB_REG);
}
312af541bfSCheng Xu
get_comp_wait(struct erdma_cmdq * cmdq)322af541bfSCheng Xu static struct erdma_comp_wait *get_comp_wait(struct erdma_cmdq *cmdq)
332af541bfSCheng Xu {
342af541bfSCheng Xu int comp_idx;
352af541bfSCheng Xu
362af541bfSCheng Xu spin_lock(&cmdq->lock);
372af541bfSCheng Xu comp_idx = find_first_zero_bit(cmdq->comp_wait_bitmap,
382af541bfSCheng Xu cmdq->max_outstandings);
392af541bfSCheng Xu if (comp_idx == cmdq->max_outstandings) {
402af541bfSCheng Xu spin_unlock(&cmdq->lock);
412af541bfSCheng Xu return ERR_PTR(-ENOMEM);
422af541bfSCheng Xu }
432af541bfSCheng Xu
442af541bfSCheng Xu __set_bit(comp_idx, cmdq->comp_wait_bitmap);
452af541bfSCheng Xu spin_unlock(&cmdq->lock);
462af541bfSCheng Xu
472af541bfSCheng Xu return &cmdq->wait_pool[comp_idx];
482af541bfSCheng Xu }
492af541bfSCheng Xu
put_comp_wait(struct erdma_cmdq * cmdq,struct erdma_comp_wait * comp_wait)502af541bfSCheng Xu static void put_comp_wait(struct erdma_cmdq *cmdq,
512af541bfSCheng Xu struct erdma_comp_wait *comp_wait)
522af541bfSCheng Xu {
532af541bfSCheng Xu int used;
542af541bfSCheng Xu
552af541bfSCheng Xu cmdq->wait_pool[comp_wait->ctx_id].cmd_status = ERDMA_CMD_STATUS_INIT;
562af541bfSCheng Xu spin_lock(&cmdq->lock);
572af541bfSCheng Xu used = __test_and_clear_bit(comp_wait->ctx_id, cmdq->comp_wait_bitmap);
582af541bfSCheng Xu spin_unlock(&cmdq->lock);
592af541bfSCheng Xu
602af541bfSCheng Xu WARN_ON(!used);
612af541bfSCheng Xu }
622af541bfSCheng Xu
erdma_cmdq_wait_res_init(struct erdma_dev * dev,struct erdma_cmdq * cmdq)632af541bfSCheng Xu static int erdma_cmdq_wait_res_init(struct erdma_dev *dev,
642af541bfSCheng Xu struct erdma_cmdq *cmdq)
652af541bfSCheng Xu {
662af541bfSCheng Xu int i;
672af541bfSCheng Xu
682af541bfSCheng Xu cmdq->wait_pool =
692af541bfSCheng Xu devm_kcalloc(&dev->pdev->dev, cmdq->max_outstandings,
702af541bfSCheng Xu sizeof(struct erdma_comp_wait), GFP_KERNEL);
712af541bfSCheng Xu if (!cmdq->wait_pool)
722af541bfSCheng Xu return -ENOMEM;
732af541bfSCheng Xu
742af541bfSCheng Xu spin_lock_init(&cmdq->lock);
752af541bfSCheng Xu cmdq->comp_wait_bitmap = devm_bitmap_zalloc(
762af541bfSCheng Xu &dev->pdev->dev, cmdq->max_outstandings, GFP_KERNEL);
772af541bfSCheng Xu if (!cmdq->comp_wait_bitmap)
782af541bfSCheng Xu return -ENOMEM;
792af541bfSCheng Xu
802af541bfSCheng Xu for (i = 0; i < cmdq->max_outstandings; i++) {
812af541bfSCheng Xu init_completion(&cmdq->wait_pool[i].wait_event);
822af541bfSCheng Xu cmdq->wait_pool[i].ctx_id = i;
832af541bfSCheng Xu }
842af541bfSCheng Xu
852af541bfSCheng Xu return 0;
862af541bfSCheng Xu }
872af541bfSCheng Xu
/* Allocate the command SQ ring buffer and doorbell record, then program
 * the queue's base address, depth and doorbell-record DMA address into
 * the device registers.
 * Returns 0 on success or -ENOMEM on allocation failure.
 */
static int erdma_cmdq_sq_init(struct erdma_dev *dev)
{
	struct erdma_cmdq *cmdq = &dev->cmdq;
	struct erdma_cmdq_sq *sq = &cmdq->sq;

	/* One command SQE may occupy several WQE building blocks. */
	sq->wqebb_cnt = SQEBB_COUNT(ERDMA_CMDQ_SQE_SIZE);
	sq->depth = cmdq->max_outstandings * sq->wqebb_cnt;

	sq->qbuf = dma_alloc_coherent(&dev->pdev->dev, sq->depth << SQEBB_SHIFT,
				      &sq->qbuf_dma_addr, GFP_KERNEL);
	if (!sq->qbuf)
		return -ENOMEM;

	/* Host-memory doorbell record; its DMA address is handed to the
	 * device below.
	 */
	sq->dbrec = dma_pool_zalloc(dev->db_pool, GFP_KERNEL, &sq->dbrec_dma);
	if (!sq->dbrec)
		goto err_out;

	spin_lock_init(&sq->lock);

	erdma_reg_write32(dev, ERDMA_REGS_CMDQ_SQ_ADDR_H_REG,
			  upper_32_bits(sq->qbuf_dma_addr));
	erdma_reg_write32(dev, ERDMA_REGS_CMDQ_SQ_ADDR_L_REG,
			  lower_32_bits(sq->qbuf_dma_addr));
	erdma_reg_write32(dev, ERDMA_REGS_CMDQ_DEPTH_REG, sq->depth);
	erdma_reg_write64(dev, ERDMA_CMDQ_SQ_DB_HOST_ADDR_REG, sq->dbrec_dma);

	return 0;

err_out:
	dma_free_coherent(&dev->pdev->dev, sq->depth << SQEBB_SHIFT,
			  sq->qbuf, sq->qbuf_dma_addr);

	return -ENOMEM;
}
1222af541bfSCheng Xu
/* Allocate the command CQ ring buffer and doorbell record, then program
 * the queue's base address and doorbell-record DMA address into the
 * device registers.  CQ depth mirrors the SQ depth.
 * Returns 0 on success or -ENOMEM on allocation failure.
 */
static int erdma_cmdq_cq_init(struct erdma_dev *dev)
{
	struct erdma_cmdq *cmdq = &dev->cmdq;
	struct erdma_cmdq_cq *cq = &cmdq->cq;

	cq->depth = cmdq->sq.depth;
	cq->qbuf = dma_alloc_coherent(&dev->pdev->dev, cq->depth << CQE_SHIFT,
				      &cq->qbuf_dma_addr, GFP_KERNEL);
	if (!cq->qbuf)
		return -ENOMEM;

	spin_lock_init(&cq->lock);

	cq->dbrec = dma_pool_zalloc(dev->db_pool, GFP_KERNEL, &cq->dbrec_dma);
	if (!cq->dbrec)
		goto err_out;

	atomic64_set(&cq->armed_num, 0);

	erdma_reg_write32(dev, ERDMA_REGS_CMDQ_CQ_ADDR_H_REG,
			  upper_32_bits(cq->qbuf_dma_addr));
	erdma_reg_write32(dev, ERDMA_REGS_CMDQ_CQ_ADDR_L_REG,
			  lower_32_bits(cq->qbuf_dma_addr));
	erdma_reg_write64(dev, ERDMA_CMDQ_CQ_DB_HOST_ADDR_REG, cq->dbrec_dma);

	return 0;

err_out:
	dma_free_coherent(&dev->pdev->dev, cq->depth << CQE_SHIFT, cq->qbuf,
			  cq->qbuf_dma_addr);

	return -ENOMEM;
}
1562af541bfSCheng Xu
/* Allocate the command EQ ring buffer and doorbell record, then program
 * the queue's base address, depth and doorbell-record DMA address into
 * the device registers.  One EQE per outstanding command suffices.
 * Returns 0 on success or -ENOMEM on allocation failure.
 */
static int erdma_cmdq_eq_init(struct erdma_dev *dev)
{
	struct erdma_cmdq *cmdq = &dev->cmdq;
	struct erdma_eq *eq = &cmdq->eq;

	eq->depth = cmdq->max_outstandings;
	eq->qbuf = dma_alloc_coherent(&dev->pdev->dev, eq->depth << EQE_SHIFT,
				      &eq->qbuf_dma_addr, GFP_KERNEL);
	if (!eq->qbuf)
		return -ENOMEM;

	spin_lock_init(&eq->lock);
	atomic64_set(&eq->event_num, 0);

	/* The command EQ shares the CEQ doorbell register block. */
	eq->db = dev->func_bar + ERDMA_REGS_CEQ_DB_BASE_REG;
	eq->dbrec = dma_pool_zalloc(dev->db_pool, GFP_KERNEL, &eq->dbrec_dma);
	if (!eq->dbrec)
		goto err_out;

	erdma_reg_write32(dev, ERDMA_REGS_CMDQ_EQ_ADDR_H_REG,
			  upper_32_bits(eq->qbuf_dma_addr));
	erdma_reg_write32(dev, ERDMA_REGS_CMDQ_EQ_ADDR_L_REG,
			  lower_32_bits(eq->qbuf_dma_addr));
	erdma_reg_write32(dev, ERDMA_REGS_CMDQ_EQ_DEPTH_REG, eq->depth);
	erdma_reg_write64(dev, ERDMA_CMDQ_EQ_DB_HOST_ADDR_REG, eq->dbrec_dma);

	return 0;

err_out:
	dma_free_coherent(&dev->pdev->dev, eq->depth << EQE_SHIFT, eq->qbuf,
			  eq->qbuf_dma_addr);

	return -ENOMEM;
}
1912af541bfSCheng Xu
/* Initialize the command queue: wait-context pool, SQ, CQ and EQ.
 * The cmdq starts in polling mode (use_event == false); event mode is
 * switched on later by erdma_finish_cmdq_init().
 * Returns 0 on success or a negative errno; on failure, everything
 * allocated so far is unwound (wait-pool resources are devm-managed and
 * need no explicit cleanup).
 */
int erdma_cmdq_init(struct erdma_dev *dev)
{
	struct erdma_cmdq *cmdq = &dev->cmdq;
	int err;

	cmdq->max_outstandings = ERDMA_CMDQ_MAX_OUTSTANDING;
	cmdq->use_event = false;

	/* One semaphore credit per outstanding-command slot. */
	sema_init(&cmdq->credits, cmdq->max_outstandings);

	err = erdma_cmdq_wait_res_init(dev, cmdq);
	if (err)
		return err;

	err = erdma_cmdq_sq_init(dev);
	if (err)
		return err;

	err = erdma_cmdq_cq_init(dev);
	if (err)
		goto err_destroy_sq;

	err = erdma_cmdq_eq_init(dev);
	if (err)
		goto err_destroy_cq;

	set_bit(ERDMA_CMDQ_STATE_OK_BIT, &cmdq->state);

	return 0;

err_destroy_cq:
	dma_free_coherent(&dev->pdev->dev, cmdq->cq.depth << CQE_SHIFT,
			  cmdq->cq.qbuf, cmdq->cq.qbuf_dma_addr);

	dma_pool_free(dev->db_pool, cmdq->cq.dbrec, cmdq->cq.dbrec_dma);

err_destroy_sq:
	dma_free_coherent(&dev->pdev->dev, cmdq->sq.depth << SQEBB_SHIFT,
			  cmdq->sq.qbuf, cmdq->sq.qbuf_dma_addr);

	dma_pool_free(dev->db_pool, cmdq->sq.dbrec, cmdq->sq.dbrec_dma);

	return err;
}
2362af541bfSCheng Xu
/* Switch the cmdq from polling mode to event (interrupt-driven) mode and
 * arm the command CQ so the first completion raises an event.
 */
void erdma_finish_cmdq_init(struct erdma_dev *dev)
{
	/* after device init successfully, change cmdq to event mode. */
	dev->cmdq.use_event = true;
	arm_cmdq_cq(&dev->cmdq);
}
2432af541bfSCheng Xu
/* Tear down the command queue: mark it not-OK so no new commands are
 * posted, then free the EQ, SQ and CQ ring buffers and their doorbell
 * records.  The wait pool and bitmap are devm-managed and freed with the
 * device.
 */
void erdma_cmdq_destroy(struct erdma_dev *dev)
{
	struct erdma_cmdq *cmdq = &dev->cmdq;

	clear_bit(ERDMA_CMDQ_STATE_OK_BIT, &cmdq->state);

	dma_free_coherent(&dev->pdev->dev, cmdq->eq.depth << EQE_SHIFT,
			  cmdq->eq.qbuf, cmdq->eq.qbuf_dma_addr);

	dma_pool_free(dev->db_pool, cmdq->eq.dbrec, cmdq->eq.dbrec_dma);

	dma_free_coherent(&dev->pdev->dev, cmdq->sq.depth << SQEBB_SHIFT,
			  cmdq->sq.qbuf, cmdq->sq.qbuf_dma_addr);

	dma_pool_free(dev->db_pool, cmdq->sq.dbrec, cmdq->sq.dbrec_dma);

	dma_free_coherent(&dev->pdev->dev, cmdq->cq.depth << CQE_SHIFT,
			  cmdq->cq.qbuf, cmdq->cq.qbuf_dma_addr);

	dma_pool_free(dev->db_pool, cmdq->cq.dbrec, cmdq->cq.dbrec_dma);
}
2652af541bfSCheng Xu
get_next_valid_cmdq_cqe(struct erdma_cmdq * cmdq)2662af541bfSCheng Xu static void *get_next_valid_cmdq_cqe(struct erdma_cmdq *cmdq)
2672af541bfSCheng Xu {
2682af541bfSCheng Xu __be32 *cqe = get_queue_entry(cmdq->cq.qbuf, cmdq->cq.ci,
2692af541bfSCheng Xu cmdq->cq.depth, CQE_SHIFT);
2702af541bfSCheng Xu u32 owner = FIELD_GET(ERDMA_CQE_HDR_OWNER_MASK,
271de19ec77SCheng Xu be32_to_cpu(READ_ONCE(*cqe)));
2722af541bfSCheng Xu
2732af541bfSCheng Xu return owner ^ !!(cmdq->cq.ci & cmdq->cq.depth) ? cqe : NULL;
2742af541bfSCheng Xu }
2752af541bfSCheng Xu
/* Write one command SQE into the SQ ring and ring the SQ doorbell.
 * Caller must hold cmdq->sq.lock (see erdma_post_cmd_wait()).
 * @req points at the request; its first 8 bytes are the command header,
 * which is re-written below with the new WQEBB index, the wait-context
 * cookie and the WQEBB count before being stored into the ring.
 */
static void push_cmdq_sqe(struct erdma_cmdq *cmdq, u64 *req, size_t req_len,
			  struct erdma_comp_wait *comp_wait)
{
	__le64 *wqe;
	u64 hdr = *req;

	comp_wait->cmd_status = ERDMA_CMD_STATUS_ISSUED;
	reinit_completion(&comp_wait->wait_event);
	comp_wait->sq_pi = cmdq->sq.pi;

	wqe = get_queue_entry(cmdq->sq.qbuf, cmdq->sq.pi, cmdq->sq.depth,
			      SQEBB_SHIFT);
	memcpy(wqe, req, req_len);

	cmdq->sq.pi += cmdq->sq.wqebb_cnt;
	/* Fold the advanced PI, the context cookie (recovered from the SQE
	 * at completion time) and the WQEBB count into the header, and
	 * store it last, overwriting the copy made by memcpy() above.
	 */
	hdr |= FIELD_PREP(ERDMA_CMD_HDR_WQEBB_INDEX_MASK, cmdq->sq.pi) |
	       FIELD_PREP(ERDMA_CMD_HDR_CONTEXT_COOKIE_MASK,
			  comp_wait->ctx_id) |
	       FIELD_PREP(ERDMA_CMD_HDR_WQEBB_CNT_MASK, cmdq->sq.wqebb_cnt - 1);
	*wqe = cpu_to_le64(hdr);

	kick_cmdq_db(cmdq);
}
2992af541bfSCheng Xu
/* Consume one CQE from the command CQ, look up the issuing wait context
 * via the cookie stored in the originating SQE, and record the
 * completion status and data.
 * Caller must hold cmdq->cq.lock (see erdma_polling_cmd_completions()).
 * Returns -EAGAIN when no valid CQE is available, -EIO when the CQE does
 * not correspond to an in-flight command, 0 on success.
 */
static int erdma_poll_single_cmd_completion(struct erdma_cmdq *cmdq)
{
	struct erdma_comp_wait *comp_wait;
	u32 hdr0, sqe_idx;
	__be32 *cqe;
	u16 ctx_id;
	u64 *sqe;

	cqe = get_next_valid_cmdq_cqe(cmdq);
	if (!cqe)
		return -EAGAIN;

	cmdq->cq.ci++;

	/* Read the CQE payload only after the owner-bit check above. */
	dma_rmb();
	hdr0 = be32_to_cpu(*cqe);
	sqe_idx = be32_to_cpu(*(cqe + 1));

	/* The CQE carries the SQE index; the SQE header holds the cookie
	 * identifying the waiting context.
	 */
	sqe = get_queue_entry(cmdq->sq.qbuf, sqe_idx, cmdq->sq.depth,
			      SQEBB_SHIFT);
	ctx_id = FIELD_GET(ERDMA_CMD_HDR_CONTEXT_COOKIE_MASK, *sqe);
	comp_wait = &cmdq->wait_pool[ctx_id];
	if (comp_wait->cmd_status != ERDMA_CMD_STATUS_ISSUED)
		return -EIO;

	comp_wait->cmd_status = ERDMA_CMD_STATUS_FINISHED;
	comp_wait->comp_status = FIELD_GET(ERDMA_CQE_HDR_SYNDROME_MASK, hdr0);
	cmdq->sq.ci += cmdq->sq.wqebb_cnt;
	/* Copy 16B comp data after cqe hdr to outer */
	be32_to_cpu_array(comp_wait->comp_data, cqe + 2, 4);

	if (cmdq->use_event)
		complete(&comp_wait->wait_event);

	return 0;
}
3362af541bfSCheng Xu
erdma_polling_cmd_completions(struct erdma_cmdq * cmdq)3372af541bfSCheng Xu static void erdma_polling_cmd_completions(struct erdma_cmdq *cmdq)
3382af541bfSCheng Xu {
3392af541bfSCheng Xu unsigned long flags;
3402af541bfSCheng Xu u16 comp_num;
3412af541bfSCheng Xu
3422af541bfSCheng Xu spin_lock_irqsave(&cmdq->cq.lock, flags);
3432af541bfSCheng Xu
3442af541bfSCheng Xu /* We must have less than # of max_outstandings
3452af541bfSCheng Xu * completions at one time.
3462af541bfSCheng Xu */
3472af541bfSCheng Xu for (comp_num = 0; comp_num < cmdq->max_outstandings; comp_num++)
3482af541bfSCheng Xu if (erdma_poll_single_cmd_completion(cmdq))
3492af541bfSCheng Xu break;
3502af541bfSCheng Xu
3512af541bfSCheng Xu if (comp_num && cmdq->use_event)
3522af541bfSCheng Xu arm_cmdq_cq(cmdq);
3532af541bfSCheng Xu
3542af541bfSCheng Xu spin_unlock_irqrestore(&cmdq->cq.lock, flags);
3552af541bfSCheng Xu }
3562af541bfSCheng Xu
erdma_cmdq_completion_handler(struct erdma_cmdq * cmdq)3572af541bfSCheng Xu void erdma_cmdq_completion_handler(struct erdma_cmdq *cmdq)
3582af541bfSCheng Xu {
3592af541bfSCheng Xu int got_event = 0;
3602af541bfSCheng Xu
3612af541bfSCheng Xu if (!test_bit(ERDMA_CMDQ_STATE_OK_BIT, &cmdq->state) ||
3622af541bfSCheng Xu !cmdq->use_event)
3632af541bfSCheng Xu return;
3642af541bfSCheng Xu
3652af541bfSCheng Xu while (get_next_valid_eqe(&cmdq->eq)) {
3662af541bfSCheng Xu cmdq->eq.ci++;
3672af541bfSCheng Xu got_event++;
3682af541bfSCheng Xu }
3692af541bfSCheng Xu
3702af541bfSCheng Xu if (got_event) {
3712af541bfSCheng Xu cmdq->cq.cmdsn++;
3722af541bfSCheng Xu erdma_polling_cmd_completions(cmdq);
3732af541bfSCheng Xu }
3742af541bfSCheng Xu
3752af541bfSCheng Xu notify_eq(&cmdq->eq);
3762af541bfSCheng Xu }
3772af541bfSCheng Xu
/* Busy-wait (with 20 ms sleeps) for @comp_ctx to leave the ISSUED state
 * by repeatedly polling the command CQ.
 * Returns 0 when the command completed, -ETIME after @timeout ms.
 */
static int erdma_poll_cmd_completion(struct erdma_comp_wait *comp_ctx,
				     struct erdma_cmdq *cmdq, u32 timeout)
{
	unsigned long expire = jiffies + msecs_to_jiffies(timeout);

	for (;;) {
		erdma_polling_cmd_completions(cmdq);
		if (comp_ctx->cmd_status != ERDMA_CMD_STATUS_ISSUED)
			return 0;

		if (time_is_before_jiffies(expire))
			return -ETIME;

		msleep(20);
	}
}
3962af541bfSCheng Xu
/* Sleep on @comp_ctx's completion for up to @timeout ms (event mode).
 * On timeout the context is marked TIMEOUT under cq.lock, so the CQ
 * reaper sees a consistent state.
 * Returns 0 when the command finished, -ETIME otherwise.
 */
static int erdma_wait_cmd_completion(struct erdma_comp_wait *comp_ctx,
				     struct erdma_cmdq *cmdq, u32 timeout)
{
	unsigned long flags;

	wait_for_completion_timeout(&comp_ctx->wait_event,
				    msecs_to_jiffies(timeout));

	if (likely(comp_ctx->cmd_status == ERDMA_CMD_STATUS_FINISHED))
		return 0;

	spin_lock_irqsave(&cmdq->cq.lock, flags);
	comp_ctx->cmd_status = ERDMA_CMD_STATUS_TIMEOUT;
	spin_unlock_irqrestore(&cmdq->cq.lock, flags);

	return -ETIME;
}
4142af541bfSCheng Xu
/* Compose a command request header from the sub-module and opcode. */
void erdma_cmdq_build_reqhdr(u64 *hdr, u32 mod, u32 op)
{
	u64 val = FIELD_PREP(ERDMA_CMD_HDR_SUB_MOD_MASK, mod);

	val |= FIELD_PREP(ERDMA_CMD_HDR_OPCODE_MASK, op);
	*hdr = val;
}
4202af541bfSCheng Xu
/* Post a command to the cmdq and wait for its completion.
 * @req/@req_size: the command request, starting with the 8-byte header.
 * @resp0/@resp1: optional; when both are non-NULL they receive the 16
 * bytes of completion data.
 * Returns 0 on success, -ENODEV when the cmdq is not operational, -ETIME
 * on timeout, -EIO when the device reported a non-zero syndrome.
 */
int erdma_post_cmd_wait(struct erdma_cmdq *cmdq, void *req, u32 req_size,
			u64 *resp0, u64 *resp1)
{
	struct erdma_comp_wait *comp_wait;
	int ret;

	if (!test_bit(ERDMA_CMDQ_STATE_OK_BIT, &cmdq->state))
		return -ENODEV;

	/* Take one credit; bounds in-flight commands to max_outstandings. */
	down(&cmdq->credits);

	comp_wait = get_comp_wait(cmdq);
	if (IS_ERR(comp_wait)) {
		/* A credit was held but no context was free: internal
		 * inconsistency, so mark the cmdq broken.
		 */
		clear_bit(ERDMA_CMDQ_STATE_OK_BIT, &cmdq->state);
		set_bit(ERDMA_CMDQ_STATE_CTX_ERR_BIT, &cmdq->state);
		up(&cmdq->credits);
		return PTR_ERR(comp_wait);
	}

	spin_lock(&cmdq->sq.lock);
	push_cmdq_sqe(cmdq, req, req_size, comp_wait);
	spin_unlock(&cmdq->sq.lock);

	if (cmdq->use_event)
		ret = erdma_wait_cmd_completion(comp_wait, cmdq,
						ERDMA_CMDQ_TIMEOUT_MS);
	else
		ret = erdma_poll_cmd_completion(comp_wait, cmdq,
						ERDMA_CMDQ_TIMEOUT_MS);

	if (ret) {
		/* Timed out: mark the cmdq broken.  The wait context is not
		 * returned to the pool here (NOTE(review): presumably so a
		 * late completion cannot collide with a reused context —
		 * confirm; the queue is unusable from this point anyway).
		 */
		set_bit(ERDMA_CMDQ_STATE_TIMEOUT_BIT, &cmdq->state);
		clear_bit(ERDMA_CMDQ_STATE_OK_BIT, &cmdq->state);
		goto out;
	}

	if (comp_wait->comp_status)
		ret = -EIO;

	if (resp0 && resp1) {
		*resp0 = *((u64 *)&comp_wait->comp_data[0]);
		*resp1 = *((u64 *)&comp_wait->comp_data[2]);
	}
	put_comp_wait(cmdq, comp_wait);

out:
	up(&cmdq->credits);

	return ret;
}
471