// SPDX-License-Identifier: GPL-2.0+
// Copyright (c) 2021-2021 Hisilicon Limited.

#include "hnae3.h"
#include "hclge_comm_cmd.h"

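/* Program one command queue ring (CSQ or CRQ) into hardware: base address,
 * depth and initial head/tail pointers. For the CSQ the software reset
 * ready bit in the depth register is preserved.
 */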
static void hclge_comm_cmd_config_regs(struct hclge_comm_hw *hw,
				       struct hclge_comm_cmq_ring *ring)
{
	dma_addr_t dma = ring->desc_dma_addr;
	u32 reg_val;

	if (ring->ring_type == HCLGE_COMM_TYPE_CSQ) {
		hclge_comm_write_dev(hw, HCLGE_COMM_NIC_CSQ_BASEADDR_L_REG,
				     lower_32_bits(dma));
		hclge_comm_write_dev(hw, HCLGE_COMM_NIC_CSQ_BASEADDR_H_REG,
				     upper_32_bits(dma));
		reg_val = hclge_comm_read_dev(hw, HCLGE_COMM_NIC_CSQ_DEPTH_REG);
		reg_val &= HCLGE_COMM_NIC_SW_RST_RDY;
		reg_val |= ring->desc_num >> HCLGE_COMM_NIC_CMQ_DESC_NUM_S;
		hclge_comm_write_dev(hw, HCLGE_COMM_NIC_CSQ_DEPTH_REG, reg_val);
		hclge_comm_write_dev(hw, HCLGE_COMM_NIC_CSQ_HEAD_REG, 0);
		hclge_comm_write_dev(hw, HCLGE_COMM_NIC_CSQ_TAIL_REG, 0);
	} else {
		hclge_comm_write_dev(hw, HCLGE_COMM_NIC_CRQ_BASEADDR_L_REG,
				     lower_32_bits(dma));
		hclge_comm_write_dev(hw, HCLGE_COMM_NIC_CRQ_BASEADDR_H_REG,
				     upper_32_bits(dma));
		reg_val = ring->desc_num >> HCLGE_COMM_NIC_CMQ_DESC_NUM_S;
		hclge_comm_write_dev(hw, HCLGE_COMM_NIC_CRQ_DEPTH_REG, reg_val);
		hclge_comm_write_dev(hw, HCLGE_COMM_NIC_CRQ_HEAD_REG, 0);
		hclge_comm_write_dev(hw, HCLGE_COMM_NIC_CRQ_TAIL_REG, 0);
	}
}

void hclge_comm_cmd_init_regs(struct hclge_comm_hw *hw)
{
	hclge_comm_cmd_config_regs(hw, &hw->cmq.csq);
	hclge_comm_cmd_config_regs(hw, &hw->cmq.crq);
}

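/* Reset the flags of an already used descriptor so it can be sent again;
 * only the direction bit (HCLGE_COMM_CMD_FLAG_WR) depends on whether the
 * reused command is a read or a write.
 */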
void hclge_comm_cmd_reuse_desc(struct hclge_desc *desc, bool is_read)
{
	desc->flag = cpu_to_le16(HCLGE_COMM_CMD_FLAG_NO_INTR |
				 HCLGE_COMM_CMD_FLAG_IN);
	if (is_read)
		desc->flag |= cpu_to_le16(HCLGE_COMM_CMD_FLAG_WR);
	else
		desc->flag &= cpu_to_le16(~HCLGE_COMM_CMD_FLAG_WR);
}

static void hclge_comm_set_default_capability(struct hnae3_ae_dev *ae_dev,
					      bool is_pf)
{
	set_bit(HNAE3_DEV_SUPPORT_GRO_B, ae_dev->caps);
	if (is_pf) {
		set_bit(HNAE3_DEV_SUPPORT_FD_B, ae_dev->caps);
		set_bit(HNAE3_DEV_SUPPORT_FEC_B, ae_dev->caps);
		set_bit(HNAE3_DEV_SUPPORT_PAUSE_B, ae_dev->caps);
	}
}

void hclge_comm_cmd_setup_basic_desc(struct hclge_desc *desc,
				     enum hclge_opcode_type opcode,
				     bool is_read)
{
	memset((void *)desc, 0, sizeof(struct hclge_desc));
	desc->opcode = cpu_to_le16(opcode);
	desc->flag = cpu_to_le16(HCLGE_COMM_CMD_FLAG_NO_INTR |
				 HCLGE_COMM_CMD_FLAG_IN);

	if (is_read)
		desc->flag |= cpu_to_le16(HCLGE_COMM_CMD_FLAG_WR);
}

int hclge_comm_firmware_compat_config(struct hnae3_ae_dev *ae_dev,
				      struct hclge_comm_hw *hw, bool en)
{
	struct hclge_comm_firmware_compat_cmd *req;
	struct hclge_desc desc;
	u32 compat = 0;

	hclge_comm_cmd_setup_basic_desc(&desc, HCLGE_OPC_IMP_COMPAT_CFG, false);

	if (en) {
		req = (struct hclge_comm_firmware_compat_cmd *)desc.data;

		hnae3_set_bit(compat, HCLGE_COMM_LINK_EVENT_REPORT_EN_B, 1);
		hnae3_set_bit(compat, HCLGE_COMM_NCSI_ERROR_REPORT_EN_B, 1);
		if (hclge_comm_dev_phy_imp_supported(ae_dev))
			hnae3_set_bit(compat, HCLGE_COMM_PHY_IMP_EN_B, 1);
		hnae3_set_bit(compat, HCLGE_COMM_MAC_STATS_EXT_EN_B, 1);
		hnae3_set_bit(compat, HCLGE_COMM_SYNC_RX_RING_HEAD_EN_B, 1);
		hnae3_set_bit(compat, HCLGE_COMM_LLRS_FEC_EN_B, 1);

		req->compat = cpu_to_le32(compat);
	}

	return hclge_comm_cmd_send(hw, &desc, 1);
}

void hclge_comm_free_cmd_desc(struct hclge_comm_cmq_ring *ring)
{
	int size = ring->desc_num * sizeof(struct hclge_desc);

	if (!ring->desc)
		return;

	dma_free_coherent(&ring->pdev->dev, size,
			  ring->desc, ring->desc_dma_addr);
	ring->desc = NULL;
}

static int hclge_comm_alloc_cmd_desc(struct hclge_comm_cmq_ring *ring)
{
	int size = ring->desc_num * sizeof(struct hclge_desc);

	ring->desc = dma_alloc_coherent(&ring->pdev->dev,
					size, &ring->desc_dma_addr, GFP_KERNEL);
	if (!ring->desc)
		return -ENOMEM;

	return 0;
}

static __le32 hclge_comm_build_api_caps(void)
{
	u32 api_caps = 0;

	hnae3_set_bit(api_caps, HCLGE_COMM_API_CAP_FLEX_RSS_TBL_B, 1);

	return cpu_to_le32(api_caps);
}

static const struct hclge_comm_caps_bit_map hclge_pf_cmd_caps[] = {
	{HCLGE_COMM_CAP_UDP_GSO_B, HNAE3_DEV_SUPPORT_UDP_GSO_B},
	{HCLGE_COMM_CAP_PTP_B, HNAE3_DEV_SUPPORT_PTP_B},
	{HCLGE_COMM_CAP_INT_QL_B, HNAE3_DEV_SUPPORT_INT_QL_B},
	{HCLGE_COMM_CAP_TQP_TXRX_INDEP_B, HNAE3_DEV_SUPPORT_TQP_TXRX_INDEP_B},
	{HCLGE_COMM_CAP_HW_TX_CSUM_B, HNAE3_DEV_SUPPORT_HW_TX_CSUM_B},
	{HCLGE_COMM_CAP_UDP_TUNNEL_CSUM_B, HNAE3_DEV_SUPPORT_UDP_TUNNEL_CSUM_B},
	{HCLGE_COMM_CAP_FD_FORWARD_TC_B, HNAE3_DEV_SUPPORT_FD_FORWARD_TC_B},
	{HCLGE_COMM_CAP_FEC_B, HNAE3_DEV_SUPPORT_FEC_B},
	{HCLGE_COMM_CAP_PAUSE_B, HNAE3_DEV_SUPPORT_PAUSE_B},
	{HCLGE_COMM_CAP_PHY_IMP_B, HNAE3_DEV_SUPPORT_PHY_IMP_B},
	{HCLGE_COMM_CAP_QB_B, HNAE3_DEV_SUPPORT_QB_B},
	{HCLGE_COMM_CAP_TX_PUSH_B, HNAE3_DEV_SUPPORT_TX_PUSH_B},
	{HCLGE_COMM_CAP_RAS_IMP_B, HNAE3_DEV_SUPPORT_RAS_IMP_B},
	{HCLGE_COMM_CAP_RXD_ADV_LAYOUT_B, HNAE3_DEV_SUPPORT_RXD_ADV_LAYOUT_B},
	{HCLGE_COMM_CAP_PORT_VLAN_BYPASS_B,
	 HNAE3_DEV_SUPPORT_PORT_VLAN_BYPASS_B},
	{HCLGE_COMM_CAP_PORT_VLAN_BYPASS_B, HNAE3_DEV_SUPPORT_VLAN_FLTR_MDF_B},
	{HCLGE_COMM_CAP_CQ_B, HNAE3_DEV_SUPPORT_CQ_B},
	{HCLGE_COMM_CAP_GRO_B, HNAE3_DEV_SUPPORT_GRO_B},
	{HCLGE_COMM_CAP_FD_B, HNAE3_DEV_SUPPORT_FD_B},
	{HCLGE_COMM_CAP_FEC_STATS_B, HNAE3_DEV_SUPPORT_FEC_STATS_B},
	{HCLGE_COMM_CAP_LANE_NUM_B, HNAE3_DEV_SUPPORT_LANE_NUM_B},
	{HCLGE_COMM_CAP_WOL_B, HNAE3_DEV_SUPPORT_WOL_B},
	{HCLGE_COMM_CAP_TM_FLUSH_B, HNAE3_DEV_SUPPORT_TM_FLUSH_B},
	{HCLGE_COMM_CAP_VF_FAULT_B, HNAE3_DEV_SUPPORT_VF_FAULT_B},
	{HCLGE_COMM_CAP_ERR_MOD_GEN_REG_B, HNAE3_DEV_SUPPORT_ERR_MOD_GEN_REG_B},
};

static const struct hclge_comm_caps_bit_map hclge_vf_cmd_caps[] = {
	{HCLGE_COMM_CAP_UDP_GSO_B, HNAE3_DEV_SUPPORT_UDP_GSO_B},
	{HCLGE_COMM_CAP_INT_QL_B, HNAE3_DEV_SUPPORT_INT_QL_B},
	{HCLGE_COMM_CAP_TQP_TXRX_INDEP_B, HNAE3_DEV_SUPPORT_TQP_TXRX_INDEP_B},
	{HCLGE_COMM_CAP_HW_TX_CSUM_B, HNAE3_DEV_SUPPORT_HW_TX_CSUM_B},
	{HCLGE_COMM_CAP_UDP_TUNNEL_CSUM_B, HNAE3_DEV_SUPPORT_UDP_TUNNEL_CSUM_B},
	{HCLGE_COMM_CAP_QB_B, HNAE3_DEV_SUPPORT_QB_B},
	{HCLGE_COMM_CAP_TX_PUSH_B, HNAE3_DEV_SUPPORT_TX_PUSH_B},
	{HCLGE_COMM_CAP_RXD_ADV_LAYOUT_B, HNAE3_DEV_SUPPORT_RXD_ADV_LAYOUT_B},
	{HCLGE_COMM_CAP_CQ_B, HNAE3_DEV_SUPPORT_CQ_B},
	{HCLGE_COMM_CAP_GRO_B, HNAE3_DEV_SUPPORT_GRO_B},
};

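/* Convert the little-endian capability words returned by the firmware into
 * a CPU bitmap so individual feature bits can be tested with test_bit().
 */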
static void
hclge_comm_capability_to_bitmap(unsigned long *bitmap, __le32 *caps)
{
	const unsigned int words = HCLGE_COMM_QUERY_CAP_LENGTH;
	u32 val[HCLGE_COMM_QUERY_CAP_LENGTH];
	unsigned int i;

	for (i = 0; i < words; i++)
		val[i] = __le32_to_cpu(caps[i]);

	bitmap_from_arr32(bitmap, val,
			  HCLGE_COMM_QUERY_CAP_LENGTH * BITS_PER_TYPE(u32));
}

static void
hclge_comm_parse_capability(struct hnae3_ae_dev *ae_dev, bool is_pf,
			    struct hclge_comm_query_version_cmd *cmd)
{
	const struct hclge_comm_caps_bit_map *caps_map =
				is_pf ? hclge_pf_cmd_caps : hclge_vf_cmd_caps;
	u32 size = is_pf ? ARRAY_SIZE(hclge_pf_cmd_caps) :
				ARRAY_SIZE(hclge_vf_cmd_caps);
	DECLARE_BITMAP(caps, HCLGE_COMM_QUERY_CAP_LENGTH * BITS_PER_TYPE(u32));
	u32 i;

	hclge_comm_capability_to_bitmap(caps, cmd->caps);
	for (i = 0; i < size; i++)
		if (test_bit(caps_map[i].imp_bit, caps))
			set_bit(caps_map[i].local_bit, ae_dev->caps);
}

int hclge_comm_alloc_cmd_queue(struct hclge_comm_hw *hw, int ring_type)
{
	struct hclge_comm_cmq_ring *ring =
		(ring_type == HCLGE_COMM_TYPE_CSQ) ? &hw->cmq.csq :
						     &hw->cmq.crq;
	int ret;

	ring->ring_type = ring_type;

	ret = hclge_comm_alloc_cmd_desc(ring);
	if (ret)
		dev_err(&ring->pdev->dev, "descriptor %s alloc error %d\n",
			(ring_type == HCLGE_COMM_TYPE_CSQ) ? "CSQ" : "CRQ",
			ret);

	return ret;
}

int hclge_comm_cmd_query_version_and_capability(struct hnae3_ae_dev *ae_dev,
						struct hclge_comm_hw *hw,
						u32 *fw_version, bool is_pf)
{
	struct hclge_comm_query_version_cmd *resp;
	struct hclge_desc desc;
	int ret;

	hclge_comm_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_FW_VER, 1);
	resp = (struct hclge_comm_query_version_cmd *)desc.data;
	resp->api_caps = hclge_comm_build_api_caps();

	ret = hclge_comm_cmd_send(hw, &desc, 1);
	if (ret)
		return ret;

	*fw_version = le32_to_cpu(resp->firmware);

	ae_dev->dev_version = le32_to_cpu(resp->hardware) <<
					  HNAE3_PCI_REVISION_BIT_SIZE;
	ae_dev->dev_version |= ae_dev->pdev->revision;

	if (ae_dev->dev_version == HNAE3_DEVICE_VERSION_V2) {
		hclge_comm_set_default_capability(ae_dev, is_pf);
		return 0;
	}

	hclge_comm_parse_capability(ae_dev, is_pf, resp);

	return ret;
}

static const u16 spec_opcode[] = { HCLGE_OPC_STATS_64_BIT,
				   HCLGE_OPC_STATS_32_BIT,
				   HCLGE_OPC_STATS_MAC,
				   HCLGE_OPC_STATS_MAC_ALL,
				   HCLGE_OPC_QUERY_32_BIT_REG,
				   HCLGE_OPC_QUERY_64_BIT_REG,
				   HCLGE_QUERY_CLEAR_MPF_RAS_INT,
				   HCLGE_QUERY_CLEAR_PF_RAS_INT,
				   HCLGE_QUERY_CLEAR_ALL_MPF_MSIX_INT,
				   HCLGE_QUERY_CLEAR_ALL_PF_MSIX_INT,
				   HCLGE_QUERY_ALL_ERR_INFO };

static bool hclge_comm_is_special_opcode(u16 opcode)
{
	/* these commands have several descriptors,
	 * and use the first one to save opcode and return value
	 */
	u32 i;

	for (i = 0; i < ARRAY_SIZE(spec_opcode); i++)
		if (spec_opcode[i] == opcode)
			return true;

	return false;
}

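/* Number of free descriptors in the ring. One slot is always kept unused so
 * that a completely full ring can be told apart from an empty one.
 */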
static int hclge_comm_ring_space(struct hclge_comm_cmq_ring *ring)
{
	int ntc = ring->next_to_clean;
	int ntu = ring->next_to_use;
	int used = (ntu - ntc + ring->desc_num) % ring->desc_num;

	return ring->desc_num - used - 1;
}

static void hclge_comm_cmd_copy_desc(struct hclge_comm_hw *hw,
				     struct hclge_desc *desc, int num)
{
	struct hclge_desc *desc_to_use;
	int handle = 0;

	while (handle < num) {
		desc_to_use = &hw->cmq.csq.desc[hw->cmq.csq.next_to_use];
		*desc_to_use = desc[handle];
		(hw->cmq.csq.next_to_use)++;
		if (hw->cmq.csq.next_to_use >= hw->cmq.csq.desc_num)
			hw->cmq.csq.next_to_use = 0;
		handle++;
	}
}

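/* The head value reported by hardware is only trusted if it lies between
 * next_to_clean and next_to_use, taking ring wrap-around into account.
 */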
static int hclge_comm_is_valid_csq_clean_head(struct hclge_comm_cmq_ring *ring,
					      int head)
{
	int ntc = ring->next_to_clean;
	int ntu = ring->next_to_use;

	if (ntu > ntc)
		return head >= ntc && head <= ntu;

	return head >= ntc || head <= ntu;
}

static int hclge_comm_cmd_csq_clean(struct hclge_comm_hw *hw)
{
	struct hclge_comm_cmq_ring *csq = &hw->cmq.csq;
	int clean;
	u32 head;

	head = hclge_comm_read_dev(hw, HCLGE_COMM_NIC_CSQ_HEAD_REG);
	rmb(); /* Make sure head is ready before touch any data */

	if (!hclge_comm_is_valid_csq_clean_head(csq, head)) {
		dev_warn(&hw->cmq.csq.pdev->dev, "wrong cmd head (%u, %d-%d)\n",
			 head, csq->next_to_use, csq->next_to_clean);
		dev_warn(&hw->cmq.csq.pdev->dev,
			 "Disabling any further commands to IMP firmware\n");
		set_bit(HCLGE_COMM_STATE_CMD_DISABLE, &hw->comm_state);
		dev_warn(&hw->cmq.csq.pdev->dev,
			 "IMP firmware watchdog reset soon expected!\n");
		return -EIO;
	}

	clean = (head - csq->next_to_clean + csq->desc_num) % csq->desc_num;
	csq->next_to_clean = head;
	return clean;
}

static int hclge_comm_cmd_csq_done(struct hclge_comm_hw *hw)
{
	u32 head = hclge_comm_read_dev(hw, HCLGE_COMM_NIC_CSQ_HEAD_REG);
	return head == hw->cmq.csq.next_to_use;
}

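/* Most commands use the per-queue default timeout; a few opcodes, such as
 * the reset trigger, use an opcode-specific timeout instead.
 */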
static u32 hclge_get_cmdq_tx_timeout(u16 opcode, u32 tx_timeout)
{
	static const struct hclge_cmdq_tx_timeout_map cmdq_tx_timeout_map[] = {
		{HCLGE_OPC_CFG_RST_TRIGGER, HCLGE_COMM_CMDQ_CFG_RST_TIMEOUT},
	};
	u32 i;

	for (i = 0; i < ARRAY_SIZE(cmdq_tx_timeout_map); i++)
		if (cmdq_tx_timeout_map[i].opcode == opcode)
			return cmdq_tx_timeout_map[i].tx_timeout;

	return tx_timeout;
}

static void hclge_comm_wait_for_resp(struct hclge_comm_hw *hw, u16 opcode,
				     bool *is_completed)
{
	u32 cmdq_tx_timeout = hclge_get_cmdq_tx_timeout(opcode,
							hw->cmq.tx_timeout);
	u32 timeout = 0;

	do {
		if (hclge_comm_cmd_csq_done(hw)) {
			*is_completed = true;
			break;
		}
		udelay(1);
		timeout++;
	} while (timeout < cmdq_tx_timeout);
}

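/* Map the return code written back by the IMP firmware to a standard errno. */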
static int hclge_comm_cmd_convert_err_code(u16 desc_ret)
{
	struct hclge_comm_errcode hclge_comm_cmd_errcode[] = {
		{ HCLGE_COMM_CMD_EXEC_SUCCESS, 0 },
		{ HCLGE_COMM_CMD_NO_AUTH, -EPERM },
		{ HCLGE_COMM_CMD_NOT_SUPPORTED, -EOPNOTSUPP },
		{ HCLGE_COMM_CMD_QUEUE_FULL, -EXFULL },
		{ HCLGE_COMM_CMD_NEXT_ERR, -ENOSR },
		{ HCLGE_COMM_CMD_UNEXE_ERR, -ENOTBLK },
		{ HCLGE_COMM_CMD_PARA_ERR, -EINVAL },
		{ HCLGE_COMM_CMD_RESULT_ERR, -ERANGE },
		{ HCLGE_COMM_CMD_TIMEOUT, -ETIME },
		{ HCLGE_COMM_CMD_HILINK_ERR, -ENOLINK },
		{ HCLGE_COMM_CMD_QUEUE_ILLEGAL, -ENXIO },
		{ HCLGE_COMM_CMD_INVALID, -EBADR },
	};
	u32 errcode_count = ARRAY_SIZE(hclge_comm_cmd_errcode);
	u32 i;

	for (i = 0; i < errcode_count; i++)
		if (hclge_comm_cmd_errcode[i].imp_errcode == desc_ret)
			return hclge_comm_cmd_errcode[i].common_errno;

	return -EIO;
}

static int hclge_comm_cmd_check_retval(struct hclge_comm_hw *hw,
				       struct hclge_desc *desc, int num,
				       int ntc)
{
	u16 opcode, desc_ret;
	int handle;

	opcode = le16_to_cpu(desc[0].opcode);
	for (handle = 0; handle < num; handle++) {
		desc[handle] = hw->cmq.csq.desc[ntc];
		ntc++;
		if (ntc >= hw->cmq.csq.desc_num)
			ntc = 0;
	}
	if (likely(!hclge_comm_is_special_opcode(opcode)))
		desc_ret = le16_to_cpu(desc[num - 1].retval);
	else
		desc_ret = le16_to_cpu(desc[0].retval);

	hw->cmq.last_status = desc_ret;

	return hclge_comm_cmd_convert_err_code(desc_ret);
}

static int hclge_comm_cmd_check_result(struct hclge_comm_hw *hw,
				       struct hclge_desc *desc,
				       int num, int ntc)
{
	bool is_completed = false;
	int handle, ret;

	/* If the command is sync, wait for the firmware to write back;
	 * if multiple descriptors are sent, use the first one to check.
	 */
	if (HCLGE_COMM_SEND_SYNC(le16_to_cpu(desc->flag)))
		hclge_comm_wait_for_resp(hw, le16_to_cpu(desc->opcode),
					 &is_completed);

	if (!is_completed)
		ret = -EBADE;
	else
		ret = hclge_comm_cmd_check_retval(hw, desc, num, ntc);

	/* Clean the command send queue */
	handle = hclge_comm_cmd_csq_clean(hw);
	if (handle < 0)
		ret = handle;
	else if (handle != num)
		dev_warn(&hw->cmq.csq.pdev->dev,
			 "cleaned %d, need to clean %d\n", handle, num);
	return ret;
}

/**
 * hclge_comm_cmd_send - send command to command queue
 * @hw: pointer to the hw struct
 * @desc: prefilled descriptor for describing the command
 * @num: the number of descriptors to be sent
 *
 * This is the main send command for the command queue. It copies the
 * descriptors into the CSQ, updates the tail register and checks the result.
 **/
int hclge_comm_cmd_send(struct hclge_comm_hw *hw, struct hclge_desc *desc,
			int num)
{
	bool is_special = hclge_comm_is_special_opcode(le16_to_cpu(desc->opcode));
	struct hclge_comm_cmq_ring *csq = &hw->cmq.csq;
	int ret;
	int ntc;

	if (hw->cmq.ops.trace_cmd_send)
		hw->cmq.ops.trace_cmd_send(hw, desc, num, is_special);

	spin_lock_bh(&hw->cmq.csq.lock);

	if (test_bit(HCLGE_COMM_STATE_CMD_DISABLE, &hw->comm_state)) {
		spin_unlock_bh(&hw->cmq.csq.lock);
		return -EBUSY;
	}

	if (num > hclge_comm_ring_space(&hw->cmq.csq)) {
		/* If the CMDQ ring is full, the SW HEAD and HW HEAD may
		 * differ, so update the SW HEAD pointer csq->next_to_clean.
		 */
		csq->next_to_clean =
			hclge_comm_read_dev(hw, HCLGE_COMM_NIC_CSQ_HEAD_REG);
		spin_unlock_bh(&hw->cmq.csq.lock);
		return -EBUSY;
	}

	/* Record the location of desc in the ring for this time,
	 * which will be used by hardware to write back.
	 */
	ntc = hw->cmq.csq.next_to_use;

	hclge_comm_cmd_copy_desc(hw, desc, num);

	/* Write to hardware */
	hclge_comm_write_dev(hw, HCLGE_COMM_NIC_CSQ_TAIL_REG,
			     hw->cmq.csq.next_to_use);

	ret = hclge_comm_cmd_check_result(hw, desc, num, ntc);

	spin_unlock_bh(&hw->cmq.csq.lock);

	if (hw->cmq.ops.trace_cmd_get)
		hw->cmq.ops.trace_cmd_get(hw, desc, num, is_special);

	return ret;
}
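
/* Typical single-descriptor usage, as in
 * hclge_comm_cmd_query_version_and_capability() above (illustrative sketch
 * only, not a new code path):
 *
 *	struct hclge_desc desc;
 *	int ret;
 *
 *	hclge_comm_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_FW_VER, true);
 *	ret = hclge_comm_cmd_send(hw, &desc, 1);
 *	if (!ret)
 *		// parse the firmware's response from desc.data
 */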

static void hclge_comm_cmd_uninit_regs(struct hclge_comm_hw *hw)
{
	hclge_comm_write_dev(hw, HCLGE_COMM_NIC_CSQ_BASEADDR_L_REG, 0);
	hclge_comm_write_dev(hw, HCLGE_COMM_NIC_CSQ_BASEADDR_H_REG, 0);
	hclge_comm_write_dev(hw, HCLGE_COMM_NIC_CSQ_DEPTH_REG, 0);
	hclge_comm_write_dev(hw, HCLGE_COMM_NIC_CSQ_HEAD_REG, 0);
	hclge_comm_write_dev(hw, HCLGE_COMM_NIC_CSQ_TAIL_REG, 0);
	hclge_comm_write_dev(hw, HCLGE_COMM_NIC_CRQ_BASEADDR_L_REG, 0);
	hclge_comm_write_dev(hw, HCLGE_COMM_NIC_CRQ_BASEADDR_H_REG, 0);
	hclge_comm_write_dev(hw, HCLGE_COMM_NIC_CRQ_DEPTH_REG, 0);
	hclge_comm_write_dev(hw, HCLGE_COMM_NIC_CRQ_HEAD_REG, 0);
	hclge_comm_write_dev(hw, HCLGE_COMM_NIC_CRQ_TAIL_REG, 0);
}

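/* Tear down the command queue: disable the firmware compatible features,
 * block further commands, clear the queue registers and free the descriptor
 * memory.
 */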
void hclge_comm_cmd_uninit(struct hnae3_ae_dev *ae_dev,
			   struct hclge_comm_hw *hw)
{
	struct hclge_comm_cmq *cmdq = &hw->cmq;

	hclge_comm_firmware_compat_config(ae_dev, hw, false);
	set_bit(HCLGE_COMM_STATE_CMD_DISABLE, &hw->comm_state);

	/* Wait to ensure that the firmware completes any possible leftover
	 * commands.
	 */
	msleep(HCLGE_COMM_CMDQ_CLEAR_WAIT_TIME);
	spin_lock_bh(&cmdq->csq.lock);
	spin_lock(&cmdq->crq.lock);
	hclge_comm_cmd_uninit_regs(hw);
	spin_unlock(&cmdq->crq.lock);
	spin_unlock_bh(&cmdq->csq.lock);

	hclge_comm_free_cmd_desc(&cmdq->csq);
	hclge_comm_free_cmd_desc(&cmdq->crq);
}

int hclge_comm_cmd_queue_init(struct pci_dev *pdev, struct hclge_comm_hw *hw)
{
	struct hclge_comm_cmq *cmdq = &hw->cmq;
	int ret;

	/* Setup the lock for command queue */
	spin_lock_init(&cmdq->csq.lock);
	spin_lock_init(&cmdq->crq.lock);

	cmdq->csq.pdev = pdev;
	cmdq->crq.pdev = pdev;

	/* Setup the queue entries used by the command queue */
	cmdq->csq.desc_num = HCLGE_COMM_NIC_CMQ_DESC_NUM;
	cmdq->crq.desc_num = HCLGE_COMM_NIC_CMQ_DESC_NUM;

	/* Setup Tx write back timeout */
	cmdq->tx_timeout = HCLGE_COMM_CMDQ_TX_TIMEOUT_DEFAULT;

	/* Setup queue rings */
	ret = hclge_comm_alloc_cmd_queue(hw, HCLGE_COMM_TYPE_CSQ);
	if (ret) {
		dev_err(&pdev->dev, "CSQ ring setup error %d\n", ret);
		return ret;
	}

	ret = hclge_comm_alloc_cmd_queue(hw, HCLGE_COMM_TYPE_CRQ);
	if (ret) {
		dev_err(&pdev->dev, "CRQ ring setup error %d\n", ret);
		goto err_csq;
	}

	return 0;
err_csq:
	hclge_comm_free_cmd_desc(&hw->cmq.csq);
	return ret;
}

void hclge_comm_cmd_init_ops(struct hclge_comm_hw *hw,
			     const struct hclge_comm_cmq_ops *ops)
{
	struct hclge_comm_cmq *cmdq = &hw->cmq;

	if (ops) {
		cmdq->ops.trace_cmd_send = ops->trace_cmd_send;
		cmdq->ops.trace_cmd_get = ops->trace_cmd_get;
	}
}

int hclge_comm_cmd_init(struct hnae3_ae_dev *ae_dev, struct hclge_comm_hw *hw,
			u32 *fw_version, bool is_pf,
			unsigned long reset_pending)
{
	struct hclge_comm_cmq *cmdq = &hw->cmq;
	int ret;

	spin_lock_bh(&cmdq->csq.lock);
	spin_lock(&cmdq->crq.lock);

	cmdq->csq.next_to_clean = 0;
	cmdq->csq.next_to_use = 0;
	cmdq->crq.next_to_clean = 0;
	cmdq->crq.next_to_use = 0;

	hclge_comm_cmd_init_regs(hw);

	spin_unlock(&cmdq->crq.lock);
	spin_unlock_bh(&cmdq->csq.lock);

	clear_bit(HCLGE_COMM_STATE_CMD_DISABLE, &hw->comm_state);

	/* Check if there is a new reset pending, because a higher level
	 * reset may happen while a lower level reset is being processed.
	 */
	if (reset_pending) {
		ret = -EBUSY;
		goto err_cmd_init;
	}

	/* get version and device capabilities */
	ret = hclge_comm_cmd_query_version_and_capability(ae_dev, hw,
							  fw_version, is_pf);
	if (ret) {
		dev_err(&ae_dev->pdev->dev,
			"failed to query version and capabilities, ret = %d\n",
			ret);
		goto err_cmd_init;
	}

	dev_info(&ae_dev->pdev->dev,
		 "The firmware version is %lu.%lu.%lu.%lu\n",
		 hnae3_get_field(*fw_version, HNAE3_FW_VERSION_BYTE3_MASK,
				 HNAE3_FW_VERSION_BYTE3_SHIFT),
		 hnae3_get_field(*fw_version, HNAE3_FW_VERSION_BYTE2_MASK,
				 HNAE3_FW_VERSION_BYTE2_SHIFT),
		 hnae3_get_field(*fw_version, HNAE3_FW_VERSION_BYTE1_MASK,
				 HNAE3_FW_VERSION_BYTE1_SHIFT),
		 hnae3_get_field(*fw_version, HNAE3_FW_VERSION_BYTE0_MASK,
				 HNAE3_FW_VERSION_BYTE0_SHIFT));

	if (!is_pf && ae_dev->dev_version < HNAE3_DEVICE_VERSION_V3)
		return 0;

	/* Ask the firmware to enable some features; the driver can work
	 * without them.
	 */
	ret = hclge_comm_firmware_compat_config(ae_dev, hw, true);
	if (ret)
		dev_warn(&ae_dev->pdev->dev,
			 "Firmware compatible features not enabled(%d).\n",
			 ret);
	return 0;

err_cmd_init:
	set_bit(HCLGE_COMM_STATE_CMD_DISABLE, &hw->comm_state);

	return ret;
}