// SPDX-License-Identifier: GPL-2.0+
// Copyright (c) 2021-2021 Hisilicon Limited.

#include "hnae3.h"
#include "hclge_comm_cmd.h"

static void hclge_comm_cmd_config_regs(struct hclge_comm_hw *hw,
				       struct hclge_comm_cmq_ring *ring)
{
	dma_addr_t dma = ring->desc_dma_addr;
	u32 reg_val;

	if (ring->ring_type == HCLGE_COMM_TYPE_CSQ) {
		hclge_comm_write_dev(hw, HCLGE_COMM_NIC_CSQ_BASEADDR_L_REG,
				     lower_32_bits(dma));
		hclge_comm_write_dev(hw, HCLGE_COMM_NIC_CSQ_BASEADDR_H_REG,
				     upper_32_bits(dma));
		reg_val = hclge_comm_read_dev(hw, HCLGE_COMM_NIC_CSQ_DEPTH_REG);
		reg_val &= HCLGE_COMM_NIC_SW_RST_RDY;
		reg_val |= ring->desc_num >> HCLGE_COMM_NIC_CMQ_DESC_NUM_S;
		hclge_comm_write_dev(hw, HCLGE_COMM_NIC_CSQ_DEPTH_REG, reg_val);
		hclge_comm_write_dev(hw, HCLGE_COMM_NIC_CSQ_HEAD_REG, 0);
		hclge_comm_write_dev(hw, HCLGE_COMM_NIC_CSQ_TAIL_REG, 0);
	} else {
		hclge_comm_write_dev(hw, HCLGE_COMM_NIC_CRQ_BASEADDR_L_REG,
				     lower_32_bits(dma));
		hclge_comm_write_dev(hw, HCLGE_COMM_NIC_CRQ_BASEADDR_H_REG,
				     upper_32_bits(dma));
		reg_val = ring->desc_num >> HCLGE_COMM_NIC_CMQ_DESC_NUM_S;
		hclge_comm_write_dev(hw, HCLGE_COMM_NIC_CRQ_DEPTH_REG, reg_val);
		hclge_comm_write_dev(hw, HCLGE_COMM_NIC_CRQ_HEAD_REG, 0);
		hclge_comm_write_dev(hw, HCLGE_COMM_NIC_CRQ_TAIL_REG, 0);
	}
}

void hclge_comm_cmd_init_regs(struct hclge_comm_hw *hw)
{
	hclge_comm_cmd_config_regs(hw, &hw->cmq.csq);
	hclge_comm_cmd_config_regs(hw, &hw->cmq.crq);
}

void hclge_comm_cmd_reuse_desc(struct hclge_desc *desc, bool is_read)
{
	desc->flag = cpu_to_le16(HCLGE_COMM_CMD_FLAG_NO_INTR |
				 HCLGE_COMM_CMD_FLAG_IN);
	if (is_read)
		desc->flag |= cpu_to_le16(HCLGE_COMM_CMD_FLAG_WR);
	else
		desc->flag &= cpu_to_le16(~HCLGE_COMM_CMD_FLAG_WR);
}

static void hclge_comm_set_default_capability(struct hnae3_ae_dev *ae_dev,
					      bool is_pf)
{
	set_bit(HNAE3_DEV_SUPPORT_GRO_B, ae_dev->caps);
	if (is_pf) {
		set_bit(HNAE3_DEV_SUPPORT_FD_B, ae_dev->caps);
		set_bit(HNAE3_DEV_SUPPORT_FEC_B, ae_dev->caps);
		set_bit(HNAE3_DEV_SUPPORT_PAUSE_B, ae_dev->caps);
	}
}

void hclge_comm_cmd_setup_basic_desc(struct hclge_desc *desc,
				     enum hclge_opcode_type opcode,
				     bool is_read)
{
	memset((void *)desc, 0, sizeof(struct hclge_desc));
	desc->opcode = cpu_to_le16(opcode);
	desc->flag = cpu_to_le16(HCLGE_COMM_CMD_FLAG_NO_INTR |
				 HCLGE_COMM_CMD_FLAG_IN);

	if (is_read)
		desc->flag |= cpu_to_le16(HCLGE_COMM_CMD_FLAG_WR);
}

int hclge_comm_firmware_compat_config(struct hnae3_ae_dev *ae_dev,
				      struct hclge_comm_hw *hw, bool en)
{
	struct hclge_comm_firmware_compat_cmd *req;
	struct hclge_desc desc;
	u32 compat = 0;

	hclge_comm_cmd_setup_basic_desc(&desc, HCLGE_OPC_IMP_COMPAT_CFG, false);

	if (en) {
		req = (struct hclge_comm_firmware_compat_cmd *)desc.data;

		hnae3_set_bit(compat, HCLGE_COMM_LINK_EVENT_REPORT_EN_B, 1);
		hnae3_set_bit(compat, HCLGE_COMM_NCSI_ERROR_REPORT_EN_B, 1);
		if (hclge_comm_dev_phy_imp_supported(ae_dev))
			hnae3_set_bit(compat, HCLGE_COMM_PHY_IMP_EN_B, 1);
		hnae3_set_bit(compat, HCLGE_COMM_MAC_STATS_EXT_EN_B, 1);
		hnae3_set_bit(compat, HCLGE_COMM_SYNC_RX_RING_HEAD_EN_B, 1);
		hnae3_set_bit(compat, HCLGE_COMM_LLRS_FEC_EN_B, 1);

		req->compat = cpu_to_le32(compat);
	}

	return hclge_comm_cmd_send(hw, &desc, 1);
}
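/*
 * Descriptor ring memory management.  Both the CSQ (command send queue)
 * and the CRQ (command receive queue) keep their descriptors in
 * DMA-coherent memory so that driver and IMP firmware share one view
 * without explicit cache maintenance.  A minimal setup sketch, assuming
 * "ring" points at hw->cmq.csq or hw->cmq.crq and "pdev" is the
 * function's PCI device (illustrative only, not extra driver logic):
 *
 *	ring->pdev = pdev;
 *	ring->desc_num = HCLGE_COMM_NIC_CMQ_DESC_NUM;
 *	ret = hclge_comm_alloc_cmd_desc(ring);	// coherent ring allocation
 *	...
 *	hclge_comm_free_cmd_desc(ring);		// tolerates a NULL desc
 */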
void hclge_comm_free_cmd_desc(struct hclge_comm_cmq_ring *ring)
{
	int size = ring->desc_num * sizeof(struct hclge_desc);

	if (!ring->desc)
		return;

	dma_free_coherent(&ring->pdev->dev, size,
			  ring->desc, ring->desc_dma_addr);
	ring->desc = NULL;
}

static int hclge_comm_alloc_cmd_desc(struct hclge_comm_cmq_ring *ring)
{
	int size = ring->desc_num * sizeof(struct hclge_desc);

	ring->desc = dma_alloc_coherent(&ring->pdev->dev,
					size, &ring->desc_dma_addr, GFP_KERNEL);
	if (!ring->desc)
		return -ENOMEM;

	return 0;
}

static __le32 hclge_comm_build_api_caps(void)
{
	u32 api_caps = 0;

	hnae3_set_bit(api_caps, HCLGE_COMM_API_CAP_FLEX_RSS_TBL_B, 1);

	return cpu_to_le32(api_caps);
}

static const struct hclge_comm_caps_bit_map hclge_pf_cmd_caps[] = {
	{HCLGE_COMM_CAP_UDP_GSO_B, HNAE3_DEV_SUPPORT_UDP_GSO_B},
	{HCLGE_COMM_CAP_PTP_B, HNAE3_DEV_SUPPORT_PTP_B},
	{HCLGE_COMM_CAP_INT_QL_B, HNAE3_DEV_SUPPORT_INT_QL_B},
	{HCLGE_COMM_CAP_TQP_TXRX_INDEP_B, HNAE3_DEV_SUPPORT_TQP_TXRX_INDEP_B},
	{HCLGE_COMM_CAP_HW_TX_CSUM_B, HNAE3_DEV_SUPPORT_HW_TX_CSUM_B},
	{HCLGE_COMM_CAP_UDP_TUNNEL_CSUM_B, HNAE3_DEV_SUPPORT_UDP_TUNNEL_CSUM_B},
	{HCLGE_COMM_CAP_FD_FORWARD_TC_B, HNAE3_DEV_SUPPORT_FD_FORWARD_TC_B},
	{HCLGE_COMM_CAP_FEC_B, HNAE3_DEV_SUPPORT_FEC_B},
	{HCLGE_COMM_CAP_PAUSE_B, HNAE3_DEV_SUPPORT_PAUSE_B},
	{HCLGE_COMM_CAP_PHY_IMP_B, HNAE3_DEV_SUPPORT_PHY_IMP_B},
	{HCLGE_COMM_CAP_QB_B, HNAE3_DEV_SUPPORT_QB_B},
	{HCLGE_COMM_CAP_TX_PUSH_B, HNAE3_DEV_SUPPORT_TX_PUSH_B},
	{HCLGE_COMM_CAP_RAS_IMP_B, HNAE3_DEV_SUPPORT_RAS_IMP_B},
	{HCLGE_COMM_CAP_RXD_ADV_LAYOUT_B, HNAE3_DEV_SUPPORT_RXD_ADV_LAYOUT_B},
	{HCLGE_COMM_CAP_PORT_VLAN_BYPASS_B,
	 HNAE3_DEV_SUPPORT_PORT_VLAN_BYPASS_B},
	{HCLGE_COMM_CAP_PORT_VLAN_BYPASS_B, HNAE3_DEV_SUPPORT_VLAN_FLTR_MDF_B},
	{HCLGE_COMM_CAP_CQ_B, HNAE3_DEV_SUPPORT_CQ_B},
	{HCLGE_COMM_CAP_GRO_B, HNAE3_DEV_SUPPORT_GRO_B},
	{HCLGE_COMM_CAP_FD_B, HNAE3_DEV_SUPPORT_FD_B},
	{HCLGE_COMM_CAP_FEC_STATS_B, HNAE3_DEV_SUPPORT_FEC_STATS_B},
	{HCLGE_COMM_CAP_LANE_NUM_B, HNAE3_DEV_SUPPORT_LANE_NUM_B},
	{HCLGE_COMM_CAP_WOL_B, HNAE3_DEV_SUPPORT_WOL_B},
};

static const struct hclge_comm_caps_bit_map hclge_vf_cmd_caps[] = {
	{HCLGE_COMM_CAP_UDP_GSO_B, HNAE3_DEV_SUPPORT_UDP_GSO_B},
	{HCLGE_COMM_CAP_INT_QL_B, HNAE3_DEV_SUPPORT_INT_QL_B},
	{HCLGE_COMM_CAP_TQP_TXRX_INDEP_B, HNAE3_DEV_SUPPORT_TQP_TXRX_INDEP_B},
	{HCLGE_COMM_CAP_HW_TX_CSUM_B, HNAE3_DEV_SUPPORT_HW_TX_CSUM_B},
	{HCLGE_COMM_CAP_UDP_TUNNEL_CSUM_B, HNAE3_DEV_SUPPORT_UDP_TUNNEL_CSUM_B},
	{HCLGE_COMM_CAP_QB_B, HNAE3_DEV_SUPPORT_QB_B},
	{HCLGE_COMM_CAP_TX_PUSH_B, HNAE3_DEV_SUPPORT_TX_PUSH_B},
	{HCLGE_COMM_CAP_RXD_ADV_LAYOUT_B, HNAE3_DEV_SUPPORT_RXD_ADV_LAYOUT_B},
	{HCLGE_COMM_CAP_CQ_B, HNAE3_DEV_SUPPORT_CQ_B},
	{HCLGE_COMM_CAP_GRO_B, HNAE3_DEV_SUPPORT_GRO_B},
};

static void
hclge_comm_parse_capability(struct hnae3_ae_dev *ae_dev, bool is_pf,
			    struct hclge_comm_query_version_cmd *cmd)
{
	const struct hclge_comm_caps_bit_map *caps_map =
		is_pf ? hclge_pf_cmd_caps : hclge_vf_cmd_caps;
	u32 size = is_pf ? ARRAY_SIZE(hclge_pf_cmd_caps) :
			   ARRAY_SIZE(hclge_vf_cmd_caps);
	u32 caps, i;

	caps = __le32_to_cpu(cmd->caps[0]);
	for (i = 0; i < size; i++)
		if (hnae3_get_bit(caps, caps_map[i].imp_bit))
			set_bit(caps_map[i].local_bit, ae_dev->caps);
}
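/*
 * The tables above map firmware (IMP) capability bits onto the
 * driver-local HNAE3_DEV_SUPPORT_* bits consumed by the stack; one IMP
 * bit may fan out to several local bits (see the two PORT_VLAN_BYPASS
 * rows in the PF table).  Wiring up a new capability is one extra row;
 * e.g. a hypothetical feature FOO (placeholder names, not real defines)
 * would read:
 *
 *	{HCLGE_COMM_CAP_FOO_B, HNAE3_DEV_SUPPORT_FOO_B},
 *
 * Note that only the first capability word, cmd->caps[0], is parsed.
 */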
int hclge_comm_alloc_cmd_queue(struct hclge_comm_hw *hw, int ring_type)
{
	struct hclge_comm_cmq_ring *ring =
		(ring_type == HCLGE_COMM_TYPE_CSQ) ? &hw->cmq.csq :
						     &hw->cmq.crq;
	int ret;

	ring->ring_type = ring_type;

	ret = hclge_comm_alloc_cmd_desc(ring);
	if (ret)
		dev_err(&ring->pdev->dev, "descriptor %s alloc error %d\n",
			(ring_type == HCLGE_COMM_TYPE_CSQ) ? "CSQ" : "CRQ",
			ret);

	return ret;
}

int hclge_comm_cmd_query_version_and_capability(struct hnae3_ae_dev *ae_dev,
						struct hclge_comm_hw *hw,
						u32 *fw_version, bool is_pf)
{
	struct hclge_comm_query_version_cmd *resp;
	struct hclge_desc desc;
	int ret;

	hclge_comm_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_FW_VER, true);
	resp = (struct hclge_comm_query_version_cmd *)desc.data;
	resp->api_caps = hclge_comm_build_api_caps();

	ret = hclge_comm_cmd_send(hw, &desc, 1);
	if (ret)
		return ret;

	*fw_version = le32_to_cpu(resp->firmware);

	ae_dev->dev_version = le32_to_cpu(resp->hardware) <<
			      HNAE3_PCI_REVISION_BIT_SIZE;
	ae_dev->dev_version |= ae_dev->pdev->revision;

	if (ae_dev->dev_version == HNAE3_DEVICE_VERSION_V2) {
		hclge_comm_set_default_capability(ae_dev, is_pf);
		return 0;
	}

	hclge_comm_parse_capability(ae_dev, is_pf, resp);

	return ret;
}

static const u16 spec_opcode[] = { HCLGE_OPC_STATS_64_BIT,
				   HCLGE_OPC_STATS_32_BIT,
				   HCLGE_OPC_STATS_MAC,
				   HCLGE_OPC_STATS_MAC_ALL,
				   HCLGE_OPC_QUERY_32_BIT_REG,
				   HCLGE_OPC_QUERY_64_BIT_REG,
				   HCLGE_QUERY_CLEAR_MPF_RAS_INT,
				   HCLGE_QUERY_CLEAR_PF_RAS_INT,
				   HCLGE_QUERY_CLEAR_ALL_MPF_MSIX_INT,
				   HCLGE_QUERY_CLEAR_ALL_PF_MSIX_INT,
				   HCLGE_QUERY_ALL_ERR_INFO };

static bool hclge_comm_is_special_opcode(u16 opcode)
{
	/* These commands have several descriptors and use the first one
	 * to save the opcode and return value.
	 */
	u32 i;

	for (i = 0; i < ARRAY_SIZE(spec_opcode); i++)
		if (spec_opcode[i] == opcode)
			return true;

	return false;
}

static int hclge_comm_ring_space(struct hclge_comm_cmq_ring *ring)
{
	int ntc = ring->next_to_clean;
	int ntu = ring->next_to_use;
	int used = (ntu - ntc + ring->desc_num) % ring->desc_num;

	return ring->desc_num - used - 1;
}

static void hclge_comm_cmd_copy_desc(struct hclge_comm_hw *hw,
				     struct hclge_desc *desc, int num)
{
	struct hclge_desc *desc_to_use;
	int handle = 0;

	while (handle < num) {
		desc_to_use = &hw->cmq.csq.desc[hw->cmq.csq.next_to_use];
		*desc_to_use = desc[handle];
		(hw->cmq.csq.next_to_use)++;
		if (hw->cmq.csq.next_to_use >= hw->cmq.csq.desc_num)
			hw->cmq.csq.next_to_use = 0;
		handle++;
	}
}

static int hclge_comm_is_valid_csq_clean_head(struct hclge_comm_cmq_ring *ring,
					      int head)
{
	int ntc = ring->next_to_clean;
	int ntu = ring->next_to_use;

	if (ntu > ntc)
		return head >= ntc && head <= ntu;

	return head >= ntc || head <= ntu;
}
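/*
 * Worked example for the wrap-around check above, assuming a ring of
 * desc_num = 1024 entries with next_to_clean = 1020 and next_to_use = 4
 * (the producer has wrapped): hardware heads 1022 and 2 are both valid,
 * while a head of 10 lies outside the in-flight window and indicates
 * that the software and hardware views of the ring have diverged.
 */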
static int hclge_comm_cmd_csq_clean(struct hclge_comm_hw *hw)
{
	struct hclge_comm_cmq_ring *csq = &hw->cmq.csq;
	int clean;
	u32 head;

	head = hclge_comm_read_dev(hw, HCLGE_COMM_NIC_CSQ_HEAD_REG);
	rmb(); /* Make sure head is ready before touching any data */

	if (!hclge_comm_is_valid_csq_clean_head(csq, head)) {
		dev_warn(&hw->cmq.csq.pdev->dev, "wrong cmd head (%u, %d-%d)\n",
			 head, csq->next_to_use, csq->next_to_clean);
		dev_warn(&hw->cmq.csq.pdev->dev,
			 "Disabling any further commands to IMP firmware\n");
		set_bit(HCLGE_COMM_STATE_CMD_DISABLE, &hw->comm_state);
		dev_warn(&hw->cmq.csq.pdev->dev,
			 "IMP firmware watchdog reset soon expected!\n");
		return -EIO;
	}

	clean = (head - csq->next_to_clean + csq->desc_num) % csq->desc_num;
	csq->next_to_clean = head;
	return clean;
}

static bool hclge_comm_cmd_csq_done(struct hclge_comm_hw *hw)
{
	u32 head = hclge_comm_read_dev(hw, HCLGE_COMM_NIC_CSQ_HEAD_REG);

	return head == hw->cmq.csq.next_to_use;
}

static u32 hclge_get_cmdq_tx_timeout(u16 opcode, u32 tx_timeout)
{
	static const struct hclge_cmdq_tx_timeout_map cmdq_tx_timeout_map[] = {
		{HCLGE_OPC_CFG_RST_TRIGGER, HCLGE_COMM_CMDQ_TX_TIMEOUT_500MS},
	};
	u32 i;

	for (i = 0; i < ARRAY_SIZE(cmdq_tx_timeout_map); i++)
		if (cmdq_tx_timeout_map[i].opcode == opcode)
			return cmdq_tx_timeout_map[i].tx_timeout;

	return tx_timeout;
}

static void hclge_comm_wait_for_resp(struct hclge_comm_hw *hw, u16 opcode,
				     bool *is_completed)
{
	u32 cmdq_tx_timeout = hclge_get_cmdq_tx_timeout(opcode,
							hw->cmq.tx_timeout);
	u32 timeout = 0;

	do {
		if (hclge_comm_cmd_csq_done(hw)) {
			*is_completed = true;
			break;
		}
		udelay(1);
		timeout++;
	} while (timeout < cmdq_tx_timeout);
}

static int hclge_comm_cmd_convert_err_code(u16 desc_ret)
{
	static const struct hclge_comm_errcode hclge_comm_cmd_errcode[] = {
		{ HCLGE_COMM_CMD_EXEC_SUCCESS, 0 },
		{ HCLGE_COMM_CMD_NO_AUTH, -EPERM },
		{ HCLGE_COMM_CMD_NOT_SUPPORTED, -EOPNOTSUPP },
		{ HCLGE_COMM_CMD_QUEUE_FULL, -EXFULL },
		{ HCLGE_COMM_CMD_NEXT_ERR, -ENOSR },
		{ HCLGE_COMM_CMD_UNEXE_ERR, -ENOTBLK },
		{ HCLGE_COMM_CMD_PARA_ERR, -EINVAL },
		{ HCLGE_COMM_CMD_RESULT_ERR, -ERANGE },
		{ HCLGE_COMM_CMD_TIMEOUT, -ETIME },
		{ HCLGE_COMM_CMD_HILINK_ERR, -ENOLINK },
		{ HCLGE_COMM_CMD_QUEUE_ILLEGAL, -ENXIO },
		{ HCLGE_COMM_CMD_INVALID, -EBADR },
	};
	u32 errcode_count = ARRAY_SIZE(hclge_comm_cmd_errcode);
	u32 i;

	for (i = 0; i < errcode_count; i++)
		if (hclge_comm_cmd_errcode[i].imp_errcode == desc_ret)
			return hclge_comm_cmd_errcode[i].common_errno;

	return -EIO;
}

static int hclge_comm_cmd_check_retval(struct hclge_comm_hw *hw,
				       struct hclge_desc *desc, int num,
				       int ntc)
{
	u16 opcode, desc_ret;
	int handle;

	opcode = le16_to_cpu(desc[0].opcode);
	for (handle = 0; handle < num; handle++) {
		desc[handle] = hw->cmq.csq.desc[ntc];
		ntc++;
		if (ntc >= hw->cmq.csq.desc_num)
			ntc = 0;
	}
	if (likely(!hclge_comm_is_special_opcode(opcode)))
		desc_ret = le16_to_cpu(desc[num - 1].retval);
	else
		desc_ret = le16_to_cpu(desc[0].retval);

	hw->cmq.last_status = desc_ret;

	return hclge_comm_cmd_convert_err_code(desc_ret);
}
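/*
 * hclge_comm_cmd_check_result() below ties the pieces together.  Rough
 * control flow (a sketch of the existing logic, not extra behaviour):
 *
 *	if the descriptor is flagged sync:
 *		busy-wait until CSQ head == next_to_use, bounded by the
 *		per-opcode timeout from hclge_get_cmdq_tx_timeout()
 *	on timeout: ret = -EBADE
 *	otherwise:  ret = write-back retval mapped through
 *		    hclge_comm_cmd_convert_err_code()
 *	finally:    clean completed CSQ entries, warning on a mismatch
 */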
static int hclge_comm_cmd_check_result(struct hclge_comm_hw *hw,
				       struct hclge_desc *desc,
				       int num, int ntc)
{
	bool is_completed = false;
	int handle, ret;

	/* If the command is sync, wait for the firmware to write back;
	 * if multiple descriptors are sent, the first one carries the
	 * flag to check.
	 */
	if (HCLGE_COMM_SEND_SYNC(le16_to_cpu(desc->flag)))
		hclge_comm_wait_for_resp(hw, le16_to_cpu(desc->opcode),
					 &is_completed);

	if (!is_completed)
		ret = -EBADE;
	else
		ret = hclge_comm_cmd_check_retval(hw, desc, num, ntc);

	/* Clean the command send queue */
	handle = hclge_comm_cmd_csq_clean(hw);
	if (handle < 0)
		ret = handle;
	else if (handle != num)
		dev_warn(&hw->cmq.csq.pdev->dev,
			 "cleaned %d, need to clean %d\n", handle, num);
	return ret;
}

/**
 * hclge_comm_cmd_send - send command to command queue
 * @hw: pointer to the hw struct
 * @desc: prefilled descriptor for describing the command
 * @num: the number of descriptors to be sent
 *
 * This is the main send command for the command queue. It posts the
 * descriptors to the CSQ, waits for completion and cleans the queue.
 */
int hclge_comm_cmd_send(struct hclge_comm_hw *hw, struct hclge_desc *desc,
			int num)
{
	struct hclge_comm_cmq_ring *csq = &hw->cmq.csq;
	int ret;
	int ntc;

	spin_lock_bh(&hw->cmq.csq.lock);

	if (test_bit(HCLGE_COMM_STATE_CMD_DISABLE, &hw->comm_state)) {
		spin_unlock_bh(&hw->cmq.csq.lock);
		return -EBUSY;
	}

	if (num > hclge_comm_ring_space(&hw->cmq.csq)) {
		/* If the CMDQ ring is full, SW HEAD and HW HEAD may differ,
		 * so the SW HEAD pointer csq->next_to_clean needs updating.
		 */
		csq->next_to_clean =
			hclge_comm_read_dev(hw, HCLGE_COMM_NIC_CSQ_HEAD_REG);
		spin_unlock_bh(&hw->cmq.csq.lock);
		return -EBUSY;
	}

	/* Record the location of desc in the ring for this time,
	 * which will be used for hardware to write back.
	 */
	ntc = hw->cmq.csq.next_to_use;

	hclge_comm_cmd_copy_desc(hw, desc, num);

	/* Write to hardware */
	hclge_comm_write_dev(hw, HCLGE_COMM_NIC_CSQ_TAIL_REG,
			     hw->cmq.csq.next_to_use);

	ret = hclge_comm_cmd_check_result(hw, desc, num, ntc);

	spin_unlock_bh(&hw->cmq.csq.lock);

	return ret;
}

static void hclge_comm_cmd_uninit_regs(struct hclge_comm_hw *hw)
{
	hclge_comm_write_dev(hw, HCLGE_COMM_NIC_CSQ_BASEADDR_L_REG, 0);
	hclge_comm_write_dev(hw, HCLGE_COMM_NIC_CSQ_BASEADDR_H_REG, 0);
	hclge_comm_write_dev(hw, HCLGE_COMM_NIC_CSQ_DEPTH_REG, 0);
	hclge_comm_write_dev(hw, HCLGE_COMM_NIC_CSQ_HEAD_REG, 0);
	hclge_comm_write_dev(hw, HCLGE_COMM_NIC_CSQ_TAIL_REG, 0);
	hclge_comm_write_dev(hw, HCLGE_COMM_NIC_CRQ_BASEADDR_L_REG, 0);
	hclge_comm_write_dev(hw, HCLGE_COMM_NIC_CRQ_BASEADDR_H_REG, 0);
	hclge_comm_write_dev(hw, HCLGE_COMM_NIC_CRQ_DEPTH_REG, 0);
	hclge_comm_write_dev(hw, HCLGE_COMM_NIC_CRQ_HEAD_REG, 0);
	hclge_comm_write_dev(hw, HCLGE_COMM_NIC_CRQ_TAIL_REG, 0);
}
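/*
 * Teardown below is ordered deliberately: compat features are switched
 * off and new commands are blocked first, then the driver sleeps for
 * HCLGE_COMM_CMDQ_CLEAR_WAIT_TIME so in-flight firmware work can drain,
 * and only under both ring locks are the queue registers cleared before
 * the DMA rings are finally freed.
 */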
void hclge_comm_cmd_uninit(struct hnae3_ae_dev *ae_dev,
			   struct hclge_comm_hw *hw)
{
	struct hclge_comm_cmq *cmdq = &hw->cmq;

	hclge_comm_firmware_compat_config(ae_dev, hw, false);
	set_bit(HCLGE_COMM_STATE_CMD_DISABLE, &hw->comm_state);

	/* Wait to ensure that the firmware completes any possible
	 * leftover commands.
	 */
	msleep(HCLGE_COMM_CMDQ_CLEAR_WAIT_TIME);
	spin_lock_bh(&cmdq->csq.lock);
	spin_lock(&cmdq->crq.lock);
	hclge_comm_cmd_uninit_regs(hw);
	spin_unlock(&cmdq->crq.lock);
	spin_unlock_bh(&cmdq->csq.lock);

	hclge_comm_free_cmd_desc(&cmdq->csq);
	hclge_comm_free_cmd_desc(&cmdq->crq);
}

int hclge_comm_cmd_queue_init(struct pci_dev *pdev, struct hclge_comm_hw *hw)
{
	struct hclge_comm_cmq *cmdq = &hw->cmq;
	int ret;

	/* Setup the locks for the command queues */
	spin_lock_init(&cmdq->csq.lock);
	spin_lock_init(&cmdq->crq.lock);

	cmdq->csq.pdev = pdev;
	cmdq->crq.pdev = pdev;

	/* Setup the queue entries of the command queues */
	cmdq->csq.desc_num = HCLGE_COMM_NIC_CMQ_DESC_NUM;
	cmdq->crq.desc_num = HCLGE_COMM_NIC_CMQ_DESC_NUM;

	/* Setup Tx write back timeout */
	cmdq->tx_timeout = HCLGE_COMM_CMDQ_TX_TIMEOUT_DEFAULT;

	/* Setup queue rings */
	ret = hclge_comm_alloc_cmd_queue(hw, HCLGE_COMM_TYPE_CSQ);
	if (ret) {
		dev_err(&pdev->dev, "CSQ ring setup error %d\n", ret);
		return ret;
	}

	ret = hclge_comm_alloc_cmd_queue(hw, HCLGE_COMM_TYPE_CRQ);
	if (ret) {
		dev_err(&pdev->dev, "CRQ ring setup error %d\n", ret);
		goto err_csq;
	}

	return 0;

err_csq:
	hclge_comm_free_cmd_desc(&hw->cmq.csq);
	return ret;
}

int hclge_comm_cmd_init(struct hnae3_ae_dev *ae_dev, struct hclge_comm_hw *hw,
			u32 *fw_version, bool is_pf,
			unsigned long reset_pending)
{
	struct hclge_comm_cmq *cmdq = &hw->cmq;
	int ret;

	spin_lock_bh(&cmdq->csq.lock);
	spin_lock(&cmdq->crq.lock);

	cmdq->csq.next_to_clean = 0;
	cmdq->csq.next_to_use = 0;
	cmdq->crq.next_to_clean = 0;
	cmdq->crq.next_to_use = 0;

	hclge_comm_cmd_init_regs(hw);

	spin_unlock(&cmdq->crq.lock);
	spin_unlock_bh(&cmdq->csq.lock);

	clear_bit(HCLGE_COMM_STATE_CMD_DISABLE, &hw->comm_state);

	/* Check if there is a new reset pending, because a higher level
	 * reset may happen while a lower level reset is being processed.
	 */
	if (reset_pending) {
		ret = -EBUSY;
		goto err_cmd_init;
	}

	/* Get version and device capabilities */
	ret = hclge_comm_cmd_query_version_and_capability(ae_dev, hw,
							  fw_version, is_pf);
	if (ret) {
		dev_err(&ae_dev->pdev->dev,
			"failed to query version and capabilities, ret = %d\n",
			ret);
		goto err_cmd_init;
	}

	dev_info(&ae_dev->pdev->dev,
		 "The firmware version is %lu.%lu.%lu.%lu\n",
		 hnae3_get_field(*fw_version, HNAE3_FW_VERSION_BYTE3_MASK,
				 HNAE3_FW_VERSION_BYTE3_SHIFT),
		 hnae3_get_field(*fw_version, HNAE3_FW_VERSION_BYTE2_MASK,
				 HNAE3_FW_VERSION_BYTE2_SHIFT),
		 hnae3_get_field(*fw_version, HNAE3_FW_VERSION_BYTE1_MASK,
				 HNAE3_FW_VERSION_BYTE1_SHIFT),
		 hnae3_get_field(*fw_version, HNAE3_FW_VERSION_BYTE0_MASK,
				 HNAE3_FW_VERSION_BYTE0_SHIFT));

	if (!is_pf && ae_dev->dev_version < HNAE3_DEVICE_VERSION_V3)
		return 0;

	/* Ask the firmware to enable some features; the driver can work
	 * without them.
	 */
	ret = hclge_comm_firmware_compat_config(ae_dev, hw, true);
	if (ret)
		dev_warn(&ae_dev->pdev->dev,
			 "Firmware compatible features not enabled (%d).\n",
			 ret);

	return 0;

err_cmd_init:
	set_bit(HCLGE_COMM_STATE_CMD_DISABLE, &hw->comm_state);

	return ret;
}
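/*
 * Typical bring-up/teardown sequence as seen from the PF/VF drivers (an
 * illustrative sketch only; error handling and the surrounding hdev
 * plumbing are elided):
 *
 *	u32 fw_version;
 *
 *	ret = hclge_comm_cmd_queue_init(pdev, hw);	// alloc CSQ/CRQ rings
 *	ret = hclge_comm_cmd_init(ae_dev, hw, &fw_version, true, 0);
 *	...
 *	ret = hclge_comm_cmd_send(hw, &desc, 1);	// per-command I/O
 *	...
 *	hclge_comm_cmd_uninit(ae_dev, hw);		// quiesce and free
 */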