1 /* 2 * Copyright 2014-2017 Cavium, Inc. 3 * The contents of this file are subject to the terms of the Common Development 4 * and Distribution License, v.1, (the "License"). 5 * 6 * You may not use this file except in compliance with the License. 7 * 8 * You can obtain a copy of the License at available 9 * at http://opensource.org/licenses/CDDL-1.0 10 * 11 * See the License for the specific language governing permissions and 12 * limitations under the License. 13 */ 14 15 #include "lm5706.h" 16 17 18 19 /******************************************************************************* 20 * Description: 21 * 22 * Return: 23 ******************************************************************************/ 24 lm_status_t 25 lm_set_mac_addr( 26 lm_device_t *pdev, 27 u32_t addr_idx, 28 u8_t *mac_addr) 29 { 30 u32_t val; 31 32 if(addr_idx >= 16) 33 { 34 DbgBreakMsg("Invalid mac address index.\n"); 35 36 return LM_STATUS_FAILURE; 37 } 38 39 val = (mac_addr[0]<<8) | mac_addr[1]; 40 REG_WR(pdev, emac.emac_mac_match[addr_idx*2], val); 41 42 val = (mac_addr[2]<<24) | (mac_addr[3]<<16) | 43 (mac_addr[4]<<8) | mac_addr[5]; 44 REG_WR(pdev, emac.emac_mac_match[addr_idx*2+1], val); 45 46 return LM_STATUS_SUCCESS; 47 } /* lm_set_mac_addr */ 48 49 50 51 /******************************************************************************* 52 * Description: 53 * 54 * Return: 55 * None. 56 * 57 * Note: 58 * The caller is responsible for synchronizing calls to lm_reg_rd_ind and 59 * lm_reg_wr_ind. 
 ******************************************************************************/
void
lm_reg_rd_ind(
    lm_device_t *pdev,
    u32_t offset,
    u32_t *ret)
{
    /* DbgBreakIf(offset & 0x3); // this can occur for some shmem accesses */

    mm_acquire_ind_reg_lock(pdev);

    /* Indirect register access: program the PCI config window address,
     * then read the selected register back through the window. */
    REG_WR(pdev, pci_config.pcicfg_reg_window_address, offset);
    REG_RD(pdev, pci_config.pcicfg_reg_window, ret);

    mm_release_ind_reg_lock(pdev);
} /* lm_reg_rd_ind */



/*******************************************************************************
 * Description:
 *    Writes one 32-bit register through the indirect PCI config window pair
 *    (pcicfg_reg_window_address selects the target register,
 *    pcicfg_reg_window carries the data).
 *
 * Return:
 *    None.
 *
 * Note:
 *    The caller is responsible for synchronizing calls to lm_reg_rd_ind and
 *    lm_reg_wr_ind.
 ******************************************************************************/
void
lm_reg_wr_ind(
    lm_device_t *pdev,
    u32_t offset,
    u32_t val)
{
    DbgBreakIf(offset & 0x3);

    mm_acquire_ind_reg_lock(pdev);

    REG_WR(pdev, pci_config.pcicfg_reg_window_address, offset);
    REG_WR(pdev, pci_config.pcicfg_reg_window, val);

    mm_release_ind_reg_lock(pdev);
} /* lm_reg_wr_ind */



/*******************************************************************************
 * Description:
 *    Writes one 32-bit word of context memory at 'offset' within the
 *    context selected by 'cid_addr'.  On the 5709 the write is posted
 *    through the CTX data/ctrl request interface and polled to completion;
 *    earlier chips use the simple ctx_data_adr/ctx_data register pair.
 *
 * Return:
 *    None.
 ******************************************************************************/
void
lm_ctx_wr(
    lm_device_t *pdev,
    u32_t cid_addr,
    u32_t offset,
    u32_t val)
{
    u32_t retry_cnt;
    u32_t idx;

    DbgBreakIf(cid_addr > MAX_CID_ADDR || offset & 0x3 || cid_addr & CTX_MASK);

    offset += cid_addr;

    if(CHIP_NUM(pdev) == CHIP_NUM_5709)
    {
        /* CHIP_REV_IKOS is presumably an emulation platform and gets a
         * much larger poll budget -- TODO(review): confirm. */
        if (CHIP_REV(pdev) == CHIP_REV_IKOS)
        {
            retry_cnt = 2000;
        }
        else
        {
            retry_cnt = 250;
        }

        /* Post the request: data word first, then the control word
         * carrying the target offset and the WRITE_REQ trigger bit. */
        REG_WR(pdev, context.ctx_ctx_data, val);
        REG_WR(pdev, context.ctx_ctx_ctrl, offset | CTX_CTX_CTRL_WRITE_REQ);

        /* Poll until WRITE_REQ clears.  Note 'val' is reused here as the
         * poll scratch variable. */
        for(idx=0; idx < retry_cnt; idx++)
        {
            REG_RD(pdev, context.ctx_ctx_ctrl, &val);

            if((val & CTX_CTX_CTRL_WRITE_REQ) == 0)
            {
                break;
            }

            mm_wait(pdev, 10);
        }

        /* Debug-only completion check; release builds neither retry nor
         * report a context write that did not complete in time. */
        DbgBreakIf(idx == retry_cnt);
    }
    else
    {
        REG_WR(pdev, context.ctx_data_adr, offset);
        REG_WR(pdev, context.ctx_data, val);
    }
} /* lm_ctx_wr */



/*******************************************************************************
 * Description:
 *    Reads one 32-bit word of context memory at 'offset' within the
 *    context selected by 'cid_addr'.  Counterpart of lm_ctx_wr.
 *
 * Return:
 *    The context word read.
 ******************************************************************************/
u32_t
lm_ctx_rd(
    lm_device_t *pdev,
    u32_t cid_addr,
    u32_t offset)
{
    u32_t retry_cnt;
    u32_t val;
    u32_t idx;

    DbgBreakIf(cid_addr > MAX_CID_ADDR || offset & 0x3 || cid_addr & CTX_MASK);

    offset += cid_addr;

    if(CHIP_NUM(pdev) == CHIP_NUM_5709)
    {
        /* Larger poll budget for IKOS (presumably emulation). */
        if(CHIP_REV(pdev) == CHIP_REV_IKOS)
        {
            retry_cnt = 1000;
        }
        else
        {
            retry_cnt = 25;
        }

        /* Post the read request, then poll until READ_REQ clears. */
        REG_WR(pdev, context.ctx_ctx_ctrl, offset | CTX_CTX_CTRL_READ_REQ);

        for(idx = 0; idx < retry_cnt; idx++)
        {
            REG_RD(pdev, context.ctx_ctx_ctrl, &val);

            if((val & CTX_CTX_CTRL_READ_REQ) == 0)
            {
                break;
            }

            mm_wait(pdev, 5);
        }

        DbgBreakIf(idx == retry_cnt);

        /* A completed request leaves the context word in ctx_ctx_data. */
        REG_RD(pdev, context.ctx_ctx_data, &val);
    }
    else
    {
        REG_WR(pdev, context.ctx_data_adr, offset);
        REG_RD(pdev, context.ctx_data, &val);
    }

    return val;
} /* lm_ctx_rd */



/*******************************************************************************
 * Description:
 *    Masks device interrupts.  On 5706/5708 a single mask bit in the
 *    interrupt ack command register suffices; on the 5709 each of the 9
 *    status blocks must be masked individually.
 *
 * Return:
 *    None.
 ******************************************************************************/
void
lm_disable_int(
    lm_device_t *pdev)
{
    u32_t sb_idx;
    u32_t val;

    switch(CHIP_NUM(pdev))
    {
    case CHIP_NUM_5706:
    case CHIP_NUM_5708:
        REG_RD(pdev, pci_config.pcicfg_int_ack_cmd, &val);
        val |= PCICFG_INT_ACK_CMD_MASK_INT;
        REG_WR(pdev, pci_config.pcicfg_int_ack_cmd, val);
        break;

    case CHIP_NUM_5709:
        /* One masking write per status block; the status block index is
         * carried in the top byte (bits 31:24) of the command. */
        for(sb_idx = 0; sb_idx < 9; sb_idx++)
        {
            val = PCICFG_INT_ACK_CMD_MASK_INT | (sb_idx << 24);
            REG_WR(pdev, pci_config.pcicfg_int_ack_cmd, val);
        }
        break;

    default:
        DbgBreakMsg("Unsupported chip.\n");
        break;
    }
} /* lm_disable_int */



/*******************************************************************************
 * Description:
 *    Unmasks device interrupts.  Inverse of lm_disable_int; the 5709
 *    unmasks all status blocks at once via HC_CONFIG_UNMASK_ALL.
 *
 * Return:
 *    None.
 ******************************************************************************/
void
lm_enable_int(
    lm_device_t *pdev)
{
    u32_t val;

    switch(CHIP_NUM(pdev))
    {
    case CHIP_NUM_5706:
    case CHIP_NUM_5708:
        REG_RD(pdev, pci_config.pcicfg_int_ack_cmd, &val);
        val &= ~PCICFG_INT_ACK_CMD_MASK_INT;
        REG_WR(pdev, pci_config.pcicfg_int_ack_cmd, val);
        break;

    case CHIP_NUM_5709:
        REG_RD(pdev, hc.hc_config, &val);
        val |= HC_CONFIG_UNMASK_ALL;
        REG_WR(pdev, hc.hc_config, val);
        break;

    default:
        DbgBreakMsg("Unsupported chip.\n");
        break;
    }
} /* lm_enable_int */



/*******************************************************************************
 * Description:
 *    Reads 'u32t_cnt' 32-bit words starting at register offset
 *    'reg_offset' into 'buf_ptr', sliding the GRC window forward whenever
 *    the current window is exhausted.
 *
 * Return:
 *    None.
 ******************************************************************************/
void
lm_reg_rd_blk(
    lm_device_t *pdev,
    u32_t reg_offset,
    u32_t *buf_ptr,
    u32_t u32t_cnt)
{
    u32_t grc_win_offset;
    u32_t grc_win_base;

    DbgBreakIf(reg_offset & 0x3);

    /* Split the offset into a window base and an offset inside the window.
     * NOTE(review): lm_reg_wr_blk uses GRC_WINDOW_SIZE / 4 on the 5709,
     * but this read path always uses the full GRC_WINDOW_SIZE -- confirm
     * whether the 5709 adjustment is needed here as well. */
    grc_win_offset = reg_offset & (GRC_WINDOW_SIZE - 1);
    grc_win_base = reg_offset & ~(GRC_WINDOW_SIZE - 1);

    REG_WR(pdev, pci.pci_grc_window_addr, grc_win_base);

    while(u32t_cnt)
    {
        /* Ran off the end of the current window; move it forward. */
        if(grc_win_offset >= GRC_WINDOW_SIZE)
        {
            grc_win_offset = 0;
            grc_win_base += GRC_WINDOW_SIZE;

            REG_WR(pdev, pci.pci_grc_window_addr, grc_win_base);
        }

        REG_RD_OFFSET(pdev, GRC_WINDOW_BASE + grc_win_offset, buf_ptr);

        buf_ptr++;
        u32t_cnt--;
        grc_win_offset += 4;
    }

    /* Restore the window to the shared-memory base (32 KB aligned). */
    REG_WR(pdev, pci.pci_grc_window_addr, pdev->hw_info.shmem_base & ~0x7fff);
} /* lm_reg_rd_blk */



/*******************************************************************************
 * Description:
 *    Reads 'u32t_cnt' consecutive 32-bit words starting at 'reg_offset'
 *    through the indirect PCI config window.
 *
 * Return:
 *    None.
 ******************************************************************************/
void
lm_reg_rd_blk_ind(
    lm_device_t *pdev,
    u32_t reg_offset,
    u32_t *buf_ptr,
    u32_t u32t_cnt)
{
    DbgBreakIf(reg_offset & 0x3);

    /* Hold the lock across the whole block so each word's window-address
     * write stays paired with its data read. */
    mm_acquire_ind_reg_lock(pdev);

    while(u32t_cnt)
    {
        REG_WR(pdev, pci_config.pcicfg_reg_window_address, reg_offset);
        REG_RD(pdev, pci_config.pcicfg_reg_window, buf_ptr);

        buf_ptr++;
        u32t_cnt--;
        reg_offset += 4;
    }

    mm_release_ind_reg_lock(pdev);
} /* lm_reg_rd_blk_ind */



/*******************************************************************************
 * Description:
 *    Writes 'u32t_cnt' 32-bit words from 'data_ptr' to registers starting
 *    at 'reg_offset', sliding the GRC window as needed.  On the 5709 the
 *    effective write window is a quarter of GRC_WINDOW_SIZE.
 *
 * Return:
 *    None.
 ******************************************************************************/
void
lm_reg_wr_blk(
    lm_device_t *pdev,
    u32_t reg_offset,
    u32_t *data_ptr,
    u32_t u32t_cnt)
{
    u32_t grc_win_offset;
    u32_t grc_win_base;
    u32_t grc_win_size;

    DbgBreakIf(reg_offset & 0x3);

    /* The 5709 write path uses a quarter-size window. */
    if (CHIP_NUM(pdev) == CHIP_NUM_5709)
    {
        grc_win_size = GRC_WINDOW_SIZE / 4;
    }
    else
    {
        grc_win_size = GRC_WINDOW_SIZE;
    }

    grc_win_offset = reg_offset & (grc_win_size - 1);
    grc_win_base = reg_offset & ~(grc_win_size - 1);

    REG_WR(pdev, pci.pci_grc_window_addr, grc_win_base);

    while(u32t_cnt)
    {
        /* Move the window forward when the current one is exhausted. */
        if(grc_win_offset >= grc_win_size)
        {
            grc_win_offset = 0;
            grc_win_base += grc_win_size;

            REG_WR(pdev, pci.pci_grc_window_addr, grc_win_base);
        }

        REG_WR_OFFSET(pdev, GRC_WINDOW_BASE + grc_win_offset, *data_ptr);

        data_ptr++;
        u32t_cnt--;
        grc_win_offset += 4;
    }

    /* Restore the window to the shared-memory base (32 KB aligned). */
    REG_WR(pdev,
        pci.pci_grc_window_addr, pdev->hw_info.shmem_base & ~0x7fff);
} /* lm_reg_wr_blk */



/*******************************************************************************
 * Description:
 *    Writes 'u32t_cnt' consecutive 32-bit words from 'data_ptr' through
 *    the indirect PCI config window.
 *
 * Return:
 *    None.
 ******************************************************************************/
void
lm_reg_wr_blk_ind(
    lm_device_t *pdev,
    u32_t reg_offset,
    u32_t *data_ptr,
    u32_t u32t_cnt)
{
    DbgBreakIf(reg_offset & 0x3);

    mm_acquire_ind_reg_lock(pdev);

    while(u32t_cnt)
    {
        REG_WR(pdev, pci_config.pcicfg_reg_window_address, reg_offset);
        REG_WR(pdev, pci_config.pcicfg_reg_window, *data_ptr);

        data_ptr++;
        u32t_cnt--;
        reg_offset += 4;
    }

    mm_release_ind_reg_lock(pdev);
} /* lm_reg_wr_blk_ind */



/*******************************************************************************
 * Description:
 *    Posts a driver-to-firmware mailbox command.  The low 16 bits of
 *    'drv_msg' must be clear: they carry a sequence number that the
 *    firmware echoes back in fw_mb to acknowledge the command.
 *
 * Return:
 *    LM_STATUS_SUCCESS, or LM_STATUS_FAILURE if the firmware previously
 *    timed out or the prior command is still unacknowledged.
 ******************************************************************************/
lm_status_t
lm_submit_fw_cmd(
    lm_device_t *pdev,
    u32_t drv_msg)
{
    u32_t val;

    /* Once the firmware has timed out, no further commands are issued. */
    if(pdev->vars.fw_timed_out)
    {
        DbgMessage(pdev, WARN, "fw timed out.\n");

        return LM_STATUS_FAILURE;
    }

    /* The low 16 bits are reserved for the sequence number. */
    DbgBreakIf(drv_msg & 0xffff);

    /* The previous command is complete only when the firmware has echoed
     * the driver's current sequence number back in its mailbox. */
    REG_RD_IND(
        pdev,
        pdev->hw_info.shmem_base + OFFSETOF(shmem_region_t, drv_fw_mb.fw_mb),
        &val);
    if((val & FW_MSG_ACK) != (pdev->vars.fw_wr_seq & DRV_MSG_SEQ))
    {
        DbgMessage(pdev, WARN, "command pending.\n");

        return LM_STATUS_FAILURE;
    }

    /* Tag the new command with the next sequence number and post it. */
    pdev->vars.fw_wr_seq++;

    drv_msg |= (pdev->vars.fw_wr_seq & DRV_MSG_SEQ);

    REG_WR_IND(
        pdev,
        pdev->hw_info.shmem_base +
            OFFSETOF(shmem_region_t, drv_fw_mb.drv_mb),
        drv_msg);

    return LM_STATUS_SUCCESS;
} /* lm_submit_fw_cmd */



/*******************************************************************************
 * Description:
 *    Checks the completion status of the most recently submitted firmware
 *    command (see lm_submit_fw_cmd).
 *
 * Return:
******************************************************************************/ 505 lm_status_t 506 lm_last_fw_cmd_status( 507 lm_device_t *pdev) 508 { 509 u32_t val; 510 511 if(pdev->vars.fw_timed_out) 512 { 513 DbgMessage(pdev, WARN, "fw timed out.\n"); 514 515 return LM_STATUS_TIMEOUT; 516 } 517 518 REG_RD_IND( 519 pdev, 520 pdev->hw_info.shmem_base + 521 OFFSETOF(shmem_region_t, drv_fw_mb.fw_mb), 522 &val); 523 if((val & FW_MSG_ACK) != (pdev->vars.fw_wr_seq & DRV_MSG_SEQ)) 524 { 525 return LM_STATUS_BUSY; 526 } 527 528 if((val & FW_MSG_STATUS_MASK) != FW_MSG_STATUS_OK) 529 { 530 return LM_STATUS_FAILURE; 531 } 532 533 return LM_STATUS_SUCCESS; 534 } /* lm_last_fw_cmd_status */ 535 536 537 538 /******************************************************************************* 539 * Description: 540 * 541 * Return: 542 ******************************************************************************/ 543 u32_t 544 lm_mb_get_cid_addr( 545 lm_device_t *pdev, 546 u32_t cid) 547 { 548 u32_t mq_offset; 549 550 DbgBreakIf(pdev->params.bin_mq_mode && CHIP_NUM(pdev) != CHIP_NUM_5709); 551 552 if(cid < 256 || pdev->params.bin_mq_mode == FALSE) 553 { 554 mq_offset = 0x10000 + (cid << MB_KERNEL_CTX_SHIFT); 555 } 556 else 557 { 558 DbgBreakIf(cid < pdev->hw_info.first_l4_l5_bin); 559 560 mq_offset = 0x10000 + 561 ((((cid - pdev->hw_info.first_l4_l5_bin) / 562 pdev->hw_info.bin_size) + 256) << MB_KERNEL_CTX_SHIFT); 563 } 564 565 DbgBreakIf(mq_offset > pdev->hw_info.bar_size); 566 567 return mq_offset; 568 } /* lm_mb_get_cid_addr */ 569 570 571 572 /******************************************************************************* 573 * Description: 574 * 575 * Return: 576 ******************************************************************************/ 577 u32_t 578 lm_mb_get_bypass_addr( 579 lm_device_t *pdev, 580 u32_t cid) 581 { 582 u32_t mq_offset; 583 584 DbgBreakIf(pdev->params.bin_mq_mode && CHIP_NUM(pdev) != CHIP_NUM_5709); 585 586 if(cid < 256 || pdev->params.bin_mq_mode == 
FALSE) 587 { 588 mq_offset = 0x10000 + 589 MB_KERNEL_CTX_SIZE * MAX_CID_CNT + 590 cid * LM_PAGE_SIZE; 591 } 592 else 593 { 594 DbgBreakIf(cid < pdev->hw_info.first_l4_l5_bin); 595 596 mq_offset = 0x10000 + 597 MB_KERNEL_CTX_SIZE * MAX_CID_CNT + 598 (((cid - pdev->hw_info.first_l4_l5_bin) / 599 pdev->hw_info.bin_size) + 256) * LM_PAGE_SIZE; 600 } 601 602 DbgBreakIf(mq_offset > pdev->hw_info.bar_size); 603 604 return mq_offset; 605 } /* lm_mb_get_bypass_addr */ 606 607