/*
 * Copyright (c) 2017-2018 Cavium, Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * File : ecore_init_ops.c
 */
#include <sys/cdefs.h>
/* include the precompiled configuration values - only once */
#include "bcm_osal.h"
#include "ecore_hsi_common.h"
#include "ecore.h"
#include "ecore_hw.h"
#include "ecore_status.h"
#include "ecore_rt_defs.h"
#include "ecore_init_fw_funcs.h"

#ifndef CONFIG_ECORE_BINARY_FW
#ifdef CONFIG_ECORE_ZIPPED_FW
#include "ecore_init_values_zipped.h"
#else
#include "ecore_init_values.h"
#endif
#endif

#include "ecore_iro_values.h"
#include "ecore_sriov.h"
#include "ecore_gtt_values.h"
#include "reg_addr.h"
#include "ecore_init_ops.h"

#define ECORE_INIT_MAX_POLL_COUNT	100
#define ECORE_INIT_POLL_PERIOD_US	500

void ecore_init_iro_array(struct ecore_dev *p_dev)
{
	p_dev->iro_arr = iro_arr;
}

/* Runtime configuration helpers */
void ecore_init_clear_rt_data(struct ecore_hwfn *p_hwfn)
{
	int i;

	for (i = 0; i < RUNTIME_ARRAY_SIZE; i++)
		p_hwfn->rt_data.b_valid[i] = false;
}

void ecore_init_store_rt_reg(struct ecore_hwfn *p_hwfn,
			     u32 rt_offset, u32 val)
{
	if (rt_offset >= RUNTIME_ARRAY_SIZE) {
		DP_ERR(p_hwfn,
		       "Avoid storing %u in rt_data at index %u since RUNTIME_ARRAY_SIZE is %u!\n",
		       val, rt_offset, RUNTIME_ARRAY_SIZE);
		return;
	}

	p_hwfn->rt_data.init_val[rt_offset] = val;
	p_hwfn->rt_data.b_valid[rt_offset] = true;
}

void ecore_init_store_rt_agg(struct ecore_hwfn *p_hwfn,
			     u32 rt_offset, u32 *p_val,
			     osal_size_t size)
{
	osal_size_t i;

	if ((rt_offset + size - 1) >= RUNTIME_ARRAY_SIZE) {
		DP_ERR(p_hwfn,
		       "Avoid storing values in rt_data at indices %u-%u since RUNTIME_ARRAY_SIZE is %u!\n",
		       rt_offset, (u32)(rt_offset + size - 1),
		       RUNTIME_ARRAY_SIZE);
		return;
	}

	for (i = 0; i < size / sizeof(u32); i++) {
		p_hwfn->rt_data.init_val[rt_offset + i] = p_val[i];
		p_hwfn->rt_data.b_valid[rt_offset + i] = true;
	}
}
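
/*
 * Flush a slice of the runtime (RT) array into the chip. Callers first stage
 * values with ecore_init_store_rt_reg()/ecore_init_store_rt_agg(), which also
 * mark the entries valid; an INIT_SRC_RUNTIME write op later lands here and
 * writes every contiguous run of valid entries, using DMAE when the target is
 * wide-bus and plain register writes otherwise. A rough, illustrative flow
 * (SOME_RT_OFFSET is a placeholder for a real ecore_rt_defs.h offset):
 *
 *	ecore_init_store_rt_reg(p_hwfn, SOME_RT_OFFSET, val);
 *	rc = ecore_init_run(p_hwfn, p_ptt, phase, phase_id, modes);
 */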

static enum _ecore_status_t ecore_init_rt(struct ecore_hwfn *p_hwfn,
					  struct ecore_ptt *p_ptt,
					  u32 addr,
					  u16 rt_offset,
					  u16 size,
					  bool b_must_dmae)
{
	u32 *p_init_val = &p_hwfn->rt_data.init_val[rt_offset];
	bool *p_valid = &p_hwfn->rt_data.b_valid[rt_offset];
	u16 i, segment;
	enum _ecore_status_t rc = ECORE_SUCCESS;

	/* Since not all RT entries are initialized, go over the RT and
	 * for each segment of initialized values use DMA.
	 */
	for (i = 0; i < size; i++) {
		if (!p_valid[i])
			continue;

		/* In case there isn't any wide-bus configuration here,
		 * simply write the data instead of using dmae.
		 */
		if (!b_must_dmae) {
			ecore_wr(p_hwfn, p_ptt, addr + (i << 2),
				 p_init_val[i]);
			continue;
		}

		/* Start of a new segment */
		for (segment = 1; i + segment < size; segment++)
			if (!p_valid[i + segment])
				break;

		rc = ecore_dmae_host2grc(p_hwfn, p_ptt,
					 (osal_uintptr_t)(p_init_val + i),
					 addr + (i << 2), segment,
					 OSAL_NULL /* default parameters */);
		if (rc != ECORE_SUCCESS)
			return rc;

		/* Jump over the entire segment, including invalid entry */
		i += segment;
	}

	return rc;
}

enum _ecore_status_t ecore_init_alloc(struct ecore_hwfn *p_hwfn)
{
	struct ecore_rt_data *rt_data = &p_hwfn->rt_data;

	if (IS_VF(p_hwfn->p_dev))
		return ECORE_SUCCESS;

	rt_data->b_valid = OSAL_ZALLOC(p_hwfn->p_dev, GFP_KERNEL,
				       sizeof(bool) * RUNTIME_ARRAY_SIZE);
	if (!rt_data->b_valid)
		return ECORE_NOMEM;

	rt_data->init_val = OSAL_ZALLOC(p_hwfn->p_dev, GFP_KERNEL,
					sizeof(u32) * RUNTIME_ARRAY_SIZE);
	if (!rt_data->init_val) {
		OSAL_FREE(p_hwfn->p_dev, rt_data->b_valid);
		rt_data->b_valid = OSAL_NULL;
		return ECORE_NOMEM;
	}

	return ECORE_SUCCESS;
}

void ecore_init_free(struct ecore_hwfn *p_hwfn)
{
	OSAL_FREE(p_hwfn->p_dev, p_hwfn->rt_data.init_val);
	p_hwfn->rt_data.init_val = OSAL_NULL;
	OSAL_FREE(p_hwfn->p_dev, p_hwfn->rt_data.b_valid);
	p_hwfn->rt_data.b_valid = OSAL_NULL;
}

static enum _ecore_status_t ecore_init_array_dmae(struct ecore_hwfn *p_hwfn,
						  struct ecore_ptt *p_ptt,
						  u32 addr, u32 dmae_data_offset,
						  u32 size, const u32 *p_buf,
						  bool b_must_dmae, bool b_can_dmae)
{
	enum _ecore_status_t rc = ECORE_SUCCESS;

	/* Perform DMAE only for lengthy enough sections or for wide-bus */
#ifndef ASIC_ONLY
	if ((CHIP_REV_IS_SLOW(p_hwfn->p_dev) && (size < 16)) ||
	    !b_can_dmae || (!b_must_dmae && (size < 16))) {
#else
	if (!b_can_dmae || (!b_must_dmae && (size < 16))) {
#endif
		const u32 *data = p_buf + dmae_data_offset;
		u32 i;

		for (i = 0; i < size; i++)
			ecore_wr(p_hwfn, p_ptt, addr + (i << 2), data[i]);
	} else {
		rc = ecore_dmae_host2grc(p_hwfn, p_ptt,
					 (osal_uintptr_t)(p_buf +
							  dmae_data_offset),
					 addr, size,
					 OSAL_NULL /* default parameters */);
	}

	return rc;
}
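
/*
 * Fill helpers for INIT_SRC_ZEROS write commands: ecore_init_fill_dmae()
 * zeroes a GRC region by DMAE-ing a static zero buffer with
 * ECORE_DMAE_FLAG_RW_REPL_SRC set (the source is replicated across the
 * destination), while ecore_init_fill() below falls back to plain register
 * writes for short runs or when DMAE is not available.
 */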

static enum _ecore_status_t ecore_init_fill_dmae(struct ecore_hwfn *p_hwfn,
						 struct ecore_ptt *p_ptt,
						 u32 addr, u32 fill_count)
{
	static u32 zero_buffer[DMAE_MAX_RW_SIZE];
	struct ecore_dmae_params params;

	OSAL_MEMSET(zero_buffer, 0, sizeof(u32) * DMAE_MAX_RW_SIZE);

	OSAL_MEMSET(&params, 0, sizeof(params));
	params.flags = ECORE_DMAE_FLAG_RW_REPL_SRC;
	return ecore_dmae_host2grc(p_hwfn, p_ptt,
				   (osal_uintptr_t)(&(zero_buffer[0])),
				   addr, fill_count, &params);
}

static void ecore_init_fill(struct ecore_hwfn *p_hwfn,
			    struct ecore_ptt *p_ptt,
			    u32 addr, u32 fill, u32 fill_count)
{
	u32 i;

	for (i = 0; i < fill_count; i++, addr += sizeof(u32))
		ecore_wr(p_hwfn, p_ptt, addr, fill);
}

static enum _ecore_status_t ecore_init_cmd_array(struct ecore_hwfn *p_hwfn,
						 struct ecore_ptt *p_ptt,
						 const struct init_write_op *cmd,
						 bool b_must_dmae,
						 bool b_can_dmae)
{
	u32 dmae_array_offset = OSAL_LE32_TO_CPU(cmd->args.array_offset);
	u32 data = OSAL_LE32_TO_CPU(cmd->data);
	u32 addr = GET_FIELD(data, INIT_WRITE_OP_ADDRESS) << 2;
#ifdef CONFIG_ECORE_ZIPPED_FW
	u32 offset, output_len, input_len, max_size;
#endif
	struct ecore_dev *p_dev = p_hwfn->p_dev;
	const union init_array_hdr *hdr;
	const u32 *array_data;
	enum _ecore_status_t rc = ECORE_SUCCESS;
	u32 size;

	array_data = p_dev->fw_data->arr_data;

	hdr = (const union init_array_hdr *)(array_data +
					     dmae_array_offset);
	data = OSAL_LE32_TO_CPU(hdr->raw.data);
	switch (GET_FIELD(data, INIT_ARRAY_RAW_HDR_TYPE)) {
	case INIT_ARR_ZIPPED:
#ifdef CONFIG_ECORE_ZIPPED_FW
		offset = dmae_array_offset + 1;
		input_len = GET_FIELD(data,
				      INIT_ARRAY_ZIPPED_HDR_ZIPPED_SIZE);
		max_size = MAX_ZIPPED_SIZE * 4;
		OSAL_MEMSET(p_hwfn->unzip_buf, 0, max_size);

		output_len = OSAL_UNZIP_DATA(p_hwfn, input_len,
					     (u8 *)&array_data[offset],
					     max_size, (u8 *)p_hwfn->unzip_buf);
		if (output_len) {
			rc = ecore_init_array_dmae(p_hwfn, p_ptt, addr, 0,
						   output_len,
						   p_hwfn->unzip_buf,
						   b_must_dmae, b_can_dmae);
		} else {
			DP_NOTICE(p_hwfn, true,
				  "Failed to unzip dmae data\n");
			rc = ECORE_INVAL;
		}
#else
		DP_NOTICE(p_hwfn, true,
			  "Using zipped firmware without config enabled\n");
		rc = ECORE_INVAL;
#endif
		break;
	case INIT_ARR_PATTERN:
	{
		u32 repeats = GET_FIELD(data,
					INIT_ARRAY_PATTERN_HDR_REPETITIONS);
		u32 i;

		size = GET_FIELD(data,
				 INIT_ARRAY_PATTERN_HDR_PATTERN_SIZE);

		for (i = 0; i < repeats; i++, addr += size << 2) {
			rc = ecore_init_array_dmae(p_hwfn, p_ptt, addr,
						   dmae_array_offset + 1,
						   size, array_data,
						   b_must_dmae, b_can_dmae);
			if (rc)
				break;
		}
		break;
	}
	case INIT_ARR_STANDARD:
		size = GET_FIELD(data,
				 INIT_ARRAY_STANDARD_HDR_SIZE);
		rc = ecore_init_array_dmae(p_hwfn, p_ptt, addr,
					   dmae_array_offset + 1,
					   size, array_data,
					   b_must_dmae, b_can_dmae);
		break;
	}

	return rc;
}

/* init_ops write command */
static enum _ecore_status_t ecore_init_cmd_wr(struct ecore_hwfn *p_hwfn,
					      struct ecore_ptt *p_ptt,
					      const struct init_write_op *p_cmd,
					      bool b_can_dmae)
{
	u32 data = OSAL_LE32_TO_CPU(p_cmd->data);
	bool b_must_dmae = GET_FIELD(data, INIT_WRITE_OP_WIDE_BUS);
	u32 addr = GET_FIELD(data, INIT_WRITE_OP_ADDRESS) << 2;
	enum _ecore_status_t rc = ECORE_SUCCESS;

	/* Sanitize */
	if (b_must_dmae && !b_can_dmae) {
		DP_NOTICE(p_hwfn, true,
			  "Need to write to %08x for Wide-bus but DMAE isn't allowed\n",
			  addr);
		return ECORE_INVAL;
	}

	switch (GET_FIELD(data, INIT_WRITE_OP_SOURCE)) {
	case INIT_SRC_INLINE:
		data = OSAL_LE32_TO_CPU(p_cmd->args.inline_val);
		ecore_wr(p_hwfn, p_ptt, addr, data);
		break;
	case INIT_SRC_ZEROS:
		data = OSAL_LE32_TO_CPU(p_cmd->args.zeros_count);
		if (b_must_dmae || (b_can_dmae && (data >= 64)))
			rc = ecore_init_fill_dmae(p_hwfn, p_ptt, addr, data);
		else
			ecore_init_fill(p_hwfn, p_ptt, addr, 0, data);
		break;
	case INIT_SRC_ARRAY:
		rc = ecore_init_cmd_array(p_hwfn, p_ptt, p_cmd,
					  b_must_dmae, b_can_dmae);
		break;
	case INIT_SRC_RUNTIME:
		rc = ecore_init_rt(p_hwfn, p_ptt, addr,
				   OSAL_LE16_TO_CPU(p_cmd->args.runtime.offset),
				   OSAL_LE16_TO_CPU(p_cmd->args.runtime.size),
				   b_must_dmae);
		break;
	}

	return rc;
}
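
/*
 * Poll-comparison helpers for init_ops read/poll commands: comp_eq() requires
 * an exact match, comp_and() requires all expected bits to be set in the read
 * value, and comp_or() is satisfied when the OR of the read and expected
 * values is non-zero. The one used is selected by the INIT_READ_OP_POLL_TYPE
 * field in ecore_init_cmd_rd() below.
 */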

static OSAL_INLINE bool comp_eq(u32 val, u32 expected_val)
{
	return (val == expected_val);
}

static OSAL_INLINE bool comp_and(u32 val, u32 expected_val)
{
	return (val & expected_val) == expected_val;
}

static OSAL_INLINE bool comp_or(u32 val, u32 expected_val)
{
	return (val | expected_val) > 0;
}

/* init_ops read/poll commands */
static void ecore_init_cmd_rd(struct ecore_hwfn *p_hwfn,
			      struct ecore_ptt *p_ptt,
			      const struct init_read_op *cmd)
{
	bool (*comp_check)(u32 val, u32 expected_val);
	u32 delay = ECORE_INIT_POLL_PERIOD_US, val;
	u32 data, addr, poll;
	int i;

	data = OSAL_LE32_TO_CPU(cmd->op_data);
	addr = GET_FIELD(data, INIT_READ_OP_ADDRESS) << 2;
	poll = GET_FIELD(data, INIT_READ_OP_POLL_TYPE);

#ifndef ASIC_ONLY
	if (CHIP_REV_IS_EMUL(p_hwfn->p_dev))
		delay *= 100;
#endif

	val = ecore_rd(p_hwfn, p_ptt, addr);

	if (poll == INIT_POLL_NONE)
		return;

	switch (poll) {
	case INIT_POLL_EQ:
		comp_check = comp_eq;
		break;
	case INIT_POLL_OR:
		comp_check = comp_or;
		break;
	case INIT_POLL_AND:
		comp_check = comp_and;
		break;
	default:
		DP_ERR(p_hwfn, "Invalid poll comparison type %08x\n",
		       cmd->op_data);
		return;
	}

	data = OSAL_LE32_TO_CPU(cmd->expected_val);
	for (i = 0;
	     i < ECORE_INIT_MAX_POLL_COUNT && !comp_check(val, data);
	     i++) {
		OSAL_UDELAY(delay);
		val = ecore_rd(p_hwfn, p_ptt, addr);
	}

	if (i == ECORE_INIT_MAX_POLL_COUNT)
		DP_ERR(p_hwfn, "Timeout when polling reg: 0x%08x [ Waiting-for: %08x Got: %08x (comparison %08x)]\n",
		       addr,
		       OSAL_LE32_TO_CPU(cmd->expected_val), val,
		       OSAL_LE32_TO_CPU(cmd->op_data));
}

/* init_ops callbacks entry point */
static enum _ecore_status_t ecore_init_cmd_cb(struct ecore_hwfn *p_hwfn,
					      struct ecore_ptt *p_ptt,
					      const struct init_callback_op *p_cmd)
{
	enum _ecore_status_t rc;

	switch (p_cmd->callback_id) {
	case DMAE_READY_CB:
		rc = ecore_dmae_sanity(p_hwfn, p_ptt, "engine_phase");
		break;
	default:
		DP_NOTICE(p_hwfn, false, "Unexpected init op callback ID %d\n",
			  p_cmd->callback_id);
		return ECORE_INVAL;
	}

	return rc;
}
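
/*
 * Evaluate one node of the firmware modes tree at *p_offset and advance the
 * offset past it. The tree is stored in prefix form: INIT_MODE_OP_NOT/OR/AND
 * nodes recurse into one or two sub-expressions, and any other value is a
 * leaf that tests a single bit of the 'modes' mask. The result decides
 * whether an INIT_OP_IF_MODE command skips the block of ops that follows it.
 */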

static u8 ecore_init_cmd_mode_match(struct ecore_hwfn *p_hwfn,
				    u16 *p_offset, int modes)
{
	struct ecore_dev *p_dev = p_hwfn->p_dev;
	const u8 *modes_tree_buf;
	u8 arg1, arg2, tree_val;

	modes_tree_buf = p_dev->fw_data->modes_tree_buf;
	tree_val = modes_tree_buf[(*p_offset)++];
	switch (tree_val) {
	case INIT_MODE_OP_NOT:
		return ecore_init_cmd_mode_match(p_hwfn, p_offset, modes) ^ 1;
	case INIT_MODE_OP_OR:
		arg1 = ecore_init_cmd_mode_match(p_hwfn, p_offset, modes);
		arg2 = ecore_init_cmd_mode_match(p_hwfn, p_offset, modes);
		return arg1 | arg2;
	case INIT_MODE_OP_AND:
		arg1 = ecore_init_cmd_mode_match(p_hwfn, p_offset, modes);
		arg2 = ecore_init_cmd_mode_match(p_hwfn, p_offset, modes);
		return arg1 & arg2;
	default:
		tree_val -= MAX_INIT_MODE_OPS;
		return (modes & (1 << tree_val)) ? 1 : 0;
	}
}

static u32 ecore_init_cmd_mode(struct ecore_hwfn *p_hwfn,
			       const struct init_if_mode_op *p_cmd, int modes)
{
	u16 offset = OSAL_LE16_TO_CPU(p_cmd->modes_buf_offset);

	if (ecore_init_cmd_mode_match(p_hwfn, &offset, modes))
		return 0;
	else
		return GET_FIELD(OSAL_LE32_TO_CPU(p_cmd->op_data),
				 INIT_IF_MODE_OP_CMD_OFFSET);
}

static u32 ecore_init_cmd_phase(const struct init_if_phase_op *p_cmd,
				u32 phase, u32 phase_id)
{
	u32 data = OSAL_LE32_TO_CPU(p_cmd->phase_data);
	u32 op_data = OSAL_LE32_TO_CPU(p_cmd->op_data);

	if (!(GET_FIELD(data, INIT_IF_PHASE_OP_PHASE) == phase &&
	      (GET_FIELD(data, INIT_IF_PHASE_OP_PHASE_ID) == ANY_PHASE_ID ||
	       GET_FIELD(data, INIT_IF_PHASE_OP_PHASE_ID) == phase_id)))
		return GET_FIELD(op_data, INIT_IF_PHASE_OP_CMD_OFFSET);
	else
		return 0;
}

enum _ecore_status_t ecore_init_run(struct ecore_hwfn *p_hwfn,
				    struct ecore_ptt *p_ptt,
				    int phase,
				    int phase_id,
				    int modes)
{
	struct ecore_dev *p_dev = p_hwfn->p_dev;
	u32 cmd_num, num_init_ops;
	const union init_op *init_ops;
	bool b_dmae = false;
	enum _ecore_status_t rc = ECORE_SUCCESS;

	num_init_ops = p_dev->fw_data->init_ops_size;
	init_ops = p_dev->fw_data->init_ops;

#ifdef CONFIG_ECORE_ZIPPED_FW
	p_hwfn->unzip_buf = OSAL_ZALLOC(p_hwfn->p_dev, GFP_ATOMIC,
					MAX_ZIPPED_SIZE * 4);
	if (!p_hwfn->unzip_buf) {
		DP_NOTICE(p_hwfn, true, "Failed to allocate unzip buffer\n");
		return ECORE_NOMEM;
	}
#endif

	for (cmd_num = 0; cmd_num < num_init_ops; cmd_num++) {
		const union init_op *cmd = &init_ops[cmd_num];
		u32 data = OSAL_LE32_TO_CPU(cmd->raw.op_data);

		switch (GET_FIELD(data, INIT_CALLBACK_OP_OP)) {
		case INIT_OP_WRITE:
			rc = ecore_init_cmd_wr(p_hwfn, p_ptt, &cmd->write,
					       b_dmae);
			break;

		case INIT_OP_READ:
			ecore_init_cmd_rd(p_hwfn, p_ptt, &cmd->read);
			break;

		case INIT_OP_IF_MODE:
			cmd_num += ecore_init_cmd_mode(p_hwfn, &cmd->if_mode,
						       modes);
			break;
		case INIT_OP_IF_PHASE:
			cmd_num += ecore_init_cmd_phase(&cmd->if_phase, phase,
							phase_id);
			b_dmae = GET_FIELD(data,
					   INIT_IF_PHASE_OP_DMAE_ENABLE);
			break;
		case INIT_OP_DELAY:
			/* ecore_init_run is always invoked from
			 * sleep-able context
			 */
			OSAL_UDELAY(cmd->delay.delay);
			break;

		case INIT_OP_CALLBACK:
			rc = ecore_init_cmd_cb(p_hwfn, p_ptt, &cmd->callback);
			break;
		}

		if (rc)
			break;
	}
#ifdef CONFIG_ECORE_ZIPPED_FW
	OSAL_FREE(p_hwfn->p_dev, p_hwfn->unzip_buf);
	p_hwfn->unzip_buf = OSAL_NULL;
#endif
	return rc;
}
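
/*
 * Program the global GTT windows in the PXP admin window from the
 * pxp_global_win[] table. On ASIC the underlying PTT/GTT initialization is
 * triggered by the MFW; the emulation/FPGA path below kicks it off explicitly
 * and polls for completion before writing the windows.
 */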

void ecore_gtt_init(struct ecore_hwfn *p_hwfn,
		    struct ecore_ptt *p_ptt)
{
	u32 gtt_base;
	u32 i;

#ifndef ASIC_ONLY
	if (CHIP_REV_IS_SLOW(p_hwfn->p_dev)) {
		/* This is done by MFW on ASIC; regardless, this should only
		 * be done once per chip [i.e., common]. Implementation is
		 * not too bright, but it should work on the simple FPGA/EMUL
		 * scenarios.
		 */
		static bool initialized = false;
		int poll_cnt = 500;
		u32 val;

		/* initialize PTT/GTT (poll for completion) */
		if (!initialized) {
			ecore_wr(p_hwfn, p_ptt,
				 PGLUE_B_REG_START_INIT_PTT_GTT, 1);
			initialized = true;
		}

		do {
			/* ptt might be overridden by HW until this is done */
			OSAL_UDELAY(10);
			ecore_ptt_invalidate(p_hwfn);
			val = ecore_rd(p_hwfn, p_ptt,
				       PGLUE_B_REG_INIT_DONE_PTT_GTT);
		} while ((val != 1) && --poll_cnt);

		if (!poll_cnt)
			DP_ERR(p_hwfn, "PGLUE_B_REG_INIT_DONE didn't complete\n");
	}
#endif

	/* Set the global windows */
	gtt_base = PXP_PF_WINDOW_ADMIN_START + PXP_PF_WINDOW_ADMIN_GLOBAL_START;

	for (i = 0; i < OSAL_ARRAY_SIZE(pxp_global_win); i++)
		if (pxp_global_win[i])
			REG_WR(p_hwfn, gtt_base + i * PXP_GLOBAL_ENTRY_SIZE,
			       pxp_global_win[i]);
}

enum _ecore_status_t ecore_init_fw_data(struct ecore_dev *p_dev,
#ifdef CONFIG_ECORE_BINARY_FW
					const u8 *fw_data)
#else
					const u8 OSAL_UNUSED *fw_data)
#endif
{
	struct ecore_fw_data *fw = p_dev->fw_data;

#ifdef CONFIG_ECORE_BINARY_FW
	struct bin_buffer_hdr *buf_hdr;
	u32 offset, len;

	if (!fw_data) {
		DP_NOTICE(p_dev, true, "Invalid fw data\n");
		return ECORE_INVAL;
	}

	buf_hdr = (struct bin_buffer_hdr *)fw_data;

	offset = buf_hdr[BIN_BUF_INIT_FW_VER_INFO].offset;
	fw->fw_ver_info = (struct fw_ver_info *)(fw_data + offset);

	offset = buf_hdr[BIN_BUF_INIT_CMD].offset;
	fw->init_ops = (union init_op *)(fw_data + offset);

	offset = buf_hdr[BIN_BUF_INIT_VAL].offset;
	fw->arr_data = (u32 *)(fw_data + offset);

	offset = buf_hdr[BIN_BUF_INIT_MODE_TREE].offset;
	fw->modes_tree_buf = (u8 *)(fw_data + offset);
	len = buf_hdr[BIN_BUF_INIT_CMD].length;
	fw->init_ops_size = len / sizeof(struct init_raw_op);
#else
	fw->init_ops = (const union init_op *)init_ops;
	fw->arr_data = (const u32 *)init_val;
	fw->modes_tree_buf = (const u8 *)modes_tree_buf;
	fw->init_ops_size = init_ops_size;
#endif

	return ECORE_SUCCESS;
}