/*
 * Copyright (c) 2017-2018 Cavium, Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * File : ecore_init_ops.c
 *
 * Interpreter for the firmware "init ops" program: a table of opcodes
 * (write/read-poll/if-mode/if-phase/delay) produced by the firmware tools
 * and executed against device registers during HW initialization.  Also
 * manages the per-hwfn runtime (RT) configuration array and the global
 * PXP GTT windows.
 */
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

/* include the precompiled configuration values - only once */
#include "bcm_osal.h"
#include "ecore_hsi_common.h"
#include "ecore.h"
#include "ecore_hw.h"
#include "ecore_status.h"
#include "ecore_rt_defs.h"
#include "ecore_init_fw_funcs.h"

#ifndef CONFIG_ECORE_BINARY_FW
#ifdef CONFIG_ECORE_ZIPPED_FW
#include "ecore_init_values_zipped.h"
#else
#include "ecore_init_values.h"
#endif
#endif

#include "ecore_iro_values.h"
#include "ecore_sriov.h"
#include "ecore_gtt_values.h"
#include "reg_addr.h"
#include "ecore_init_ops.h"

/* Bounds for the read-poll command: up to 100 polls, 500us apart */
#define ECORE_INIT_MAX_POLL_COUNT	100
#define ECORE_INIT_POLL_PERIOD_US	500

/* Point the device at the compiled-in IRO (internal RAM offsets) array. */
void ecore_init_iro_array(struct ecore_dev *p_dev)
{
	p_dev->iro_arr = iro_arr;
}

/* Runtime configuration helpers */

/* Mark every runtime-array entry as not-initialized; only entries stored
 * via ecore_init_store_rt_reg()/_agg() afterwards will be written to HW.
 */
void ecore_init_clear_rt_data(struct ecore_hwfn *p_hwfn)
{
	int i;

	for (i = 0; i < RUNTIME_ARRAY_SIZE; i++)
		p_hwfn->rt_data.b_valid[i] = false;
}

/* Store a single dword @val at runtime-array index @rt_offset and mark the
 * entry valid.  Out-of-range offsets are logged and dropped (no write).
 */
void ecore_init_store_rt_reg(struct ecore_hwfn *p_hwfn,
			     u32 rt_offset, u32 val)
{
	if (rt_offset >= RUNTIME_ARRAY_SIZE) {
		DP_ERR(p_hwfn,
		       "Avoid storing %u in rt_data at index %u since RUNTIME_ARRAY_SIZE is %u!\n",
		       val, rt_offset, RUNTIME_ARRAY_SIZE);
		return;
	}

	p_hwfn->rt_data.init_val[rt_offset] = val;
	p_hwfn->rt_data.b_valid[rt_offset] = true;
}

/* Store an aggregate of dwords starting at runtime-array index @rt_offset.
 *
 * NOTE(review): the loop treats @size as a byte count (iterates
 * size / sizeof(u32) dwords), while the range check treats it as an
 * element count (rt_offset + size - 1).  The check is therefore stricter
 * than necessary but never under-checks; confirm the intended unit against
 * the STORE_RT_REG_AGG-style callers before "fixing" either side.
 */
void ecore_init_store_rt_agg(struct ecore_hwfn *p_hwfn,
			     u32 rt_offset, u32 *p_val,
			     osal_size_t size)
{
	osal_size_t i;

	if ((rt_offset + size - 1) >= RUNTIME_ARRAY_SIZE) {
		DP_ERR(p_hwfn,
		       "Avoid storing values in rt_data at indices %u-%u since RUNTIME_ARRAY_SIZE is %u!\n",
		       rt_offset, (u32)(rt_offset + size - 1),
		       RUNTIME_ARRAY_SIZE);
		return;
	}

	for (i = 0; i < size / sizeof(u32); i++) {
		p_hwfn->rt_data.init_val[rt_offset + i] = p_val[i];

		p_hwfn->rt_data.b_valid[rt_offset + i] = true;
	}
}

/* Flush @size runtime-array entries starting at @rt_offset to GRC address
 * @addr.  Invalid (never-stored) entries are skipped.  When @b_must_dmae is
 * clear each valid entry is written individually; otherwise contiguous runs
 * of valid entries are pushed as one DMAE transaction per run.
 *
 * Returns ECORE_SUCCESS, or the first DMAE failure.
 */
static enum _ecore_status_t ecore_init_rt(struct ecore_hwfn *p_hwfn,
					  struct ecore_ptt *p_ptt,
					  u32 addr,
					  u16 rt_offset,
					  u16 size,
					  bool b_must_dmae)
{
	u32 *p_init_val = &p_hwfn->rt_data.init_val[rt_offset];
	bool *p_valid = &p_hwfn->rt_data.b_valid[rt_offset];
	u16 i, segment;
	enum _ecore_status_t rc = ECORE_SUCCESS;

	/* Since not all RT entries are initialized, go over the RT and
	 * for each segment of initialized values use DMA.
	 */
	for (i = 0; i < size; i++) {
		if (!p_valid[i])
			continue;

		/* In case there isn't any wide-bus configuration here,
		 * simply write the data instead of using dmae.
		 */
		if (!b_must_dmae) {
			ecore_wr(p_hwfn, p_ptt, addr + (i << 2),
				 p_init_val[i]);
			continue;
		}

		/* Start of a new segment - count contiguous valid entries */
		for (segment = 1; i + segment < size; segment++)
			if (!p_valid[i + segment])
				break;

		rc = ecore_dmae_host2grc(p_hwfn, p_ptt,
					 (osal_uintptr_t)(p_init_val + i),
					 addr + (i << 2), segment, 0);
		if (rc != ECORE_SUCCESS)
			return rc;

		/* Jump over the entire segment, including invalid entry
		 * (the loop's i++ accounts for the one invalid/terminating
		 * entry past the segment).
		 */
		i += segment;
	}

	return rc;
}

/* Allocate the runtime-array shadow buffers (valid flags + values).
 * VFs carry no runtime array, so this is a no-op for them.
 *
 * Returns ECORE_NOMEM on allocation failure (nothing left allocated).
 */
enum _ecore_status_t ecore_init_alloc(struct ecore_hwfn *p_hwfn)
{
	struct ecore_rt_data *rt_data = &p_hwfn->rt_data;

	if (IS_VF(p_hwfn->p_dev))
		return ECORE_SUCCESS;

	rt_data->b_valid = OSAL_ZALLOC(p_hwfn->p_dev, GFP_KERNEL,
				       sizeof(bool) * RUNTIME_ARRAY_SIZE);
	if (!rt_data->b_valid)
		return ECORE_NOMEM;

	rt_data->init_val = OSAL_ZALLOC(p_hwfn->p_dev, GFP_KERNEL,
					sizeof(u32) * RUNTIME_ARRAY_SIZE);
	if (!rt_data->init_val) {
		OSAL_FREE(p_hwfn->p_dev, rt_data->b_valid);
		rt_data->b_valid = OSAL_NULL;
		return ECORE_NOMEM;
	}

	return ECORE_SUCCESS;
}

/* Release the runtime-array buffers allocated by ecore_init_alloc().
 * Pointers are NULLed so a repeated call is harmless.
 */
void ecore_init_free(struct ecore_hwfn *p_hwfn)
{
	OSAL_FREE(p_hwfn->p_dev, p_hwfn->rt_data.init_val);
	p_hwfn->rt_data.init_val = OSAL_NULL;
	OSAL_FREE(p_hwfn->p_dev, p_hwfn->rt_data.b_valid);
	p_hwfn->rt_data.b_valid = OSAL_NULL;
}

/* Write @size dwords from @p_buf + @dmae_data_offset to GRC address @addr.
 * Short sections (and anything on slow emul/FPGA chips, or when DMAE is
 * disallowed) are written register-by-register; otherwise one DMAE
 * transaction is used.
 */
static enum _ecore_status_t ecore_init_array_dmae(struct ecore_hwfn *p_hwfn,
						  struct ecore_ptt *p_ptt,
						  u32 addr, u32 dmae_data_offset,
						  u32 size, const u32 *p_buf,
						  bool b_must_dmae, bool b_can_dmae)
{
	enum _ecore_status_t rc = ECORE_SUCCESS;

	/* Perform DMAE only for lengthy enough sections or for wide-bus */
#ifndef ASIC_ONLY
	if ((CHIP_REV_IS_SLOW(p_hwfn->p_dev) && (size < 16)) ||
	    !b_can_dmae || (!b_must_dmae && (size < 16))) {
#else
	if (!b_can_dmae || (!b_must_dmae && (size < 16))) {
#endif
		const u32 *data = p_buf + dmae_data_offset;
		u32 i;

		for (i = 0; i < size; i++)
			ecore_wr(p_hwfn, p_ptt, addr + (i << 2), data[i]);
	} else {
		rc = ecore_dmae_host2grc(p_hwfn, p_ptt,
					 (osal_uintptr_t)(p_buf +
							  dmae_data_offset),
					 addr, size, 0);
	}

	return rc;
}

/* Zero @fill_count dwords at GRC address @addr via a single DMAE with the
 * RW_REPL_SRC flag (source replication), so the static zero buffer need not
 * cover the whole fill length.
 *
 * NOTE(review): the shared static zero_buffer is re-memset on every call;
 * concurrent callers would race on it harmlessly (it only ever holds zeros),
 * but confirm single-threaded init context before relying on that.
 */
static enum _ecore_status_t ecore_init_fill_dmae(struct ecore_hwfn *p_hwfn,
						 struct ecore_ptt *p_ptt,
						 u32 addr, u32 fill_count)
{
	static u32 zero_buffer[DMAE_MAX_RW_SIZE];

	OSAL_MEMSET(zero_buffer, 0, sizeof(u32) * DMAE_MAX_RW_SIZE);

	return ecore_dmae_host2grc(p_hwfn, p_ptt,
				   (osal_uintptr_t)(&(zero_buffer[0])),
				   addr, fill_count,
				   ECORE_DMAE_FLAG_RW_REPL_SRC);
}

/* Write the constant @fill to @fill_count consecutive dword registers
 * starting at @addr (slow path; no DMAE).
 */
static void ecore_init_fill(struct ecore_hwfn *p_hwfn,
			    struct ecore_ptt *p_ptt,
			    u32 addr, u32 fill, u32 fill_count)
{
	u32 i;

	for (i = 0; i < fill_count; i++, addr += sizeof(u32))
		ecore_wr(p_hwfn, p_ptt, addr, fill);
}

/* Execute an INIT_SRC_ARRAY write command: locate the array header inside
 * the firmware's arr_data blob and dispatch on its type:
 *  - INIT_ARR_ZIPPED:   unzip into p_hwfn->unzip_buf, then DMAE/write out
 *                       (requires CONFIG_ECORE_ZIPPED_FW).
 *  - INIT_ARR_PATTERN:  write the same pattern @repeats times at advancing
 *                       addresses.
 *  - INIT_ARR_STANDARD: write the array once.
 */
static enum _ecore_status_t ecore_init_cmd_array(struct ecore_hwfn *p_hwfn,
						 struct ecore_ptt *p_ptt,
						 struct init_write_op *cmd,
						 bool b_must_dmae,
						 bool b_can_dmae)
{
	u32 dmae_array_offset = OSAL_LE32_TO_CPU(cmd->args.array_offset);
	u32 data = OSAL_LE32_TO_CPU(cmd->data);
	u32 addr = GET_FIELD(data, INIT_WRITE_OP_ADDRESS) << 2;
#ifdef CONFIG_ECORE_ZIPPED_FW
	u32 offset, output_len, input_len, max_size;
#endif
	struct ecore_dev *p_dev = p_hwfn->p_dev;
	union init_array_hdr *hdr;
	const u32 *array_data;
	enum _ecore_status_t rc = ECORE_SUCCESS;
	u32 size;

	array_data = p_dev->fw_data->arr_data;

	hdr = (union init_array_hdr *) (array_data +
					dmae_array_offset);
	data = OSAL_LE32_TO_CPU(hdr->raw.data);
	switch (GET_FIELD(data, INIT_ARRAY_RAW_HDR_TYPE)) {
	case INIT_ARR_ZIPPED:
#ifdef CONFIG_ECORE_ZIPPED_FW
		/* Payload starts one dword past the header */
		offset = dmae_array_offset + 1;
		input_len = GET_FIELD(data,
				      INIT_ARRAY_ZIPPED_HDR_ZIPPED_SIZE);
		max_size = MAX_ZIPPED_SIZE * 4;
		OSAL_MEMSET(p_hwfn->unzip_buf, 0, max_size);

		output_len = OSAL_UNZIP_DATA(p_hwfn, input_len,
					     (u8 *)&array_data[offset],
					     max_size, (u8 *)p_hwfn->unzip_buf);
		if (output_len) {
			rc = ecore_init_array_dmae(p_hwfn, p_ptt, addr, 0,
						   output_len,
						   p_hwfn->unzip_buf,
						   b_must_dmae, b_can_dmae);
		} else {
			DP_NOTICE(p_hwfn, true,
				  "Failed to unzip dmae data\n");
			rc = ECORE_INVAL;
		}
#else
		DP_NOTICE(p_hwfn, true,
			  "Using zipped firmware without config enabled\n");
		rc = ECORE_INVAL;
#endif
		break;
	case INIT_ARR_PATTERN:
	{
		u32 repeats = GET_FIELD(data,
					INIT_ARRAY_PATTERN_HDR_REPETITIONS);
		u32 i;

		size = GET_FIELD(data,
				 INIT_ARRAY_PATTERN_HDR_PATTERN_SIZE);

		/* Replay the same source pattern, advancing the target
		 * GRC address by the pattern size (in bytes) each time.
		 */
		for (i = 0; i < repeats; i++, addr += size << 2) {
			rc = ecore_init_array_dmae(p_hwfn, p_ptt, addr,
						   dmae_array_offset + 1,
						   size, array_data,
						   b_must_dmae, b_can_dmae);
			if (rc)
				break;
		}
		break;
	}
	case INIT_ARR_STANDARD:
		size = GET_FIELD(data,
				 INIT_ARRAY_STANDARD_HDR_SIZE);
		rc = ecore_init_array_dmae(p_hwfn, p_ptt, addr,
					   dmae_array_offset + 1,
					   size, array_data,
					   b_must_dmae, b_can_dmae);
		break;
	}

	return rc;
}

/* init_ops write command - dispatch on the write source:
 * inline value, zero-fill, array blob, or runtime array.
 * Wide-bus targets require DMAE; reject the command if DMAE is disallowed.
 */
static enum _ecore_status_t ecore_init_cmd_wr(struct ecore_hwfn *p_hwfn,
					      struct ecore_ptt *p_ptt,
					      struct init_write_op *p_cmd,
					      bool b_can_dmae)
{
	u32 data = OSAL_LE32_TO_CPU(p_cmd->data);
	bool b_must_dmae = GET_FIELD(data, INIT_WRITE_OP_WIDE_BUS);
	u32 addr = GET_FIELD(data, INIT_WRITE_OP_ADDRESS) << 2;
	enum _ecore_status_t rc = ECORE_SUCCESS;

	/* Sanitize */
	if (b_must_dmae && !b_can_dmae) {
		DP_NOTICE(p_hwfn, true,
			  "Need to write to %08x for Wide-bus but DMAE isn't allowed\n",
			  addr);
		return ECORE_INVAL;
	}

	switch (GET_FIELD(data, INIT_WRITE_OP_SOURCE)) {
	case INIT_SRC_INLINE:
		data = OSAL_LE32_TO_CPU(p_cmd->args.inline_val);
		ecore_wr(p_hwfn, p_ptt, addr, data);
		break;
	case INIT_SRC_ZEROS:
		data = OSAL_LE32_TO_CPU(p_cmd->args.zeros_count);
		/* DMAE the fill for wide-bus or large (>= 64 dwords) spans */
		if (b_must_dmae || (b_can_dmae && (data >= 64)))
			rc = ecore_init_fill_dmae(p_hwfn, p_ptt, addr, data);
		else
			ecore_init_fill(p_hwfn, p_ptt, addr, 0, data);
		break;
	case INIT_SRC_ARRAY:
		rc = ecore_init_cmd_array(p_hwfn, p_ptt, p_cmd,
					  b_must_dmae, b_can_dmae);
		break;
	case INIT_SRC_RUNTIME:
		ecore_init_rt(p_hwfn, p_ptt, addr,
			      OSAL_LE16_TO_CPU(p_cmd->args.runtime.offset),
			      OSAL_LE16_TO_CPU(p_cmd->args.runtime.size),
			      b_must_dmae);
		break;
	}

	return rc;
}

/* Poll comparators for the read command */

/* True when the read value equals the expected value exactly */
static OSAL_INLINE bool comp_eq(u32 val, u32 expected_val)
{
	return (val == expected_val);
}

/* True when every expected bit is set in the read value */
static OSAL_INLINE bool comp_and(u32 val, u32 expected_val)
{
	return (val & expected_val) == expected_val;
}

/* True when either the read value or the expected value is non-zero.
 * NOTE(review): this ORs rather than masking (val & expected) - it matches
 * the upstream comparator as-is, but its utility as an "OR poll" is only
 * meaningful when expected_val is 0; confirm against firmware usage.
 */
static OSAL_INLINE bool comp_or(u32 val, u32 expected_val)
{
	return (val | expected_val) > 0;
}

/* init_ops read/poll commands: read a register once and, unless the poll
 * type is NONE, re-read every ECORE_INIT_POLL_PERIOD_US (x100 on emulation)
 * until the comparator passes or ECORE_INIT_MAX_POLL_COUNT attempts elapse.
 * A timeout is logged but not propagated to the caller.
 */
static void ecore_init_cmd_rd(struct ecore_hwfn *p_hwfn,
			      struct ecore_ptt *p_ptt,
			      struct init_read_op *cmd)
{
	bool (*comp_check)(u32 val, u32 expected_val);
	u32 delay = ECORE_INIT_POLL_PERIOD_US, val;
	u32 data, addr, poll;
	int i;

	data = OSAL_LE32_TO_CPU(cmd->op_data);
	addr = GET_FIELD(data, INIT_READ_OP_ADDRESS) << 2;
	poll = GET_FIELD(data, INIT_READ_OP_POLL_TYPE);

#ifndef ASIC_ONLY
	if (CHIP_REV_IS_EMUL(p_hwfn->p_dev))
		delay *= 100;
#endif

	val = ecore_rd(p_hwfn, p_ptt, addr);

	if (poll == INIT_POLL_NONE)
		return;

	switch (poll) {
	case INIT_POLL_EQ:
		comp_check = comp_eq;
		break;
	case INIT_POLL_OR:
		comp_check = comp_or;
		break;
	case INIT_POLL_AND:
		comp_check = comp_and;
		break;
	default:
		/* NOTE(review): op_data is printed without LE-to-CPU
		 * conversion here, unlike the timeout print below.
		 */
		DP_ERR(p_hwfn, "Invalid poll comparison type %08x\n",
		       cmd->op_data);
		return;
	}

	data = OSAL_LE32_TO_CPU(cmd->expected_val);
	for (i = 0;
	     i < ECORE_INIT_MAX_POLL_COUNT && !comp_check(val, data);
	     i++) {
		OSAL_UDELAY(delay);
		val = ecore_rd(p_hwfn, p_ptt, addr);
	}

	if (i == ECORE_INIT_MAX_POLL_COUNT)
		DP_ERR(p_hwfn, "Timeout when polling reg: 0x%08x [ Waiting-for: %08x Got: %08x (comparsion %08x)]\n",
		       addr,
		       OSAL_LE32_TO_CPU(cmd->expected_val), val,
		       OSAL_LE32_TO_CPU(cmd->op_data));
}

/* Recursively evaluate the firmware's mode-tree expression (prefix
 * notation: NOT/OR/AND operators, leaves are mode-bit indices offset by
 * MAX_INIT_MODE_OPS) against the @modes bitmask.  *p_offset advances past
 * the consumed expression.  Returns 1 when the expression matches.
 */
static u8 ecore_init_cmd_mode_match(struct ecore_hwfn *p_hwfn,
				    u16 *p_offset, int modes)
{
	struct ecore_dev *p_dev = p_hwfn->p_dev;
	const u8 *modes_tree_buf;
	u8 arg1, arg2, tree_val;

	modes_tree_buf = p_dev->fw_data->modes_tree_buf;
	tree_val = modes_tree_buf[(*p_offset)++];
	switch(tree_val) {
	case INIT_MODE_OP_NOT:
		return ecore_init_cmd_mode_match(p_hwfn, p_offset, modes) ^ 1;
	case INIT_MODE_OP_OR:
		arg1 = ecore_init_cmd_mode_match(p_hwfn, p_offset, modes);
		arg2 = ecore_init_cmd_mode_match(p_hwfn, p_offset, modes);
		return arg1 | arg2;
	case INIT_MODE_OP_AND:
		arg1 = ecore_init_cmd_mode_match(p_hwfn, p_offset, modes);
		arg2 = ecore_init_cmd_mode_match(p_hwfn, p_offset, modes);
		return arg1 & arg2;
	default:
		/* Leaf: value past the operator range encodes a mode bit */
		tree_val -= MAX_INIT_MODE_OPS;
		return (modes & (1 << tree_val)) ? 1 : 0;
	}
}

/* IF_MODE op: returns 0 to continue with the next command when the mode
 * expression matches, otherwise the number of commands to skip.
 */
static u32 ecore_init_cmd_mode(struct ecore_hwfn *p_hwfn,
			       struct init_if_mode_op *p_cmd, int modes)
{
	u16 offset = OSAL_LE16_TO_CPU(p_cmd->modes_buf_offset);

	if (ecore_init_cmd_mode_match(p_hwfn, &offset, modes))
		return 0;
	else
		return GET_FIELD(OSAL_LE32_TO_CPU(p_cmd->op_data),
				 INIT_IF_MODE_OP_CMD_OFFSET);
}

/* IF_PHASE op: returns 0 when the command's phase (and phase-id, unless
 * ANY_PHASE_ID) matches the current one, otherwise the number of commands
 * to skip.
 */
static u32 ecore_init_cmd_phase(struct init_if_phase_op *p_cmd,
				u32 phase, u32 phase_id)
{
	u32 data = OSAL_LE32_TO_CPU(p_cmd->phase_data);

	if (!(GET_FIELD(data, INIT_IF_PHASE_OP_PHASE) == phase &&
	      (GET_FIELD(data, INIT_IF_PHASE_OP_PHASE_ID) == ANY_PHASE_ID ||
	       GET_FIELD(data, INIT_IF_PHASE_OP_PHASE_ID) == phase_id)))
		return GET_FIELD(OSAL_LE32_TO_CPU(p_cmd->op_data),
				 INIT_IF_PHASE_OP_CMD_OFFSET);
	else
		return 0;
}

/* Run the firmware init-ops program for the given @phase/@phase_id under
 * the @modes bitmask.  Iterates the op table, dispatching each opcode;
 * IF_MODE/IF_PHASE ops advance cmd_num to skip non-matching regions, and
 * IF_PHASE also latches whether DMAE may be used for subsequent writes.
 * Stops on the first failing write command and returns its status.
 */
enum _ecore_status_t ecore_init_run(struct ecore_hwfn *p_hwfn,
				    struct ecore_ptt *p_ptt,
				    int phase,
				    int phase_id,
				    int modes)
{
	struct ecore_dev *p_dev = p_hwfn->p_dev;
	u32 cmd_num, num_init_ops;
	union init_op *init_ops;
	bool b_dmae = false;
	enum _ecore_status_t rc = ECORE_SUCCESS;

	num_init_ops = p_dev->fw_data->init_ops_size;
	init_ops = p_dev->fw_data->init_ops;

#ifdef CONFIG_ECORE_ZIPPED_FW
	/* Scratch buffer for INIT_ARR_ZIPPED payloads, freed on exit */
	p_hwfn->unzip_buf = OSAL_ZALLOC(p_hwfn->p_dev, GFP_ATOMIC,
					MAX_ZIPPED_SIZE * 4);
	if (!p_hwfn->unzip_buf) {
		DP_NOTICE(p_hwfn, true, "Failed to allocate unzip buffer\n");
		return ECORE_NOMEM;
	}
#endif

	for (cmd_num = 0; cmd_num < num_init_ops; cmd_num++) {
		union init_op *cmd = &init_ops[cmd_num];
		u32 data = OSAL_LE32_TO_CPU(cmd->raw.op_data);

		switch (GET_FIELD(data, INIT_CALLBACK_OP_OP)) {
		case INIT_OP_WRITE:
			rc = ecore_init_cmd_wr(p_hwfn, p_ptt, &cmd->write,
					       b_dmae);
			break;

		case INIT_OP_READ:
			ecore_init_cmd_rd(p_hwfn, p_ptt, &cmd->read);
			break;

		case INIT_OP_IF_MODE:
			cmd_num += ecore_init_cmd_mode(p_hwfn, &cmd->if_mode,
						       modes);
			break;
		case INIT_OP_IF_PHASE:
			cmd_num += ecore_init_cmd_phase(&cmd->if_phase, phase,
							phase_id);
			b_dmae = GET_FIELD(data,
					   INIT_IF_PHASE_OP_DMAE_ENABLE);
			break;
		case INIT_OP_DELAY:
			/* ecore_init_run is always invoked from
			 * sleep-able context
			 */
			OSAL_UDELAY(cmd->delay.delay);
			break;

		case INIT_OP_CALLBACK:
			DP_NOTICE(p_hwfn, true,
				  "Currently init values have no need of callbacks\n");
			break;
		}

		if (rc)
			break;
	}
#ifdef CONFIG_ECORE_ZIPPED_FW
	OSAL_FREE(p_hwfn->p_dev, p_hwfn->unzip_buf);
	p_hwfn->unzip_buf = OSAL_NULL;
#endif
	return rc;
}

/* Program the global GTT windows.  On emulation/FPGA, first trigger the
 * PGLUE PTT/GTT init (once per chip) and poll for its completion, since
 * the MFW - which does this on ASIC - is absent there.
 */
void ecore_gtt_init(struct ecore_hwfn *p_hwfn,
		    struct ecore_ptt *p_ptt)
{
	u32 gtt_base;
	u32 i;

#ifndef ASIC_ONLY
	if (CHIP_REV_IS_SLOW(p_hwfn->p_dev)) {
		/* This is done by MFW on ASIC; regardless, this should only
		 * be done once per chip [i.e., common]. Implementation is
		 * not too bright, but it should work on the simple FPGA/EMUL
		 * scenarios.
		 */
		static bool initialized = false;
		int poll_cnt = 500;
		u32 val;

		/* initialize PTT/GTT (poll for completion) */
		if (!initialized) {
			ecore_wr(p_hwfn, p_ptt,
				 PGLUE_B_REG_START_INIT_PTT_GTT, 1);
			initialized = true;
		}

		do {
			/* ptt might be overrided by HW until this is done */
			OSAL_UDELAY(10);
			ecore_ptt_invalidate(p_hwfn);
			val = ecore_rd(p_hwfn, p_ptt,
				       PGLUE_B_REG_INIT_DONE_PTT_GTT);
		} while ((val != 1) && --poll_cnt);

		if (!poll_cnt)
			DP_ERR(p_hwfn, "PGLUE_B_REG_INIT_DONE didn't complete\n");
	}
#endif

	/* Set the global windows */
	gtt_base = PXP_PF_WINDOW_ADMIN_START + PXP_PF_WINDOW_ADMIN_GLOBAL_START;

	for (i = 0; i < OSAL_ARRAY_SIZE(pxp_global_win); i++)
		if (pxp_global_win[i])
			REG_WR(p_hwfn, gtt_base + i * PXP_GLOBAL_ENTRY_SIZE,
			       pxp_global_win[i]);
}

/* Populate p_dev->fw_data from the firmware image.  With
 * CONFIG_ECORE_BINARY_FW the buffer-header table at the start of @fw_data
 * locates each section (version info, init ops, init values, mode tree);
 * the resulting pointers alias into @fw_data, so the caller must keep the
 * firmware buffer alive for the device's lifetime.  Without binary FW the
 * compiled-in tables are used and @fw_data is ignored.
 *
 * Returns ECORE_INVAL when binary FW is expected but @fw_data is NULL.
 */
enum _ecore_status_t ecore_init_fw_data(struct ecore_dev *p_dev,
#ifdef CONFIG_ECORE_BINARY_FW
					const u8 *fw_data)
#else
					const u8 OSAL_UNUSED *fw_data)
#endif
{
	struct ecore_fw_data *fw = p_dev->fw_data;

#ifdef CONFIG_ECORE_BINARY_FW
	struct bin_buffer_hdr *buf_hdr;
	u32 offset, len;

	if (!fw_data) {
		DP_NOTICE(p_dev, true, "Invalid fw data\n");
		return ECORE_INVAL;
	}

	buf_hdr = (struct bin_buffer_hdr *)fw_data;

	offset = buf_hdr[BIN_BUF_INIT_FW_VER_INFO].offset;
	fw->fw_ver_info = (struct fw_ver_info *)(fw_data + offset);

	offset = buf_hdr[BIN_BUF_INIT_CMD].offset;
	fw->init_ops = (union init_op *)(fw_data + offset);

	offset = buf_hdr[BIN_BUF_INIT_VAL].offset;
	fw->arr_data = (u32 *)(fw_data + offset);

	offset = buf_hdr[BIN_BUF_INIT_MODE_TREE].offset;
	fw->modes_tree_buf = (u8 *)(fw_data + offset);
	len = buf_hdr[BIN_BUF_INIT_CMD].length;
	fw->init_ops_size = len / sizeof(struct init_raw_op);
#else
	fw->init_ops = (union init_op *)init_ops;
	fw->arr_data = (u32 *)init_val;
	fw->modes_tree_buf = (u8 *)modes_tree_buf;
	fw->init_ops_size = init_ops_size;
#endif

	return ECORE_SUCCESS;
}