1 /* 2 * CDDL HEADER START 3 * 4 * The contents of this file are subject to the terms of the 5 * Common Development and Distribution License, v.1, (the "License"). 6 * You may not use this file except in compliance with the License. 7 * 8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE 9 * or http://opensource.org/licenses/CDDL-1.0. 10 * See the License for the specific language governing permissions 11 * and limitations under the License. 12 * 13 * When distributing Covered Code, include this CDDL HEADER in each 14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE. 15 * If applicable, add the following below this CDDL HEADER, with the 16 * fields enclosed by brackets "[]" replaced with your own identifying 17 * information: Portions Copyright [yyyy] [name of copyright owner] 18 * 19 * CDDL HEADER END 20 */ 21 22 /* 23 * Copyright 2014-2017 Cavium, Inc. 24 * The contents of this file are subject to the terms of the Common Development 25 * and Distribution License, v.1, (the "License"). 26 27 * You may not use this file except in compliance with the License. 28 29 * You can obtain a copy of the License at available 30 * at http://opensource.org/licenses/CDDL-1.0 31 32 * See the License for the specific language governing permissions and 33 * limitations under the License. 34 */ 35 36 #include "bcm_osal.h" 37 #include "reg_addr.h" 38 #include "common_hsi.h" 39 #include "ecore_hsi_common.h" 40 #include "ecore_hsi_eth.h" 41 #include "tcp_common.h" 42 #include "ecore_hsi_iscsi.h" 43 #include "ecore_hsi_fcoe.h" 44 #include "ecore_hsi_roce.h" 45 #include "ecore_hsi_iwarp.h" 46 #include "ecore_rt_defs.h" 47 #include "ecore_status.h" 48 #include "ecore.h" 49 #include "ecore_init_ops.h" 50 #include "ecore_init_fw_funcs.h" 51 #include "ecore_cxt.h" 52 #include "ecore_hw.h" 53 #include "ecore_dev_api.h" 54 #include "ecore_sriov.h" 55 #include "ecore_roce.h" 56 #include "ecore_mcp.h" 57 58 /* Max number of connection types in HW (DQ/CDU etc.) */ 59 #define MAX_CONN_TYPES PROTOCOLID_COMMON 60 #define NUM_TASK_TYPES 2 61 #define NUM_TASK_PF_SEGMENTS 4 62 #define NUM_TASK_VF_SEGMENTS 1 63 64 /* Doorbell-Queue constants */ 65 #define DQ_RANGE_SHIFT 4 66 #define DQ_RANGE_ALIGN (1 << DQ_RANGE_SHIFT) 67 68 /* Searcher constants */ 69 #define SRC_MIN_NUM_ELEMS 256 70 71 /* Timers constants */ 72 #define TM_SHIFT 7 73 #define TM_ALIGN (1 << TM_SHIFT) 74 #define TM_ELEM_SIZE 4 75 76 /* ILT constants */ 77 /* If for some reason, HW P size is modified to be less than 32K, 78 * special handling needs to be made for CDU initialization 79 */ 80 #ifdef CONFIG_ECORE_ROCE 81 /* For RoCE we configure to 64K to cover for RoCE max tasks 256K purpose. Can be 82 * optimized with resource management scheme 83 */ 84 #define ILT_DEFAULT_HW_P_SIZE 4 85 #else 86 #define ILT_DEFAULT_HW_P_SIZE 3 87 #endif 88 89 #define ILT_PAGE_IN_BYTES(hw_p_size) (1U << ((hw_p_size) + 12)) 90 #define ILT_CFG_REG(cli, reg) PSWRQ2_REG_##cli##_##reg##_RT_OFFSET 91 92 /* ILT entry structure */ 93 #define ILT_ENTRY_PHY_ADDR_MASK 0x000FFFFFFFFFFFULL 94 #define ILT_ENTRY_PHY_ADDR_SHIFT 0 95 #define ILT_ENTRY_VALID_MASK 0x1ULL 96 #define ILT_ENTRY_VALID_SHIFT 52 97 #define ILT_ENTRY_IN_REGS 2 98 #define ILT_REG_SIZE_IN_BYTES 4 99 100 /* connection context union */ 101 union conn_context { 102 struct core_conn_context core_ctx; 103 struct eth_conn_context eth_ctx; 104 struct iscsi_conn_context iscsi_ctx; 105 struct fcoe_conn_context fcoe_ctx; 106 struct roce_conn_context roce_ctx; 107 }; 108 109 /* TYPE-0 task context - iSCSI, FCOE */ 110 union type0_task_context { 111 struct iscsi_task_context iscsi_ctx; 112 struct fcoe_task_context fcoe_ctx; 113 }; 114 115 /* TYPE-1 task context - ROCE */ 116 union type1_task_context { 117 struct rdma_task_context roce_ctx; 118 }; 119 120 struct src_ent { 121 u8 opaque[56]; 122 u64 next; 123 }; 124 125 #define CDUT_SEG_ALIGNMET 3 /* in 4k chunks */ 126 #define CDUT_SEG_ALIGNMET_IN_BYTES (1 << (CDUT_SEG_ALIGNMET + 12)) 127 128 #define CONN_CXT_SIZE(p_hwfn) \ 129 ALIGNED_TYPE_SIZE(union conn_context, p_hwfn) 130 131 #define SRQ_CXT_SIZE (sizeof(struct rdma_srq_context)) 132 133 #define TYPE0_TASK_CXT_SIZE(p_hwfn) \ 134 ALIGNED_TYPE_SIZE(union type0_task_context, p_hwfn) 135 136 /* Alignment is inherent to the type1_task_context structure */ 137 #define TYPE1_TASK_CXT_SIZE(p_hwfn) sizeof(union type1_task_context) 138 139 /* PF per protocl configuration object */ 140 #define TASK_SEGMENTS (NUM_TASK_PF_SEGMENTS + NUM_TASK_VF_SEGMENTS) 141 #define TASK_SEGMENT_VF (NUM_TASK_PF_SEGMENTS) 142 143 struct ecore_tid_seg { 144 u32 count; 145 u8 type; 146 bool has_fl_mem; 147 }; 148 149 struct ecore_conn_type_cfg { 150 u32 cid_count; 151 u32 cids_per_vf; 152 struct ecore_tid_seg tid_seg[TASK_SEGMENTS]; 153 }; 154 155 /* ILT Client configuration, 156 * Per connection type (protocol) resources (cids, tis, vf cids etc.) 157 * 1 - for connection context (CDUC) and for each task context we need two 158 * values, for regular task context and for force load memory 159 */ 160 #define ILT_CLI_PF_BLOCKS (1 + NUM_TASK_PF_SEGMENTS * 2) 161 #define ILT_CLI_VF_BLOCKS (1 + NUM_TASK_VF_SEGMENTS * 2) 162 #define CDUC_BLK (0) 163 #define SRQ_BLK (0) 164 #define CDUT_SEG_BLK(n) (1 + (u8)(n)) 165 #define CDUT_FL_SEG_BLK(n, X) (1 + (n) + NUM_TASK_##X##_SEGMENTS) 166 167 enum ilt_clients { 168 ILT_CLI_CDUC, 169 ILT_CLI_CDUT, 170 ILT_CLI_QM, 171 ILT_CLI_TM, 172 ILT_CLI_SRC, 173 ILT_CLI_TSDM, 174 ILT_CLI_MAX 175 }; 176 177 struct ilt_cfg_pair { 178 u32 reg; 179 u32 val; 180 }; 181 182 struct ecore_ilt_cli_blk { 183 u32 total_size; /* 0 means not active */ 184 u32 real_size_in_page; 185 u32 start_line; 186 u32 dynamic_line_cnt; 187 }; 188 189 struct ecore_ilt_client_cfg { 190 bool active; 191 192 /* ILT boundaries */ 193 struct ilt_cfg_pair first; 194 struct ilt_cfg_pair last; 195 struct ilt_cfg_pair p_size; 196 197 /* ILT client blocks for PF */ 198 struct ecore_ilt_cli_blk pf_blks[ILT_CLI_PF_BLOCKS]; 199 u32 pf_total_lines; 200 201 /* ILT client blocks for VFs */ 202 struct ecore_ilt_cli_blk vf_blks[ILT_CLI_VF_BLOCKS]; 203 u32 vf_total_lines; 204 }; 205 206 /* Per Path - 207 * ILT shadow table 208 * Protocol acquired CID lists 209 * PF start line in ILT 210 */ 211 struct ecore_dma_mem { 212 dma_addr_t p_phys; 213 void *p_virt; 214 osal_size_t size; 215 }; 216 217 #define MAP_WORD_SIZE sizeof(unsigned long) 218 #define BITS_PER_MAP_WORD (MAP_WORD_SIZE * 8) 219 220 struct ecore_cid_acquired_map { 221 u32 start_cid; 222 u32 max_count; 223 unsigned long *cid_map; 224 }; 225 226 struct ecore_cxt_mngr { 227 /* Per protocl configuration */ 228 struct ecore_conn_type_cfg conn_cfg[MAX_CONN_TYPES]; 229 230 /* computed ILT structure */ 231 struct ecore_ilt_client_cfg clients[ILT_CLI_MAX]; 232 233 /* Task type sizes */ 234 u32 task_type_size[NUM_TASK_TYPES]; 235 236 /* total number of VFs for this hwfn - 237 * ALL VFs are symmetric in terms of HW resources 238 */ 239 u32 vf_count; 240 241 /* Acquired CIDs */ 242 struct ecore_cid_acquired_map acquired[MAX_CONN_TYPES]; 243 /* TBD - do we want this allocated to reserve space? */ 244 struct ecore_cid_acquired_map acquired_vf[MAX_CONN_TYPES][COMMON_MAX_NUM_VFS]; 245 246 /* ILT shadow table */ 247 struct ecore_dma_mem *ilt_shadow; 248 u32 pf_start_line; 249 250 /* Mutex for a dynamic ILT allocation */ 251 osal_mutex_t mutex; 252 253 /* SRC T2 */ 254 struct ecore_dma_mem *t2; 255 u32 t2_num_pages; 256 u64 first_free; 257 u64 last_free; 258 259 /* The infrastructure originally was very generic and context/task 260 * oriented - per connection-type we would set how many of those 261 * are needed, and later when determining how much memory we're 262 * needing for a given block we'd iterate over all the relevant 263 * connection-types. 264 * But since then we've had some additional resources, some of which 265 * require memory which is indepent of the general context/task 266 * scheme. We add those here explicitly per-feature. 267 */ 268 269 /* total number of SRQ's for this hwfn */ 270 u32 srq_count; 271 272 /* Maximal number of L2 steering filters */ 273 u32 arfs_count; 274 275 /* TODO - VF arfs filters ? */ 276 }; 277 278 /* check if resources/configuration is required according to protocol type */ 279 static bool src_proto(struct ecore_hwfn *p_hwfn, 280 enum protocol_type type) 281 { 282 return type == PROTOCOLID_ISCSI || 283 type == PROTOCOLID_FCOE || 284 type == PROTOCOLID_TOE || 285 type == PROTOCOLID_IWARP; 286 } 287 288 static bool tm_cid_proto(enum protocol_type type) 289 { 290 return type == PROTOCOLID_ISCSI || 291 type == PROTOCOLID_FCOE || 292 type == PROTOCOLID_ROCE || 293 type == PROTOCOLID_IWARP; 294 } 295 296 static bool tm_tid_proto(enum protocol_type type) 297 { 298 return type == PROTOCOLID_FCOE; 299 } 300 301 /* counts the iids for the CDU/CDUC ILT client configuration */ 302 struct ecore_cdu_iids { 303 u32 pf_cids; 304 u32 per_vf_cids; 305 }; 306 307 static void ecore_cxt_cdu_iids(struct ecore_cxt_mngr *p_mngr, 308 struct ecore_cdu_iids *iids) 309 { 310 u32 type; 311 312 for (type = 0; type < MAX_CONN_TYPES; type++) { 313 iids->pf_cids += p_mngr->conn_cfg[type].cid_count; 314 iids->per_vf_cids += p_mngr->conn_cfg[type].cids_per_vf; 315 } 316 } 317 318 /* counts the iids for the Searcher block configuration */ 319 struct ecore_src_iids { 320 u32 pf_cids; 321 u32 per_vf_cids; 322 }; 323 324 static void ecore_cxt_src_iids(struct ecore_hwfn *p_hwfn, 325 struct ecore_cxt_mngr *p_mngr, 326 struct ecore_src_iids *iids) 327 { 328 u32 i; 329 330 for (i = 0; i < MAX_CONN_TYPES; i++) { 331 if (!src_proto(p_hwfn, i)) 332 continue; 333 334 iids->pf_cids += p_mngr->conn_cfg[i].cid_count; 335 iids->per_vf_cids += p_mngr->conn_cfg[i].cids_per_vf; 336 } 337 338 /* Add L2 filtering filters in addition */ 339 iids->pf_cids += p_mngr->arfs_count; 340 } 341 342 /* counts the iids for the Timers block configuration */ 343 struct ecore_tm_iids { 344 u32 pf_cids; 345 u32 pf_tids[NUM_TASK_PF_SEGMENTS]; /* per segment */ 346 u32 pf_tids_total; 347 u32 per_vf_cids; 348 u32 per_vf_tids; 349 }; 350 351 static void ecore_cxt_tm_iids(struct ecore_hwfn *p_hwfn, 352 struct ecore_cxt_mngr *p_mngr, 353 struct ecore_tm_iids *iids) 354 { 355 bool tm_vf_required = false; 356 bool tm_required = false; 357 int i, j; 358 359 /* Timers is a special case -> we don't count how many cids require 360 * timers but what's the max cid that will be used by the timer block. 361 * therefore we traverse in reverse order, and once we hit a protocol 362 * that requires the timers memory, we'll sum all the protocols up 363 * to that one. 364 */ 365 for (i = MAX_CONN_TYPES - 1; i >= 0; i--) { 366 struct ecore_conn_type_cfg *p_cfg = &p_mngr->conn_cfg[i]; 367 368 if (tm_cid_proto(i) || tm_required) { 369 if (p_cfg->cid_count) 370 tm_required = true; 371 372 iids->pf_cids += p_cfg->cid_count; 373 } 374 375 if (tm_cid_proto(i) || tm_vf_required) { 376 if (p_cfg->cids_per_vf) 377 tm_vf_required = true; 378 379 iids->per_vf_cids += p_cfg->cids_per_vf; 380 } 381 382 if (tm_tid_proto(i)) { 383 struct ecore_tid_seg *segs = p_cfg->tid_seg; 384 385 /* for each segment there is at most one 386 * protocol for which count is not 0. 387 */ 388 for (j = 0; j < NUM_TASK_PF_SEGMENTS; j++) 389 iids->pf_tids[j] += segs[j].count; 390 391 /* The last array elelment is for the VFs. As for PF 392 * segments there can be only one protocol for 393 * which this value is not 0. 394 */ 395 iids->per_vf_tids += segs[NUM_TASK_PF_SEGMENTS].count; 396 } 397 } 398 399 iids->pf_cids = ROUNDUP(iids->pf_cids, TM_ALIGN); 400 iids->per_vf_cids = ROUNDUP(iids->per_vf_cids, TM_ALIGN); 401 iids->per_vf_tids = ROUNDUP(iids->per_vf_tids, TM_ALIGN); 402 403 for (iids->pf_tids_total = 0, j = 0; j < NUM_TASK_PF_SEGMENTS; j++) { 404 iids->pf_tids[j] = ROUNDUP(iids->pf_tids[j], TM_ALIGN); 405 iids->pf_tids_total += iids->pf_tids[j]; 406 } 407 } 408 409 static void ecore_cxt_qm_iids(struct ecore_hwfn *p_hwfn, 410 struct ecore_qm_iids *iids) 411 { 412 struct ecore_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr; 413 struct ecore_tid_seg *segs; 414 u32 vf_cids = 0, type, j; 415 u32 vf_tids = 0; 416 417 for (type = 0; type < MAX_CONN_TYPES; type++) { 418 iids->cids += p_mngr->conn_cfg[type].cid_count; 419 vf_cids += p_mngr->conn_cfg[type].cids_per_vf; 420 421 segs = p_mngr->conn_cfg[type].tid_seg; 422 /* for each segment there is at most one 423 * protocol for which count is not 0. 424 */ 425 for (j = 0; j < NUM_TASK_PF_SEGMENTS; j++) 426 iids->tids += segs[j].count; 427 428 /* The last array elelment is for the VFs. As for PF 429 * segments there can be only one protocol for 430 * which this value is not 0. 431 */ 432 vf_tids += segs[NUM_TASK_PF_SEGMENTS].count; 433 } 434 435 iids->vf_cids += vf_cids * p_mngr->vf_count; 436 iids->tids += vf_tids * p_mngr->vf_count; 437 438 DP_VERBOSE(p_hwfn, ECORE_MSG_ILT, 439 "iids: CIDS %08x vf_cids %08x tids %08x vf_tids %08x\n", 440 iids->cids, iids->vf_cids, iids->tids, vf_tids); 441 } 442 443 static struct ecore_tid_seg *ecore_cxt_tid_seg_info(struct ecore_hwfn *p_hwfn, 444 u32 seg) 445 { 446 struct ecore_cxt_mngr *p_cfg = p_hwfn->p_cxt_mngr; 447 u32 i; 448 449 /* Find the protocol with tid count > 0 for this segment. 450 Note: there can only be one and this is already validated. 451 */ 452 for (i = 0; i < MAX_CONN_TYPES; i++) { 453 if (p_cfg->conn_cfg[i].tid_seg[seg].count) 454 return &p_cfg->conn_cfg[i].tid_seg[seg]; 455 } 456 return OSAL_NULL; 457 } 458 459 /* set the iids (cid/tid) count per protocol */ 460 static void ecore_cxt_set_proto_cid_count(struct ecore_hwfn *p_hwfn, 461 enum protocol_type type, 462 u32 cid_count, u32 vf_cid_cnt) 463 { 464 struct ecore_cxt_mngr *p_mgr = p_hwfn->p_cxt_mngr; 465 struct ecore_conn_type_cfg *p_conn = &p_mgr->conn_cfg[type]; 466 467 p_conn->cid_count = ROUNDUP(cid_count, DQ_RANGE_ALIGN); 468 p_conn->cids_per_vf = ROUNDUP(vf_cid_cnt, DQ_RANGE_ALIGN); 469 470 if (type == PROTOCOLID_ROCE) { 471 u32 page_sz = p_mgr->clients[ILT_CLI_CDUC].p_size.val; 472 u32 cxt_size = CONN_CXT_SIZE(p_hwfn); 473 u32 elems_per_page = ILT_PAGE_IN_BYTES(page_sz) / cxt_size; 474 u32 align = elems_per_page * DQ_RANGE_ALIGN; 475 476 p_conn->cid_count = ROUNDUP(p_conn->cid_count, align); 477 } 478 } 479 480 u32 ecore_cxt_get_proto_cid_count(struct ecore_hwfn *p_hwfn, 481 enum protocol_type type, 482 u32 *vf_cid) 483 { 484 if (vf_cid) 485 *vf_cid = p_hwfn->p_cxt_mngr->conn_cfg[type].cids_per_vf; 486 487 return p_hwfn->p_cxt_mngr->conn_cfg[type].cid_count; 488 } 489 490 u32 ecore_cxt_get_proto_cid_start(struct ecore_hwfn *p_hwfn, 491 enum protocol_type type) 492 { 493 return p_hwfn->p_cxt_mngr->acquired[type].start_cid; 494 } 495 496 u32 ecore_cxt_get_proto_tid_count(struct ecore_hwfn *p_hwfn, 497 enum protocol_type type) 498 { 499 u32 cnt = 0; 500 int i; 501 502 for (i = 0; i < TASK_SEGMENTS; i++) 503 cnt += p_hwfn->p_cxt_mngr->conn_cfg[type].tid_seg[i].count; 504 505 return cnt; 506 } 507 508 static void ecore_cxt_set_proto_tid_count(struct ecore_hwfn *p_hwfn, 509 enum protocol_type proto, 510 u8 seg, 511 u8 seg_type, 512 u32 count, 513 bool has_fl) 514 { 515 struct ecore_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr; 516 struct ecore_tid_seg *p_seg = &p_mngr->conn_cfg[proto].tid_seg[seg]; 517 518 p_seg->count = count; 519 p_seg->has_fl_mem = has_fl; 520 p_seg->type = seg_type; 521 } 522 523 /* the *p_line parameter must be either 0 for the first invocation or the 524 value returned in the previous invocation. 525 */ 526 static void ecore_ilt_cli_blk_fill(struct ecore_ilt_client_cfg *p_cli, 527 struct ecore_ilt_cli_blk *p_blk, 528 u32 start_line, 529 u32 total_size, 530 u32 elem_size) 531 { 532 u32 ilt_size = ILT_PAGE_IN_BYTES(p_cli->p_size.val); 533 534 /* verify that it's called once for each block */ 535 if (p_blk->total_size) 536 return; 537 538 p_blk->total_size = total_size; 539 p_blk->real_size_in_page = 0; 540 if (elem_size) 541 p_blk->real_size_in_page = (ilt_size / elem_size) * elem_size; 542 p_blk->start_line = start_line; 543 } 544 545 static void ecore_ilt_cli_adv_line(struct ecore_hwfn *p_hwfn, 546 struct ecore_ilt_client_cfg *p_cli, 547 struct ecore_ilt_cli_blk *p_blk, 548 u32 *p_line, 549 enum ilt_clients client_id) 550 { 551 if (!p_blk->total_size) 552 return; 553 554 if (!p_cli->active) 555 p_cli->first.val = *p_line; 556 557 p_cli->active = true; 558 *p_line += DIV_ROUND_UP(p_blk->total_size, p_blk->real_size_in_page); 559 p_cli->last.val = *p_line-1; 560 561 DP_VERBOSE(p_hwfn, ECORE_MSG_ILT, 562 "ILT[Client %d] - Lines: [%08x - %08x]. Block - Size %08x [Real %08x] Start line %d\n", 563 client_id, p_cli->first.val, p_cli->last.val, 564 p_blk->total_size, p_blk->real_size_in_page, 565 p_blk->start_line); 566 } 567 568 static u32 ecore_ilt_get_dynamic_line_cnt(struct ecore_hwfn *p_hwfn, 569 enum ilt_clients ilt_client) 570 { 571 u32 cid_count = p_hwfn->p_cxt_mngr->conn_cfg[PROTOCOLID_ROCE].cid_count; 572 struct ecore_ilt_client_cfg *p_cli; 573 u32 lines_to_skip = 0; 574 u32 cxts_per_p; 575 576 /* TBD MK: ILT code should be simplified once PROTO enum is changed */ 577 578 if (ilt_client == ILT_CLI_CDUC) { 579 p_cli = &p_hwfn->p_cxt_mngr->clients[ILT_CLI_CDUC]; 580 581 cxts_per_p = ILT_PAGE_IN_BYTES(p_cli->p_size.val) / 582 (u32)CONN_CXT_SIZE(p_hwfn); 583 584 lines_to_skip = cid_count / cxts_per_p; 585 } 586 587 return lines_to_skip; 588 } 589 590 static struct ecore_ilt_client_cfg * 591 ecore_cxt_set_cli(struct ecore_ilt_client_cfg *p_cli) 592 { 593 p_cli->active = false; 594 p_cli->first.val = 0; 595 p_cli->last.val = 0; 596 return p_cli; 597 } 598 599 static struct ecore_ilt_cli_blk * 600 ecore_cxt_set_blk(struct ecore_ilt_cli_blk *p_blk) 601 { 602 p_blk->total_size = 0; 603 return p_blk; 604 } 605 606 enum _ecore_status_t ecore_cxt_cfg_ilt_compute(struct ecore_hwfn *p_hwfn, 607 u32 *line_count) 608 { 609 struct ecore_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr; 610 u32 curr_line, total, i, task_size, line; 611 struct ecore_ilt_client_cfg *p_cli; 612 struct ecore_ilt_cli_blk *p_blk; 613 struct ecore_cdu_iids cdu_iids; 614 struct ecore_src_iids src_iids; 615 struct ecore_qm_iids qm_iids; 616 struct ecore_tm_iids tm_iids; 617 struct ecore_tid_seg *p_seg; 618 619 OSAL_MEM_ZERO(&qm_iids, sizeof(qm_iids)); 620 OSAL_MEM_ZERO(&cdu_iids, sizeof(cdu_iids)); 621 OSAL_MEM_ZERO(&src_iids, sizeof(src_iids)); 622 OSAL_MEM_ZERO(&tm_iids, sizeof(tm_iids)); 623 624 p_mngr->pf_start_line = RESC_START(p_hwfn, ECORE_ILT); 625 626 DP_VERBOSE(p_hwfn, ECORE_MSG_ILT, 627 "hwfn [%d] - Set context manager starting line to be 0x%08x\n", 628 p_hwfn->my_id, p_hwfn->p_cxt_mngr->pf_start_line); 629 630 /* CDUC */ 631 p_cli = ecore_cxt_set_cli(&p_mngr->clients[ILT_CLI_CDUC]); 632 633 curr_line = p_mngr->pf_start_line; 634 635 /* CDUC PF */ 636 p_cli->pf_total_lines = 0; 637 638 /* get the counters for the CDUC,CDUC and QM clients */ 639 ecore_cxt_cdu_iids(p_mngr, &cdu_iids); 640 641 p_blk = ecore_cxt_set_blk(&p_cli->pf_blks[CDUC_BLK]); 642 643 total = cdu_iids.pf_cids * CONN_CXT_SIZE(p_hwfn); 644 645 ecore_ilt_cli_blk_fill(p_cli, p_blk, curr_line, 646 total, CONN_CXT_SIZE(p_hwfn)); 647 648 ecore_ilt_cli_adv_line(p_hwfn, p_cli, p_blk, &curr_line, ILT_CLI_CDUC); 649 p_cli->pf_total_lines = curr_line - p_blk->start_line; 650 651 p_blk->dynamic_line_cnt = ecore_ilt_get_dynamic_line_cnt(p_hwfn, 652 ILT_CLI_CDUC); 653 654 /* CDUC VF */ 655 p_blk = ecore_cxt_set_blk(&p_cli->vf_blks[CDUC_BLK]); 656 total = cdu_iids.per_vf_cids * CONN_CXT_SIZE(p_hwfn); 657 658 ecore_ilt_cli_blk_fill(p_cli, p_blk, curr_line, 659 total, CONN_CXT_SIZE(p_hwfn)); 660 661 ecore_ilt_cli_adv_line(p_hwfn, p_cli, p_blk, &curr_line, ILT_CLI_CDUC); 662 p_cli->vf_total_lines = curr_line - p_blk->start_line; 663 664 for (i = 1; i < p_mngr->vf_count; i++) 665 ecore_ilt_cli_adv_line(p_hwfn, p_cli, p_blk, &curr_line, 666 ILT_CLI_CDUC); 667 668 /* CDUT PF */ 669 p_cli = ecore_cxt_set_cli(&p_mngr->clients[ILT_CLI_CDUT]); 670 p_cli->first.val = curr_line; 671 672 /* first the 'working' task memory */ 673 for (i = 0; i < NUM_TASK_PF_SEGMENTS; i++) { 674 p_seg = ecore_cxt_tid_seg_info(p_hwfn, i); 675 if (!p_seg || p_seg->count == 0) 676 continue; 677 678 p_blk = ecore_cxt_set_blk(&p_cli->pf_blks[CDUT_SEG_BLK(i)]); 679 total = p_seg->count * p_mngr->task_type_size[p_seg->type]; 680 ecore_ilt_cli_blk_fill(p_cli, p_blk, curr_line, total, 681 p_mngr->task_type_size[p_seg->type]); 682 683 ecore_ilt_cli_adv_line(p_hwfn, p_cli, p_blk, &curr_line, 684 ILT_CLI_CDUT); 685 } 686 687 /* next the 'init' task memory (forced load memory) */ 688 for (i = 0; i < NUM_TASK_PF_SEGMENTS; i++) { 689 p_seg = ecore_cxt_tid_seg_info(p_hwfn, i); 690 if (!p_seg || p_seg->count == 0) 691 continue; 692 693 p_blk = ecore_cxt_set_blk( 694 &p_cli->pf_blks[CDUT_FL_SEG_BLK(i, PF)]); 695 696 if (!p_seg->has_fl_mem) { 697 /* The segment is active (total size pf 'working' 698 * memory is > 0) but has no FL (forced-load, Init) 699 * memory. Thus: 700 * 701 * 1. The total-size in the corrsponding FL block of 702 * the ILT client is set to 0 - No ILT line are 703 * provisioned and no ILT memory allocated. 704 * 705 * 2. The start-line of said block is set to the 706 * start line of the matching working memory 707 * block in the ILT client. This is later used to 708 * configure the CDU segment offset registers and 709 * results in an FL command for TIDs of this 710 * segement behaves as regular load commands 711 * (loading TIDs from the working memory). 712 */ 713 line = p_cli->pf_blks[CDUT_SEG_BLK(i)].start_line; 714 715 ecore_ilt_cli_blk_fill(p_cli, p_blk, line, 0, 0); 716 continue; 717 } 718 total = p_seg->count * p_mngr->task_type_size[p_seg->type]; 719 720 ecore_ilt_cli_blk_fill(p_cli, p_blk, 721 curr_line, total, 722 p_mngr->task_type_size[p_seg->type]); 723 724 ecore_ilt_cli_adv_line(p_hwfn, p_cli, p_blk, &curr_line, 725 ILT_CLI_CDUT); 726 } 727 p_cli->pf_total_lines = curr_line - p_cli->pf_blks[0].start_line; 728 729 /* CDUT VF */ 730 p_seg = ecore_cxt_tid_seg_info(p_hwfn, TASK_SEGMENT_VF); 731 if (p_seg && p_seg->count) { 732 /* Stricly speaking we need to iterate over all VF 733 * task segment types, but a VF has only 1 segment 734 */ 735 736 /* 'working' memory */ 737 total = p_seg->count * p_mngr->task_type_size[p_seg->type]; 738 739 p_blk = ecore_cxt_set_blk(&p_cli->vf_blks[CDUT_SEG_BLK(0)]); 740 ecore_ilt_cli_blk_fill(p_cli, p_blk, 741 curr_line, total, 742 p_mngr->task_type_size[p_seg->type]); 743 744 ecore_ilt_cli_adv_line(p_hwfn, p_cli, p_blk, &curr_line, 745 ILT_CLI_CDUT); 746 747 /* 'init' memory */ 748 p_blk = ecore_cxt_set_blk( 749 &p_cli->vf_blks[CDUT_FL_SEG_BLK(0, VF)]); 750 if (!p_seg->has_fl_mem) { 751 /* see comment above */ 752 line = p_cli->vf_blks[CDUT_SEG_BLK(0)].start_line; 753 ecore_ilt_cli_blk_fill(p_cli, p_blk, line, 0, 0); 754 } else { 755 task_size = p_mngr->task_type_size[p_seg->type]; 756 ecore_ilt_cli_blk_fill(p_cli, p_blk, 757 curr_line, total, 758 task_size); 759 ecore_ilt_cli_adv_line(p_hwfn, p_cli, p_blk, &curr_line, 760 ILT_CLI_CDUT); 761 } 762 p_cli->vf_total_lines = curr_line - 763 p_cli->vf_blks[0].start_line; 764 765 /* Now for the rest of the VFs */ 766 for (i = 1; i < p_mngr->vf_count; i++) { 767 /* don't set p_blk i.e. don't clear total_size */ 768 p_blk = &p_cli->vf_blks[CDUT_SEG_BLK(0)]; 769 ecore_ilt_cli_adv_line(p_hwfn, p_cli, p_blk, &curr_line, 770 ILT_CLI_CDUT); 771 772 /* don't set p_blk i.e. don't clear total_size */ 773 p_blk = &p_cli->vf_blks[CDUT_FL_SEG_BLK(0, VF)]; 774 ecore_ilt_cli_adv_line(p_hwfn, p_cli, p_blk, &curr_line, 775 ILT_CLI_CDUT); 776 } 777 } 778 779 /* QM */ 780 p_cli = ecore_cxt_set_cli(&p_mngr->clients[ILT_CLI_QM]); 781 p_blk = ecore_cxt_set_blk(&p_cli->pf_blks[0]); 782 783 ecore_cxt_qm_iids(p_hwfn, &qm_iids); 784 total = ecore_qm_pf_mem_size(p_hwfn->rel_pf_id, qm_iids.cids, 785 qm_iids.vf_cids, qm_iids.tids, 786 p_hwfn->qm_info.num_pqs, 787 p_hwfn->qm_info.num_vf_pqs); 788 789 DP_VERBOSE(p_hwfn, ECORE_MSG_ILT, 790 "QM ILT Info, (cids=%d, vf_cids=%d, tids=%d, num_pqs=%d, num_vf_pqs=%d, memory_size=%d)\n", 791 qm_iids.cids, qm_iids.vf_cids, qm_iids.tids, 792 p_hwfn->qm_info.num_pqs, p_hwfn->qm_info.num_vf_pqs, total); 793 794 ecore_ilt_cli_blk_fill(p_cli, p_blk, curr_line, total * 0x1000, 795 QM_PQ_ELEMENT_SIZE); 796 797 ecore_ilt_cli_adv_line(p_hwfn, p_cli, p_blk, &curr_line, ILT_CLI_QM); 798 p_cli->pf_total_lines = curr_line - p_blk->start_line; 799 800 /* SRC */ 801 p_cli = ecore_cxt_set_cli(&p_mngr->clients[ILT_CLI_SRC]); 802 ecore_cxt_src_iids(p_hwfn, p_mngr, &src_iids); 803 804 /* Both the PF and VFs searcher connections are stored in the per PF 805 * database. Thus sum the PF searcher cids and all the VFs searcher 806 * cids. 807 */ 808 total = src_iids.pf_cids + src_iids.per_vf_cids * p_mngr->vf_count; 809 if (total) { 810 u32 local_max = OSAL_MAX_T(u32, total, 811 SRC_MIN_NUM_ELEMS); 812 813 total = OSAL_ROUNDUP_POW_OF_TWO(local_max); 814 815 p_blk = ecore_cxt_set_blk(&p_cli->pf_blks[0]); 816 ecore_ilt_cli_blk_fill(p_cli, p_blk, curr_line, 817 total * sizeof(struct src_ent), 818 sizeof(struct src_ent)); 819 820 ecore_ilt_cli_adv_line(p_hwfn, p_cli, p_blk, &curr_line, 821 ILT_CLI_SRC); 822 p_cli->pf_total_lines = curr_line - p_blk->start_line; 823 } 824 825 /* TM PF */ 826 p_cli = ecore_cxt_set_cli(&p_mngr->clients[ILT_CLI_TM]); 827 ecore_cxt_tm_iids(p_hwfn, p_mngr, &tm_iids); 828 total = tm_iids.pf_cids + tm_iids.pf_tids_total; 829 if (total) { 830 p_blk = ecore_cxt_set_blk(&p_cli->pf_blks[0]); 831 ecore_ilt_cli_blk_fill(p_cli, p_blk, curr_line, 832 total * TM_ELEM_SIZE, 833 TM_ELEM_SIZE); 834 835 ecore_ilt_cli_adv_line(p_hwfn, p_cli, p_blk, &curr_line, 836 ILT_CLI_TM); 837 p_cli->pf_total_lines = curr_line - p_blk->start_line; 838 } 839 840 /* TM VF */ 841 total = tm_iids.per_vf_cids + tm_iids.per_vf_tids; 842 if (total) { 843 p_blk = ecore_cxt_set_blk(&p_cli->vf_blks[0]); 844 ecore_ilt_cli_blk_fill(p_cli, p_blk, curr_line, 845 total * TM_ELEM_SIZE, 846 TM_ELEM_SIZE); 847 848 ecore_ilt_cli_adv_line(p_hwfn, p_cli, p_blk, &curr_line, 849 ILT_CLI_TM); 850 851 p_cli->vf_total_lines = curr_line - p_blk->start_line; 852 for (i = 1; i < p_mngr->vf_count; i++) { 853 ecore_ilt_cli_adv_line(p_hwfn, p_cli, p_blk, &curr_line, 854 ILT_CLI_TM); 855 } 856 } 857 858 /* TSDM (SRQ CONTEXT) */ 859 total = ecore_cxt_get_srq_count(p_hwfn); 860 861 if (total) { 862 p_cli = ecore_cxt_set_cli(&p_mngr->clients[ILT_CLI_TSDM]); 863 p_blk = ecore_cxt_set_blk(&p_cli->pf_blks[SRQ_BLK]); 864 ecore_ilt_cli_blk_fill(p_cli, p_blk, curr_line, 865 total * SRQ_CXT_SIZE, SRQ_CXT_SIZE); 866 867 ecore_ilt_cli_adv_line(p_hwfn, p_cli, p_blk, &curr_line, 868 ILT_CLI_TSDM); 869 p_cli->pf_total_lines = curr_line - p_blk->start_line; 870 } 871 872 *line_count = curr_line - p_hwfn->p_cxt_mngr->pf_start_line; 873 874 if (curr_line - p_hwfn->p_cxt_mngr->pf_start_line > 875 RESC_NUM(p_hwfn, ECORE_ILT)) { 876 return ECORE_INVAL; 877 } 878 879 return ECORE_SUCCESS; 880 } 881 882 u32 ecore_cxt_cfg_ilt_compute_excess(struct ecore_hwfn *p_hwfn, u32 used_lines) 883 { 884 struct ecore_ilt_client_cfg *p_cli; 885 u32 excess_lines, available_lines; 886 struct ecore_cxt_mngr *p_mngr; 887 u32 ilt_page_size, elem_size; 888 struct ecore_tid_seg *p_seg; 889 int i; 890 891 available_lines = RESC_NUM(p_hwfn, ECORE_ILT); 892 excess_lines = used_lines - available_lines; 893 894 if (!excess_lines) 895 return 0; 896 897 if (!ECORE_IS_RDMA_PERSONALITY(p_hwfn)) 898 return 0; 899 900 p_mngr = p_hwfn->p_cxt_mngr; 901 p_cli = &p_mngr->clients[ILT_CLI_CDUT]; 902 ilt_page_size = ILT_PAGE_IN_BYTES(p_cli->p_size.val); 903 904 for (i = 0; i < NUM_TASK_PF_SEGMENTS; i++) { 905 p_seg = ecore_cxt_tid_seg_info(p_hwfn, i); 906 if (!p_seg || p_seg->count == 0) 907 continue; 908 909 elem_size = p_mngr->task_type_size[p_seg->type]; 910 if (!elem_size) 911 continue; 912 913 return (ilt_page_size / elem_size) * excess_lines; 914 } 915 916 DP_ERR(p_hwfn, "failed computing excess ILT lines\n"); 917 return 0; 918 } 919 920 static void ecore_cxt_src_t2_free(struct ecore_hwfn *p_hwfn) 921 { 922 struct ecore_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr; 923 u32 i; 924 925 if (!p_mngr->t2) 926 return; 927 928 for (i = 0; i < p_mngr->t2_num_pages; i++) 929 if (p_mngr->t2[i].p_virt) 930 OSAL_DMA_FREE_COHERENT(p_hwfn->p_dev, 931 p_mngr->t2[i].p_virt, 932 p_mngr->t2[i].p_phys, 933 p_mngr->t2[i].size); 934 935 OSAL_FREE(p_hwfn->p_dev, p_mngr->t2); 936 p_mngr->t2 = OSAL_NULL; 937 } 938 939 static enum _ecore_status_t ecore_cxt_src_t2_alloc(struct ecore_hwfn *p_hwfn) 940 { 941 struct ecore_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr; 942 u32 conn_num, total_size, ent_per_page, psz, i; 943 struct ecore_ilt_client_cfg *p_src; 944 struct ecore_src_iids src_iids; 945 struct ecore_dma_mem *p_t2; 946 enum _ecore_status_t rc; 947 948 OSAL_MEM_ZERO(&src_iids, sizeof(src_iids)); 949 950 /* if the SRC ILT client is inactive - there are no connection 951 * requiring the searcer, leave. 952 */ 953 p_src = &p_hwfn->p_cxt_mngr->clients[ILT_CLI_SRC]; 954 if (!p_src->active) 955 return ECORE_SUCCESS; 956 957 ecore_cxt_src_iids(p_hwfn, p_mngr, &src_iids); 958 conn_num = src_iids.pf_cids + src_iids.per_vf_cids * p_mngr->vf_count; 959 total_size = conn_num * sizeof(struct src_ent); 960 961 /* use the same page size as the SRC ILT client */ 962 psz = ILT_PAGE_IN_BYTES(p_src->p_size.val); 963 p_mngr->t2_num_pages = DIV_ROUND_UP(total_size, psz); 964 965 /* allocate t2 */ 966 p_mngr->t2 = OSAL_ZALLOC(p_hwfn->p_dev, GFP_KERNEL, 967 p_mngr->t2_num_pages * 968 sizeof(struct ecore_dma_mem)); 969 if (!p_mngr->t2) { 970 DP_NOTICE(p_hwfn, true, "Failed to allocate t2 table\n"); 971 rc = ECORE_NOMEM; 972 goto t2_fail; 973 } 974 975 /* allocate t2 pages */ 976 for (i = 0; i < p_mngr->t2_num_pages; i++) { 977 u32 size = OSAL_MIN_T(u32, total_size, psz); 978 void **p_virt = &p_mngr->t2[i].p_virt; 979 980 *p_virt = OSAL_DMA_ALLOC_COHERENT(p_hwfn->p_dev, 981 &p_mngr->t2[i].p_phys, 982 size); 983 if (!p_mngr->t2[i].p_virt) { 984 rc = ECORE_NOMEM; 985 goto t2_fail; 986 } 987 OSAL_MEM_ZERO(*p_virt, size); 988 p_mngr->t2[i].size = size; 989 total_size -= size; 990 } 991 992 /* Set the t2 pointers */ 993 994 /* entries per page - must be a power of two */ 995 ent_per_page = psz / sizeof(struct src_ent); 996 997 p_mngr->first_free = (u64)p_mngr->t2[0].p_phys; 998 999 p_t2 = &p_mngr->t2[(conn_num - 1) / ent_per_page]; 1000 p_mngr->last_free = (u64)p_t2->p_phys + 1001 ((conn_num - 1) & (ent_per_page - 1)) * 1002 sizeof(struct src_ent); 1003 1004 for (i = 0; i < p_mngr->t2_num_pages; i++) { 1005 u32 ent_num = OSAL_MIN_T(u32, ent_per_page, conn_num); 1006 struct src_ent *entries = p_mngr->t2[i].p_virt; 1007 u64 p_ent_phys = (u64)p_mngr->t2[i].p_phys, val; 1008 u32 j; 1009 1010 for (j = 0; j < ent_num - 1; j++) { 1011 val = p_ent_phys + 1012 (j + 1) * sizeof(struct src_ent); 1013 entries[j].next = OSAL_CPU_TO_BE64(val); 1014 } 1015 1016 if (i < p_mngr->t2_num_pages - 1) 1017 val = (u64)p_mngr->t2[i + 1].p_phys; 1018 else 1019 val = 0; 1020 entries[j].next = OSAL_CPU_TO_BE64(val); 1021 1022 conn_num -= ent_num; 1023 } 1024 1025 return ECORE_SUCCESS; 1026 1027 t2_fail: 1028 ecore_cxt_src_t2_free(p_hwfn); 1029 return rc; 1030 } 1031 1032 #define for_each_ilt_valid_client(pos, clients) \ 1033 for (pos = 0; pos < ILT_CLI_MAX; pos++) \ 1034 if (!clients[pos].active) { \ 1035 continue; \ 1036 } else \ 1037 1038 1039 /* Total number of ILT lines used by this PF */ 1040 static u32 ecore_cxt_ilt_shadow_size(struct ecore_ilt_client_cfg *ilt_clients) 1041 { 1042 u32 size = 0; 1043 u32 i; 1044 1045 for_each_ilt_valid_client(i, ilt_clients) 1046 size += (ilt_clients[i].last.val - 1047 ilt_clients[i].first.val + 1); 1048 1049 return size; 1050 } 1051 1052 static void ecore_ilt_shadow_free(struct ecore_hwfn *p_hwfn) 1053 { 1054 struct ecore_ilt_client_cfg *p_cli = p_hwfn->p_cxt_mngr->clients; 1055 struct ecore_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr; 1056 u32 ilt_size, i; 1057 1058 ilt_size = ecore_cxt_ilt_shadow_size(p_cli); 1059 1060 for (i = 0; p_mngr->ilt_shadow && i < ilt_size; i++) { 1061 struct ecore_dma_mem *p_dma = &p_mngr->ilt_shadow[i]; 1062 1063 if (p_dma->p_virt) 1064 OSAL_DMA_FREE_COHERENT(p_hwfn->p_dev, 1065 p_dma->p_virt, 1066 p_dma->p_phys, 1067 p_dma->size); 1068 p_dma->p_virt = OSAL_NULL; 1069 } 1070 OSAL_FREE(p_hwfn->p_dev, p_mngr->ilt_shadow); 1071 } 1072 1073 static enum _ecore_status_t ecore_ilt_blk_alloc(struct ecore_hwfn *p_hwfn, 1074 struct ecore_ilt_cli_blk *p_blk, 1075 enum ilt_clients ilt_client, 1076 u32 start_line_offset) 1077 { 1078 struct ecore_dma_mem *ilt_shadow = p_hwfn->p_cxt_mngr->ilt_shadow; 1079 u32 lines, line, sz_left, lines_to_skip = 0; 1080 1081 /* Special handling for RoCE that supports dynamic allocation */ 1082 if (ECORE_IS_RDMA_PERSONALITY(p_hwfn) && 1083 ((ilt_client == ILT_CLI_CDUT) || ilt_client == ILT_CLI_TSDM)) 1084 return ECORE_SUCCESS; 1085 1086 lines_to_skip = p_blk->dynamic_line_cnt; 1087 1088 if (!p_blk->total_size) 1089 return ECORE_SUCCESS; 1090 1091 sz_left = p_blk->total_size; 1092 lines = DIV_ROUND_UP(sz_left, p_blk->real_size_in_page) - 1093 lines_to_skip; 1094 line = p_blk->start_line + start_line_offset - 1095 p_hwfn->p_cxt_mngr->pf_start_line + lines_to_skip; 1096 1097 for (; lines; lines--) { 1098 dma_addr_t p_phys; 1099 void *p_virt; 1100 u32 size; 1101 1102 size = OSAL_MIN_T(u32, sz_left, p_blk->real_size_in_page); 1103 p_virt = OSAL_DMA_ALLOC_COHERENT(p_hwfn->p_dev, 1104 &p_phys, size); 1105 if (!p_virt) 1106 return ECORE_NOMEM; 1107 OSAL_MEM_ZERO(p_virt, size); 1108 1109 ilt_shadow[line].p_phys = p_phys; 1110 ilt_shadow[line].p_virt = p_virt; 1111 ilt_shadow[line].size = size; 1112 1113 DP_VERBOSE(p_hwfn, ECORE_MSG_ILT, 1114 "ILT shadow: Line [%d] Physical 0x%llx Virtual %p Size %d\n", 1115 line, (u64)p_phys, p_virt, size); 1116 1117 sz_left -= size; 1118 line++; 1119 } 1120 1121 return ECORE_SUCCESS; 1122 } 1123 1124 static enum _ecore_status_t ecore_ilt_shadow_alloc(struct ecore_hwfn *p_hwfn) 1125 { 1126 struct ecore_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr; 1127 struct ecore_ilt_client_cfg *clients = p_mngr->clients; 1128 struct ecore_ilt_cli_blk *p_blk; 1129 u32 size, i, j, k; 1130 enum _ecore_status_t rc; 1131 1132 size = ecore_cxt_ilt_shadow_size(clients); 1133 p_mngr->ilt_shadow = OSAL_ZALLOC(p_hwfn->p_dev, GFP_KERNEL, 1134 size * sizeof(struct ecore_dma_mem)); 1135 1136 if (!p_mngr->ilt_shadow) { 1137 DP_NOTICE(p_hwfn, true, "Failed to allocate ilt shadow table\n"); 1138 rc = ECORE_NOMEM; 1139 goto ilt_shadow_fail; 1140 } 1141 1142 DP_VERBOSE(p_hwfn, ECORE_MSG_ILT, 1143 "Allocated 0x%x bytes for ilt shadow\n", 1144 (u32)(size * sizeof(struct ecore_dma_mem))); 1145 1146 for_each_ilt_valid_client(i, clients) { 1147 for (j = 0; j < ILT_CLI_PF_BLOCKS; j++) { 1148 p_blk = &clients[i].pf_blks[j]; 1149 rc = ecore_ilt_blk_alloc(p_hwfn, p_blk, i, 0); 1150 if (rc != ECORE_SUCCESS) 1151 goto ilt_shadow_fail; 1152 } 1153 for (k = 0; k < p_mngr->vf_count; k++) { 1154 for (j = 0; j < ILT_CLI_VF_BLOCKS; j++) { 1155 u32 lines = clients[i].vf_total_lines * k; 1156 1157 p_blk = &clients[i].vf_blks[j]; 1158 rc = ecore_ilt_blk_alloc(p_hwfn, p_blk, 1159 i, lines); 1160 if (rc != ECORE_SUCCESS) 1161 goto ilt_shadow_fail; 1162 } 1163 } 1164 } 1165 1166 return ECORE_SUCCESS; 1167 1168 ilt_shadow_fail: 1169 ecore_ilt_shadow_free(p_hwfn); 1170 return rc; 1171 } 1172 1173 static void ecore_cid_map_free(struct ecore_hwfn *p_hwfn) 1174 { 1175 struct ecore_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr; 1176 u32 type, vf; 1177 1178 for (type = 0; type < MAX_CONN_TYPES; type++) { 1179 OSAL_FREE(p_hwfn->p_dev, p_mngr->acquired[type].cid_map); 1180 p_mngr->acquired[type].max_count = 0; 1181 p_mngr->acquired[type].start_cid = 0; 1182 1183 for (vf = 0; vf < COMMON_MAX_NUM_VFS; vf++) { 1184 OSAL_FREE(p_hwfn->p_dev, 1185 p_mngr->acquired_vf[type][vf].cid_map); 1186 p_mngr->acquired_vf[type][vf].max_count = 0; 1187 p_mngr->acquired_vf[type][vf].start_cid = 0; 1188 } 1189 } 1190 } 1191 1192 static enum _ecore_status_t 1193 ecore_cid_map_alloc_single(struct ecore_hwfn *p_hwfn, u32 type, 1194 u32 cid_start, u32 cid_count, 1195 struct ecore_cid_acquired_map *p_map) 1196 { 1197 u32 size; 1198 1199 if (!cid_count) 1200 return ECORE_SUCCESS; 1201 1202 size = MAP_WORD_SIZE * DIV_ROUND_UP(cid_count, BITS_PER_MAP_WORD); 1203 p_map->cid_map = OSAL_ZALLOC(p_hwfn->p_dev, GFP_KERNEL, size); 1204 if (p_map->cid_map == OSAL_NULL) 1205 return ECORE_NOMEM; 1206 1207 p_map->max_count = cid_count; 1208 p_map->start_cid = cid_start; 1209 1210 DP_VERBOSE(p_hwfn, ECORE_MSG_CXT, 1211 "Type %08x start: %08x count %08x\n", 1212 type, p_map->start_cid, p_map->max_count); 1213 1214 return ECORE_SUCCESS; 1215 } 1216 1217 static enum _ecore_status_t ecore_cid_map_alloc(struct ecore_hwfn *p_hwfn) 1218 { 1219 struct ecore_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr; 1220 u32 start_cid = 0, vf_start_cid = 0; 1221 u32 type, vf; 1222 1223 for (type = 0; type < MAX_CONN_TYPES; type++) { 1224 struct ecore_conn_type_cfg *p_cfg = &p_mngr->conn_cfg[type]; 1225 struct ecore_cid_acquired_map *p_map; 1226 1227 /* Handle PF maps */ 1228 p_map = &p_mngr->acquired[type]; 1229 if (ecore_cid_map_alloc_single(p_hwfn, type, start_cid, 1230 p_cfg->cid_count, p_map)) 1231 goto cid_map_fail; 1232 1233 /* Handle VF maps */ 1234 for (vf = 0; vf < COMMON_MAX_NUM_VFS; vf++) { 1235 p_map = &p_mngr->acquired_vf[type][vf]; 1236 if (ecore_cid_map_alloc_single(p_hwfn, type, 1237 vf_start_cid, 1238 p_cfg->cids_per_vf, 1239 p_map)) 1240 goto cid_map_fail; 1241 } 1242 1243 start_cid += p_cfg->cid_count; 1244 vf_start_cid += p_cfg->cids_per_vf; 1245 } 1246 1247 return ECORE_SUCCESS; 1248 1249 cid_map_fail: 1250 ecore_cid_map_free(p_hwfn); 1251 return ECORE_NOMEM; 1252 } 1253 1254 enum _ecore_status_t ecore_cxt_mngr_alloc(struct ecore_hwfn *p_hwfn) 1255 { 1256 struct ecore_ilt_client_cfg *clients; 1257 struct ecore_cxt_mngr *p_mngr; 1258 u32 i; 1259 1260 p_mngr = OSAL_ZALLOC(p_hwfn->p_dev, GFP_KERNEL, sizeof(*p_mngr)); 1261 if (!p_mngr) { 1262 DP_NOTICE(p_hwfn, true, "Failed to allocate `struct ecore_cxt_mngr'\n"); 1263 return ECORE_NOMEM; 1264 } 1265 1266 /* Initialize ILT client registers */ 1267 clients = p_mngr->clients; 1268 clients[ILT_CLI_CDUC].first.reg = ILT_CFG_REG(CDUC, FIRST_ILT); 1269 clients[ILT_CLI_CDUC].last.reg = ILT_CFG_REG(CDUC, LAST_ILT); 1270 clients[ILT_CLI_CDUC].p_size.reg = ILT_CFG_REG(CDUC, P_SIZE); 1271 1272 clients[ILT_CLI_QM].first.reg = ILT_CFG_REG(QM, FIRST_ILT); 1273 clients[ILT_CLI_QM].last.reg = ILT_CFG_REG(QM, LAST_ILT); 1274 clients[ILT_CLI_QM].p_size.reg = ILT_CFG_REG(QM, P_SIZE); 1275 1276 clients[ILT_CLI_TM].first.reg = ILT_CFG_REG(TM, FIRST_ILT); 1277 clients[ILT_CLI_TM].last.reg = ILT_CFG_REG(TM, LAST_ILT); 1278 clients[ILT_CLI_TM].p_size.reg = ILT_CFG_REG(TM, P_SIZE); 1279 1280 clients[ILT_CLI_SRC].first.reg = ILT_CFG_REG(SRC, FIRST_ILT); 1281 clients[ILT_CLI_SRC].last.reg = ILT_CFG_REG(SRC, LAST_ILT); 1282 clients[ILT_CLI_SRC].p_size.reg = ILT_CFG_REG(SRC, P_SIZE); 1283 1284 clients[ILT_CLI_CDUT].first.reg = ILT_CFG_REG(CDUT, FIRST_ILT); 1285 clients[ILT_CLI_CDUT].last.reg = ILT_CFG_REG(CDUT, LAST_ILT); 1286 clients[ILT_CLI_CDUT].p_size.reg = ILT_CFG_REG(CDUT, P_SIZE); 1287 1288 clients[ILT_CLI_TSDM].first.reg = ILT_CFG_REG(TSDM, FIRST_ILT); 1289 clients[ILT_CLI_TSDM].last.reg = ILT_CFG_REG(TSDM, LAST_ILT); 1290 clients[ILT_CLI_TSDM].p_size.reg = ILT_CFG_REG(TSDM, P_SIZE); 1291 1292 /* default ILT page size for all clients is 32K */ 1293 for (i = 0; i < ILT_CLI_MAX; i++) 1294 p_mngr->clients[i].p_size.val = ILT_DEFAULT_HW_P_SIZE; 1295 1296 /* Initialize task sizes */ 1297 p_mngr->task_type_size[0] = TYPE0_TASK_CXT_SIZE(p_hwfn); 1298 p_mngr->task_type_size[1] = TYPE1_TASK_CXT_SIZE(p_hwfn); 1299 1300 if (p_hwfn->p_dev->p_iov_info) 1301 p_mngr->vf_count = p_hwfn->p_dev->p_iov_info->total_vfs; 1302 1303 /* Initialize the dynamic ILT allocation mutex */ 1304 OSAL_MUTEX_ALLOC(p_hwfn, &p_mngr->mutex); 1305 OSAL_MUTEX_INIT(&p_mngr->mutex); 1306 1307 /* Set the cxt mangr pointer priori to further allocations */ 1308 p_hwfn->p_cxt_mngr = p_mngr; 1309 1310 return ECORE_SUCCESS; 1311 } 1312 1313 enum _ecore_status_t ecore_cxt_tables_alloc(struct ecore_hwfn *p_hwfn) 1314 { 1315 enum _ecore_status_t rc; 1316 1317 /* Allocate the ILT shadow table */ 1318 rc = ecore_ilt_shadow_alloc(p_hwfn); 1319 if (rc) { 1320 DP_NOTICE(p_hwfn, true, "Failed to allocate ilt memory\n"); 1321 goto tables_alloc_fail; 1322 } 1323 1324 /* Allocate the T2 table */ 1325 rc = ecore_cxt_src_t2_alloc(p_hwfn); 1326 if (rc) { 1327 DP_NOTICE(p_hwfn, true, "Failed to allocate T2 memory\n"); 1328 goto tables_alloc_fail; 1329 } 1330 1331 /* Allocate and initalize the acquired cids bitmaps */ 1332 rc = ecore_cid_map_alloc(p_hwfn); 1333 if (rc) { 1334 DP_NOTICE(p_hwfn, true, "Failed to allocate cid maps\n"); 1335 goto tables_alloc_fail; 1336 } 1337 1338 return ECORE_SUCCESS; 1339 1340 tables_alloc_fail: 1341 ecore_cxt_mngr_free(p_hwfn); 1342 return rc; 1343 } 1344 void ecore_cxt_mngr_free(struct ecore_hwfn *p_hwfn) 1345 { 1346 if (!p_hwfn->p_cxt_mngr) 1347 return; 1348 1349 ecore_cid_map_free(p_hwfn); 1350 ecore_cxt_src_t2_free(p_hwfn); 1351 ecore_ilt_shadow_free(p_hwfn); 1352 OSAL_MUTEX_DEALLOC(&p_hwfn->p_cxt_mngr->mutex); 1353 OSAL_FREE(p_hwfn->p_dev, p_hwfn->p_cxt_mngr); 1354 1355 p_hwfn->p_cxt_mngr = OSAL_NULL; 1356 } 1357 1358 void ecore_cxt_mngr_setup(struct ecore_hwfn *p_hwfn) 1359 { 1360 struct ecore_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr; 1361 struct ecore_cid_acquired_map *p_map; 1362 struct ecore_conn_type_cfg *p_cfg; 1363 int type; 1364 u32 len; 1365 1366 /* Reset acquired cids */ 1367 for (type = 0; type < MAX_CONN_TYPES; type++) { 1368 u32 vf; 1369 1370 p_cfg = &p_mngr->conn_cfg[type]; 1371 if (p_cfg->cid_count) { 1372 p_map = &p_mngr->acquired[type]; 1373 len = DIV_ROUND_UP(p_map->max_count, 1374 BITS_PER_MAP_WORD) * 1375 MAP_WORD_SIZE; 1376 OSAL_MEM_ZERO(p_map->cid_map, len); 1377 } 1378 1379 if (!p_cfg->cids_per_vf) 1380 continue; 1381 1382 for (vf = 0; vf < COMMON_MAX_NUM_VFS; vf++) { 1383 p_map = &p_mngr->acquired_vf[type][vf]; 1384 len = DIV_ROUND_UP(p_map->max_count, 1385 BITS_PER_MAP_WORD) * 1386 MAP_WORD_SIZE; 1387 OSAL_MEM_ZERO(p_map->cid_map, len); 1388 } 1389 } 1390 } 1391 1392 /* HW initialization helper (per Block, per phase) */ 1393 1394 /* CDU Common */ 1395 #define CDUC_CXT_SIZE_SHIFT \ 1396 CDU_REG_CID_ADDR_PARAMS_CONTEXT_SIZE_SHIFT 1397 1398 #define CDUC_CXT_SIZE_MASK \ 1399 (CDU_REG_CID_ADDR_PARAMS_CONTEXT_SIZE >> CDUC_CXT_SIZE_SHIFT) 1400 1401 #define CDUC_BLOCK_WASTE_SHIFT \ 1402 CDU_REG_CID_ADDR_PARAMS_BLOCK_WASTE_SHIFT 1403 1404 #define CDUC_BLOCK_WASTE_MASK \ 1405 (CDU_REG_CID_ADDR_PARAMS_BLOCK_WASTE >> CDUC_BLOCK_WASTE_SHIFT) 1406 1407 #define CDUC_NCIB_SHIFT \ 1408 CDU_REG_CID_ADDR_PARAMS_NCIB_SHIFT 1409 1410 #define CDUC_NCIB_MASK \ 1411 (CDU_REG_CID_ADDR_PARAMS_NCIB >> CDUC_NCIB_SHIFT) 1412 1413 #define CDUT_TYPE0_CXT_SIZE_SHIFT \ 1414 CDU_REG_SEGMENT0_PARAMS_T0_TID_SIZE_SHIFT 1415 1416 #define CDUT_TYPE0_CXT_SIZE_MASK \ 1417 (CDU_REG_SEGMENT0_PARAMS_T0_TID_SIZE >> \ 1418 CDUT_TYPE0_CXT_SIZE_SHIFT) 1419 1420 #define CDUT_TYPE0_BLOCK_WASTE_SHIFT \ 1421 CDU_REG_SEGMENT0_PARAMS_T0_TID_BLOCK_WASTE_SHIFT 1422 1423 #define CDUT_TYPE0_BLOCK_WASTE_MASK \ 1424 (CDU_REG_SEGMENT0_PARAMS_T0_TID_BLOCK_WASTE >> \ 1425 CDUT_TYPE0_BLOCK_WASTE_SHIFT) 1426 1427 #define CDUT_TYPE0_NCIB_SHIFT \ 1428 CDU_REG_SEGMENT0_PARAMS_T0_NUM_TIDS_IN_BLOCK_SHIFT 1429 1430 #define CDUT_TYPE0_NCIB_MASK \ 1431 (CDU_REG_SEGMENT0_PARAMS_T0_NUM_TIDS_IN_BLOCK >> \ 1432 CDUT_TYPE0_NCIB_SHIFT) 1433 1434 #define CDUT_TYPE1_CXT_SIZE_SHIFT \ 1435 CDU_REG_SEGMENT1_PARAMS_T1_TID_SIZE_SHIFT 1436 1437 #define CDUT_TYPE1_CXT_SIZE_MASK \ 1438 (CDU_REG_SEGMENT1_PARAMS_T1_TID_SIZE >> \ 1439 CDUT_TYPE1_CXT_SIZE_SHIFT) 1440 1441 #define CDUT_TYPE1_BLOCK_WASTE_SHIFT \ 1442 CDU_REG_SEGMENT1_PARAMS_T1_TID_BLOCK_WASTE_SHIFT 1443 1444 #define CDUT_TYPE1_BLOCK_WASTE_MASK \ 1445 (CDU_REG_SEGMENT1_PARAMS_T1_TID_BLOCK_WASTE >> \ 1446 CDUT_TYPE1_BLOCK_WASTE_SHIFT) 1447 1448 #define CDUT_TYPE1_NCIB_SHIFT \ 1449 CDU_REG_SEGMENT1_PARAMS_T1_NUM_TIDS_IN_BLOCK_SHIFT 1450 1451 #define CDUT_TYPE1_NCIB_MASK \ 1452 (CDU_REG_SEGMENT1_PARAMS_T1_NUM_TIDS_IN_BLOCK >> \ 1453 CDUT_TYPE1_NCIB_SHIFT) 1454 1455 static void ecore_cdu_init_common(struct ecore_hwfn *p_hwfn) 1456 { 1457 u32 page_sz, elems_per_page, block_waste, cxt_size, cdu_params = 0; 1458 1459 /* CDUC - connection configuration */ 1460 page_sz = p_hwfn->p_cxt_mngr->clients[ILT_CLI_CDUC].p_size.val; 1461 cxt_size = CONN_CXT_SIZE(p_hwfn); 1462 elems_per_page = ILT_PAGE_IN_BYTES(page_sz) / cxt_size; 1463 block_waste = ILT_PAGE_IN_BYTES(page_sz) - elems_per_page * cxt_size; 1464 1465 SET_FIELD(cdu_params, CDUC_CXT_SIZE, cxt_size); 1466 SET_FIELD(cdu_params, CDUC_BLOCK_WASTE, block_waste); 1467 SET_FIELD(cdu_params, CDUC_NCIB, elems_per_page); 1468 STORE_RT_REG(p_hwfn, CDU_REG_CID_ADDR_PARAMS_RT_OFFSET, cdu_params); 1469 1470 /* CDUT - type-0 tasks configuration */ 1471 page_sz = p_hwfn->p_cxt_mngr->clients[ILT_CLI_CDUT].p_size.val; 1472 cxt_size = p_hwfn->p_cxt_mngr->task_type_size[0]; 1473 elems_per_page = ILT_PAGE_IN_BYTES(page_sz) / cxt_size; 1474 block_waste = ILT_PAGE_IN_BYTES(page_sz) - elems_per_page * cxt_size; 1475 1476 /* cxt size and block-waste are multipes of 8 */ 1477 cdu_params = 0; 1478 SET_FIELD(cdu_params, CDUT_TYPE0_CXT_SIZE, (cxt_size >> 3)); 1479 SET_FIELD(cdu_params, CDUT_TYPE0_BLOCK_WASTE, (block_waste >> 3)); 1480 SET_FIELD(cdu_params, CDUT_TYPE0_NCIB, elems_per_page); 1481 STORE_RT_REG(p_hwfn, CDU_REG_SEGMENT0_PARAMS_RT_OFFSET, cdu_params); 1482 1483 /* CDUT - type-1 tasks configuration */ 1484 cxt_size = p_hwfn->p_cxt_mngr->task_type_size[1]; 1485 elems_per_page = ILT_PAGE_IN_BYTES(page_sz) / cxt_size; 1486 block_waste = ILT_PAGE_IN_BYTES(page_sz) - elems_per_page * cxt_size; 1487 1488 /* cxt size and block-waste are multipes of 8 */ 1489 cdu_params = 0; 1490 SET_FIELD(cdu_params, CDUT_TYPE1_CXT_SIZE, (cxt_size >> 3)); 1491 SET_FIELD(cdu_params, CDUT_TYPE1_BLOCK_WASTE, (block_waste >> 3)); 1492 SET_FIELD(cdu_params, CDUT_TYPE1_NCIB, elems_per_page); 1493 STORE_RT_REG(p_hwfn, CDU_REG_SEGMENT1_PARAMS_RT_OFFSET, cdu_params); 1494 } 1495 1496 /* CDU PF */ 1497 #define CDU_SEG_REG_TYPE_SHIFT CDU_SEG_TYPE_OFFSET_REG_TYPE_SHIFT 1498 #define CDU_SEG_REG_TYPE_MASK 0x1 1499 #define CDU_SEG_REG_OFFSET_SHIFT 0 1500 #define CDU_SEG_REG_OFFSET_MASK CDU_SEG_TYPE_OFFSET_REG_OFFSET_MASK 1501 1502 static void ecore_cdu_init_pf(struct ecore_hwfn *p_hwfn) 1503 { 1504 struct ecore_ilt_client_cfg *p_cli; 1505 struct ecore_tid_seg *p_seg; 1506 u32 cdu_seg_params, offset; 1507 int i; 1508 1509 static const u32 rt_type_offset_arr[] = { 1510 CDU_REG_PF_SEG0_TYPE_OFFSET_RT_OFFSET, 1511 CDU_REG_PF_SEG1_TYPE_OFFSET_RT_OFFSET, 1512 CDU_REG_PF_SEG2_TYPE_OFFSET_RT_OFFSET, 1513 CDU_REG_PF_SEG3_TYPE_OFFSET_RT_OFFSET 1514 }; 1515 1516 static const u32 rt_type_offset_fl_arr[] = { 1517 CDU_REG_PF_FL_SEG0_TYPE_OFFSET_RT_OFFSET, 1518 CDU_REG_PF_FL_SEG1_TYPE_OFFSET_RT_OFFSET, 1519 CDU_REG_PF_FL_SEG2_TYPE_OFFSET_RT_OFFSET, 1520 CDU_REG_PF_FL_SEG3_TYPE_OFFSET_RT_OFFSET 1521 }; 1522 1523 p_cli = &p_hwfn->p_cxt_mngr->clients[ILT_CLI_CDUT]; 1524 1525 /* There are initializations only for CDUT during pf Phase */ 1526 for (i = 0; i < NUM_TASK_PF_SEGMENTS; i++) { 1527 /* Segment 0*/ 1528 p_seg = ecore_cxt_tid_seg_info(p_hwfn, i); 1529 if (!p_seg) 1530 continue; 1531 1532 /* Note: start_line is already adjusted for the CDU 1533 * segment register granularity, so we just need to 1534 * divide. Adjustment is implicit as we assume ILT 1535 * Page size is larger than 32K! 1536 */ 1537 offset = (ILT_PAGE_IN_BYTES(p_cli->p_size.val) * 1538 (p_cli->pf_blks[CDUT_SEG_BLK(i)].start_line - 1539 p_cli->first.val)) / CDUT_SEG_ALIGNMET_IN_BYTES; 1540 1541 cdu_seg_params = 0; 1542 SET_FIELD(cdu_seg_params, CDU_SEG_REG_TYPE, p_seg->type); 1543 SET_FIELD(cdu_seg_params, CDU_SEG_REG_OFFSET, offset); 1544 STORE_RT_REG(p_hwfn, rt_type_offset_arr[i], 1545 cdu_seg_params); 1546 1547 offset = (ILT_PAGE_IN_BYTES(p_cli->p_size.val) * 1548 (p_cli->pf_blks[CDUT_FL_SEG_BLK(i, PF)].start_line - 1549 p_cli->first.val)) / CDUT_SEG_ALIGNMET_IN_BYTES; 1550 1551 cdu_seg_params = 0; 1552 SET_FIELD(cdu_seg_params, CDU_SEG_REG_TYPE, p_seg->type); 1553 SET_FIELD(cdu_seg_params, CDU_SEG_REG_OFFSET, offset); 1554 STORE_RT_REG(p_hwfn, rt_type_offset_fl_arr[i], 1555 cdu_seg_params); 1556 1557 } 1558 } 1559 1560 void ecore_qm_init_pf(struct ecore_hwfn *p_hwfn) 1561 { 1562 struct ecore_qm_info *qm_info = &p_hwfn->qm_info; 1563 struct ecore_qm_iids iids; 1564 1565 OSAL_MEM_ZERO(&iids, sizeof(iids)); 1566 ecore_cxt_qm_iids(p_hwfn, &iids); 1567 1568 ecore_qm_pf_rt_init(p_hwfn, p_hwfn->p_main_ptt, p_hwfn->port_id, 1569 p_hwfn->rel_pf_id, qm_info->max_phys_tcs_per_port, 1570 p_hwfn->first_on_engine, 1571 iids.cids, iids.vf_cids, iids.tids, 1572 qm_info->start_pq, 1573 qm_info->num_pqs - qm_info->num_vf_pqs, 1574 qm_info->num_vf_pqs, 1575 qm_info->start_vport, 1576 qm_info->num_vports, qm_info->pf_wfq, qm_info->pf_rl, 1577 p_hwfn->qm_info.qm_pq_params, 1578 p_hwfn->qm_info.qm_vport_params); 1579 } 1580 1581 /* CM PF */ 1582 void ecore_cm_init_pf(struct ecore_hwfn *p_hwfn) 1583 { 1584 STORE_RT_REG(p_hwfn, XCM_REG_CON_PHY_Q3_RT_OFFSET, ecore_get_cm_pq_idx(p_hwfn, PQ_FLAGS_LB)); 1585 } 1586 1587 /* DQ PF */ 1588 static void ecore_dq_init_pf(struct ecore_hwfn *p_hwfn) 1589 { 1590 struct ecore_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr; 1591 u32 dq_pf_max_cid = 0, dq_vf_max_cid = 0; 1592 1593 dq_pf_max_cid += (p_mngr->conn_cfg[0].cid_count >> DQ_RANGE_SHIFT); 1594 STORE_RT_REG(p_hwfn, DORQ_REG_PF_MAX_ICID_0_RT_OFFSET, dq_pf_max_cid); 1595 1596 dq_vf_max_cid += (p_mngr->conn_cfg[0].cids_per_vf >> DQ_RANGE_SHIFT); 1597 STORE_RT_REG(p_hwfn, DORQ_REG_VF_MAX_ICID_0_RT_OFFSET, dq_vf_max_cid); 1598 1599 dq_pf_max_cid += (p_mngr->conn_cfg[1].cid_count >> DQ_RANGE_SHIFT); 1600 STORE_RT_REG(p_hwfn, DORQ_REG_PF_MAX_ICID_1_RT_OFFSET, dq_pf_max_cid); 1601 1602 dq_vf_max_cid += (p_mngr->conn_cfg[1].cids_per_vf >> DQ_RANGE_SHIFT); 1603 STORE_RT_REG(p_hwfn, DORQ_REG_VF_MAX_ICID_1_RT_OFFSET, dq_vf_max_cid); 1604 1605 dq_pf_max_cid += (p_mngr->conn_cfg[2].cid_count >> DQ_RANGE_SHIFT); 1606 STORE_RT_REG(p_hwfn, DORQ_REG_PF_MAX_ICID_2_RT_OFFSET, dq_pf_max_cid); 1607 1608 dq_vf_max_cid += (p_mngr->conn_cfg[2].cids_per_vf >> DQ_RANGE_SHIFT); 1609 STORE_RT_REG(p_hwfn, DORQ_REG_VF_MAX_ICID_2_RT_OFFSET, dq_vf_max_cid); 1610 1611 dq_pf_max_cid += (p_mngr->conn_cfg[3].cid_count >> DQ_RANGE_SHIFT); 1612 STORE_RT_REG(p_hwfn, DORQ_REG_PF_MAX_ICID_3_RT_OFFSET, dq_pf_max_cid); 1613 1614 dq_vf_max_cid += (p_mngr->conn_cfg[3].cids_per_vf >> DQ_RANGE_SHIFT); 1615 STORE_RT_REG(p_hwfn, DORQ_REG_VF_MAX_ICID_3_RT_OFFSET, dq_vf_max_cid); 1616 1617 dq_pf_max_cid += (p_mngr->conn_cfg[4].cid_count >> DQ_RANGE_SHIFT); 1618 STORE_RT_REG(p_hwfn, DORQ_REG_PF_MAX_ICID_4_RT_OFFSET, dq_pf_max_cid); 1619 1620 dq_vf_max_cid += (p_mngr->conn_cfg[4].cids_per_vf >> DQ_RANGE_SHIFT); 1621 STORE_RT_REG(p_hwfn, DORQ_REG_VF_MAX_ICID_4_RT_OFFSET, dq_vf_max_cid); 1622 1623 dq_pf_max_cid += (p_mngr->conn_cfg[5].cid_count >> DQ_RANGE_SHIFT); 1624 STORE_RT_REG(p_hwfn, DORQ_REG_PF_MAX_ICID_5_RT_OFFSET, dq_pf_max_cid); 1625 1626 dq_vf_max_cid += (p_mngr->conn_cfg[5].cids_per_vf >> DQ_RANGE_SHIFT); 1627 STORE_RT_REG(p_hwfn, DORQ_REG_VF_MAX_ICID_5_RT_OFFSET, dq_vf_max_cid); 1628 1629 /* Connection types 6 & 7 are not in use, yet they must be configured 1630 * as the highest possible connection. Not configuring them means the 1631 * defaults will be used, and with a large number of cids a bug may 1632 * occur, if the defaults will be smaller than dq_pf_max_cid / 1633 * dq_vf_max_cid. 1634 */ 1635 STORE_RT_REG(p_hwfn, DORQ_REG_PF_MAX_ICID_6_RT_OFFSET, dq_pf_max_cid); 1636 STORE_RT_REG(p_hwfn, DORQ_REG_VF_MAX_ICID_6_RT_OFFSET, dq_vf_max_cid); 1637 1638 STORE_RT_REG(p_hwfn, DORQ_REG_PF_MAX_ICID_7_RT_OFFSET, dq_pf_max_cid); 1639 STORE_RT_REG(p_hwfn, DORQ_REG_VF_MAX_ICID_7_RT_OFFSET, dq_vf_max_cid); 1640 } 1641 1642 static void ecore_ilt_bounds_init(struct ecore_hwfn *p_hwfn) 1643 { 1644 struct ecore_ilt_client_cfg *ilt_clients; 1645 int i; 1646 1647 ilt_clients = p_hwfn->p_cxt_mngr->clients; 1648 for_each_ilt_valid_client(i, ilt_clients) { 1649 STORE_RT_REG(p_hwfn, 1650 ilt_clients[i].first.reg, 1651 ilt_clients[i].first.val); 1652 STORE_RT_REG(p_hwfn, 1653 ilt_clients[i].last.reg, 1654 ilt_clients[i].last.val); 1655 STORE_RT_REG(p_hwfn, 1656 ilt_clients[i].p_size.reg, 1657 ilt_clients[i].p_size.val); 1658 } 1659 } 1660 1661 static void ecore_ilt_vf_bounds_init(struct ecore_hwfn *p_hwfn) 1662 { 1663 struct ecore_ilt_client_cfg *p_cli; 1664 u32 blk_factor; 1665 1666 /* For simplicty we set the 'block' to be an ILT page */ 1667 if (p_hwfn->p_dev->p_iov_info) { 1668 struct ecore_hw_sriov_info *p_iov = p_hwfn->p_dev->p_iov_info; 1669 1670 STORE_RT_REG(p_hwfn, 1671 PSWRQ2_REG_VF_BASE_RT_OFFSET, 1672 p_iov->first_vf_in_pf); 1673 STORE_RT_REG(p_hwfn, 1674 PSWRQ2_REG_VF_LAST_ILT_RT_OFFSET, 1675 p_iov->first_vf_in_pf + p_iov->total_vfs); 1676 } 1677 1678 p_cli = &p_hwfn->p_cxt_mngr->clients[ILT_CLI_CDUC]; 1679 blk_factor = OSAL_LOG2(ILT_PAGE_IN_BYTES(p_cli->p_size.val) >> 10); 1680 if (p_cli->active) { 1681 STORE_RT_REG(p_hwfn, 1682 PSWRQ2_REG_CDUC_BLOCKS_FACTOR_RT_OFFSET, 1683 blk_factor); 1684 STORE_RT_REG(p_hwfn, 1685 PSWRQ2_REG_CDUC_NUMBER_OF_PF_BLOCKS_RT_OFFSET, 1686 p_cli->pf_total_lines); 1687 STORE_RT_REG(p_hwfn, 1688 PSWRQ2_REG_CDUC_VF_BLOCKS_RT_OFFSET, 1689 p_cli->vf_total_lines); 1690 } 1691 1692 p_cli = &p_hwfn->p_cxt_mngr->clients[ILT_CLI_CDUT]; 1693 blk_factor = OSAL_LOG2(ILT_PAGE_IN_BYTES(p_cli->p_size.val) >> 10); 1694 if (p_cli->active) { 1695 STORE_RT_REG(p_hwfn, 1696 PSWRQ2_REG_CDUT_BLOCKS_FACTOR_RT_OFFSET, 1697 blk_factor); 1698 STORE_RT_REG(p_hwfn, 1699 PSWRQ2_REG_CDUT_NUMBER_OF_PF_BLOCKS_RT_OFFSET, 1700 p_cli->pf_total_lines); 1701 STORE_RT_REG(p_hwfn, 1702 PSWRQ2_REG_CDUT_VF_BLOCKS_RT_OFFSET, 1703 p_cli->vf_total_lines); 1704 } 1705 1706 p_cli = &p_hwfn->p_cxt_mngr->clients[ILT_CLI_TM]; 1707 blk_factor = OSAL_LOG2(ILT_PAGE_IN_BYTES(p_cli->p_size.val) >> 10); 1708 if (p_cli->active) { 1709 STORE_RT_REG(p_hwfn, 1710 PSWRQ2_REG_TM_BLOCKS_FACTOR_RT_OFFSET, 1711 blk_factor); 1712 STORE_RT_REG(p_hwfn, 1713 PSWRQ2_REG_TM_NUMBER_OF_PF_BLOCKS_RT_OFFSET, 1714 p_cli->pf_total_lines); 1715 STORE_RT_REG(p_hwfn, 1716 PSWRQ2_REG_TM_VF_BLOCKS_RT_OFFSET, 1717 p_cli->vf_total_lines); 1718 } 1719 } 1720 1721 /* ILT (PSWRQ2) PF */ 1722 static void ecore_ilt_init_pf(struct ecore_hwfn *p_hwfn) 1723 { 1724 struct ecore_ilt_client_cfg *clients; 1725 struct ecore_cxt_mngr *p_mngr; 1726 struct ecore_dma_mem *p_shdw; 1727 u32 line, rt_offst, i; 1728 1729 ecore_ilt_bounds_init(p_hwfn); 1730 ecore_ilt_vf_bounds_init(p_hwfn); 1731 1732 p_mngr = p_hwfn->p_cxt_mngr; 1733 p_shdw = p_mngr->ilt_shadow; 1734 clients = p_hwfn->p_cxt_mngr->clients; 1735 1736 for_each_ilt_valid_client(i, clients) { 1737 /* Client's 1st val and RT array are absolute, ILT shadows' 1738 * lines are relative. 1739 */ 1740 line = clients[i].first.val - p_mngr->pf_start_line; 1741 rt_offst = PSWRQ2_REG_ILT_MEMORY_RT_OFFSET + 1742 clients[i].first.val * ILT_ENTRY_IN_REGS; 1743 1744 for (; line <= clients[i].last.val - p_mngr->pf_start_line; 1745 line++, rt_offst += ILT_ENTRY_IN_REGS) { 1746 u64 ilt_hw_entry = 0; 1747 1748 /** p_virt could be OSAL_NULL incase of dynamic 1749 * allocation 1750 */ 1751 if (p_shdw[line].p_virt != OSAL_NULL) { 1752 SET_FIELD(ilt_hw_entry, ILT_ENTRY_VALID, 1ULL); 1753 SET_FIELD(ilt_hw_entry, ILT_ENTRY_PHY_ADDR, 1754 (p_shdw[line].p_phys >> 12)); 1755 1756 DP_VERBOSE( 1757 p_hwfn, ECORE_MSG_ILT, 1758 "Setting RT[0x%08x] from ILT[0x%08x] [Client is %d] to Physical addr: 0x%llx\n", 1759 rt_offst, line, i, 1760 (u64)(p_shdw[line].p_phys >> 12)); 1761 } 1762 1763 STORE_RT_REG_AGG(p_hwfn, rt_offst, ilt_hw_entry); 1764 } 1765 } 1766 } 1767 1768 /* SRC (Searcher) PF */ 1769 static void ecore_src_init_pf(struct ecore_hwfn *p_hwfn) 1770 { 1771 struct ecore_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr; 1772 u32 rounded_conn_num, conn_num, conn_max; 1773 struct ecore_src_iids src_iids; 1774 1775 OSAL_MEM_ZERO(&src_iids, sizeof(src_iids)); 1776 ecore_cxt_src_iids(p_hwfn, p_mngr, &src_iids); 1777 conn_num = src_iids.pf_cids + src_iids.per_vf_cids * p_mngr->vf_count; 1778 if (!conn_num) 1779 return; 1780 1781 conn_max = OSAL_MAX_T(u32, conn_num, SRC_MIN_NUM_ELEMS); 1782 rounded_conn_num = OSAL_ROUNDUP_POW_OF_TWO(conn_max); 1783 1784 STORE_RT_REG(p_hwfn, SRC_REG_COUNTFREE_RT_OFFSET, conn_num); 1785 STORE_RT_REG(p_hwfn, SRC_REG_NUMBER_HASH_BITS_RT_OFFSET, 1786 OSAL_LOG2(rounded_conn_num)); 1787 1788 STORE_RT_REG_AGG(p_hwfn, SRC_REG_FIRSTFREE_RT_OFFSET, 1789 p_hwfn->p_cxt_mngr->first_free); 1790 STORE_RT_REG_AGG(p_hwfn, SRC_REG_LASTFREE_RT_OFFSET, 1791 p_hwfn->p_cxt_mngr->last_free); 1792 DP_VERBOSE(p_hwfn, ECORE_MSG_ILT, 1793 "Configured SEARCHER for 0x%08x connections\n", 1794 conn_num); 1795 } 1796 1797 /* Timers PF */ 1798 #define TM_CFG_NUM_IDS_SHIFT 0 1799 #define TM_CFG_NUM_IDS_MASK 0xFFFFULL 1800 #define TM_CFG_PRE_SCAN_OFFSET_SHIFT 16 1801 #define TM_CFG_PRE_SCAN_OFFSET_MASK 0x1FFULL 1802 #define TM_CFG_PARENT_PF_SHIFT 25 1803 #define TM_CFG_PARENT_PF_MASK 0x7ULL 1804 1805 #define TM_CFG_CID_PRE_SCAN_ROWS_SHIFT 30 1806 #define TM_CFG_CID_PRE_SCAN_ROWS_MASK 0x1FFULL 1807 1808 #define TM_CFG_TID_OFFSET_SHIFT 30 1809 #define TM_CFG_TID_OFFSET_MASK 0x7FFFFULL 1810 #define TM_CFG_TID_PRE_SCAN_ROWS_SHIFT 49 1811 #define TM_CFG_TID_PRE_SCAN_ROWS_MASK 0x1FFULL 1812 1813 static void ecore_tm_init_pf(struct ecore_hwfn *p_hwfn) 1814 { 1815 struct ecore_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr; 1816 u32 active_seg_mask = 0, tm_offset, rt_reg; 1817 struct ecore_tm_iids tm_iids; 1818 u64 cfg_word; 1819 u8 i; 1820 1821 OSAL_MEM_ZERO(&tm_iids, sizeof(tm_iids)); 1822 ecore_cxt_tm_iids(p_hwfn, p_mngr, &tm_iids); 1823 1824 /* @@@TBD No pre-scan for now */ 1825 1826 /* Note: We assume consecutive VFs for a PF */ 1827 for (i = 0; i < p_mngr->vf_count; i++) { 1828 cfg_word = 0; 1829 SET_FIELD(cfg_word, TM_CFG_NUM_IDS, tm_iids.per_vf_cids); 1830 SET_FIELD(cfg_word, TM_CFG_PRE_SCAN_OFFSET, 0); 1831 SET_FIELD(cfg_word, TM_CFG_PARENT_PF, p_hwfn->rel_pf_id); 1832 SET_FIELD(cfg_word, TM_CFG_CID_PRE_SCAN_ROWS, 0); /* scan all */ 1833 1834 rt_reg = TM_REG_CONFIG_CONN_MEM_RT_OFFSET + 1835 (sizeof(cfg_word) / sizeof(u32)) * 1836 (p_hwfn->p_dev->p_iov_info->first_vf_in_pf + i); 1837 STORE_RT_REG_AGG(p_hwfn, rt_reg, cfg_word); 1838 } 1839 1840 cfg_word = 0; 1841 SET_FIELD(cfg_word, TM_CFG_NUM_IDS, tm_iids.pf_cids); 1842 SET_FIELD(cfg_word, TM_CFG_PRE_SCAN_OFFSET, 0); 1843 SET_FIELD(cfg_word, TM_CFG_PARENT_PF, 0); /* n/a for PF */ 1844 SET_FIELD(cfg_word, TM_CFG_CID_PRE_SCAN_ROWS, 0); /* scan all */ 1845 1846 rt_reg = TM_REG_CONFIG_CONN_MEM_RT_OFFSET + 1847 (sizeof(cfg_word) / sizeof(u32)) * 1848 (NUM_OF_VFS(p_hwfn->p_dev) + p_hwfn->rel_pf_id); 1849 STORE_RT_REG_AGG(p_hwfn, rt_reg, cfg_word); 1850 1851 /* enale scan */ 1852 STORE_RT_REG(p_hwfn, TM_REG_PF_ENABLE_CONN_RT_OFFSET, 1853 tm_iids.pf_cids ? 0x1 : 0x0); 1854 1855 /* @@@TBD how to enable the scan for the VFs */ 1856 1857 tm_offset = tm_iids.per_vf_cids; 1858 1859 /* Note: We assume consecutive VFs for a PF */ 1860 for (i = 0; i < p_mngr->vf_count; i++) { 1861 cfg_word = 0; 1862 SET_FIELD(cfg_word, TM_CFG_NUM_IDS, tm_iids.per_vf_tids); 1863 SET_FIELD(cfg_word, TM_CFG_PRE_SCAN_OFFSET, 0); 1864 SET_FIELD(cfg_word, TM_CFG_PARENT_PF, p_hwfn->rel_pf_id); 1865 SET_FIELD(cfg_word, TM_CFG_TID_OFFSET, tm_offset); 1866 SET_FIELD(cfg_word, TM_CFG_TID_PRE_SCAN_ROWS, (u64)0); 1867 1868 rt_reg = TM_REG_CONFIG_TASK_MEM_RT_OFFSET + 1869 (sizeof(cfg_word) / sizeof(u32)) * 1870 (p_hwfn->p_dev->p_iov_info->first_vf_in_pf + i); 1871 1872 STORE_RT_REG_AGG(p_hwfn, rt_reg, cfg_word); 1873 } 1874 1875 tm_offset = tm_iids.pf_cids; 1876 for (i = 0; i < NUM_TASK_PF_SEGMENTS; i++) { 1877 cfg_word = 0; 1878 SET_FIELD(cfg_word, TM_CFG_NUM_IDS, tm_iids.pf_tids[i]); 1879 SET_FIELD(cfg_word, TM_CFG_PRE_SCAN_OFFSET, 0); 1880 SET_FIELD(cfg_word, TM_CFG_PARENT_PF, 0); 1881 SET_FIELD(cfg_word, TM_CFG_TID_OFFSET, tm_offset); 1882 SET_FIELD(cfg_word, TM_CFG_TID_PRE_SCAN_ROWS, (u64)0); 1883 1884 rt_reg = TM_REG_CONFIG_TASK_MEM_RT_OFFSET + 1885 (sizeof(cfg_word) / sizeof(u32)) * 1886 (NUM_OF_VFS(p_hwfn->p_dev) + 1887 p_hwfn->rel_pf_id * NUM_TASK_PF_SEGMENTS + i); 1888 1889 STORE_RT_REG_AGG(p_hwfn, rt_reg, cfg_word); 1890 active_seg_mask |= (tm_iids.pf_tids[i] ? (1 << i) : 0); 1891 1892 tm_offset += tm_iids.pf_tids[i]; 1893 } 1894 1895 if (ECORE_IS_RDMA_PERSONALITY(p_hwfn)) 1896 active_seg_mask = 0; 1897 1898 STORE_RT_REG(p_hwfn, TM_REG_PF_ENABLE_TASK_RT_OFFSET, active_seg_mask); 1899 1900 /* @@@TBD how to enable the scan for the VFs */ 1901 } 1902 1903 static void ecore_prs_init_common(struct ecore_hwfn *p_hwfn) 1904 { 1905 if ((p_hwfn->hw_info.personality == ECORE_PCI_FCOE) && 1906 p_hwfn->pf_params.fcoe_pf_params.is_target) 1907 STORE_RT_REG(p_hwfn, PRS_REG_SEARCH_RESP_INITIATOR_TYPE_RT_OFFSET, 0); 1908 } 1909 1910 static void ecore_prs_init_pf(struct ecore_hwfn *p_hwfn) 1911 { 1912 struct ecore_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr; 1913 struct ecore_conn_type_cfg *p_fcoe = &p_mngr->conn_cfg[PROTOCOLID_FCOE]; 1914 struct ecore_tid_seg *p_tid; 1915 1916 /* If FCoE is active set the MAX OX_ID (tid) in the Parser */ 1917 if (!p_fcoe->cid_count) 1918 return; 1919 1920 p_tid = &p_fcoe->tid_seg[ECORE_CXT_FCOE_TID_SEG]; 1921 if (p_hwfn->pf_params.fcoe_pf_params.is_target) { 1922 STORE_RT_REG_AGG(p_hwfn, 1923 PRS_REG_TASK_ID_MAX_TARGET_PF_RT_OFFSET, 1924 p_tid->count); 1925 } else { 1926 STORE_RT_REG_AGG(p_hwfn, 1927 PRS_REG_TASK_ID_MAX_INITIATOR_PF_RT_OFFSET, 1928 p_tid->count); 1929 } 1930 } 1931 1932 void ecore_cxt_hw_init_common(struct ecore_hwfn *p_hwfn) 1933 { 1934 /* CDU configuration */ 1935 ecore_cdu_init_common(p_hwfn); 1936 ecore_prs_init_common(p_hwfn); 1937 } 1938 1939 void ecore_cxt_hw_init_pf(struct ecore_hwfn *p_hwfn) 1940 { 1941 ecore_qm_init_pf(p_hwfn); 1942 ecore_cm_init_pf(p_hwfn); 1943 ecore_dq_init_pf(p_hwfn); 1944 ecore_cdu_init_pf(p_hwfn); 1945 ecore_ilt_init_pf(p_hwfn); 1946 ecore_src_init_pf(p_hwfn); 1947 ecore_tm_init_pf(p_hwfn); 1948 ecore_prs_init_pf(p_hwfn); 1949 } 1950 1951 enum _ecore_status_t _ecore_cxt_acquire_cid(struct ecore_hwfn *p_hwfn, 1952 enum protocol_type type, 1953 u32 *p_cid, u8 vfid) 1954 { 1955 struct ecore_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr; 1956 struct ecore_cid_acquired_map *p_map; 1957 u32 rel_cid; 1958 1959 if (type >= MAX_CONN_TYPES) { 1960 DP_NOTICE(p_hwfn, true, "Invalid protocol type %d", type); 1961 return ECORE_INVAL; 1962 } 1963 1964 if (vfid >= COMMON_MAX_NUM_VFS && vfid != ECORE_CXT_PF_CID) { 1965 DP_NOTICE(p_hwfn, true, "VF [%02x] is out of range\n", vfid); 1966 return ECORE_INVAL; 1967 } 1968 1969 /* Determine the right map to take this CID from */ 1970 if (vfid == ECORE_CXT_PF_CID) 1971 p_map = &p_mngr->acquired[type]; 1972 else 1973 p_map = &p_mngr->acquired_vf[type][vfid]; 1974 1975 if (p_map->cid_map == OSAL_NULL) { 1976 DP_NOTICE(p_hwfn, true, "Invalid protocol type %d", type); 1977 return ECORE_INVAL; 1978 } 1979 1980 rel_cid = OSAL_FIND_FIRST_ZERO_BIT(p_map->cid_map, 1981 p_map->max_count); 1982 1983 if (rel_cid >= p_map->max_count) { 1984 DP_NOTICE(p_hwfn, false, "no CID available for protocol %d\n", 1985 type); 1986 return ECORE_NORESOURCES; 1987 } 1988 1989 OSAL_SET_BIT(rel_cid, p_map->cid_map); 1990 1991 *p_cid = rel_cid + p_map->start_cid; 1992 1993 DP_VERBOSE(p_hwfn, ECORE_MSG_CXT, 1994 "Acquired cid 0x%08x [rel. %08x] vfid %02x type %d\n", 1995 *p_cid, rel_cid, vfid, type); 1996 1997 return ECORE_SUCCESS; 1998 } 1999 2000 enum _ecore_status_t ecore_cxt_acquire_cid(struct ecore_hwfn *p_hwfn, 2001 enum protocol_type type, 2002 u32 *p_cid) 2003 { 2004 return _ecore_cxt_acquire_cid(p_hwfn, type, p_cid, ECORE_CXT_PF_CID); 2005 } 2006 2007 static bool ecore_cxt_test_cid_acquired(struct ecore_hwfn *p_hwfn, 2008 u32 cid, u8 vfid, 2009 enum protocol_type *p_type, 2010 struct ecore_cid_acquired_map **pp_map) 2011 { 2012 struct ecore_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr; 2013 u32 rel_cid; 2014 2015 /* Iterate over protocols and find matching cid range */ 2016 for (*p_type = 0; *p_type < MAX_CONN_TYPES; (*p_type)++) { 2017 if (vfid == ECORE_CXT_PF_CID) 2018 *pp_map = &p_mngr->acquired[*p_type]; 2019 else 2020 *pp_map = &p_mngr->acquired_vf[*p_type][vfid]; 2021 2022 if (!((*pp_map)->cid_map)) 2023 continue; 2024 if (cid >= (*pp_map)->start_cid && 2025 cid < (*pp_map)->start_cid + (*pp_map)->max_count) { 2026 break; 2027 } 2028 } 2029 2030 if (*p_type == MAX_CONN_TYPES) { 2031 DP_NOTICE(p_hwfn, true, "Invalid CID %d vfid %02x", cid, vfid); 2032 goto fail; 2033 } 2034 2035 rel_cid = cid - (*pp_map)->start_cid; 2036 if (!OSAL_TEST_BIT(rel_cid, (*pp_map)->cid_map)) { 2037 DP_NOTICE(p_hwfn, true, 2038 "CID %d [vifd %02x] not acquired", cid, vfid); 2039 goto fail; 2040 } 2041 2042 return true; 2043 fail: 2044 *p_type = MAX_CONN_TYPES; 2045 *pp_map = OSAL_NULL; 2046 return false; 2047 } 2048 2049 void _ecore_cxt_release_cid(struct ecore_hwfn *p_hwfn, u32 cid, u8 vfid) 2050 { 2051 struct ecore_cid_acquired_map *p_map = OSAL_NULL; 2052 enum protocol_type type; 2053 bool b_acquired; 2054 u32 rel_cid; 2055 2056 if (vfid != ECORE_CXT_PF_CID && vfid > COMMON_MAX_NUM_VFS) { 2057 DP_NOTICE(p_hwfn, true, 2058 "Trying to return incorrect CID belonging to VF %02x\n", 2059 vfid); 2060 return; 2061 } 2062 2063 /* Test acquired and find matching per-protocol map */ 2064 b_acquired = ecore_cxt_test_cid_acquired(p_hwfn, cid, vfid, 2065 &type, &p_map); 2066 2067 if (!b_acquired) 2068 return; 2069 2070 rel_cid = cid - p_map->start_cid; 2071 OSAL_CLEAR_BIT(rel_cid, p_map->cid_map); 2072 2073 DP_VERBOSE(p_hwfn, ECORE_MSG_CXT, 2074 "Released CID 0x%08x [rel. %08x] vfid %02x type %d\n", 2075 cid, rel_cid, vfid, type); 2076 } 2077 2078 void ecore_cxt_release_cid(struct ecore_hwfn *p_hwfn, u32 cid) 2079 { 2080 _ecore_cxt_release_cid(p_hwfn, cid, ECORE_CXT_PF_CID); 2081 } 2082 2083 enum _ecore_status_t ecore_cxt_get_cid_info(struct ecore_hwfn *p_hwfn, 2084 struct ecore_cxt_info *p_info) 2085 { 2086 struct ecore_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr; 2087 struct ecore_cid_acquired_map *p_map = OSAL_NULL; 2088 u32 conn_cxt_size, hw_p_size, cxts_per_p, line; 2089 enum protocol_type type; 2090 bool b_acquired; 2091 2092 /* Test acquired and find matching per-protocol map */ 2093 b_acquired = ecore_cxt_test_cid_acquired(p_hwfn, p_info->iid, 2094 ECORE_CXT_PF_CID, 2095 &type, &p_map); 2096 2097 if (!b_acquired) 2098 return ECORE_INVAL; 2099 2100 /* set the protocl type */ 2101 p_info->type = type; 2102 2103 /* compute context virtual pointer */ 2104 hw_p_size = p_hwfn->p_cxt_mngr->clients[ILT_CLI_CDUC].p_size.val; 2105 2106 conn_cxt_size = CONN_CXT_SIZE(p_hwfn); 2107 cxts_per_p = ILT_PAGE_IN_BYTES(hw_p_size) / conn_cxt_size; 2108 line = p_info->iid / cxts_per_p; 2109 2110 /* Make sure context is allocated (dynamic allocation) */ 2111 if (!p_mngr->ilt_shadow[line].p_virt) 2112 return ECORE_INVAL; 2113 2114 p_info->p_cxt = (u8 *)p_mngr->ilt_shadow[line].p_virt + 2115 p_info->iid % cxts_per_p * conn_cxt_size; 2116 2117 DP_VERBOSE(p_hwfn, (ECORE_MSG_ILT | ECORE_MSG_CXT), 2118 "Accessing ILT shadow[%d]: CXT pointer is at %p (for iid %d)\n", 2119 (p_info->iid / cxts_per_p), p_info->p_cxt, p_info->iid); 2120 2121 return ECORE_SUCCESS; 2122 } 2123 2124 static void ecore_cxt_set_srq_count(struct ecore_hwfn *p_hwfn, u32 num_srqs) 2125 { 2126 struct ecore_cxt_mngr *p_mgr = p_hwfn->p_cxt_mngr; 2127 2128 p_mgr->srq_count = num_srqs; 2129 } 2130 2131 u32 ecore_cxt_get_srq_count(struct ecore_hwfn *p_hwfn) 2132 { 2133 struct ecore_cxt_mngr *p_mgr = p_hwfn->p_cxt_mngr; 2134 2135 return p_mgr->srq_count; 2136 } 2137 2138 static void ecore_rdma_set_pf_params(struct ecore_hwfn *p_hwfn, 2139 struct ecore_rdma_pf_params *p_params, 2140 u32 num_tasks) 2141 { 2142 u32 num_cons, num_qps, num_srqs; 2143 enum protocol_type proto; 2144 2145 /* Override personality with rdma flavor */ 2146 num_srqs = OSAL_MIN_T(u32, ECORE_RDMA_MAX_SRQS, p_params->num_srqs); 2147 2148 /* The only case RDMA personality can be overriden is if NVRAM is 2149 * configured with ETH_RDMA or if no rdma protocol was requested 2150 */ 2151 switch (p_params->rdma_protocol) { 2152 case ECORE_RDMA_PROTOCOL_DEFAULT: 2153 if (p_hwfn->mcp_info->func_info.protocol == 2154 ECORE_PCI_ETH_RDMA) { 2155 DP_NOTICE(p_hwfn, false, 2156 "Current day drivers don't support RoCE & iWARP. Default to RoCE-only\n"); 2157 p_hwfn->hw_info.personality = ECORE_PCI_ETH_ROCE; 2158 } 2159 break; 2160 case ECORE_RDMA_PROTOCOL_NONE: 2161 p_hwfn->hw_info.personality = ECORE_PCI_ETH; 2162 return; /* intentional... nothing left to do... */ 2163 case ECORE_RDMA_PROTOCOL_ROCE: 2164 if (p_hwfn->mcp_info->func_info.protocol == ECORE_PCI_ETH_RDMA) 2165 p_hwfn->hw_info.personality = ECORE_PCI_ETH_ROCE; 2166 break; 2167 case ECORE_RDMA_PROTOCOL_IWARP: 2168 if (p_hwfn->mcp_info->func_info.protocol == ECORE_PCI_ETH_RDMA) 2169 p_hwfn->hw_info.personality = ECORE_PCI_ETH_IWARP; 2170 break; 2171 } 2172 2173 switch (p_hwfn->hw_info.personality) { 2174 case ECORE_PCI_ETH_IWARP: 2175 /* Each QP requires one connection */ 2176 num_cons = OSAL_MIN_T(u32, IWARP_MAX_QPS, p_params->num_qps); 2177 #ifdef CONFIG_ECORE_IWARP /* required for the define */ 2178 /* additional connections required for passive tcp handling */ 2179 num_cons += ECORE_IWARP_PREALLOC_CNT; 2180 #endif 2181 proto = PROTOCOLID_IWARP; 2182 p_params->roce_edpm_mode = false; 2183 break; 2184 case ECORE_PCI_ETH_ROCE: 2185 num_qps = OSAL_MIN_T(u32, ROCE_MAX_QPS, p_params->num_qps); 2186 num_cons = num_qps * 2; /* each QP requires two connections */ 2187 proto = PROTOCOLID_ROCE; 2188 break; 2189 default: 2190 return; 2191 } 2192 2193 if (num_cons && num_tasks) { 2194 ecore_cxt_set_proto_cid_count(p_hwfn, proto, 2195 num_cons, 0); 2196 2197 /* Deliberatly passing ROCE for tasks id. This is because 2198 * iWARP / RoCE share the task id. 2199 */ 2200 ecore_cxt_set_proto_tid_count(p_hwfn, PROTOCOLID_ROCE, 2201 ECORE_CXT_ROCE_TID_SEG, 2202 1, /* RoCE segment type */ 2203 num_tasks, 2204 false); /* !force load */ 2205 ecore_cxt_set_srq_count(p_hwfn, num_srqs); 2206 2207 } else { 2208 DP_INFO(p_hwfn->p_dev, 2209 "RDMA personality used without setting params!\n"); 2210 } 2211 } 2212 2213 enum _ecore_status_t ecore_cxt_set_pf_params(struct ecore_hwfn *p_hwfn, 2214 u32 rdma_tasks) 2215 { 2216 /* Set the number of required CORE connections */ 2217 u32 core_cids = 1; /* SPQ */ 2218 2219 if (p_hwfn->using_ll2) 2220 core_cids += 4; /* @@@TBD Use the proper #define */ 2221 2222 ecore_cxt_set_proto_cid_count(p_hwfn, PROTOCOLID_CORE, core_cids, 0); 2223 2224 switch (p_hwfn->hw_info.personality) { 2225 case ECORE_PCI_ETH_RDMA: 2226 case ECORE_PCI_ETH_IWARP: 2227 case ECORE_PCI_ETH_ROCE: 2228 ecore_rdma_set_pf_params(p_hwfn, 2229 &p_hwfn->pf_params.rdma_pf_params, 2230 rdma_tasks); 2231 2232 /* no need for break since RoCE coexist with Ethernet */ 2233 /* FALLTHROUGH */ 2234 case ECORE_PCI_ETH: 2235 { 2236 struct ecore_eth_pf_params *p_params = 2237 &p_hwfn->pf_params.eth_pf_params; 2238 2239 if (!p_params->num_vf_cons) 2240 p_params->num_vf_cons = ETH_PF_PARAMS_VF_CONS_DEFAULT; 2241 ecore_cxt_set_proto_cid_count(p_hwfn, PROTOCOLID_ETH, 2242 p_params->num_cons, 2243 p_params->num_vf_cons); 2244 p_hwfn->p_cxt_mngr->arfs_count = p_params->num_arfs_filters; 2245 2246 break; 2247 } 2248 case ECORE_PCI_FCOE: 2249 { 2250 struct ecore_fcoe_pf_params *p_params; 2251 2252 p_params = &p_hwfn->pf_params.fcoe_pf_params; 2253 2254 if (p_params->num_cons && p_params->num_tasks) { 2255 ecore_cxt_set_proto_cid_count(p_hwfn, PROTOCOLID_FCOE, 2256 p_params->num_cons, 0); 2257 2258 ecore_cxt_set_proto_tid_count(p_hwfn, PROTOCOLID_FCOE, 2259 ECORE_CXT_FCOE_TID_SEG, 2260 0, /* segment type */ 2261 p_params->num_tasks, 2262 true); 2263 } else { 2264 DP_INFO(p_hwfn->p_dev, 2265 "Fcoe personality used without setting params!\n"); 2266 } 2267 break; 2268 } 2269 case ECORE_PCI_ISCSI: 2270 { 2271 struct ecore_iscsi_pf_params *p_params; 2272 2273 p_params = &p_hwfn->pf_params.iscsi_pf_params; 2274 2275 if (p_params->num_cons && p_params->num_tasks) { 2276 ecore_cxt_set_proto_cid_count(p_hwfn, PROTOCOLID_ISCSI, 2277 p_params->num_cons, 0); 2278 2279 ecore_cxt_set_proto_tid_count(p_hwfn, PROTOCOLID_ISCSI, 2280 ECORE_CXT_ISCSI_TID_SEG, 2281 0, /* segment type */ 2282 p_params->num_tasks, 2283 true); 2284 } else { 2285 DP_INFO(p_hwfn->p_dev, 2286 "Iscsi personality used without setting params!\n"); 2287 } 2288 break; 2289 } 2290 default: 2291 return ECORE_INVAL; 2292 } 2293 2294 return ECORE_SUCCESS; 2295 } 2296 2297 enum _ecore_status_t ecore_cxt_get_tid_mem_info(struct ecore_hwfn *p_hwfn, 2298 struct ecore_tid_mem *p_info) 2299 { 2300 struct ecore_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr; 2301 u32 proto, seg, total_lines, i, shadow_line; 2302 struct ecore_ilt_client_cfg *p_cli; 2303 struct ecore_ilt_cli_blk *p_fl_seg; 2304 struct ecore_tid_seg *p_seg_info; 2305 2306 /* Verify the personality */ 2307 switch (p_hwfn->hw_info.personality) { 2308 case ECORE_PCI_FCOE: 2309 proto = PROTOCOLID_FCOE; 2310 seg = ECORE_CXT_FCOE_TID_SEG; 2311 break; 2312 case ECORE_PCI_ISCSI: 2313 proto = PROTOCOLID_ISCSI; 2314 seg = ECORE_CXT_ISCSI_TID_SEG; 2315 break; 2316 default: 2317 return ECORE_INVAL; 2318 } 2319 2320 p_cli = &p_mngr->clients[ILT_CLI_CDUT]; 2321 if (!p_cli->active) { 2322 return ECORE_INVAL; 2323 } 2324 2325 p_seg_info = &p_mngr->conn_cfg[proto].tid_seg[seg]; 2326 if (!p_seg_info->has_fl_mem) 2327 return ECORE_INVAL; 2328 2329 p_fl_seg = &p_cli->pf_blks[CDUT_FL_SEG_BLK(seg, PF)]; 2330 total_lines = DIV_ROUND_UP(p_fl_seg->total_size, 2331 p_fl_seg->real_size_in_page); 2332 2333 for (i = 0; i < total_lines; i++) { 2334 shadow_line = i + p_fl_seg->start_line - 2335 p_hwfn->p_cxt_mngr->pf_start_line; 2336 p_info->blocks[i] = p_mngr->ilt_shadow[shadow_line].p_virt; 2337 } 2338 p_info->waste = ILT_PAGE_IN_BYTES(p_cli->p_size.val) - 2339 p_fl_seg->real_size_in_page; 2340 p_info->tid_size = p_mngr->task_type_size[p_seg_info->type]; 2341 p_info->num_tids_per_block = p_fl_seg->real_size_in_page / 2342 p_info->tid_size; 2343 2344 return ECORE_SUCCESS; 2345 } 2346 2347 /* This function is very RoCE oriented, if another protocol in the future 2348 * will want this feature we'll need to modify the function to be more generic 2349 */ 2350 enum _ecore_status_t 2351 ecore_cxt_dynamic_ilt_alloc(struct ecore_hwfn *p_hwfn, 2352 enum ecore_cxt_elem_type elem_type, 2353 u32 iid) 2354 { 2355 u32 reg_offset, shadow_line, elem_size, hw_p_size, elems_per_p, line; 2356 struct ecore_ilt_client_cfg *p_cli; 2357 struct ecore_ilt_cli_blk *p_blk; 2358 struct ecore_ptt *p_ptt; 2359 dma_addr_t p_phys; 2360 u64 ilt_hw_entry; 2361 void *p_virt; 2362 enum _ecore_status_t rc = ECORE_SUCCESS; 2363 2364 switch (elem_type) { 2365 case ECORE_ELEM_CXT: 2366 p_cli = &p_hwfn->p_cxt_mngr->clients[ILT_CLI_CDUC]; 2367 elem_size = CONN_CXT_SIZE(p_hwfn); 2368 p_blk = &p_cli->pf_blks[CDUC_BLK]; 2369 break; 2370 case ECORE_ELEM_SRQ: 2371 p_cli = &p_hwfn->p_cxt_mngr->clients[ILT_CLI_TSDM]; 2372 elem_size = SRQ_CXT_SIZE; 2373 p_blk = &p_cli->pf_blks[SRQ_BLK]; 2374 break; 2375 case ECORE_ELEM_TASK: 2376 p_cli = &p_hwfn->p_cxt_mngr->clients[ILT_CLI_CDUT]; 2377 elem_size = TYPE1_TASK_CXT_SIZE(p_hwfn); 2378 p_blk = &p_cli->pf_blks[CDUT_SEG_BLK(ECORE_CXT_ROCE_TID_SEG)]; 2379 break; 2380 default: 2381 DP_NOTICE(p_hwfn, false, 2382 "ECORE_INVALID elem type = %d", elem_type); 2383 return ECORE_INVAL; 2384 } 2385 2386 /* Calculate line in ilt */ 2387 hw_p_size = p_cli->p_size.val; 2388 elems_per_p = ILT_PAGE_IN_BYTES(hw_p_size) / elem_size; 2389 line = p_blk->start_line + (iid / elems_per_p); 2390 shadow_line = line - p_hwfn->p_cxt_mngr->pf_start_line; 2391 2392 /* If line is already allocated, do nothing, otherwise allocate it and 2393 * write it to the PSWRQ2 registers. 2394 * This section can be run in parallel from different contexts and thus 2395 * a mutex protection is needed. 2396 */ 2397 2398 OSAL_MUTEX_ACQUIRE(&p_hwfn->p_cxt_mngr->mutex); 2399 2400 if (p_hwfn->p_cxt_mngr->ilt_shadow[shadow_line].p_virt) 2401 goto out0; 2402 2403 p_ptt = ecore_ptt_acquire(p_hwfn); 2404 if (!p_ptt) { 2405 DP_NOTICE(p_hwfn, false, 2406 "ECORE_TIME_OUT on ptt acquire - dynamic allocation"); 2407 rc = ECORE_TIMEOUT; 2408 goto out0; 2409 } 2410 2411 p_virt = OSAL_DMA_ALLOC_COHERENT(p_hwfn->p_dev, 2412 &p_phys, 2413 p_blk->real_size_in_page); 2414 if (!p_virt) { 2415 rc = ECORE_NOMEM; 2416 goto out1; 2417 } 2418 OSAL_MEM_ZERO(p_virt, p_blk->real_size_in_page); 2419 2420 /* configuration of refTagMask to 0xF is required for RoCE DIF MR only, 2421 * to compensate for a HW bug, but it is configured even if DIF is not 2422 * enabled. This is harmless and allows us to avoid a dedicated API. We 2423 * configure the field for all of the contexts on the newly allocated 2424 * page. 2425 */ 2426 if (elem_type == ECORE_ELEM_TASK) { 2427 u32 elem_i; 2428 u8 *elem_start = (u8 *)p_virt; 2429 union type1_task_context *elem; 2430 2431 for (elem_i = 0; elem_i < elems_per_p; elem_i++) { 2432 elem = (union type1_task_context *)elem_start; 2433 SET_FIELD(elem->roce_ctx.tdif_context.flags1, 2434 TDIF_TASK_CONTEXT_REFTAGMASK , 0xf); 2435 elem_start += TYPE1_TASK_CXT_SIZE(p_hwfn); 2436 } 2437 } 2438 2439 p_hwfn->p_cxt_mngr->ilt_shadow[shadow_line].p_virt = p_virt; 2440 p_hwfn->p_cxt_mngr->ilt_shadow[shadow_line].p_phys = p_phys; 2441 p_hwfn->p_cxt_mngr->ilt_shadow[shadow_line].size = 2442 p_blk->real_size_in_page; 2443 2444 /* compute absolute offset */ 2445 reg_offset = PSWRQ2_REG_ILT_MEMORY + 2446 (line * ILT_REG_SIZE_IN_BYTES * ILT_ENTRY_IN_REGS); 2447 2448 ilt_hw_entry = 0; 2449 SET_FIELD(ilt_hw_entry, ILT_ENTRY_VALID, 1ULL); 2450 SET_FIELD(ilt_hw_entry, 2451 ILT_ENTRY_PHY_ADDR, 2452 (p_hwfn->p_cxt_mngr->ilt_shadow[shadow_line].p_phys >> 12)); 2453 2454 /* Write via DMAE since the PSWRQ2_REG_ILT_MEMORY line is a wide-bus */ 2455 ecore_dmae_host2grc(p_hwfn, p_ptt, (u64)(osal_uintptr_t)&ilt_hw_entry, 2456 reg_offset, sizeof(ilt_hw_entry) / sizeof(u32), 2457 0 /* no flags */); 2458 2459 if (elem_type == ECORE_ELEM_CXT) { 2460 u32 last_cid_allocated = (1 + (iid / elems_per_p)) * 2461 elems_per_p; 2462 2463 /* Update the relevant register in the parser */ 2464 ecore_wr(p_hwfn, p_ptt, PRS_REG_ROCE_DEST_QP_MAX_PF, 2465 last_cid_allocated - 1); 2466 2467 /* RoCE w/a -> we don't write to the prs search reg until first 2468 * cid is allocated. This is because the prs checks 2469 * last_cid-1 >=0 making 0 a valid value... this will cause 2470 * the a context load to occur on a RoCE packet received with 2471 * cid=0 even before context was initialized, can happen with a 2472 * stray packet from switch or a packet with crc-error 2473 */ 2474 2475 if (!p_hwfn->b_rdma_enabled_in_prs) { 2476 /* Enable Rdma search */ 2477 ecore_wr(p_hwfn, p_ptt, p_hwfn->rdma_prs_search_reg, 1); 2478 p_hwfn->b_rdma_enabled_in_prs = true; 2479 } 2480 } 2481 2482 out1: 2483 ecore_ptt_release(p_hwfn, p_ptt); 2484 out0: 2485 OSAL_MUTEX_RELEASE(&p_hwfn->p_cxt_mngr->mutex); 2486 2487 return rc; 2488 } 2489 2490 /* This function is very RoCE oriented, if another protocol in the future 2491 * will want this feature we'll need to modify the function to be more generic 2492 */ 2493 enum _ecore_status_t 2494 ecore_cxt_free_ilt_range(struct ecore_hwfn *p_hwfn, 2495 enum ecore_cxt_elem_type elem_type, 2496 u32 start_iid, u32 count) 2497 { 2498 u32 start_line, end_line, shadow_start_line, shadow_end_line; 2499 u32 reg_offset, elem_size, hw_p_size, elems_per_p; 2500 struct ecore_ilt_client_cfg *p_cli; 2501 struct ecore_ilt_cli_blk *p_blk; 2502 u32 end_iid = start_iid + count; 2503 struct ecore_ptt *p_ptt; 2504 u64 ilt_hw_entry = 0; 2505 u32 i; 2506 2507 switch (elem_type) { 2508 case ECORE_ELEM_CXT: 2509 p_cli = &p_hwfn->p_cxt_mngr->clients[ILT_CLI_CDUC]; 2510 elem_size = CONN_CXT_SIZE(p_hwfn); 2511 p_blk = &p_cli->pf_blks[CDUC_BLK]; 2512 break; 2513 case ECORE_ELEM_SRQ: 2514 p_cli = &p_hwfn->p_cxt_mngr->clients[ILT_CLI_TSDM]; 2515 elem_size = SRQ_CXT_SIZE; 2516 p_blk = &p_cli->pf_blks[SRQ_BLK]; 2517 break; 2518 case ECORE_ELEM_TASK: 2519 p_cli = &p_hwfn->p_cxt_mngr->clients[ILT_CLI_CDUT]; 2520 elem_size = TYPE1_TASK_CXT_SIZE(p_hwfn); 2521 p_blk = &p_cli->pf_blks[CDUT_SEG_BLK(ECORE_CXT_ROCE_TID_SEG)]; 2522 break; 2523 default: 2524 DP_NOTICE(p_hwfn, false, 2525 "ECORE_INVALID elem type = %d", elem_type); 2526 return ECORE_INVAL; 2527 } 2528 2529 /* Calculate line in ilt */ 2530 hw_p_size = p_cli->p_size.val; 2531 elems_per_p = ILT_PAGE_IN_BYTES(hw_p_size) / elem_size; 2532 start_line = p_blk->start_line + (start_iid / elems_per_p); 2533 end_line = p_blk->start_line + (end_iid / elems_per_p); 2534 if (((end_iid + 1) / elems_per_p) != (end_iid / elems_per_p)) 2535 end_line--; 2536 2537 shadow_start_line = start_line - p_hwfn->p_cxt_mngr->pf_start_line; 2538 shadow_end_line = end_line - p_hwfn->p_cxt_mngr->pf_start_line; 2539 2540 p_ptt = ecore_ptt_acquire(p_hwfn); 2541 if (!p_ptt) { 2542 DP_NOTICE(p_hwfn, false, "ECORE_TIME_OUT on ptt acquire - dynamic allocation"); 2543 return ECORE_TIMEOUT; 2544 } 2545 2546 for (i = shadow_start_line; i < shadow_end_line; i++) { 2547 if (!p_hwfn->p_cxt_mngr->ilt_shadow[i].p_virt) 2548 continue; 2549 2550 OSAL_DMA_FREE_COHERENT(p_hwfn->p_dev, 2551 p_hwfn->p_cxt_mngr->ilt_shadow[i].p_virt, 2552 p_hwfn->p_cxt_mngr->ilt_shadow[i].p_phys, 2553 p_hwfn->p_cxt_mngr->ilt_shadow[i].size); 2554 2555 p_hwfn->p_cxt_mngr->ilt_shadow[i].p_virt = OSAL_NULL; 2556 p_hwfn->p_cxt_mngr->ilt_shadow[i].p_phys = 0; 2557 p_hwfn->p_cxt_mngr->ilt_shadow[i].size = 0; 2558 2559 /* compute absolute offset */ 2560 reg_offset = PSWRQ2_REG_ILT_MEMORY + 2561 ((start_line++) * ILT_REG_SIZE_IN_BYTES * 2562 ILT_ENTRY_IN_REGS); 2563 2564 /* Write via DMAE since the PSWRQ2_REG_ILT_MEMORY line is a 2565 * wide-bus. 2566 */ 2567 ecore_dmae_host2grc(p_hwfn, p_ptt, 2568 (u64)(osal_uintptr_t)&ilt_hw_entry, 2569 reg_offset, 2570 sizeof(ilt_hw_entry) / sizeof(u32), 2571 0 /* no flags */); 2572 } 2573 2574 ecore_ptt_release(p_hwfn, p_ptt); 2575 2576 return ECORE_SUCCESS; 2577 } 2578 2579 enum _ecore_status_t ecore_cxt_get_task_ctx(struct ecore_hwfn *p_hwfn, 2580 u32 tid, 2581 u8 ctx_type, 2582 void **pp_task_ctx) 2583 { 2584 struct ecore_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr; 2585 struct ecore_ilt_client_cfg *p_cli; 2586 struct ecore_ilt_cli_blk *p_seg; 2587 struct ecore_tid_seg *p_seg_info; 2588 u32 proto, seg; 2589 u32 total_lines; 2590 u32 tid_size, ilt_idx; 2591 u32 num_tids_per_block; 2592 2593 /* Verify the personality */ 2594 switch (p_hwfn->hw_info.personality) { 2595 case ECORE_PCI_FCOE: 2596 proto = PROTOCOLID_FCOE; 2597 seg = ECORE_CXT_FCOE_TID_SEG; 2598 break; 2599 case ECORE_PCI_ISCSI: 2600 proto = PROTOCOLID_ISCSI; 2601 seg = ECORE_CXT_ISCSI_TID_SEG; 2602 break; 2603 default: 2604 return ECORE_INVAL; 2605 } 2606 2607 p_cli = &p_mngr->clients[ILT_CLI_CDUT]; 2608 if (!p_cli->active) { 2609 return ECORE_INVAL; 2610 } 2611 2612 p_seg_info = &p_mngr->conn_cfg[proto].tid_seg[seg]; 2613 2614 if (ctx_type == ECORE_CTX_WORKING_MEM) { 2615 p_seg = &p_cli->pf_blks[CDUT_SEG_BLK(seg)]; 2616 } else if (ctx_type == ECORE_CTX_FL_MEM) { 2617 if (!p_seg_info->has_fl_mem) { 2618 return ECORE_INVAL; 2619 } 2620 p_seg = &p_cli->pf_blks[CDUT_FL_SEG_BLK(seg, PF)]; 2621 } else { 2622 return ECORE_INVAL; 2623 } 2624 total_lines = DIV_ROUND_UP(p_seg->total_size, 2625 p_seg->real_size_in_page); 2626 tid_size = p_mngr->task_type_size[p_seg_info->type]; 2627 num_tids_per_block = p_seg->real_size_in_page / tid_size; 2628 2629 if (total_lines < tid/num_tids_per_block) 2630 return ECORE_INVAL; 2631 2632 ilt_idx = tid / num_tids_per_block + p_seg->start_line - 2633 p_mngr->pf_start_line; 2634 *pp_task_ctx = (u8 *)p_mngr->ilt_shadow[ilt_idx].p_virt + 2635 (tid % num_tids_per_block) * tid_size; 2636 2637 return ECORE_SUCCESS; 2638 } 2639