/*
 * Copyright (c) 2017-2018 Cavium, Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * File : ecore_init_fw_funcs.c
 */
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "bcm_osal.h"
#include "ecore_hw.h"
#include "ecore_init_ops.h"
#include "reg_addr.h"
#include "ecore_rt_defs.h"
#include "ecore_hsi_common.h"
#include "ecore_hsi_init_func.h"
#include "ecore_hsi_eth.h"
#include "ecore_hsi_init_tool.h"
#include "ecore_iro.h"
#include "ecore_init_fw_funcs.h"

#define CDU_VALIDATION_DEFAULT_CFG 61

static u16 con_region_offsets[3][NUM_OF_CONNECTION_TYPES_E4] = {
    { 400, 336, 352, 304, 304, 384, 416, 352}, /* region 3 offsets */
    { 528, 496, 416, 448, 448, 512, 544, 480}, /* region 4 offsets */
    { 608, 544, 496, 512, 576, 592, 624, 560}  /* region 5 offsets */
};
static u16 task_region_offsets[1][NUM_OF_CONNECTION_TYPES_E4] = {
    { 240, 240, 112, 0, 0, 0, 0, 96}           /* region 1 offsets */
};

/* General constants */
#define QM_PQ_MEM_4KB(pq_size) \
    (pq_size ? DIV_ROUND_UP((pq_size + 1) * QM_PQ_ELEMENT_SIZE, 0x1000) : 0)
#define QM_PQ_SIZE_256B(pq_size) \
    (pq_size ? DIV_ROUND_UP(pq_size, 0x100) - 1 : 0)
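/*
 * Editor's illustrative sketch (not part of the driver): worked example of
 * the PQ sizing macros above. Assumes QM_PQ_ELEMENT_SIZE is 4 bytes, as in
 * the common ecore HSI headers; compiled out so it cannot affect the build.
 */
#if 0
static void example_pq_sizing(void)
{
    /* 1023 CIDs -> (1023 + 1) * 4 = 4096 bytes -> exactly one 4KB page */
    u32 pages = QM_PQ_MEM_4KB(1023);        /* = 1 */

    /* Size in 256B units, minus one: ceil(1023 / 256) - 1 = 3 */
    u32 size_256b = QM_PQ_SIZE_256B(1023);  /* = 3 */

    (void)pages;
    (void)size_256b;
}
#endif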
#define QM_INVALID_PQ_ID 0xffff

/* Feature enable */
#define QM_BYPASS_EN 1
#define QM_BYTE_CRD_EN 1

/* Other PQ constants */
#define QM_OTHER_PQS_PER_PF 4

/* VOQ constants */
#define QM_E5_NUM_EXT_VOQ (MAX_NUM_PORTS_E5 * NUM_OF_TCS)

/* WFQ constants: */

/* Upper bound in MB, 10 * burst size of 1ms in 50Gbps */
#define QM_WFQ_UPPER_BOUND 62500000

/* Bit of VOQ in WFQ VP PQ map */
#define QM_WFQ_VP_PQ_VOQ_SHIFT 0

/* Bit of PF in WFQ VP PQ map */
#define QM_WFQ_VP_PQ_PF_E4_SHIFT 5
#define QM_WFQ_VP_PQ_PF_E5_SHIFT 6

/* 0x9000 = 4*9*1024 */
#define QM_WFQ_INC_VAL(weight) ((weight) * 0x9000)

/* Max WFQ increment value is 0.7 * upper bound */
#define QM_WFQ_MAX_INC_VAL ((QM_WFQ_UPPER_BOUND * 7) / 10)

/* Number of VOQs in E5 QmWfqCrd register */
#define QM_WFQ_CRD_E5_NUM_VOQS 16

/* RL constants: */

/* Period in us */
#define QM_RL_PERIOD 5

/* Period in 25MHz cycles */
#define QM_RL_PERIOD_CLK_25M (25 * QM_RL_PERIOD)

/* RL increment value - rate is specified in mbps. The factor of 1.01 was
 * added after seeing only a 99% factor reached in a 25Gbps port with DPDK
 * RFC 2544 test. In this scenario the PF RL was reducing the line rate to
 * 99% although the credit increment value was the correct one and FW
 * calculated correct packet sizes. The reason for the inaccuracy of the RL
 * is unknown at this point.
 */
#define QM_RL_INC_VAL(rate) \
    OSAL_MAX_T(u32, (u32)(((rate ? rate : 100000) * QM_RL_PERIOD * 101) / (8 * 100)), 1)

/* PF RL Upper bound is set to 10 * burst size of 1ms in 50Gbps */
#define QM_PF_RL_UPPER_BOUND 62500000

/* Max PF RL increment value is 0.7 * upper bound */
#define QM_PF_RL_MAX_INC_VAL ((QM_PF_RL_UPPER_BOUND * 7) / 10)

/* Vport RL Upper bound, link speed is in Mbps */
#define QM_VP_RL_UPPER_BOUND(speed) \
    ((u32)OSAL_MAX_T(u32, QM_RL_INC_VAL(speed), 9700 + 1000))

/* Max Vport RL increment value is the Vport RL upper bound */
#define QM_VP_RL_MAX_INC_VAL(speed) QM_VP_RL_UPPER_BOUND(speed)

/* Vport RL credit threshold in case of QM bypass */
#define QM_VP_RL_BYPASS_THRESH_SPEED (QM_VP_RL_UPPER_BOUND(10000) - 1)

/* AFullOprtnstcCrdMask constants */
#define QM_OPPOR_LINE_VOQ_DEF 1
#define QM_OPPOR_FW_STOP_DEF 0
#define QM_OPPOR_PQ_EMPTY_DEF 1

/* Command Queue constants: */

/* Pure LB CmdQ lines (+spare) */
#define PBF_CMDQ_PURE_LB_LINES 150

#define PBF_CMDQ_LINES_E5_RSVD_RATIO 8

#define PBF_CMDQ_LINES_RT_OFFSET(ext_voq) \
    (PBF_REG_YCMD_QS_NUM_LINES_VOQ0_RT_OFFSET + \
     ext_voq * (PBF_REG_YCMD_QS_NUM_LINES_VOQ1_RT_OFFSET - \
                PBF_REG_YCMD_QS_NUM_LINES_VOQ0_RT_OFFSET))

#define PBF_BTB_GUARANTEED_RT_OFFSET(ext_voq) \
    (PBF_REG_BTB_GUARANTEED_VOQ0_RT_OFFSET + \
     ext_voq * (PBF_REG_BTB_GUARANTEED_VOQ1_RT_OFFSET - \
                PBF_REG_BTB_GUARANTEED_VOQ0_RT_OFFSET))

#define QM_VOQ_LINE_CRD(pbf_cmd_lines) \
    ((((pbf_cmd_lines) - 4) * 2) | QM_LINE_CRD_REG_SIGN_BIT)

/* BTB: blocks constants (block size = 256B) */

/* 256B blocks in 9700B packet */
#define BTB_JUMBO_PKT_BLOCKS 38

/* Headroom per-port */
#define BTB_HEADROOM_BLOCKS BTB_JUMBO_PKT_BLOCKS
#define BTB_PURE_LB_FACTOR 10

/* Factored (hence really 0.7) */
#define BTB_PURE_LB_RATIO 7
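/*
 * Editor's illustrative sketch (not part of the driver): the QM RL credit
 * increment for a 25 Gbps (25000 Mbps) rate limit, straight from the
 * QM_RL_INC_VAL macro above. Compiled out so it cannot affect the build.
 */
#if 0
static void example_qm_rl_inc_val(void)
{
    /* (25000 Mbps * 5us * 1.01) / 8 bits-per-byte, scaled by 100:
     * (25000 * 5 * 101) / (8 * 100) = 12625000 / 800 = 15781
     */
    u32 inc_val = QM_RL_INC_VAL(25000); /* = 15781 bytes per period */

    (void)inc_val;
}
#endif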
/* QM stop command constants */
#define QM_STOP_PQ_MASK_WIDTH 32
#define QM_STOP_CMD_ADDR 2
#define QM_STOP_CMD_STRUCT_SIZE 2
#define QM_STOP_CMD_PAUSE_MASK_OFFSET 0
#define QM_STOP_CMD_PAUSE_MASK_SHIFT 0
#define QM_STOP_CMD_PAUSE_MASK_MASK -1
#define QM_STOP_CMD_GROUP_ID_OFFSET 1
#define QM_STOP_CMD_GROUP_ID_SHIFT 16
#define QM_STOP_CMD_GROUP_ID_MASK 15
#define QM_STOP_CMD_PQ_TYPE_OFFSET 1
#define QM_STOP_CMD_PQ_TYPE_SHIFT 24
#define QM_STOP_CMD_PQ_TYPE_MASK 1
#define QM_STOP_CMD_MAX_POLL_COUNT 100
#define QM_STOP_CMD_POLL_PERIOD_US 500

/* QM command macros */
#define QM_CMD_STRUCT_SIZE(cmd) cmd##_STRUCT_SIZE
#define QM_CMD_SET_FIELD(var, cmd, field, value) \
    SET_FIELD(var[cmd##_##field##_OFFSET], cmd##_##field, value)

#define QM_INIT_TX_PQ_MAP(p_hwfn, map, chip, pq_id, rl_valid, vp_pq_id, rl_id, ext_voq, wrr) \
    do { \
        OSAL_MEMSET(&map, 0, sizeof(map)); \
        SET_FIELD(map.reg, QM_RF_PQ_MAP_##chip##_PQ_VALID, 1); \
        SET_FIELD(map.reg, QM_RF_PQ_MAP_##chip##_RL_VALID, rl_valid); \
        SET_FIELD(map.reg, QM_RF_PQ_MAP_##chip##_VP_PQ_ID, vp_pq_id); \
        SET_FIELD(map.reg, QM_RF_PQ_MAP_##chip##_RL_ID, rl_id); \
        SET_FIELD(map.reg, QM_RF_PQ_MAP_##chip##_VOQ, ext_voq); \
        SET_FIELD(map.reg, QM_RF_PQ_MAP_##chip##_WRR_WEIGHT_GROUP, wrr); \
        STORE_RT_REG(p_hwfn, QM_REG_TXPQMAP_RT_OFFSET + pq_id, *((u32 *)&map)); \
    } while (0)

#define WRITE_PQ_INFO_TO_RAM 1
#define PQ_INFO_ELEMENT(vp, pf, tc, port, rl_valid, rl) \
    (((vp) << 0) | ((pf) << 12) | ((tc) << 16) | ((port) << 20) | \
     ((rl_valid) << 22) | ((rl) << 24))
#define PQ_INFO_RAM_GRC_ADDRESS(pq_id) \
    (XSEM_REG_FAST_MEMORY + SEM_FAST_REG_INT_RAM + 21776 + (pq_id) * 4)

/******************** INTERNAL IMPLEMENTATION *********************/

/* Returns the external VOQ number */
static u8 ecore_get_ext_voq(struct ecore_hwfn *p_hwfn,
                            u8 port_id,
                            u8 tc,
                            u8 max_phys_tcs_per_port)
{
    if (tc == PURE_LB_TC)
        return NUM_OF_PHYS_TCS * (ECORE_IS_E5(p_hwfn->p_dev) ? MAX_NUM_PORTS_E5 : MAX_NUM_PORTS_BB) + port_id;
    else
        return port_id * (ECORE_IS_E5(p_hwfn->p_dev) ? NUM_OF_PHYS_TCS : max_phys_tcs_per_port) + tc;
}
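/*
 * Editor's illustrative sketch (not part of the driver): how the external
 * VOQ numbering above lays out on an E4 (BB) device, assuming the common
 * HSI values NUM_OF_PHYS_TCS = 8 and MAX_NUM_PORTS_BB = 2. Compiled out so
 * it cannot affect the build.
 */
#if 0
static void example_ext_voq_layout(struct ecore_hwfn *p_hwfn)
{
    /* Physical TCs: VOQ = port_id * max_phys_tcs_per_port + tc.
     * With 4 physical TCs per port, port 1 / TC 2 -> VOQ 6.
     */
    u8 voq_phys = ecore_get_ext_voq(p_hwfn, 1, 2, 4);

    /* Pure LB TC: VOQ = NUM_OF_PHYS_TCS * MAX_NUM_PORTS_BB + port_id,
     * i.e. 8 * 2 + 1 = 17 for port 1 - past all physical-TC VOQs.
     */
    u8 voq_lb = ecore_get_ext_voq(p_hwfn, 1, PURE_LB_TC, 4);

    (void)voq_phys;
    (void)voq_lb;
}
#endif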
/* Prepare PF RL enable/disable runtime init values */
static void ecore_enable_pf_rl(struct ecore_hwfn *p_hwfn,
                               bool pf_rl_en)
{
    STORE_RT_REG(p_hwfn, QM_REG_RLPFENABLE_RT_OFFSET, pf_rl_en ? 1 : 0);
    if (pf_rl_en) {
        u8 num_ext_voqs = ECORE_IS_E5(p_hwfn->p_dev) ? QM_E5_NUM_EXT_VOQ : MAX_NUM_VOQS_E4;
        u64 voq_bit_mask = ((u64)1 << num_ext_voqs) - 1;

        /* Enable RLs for all VOQs */
        STORE_RT_REG(p_hwfn, QM_REG_RLPFVOQENABLE_RT_OFFSET, (u32)voq_bit_mask);
#ifdef QM_REG_RLPFVOQENABLE_MSB_RT_OFFSET
        if (num_ext_voqs >= 32)
            STORE_RT_REG(p_hwfn, QM_REG_RLPFVOQENABLE_MSB_RT_OFFSET, (u32)(voq_bit_mask >> 32));
#endif

        /* Write RL period */
        STORE_RT_REG(p_hwfn, QM_REG_RLPFPERIOD_RT_OFFSET, QM_RL_PERIOD_CLK_25M);
        STORE_RT_REG(p_hwfn, QM_REG_RLPFPERIODTIMER_RT_OFFSET, QM_RL_PERIOD_CLK_25M);

        /* Set credit threshold for QM bypass flow */
        if (QM_BYPASS_EN)
            STORE_RT_REG(p_hwfn, QM_REG_AFULLQMBYPTHRPFRL_RT_OFFSET, QM_PF_RL_UPPER_BOUND);
    }
}

/* Prepare PF WFQ enable/disable runtime init values */
static void ecore_enable_pf_wfq(struct ecore_hwfn *p_hwfn,
                                bool pf_wfq_en)
{
    STORE_RT_REG(p_hwfn, QM_REG_WFQPFENABLE_RT_OFFSET, pf_wfq_en ? 1 : 0);

    /* Set credit threshold for QM bypass flow */
    if (pf_wfq_en && QM_BYPASS_EN)
        STORE_RT_REG(p_hwfn, QM_REG_AFULLQMBYPTHRPFWFQ_RT_OFFSET, QM_WFQ_UPPER_BOUND);
}

/* Prepare VPORT RL enable/disable runtime init values */
static void ecore_enable_vport_rl(struct ecore_hwfn *p_hwfn,
                                  bool vport_rl_en)
{
    STORE_RT_REG(p_hwfn, QM_REG_RLGLBLENABLE_RT_OFFSET, vport_rl_en ? 1 : 0);
    if (vport_rl_en) {
        /* Write RL period (use timer 0 only) */
        STORE_RT_REG(p_hwfn, QM_REG_RLGLBLPERIOD_0_RT_OFFSET, QM_RL_PERIOD_CLK_25M);
        STORE_RT_REG(p_hwfn, QM_REG_RLGLBLPERIODTIMER_0_RT_OFFSET, QM_RL_PERIOD_CLK_25M);

        /* Set credit threshold for QM bypass flow */
        if (QM_BYPASS_EN)
            STORE_RT_REG(p_hwfn, QM_REG_AFULLQMBYPTHRGLBLRL_RT_OFFSET, QM_VP_RL_BYPASS_THRESH_SPEED);
    }
}

/* Prepare VPORT WFQ enable/disable runtime init values */
static void ecore_enable_vport_wfq(struct ecore_hwfn *p_hwfn,
                                   bool vport_wfq_en)
{
    STORE_RT_REG(p_hwfn, QM_REG_WFQVPENABLE_RT_OFFSET, vport_wfq_en ? 1 : 0);

    /* Set credit threshold for QM bypass flow */
    if (vport_wfq_en && QM_BYPASS_EN)
        STORE_RT_REG(p_hwfn, QM_REG_AFULLQMBYPTHRVPWFQ_RT_OFFSET, QM_WFQ_UPPER_BOUND);
}

/* Prepare runtime init values to allocate PBF command queue lines for
 * the specified VOQ.
 */
static void ecore_cmdq_lines_voq_rt_init(struct ecore_hwfn *p_hwfn,
                                         u8 ext_voq,
                                         u16 cmdq_lines)
{
    u32 qm_line_crd;

    qm_line_crd = QM_VOQ_LINE_CRD(cmdq_lines);

    OVERWRITE_RT_REG(p_hwfn, PBF_CMDQ_LINES_RT_OFFSET(ext_voq), (u32)cmdq_lines);
    STORE_RT_REG(p_hwfn, QM_REG_VOQCRDLINE_RT_OFFSET + ext_voq, qm_line_crd);
    STORE_RT_REG(p_hwfn, QM_REG_VOQINITCRDLINE_RT_OFFSET + ext_voq, qm_line_crd);
}
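/*
 * Editor's illustrative sketch (not part of the driver): the line-credit
 * encoding used just above. QM_VOQ_LINE_CRD subtracts 4 spare lines,
 * doubles the count, and ORs in the credit register sign bit. Compiled out
 * so it cannot affect the build.
 */
#if 0
static void example_voq_line_crd(void)
{
    /* For the 150 pure-LB lines: (150 - 4) * 2 = 292, plus the sign bit */
    u32 crd = QM_VOQ_LINE_CRD(PBF_CMDQ_PURE_LB_LINES);

    (void)crd;
}
#endif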
/* Prepare runtime init values to allocate PBF command queue lines. */
static void ecore_cmdq_lines_rt_init(struct ecore_hwfn *p_hwfn,
                                     u8 max_ports_per_engine,
                                     u8 max_phys_tcs_per_port,
                                     struct init_qm_port_params port_params[MAX_NUM_PORTS])
{
    u8 tc, ext_voq, port_id, num_tcs_in_port;
    u8 num_ext_voqs = ECORE_IS_E5(p_hwfn->p_dev) ? QM_E5_NUM_EXT_VOQ : MAX_NUM_VOQS_E4;

    /* Clear PBF lines of all VOQs */
    for (ext_voq = 0; ext_voq < num_ext_voqs; ext_voq++)
        STORE_RT_REG(p_hwfn, PBF_CMDQ_LINES_RT_OFFSET(ext_voq), 0);

    for (port_id = 0; port_id < max_ports_per_engine; port_id++) {
        u16 phys_lines, phys_lines_per_tc;

        if (!port_params[port_id].active)
            continue;

        /* Find number of command queue lines to divide between the
         * active physical TCs. In E5, 1/8 of the lines are reserved.
         * The lines for the pure LB TC are subtracted.
         */
        phys_lines = port_params[port_id].num_pbf_cmd_lines;
        if (ECORE_IS_E5(p_hwfn->p_dev))
            phys_lines -= DIV_ROUND_UP(phys_lines, PBF_CMDQ_LINES_E5_RSVD_RATIO);
        phys_lines -= PBF_CMDQ_PURE_LB_LINES;

        /* Find #lines per active physical TC */
        num_tcs_in_port = 0;
        for (tc = 0; tc < max_phys_tcs_per_port; tc++)
            if (((port_params[port_id].active_phys_tcs >> tc) & 0x1) == 1)
                num_tcs_in_port++;
        phys_lines_per_tc = phys_lines / num_tcs_in_port;

        /* Init registers per active TC */
        for (tc = 0; tc < max_phys_tcs_per_port; tc++) {
            ext_voq = ecore_get_ext_voq(p_hwfn, port_id, tc, max_phys_tcs_per_port);
            if (((port_params[port_id].active_phys_tcs >> tc) & 0x1) == 1)
                ecore_cmdq_lines_voq_rt_init(p_hwfn, ext_voq, phys_lines_per_tc);
        }

        /* Init registers for pure LB TC */
        ext_voq = ecore_get_ext_voq(p_hwfn, port_id, PURE_LB_TC, max_phys_tcs_per_port);
        ecore_cmdq_lines_voq_rt_init(p_hwfn, ext_voq, PBF_CMDQ_PURE_LB_LINES);
    }
}
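/*
 * Editor's illustrative sketch (not part of the driver): the per-port line
 * split performed above, for an assumed E5 port with 3000 PBF command
 * lines and 4 active physical TCs. Compiled out so it cannot affect the
 * build.
 */
#if 0
static void example_cmdq_line_split(void)
{
    u16 phys_lines = 3000;

    /* E5 reserves 1/8 of the lines: 3000 - ceil(3000 / 8) = 2625 */
    phys_lines -= DIV_ROUND_UP(phys_lines, PBF_CMDQ_LINES_E5_RSVD_RATIO);

    /* Pure LB lines come off the top: 2625 - 150 = 2475 */
    phys_lines -= PBF_CMDQ_PURE_LB_LINES;

    /* 4 active TCs -> 2475 / 4 = 618 lines per TC (integer division) */
    (void)(phys_lines / 4);
}
#endif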
/* Prepare runtime init values to allocate guaranteed BTB blocks for the
 * specified port. The guaranteed BTB space is divided between the TCs as
 * follows (shared space is currently not used):
 * 1. Parameters:
 *    B - BTB blocks for this port
 *    C - Number of physical TCs for this port
 * 2. Calculation:
 *    a. 38 blocks (9700B jumbo frame) are allocated for global per-port
 *       headroom.
 *    b. B = B - 38 (remainder after global headroom allocation).
 *    c. MAX(38, B/(C+0.7)) blocks are allocated for the pure LB VOQ.
 *    d. B = B - MAX(38, B/(C+0.7)) (remainder after pure LB allocation).
 *    e. B/C blocks are allocated for each physical TC.
 * Assumptions:
 * - MTU is up to 9700 bytes (38 blocks)
 * - All TCs are considered symmetrical (same rate and packet size)
 * - No optimization for lossy TC (all are considered lossless). Shared space
 *   is not enabled and allocated for each TC.
 */
static void ecore_btb_blocks_rt_init(struct ecore_hwfn *p_hwfn,
                                     u8 max_ports_per_engine,
                                     u8 max_phys_tcs_per_port,
                                     struct init_qm_port_params port_params[MAX_NUM_PORTS])
{
    u32 usable_blocks, pure_lb_blocks, phys_blocks;
    u8 tc, ext_voq, port_id, num_tcs_in_port;

    for (port_id = 0; port_id < max_ports_per_engine; port_id++) {
        if (!port_params[port_id].active)
            continue;

        /* Subtract headroom blocks */
        usable_blocks = port_params[port_id].num_btb_blocks - BTB_HEADROOM_BLOCKS;

        /* Find blocks per physical TC. Use a factor to avoid floating
         * point arithmetic.
         */
        num_tcs_in_port = 0;
        for (tc = 0; tc < NUM_OF_PHYS_TCS; tc++)
            if (((port_params[port_id].active_phys_tcs >> tc) & 0x1) == 1)
                num_tcs_in_port++;

        pure_lb_blocks = (usable_blocks * BTB_PURE_LB_FACTOR) /
                         (num_tcs_in_port * BTB_PURE_LB_FACTOR + BTB_PURE_LB_RATIO);
        pure_lb_blocks = OSAL_MAX_T(u32, BTB_JUMBO_PKT_BLOCKS,
                                    pure_lb_blocks / BTB_PURE_LB_FACTOR);
        phys_blocks = (usable_blocks - pure_lb_blocks) / num_tcs_in_port;

        /* Init physical TCs */
        for (tc = 0; tc < NUM_OF_PHYS_TCS; tc++) {
            if (((port_params[port_id].active_phys_tcs >> tc) & 0x1) == 1) {
                ext_voq = ecore_get_ext_voq(p_hwfn, port_id, tc, max_phys_tcs_per_port);
                STORE_RT_REG(p_hwfn, PBF_BTB_GUARANTEED_RT_OFFSET(ext_voq), phys_blocks);
            }
        }

        /* Init pure LB TC */
        ext_voq = ecore_get_ext_voq(p_hwfn, port_id, PURE_LB_TC, max_phys_tcs_per_port);
        STORE_RT_REG(p_hwfn, PBF_BTB_GUARANTEED_RT_OFFSET(ext_voq), pure_lb_blocks);
    }
}
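/*
 * Editor's illustrative sketch (not part of the driver): the BTB split
 * above for an assumed port with 1000 guaranteed blocks and 4 physical
 * TCs. Compiled out so it cannot affect the build.
 */
#if 0
static void example_btb_split(void)
{
    u32 usable = 1000 - BTB_HEADROOM_BLOCKS;    /* 962 blocks */

    /* B/(C+0.7) in fixed point: (962*10)/(4*10+7) = 204, then /10 = 20;
     * the jumbo-packet floor wins: MAX(38, 20) = 38 pure LB blocks.
     */
    u32 pure_lb = OSAL_MAX_T(u32, BTB_JUMBO_PKT_BLOCKS,
                             ((usable * BTB_PURE_LB_FACTOR) /
                              (4 * BTB_PURE_LB_FACTOR + BTB_PURE_LB_RATIO)) /
                             BTB_PURE_LB_FACTOR);

    /* (962 - 38) / 4 = 231 guaranteed blocks per physical TC */
    u32 phys = (usable - pure_lb) / 4;

    (void)phys;
}
#endif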
/* Prepare Tx PQ mapping runtime init values for the specified PF */
static void ecore_tx_pq_map_rt_init(struct ecore_hwfn *p_hwfn,
                                    struct ecore_ptt *p_ptt,
                                    u8 port_id,
                                    u8 pf_id,
                                    u8 max_phys_tcs_per_port,
                                    bool is_pf_loading,
                                    u32 num_pf_cids,
                                    u32 num_vf_cids,
                                    u16 start_pq,
                                    u16 num_pf_pqs,
                                    u16 num_vf_pqs,
                                    u8 start_vport,
                                    u32 base_mem_addr_4kb,
                                    struct init_qm_pq_params *pq_params,
                                    struct init_qm_vport_params *vport_params)
{
    /* A bit per Tx PQ indicating if the PQ is associated with a VF */
    u32 tx_pq_vf_mask[MAX_QM_TX_QUEUES / QM_PF_QUEUE_GROUP_SIZE] = { 0 };
    u32 num_tx_pq_vf_masks = MAX_QM_TX_QUEUES / QM_PF_QUEUE_GROUP_SIZE;
    u16 num_pqs, first_pq_group, last_pq_group, i, j, pq_id, pq_group;
    u32 pq_mem_4kb, vport_pq_mem_4kb, mem_addr_4kb;

    num_pqs = num_pf_pqs + num_vf_pqs;

    first_pq_group = start_pq / QM_PF_QUEUE_GROUP_SIZE;
    last_pq_group = (start_pq + num_pqs - 1) / QM_PF_QUEUE_GROUP_SIZE;

    pq_mem_4kb = QM_PQ_MEM_4KB(num_pf_cids);
    vport_pq_mem_4kb = QM_PQ_MEM_4KB(num_vf_cids);
    mem_addr_4kb = base_mem_addr_4kb;

    /* Set mapping from PQ group to PF */
    for (pq_group = first_pq_group; pq_group <= last_pq_group; pq_group++)
        STORE_RT_REG(p_hwfn, QM_REG_PQTX2PF_0_RT_OFFSET + pq_group, (u32)(pf_id));

    /* Set PQ sizes */
    STORE_RT_REG(p_hwfn, QM_REG_MAXPQSIZE_0_RT_OFFSET, QM_PQ_SIZE_256B(num_pf_cids));
    STORE_RT_REG(p_hwfn, QM_REG_MAXPQSIZE_1_RT_OFFSET, QM_PQ_SIZE_256B(num_vf_cids));

    /* Go over all Tx PQs */
    for (i = 0, pq_id = start_pq; i < num_pqs; i++, pq_id++) {
        u32 max_qm_global_rls = MAX_QM_GLOBAL_RLS;
        u8 ext_voq, vport_id_in_pf;
        bool is_vf_pq, rl_valid;
        u16 first_tx_pq_id;

        ext_voq = ecore_get_ext_voq(p_hwfn, port_id, pq_params[i].tc_id, max_phys_tcs_per_port);
        is_vf_pq = (i >= num_pf_pqs);
        rl_valid = pq_params[i].rl_valid && pq_params[i].vport_id < max_qm_global_rls;

        /* Update first Tx PQ of VPORT/TC */
        vport_id_in_pf = pq_params[i].vport_id - start_vport;
        first_tx_pq_id = vport_params[vport_id_in_pf].first_tx_pq_id[pq_params[i].tc_id];
        if (first_tx_pq_id == QM_INVALID_PQ_ID) {
            u32 map_val = (ext_voq << QM_WFQ_VP_PQ_VOQ_SHIFT) |
                          (pf_id << (ECORE_IS_E5(p_hwfn->p_dev) ? QM_WFQ_VP_PQ_PF_E5_SHIFT :
                                     QM_WFQ_VP_PQ_PF_E4_SHIFT));

            /* Create new VP PQ */
            vport_params[vport_id_in_pf].first_tx_pq_id[pq_params[i].tc_id] = pq_id;
            first_tx_pq_id = pq_id;

            /* Map VP PQ to VOQ and PF */
            STORE_RT_REG(p_hwfn, QM_REG_WFQVPMAP_RT_OFFSET + first_tx_pq_id, map_val);
        }

        /* Check RL ID */
        if (pq_params[i].rl_valid && pq_params[i].vport_id >= max_qm_global_rls)
            DP_NOTICE(p_hwfn, true, "Invalid VPORT ID for rate limiter configuration\n");

        /* Prepare PQ map entry */
        if (ECORE_IS_E5(p_hwfn->p_dev)) {
            struct qm_rf_pq_map_e5 tx_pq_map;

            QM_INIT_TX_PQ_MAP(p_hwfn, tx_pq_map, E5, pq_id, rl_valid ? 1 : 0,
                              first_tx_pq_id, rl_valid ? pq_params[i].vport_id : 0,
                              ext_voq, pq_params[i].wrr_group);
        } else {
            struct qm_rf_pq_map_e4 tx_pq_map;

            QM_INIT_TX_PQ_MAP(p_hwfn, tx_pq_map, E4, pq_id, rl_valid ? 1 : 0,
                              first_tx_pq_id, rl_valid ? pq_params[i].vport_id : 0,
                              ext_voq, pq_params[i].wrr_group);
        }

        /* Set PQ base address */
        STORE_RT_REG(p_hwfn, QM_REG_BASEADDRTXPQ_RT_OFFSET + pq_id, mem_addr_4kb);

        /* Clear PQ pointer table entry (64 bit) */
        if (is_pf_loading)
            for (j = 0; j < 2; j++)
                STORE_RT_REG(p_hwfn, QM_REG_PTRTBLTX_RT_OFFSET + (pq_id * 2) + j, 0);

        /* Write PQ info to RAM */
        if (WRITE_PQ_INFO_TO_RAM != 0) {
            u32 pq_info = 0;

            pq_info = PQ_INFO_ELEMENT(first_tx_pq_id, pf_id, pq_params[i].tc_id,
                                      port_id, rl_valid ? 1 : 0,
                                      rl_valid ? pq_params[i].vport_id : 0);
            ecore_wr(p_hwfn, p_ptt, PQ_INFO_RAM_GRC_ADDRESS(pq_id), pq_info);
        }

        /* If VF PQ, add indication to PQ VF mask */
        if (is_vf_pq) {
            tx_pq_vf_mask[pq_id / QM_PF_QUEUE_GROUP_SIZE] |= (1 << (pq_id % QM_PF_QUEUE_GROUP_SIZE));
            mem_addr_4kb += vport_pq_mem_4kb;
        } else {
            mem_addr_4kb += pq_mem_4kb;
        }
    }

    /* Store Tx PQ VF mask to size select register */
    for (i = 0; i < num_tx_pq_vf_masks; i++)
        if (tx_pq_vf_mask[i])
            STORE_RT_REG(p_hwfn, QM_REG_MAXPQSIZETXSEL_0_RT_OFFSET + i, tx_pq_vf_mask[i]);
}
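/*
 * Editor's illustrative sketch (not part of the driver): the bit layout
 * produced by PQ_INFO_ELEMENT for the RAM write above. Compiled out so it
 * cannot affect the build.
 */
#if 0
static void example_pq_info_layout(void)
{
    /* vp=5, pf=1, tc=3, port=0, rl_valid=1, rl=5:
     * 0x5 | (1 << 12) | (3 << 16) | (0 << 20) | (1 << 22) | (5 << 24)
     * = 0x05431005
     */
    u32 pq_info = PQ_INFO_ELEMENT(5, 1, 3, 0, 1, 5);

    (void)pq_info;
}
#endif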
/* Prepare Other PQ mapping runtime init values for the specified PF */
static void ecore_other_pq_map_rt_init(struct ecore_hwfn *p_hwfn,
                                       u8 pf_id,
                                       bool is_pf_loading,
                                       u32 num_pf_cids,
                                       u32 num_tids,
                                       u32 base_mem_addr_4kb)
{
    u32 pq_size, pq_mem_4kb, mem_addr_4kb;
    u16 i, j, pq_id, pq_group;

    /* A single other PQ group is used in each PF, where PQ group i is used
     * in PF i.
     */
    pq_group = pf_id;
    pq_size = num_pf_cids + num_tids;
    pq_mem_4kb = QM_PQ_MEM_4KB(pq_size);
    mem_addr_4kb = base_mem_addr_4kb;

    /* Map PQ group to PF */
    STORE_RT_REG(p_hwfn, QM_REG_PQOTHER2PF_0_RT_OFFSET + pq_group, (u32)(pf_id));

    /* Set PQ sizes */
    STORE_RT_REG(p_hwfn, QM_REG_MAXPQSIZE_2_RT_OFFSET, QM_PQ_SIZE_256B(pq_size));

    for (i = 0, pq_id = pf_id * QM_PF_QUEUE_GROUP_SIZE; i < QM_OTHER_PQS_PER_PF; i++, pq_id++) {
        /* Set PQ base address */
        STORE_RT_REG(p_hwfn, QM_REG_BASEADDROTHERPQ_RT_OFFSET + pq_id, mem_addr_4kb);

        /* Clear PQ pointer table entry */
        if (is_pf_loading)
            for (j = 0; j < 2; j++)
                STORE_RT_REG(p_hwfn, QM_REG_PTRTBLOTHER_RT_OFFSET + (pq_id * 2) + j, 0);

        mem_addr_4kb += pq_mem_4kb;
    }
}

/* Prepare PF WFQ runtime init values for the specified PF.
 * Return -1 on error.
 */
static int ecore_pf_wfq_rt_init(struct ecore_hwfn *p_hwfn,
                                u8 port_id,
                                u8 pf_id,
                                u16 pf_wfq,
                                u8 max_phys_tcs_per_port,
                                u16 num_tx_pqs,
                                struct init_qm_pq_params *pq_params)
{
    u32 inc_val, crd_reg_offset;
    u8 ext_voq;
    u16 i;

    inc_val = QM_WFQ_INC_VAL(pf_wfq);
    if (!inc_val || inc_val > QM_WFQ_MAX_INC_VAL) {
        DP_NOTICE(p_hwfn, true, "Invalid PF WFQ weight configuration\n");
        return -1;
    }

    for (i = 0; i < num_tx_pqs; i++) {
        ext_voq = ecore_get_ext_voq(p_hwfn, port_id, pq_params[i].tc_id, max_phys_tcs_per_port);
        crd_reg_offset = ECORE_IS_E5(p_hwfn->p_dev) ?
            (ext_voq < QM_WFQ_CRD_E5_NUM_VOQS ? QM_REG_WFQPFCRD_RT_OFFSET :
             QM_REG_WFQPFCRD_MSB_RT_OFFSET) +
            (ext_voq % QM_WFQ_CRD_E5_NUM_VOQS) * MAX_NUM_PFS_E5 + pf_id :
            (pf_id < MAX_NUM_PFS_BB ? QM_REG_WFQPFCRD_RT_OFFSET :
             QM_REG_WFQPFCRD_MSB_RT_OFFSET) +
            ext_voq * MAX_NUM_PFS_BB + (pf_id % MAX_NUM_PFS_BB);
        OVERWRITE_RT_REG(p_hwfn, crd_reg_offset, (u32)QM_WFQ_CRD_REG_SIGN_BIT);
    }

    STORE_RT_REG(p_hwfn, QM_REG_WFQPFUPPERBOUND_RT_OFFSET + pf_id,
                 QM_WFQ_UPPER_BOUND | (u32)QM_WFQ_CRD_REG_SIGN_BIT);
    STORE_RT_REG(p_hwfn, QM_REG_WFQPFWEIGHT_RT_OFFSET + pf_id, inc_val);

    return 0;
}

/* Prepare PF RL runtime init values for the specified PF.
 * Return -1 on error.
 */
static int ecore_pf_rl_rt_init(struct ecore_hwfn *p_hwfn,
                               u8 pf_id,
                               u32 pf_rl)
{
    u32 inc_val;

    inc_val = QM_RL_INC_VAL(pf_rl);
    if (inc_val > QM_PF_RL_MAX_INC_VAL) {
        DP_NOTICE(p_hwfn, true, "Invalid PF rate limit configuration\n");
        return -1;
    }

    STORE_RT_REG(p_hwfn, QM_REG_RLPFCRD_RT_OFFSET + pf_id, (u32)QM_RL_CRD_REG_SIGN_BIT);
    STORE_RT_REG(p_hwfn, QM_REG_RLPFUPPERBOUND_RT_OFFSET + pf_id,
                 QM_PF_RL_UPPER_BOUND | (u32)QM_RL_CRD_REG_SIGN_BIT);
    STORE_RT_REG(p_hwfn, QM_REG_RLPFINCVAL_RT_OFFSET + pf_id, inc_val);

    return 0;
}

/* Prepare VPORT WFQ runtime init values for the specified VPORTs.
 * Return -1 on error.
 */
static int ecore_vp_wfq_rt_init(struct ecore_hwfn *p_hwfn,
                                u8 num_vports,
                                struct init_qm_vport_params *vport_params)
{
    u16 vport_pq_id;
    u32 inc_val;
    u8 tc, i;

    /* Go over all PF VPORTs */
    for (i = 0; i < num_vports; i++) {
        if (!vport_params[i].vport_wfq)
            continue;

        inc_val = QM_WFQ_INC_VAL(vport_params[i].vport_wfq);
        if (inc_val > QM_WFQ_MAX_INC_VAL) {
            DP_NOTICE(p_hwfn, true, "Invalid VPORT WFQ weight configuration\n");
            return -1;
        }

        /* Each VPORT can have several VPORT PQ IDs for various TCs */
        for (tc = 0; tc < NUM_OF_TCS; tc++) {
            vport_pq_id = vport_params[i].first_tx_pq_id[tc];
            if (vport_pq_id != QM_INVALID_PQ_ID) {
                STORE_RT_REG(p_hwfn, QM_REG_WFQVPCRD_RT_OFFSET + vport_pq_id,
                             (u32)QM_WFQ_CRD_REG_SIGN_BIT);
                STORE_RT_REG(p_hwfn, QM_REG_WFQVPWEIGHT_RT_OFFSET + vport_pq_id, inc_val);
            }
        }
    }

    return 0;
}
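/*
 * Editor's illustrative sketch (not part of the driver): the WFQ weight
 * range implied by the macros above. Compiled out so it cannot affect the
 * build.
 */
#if 0
static void example_wfq_weight_range(void)
{
    /* One weight unit is 0x9000 = 36864 credit units */
    u32 inc_val = QM_WFQ_INC_VAL(1);

    /* The cap is 0.7 * 62500000 = 43750000, so the largest weight that
     * passes the inc_val checks above is 43750000 / 36864 = 1186.
     */
    u32 max_weight = QM_WFQ_MAX_INC_VAL / QM_WFQ_INC_VAL(1);

    (void)inc_val;
    (void)max_weight;
}
#endif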
/* Prepare VPORT RL runtime init values for the specified VPORTs.
 * Return -1 on error.
 */
static int ecore_vport_rl_rt_init(struct ecore_hwfn *p_hwfn,
                                  u8 start_vport,
                                  u8 num_vports,
                                  u32 link_speed,
                                  struct init_qm_vport_params *vport_params)
{
    u8 i, vport_id;
    u32 inc_val;

    if (start_vport + num_vports >= MAX_QM_GLOBAL_RLS) {
        DP_NOTICE(p_hwfn, true, "Invalid VPORT ID for rate limiter configuration\n");
        return -1;
    }

    /* Go over all PF VPORTs */
    for (i = 0, vport_id = start_vport; i < num_vports; i++, vport_id++) {
        inc_val = QM_RL_INC_VAL(vport_params[i].vport_rl ?
                                vport_params[i].vport_rl : link_speed);
        if (inc_val > QM_VP_RL_MAX_INC_VAL(link_speed)) {
            DP_NOTICE(p_hwfn, true, "Invalid VPORT rate-limit configuration\n");
            return -1;
        }

        STORE_RT_REG(p_hwfn, QM_REG_RLGLBLCRD_RT_OFFSET + vport_id, (u32)QM_RL_CRD_REG_SIGN_BIT);
        STORE_RT_REG(p_hwfn, QM_REG_RLGLBLUPPERBOUND_RT_OFFSET + vport_id,
                     QM_VP_RL_UPPER_BOUND(link_speed) | (u32)QM_RL_CRD_REG_SIGN_BIT);
        STORE_RT_REG(p_hwfn, QM_REG_RLGLBLINCVAL_RT_OFFSET + vport_id, inc_val);
    }

    return 0;
}

static bool ecore_poll_on_qm_cmd_ready(struct ecore_hwfn *p_hwfn,
                                       struct ecore_ptt *p_ptt)
{
    u32 reg_val, i;

    for (i = 0, reg_val = 0; i < QM_STOP_CMD_MAX_POLL_COUNT && !reg_val; i++) {
        OSAL_UDELAY(QM_STOP_CMD_POLL_PERIOD_US);
        reg_val = ecore_rd(p_hwfn, p_ptt, QM_REG_SDMCMDREADY);
    }

    /* Check if timeout while waiting for SDM command ready */
    if (i == QM_STOP_CMD_MAX_POLL_COUNT) {
        DP_VERBOSE(p_hwfn, ECORE_MSG_DEBUG,
                   "Timeout when waiting for QM SDM command ready signal\n");
        return false;
    }

    return true;
}

static bool ecore_send_qm_cmd(struct ecore_hwfn *p_hwfn,
                              struct ecore_ptt *p_ptt,
                              u32 cmd_addr,
                              u32 cmd_data_lsb,
                              u32 cmd_data_msb)
{
    if (!ecore_poll_on_qm_cmd_ready(p_hwfn, p_ptt))
        return false;

    ecore_wr(p_hwfn, p_ptt, QM_REG_SDMCMDADDR, cmd_addr);
    ecore_wr(p_hwfn, p_ptt, QM_REG_SDMCMDDATALSB, cmd_data_lsb);
    ecore_wr(p_hwfn, p_ptt, QM_REG_SDMCMDDATAMSB, cmd_data_msb);
    ecore_wr(p_hwfn, p_ptt, QM_REG_SDMCMDGO, 1);
    ecore_wr(p_hwfn, p_ptt, QM_REG_SDMCMDGO, 0);

    return ecore_poll_on_qm_cmd_ready(p_hwfn, p_ptt);
}

/******************** INTERFACE IMPLEMENTATION *********************/

u32 ecore_qm_pf_mem_size(u32 num_pf_cids,
                         u32 num_vf_cids,
                         u32 num_tids,
                         u16 num_pf_pqs,
                         u16 num_vf_pqs)
{
    return QM_PQ_MEM_4KB(num_pf_cids) * num_pf_pqs +
           QM_PQ_MEM_4KB(num_vf_cids) * num_vf_pqs +
           QM_PQ_MEM_4KB(num_pf_cids + num_tids) * QM_OTHER_PQS_PER_PF;
}
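/*
 * Editor's illustrative sketch (not part of the driver): sizing PF PQ
 * memory with ecore_qm_pf_mem_size. Assumes QM_PQ_ELEMENT_SIZE is 4 bytes,
 * as in the common ecore HSI headers; compiled out so it cannot affect the
 * build.
 */
#if 0
static void example_qm_pf_mem_size(void)
{
    /* 1023 PF CIDs (1 page/PQ) * 8 PF PQs, 511 VF CIDs (1 page/PQ) * 16
     * VF PQs, and (1023 + 0) CIDs (1 page/PQ) * 4 other PQs:
     * 8 + 16 + 4 = 28 pages of 4KB.
     */
    u32 pages = ecore_qm_pf_mem_size(1023, 511, 0, 8, 16);

    (void)pages;
}
#endif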
int ecore_qm_common_rt_init(struct ecore_hwfn *p_hwfn,
                            u8 max_ports_per_engine,
                            u8 max_phys_tcs_per_port,
                            bool pf_rl_en,
                            bool pf_wfq_en,
                            bool vport_rl_en,
                            bool vport_wfq_en,
                            struct init_qm_port_params port_params[MAX_NUM_PORTS])
{
    u32 mask;

    /* Init AFullOprtnstcCrdMask */
    mask = (QM_OPPOR_LINE_VOQ_DEF << QM_RF_OPPORTUNISTIC_MASK_LINEVOQ_SHIFT) |
           (QM_BYTE_CRD_EN << QM_RF_OPPORTUNISTIC_MASK_BYTEVOQ_SHIFT) |
           (pf_wfq_en << QM_RF_OPPORTUNISTIC_MASK_PFWFQ_SHIFT) |
           (vport_wfq_en << QM_RF_OPPORTUNISTIC_MASK_VPWFQ_SHIFT) |
           (pf_rl_en << QM_RF_OPPORTUNISTIC_MASK_PFRL_SHIFT) |
           (vport_rl_en << QM_RF_OPPORTUNISTIC_MASK_VPQCNRL_SHIFT) |
           (QM_OPPOR_FW_STOP_DEF << QM_RF_OPPORTUNISTIC_MASK_FWPAUSE_SHIFT) |
           (QM_OPPOR_PQ_EMPTY_DEF << QM_RF_OPPORTUNISTIC_MASK_QUEUEEMPTY_SHIFT);
    STORE_RT_REG(p_hwfn, QM_REG_AFULLOPRTNSTCCRDMASK_RT_OFFSET, mask);

    /* Enable/disable PF RL */
    ecore_enable_pf_rl(p_hwfn, pf_rl_en);

    /* Enable/disable PF WFQ */
    ecore_enable_pf_wfq(p_hwfn, pf_wfq_en);

    /* Enable/disable VPORT RL */
    ecore_enable_vport_rl(p_hwfn, vport_rl_en);

    /* Enable/disable VPORT WFQ */
    ecore_enable_vport_wfq(p_hwfn, vport_wfq_en);

    /* Init PBF CMDQ line credit */
    ecore_cmdq_lines_rt_init(p_hwfn, max_ports_per_engine, max_phys_tcs_per_port, port_params);

    /* Init BTB blocks in PBF */
    ecore_btb_blocks_rt_init(p_hwfn, max_ports_per_engine, max_phys_tcs_per_port, port_params);

    return 0;
}

int ecore_qm_pf_rt_init(struct ecore_hwfn *p_hwfn,
                        struct ecore_ptt *p_ptt,
                        u8 port_id,
                        u8 pf_id,
                        u8 max_phys_tcs_per_port,
                        bool is_pf_loading,
                        u32 num_pf_cids,
                        u32 num_vf_cids,
                        u32 num_tids,
                        u16 start_pq,
                        u16 num_pf_pqs,
                        u16 num_vf_pqs,
                        u8 start_vport,
                        u8 num_vports,
                        u16 pf_wfq,
                        u32 pf_rl,
                        u32 link_speed,
                        struct init_qm_pq_params *pq_params,
                        struct init_qm_vport_params *vport_params)
{
    u32 other_mem_size_4kb;
    u8 tc, i;

    other_mem_size_4kb = QM_PQ_MEM_4KB(num_pf_cids + num_tids) * QM_OTHER_PQS_PER_PF;

    /* Clear first Tx PQ ID array for each VPORT */
    for (i = 0; i < num_vports; i++)
        for (tc = 0; tc < NUM_OF_TCS; tc++)
            vport_params[i].first_tx_pq_id[tc] = QM_INVALID_PQ_ID;

    /* Map Other PQs (if any) */
#if QM_OTHER_PQS_PER_PF > 0
    ecore_other_pq_map_rt_init(p_hwfn, pf_id, is_pf_loading, num_pf_cids, num_tids, 0);
#endif

    /* Map Tx PQs */
    ecore_tx_pq_map_rt_init(p_hwfn, p_ptt, port_id, pf_id, max_phys_tcs_per_port,
                            is_pf_loading, num_pf_cids, num_vf_cids, start_pq,
                            num_pf_pqs, num_vf_pqs, start_vport, other_mem_size_4kb,
                            pq_params, vport_params);

    /* Init PF WFQ */
    if (pf_wfq)
        if (ecore_pf_wfq_rt_init(p_hwfn, port_id, pf_id, pf_wfq, max_phys_tcs_per_port,
                                 num_pf_pqs + num_vf_pqs, pq_params))
            return -1;

    /* Init PF RL */
    if (ecore_pf_rl_rt_init(p_hwfn, pf_id, pf_rl))
        return -1;

    /* Set VPORT WFQ */
    if (ecore_vp_wfq_rt_init(p_hwfn, num_vports, vport_params))
        return -1;

    /* Set VPORT RL */
    if (ecore_vport_rl_rt_init(p_hwfn, start_vport, num_vports, link_speed, vport_params))
        return -1;

    return 0;
}

int ecore_init_pf_wfq(struct ecore_hwfn *p_hwfn,
                      struct ecore_ptt *p_ptt,
                      u8 pf_id,
                      u16 pf_wfq)
{
    u32 inc_val;

    inc_val = QM_WFQ_INC_VAL(pf_wfq);
    if (!inc_val || inc_val > QM_WFQ_MAX_INC_VAL) {
        DP_NOTICE(p_hwfn, true, "Invalid PF WFQ weight configuration\n");
        return -1;
    }

    ecore_wr(p_hwfn, p_ptt, QM_REG_WFQPFWEIGHT + pf_id * 4, inc_val);

    return 0;
}

int ecore_init_pf_rl(struct ecore_hwfn *p_hwfn,
                     struct ecore_ptt *p_ptt,
                     u8 pf_id,
                     u32 pf_rl)
{
    u32 inc_val;

    inc_val = QM_RL_INC_VAL(pf_rl);
    if (inc_val > QM_PF_RL_MAX_INC_VAL) {
        DP_NOTICE(p_hwfn, true, "Invalid PF rate limit configuration\n");
        return -1;
    }

    ecore_wr(p_hwfn, p_ptt, QM_REG_RLPFCRD + pf_id * 4, (u32)QM_RL_CRD_REG_SIGN_BIT);
    ecore_wr(p_hwfn, p_ptt, QM_REG_RLPFINCVAL + pf_id * 4, inc_val);

    return 0;
}
int ecore_init_vport_wfq(struct ecore_hwfn *p_hwfn,
                         struct ecore_ptt *p_ptt,
                         u16 first_tx_pq_id[NUM_OF_TCS],
                         u16 vport_wfq)
{
    u16 vport_pq_id;
    u32 inc_val;
    u8 tc;

    inc_val = QM_WFQ_INC_VAL(vport_wfq);
    if (!inc_val || inc_val > QM_WFQ_MAX_INC_VAL) {
        DP_NOTICE(p_hwfn, true, "Invalid VPORT WFQ weight configuration\n");
        return -1;
    }

    for (tc = 0; tc < NUM_OF_TCS; tc++) {
        vport_pq_id = first_tx_pq_id[tc];
        if (vport_pq_id != QM_INVALID_PQ_ID) {
            ecore_wr(p_hwfn, p_ptt, QM_REG_WFQVPWEIGHT + vport_pq_id * 4, inc_val);
        }
    }

    return 0;
}

int ecore_init_vport_rl(struct ecore_hwfn *p_hwfn,
                        struct ecore_ptt *p_ptt,
                        u8 vport_id,
                        u32 vport_rl,
                        u32 link_speed)
{
    u32 inc_val, max_qm_global_rls = MAX_QM_GLOBAL_RLS;

    if (vport_id >= max_qm_global_rls) {
        DP_NOTICE(p_hwfn, true, "Invalid VPORT ID for rate limiter configuration\n");
        return -1;
    }

    inc_val = QM_RL_INC_VAL(vport_rl ? vport_rl : link_speed);
    if (inc_val > QM_VP_RL_MAX_INC_VAL(link_speed)) {
        DP_NOTICE(p_hwfn, true, "Invalid VPORT rate-limit configuration\n");
        return -1;
    }

    ecore_wr(p_hwfn, p_ptt, QM_REG_RLGLBLCRD + vport_id * 4, (u32)QM_RL_CRD_REG_SIGN_BIT);
    ecore_wr(p_hwfn, p_ptt, QM_REG_RLGLBLINCVAL + vport_id * 4, inc_val);

    return 0;
}

bool ecore_send_qm_stop_cmd(struct ecore_hwfn *p_hwfn,
                            struct ecore_ptt *p_ptt,
                            bool is_release_cmd,
                            bool is_tx_pq,
                            u16 start_pq,
                            u16 num_pqs)
{
    u32 cmd_arr[QM_CMD_STRUCT_SIZE(QM_STOP_CMD)] = {0};
    u32 pq_mask = 0, last_pq, pq_id;

    last_pq = start_pq + num_pqs - 1;

    /* Set command's PQ type */
    QM_CMD_SET_FIELD(cmd_arr, QM_STOP_CMD, PQ_TYPE, is_tx_pq ? 0 : 1);

    /* Go over requested PQs */
    for (pq_id = start_pq; pq_id <= last_pq; pq_id++) {
        /* Set PQ bit in mask (stop command only) */
        if (!is_release_cmd)
            pq_mask |= (1 << (pq_id % QM_STOP_PQ_MASK_WIDTH));

        /* If last PQ or end of PQ mask, write command */
        if ((pq_id == last_pq) ||
            (pq_id % QM_STOP_PQ_MASK_WIDTH == (QM_STOP_PQ_MASK_WIDTH - 1))) {
            QM_CMD_SET_FIELD(cmd_arr, (u32)QM_STOP_CMD, PAUSE_MASK, pq_mask);
            QM_CMD_SET_FIELD(cmd_arr, QM_STOP_CMD, GROUP_ID, pq_id / QM_STOP_PQ_MASK_WIDTH);
            if (!ecore_send_qm_cmd(p_hwfn, p_ptt, QM_STOP_CMD_ADDR, cmd_arr[0], cmd_arr[1]))
                return false;
            pq_mask = 0;
        }
    }

    return true;
}

#ifndef UNUSED_HSI_FUNC

/* NIG: ETS configuration constants */
#define NIG_TX_ETS_CLIENT_OFFSET 4
#define NIG_LB_ETS_CLIENT_OFFSET 1
#define NIG_ETS_MIN_WFQ_BYTES 1600

/* NIG: ETS constants */
#define NIG_ETS_UP_BOUND(weight, mtu) (2 * ((weight) > (mtu) ? (weight) : (mtu)))

/* NIG: RL constants */

/* Byte base type value */
#define NIG_RL_BASE_TYPE 1

/* Period in us */
#define NIG_RL_PERIOD 1

/* Period in 25MHz cycles */
#define NIG_RL_PERIOD_CLK_25M (25 * NIG_RL_PERIOD)

/* Rate in mbps */
#define NIG_RL_INC_VAL(rate) (((rate) * NIG_RL_PERIOD) / 8)

#define NIG_RL_MAX_VAL(inc_val, mtu) (2 * ((inc_val) > (mtu) ? (inc_val) : (mtu)))

/* NIG: packet priority configuration constants */
#define NIG_PRIORITY_MAP_TC_BITS 4
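/*
 * Editor's illustrative sketch (not part of the driver): the NIG RL math
 * above for an assumed 10 Gbps (10000 Mbps) rate and 9600-byte MTU.
 * Compiled out so it cannot affect the build.
 */
#if 0
static void example_nig_rl_values(void)
{
    /* 10000 Mbps over a 1us period is 10000 / 8 = 1250 bytes */
    u32 inc_val = NIG_RL_INC_VAL(10000);

    /* Upper bound is twice the larger of increment and MTU:
     * 2 * MAX(1250, 9600) = 19200
     */
    u32 max_val = NIG_RL_MAX_VAL(inc_val, 9600);

    (void)max_val;
}
#endif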
void ecore_init_nig_ets(struct ecore_hwfn *p_hwfn,
                        struct ecore_ptt *p_ptt,
                        struct init_ets_req *req,
                        bool is_lb)
{
    u32 min_weight, tc_weight_base_addr, tc_weight_addr_diff;
    u32 tc_bound_base_addr, tc_bound_addr_diff;
    u8 sp_tc_map = 0, wfq_tc_map = 0;
    u8 tc, num_tc, tc_client_offset;

    num_tc = is_lb ? NUM_OF_TCS : NUM_OF_PHYS_TCS;
    tc_client_offset = is_lb ? NIG_LB_ETS_CLIENT_OFFSET : NIG_TX_ETS_CLIENT_OFFSET;
    min_weight = 0xffffffff;
    tc_weight_base_addr = is_lb ? NIG_REG_LB_ARB_CREDIT_WEIGHT_0 :
                                  NIG_REG_TX_ARB_CREDIT_WEIGHT_0;
    tc_weight_addr_diff = is_lb ? NIG_REG_LB_ARB_CREDIT_WEIGHT_1 -
                                  NIG_REG_LB_ARB_CREDIT_WEIGHT_0 :
                                  NIG_REG_TX_ARB_CREDIT_WEIGHT_1 -
                                  NIG_REG_TX_ARB_CREDIT_WEIGHT_0;
    tc_bound_base_addr = is_lb ? NIG_REG_LB_ARB_CREDIT_UPPER_BOUND_0 :
                                 NIG_REG_TX_ARB_CREDIT_UPPER_BOUND_0;
    tc_bound_addr_diff = is_lb ? NIG_REG_LB_ARB_CREDIT_UPPER_BOUND_1 -
                                 NIG_REG_LB_ARB_CREDIT_UPPER_BOUND_0 :
                                 NIG_REG_TX_ARB_CREDIT_UPPER_BOUND_1 -
                                 NIG_REG_TX_ARB_CREDIT_UPPER_BOUND_0;

    for (tc = 0; tc < num_tc; tc++) {
        struct init_ets_tc_req *tc_req = &req->tc_req[tc];

        /* Update SP map */
        if (tc_req->use_sp)
            sp_tc_map |= (1 << tc);

        if (!tc_req->use_wfq)
            continue;

        /* Update WFQ map */
        wfq_tc_map |= (1 << tc);

        /* Find minimal weight */
        if (tc_req->weight < min_weight)
            min_weight = tc_req->weight;
    }

    /* Write SP map */
    ecore_wr(p_hwfn, p_ptt,
             is_lb ? NIG_REG_LB_ARB_CLIENT_IS_STRICT : NIG_REG_TX_ARB_CLIENT_IS_STRICT,
             (sp_tc_map << tc_client_offset));

    /* Write WFQ map */
    ecore_wr(p_hwfn, p_ptt,
             is_lb ? NIG_REG_LB_ARB_CLIENT_IS_SUBJECT2WFQ : NIG_REG_TX_ARB_CLIENT_IS_SUBJECT2WFQ,
             (wfq_tc_map << tc_client_offset));

    /* Write WFQ weights */
    for (tc = 0; tc < num_tc; tc++, tc_client_offset++) {
        struct init_ets_tc_req *tc_req = &req->tc_req[tc];
        u32 byte_weight;

        if (!tc_req->use_wfq)
            continue;

        /* Translate weight to bytes */
        byte_weight = (NIG_ETS_MIN_WFQ_BYTES * tc_req->weight) / min_weight;

        /* Write WFQ weight */
        ecore_wr(p_hwfn, p_ptt,
                 tc_weight_base_addr + tc_weight_addr_diff * tc_client_offset, byte_weight);

        /* Write WFQ upper bound */
        ecore_wr(p_hwfn, p_ptt,
                 tc_bound_base_addr + tc_bound_addr_diff * tc_client_offset,
                 NIG_ETS_UP_BOUND(byte_weight, req->mtu));
    }
}
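/*
 * Editor's illustrative sketch (not part of the driver): the weight-to-
 * bytes translation used above, for assumed TC weights {1, 2} and a
 * 9600-byte MTU. Compiled out so it cannot affect the build.
 */
#if 0
static void example_ets_byte_weights(void)
{
    u32 min_weight = 1;

    /* Weights scale so the smallest becomes 1600 bytes:
     * weight 1 -> 1600 bytes, weight 2 -> 3200 bytes.
     */
    u32 w0 = (NIG_ETS_MIN_WFQ_BYTES * 1) / min_weight;
    u32 w1 = (NIG_ETS_MIN_WFQ_BYTES * 2) / min_weight;

    /* The bound is twice the larger of byte weight and MTU, so both TCs
     * get 2 * 9600 = 19200 here.
     */
    u32 bound = NIG_ETS_UP_BOUND(w1, 9600);

    (void)w0;
    (void)bound;
}
#endif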
void ecore_init_nig_lb_rl(struct ecore_hwfn *p_hwfn,
                          struct ecore_ptt *p_ptt,
                          struct init_nig_lb_rl_req *req)
{
    u32 ctrl, inc_val, reg_offset;
    u8 tc;

    /* Disable global MAC+LB RL */
    ctrl = NIG_RL_BASE_TYPE << NIG_REG_TX_LB_GLBRATELIMIT_CTRL_TX_LB_GLBRATELIMIT_BASE_TYPE_SHIFT;
    ecore_wr(p_hwfn, p_ptt, NIG_REG_TX_LB_GLBRATELIMIT_CTRL, ctrl);

    /* Configure and enable global MAC+LB RL */
    if (req->lb_mac_rate) {
        /* Configure */
        ecore_wr(p_hwfn, p_ptt, NIG_REG_TX_LB_GLBRATELIMIT_INC_PERIOD, NIG_RL_PERIOD_CLK_25M);
        inc_val = NIG_RL_INC_VAL(req->lb_mac_rate);
        ecore_wr(p_hwfn, p_ptt, NIG_REG_TX_LB_GLBRATELIMIT_INC_VALUE, inc_val);
        ecore_wr(p_hwfn, p_ptt, NIG_REG_TX_LB_GLBRATELIMIT_MAX_VALUE,
                 NIG_RL_MAX_VAL(inc_val, req->mtu));

        /* Enable */
        ctrl |= 1 << NIG_REG_TX_LB_GLBRATELIMIT_CTRL_TX_LB_GLBRATELIMIT_EN_SHIFT;
        ecore_wr(p_hwfn, p_ptt, NIG_REG_TX_LB_GLBRATELIMIT_CTRL, ctrl);
    }

    /* Disable global LB-only RL */
    ctrl = NIG_RL_BASE_TYPE << NIG_REG_LB_BRBRATELIMIT_CTRL_LB_BRBRATELIMIT_BASE_TYPE_SHIFT;
    ecore_wr(p_hwfn, p_ptt, NIG_REG_LB_BRBRATELIMIT_CTRL, ctrl);

    /* Configure and enable global LB-only RL */
    if (req->lb_rate) {
        /* Configure */
        ecore_wr(p_hwfn, p_ptt, NIG_REG_LB_BRBRATELIMIT_INC_PERIOD, NIG_RL_PERIOD_CLK_25M);
        inc_val = NIG_RL_INC_VAL(req->lb_rate);
        ecore_wr(p_hwfn, p_ptt, NIG_REG_LB_BRBRATELIMIT_INC_VALUE, inc_val);
        ecore_wr(p_hwfn, p_ptt, NIG_REG_LB_BRBRATELIMIT_MAX_VALUE,
                 NIG_RL_MAX_VAL(inc_val, req->mtu));

        /* Enable */
        ctrl |= 1 << NIG_REG_LB_BRBRATELIMIT_CTRL_LB_BRBRATELIMIT_EN_SHIFT;
        ecore_wr(p_hwfn, p_ptt, NIG_REG_LB_BRBRATELIMIT_CTRL, ctrl);
    }

    /* Per-TC RLs */
    for (tc = 0, reg_offset = 0; tc < NUM_OF_PHYS_TCS; tc++, reg_offset += 4) {
        /* Disable TC RL */
        ctrl = NIG_RL_BASE_TYPE << NIG_REG_LB_TCRATELIMIT_CTRL_0_LB_TCRATELIMIT_BASE_TYPE_0_SHIFT;
        ecore_wr(p_hwfn, p_ptt, NIG_REG_LB_TCRATELIMIT_CTRL_0 + reg_offset, ctrl);

        /* Configure and enable TC RL */
        if (!req->tc_rate[tc])
            continue;

        /* Configure */
        ecore_wr(p_hwfn, p_ptt, NIG_REG_LB_TCRATELIMIT_INC_PERIOD_0 + reg_offset,
                 NIG_RL_PERIOD_CLK_25M);
        inc_val = NIG_RL_INC_VAL(req->tc_rate[tc]);
        ecore_wr(p_hwfn, p_ptt, NIG_REG_LB_TCRATELIMIT_INC_VALUE_0 + reg_offset, inc_val);
        ecore_wr(p_hwfn, p_ptt, NIG_REG_LB_TCRATELIMIT_MAX_VALUE_0 + reg_offset,
                 NIG_RL_MAX_VAL(inc_val, req->mtu));

        /* Enable */
        ctrl |= 1 << NIG_REG_LB_TCRATELIMIT_CTRL_0_LB_TCRATELIMIT_EN_0_SHIFT;
        ecore_wr(p_hwfn, p_ptt, NIG_REG_LB_TCRATELIMIT_CTRL_0 + reg_offset, ctrl);
    }
}

void ecore_init_nig_pri_tc_map(struct ecore_hwfn *p_hwfn,
                               struct ecore_ptt *p_ptt,
                               struct init_nig_pri_tc_map_req *req)
{
    u8 tc_pri_mask[NUM_OF_PHYS_TCS] = { 0 };
    u32 pri_tc_mask = 0;
    u8 pri, tc;

    for (pri = 0; pri < NUM_OF_VLAN_PRIORITIES; pri++) {
        if (!req->pri[pri].valid)
            continue;

        pri_tc_mask |= (req->pri[pri].tc_id << (pri * NIG_PRIORITY_MAP_TC_BITS));
        tc_pri_mask[req->pri[pri].tc_id] |= (1 << pri);
    }

    /* Write priority -> TC mask */
    ecore_wr(p_hwfn, p_ptt, NIG_REG_PKT_PRIORITY_TO_TC, pri_tc_mask);

    /* Write TC -> priority mask */
    for (tc = 0; tc < NUM_OF_PHYS_TCS; tc++) {
        ecore_wr(p_hwfn, p_ptt, NIG_REG_PRIORITY_FOR_TC_0 + tc * 4, tc_pri_mask[tc]);
        ecore_wr(p_hwfn, p_ptt, NIG_REG_RX_TC0_PRIORITY_MASK + tc * 4, tc_pri_mask[tc]);
    }
}

#endif /* UNUSED_HSI_FUNC */
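/*
 * Editor's illustrative sketch (not part of the driver): the priority->TC
 * packing used in ecore_init_nig_pri_tc_map above, 4 bits per VLAN
 * priority. Compiled out so it cannot affect the build.
 */
#if 0
static void example_pri_tc_packing(void)
{
    u32 pri_tc_mask = 0;

    /* Priority 0 -> TC 1, priority 1 -> TC 5:
     * (1 << 0) | (5 << 4) = 0x51
     */
    pri_tc_mask |= (1 << (0 * NIG_PRIORITY_MAP_TC_BITS));
    pri_tc_mask |= (5 << (1 * NIG_PRIORITY_MAP_TC_BITS));

    (void)pri_tc_mask;
}
#endif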
#ifndef UNUSED_HSI_FUNC

/* PRS: ETS configuration constants */
#define PRS_ETS_MIN_WFQ_BYTES 1600
#define PRS_ETS_UP_BOUND(weight, mtu) (2 * ((weight) > (mtu) ? (weight) : (mtu)))

void ecore_init_prs_ets(struct ecore_hwfn *p_hwfn,
                        struct ecore_ptt *p_ptt,
                        struct init_ets_req *req)
{
    u32 tc_weight_addr_diff, tc_bound_addr_diff, min_weight = 0xffffffff;
    u8 tc, sp_tc_map = 0, wfq_tc_map = 0;

    tc_weight_addr_diff = PRS_REG_ETS_ARB_CREDIT_WEIGHT_1 - PRS_REG_ETS_ARB_CREDIT_WEIGHT_0;
    tc_bound_addr_diff = PRS_REG_ETS_ARB_CREDIT_UPPER_BOUND_1 -
                         PRS_REG_ETS_ARB_CREDIT_UPPER_BOUND_0;

    for (tc = 0; tc < NUM_OF_TCS; tc++) {
        struct init_ets_tc_req *tc_req = &req->tc_req[tc];

        /* Update SP map */
        if (tc_req->use_sp)
            sp_tc_map |= (1 << tc);

        if (!tc_req->use_wfq)
            continue;

        /* Update WFQ map */
        wfq_tc_map |= (1 << tc);

        /* Find minimal weight */
        if (tc_req->weight < min_weight)
            min_weight = tc_req->weight;
    }

    /* Write SP map */
    ecore_wr(p_hwfn, p_ptt, PRS_REG_ETS_ARB_CLIENT_IS_STRICT, sp_tc_map);

    /* Write WFQ map */
    ecore_wr(p_hwfn, p_ptt, PRS_REG_ETS_ARB_CLIENT_IS_SUBJECT2WFQ, wfq_tc_map);

    /* Write WFQ weights */
    for (tc = 0; tc < NUM_OF_TCS; tc++) {
        struct init_ets_tc_req *tc_req = &req->tc_req[tc];
        u32 byte_weight;

        if (!tc_req->use_wfq)
            continue;

        /* Translate weight to bytes */
        byte_weight = (PRS_ETS_MIN_WFQ_BYTES * tc_req->weight) / min_weight;

        /* Write WFQ weight */
        ecore_wr(p_hwfn, p_ptt,
                 PRS_REG_ETS_ARB_CREDIT_WEIGHT_0 + tc * tc_weight_addr_diff, byte_weight);

        /* Write WFQ upper bound */
        ecore_wr(p_hwfn, p_ptt,
                 PRS_REG_ETS_ARB_CREDIT_UPPER_BOUND_0 + tc * tc_bound_addr_diff,
                 PRS_ETS_UP_BOUND(byte_weight, req->mtu));
    }
}

#endif /* UNUSED_HSI_FUNC */
#ifndef UNUSED_HSI_FUNC

/* BRB: RAM configuration constants */
#define BRB_TOTAL_RAM_BLOCKS_BB 4800
#define BRB_TOTAL_RAM_BLOCKS_K2 5632
#define BRB_BLOCK_SIZE 128
#define BRB_MIN_BLOCKS_PER_TC 9
#define BRB_HYST_BYTES 10240
#define BRB_HYST_BLOCKS (BRB_HYST_BYTES / BRB_BLOCK_SIZE)
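/*
 * Editor's illustrative sketch (not part of the driver): BRB byte-to-block
 * conversions with the constants above, assuming 20480 bytes of headroom
 * per TC. Compiled out so it cannot affect the build.
 */
#if 0
static void example_brb_blocks(void)
{
    /* Hysteresis: 10240 / 128 = 80 blocks (BRB_HYST_BLOCKS) */
    u32 hyst_blocks = BRB_HYST_BLOCKS;

    /* Headroom: ceil(20480 / 128) = 160 blocks */
    u32 headroom_blocks = (u32)DIV_ROUND_UP(20480, BRB_BLOCK_SIZE);

    (void)hyst_blocks;
    (void)headroom_blocks;
}
#endif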
/* Temporary big RAM allocation - should be updated */
void ecore_init_brb_ram(struct ecore_hwfn *p_hwfn,
                        struct ecore_ptt *p_ptt,
                        struct init_brb_ram_req *req)
{
    u32 tc_headroom_blocks, min_pkt_size_blocks, total_blocks;
    u32 active_port_blocks, reg_offset = 0;
    u8 port, active_ports = 0;

    tc_headroom_blocks = (u32)DIV_ROUND_UP(req->headroom_per_tc, BRB_BLOCK_SIZE);
    min_pkt_size_blocks = (u32)DIV_ROUND_UP(req->min_pkt_size, BRB_BLOCK_SIZE);
    total_blocks = ECORE_IS_K2(p_hwfn->p_dev) ? BRB_TOTAL_RAM_BLOCKS_K2 :
                                                BRB_TOTAL_RAM_BLOCKS_BB;

    /* Find number of active ports */
    for (port = 0; port < MAX_NUM_PORTS; port++)
        if (req->num_active_tcs[port])
            active_ports++;

    active_port_blocks = (u32)(total_blocks / active_ports);

    for (port = 0; port < req->max_ports_per_engine; port++) {
        u32 port_blocks, port_shared_blocks, port_guaranteed_blocks;
        u32 full_xoff_th, full_xon_th, pause_xoff_th, pause_xon_th;
        u32 tc_guaranteed_blocks;
        u8 tc;

        /* Calculate per-port sizes */
        tc_guaranteed_blocks = (u32)DIV_ROUND_UP(req->guranteed_per_tc, BRB_BLOCK_SIZE);
        port_blocks = req->num_active_tcs[port] ? active_port_blocks : 0;
        port_guaranteed_blocks = req->num_active_tcs[port] * tc_guaranteed_blocks;
        port_shared_blocks = port_blocks - port_guaranteed_blocks;
        full_xoff_th = req->num_active_tcs[port] * BRB_MIN_BLOCKS_PER_TC;
        full_xon_th = full_xoff_th + min_pkt_size_blocks;
        pause_xoff_th = tc_headroom_blocks;
        pause_xon_th = pause_xoff_th + min_pkt_size_blocks;

        /* Init total size per port */
        ecore_wr(p_hwfn, p_ptt, BRB_REG_TOTAL_MAC_SIZE + port * 4, port_blocks);

        /* Init shared size per port */
        ecore_wr(p_hwfn, p_ptt, BRB_REG_SHARED_HR_AREA + port * 4, port_shared_blocks);

        for (tc = 0; tc < NUM_OF_TCS; tc++, reg_offset += 4) {
            /* Clear init values for non-active TCs */
            if (tc == req->num_active_tcs[port]) {
                tc_guaranteed_blocks = 0;
                full_xoff_th = 0;
                full_xon_th = 0;
                pause_xoff_th = 0;
                pause_xon_th = 0;
            }

            /* Init guaranteed size per TC */
            ecore_wr(p_hwfn, p_ptt, BRB_REG_TC_GUARANTIED_0 + reg_offset, tc_guaranteed_blocks);
            ecore_wr(p_hwfn, p_ptt, BRB_REG_MAIN_TC_GUARANTIED_HYST_0 + reg_offset,
                     BRB_HYST_BLOCKS);

            /* Init pause/full thresholds per physical TC - for
             * loopback traffic.
             */
            ecore_wr(p_hwfn, p_ptt, BRB_REG_LB_TC_FULL_XOFF_THRESHOLD_0 + reg_offset, full_xoff_th);
            ecore_wr(p_hwfn, p_ptt, BRB_REG_LB_TC_FULL_XON_THRESHOLD_0 + reg_offset, full_xon_th);
            ecore_wr(p_hwfn, p_ptt, BRB_REG_LB_TC_PAUSE_XOFF_THRESHOLD_0 + reg_offset, pause_xoff_th);
            ecore_wr(p_hwfn, p_ptt, BRB_REG_LB_TC_PAUSE_XON_THRESHOLD_0 + reg_offset, pause_xon_th);

            /* Init pause/full thresholds per physical TC - for
             * main traffic.
             */
            ecore_wr(p_hwfn, p_ptt, BRB_REG_MAIN_TC_FULL_XOFF_THRESHOLD_0 + reg_offset, full_xoff_th);
            ecore_wr(p_hwfn, p_ptt, BRB_REG_MAIN_TC_FULL_XON_THRESHOLD_0 + reg_offset, full_xon_th);
            ecore_wr(p_hwfn, p_ptt, BRB_REG_MAIN_TC_PAUSE_XOFF_THRESHOLD_0 + reg_offset, pause_xoff_th);
            ecore_wr(p_hwfn, p_ptt, BRB_REG_MAIN_TC_PAUSE_XON_THRESHOLD_0 + reg_offset, pause_xon_th);
        }
    }
}

#endif /* UNUSED_HSI_FUNC */
#ifndef UNUSED_HSI_FUNC

/* In MF, should be called once per port to set EtherType of OuterTag */
void ecore_set_port_mf_ovlan_eth_type(struct ecore_hwfn *p_hwfn, u32 ethType)
{
    /* Update DORQ register */
    STORE_RT_REG(p_hwfn, DORQ_REG_TAG1_ETHERTYPE_RT_OFFSET, ethType);
}

#endif /* UNUSED_HSI_FUNC */
#define SET_TUNNEL_TYPE_ENABLE_BIT(var, offset, enable) \
    var = ((var) & ~(1 << (offset))) | ((enable) ? (1 << (offset)) : 0)
#define PRS_ETH_TUNN_OUTPUT_FORMAT -188897008
#define PRS_ETH_OUTPUT_FORMAT -46832

void ecore_set_vxlan_dest_port(struct ecore_hwfn *p_hwfn,
                               struct ecore_ptt *p_ptt,
                               u16 dest_port)
{
    /* Update PRS register */
    ecore_wr(p_hwfn, p_ptt, PRS_REG_VXLAN_PORT, dest_port);

    /* Update NIG register */
    ecore_wr(p_hwfn, p_ptt, NIG_REG_VXLAN_CTRL, dest_port);

    /* Update PBF register */
    ecore_wr(p_hwfn, p_ptt, PBF_REG_VXLAN_PORT, dest_port);
}

void ecore_set_vxlan_enable(struct ecore_hwfn *p_hwfn,
                            struct ecore_ptt *p_ptt,
                            bool vxlan_enable)
{
    u32 reg_val;

    /* Update PRS register */
    reg_val = ecore_rd(p_hwfn, p_ptt, PRS_REG_ENCAPSULATION_TYPE_EN);
    SET_TUNNEL_TYPE_ENABLE_BIT(reg_val, PRS_REG_ENCAPSULATION_TYPE_EN_VXLAN_ENABLE_SHIFT,
                               vxlan_enable);
    ecore_wr(p_hwfn, p_ptt, PRS_REG_ENCAPSULATION_TYPE_EN, reg_val);
    if (reg_val) { /* TODO: handle E5 init */
        reg_val = ecore_rd(p_hwfn, p_ptt, PRS_REG_OUTPUT_FORMAT_4_0_BB_K2);

        /* Update output only if tunnel blocks not included. */
        if (reg_val == (u32)PRS_ETH_OUTPUT_FORMAT)
            ecore_wr(p_hwfn, p_ptt, PRS_REG_OUTPUT_FORMAT_4_0_BB_K2,
                     (u32)PRS_ETH_TUNN_OUTPUT_FORMAT);
    }

    /* Update NIG register */
    reg_val = ecore_rd(p_hwfn, p_ptt, NIG_REG_ENC_TYPE_ENABLE);
    SET_TUNNEL_TYPE_ENABLE_BIT(reg_val, NIG_REG_ENC_TYPE_ENABLE_VXLAN_ENABLE_SHIFT,
                               vxlan_enable);
    ecore_wr(p_hwfn, p_ptt, NIG_REG_ENC_TYPE_ENABLE, reg_val);

    /* Update DORQ register */
    ecore_wr(p_hwfn, p_ptt, DORQ_REG_L2_EDPM_TUNNEL_VXLAN_EN, vxlan_enable ? 1 : 0);
}
void ecore_set_gre_enable(struct ecore_hwfn *p_hwfn,
                          struct ecore_ptt *p_ptt,
                          bool eth_gre_enable,
                          bool ip_gre_enable)
{
    u32 reg_val;

    /* Update PRS register */
    reg_val = ecore_rd(p_hwfn, p_ptt, PRS_REG_ENCAPSULATION_TYPE_EN);
    SET_TUNNEL_TYPE_ENABLE_BIT(reg_val, PRS_REG_ENCAPSULATION_TYPE_EN_ETH_OVER_GRE_ENABLE_SHIFT,
                               eth_gre_enable);
    SET_TUNNEL_TYPE_ENABLE_BIT(reg_val, PRS_REG_ENCAPSULATION_TYPE_EN_IP_OVER_GRE_ENABLE_SHIFT,
                               ip_gre_enable);
    ecore_wr(p_hwfn, p_ptt, PRS_REG_ENCAPSULATION_TYPE_EN, reg_val);
    if (reg_val) { /* TODO: handle E5 init */
        reg_val = ecore_rd(p_hwfn, p_ptt, PRS_REG_OUTPUT_FORMAT_4_0_BB_K2);

        /* Update output only if tunnel blocks not included. */
        if (reg_val == (u32)PRS_ETH_OUTPUT_FORMAT)
            ecore_wr(p_hwfn, p_ptt, PRS_REG_OUTPUT_FORMAT_4_0_BB_K2,
                     (u32)PRS_ETH_TUNN_OUTPUT_FORMAT);
    }

    /* Update NIG register */
    reg_val = ecore_rd(p_hwfn, p_ptt, NIG_REG_ENC_TYPE_ENABLE);
    SET_TUNNEL_TYPE_ENABLE_BIT(reg_val, NIG_REG_ENC_TYPE_ENABLE_ETH_OVER_GRE_ENABLE_SHIFT,
                               eth_gre_enable);
    SET_TUNNEL_TYPE_ENABLE_BIT(reg_val, NIG_REG_ENC_TYPE_ENABLE_IP_OVER_GRE_ENABLE_SHIFT,
                               ip_gre_enable);
    ecore_wr(p_hwfn, p_ptt, NIG_REG_ENC_TYPE_ENABLE, reg_val);

    /* Update DORQ registers */
    ecore_wr(p_hwfn, p_ptt, DORQ_REG_L2_EDPM_TUNNEL_GRE_ETH_EN, eth_gre_enable ? 1 : 0);
    ecore_wr(p_hwfn, p_ptt, DORQ_REG_L2_EDPM_TUNNEL_GRE_IP_EN, ip_gre_enable ? 1 : 0);
}

void ecore_set_geneve_dest_port(struct ecore_hwfn *p_hwfn,
                                struct ecore_ptt *p_ptt,
                                u16 dest_port)
{
    /* Update PRS register */
    ecore_wr(p_hwfn, p_ptt, PRS_REG_NGE_PORT, dest_port);

    /* Update NIG register */
    ecore_wr(p_hwfn, p_ptt, NIG_REG_NGE_PORT, dest_port);

    /* Update PBF register */
    ecore_wr(p_hwfn, p_ptt, PBF_REG_NGE_PORT, dest_port);
}

void ecore_set_geneve_enable(struct ecore_hwfn *p_hwfn,
                             struct ecore_ptt *p_ptt,
                             bool eth_geneve_enable,
                             bool ip_geneve_enable)
{
    u32 reg_val;

    /* Update PRS register */
    reg_val = ecore_rd(p_hwfn, p_ptt, PRS_REG_ENCAPSULATION_TYPE_EN);
    SET_TUNNEL_TYPE_ENABLE_BIT(reg_val, PRS_REG_ENCAPSULATION_TYPE_EN_ETH_OVER_GENEVE_ENABLE_SHIFT,
                               eth_geneve_enable);
    SET_TUNNEL_TYPE_ENABLE_BIT(reg_val, PRS_REG_ENCAPSULATION_TYPE_EN_IP_OVER_GENEVE_ENABLE_SHIFT,
                               ip_geneve_enable);
    ecore_wr(p_hwfn, p_ptt, PRS_REG_ENCAPSULATION_TYPE_EN, reg_val);
    if (reg_val) { /* TODO: handle E5 init */
        reg_val = ecore_rd(p_hwfn, p_ptt, PRS_REG_OUTPUT_FORMAT_4_0_BB_K2);

        /* Update output only if tunnel blocks not included. */
        if (reg_val == (u32)PRS_ETH_OUTPUT_FORMAT)
            ecore_wr(p_hwfn, p_ptt, PRS_REG_OUTPUT_FORMAT_4_0_BB_K2,
                     (u32)PRS_ETH_TUNN_OUTPUT_FORMAT);
    }

    /* Update NIG register */
    ecore_wr(p_hwfn, p_ptt, NIG_REG_NGE_ETH_ENABLE, eth_geneve_enable ? 1 : 0);
    ecore_wr(p_hwfn, p_ptt, NIG_REG_NGE_IP_ENABLE, ip_geneve_enable ? 1 : 0);

    /* EDPM with geneve tunnel not supported in BB */
    if (ECORE_IS_BB_B0(p_hwfn->p_dev))
        return;

    /* Update DORQ registers */
    ecore_wr(p_hwfn, p_ptt, DORQ_REG_L2_EDPM_TUNNEL_NGE_ETH_EN_K2_E5, eth_geneve_enable ? 1 : 0);
    ecore_wr(p_hwfn, p_ptt, DORQ_REG_L2_EDPM_TUNNEL_NGE_IP_EN_K2_E5, ip_geneve_enable ? 1 : 0);
}
#define PRS_ETH_VXLAN_NO_L2_ENABLE_OFFSET 4
#define PRS_ETH_VXLAN_NO_L2_OUTPUT_FORMAT -927094512

void ecore_set_vxlan_no_l2_enable(struct ecore_hwfn *p_hwfn,
                                  struct ecore_ptt *p_ptt,
                                  bool enable)
{
    u32 reg_val, cfg_mask;

    /* Read PRS config register */
    reg_val = ecore_rd(p_hwfn, p_ptt, PRS_REG_MSG_INFO);

    /* Set VXLAN_NO_L2_ENABLE mask */
    cfg_mask = (1 << PRS_ETH_VXLAN_NO_L2_ENABLE_OFFSET);

    if (enable) {
        /* Set VXLAN_NO_L2_ENABLE flag */
        reg_val |= cfg_mask;

        /* Update PRS FIC register */
        ecore_wr(p_hwfn, p_ptt, PRS_REG_OUTPUT_FORMAT_4_0_BB_K2,
                 (u32)PRS_ETH_VXLAN_NO_L2_OUTPUT_FORMAT);
    } else {
        /* Clear VXLAN_NO_L2_ENABLE flag */
        reg_val &= ~cfg_mask;
    }

    /* Write PRS config register */
    ecore_wr(p_hwfn, p_ptt, PRS_REG_MSG_INFO, reg_val);
}

#ifndef UNUSED_HSI_FUNC

#define T_ETH_PACKET_ACTION_GFT_EVENTID 23
#define PARSER_ETH_CONN_GFT_ACTION_CM_HDR 272
#define T_ETH_PACKET_MATCH_RFS_EVENTID 25
#define PARSER_ETH_CONN_CM_HDR 0
#define CAM_LINE_SIZE sizeof(u32)
#define RAM_LINE_SIZE sizeof(u64)
#define REG_SIZE sizeof(u32)

void ecore_gft_disable(struct ecore_hwfn *p_hwfn,
                       struct ecore_ptt *p_ptt,
                       u16 pf_id)
{
    /* Disable GFT search for PF */
    ecore_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_GFT, 0);

    /* Clean RAM & CAM for next GFT session */

    /* Zero CAM line */
    ecore_wr(p_hwfn, p_ptt, PRS_REG_GFT_CAM + CAM_LINE_SIZE * pf_id, 0);

    /* Zero RAM line */
    ecore_wr(p_hwfn, p_ptt, PRS_REG_GFT_PROFILE_MASK_RAM + RAM_LINE_SIZE * pf_id, 0);
    ecore_wr(p_hwfn, p_ptt, PRS_REG_GFT_PROFILE_MASK_RAM + RAM_LINE_SIZE * pf_id + REG_SIZE, 0);
}

void ecore_set_gft_event_id_cm_hdr(struct ecore_hwfn *p_hwfn,
                                   struct ecore_ptt *p_ptt)
{
    u32 rfs_cm_hdr_event_id;

    /* Set RFS event ID to be awakened in Tstorm by PRS */
    rfs_cm_hdr_event_id = ecore_rd(p_hwfn, p_ptt, PRS_REG_CM_HDR_GFT);
    rfs_cm_hdr_event_id |= T_ETH_PACKET_ACTION_GFT_EVENTID << PRS_REG_CM_HDR_GFT_EVENT_ID_SHIFT;
    rfs_cm_hdr_event_id |= PARSER_ETH_CONN_GFT_ACTION_CM_HDR << PRS_REG_CM_HDR_GFT_CM_HDR_SHIFT;
    ecore_wr(p_hwfn, p_ptt, PRS_REG_CM_HDR_GFT, rfs_cm_hdr_event_id);
}

void ecore_gft_config(struct ecore_hwfn *p_hwfn,
                      struct ecore_ptt *p_ptt,
                      u16 pf_id,
                      bool tcp,
                      bool udp,
                      bool ipv4,
                      bool ipv6,
                      enum gft_profile_type profile_type)
{
    u32 reg_val, cam_line, ram_line_lo, ram_line_hi;

    if (!ipv6 && !ipv4)
        DP_NOTICE(p_hwfn, true, "gft_config: must accept at least one of - ipv4 or ipv6\n");
    if (!tcp && !udp)
        DP_NOTICE(p_hwfn, true, "gft_config: must accept at least one of - udp or tcp\n");
    if (profile_type >= MAX_GFT_PROFILE_TYPE)
        DP_NOTICE(p_hwfn, true, "gft_config: unsupported gft_profile_type\n");

    /* Set RFS event ID to be awakened in Tstorm by PRS */
    reg_val = T_ETH_PACKET_MATCH_RFS_EVENTID << PRS_REG_CM_HDR_GFT_EVENT_ID_SHIFT;
    reg_val |= PARSER_ETH_CONN_CM_HDR << PRS_REG_CM_HDR_GFT_CM_HDR_SHIFT;
    ecore_wr(p_hwfn, p_ptt, PRS_REG_CM_HDR_GFT, reg_val);

    /* Do not load context, only cid, in PRS on match. */
    ecore_wr(p_hwfn, p_ptt, PRS_REG_LOAD_L2_FILTER, 0);

    /* Do not use tenant ID exist bit for GFT search */
    ecore_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_TENANT_ID, 0);

    /* Set CAM */
    cam_line = 0;
    SET_FIELD(cam_line, GFT_CAM_LINE_MAPPED_VALID, 1);

    /* Filters are per PF!! */
    SET_FIELD(cam_line, GFT_CAM_LINE_MAPPED_PF_ID_MASK, GFT_CAM_LINE_MAPPED_PF_ID_MASK_MASK);
    SET_FIELD(cam_line, GFT_CAM_LINE_MAPPED_PF_ID, pf_id);

    if (!(tcp && udp)) {
        SET_FIELD(cam_line, GFT_CAM_LINE_MAPPED_UPPER_PROTOCOL_TYPE_MASK,
                  GFT_CAM_LINE_MAPPED_UPPER_PROTOCOL_TYPE_MASK_MASK);
        if (tcp)
            SET_FIELD(cam_line, GFT_CAM_LINE_MAPPED_UPPER_PROTOCOL_TYPE, GFT_PROFILE_TCP_PROTOCOL);
        else
            SET_FIELD(cam_line, GFT_CAM_LINE_MAPPED_UPPER_PROTOCOL_TYPE, GFT_PROFILE_UDP_PROTOCOL);
    }

    if (!(ipv4 && ipv6)) {
        SET_FIELD(cam_line, GFT_CAM_LINE_MAPPED_IP_VERSION_MASK, 1);
        if (ipv4)
            SET_FIELD(cam_line, GFT_CAM_LINE_MAPPED_IP_VERSION, GFT_PROFILE_IPV4);
        else
            SET_FIELD(cam_line, GFT_CAM_LINE_MAPPED_IP_VERSION, GFT_PROFILE_IPV6);
    }

    /* Write characteristics to CAM */
    ecore_wr(p_hwfn, p_ptt, PRS_REG_GFT_CAM + CAM_LINE_SIZE * pf_id, cam_line);
    cam_line = ecore_rd(p_hwfn, p_ptt, PRS_REG_GFT_CAM + CAM_LINE_SIZE * pf_id);

    /* Write line to RAM - compare to filter 4 tuple */
    ram_line_lo = 0;
    ram_line_hi = 0;

    /* Tunnel type */
    SET_FIELD(ram_line_lo, GFT_RAM_LINE_TUNNEL_DST_PORT, 1);
    SET_FIELD(ram_line_lo, GFT_RAM_LINE_TUNNEL_OVER_IP_PROTOCOL, 1);

    if (profile_type == GFT_PROFILE_TYPE_4_TUPLE) {
        SET_FIELD(ram_line_hi, GFT_RAM_LINE_DST_IP, 1);
        SET_FIELD(ram_line_hi, GFT_RAM_LINE_SRC_IP, 1);
        SET_FIELD(ram_line_hi, GFT_RAM_LINE_OVER_IP_PROTOCOL, 1);
        SET_FIELD(ram_line_lo, GFT_RAM_LINE_ETHERTYPE, 1);
        SET_FIELD(ram_line_lo, GFT_RAM_LINE_SRC_PORT, 1);
        SET_FIELD(ram_line_lo, GFT_RAM_LINE_DST_PORT, 1);
    } else if (profile_type == GFT_PROFILE_TYPE_L4_DST_PORT) {
        SET_FIELD(ram_line_hi, GFT_RAM_LINE_OVER_IP_PROTOCOL, 1);
        SET_FIELD(ram_line_lo, GFT_RAM_LINE_ETHERTYPE, 1);
        SET_FIELD(ram_line_lo, GFT_RAM_LINE_DST_PORT, 1);
    } else if (profile_type == GFT_PROFILE_TYPE_IP_DST_ADDR) {
        SET_FIELD(ram_line_hi, GFT_RAM_LINE_DST_IP, 1);
        SET_FIELD(ram_line_lo, GFT_RAM_LINE_ETHERTYPE, 1);
    } else if (profile_type == GFT_PROFILE_TYPE_IP_SRC_ADDR) {
        SET_FIELD(ram_line_hi, GFT_RAM_LINE_SRC_IP, 1);
        SET_FIELD(ram_line_lo, GFT_RAM_LINE_ETHERTYPE, 1);
    } else if (profile_type == GFT_PROFILE_TYPE_TUNNEL_TYPE) {
        SET_FIELD(ram_line_lo, GFT_RAM_LINE_TUNNEL_ETHERTYPE, 1);
    }

    ecore_wr(p_hwfn, p_ptt, PRS_REG_GFT_PROFILE_MASK_RAM + RAM_LINE_SIZE * pf_id, ram_line_lo);
    ecore_wr(p_hwfn, p_ptt,
             PRS_REG_GFT_PROFILE_MASK_RAM + RAM_LINE_SIZE * pf_id + REG_SIZE, ram_line_hi);

    /* Set default profile so that no filter match will happen */
    ecore_wr(p_hwfn, p_ptt,
             PRS_REG_GFT_PROFILE_MASK_RAM + RAM_LINE_SIZE * PRS_GFT_CAM_LINES_NO_MATCH,
             0xffffffff);
    ecore_wr(p_hwfn, p_ptt,
             PRS_REG_GFT_PROFILE_MASK_RAM + RAM_LINE_SIZE * PRS_GFT_CAM_LINES_NO_MATCH + REG_SIZE,
             0x3ff);

    /* Enable GFT search */
    ecore_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_GFT, 1);
}

#endif /* UNUSED_HSI_FUNC */
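/*
 * Editor's illustrative sketch (not part of the driver): a hypothetical
 * caller arming GFT for TCP/IPv4 4-tuple steering on PF 0. Compiled out so
 * it cannot affect the build.
 */
#if 0
static void example_gft_usage(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt)
{
    /* TCP-only, IPv4-only, full 4-tuple profile */
    ecore_gft_config(p_hwfn, p_ptt, 0 /* pf_id */,
                     true /* tcp */, false /* udp */,
                     true /* ipv4 */, false /* ipv6 */,
                     GFT_PROFILE_TYPE_4_TUPLE);
}
#endif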
/* Configure VF zone size mode */
void ecore_config_vf_zone_size_mode(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt, u16 mode, bool runtime_init)
{
	u32 msdm_vf_size_log = MSTORM_VF_ZONE_DEFAULT_SIZE_LOG;
	u32 msdm_vf_offset_mask;

	if (mode == VF_ZONE_SIZE_MODE_DOUBLE)
		msdm_vf_size_log += 1;
	else if (mode == VF_ZONE_SIZE_MODE_QUAD)
		msdm_vf_size_log += 2;

	msdm_vf_offset_mask = (1 << msdm_vf_size_log) - 1;

	if (runtime_init) {
		STORE_RT_REG(p_hwfn, PGLUE_REG_B_MSDM_VF_SHIFT_B_RT_OFFSET, msdm_vf_size_log);
		STORE_RT_REG(p_hwfn, PGLUE_REG_B_MSDM_OFFSET_MASK_B_RT_OFFSET, msdm_vf_offset_mask);
	}
	else {
		ecore_wr(p_hwfn, p_ptt, PGLUE_B_REG_MSDM_VF_SHIFT_B, msdm_vf_size_log);
		ecore_wr(p_hwfn, p_ptt, PGLUE_B_REG_MSDM_OFFSET_MASK_B, msdm_vf_offset_mask);
	}
}

/* Get mstorm statistics offset by VF zone size mode */
u32 ecore_get_mstorm_queue_stat_offset(struct ecore_hwfn *p_hwfn, u16 stat_cnt_id, u16 vf_zone_size_mode)
{
	u32 offset = MSTORM_QUEUE_STAT_OFFSET(stat_cnt_id);

	if ((vf_zone_size_mode != VF_ZONE_SIZE_MODE_DEFAULT) && (stat_cnt_id > MAX_NUM_PFS)) {
		if (vf_zone_size_mode == VF_ZONE_SIZE_MODE_DOUBLE)
			offset += (1 << MSTORM_VF_ZONE_DEFAULT_SIZE_LOG) * (stat_cnt_id - MAX_NUM_PFS);
		else if (vf_zone_size_mode == VF_ZONE_SIZE_MODE_QUAD)
			offset += 3 * (1 << MSTORM_VF_ZONE_DEFAULT_SIZE_LOG) * (stat_cnt_id - MAX_NUM_PFS);
	}

	return offset;
}

/* Get mstorm VF producer offset by VF zone size mode */
u32 ecore_get_mstorm_eth_vf_prods_offset(struct ecore_hwfn *p_hwfn, u8 vf_id, u8 vf_queue_id, u16 vf_zone_size_mode)
{
	u32 offset = MSTORM_ETH_VF_PRODS_OFFSET(vf_id, vf_queue_id);

	if (vf_zone_size_mode != VF_ZONE_SIZE_MODE_DEFAULT) {
		if (vf_zone_size_mode == VF_ZONE_SIZE_MODE_DOUBLE)
			offset += (1 << MSTORM_VF_ZONE_DEFAULT_SIZE_LOG) * vf_id;
		else if (vf_zone_size_mode == VF_ZONE_SIZE_MODE_QUAD)
			offset += 3 * (1 << MSTORM_VF_ZONE_DEFAULT_SIZE_LOG) * vf_id;
	}

	return offset;
}
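/* Worked example (illustrative, compiled out; the size-log value is an
 * assumption, not taken from the headers): if the default VF zone is
 * (1 << MSTORM_VF_ZONE_DEFAULT_SIZE_LOG) bytes, DOUBLE mode adds one extra
 * chunk of that size per zone and QUAD mode adds three, which is where the
 * implicit "* 1" and explicit "* 3" multipliers above come from - every
 * zone preceding the target shifts it by (extra_chunks * chunk) bytes.
 */
#if 0
static u32 example_vf_zone_extra_bytes(u8 vf_id, bool quad)
{
	u32 chunk = 1 << MSTORM_VF_ZONE_DEFAULT_SIZE_LOG; /* default zone size */
	u32 extra_chunks = quad ? 3 : 1;		  /* QUAD adds 3, DOUBLE adds 1 */

	/* total shift accumulated in front of this VF's zone */
	return extra_chunks * chunk * vf_id;
}
#endif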
#ifndef LINUX_REMOVE
#define CRC8_INIT_VALUE 0xFF
#endif
static u8 cdu_crc8_table[CRC8_TABLE_SIZE];

/* Calculate and return CDU validation byte per connection type / region / cid */
static u8 ecore_calc_cdu_validation_byte(u8 conn_type, u8 region, u32 cid)
{
	const u8 validation_cfg = CDU_VALIDATION_DEFAULT_CFG;

	static u8 crc8_table_valid; /* automatically initialized to 0 */
	u8 crc, validation_byte = 0;
	u32 validation_string = 0;
	u32 data_to_crc;

	if (crc8_table_valid == 0) {
		OSAL_CRC8_POPULATE(cdu_crc8_table, 0x07);
		crc8_table_valid = 1;
	}

	/* The CRC is calculated on the String-to-compress:
	 * [31:8] = {CID[31:20],CID[11:0]}
	 * [7:4]  = Region
	 * [3:0]  = Type
	 */
	if ((validation_cfg >> CDU_CONTEXT_VALIDATION_CFG_USE_CID) & 1)
		validation_string |= (cid & 0xFFF00000) | ((cid & 0xFFF) << 8);

	if ((validation_cfg >> CDU_CONTEXT_VALIDATION_CFG_USE_REGION) & 1)
		validation_string |= ((region & 0xF) << 4);

	if ((validation_cfg >> CDU_CONTEXT_VALIDATION_CFG_USE_TYPE) & 1)
		validation_string |= (conn_type & 0xF);

	/* Convert to big-endian and calculate CRC8 */
	data_to_crc = OSAL_BE32_TO_CPU(validation_string);

	crc = OSAL_CRC8(cdu_crc8_table, (u8 *)&data_to_crc, sizeof(data_to_crc), CRC8_INIT_VALUE);

	/* The validation byte [7:0] is composed:
	 * for type A validation
	 * [7]   = active configuration bit
	 * [6:0] = crc[6:0]
	 *
	 * for type B validation
	 * [7]   = active configuration bit
	 * [6:3] = connection_type[3:0]
	 * [2:0] = crc[2:0]
	 */
	validation_byte |= ((validation_cfg >> CDU_CONTEXT_VALIDATION_CFG_USE_ACTIVE) & 1) << 7;

	if ((validation_cfg >> CDU_CONTEXT_VALIDATION_CFG_VALIDATION_TYPE_SHIFT) & 1)
		validation_byte |= ((conn_type & 0xF) << 3) | (crc & 0x7);
	else
		validation_byte |= crc & 0x7F;

	return validation_byte;
}

/* Calculate and set validation bytes for session context */
void ecore_calc_session_ctx_validation(void *p_ctx_mem, u16 ctx_size, u8 ctx_type, u32 cid)
{
	u8 *x_val_ptr, *t_val_ptr, *u_val_ptr, *p_ctx;

	p_ctx = (u8 *)p_ctx_mem;
	x_val_ptr = &p_ctx[con_region_offsets[0][ctx_type]];
	t_val_ptr = &p_ctx[con_region_offsets[1][ctx_type]];
	u_val_ptr = &p_ctx[con_region_offsets[2][ctx_type]];

	OSAL_MEMSET(p_ctx, 0, ctx_size);

	*x_val_ptr = ecore_calc_cdu_validation_byte(ctx_type, 3, cid);
	*t_val_ptr = ecore_calc_cdu_validation_byte(ctx_type, 4, cid);
	*u_val_ptr = ecore_calc_cdu_validation_byte(ctx_type, 5, cid);
}

/* Calculate and set validation bytes for task context */
void ecore_calc_task_ctx_validation(void *p_ctx_mem, u16 ctx_size, u8 ctx_type, u32 tid)
{
	u8 *p_ctx, *region1_val_ptr;

	p_ctx = (u8 *)p_ctx_mem;
	region1_val_ptr = &p_ctx[task_region_offsets[0][ctx_type]];

	OSAL_MEMSET(p_ctx, 0, ctx_size);

	*region1_val_ptr = ecore_calc_cdu_validation_byte(ctx_type, 1, tid);
}

/* Memset session context to 0 while preserving validation bytes */
void ecore_memset_session_ctx(void *p_ctx_mem, u32 ctx_size, u8 ctx_type)
{
	u8 *x_val_ptr, *t_val_ptr, *u_val_ptr, *p_ctx;
	u8 x_val, t_val, u_val;

	p_ctx = (u8 *)p_ctx_mem;
	x_val_ptr = &p_ctx[con_region_offsets[0][ctx_type]];
	t_val_ptr = &p_ctx[con_region_offsets[1][ctx_type]];
	u_val_ptr = &p_ctx[con_region_offsets[2][ctx_type]];

	x_val = *x_val_ptr;
	t_val = *t_val_ptr;
	u_val = *u_val_ptr;

	OSAL_MEMSET(p_ctx, 0, ctx_size);

	*x_val_ptr = x_val;
	*t_val_ptr = t_val;
	*u_val_ptr = u_val;
}

/* Memset task context to 0 while preserving validation bytes */
void ecore_memset_task_ctx(void *p_ctx_mem, u32 ctx_size, u8 ctx_type)
{
	u8 *p_ctx, *region1_val_ptr;
	u8 region1_val;

	p_ctx = (u8 *)p_ctx_mem;
	region1_val_ptr = &p_ctx[task_region_offsets[0][ctx_type]];

	region1_val = *region1_val_ptr;

	OSAL_MEMSET(p_ctx, 0, ctx_size);

	*region1_val_ptr = region1_val;
}

/* Enable and configure context validation */
void ecore_enable_context_validation(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt)
{
	u32 ctx_validation;

	/* Enable validation for connection region 3: CCFC_CTX_VALID0[31:24] */
	ctx_validation = CDU_VALIDATION_DEFAULT_CFG << 24;
	ecore_wr(p_hwfn, p_ptt, CDU_REG_CCFC_CTX_VALID0, ctx_validation);

	/* Enable validation for connection region 5: CCFC_CTX_VALID1[15:8] */
	ctx_validation = CDU_VALIDATION_DEFAULT_CFG << 8;
	ecore_wr(p_hwfn, p_ptt, CDU_REG_CCFC_CTX_VALID1, ctx_validation);

	/* Enable validation for connection region 1: TCFC_CTX_VALID0[15:8] */
	ctx_validation = CDU_VALIDATION_DEFAULT_CFG << 8;
	ecore_wr(p_hwfn, p_ptt, CDU_REG_TCFC_CTX_VALID0, ctx_validation);
}
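/* Usage sketch (illustrative, compiled out): one plausible ordering is to
 * enable validation once per hwfn, then stamp each session context when it
 * is allocated. For a cid of 0x00100200, region 3 and type 1 (assumed
 * values), the String-to-compress built above is
 * (0x00100200 & 0xFFF00000) | ((0x00100200 & 0xFFF) << 8) | (3 << 4) | 1
 * = 0x00120031, which is byte-swapped and fed to the CRC8 (poly 0x07).
 */
#if 0
static void example_ctx_validation_flow(struct ecore_hwfn *p_hwfn,
					struct ecore_ptt *p_ptt,
					void *p_ctx, u16 ctx_size)
{
	u8 conn_type = 1;	/* hypothetical connection type */
	u32 cid = 0x00100200;	/* hypothetical cid */

	ecore_enable_context_validation(p_hwfn, p_ptt);

	/* zeroes the context and writes the X/T/U region validation bytes */
	ecore_calc_session_ctx_validation(p_ctx, ctx_size, conn_type, cid);
}
#endif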
#define RSS_IND_TABLE_BASE_ADDR		4112
#define RSS_IND_TABLE_VPORT_SIZE	16
#define RSS_IND_TABLE_ENTRY_PER_LINE	8

/* Update RSS indirection table entry. */
void ecore_update_eth_rss_ind_table_entry(struct ecore_hwfn *p_hwfn,
					  struct ecore_ptt *p_ptt,
					  u8 rss_id,
					  u8 ind_table_index,
					  u16 ind_table_value)
{
	u32 cnt, rss_addr;
	u32 *reg_val;
	u16 rss_ind_entry[RSS_IND_TABLE_ENTRY_PER_LINE];
	u16 rss_ind_mask[RSS_IND_TABLE_ENTRY_PER_LINE];

	/* get entry address */
	rss_addr = RSS_IND_TABLE_BASE_ADDR +
		   RSS_IND_TABLE_VPORT_SIZE * rss_id +
		   ind_table_index / RSS_IND_TABLE_ENTRY_PER_LINE;

	/* prepare update command */
	ind_table_index %= RSS_IND_TABLE_ENTRY_PER_LINE;

	for (cnt = 0; cnt < RSS_IND_TABLE_ENTRY_PER_LINE; cnt++)
	{
		if (cnt == ind_table_index)
		{
			rss_ind_entry[cnt] = ind_table_value;
			rss_ind_mask[cnt] = 0xFFFF;
		}
		else
		{
			rss_ind_entry[cnt] = 0;
			rss_ind_mask[cnt] = 0;
		}
	}

	/* Update entry in HW */
	ecore_wr(p_hwfn, p_ptt, RSS_REG_RSS_RAM_ADDR, rss_addr);

	reg_val = (u32 *)rss_ind_mask;
	ecore_wr(p_hwfn, p_ptt, RSS_REG_RSS_RAM_MASK, reg_val[0]);
	ecore_wr(p_hwfn, p_ptt, RSS_REG_RSS_RAM_MASK + 4, reg_val[1]);
	ecore_wr(p_hwfn, p_ptt, RSS_REG_RSS_RAM_MASK + 8, reg_val[2]);
	ecore_wr(p_hwfn, p_ptt, RSS_REG_RSS_RAM_MASK + 12, reg_val[3]);

	reg_val = (u32 *)rss_ind_entry;
	ecore_wr(p_hwfn, p_ptt, RSS_REG_RSS_RAM_DATA, reg_val[0]);
	ecore_wr(p_hwfn, p_ptt, RSS_REG_RSS_RAM_DATA + 4, reg_val[1]);
	ecore_wr(p_hwfn, p_ptt, RSS_REG_RSS_RAM_DATA + 8, reg_val[2]);
	ecore_wr(p_hwfn, p_ptt, RSS_REG_RSS_RAM_DATA + 12, reg_val[3]);
}
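/* Worked example (illustrative, compiled out; all values assumed): for
 * rss_id = 2 and ind_table_index = 19, the RAM line address computed above
 * is 4112 + 16 * 2 + 19 / 8 = 4146, and within that line the slot being
 * replaced is 19 % 8 = 3; the other seven mask entries stay 0, so only
 * slot 3 is rewritten by the masked RAM update.
 */
#if 0
static void example_rss_ind_update(struct ecore_hwfn *p_hwfn,
				   struct ecore_ptt *p_ptt)
{
	/* point indirection entry 19 of RSS engine 2 at queue 5 */
	ecore_update_eth_rss_ind_table_entry(p_hwfn, p_ptt,
					     2 /* rss_id */,
					     19 /* ind_table_index */,
					     5 /* ind_table_value */);
}
#endif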