/*
 * Copyright (c) 2017-2018 Cavium, Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * File : ecore_init_fw_funcs.c
 */
#include <sys/cdefs.h>
#include "bcm_osal.h"
#include "ecore_hw.h"
#include "ecore_init_ops.h"
#include "reg_addr.h"
#include "ecore_rt_defs.h"
#include "ecore_hsi_common.h"
#include "ecore_hsi_init_func.h"
#include "ecore_hsi_eth.h"
#include "ecore_hsi_init_tool.h"
#include "ecore_iro.h"
#include "ecore_init_fw_funcs.h"

#define CDU_VALIDATION_DEFAULT_CFG 61

static u16 con_region_offsets[3][NUM_OF_CONNECTION_TYPES_E4] = {
	{ 400, 336, 352, 304, 304, 384, 416, 352}, /* region 3 offsets */
	{ 528, 496, 416, 448, 448, 512, 544, 480}, /* region 4 offsets */
	{ 608, 544, 496, 512, 576, 592, 624, 560}  /* region 5 offsets */
};
static u16 task_region_offsets[1][NUM_OF_CONNECTION_TYPES_E4] = {
	{ 240, 240, 112, 0, 0, 0, 0, 96} /* region 1 offsets */
};

/* General constants */
#define QM_PQ_MEM_4KB(pq_size) \
	(pq_size ? DIV_ROUND_UP((pq_size + 1) * QM_PQ_ELEMENT_SIZE, 0x1000) : 0)
#define QM_PQ_SIZE_256B(pq_size) \
	(pq_size ? DIV_ROUND_UP(pq_size, 0x100) - 1 : 0)
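
/* Illustrative sizing (assuming a 4-byte QM_PQ_ELEMENT_SIZE, which is not
 * defined in this file): for a PQ covering 512 CIDs, QM_PQ_MEM_4KB(512) =
 * DIV_ROUND_UP(513 * 4, 0x1000) = 1 page of 4KB, and QM_PQ_SIZE_256B(512) =
 * DIV_ROUND_UP(512, 0x100) - 1 = 1, i.e. the size is programmed minus one.
 */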
#define QM_INVALID_PQ_ID 0xffff

/* Feature enable */
#define QM_BYPASS_EN 1
#define QM_BYTE_CRD_EN 1

/* Other PQ constants */
#define QM_OTHER_PQS_PER_PF 4

/* VOQ constants */
#define QM_E5_NUM_EXT_VOQ (MAX_NUM_PORTS_E5 * NUM_OF_TCS)

/* WFQ constants: */

/* Upper bound in MB, 10 * burst size of 1ms in 50Gbps */
#define QM_WFQ_UPPER_BOUND 62500000

/* Bit of VOQ in WFQ VP PQ map */
#define QM_WFQ_VP_PQ_VOQ_SHIFT 0

/* Bit of PF in WFQ VP PQ map */
#define QM_WFQ_VP_PQ_PF_E4_SHIFT 5
#define QM_WFQ_VP_PQ_PF_E5_SHIFT 6

/* 0x9000 = 4*9*1024 */
#define QM_WFQ_INC_VAL(weight) ((weight) * 0x9000)

/* Max WFQ increment value is 0.7 * upper bound */
#define QM_WFQ_MAX_INC_VAL ((QM_WFQ_UPPER_BOUND * 7) / 10)

/* Number of VOQs in E5 QmWfqCrd register */
#define QM_WFQ_CRD_E5_NUM_VOQS 16

/* RL constants: */

/* Period in us */
#define QM_RL_PERIOD 5

/* Period in 25MHz cycles */
#define QM_RL_PERIOD_CLK_25M (25 * QM_RL_PERIOD)

/* RL increment value - rate is specified in mbps. The factor of 1.01 was
 * added after seeing only a 99% factor reached in a 25Gbps port with DPDK RFC
 * 2544 test. In this scenario the PF RL was reducing the line rate to 99%
 * although the credit increment value was the correct one and FW calculated
 * correct packet sizes. The reason for the inaccuracy of the RL is unknown at
 * this point.
 */
#define QM_RL_INC_VAL(rate) \
	OSAL_MAX_T(u32, (u32)(((rate ? rate : 100000) * QM_RL_PERIOD * 101) / (8 * 100)), 1)

/* PF RL Upper bound is set to 10 * burst size of 1ms in 50Gbps */
#define QM_PF_RL_UPPER_BOUND 62500000

/* Max PF RL increment value is 0.7 * upper bound */
#define QM_PF_RL_MAX_INC_VAL ((QM_PF_RL_UPPER_BOUND * 7) / 10)

/* Vport RL Upper bound, link speed is in Mbps */
#define QM_VP_RL_UPPER_BOUND(speed) \
	((u32)OSAL_MAX_T(u32, QM_RL_INC_VAL(speed), 9700 + 1000))

/* Max Vport RL increment value is the Vport RL upper bound */
#define QM_VP_RL_MAX_INC_VAL(speed) QM_VP_RL_UPPER_BOUND(speed)

/* Vport RL credit threshold in case of QM bypass */
#define QM_VP_RL_BYPASS_THRESH_SPEED (QM_VP_RL_UPPER_BOUND(10000) - 1)

/* AFullOprtnstcCrdMask constants */
#define QM_OPPOR_LINE_VOQ_DEF 1
#define QM_OPPOR_FW_STOP_DEF 0
#define QM_OPPOR_PQ_EMPTY_DEF 1

/* Command Queue constants: */

/* Pure LB CmdQ lines (+spare) */
#define PBF_CMDQ_PURE_LB_LINES 150

#define PBF_CMDQ_LINES_E5_RSVD_RATIO 8

#define PBF_CMDQ_LINES_RT_OFFSET(ext_voq) \
	(PBF_REG_YCMD_QS_NUM_LINES_VOQ0_RT_OFFSET + \
	 ext_voq * (PBF_REG_YCMD_QS_NUM_LINES_VOQ1_RT_OFFSET - PBF_REG_YCMD_QS_NUM_LINES_VOQ0_RT_OFFSET))

#define PBF_BTB_GUARANTEED_RT_OFFSET(ext_voq) \
	(PBF_REG_BTB_GUARANTEED_VOQ0_RT_OFFSET + \
	 ext_voq * (PBF_REG_BTB_GUARANTEED_VOQ1_RT_OFFSET - PBF_REG_BTB_GUARANTEED_VOQ0_RT_OFFSET))

#define QM_VOQ_LINE_CRD(pbf_cmd_lines) \
	((((pbf_cmd_lines) - 4) * 2) | QM_LINE_CRD_REG_SIGN_BIT)

/* BTB: blocks constants (block size = 256B) */

/* 256B blocks in 9700B packet */
#define BTB_JUMBO_PKT_BLOCKS 38

/* Headroom per-port */
#define BTB_HEADROOM_BLOCKS BTB_JUMBO_PKT_BLOCKS
#define BTB_PURE_LB_FACTOR 10

/* Factored (hence really 0.7) */
#define BTB_PURE_LB_RATIO 7
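
/* Note: BTB_PURE_LB_FACTOR/BTB_PURE_LB_RATIO implement the fixed-point ratio
 * 0.7 used in ecore_btb_blocks_rt_init() below: dividing by (C * 10 + 7) and
 * later by 10 computes B / (C + 0.7) with integer arithmetic only.
 */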

/* QM stop command constants */
#define QM_STOP_PQ_MASK_WIDTH 32
#define QM_STOP_CMD_ADDR 2
#define QM_STOP_CMD_STRUCT_SIZE 2
#define QM_STOP_CMD_PAUSE_MASK_OFFSET 0
#define QM_STOP_CMD_PAUSE_MASK_SHIFT 0
#define QM_STOP_CMD_PAUSE_MASK_MASK -1
#define QM_STOP_CMD_GROUP_ID_OFFSET 1
#define QM_STOP_CMD_GROUP_ID_SHIFT 16
#define QM_STOP_CMD_GROUP_ID_MASK 15
#define QM_STOP_CMD_PQ_TYPE_OFFSET 1
#define QM_STOP_CMD_PQ_TYPE_SHIFT 24
#define QM_STOP_CMD_PQ_TYPE_MASK 1
#define QM_STOP_CMD_MAX_POLL_COUNT 100
#define QM_STOP_CMD_POLL_PERIOD_US 500

/* QM command macros */
#define QM_CMD_STRUCT_SIZE(cmd) cmd##_STRUCT_SIZE
#define QM_CMD_SET_FIELD(var, cmd, field, value) \
	SET_FIELD(var[cmd##_##field##_OFFSET], cmd##_##field, value)

#define QM_INIT_TX_PQ_MAP(p_hwfn, map, chip, pq_id, rl_valid, vp_pq_id, rl_id, ext_voq, wrr) \
	OSAL_MEMSET(&map, 0, sizeof(map)); \
	SET_FIELD(map.reg, QM_RF_PQ_MAP_##chip##_PQ_VALID, 1); \
	SET_FIELD(map.reg, QM_RF_PQ_MAP_##chip##_RL_VALID, rl_valid); \
	SET_FIELD(map.reg, QM_RF_PQ_MAP_##chip##_VP_PQ_ID, vp_pq_id); \
	SET_FIELD(map.reg, QM_RF_PQ_MAP_##chip##_RL_ID, rl_id); \
	SET_FIELD(map.reg, QM_RF_PQ_MAP_##chip##_VOQ, ext_voq); \
	SET_FIELD(map.reg, QM_RF_PQ_MAP_##chip##_WRR_WEIGHT_GROUP, wrr); \
	STORE_RT_REG(p_hwfn, QM_REG_TXPQMAP_RT_OFFSET + pq_id, *((u32 *)&map))

#define WRITE_PQ_INFO_TO_RAM 1
#define PQ_INFO_ELEMENT(vp, pf, tc, port, rl_valid, rl) \
	(((vp) << 0) | ((pf) << 12) | ((tc) << 16) | ((port) << 20) | \
	 ((rl_valid) << 22) | ((rl) << 24))
#define PQ_INFO_RAM_GRC_ADDRESS(pq_id) \
	(XSEM_REG_FAST_MEMORY + SEM_FAST_REG_INT_RAM + 21776 + (pq_id) * 4)

/******************** INTERNAL IMPLEMENTATION *********************/

/* Returns the external VOQ number */
static u8 ecore_get_ext_voq(struct ecore_hwfn *p_hwfn,
			    u8 port_id,
			    u8 tc,
			    u8 max_phys_tcs_per_port)
{
	if (tc == PURE_LB_TC)
		return NUM_OF_PHYS_TCS * (ECORE_IS_E5(p_hwfn->p_dev) ? MAX_NUM_PORTS_E5 : MAX_NUM_PORTS_BB) + port_id;
	else
		return port_id * (ECORE_IS_E5(p_hwfn->p_dev) ? NUM_OF_PHYS_TCS : max_phys_tcs_per_port) + tc;
}

/* Prepare PF RL enable/disable runtime init values */
static void ecore_enable_pf_rl(struct ecore_hwfn *p_hwfn,
			       bool pf_rl_en)
{
	STORE_RT_REG(p_hwfn, QM_REG_RLPFENABLE_RT_OFFSET, pf_rl_en ? 1 : 0);
	if (pf_rl_en) {
		u8 num_ext_voqs = ECORE_IS_E5(p_hwfn->p_dev) ? QM_E5_NUM_EXT_VOQ : MAX_NUM_VOQS_E4;
		u64 voq_bit_mask = ((u64)1 << num_ext_voqs) - 1;

		/* Enable RLs for all VOQs */
		STORE_RT_REG(p_hwfn, QM_REG_RLPFVOQENABLE_RT_OFFSET, (u32)voq_bit_mask);
#ifdef QM_REG_RLPFVOQENABLE_MSB_RT_OFFSET
		if (num_ext_voqs >= 32)
			STORE_RT_REG(p_hwfn, QM_REG_RLPFVOQENABLE_MSB_RT_OFFSET, (u32)(voq_bit_mask >> 32));
#endif

		/* Write RL period */
		STORE_RT_REG(p_hwfn, QM_REG_RLPFPERIOD_RT_OFFSET, QM_RL_PERIOD_CLK_25M);
		STORE_RT_REG(p_hwfn, QM_REG_RLPFPERIODTIMER_RT_OFFSET, QM_RL_PERIOD_CLK_25M);

		/* Set credit threshold for QM bypass flow */
		if (QM_BYPASS_EN)
			STORE_RT_REG(p_hwfn, QM_REG_AFULLQMBYPTHRPFRL_RT_OFFSET, QM_PF_RL_UPPER_BOUND);
	}
}
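
/* Worked example of the RL timing above (arithmetic only): with QM_RL_PERIOD
 * of 5us, QM_RL_PERIOD_CLK_25M = 25 * 5 = 125 clock cycles, and a 10Gbps PF
 * rate gives QM_RL_INC_VAL(10000) = (10000 * 5 * 101) / 800 = 6312 credit
 * units per period (the 1.01 factor compensates the RFC 2544 inaccuracy
 * described above).
 */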

/* Prepare PF WFQ enable/disable runtime init values */
static void ecore_enable_pf_wfq(struct ecore_hwfn *p_hwfn,
				bool pf_wfq_en)
{
	STORE_RT_REG(p_hwfn, QM_REG_WFQPFENABLE_RT_OFFSET, pf_wfq_en ? 1 : 0);

	/* Set credit threshold for QM bypass flow */
	if (pf_wfq_en && QM_BYPASS_EN)
		STORE_RT_REG(p_hwfn, QM_REG_AFULLQMBYPTHRPFWFQ_RT_OFFSET, QM_WFQ_UPPER_BOUND);
}

/* Prepare VPORT RL enable/disable runtime init values */
static void ecore_enable_vport_rl(struct ecore_hwfn *p_hwfn,
				  bool vport_rl_en)
{
	STORE_RT_REG(p_hwfn, QM_REG_RLGLBLENABLE_RT_OFFSET, vport_rl_en ? 1 : 0);
	if (vport_rl_en) {
		/* Write RL period (use timer 0 only) */
		STORE_RT_REG(p_hwfn, QM_REG_RLGLBLPERIOD_0_RT_OFFSET, QM_RL_PERIOD_CLK_25M);
		STORE_RT_REG(p_hwfn, QM_REG_RLGLBLPERIODTIMER_0_RT_OFFSET, QM_RL_PERIOD_CLK_25M);

		/* Set credit threshold for QM bypass flow */
		if (QM_BYPASS_EN)
			STORE_RT_REG(p_hwfn, QM_REG_AFULLQMBYPTHRGLBLRL_RT_OFFSET, QM_VP_RL_BYPASS_THRESH_SPEED);
	}
}

/* Prepare VPORT WFQ enable/disable runtime init values */
static void ecore_enable_vport_wfq(struct ecore_hwfn *p_hwfn,
				   bool vport_wfq_en)
{
	STORE_RT_REG(p_hwfn, QM_REG_WFQVPENABLE_RT_OFFSET, vport_wfq_en ? 1 : 0);

	/* Set credit threshold for QM bypass flow */
	if (vport_wfq_en && QM_BYPASS_EN)
		STORE_RT_REG(p_hwfn, QM_REG_AFULLQMBYPTHRVPWFQ_RT_OFFSET, QM_WFQ_UPPER_BOUND);
}

/* Prepare runtime init values to allocate PBF command queue lines for
 * the specified VOQ.
 */
static void ecore_cmdq_lines_voq_rt_init(struct ecore_hwfn *p_hwfn,
					 u8 ext_voq,
					 u16 cmdq_lines)
{
	u32 qm_line_crd;

	qm_line_crd = QM_VOQ_LINE_CRD(cmdq_lines);

	OVERWRITE_RT_REG(p_hwfn, PBF_CMDQ_LINES_RT_OFFSET(ext_voq), (u32)cmdq_lines);
	STORE_RT_REG(p_hwfn, QM_REG_VOQCRDLINE_RT_OFFSET + ext_voq, qm_line_crd);
	STORE_RT_REG(p_hwfn, QM_REG_VOQINITCRDLINE_RT_OFFSET + ext_voq, qm_line_crd);
}
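
/* Example: for the pure LB VOQ, cmdq_lines = PBF_CMDQ_PURE_LB_LINES = 150,
 * so QM_VOQ_LINE_CRD(150) = (150 - 4) * 2 = 292, OR'ed with
 * QM_LINE_CRD_REG_SIGN_BIT (the credit registers presumably hold signed
 * values, hence the explicit sign bit).
 */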

/* Prepare runtime init values to allocate PBF command queue lines. */
static void ecore_cmdq_lines_rt_init(struct ecore_hwfn *p_hwfn,
				     u8 max_ports_per_engine,
				     u8 max_phys_tcs_per_port,
				     struct init_qm_port_params port_params[MAX_NUM_PORTS])
{
	u8 tc, ext_voq, port_id, num_tcs_in_port;
	u8 num_ext_voqs = ECORE_IS_E5(p_hwfn->p_dev) ? QM_E5_NUM_EXT_VOQ : MAX_NUM_VOQS_E4;

	/* Clear PBF lines of all VOQs */
	for (ext_voq = 0; ext_voq < num_ext_voqs; ext_voq++)
		STORE_RT_REG(p_hwfn, PBF_CMDQ_LINES_RT_OFFSET(ext_voq), 0);

	for (port_id = 0; port_id < max_ports_per_engine; port_id++) {
		u16 phys_lines, phys_lines_per_tc;

		if (!port_params[port_id].active)
			continue;

		/* Find number of command queue lines to divide between the
		 * active physical TCs. In E5, 1/8 of the lines are reserved.
		 * The lines for the pure LB TC are subtracted.
		 */
		phys_lines = port_params[port_id].num_pbf_cmd_lines;
		if (ECORE_IS_E5(p_hwfn->p_dev))
			phys_lines -= DIV_ROUND_UP(phys_lines, PBF_CMDQ_LINES_E5_RSVD_RATIO);
		phys_lines -= PBF_CMDQ_PURE_LB_LINES;

		/* Find #lines per active physical TC */
		num_tcs_in_port = 0;
		for (tc = 0; tc < max_phys_tcs_per_port; tc++)
			if (((port_params[port_id].active_phys_tcs >> tc) & 0x1) == 1)
				num_tcs_in_port++;
		phys_lines_per_tc = phys_lines / num_tcs_in_port;

		/* Init registers per active TC */
		for (tc = 0; tc < max_phys_tcs_per_port; tc++) {
			ext_voq = ecore_get_ext_voq(p_hwfn, port_id, tc, max_phys_tcs_per_port);
			if (((port_params[port_id].active_phys_tcs >> tc) & 0x1) == 1)
				ecore_cmdq_lines_voq_rt_init(p_hwfn, ext_voq, phys_lines_per_tc);
		}

		/* Init registers for pure LB TC */
		ext_voq = ecore_get_ext_voq(p_hwfn, port_id, PURE_LB_TC, max_phys_tcs_per_port);
		ecore_cmdq_lines_voq_rt_init(p_hwfn, ext_voq, PBF_CMDQ_PURE_LB_LINES);
	}
}
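
/* Illustrative division (E4, hypothetical numbers): a port with 1000 PBF
 * command lines and 4 active physical TCs gets phys_lines = 1000 - 150 = 850
 * after the pure LB reservation, i.e. phys_lines_per_tc = 850 / 4 = 212; on
 * E5 an extra DIV_ROUND_UP(1000, 8) = 125 lines would be reserved first.
 */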

/* Prepare runtime init values to allocate guaranteed BTB blocks for the
 * specified port. The guaranteed BTB space is divided between the TCs as
 * follows (shared space is currently not used):
 * 1. Parameters:
 *    B - BTB blocks for this port
 *    C - Number of physical TCs for this port
 * 2. Calculation:
 *    a. 38 blocks (9700B jumbo frame) are allocated for global per-port
 *       headroom.
 *    b. B = B - 38 (remainder after global headroom allocation).
 *    c. MAX(38, B/(C+0.7)) blocks are allocated for the pure LB VOQ.
 *    d. B = B - MAX(38, B/(C+0.7)) (remainder after pure LB allocation).
 *    e. B/C blocks are allocated for each physical TC.
 * Assumptions:
 * - MTU is up to 9700 bytes (38 blocks)
 * - All TCs are considered symmetrical (same rate and packet size)
 * - No optimization for lossy TC (all are considered lossless). Shared space
 *   is not enabled and allocated for each TC.
 */
static void ecore_btb_blocks_rt_init(struct ecore_hwfn *p_hwfn,
				     u8 max_ports_per_engine,
				     u8 max_phys_tcs_per_port,
				     struct init_qm_port_params port_params[MAX_NUM_PORTS])
{
	u32 usable_blocks, pure_lb_blocks, phys_blocks;
	u8 tc, ext_voq, port_id, num_tcs_in_port;

	for (port_id = 0; port_id < max_ports_per_engine; port_id++) {
		if (!port_params[port_id].active)
			continue;

		/* Subtract headroom blocks */
		usable_blocks = port_params[port_id].num_btb_blocks - BTB_HEADROOM_BLOCKS;

		/* Find blocks per physical TC. Use a factor to avoid floating
		 * point arithmetic.
		 */
		num_tcs_in_port = 0;
		for (tc = 0; tc < NUM_OF_PHYS_TCS; tc++)
			if (((port_params[port_id].active_phys_tcs >> tc) & 0x1) == 1)
				num_tcs_in_port++;

		pure_lb_blocks = (usable_blocks * BTB_PURE_LB_FACTOR) /
				 (num_tcs_in_port * BTB_PURE_LB_FACTOR + BTB_PURE_LB_RATIO);
		pure_lb_blocks = OSAL_MAX_T(u32, BTB_JUMBO_PKT_BLOCKS, pure_lb_blocks / BTB_PURE_LB_FACTOR);
		phys_blocks = (usable_blocks - pure_lb_blocks) / num_tcs_in_port;

		/* Init physical TCs */
		for (tc = 0; tc < NUM_OF_PHYS_TCS; tc++) {
			if (((port_params[port_id].active_phys_tcs >> tc) & 0x1) == 1) {
				ext_voq = ecore_get_ext_voq(p_hwfn, port_id, tc, max_phys_tcs_per_port);
				STORE_RT_REG(p_hwfn, PBF_BTB_GUARANTEED_RT_OFFSET(ext_voq), phys_blocks);
			}
		}

		/* Init pure LB TC */
		ext_voq = ecore_get_ext_voq(p_hwfn, port_id, PURE_LB_TC, max_phys_tcs_per_port);
		STORE_RT_REG(p_hwfn, PBF_BTB_GUARANTEED_RT_OFFSET(ext_voq), pure_lb_blocks);
	}
}
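
/* Worked example (hypothetical numbers): a port with 1000 BTB blocks and 4
 * active TCs has usable_blocks = 1000 - 38 = 962; pure_lb_blocks =
 * (962 * 10) / (4 * 10 + 7) = 204, then max(38, 204 / 10) = 38; each
 * physical TC is then guaranteed (962 - 38) / 4 = 231 blocks.
 */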

/* Prepare Tx PQ mapping runtime init values for the specified PF */
static void ecore_tx_pq_map_rt_init(struct ecore_hwfn *p_hwfn,
				    struct ecore_ptt *p_ptt,
				    u8 port_id,
				    u8 pf_id,
				    u8 max_phys_tcs_per_port,
				    bool is_pf_loading,
				    u32 num_pf_cids,
				    u32 num_vf_cids,
				    u16 start_pq,
				    u16 num_pf_pqs,
				    u16 num_vf_pqs,
				    u8 start_vport,
				    u32 base_mem_addr_4kb,
				    struct init_qm_pq_params *pq_params,
				    struct init_qm_vport_params *vport_params)
{
	/* A bit per Tx PQ indicating if the PQ is associated with a VF */
	u32 tx_pq_vf_mask[MAX_QM_TX_QUEUES / QM_PF_QUEUE_GROUP_SIZE] = { 0 };
	u32 num_tx_pq_vf_masks = MAX_QM_TX_QUEUES / QM_PF_QUEUE_GROUP_SIZE;
	u16 num_pqs, first_pq_group, last_pq_group, i, j, pq_id, pq_group;
	u32 pq_mem_4kb, vport_pq_mem_4kb, mem_addr_4kb;

	num_pqs = num_pf_pqs + num_vf_pqs;

	first_pq_group = start_pq / QM_PF_QUEUE_GROUP_SIZE;
	last_pq_group = (start_pq + num_pqs - 1) / QM_PF_QUEUE_GROUP_SIZE;

	pq_mem_4kb = QM_PQ_MEM_4KB(num_pf_cids);
	vport_pq_mem_4kb = QM_PQ_MEM_4KB(num_vf_cids);
	mem_addr_4kb = base_mem_addr_4kb;

	/* Set mapping from PQ group to PF */
	for (pq_group = first_pq_group; pq_group <= last_pq_group; pq_group++)
		STORE_RT_REG(p_hwfn, QM_REG_PQTX2PF_0_RT_OFFSET + pq_group, (u32)(pf_id));

	/* Set PQ sizes */
	STORE_RT_REG(p_hwfn, QM_REG_MAXPQSIZE_0_RT_OFFSET, QM_PQ_SIZE_256B(num_pf_cids));
	STORE_RT_REG(p_hwfn, QM_REG_MAXPQSIZE_1_RT_OFFSET, QM_PQ_SIZE_256B(num_vf_cids));

	/* Go over all Tx PQs */
	for (i = 0, pq_id = start_pq; i < num_pqs; i++, pq_id++) {
		u32 max_qm_global_rls = MAX_QM_GLOBAL_RLS;
		u8 ext_voq, vport_id_in_pf;
		bool is_vf_pq, rl_valid;
		u16 first_tx_pq_id;

		ext_voq = ecore_get_ext_voq(p_hwfn, port_id, pq_params[i].tc_id, max_phys_tcs_per_port);
		is_vf_pq = (i >= num_pf_pqs);
		rl_valid = pq_params[i].rl_valid && pq_params[i].vport_id < max_qm_global_rls;

		/* Update first Tx PQ of VPORT/TC */
		vport_id_in_pf = pq_params[i].vport_id - start_vport;
		first_tx_pq_id = vport_params[vport_id_in_pf].first_tx_pq_id[pq_params[i].tc_id];
		if (first_tx_pq_id == QM_INVALID_PQ_ID) {
			u32 map_val = (ext_voq << QM_WFQ_VP_PQ_VOQ_SHIFT) |
				      (pf_id << (ECORE_IS_E5(p_hwfn->p_dev) ? QM_WFQ_VP_PQ_PF_E5_SHIFT : QM_WFQ_VP_PQ_PF_E4_SHIFT));

			/* Create new VP PQ */
			vport_params[vport_id_in_pf].first_tx_pq_id[pq_params[i].tc_id] = pq_id;
			first_tx_pq_id = pq_id;

			/* Map VP PQ to VOQ and PF */
			STORE_RT_REG(p_hwfn, QM_REG_WFQVPMAP_RT_OFFSET + first_tx_pq_id, map_val);
		}

		/* Check RL ID */
		if (pq_params[i].rl_valid && pq_params[i].vport_id >= max_qm_global_rls)
			DP_NOTICE(p_hwfn, true, "Invalid VPORT ID for rate limiter configuration\n");

		/* Prepare PQ map entry */
		if (ECORE_IS_E5(p_hwfn->p_dev)) {
			struct qm_rf_pq_map_e5 tx_pq_map;

			QM_INIT_TX_PQ_MAP(p_hwfn, tx_pq_map, E5, pq_id, rl_valid ? 1 : 0, first_tx_pq_id,
					  rl_valid ? pq_params[i].vport_id : 0, ext_voq, pq_params[i].wrr_group);
		} else {
			struct qm_rf_pq_map_e4 tx_pq_map;

			QM_INIT_TX_PQ_MAP(p_hwfn, tx_pq_map, E4, pq_id, rl_valid ? 1 : 0, first_tx_pq_id,
					  rl_valid ? pq_params[i].vport_id : 0, ext_voq, pq_params[i].wrr_group);
		}

		/* Set PQ base address */
		STORE_RT_REG(p_hwfn, QM_REG_BASEADDRTXPQ_RT_OFFSET + pq_id, mem_addr_4kb);

		/* Clear PQ pointer table entry (64 bit) */
		if (is_pf_loading)
			for (j = 0; j < 2; j++)
				STORE_RT_REG(p_hwfn, QM_REG_PTRTBLTX_RT_OFFSET + (pq_id * 2) + j, 0);

		/* Write PQ info to RAM */
		if (WRITE_PQ_INFO_TO_RAM != 0) {
			u32 pq_info = 0;

			pq_info = PQ_INFO_ELEMENT(first_tx_pq_id, pf_id, pq_params[i].tc_id, port_id,
						  rl_valid ? 1 : 0, rl_valid ? pq_params[i].vport_id : 0);
			ecore_wr(p_hwfn, p_ptt, PQ_INFO_RAM_GRC_ADDRESS(pq_id), pq_info);
		}

		/* If VF PQ, add indication to PQ VF mask */
		if (is_vf_pq) {
			tx_pq_vf_mask[pq_id / QM_PF_QUEUE_GROUP_SIZE] |= (1 << (pq_id % QM_PF_QUEUE_GROUP_SIZE));
			mem_addr_4kb += vport_pq_mem_4kb;
		} else {
			mem_addr_4kb += pq_mem_4kb;
		}
	}

	/* Store Tx PQ VF mask to size select register */
	for (i = 0; i < num_tx_pq_vf_masks; i++)
		if (tx_pq_vf_mask[i])
			STORE_RT_REG(p_hwfn, QM_REG_MAXPQSIZETXSEL_0_RT_OFFSET + i, tx_pq_vf_mask[i]);
}

/* Prepare Other PQ mapping runtime init values for the specified PF */
static void ecore_other_pq_map_rt_init(struct ecore_hwfn *p_hwfn,
				       u8 pf_id,
				       bool is_pf_loading,
				       u32 num_pf_cids,
				       u32 num_tids,
				       u32 base_mem_addr_4kb)
{
	u32 pq_size, pq_mem_4kb, mem_addr_4kb;
	u16 i, j, pq_id, pq_group;

	/* A single other PQ group is used in each PF, where PQ group i is used
	 * in PF i.
	 */
	pq_group = pf_id;
	pq_size = num_pf_cids + num_tids;
	pq_mem_4kb = QM_PQ_MEM_4KB(pq_size);
	mem_addr_4kb = base_mem_addr_4kb;

	/* Map PQ group to PF */
	STORE_RT_REG(p_hwfn, QM_REG_PQOTHER2PF_0_RT_OFFSET + pq_group, (u32)(pf_id));

	/* Set PQ sizes */
	STORE_RT_REG(p_hwfn, QM_REG_MAXPQSIZE_2_RT_OFFSET, QM_PQ_SIZE_256B(pq_size));

	for (i = 0, pq_id = pf_id * QM_PF_QUEUE_GROUP_SIZE; i < QM_OTHER_PQS_PER_PF; i++, pq_id++) {
		/* Set PQ base address */
		STORE_RT_REG(p_hwfn, QM_REG_BASEADDROTHERPQ_RT_OFFSET + pq_id, mem_addr_4kb);

		/* Clear PQ pointer table entry */
		if (is_pf_loading)
			for (j = 0; j < 2; j++)
				STORE_RT_REG(p_hwfn, QM_REG_PTRTBLOTHER_RT_OFFSET + (pq_id * 2) + j, 0);

		mem_addr_4kb += pq_mem_4kb;
	}
}
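
/* Note: unlike the Tx PQs, the "other" PQs occupy a fixed window - PQ IDs
 * pf_id * QM_PF_QUEUE_GROUP_SIZE through + (QM_OTHER_PQS_PER_PF - 1) - each
 * consuming pq_mem_4kb pages, so the caller only supplies the base address
 * (ecore_qm_pf_rt_init() below passes 0 and places the Tx PQs after them).
 */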

/* Prepare PF WFQ runtime init values for the specified PF.
 * Return -1 on error.
 */
static int ecore_pf_wfq_rt_init(struct ecore_hwfn *p_hwfn,
				u8 port_id,
				u8 pf_id,
				u16 pf_wfq,
				u8 max_phys_tcs_per_port,
				u16 num_tx_pqs,
				struct init_qm_pq_params *pq_params)
{
	u32 inc_val, crd_reg_offset;
	u8 ext_voq;
	u16 i;

	inc_val = QM_WFQ_INC_VAL(pf_wfq);
	if (!inc_val || inc_val > QM_WFQ_MAX_INC_VAL) {
		DP_NOTICE(p_hwfn, true, "Invalid PF WFQ weight configuration\n");
		return -1;
	}

	for (i = 0; i < num_tx_pqs; i++) {
		ext_voq = ecore_get_ext_voq(p_hwfn, port_id, pq_params[i].tc_id, max_phys_tcs_per_port);
		crd_reg_offset = ECORE_IS_E5(p_hwfn->p_dev) ?
			(ext_voq < QM_WFQ_CRD_E5_NUM_VOQS ? QM_REG_WFQPFCRD_RT_OFFSET : QM_REG_WFQPFCRD_MSB_RT_OFFSET) +
			(ext_voq % QM_WFQ_CRD_E5_NUM_VOQS) * MAX_NUM_PFS_E5 + pf_id :
			(pf_id < MAX_NUM_PFS_BB ? QM_REG_WFQPFCRD_RT_OFFSET : QM_REG_WFQPFCRD_MSB_RT_OFFSET) +
			ext_voq * MAX_NUM_PFS_BB + (pf_id % MAX_NUM_PFS_BB);
		OVERWRITE_RT_REG(p_hwfn, crd_reg_offset, (u32)QM_WFQ_CRD_REG_SIGN_BIT);
	}

	STORE_RT_REG(p_hwfn, QM_REG_WFQPFUPPERBOUND_RT_OFFSET + pf_id, QM_WFQ_UPPER_BOUND | (u32)QM_WFQ_CRD_REG_SIGN_BIT);
	STORE_RT_REG(p_hwfn, QM_REG_WFQPFWEIGHT_RT_OFFSET + pf_id, inc_val);

	return 0;
}

/* Prepare PF RL runtime init values for the specified PF.
 * Return -1 on error.
 */
static int ecore_pf_rl_rt_init(struct ecore_hwfn *p_hwfn,
			       u8 pf_id,
			       u32 pf_rl)
{
	u32 inc_val;

	inc_val = QM_RL_INC_VAL(pf_rl);
	if (inc_val > QM_PF_RL_MAX_INC_VAL) {
		DP_NOTICE(p_hwfn, true, "Invalid PF rate limit configuration\n");
		return -1;
	}

	STORE_RT_REG(p_hwfn, QM_REG_RLPFCRD_RT_OFFSET + pf_id, (u32)QM_RL_CRD_REG_SIGN_BIT);
	STORE_RT_REG(p_hwfn, QM_REG_RLPFUPPERBOUND_RT_OFFSET + pf_id, QM_PF_RL_UPPER_BOUND | (u32)QM_RL_CRD_REG_SIGN_BIT);
	STORE_RT_REG(p_hwfn, QM_REG_RLPFINCVAL_RT_OFFSET + pf_id, inc_val);

	return 0;
}

/* Prepare VPORT WFQ runtime init values for the specified VPORTs.
 * Return -1 on error.
 */
static int ecore_vp_wfq_rt_init(struct ecore_hwfn *p_hwfn,
				u8 num_vports,
				struct init_qm_vport_params *vport_params)
{
	u16 vport_pq_id;
	u32 inc_val;
	u8 tc, i;

	/* Go over all PF VPORTs */
	for (i = 0; i < num_vports; i++) {
		if (!vport_params[i].vport_wfq)
			continue;

		inc_val = QM_WFQ_INC_VAL(vport_params[i].vport_wfq);
		if (inc_val > QM_WFQ_MAX_INC_VAL) {
			DP_NOTICE(p_hwfn, true, "Invalid VPORT WFQ weight configuration\n");
			return -1;
		}

		/* Each VPORT can have several VPORT PQ IDs for various TCs */
		for (tc = 0; tc < NUM_OF_TCS; tc++) {
			vport_pq_id = vport_params[i].first_tx_pq_id[tc];
			if (vport_pq_id != QM_INVALID_PQ_ID) {
				STORE_RT_REG(p_hwfn, QM_REG_WFQVPCRD_RT_OFFSET + vport_pq_id, (u32)QM_WFQ_CRD_REG_SIGN_BIT);
				STORE_RT_REG(p_hwfn, QM_REG_WFQVPWEIGHT_RT_OFFSET + vport_pq_id, inc_val);
			}
		}
	}

	return 0;
}
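
/* Note on WFQ weight bounds: QM_WFQ_INC_VAL(weight) = weight * 0x9000
 * (36864), and QM_WFQ_MAX_INC_VAL = 0.7 * 62500000 = 43750000, so the
 * largest weight that passes the checks above is 43750000 / 36864 = 1186.
 */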

/* Prepare VPORT RL runtime init values for the specified VPORTs.
 * Return -1 on error.
 */
static int ecore_vport_rl_rt_init(struct ecore_hwfn *p_hwfn,
				  u8 start_vport,
				  u8 num_vports,
				  u32 link_speed,
				  struct init_qm_vport_params *vport_params)
{
	u8 i, vport_id;
	u32 inc_val;

	if (start_vport + num_vports >= MAX_QM_GLOBAL_RLS) {
		DP_NOTICE(p_hwfn, true, "Invalid VPORT ID for rate limiter configuration\n");
		return -1;
	}

	/* Go over all PF VPORTs */
	for (i = 0, vport_id = start_vport; i < num_vports; i++, vport_id++) {
		inc_val = QM_RL_INC_VAL(vport_params[i].vport_rl ? vport_params[i].vport_rl : link_speed);
		if (inc_val > QM_VP_RL_MAX_INC_VAL(link_speed)) {
			DP_NOTICE(p_hwfn, true, "Invalid VPORT rate-limit configuration\n");
			return -1;
		}

		STORE_RT_REG(p_hwfn, QM_REG_RLGLBLCRD_RT_OFFSET + vport_id, (u32)QM_RL_CRD_REG_SIGN_BIT);
		STORE_RT_REG(p_hwfn, QM_REG_RLGLBLUPPERBOUND_RT_OFFSET + vport_id,
			     QM_VP_RL_UPPER_BOUND(link_speed) | (u32)QM_RL_CRD_REG_SIGN_BIT);
		STORE_RT_REG(p_hwfn, QM_REG_RLGLBLINCVAL_RT_OFFSET + vport_id, inc_val);
	}

	return 0;
}

static bool ecore_poll_on_qm_cmd_ready(struct ecore_hwfn *p_hwfn,
				       struct ecore_ptt *p_ptt)
{
	u32 reg_val, i;

	for (i = 0, reg_val = 0; i < QM_STOP_CMD_MAX_POLL_COUNT && !reg_val; i++) {
		OSAL_UDELAY(QM_STOP_CMD_POLL_PERIOD_US);
		reg_val = ecore_rd(p_hwfn, p_ptt, QM_REG_SDMCMDREADY);
	}

	/* Check if timeout while waiting for SDM command ready */
	if (i == QM_STOP_CMD_MAX_POLL_COUNT) {
		DP_VERBOSE(p_hwfn, ECORE_MSG_DEBUG, "Timeout when waiting for QM SDM command ready signal\n");
		return false;
	}

	return true;
}

static bool ecore_send_qm_cmd(struct ecore_hwfn *p_hwfn,
			      struct ecore_ptt *p_ptt,
			      u32 cmd_addr,
			      u32 cmd_data_lsb,
			      u32 cmd_data_msb)
{
	if (!ecore_poll_on_qm_cmd_ready(p_hwfn, p_ptt))
		return false;

	ecore_wr(p_hwfn, p_ptt, QM_REG_SDMCMDADDR, cmd_addr);
	ecore_wr(p_hwfn, p_ptt, QM_REG_SDMCMDDATALSB, cmd_data_lsb);
	ecore_wr(p_hwfn, p_ptt, QM_REG_SDMCMDDATAMSB, cmd_data_msb);
	ecore_wr(p_hwfn, p_ptt, QM_REG_SDMCMDGO, 1);
	ecore_wr(p_hwfn, p_ptt, QM_REG_SDMCMDGO, 0);

	return ecore_poll_on_qm_cmd_ready(p_hwfn, p_ptt);
}

/******************** INTERFACE IMPLEMENTATION *********************/

u32 ecore_qm_pf_mem_size(u32 num_pf_cids,
			 u32 num_vf_cids,
			 u32 num_tids,
			 u16 num_pf_pqs,
			 u16 num_vf_pqs)
{
	return QM_PQ_MEM_4KB(num_pf_cids) * num_pf_pqs +
	       QM_PQ_MEM_4KB(num_vf_cids) * num_vf_pqs +
	       QM_PQ_MEM_4KB(num_pf_cids + num_tids) * QM_OTHER_PQS_PER_PF;
}

int ecore_qm_common_rt_init(struct ecore_hwfn *p_hwfn,
			    u8 max_ports_per_engine,
			    u8 max_phys_tcs_per_port,
			    bool pf_rl_en,
			    bool pf_wfq_en,
			    bool vport_rl_en,
			    bool vport_wfq_en,
			    struct init_qm_port_params port_params[MAX_NUM_PORTS])
{
	u32 mask;

	/* Init AFullOprtnstcCrdMask */
	mask = (QM_OPPOR_LINE_VOQ_DEF << QM_RF_OPPORTUNISTIC_MASK_LINEVOQ_SHIFT) |
	       (QM_BYTE_CRD_EN << QM_RF_OPPORTUNISTIC_MASK_BYTEVOQ_SHIFT) |
	       (pf_wfq_en << QM_RF_OPPORTUNISTIC_MASK_PFWFQ_SHIFT) |
	       (vport_wfq_en << QM_RF_OPPORTUNISTIC_MASK_VPWFQ_SHIFT) |
	       (pf_rl_en << QM_RF_OPPORTUNISTIC_MASK_PFRL_SHIFT) |
	       (vport_rl_en << QM_RF_OPPORTUNISTIC_MASK_VPQCNRL_SHIFT) |
	       (QM_OPPOR_FW_STOP_DEF << QM_RF_OPPORTUNISTIC_MASK_FWPAUSE_SHIFT) |
	       (QM_OPPOR_PQ_EMPTY_DEF << QM_RF_OPPORTUNISTIC_MASK_QUEUEEMPTY_SHIFT);
	STORE_RT_REG(p_hwfn, QM_REG_AFULLOPRTNSTCCRDMASK_RT_OFFSET, mask);
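
	/* With PF/VPORT WFQ and RL all enabled, this sets the LINEVOQ,
	 * BYTEVOQ, PFWFQ, VPWFQ, PFRL, VPQCNRL and QUEUEEMPTY bits while
	 * leaving FWPAUSE clear (QM_OPPOR_FW_STOP_DEF is 0).
	 */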

	/* Enable/disable PF RL */
	ecore_enable_pf_rl(p_hwfn, pf_rl_en);

	/* Enable/disable PF WFQ */
	ecore_enable_pf_wfq(p_hwfn, pf_wfq_en);

	/* Enable/disable VPORT RL */
	ecore_enable_vport_rl(p_hwfn, vport_rl_en);

	/* Enable/disable VPORT WFQ */
	ecore_enable_vport_wfq(p_hwfn, vport_wfq_en);

	/* Init PBF CMDQ line credit */
	ecore_cmdq_lines_rt_init(p_hwfn, max_ports_per_engine, max_phys_tcs_per_port, port_params);

	/* Init BTB blocks in PBF */
	ecore_btb_blocks_rt_init(p_hwfn, max_ports_per_engine, max_phys_tcs_per_port, port_params);

	return 0;
}

int ecore_qm_pf_rt_init(struct ecore_hwfn *p_hwfn,
			struct ecore_ptt *p_ptt,
			u8 port_id,
			u8 pf_id,
			u8 max_phys_tcs_per_port,
			bool is_pf_loading,
			u32 num_pf_cids,
			u32 num_vf_cids,
			u32 num_tids,
			u16 start_pq,
			u16 num_pf_pqs,
			u16 num_vf_pqs,
			u8 start_vport,
			u8 num_vports,
			u16 pf_wfq,
			u32 pf_rl,
			u32 link_speed,
			struct init_qm_pq_params *pq_params,
			struct init_qm_vport_params *vport_params)
{
	u32 other_mem_size_4kb;
	u8 tc, i;

	other_mem_size_4kb = QM_PQ_MEM_4KB(num_pf_cids + num_tids) * QM_OTHER_PQS_PER_PF;

	/* Clear first Tx PQ ID array for each VPORT */
	for (i = 0; i < num_vports; i++)
		for (tc = 0; tc < NUM_OF_TCS; tc++)
			vport_params[i].first_tx_pq_id[tc] = QM_INVALID_PQ_ID;

	/* Map Other PQs (if any) */
#if QM_OTHER_PQS_PER_PF > 0
	ecore_other_pq_map_rt_init(p_hwfn, pf_id, is_pf_loading, num_pf_cids, num_tids, 0);
#endif

	/* Map Tx PQs */
	ecore_tx_pq_map_rt_init(p_hwfn, p_ptt, port_id, pf_id, max_phys_tcs_per_port, is_pf_loading,
				num_pf_cids, num_vf_cids, start_pq, num_pf_pqs, num_vf_pqs, start_vport,
				other_mem_size_4kb, pq_params, vport_params);

	/* Init PF WFQ */
	if (pf_wfq)
		if (ecore_pf_wfq_rt_init(p_hwfn, port_id, pf_id, pf_wfq, max_phys_tcs_per_port,
					 num_pf_pqs + num_vf_pqs, pq_params))
			return -1;

	/* Init PF RL */
	if (ecore_pf_rl_rt_init(p_hwfn, pf_id, pf_rl))
		return -1;

	/* Set VPORT WFQ */
	if (ecore_vp_wfq_rt_init(p_hwfn, num_vports, vport_params))
		return -1;

	/* Set VPORT RL */
	if (ecore_vport_rl_rt_init(p_hwfn, start_vport, num_vports, link_speed, vport_params))
		return -1;

	return 0;
}

int ecore_init_pf_wfq(struct ecore_hwfn *p_hwfn,
		      struct ecore_ptt *p_ptt,
		      u8 pf_id,
		      u16 pf_wfq)
{
	u32 inc_val;

	inc_val = QM_WFQ_INC_VAL(pf_wfq);
	if (!inc_val || inc_val > QM_WFQ_MAX_INC_VAL) {
		DP_NOTICE(p_hwfn, true, "Invalid PF WFQ weight configuration\n");
		return -1;
	}

	ecore_wr(p_hwfn, p_ptt, QM_REG_WFQPFWEIGHT + pf_id * 4, inc_val);

	return 0;
}

int ecore_init_pf_rl(struct ecore_hwfn *p_hwfn,
		     struct ecore_ptt *p_ptt,
		     u8 pf_id,
		     u32 pf_rl)
{
	u32 inc_val;

	inc_val = QM_RL_INC_VAL(pf_rl);
	if (inc_val > QM_PF_RL_MAX_INC_VAL) {
		DP_NOTICE(p_hwfn, true, "Invalid PF rate limit configuration\n");
		return -1;
	}

	ecore_wr(p_hwfn, p_ptt, QM_REG_RLPFCRD + pf_id * 4, (u32)QM_RL_CRD_REG_SIGN_BIT);
	ecore_wr(p_hwfn, p_ptt, QM_REG_RLPFINCVAL + pf_id * 4, inc_val);

	return 0;
}
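
/* Note: ecore_init_pf_wfq()/ecore_init_pf_rl() above reconfigure a running
 * PF through direct GRC writes (ecore_wr), whereas the *_rt_init() flavors
 * only stage values in the runtime array (STORE_RT_REG) to be applied later
 * by the init tool during PF load.
 */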

int ecore_init_vport_wfq(struct ecore_hwfn *p_hwfn,
			 struct ecore_ptt *p_ptt,
			 u16 first_tx_pq_id[NUM_OF_TCS],
			 u16 vport_wfq)
{
	u16 vport_pq_id;
	u32 inc_val;
	u8 tc;

	inc_val = QM_WFQ_INC_VAL(vport_wfq);
	if (!inc_val || inc_val > QM_WFQ_MAX_INC_VAL) {
		DP_NOTICE(p_hwfn, true, "Invalid VPORT WFQ weight configuration\n");
		return -1;
	}

	for (tc = 0; tc < NUM_OF_TCS; tc++) {
		vport_pq_id = first_tx_pq_id[tc];
		if (vport_pq_id != QM_INVALID_PQ_ID) {
			ecore_wr(p_hwfn, p_ptt, QM_REG_WFQVPWEIGHT + vport_pq_id * 4, inc_val);
		}
	}

	return 0;
}

int ecore_init_vport_rl(struct ecore_hwfn *p_hwfn,
			struct ecore_ptt *p_ptt,
			u8 vport_id,
			u32 vport_rl,
			u32 link_speed)
{
	u32 inc_val, max_qm_global_rls = MAX_QM_GLOBAL_RLS;

	if (vport_id >= max_qm_global_rls) {
		DP_NOTICE(p_hwfn, true, "Invalid VPORT ID for rate limiter configuration\n");
		return -1;
	}

	inc_val = QM_RL_INC_VAL(vport_rl ? vport_rl : link_speed);
	if (inc_val > QM_VP_RL_MAX_INC_VAL(link_speed)) {
		DP_NOTICE(p_hwfn, true, "Invalid VPORT rate-limit configuration\n");
		return -1;
	}

	ecore_wr(p_hwfn, p_ptt, QM_REG_RLGLBLCRD + vport_id * 4, (u32)QM_RL_CRD_REG_SIGN_BIT);
	ecore_wr(p_hwfn, p_ptt, QM_REG_RLGLBLINCVAL + vport_id * 4, inc_val);

	return 0;
}

bool ecore_send_qm_stop_cmd(struct ecore_hwfn *p_hwfn,
			    struct ecore_ptt *p_ptt,
			    bool is_release_cmd,
			    bool is_tx_pq,
			    u16 start_pq,
			    u16 num_pqs)
{
	u32 cmd_arr[QM_CMD_STRUCT_SIZE(QM_STOP_CMD)] = {0};
	u32 pq_mask = 0, last_pq, pq_id;

	last_pq = start_pq + num_pqs - 1;

	/* Set command's PQ type */
	QM_CMD_SET_FIELD(cmd_arr, QM_STOP_CMD, PQ_TYPE, is_tx_pq ? 0 : 1);

	/* Go over requested PQs */
	for (pq_id = start_pq; pq_id <= last_pq; pq_id++) {
		/* Set PQ bit in mask (stop command only) */
		if (!is_release_cmd)
			pq_mask |= (1 << (pq_id % QM_STOP_PQ_MASK_WIDTH));

		/* If last PQ or end of PQ mask, write command */
		if ((pq_id == last_pq) || (pq_id % QM_STOP_PQ_MASK_WIDTH == (QM_STOP_PQ_MASK_WIDTH - 1))) {
			QM_CMD_SET_FIELD(cmd_arr, (u32)QM_STOP_CMD, PAUSE_MASK, pq_mask);
			QM_CMD_SET_FIELD(cmd_arr, QM_STOP_CMD, GROUP_ID, pq_id / QM_STOP_PQ_MASK_WIDTH);
			if (!ecore_send_qm_cmd(p_hwfn, p_ptt, QM_STOP_CMD_ADDR, cmd_arr[0], cmd_arr[1]))
				return false;
			pq_mask = 0;
		}
	}

	return true;
}
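
/* Note: ecore_send_qm_stop_cmd() handles PQs in groups of
 * QM_STOP_PQ_MASK_WIDTH (32): a 32-bit pause mask plus a group ID are packed
 * into the two command words and sent through the SDM mailbox above; a
 * release command sends an all-zero mask, effectively clearing the group's
 * pause bits.
 */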

#ifndef UNUSED_HSI_FUNC

/* NIG: ETS configuration constants */
#define NIG_TX_ETS_CLIENT_OFFSET 4
#define NIG_LB_ETS_CLIENT_OFFSET 1
#define NIG_ETS_MIN_WFQ_BYTES 1600

/* NIG: ETS constants */
#define NIG_ETS_UP_BOUND(weight, mtu) (2 * ((weight) > (mtu) ? (weight) : (mtu)))

/* NIG: RL constants */

/* Byte base type value */
#define NIG_RL_BASE_TYPE 1

/* Period in us */
#define NIG_RL_PERIOD 1

/* Period in 25MHz cycles */
#define NIG_RL_PERIOD_CLK_25M (25 * NIG_RL_PERIOD)

/* Rate in mbps */
#define NIG_RL_INC_VAL(rate) (((rate) * NIG_RL_PERIOD) / 8)

#define NIG_RL_MAX_VAL(inc_val, mtu) (2 * ((inc_val) > (mtu) ? (inc_val) : (mtu)))

/* NIG: packet priority configuration constants */
#define NIG_PRIORITY_MAP_TC_BITS 4

void ecore_init_nig_ets(struct ecore_hwfn *p_hwfn,
			struct ecore_ptt *p_ptt,
			struct init_ets_req *req,
			bool is_lb)
{
	u32 min_weight, tc_weight_base_addr, tc_weight_addr_diff;
	u32 tc_bound_base_addr, tc_bound_addr_diff;
	u8 sp_tc_map = 0, wfq_tc_map = 0;
	u8 tc, num_tc, tc_client_offset;

	num_tc = is_lb ? NUM_OF_TCS : NUM_OF_PHYS_TCS;
	tc_client_offset = is_lb ? NIG_LB_ETS_CLIENT_OFFSET : NIG_TX_ETS_CLIENT_OFFSET;
	min_weight = 0xffffffff;
	tc_weight_base_addr = is_lb ? NIG_REG_LB_ARB_CREDIT_WEIGHT_0 : NIG_REG_TX_ARB_CREDIT_WEIGHT_0;
	tc_weight_addr_diff = is_lb ? NIG_REG_LB_ARB_CREDIT_WEIGHT_1 - NIG_REG_LB_ARB_CREDIT_WEIGHT_0 :
				      NIG_REG_TX_ARB_CREDIT_WEIGHT_1 - NIG_REG_TX_ARB_CREDIT_WEIGHT_0;
	tc_bound_base_addr = is_lb ? NIG_REG_LB_ARB_CREDIT_UPPER_BOUND_0 : NIG_REG_TX_ARB_CREDIT_UPPER_BOUND_0;
	tc_bound_addr_diff = is_lb ? NIG_REG_LB_ARB_CREDIT_UPPER_BOUND_1 - NIG_REG_LB_ARB_CREDIT_UPPER_BOUND_0 :
				     NIG_REG_TX_ARB_CREDIT_UPPER_BOUND_1 - NIG_REG_TX_ARB_CREDIT_UPPER_BOUND_0;

	for (tc = 0; tc < num_tc; tc++) {
		struct init_ets_tc_req *tc_req = &req->tc_req[tc];

		/* Update SP map */
		if (tc_req->use_sp)
			sp_tc_map |= (1 << tc);

		if (!tc_req->use_wfq)
			continue;

		/* Update WFQ map */
		wfq_tc_map |= (1 << tc);

		/* Find minimal weight */
		if (tc_req->weight < min_weight)
			min_weight = tc_req->weight;
	}

	/* Write SP map */
	ecore_wr(p_hwfn, p_ptt, is_lb ? NIG_REG_LB_ARB_CLIENT_IS_STRICT : NIG_REG_TX_ARB_CLIENT_IS_STRICT,
		 (sp_tc_map << tc_client_offset));

	/* Write WFQ map */
	ecore_wr(p_hwfn, p_ptt, is_lb ? NIG_REG_LB_ARB_CLIENT_IS_SUBJECT2WFQ : NIG_REG_TX_ARB_CLIENT_IS_SUBJECT2WFQ,
		 (wfq_tc_map << tc_client_offset));

	/* Write WFQ weights */
	for (tc = 0; tc < num_tc; tc++, tc_client_offset++) {
		struct init_ets_tc_req *tc_req = &req->tc_req[tc];
		u32 byte_weight;

		if (!tc_req->use_wfq)
			continue;

		/* Translate weight to bytes */
		byte_weight = (NIG_ETS_MIN_WFQ_BYTES * tc_req->weight) / min_weight;

		/* Write WFQ weight */
		ecore_wr(p_hwfn, p_ptt, tc_weight_base_addr + tc_weight_addr_diff * tc_client_offset, byte_weight);

		/* Write WFQ upper bound */
		ecore_wr(p_hwfn, p_ptt, tc_bound_base_addr + tc_bound_addr_diff * tc_client_offset,
			 NIG_ETS_UP_BOUND(byte_weight, req->mtu));
	}
}
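
/* Worked ETS example (hypothetical weights): TCs with weights 1 and 2 and an
 * MTU of 1500 yield byte weights of 1600 and 3200 (the minimal weight is
 * scaled to NIG_ETS_MIN_WFQ_BYTES), with upper bounds
 * NIG_ETS_UP_BOUND(1600, 1500) = 3200 and NIG_ETS_UP_BOUND(3200, 1500) = 6400.
 */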

void ecore_init_nig_lb_rl(struct ecore_hwfn *p_hwfn,
			  struct ecore_ptt *p_ptt,
			  struct init_nig_lb_rl_req *req)
{
	u32 ctrl, inc_val, reg_offset;
	u8 tc;

	/* Disable global MAC+LB RL */
	ctrl = NIG_RL_BASE_TYPE << NIG_REG_TX_LB_GLBRATELIMIT_CTRL_TX_LB_GLBRATELIMIT_BASE_TYPE_SHIFT;
	ecore_wr(p_hwfn, p_ptt, NIG_REG_TX_LB_GLBRATELIMIT_CTRL, ctrl);

	/* Configure and enable global MAC+LB RL */
	if (req->lb_mac_rate) {
		/* Configure */
		ecore_wr(p_hwfn, p_ptt, NIG_REG_TX_LB_GLBRATELIMIT_INC_PERIOD, NIG_RL_PERIOD_CLK_25M);
		inc_val = NIG_RL_INC_VAL(req->lb_mac_rate);
		ecore_wr(p_hwfn, p_ptt, NIG_REG_TX_LB_GLBRATELIMIT_INC_VALUE, inc_val);
		ecore_wr(p_hwfn, p_ptt, NIG_REG_TX_LB_GLBRATELIMIT_MAX_VALUE, NIG_RL_MAX_VAL(inc_val, req->mtu));

		/* Enable */
		ctrl |= 1 << NIG_REG_TX_LB_GLBRATELIMIT_CTRL_TX_LB_GLBRATELIMIT_EN_SHIFT;
		ecore_wr(p_hwfn, p_ptt, NIG_REG_TX_LB_GLBRATELIMIT_CTRL, ctrl);
	}

	/* Disable global LB-only RL */
	ctrl = NIG_RL_BASE_TYPE << NIG_REG_LB_BRBRATELIMIT_CTRL_LB_BRBRATELIMIT_BASE_TYPE_SHIFT;
	ecore_wr(p_hwfn, p_ptt, NIG_REG_LB_BRBRATELIMIT_CTRL, ctrl);

	/* Configure and enable global LB-only RL */
	if (req->lb_rate) {
		/* Configure */
		ecore_wr(p_hwfn, p_ptt, NIG_REG_LB_BRBRATELIMIT_INC_PERIOD, NIG_RL_PERIOD_CLK_25M);
		inc_val = NIG_RL_INC_VAL(req->lb_rate);
		ecore_wr(p_hwfn, p_ptt, NIG_REG_LB_BRBRATELIMIT_INC_VALUE, inc_val);
		ecore_wr(p_hwfn, p_ptt, NIG_REG_LB_BRBRATELIMIT_MAX_VALUE, NIG_RL_MAX_VAL(inc_val, req->mtu));

		/* Enable */
		ctrl |= 1 << NIG_REG_LB_BRBRATELIMIT_CTRL_LB_BRBRATELIMIT_EN_SHIFT;
		ecore_wr(p_hwfn, p_ptt, NIG_REG_LB_BRBRATELIMIT_CTRL, ctrl);
	}

	/* Per-TC RLs */
	for (tc = 0, reg_offset = 0; tc < NUM_OF_PHYS_TCS; tc++, reg_offset += 4) {
		/* Disable TC RL */
		ctrl = NIG_RL_BASE_TYPE << NIG_REG_LB_TCRATELIMIT_CTRL_0_LB_TCRATELIMIT_BASE_TYPE_0_SHIFT;
		ecore_wr(p_hwfn, p_ptt, NIG_REG_LB_TCRATELIMIT_CTRL_0 + reg_offset, ctrl);

		/* Configure and enable TC RL */
		if (!req->tc_rate[tc])
			continue;

		/* Configure */
		ecore_wr(p_hwfn, p_ptt, NIG_REG_LB_TCRATELIMIT_INC_PERIOD_0 + reg_offset, NIG_RL_PERIOD_CLK_25M);
		inc_val = NIG_RL_INC_VAL(req->tc_rate[tc]);
		ecore_wr(p_hwfn, p_ptt, NIG_REG_LB_TCRATELIMIT_INC_VALUE_0 + reg_offset, inc_val);
		ecore_wr(p_hwfn, p_ptt, NIG_REG_LB_TCRATELIMIT_MAX_VALUE_0 + reg_offset, NIG_RL_MAX_VAL(inc_val, req->mtu));

		/* Enable */
		ctrl |= 1 << NIG_REG_LB_TCRATELIMIT_CTRL_0_LB_TCRATELIMIT_EN_0_SHIFT;
		ecore_wr(p_hwfn, p_ptt, NIG_REG_LB_TCRATELIMIT_CTRL_0 + reg_offset, ctrl);
	}
}

void ecore_init_nig_pri_tc_map(struct ecore_hwfn *p_hwfn,
			       struct ecore_ptt *p_ptt,
			       struct init_nig_pri_tc_map_req *req)
{
	u8 tc_pri_mask[NUM_OF_PHYS_TCS] = { 0 };
	u32 pri_tc_mask = 0;
	u8 pri, tc;

	for (pri = 0; pri < NUM_OF_VLAN_PRIORITIES; pri++) {
		if (!req->pri[pri].valid)
			continue;

		pri_tc_mask |= (req->pri[pri].tc_id << (pri * NIG_PRIORITY_MAP_TC_BITS));
		tc_pri_mask[req->pri[pri].tc_id] |= (1 << pri);
	}

	/* Write priority -> TC mask */
	ecore_wr(p_hwfn, p_ptt, NIG_REG_PKT_PRIORITY_TO_TC, pri_tc_mask);

	/* Write TC -> priority mask */
	for (tc = 0; tc < NUM_OF_PHYS_TCS; tc++) {
		ecore_wr(p_hwfn, p_ptt, NIG_REG_PRIORITY_FOR_TC_0 + tc * 4, tc_pri_mask[tc]);
		ecore_wr(p_hwfn, p_ptt, NIG_REG_RX_TC0_PRIORITY_MASK + tc * 4, tc_pri_mask[tc]);
	}
}

#endif /* UNUSED_HSI_FUNC */
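
/* Example: mapping VLAN priority 5 to TC 2 sets bits [23:20] of
 * NIG_REG_PKT_PRIORITY_TO_TC to 2 (4 bits per priority) and bit 5 in the
 * reverse TC 2 -> priority mask.
 */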

#ifndef UNUSED_HSI_FUNC

/* PRS: ETS configuration constants */
#define PRS_ETS_MIN_WFQ_BYTES 1600
#define PRS_ETS_UP_BOUND(weight, mtu) (2 * ((weight) > (mtu) ? (weight) : (mtu)))

void ecore_init_prs_ets(struct ecore_hwfn *p_hwfn,
			struct ecore_ptt *p_ptt,
			struct init_ets_req *req)
{
	u32 tc_weight_addr_diff, tc_bound_addr_diff, min_weight = 0xffffffff;
	u8 tc, sp_tc_map = 0, wfq_tc_map = 0;

	tc_weight_addr_diff = PRS_REG_ETS_ARB_CREDIT_WEIGHT_1 - PRS_REG_ETS_ARB_CREDIT_WEIGHT_0;
	tc_bound_addr_diff = PRS_REG_ETS_ARB_CREDIT_UPPER_BOUND_1 - PRS_REG_ETS_ARB_CREDIT_UPPER_BOUND_0;

	for (tc = 0; tc < NUM_OF_TCS; tc++) {
		struct init_ets_tc_req *tc_req = &req->tc_req[tc];

		/* Update SP map */
		if (tc_req->use_sp)
			sp_tc_map |= (1 << tc);

		if (!tc_req->use_wfq)
			continue;

		/* Update WFQ map */
		wfq_tc_map |= (1 << tc);

		/* Find minimal weight */
		if (tc_req->weight < min_weight)
			min_weight = tc_req->weight;
	}

	/* Write SP map */
	ecore_wr(p_hwfn, p_ptt, PRS_REG_ETS_ARB_CLIENT_IS_STRICT, sp_tc_map);

	/* Write WFQ map */
	ecore_wr(p_hwfn, p_ptt, PRS_REG_ETS_ARB_CLIENT_IS_SUBJECT2WFQ, wfq_tc_map);

	/* Write WFQ weights */
	for (tc = 0; tc < NUM_OF_TCS; tc++) {
		struct init_ets_tc_req *tc_req = &req->tc_req[tc];
		u32 byte_weight;

		if (!tc_req->use_wfq)
			continue;

		/* Translate weight to bytes */
		byte_weight = (PRS_ETS_MIN_WFQ_BYTES * tc_req->weight) / min_weight;

		/* Write WFQ weight */
		ecore_wr(p_hwfn, p_ptt, PRS_REG_ETS_ARB_CREDIT_WEIGHT_0 + tc * tc_weight_addr_diff, byte_weight);

		/* Write WFQ upper bound */
		ecore_wr(p_hwfn, p_ptt, PRS_REG_ETS_ARB_CREDIT_UPPER_BOUND_0 + tc * tc_bound_addr_diff,
			 PRS_ETS_UP_BOUND(byte_weight, req->mtu));
	}
}

#endif /* UNUSED_HSI_FUNC */
#ifndef UNUSED_HSI_FUNC

/* BRB: RAM configuration constants */
#define BRB_TOTAL_RAM_BLOCKS_BB 4800
#define BRB_TOTAL_RAM_BLOCKS_K2 5632
#define BRB_BLOCK_SIZE 128
#define BRB_MIN_BLOCKS_PER_TC 9
#define BRB_HYST_BYTES 10240
#define BRB_HYST_BLOCKS (BRB_HYST_BYTES / BRB_BLOCK_SIZE)
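
/* For reference: 4800 blocks * 128B = 600KB of BRB RAM on BB and
 * 5632 * 128B = 704KB on K2; the hysteresis value programmed per TC below is
 * BRB_HYST_BLOCKS = 10240 / 128 = 80 blocks.
 */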

/* Temporary big RAM allocation - should be updated */
void ecore_init_brb_ram(struct ecore_hwfn *p_hwfn,
			struct ecore_ptt *p_ptt,
			struct init_brb_ram_req *req)
{
	u32 tc_headroom_blocks, min_pkt_size_blocks, total_blocks;
	u32 active_port_blocks, reg_offset = 0;
	u8 port, active_ports = 0;

	tc_headroom_blocks = (u32)DIV_ROUND_UP(req->headroom_per_tc, BRB_BLOCK_SIZE);
	min_pkt_size_blocks = (u32)DIV_ROUND_UP(req->min_pkt_size, BRB_BLOCK_SIZE);
	total_blocks = ECORE_IS_K2(p_hwfn->p_dev) ? BRB_TOTAL_RAM_BLOCKS_K2 : BRB_TOTAL_RAM_BLOCKS_BB;

	/* Find number of active ports */
	for (port = 0; port < MAX_NUM_PORTS; port++)
		if (req->num_active_tcs[port])
			active_ports++;

	active_port_blocks = (u32)(total_blocks / active_ports);

	for (port = 0; port < req->max_ports_per_engine; port++) {
		u32 port_blocks, port_shared_blocks, port_guaranteed_blocks;
		u32 full_xoff_th, full_xon_th, pause_xoff_th, pause_xon_th;
		u32 tc_guaranteed_blocks;
		u8 tc;

		/* Calculate per-port sizes */
		tc_guaranteed_blocks = (u32)DIV_ROUND_UP(req->guranteed_per_tc, BRB_BLOCK_SIZE);
		port_blocks = req->num_active_tcs[port] ? active_port_blocks : 0;
		port_guaranteed_blocks = req->num_active_tcs[port] * tc_guaranteed_blocks;
		port_shared_blocks = port_blocks - port_guaranteed_blocks;
		full_xoff_th = req->num_active_tcs[port] * BRB_MIN_BLOCKS_PER_TC;
		full_xon_th = full_xoff_th + min_pkt_size_blocks;
		pause_xoff_th = tc_headroom_blocks;
		pause_xon_th = pause_xoff_th + min_pkt_size_blocks;

		/* Init total size per port */
		ecore_wr(p_hwfn, p_ptt, BRB_REG_TOTAL_MAC_SIZE + port * 4, port_blocks);

		/* Init shared size per port */
		ecore_wr(p_hwfn, p_ptt, BRB_REG_SHARED_HR_AREA + port * 4, port_shared_blocks);

		for (tc = 0; tc < NUM_OF_TCS; tc++, reg_offset += 4) {
			/* Clear init values for non-active TCs */
			if (tc == req->num_active_tcs[port]) {
				tc_guaranteed_blocks = 0;
				full_xoff_th = 0;
				full_xon_th = 0;
				pause_xoff_th = 0;
				pause_xon_th = 0;
			}

			/* Init guaranteed size per TC */
			ecore_wr(p_hwfn, p_ptt, BRB_REG_TC_GUARANTIED_0 + reg_offset, tc_guaranteed_blocks);
			ecore_wr(p_hwfn, p_ptt, BRB_REG_MAIN_TC_GUARANTIED_HYST_0 + reg_offset, BRB_HYST_BLOCKS);

			/* Init pause/full thresholds per physical TC - for
			 * loopback traffic.
			 */
			ecore_wr(p_hwfn, p_ptt, BRB_REG_LB_TC_FULL_XOFF_THRESHOLD_0 + reg_offset, full_xoff_th);
			ecore_wr(p_hwfn, p_ptt, BRB_REG_LB_TC_FULL_XON_THRESHOLD_0 + reg_offset, full_xon_th);
			ecore_wr(p_hwfn, p_ptt, BRB_REG_LB_TC_PAUSE_XOFF_THRESHOLD_0 + reg_offset, pause_xoff_th);
			ecore_wr(p_hwfn, p_ptt, BRB_REG_LB_TC_PAUSE_XON_THRESHOLD_0 + reg_offset, pause_xon_th);

			/* Init pause/full thresholds per physical TC - for
			 * main traffic.
			 */
			ecore_wr(p_hwfn, p_ptt, BRB_REG_MAIN_TC_FULL_XOFF_THRESHOLD_0 + reg_offset, full_xoff_th);
			ecore_wr(p_hwfn, p_ptt, BRB_REG_MAIN_TC_FULL_XON_THRESHOLD_0 + reg_offset, full_xon_th);
			ecore_wr(p_hwfn, p_ptt, BRB_REG_MAIN_TC_PAUSE_XOFF_THRESHOLD_0 + reg_offset, pause_xoff_th);
			ecore_wr(p_hwfn, p_ptt, BRB_REG_MAIN_TC_PAUSE_XON_THRESHOLD_0 + reg_offset, pause_xon_th);
		}
	}
}

#endif /* UNUSED_HSI_FUNC */
#ifndef UNUSED_HSI_FUNC

/* In MF, should be called once per port to set EtherType of OuterTag */
void ecore_set_port_mf_ovlan_eth_type(struct ecore_hwfn *p_hwfn, u32 ethType)
{
	/* Update DORQ register */
	STORE_RT_REG(p_hwfn, DORQ_REG_TAG1_ETHERTYPE_RT_OFFSET, ethType);
}

#endif /* UNUSED_HSI_FUNC */
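
/* The helper below performs a read-modify-write on a single enable bit:
 * bit <offset> of <var> is first cleared and then set again only if <enable>
 * is true.
 */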

#define SET_TUNNEL_TYPE_ENABLE_BIT(var, offset, enable) \
	var = ((var) & ~(1 << (offset))) | ((enable) ? (1 << (offset)) : 0)
#define PRS_ETH_TUNN_OUTPUT_FORMAT -188897008
#define PRS_ETH_OUTPUT_FORMAT -46832

void ecore_set_vxlan_dest_port(struct ecore_hwfn *p_hwfn,
			       struct ecore_ptt *p_ptt,
			       u16 dest_port)
{
	/* Update PRS register */
	ecore_wr(p_hwfn, p_ptt, PRS_REG_VXLAN_PORT, dest_port);

	/* Update NIG register */
	ecore_wr(p_hwfn, p_ptt, NIG_REG_VXLAN_CTRL, dest_port);

	/* Update PBF register */
	ecore_wr(p_hwfn, p_ptt, PBF_REG_VXLAN_PORT, dest_port);
}

void ecore_set_vxlan_enable(struct ecore_hwfn *p_hwfn,
			    struct ecore_ptt *p_ptt,
			    bool vxlan_enable)
{
	u32 reg_val;

	/* Update PRS register */
	reg_val = ecore_rd(p_hwfn, p_ptt, PRS_REG_ENCAPSULATION_TYPE_EN);
	SET_TUNNEL_TYPE_ENABLE_BIT(reg_val, PRS_REG_ENCAPSULATION_TYPE_EN_VXLAN_ENABLE_SHIFT, vxlan_enable);
	ecore_wr(p_hwfn, p_ptt, PRS_REG_ENCAPSULATION_TYPE_EN, reg_val);
	if (reg_val) { /* TODO: handle E5 init */
		reg_val = ecore_rd(p_hwfn, p_ptt, PRS_REG_OUTPUT_FORMAT_4_0_BB_K2);

		/* Update output only if tunnel blocks not included. */
		if (reg_val == (u32)PRS_ETH_OUTPUT_FORMAT)
			ecore_wr(p_hwfn, p_ptt, PRS_REG_OUTPUT_FORMAT_4_0_BB_K2, (u32)PRS_ETH_TUNN_OUTPUT_FORMAT);
	}

	/* Update NIG register */
	reg_val = ecore_rd(p_hwfn, p_ptt, NIG_REG_ENC_TYPE_ENABLE);
	SET_TUNNEL_TYPE_ENABLE_BIT(reg_val, NIG_REG_ENC_TYPE_ENABLE_VXLAN_ENABLE_SHIFT, vxlan_enable);
	ecore_wr(p_hwfn, p_ptt, NIG_REG_ENC_TYPE_ENABLE, reg_val);

	/* Update DORQ register */
	ecore_wr(p_hwfn, p_ptt, DORQ_REG_L2_EDPM_TUNNEL_VXLAN_EN, vxlan_enable ? 1 : 0);
}
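
/* Note: the PRS output-format constants above are raw register contents
 * (compared and written as u32). The pattern used here and in the GRE/GENEVE
 * functions below switches PRS_REG_OUTPUT_FORMAT_4_0_BB_K2 to the
 * tunnel-aware format only while it still holds the plain Ethernet format,
 * so an already customized format is never overwritten.
 */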

void ecore_set_gre_enable(struct ecore_hwfn *p_hwfn,
			  struct ecore_ptt *p_ptt,
			  bool eth_gre_enable,
			  bool ip_gre_enable)
{
	u32 reg_val;

	/* Update PRS register */
	reg_val = ecore_rd(p_hwfn, p_ptt, PRS_REG_ENCAPSULATION_TYPE_EN);
	SET_TUNNEL_TYPE_ENABLE_BIT(reg_val, PRS_REG_ENCAPSULATION_TYPE_EN_ETH_OVER_GRE_ENABLE_SHIFT, eth_gre_enable);
	SET_TUNNEL_TYPE_ENABLE_BIT(reg_val, PRS_REG_ENCAPSULATION_TYPE_EN_IP_OVER_GRE_ENABLE_SHIFT, ip_gre_enable);
	ecore_wr(p_hwfn, p_ptt, PRS_REG_ENCAPSULATION_TYPE_EN, reg_val);
	if (reg_val) { /* TODO: handle E5 init */
		reg_val = ecore_rd(p_hwfn, p_ptt, PRS_REG_OUTPUT_FORMAT_4_0_BB_K2);

		/* Update output only if tunnel blocks not included. */
		if (reg_val == (u32)PRS_ETH_OUTPUT_FORMAT)
			ecore_wr(p_hwfn, p_ptt, PRS_REG_OUTPUT_FORMAT_4_0_BB_K2, (u32)PRS_ETH_TUNN_OUTPUT_FORMAT);
	}

	/* Update NIG register */
	reg_val = ecore_rd(p_hwfn, p_ptt, NIG_REG_ENC_TYPE_ENABLE);
	SET_TUNNEL_TYPE_ENABLE_BIT(reg_val, NIG_REG_ENC_TYPE_ENABLE_ETH_OVER_GRE_ENABLE_SHIFT, eth_gre_enable);
	SET_TUNNEL_TYPE_ENABLE_BIT(reg_val, NIG_REG_ENC_TYPE_ENABLE_IP_OVER_GRE_ENABLE_SHIFT, ip_gre_enable);
	ecore_wr(p_hwfn, p_ptt, NIG_REG_ENC_TYPE_ENABLE, reg_val);

	/* Update DORQ registers */
	ecore_wr(p_hwfn, p_ptt, DORQ_REG_L2_EDPM_TUNNEL_GRE_ETH_EN, eth_gre_enable ? 1 : 0);
	ecore_wr(p_hwfn, p_ptt, DORQ_REG_L2_EDPM_TUNNEL_GRE_IP_EN, ip_gre_enable ? 1 : 0);
}

void ecore_set_geneve_dest_port(struct ecore_hwfn *p_hwfn,
				struct ecore_ptt *p_ptt,
				u16 dest_port)
{
	/* Update PRS register */
	ecore_wr(p_hwfn, p_ptt, PRS_REG_NGE_PORT, dest_port);

	/* Update NIG register */
	ecore_wr(p_hwfn, p_ptt, NIG_REG_NGE_PORT, dest_port);

	/* Update PBF register */
	ecore_wr(p_hwfn, p_ptt, PBF_REG_NGE_PORT, dest_port);
}
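
/* Note: the same UDP destination port is programmed into the PRS, NIG and
 * PBF blocks so every pipeline stage classifies the tunnel consistently;
 * callers typically pass the IANA-assigned ports (4789 for VXLAN, 6081 for
 * GENEVE) unless a custom port is configured.
 */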

void ecore_set_geneve_enable(struct ecore_hwfn *p_hwfn,
			     struct ecore_ptt *p_ptt,
			     bool eth_geneve_enable,
			     bool ip_geneve_enable)
{
	u32 reg_val;

	/* Update PRS register */
	reg_val = ecore_rd(p_hwfn, p_ptt, PRS_REG_ENCAPSULATION_TYPE_EN);
	SET_TUNNEL_TYPE_ENABLE_BIT(reg_val, PRS_REG_ENCAPSULATION_TYPE_EN_ETH_OVER_GENEVE_ENABLE_SHIFT, eth_geneve_enable);
	SET_TUNNEL_TYPE_ENABLE_BIT(reg_val, PRS_REG_ENCAPSULATION_TYPE_EN_IP_OVER_GENEVE_ENABLE_SHIFT, ip_geneve_enable);
	ecore_wr(p_hwfn, p_ptt, PRS_REG_ENCAPSULATION_TYPE_EN, reg_val);
	if (reg_val) { /* TODO: handle E5 init */
		reg_val = ecore_rd(p_hwfn, p_ptt, PRS_REG_OUTPUT_FORMAT_4_0_BB_K2);

		/* Update output only if tunnel blocks not included. */
		if (reg_val == (u32)PRS_ETH_OUTPUT_FORMAT)
			ecore_wr(p_hwfn, p_ptt, PRS_REG_OUTPUT_FORMAT_4_0_BB_K2, (u32)PRS_ETH_TUNN_OUTPUT_FORMAT);
	}

	/* Update NIG register */
	ecore_wr(p_hwfn, p_ptt, NIG_REG_NGE_ETH_ENABLE, eth_geneve_enable ? 1 : 0);
	ecore_wr(p_hwfn, p_ptt, NIG_REG_NGE_IP_ENABLE, ip_geneve_enable ? 1 : 0);

	/* EDPM with geneve tunnel not supported in BB */
	if (ECORE_IS_BB_B0(p_hwfn->p_dev))
		return;

	/* Update DORQ registers */
	ecore_wr(p_hwfn, p_ptt, DORQ_REG_L2_EDPM_TUNNEL_NGE_ETH_EN_K2_E5, eth_geneve_enable ? 1 : 0);
	ecore_wr(p_hwfn, p_ptt, DORQ_REG_L2_EDPM_TUNNEL_NGE_IP_EN_K2_E5, ip_geneve_enable ? 1 : 0);
}

#define PRS_ETH_VXLAN_NO_L2_ENABLE_OFFSET 4
#define PRS_ETH_VXLAN_NO_L2_OUTPUT_FORMAT -927094512

void ecore_set_vxlan_no_l2_enable(struct ecore_hwfn *p_hwfn,
				  struct ecore_ptt *p_ptt,
				  bool enable)
{
	u32 reg_val, cfg_mask;

	/* Read PRS config register */
	reg_val = ecore_rd(p_hwfn, p_ptt, PRS_REG_MSG_INFO);

	/* Set VXLAN_NO_L2_ENABLE mask */
	cfg_mask = (1 << PRS_ETH_VXLAN_NO_L2_ENABLE_OFFSET);

	if (enable) {
		/* Set VXLAN_NO_L2_ENABLE flag */
		reg_val |= cfg_mask;

		/* Update PRS FIC register */
		ecore_wr(p_hwfn, p_ptt, PRS_REG_OUTPUT_FORMAT_4_0_BB_K2, (u32)PRS_ETH_VXLAN_NO_L2_OUTPUT_FORMAT);
	} else {
		/* Clear VXLAN_NO_L2_ENABLE flag */
		reg_val &= ~cfg_mask;
	}

	/* Write PRS config register */
	ecore_wr(p_hwfn, p_ptt, PRS_REG_MSG_INFO, reg_val);
}

#ifndef UNUSED_HSI_FUNC

#define T_ETH_PACKET_ACTION_GFT_EVENTID 23
#define PARSER_ETH_CONN_GFT_ACTION_CM_HDR 272
#define T_ETH_PACKET_MATCH_RFS_EVENTID 25
#define PARSER_ETH_CONN_CM_HDR 0
#define CAM_LINE_SIZE sizeof(u32)
#define RAM_LINE_SIZE sizeof(u64)
#define REG_SIZE sizeof(u32)

void ecore_gft_disable(struct ecore_hwfn *p_hwfn,
		       struct ecore_ptt *p_ptt,
		       u16 pf_id)
{
	/* Disable GFT search for PF */
	ecore_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_GFT, 0);

	/* Clean RAM & CAM for next GFT session */

	/* Zero CAM line */
	ecore_wr(p_hwfn, p_ptt, PRS_REG_GFT_CAM + CAM_LINE_SIZE * pf_id, 0);

	/* Zero RAM line */
	ecore_wr(p_hwfn, p_ptt, PRS_REG_GFT_PROFILE_MASK_RAM + RAM_LINE_SIZE * pf_id, 0);
	ecore_wr(p_hwfn, p_ptt, PRS_REG_GFT_PROFILE_MASK_RAM + RAM_LINE_SIZE * pf_id + REG_SIZE, 0);
}

void ecore_set_gft_event_id_cm_hdr(struct ecore_hwfn *p_hwfn,
				   struct ecore_ptt *p_ptt)
{
	u32 rfs_cm_hdr_event_id;

	/* Set RFS event ID to be awakened in Tstorm by PRS */
	rfs_cm_hdr_event_id = ecore_rd(p_hwfn, p_ptt, PRS_REG_CM_HDR_GFT);
	rfs_cm_hdr_event_id |= T_ETH_PACKET_ACTION_GFT_EVENTID << PRS_REG_CM_HDR_GFT_EVENT_ID_SHIFT;
	rfs_cm_hdr_event_id |= PARSER_ETH_CONN_GFT_ACTION_CM_HDR << PRS_REG_CM_HDR_GFT_CM_HDR_SHIFT;
	ecore_wr(p_hwfn, p_ptt, PRS_REG_CM_HDR_GFT, rfs_cm_hdr_event_id);
}
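
/* Note: each PF owns one GFT CAM line (CAM_LINE_SIZE = 4 bytes) and one
 * profile-mask RAM line (RAM_LINE_SIZE = 8 bytes), which is why
 * ecore_gft_disable() above clears the RAM line with two 32-bit writes.
 */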

void ecore_gft_config(struct ecore_hwfn *p_hwfn,
		      struct ecore_ptt *p_ptt,
		      u16 pf_id,
		      bool tcp,
		      bool udp,
		      bool ipv4,
		      bool ipv6,
		      enum gft_profile_type profile_type)
{
	u32 reg_val, cam_line, ram_line_lo, ram_line_hi;

	if (!ipv6 && !ipv4)
		DP_NOTICE(p_hwfn, true, "gft_config: must accept at least one of - ipv4 or ipv6\n");
	if (!tcp && !udp)
		DP_NOTICE(p_hwfn, true, "gft_config: must accept at least one of - udp or tcp\n");
	if (profile_type >= MAX_GFT_PROFILE_TYPE)
		DP_NOTICE(p_hwfn, true, "gft_config: unsupported gft_profile_type\n");

	/* Set RFS event ID to be awakened in Tstorm by PRS */
	reg_val = T_ETH_PACKET_MATCH_RFS_EVENTID << PRS_REG_CM_HDR_GFT_EVENT_ID_SHIFT;
	reg_val |= PARSER_ETH_CONN_CM_HDR << PRS_REG_CM_HDR_GFT_CM_HDR_SHIFT;
	ecore_wr(p_hwfn, p_ptt, PRS_REG_CM_HDR_GFT, reg_val);

	/* Do not load context, only cid, in PRS on match. */
	ecore_wr(p_hwfn, p_ptt, PRS_REG_LOAD_L2_FILTER, 0);

	/* Do not use tenant ID exist bit for GFT search */
	ecore_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_TENANT_ID, 0);

	/* Set CAM */
	cam_line = 0;
	SET_FIELD(cam_line, GFT_CAM_LINE_MAPPED_VALID, 1);

	/* Filters are per PF!! */
	SET_FIELD(cam_line, GFT_CAM_LINE_MAPPED_PF_ID_MASK, GFT_CAM_LINE_MAPPED_PF_ID_MASK_MASK);
	SET_FIELD(cam_line, GFT_CAM_LINE_MAPPED_PF_ID, pf_id);

	if (!(tcp && udp)) {
		SET_FIELD(cam_line, GFT_CAM_LINE_MAPPED_UPPER_PROTOCOL_TYPE_MASK,
			  GFT_CAM_LINE_MAPPED_UPPER_PROTOCOL_TYPE_MASK_MASK);
		if (tcp)
			SET_FIELD(cam_line, GFT_CAM_LINE_MAPPED_UPPER_PROTOCOL_TYPE, GFT_PROFILE_TCP_PROTOCOL);
		else
			SET_FIELD(cam_line, GFT_CAM_LINE_MAPPED_UPPER_PROTOCOL_TYPE, GFT_PROFILE_UDP_PROTOCOL);
	}

	if (!(ipv4 && ipv6)) {
		SET_FIELD(cam_line, GFT_CAM_LINE_MAPPED_IP_VERSION_MASK, 1);
		if (ipv4)
			SET_FIELD(cam_line, GFT_CAM_LINE_MAPPED_IP_VERSION, GFT_PROFILE_IPV4);
		else
			SET_FIELD(cam_line, GFT_CAM_LINE_MAPPED_IP_VERSION, GFT_PROFILE_IPV6);
	}

	/* Write characteristics to CAM */
	ecore_wr(p_hwfn, p_ptt, PRS_REG_GFT_CAM + CAM_LINE_SIZE * pf_id, cam_line);
	cam_line = ecore_rd(p_hwfn, p_ptt, PRS_REG_GFT_CAM + CAM_LINE_SIZE * pf_id);

	/* Write line to RAM - compare to filter 4 tuple */
	ram_line_lo = 0;
	ram_line_hi = 0;

	/* Tunnel type */
	SET_FIELD(ram_line_lo, GFT_RAM_LINE_TUNNEL_DST_PORT, 1);
	SET_FIELD(ram_line_lo, GFT_RAM_LINE_TUNNEL_OVER_IP_PROTOCOL, 1);

	if (profile_type == GFT_PROFILE_TYPE_4_TUPLE) {
		SET_FIELD(ram_line_hi, GFT_RAM_LINE_DST_IP, 1);
		SET_FIELD(ram_line_hi, GFT_RAM_LINE_SRC_IP, 1);
		SET_FIELD(ram_line_hi, GFT_RAM_LINE_OVER_IP_PROTOCOL, 1);
		SET_FIELD(ram_line_lo, GFT_RAM_LINE_ETHERTYPE, 1);
		SET_FIELD(ram_line_lo, GFT_RAM_LINE_SRC_PORT, 1);
		SET_FIELD(ram_line_lo, GFT_RAM_LINE_DST_PORT, 1);
	} else if (profile_type == GFT_PROFILE_TYPE_L4_DST_PORT) {
		SET_FIELD(ram_line_hi, GFT_RAM_LINE_OVER_IP_PROTOCOL, 1);
		SET_FIELD(ram_line_lo, GFT_RAM_LINE_ETHERTYPE, 1);
		SET_FIELD(ram_line_lo, GFT_RAM_LINE_DST_PORT, 1);
	} else if (profile_type == GFT_PROFILE_TYPE_IP_DST_ADDR) {
		SET_FIELD(ram_line_hi, GFT_RAM_LINE_DST_IP, 1);
		SET_FIELD(ram_line_lo, GFT_RAM_LINE_ETHERTYPE, 1);
	} else if (profile_type == GFT_PROFILE_TYPE_IP_SRC_ADDR) {
		SET_FIELD(ram_line_hi, GFT_RAM_LINE_SRC_IP, 1);
		SET_FIELD(ram_line_lo, GFT_RAM_LINE_ETHERTYPE, 1);
	} else if (profile_type == GFT_PROFILE_TYPE_TUNNEL_TYPE) {
		SET_FIELD(ram_line_lo, GFT_RAM_LINE_TUNNEL_ETHERTYPE, 1);
	}

	ecore_wr(p_hwfn, p_ptt, PRS_REG_GFT_PROFILE_MASK_RAM + RAM_LINE_SIZE * pf_id, ram_line_lo);
	ecore_wr(p_hwfn, p_ptt, PRS_REG_GFT_PROFILE_MASK_RAM + RAM_LINE_SIZE * pf_id + REG_SIZE, ram_line_hi);

	/* Set default profile so that no filter match will happen */
	ecore_wr(p_hwfn, p_ptt, PRS_REG_GFT_PROFILE_MASK_RAM + RAM_LINE_SIZE * PRS_GFT_CAM_LINES_NO_MATCH, 0xffffffff);
	ecore_wr(p_hwfn, p_ptt, PRS_REG_GFT_PROFILE_MASK_RAM + RAM_LINE_SIZE * PRS_GFT_CAM_LINES_NO_MATCH + REG_SIZE, 0x3ff);

	/* Enable GFT search */
	ecore_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_GFT, 1);
}

#endif /* UNUSED_HSI_FUNC */
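
/* Sizing note for the VF zone helpers below (the example log value is
 * hypothetical): if MSTORM_VF_ZONE_DEFAULT_SIZE_LOG were 7 (128B zones),
 * VF_ZONE_SIZE_MODE_DOUBLE would program a shift of 8 with offset mask 0xff,
 * and VF_ZONE_SIZE_MODE_QUAD a shift of 9 with mask 0x1ff.
 */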
#endif /* UNUSED_HSI_FUNC */

/* Configure VF zone size mode */
void ecore_config_vf_zone_size_mode(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt, u16 mode, bool runtime_init)
{
	u32 msdm_vf_size_log = MSTORM_VF_ZONE_DEFAULT_SIZE_LOG;
	u32 msdm_vf_offset_mask;

	if (mode == VF_ZONE_SIZE_MODE_DOUBLE)
		msdm_vf_size_log += 1;
	else if (mode == VF_ZONE_SIZE_MODE_QUAD)
		msdm_vf_size_log += 2;

	msdm_vf_offset_mask = (1 << msdm_vf_size_log) - 1;

	if (runtime_init) {
		STORE_RT_REG(p_hwfn, PGLUE_REG_B_MSDM_VF_SHIFT_B_RT_OFFSET, msdm_vf_size_log);
		STORE_RT_REG(p_hwfn, PGLUE_REG_B_MSDM_OFFSET_MASK_B_RT_OFFSET, msdm_vf_offset_mask);
	}
	else {
		ecore_wr(p_hwfn, p_ptt, PGLUE_B_REG_MSDM_VF_SHIFT_B, msdm_vf_size_log);
		ecore_wr(p_hwfn, p_ptt, PGLUE_B_REG_MSDM_OFFSET_MASK_B, msdm_vf_offset_mask);
	}
}

/* Get mstorm statistics offset by VF zone size mode */
u32 ecore_get_mstorm_queue_stat_offset(struct ecore_hwfn *p_hwfn, u16 stat_cnt_id, u16 vf_zone_size_mode)
{
	u32 offset = MSTORM_QUEUE_STAT_OFFSET(stat_cnt_id);

	/* VF stat counters follow the PF counters; only VF zones scale */
	if ((vf_zone_size_mode != VF_ZONE_SIZE_MODE_DEFAULT) && (stat_cnt_id > MAX_NUM_PFS)) {
		if (vf_zone_size_mode == VF_ZONE_SIZE_MODE_DOUBLE)
			offset += (1 << MSTORM_VF_ZONE_DEFAULT_SIZE_LOG) * (stat_cnt_id - MAX_NUM_PFS);
		else if (vf_zone_size_mode == VF_ZONE_SIZE_MODE_QUAD)
			offset += 3 * (1 << MSTORM_VF_ZONE_DEFAULT_SIZE_LOG) * (stat_cnt_id - MAX_NUM_PFS);
	}

	return offset;
}

/* Get mstorm VF producer offset by VF zone size mode */
u32 ecore_get_mstorm_eth_vf_prods_offset(struct ecore_hwfn *p_hwfn, u8 vf_id, u8 vf_queue_id, u16 vf_zone_size_mode)
{
	u32 offset = MSTORM_ETH_VF_PRODS_OFFSET(vf_id, vf_queue_id);

	if (vf_zone_size_mode != VF_ZONE_SIZE_MODE_DEFAULT) {
		if (vf_zone_size_mode == VF_ZONE_SIZE_MODE_DOUBLE)
			offset += (1 << MSTORM_VF_ZONE_DEFAULT_SIZE_LOG) * vf_id;
		else if (vf_zone_size_mode == VF_ZONE_SIZE_MODE_QUAD)
			offset += 3 * (1 << MSTORM_VF_ZONE_DEFAULT_SIZE_LOG) * vf_id;
	}

	return offset;
}

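/* Illustrative arithmetic, assumed values only: with a doubled zone,
 * each VF zone spans 2^(MSTORM_VF_ZONE_DEFAULT_SIZE_LOG + 1) bytes,
 * i.e. one extra default-sized zone per preceding VF - the
 * (1 << LOG) * vf_id term above. A quad zone adds three extra default
 * zones per preceding VF, hence the factor of 3. The wrapper below is
 * a hypothetical example, not part of the HSI API.
 */
static u32 example_vf_prods_offset_double(struct ecore_hwfn *p_hwfn, u8 vf_id)
{
	/* Producer offset of queue 0 for vf_id when VF zones are doubled */
	return ecore_get_mstorm_eth_vf_prods_offset(p_hwfn, vf_id, 0,
						    VF_ZONE_SIZE_MODE_DOUBLE);
}
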
#ifndef LINUX_REMOVE
#define CRC8_INIT_VALUE 0xFF
#endif
static u8 cdu_crc8_table[CRC8_TABLE_SIZE];

/* Calculate and return CDU validation byte per connection type/region/cid */
static u8 ecore_calc_cdu_validation_byte(u8 conn_type, u8 region, u32 cid)
{
	const u8 validation_cfg = CDU_VALIDATION_DEFAULT_CFG;

	static u8 crc8_table_valid; /* automatically initialized to 0 */
	u8 crc, validation_byte = 0;
	u32 validation_string = 0;
	u32 data_to_crc;

	if (crc8_table_valid == 0) {
		OSAL_CRC8_POPULATE(cdu_crc8_table, 0x07);
		crc8_table_valid = 1;
	}

	/* The CRC is calculated on the String-to-compress:
	 * [31:8] = {CID[31:20],CID[11:0]}
	 * [7:4]  = Region
	 * [3:0]  = Type
	 */
	if ((validation_cfg >> CDU_CONTEXT_VALIDATION_CFG_USE_CID) & 1)
		validation_string |= (cid & 0xFFF00000) | ((cid & 0xFFF) << 8);

	if ((validation_cfg >> CDU_CONTEXT_VALIDATION_CFG_USE_REGION) & 1)
		validation_string |= ((region & 0xF) << 4);

	if ((validation_cfg >> CDU_CONTEXT_VALIDATION_CFG_USE_TYPE) & 1)
		validation_string |= (conn_type & 0xF);

	/* Convert to big-endian and calculate CRC8 */
	data_to_crc = OSAL_BE32_TO_CPU(validation_string);

	crc = OSAL_CRC8(cdu_crc8_table, (u8 *)&data_to_crc, sizeof(data_to_crc), CRC8_INIT_VALUE);

	/* The validation byte [7:0] is composed:
	 * for type A validation
	 * [7]   = active configuration bit
	 * [6:0] = crc[6:0]
	 *
	 * for type B validation
	 * [7]   = active configuration bit
	 * [6:3] = connection_type[3:0]
	 * [2:0] = crc[2:0]
	 */
	validation_byte |= ((validation_cfg >> CDU_CONTEXT_VALIDATION_CFG_USE_ACTIVE) & 1) << 7;

	if ((validation_cfg >> CDU_CONTEXT_VALIDATION_CFG_VALIDATION_TYPE_SHIFT) & 1)
		validation_byte |= ((conn_type & 0xF) << 3) | (crc & 0x7);
	else
		validation_byte |= crc & 0x7F;

	return validation_byte;
}

/* Calculate and set validation bytes for session context */
void ecore_calc_session_ctx_validation(void *p_ctx_mem, u16 ctx_size, u8 ctx_type, u32 cid)
{
	u8 *x_val_ptr, *t_val_ptr, *u_val_ptr, *p_ctx;

	p_ctx = (u8 *)p_ctx_mem;
	x_val_ptr = &p_ctx[con_region_offsets[0][ctx_type]];
	t_val_ptr = &p_ctx[con_region_offsets[1][ctx_type]];
	u_val_ptr = &p_ctx[con_region_offsets[2][ctx_type]];

	OSAL_MEMSET(p_ctx, 0, ctx_size);

	*x_val_ptr = ecore_calc_cdu_validation_byte(ctx_type, 3, cid);
	*t_val_ptr = ecore_calc_cdu_validation_byte(ctx_type, 4, cid);
	*u_val_ptr = ecore_calc_cdu_validation_byte(ctx_type, 5, cid);
}

/* Calculate and set validation bytes for task context */
void ecore_calc_task_ctx_validation(void *p_ctx_mem, u16 ctx_size, u8 ctx_type, u32 tid)
{
	u8 *p_ctx, *region1_val_ptr;

	p_ctx = (u8 *)p_ctx_mem;
	region1_val_ptr = &p_ctx[task_region_offsets[0][ctx_type]];

	OSAL_MEMSET(p_ctx, 0, ctx_size);

	*region1_val_ptr = ecore_calc_cdu_validation_byte(ctx_type, 1, tid);
}

/* Memset session context to 0 while preserving validation bytes */
void ecore_memset_session_ctx(void *p_ctx_mem, u32 ctx_size, u8 ctx_type)
{
	u8 *x_val_ptr, *t_val_ptr, *u_val_ptr, *p_ctx;
	u8 x_val, t_val, u_val;

	p_ctx = (u8 *)p_ctx_mem;
	x_val_ptr = &p_ctx[con_region_offsets[0][ctx_type]];
	t_val_ptr = &p_ctx[con_region_offsets[1][ctx_type]];
	u_val_ptr = &p_ctx[con_region_offsets[2][ctx_type]];

	/* Save existing validation bytes */
	x_val = *x_val_ptr;
	t_val = *t_val_ptr;
	u_val = *u_val_ptr;

	OSAL_MEMSET(p_ctx, 0, ctx_size);

	/* Restore validation bytes */
	*x_val_ptr = x_val;
	*t_val_ptr = t_val;
	*u_val_ptr = u_val;
}

/* Memset task context to 0 while preserving validation bytes */
void ecore_memset_task_ctx(void *p_ctx_mem, u32 ctx_size, u8 ctx_type)
{
	u8 *p_ctx, *region1_val_ptr;
	u8 region1_val;

	p_ctx = (u8 *)p_ctx_mem;
	region1_val_ptr = &p_ctx[task_region_offsets[0][ctx_type]];

	region1_val = *region1_val_ptr;

	OSAL_MEMSET(p_ctx, 0, ctx_size);

	*region1_val_ptr = region1_val;
}

/* Enable and configure context validation */
void ecore_enable_context_validation(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt)
{
	u32 ctx_validation;

	/* Enable validation for connection region 3: CCFC_CTX_VALID0[31:24] */
	ctx_validation = CDU_VALIDATION_DEFAULT_CFG << 24;
	ecore_wr(p_hwfn, p_ptt, CDU_REG_CCFC_CTX_VALID0, ctx_validation);

	/* Enable validation for connection region 5: CCFC_CTX_VALID1[15:8] */
	ctx_validation = CDU_VALIDATION_DEFAULT_CFG << 8;
	ecore_wr(p_hwfn, p_ptt, CDU_REG_CCFC_CTX_VALID1, ctx_validation);

	/* Enable validation for connection region 1: TCFC_CTX_VALID0[15:8] */
	ctx_validation = CDU_VALIDATION_DEFAULT_CFG << 8;
	ecore_wr(p_hwfn, p_ptt, CDU_REG_TCFC_CTX_VALID0, ctx_validation);
}

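/* Illustrative flow, not called anywhere in this file: a hypothetical
 * caller stamps a session context with validation bytes for its cid,
 * and can later wipe the context for reuse without losing those bytes.
 */
static void example_ctx_validation_flow(void *p_ctx_mem, u16 ctx_size,
					u8 ctx_type, u32 cid)
{
	/* Zero the context and write X/T/U validation bytes for this cid */
	ecore_calc_session_ctx_validation(p_ctx_mem, ctx_size, ctx_type, cid);

	/* Later: clear the context while keeping the validation bytes */
	ecore_memset_session_ctx(p_ctx_mem, ctx_size, ctx_type);
}
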
#define RSS_IND_TABLE_BASE_ADDR		4112
#define RSS_IND_TABLE_VPORT_SIZE	16
#define RSS_IND_TABLE_ENTRY_PER_LINE	8

/* Update RSS indirection table entry. */
void ecore_update_eth_rss_ind_table_entry(struct ecore_hwfn *p_hwfn,
					  struct ecore_ptt *p_ptt,
					  u8 rss_id,
					  u8 ind_table_index,
					  u16 ind_table_value)
{
	u32 cnt, rss_addr;
	u32 *reg_val;
	u16 rss_ind_entry[RSS_IND_TABLE_ENTRY_PER_LINE];
	u16 rss_ind_mask[RSS_IND_TABLE_ENTRY_PER_LINE];

	/* Get entry address: each RAM line holds eight 16-bit entries */
	rss_addr = RSS_IND_TABLE_BASE_ADDR +
		   RSS_IND_TABLE_VPORT_SIZE * rss_id +
		   ind_table_index / RSS_IND_TABLE_ENTRY_PER_LINE;

	/* Prepare update command: mask in only the requested entry and
	 * leave the other entries in the line unchanged.
	 */
	ind_table_index %= RSS_IND_TABLE_ENTRY_PER_LINE;

	for (cnt = 0; cnt < RSS_IND_TABLE_ENTRY_PER_LINE; cnt++)
	{
		if (cnt == ind_table_index)
		{
			rss_ind_entry[cnt] = ind_table_value;
			rss_ind_mask[cnt] = 0xFFFF;
		}
		else
		{
			rss_ind_entry[cnt] = 0;
			rss_ind_mask[cnt] = 0;
		}
	}

	/* Update entry in HW */
	ecore_wr(p_hwfn, p_ptt, RSS_REG_RSS_RAM_ADDR, rss_addr);

	reg_val = (u32 *)rss_ind_mask;
	ecore_wr(p_hwfn, p_ptt, RSS_REG_RSS_RAM_MASK, reg_val[0]);
	ecore_wr(p_hwfn, p_ptt, RSS_REG_RSS_RAM_MASK + 4, reg_val[1]);
	ecore_wr(p_hwfn, p_ptt, RSS_REG_RSS_RAM_MASK + 8, reg_val[2]);
	ecore_wr(p_hwfn, p_ptt, RSS_REG_RSS_RAM_MASK + 12, reg_val[3]);

	reg_val = (u32 *)rss_ind_entry;
	ecore_wr(p_hwfn, p_ptt, RSS_REG_RSS_RAM_DATA, reg_val[0]);
	ecore_wr(p_hwfn, p_ptt, RSS_REG_RSS_RAM_DATA + 4, reg_val[1]);
	ecore_wr(p_hwfn, p_ptt, RSS_REG_RSS_RAM_DATA + 8, reg_val[2]);
	ecore_wr(p_hwfn, p_ptt, RSS_REG_RSS_RAM_DATA + 12, reg_val[3]);
}
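
/* Illustrative usage, hypothetical values: write value 3 into
 * indirection-table entry 5 of RSS engine 0. Only that entry is
 * rewritten; the per-entry mask leaves the other seven entries of the
 * same RAM line untouched. The wrapper below is an example, not part
 * of the HSI API.
 */
static void example_rss_steer(struct ecore_hwfn *p_hwfn,
			      struct ecore_ptt *p_ptt)
{
	ecore_update_eth_rss_ind_table_entry(p_hwfn, p_ptt, 0 /* rss_id */,
					     5 /* entry */, 3 /* value */);
}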