// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
/* Copyright (c) 2019 Mellanox Technologies. */

#include "en/params.h"
#include "en/txrx.h"
#include "en/port.h"
#include "en_accel/en_accel.h"
#include "en_accel/ipsec.h"
#include <net/xdp_sock_drv.h>

static u8 mlx5e_mpwrq_min_page_shift(struct mlx5_core_dev *mdev)
{
        u8 min_page_shift = MLX5_CAP_GEN_2(mdev, log_min_mkey_entity_size);

        return min_page_shift ? : 12;
}

u8 mlx5e_mpwrq_page_shift(struct mlx5_core_dev *mdev, struct mlx5e_xsk_param *xsk)
{
        u8 req_page_shift = xsk ? order_base_2(xsk->chunk_size) : PAGE_SHIFT;
        u8 min_page_shift = mlx5e_mpwrq_min_page_shift(mdev);

        /* Regular RQ uses order-0 pages, the NIC must be able to map them. */
        if (WARN_ON_ONCE(!xsk && req_page_shift < min_page_shift))
                min_page_shift = req_page_shift;

        return max(req_page_shift, min_page_shift);
}

enum mlx5e_mpwrq_umr_mode
mlx5e_mpwrq_umr_mode(struct mlx5_core_dev *mdev, struct mlx5e_xsk_param *xsk)
{
        /* Different memory management schemes use different mechanisms to map
         * user-mode memory. The stricter the guarantees we have, the faster
         * the mechanism we can use:
         * 1. MTT - direct mapping in page granularity.
         * 2. KSM - indirect mapping to another MKey to arbitrary addresses, but
         *    all mappings have the same size.
         * 3. KLM - indirect mapping to another MKey to arbitrary addresses, and
         *    mappings can have different sizes.
         */
        u8 page_shift = mlx5e_mpwrq_page_shift(mdev, xsk);
        bool unaligned = xsk ? xsk->unaligned : false;
        bool oversized = false;

        if (xsk) {
                oversized = xsk->chunk_size < (1 << page_shift);
                WARN_ON_ONCE(xsk->chunk_size > (1 << page_shift));
        }

        /* XSK frame size doesn't match the UMR page size, either because the
         * frame size is not a power of two, or it's smaller than the minimal
         * page size supported by the firmware.
         * It's possible to receive packets bigger than MTU in certain setups.
         * To avoid writing over the XSK frame boundary, the top region of each
         * stride is mapped to a garbage page, resulting in two mappings of
         * different sizes per frame.
         */
        if (oversized) {
                /* An optimization for frame sizes equal to 3 * power_of_two.
                 * 3 KSMs point to the frame, and one KSM points to the garbage
                 * page, which works faster than KLM.
                 */
                if (xsk->chunk_size % 3 == 0 && is_power_of_2(xsk->chunk_size / 3))
                        return MLX5E_MPWRQ_UMR_MODE_TRIPLE;

                return MLX5E_MPWRQ_UMR_MODE_OVERSIZED;
        }

        /* XSK frames can start at arbitrary unaligned locations, but they all
         * have the same size, which is a power of two. This allows optimizing
         * to one KSM per frame.
         */
        if (unaligned)
                return MLX5E_MPWRQ_UMR_MODE_UNALIGNED;

        /* XSK: frames are naturally aligned, MTT can be used.
         * Non-XSK: Allocations happen in units of CPU pages, therefore, the
         * mappings are naturally aligned.
         */
        return MLX5E_MPWRQ_UMR_MODE_ALIGNED;
}

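/* Size of a single UMR translation entry for each mode. OVERSIZED uses two
 * KLMs per XSK frame (one for the frame data and one for the garbage page),
 * and TRIPLE reserves four KSM slots per frame (three pointing into the frame
 * and one pointing to the garbage page).
 */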
u8 mlx5e_mpwrq_umr_entry_size(enum mlx5e_mpwrq_umr_mode mode)
{
        switch (mode) {
        case MLX5E_MPWRQ_UMR_MODE_ALIGNED:
                return sizeof(struct mlx5_mtt);
        case MLX5E_MPWRQ_UMR_MODE_UNALIGNED:
                return sizeof(struct mlx5_ksm);
        case MLX5E_MPWRQ_UMR_MODE_OVERSIZED:
                return sizeof(struct mlx5_klm) * 2;
        case MLX5E_MPWRQ_UMR_MODE_TRIPLE:
                return sizeof(struct mlx5_ksm) * 4;
        }
        WARN_ONCE(1, "MPWRQ UMR mode %d is not known\n", mode);
        return 0;
}

u8 mlx5e_mpwrq_log_wqe_sz(struct mlx5_core_dev *mdev, u8 page_shift,
                          enum mlx5e_mpwrq_umr_mode umr_mode)
{
        u8 umr_entry_size = mlx5e_mpwrq_umr_entry_size(umr_mode);
        u8 max_pages_per_wqe, max_log_mpwqe_size;
        u16 max_wqe_size;

        /* Keep in sync with MLX5_MPWRQ_MAX_PAGES_PER_WQE. */
        max_wqe_size = mlx5e_get_max_sq_aligned_wqebbs(mdev) * MLX5_SEND_WQE_BB;
        max_pages_per_wqe = ALIGN_DOWN(max_wqe_size - sizeof(struct mlx5e_umr_wqe),
                                       MLX5_UMR_FLEX_ALIGNMENT) / umr_entry_size;
        max_log_mpwqe_size = ilog2(max_pages_per_wqe) + page_shift;

        WARN_ON_ONCE(max_log_mpwqe_size < MLX5E_ORDER2_MAX_PACKET_MTU);

        return min_t(u8, max_log_mpwqe_size, MLX5_MPWRQ_MAX_LOG_WQE_SZ);
}

u8 mlx5e_mpwrq_pages_per_wqe(struct mlx5_core_dev *mdev, u8 page_shift,
                             enum mlx5e_mpwrq_umr_mode umr_mode)
{
        u8 log_wqe_sz = mlx5e_mpwrq_log_wqe_sz(mdev, page_shift, umr_mode);
        u8 pages_per_wqe;

        pages_per_wqe = log_wqe_sz > page_shift ? (1 << (log_wqe_sz - page_shift)) : 1;

        /* Two MTTs are needed to form an octword. The number of MTTs is encoded
         * in octwords in a UMR WQE, so we need at least two to avoid mapping
         * garbage addresses.
         */
        if (WARN_ON_ONCE(pages_per_wqe < 2 && umr_mode == MLX5E_MPWRQ_UMR_MODE_ALIGNED))
                pages_per_wqe = 2;

        /* Sanity check for further calculations to succeed. */
        BUILD_BUG_ON(MLX5_MPWRQ_MAX_PAGES_PER_WQE > 64);
        if (WARN_ON_ONCE(pages_per_wqe > MLX5_MPWRQ_MAX_PAGES_PER_WQE))
                return MLX5_MPWRQ_MAX_PAGES_PER_WQE;

        return pages_per_wqe;
}

u16 mlx5e_mpwrq_umr_wqe_sz(struct mlx5_core_dev *mdev, u8 page_shift,
                           enum mlx5e_mpwrq_umr_mode umr_mode)
{
        u8 pages_per_wqe = mlx5e_mpwrq_pages_per_wqe(mdev, page_shift, umr_mode);
        u8 umr_entry_size = mlx5e_mpwrq_umr_entry_size(umr_mode);
        u16 umr_wqe_sz;

        umr_wqe_sz = sizeof(struct mlx5e_umr_wqe) +
                ALIGN(pages_per_wqe * umr_entry_size, MLX5_UMR_FLEX_ALIGNMENT);

        WARN_ON_ONCE(DIV_ROUND_UP(umr_wqe_sz, MLX5_SEND_WQE_DS) > MLX5_WQE_CTRL_DS_MASK);

        return umr_wqe_sz;
}

u8 mlx5e_mpwrq_umr_wqebbs(struct mlx5_core_dev *mdev, u8 page_shift,
                          enum mlx5e_mpwrq_umr_mode umr_mode)
{
        return DIV_ROUND_UP(mlx5e_mpwrq_umr_wqe_sz(mdev, page_shift, umr_mode),
                            MLX5_SEND_WQE_BB);
}

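/* An illustrative sketch of the ALIGN arithmetic below, assuming 64-byte
 * WQEBBs and 8-byte MTT entries (aligned mode): a WQE covering 8 pages rounds
 * 8 + 1 up to ALIGN(9, 64 / 8) = 16 entries, so the translation list,
 * including the extra overflow page, is padded out to whole WQEBBs.
 */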
u8 mlx5e_mpwrq_mtts_per_wqe(struct mlx5_core_dev *mdev, u8 page_shift,
                            enum mlx5e_mpwrq_umr_mode umr_mode)
{
        u8 pages_per_wqe = mlx5e_mpwrq_pages_per_wqe(mdev, page_shift, umr_mode);

        /* Add another page as a buffer between WQEs. This page will absorb
         * write overflow by the hardware, when receiving packets larger than
         * MTU. These oversize packets are dropped by the driver at a later
         * stage.
         */
        return ALIGN(pages_per_wqe + 1,
                     MLX5_SEND_WQE_BB / mlx5e_mpwrq_umr_entry_size(umr_mode));
}

u32 mlx5e_mpwrq_max_num_entries(struct mlx5_core_dev *mdev,
                                enum mlx5e_mpwrq_umr_mode umr_mode)
{
        /* Same limits apply to KSMs and KLMs. */
        u32 klm_limit = min(MLX5E_MAX_RQ_NUM_KSMS,
                            1 << MLX5_CAP_GEN(mdev, log_max_klm_list_size));

        switch (umr_mode) {
        case MLX5E_MPWRQ_UMR_MODE_ALIGNED:
                return MLX5E_MAX_RQ_NUM_MTTS;
        case MLX5E_MPWRQ_UMR_MODE_UNALIGNED:
                return klm_limit;
        case MLX5E_MPWRQ_UMR_MODE_OVERSIZED:
                /* Each entry is two KLMs. */
                return klm_limit / 2;
        case MLX5E_MPWRQ_UMR_MODE_TRIPLE:
                /* Each entry is four KSMs. */
                return klm_limit / 4;
        }
        WARN_ONCE(1, "MPWRQ UMR mode %d is not known\n", umr_mode);
        return 0;
}

static u8 mlx5e_mpwrq_max_log_rq_size(struct mlx5_core_dev *mdev, u8 page_shift,
                                      enum mlx5e_mpwrq_umr_mode umr_mode)
{
        u8 mtts_per_wqe = mlx5e_mpwrq_mtts_per_wqe(mdev, page_shift, umr_mode);
        u32 max_entries = mlx5e_mpwrq_max_num_entries(mdev, umr_mode);

        return ilog2(max_entries / mtts_per_wqe);
}

u8 mlx5e_mpwrq_max_log_rq_pkts(struct mlx5_core_dev *mdev, u8 page_shift,
                               enum mlx5e_mpwrq_umr_mode umr_mode)
{
        return mlx5e_mpwrq_max_log_rq_size(mdev, page_shift, umr_mode) +
                mlx5e_mpwrq_log_wqe_sz(mdev, page_shift, umr_mode) -
                MLX5E_ORDER2_MAX_PACKET_MTU;
}

u16 mlx5e_get_linear_rq_headroom(struct mlx5e_params *params,
                                 struct mlx5e_xsk_param *xsk)
{
        u16 headroom;

        if (xsk)
                return xsk->headroom;

        headroom = NET_IP_ALIGN;
        if (params->xdp_prog)
                headroom += XDP_PACKET_HEADROOM;
        else
                headroom += MLX5_RX_HEADROOM;

        return headroom;
}

static u32 mlx5e_rx_get_linear_sz_xsk(struct mlx5e_params *params,
                                      struct mlx5e_xsk_param *xsk)
{
        u32 hw_mtu = MLX5E_SW2HW_MTU(params, params->sw_mtu);

        return xsk->headroom + hw_mtu;
}

static u32 mlx5e_rx_get_linear_sz_skb(struct mlx5e_params *params, bool xsk)
{
        /* SKBs built on XDP_PASS on XSK RQs don't have headroom. */
        u16 headroom = xsk ? 0 : mlx5e_get_linear_rq_headroom(params, NULL);
        u32 hw_mtu = MLX5E_SW2HW_MTU(params, params->sw_mtu);

        return MLX5_SKB_FRAG_SZ(headroom + hw_mtu);
}

static u32 mlx5e_rx_get_linear_stride_sz(struct mlx5_core_dev *mdev,
                                         struct mlx5e_params *params,
                                         struct mlx5e_xsk_param *xsk,
                                         bool mpwqe)
{
        u32 sz;

        /* XSK frames are mapped as individual pages, because frames may come in
         * an arbitrary order from random locations in the UMEM.
         */
        if (xsk)
                return mpwqe ? 1 << mlx5e_mpwrq_page_shift(mdev, xsk) : PAGE_SIZE;

        sz = roundup_pow_of_two(mlx5e_rx_get_linear_sz_skb(params, false));

        /* XDP in mlx5e doesn't support multiple packets per page.
         * Do not assume sz <= PAGE_SIZE if params->xdp_prog is set.
         */
        return params->xdp_prog && sz < PAGE_SIZE ? PAGE_SIZE : sz;
}

static u8 mlx5e_mpwqe_log_pkts_per_wqe(struct mlx5_core_dev *mdev,
                                       struct mlx5e_params *params,
                                       struct mlx5e_xsk_param *xsk)
{
        u32 linear_stride_sz = mlx5e_rx_get_linear_stride_sz(mdev, params, xsk, true);
        enum mlx5e_mpwrq_umr_mode umr_mode = mlx5e_mpwrq_umr_mode(mdev, xsk);
        u8 page_shift = mlx5e_mpwrq_page_shift(mdev, xsk);

        return mlx5e_mpwrq_log_wqe_sz(mdev, page_shift, umr_mode) -
                order_base_2(linear_stride_sz);
}

bool mlx5e_rx_is_linear_skb(struct mlx5_core_dev *mdev,
                            struct mlx5e_params *params,
                            struct mlx5e_xsk_param *xsk)
{
        if (params->packet_merge.type != MLX5E_PACKET_MERGE_NONE)
                return false;

        /* Both XSK and non-XSK cases allocate an SKB on XDP_PASS. Packet data
         * must fit into a CPU page.
         */
        if (mlx5e_rx_get_linear_sz_skb(params, xsk) > PAGE_SIZE)
                return false;

        /* XSK frames must be big enough to hold the packet data. */
        if (xsk && mlx5e_rx_get_linear_sz_xsk(params, xsk) > xsk->chunk_size)
                return false;

        return true;
}

static bool mlx5e_verify_rx_mpwqe_strides(struct mlx5_core_dev *mdev,
                                          u8 log_stride_sz, u8 log_num_strides,
                                          u8 page_shift,
                                          enum mlx5e_mpwrq_umr_mode umr_mode)
{
        if (log_stride_sz + log_num_strides !=
            mlx5e_mpwrq_log_wqe_sz(mdev, page_shift, umr_mode))
                return false;

        if (log_stride_sz < MLX5_MPWQE_LOG_STRIDE_SZ_BASE ||
            log_stride_sz > MLX5_MPWQE_LOG_STRIDE_SZ_MAX)
                return false;

        if (log_num_strides > MLX5_MPWQE_LOG_NUM_STRIDES_MAX)
                return false;

        if (MLX5_CAP_GEN(mdev, ext_stride_num_range))
                return log_num_strides >= MLX5_MPWQE_LOG_NUM_STRIDES_EXT_BASE;

        return log_num_strides >= MLX5_MPWQE_LOG_NUM_STRIDES_BASE;
}

bool mlx5e_verify_params_rx_mpwqe_strides(struct mlx5_core_dev *mdev,
                                          struct mlx5e_params *params,
                                          struct mlx5e_xsk_param *xsk)
{
        u8 log_wqe_num_of_strides = mlx5e_mpwqe_get_log_num_strides(mdev, params, xsk);
        u8 log_wqe_stride_size = mlx5e_mpwqe_get_log_stride_size(mdev, params, xsk);
        enum mlx5e_mpwrq_umr_mode umr_mode = mlx5e_mpwrq_umr_mode(mdev, xsk);
        u8 page_shift = mlx5e_mpwrq_page_shift(mdev, xsk);

        return mlx5e_verify_rx_mpwqe_strides(mdev, log_wqe_stride_size,
                                             log_wqe_num_of_strides,
                                             page_shift, umr_mode);
}

bool mlx5e_rx_mpwqe_is_linear_skb(struct mlx5_core_dev *mdev,
                                  struct mlx5e_params *params,
                                  struct mlx5e_xsk_param *xsk)
{
        enum mlx5e_mpwrq_umr_mode umr_mode = mlx5e_mpwrq_umr_mode(mdev, xsk);
        u8 page_shift = mlx5e_mpwrq_page_shift(mdev, xsk);
        u8 log_num_strides;
        u8 log_stride_sz;
        u8 log_wqe_sz;

        if (!mlx5e_rx_is_linear_skb(mdev, params, xsk))
                return false;

        log_stride_sz = order_base_2(mlx5e_rx_get_linear_stride_sz(mdev, params, xsk, true));
        log_wqe_sz = mlx5e_mpwrq_log_wqe_sz(mdev, page_shift, umr_mode);

        if (log_wqe_sz < log_stride_sz)
                return false;

        log_num_strides = log_wqe_sz - log_stride_sz;

        return mlx5e_verify_rx_mpwqe_strides(mdev, log_stride_sz,
                                             log_num_strides, page_shift,
                                             umr_mode);
}

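/* Illustrative example of the calculation below: with log_rq_mtu_frames = 13
 * (8192 MTU-sized frames) and 8 packets fitting in one MPWQE
 * (log_pkts_per_wqe = 3), the RQ is sized to 2^(13 - 3) = 1024 MPWQEs, subject
 * to the minimum and maximum clamps.
 */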
u8 mlx5e_mpwqe_get_log_rq_size(struct mlx5_core_dev *mdev,
                               struct mlx5e_params *params,
                               struct mlx5e_xsk_param *xsk)
{
        enum mlx5e_mpwrq_umr_mode umr_mode = mlx5e_mpwrq_umr_mode(mdev, xsk);
        u8 log_pkts_per_wqe, page_shift, max_log_rq_size;

        log_pkts_per_wqe = mlx5e_mpwqe_log_pkts_per_wqe(mdev, params, xsk);
        page_shift = mlx5e_mpwrq_page_shift(mdev, xsk);
        max_log_rq_size = mlx5e_mpwrq_max_log_rq_size(mdev, page_shift, umr_mode);

        /* Numbers are unsigned, don't subtract to avoid underflow. */
        if (params->log_rq_mtu_frames <
            log_pkts_per_wqe + MLX5E_PARAMS_MINIMUM_LOG_RQ_SIZE_MPW)
                return MLX5E_PARAMS_MINIMUM_LOG_RQ_SIZE_MPW;

        /* Ethtool's rx_max_pending is calculated for regular RQ, that uses
         * pages of PAGE_SIZE. Max length of an XSK RQ might differ if it uses a
         * frame size not equal to PAGE_SIZE.
         * A stricter condition is checked in mlx5e_mpwrq_validate_xsk, WARN on
         * unexpected failure.
         */
        if (WARN_ON_ONCE(params->log_rq_mtu_frames > log_pkts_per_wqe + max_log_rq_size))
                return max_log_rq_size;

        return params->log_rq_mtu_frames - log_pkts_per_wqe;
}

u8 mlx5e_shampo_get_log_hd_entry_size(struct mlx5_core_dev *mdev,
                                      struct mlx5e_params *params)
{
        return order_base_2(DIV_ROUND_UP(MLX5E_RX_MAX_HEAD, MLX5E_SHAMPO_WQ_BASE_HEAD_ENTRY_SIZE));
}

u8 mlx5e_shampo_get_log_rsrv_size(struct mlx5_core_dev *mdev,
                                  struct mlx5e_params *params)
{
        return order_base_2(MLX5E_SHAMPO_WQ_RESRV_SIZE / MLX5E_SHAMPO_WQ_BASE_RESRV_SIZE);
}

u8 mlx5e_shampo_get_log_pkt_per_rsrv(struct mlx5_core_dev *mdev,
                                     struct mlx5e_params *params)
{
        u32 resrv_size = BIT(mlx5e_shampo_get_log_rsrv_size(mdev, params)) *
                         PAGE_SIZE;

        return order_base_2(DIV_ROUND_UP(resrv_size, params->sw_mtu));
}

u8 mlx5e_mpwqe_get_log_stride_size(struct mlx5_core_dev *mdev,
                                   struct mlx5e_params *params,
                                   struct mlx5e_xsk_param *xsk)
{
        if (mlx5e_rx_mpwqe_is_linear_skb(mdev, params, xsk))
                return order_base_2(mlx5e_rx_get_linear_stride_sz(mdev, params, xsk, true));

        /* XDP in mlx5e doesn't support multiple packets per page. */
        if (params->xdp_prog)
                return PAGE_SHIFT;

        return MLX5_MPWRQ_DEF_LOG_STRIDE_SZ(mdev);
}

u8 mlx5e_mpwqe_get_log_num_strides(struct mlx5_core_dev *mdev,
                                   struct mlx5e_params *params,
                                   struct mlx5e_xsk_param *xsk)
{
        enum mlx5e_mpwrq_umr_mode umr_mode = mlx5e_mpwrq_umr_mode(mdev, xsk);
        u8 page_shift = mlx5e_mpwrq_page_shift(mdev, xsk);
        u8 log_wqe_size, log_stride_size;

        log_wqe_size = mlx5e_mpwrq_log_wqe_sz(mdev, page_shift, umr_mode);
        log_stride_size = mlx5e_mpwqe_get_log_stride_size(mdev, params, xsk);
        WARN(log_wqe_size < log_stride_size,
             "Log WQE size %u < log stride size %u (page shift %u, umr mode %d, xsk on? %d)\n",
             log_wqe_size, log_stride_size, page_shift, umr_mode, !!xsk);
        return log_wqe_size - log_stride_size;
}

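/* Worked example (illustrative numbers): with UMR_WQE_BULK == 2, an RQ of
 * 16 WQEs uses a minimum bulk of min(2, 16 / 2 - 1) = 2, while a tiny RQ of
 * 4 WQEs falls back to min(2, 4 / 2 - 1) = 1.
 */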
u8 mlx5e_mpwqe_get_min_wqe_bulk(unsigned int wq_sz)
{
#define UMR_WQE_BULK (2)
        return min_t(unsigned int, UMR_WQE_BULK, wq_sz / 2 - 1);
}

u16 mlx5e_get_rq_headroom(struct mlx5_core_dev *mdev,
                          struct mlx5e_params *params,
                          struct mlx5e_xsk_param *xsk)
{
        u16 linear_headroom = mlx5e_get_linear_rq_headroom(params, xsk);

        if (params->rq_wq_type == MLX5_WQ_TYPE_CYCLIC)
                return linear_headroom;

        if (mlx5e_rx_mpwqe_is_linear_skb(mdev, params, xsk))
                return linear_headroom;

        if (params->packet_merge.type == MLX5E_PACKET_MERGE_SHAMPO)
                return linear_headroom;

        return 0;
}

u16 mlx5e_calc_sq_stop_room(struct mlx5_core_dev *mdev, struct mlx5e_params *params)
{
        bool is_mpwqe = MLX5E_GET_PFLAG(params, MLX5E_PFLAG_SKB_TX_MPWQE);
        u16 stop_room;

        stop_room = mlx5e_ktls_get_stop_room(mdev, params);
        stop_room += mlx5e_stop_room_for_max_wqe(mdev);
        if (is_mpwqe)
                /* An MPWQE can take up to the maximum cacheline-aligned WQE on
                 * top of all the normal stop room, when a new packet breaks
                 * the active MPWQE session and allocates its WQEs right away.
                 */
                stop_room += mlx5e_stop_room_for_mpwqe(mdev);

        return stop_room;
}

int mlx5e_validate_params(struct mlx5_core_dev *mdev, struct mlx5e_params *params)
{
        size_t sq_size = 1 << params->log_sq_size;
        u16 stop_room;

        stop_room = mlx5e_calc_sq_stop_room(mdev, params);
        if (stop_room >= sq_size) {
                mlx5_core_err(mdev, "Stop room %u is bigger than the SQ size %zu\n",
                              stop_room, sq_size);
                return -EINVAL;
        }

        return 0;
}

static struct dim_cq_moder mlx5e_get_def_tx_moderation(u8 cq_period_mode)
{
        struct dim_cq_moder moder = {};

        moder.cq_period_mode = cq_period_mode;
        moder.pkts = MLX5E_PARAMS_DEFAULT_TX_CQ_MODERATION_PKTS;
        moder.usec = MLX5E_PARAMS_DEFAULT_TX_CQ_MODERATION_USEC;
        if (cq_period_mode == MLX5_CQ_PERIOD_MODE_START_FROM_CQE)
                moder.usec = MLX5E_PARAMS_DEFAULT_TX_CQ_MODERATION_USEC_FROM_CQE;

        return moder;
}

static struct dim_cq_moder mlx5e_get_def_rx_moderation(u8 cq_period_mode)
{
        struct dim_cq_moder moder = {};

        moder.cq_period_mode = cq_period_mode;
        moder.pkts = MLX5E_PARAMS_DEFAULT_RX_CQ_MODERATION_PKTS;
        moder.usec = MLX5E_PARAMS_DEFAULT_RX_CQ_MODERATION_USEC;
        if (cq_period_mode == MLX5_CQ_PERIOD_MODE_START_FROM_CQE)
                moder.usec = MLX5E_PARAMS_DEFAULT_RX_CQ_MODERATION_USEC_FROM_CQE;

        return moder;
}

static u8 mlx5_to_net_dim_cq_period_mode(u8 cq_period_mode)
{
        return cq_period_mode == MLX5_CQ_PERIOD_MODE_START_FROM_CQE ?
                DIM_CQ_PERIOD_MODE_START_FROM_CQE :
                DIM_CQ_PERIOD_MODE_START_FROM_EQE;
}

void mlx5e_reset_tx_moderation(struct mlx5e_params *params, u8 cq_period_mode)
{
        if (params->tx_dim_enabled) {
                u8 dim_period_mode = mlx5_to_net_dim_cq_period_mode(cq_period_mode);

                params->tx_cq_moderation = net_dim_get_def_tx_moderation(dim_period_mode);
        } else {
                params->tx_cq_moderation = mlx5e_get_def_tx_moderation(cq_period_mode);
        }
}

void mlx5e_reset_rx_moderation(struct mlx5e_params *params, u8 cq_period_mode)
{
        if (params->rx_dim_enabled) {
                u8 dim_period_mode = mlx5_to_net_dim_cq_period_mode(cq_period_mode);

                params->rx_cq_moderation = net_dim_get_def_rx_moderation(dim_period_mode);
        } else {
                params->rx_cq_moderation = mlx5e_get_def_rx_moderation(cq_period_mode);
        }
}

void mlx5e_set_tx_cq_mode_params(struct mlx5e_params *params, u8 cq_period_mode)
{
        mlx5e_reset_tx_moderation(params, cq_period_mode);
        MLX5E_SET_PFLAG(params, MLX5E_PFLAG_TX_CQE_BASED_MODER,
                        params->tx_cq_moderation.cq_period_mode ==
                        MLX5_CQ_PERIOD_MODE_START_FROM_CQE);
}

void mlx5e_set_rx_cq_mode_params(struct mlx5e_params *params, u8 cq_period_mode)
{
        mlx5e_reset_rx_moderation(params, cq_period_mode);
        MLX5E_SET_PFLAG(params, MLX5E_PFLAG_RX_CQE_BASED_MODER,
                        params->rx_cq_moderation.cq_period_mode ==
                        MLX5_CQ_PERIOD_MODE_START_FROM_CQE);
}

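/* Illustrative example: a port whose max link speed is 100000 Mb/s behind a
 * PCIe link that reports only 32000 Mb/s of available bandwidth is treated as
 * PCI-bound, since 100000 > MLX5E_SLOW_PCI_RATIO * 32000.
 */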
bool slow_pci_heuristic(struct mlx5_core_dev *mdev)
{
        u32 link_speed = 0;
        u32 pci_bw = 0;

        mlx5_port_max_linkspeed(mdev, &link_speed);
        pci_bw = pcie_bandwidth_available(mdev->pdev, NULL, NULL, NULL);
        mlx5_core_dbg_once(mdev, "Max link speed = %d, PCI BW = %d\n",
                           link_speed, pci_bw);

#define MLX5E_SLOW_PCI_RATIO (2)

        return link_speed && pci_bw &&
               link_speed > MLX5E_SLOW_PCI_RATIO * pci_bw;
}

int mlx5e_mpwrq_validate_regular(struct mlx5_core_dev *mdev, struct mlx5e_params *params)
{
        enum mlx5e_mpwrq_umr_mode umr_mode = mlx5e_mpwrq_umr_mode(mdev, NULL);
        u8 page_shift = mlx5e_mpwrq_page_shift(mdev, NULL);

        if (!mlx5e_check_fragmented_striding_rq_cap(mdev, page_shift, umr_mode))
                return -EOPNOTSUPP;

        return 0;
}

int mlx5e_mpwrq_validate_xsk(struct mlx5_core_dev *mdev, struct mlx5e_params *params,
                             struct mlx5e_xsk_param *xsk)
{
        enum mlx5e_mpwrq_umr_mode umr_mode = mlx5e_mpwrq_umr_mode(mdev, xsk);
        u8 page_shift = mlx5e_mpwrq_page_shift(mdev, xsk);
        u16 max_mtu_pkts;

        if (!mlx5e_check_fragmented_striding_rq_cap(mdev, page_shift, umr_mode)) {
                mlx5_core_err(mdev, "Striding RQ for XSK can't be activated with page_shift %u and umr_mode %d\n",
                              page_shift, umr_mode);
                return -EOPNOTSUPP;
        }

        if (!mlx5e_rx_mpwqe_is_linear_skb(mdev, params, xsk)) {
                mlx5_core_err(mdev, "Striding RQ linear mode for XSK can't be activated with current params\n");
                return -EINVAL;
        }

        /* Current RQ length is too big for the given frame size, the
         * needed number of WQEs exceeds the maximum.
         */
        max_mtu_pkts = min_t(u8, MLX5E_PARAMS_MAXIMUM_LOG_RQ_SIZE,
                             mlx5e_mpwrq_max_log_rq_pkts(mdev, page_shift, umr_mode));
        if (params->log_rq_mtu_frames > max_mtu_pkts) {
                mlx5_core_err(mdev, "Current RQ length %d is too big for XSK with given frame size %u\n",
                              1 << params->log_rq_mtu_frames, xsk->chunk_size);
                return -EINVAL;
        }

        return 0;
}

void mlx5e_init_rq_type_params(struct mlx5_core_dev *mdev,
                               struct mlx5e_params *params)
{
        params->log_rq_mtu_frames = is_kdump_kernel() ?
                MLX5E_PARAMS_MINIMUM_LOG_RQ_SIZE :
                MLX5E_PARAMS_DEFAULT_LOG_RQ_SIZE;
}

void mlx5e_set_rq_type(struct mlx5_core_dev *mdev, struct mlx5e_params *params)
{
        params->rq_wq_type = MLX5E_GET_PFLAG(params, MLX5E_PFLAG_RX_STRIDING_RQ) ?
                MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ :
                MLX5_WQ_TYPE_CYCLIC;
}

void mlx5e_build_rq_params(struct mlx5_core_dev *mdev,
                           struct mlx5e_params *params)
{
        /* Prefer Striding RQ, unless any of the following holds:
         * - Striding RQ configuration is not possible/supported.
         * - CQE compression is ON, and stride_index mini_cqe layout is not supported.
         * - Legacy RQ would use linear SKB while Striding RQ would use non-linear.
         *
         * No XSK params: checking the availability of striding RQ in general.
         */
        if ((!MLX5E_GET_PFLAG(params, MLX5E_PFLAG_RX_CQE_COMPRESS) ||
             MLX5_CAP_GEN(mdev, mini_cqe_resp_stride_index)) &&
            !mlx5e_mpwrq_validate_regular(mdev, params) &&
            (mlx5e_rx_mpwqe_is_linear_skb(mdev, params, NULL) ||
             !mlx5e_rx_is_linear_skb(mdev, params, NULL)))
                MLX5E_SET_PFLAG(params, MLX5E_PFLAG_RX_STRIDING_RQ, true);
        mlx5e_set_rq_type(mdev, params);
        mlx5e_init_rq_type_params(mdev, params);
}

/* Build queue parameters */

void mlx5e_build_create_cq_param(struct mlx5e_create_cq_param *ccp, struct mlx5e_channel *c)
{
        *ccp = (struct mlx5e_create_cq_param) {
                .napi = &c->napi,
                .ch_stats = c->stats,
                .node = cpu_to_node(c->cpu),
                .ix = c->ix,
        };
}

static int mlx5e_max_nonlinear_mtu(int first_frag_size, int frag_size, bool xdp)
{
        if (xdp)
                /* XDP requires all fragments to be of the same size. */
                return first_frag_size + (MLX5E_MAX_RX_FRAGS - 1) * frag_size;

        /* Optimization for small packets: the last fragment is bigger than the others. */
        return first_frag_size + (MLX5E_MAX_RX_FRAGS - 2) * frag_size + PAGE_SIZE;
}

static void mlx5e_rx_compute_wqe_bulk_params(struct mlx5e_params *params,
                                             struct mlx5e_rq_frags_info *info)
{
        u16 bulk_bound_rq_size = (1 << params->log_rq_mtu_frames) / 4;
        u32 bulk_bound_rq_size_in_bytes;
        u32 sum_frag_strides = 0;
        u32 wqe_bulk_in_bytes;
        u16 split_factor;
        u32 wqe_bulk;
        int i;

        for (i = 0; i < info->num_frags; i++)
                sum_frag_strides += info->arr[i].frag_stride;

        /* For MTUs larger than PAGE_SIZE, align to PAGE_SIZE to reflect the
         * amount of consumed pages per wqe in bytes.
         */
        if (sum_frag_strides > PAGE_SIZE)
                sum_frag_strides = ALIGN(sum_frag_strides, PAGE_SIZE);

        bulk_bound_rq_size_in_bytes = bulk_bound_rq_size * sum_frag_strides;

#define MAX_WQE_BULK_BYTES(xdp) ((xdp ? 256 : 512) * 1024)

        /* A WQE bulk should not exceed min(512KB, 1/4 of rq size). For XDP
         * keep bulk size smaller to avoid filling the page_pool cache on
         * every bulk refill.
         */
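        /* Example (illustrative numbers): a non-XDP RQ of 1024 WQEs with 4096
         * bytes of strides per WQE bounds the bulk to 256 WQEs = 1 MB, which
         * the 512 KB cap below then reduces to 512 KB / 4 KB = 128 WQEs.
         */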
        wqe_bulk_in_bytes = min_t(u32, MAX_WQE_BULK_BYTES(params->xdp_prog),
                                  bulk_bound_rq_size_in_bytes);
        wqe_bulk = DIV_ROUND_UP(wqe_bulk_in_bytes, sum_frag_strides);

        /* Make sure that allocations don't start when the page is still used
         * by older WQEs.
         */
        info->wqe_bulk = max_t(u16, info->wqe_index_mask + 1, wqe_bulk);

        split_factor = DIV_ROUND_UP(MAX_WQE_BULK_BYTES(params->xdp_prog),
                                    PP_ALLOC_CACHE_REFILL * PAGE_SIZE);
        info->refill_unit = DIV_ROUND_UP(info->wqe_bulk, split_factor);
}

#define DEFAULT_FRAG_SIZE (2048)

static int mlx5e_build_rq_frags_info(struct mlx5_core_dev *mdev,
                                     struct mlx5e_params *params,
                                     struct mlx5e_xsk_param *xsk,
                                     struct mlx5e_rq_frags_info *info)
{
        u32 byte_count = MLX5E_SW2HW_MTU(params, params->sw_mtu);
        int frag_size_max = DEFAULT_FRAG_SIZE;
        int first_frag_size_max;
        u32 buf_size = 0;
        u16 headroom;
        int max_mtu;
        int i;

        if (mlx5e_rx_is_linear_skb(mdev, params, xsk)) {
                int frag_stride;

                frag_stride = mlx5e_rx_get_linear_stride_sz(mdev, params, xsk, false);

                info->arr[0].frag_size = byte_count;
                info->arr[0].frag_stride = frag_stride;
                info->num_frags = 1;

                /* N WQEs share the same page, N = PAGE_SIZE / frag_stride. The
                 * first WQE in the page is responsible for allocation of this
                 * page, this WQE's index is k*N. If WQEs [k*N+1; k*N+N-1] are
                 * still not completed, the allocation must stop before k*N.
                 */
                info->wqe_index_mask = (PAGE_SIZE / frag_stride) - 1;

                goto out;
        }

        headroom = mlx5e_get_linear_rq_headroom(params, xsk);
        first_frag_size_max = SKB_WITH_OVERHEAD(frag_size_max - headroom);

        max_mtu = mlx5e_max_nonlinear_mtu(first_frag_size_max, frag_size_max,
                                          params->xdp_prog);
        if (byte_count > max_mtu || params->xdp_prog) {
                frag_size_max = PAGE_SIZE;
                first_frag_size_max = SKB_WITH_OVERHEAD(frag_size_max - headroom);

                max_mtu = mlx5e_max_nonlinear_mtu(first_frag_size_max, frag_size_max,
                                                  params->xdp_prog);
                if (byte_count > max_mtu) {
                        mlx5_core_err(mdev, "MTU %u is too big for non-linear legacy RQ (max %d)\n",
                                      params->sw_mtu, max_mtu);
                        return -EINVAL;
                }
        }

        i = 0;
        while (buf_size < byte_count) {
                int frag_size = byte_count - buf_size;

                if (i == 0)
                        frag_size = min(frag_size, first_frag_size_max);
                else if (i < MLX5E_MAX_RX_FRAGS - 1)
                        frag_size = min(frag_size, frag_size_max);

                info->arr[i].frag_size = frag_size;
                buf_size += frag_size;

                if (params->xdp_prog) {
                        /* XDP multi buffer expects fragments of the same size. */
                        info->arr[i].frag_stride = frag_size_max;
                } else {
                        if (i == 0) {
                                /* Ensure that headroom and tailroom are included. */
                                frag_size += headroom;
                                frag_size += SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
                        }
                        info->arr[i].frag_stride = roundup_pow_of_two(frag_size);
                }

                i++;
        }
        info->num_frags = i;

        /* The last fragment of WQE with index 2*N may share the page with the
         * first fragment of WQE with index 2*N+1 in certain cases. If WQE 2*N+1
         * is not completed yet, WQE 2*N must not be allocated, as it's
         * responsible for allocating a new page.
         */
        if (frag_size_max == PAGE_SIZE) {
                /* No WQE can start in the middle of a page. */
                info->wqe_index_mask = 0;
        } else {
                /* PAGE_SIZEs starting from 8192 don't use 2K-sized fragments,
                 * because there would be more than MLX5E_MAX_RX_FRAGS of them.
                 */
                WARN_ON(PAGE_SIZE != 2 * DEFAULT_FRAG_SIZE);

                /* Odd number of fragments allows to pack the last fragment of
                 * the previous WQE and the first fragment of the next WQE into
                 * the same page.
                 * As long as DEFAULT_FRAG_SIZE is 2048, and MLX5E_MAX_RX_FRAGS
                 * is 4, the last fragment can be bigger than the rest only if
                 * it's the fourth one, so WQEs consisting of 3 fragments will
                 * always share a page.
                 * When a page is shared, WQE bulk size is 2, otherwise just 1.
                 */
                info->wqe_index_mask = info->num_frags % 2;
        }

out:
        /* Bulking optimization to skip allocation until a large enough number
         * of WQEs can be allocated in a row. Bulking also influences how well
         * deferred page release works.
         */
        mlx5e_rx_compute_wqe_bulk_params(params, info);

        mlx5_core_dbg(mdev, "%s: wqe_bulk = %u, wqe_bulk_refill_unit = %u\n",
                      __func__, info->wqe_bulk, info->refill_unit);

        info->log_num_frags = order_base_2(info->num_frags);

        return 0;
}

static u8 mlx5e_get_rqwq_log_stride(u8 wq_type, int ndsegs)
{
        int sz = sizeof(struct mlx5_wqe_data_seg) * ndsegs;

        switch (wq_type) {
        case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ:
                sz += sizeof(struct mlx5e_rx_wqe_ll);
                break;
        default: /* MLX5_WQ_TYPE_CYCLIC */
                sz += sizeof(struct mlx5e_rx_wqe_cyc);
        }

        return order_base_2(sz);
}

static void mlx5e_build_common_cq_param(struct mlx5_core_dev *mdev,
                                        struct mlx5e_cq_param *param)
{
        void *cqc = param->cqc;

        MLX5_SET(cqc, cqc, uar_page, mdev->priv.uar->index);
        if (MLX5_CAP_GEN(mdev, cqe_128_always) && cache_line_size() >= 128)
                MLX5_SET(cqc, cqc, cqe_sz, CQE_STRIDE_128_PAD);
}

static u32 mlx5e_shampo_get_log_cq_size(struct mlx5_core_dev *mdev,
                                        struct mlx5e_params *params,
                                        struct mlx5e_xsk_param *xsk)
{
        int rsrv_size = BIT(mlx5e_shampo_get_log_rsrv_size(mdev, params)) * PAGE_SIZE;
        u16 num_strides = BIT(mlx5e_mpwqe_get_log_num_strides(mdev, params, xsk));
        int pkt_per_rsrv = BIT(mlx5e_shampo_get_log_pkt_per_rsrv(mdev, params));
        u8 log_stride_sz = mlx5e_mpwqe_get_log_stride_size(mdev, params, xsk);
        int wq_size = BIT(mlx5e_mpwqe_get_log_rq_size(mdev, params, xsk));
        int wqe_size = BIT(log_stride_sz) * num_strides;

        /* +1 is for the case that the pkt_per_rsrv packets don't consume the whole
         * reservation, so we get a filler CQE for the rest of the reservation.
         */
        return order_base_2((wqe_size / rsrv_size) * wq_size * (pkt_per_rsrv + 1));
}

static void mlx5e_build_rx_cq_param(struct mlx5_core_dev *mdev,
                                    struct mlx5e_params *params,
                                    struct mlx5e_xsk_param *xsk,
                                    struct mlx5e_cq_param *param)
{
        bool hw_stridx = false;
        void *cqc = param->cqc;
        u8 log_cq_size;

        switch (params->rq_wq_type) {
        case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ:
                hw_stridx = MLX5_CAP_GEN(mdev, mini_cqe_resp_stride_index);
                if (params->packet_merge.type == MLX5E_PACKET_MERGE_SHAMPO)
                        log_cq_size = mlx5e_shampo_get_log_cq_size(mdev, params, xsk);
                else
                        log_cq_size = mlx5e_mpwqe_get_log_rq_size(mdev, params, xsk) +
                                mlx5e_mpwqe_get_log_num_strides(mdev, params, xsk);
                break;
        default: /* MLX5_WQ_TYPE_CYCLIC */
                log_cq_size = params->log_rq_mtu_frames;
        }

        MLX5_SET(cqc, cqc, log_cq_size, log_cq_size);
        if (MLX5E_GET_PFLAG(params, MLX5E_PFLAG_RX_CQE_COMPRESS)) {
                MLX5_SET(cqc, cqc, mini_cqe_res_format, hw_stridx ?
                         MLX5_CQE_FORMAT_CSUM_STRIDX : MLX5_CQE_FORMAT_CSUM);
                MLX5_SET(cqc, cqc, cqe_compression_layout,
                         MLX5_CAP_GEN(mdev, enhanced_cqe_compression) ?
                         MLX5_CQE_COMPRESS_LAYOUT_ENHANCED :
                         MLX5_CQE_COMPRESS_LAYOUT_BASIC);
                MLX5_SET(cqc, cqc, cqe_comp_en, 1);
        }

        mlx5e_build_common_cq_param(mdev, param);
        param->cq_period_mode = params->rx_cq_moderation.cq_period_mode;
}

static u8 rq_end_pad_mode(struct mlx5_core_dev *mdev, struct mlx5e_params *params)
{
        bool lro_en = params->packet_merge.type == MLX5E_PACKET_MERGE_LRO;
        bool ro = MLX5_CAP_GEN(mdev, relaxed_ordering_write);

        return ro && lro_en ?
                MLX5_WQ_END_PAD_MODE_NONE : MLX5_WQ_END_PAD_MODE_ALIGN;
}

int mlx5e_build_rq_param(struct mlx5_core_dev *mdev,
                         struct mlx5e_params *params,
                         struct mlx5e_xsk_param *xsk,
                         u16 q_counter,
                         struct mlx5e_rq_param *param)
{
        void *rqc = param->rqc;
        void *wq = MLX5_ADDR_OF(rqc, rqc, wq);
        int ndsegs = 1;
        int err;

        switch (params->rq_wq_type) {
        case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ: {
                u8 log_wqe_num_of_strides = mlx5e_mpwqe_get_log_num_strides(mdev, params, xsk);
                u8 log_wqe_stride_size = mlx5e_mpwqe_get_log_stride_size(mdev, params, xsk);
                enum mlx5e_mpwrq_umr_mode umr_mode = mlx5e_mpwrq_umr_mode(mdev, xsk);
                u8 page_shift = mlx5e_mpwrq_page_shift(mdev, xsk);

                if (!mlx5e_verify_rx_mpwqe_strides(mdev, log_wqe_stride_size,
                                                   log_wqe_num_of_strides,
                                                   page_shift, umr_mode)) {
                        mlx5_core_err(mdev,
                                      "Bad RX MPWQE params: log_stride_size %u, log_num_strides %u, umr_mode %d\n",
                                      log_wqe_stride_size, log_wqe_num_of_strides,
                                      umr_mode);
                        return -EINVAL;
                }

                MLX5_SET(wq, wq, log_wqe_num_of_strides,
                         log_wqe_num_of_strides - MLX5_MPWQE_LOG_NUM_STRIDES_BASE);
                MLX5_SET(wq, wq, log_wqe_stride_size,
                         log_wqe_stride_size - MLX5_MPWQE_LOG_STRIDE_SZ_BASE);
                MLX5_SET(wq, wq, log_wq_sz, mlx5e_mpwqe_get_log_rq_size(mdev, params, xsk));
                if (params->packet_merge.type == MLX5E_PACKET_MERGE_SHAMPO) {
                        MLX5_SET(wq, wq, shampo_enable, true);
                        MLX5_SET(wq, wq, log_reservation_size,
                                 mlx5e_shampo_get_log_rsrv_size(mdev, params));
                        MLX5_SET(wq, wq,
                                 log_max_num_of_packets_per_reservation,
                                 mlx5e_shampo_get_log_pkt_per_rsrv(mdev, params));
                        MLX5_SET(wq, wq, log_headers_entry_size,
                                 mlx5e_shampo_get_log_hd_entry_size(mdev, params));
                        MLX5_SET(rqc, rqc, reservation_timeout,
                                 params->packet_merge.timeout);
                        MLX5_SET(rqc, rqc, shampo_match_criteria_type,
                                 params->packet_merge.shampo.match_criteria_type);
                        MLX5_SET(rqc, rqc, shampo_no_match_alignment_granularity,
                                 params->packet_merge.shampo.alignment_granularity);
                }
                break;
        }
        default: /* MLX5_WQ_TYPE_CYCLIC */
                MLX5_SET(wq, wq, log_wq_sz, params->log_rq_mtu_frames);
                err = mlx5e_build_rq_frags_info(mdev, params, xsk, &param->frags_info);
                if (err)
                        return err;
                ndsegs = param->frags_info.num_frags;
        }

        MLX5_SET(wq, wq, wq_type, params->rq_wq_type);
        MLX5_SET(wq, wq, end_padding_mode, rq_end_pad_mode(mdev, params));
        MLX5_SET(wq, wq, log_wq_stride,
                 mlx5e_get_rqwq_log_stride(params->rq_wq_type, ndsegs));
        MLX5_SET(wq, wq, pd, mdev->mlx5e_res.hw_objs.pdn);
        MLX5_SET(rqc, rqc, counter_set_id, q_counter);
        MLX5_SET(rqc, rqc, vsd, params->vlan_strip_disable);
        MLX5_SET(rqc, rqc, scatter_fcs, params->scatter_fcs_en);

        param->wq.buf_numa_node = dev_to_node(mlx5_core_dma_dev(mdev));
        mlx5e_build_rx_cq_param(mdev, params, xsk, &param->cqp);

        return 0;
}

void mlx5e_build_drop_rq_param(struct mlx5_core_dev *mdev,
                               u16 q_counter,
                               struct mlx5e_rq_param *param)
{
        void *rqc = param->rqc;
        void *wq = MLX5_ADDR_OF(rqc, rqc, wq);

        MLX5_SET(wq, wq, wq_type, MLX5_WQ_TYPE_CYCLIC);
        MLX5_SET(wq, wq, log_wq_stride,
                 mlx5e_get_rqwq_log_stride(MLX5_WQ_TYPE_CYCLIC, 1));
        MLX5_SET(rqc, rqc, counter_set_id, q_counter);

        param->wq.buf_numa_node = dev_to_node(mlx5_core_dma_dev(mdev));
}

void mlx5e_build_tx_cq_param(struct mlx5_core_dev *mdev,
                             struct mlx5e_params *params,
                             struct mlx5e_cq_param *param)
{
        void *cqc = param->cqc;

        MLX5_SET(cqc, cqc, log_cq_size, params->log_sq_size);

        mlx5e_build_common_cq_param(mdev, param);
        param->cq_period_mode = params->tx_cq_moderation.cq_period_mode;
}

void mlx5e_build_sq_param_common(struct mlx5_core_dev *mdev,
                                 struct mlx5e_sq_param *param)
{
        void *sqc = param->sqc;
        void *wq = MLX5_ADDR_OF(sqc, sqc, wq);

        MLX5_SET(wq, wq, log_wq_stride, ilog2(MLX5_SEND_WQE_BB));
        MLX5_SET(wq, wq, pd, mdev->mlx5e_res.hw_objs.pdn);

        param->wq.buf_numa_node = dev_to_node(mlx5_core_dma_dev(mdev));
}

void mlx5e_build_sq_param(struct mlx5_core_dev *mdev,
                          struct mlx5e_params *params,
                          struct mlx5e_sq_param *param)
{
        void *sqc = param->sqc;
        void *wq = MLX5_ADDR_OF(sqc, sqc, wq);
        bool allow_swp;

        allow_swp =
                mlx5_geneve_tx_allowed(mdev) || !!mlx5_ipsec_device_caps(mdev);
        mlx5e_build_sq_param_common(mdev, param);
        MLX5_SET(wq, wq, log_wq_sz, params->log_sq_size);
        MLX5_SET(sqc, sqc, allow_swp, allow_swp);
        param->is_mpw = MLX5E_GET_PFLAG(params, MLX5E_PFLAG_SKB_TX_MPWQE);
        param->stop_room = mlx5e_calc_sq_stop_room(mdev, params);
        mlx5e_build_tx_cq_param(mdev, params, &param->cqp);
}

static void mlx5e_build_ico_cq_param(struct mlx5_core_dev *mdev,
                                     u8 log_wq_size,
                                     struct mlx5e_cq_param *param)
{
        void *cqc = param->cqc;

        MLX5_SET(cqc, cqc, log_cq_size, log_wq_size);

        mlx5e_build_common_cq_param(mdev, param);

        param->cq_period_mode = DIM_CQ_PERIOD_MODE_START_FROM_EQE;
}

/* This function calculates the maximum number of header entries that are
 * needed per WQE. The formula is based on the size of the reservations and on
 * the restriction that the max number of packets per reservation is equal to
 * the max number of headers per reservation.
 */
u32 mlx5e_shampo_hd_per_wqe(struct mlx5_core_dev *mdev,
                            struct mlx5e_params *params,
                            struct mlx5e_rq_param *rq_param)
{
        int resv_size = BIT(mlx5e_shampo_get_log_rsrv_size(mdev, params)) * PAGE_SIZE;
        u16 num_strides = BIT(mlx5e_mpwqe_get_log_num_strides(mdev, params, NULL));
        int pkt_per_resv = BIT(mlx5e_shampo_get_log_pkt_per_rsrv(mdev, params));
        u8 log_stride_sz = mlx5e_mpwqe_get_log_stride_size(mdev, params, NULL);
        int wqe_size = BIT(log_stride_sz) * num_strides;
        u32 hd_per_wqe;

        /* Assumption: hd_per_wqe % 8 == 0. */
        hd_per_wqe = (wqe_size / resv_size) * pkt_per_resv;
        mlx5_core_dbg(mdev, "%s hd_per_wqe = %d rsrv_size = %d wqe_size = %d pkt_per_resv = %d\n",
                      __func__, hd_per_wqe, resv_size, wqe_size, pkt_per_resv);
        return hd_per_wqe;
}

/* This function calculates the maximum number of header entries that are
 * needed for the WQ; this value is used to allocate the header buffer in HW,
 * thus it must be a power of 2.
 */
u32 mlx5e_shampo_hd_per_wq(struct mlx5_core_dev *mdev,
                           struct mlx5e_params *params,
                           struct mlx5e_rq_param *rq_param)
{
        void *wqc = MLX5_ADDR_OF(rqc, rq_param->rqc, wq);
        int wq_size = BIT(MLX5_GET(wq, wqc, log_wq_sz));
        u32 hd_per_wqe, hd_per_wq;

        hd_per_wqe = mlx5e_shampo_hd_per_wqe(mdev, params, rq_param);
        hd_per_wq = roundup_pow_of_two(hd_per_wqe * wq_size);
        return hd_per_wq;
}

static u32 mlx5e_shampo_icosq_sz(struct mlx5_core_dev *mdev,
                                 struct mlx5e_params *params,
                                 struct mlx5e_rq_param *rq_param)
{
        int max_num_of_umr_per_wqe, max_hd_per_wqe, max_klm_per_umr, rest;
        void *wqc = MLX5_ADDR_OF(rqc, rq_param->rqc, wq);
        int wq_size = BIT(MLX5_GET(wq, wqc, log_wq_sz));
        u32 wqebbs;

        max_klm_per_umr = MLX5E_MAX_KLM_PER_WQE(mdev);
        max_hd_per_wqe = mlx5e_shampo_hd_per_wqe(mdev, params, rq_param);
        max_num_of_umr_per_wqe = max_hd_per_wqe / max_klm_per_umr;
        rest = max_hd_per_wqe % max_klm_per_umr;
        wqebbs = MLX5E_KLM_UMR_WQEBBS(max_klm_per_umr) * max_num_of_umr_per_wqe;
        if (rest)
                wqebbs += MLX5E_KLM_UMR_WQEBBS(rest);
        wqebbs *= wq_size;
        return wqebbs;
}

static u32 mlx5e_mpwrq_total_umr_wqebbs(struct mlx5_core_dev *mdev,
                                        struct mlx5e_params *params,
                                        struct mlx5e_xsk_param *xsk)
{
        enum mlx5e_mpwrq_umr_mode umr_mode = mlx5e_mpwrq_umr_mode(mdev, xsk);
        u8 page_shift = mlx5e_mpwrq_page_shift(mdev, xsk);
        u8 umr_wqebbs;

        umr_wqebbs = mlx5e_mpwrq_umr_wqebbs(mdev, page_shift, umr_mode);

        return umr_wqebbs * (1 << mlx5e_mpwqe_get_log_rq_size(mdev, params, xsk));
}

static u8 mlx5e_build_icosq_log_wq_sz(struct mlx5_core_dev *mdev,
                                      struct mlx5e_params *params,
                                      struct mlx5e_rq_param *rqp)
{
        u32 wqebbs, total_pages, useful_space;

        /* MLX5_WQ_TYPE_CYCLIC */
        if (params->rq_wq_type != MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ)
                return MLX5E_PARAMS_MINIMUM_LOG_SQ_SIZE;

        /* UMR WQEs for the regular RQ. */
        wqebbs = mlx5e_mpwrq_total_umr_wqebbs(mdev, params, NULL);

        /* If XDP program is attached, XSK may be turned on at any time without
         * restarting the channel. ICOSQ must be big enough to fit UMR WQEs of
         * both regular RQ and XSK RQ.
         *
         * XSK uses different values of page_shift, and the total number of UMR
         * WQEBBs depends on it. This dependency is complex and not monotonic,
         * especially taking into consideration that some of the parameters come
         * from capabilities. Hence, we have to try all valid values of XSK
         * frame size (and page_shift) to find the maximum.
         */
        if (params->xdp_prog) {
                u32 max_xsk_wqebbs = 0;
                u8 frame_shift;

                for (frame_shift = XDP_UMEM_MIN_CHUNK_SHIFT;
                     frame_shift <= PAGE_SHIFT; frame_shift++) {
                        /* The headroom doesn't affect the calculation. */
                        struct mlx5e_xsk_param xsk = {
                                .chunk_size = 1 << frame_shift,
                                .unaligned = false,
                        };

                        /* XSK aligned mode. */
                        max_xsk_wqebbs = max(max_xsk_wqebbs,
                                             mlx5e_mpwrq_total_umr_wqebbs(mdev, params, &xsk));

                        /* XSK unaligned mode, frame size is a power of two. */
                        xsk.unaligned = true;
                        max_xsk_wqebbs = max(max_xsk_wqebbs,
                                             mlx5e_mpwrq_total_umr_wqebbs(mdev, params, &xsk));

                        /* XSK unaligned mode, frame size is not equal to stride size. */
                        xsk.chunk_size -= 1;
                        max_xsk_wqebbs = max(max_xsk_wqebbs,
                                             mlx5e_mpwrq_total_umr_wqebbs(mdev, params, &xsk));

                        /* XSK unaligned mode, frame size is a triple power of two. */
                        xsk.chunk_size = (1 << frame_shift) / 4 * 3;
                        max_xsk_wqebbs = max(max_xsk_wqebbs,
                                             mlx5e_mpwrq_total_umr_wqebbs(mdev, params, &xsk));
                }

                wqebbs += max_xsk_wqebbs;
        }

        if (params->packet_merge.type == MLX5E_PACKET_MERGE_SHAMPO)
                wqebbs += mlx5e_shampo_icosq_sz(mdev, params, rqp);

        /* UMR WQEs don't cross the page boundary, they are padded with NOPs.
         * This padding is always smaller than the max WQE size. That gives us
         * at least (PAGE_SIZE - (max WQE size - MLX5_SEND_WQE_BB)) useful bytes
         * per page. The number of pages is estimated as the total size of WQEs
         * divided by the useful space in page, rounding up. If some WQEs don't
         * fully fit into the useful space, they can occupy part of the padding,
         * which proves this estimation to be correct (reserve enough space).
         */
        useful_space = PAGE_SIZE - mlx5e_get_max_sq_wqebbs(mdev) + MLX5_SEND_WQE_BB;
        total_pages = DIV_ROUND_UP(wqebbs * MLX5_SEND_WQE_BB, useful_space);
        wqebbs = total_pages * (PAGE_SIZE / MLX5_SEND_WQE_BB);

        return max_t(u8, MLX5E_PARAMS_MINIMUM_LOG_SQ_SIZE, order_base_2(wqebbs));
}

static u8 mlx5e_build_async_icosq_log_wq_sz(struct mlx5_core_dev *mdev)
{
        if (mlx5e_is_ktls_rx(mdev))
                return MLX5E_PARAMS_DEFAULT_LOG_SQ_SIZE;

        return MLX5E_PARAMS_MINIMUM_LOG_SQ_SIZE;
}

static void mlx5e_build_icosq_param(struct mlx5_core_dev *mdev,
                                    u8 log_wq_size,
                                    struct mlx5e_sq_param *param)
{
        void *sqc = param->sqc;
        void *wq = MLX5_ADDR_OF(sqc, sqc, wq);

        mlx5e_build_sq_param_common(mdev, param);

        MLX5_SET(wq, wq, log_wq_sz, log_wq_size);
        MLX5_SET(sqc, sqc, reg_umr, MLX5_CAP_ETH(mdev, reg_umr_sq));
        mlx5e_build_ico_cq_param(mdev, log_wq_size, &param->cqp);
}

static void mlx5e_build_async_icosq_param(struct mlx5_core_dev *mdev,
                                          u8 log_wq_size,
                                          struct mlx5e_sq_param *param)
{
        void *sqc = param->sqc;
        void *wq = MLX5_ADDR_OF(sqc, sqc, wq);

        mlx5e_build_sq_param_common(mdev, param);
        param->stop_room = mlx5e_stop_room_for_wqe(mdev, 1); /* for XSK NOP */
        param->is_tls = mlx5e_is_ktls_rx(mdev);
        if (param->is_tls)
                param->stop_room += mlx5e_stop_room_for_wqe(mdev, 1); /* for TLS RX resync NOP */
        MLX5_SET(sqc, sqc, reg_umr, MLX5_CAP_ETH(mdev, reg_umr_sq));
        MLX5_SET(wq, wq, log_wq_sz, log_wq_size);
        mlx5e_build_ico_cq_param(mdev, log_wq_size, &param->cqp);
}

void mlx5e_build_xdpsq_param(struct mlx5_core_dev *mdev,
                             struct mlx5e_params *params,
                             struct mlx5e_xsk_param *xsk,
                             struct mlx5e_sq_param *param)
{
        void *sqc = param->sqc;
        void *wq = MLX5_ADDR_OF(sqc, sqc, wq);

        mlx5e_build_sq_param_common(mdev, param);
        MLX5_SET(wq, wq, log_wq_sz, params->log_sq_size);
        param->is_mpw = MLX5E_GET_PFLAG(params, MLX5E_PFLAG_XDP_TX_MPWQE);
        param->is_xdp_mb = !mlx5e_rx_is_linear_skb(mdev, params, xsk);
        mlx5e_build_tx_cq_param(mdev, params, &param->cqp);
}

int mlx5e_build_channel_param(struct mlx5_core_dev *mdev,
                              struct mlx5e_params *params,
                              u16 q_counter,
                              struct mlx5e_channel_param *cparam)
{
        u8 icosq_log_wq_sz, async_icosq_log_wq_sz;
        int err;

        err = mlx5e_build_rq_param(mdev, params, NULL, q_counter, &cparam->rq);
        if (err)
                return err;

        icosq_log_wq_sz = mlx5e_build_icosq_log_wq_sz(mdev, params, &cparam->rq);
        async_icosq_log_wq_sz = mlx5e_build_async_icosq_log_wq_sz(mdev);

        mlx5e_build_sq_param(mdev, params, &cparam->txq_sq);
        mlx5e_build_xdpsq_param(mdev, params, NULL, &cparam->xdp_sq);
        mlx5e_build_icosq_param(mdev, icosq_log_wq_sz, &cparam->icosq);
        mlx5e_build_async_icosq_param(mdev, async_icosq_log_wq_sz, &cparam->async_icosq);

        return 0;
}