// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
/* Copyright (c) 2019 Mellanox Technologies. */

#include "en/params.h"
#include "en/txrx.h"
#include "en/port.h"
#include "en_accel/en_accel.h"
#include "accel/ipsec.h"
#include "fpga/ipsec.h"

static bool mlx5e_rx_is_xdp(struct mlx5e_params *params,
			    struct mlx5e_xsk_param *xsk)
{
	return params->xdp_prog || xsk;
}

u16 mlx5e_get_linear_rq_headroom(struct mlx5e_params *params,
				 struct mlx5e_xsk_param *xsk)
{
	u16 headroom;

	if (xsk)
		return xsk->headroom;

	headroom = NET_IP_ALIGN;
	if (mlx5e_rx_is_xdp(params, xsk))
		headroom += XDP_PACKET_HEADROOM;
	else
		headroom += MLX5_RX_HEADROOM;

	return headroom;
}
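
/* Illustrative only: with an XDP program attached and no XSK, the headroom
 * works out to NET_IP_ALIGN + XDP_PACKET_HEADROOM. Assuming the common
 * values NET_IP_ALIGN == 2 and XDP_PACKET_HEADROOM == 256 (NET_IP_ALIGN is
 * 0 on some architectures, e.g. x86), that is 258 bytes; without XDP it is
 * NET_IP_ALIGN + MLX5_RX_HEADROOM instead.
 */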

u32 mlx5e_rx_get_min_frag_sz(struct mlx5e_params *params,
			     struct mlx5e_xsk_param *xsk)
{
	u32 hw_mtu = MLX5E_SW2HW_MTU(params, params->sw_mtu);
	u16 linear_rq_headroom = mlx5e_get_linear_rq_headroom(params, xsk);

	return linear_rq_headroom + hw_mtu;
}

static u32 mlx5e_rx_get_linear_frag_sz(struct mlx5e_params *params,
				       struct mlx5e_xsk_param *xsk)
{
	u32 frag_sz = mlx5e_rx_get_min_frag_sz(params, xsk);

	/* AF_XDP doesn't build SKBs in place. */
	if (!xsk)
		frag_sz = MLX5_SKB_FRAG_SZ(frag_sz);

	/* XDP in mlx5e doesn't support multiple packets per page. AF_XDP is a
	 * special case: it can run with frames smaller than a page, as it
	 * doesn't allocate pages dynamically. However, here we pretend that
	 * fragments are page-sized, which allows us to treat XSK frames like
	 * pages by redirecting alloc and free operations to the XSK rings and
	 * by relying on there being at most one packet per "page" (i.e. per
	 * frame). The latter is important, because frames may arrive in a
	 * random order, and we would have trouble assembling a real page out
	 * of multiple frames.
	 */
	if (mlx5e_rx_is_xdp(params, xsk))
		frag_sz = max_t(u32, frag_sz, PAGE_SIZE);

	/* Even if we can go with a smaller fragment size, we must not put
	 * multiple packets into a single frame.
	 */
	if (xsk)
		frag_sz = max_t(u32, frag_sz, xsk->chunk_size);

	return frag_sz;
}
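
/* Illustrative only: for a plain 1500-byte-MTU RQ without XDP or XSK,
 * frag_sz is the headroom plus the HW MTU plus the skb_shared_info overhead
 * added by MLX5_SKB_FRAG_SZ(), which comfortably fits in one page; attaching
 * an XDP program then rounds it up to PAGE_SIZE, i.e. one packet per page.
 */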

u8 mlx5e_mpwqe_log_pkts_per_wqe(struct mlx5e_params *params,
				struct mlx5e_xsk_param *xsk)
{
	u32 linear_frag_sz = mlx5e_rx_get_linear_frag_sz(params, xsk);

	return MLX5_MPWRQ_LOG_WQE_SZ - order_base_2(linear_frag_sz);
}
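
/* Illustrative only: assuming MLX5_MPWRQ_LOG_WQE_SZ == 18 (a 256 KB WQE)
 * and a linear fragment size of 4 KB (order 12), each multi-packet WQE
 * holds 2^(18 - 12) = 64 packets.
 */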

bool mlx5e_rx_is_linear_skb(struct mlx5e_params *params,
			    struct mlx5e_xsk_param *xsk)
{
	/* AF_XDP allocates SKBs on XDP_PASS - ensure they don't occupy more
	 * than one page. For this, check both with and without xsk.
	 */
	u32 linear_frag_sz = max(mlx5e_rx_get_linear_frag_sz(params, xsk),
				 mlx5e_rx_get_linear_frag_sz(params, NULL));

	return !params->lro_en && linear_frag_sz <= PAGE_SIZE;
}

bool mlx5e_verify_rx_mpwqe_strides(struct mlx5_core_dev *mdev,
				   u8 log_stride_sz, u8 log_num_strides)
{
	if (log_stride_sz + log_num_strides != MLX5_MPWRQ_LOG_WQE_SZ)
		return false;

	if (log_stride_sz < MLX5_MPWQE_LOG_STRIDE_SZ_BASE ||
	    log_stride_sz > MLX5_MPWQE_LOG_STRIDE_SZ_MAX)
		return false;

	if (log_num_strides > MLX5_MPWQE_LOG_NUM_STRIDES_MAX)
		return false;

	if (MLX5_CAP_GEN(mdev, ext_stride_num_range))
		return log_num_strides >= MLX5E_MPWQE_LOG_NUM_STRIDES_EXT_BASE;

	return log_num_strides >= MLX5_MPWQE_LOG_NUM_STRIDES_BASE;
}
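
/* Illustrative only: with MLX5_MPWRQ_LOG_WQE_SZ == 18, a 2^6-byte stride
 * paired with 2^12 strides satisfies the "log stride size + log number of
 * strides == log WQE size" invariant; whether the pair is accepted also
 * depends on the device's base/max limits and on the ext_stride_num_range
 * capability checked above.
 */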

bool mlx5e_rx_mpwqe_is_linear_skb(struct mlx5_core_dev *mdev,
				  struct mlx5e_params *params,
				  struct mlx5e_xsk_param *xsk)
{
	s8 log_num_strides;
	u8 log_stride_sz;

	if (!mlx5e_rx_is_linear_skb(params, xsk))
		return false;

	log_stride_sz = order_base_2(mlx5e_rx_get_linear_frag_sz(params, xsk));
	log_num_strides = MLX5_MPWRQ_LOG_WQE_SZ - log_stride_sz;

	return mlx5e_verify_rx_mpwqe_strides(mdev, log_stride_sz, log_num_strides);
}

u8 mlx5e_mpwqe_get_log_rq_size(struct mlx5e_params *params,
			       struct mlx5e_xsk_param *xsk)
{
	u8 log_pkts_per_wqe = mlx5e_mpwqe_log_pkts_per_wqe(params, xsk);

	/* The values are unsigned; compare before subtracting to avoid
	 * underflow.
	 */
	if (params->log_rq_mtu_frames <
	    log_pkts_per_wqe + MLX5E_PARAMS_MINIMUM_LOG_RQ_SIZE_MPW)
		return MLX5E_PARAMS_MINIMUM_LOG_RQ_SIZE_MPW;

	return params->log_rq_mtu_frames - log_pkts_per_wqe;
}
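
/* Illustrative only: if userspace requested 2^13 frames and each WQE already
 * carries 2^6 packets, the RQ needs only 2^(13 - 6) = 128 WQEs; the
 * MLX5E_PARAMS_MINIMUM_LOG_RQ_SIZE_MPW floor kicks in only for very small
 * ring requests.
 */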

u8 mlx5e_mpwqe_get_log_stride_size(struct mlx5_core_dev *mdev,
				   struct mlx5e_params *params,
				   struct mlx5e_xsk_param *xsk)
{
	if (mlx5e_rx_mpwqe_is_linear_skb(mdev, params, xsk))
		return order_base_2(mlx5e_rx_get_linear_frag_sz(params, xsk));

	return MLX5_MPWRQ_DEF_LOG_STRIDE_SZ(mdev);
}

u8 mlx5e_mpwqe_get_log_num_strides(struct mlx5_core_dev *mdev,
				   struct mlx5e_params *params,
				   struct mlx5e_xsk_param *xsk)
{
	return MLX5_MPWRQ_LOG_WQE_SZ -
		mlx5e_mpwqe_get_log_stride_size(mdev, params, xsk);
}

u16 mlx5e_get_rq_headroom(struct mlx5_core_dev *mdev,
			  struct mlx5e_params *params,
			  struct mlx5e_xsk_param *xsk)
{
	bool is_linear_skb = (params->rq_wq_type == MLX5_WQ_TYPE_CYCLIC) ?
		mlx5e_rx_is_linear_skb(params, xsk) :
		mlx5e_rx_mpwqe_is_linear_skb(mdev, params, xsk);

	return is_linear_skb ? mlx5e_get_linear_rq_headroom(params, xsk) : 0;
}

u16 mlx5e_calc_sq_stop_room(struct mlx5_core_dev *mdev, struct mlx5e_params *params)
{
	bool is_mpwqe = MLX5E_GET_PFLAG(params, MLX5E_PFLAG_SKB_TX_MPWQE);
	u16 stop_room;

	stop_room = mlx5e_tls_get_stop_room(mdev, params);
	stop_room += mlx5e_stop_room_for_wqe(MLX5_SEND_WQE_MAX_WQEBBS);
	if (is_mpwqe)
		/* A new packet that breaks the active MPWQE session allocates
		 * its WQEs right away, so on top of the normal stop room it
		 * may consume up to one extra maximum-sized WQE.
		 */
		stop_room += mlx5e_stop_room_for_wqe(MLX5_SEND_WQE_MAX_WQEBBS);

	return stop_room;
}
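
/* Illustrative only: without TLS offload and with TX MPWQE disabled, the
 * stop room is the space of one maximum-sized WQE (MLX5_SEND_WQE_MAX_WQEBBS
 * basic blocks); enabling TX MPWQE doubles that, and kTLS adds its own
 * requirement on top via mlx5e_tls_get_stop_room().
 */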

int mlx5e_validate_params(struct mlx5_core_dev *mdev, struct mlx5e_params *params)
{
	size_t sq_size = 1 << params->log_sq_size;
	u16 stop_room;

	stop_room = mlx5e_calc_sq_stop_room(mdev, params);
	if (stop_room >= sq_size) {
		mlx5_core_err(mdev, "Stop room %u is bigger than the SQ size %zu\n",
			      stop_room, sq_size);
		return -EINVAL;
	}

	return 0;
}

static struct dim_cq_moder mlx5e_get_def_tx_moderation(u8 cq_period_mode)
{
	struct dim_cq_moder moder;

	moder.cq_period_mode = cq_period_mode;
	moder.pkts = MLX5E_PARAMS_DEFAULT_TX_CQ_MODERATION_PKTS;
	moder.usec = MLX5E_PARAMS_DEFAULT_TX_CQ_MODERATION_USEC;
	if (cq_period_mode == MLX5_CQ_PERIOD_MODE_START_FROM_CQE)
		moder.usec = MLX5E_PARAMS_DEFAULT_TX_CQ_MODERATION_USEC_FROM_CQE;

	return moder;
}

static struct dim_cq_moder mlx5e_get_def_rx_moderation(u8 cq_period_mode)
{
	struct dim_cq_moder moder;

	moder.cq_period_mode = cq_period_mode;
	moder.pkts = MLX5E_PARAMS_DEFAULT_RX_CQ_MODERATION_PKTS;
	moder.usec = MLX5E_PARAMS_DEFAULT_RX_CQ_MODERATION_USEC;
	if (cq_period_mode == MLX5_CQ_PERIOD_MODE_START_FROM_CQE)
		moder.usec = MLX5E_PARAMS_DEFAULT_RX_CQ_MODERATION_USEC_FROM_CQE;

	return moder;
}

static u8 mlx5_to_net_dim_cq_period_mode(u8 cq_period_mode)
{
	return cq_period_mode == MLX5_CQ_PERIOD_MODE_START_FROM_CQE ?
		DIM_CQ_PERIOD_MODE_START_FROM_CQE :
		DIM_CQ_PERIOD_MODE_START_FROM_EQE;
}

void mlx5e_reset_tx_moderation(struct mlx5e_params *params, u8 cq_period_mode)
{
	if (params->tx_dim_enabled) {
		u8 dim_period_mode = mlx5_to_net_dim_cq_period_mode(cq_period_mode);

		params->tx_cq_moderation = net_dim_get_def_tx_moderation(dim_period_mode);
	} else {
		params->tx_cq_moderation = mlx5e_get_def_tx_moderation(cq_period_mode);
	}
}

void mlx5e_reset_rx_moderation(struct mlx5e_params *params, u8 cq_period_mode)
{
	if (params->rx_dim_enabled) {
		u8 dim_period_mode = mlx5_to_net_dim_cq_period_mode(cq_period_mode);

		params->rx_cq_moderation = net_dim_get_def_rx_moderation(dim_period_mode);
	} else {
		params->rx_cq_moderation = mlx5e_get_def_rx_moderation(cq_period_mode);
	}
}

void mlx5e_set_tx_cq_mode_params(struct mlx5e_params *params, u8 cq_period_mode)
{
	mlx5e_reset_tx_moderation(params, cq_period_mode);
	MLX5E_SET_PFLAG(params, MLX5E_PFLAG_TX_CQE_BASED_MODER,
			params->tx_cq_moderation.cq_period_mode ==
				MLX5_CQ_PERIOD_MODE_START_FROM_CQE);
}

void mlx5e_set_rx_cq_mode_params(struct mlx5e_params *params, u8 cq_period_mode)
{
	mlx5e_reset_rx_moderation(params, cq_period_mode);
	MLX5E_SET_PFLAG(params, MLX5E_PFLAG_RX_CQE_BASED_MODER,
			params->rx_cq_moderation.cq_period_mode ==
				MLX5_CQ_PERIOD_MODE_START_FROM_CQE);
}

bool slow_pci_heuristic(struct mlx5_core_dev *mdev)
{
	u32 link_speed = 0;
	u32 pci_bw = 0;

	mlx5e_port_max_linkspeed(mdev, &link_speed);
	pci_bw = pcie_bandwidth_available(mdev->pdev, NULL, NULL, NULL);
	mlx5_core_dbg_once(mdev, "Max link speed = %u, PCI BW = %u\n",
			   link_speed, pci_bw);

#define MLX5E_SLOW_PCI_RATIO (2)

	return link_speed && pci_bw &&
	       link_speed > MLX5E_SLOW_PCI_RATIO * pci_bw;
}
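
/* Illustrative only: both values are in Mb/s, so a 100 Gb/s port behind a
 * PCIe link that can deliver less than 50 Gb/s (e.g. a narrow or downtrained
 * slot) is classified as PCI-bound, and callers avoid defaults that assume
 * ample PCI bandwidth.
 */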

bool mlx5e_striding_rq_possible(struct mlx5_core_dev *mdev,
				struct mlx5e_params *params)
{
	if (!mlx5e_check_fragmented_striding_rq_cap(mdev))
		return false;

	if (mlx5_fpga_is_ipsec_device(mdev))
		return false;

	if (params->xdp_prog) {
		/* XSK params are not considered here. If striding RQ is in use,
		 * and an XSK is being opened, mlx5e_rx_mpwqe_is_linear_skb will
		 * be called with the known XSK params.
		 */
		if (!mlx5e_rx_mpwqe_is_linear_skb(mdev, params, NULL))
			return false;
	}

	return true;
}

void mlx5e_init_rq_type_params(struct mlx5_core_dev *mdev,
			       struct mlx5e_params *params)
{
	params->log_rq_mtu_frames = is_kdump_kernel() ?
		MLX5E_PARAMS_MINIMUM_LOG_RQ_SIZE :
		MLX5E_PARAMS_DEFAULT_LOG_RQ_SIZE;

	mlx5_core_info(mdev, "MLX5E: StrdRq(%d) RqSz(%ld) StrdSz(%ld) RxCqeCmprss(%d)\n",
		       params->rq_wq_type == MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ,
		       params->rq_wq_type == MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ ?
			       BIT(mlx5e_mpwqe_get_log_rq_size(params, NULL)) :
			       BIT(params->log_rq_mtu_frames),
		       BIT(mlx5e_mpwqe_get_log_stride_size(mdev, params, NULL)),
		       MLX5E_GET_PFLAG(params, MLX5E_PFLAG_RX_CQE_COMPRESS));
}

void mlx5e_set_rq_type(struct mlx5_core_dev *mdev, struct mlx5e_params *params)
{
	params->rq_wq_type = mlx5e_striding_rq_possible(mdev, params) &&
		MLX5E_GET_PFLAG(params, MLX5E_PFLAG_RX_STRIDING_RQ) ?
		MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ :
		MLX5_WQ_TYPE_CYCLIC;
}

void mlx5e_build_rq_params(struct mlx5_core_dev *mdev,
			   struct mlx5e_params *params)
{
	/* Prefer Striding RQ, unless any of the following holds:
	 * - Striding RQ configuration is not possible/supported.
	 * - PCI bandwidth is too low for the link (slow_pci_heuristic).
	 * - Legacy RQ would use linear SKB while Striding RQ would use
	 *   non-linear.
	 *
	 * No XSK params: checking the availability of striding RQ in general.
	 */
	if (!slow_pci_heuristic(mdev) &&
	    mlx5e_striding_rq_possible(mdev, params) &&
	    (mlx5e_rx_mpwqe_is_linear_skb(mdev, params, NULL) ||
	     !mlx5e_rx_is_linear_skb(params, NULL)))
		MLX5E_SET_PFLAG(params, MLX5E_PFLAG_RX_STRIDING_RQ, true);
	mlx5e_set_rq_type(mdev, params);
	mlx5e_init_rq_type_params(mdev, params);
}

/* Build queue parameters */

void mlx5e_build_create_cq_param(struct mlx5e_create_cq_param *ccp, struct mlx5e_channel *c)
{
	*ccp = (struct mlx5e_create_cq_param) {
		.napi = &c->napi,
		.ch_stats = c->stats,
		.node = cpu_to_node(c->cpu),
		.ix = c->ix,
	};
}

#define DEFAULT_FRAG_SIZE (2048)

static void mlx5e_build_rq_frags_info(struct mlx5_core_dev *mdev,
				      struct mlx5e_params *params,
				      struct mlx5e_xsk_param *xsk,
				      struct mlx5e_rq_frags_info *info)
{
	u32 byte_count = MLX5E_SW2HW_MTU(params, params->sw_mtu);
	int frag_size_max = DEFAULT_FRAG_SIZE;
	u32 buf_size = 0;
	int i;

	if (mlx5_fpga_is_ipsec_device(mdev))
		byte_count += MLX5E_METADATA_ETHER_LEN;

	if (mlx5e_rx_is_linear_skb(params, xsk)) {
		int frag_stride;

		frag_stride = mlx5e_rx_get_linear_frag_sz(params, xsk);
		frag_stride = roundup_pow_of_two(frag_stride);

		info->arr[0].frag_size = byte_count;
		info->arr[0].frag_stride = frag_stride;
		info->num_frags = 1;
		info->wqe_bulk = PAGE_SIZE / frag_stride;
		goto out;
	}

	if (byte_count > PAGE_SIZE +
	    (MLX5E_MAX_RX_FRAGS - 1) * frag_size_max)
		frag_size_max = PAGE_SIZE;

	i = 0;
	while (buf_size < byte_count) {
		int frag_size = byte_count - buf_size;

		if (i < MLX5E_MAX_RX_FRAGS - 1)
			frag_size = min(frag_size, frag_size_max);

		info->arr[i].frag_size = frag_size;
		info->arr[i].frag_stride = roundup_pow_of_two(frag_size);

		buf_size += frag_size;
		i++;
	}
	info->num_frags = i;
	/* The number of different WQEs sharing a page */
	info->wqe_bulk = 1 + (info->num_frags % 2);

out:
	info->wqe_bulk = max_t(u8, info->wqe_bulk, 8);
	info->log_num_frags = order_base_2(info->num_frags);
}
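
/* Illustrative only: assuming PAGE_SIZE == 4096 and MLX5E_MAX_RX_FRAGS == 4,
 * a ~9000-byte HW MTU on a non-linear legacy RQ is split into three
 * 2048-byte fragments plus one larger tail fragment holding the remainder;
 * only the last fragment may exceed DEFAULT_FRAG_SIZE.
 */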

static u8 mlx5e_get_rqwq_log_stride(u8 wq_type, int ndsegs)
{
	int sz = sizeof(struct mlx5_wqe_data_seg) * ndsegs;

	switch (wq_type) {
	case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ:
		sz += sizeof(struct mlx5e_rx_wqe_ll);
		break;
	default: /* MLX5_WQ_TYPE_CYCLIC */
		sz += sizeof(struct mlx5e_rx_wqe_cyc);
	}

	return order_base_2(sz);
}
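
/* Illustrative only: a cyclic RQ with a single data segment ends up with a
 * 16-byte stride (one struct mlx5_wqe_data_seg), i.e. log_wq_stride == 4;
 * each additional fragment adds another 16-byte segment before the total is
 * rounded up to a power of two.
 */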

static void mlx5e_build_common_cq_param(struct mlx5_core_dev *mdev,
					struct mlx5e_cq_param *param)
{
	void *cqc = param->cqc;

	MLX5_SET(cqc, cqc, uar_page, mdev->priv.uar->index);
	if (MLX5_CAP_GEN(mdev, cqe_128_always) && cache_line_size() >= 128)
		MLX5_SET(cqc, cqc, cqe_sz, CQE_STRIDE_128_PAD);
}

static void mlx5e_build_rx_cq_param(struct mlx5_core_dev *mdev,
				    struct mlx5e_params *params,
				    struct mlx5e_xsk_param *xsk,
				    struct mlx5e_cq_param *param)
{
	bool hw_stridx = false;
	void *cqc = param->cqc;
	u8 log_cq_size;

	switch (params->rq_wq_type) {
	case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ:
		log_cq_size = mlx5e_mpwqe_get_log_rq_size(params, xsk) +
			mlx5e_mpwqe_get_log_num_strides(mdev, params, xsk);
		hw_stridx = MLX5_CAP_GEN(mdev, mini_cqe_resp_stride_index);
		break;
	default: /* MLX5_WQ_TYPE_CYCLIC */
		log_cq_size = params->log_rq_mtu_frames;
	}

	MLX5_SET(cqc, cqc, log_cq_size, log_cq_size);
	if (MLX5E_GET_PFLAG(params, MLX5E_PFLAG_RX_CQE_COMPRESS)) {
		MLX5_SET(cqc, cqc, mini_cqe_res_format, hw_stridx ?
			 MLX5_CQE_FORMAT_CSUM_STRIDX : MLX5_CQE_FORMAT_CSUM);
		MLX5_SET(cqc, cqc, cqe_comp_en, 1);
	}

	mlx5e_build_common_cq_param(mdev, param);
	param->cq_period_mode = params->rx_cq_moderation.cq_period_mode;
}

int mlx5e_build_rq_param(struct mlx5_core_dev *mdev,
			 struct mlx5e_params *params,
			 struct mlx5e_xsk_param *xsk,
			 u16 q_counter,
			 struct mlx5e_rq_param *param)
{
	void *rqc = param->rqc;
	void *wq = MLX5_ADDR_OF(rqc, rqc, wq);
	int ndsegs = 1;

	switch (params->rq_wq_type) {
	case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ: {
		u8 log_wqe_num_of_strides = mlx5e_mpwqe_get_log_num_strides(mdev, params, xsk);
		u8 log_wqe_stride_size = mlx5e_mpwqe_get_log_stride_size(mdev, params, xsk);

		if (!mlx5e_verify_rx_mpwqe_strides(mdev, log_wqe_stride_size,
						   log_wqe_num_of_strides)) {
			mlx5_core_err(mdev,
				      "Bad RX MPWQE params: log_stride_size %u, log_num_strides %u\n",
				      log_wqe_stride_size, log_wqe_num_of_strides);
			return -EINVAL;
		}

		MLX5_SET(wq, wq, log_wqe_num_of_strides,
			 log_wqe_num_of_strides - MLX5_MPWQE_LOG_NUM_STRIDES_BASE);
		MLX5_SET(wq, wq, log_wqe_stride_size,
			 log_wqe_stride_size - MLX5_MPWQE_LOG_STRIDE_SZ_BASE);
		MLX5_SET(wq, wq, log_wq_sz, mlx5e_mpwqe_get_log_rq_size(params, xsk));
		break;
	}
	default: /* MLX5_WQ_TYPE_CYCLIC */
		MLX5_SET(wq, wq, log_wq_sz, params->log_rq_mtu_frames);
		mlx5e_build_rq_frags_info(mdev, params, xsk, &param->frags_info);
		ndsegs = param->frags_info.num_frags;
	}

	MLX5_SET(wq, wq, wq_type, params->rq_wq_type);
	MLX5_SET(wq, wq, end_padding_mode, MLX5_WQ_END_PAD_MODE_ALIGN);
	MLX5_SET(wq, wq, log_wq_stride,
		 mlx5e_get_rqwq_log_stride(params->rq_wq_type, ndsegs));
	MLX5_SET(wq, wq, pd, mdev->mlx5e_res.hw_objs.pdn);
	MLX5_SET(rqc, rqc, counter_set_id, q_counter);
	MLX5_SET(rqc, rqc, vsd, params->vlan_strip_disable);
	MLX5_SET(rqc, rqc, scatter_fcs, params->scatter_fcs_en);

	param->wq.buf_numa_node = dev_to_node(mlx5_core_dma_dev(mdev));
	mlx5e_build_rx_cq_param(mdev, params, xsk, &param->cqp);

	return 0;
}

void mlx5e_build_drop_rq_param(struct mlx5_core_dev *mdev,
			       u16 q_counter,
			       struct mlx5e_rq_param *param)
{
	void *rqc = param->rqc;
	void *wq = MLX5_ADDR_OF(rqc, rqc, wq);

	MLX5_SET(wq, wq, wq_type, MLX5_WQ_TYPE_CYCLIC);
	MLX5_SET(wq, wq, log_wq_stride,
		 mlx5e_get_rqwq_log_stride(MLX5_WQ_TYPE_CYCLIC, 1));
	MLX5_SET(rqc, rqc, counter_set_id, q_counter);

	param->wq.buf_numa_node = dev_to_node(mlx5_core_dma_dev(mdev));
}

void mlx5e_build_tx_cq_param(struct mlx5_core_dev *mdev,
			     struct mlx5e_params *params,
			     struct mlx5e_cq_param *param)
{
	void *cqc = param->cqc;

	MLX5_SET(cqc, cqc, log_cq_size, params->log_sq_size);

	mlx5e_build_common_cq_param(mdev, param);
	param->cq_period_mode = params->tx_cq_moderation.cq_period_mode;
}

void mlx5e_build_sq_param_common(struct mlx5_core_dev *mdev,
				 struct mlx5e_sq_param *param)
{
	void *sqc = param->sqc;
	void *wq = MLX5_ADDR_OF(sqc, sqc, wq);

	MLX5_SET(wq, wq, log_wq_stride, ilog2(MLX5_SEND_WQE_BB));
	MLX5_SET(wq, wq, pd, mdev->mlx5e_res.hw_objs.pdn);

	param->wq.buf_numa_node = dev_to_node(mlx5_core_dma_dev(mdev));
}

void mlx5e_build_sq_param(struct mlx5_core_dev *mdev,
			  struct mlx5e_params *params,
			  struct mlx5e_sq_param *param)
{
	void *sqc = param->sqc;
	void *wq = MLX5_ADDR_OF(sqc, sqc, wq);
	bool allow_swp;

	allow_swp = mlx5_geneve_tx_allowed(mdev) ||
		    !!MLX5_IPSEC_DEV(mdev);
	mlx5e_build_sq_param_common(mdev, param);
	MLX5_SET(wq, wq, log_wq_sz, params->log_sq_size);
	MLX5_SET(sqc, sqc, allow_swp, allow_swp);
	param->is_mpw = MLX5E_GET_PFLAG(params, MLX5E_PFLAG_SKB_TX_MPWQE);
	param->stop_room = mlx5e_calc_sq_stop_room(mdev, params);
	mlx5e_build_tx_cq_param(mdev, params, &param->cqp);
}

static void mlx5e_build_ico_cq_param(struct mlx5_core_dev *mdev,
				     u8 log_wq_size,
				     struct mlx5e_cq_param *param)
{
	void *cqc = param->cqc;

	MLX5_SET(cqc, cqc, log_cq_size, log_wq_size);

	mlx5e_build_common_cq_param(mdev, param);

	param->cq_period_mode = DIM_CQ_PERIOD_MODE_START_FROM_EQE;
}

static u8 mlx5e_get_rq_log_wq_sz(void *rqc)
{
	void *wq = MLX5_ADDR_OF(rqc, rqc, wq);

	return MLX5_GET(wq, wq, log_wq_sz);
}

static u8 mlx5e_build_icosq_log_wq_sz(struct mlx5e_params *params,
				      struct mlx5e_rq_param *rqp)
{
	switch (params->rq_wq_type) {
	case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ:
		return max_t(u8, MLX5E_PARAMS_MINIMUM_LOG_SQ_SIZE,
			     order_base_2(MLX5E_UMR_WQEBBS) +
			     mlx5e_get_rq_log_wq_sz(rqp->rqc));
	default: /* MLX5_WQ_TYPE_CYCLIC */
		return MLX5E_PARAMS_MINIMUM_LOG_SQ_SIZE;
	}
}
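
/* Illustrative only: for a striding RQ, the ICOSQ must hold one UMR WQE per
 * outstanding RQ WQE, so its log size is the RQ's log size plus the
 * (rounded-up) log of MLX5E_UMR_WQEBBS; e.g. a 2^6-entry RQ with 4-WQEBB UMR
 * WQEs needs a 2^8-entry ICOSQ. The exact WQEBB count per UMR WQE depends on
 * the configuration.
 */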

static u8 mlx5e_build_async_icosq_log_wq_sz(struct mlx5_core_dev *mdev)
{
	if (mlx5_accel_is_ktls_rx(mdev))
		return MLX5E_PARAMS_DEFAULT_LOG_SQ_SIZE;

	return MLX5E_PARAMS_MINIMUM_LOG_SQ_SIZE;
}

static void mlx5e_build_icosq_param(struct mlx5_core_dev *mdev,
				    u8 log_wq_size,
				    struct mlx5e_sq_param *param)
{
	void *sqc = param->sqc;
	void *wq = MLX5_ADDR_OF(sqc, sqc, wq);

	mlx5e_build_sq_param_common(mdev, param);

	MLX5_SET(wq, wq, log_wq_sz, log_wq_size);
	MLX5_SET(sqc, sqc, reg_umr, MLX5_CAP_ETH(mdev, reg_umr_sq));
	mlx5e_build_ico_cq_param(mdev, log_wq_size, &param->cqp);
}

static void mlx5e_build_async_icosq_param(struct mlx5_core_dev *mdev,
					  u8 log_wq_size,
					  struct mlx5e_sq_param *param)
{
	void *sqc = param->sqc;
	void *wq = MLX5_ADDR_OF(sqc, sqc, wq);

	mlx5e_build_sq_param_common(mdev, param);
	param->stop_room = mlx5e_stop_room_for_wqe(1); /* for XSK NOP */
	param->is_tls = mlx5_accel_is_ktls_rx(mdev);
	if (param->is_tls)
		param->stop_room += mlx5e_stop_room_for_wqe(1); /* for TLS RX resync NOP */
	MLX5_SET(sqc, sqc, reg_umr, MLX5_CAP_ETH(mdev, reg_umr_sq));
	MLX5_SET(wq, wq, log_wq_sz, log_wq_size);
	mlx5e_build_ico_cq_param(mdev, log_wq_size, &param->cqp);
}

void mlx5e_build_xdpsq_param(struct mlx5_core_dev *mdev,
			     struct mlx5e_params *params,
			     struct mlx5e_sq_param *param)
{
	void *sqc = param->sqc;
	void *wq = MLX5_ADDR_OF(sqc, sqc, wq);

	mlx5e_build_sq_param_common(mdev, param);
	MLX5_SET(wq, wq, log_wq_sz, params->log_sq_size);
	param->is_mpw = MLX5E_GET_PFLAG(params, MLX5E_PFLAG_XDP_TX_MPWQE);
	mlx5e_build_tx_cq_param(mdev, params, &param->cqp);
}

int mlx5e_build_channel_param(struct mlx5_core_dev *mdev,
			      struct mlx5e_params *params,
			      u16 q_counter,
			      struct mlx5e_channel_param *cparam)
{
	u8 icosq_log_wq_sz, async_icosq_log_wq_sz;
	int err;

	err = mlx5e_build_rq_param(mdev, params, NULL, q_counter, &cparam->rq);
	if (err)
		return err;

	icosq_log_wq_sz = mlx5e_build_icosq_log_wq_sz(params, &cparam->rq);
	async_icosq_log_wq_sz = mlx5e_build_async_icosq_log_wq_sz(mdev);

	mlx5e_build_sq_param(mdev, params, &cparam->txq_sq);
	mlx5e_build_xdpsq_param(mdev, params, &cparam->xdp_sq);
	mlx5e_build_icosq_param(mdev, icosq_log_wq_sz, &cparam->icosq);
	mlx5e_build_async_icosq_param(mdev, async_icosq_log_wq_sz, &cparam->async_icosq);

	return 0;
}