1 /*
2  * Copyright (c) 2018, Mellanox Technologies. All rights reserved.
3  *
4  * This software is available to you under a choice of one of two
5  * licenses.  You may choose to be licensed under the terms of the GNU
6  * General Public License (GPL) Version 2, available from the file
7  * COPYING in the main directory of this source tree, or the
8  * OpenIB.org BSD license below:
9  *
10  *     Redistribution and use in source and binary forms, with or
11  *     without modification, are permitted provided that the following
12  *     conditions are met:
13  *
14  *      - Redistributions of source code must retain the above
15  *        copyright notice, this list of conditions and the following
16  *        disclaimer.
17  *
18  *      - Redistributions in binary form must reproduce the above
19  *        copyright notice, this list of conditions and the following
20  *        disclaimer in the documentation and/or other materials
21  *        provided with the distribution.
22  *
23  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24  * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25  * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26  * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27  * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28  * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29  * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30  * SOFTWARE.
31  */
32 #include "port_buffer.h"
33 
/* Read the current PBMC (port buffer management control) register and
 * translate it into @port_buffer.  All sizes/thresholds reported by HW are
 * in units of port buffer cells; they are converted to bytes here using
 * port_buff_cell_sz.  Returns 0 on success or a negative errno.
 */
int mlx5e_port_query_buffer(struct mlx5e_priv *priv,
			    struct mlx5e_port_buffer *port_buffer)
{
	u16 port_buff_cell_sz = priv->dcbx.port_buff_cell_sz;
	struct mlx5_core_dev *mdev = priv->mdev;
	int sz = MLX5_ST_SZ_BYTES(pbmc_reg);
	u32 total_used = 0;
	void *buffer;
	void *out;
	int err;
	int i;

	out = kzalloc(sz, GFP_KERNEL);
	if (!out)
		return -ENOMEM;

	err = mlx5e_port_query_pbmc(mdev, out);
	if (err)
		goto out;

	/* Network (headroom) buffers: expose size/xon/xoff/epsb/lossy per
	 * buffer and accumulate the total headroom usage.
	 */
	for (i = 0; i < MLX5E_MAX_NETWORK_BUFFER; i++) {
		buffer = MLX5_ADDR_OF(pbmc_reg, out, buffer[i]);
		port_buffer->buffer[i].lossy =
			MLX5_GET(bufferx_reg, buffer, lossy);
		port_buffer->buffer[i].epsb =
			MLX5_GET(bufferx_reg, buffer, epsb);
		port_buffer->buffer[i].size =
			MLX5_GET(bufferx_reg, buffer, size) * port_buff_cell_sz;
		port_buffer->buffer[i].xon =
			MLX5_GET(bufferx_reg, buffer, xon_threshold) * port_buff_cell_sz;
		port_buffer->buffer[i].xoff =
			MLX5_GET(bufferx_reg, buffer, xoff_threshold) * port_buff_cell_sz;
		total_used += port_buffer->buffer[i].size;

		mlx5e_dbg(HW, priv, "buffer %d: size=%d, xon=%d, xoff=%d, epsb=%d, lossy=%d\n", i,
			  port_buffer->buffer[i].size,
			  port_buffer->buffer[i].xon,
			  port_buffer->buffer[i].xoff,
			  port_buffer->buffer[i].epsb,
			  port_buffer->buffer[i].lossy);
	}

	/* Remaining buffers are device-internal; only their aggregate size
	 * is tracked.
	 */
	port_buffer->internal_buffers_size = 0;
	for (i = MLX5E_MAX_NETWORK_BUFFER; i < MLX5E_TOTAL_BUFFERS; i++) {
		buffer = MLX5_ADDR_OF(pbmc_reg, out, buffer[i]);
		port_buffer->internal_buffers_size +=
			MLX5_GET(bufferx_reg, buffer, size) * port_buff_cell_sz;
	}

	/* Spare = total - internal - headroom (all in bytes). */
	port_buffer->port_buffer_size =
		MLX5_GET(pbmc_reg, out, port_buffer_size) * port_buff_cell_sz;
	port_buffer->headroom_size = total_used;
	port_buffer->spare_buffer_size = port_buffer->port_buffer_size -
					 port_buffer->internal_buffers_size -
					 port_buffer->headroom_size;

	mlx5e_dbg(HW, priv,
		  "total buffer size=%u, headroom buffer size=%u, internal buffers size=%u, spare buffer size=%u\n",
		  port_buffer->port_buffer_size, port_buffer->headroom_size,
		  port_buffer->internal_buffers_size,
		  port_buffer->spare_buffer_size);
out:
	kfree(out);
	return err;
}
99 
/* Snapshot of one shared buffer pool as reported by the SBPR register.
 * Values are in the units HW reports (cells); no byte conversion is done.
 */
struct mlx5e_buffer_pool {
	u32 infi_size;		/* pool configured as "infinite" size */
	u32 size;		/* configured pool size */
	u32 buff_occupancy;	/* current occupancy reported by HW */
};
105 
106 static int mlx5e_port_query_pool(struct mlx5_core_dev *mdev,
107 				 struct mlx5e_buffer_pool *buffer_pool,
108 				 u32 desc, u8 dir, u8 pool_idx)
109 {
110 	u32 out[MLX5_ST_SZ_DW(sbpr_reg)] = {};
111 	int err;
112 
113 	err = mlx5e_port_query_sbpr(mdev, desc, dir, pool_idx, out,
114 				    sizeof(out));
115 	if (err)
116 		return err;
117 
118 	buffer_pool->size = MLX5_GET(sbpr_reg, out, size);
119 	buffer_pool->infi_size = MLX5_GET(sbpr_reg, out, infi_size);
120 	buffer_pool->buff_occupancy = MLX5_GET(sbpr_reg, out, buff_occupancy);
121 
122 	return err;
123 }
124 
/* Shared buffer traffic direction, as used by SBPR/SBCM registers. */
enum {
	MLX5_INGRESS_DIR = 0,
	MLX5_EGRESS_DIR = 1,
};

/* Shared buffer pool indices. */
enum {
	MLX5_LOSSY_POOL = 0,
	MLX5_LOSSLESS_POOL = 1,
};

/* No limit on usage of shared buffer pool (max_buff=0) */
#define MLX5_SB_POOL_NO_THRESHOLD  0
/* Shared buffer pool usage threshold when calculated
 * dynamically in alpha units. alpha=13 is equivalent to
 * HW_alpha of  [(1/128) * 2 ^ (alpha-1)] = 32, where HW_alpha
 * equates to the following portion of the shared buffer pool:
 * [32 / (1 + n * 32)] While *n* is the number of buffers
 * that are using the shared buffer pool.
 */
#define MLX5_SB_POOL_THRESHOLD 13
145 
/* Shared buffer class management parameters */
struct mlx5_sbcm_params {
	u8 pool_idx;	/* which shared pool the buffer draws from */
	u8 max_buff;	/* usage threshold (alpha units), 0 = no threshold */
	u8 infi_size;	/* 1 = unlimited usage of the pool */
};
152 
/* Unused (zero-sized) buffer: lossy pool, no threshold, bounded usage. */
static const struct mlx5_sbcm_params sbcm_default = {
	.pool_idx = MLX5_LOSSY_POOL,
	.max_buff = MLX5_SB_POOL_NO_THRESHOLD,
	.infi_size = 0,
};

/* Lossy buffer: lossy pool with unlimited usage. */
static const struct mlx5_sbcm_params sbcm_lossy = {
	.pool_idx = MLX5_LOSSY_POOL,
	.max_buff = MLX5_SB_POOL_NO_THRESHOLD,
	.infi_size = 1,
};

/* Lossless buffer sharing the pool with others: dynamic alpha threshold. */
static const struct mlx5_sbcm_params sbcm_lossless = {
	.pool_idx = MLX5_LOSSLESS_POOL,
	.max_buff = MLX5_SB_POOL_THRESHOLD,
	.infi_size = 0,
};

/* Sole lossless buffer: unlimited usage of the lossless pool. */
static const struct mlx5_sbcm_params sbcm_lossless_no_threshold = {
	.pool_idx = MLX5_LOSSLESS_POOL,
	.max_buff = MLX5_SB_POOL_NO_THRESHOLD,
	.infi_size = 1,
};
176 
177 /**
178  * select_sbcm_params() - selects the shared buffer pool configuration
179  *
180  * @buffer: <input> port buffer to retrieve params of
181  * @lossless_buff_count: <input> number of lossless buffers in total
182  *
183  * The selection is based on the following rules:
184  * 1. If buffer size is 0, no shared buffer pool is used.
185  * 2. If buffer is lossy, use lossy shared buffer pool.
186  * 3. If there are more than 1 lossless buffers, use lossless shared buffer pool
187  *    with threshold.
188  * 4. If there is only 1 lossless buffer, use lossless shared buffer pool
189  *    without threshold.
190  *
191  * @return const struct mlx5_sbcm_params* selected values
192  */
193 static const struct mlx5_sbcm_params *
194 select_sbcm_params(struct mlx5e_bufferx_reg *buffer, u8 lossless_buff_count)
195 {
196 	if (buffer->size == 0)
197 		return &sbcm_default;
198 
199 	if (buffer->lossy)
200 		return &sbcm_lossy;
201 
202 	if (lossless_buff_count > 1)
203 		return &sbcm_lossless;
204 
205 	return &sbcm_lossless_no_threshold;
206 }
207 
208 static int port_update_pool_cfg(struct mlx5_core_dev *mdev,
209 				struct mlx5e_port_buffer *port_buffer)
210 {
211 	const struct mlx5_sbcm_params *p;
212 	u8 lossless_buff_count = 0;
213 	int err;
214 	int i;
215 
216 	if (!MLX5_CAP_GEN(mdev, sbcam_reg))
217 		return 0;
218 
219 	for (i = 0; i < MLX5E_MAX_NETWORK_BUFFER; i++)
220 		lossless_buff_count += ((port_buffer->buffer[i].size) &&
221 				       (!(port_buffer->buffer[i].lossy)));
222 
223 	for (i = 0; i < MLX5E_MAX_NETWORK_BUFFER; i++) {
224 		p = select_sbcm_params(&port_buffer->buffer[i], lossless_buff_count);
225 		err = mlx5e_port_set_sbcm(mdev, 0, i,
226 					  MLX5_INGRESS_DIR,
227 					  p->infi_size,
228 					  p->max_buff,
229 					  p->pool_idx);
230 		if (err)
231 			return err;
232 	}
233 
234 	return 0;
235 }
236 
237 static int port_update_shared_buffer(struct mlx5_core_dev *mdev,
238 				     u32 current_headroom_size,
239 				     u32 new_headroom_size)
240 {
241 	struct mlx5e_buffer_pool lossless_ipool;
242 	struct mlx5e_buffer_pool lossy_epool;
243 	u32 lossless_ipool_size;
244 	u32 shared_buffer_size;
245 	u32 total_buffer_size;
246 	u32 lossy_epool_size;
247 	int err;
248 
249 	if (!MLX5_CAP_GEN(mdev, sbcam_reg))
250 		return 0;
251 
252 	err = mlx5e_port_query_pool(mdev, &lossy_epool, 0, MLX5_EGRESS_DIR,
253 				    MLX5_LOSSY_POOL);
254 	if (err)
255 		return err;
256 
257 	err = mlx5e_port_query_pool(mdev, &lossless_ipool, 0, MLX5_INGRESS_DIR,
258 				    MLX5_LOSSLESS_POOL);
259 	if (err)
260 		return err;
261 
262 	total_buffer_size = current_headroom_size + lossy_epool.size +
263 			    lossless_ipool.size;
264 	shared_buffer_size = total_buffer_size - new_headroom_size;
265 
266 	if (shared_buffer_size < 4) {
267 		pr_err("Requested port buffer is too large, not enough space left for shared buffer\n");
268 		return -EINVAL;
269 	}
270 
271 	/* Total shared buffer size is split in a ratio of 3:1 between
272 	 * lossy and lossless pools respectively.
273 	 */
274 	lossy_epool_size = (shared_buffer_size / 4) * 3;
275 	lossless_ipool_size = shared_buffer_size / 4;
276 
277 	mlx5e_port_set_sbpr(mdev, 0, MLX5_EGRESS_DIR, MLX5_LOSSY_POOL, 0,
278 			    lossy_epool_size);
279 	mlx5e_port_set_sbpr(mdev, 0, MLX5_INGRESS_DIR, MLX5_LOSSLESS_POOL, 0,
280 			    lossless_ipool_size);
281 	return 0;
282 }
283 
/* Write @port_buffer back to hardware: convert the byte values to cell
 * units, update the shared buffer pools and per-buffer pool classes, then
 * program the PBMC register.  Returns 0 on success or a negative errno.
 */
static int port_set_buffer(struct mlx5e_priv *priv,
			   struct mlx5e_port_buffer *port_buffer)
{
	u16 port_buff_cell_sz = priv->dcbx.port_buff_cell_sz;
	struct mlx5_core_dev *mdev = priv->mdev;
	int sz = MLX5_ST_SZ_BYTES(pbmc_reg);
	u32 new_headroom_size = 0;
	u32 current_headroom_size;
	void *in;
	int err;
	int i;

	current_headroom_size = port_buffer->headroom_size;

	in = kzalloc(sz, GFP_KERNEL);
	if (!in)
		return -ENOMEM;

	/* Start from the current PBMC contents so fields we do not touch
	 * (e.g. internal buffers) are preserved on write-back.
	 */
	err = mlx5e_port_query_pbmc(mdev, in);
	if (err)
		goto out;

	for (i = 0; i < MLX5E_MAX_NETWORK_BUFFER; i++) {
		void *buffer = MLX5_ADDR_OF(pbmc_reg, in, buffer[i]);
		u64 size = port_buffer->buffer[i].size;
		u64 xoff = port_buffer->buffer[i].xoff;
		u64 xon = port_buffer->buffer[i].xon;

		/* Accumulate headroom in bytes, then convert each field to
		 * cell units (do_div requires u64 operands).
		 */
		new_headroom_size += size;
		do_div(size, port_buff_cell_sz);
		do_div(xoff, port_buff_cell_sz);
		do_div(xon, port_buff_cell_sz);
		MLX5_SET(bufferx_reg, buffer, size, size);
		MLX5_SET(bufferx_reg, buffer, lossy, port_buffer->buffer[i].lossy);
		MLX5_SET(bufferx_reg, buffer, xoff_threshold, xoff);
		MLX5_SET(bufferx_reg, buffer, xon_threshold, xon);
	}

	/* Shared buffer interfaces take sizes in cell units. */
	new_headroom_size /= port_buff_cell_sz;
	current_headroom_size /= port_buff_cell_sz;
	err = port_update_shared_buffer(priv->mdev, current_headroom_size,
					new_headroom_size);
	if (err)
		goto out;

	err = port_update_pool_cfg(priv->mdev, port_buffer);
	if (err)
		goto out;

	err = mlx5e_port_set_pbmc(mdev, in);
out:
	kfree(in);
	return err;
}
338 
339 /* xoff = ((301+2.16 * len [m]) * speed [Gbps] + 2.72 MTU [B])
340  * minimum speed value is 40Gbps
341  */
342 static u32 calculate_xoff(struct mlx5e_priv *priv, unsigned int mtu)
343 {
344 	u32 speed;
345 	u32 xoff;
346 	int err;
347 
348 	err = mlx5e_port_linkspeed(priv->mdev, &speed);
349 	if (err)
350 		speed = SPEED_40000;
351 	speed = max_t(u32, speed, SPEED_40000);
352 
353 	xoff = (301 + 216 * priv->dcbx.cable_len / 100) * speed / 1000 + 272 * mtu / 100;
354 
355 	mlx5e_dbg(HW, priv, "%s: xoff=%d\n", __func__, xoff);
356 	return xoff;
357 }
358 
359 static int update_xoff_threshold(struct mlx5e_port_buffer *port_buffer,
360 				 u32 xoff, unsigned int max_mtu, u16 port_buff_cell_sz)
361 {
362 	int i;
363 
364 	for (i = 0; i < MLX5E_MAX_NETWORK_BUFFER; i++) {
365 		if (port_buffer->buffer[i].lossy) {
366 			port_buffer->buffer[i].xoff = 0;
367 			port_buffer->buffer[i].xon  = 0;
368 			continue;
369 		}
370 
371 		if (port_buffer->buffer[i].size <
372 		    (xoff + max_mtu + port_buff_cell_sz)) {
373 			pr_err("buffer_size[%d]=%d is not enough for lossless buffer\n",
374 			       i, port_buffer->buffer[i].size);
375 			return -ENOMEM;
376 		}
377 
378 		port_buffer->buffer[i].xoff = port_buffer->buffer[i].size - xoff;
379 		port_buffer->buffer[i].xon  =
380 			port_buffer->buffer[i].xoff - max_mtu;
381 	}
382 
383 	return 0;
384 }
385 
386 /**
387  *	update_buffer_lossy	- Update buffer configuration based on pfc
388  *	@mdev: port function core device
389  *	@max_mtu: netdev's max_mtu
390  *	@pfc_en: <input> current pfc configuration
391  *	@buffer: <input> current prio to buffer mapping
392  *	@xoff:   <input> xoff value
393  *	@port_buff_cell_sz: <input> port buffer cell_size
394  *	@port_buffer: <output> port receive buffer configuration
395  *	@change: <output>
396  *
397  *	Update buffer configuration based on pfc configuration and
398  *	priority to buffer mapping.
399  *	Buffer's lossy bit is changed to:
400  *		lossless if there is at least one PFC enabled priority
401  *		mapped to this buffer lossy if all priorities mapped to
402  *		this buffer are PFC disabled
403  *
404  *	@return: 0 if no error,
405  *	sets change to true if buffer configuration was modified.
406  */
407 static int update_buffer_lossy(struct mlx5_core_dev *mdev,
408 			       unsigned int max_mtu,
409 			       u8 pfc_en, u8 *buffer, u32 xoff, u16 port_buff_cell_sz,
410 			       struct mlx5e_port_buffer *port_buffer,
411 			       bool *change)
412 {
413 	bool changed = false;
414 	u8 lossy_count;
415 	u8 prio_count;
416 	u8 lossy;
417 	int prio;
418 	int err;
419 	int i;
420 
421 	for (i = 0; i < MLX5E_MAX_NETWORK_BUFFER; i++) {
422 		prio_count = 0;
423 		lossy_count = 0;
424 
425 		for (prio = 0; prio < MLX5E_MAX_PRIORITY; prio++) {
426 			if (buffer[prio] != i)
427 				continue;
428 
429 			prio_count++;
430 			lossy_count += !(pfc_en & (1 << prio));
431 		}
432 
433 		if (lossy_count == prio_count)
434 			lossy = 1;
435 		else /* lossy_count < prio_count */
436 			lossy = 0;
437 
438 		if (lossy != port_buffer->buffer[i].lossy) {
439 			port_buffer->buffer[i].lossy = lossy;
440 			changed = true;
441 		}
442 	}
443 
444 	if (changed) {
445 		err = update_xoff_threshold(port_buffer, xoff, max_mtu, port_buff_cell_sz);
446 		if (err)
447 			return err;
448 
449 		err = port_update_pool_cfg(mdev, port_buffer);
450 		if (err)
451 			return err;
452 
453 		*change = true;
454 	}
455 
456 	return 0;
457 }
458 
459 static int fill_pfc_en(struct mlx5_core_dev *mdev, u8 *pfc_en)
460 {
461 	u32 g_rx_pause, g_tx_pause;
462 	int err;
463 
464 	err = mlx5_query_port_pause(mdev, &g_rx_pause, &g_tx_pause);
465 	if (err)
466 		return err;
467 
468 	/* If global pause enabled, set all active buffers to lossless.
469 	 * Otherwise, check PFC setting.
470 	 */
471 	if (g_rx_pause || g_tx_pause)
472 		*pfc_en = 0xff;
473 	else
474 		err = mlx5_query_port_pfc(mdev, pfc_en, NULL);
475 
476 	return err;
477 }
478 
/* Floor for max_mtu used in threshold math, so thresholds stay valid even
 * on netdevs advertising a smaller max_mtu.
 */
#define MINIMUM_MAX_MTU 9216
/* Apply a user-requested buffer configuration.  @change is a bitmask of
 * MLX5E_PORT_BUFFER_* flags describing what the caller modified (cable
 * length, PFC, prio-to-buffer mapping, buffer sizes).  The current config
 * is queried, updated per flag, and written back only if something
 * actually changed.  Returns 0 on success or a negative errno.
 */
int mlx5e_port_manual_buffer_config(struct mlx5e_priv *priv,
				    u32 change, unsigned int mtu,
				    struct ieee_pfc *pfc,
				    u32 *buffer_size,
				    u8 *prio2buffer)
{
	u16 port_buff_cell_sz = priv->dcbx.port_buff_cell_sz;
	struct mlx5e_port_buffer port_buffer;
	u32 xoff = calculate_xoff(priv, mtu);
	bool update_prio2buffer = false;
	u8 buffer[MLX5E_MAX_PRIORITY];
	bool update_buffer = false;
	unsigned int max_mtu;
	u32 total_used = 0;
	u8 curr_pfc_en;
	int err;
	int i;

	mlx5e_dbg(HW, priv, "%s: change=%x\n", __func__, change);
	max_mtu = max_t(unsigned int, priv->netdev->max_mtu, MINIMUM_MAX_MTU);

	err = mlx5e_port_query_buffer(priv, &port_buffer);
	if (err)
		return err;

	/* Cable length affects xoff; recompute thresholds. */
	if (change & MLX5E_PORT_BUFFER_CABLE_LEN) {
		update_buffer = true;
		err = update_xoff_threshold(&port_buffer, xoff, max_mtu, port_buff_cell_sz);
		if (err)
			return err;
	}

	/* PFC change: re-derive lossy bits using the current prio mapping. */
	if (change & MLX5E_PORT_BUFFER_PFC) {
		mlx5e_dbg(HW, priv, "%s: requested PFC per priority bitmask: 0x%x\n",
			  __func__, pfc->pfc_en);
		err = mlx5e_port_query_priority2buffer(priv->mdev, buffer);
		if (err)
			return err;

		err = update_buffer_lossy(priv->mdev, max_mtu, pfc->pfc_en, buffer, xoff,
					  port_buff_cell_sz, &port_buffer,
					  &update_buffer);
		if (err)
			return err;
	}

	/* Mapping change: re-derive lossy bits using the effective PFC
	 * state (global pause counts as all-lossless).
	 */
	if (change & MLX5E_PORT_BUFFER_PRIO2BUFFER) {
		update_prio2buffer = true;
		for (i = 0; i < MLX5E_MAX_NETWORK_BUFFER; i++)
			mlx5e_dbg(HW, priv, "%s: requested to map prio[%d] to buffer %d\n",
				  __func__, i, prio2buffer[i]);

		err = fill_pfc_en(priv->mdev, &curr_pfc_en);
		if (err)
			return err;

		err = update_buffer_lossy(priv->mdev, max_mtu, curr_pfc_en, prio2buffer, xoff,
					  port_buff_cell_sz, &port_buffer, &update_buffer);
		if (err)
			return err;
	}

	/* Explicit size change: validate and install the requested sizes. */
	if (change & MLX5E_PORT_BUFFER_SIZE) {
		for (i = 0; i < MLX5E_MAX_NETWORK_BUFFER; i++) {
			mlx5e_dbg(HW, priv, "%s: buffer[%d]=%d\n", __func__, i, buffer_size[i]);
			if (!port_buffer.buffer[i].lossy && !buffer_size[i]) {
				mlx5e_dbg(HW, priv, "%s: lossless buffer[%d] size cannot be zero\n",
					  __func__, i);
				return -EINVAL;
			}

			port_buffer.buffer[i].size = buffer_size[i];
			total_used += buffer_size[i];
		}

		mlx5e_dbg(HW, priv, "%s: total buffer requested=%d\n", __func__, total_used);

		/* Growth beyond the current headroom must fit in the spare
		 * buffer space.
		 */
		if (total_used > port_buffer.headroom_size &&
		    (total_used - port_buffer.headroom_size) >
			    port_buffer.spare_buffer_size)
			return -EINVAL;

		update_buffer = true;
		err = update_xoff_threshold(&port_buffer, xoff, max_mtu, port_buff_cell_sz);
		if (err)
			return err;
	}

	/* Need to update buffer configuration if xoff value is changed */
	if (!update_buffer && xoff != priv->dcbx.xoff) {
		update_buffer = true;
		err = update_xoff_threshold(&port_buffer, xoff, max_mtu, port_buff_cell_sz);
		if (err)
			return err;
	}
	priv->dcbx.xoff = xoff;

	/* Apply the settings */
	if (update_buffer) {
		err = port_set_buffer(priv, &port_buffer);
		if (err)
			return err;
	}

	if (update_prio2buffer)
		err = mlx5e_port_set_priority2buffer(priv->mdev, prio2buffer);

	return err;
}
589