/*-
 * Copyright (c) 2013-2019, Mellanox Technologies, Ltd. All rights reserved.
 * Copyright (c) 2022 NVIDIA corporation & affiliates.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD$
 */

#include <dev/mlx5/driver.h>
#include <dev/mlx5/fs.h>
#include <linux/rbtree.h>
#include <dev/mlx5/mlx5_core/mlx5_core.h>
#include <dev/mlx5/mlx5_core/fs_core.h>
#include <dev/mlx5/mlx5_core/mlx5_fc_cmd.h>

#define MLX5_FC_STATS_PERIOD msecs_to_jiffies(1000)
#define MLX5_FC_BULK_QUERY_ALLOC_PERIOD msecs_to_jiffies(180 * 1000)
/* Max number of counters to query in bulk read is 32K */
#define MLX5_SW_MAX_COUNTERS_BULK BIT(15)
#define MLX5_INIT_COUNTERS_BULK 8
#define MLX5_FC_POOL_MAX_THRESHOLD BIT(18)
#define MLX5_FC_POOL_USED_BUFF_RATIO 10

struct mlx5_fc_cache {
	u64 packets;
	u64 bytes;
	u64 lastuse;
};

struct mlx5_fc {
	struct list_head list;
	struct llist_node addlist;
	struct llist_node dellist;

	/* last{packets,bytes} members are used when calculating the delta since
	 * last reading
	 */
	u64 lastpackets;
	u64 lastbytes;

	struct mlx5_fc_bulk *bulk;
	u32 id;
	bool aging;

	struct mlx5_fc_cache cache ____cacheline_aligned_in_smp;
};

static void mlx5_fc_pool_init(struct mlx5_fc_pool *fc_pool, struct mlx5_core_dev *dev);
static void mlx5_fc_pool_cleanup(struct mlx5_fc_pool *fc_pool);
static struct mlx5_fc *mlx5_fc_pool_acquire_counter(struct mlx5_fc_pool *fc_pool);
static void mlx5_fc_pool_release_counter(struct mlx5_fc_pool *fc_pool, struct mlx5_fc *fc);

/* locking scheme:
 *
 * It is the responsibility of the user to prevent concurrent calls or bad
 * ordering of calls to mlx5_fc_create() and mlx5_fc_destroy(), and accesses
 * through a reference to struct mlx5_fc.
 * e.g. en_tc.c is protected by the RTNL lock of its caller, and will never
 * call a dump (access to struct mlx5_fc) after a counter is destroyed.
 *
 * access to counter list:
 * - create (user context)
 *   - mlx5_fc_create() only adds to an addlist to be consumed by
 *     mlx5_fc_stats_work(). addlist is a lockless singly linked list
 *     that doesn't require any additional synchronization when adding a
 *     single node.
 *   - spawn the work to do the actual insert
 *
 * - destroy (user context)
 *   - add a counter to the lockless dellist
 *   - spawn the work to do the actual delete
 *
 * - dump (user context)
 *   user should not call dump after destroy
 *
 * - query (single thread workqueue context)
 *   destroy/dump - no conflict (see destroy)
 *   query/dump - packets and bytes might be inconsistent (since update is not
 *                atomic)
 *   query/create - no conflict (see create)
 *   since every create/destroy spawns the work, the thread will only query
 *   the hardware after the necessary time has elapsed.
 */
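
/*
 * Typical counter lifecycle, as a minimal sketch (error handling and the
 * flow steering rule that actually references the counter are omitted):
 *
 *	struct mlx5_fc *fc;
 *	u64 bytes, packets, lastuse;
 *
 *	fc = mlx5_fc_create(dev, true);		// true = aging counter
 *	...					// attach mlx5_fc_id(fc) to a rule
 *	mlx5_fc_query_cached(fc, &bytes, &packets, &lastuse);
 *	mlx5_fc_destroy(dev, fc);
 */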

static struct list_head *mlx5_fc_counters_lookup_next(struct mlx5_core_dev *dev,
						      u32 id)
{
	struct mlx5_fc_stats *fc_stats = &dev->priv.fc_stats;
	struct mlx5_fc *counter;
	int next_id = id + 1;

	rcu_read_lock();
	/* skip counters that are in idr, but not yet in counters list */
	while ((counter = idr_get_next(&fc_stats->counters_idr, &next_id)) != NULL &&
	       list_empty(&counter->list))
		next_id++;
	rcu_read_unlock();

	return counter ? &counter->list : &fc_stats->counters;
}

static void mlx5_fc_stats_insert(struct mlx5_core_dev *dev,
				 struct mlx5_fc *counter)
{
	struct list_head *next = mlx5_fc_counters_lookup_next(dev, counter->id);

	list_add_tail(&counter->list, next);
}

static void mlx5_fc_stats_remove(struct mlx5_core_dev *dev,
				 struct mlx5_fc *counter)
{
	struct mlx5_fc_stats *fc_stats = &dev->priv.fc_stats;

	list_del(&counter->list);

	spin_lock(&fc_stats->counters_idr_lock);
	WARN_ON(!idr_remove(&fc_stats->counters_idr, counter->id));
	spin_unlock(&fc_stats->counters_idr_lock);
}

static int get_init_bulk_query_len(struct mlx5_core_dev *dev)
{
	return min_t(int, MLX5_INIT_COUNTERS_BULK,
		     (1 << MLX5_CAP_GEN(dev, log_max_flow_counter_bulk)));
}

static int get_max_bulk_query_len(struct mlx5_core_dev *dev)
{
	return min_t(int, MLX5_SW_MAX_COUNTERS_BULK,
		     (1 << MLX5_CAP_GEN(dev, log_max_flow_counter_bulk)));
}

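/*
 * Copy one counter's statistics out of a bulk query response into its
 * software cache.  lastuse is only refreshed when the packet count has
 * changed, so idle counters keep their old timestamp.
 */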
static void update_counter_cache(int index, u32 *bulk_raw_data,
				 struct mlx5_fc_cache *cache)
{
	void *stats = MLX5_ADDR_OF(query_flow_counter_out, bulk_raw_data,
				   flow_statistics[index]);
	u64 packets = MLX5_GET64(traffic_counter, stats, packets);
	u64 bytes = MLX5_GET64(traffic_counter, stats, octets);

	if (cache->packets == packets)
		return;

	cache->packets = packets;
	cache->bytes = bytes;
	cache->lastuse = jiffies;
}

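/*
 * Refresh the caches of all counters in [first->id, last_id] using as few
 * bulk query commands as possible: each iteration queries up to
 * bulk_query_len counters starting at a 4-aligned base id, then walks the
 * sorted counter list to pick up the counters that fell inside that window.
 */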
static void mlx5_fc_stats_query_counter_range(struct mlx5_core_dev *dev,
					      struct mlx5_fc *first,
					      u32 last_id)
{
	struct mlx5_fc_stats *fc_stats = &dev->priv.fc_stats;
	bool query_more_counters = (first->id <= last_id);
	int cur_bulk_len = fc_stats->bulk_query_len;
	u32 *data = fc_stats->bulk_query_out;
	struct mlx5_fc *counter = first;
	u32 bulk_base_id;
	int bulk_len;
	int err;

	while (query_more_counters) {
		/* first id must be aligned to 4 when using bulk query */
		bulk_base_id = counter->id & ~0x3;

		/* number of counters to query, including the last counter */
		bulk_len = min_t(int, cur_bulk_len,
				 ALIGN(last_id - bulk_base_id + 1, 4));

		err = mlx5_cmd_fc_bulk_query(dev, bulk_base_id, bulk_len,
					     data);
		if (err) {
			mlx5_core_err(dev, "Error doing bulk query: %d\n", err);
			return;
		}
		query_more_counters = false;

		list_for_each_entry_from(counter, &fc_stats->counters, list) {
			int counter_index = counter->id - bulk_base_id;
			struct mlx5_fc_cache *cache = &counter->cache;

			if (counter->id >= bulk_base_id + bulk_len) {
				query_more_counters = true;
				break;
			}

			update_counter_cache(counter_index, data, cache);
		}
	}
}

static void mlx5_fc_free(struct mlx5_core_dev *dev, struct mlx5_fc *counter)
{
	mlx5_cmd_fc_free(dev, counter->id);
	kfree(counter);
}

static void mlx5_fc_release(struct mlx5_core_dev *dev, struct mlx5_fc *counter)
{
	struct mlx5_fc_stats *fc_stats = &dev->priv.fc_stats;

	if (counter->bulk)
		mlx5_fc_pool_release_counter(&fc_stats->fc_pool, counter);
	else
		mlx5_fc_free(dev, counter);
}

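/*
 * Try to grow the bulk query buffer to the device maximum.  Allocation
 * failures are rate limited: after a failure the next attempt is deferred
 * by MLX5_FC_BULK_QUERY_ALLOC_PERIOD.
 */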
static void mlx5_fc_stats_bulk_query_size_increase(struct mlx5_core_dev *dev)
{
	struct mlx5_fc_stats *fc_stats = &dev->priv.fc_stats;
	int max_bulk_len = get_max_bulk_query_len(dev);
	unsigned long now = jiffies;
	u32 *bulk_query_out_tmp;
	int max_out_len;

	if (fc_stats->bulk_query_alloc_failed &&
	    time_before(now, fc_stats->next_bulk_query_alloc))
		return;

	max_out_len = mlx5_cmd_fc_get_bulk_query_out_len(max_bulk_len);
	bulk_query_out_tmp = kzalloc(max_out_len, GFP_KERNEL);
	if (!bulk_query_out_tmp) {
		mlx5_core_warn(dev,
			       "Can't increase flow counters bulk query buffer size, insufficient memory, bulk_size(%d)\n",
			       max_bulk_len);
		fc_stats->bulk_query_alloc_failed = true;
		fc_stats->next_bulk_query_alloc =
			now + MLX5_FC_BULK_QUERY_ALLOC_PERIOD;
		return;
	}

	kfree(fc_stats->bulk_query_out);
	fc_stats->bulk_query_out = bulk_query_out_tmp;
	fc_stats->bulk_query_len = max_bulk_len;
	if (fc_stats->bulk_query_alloc_failed) {
		mlx5_core_info(dev,
			       "Flow counters bulk query buffer size increased, bulk_size(%d)\n",
			       max_bulk_len);
		fc_stats->bulk_query_alloc_failed = false;
	}
}

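/*
 * Periodic work: drain the lockless add/del lists into the sorted counter
 * list, grow the bulk query buffer once enough counters exist, and, when
 * the sampling interval has elapsed, query the whole id range in bulk.
 */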
static void mlx5_fc_stats_work(struct work_struct *work)
{
	struct mlx5_core_dev *dev = container_of(work, struct mlx5_core_dev,
						 priv.fc_stats.work.work);
	struct mlx5_fc_stats *fc_stats = &dev->priv.fc_stats;
	/* Take dellist first to ensure that counters cannot be deleted before
	 * they are inserted.
	 */
	struct llist_node *dellist = llist_del_all(&fc_stats->dellist);
	struct llist_node *addlist = llist_del_all(&fc_stats->addlist);
	struct mlx5_fc *counter = NULL, *last = NULL, *tmp;
	unsigned long now = jiffies;

	if (addlist || !list_empty(&fc_stats->counters))
		queue_delayed_work(fc_stats->wq, &fc_stats->work,
				   fc_stats->sampling_interval);

	llist_for_each_entry(counter, addlist, addlist) {
		mlx5_fc_stats_insert(dev, counter);
		fc_stats->num_counters++;
	}

	llist_for_each_entry_safe(counter, tmp, dellist, dellist) {
		mlx5_fc_stats_remove(dev, counter);

		mlx5_fc_release(dev, counter);
		fc_stats->num_counters--;
	}

	if (fc_stats->bulk_query_len < get_max_bulk_query_len(dev) &&
	    fc_stats->num_counters > get_init_bulk_query_len(dev))
		mlx5_fc_stats_bulk_query_size_increase(dev);

	if (time_before(now, fc_stats->next_query) ||
	    list_empty(&fc_stats->counters))
		return;
	last = list_last_entry(&fc_stats->counters, struct mlx5_fc, list);

	counter = list_first_entry(&fc_stats->counters, struct mlx5_fc,
				   list);
	if (counter)
		mlx5_fc_stats_query_counter_range(dev, counter, last->id);

	fc_stats->next_query = now + fc_stats->sampling_interval;
}

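/* Allocate a single counter directly from firmware, bypassing the pool. */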
static struct mlx5_fc *mlx5_fc_single_alloc(struct mlx5_core_dev *dev)
{
	struct mlx5_fc *counter;
	int err;

	counter = kzalloc(sizeof(*counter), GFP_KERNEL);
	if (!counter)
		return ERR_PTR(-ENOMEM);

	err = mlx5_cmd_fc_alloc(dev, &counter->id);
	if (err) {
		kfree(counter);
		return ERR_PTR(err);
	}

	return counter;
}

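/*
 * Aging counters are preferably taken from the bulk pool when the device
 * supports bulk allocation; anything else falls back to a single firmware
 * allocation.
 */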
static struct mlx5_fc *mlx5_fc_acquire(struct mlx5_core_dev *dev, bool aging)
{
	struct mlx5_fc_stats *fc_stats = &dev->priv.fc_stats;
	struct mlx5_fc *counter;

	if (aging && MLX5_CAP_GEN(dev, flow_counter_bulk_alloc) != 0) {
		counter = mlx5_fc_pool_acquire_counter(&fc_stats->fc_pool);
		if (!IS_ERR(counter))
			return counter;
	}

	return mlx5_fc_single_alloc(dev);
}

struct mlx5_fc *mlx5_fc_create_ex(struct mlx5_core_dev *dev, bool aging)
{
	struct mlx5_fc *counter = mlx5_fc_acquire(dev, aging);
	struct mlx5_fc_stats *fc_stats = &dev->priv.fc_stats;
	int err = 0;

	if (IS_ERR(counter))
		return counter;

	INIT_LIST_HEAD(&counter->list);
	counter->aging = aging;

	if (aging) {
		u32 id = counter->id;

		counter->cache.lastuse = jiffies;
		counter->lastbytes = counter->cache.bytes;
		counter->lastpackets = counter->cache.packets;

		idr_preload(GFP_KERNEL);
		spin_lock(&fc_stats->counters_idr_lock);

		err = idr_alloc(&fc_stats->counters_idr, counter, id, id + 1,
				GFP_NOWAIT);

		spin_unlock(&fc_stats->counters_idr_lock);
		idr_preload_end();
		if (err < 0 || err != id)
			goto err_out_alloc;

		llist_add(&counter->addlist, &fc_stats->addlist);
	}

	return counter;

err_out_alloc:
	mlx5_fc_release(dev, counter);
	return ERR_PTR(err);
}

struct mlx5_fc *mlx5_fc_create(struct mlx5_core_dev *dev, bool aging)
{
	struct mlx5_fc *counter = mlx5_fc_create_ex(dev, aging);
	struct mlx5_fc_stats *fc_stats = &dev->priv.fc_stats;

	if (aging)
		mod_delayed_work(fc_stats->wq, &fc_stats->work, 0);
	return counter;
}
EXPORT_SYMBOL(mlx5_fc_create);

u32 mlx5_fc_id(struct mlx5_fc *counter)
{
	return counter->id;
}
EXPORT_SYMBOL(mlx5_fc_id);

void mlx5_fc_destroy(struct mlx5_core_dev *dev, struct mlx5_fc *counter)
{
	struct mlx5_fc_stats *fc_stats = &dev->priv.fc_stats;

	if (!counter)
		return;

	if (counter->aging) {
		llist_add(&counter->dellist, &fc_stats->dellist);
		mod_delayed_work(fc_stats->wq, &fc_stats->work, 0);
		return;
	}

	mlx5_fc_release(dev, counter);
}
EXPORT_SYMBOL(mlx5_fc_destroy);

int mlx5_init_fc_stats(struct mlx5_core_dev *dev)
{
	struct mlx5_fc_stats *fc_stats = &dev->priv.fc_stats;
	int init_bulk_len;
	int init_out_len;

	spin_lock_init(&fc_stats->counters_idr_lock);
	idr_init(&fc_stats->counters_idr);
	INIT_LIST_HEAD(&fc_stats->counters);
	init_llist_head(&fc_stats->addlist);
	init_llist_head(&fc_stats->dellist);

	init_bulk_len = get_init_bulk_query_len(dev);
	init_out_len = mlx5_cmd_fc_get_bulk_query_out_len(init_bulk_len);
	fc_stats->bulk_query_out = kzalloc(init_out_len, GFP_KERNEL);
	if (!fc_stats->bulk_query_out)
		return -ENOMEM;
	fc_stats->bulk_query_len = init_bulk_len;

	fc_stats->wq = create_singlethread_workqueue("mlx5_fc");
	if (!fc_stats->wq)
		goto err_wq_create;

	fc_stats->sampling_interval = MLX5_FC_STATS_PERIOD;
	INIT_DELAYED_WORK(&fc_stats->work, mlx5_fc_stats_work);

	mlx5_fc_pool_init(&fc_stats->fc_pool, dev);
	return 0;

err_wq_create:
	kfree(fc_stats->bulk_query_out);
	return -ENOMEM;
}

void mlx5_cleanup_fc_stats(struct mlx5_core_dev *dev)
{
	struct mlx5_fc_stats *fc_stats = &dev->priv.fc_stats;
	struct llist_node *tmplist;
	struct mlx5_fc *counter;
	struct mlx5_fc *tmp;

	if (!dev->priv.fc_stats.wq)
		return;

	cancel_delayed_work_sync(&dev->priv.fc_stats.work);
	destroy_workqueue(dev->priv.fc_stats.wq);
	dev->priv.fc_stats.wq = NULL;

	tmplist = llist_del_all(&fc_stats->addlist);
	llist_for_each_entry_safe(counter, tmp, tmplist, addlist)
		mlx5_fc_release(dev, counter);

	list_for_each_entry_safe(counter, tmp, &fc_stats->counters, list)
		mlx5_fc_release(dev, counter);

	mlx5_fc_pool_cleanup(&fc_stats->fc_pool);
	idr_destroy(&fc_stats->counters_idr);
	kfree(fc_stats->bulk_query_out);
}

int mlx5_fc_query(struct mlx5_core_dev *dev, struct mlx5_fc *counter,
		  u64 *packets, u64 *bytes)
{
	return mlx5_cmd_fc_query(dev, counter->id, packets, bytes);
}
EXPORT_SYMBOL(mlx5_fc_query);

u64 mlx5_fc_query_lastuse(struct mlx5_fc *counter)
{
	return counter->cache.lastuse;
}

void mlx5_fc_query_cached(struct mlx5_fc *counter,
			  u64 *bytes, u64 *packets, u64 *lastuse)
{
	struct mlx5_fc_cache c;

	c = counter->cache;

	*bytes = c.bytes - counter->lastbytes;
	*packets = c.packets - counter->lastpackets;
	*lastuse = c.lastuse;

	counter->lastbytes = c.bytes;
	counter->lastpackets = c.packets;
}

void mlx5_fc_queue_stats_work(struct mlx5_core_dev *dev,
			      struct delayed_work *dwork,
			      unsigned long delay)
{
	struct mlx5_fc_stats *fc_stats = &dev->priv.fc_stats;

	queue_delayed_work(fc_stats->wq, dwork, delay);
}

void mlx5_fc_update_sampling_interval(struct mlx5_core_dev *dev,
				      unsigned long interval)
{
	struct mlx5_fc_stats *fc_stats = &dev->priv.fc_stats;

	fc_stats->sampling_interval = min_t(unsigned long, interval,
					    fc_stats->sampling_interval);
}

/* Flow counter bulks */

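/*
 * A bulk is a contiguous range of counters allocated with one firmware
 * command.  base_id is the first hardware id in the range, and a set bit
 * in bitmask marks a free counter slot.
 */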
struct mlx5_fc_bulk {
	struct list_head pool_list;
	u32 base_id;
	int bulk_len;
	unsigned long *bitmask;
	struct mlx5_fc fcs[];
};

static void mlx5_fc_init(struct mlx5_fc *counter, struct mlx5_fc_bulk *bulk,
			 u32 id)
{
	counter->bulk = bulk;
	counter->id = id;
}

static int mlx5_fc_bulk_get_free_fcs_amount(struct mlx5_fc_bulk *bulk)
{
	return bitmap_weight(bulk->bitmask, bulk->bulk_len);
}

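/*
 * Allocate a whole bulk of counters from firmware.  The bulk length is
 * derived from the device's flow_counter_bulk_alloc capability; all slots
 * start out free.
 */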
static struct mlx5_fc_bulk *mlx5_fc_bulk_create(struct mlx5_core_dev *dev)
{
	enum mlx5_fc_bulk_alloc_bitmask alloc_bitmask;
	struct mlx5_fc_bulk *bulk;
	int err = -ENOMEM;
	int bulk_len;
	u32 base_id;
	int i;

	alloc_bitmask = MLX5_CAP_GEN(dev, flow_counter_bulk_alloc);
	bulk_len = alloc_bitmask > 0 ? MLX5_FC_BULK_NUM_FCS(alloc_bitmask) : 1;

	bulk = kvzalloc(struct_size(bulk, fcs, bulk_len), GFP_KERNEL);
	if (!bulk)
		goto err_alloc_bulk;

	bulk->bitmask = kvcalloc(BITS_TO_LONGS(bulk_len), sizeof(unsigned long),
				 GFP_KERNEL);
	if (!bulk->bitmask)
		goto err_alloc_bitmask;

	err = mlx5_cmd_fc_bulk_alloc(dev, alloc_bitmask, &base_id);
	if (err)
		goto err_mlx5_cmd_bulk_alloc;

	bulk->base_id = base_id;
	bulk->bulk_len = bulk_len;
	for (i = 0; i < bulk_len; i++) {
		mlx5_fc_init(&bulk->fcs[i], bulk, base_id + i);
		set_bit(i, bulk->bitmask);
	}

	return bulk;

err_mlx5_cmd_bulk_alloc:
	kvfree(bulk->bitmask);
err_alloc_bitmask:
	kvfree(bulk);
err_alloc_bulk:
	return ERR_PTR(err);
}

static int
mlx5_fc_bulk_destroy(struct mlx5_core_dev *dev, struct mlx5_fc_bulk *bulk)
{
	if (mlx5_fc_bulk_get_free_fcs_amount(bulk) < bulk->bulk_len) {
		mlx5_core_err(dev, "Freeing bulk before all counters were released\n");
		return -EBUSY;
	}

	mlx5_cmd_fc_free(dev, bulk->base_id);
	kvfree(bulk->bitmask);
	kvfree(bulk);

	return 0;
}

static struct mlx5_fc *mlx5_fc_bulk_acquire_fc(struct mlx5_fc_bulk *bulk)
{
	int free_fc_index = find_first_bit(bulk->bitmask, bulk->bulk_len);

	if (free_fc_index >= bulk->bulk_len)
		return ERR_PTR(-ENOSPC);

	clear_bit(free_fc_index, bulk->bitmask);
	return &bulk->fcs[free_fc_index];
}

static int mlx5_fc_bulk_release_fc(struct mlx5_fc_bulk *bulk, struct mlx5_fc *fc)
{
	int fc_index = fc->id - bulk->base_id;

	if (test_bit(fc_index, bulk->bitmask))
		return -EINVAL;

	set_bit(fc_index, bulk->bitmask);
	return 0;
}

/* Flow counters pool API */

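/*
 * The pool keeps bulks on three lists by occupancy: fully_used,
 * partially_used and unused.  threshold caps how many free counters may be
 * cached before an entirely free bulk is returned to firmware; it scales
 * with usage up to MLX5_FC_POOL_MAX_THRESHOLD.
 */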
static void mlx5_fc_pool_init(struct mlx5_fc_pool *fc_pool, struct mlx5_core_dev *dev)
{
	fc_pool->dev = dev;
	mutex_init(&fc_pool->pool_lock);
	INIT_LIST_HEAD(&fc_pool->fully_used);
	INIT_LIST_HEAD(&fc_pool->partially_used);
	INIT_LIST_HEAD(&fc_pool->unused);
	fc_pool->available_fcs = 0;
	fc_pool->used_fcs = 0;
	fc_pool->threshold = 0;
}

static void mlx5_fc_pool_cleanup(struct mlx5_fc_pool *fc_pool)
{
	struct mlx5_core_dev *dev = fc_pool->dev;
	struct mlx5_fc_bulk *bulk;
	struct mlx5_fc_bulk *tmp;

	list_for_each_entry_safe(bulk, tmp, &fc_pool->fully_used, pool_list)
		mlx5_fc_bulk_destroy(dev, bulk);
	list_for_each_entry_safe(bulk, tmp, &fc_pool->partially_used, pool_list)
		mlx5_fc_bulk_destroy(dev, bulk);
	list_for_each_entry_safe(bulk, tmp, &fc_pool->unused, pool_list)
		mlx5_fc_bulk_destroy(dev, bulk);
}

static void mlx5_fc_pool_update_threshold(struct mlx5_fc_pool *fc_pool)
{
	fc_pool->threshold = min_t(int, MLX5_FC_POOL_MAX_THRESHOLD,
				   fc_pool->used_fcs / MLX5_FC_POOL_USED_BUFF_RATIO);
}

static struct mlx5_fc_bulk *
mlx5_fc_pool_alloc_new_bulk(struct mlx5_fc_pool *fc_pool)
{
	struct mlx5_core_dev *dev = fc_pool->dev;
	struct mlx5_fc_bulk *new_bulk;

	new_bulk = mlx5_fc_bulk_create(dev);
	if (!IS_ERR(new_bulk))
		fc_pool->available_fcs += new_bulk->bulk_len;
	mlx5_fc_pool_update_threshold(fc_pool);
	return new_bulk;
}

static void
mlx5_fc_pool_free_bulk(struct mlx5_fc_pool *fc_pool, struct mlx5_fc_bulk *bulk)
{
	struct mlx5_core_dev *dev = fc_pool->dev;

	fc_pool->available_fcs -= bulk->bulk_len;
	mlx5_fc_bulk_destroy(dev, bulk);
	mlx5_fc_pool_update_threshold(fc_pool);
}

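/*
 * Take a counter from the first bulk on src_list.  The bulk is moved to
 * next_list either unconditionally (move_non_full_bulk) or only once its
 * last free counter has been handed out.
 */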
static struct mlx5_fc *
mlx5_fc_pool_acquire_from_list(struct list_head *src_list,
			       struct list_head *next_list,
			       bool move_non_full_bulk)
{
	struct mlx5_fc_bulk *bulk;
	struct mlx5_fc *fc;

	if (list_empty(src_list))
		return ERR_PTR(-ENODATA);

	bulk = list_first_entry(src_list, struct mlx5_fc_bulk, pool_list);
	fc = mlx5_fc_bulk_acquire_fc(bulk);
	if (move_non_full_bulk || mlx5_fc_bulk_get_free_fcs_amount(bulk) == 0)
		list_move(&bulk->pool_list, next_list);
	return fc;
}

static struct mlx5_fc *
mlx5_fc_pool_acquire_counter(struct mlx5_fc_pool *fc_pool)
{
	struct mlx5_fc_bulk *new_bulk;
	struct mlx5_fc *fc;

	mutex_lock(&fc_pool->pool_lock);

	fc = mlx5_fc_pool_acquire_from_list(&fc_pool->partially_used,
					    &fc_pool->fully_used, false);
	if (IS_ERR(fc))
		fc = mlx5_fc_pool_acquire_from_list(&fc_pool->unused,
						    &fc_pool->partially_used,
						    true);
	if (IS_ERR(fc)) {
		new_bulk = mlx5_fc_pool_alloc_new_bulk(fc_pool);
		if (IS_ERR(new_bulk)) {
			fc = ERR_CAST(new_bulk);
			goto out;
		}
		fc = mlx5_fc_bulk_acquire_fc(new_bulk);
		list_add(&new_bulk->pool_list, &fc_pool->partially_used);
	}
	fc_pool->available_fcs--;
	fc_pool->used_fcs++;

out:
	mutex_unlock(&fc_pool->pool_lock);
	return fc;
}

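/*
 * Return a counter to its bulk.  A bulk that regains its first free counter
 * moves back to partially_used; a bulk that becomes entirely free is either
 * destroyed (when enough free counters are already cached) or parked on the
 * unused list.
 */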
static void
mlx5_fc_pool_release_counter(struct mlx5_fc_pool *fc_pool, struct mlx5_fc *fc)
{
	struct mlx5_core_dev *dev = fc_pool->dev;
	struct mlx5_fc_bulk *bulk = fc->bulk;
	int bulk_free_fcs_amount;

	mutex_lock(&fc_pool->pool_lock);

	if (mlx5_fc_bulk_release_fc(bulk, fc)) {
		mlx5_core_warn(dev, "Attempted to release a counter which is not acquired\n");
		goto unlock;
	}

	fc_pool->available_fcs++;
	fc_pool->used_fcs--;

	bulk_free_fcs_amount = mlx5_fc_bulk_get_free_fcs_amount(bulk);
	if (bulk_free_fcs_amount == 1)
		list_move_tail(&bulk->pool_list, &fc_pool->partially_used);
	if (bulk_free_fcs_amount == bulk->bulk_len) {
		list_del(&bulk->pool_list);
		if (fc_pool->available_fcs > fc_pool->threshold)
			mlx5_fc_pool_free_bulk(fc_pool, bulk);
		else
			list_add(&bulk->pool_list, &fc_pool->unused);
	}

unlock:
	mutex_unlock(&fc_pool->pool_lock);
}
759