/*
 * Copyright (c) 2016, Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/mlx5/driver.h>
#include <linux/mlx5/fs.h>
#include <linux/rbtree.h>
#include "mlx5_core.h"
#include "fs_core.h"
#include "fs_cmd.h"

#define MLX5_FC_STATS_PERIOD msecs_to_jiffies(1000)
/* Max number of counters to query in bulk read is 32K */
#define MLX5_SW_MAX_COUNTERS_BULK BIT(15)
#define MLX5_FC_POOL_MAX_THRESHOLD BIT(18)
#define MLX5_FC_POOL_USED_BUFF_RATIO 10

struct mlx5_fc_cache {
	u64 packets;
	u64 bytes;
	u64 lastuse;
};

struct mlx5_fc {
	struct list_head list;
	struct llist_node addlist;
	struct llist_node dellist;

	/* last{packets,bytes} members are used when calculating the delta since
	 * last reading
	 */
	u64 lastpackets;
	u64 lastbytes;

	struct mlx5_fc_bulk *bulk;
	u32 id;
	bool aging;

	struct mlx5_fc_cache cache ____cacheline_aligned_in_smp;
};

static void mlx5_fc_pool_init(struct mlx5_fc_pool *fc_pool, struct mlx5_core_dev *dev);
static void mlx5_fc_pool_cleanup(struct mlx5_fc_pool *fc_pool);
static struct mlx5_fc *mlx5_fc_pool_acquire_counter(struct mlx5_fc_pool *fc_pool);
static void mlx5_fc_pool_release_counter(struct mlx5_fc_pool *fc_pool, struct mlx5_fc *fc);

/* locking scheme:
 *
 * It is the responsibility of the user to prevent concurrent calls or bad
 * ordering to mlx5_fc_create(), mlx5_fc_destroy() and accessing a reference
 * to struct mlx5_fc.
 * e.g. en_tc.c is protected by the RTNL lock of its caller, and will never
 * call a dump (access to struct mlx5_fc) after a counter is destroyed.
 *
 * access to counter list:
 * - create (user context)
 *   - mlx5_fc_create() only adds to an addlist to be used by
 *     mlx5_fc_stats_work(). addlist is a lockless single linked list
 *     that doesn't require any additional synchronization when adding a
 *     single node.
 *   - spawn thread to do the actual add
 *
 * - destroy (user context)
 *   - add a counter to lockless dellist
 *   - spawn thread to do the actual del
 *
 * - dump (user context)
 *   user should not call dump after destroy
 *
 * - query (single thread workqueue context)
 *   destroy/dump - no conflict (see destroy)
 *   query/dump - packets and bytes might be inconsistent (since update is not
 *                atomic)
 *   query/create - no conflict (see create)
 *   since every create/destroy spawns the work, only after the necessary time
 *   has elapsed does the thread actually query the hardware.
 */

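/* Find the list node after which a counter with the given id should be
 * inserted, so that the counters list stays sorted by counter id. Counters
 * that are already in the idr but not yet linked into the list are skipped.
 */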
static struct list_head *mlx5_fc_counters_lookup_next(struct mlx5_core_dev *dev,
						      u32 id)
{
	struct mlx5_fc_stats *fc_stats = &dev->priv.fc_stats;
	unsigned long next_id = (unsigned long)id + 1;
	struct mlx5_fc *counter;
	unsigned long tmp;

	rcu_read_lock();
	/* skip counters that are in idr, but not yet in counters list */
	idr_for_each_entry_continue_ul(&fc_stats->counters_idr,
				       counter, tmp, next_id) {
		if (!list_empty(&counter->list))
			break;
	}
	rcu_read_unlock();

	return counter ? &counter->list : &fc_stats->counters;
}

static void mlx5_fc_stats_insert(struct mlx5_core_dev *dev,
				 struct mlx5_fc *counter)
{
	struct list_head *next = mlx5_fc_counters_lookup_next(dev, counter->id);

	list_add_tail(&counter->list, next);
}

static void mlx5_fc_stats_remove(struct mlx5_core_dev *dev,
				 struct mlx5_fc *counter)
{
	struct mlx5_fc_stats *fc_stats = &dev->priv.fc_stats;

	list_del(&counter->list);

	spin_lock(&fc_stats->counters_idr_lock);
	WARN_ON(!idr_remove(&fc_stats->counters_idr, counter->id));
	spin_unlock(&fc_stats->counters_idr_lock);
}

static int get_max_bulk_query_len(struct mlx5_core_dev *dev)
{
	return min_t(int, MLX5_SW_MAX_COUNTERS_BULK,
		     (1 << MLX5_CAP_GEN(dev, log_max_flow_counter_bulk)));
}

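/* Copy one counter's statistics out of a bulk query reply into its cache.
 * The cache (and the lastuse stamp) is only touched when the packet count
 * has changed, so lastuse reflects the last time traffic was seen.
 */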
static void update_counter_cache(int index, u32 *bulk_raw_data,
				 struct mlx5_fc_cache *cache)
{
	void *stats = MLX5_ADDR_OF(query_flow_counter_out, bulk_raw_data,
				   flow_statistics[index]);
	u64 packets = MLX5_GET64(traffic_counter, stats, packets);
	u64 bytes = MLX5_GET64(traffic_counter, stats, octets);

	if (cache->packets == packets)
		return;

	cache->packets = packets;
	cache->bytes = bytes;
	cache->lastuse = jiffies;
}

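/* Query the hardware for all counters between 'first' and 'last_id' using
 * bulk queries. Each bulk starts at a 4-aligned counter id and is capped by
 * the device's maximum bulk length; cached values are refreshed for every
 * counter in the list that falls inside the queried range.
 */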
static void mlx5_fc_stats_query_counter_range(struct mlx5_core_dev *dev,
					      struct mlx5_fc *first,
					      u32 last_id)
{
	struct mlx5_fc_stats *fc_stats = &dev->priv.fc_stats;
	bool query_more_counters = (first->id <= last_id);
	int max_bulk_len = get_max_bulk_query_len(dev);
	u32 *data = fc_stats->bulk_query_out;
	struct mlx5_fc *counter = first;
	u32 bulk_base_id;
	int bulk_len;
	int err;

	while (query_more_counters) {
		/* first id must be aligned to 4 when using bulk query */
		bulk_base_id = counter->id & ~0x3;

		/* number of counters to query inc. the last counter */
		bulk_len = min_t(int, max_bulk_len,
				 ALIGN(last_id - bulk_base_id + 1, 4));

		err = mlx5_cmd_fc_bulk_query(dev, bulk_base_id, bulk_len,
					     data);
		if (err) {
			mlx5_core_err(dev, "Error doing bulk query: %d\n", err);
			return;
		}
		query_more_counters = false;

		list_for_each_entry_from(counter, &fc_stats->counters, list) {
			int counter_index = counter->id - bulk_base_id;
			struct mlx5_fc_cache *cache = &counter->cache;

			if (counter->id >= bulk_base_id + bulk_len) {
				query_more_counters = true;
				break;
			}

			update_counter_cache(counter_index, data, cache);
		}
	}
}

static void mlx5_fc_free(struct mlx5_core_dev *dev, struct mlx5_fc *counter)
{
	mlx5_cmd_fc_free(dev, counter->id);
	kfree(counter);
}

static void mlx5_fc_release(struct mlx5_core_dev *dev, struct mlx5_fc *counter)
{
	struct mlx5_fc_stats *fc_stats = &dev->priv.fc_stats;

	if (counter->bulk)
		mlx5_fc_pool_release_counter(&fc_stats->fc_pool, counter);
	else
		mlx5_fc_free(dev, counter);
}

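/* Periodic stats work: link newly created counters into the sorted list,
 * unlink and release deleted ones, and, once the sampling interval has
 * elapsed, bulk-query the hardware to refresh every cached counter.
 */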
static void mlx5_fc_stats_work(struct work_struct *work)
{
	struct mlx5_core_dev *dev = container_of(work, struct mlx5_core_dev,
						 priv.fc_stats.work.work);
	struct mlx5_fc_stats *fc_stats = &dev->priv.fc_stats;
	/* Take dellist first to ensure that counters cannot be deleted before
	 * they are inserted.
	 */
	struct llist_node *dellist = llist_del_all(&fc_stats->dellist);
	struct llist_node *addlist = llist_del_all(&fc_stats->addlist);
	struct mlx5_fc *counter = NULL, *last = NULL, *tmp;
	unsigned long now = jiffies;

	if (addlist || !list_empty(&fc_stats->counters))
		queue_delayed_work(fc_stats->wq, &fc_stats->work,
				   fc_stats->sampling_interval);

	llist_for_each_entry(counter, addlist, addlist)
		mlx5_fc_stats_insert(dev, counter);

	llist_for_each_entry_safe(counter, tmp, dellist, dellist) {
		mlx5_fc_stats_remove(dev, counter);

		mlx5_fc_release(dev, counter);
	}

	if (time_before(now, fc_stats->next_query) ||
	    list_empty(&fc_stats->counters))
		return;
	last = list_last_entry(&fc_stats->counters, struct mlx5_fc, list);

	counter = list_first_entry(&fc_stats->counters, struct mlx5_fc,
				   list);
	if (counter)
		mlx5_fc_stats_query_counter_range(dev, counter, last->id);

	fc_stats->next_query = now + fc_stats->sampling_interval;
}

static struct mlx5_fc *mlx5_fc_single_alloc(struct mlx5_core_dev *dev)
{
	struct mlx5_fc *counter;
	int err;

	counter = kzalloc(sizeof(*counter), GFP_KERNEL);
	if (!counter)
		return ERR_PTR(-ENOMEM);

	err = mlx5_cmd_fc_alloc(dev, &counter->id);
	if (err) {
		kfree(counter);
		return ERR_PTR(err);
	}

	return counter;
}

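/* Prefer a counter from the bulk-allocated pool for aging counters (when the
 * device supports bulk allocation); otherwise fall back to allocating a
 * single counter directly from firmware.
 */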
static struct mlx5_fc *mlx5_fc_acquire(struct mlx5_core_dev *dev, bool aging)
{
	struct mlx5_fc_stats *fc_stats = &dev->priv.fc_stats;
	struct mlx5_fc *counter;

	if (aging && MLX5_CAP_GEN(dev, flow_counter_bulk_alloc) != 0) {
		counter = mlx5_fc_pool_acquire_counter(&fc_stats->fc_pool);
		if (!IS_ERR(counter))
			return counter;
	}

	return mlx5_fc_single_alloc(dev);
}

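/* Allocate a flow counter. When 'aging' is set, the counter is registered in
 * the counters idr and queued on the addlist so the stats work can start
 * sampling it periodically.
 */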
struct mlx5_fc *mlx5_fc_create(struct mlx5_core_dev *dev, bool aging)
{
	struct mlx5_fc *counter = mlx5_fc_acquire(dev, aging);
	struct mlx5_fc_stats *fc_stats = &dev->priv.fc_stats;
	int err;

	if (IS_ERR(counter))
		return counter;

	INIT_LIST_HEAD(&counter->list);
	counter->aging = aging;

	if (aging) {
		u32 id = counter->id;

		counter->cache.lastuse = jiffies;
		counter->lastbytes = counter->cache.bytes;
		counter->lastpackets = counter->cache.packets;

		idr_preload(GFP_KERNEL);
		spin_lock(&fc_stats->counters_idr_lock);

		err = idr_alloc_u32(&fc_stats->counters_idr, counter, &id, id,
				    GFP_NOWAIT);

		spin_unlock(&fc_stats->counters_idr_lock);
		idr_preload_end();
		if (err)
			goto err_out_alloc;

		llist_add(&counter->addlist, &fc_stats->addlist);

		mod_delayed_work(fc_stats->wq, &fc_stats->work, 0);
	}

	return counter;

err_out_alloc:
	mlx5_fc_release(dev, counter);
	return ERR_PTR(err);
}
EXPORT_SYMBOL(mlx5_fc_create);

u32 mlx5_fc_id(struct mlx5_fc *counter)
{
	return counter->id;
}
EXPORT_SYMBOL(mlx5_fc_id);

void mlx5_fc_destroy(struct mlx5_core_dev *dev, struct mlx5_fc *counter)
{
	struct mlx5_fc_stats *fc_stats = &dev->priv.fc_stats;

	if (!counter)
		return;

	if (counter->aging) {
		llist_add(&counter->dellist, &fc_stats->dellist);
		mod_delayed_work(fc_stats->wq, &fc_stats->work, 0);
		return;
	}

	mlx5_fc_release(dev, counter);
}
EXPORT_SYMBOL(mlx5_fc_destroy);

int mlx5_init_fc_stats(struct mlx5_core_dev *dev)
{
	struct mlx5_fc_stats *fc_stats = &dev->priv.fc_stats;
	int max_bulk_len;
	int max_out_len;

	spin_lock_init(&fc_stats->counters_idr_lock);
	idr_init(&fc_stats->counters_idr);
	INIT_LIST_HEAD(&fc_stats->counters);
	init_llist_head(&fc_stats->addlist);
	init_llist_head(&fc_stats->dellist);

	max_bulk_len = get_max_bulk_query_len(dev);
	max_out_len = mlx5_cmd_fc_get_bulk_query_out_len(max_bulk_len);
	fc_stats->bulk_query_out = kzalloc(max_out_len, GFP_KERNEL);
	if (!fc_stats->bulk_query_out)
		return -ENOMEM;

	fc_stats->wq = create_singlethread_workqueue("mlx5_fc");
	if (!fc_stats->wq)
		goto err_wq_create;

	fc_stats->sampling_interval = MLX5_FC_STATS_PERIOD;
	INIT_DELAYED_WORK(&fc_stats->work, mlx5_fc_stats_work);

	mlx5_fc_pool_init(&fc_stats->fc_pool, dev);
	return 0;

err_wq_create:
	kfree(fc_stats->bulk_query_out);
	return -ENOMEM;
}

void mlx5_cleanup_fc_stats(struct mlx5_core_dev *dev)
{
	struct mlx5_fc_stats *fc_stats = &dev->priv.fc_stats;
	struct llist_node *tmplist;
	struct mlx5_fc *counter;
	struct mlx5_fc *tmp;

	cancel_delayed_work_sync(&dev->priv.fc_stats.work);
	destroy_workqueue(dev->priv.fc_stats.wq);
	dev->priv.fc_stats.wq = NULL;

	tmplist = llist_del_all(&fc_stats->addlist);
	llist_for_each_entry_safe(counter, tmp, tmplist, addlist)
		mlx5_fc_release(dev, counter);

	list_for_each_entry_safe(counter, tmp, &fc_stats->counters, list)
		mlx5_fc_release(dev, counter);

	mlx5_fc_pool_cleanup(&fc_stats->fc_pool);
	idr_destroy(&fc_stats->counters_idr);
	kfree(fc_stats->bulk_query_out);
}

int mlx5_fc_query(struct mlx5_core_dev *dev, struct mlx5_fc *counter,
		  u64 *packets, u64 *bytes)
{
	return mlx5_cmd_fc_query(dev, counter->id, packets, bytes);
}
EXPORT_SYMBOL(mlx5_fc_query);

u64 mlx5_fc_query_lastuse(struct mlx5_fc *counter)
{
	return counter->cache.lastuse;
}

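/* Return the packet and byte deltas accumulated since the previous call,
 * based on the cached values refreshed by the stats work, and advance the
 * last{packets,bytes} snapshot. No hardware command is issued here.
 */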
void mlx5_fc_query_cached(struct mlx5_fc *counter,
			  u64 *bytes, u64 *packets, u64 *lastuse)
{
	struct mlx5_fc_cache c;

	c = counter->cache;

	*bytes = c.bytes - counter->lastbytes;
	*packets = c.packets - counter->lastpackets;
	*lastuse = c.lastuse;

	counter->lastbytes = c.bytes;
	counter->lastpackets = c.packets;
}

void mlx5_fc_queue_stats_work(struct mlx5_core_dev *dev,
			      struct delayed_work *dwork,
			      unsigned long delay)
{
	struct mlx5_fc_stats *fc_stats = &dev->priv.fc_stats;

	queue_delayed_work(fc_stats->wq, dwork, delay);
}

void mlx5_fc_update_sampling_interval(struct mlx5_core_dev *dev,
				      unsigned long interval)
{
	struct mlx5_fc_stats *fc_stats = &dev->priv.fc_stats;

	fc_stats->sampling_interval = min_t(unsigned long, interval,
					    fc_stats->sampling_interval);
}

/* Flow counter bulks */

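/* A bulk is a contiguous range of hardware counter ids allocated with a
 * single firmware command. 'base_id' is the first id in the range, and
 * 'bitmask' marks which entries of the 'fcs' array are still free.
 */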
struct mlx5_fc_bulk {
	struct list_head pool_list;
	u32 base_id;
	int bulk_len;
	unsigned long *bitmask;
	struct mlx5_fc fcs[0];
};

static void mlx5_fc_init(struct mlx5_fc *counter, struct mlx5_fc_bulk *bulk,
			 u32 id)
{
	counter->bulk = bulk;
	counter->id = id;
}

static int mlx5_fc_bulk_get_free_fcs_amount(struct mlx5_fc_bulk *bulk)
{
	return bitmap_weight(bulk->bitmask, bulk->bulk_len);
}

static struct mlx5_fc_bulk *mlx5_fc_bulk_create(struct mlx5_core_dev *dev)
{
	enum mlx5_fc_bulk_alloc_bitmask alloc_bitmask;
	struct mlx5_fc_bulk *bulk;
	int err = -ENOMEM;
	int bulk_len;
	u32 base_id;
	int i;

	alloc_bitmask = MLX5_CAP_GEN(dev, flow_counter_bulk_alloc);
	bulk_len = alloc_bitmask > 0 ? MLX5_FC_BULK_NUM_FCS(alloc_bitmask) : 1;

	bulk = kzalloc(sizeof(*bulk) + bulk_len * sizeof(struct mlx5_fc),
		       GFP_KERNEL);
	if (!bulk)
		goto err_alloc_bulk;

	bulk->bitmask = kcalloc(BITS_TO_LONGS(bulk_len), sizeof(unsigned long),
				GFP_KERNEL);
	if (!bulk->bitmask)
		goto err_alloc_bitmask;

	err = mlx5_cmd_fc_bulk_alloc(dev, alloc_bitmask, &base_id);
	if (err)
		goto err_mlx5_cmd_bulk_alloc;

	bulk->base_id = base_id;
	bulk->bulk_len = bulk_len;
	for (i = 0; i < bulk_len; i++) {
		mlx5_fc_init(&bulk->fcs[i], bulk, base_id + i);
		set_bit(i, bulk->bitmask);
	}

	return bulk;

err_mlx5_cmd_bulk_alloc:
	kfree(bulk->bitmask);
err_alloc_bitmask:
	kfree(bulk);
err_alloc_bulk:
	return ERR_PTR(err);
}

static int
mlx5_fc_bulk_destroy(struct mlx5_core_dev *dev, struct mlx5_fc_bulk *bulk)
{
	if (mlx5_fc_bulk_get_free_fcs_amount(bulk) < bulk->bulk_len) {
		mlx5_core_err(dev, "Freeing bulk before all counters were released\n");
		return -EBUSY;
	}

	mlx5_cmd_fc_free(dev, bulk->base_id);
	kfree(bulk->bitmask);
	kfree(bulk);

	return 0;
}

static struct mlx5_fc *mlx5_fc_bulk_acquire_fc(struct mlx5_fc_bulk *bulk)
{
	int free_fc_index = find_first_bit(bulk->bitmask, bulk->bulk_len);

	if (free_fc_index >= bulk->bulk_len)
		return ERR_PTR(-ENOSPC);

	clear_bit(free_fc_index, bulk->bitmask);
	return &bulk->fcs[free_fc_index];
}

static int mlx5_fc_bulk_release_fc(struct mlx5_fc_bulk *bulk, struct mlx5_fc *fc)
{
	int fc_index = fc->id - bulk->base_id;

	if (test_bit(fc_index, bulk->bitmask))
		return -EINVAL;

	set_bit(fc_index, bulk->bitmask);
	return 0;
}

/* Flow counters pool API */

static void mlx5_fc_pool_init(struct mlx5_fc_pool *fc_pool, struct mlx5_core_dev *dev)
{
	fc_pool->dev = dev;
	mutex_init(&fc_pool->pool_lock);
	INIT_LIST_HEAD(&fc_pool->fully_used);
	INIT_LIST_HEAD(&fc_pool->partially_used);
	INIT_LIST_HEAD(&fc_pool->unused);
	fc_pool->available_fcs = 0;
	fc_pool->used_fcs = 0;
	fc_pool->threshold = 0;
}

static void mlx5_fc_pool_cleanup(struct mlx5_fc_pool *fc_pool)
{
	struct mlx5_core_dev *dev = fc_pool->dev;
	struct mlx5_fc_bulk *bulk;
	struct mlx5_fc_bulk *tmp;

	list_for_each_entry_safe(bulk, tmp, &fc_pool->fully_used, pool_list)
		mlx5_fc_bulk_destroy(dev, bulk);
	list_for_each_entry_safe(bulk, tmp, &fc_pool->partially_used, pool_list)
		mlx5_fc_bulk_destroy(dev, bulk);
	list_for_each_entry_safe(bulk, tmp, &fc_pool->unused, pool_list)
		mlx5_fc_bulk_destroy(dev, bulk);
}

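/* Keep at most MLX5_FC_POOL_MAX_THRESHOLD spare counters, and otherwise allow
 * roughly one spare counter per MLX5_FC_POOL_USED_BUFF_RATIO counters in use;
 * fully free bulks above this threshold are returned to firmware.
 */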
static void mlx5_fc_pool_update_threshold(struct mlx5_fc_pool *fc_pool)
{
	fc_pool->threshold = min_t(int, MLX5_FC_POOL_MAX_THRESHOLD,
				   fc_pool->used_fcs / MLX5_FC_POOL_USED_BUFF_RATIO);
}

static struct mlx5_fc_bulk *
mlx5_fc_pool_alloc_new_bulk(struct mlx5_fc_pool *fc_pool)
{
	struct mlx5_core_dev *dev = fc_pool->dev;
	struct mlx5_fc_bulk *new_bulk;

	new_bulk = mlx5_fc_bulk_create(dev);
	if (!IS_ERR(new_bulk))
		fc_pool->available_fcs += new_bulk->bulk_len;
	mlx5_fc_pool_update_threshold(fc_pool);
	return new_bulk;
}

static void
mlx5_fc_pool_free_bulk(struct mlx5_fc_pool *fc_pool, struct mlx5_fc_bulk *bulk)
{
	struct mlx5_core_dev *dev = fc_pool->dev;

	fc_pool->available_fcs -= bulk->bulk_len;
	mlx5_fc_bulk_destroy(dev, bulk);
	mlx5_fc_pool_update_threshold(fc_pool);
}

static struct mlx5_fc *
mlx5_fc_pool_acquire_from_list(struct list_head *src_list,
			       struct list_head *next_list,
			       bool move_non_full_bulk)
{
	struct mlx5_fc_bulk *bulk;
	struct mlx5_fc *fc;

	if (list_empty(src_list))
		return ERR_PTR(-ENODATA);

	bulk = list_first_entry(src_list, struct mlx5_fc_bulk, pool_list);
	fc = mlx5_fc_bulk_acquire_fc(bulk);
	if (move_non_full_bulk || mlx5_fc_bulk_get_free_fcs_amount(bulk) == 0)
		list_move(&bulk->pool_list, next_list);
	return fc;
}

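/* Take a counter from the pool: first try a partially used bulk, then an
 * unused one, and only allocate a fresh bulk from firmware when neither has
 * a free counter. Bulks migrate between the unused, partially_used and
 * fully_used lists as their occupancy changes.
 */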
static struct mlx5_fc *
mlx5_fc_pool_acquire_counter(struct mlx5_fc_pool *fc_pool)
{
	struct mlx5_fc_bulk *new_bulk;
	struct mlx5_fc *fc;

	mutex_lock(&fc_pool->pool_lock);

	fc = mlx5_fc_pool_acquire_from_list(&fc_pool->partially_used,
					    &fc_pool->fully_used, false);
	if (IS_ERR(fc))
		fc = mlx5_fc_pool_acquire_from_list(&fc_pool->unused,
						    &fc_pool->partially_used,
						    true);
	if (IS_ERR(fc)) {
		new_bulk = mlx5_fc_pool_alloc_new_bulk(fc_pool);
		if (IS_ERR(new_bulk)) {
			fc = ERR_CAST(new_bulk);
			goto out;
		}
		fc = mlx5_fc_bulk_acquire_fc(new_bulk);
		list_add(&new_bulk->pool_list, &fc_pool->partially_used);
	}
	fc_pool->available_fcs--;
	fc_pool->used_fcs++;

out:
	mutex_unlock(&fc_pool->pool_lock);
	return fc;
}

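/* Return a counter to its bulk. A bulk that gains its first free counter
 * moves back to the partially_used list; a bulk that becomes completely free
 * is either destroyed (when the pool already holds more spare counters than
 * the threshold allows) or parked on the unused list.
 */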
static void
mlx5_fc_pool_release_counter(struct mlx5_fc_pool *fc_pool, struct mlx5_fc *fc)
{
	struct mlx5_core_dev *dev = fc_pool->dev;
	struct mlx5_fc_bulk *bulk = fc->bulk;
	int bulk_free_fcs_amount;

	mutex_lock(&fc_pool->pool_lock);

	if (mlx5_fc_bulk_release_fc(bulk, fc)) {
		mlx5_core_warn(dev, "Attempted to release a counter which is not acquired\n");
		goto unlock;
	}

	fc_pool->available_fcs++;
	fc_pool->used_fcs--;

	bulk_free_fcs_amount = mlx5_fc_bulk_get_free_fcs_amount(bulk);
	if (bulk_free_fcs_amount == 1)
		list_move_tail(&bulk->pool_list, &fc_pool->partially_used);
	if (bulk_free_fcs_amount == bulk->bulk_len) {
		list_del(&bulk->pool_list);
		if (fc_pool->available_fcs > fc_pool->threshold)
			mlx5_fc_pool_free_bulk(fc_pool, bulk);
		else
			list_add(&bulk->pool_list, &fc_pool->unused);
	}

unlock:
	mutex_unlock(&fc_pool->pool_lock);
}