/*
 * Copyright (c) 2016, Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/mlx5/driver.h>
#include <linux/mlx5/fs.h>
#include <linux/rbtree.h>
#include "mlx5_core.h"
#include "fs_core.h"
#include "fs_cmd.h"
#define MLX5_FC_STATS_PERIOD msecs_to_jiffies(1000)
/* Max number of counters to query in bulk read is 32K */
#define MLX5_SW_MAX_COUNTERS_BULK BIT(15)
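
/* The window actually used by a bulk query is the smaller of this SW limit
 * and the FW limit advertised via log_max_flow_counter_bulk; for example,
 * a device reporting log_max_flow_counter_bulk == 10 (a value chosen here
 * only for illustration) yields min(32768, 1 << 10) == 1024 counters per
 * bulk command.
 */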
struct mlx5_fc_cache {
	u64 packets;
	u64 bytes;
	u64 lastuse;
};

struct mlx5_fc {
	struct list_head list;
	struct llist_node addlist;
	struct llist_node dellist;

	/* last{packets,bytes} members are used when calculating the delta since
	 * last reading
	 */
	u64 lastpackets;
	u64 lastbytes;

	u32 id;
	bool aging;

	struct mlx5_fc_cache cache ____cacheline_aligned_in_smp;
};
/* locking scheme:
 *
 * It is the responsibility of the user to prevent concurrent calls or bad
 * ordering to mlx5_fc_create(), mlx5_fc_destroy() and accessing a reference
 * to struct mlx5_fc.
 * e.g. en_tc.c is protected by the RTNL lock of its caller, and will never
 * call a dump (access to struct mlx5_fc) after a counter is destroyed.
 *
 * access to counter list:
 *
 * - create (user context)
 *   - mlx5_fc_create() only adds to an addlist to be consumed by
 *     mlx5_fc_stats_work(). addlist is a lockless singly linked list
 *     that doesn't require any additional synchronization when adding a
 *     single node.
 *   - spawn the work to do the actual insertion
 *
 * - destroy (user context)
 *   - add a counter to the lockless dellist
 *   - spawn the work to do the actual deletion
 *
 * - dump (user context)
 *   the user should not call dump after destroy
 *
 * - query (single threaded workqueue context)
 *   destroy/dump - no conflict (see destroy)
 *   query/dump - packets and bytes might be inconsistent (since the update
 *                is not atomic)
 *   query/create - no conflict (see create)
 *   since every create/destroy spawns the work, the work thread will only
 *   query the hardware after the necessary time has elapsed.
 */
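
/* Example usage under this scheme (a minimal sketch; attaching the counter
 * to a flow rule is elided since it depends on the caller):
 *
 *	struct mlx5_fc *fc;
 *	u64 bytes, packets, lastuse;
 *
 *	fc = mlx5_fc_create(dev, true);         <-- aging counter
 *	if (IS_ERR(fc))
 *		return PTR_ERR(fc);
 *	... attach to a rule using mlx5_fc_id(fc) ...
 *	mlx5_fc_query_cached(fc, &bytes, &packets, &lastuse);
 *	mlx5_fc_destroy(dev, fc);               <-- no dump after this point
 */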
static struct list_head *mlx5_fc_counters_lookup_next(struct mlx5_core_dev *dev,
						      u32 id)
{
	struct mlx5_fc_stats *fc_stats = &dev->priv.fc_stats;
	unsigned long next_id = (unsigned long)id + 1;
	struct mlx5_fc *counter;
	unsigned long tmp;

	rcu_read_lock();
	/* skip counters that are in idr, but not yet in counters list */
	idr_for_each_entry_continue_ul(&fc_stats->counters_idr,
				       counter, tmp, next_id) {
		if (!list_empty(&counter->list))
			break;
	}
	rcu_read_unlock();

	return counter ? &counter->list : &fc_stats->counters;
}
static void mlx5_fc_stats_insert(struct mlx5_core_dev *dev,
				 struct mlx5_fc *counter)
{
	struct list_head *next = mlx5_fc_counters_lookup_next(dev, counter->id);

	list_add_tail(&counter->list, next);
}
static void mlx5_fc_stats_remove(struct mlx5_core_dev *dev,
				 struct mlx5_fc *counter)
{
	struct mlx5_fc_stats *fc_stats = &dev->priv.fc_stats;

	list_del(&counter->list);

	spin_lock(&fc_stats->counters_idr_lock);
	WARN_ON(!idr_remove(&fc_stats->counters_idr, counter->id));
	spin_unlock(&fc_stats->counters_idr_lock);
}
/* The function returns the last counter that was queried so the caller
 * function can continue calling it till all counters are queried.
 */
static struct mlx5_fc *mlx5_fc_stats_query(struct mlx5_core_dev *dev,
					   struct mlx5_fc *first,
					   u32 last_id)
{
	struct mlx5_fc_stats *fc_stats = &dev->priv.fc_stats;
	struct mlx5_fc *counter = NULL;
	struct mlx5_cmd_fc_bulk *b;
	bool more = false;
	u32 afirst_id;
	int num;
	int err;

	int max_bulk = min_t(int, MLX5_SW_MAX_COUNTERS_BULK,
			     (1 << MLX5_CAP_GEN(dev, log_max_flow_counter_bulk)));

	/* first id must be aligned to 4 when using bulk query */
	afirst_id = first->id & ~0x3;

	/* number of counters to query inc. the last counter */
	num = ALIGN(last_id - afirst_id + 1, 4);
	if (num > max_bulk) {
		num = max_bulk;
		last_id = afirst_id + num - 1;
	}

	b = mlx5_cmd_fc_bulk_alloc(dev, afirst_id, num);
	if (!b) {
		mlx5_core_err(dev, "Error allocating resources for bulk query\n");
		return NULL;
	}

	err = mlx5_cmd_fc_bulk_query(dev, b);
	if (err) {
		mlx5_core_err(dev, "Error doing bulk query: %d\n", err);
		goto out;
	}

	counter = first;
	list_for_each_entry_from(counter, &fc_stats->counters, list) {
		struct mlx5_fc_cache *c = &counter->cache;
		u64 packets;
		u64 bytes;

		if (counter->id > last_id) {
			more = true;
			break;
		}

		mlx5_cmd_fc_bulk_get(dev, b,
				     counter->id, &packets, &bytes);

		if (c->packets == packets)
			continue;

		c->packets = packets;
		c->bytes = bytes;
		c->lastuse = jiffies;
	}

out:
	mlx5_cmd_fc_bulk_free(b);

	return more ? counter : NULL;
}
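
/* A worked example of the windowing above (numbers are illustrative):
 * with first->id == 13, last_id == 130 and max_bulk == 64, the query uses
 * afirst_id = 13 & ~3 = 12 and num = ALIGN(130 - 12 + 1, 4) = 120; since
 * 120 > 64, num is trimmed to 64 and last_id to 12 + 64 - 1 = 75. The
 * first counter with id > 75 is then returned so the caller can continue
 * from there.
 */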
static void mlx5_free_fc(struct mlx5_core_dev *dev,
			 struct mlx5_fc *counter)
{
	mlx5_cmd_fc_free(dev, counter->id);
	kfree(counter);
}
static void mlx5_fc_stats_work(struct work_struct *work)
{
	struct mlx5_core_dev *dev = container_of(work, struct mlx5_core_dev,
						 priv.fc_stats.work.work);
	struct mlx5_fc_stats *fc_stats = &dev->priv.fc_stats;
	/* Take dellist first to ensure that counters cannot be deleted before
	 * they are inserted.
	 */
	struct llist_node *dellist = llist_del_all(&fc_stats->dellist);
	struct llist_node *addlist = llist_del_all(&fc_stats->addlist);
	struct mlx5_fc *counter = NULL, *last = NULL, *tmp;
	unsigned long now = jiffies;

	if (addlist || !list_empty(&fc_stats->counters))
		queue_delayed_work(fc_stats->wq, &fc_stats->work,
				   fc_stats->sampling_interval);

	llist_for_each_entry(counter, addlist, addlist)
		mlx5_fc_stats_insert(dev, counter);

	llist_for_each_entry_safe(counter, tmp, dellist, dellist) {
		mlx5_fc_stats_remove(dev, counter);

		mlx5_free_fc(dev, counter);
	}

	if (time_before(now, fc_stats->next_query) ||
	    list_empty(&fc_stats->counters))
		return;

	last = list_last_entry(&fc_stats->counters, struct mlx5_fc, list);

	counter = list_first_entry(&fc_stats->counters, struct mlx5_fc,
				   list);
	while (counter)
		counter = mlx5_fc_stats_query(dev, counter, last->id);

	fc_stats->next_query = now + fc_stats->sampling_interval;
}
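
/* Why dellist is snapshotted before addlist (a sketch of the race being
 * avoided): a counter created and then destroyed between two work runs
 * sits on both lists. Because create happens before destroy, any counter
 * in the dellist snapshot was already published to addlist, so the same
 * pass inserts it first and only then removes and frees it. Snapshotting
 * in the reverse order could free a counter whose addlist node is still
 * pending, and a later pass would insert freed memory.
 */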
struct mlx5_fc *mlx5_fc_create(struct mlx5_core_dev *dev, bool aging)
{
	struct mlx5_fc_stats *fc_stats = &dev->priv.fc_stats;
	struct mlx5_fc *counter;
	int err;

	counter = kzalloc(sizeof(*counter), GFP_KERNEL);
	if (!counter)
		return ERR_PTR(-ENOMEM);
	INIT_LIST_HEAD(&counter->list);

	err = mlx5_cmd_fc_alloc(dev, &counter->id);
	if (err)
		goto err_out;

	if (aging) {
		u32 id = counter->id;

		counter->cache.lastuse = jiffies;
		counter->aging = true;

		idr_preload(GFP_KERNEL);
		spin_lock(&fc_stats->counters_idr_lock);

		err = idr_alloc_u32(&fc_stats->counters_idr, counter, &id, id,
				    GFP_NOWAIT);

		spin_unlock(&fc_stats->counters_idr_lock);
		idr_preload_end();
		if (err)
			goto err_out_alloc;

		llist_add(&counter->addlist, &fc_stats->addlist);

		mod_delayed_work(fc_stats->wq, &fc_stats->work, 0);
	}

	return counter;

err_out_alloc:
	mlx5_cmd_fc_free(dev, counter->id);
err_out:
	kfree(counter);

	return ERR_PTR(err);
}
EXPORT_SYMBOL(mlx5_fc_create);
u32 mlx5_fc_id(struct mlx5_fc *counter)
{
	return counter->id;
}
EXPORT_SYMBOL(mlx5_fc_id);
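
/* The id is what rule installers feed into a counter destination; a
 * minimal sketch, assuming the mlx5 fs API of this tree:
 *
 *	struct mlx5_flow_destination dest = {};
 *
 *	dest.type = MLX5_FLOW_DESTINATION_TYPE_COUNTER;
 *	dest.counter_id = mlx5_fc_id(fc);
 */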
void mlx5_fc_destroy(struct mlx5_core_dev *dev, struct mlx5_fc *counter)
{
	struct mlx5_fc_stats *fc_stats = &dev->priv.fc_stats;

	if (!counter)
		return;

	if (counter->aging) {
		llist_add(&counter->dellist, &fc_stats->dellist);
		mod_delayed_work(fc_stats->wq, &fc_stats->work, 0);
		return;
	}

	mlx5_free_fc(dev, counter);
}
EXPORT_SYMBOL(mlx5_fc_destroy);
int mlx5_init_fc_stats(struct mlx5_core_dev *dev)
{
	struct mlx5_fc_stats *fc_stats = &dev->priv.fc_stats;

	spin_lock_init(&fc_stats->counters_idr_lock);
	idr_init(&fc_stats->counters_idr);
	INIT_LIST_HEAD(&fc_stats->counters);
	init_llist_head(&fc_stats->addlist);
	init_llist_head(&fc_stats->dellist);

	fc_stats->wq = create_singlethread_workqueue("mlx5_fc");
	if (!fc_stats->wq)
		return -ENOMEM;

	fc_stats->sampling_interval = MLX5_FC_STATS_PERIOD;
	INIT_DELAYED_WORK(&fc_stats->work, mlx5_fc_stats_work);

	return 0;
}
void mlx5_cleanup_fc_stats(struct mlx5_core_dev *dev)
{
	struct mlx5_fc_stats *fc_stats = &dev->priv.fc_stats;
	struct llist_node *tmplist;
	struct mlx5_fc *counter;
	struct mlx5_fc *tmp;

	cancel_delayed_work_sync(&dev->priv.fc_stats.work);
	destroy_workqueue(dev->priv.fc_stats.wq);
	dev->priv.fc_stats.wq = NULL;

	idr_destroy(&fc_stats->counters_idr);

	tmplist = llist_del_all(&fc_stats->addlist);
	llist_for_each_entry_safe(counter, tmp, tmplist, addlist)
		mlx5_free_fc(dev, counter);

	list_for_each_entry_safe(counter, tmp, &fc_stats->counters, list)
		mlx5_free_fc(dev, counter);
}
int mlx5_fc_query(struct mlx5_core_dev *dev, struct mlx5_fc *counter,
		  u64 *packets, u64 *bytes)
{
	return mlx5_cmd_fc_query(dev, counter->id, packets, bytes);
}
EXPORT_SYMBOL(mlx5_fc_query);
u64 mlx5_fc_query_lastuse(struct mlx5_fc *counter)
{
	return counter->cache.lastuse;
}
void mlx5_fc_query_cached(struct mlx5_fc *counter,
			  u64 *bytes, u64 *packets, u64 *lastuse)
{
	struct mlx5_fc_cache c;

	c = counter->cache;

	*bytes = c.bytes - counter->lastbytes;
	*packets = c.packets - counter->lastpackets;
	*lastuse = c.lastuse;

	counter->lastbytes = c.bytes;
	counter->lastpackets = c.packets;
}
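
/* The cached read is destructive with respect to the baseline: each call
 * returns the delta since the previous call and then advances
 * last{bytes,packets}. E.g. (illustrative numbers) with cache.bytes moving
 * from 1000 to 1500 between two calls, the second call reports
 * *bytes == 500.
 */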
void mlx5_fc_queue_stats_work(struct mlx5_core_dev *dev,
			      struct delayed_work *dwork,
			      unsigned long delay)
{
	struct mlx5_fc_stats *fc_stats = &dev->priv.fc_stats;

	queue_delayed_work(fc_stats->wq, dwork, delay);
}
void mlx5_fc_update_sampling_interval(struct mlx5_core_dev *dev,
				      unsigned long interval)
{
	struct mlx5_fc_stats *fc_stats = &dev->priv.fc_stats;

	fc_stats->sampling_interval = min_t(unsigned long, interval,
					    fc_stats->sampling_interval);
}
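
/* Note that the interval can only shrink: with a current interval of
 * 1000 ms, a request for 2000 ms leaves it at 1000 ms, while a request for
 * 500 ms lowers it, so the fastest requester wins.
 */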