drivers/net/ethernet/mellanox/mlx5/core/fs_counters.c
/*
 * Copyright (c) 2016, Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/mlx5/driver.h>
#include <linux/mlx5/fs.h>
#include <linux/rbtree.h>
#include "mlx5_core.h"
#include "fs_core.h"
#include "fs_cmd.h"

#define MLX5_FC_STATS_PERIOD msecs_to_jiffies(1000)
/* Max number of counters to query in bulk read is 32K */
#define MLX5_SW_MAX_COUNTERS_BULK BIT(15)

/* Locking scheme:
 *
 * It is the responsibility of the user to prevent concurrent calls or bad
 * ordering of calls to mlx5_fc_create(), mlx5_fc_destroy() and to any access
 * of a reference to struct mlx5_fc.
 * e.g. en_tc.c is protected by the RTNL lock of its caller, and will never
 * call a dump (access to struct mlx5_fc) after a counter is destroyed.
 *
 * Access to the counter list:
 * - create (user context)
 *   - mlx5_fc_create() only adds the counter to an addlist to be consumed by
 *     mlx5_fc_stats_work(). addlist is a lockless singly linked list that
 *     doesn't require any additional synchronization when adding a single
 *     node.
 *   - the work is scheduled to do the actual add
 *
 * - destroy (user context)
 *   - add the counter to the lockless dellist
 *   - the work is scheduled to do the actual del
 *
 * - dump (user context)
 *   the user should not call dump after destroy
 *
 * - query (single threaded workqueue context)
 *   destroy/dump - no conflict (see destroy)
 *   query/dump - packets and bytes might be inconsistent (since the update
 *                is not atomic)
 *   query/create - no conflict (see create)
 *   since every create/destroy schedules the work, the hardware is actually
 *   queried only after the necessary time has elapsed.
 */

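/* Illustrative usage sketch of the exported API (not part of this file's
 * logic; see e.g. en_tc.c for a real caller). All of these calls must be
 * serialized by the caller as described in the locking scheme above:
 *
 *        struct mlx5_fc *counter;
 *        u64 bytes, packets, lastuse;
 *
 *        counter = mlx5_fc_create(dev, true);
 *        if (IS_ERR(counter))
 *                return PTR_ERR(counter);
 *        ...
 *        mlx5_fc_query_cached(counter, &bytes, &packets, &lastuse);
 *        ...
 *        mlx5_fc_destroy(dev, counter);
 */
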
static struct list_head *mlx5_fc_counters_lookup_next(struct mlx5_core_dev *dev,
                                                      u32 id)
{
        struct mlx5_fc_stats *fc_stats = &dev->priv.fc_stats;
        unsigned long next_id = (unsigned long)id + 1;
        struct mlx5_fc *counter;

        rcu_read_lock();
        /* skip counters that are in idr, but not yet in counters list */
        while ((counter = idr_get_next_ul(&fc_stats->counters_idr,
                                          &next_id)) != NULL &&
               list_empty(&counter->list))
                next_id++;
        rcu_read_unlock();

        return counter ? &counter->list : &fc_stats->counters;
}

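/* Link a new counter into the per-device counters list at the position found
 * by mlx5_fc_counters_lookup_next(), keeping the list sorted by counter id so
 * that the bulk query can walk contiguous id ranges.
 */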
static void mlx5_fc_stats_insert(struct mlx5_core_dev *dev,
                                 struct mlx5_fc *counter)
{
        struct list_head *next = mlx5_fc_counters_lookup_next(dev, counter->id);

        list_add_tail(&counter->list, next);
}

static void mlx5_fc_stats_remove(struct mlx5_core_dev *dev,
                                 struct mlx5_fc *counter)
{
        struct mlx5_fc_stats *fc_stats = &dev->priv.fc_stats;

        list_del(&counter->list);

        spin_lock(&fc_stats->counters_idr_lock);
        WARN_ON(!idr_remove(&fc_stats->counters_idr, counter->id));
        spin_unlock(&fc_stats->counters_idr_lock);
}

/* The function returns the next counter to query (the first counter that was
 * not covered by this bulk query), so the caller can keep calling it until
 * all counters are queried; it returns NULL once all counters up to last_id
 * have been queried.
 */
static struct mlx5_fc *mlx5_fc_stats_query(struct mlx5_core_dev *dev,
                                           struct mlx5_fc *first,
                                           u32 last_id)
{
        struct mlx5_fc_stats *fc_stats = &dev->priv.fc_stats;
        struct mlx5_fc *counter = NULL;
        struct mlx5_cmd_fc_bulk *b;
        bool more = false;
        u32 afirst_id;
        int num;
        int err;

        int max_bulk = min_t(int, MLX5_SW_MAX_COUNTERS_BULK,
                             (1 << MLX5_CAP_GEN(dev, log_max_flow_counter_bulk)));

        /* first id must be aligned to 4 when using bulk query */
        afirst_id = first->id & ~0x3;

        /* number of counters to query, including the last counter */
        num = ALIGN(last_id - afirst_id + 1, 4);
        if (num > max_bulk) {
                num = max_bulk;
                last_id = afirst_id + num - 1;
        }

        b = mlx5_cmd_fc_bulk_alloc(dev, afirst_id, num);
        if (!b) {
                mlx5_core_err(dev, "Error allocating resources for bulk query\n");
                return NULL;
        }

        err = mlx5_cmd_fc_bulk_query(dev, b);
        if (err) {
                mlx5_core_err(dev, "Error doing bulk query: %d\n", err);
                goto out;
        }

        counter = first;
        list_for_each_entry_from(counter, &fc_stats->counters, list) {
                struct mlx5_fc_cache *c = &counter->cache;
                u64 packets;
                u64 bytes;

                if (counter->id > last_id) {
                        more = true;
                        break;
                }

                mlx5_cmd_fc_bulk_get(dev, b,
                                     counter->id, &packets, &bytes);

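                /* no new packets since the last query; leave the cache,
                 * including lastuse, untouched
                 */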
                if (c->packets == packets)
                        continue;

                c->packets = packets;
                c->bytes = bytes;
                c->lastuse = jiffies;
        }

out:
        mlx5_cmd_fc_bulk_free(b);

        return more ? counter : NULL;
}

static void mlx5_free_fc(struct mlx5_core_dev *dev,
                         struct mlx5_fc *counter)
{
        mlx5_cmd_fc_free(dev, counter->id);
        kfree(counter);
}

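/* Periodic work on the single threaded workqueue: drain the lockless add/del
 * lists into the sorted counters list, free deleted counters, and refresh all
 * counters from hardware once the sampling interval has elapsed.
 */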
static void mlx5_fc_stats_work(struct work_struct *work)
{
        struct mlx5_core_dev *dev = container_of(work, struct mlx5_core_dev,
                                                 priv.fc_stats.work.work);
        struct mlx5_fc_stats *fc_stats = &dev->priv.fc_stats;
        /* Take dellist first to ensure that counters cannot be deleted before
         * they are inserted.
         */
        struct llist_node *dellist = llist_del_all(&fc_stats->dellist);
        struct llist_node *addlist = llist_del_all(&fc_stats->addlist);
        struct mlx5_fc *counter = NULL, *last = NULL, *tmp;
        unsigned long now = jiffies;

        if (addlist || !list_empty(&fc_stats->counters))
                queue_delayed_work(fc_stats->wq, &fc_stats->work,
                                   fc_stats->sampling_interval);

        llist_for_each_entry(counter, addlist, addlist)
                mlx5_fc_stats_insert(dev, counter);

        llist_for_each_entry_safe(counter, tmp, dellist, dellist) {
                mlx5_fc_stats_remove(dev, counter);

                mlx5_free_fc(dev, counter);
        }

        if (time_before(now, fc_stats->next_query) ||
            list_empty(&fc_stats->counters))
                return;
        last = list_last_entry(&fc_stats->counters, struct mlx5_fc, list);

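        /* walk the sorted list in bulk-sized chunks until every counter up to
         * the last id has been refreshed
         */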
        counter = list_first_entry(&fc_stats->counters, struct mlx5_fc,
                                   list);
        while (counter)
                counter = mlx5_fc_stats_query(dev, counter, last->id);

        fc_stats->next_query = now + fc_stats->sampling_interval;
}

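/* Allocate a hardware flow counter. When @aging is true the counter is also
 * registered with the periodic stats work, which keeps its cache up to date
 * so it can later be read with mlx5_fc_query_cached().
 */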
struct mlx5_fc *mlx5_fc_create(struct mlx5_core_dev *dev, bool aging)
{
        struct mlx5_fc_stats *fc_stats = &dev->priv.fc_stats;
        struct mlx5_fc *counter;
        int err;

        counter = kzalloc(sizeof(*counter), GFP_KERNEL);
        if (!counter)
                return ERR_PTR(-ENOMEM);
        INIT_LIST_HEAD(&counter->list);

        err = mlx5_cmd_fc_alloc(dev, &counter->id);
        if (err)
                goto err_out;

        if (aging) {
                u32 id = counter->id;

                counter->cache.lastuse = jiffies;
                counter->aging = true;

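                /* publish the counter in the IDR under exactly its hardware
                 * id (start == max == id) so that
                 * mlx5_fc_counters_lookup_next() can find it by id
                 */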
                idr_preload(GFP_KERNEL);
                spin_lock(&fc_stats->counters_idr_lock);

                err = idr_alloc_u32(&fc_stats->counters_idr, counter, &id, id,
                                    GFP_NOWAIT);

                spin_unlock(&fc_stats->counters_idr_lock);
                idr_preload_end();
                if (err)
                        goto err_out_alloc;

                llist_add(&counter->addlist, &fc_stats->addlist);

                mod_delayed_work(fc_stats->wq, &fc_stats->work, 0);
        }

        return counter;

err_out_alloc:
        mlx5_cmd_fc_free(dev, counter->id);
err_out:
        kfree(counter);

        return ERR_PTR(err);
}
EXPORT_SYMBOL(mlx5_fc_create);

u32 mlx5_fc_id(struct mlx5_fc *counter)
{
        return counter->id;
}
EXPORT_SYMBOL(mlx5_fc_id);

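/* Release a flow counter. Aging counters are queued on the lockless dellist
 * and freed asynchronously by the stats work (which also unlinks them from
 * the list and the IDR); non-aging counters are freed immediately.
 */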
void mlx5_fc_destroy(struct mlx5_core_dev *dev, struct mlx5_fc *counter)
{
        struct mlx5_fc_stats *fc_stats = &dev->priv.fc_stats;

        if (!counter)
                return;

        if (counter->aging) {
                llist_add(&counter->dellist, &fc_stats->dellist);
                mod_delayed_work(fc_stats->wq, &fc_stats->work, 0);
                return;
        }

        mlx5_free_fc(dev, counter);
}
EXPORT_SYMBOL(mlx5_fc_destroy);

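/* Set up the per-device flow counter statistics machinery: the IDR, the
 * sorted counters list, the lockless add/del lists and the single threaded
 * workqueue that runs the periodic query work.
 */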
int mlx5_init_fc_stats(struct mlx5_core_dev *dev)
{
        struct mlx5_fc_stats *fc_stats = &dev->priv.fc_stats;

        spin_lock_init(&fc_stats->counters_idr_lock);
        idr_init(&fc_stats->counters_idr);
        INIT_LIST_HEAD(&fc_stats->counters);
        init_llist_head(&fc_stats->addlist);
        init_llist_head(&fc_stats->dellist);

        fc_stats->wq = create_singlethread_workqueue("mlx5_fc");
        if (!fc_stats->wq)
                return -ENOMEM;

        fc_stats->sampling_interval = MLX5_FC_STATS_PERIOD;
        INIT_DELAYED_WORK(&fc_stats->work, mlx5_fc_stats_work);

        return 0;
}

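/* Tear down the statistics machinery: cancel the work, then free counters
 * still pending on the addlist as well as every counter already linked into
 * the counters list (which includes any pending deletions).
 */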
void mlx5_cleanup_fc_stats(struct mlx5_core_dev *dev)
{
        struct mlx5_fc_stats *fc_stats = &dev->priv.fc_stats;
        struct llist_node *tmplist;
        struct mlx5_fc *counter;
        struct mlx5_fc *tmp;

        cancel_delayed_work_sync(&dev->priv.fc_stats.work);
        destroy_workqueue(dev->priv.fc_stats.wq);
        dev->priv.fc_stats.wq = NULL;

        idr_destroy(&fc_stats->counters_idr);

        tmplist = llist_del_all(&fc_stats->addlist);
        llist_for_each_entry_safe(counter, tmp, tmplist, addlist)
                mlx5_free_fc(dev, counter);

        list_for_each_entry_safe(counter, tmp, &fc_stats->counters, list)
                mlx5_free_fc(dev, counter);
}

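/* Read the counter directly from hardware with a firmware query command,
 * bypassing the cached values.
 */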
int mlx5_fc_query(struct mlx5_core_dev *dev, struct mlx5_fc *counter,
                  u64 *packets, u64 *bytes)
{
        return mlx5_cmd_fc_query(dev, counter->id, packets, bytes);
}
EXPORT_SYMBOL(mlx5_fc_query);

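/* Report the bytes and packets accumulated since the previous call (deltas
 * against the last returned snapshot) together with the cached last-use
 * timestamp; does not touch hardware.
 */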
void mlx5_fc_query_cached(struct mlx5_fc *counter,
                          u64 *bytes, u64 *packets, u64 *lastuse)
{
        struct mlx5_fc_cache c;

        c = counter->cache;

        *bytes = c.bytes - counter->lastbytes;
        *packets = c.packets - counter->lastpackets;
        *lastuse = c.lastuse;

        counter->lastbytes = c.bytes;
        counter->lastpackets = c.packets;
}

void mlx5_fc_queue_stats_work(struct mlx5_core_dev *dev,
                              struct delayed_work *dwork,
                              unsigned long delay)
{
        struct mlx5_fc_stats *fc_stats = &dev->priv.fc_stats;

        queue_delayed_work(fc_stats->wq, dwork, delay);
}

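/* The sampling interval can only be shortened here: the new value is the
 * minimum of the requested interval and the current one.
 */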
void mlx5_fc_update_sampling_interval(struct mlx5_core_dev *dev,
                                      unsigned long interval)
{
        struct mlx5_fc_stats *fc_stats = &dev->priv.fc_stats;

        fc_stats->sampling_interval = min_t(unsigned long, interval,
                                            fc_stats->sampling_interval);
}