drivers/net/ethernet/mellanox/mlx5/core/fs_counters.c
/*
 * Copyright (c) 2016, Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/mlx5/driver.h>
#include <linux/mlx5/fs.h>
#include <linux/rbtree.h>
#include "mlx5_core.h"
#include "fs_core.h"
#include "fs_cmd.h"

#define MLX5_FC_STATS_PERIOD msecs_to_jiffies(1000)
/* Max number of counters to query in a bulk read is 32K */
#define MLX5_SW_MAX_COUNTERS_BULK BIT(15)

struct mlx5_fc_cache {
        u64 packets;
        u64 bytes;
        u64 lastuse;
};
49
50 struct mlx5_fc {
51         struct list_head list;
52         struct llist_node addlist;
53         struct llist_node dellist;
54
55         /* last{packets,bytes} members are used when calculating the delta since
56          * last reading
57          */
58         u64 lastpackets;
59         u64 lastbytes;
60
61         u32 id;
62         bool aging;
63
64         struct mlx5_fc_cache cache ____cacheline_aligned_in_smp;
65 };
66
/* locking scheme:
 *
 * It is the responsibility of the user to prevent concurrent calls or bad
 * ordering to mlx5_fc_create(), mlx5_fc_destroy() and accessing a reference
 * to struct mlx5_fc.
 * e.g. en_tc.c is protected by the RTNL lock of its caller, and will never
 * call a dump (access to struct mlx5_fc) after a counter is destroyed.
 *
 * access to counter list:
 * - create (user context)
 *   - mlx5_fc_create() only adds to an addlist to be used by
 *     mlx5_fc_stats_work(). addlist is a lockless single-linked list
 *     that doesn't require any additional synchronization when adding a
 *     single node.
 *   - queue the stats work to do the actual insert
 *
 * - destroy (user context)
 *   - add a counter to the lockless dellist
 *   - queue the stats work to do the actual delete
 *
 * - dump (user context)
 *   the user should not call dump after destroy
 *
 * - query (single-threaded workqueue context)
 *   destroy/dump - no conflict (see destroy)
 *   query/dump - packets and bytes might be inconsistent (since the update
 *                is not atomic)
 *   query/create - no conflict (see create)
 *   every create/destroy queues the work, but the work queries the
 *   hardware only after the sampling interval has elapsed.
 */

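/* Find the list position before which a counter with the given id should
 * be inserted (via list_add_tail()) so that the counters list stays
 * sorted by id. Returns the list head itself when no counter with a
 * higher id is on the list yet.
 */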
static struct list_head *mlx5_fc_counters_lookup_next(struct mlx5_core_dev *dev,
                                                      u32 id)
{
        struct mlx5_fc_stats *fc_stats = &dev->priv.fc_stats;
        unsigned long next_id = (unsigned long)id + 1;
        struct mlx5_fc *counter;
        unsigned long tmp;

        rcu_read_lock();
        /* skip counters that are in idr, but not yet in counters list */
        idr_for_each_entry_continue_ul(&fc_stats->counters_idr,
                                       counter, tmp, next_id) {
                if (!list_empty(&counter->list))
                        break;
        }
        rcu_read_unlock();

        return counter ? &counter->list : &fc_stats->counters;
}

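/* Insert an aging counter into the counters list, keeping the list sorted
 * by id so that bulk queries can walk contiguous id ranges.
 */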
static void mlx5_fc_stats_insert(struct mlx5_core_dev *dev,
                                 struct mlx5_fc *counter)
{
        struct list_head *next = mlx5_fc_counters_lookup_next(dev, counter->id);

        list_add_tail(&counter->list, next);
}

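/* Unlink a counter from the counters list and release its idr slot. Runs
 * only in the stats work, the sole writer of the counters list.
 */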
static void mlx5_fc_stats_remove(struct mlx5_core_dev *dev,
                                 struct mlx5_fc *counter)
{
        struct mlx5_fc_stats *fc_stats = &dev->priv.fc_stats;

        list_del(&counter->list);

        spin_lock(&fc_stats->counters_idr_lock);
        WARN_ON(!idr_remove(&fc_stats->counters_idr, counter->id));
        spin_unlock(&fc_stats->counters_idr_lock);
}

/* The function returns the first counter that this bulk query did not
 * cover, to be used as the starting point of the next call, or NULL when
 * all counters up to last_id have been queried. The caller keeps calling
 * it until it returns NULL.
 */
static struct mlx5_fc *mlx5_fc_stats_query(struct mlx5_core_dev *dev,
                                           struct mlx5_fc *first,
                                           u32 last_id)
{
        struct mlx5_fc_stats *fc_stats = &dev->priv.fc_stats;
        struct mlx5_fc *counter = NULL;
        struct mlx5_cmd_fc_bulk *b;
        bool more = false;
        u32 afirst_id;
        int num;
        int err;

        int max_bulk = min_t(int, MLX5_SW_MAX_COUNTERS_BULK,
                             (1 << MLX5_CAP_GEN(dev, log_max_flow_counter_bulk)));

        /* first id must be aligned to 4 when using a bulk query */
        afirst_id = first->id & ~0x3;

        /* number of counters to query, including the last counter */
        num = ALIGN(last_id - afirst_id + 1, 4);
        if (num > max_bulk) {
                num = max_bulk;
                last_id = afirst_id + num - 1;
        }

        b = mlx5_cmd_fc_bulk_alloc(dev, afirst_id, num);
        if (!b) {
                mlx5_core_err(dev, "Error allocating resources for bulk query\n");
                return NULL;
        }

        err = mlx5_cmd_fc_bulk_query(dev, b);
        if (err) {
                mlx5_core_err(dev, "Error doing bulk query: %d\n", err);
                goto out;
        }

        counter = first;
        list_for_each_entry_from(counter, &fc_stats->counters, list) {
                struct mlx5_fc_cache *c = &counter->cache;
                u64 packets;
                u64 bytes;

                if (counter->id > last_id) {
                        more = true;
                        break;
                }

                mlx5_cmd_fc_bulk_get(dev, b,
                                     counter->id, &packets, &bytes);

                if (c->packets == packets)
                        continue;

                c->packets = packets;
                c->bytes = bytes;
                c->lastuse = jiffies;
        }

out:
        mlx5_cmd_fc_bulk_free(b);

        return more ? counter : NULL;
}

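/* Release the hardware counter and free the driver object. */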
static void mlx5_free_fc(struct mlx5_core_dev *dev,
                         struct mlx5_fc *counter)
{
        mlx5_cmd_fc_free(dev, counter->id);
        kfree(counter);
}

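/* Periodic stats work: drain the lockless add/del lists into the sorted
 * counters list, then, once the sampling interval has elapsed, bulk-query
 * the hardware and refresh each counter's cache.
 */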
static void mlx5_fc_stats_work(struct work_struct *work)
{
        struct mlx5_core_dev *dev = container_of(work, struct mlx5_core_dev,
                                                 priv.fc_stats.work.work);
        struct mlx5_fc_stats *fc_stats = &dev->priv.fc_stats;
        /* Take dellist first to ensure that counters cannot be deleted before
         * they are inserted.
         */
        struct llist_node *dellist = llist_del_all(&fc_stats->dellist);
        struct llist_node *addlist = llist_del_all(&fc_stats->addlist);
        struct mlx5_fc *counter = NULL, *last = NULL, *tmp;
        unsigned long now = jiffies;

        if (addlist || !list_empty(&fc_stats->counters))
                queue_delayed_work(fc_stats->wq, &fc_stats->work,
                                   fc_stats->sampling_interval);

        llist_for_each_entry(counter, addlist, addlist)
                mlx5_fc_stats_insert(dev, counter);

        llist_for_each_entry_safe(counter, tmp, dellist, dellist) {
                mlx5_fc_stats_remove(dev, counter);

                mlx5_free_fc(dev, counter);
        }

        if (time_before(now, fc_stats->next_query) ||
            list_empty(&fc_stats->counters))
                return;

        last = list_last_entry(&fc_stats->counters, struct mlx5_fc, list);

        counter = list_first_entry(&fc_stats->counters, struct mlx5_fc,
                                   list);
        while (counter)
                counter = mlx5_fc_stats_query(dev, counter, last->id);

        fc_stats->next_query = now + fc_stats->sampling_interval;
}

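/* Allocate a hardware flow counter. With @aging set, the counter is also
 * registered with the stats work so that its packet/byte cache is
 * refreshed every sampling interval. Returns ERR_PTR() on failure, so
 * callers must check the result with IS_ERR().
 *
 * Typical usage (an illustrative sketch only; error handling trimmed):
 *
 *      struct mlx5_fc *fc = mlx5_fc_create(dev, true);
 *
 *      if (IS_ERR(fc))
 *              return PTR_ERR(fc);
 *      ...
 *      mlx5_fc_query_cached(fc, &bytes, &packets, &lastuse);
 *      ...
 *      mlx5_fc_destroy(dev, fc);
 */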
struct mlx5_fc *mlx5_fc_create(struct mlx5_core_dev *dev, bool aging)
{
        struct mlx5_fc_stats *fc_stats = &dev->priv.fc_stats;
        struct mlx5_fc *counter;
        int err;

        counter = kzalloc(sizeof(*counter), GFP_KERNEL);
        if (!counter)
                return ERR_PTR(-ENOMEM);
        INIT_LIST_HEAD(&counter->list);

        err = mlx5_cmd_fc_alloc(dev, &counter->id);
        if (err)
                goto err_out;

        if (aging) {
                u32 id = counter->id;

                counter->cache.lastuse = jiffies;
                counter->aging = true;

                idr_preload(GFP_KERNEL);
                spin_lock(&fc_stats->counters_idr_lock);

                err = idr_alloc_u32(&fc_stats->counters_idr, counter, &id, id,
                                    GFP_NOWAIT);

                spin_unlock(&fc_stats->counters_idr_lock);
                idr_preload_end();
                if (err)
                        goto err_out_alloc;

                llist_add(&counter->addlist, &fc_stats->addlist);

                mod_delayed_work(fc_stats->wq, &fc_stats->work, 0);
        }

        return counter;

err_out_alloc:
        mlx5_cmd_fc_free(dev, counter->id);
err_out:
        kfree(counter);

        return ERR_PTR(err);
}
EXPORT_SYMBOL(mlx5_fc_create);

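/* Return the id of the hardware object backing this counter. */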
u32 mlx5_fc_id(struct mlx5_fc *counter)
{
        return counter->id;
}
EXPORT_SYMBOL(mlx5_fc_id);

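/* Release a flow counter. Aging counters are handed to the stats work via
 * the lockless dellist, which removes them from the counters list before
 * freeing them; non-aging counters are freed immediately.
 */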
void mlx5_fc_destroy(struct mlx5_core_dev *dev, struct mlx5_fc *counter)
{
        struct mlx5_fc_stats *fc_stats = &dev->priv.fc_stats;

        if (!counter)
                return;

        if (counter->aging) {
                llist_add(&counter->dellist, &fc_stats->dellist);
                mod_delayed_work(fc_stats->wq, &fc_stats->work, 0);
                return;
        }

        mlx5_free_fc(dev, counter);
}
EXPORT_SYMBOL(mlx5_fc_destroy);

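/* Initialize the flow counter stats machinery: the idr and the lists that
 * track aging counters, plus the single-threaded workqueue that samples
 * them.
 */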
int mlx5_init_fc_stats(struct mlx5_core_dev *dev)
{
        struct mlx5_fc_stats *fc_stats = &dev->priv.fc_stats;

        spin_lock_init(&fc_stats->counters_idr_lock);
        idr_init(&fc_stats->counters_idr);
        INIT_LIST_HEAD(&fc_stats->counters);
        init_llist_head(&fc_stats->addlist);
        init_llist_head(&fc_stats->dellist);

        fc_stats->wq = create_singlethread_workqueue("mlx5_fc");
        if (!fc_stats->wq)
                return -ENOMEM;

        fc_stats->sampling_interval = MLX5_FC_STATS_PERIOD;
        INIT_DELAYED_WORK(&fc_stats->work, mlx5_fc_stats_work);

        return 0;
}

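/* Tear down the stats machinery and free any counters still pending on
 * the addlist or live on the counters list.
 */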
void mlx5_cleanup_fc_stats(struct mlx5_core_dev *dev)
{
        struct mlx5_fc_stats *fc_stats = &dev->priv.fc_stats;
        struct llist_node *tmplist;
        struct mlx5_fc *counter;
        struct mlx5_fc *tmp;

        cancel_delayed_work_sync(&dev->priv.fc_stats.work);
        destroy_workqueue(dev->priv.fc_stats.wq);
        dev->priv.fc_stats.wq = NULL;

        idr_destroy(&fc_stats->counters_idr);

        tmplist = llist_del_all(&fc_stats->addlist);
        llist_for_each_entry_safe(counter, tmp, tmplist, addlist)
                mlx5_free_fc(dev, counter);

        list_for_each_entry_safe(counter, tmp, &fc_stats->counters, list)
                mlx5_free_fc(dev, counter);
}

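/* Query a single counter straight from hardware, bypassing the cache. */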
int mlx5_fc_query(struct mlx5_core_dev *dev, struct mlx5_fc *counter,
                  u64 *packets, u64 *bytes)
{
        return mlx5_cmd_fc_query(dev, counter->id, packets, bytes);
}
EXPORT_SYMBOL(mlx5_fc_query);

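/* Report the bytes/packets delta since the previous call, based on the
 * cache refreshed by the stats work, and record the new baseline. The
 * cache is read without locking, so bytes and packets may be mutually
 * inconsistent (see the locking scheme above).
 */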
void mlx5_fc_query_cached(struct mlx5_fc *counter,
                          u64 *bytes, u64 *packets, u64 *lastuse)
{
        struct mlx5_fc_cache c;

        c = counter->cache;

        *bytes = c.bytes - counter->lastbytes;
        *packets = c.packets - counter->lastpackets;
        *lastuse = c.lastuse;

        counter->lastbytes = c.bytes;
        counter->lastpackets = c.packets;
}

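/* Queue a caller-provided delayed work item on the stats workqueue. */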
void mlx5_fc_queue_stats_work(struct mlx5_core_dev *dev,
                              struct delayed_work *dwork,
                              unsigned long delay)
{
        struct mlx5_fc_stats *fc_stats = &dev->priv.fc_stats;

        queue_delayed_work(fc_stats->wq, dwork, delay);
}

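/* Shrink the sampling interval to the requested value if it is shorter
 * than the current one; the interval is never lengthened here.
 */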
void mlx5_fc_update_sampling_interval(struct mlx5_core_dev *dev,
                                      unsigned long interval)
{
        struct mlx5_fc_stats *fc_stats = &dev->priv.fc_stats;

        fc_stats->sampling_interval = min_t(unsigned long, interval,
                                            fc_stats->sampling_interval);
}