net/mlx5: Add new list to store deleted flow counters
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_counters.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_counters.c
index 58af6be13dfa88a2e08d5d85dd54b3fdaeb4e4f6..f1266f215a31319425d8111ecc804434817964c4 100644
  * access to counter list:
  * - create (user context)
  *   - mlx5_fc_create() only adds to an addlist to be used by
- *     mlx5_fc_stats_query_work(). addlist is protected by a spinlock.
+ *     mlx5_fc_stats_query_work(). addlist is a lockless singly linked list
+ *     that doesn't require any additional synchronization when adding a
+ *     single node.
  *   - spawn thread to do the actual add
  *
  * - destroy (user context)
- *   - mark a counter as deleted
+ *   - add a counter to a lockless dellist
  *   - spawn thread to do the actual del
  *
  * - dump (user context)
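
For reference, the lockless scheme the comment describes is the standard llist
producer/consumer idiom from <linux/llist.h>: user contexts push a node with
llist_add() (a cmpxchg loop, no spinlock), and the single-threaded workqueue
takes ownership of the whole list at once with llist_del_all(). A minimal
sketch follows; the names (foo_counter, foo_stats, foo_publish, foo_drain) are
illustrative only and do not exist in the driver:

#include <linux/llist.h>
#include <linux/printk.h>
#include <linux/slab.h>
#include <linux/types.h>

struct foo_counter {
        struct llist_node addnode;      /* hangs on foo_stats::addlist */
        struct llist_node delnode;      /* hangs on foo_stats::dellist */
        u32 id;
};

struct foo_stats {
        struct llist_head addlist;      /* pushed from user context, no lock */
        struct llist_head dellist;
};

/* user context: publish a new counter to the worker, lock-free */
static void foo_publish(struct foo_stats *stats, struct foo_counter *c)
{
        llist_add(&c->addnode, &stats->addlist);
}

/* single-threaded work context: drain both lists in one shot */
static void foo_drain(struct foo_stats *stats)
{
        struct llist_node *list = llist_del_all(&stats->addlist);
        struct foo_counter *c, *tmp;

        llist_for_each_entry(c, list, addnode)
                pr_debug("insert counter %u\n", c->id); /* e.g. add to the rbtree */

        list = llist_del_all(&stats->dellist);
        llist_for_each_entry_safe(c, tmp, list, delnode)
                kfree(c);               /* _safe: the node is freed while walking */
}

The patch makes the same split: llist_for_each_entry() is enough for addlist
because the nodes survive the walk, while the dellist walk frees each node and
therefore needs the _safe variant.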
@@ -156,50 +158,41 @@ out:
        return node;
 }
 
+static void mlx5_free_fc(struct mlx5_core_dev *dev,
+                        struct mlx5_fc *counter)
+{
+       mlx5_cmd_fc_free(dev, counter->id);
+       kfree(counter);
+}
+
 static void mlx5_fc_stats_work(struct work_struct *work)
 {
        struct mlx5_core_dev *dev = container_of(work, struct mlx5_core_dev,
                                                 priv.fc_stats.work.work);
        struct mlx5_fc_stats *fc_stats = &dev->priv.fc_stats;
+       struct llist_node *tmplist = llist_del_all(&fc_stats->addlist);
+       struct mlx5_fc *counter = NULL, *last = NULL, *tmp;
        unsigned long now = jiffies;
-       struct mlx5_fc *counter = NULL;
-       struct mlx5_fc *last = NULL;
        struct rb_node *node;
-       LIST_HEAD(tmplist);
-
-       spin_lock(&fc_stats->addlist_lock);
 
-       list_splice_tail_init(&fc_stats->addlist, &tmplist);
-
-       if (!list_empty(&tmplist) || !RB_EMPTY_ROOT(&fc_stats->counters))
+       if (tmplist || !RB_EMPTY_ROOT(&fc_stats->counters))
                queue_delayed_work(fc_stats->wq, &fc_stats->work,
                                   fc_stats->sampling_interval);
 
-       spin_unlock(&fc_stats->addlist_lock);
-
-       list_for_each_entry(counter, &tmplist, list)
+       llist_for_each_entry(counter, tmplist, addlist)
                mlx5_fc_stats_insert(&fc_stats->counters, counter);
 
-       node = rb_first(&fc_stats->counters);
-       while (node) {
-               counter = rb_entry(node, struct mlx5_fc, node);
-
-               node = rb_next(node);
-
-               if (counter->deleted) {
-                       rb_erase(&counter->node, &fc_stats->counters);
-
-                       mlx5_cmd_fc_free(dev, counter->id);
-
-                       kfree(counter);
-                       continue;
-               }
+       tmplist = llist_del_all(&fc_stats->dellist);
+       llist_for_each_entry_safe(counter, tmp, tmplist, dellist) {
+               rb_erase(&counter->node, &fc_stats->counters);
 
-               last = counter;
+               mlx5_free_fc(dev, counter);
        }
 
-       if (time_before(now, fc_stats->next_query) || !last)
+       node = rb_last(&fc_stats->counters);
+       if (time_before(now, fc_stats->next_query) || !node)
                return;
+       last = rb_entry(node, struct mlx5_fc, node);
 
        node = rb_first(&fc_stats->counters);
        while (node) {
@@ -229,9 +222,7 @@ struct mlx5_fc *mlx5_fc_create(struct mlx5_core_dev *dev, bool aging)
                counter->cache.lastuse = jiffies;
                counter->aging = true;
 
-               spin_lock(&fc_stats->addlist_lock);
-               list_add(&counter->list, &fc_stats->addlist);
-               spin_unlock(&fc_stats->addlist_lock);
+               llist_add(&counter->addlist, &fc_stats->addlist);
 
                mod_delayed_work(fc_stats->wq, &fc_stats->work, 0);
        }
@@ -253,13 +244,12 @@ void mlx5_fc_destroy(struct mlx5_core_dev *dev, struct mlx5_fc *counter)
                return;
 
        if (counter->aging) {
-               counter->deleted = true;
+               llist_add(&counter->dellist, &fc_stats->dellist);
                mod_delayed_work(fc_stats->wq, &fc_stats->work, 0);
                return;
        }
 
-       mlx5_cmd_fc_free(dev, counter->id);
-       kfree(counter);
+       mlx5_free_fc(dev, counter);
 }
 EXPORT_SYMBOL(mlx5_fc_destroy);
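
Taken together, the two hunks above define the caller-visible contract: an
aging counter is only ever queued (addlist on create, dellist on destroy) and
the workqueue does the real insert/free, while a non-aging counter is freed
synchronously. A rough usage sketch, assuming the usual <linux/err.h>
ERR_PTR() error convention for mlx5_fc_create() (not visible in this hunk);
foo_offload_flow() is a hypothetical caller:

static int foo_offload_flow(struct mlx5_core_dev *dev)
{
        struct mlx5_fc *fc;

        fc = mlx5_fc_create(dev, true); /* aging=true: joins addlist via the work item */
        if (IS_ERR(fc))
                return PTR_ERR(fc);

        /* ... attach the counter to a flow rule, read its cached stats ... */

        mlx5_fc_destroy(dev, fc);       /* aging path: queued on dellist, freed by the worker */
        return 0;
}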
 
@@ -268,8 +258,8 @@ int mlx5_init_fc_stats(struct mlx5_core_dev *dev)
        struct mlx5_fc_stats *fc_stats = &dev->priv.fc_stats;
 
        fc_stats->counters = RB_ROOT;
-       INIT_LIST_HEAD(&fc_stats->addlist);
-       spin_lock_init(&fc_stats->addlist_lock);
+       init_llist_head(&fc_stats->addlist);
+       init_llist_head(&fc_stats->dellist);
 
        fc_stats->wq = create_singlethread_workqueue("mlx5_fc");
        if (!fc_stats->wq)
@@ -284,6 +274,7 @@ int mlx5_init_fc_stats(struct mlx5_core_dev *dev)
 void mlx5_cleanup_fc_stats(struct mlx5_core_dev *dev)
 {
        struct mlx5_fc_stats *fc_stats = &dev->priv.fc_stats;
+       struct llist_node *tmplist;
        struct mlx5_fc *counter;
        struct mlx5_fc *tmp;
        struct rb_node *node;
@@ -292,13 +283,9 @@ void mlx5_cleanup_fc_stats(struct mlx5_core_dev *dev)
        destroy_workqueue(dev->priv.fc_stats.wq);
        dev->priv.fc_stats.wq = NULL;
 
-       list_for_each_entry_safe(counter, tmp, &fc_stats->addlist, list) {
-               list_del(&counter->list);
-
-               mlx5_cmd_fc_free(dev, counter->id);
-
-               kfree(counter);
-       }
+       tmplist = llist_del_all(&fc_stats->addlist);
+       llist_for_each_entry_safe(counter, tmp, tmplist, addlist)
+               mlx5_free_fc(dev, counter);
 
        node = rb_first(&fc_stats->counters);
        while (node) {
@@ -308,9 +295,7 @@ void mlx5_cleanup_fc_stats(struct mlx5_core_dev *dev)
 
                rb_erase(&counter->node, &fc_stats->counters);
 
-               mlx5_cmd_fc_free(dev, counter->id);
-
-               kfree(counter);
+               mlx5_free_fc(dev, counter);
        }
 }