net: Add functions netif_reset_xps_queue and netif_set_xps_queue
author    Alexander Duyck <alexander.h.duyck@intel.com>
          Thu, 10 Jan 2013 08:57:02 +0000 (08:57 +0000)
committer David S. Miller <davem@davemloft.net>
          Fri, 11 Jan 2013 06:47:03 +0000 (22:47 -0800)
This patch adds two functions, netif_reset_xps_queue and
netif_set_xps_queue.  The main idea behind these two functions is to
provide a mechanism through which drivers can update their XPS
defaults.

Currently no such mechanism exists, and as a result we cannot use XPS
for features such as ATR (Application Targeted Routing), which requires
a basic starting configuration in which the Tx queues are mapped to
CPUs 1:1.  With this change I am making it possible for drivers such as
ixgbe to use the XPS feature by controlling the default configuration.
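
For example, a driver could seed such a 1:1 default from its init path
roughly as follows.  This is a minimal sketch, not part of this patch;
my_set_default_xps() is a hypothetical driver helper, and the modulo
wrap for devices with more Tx queues than CPUs is an assumption:

	/*
	 * Sketch: map each Tx queue to one CPU via the new helper.
	 * Hypothetical driver code, not from this patch.
	 */
	static void my_set_default_xps(struct net_device *dev)
	{
		cpumask_t mask;
		int queue;

		for (queue = 0; queue < dev->real_num_tx_queues; queue++) {
			cpumask_clear(&mask);
			/* wrap if there are more Tx queues than CPUs */
			cpumask_set_cpu(queue % num_online_cpus(), &mask);
			/* offline CPUs in the mask are skipped by the helper */
			netif_set_xps_queue(dev, &mask, queue);
		}
	}

netif_set_xps_queue() takes the XPS map mutex and rebuilds the per-CPU
maps itself, so calling it once per queue like this is safe; it returns
0 on success and -ENOMEM if the new maps cannot be allocated.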

Signed-off-by: Alexander Duyck <alexander.h.duyck@intel.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
include/linux/netdevice.h
net/core/dev.c
net/core/net-sysfs.c

diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
index 608c3ac4d045bda95d5c0f252360aba97c8527d1..59fe9da4e31558bdde86f636a4c388062912ae54 100644
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
@@ -2103,6 +2103,19 @@ static inline void netif_wake_subqueue(struct net_device *dev, u16 queue_index)
                __netif_schedule(txq->qdisc);
 }
 
+#ifdef CONFIG_XPS
+extern void netif_reset_xps_queue(struct net_device *dev, u16 index);
+extern int netif_set_xps_queue(struct net_device *dev, struct cpumask *mask,
+                              u16 index);
+#else
+static inline int netif_set_xps_queue(struct net_device *dev,
+                                     struct cpumask *mask,
+                                     u16 index)
+{
+       return 0;
+}
+#endif
+
 /*
  * Returns a Tx hash for the given packet when dev->real_num_tx_queues is used
  * as a distribution range limit for the returned value.
diff --git a/net/core/dev.c b/net/core/dev.c
index 81ff67149f620ce2bb372bba2f0537f8e5c7ae01..257b29516f69c85ee7ca3e1f272f812af65dd5c6 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -1857,6 +1857,161 @@ static void netif_setup_tc(struct net_device *dev, unsigned int txq)
        }
 }
 
+#ifdef CONFIG_XPS
+static DEFINE_MUTEX(xps_map_mutex);
+#define xmap_dereference(P)            \
+       rcu_dereference_protected((P), lockdep_is_held(&xps_map_mutex))
+
+void netif_reset_xps_queue(struct net_device *dev, u16 index)
+{
+       struct xps_dev_maps *dev_maps;
+       struct xps_map *map;
+       int i, pos, nonempty = 0;
+
+       mutex_lock(&xps_map_mutex);
+       dev_maps = xmap_dereference(dev->xps_maps);
+
+       if (!dev_maps)
+               goto out_no_maps;
+
+       for_each_possible_cpu(i) {
+               map = xmap_dereference(dev_maps->cpu_map[i]);
+               if (!map)
+                       continue;
+
+               for (pos = 0; pos < map->len; pos++)
+                       if (map->queues[pos] == index)
+                               break;
+
+               if (pos < map->len) {
+                       if (map->len > 1) {
+                               map->queues[pos] = map->queues[--map->len];
+                       } else {
+                               RCU_INIT_POINTER(dev_maps->cpu_map[i], NULL);
+                               kfree_rcu(map, rcu);
+                               map = NULL;
+                       }
+               }
+               if (map)
+                       nonempty = 1;
+       }
+
+       if (!nonempty) {
+               RCU_INIT_POINTER(dev->xps_maps, NULL);
+               kfree_rcu(dev_maps, rcu);
+       }
+
+out_no_maps:
+       mutex_unlock(&xps_map_mutex);
+}
+
+int netif_set_xps_queue(struct net_device *dev, struct cpumask *mask, u16 index)
+{
+       int i, cpu, pos, map_len, alloc_len, need_set;
+       struct xps_map *map, *new_map;
+       struct xps_dev_maps *dev_maps, *new_dev_maps;
+       int nonempty = 0;
+       int numa_node_id = -2;
+       int maps_sz = max_t(unsigned int, XPS_DEV_MAPS_SIZE, L1_CACHE_BYTES);
+
+       new_dev_maps = kzalloc(maps_sz, GFP_KERNEL);
+       if (!new_dev_maps)
+               return -ENOMEM;
+
+       mutex_lock(&xps_map_mutex);
+
+       dev_maps = xmap_dereference(dev->xps_maps);
+
+       for_each_possible_cpu(cpu) {
+               map = dev_maps ?
+                       xmap_dereference(dev_maps->cpu_map[cpu]) : NULL;
+               new_map = map;
+               if (map) {
+                       for (pos = 0; pos < map->len; pos++)
+                               if (map->queues[pos] == index)
+                                       break;
+                       map_len = map->len;
+                       alloc_len = map->alloc_len;
+               } else
+                       pos = map_len = alloc_len = 0;
+
+               need_set = cpumask_test_cpu(cpu, mask) && cpu_online(cpu);
+#ifdef CONFIG_NUMA
+               if (need_set) {
+                       if (numa_node_id == -2)
+                               numa_node_id = cpu_to_node(cpu);
+                       else if (numa_node_id != cpu_to_node(cpu))
+                               numa_node_id = -1;
+               }
+#endif
+               if (need_set && pos >= map_len) {
+                       /* Need to add queue to this CPU's map */
+                       if (map_len >= alloc_len) {
+                               alloc_len = alloc_len ?
+                                   2 * alloc_len : XPS_MIN_MAP_ALLOC;
+                               new_map = kzalloc_node(XPS_MAP_SIZE(alloc_len),
+                                                      GFP_KERNEL,
+                                                      cpu_to_node(cpu));
+                               if (!new_map)
+                                       goto error;
+                               new_map->alloc_len = alloc_len;
+                               for (i = 0; i < map_len; i++)
+                                       new_map->queues[i] = map->queues[i];
+                               new_map->len = map_len;
+                       }
+                       new_map->queues[new_map->len++] = index;
+               } else if (!need_set && pos < map_len) {
+                       /* Need to remove queue from this CPU's map */
+                       if (map_len > 1)
+                               new_map->queues[pos] =
+                                   new_map->queues[--new_map->len];
+                       else
+                               new_map = NULL;
+               }
+               RCU_INIT_POINTER(new_dev_maps->cpu_map[cpu], new_map);
+       }
+
+       /* Cleanup old maps */
+       for_each_possible_cpu(cpu) {
+               map = dev_maps ?
+                       xmap_dereference(dev_maps->cpu_map[cpu]) : NULL;
+               if (map && xmap_dereference(new_dev_maps->cpu_map[cpu]) != map)
+                       kfree_rcu(map, rcu);
+               if (new_dev_maps->cpu_map[cpu])
+                       nonempty = 1;
+       }
+
+       if (nonempty) {
+               rcu_assign_pointer(dev->xps_maps, new_dev_maps);
+       } else {
+               kfree(new_dev_maps);
+               RCU_INIT_POINTER(dev->xps_maps, NULL);
+       }
+
+       if (dev_maps)
+               kfree_rcu(dev_maps, rcu);
+
+       netdev_queue_numa_node_write(netdev_get_tx_queue(dev, index),
+                                    (numa_node_id >= 0) ? numa_node_id :
+                                    NUMA_NO_NODE);
+
+       mutex_unlock(&xps_map_mutex);
+
+       return 0;
+error:
+       mutex_unlock(&xps_map_mutex);
+
+       if (new_dev_maps)
+               for_each_possible_cpu(i)
+                       kfree(rcu_dereference_protected(
+                               new_dev_maps->cpu_map[i],
+                               1));
+       kfree(new_dev_maps);
+       return -ENOMEM;
+}
+EXPORT_SYMBOL(netif_set_xps_queue);
+
+#endif
 /*
  * Routine to help set real_num_tx_queues. To avoid skbs mapped to queues
  * greater then real_num_tx_queues stale skbs on the qdisc must be flushed.
diff --git a/net/core/net-sysfs.c b/net/core/net-sysfs.c
index 29c884a74c38ceb036366b5de7f5162b822efad8..5ad489d5d06245ef28445ac09859309d11dfc5ae 100644
--- a/net/core/net-sysfs.c
+++ b/net/core/net-sysfs.c
@@ -1002,54 +1002,14 @@ static ssize_t show_xps_map(struct netdev_queue *queue,
        return len;
 }
 
-static DEFINE_MUTEX(xps_map_mutex);
-#define xmap_dereference(P)            \
-       rcu_dereference_protected((P), lockdep_is_held(&xps_map_mutex))
-
 static void xps_queue_release(struct netdev_queue *queue)
 {
        struct net_device *dev = queue->dev;
-       struct xps_dev_maps *dev_maps;
-       struct xps_map *map;
        unsigned long index;
-       int i, pos, nonempty = 0;
 
        index = get_netdev_queue_index(queue);
 
-       mutex_lock(&xps_map_mutex);
-       dev_maps = xmap_dereference(dev->xps_maps);
-
-       if (dev_maps) {
-               for_each_possible_cpu(i) {
-                       map = xmap_dereference(dev_maps->cpu_map[i]);
-                       if (!map)
-                               continue;
-
-                       for (pos = 0; pos < map->len; pos++)
-                               if (map->queues[pos] == index)
-                                       break;
-
-                       if (pos < map->len) {
-                               if (map->len > 1)
-                                       map->queues[pos] =
-                                           map->queues[--map->len];
-                               else {
-                                       RCU_INIT_POINTER(dev_maps->cpu_map[i],
-                                           NULL);
-                                       kfree_rcu(map, rcu);
-                                       map = NULL;
-                               }
-                       }
-                       if (map)
-                               nonempty = 1;
-               }
-
-               if (!nonempty) {
-                       RCU_INIT_POINTER(dev->xps_maps, NULL);
-                       kfree_rcu(dev_maps, rcu);
-               }
-       }
-       mutex_unlock(&xps_map_mutex);
+       netif_reset_xps_queue(dev, index);
 }
 
 static ssize_t store_xps_map(struct netdev_queue *queue,
@@ -1057,13 +1017,9 @@ static ssize_t store_xps_map(struct netdev_queue *queue,
                      const char *buf, size_t len)
 {
        struct net_device *dev = queue->dev;
-       cpumask_var_t mask;
-       int err, i, cpu, pos, map_len, alloc_len, need_set;
        unsigned long index;
-       struct xps_map *map, *new_map;
-       struct xps_dev_maps *dev_maps, *new_dev_maps;
-       int nonempty = 0;
-       int numa_node_id = -2;
+       cpumask_var_t mask;
+       int err;
 
        if (!capable(CAP_NET_ADMIN))
                return -EPERM;
@@ -1079,105 +1035,11 @@ static ssize_t store_xps_map(struct netdev_queue *queue,
                return err;
        }
 
-       new_dev_maps = kzalloc(max_t(unsigned int,
-           XPS_DEV_MAPS_SIZE, L1_CACHE_BYTES), GFP_KERNEL);
-       if (!new_dev_maps) {
-               free_cpumask_var(mask);
-               return -ENOMEM;
-       }
-
-       mutex_lock(&xps_map_mutex);
-
-       dev_maps = xmap_dereference(dev->xps_maps);
-
-       for_each_possible_cpu(cpu) {
-               map = dev_maps ?
-                       xmap_dereference(dev_maps->cpu_map[cpu]) : NULL;
-               new_map = map;
-               if (map) {
-                       for (pos = 0; pos < map->len; pos++)
-                               if (map->queues[pos] == index)
-                                       break;
-                       map_len = map->len;
-                       alloc_len = map->alloc_len;
-               } else
-                       pos = map_len = alloc_len = 0;
-
-               need_set = cpumask_test_cpu(cpu, mask) && cpu_online(cpu);
-#ifdef CONFIG_NUMA
-               if (need_set) {
-                       if (numa_node_id == -2)
-                               numa_node_id = cpu_to_node(cpu);
-                       else if (numa_node_id != cpu_to_node(cpu))
-                               numa_node_id = -1;
-               }
-#endif
-               if (need_set && pos >= map_len) {
-                       /* Need to add queue to this CPU's map */
-                       if (map_len >= alloc_len) {
-                               alloc_len = alloc_len ?
-                                   2 * alloc_len : XPS_MIN_MAP_ALLOC;
-                               new_map = kzalloc_node(XPS_MAP_SIZE(alloc_len),
-                                                      GFP_KERNEL,
-                                                      cpu_to_node(cpu));
-                               if (!new_map)
-                                       goto error;
-                               new_map->alloc_len = alloc_len;
-                               for (i = 0; i < map_len; i++)
-                                       new_map->queues[i] = map->queues[i];
-                               new_map->len = map_len;
-                       }
-                       new_map->queues[new_map->len++] = index;
-               } else if (!need_set && pos < map_len) {
-                       /* Need to remove queue from this CPU's map */
-                       if (map_len > 1)
-                               new_map->queues[pos] =
-                                   new_map->queues[--new_map->len];
-                       else
-                               new_map = NULL;
-               }
-               RCU_INIT_POINTER(new_dev_maps->cpu_map[cpu], new_map);
-       }
-
-       /* Cleanup old maps */
-       for_each_possible_cpu(cpu) {
-               map = dev_maps ?
-                       xmap_dereference(dev_maps->cpu_map[cpu]) : NULL;
-               if (map && xmap_dereference(new_dev_maps->cpu_map[cpu]) != map)
-                       kfree_rcu(map, rcu);
-               if (new_dev_maps->cpu_map[cpu])
-                       nonempty = 1;
-       }
-
-       if (nonempty) {
-               rcu_assign_pointer(dev->xps_maps, new_dev_maps);
-       } else {
-               kfree(new_dev_maps);
-               RCU_INIT_POINTER(dev->xps_maps, NULL);
-       }
-
-       if (dev_maps)
-               kfree_rcu(dev_maps, rcu);
-
-       netdev_queue_numa_node_write(queue, (numa_node_id >= 0) ? numa_node_id :
-                                           NUMA_NO_NODE);
-
-       mutex_unlock(&xps_map_mutex);
+       err = netif_set_xps_queue(dev, mask, index);
 
        free_cpumask_var(mask);
-       return len;
 
-error:
-       mutex_unlock(&xps_map_mutex);
-
-       if (new_dev_maps)
-               for_each_possible_cpu(i)
-                       kfree(rcu_dereference_protected(
-                               new_dev_maps->cpu_map[i],
-                               1));
-       kfree(new_dev_maps);
-       free_cpumask_var(mask);
-       return -ENOMEM;
+       return err ? : len;
 }
 
 static struct netdev_queue_attribute xps_cpus_attribute =