block/blk-mq-cpumap.c
// SPDX-License-Identifier: GPL-2.0
/*
 * CPU <-> hardware queue mapping helpers
 *
 * Copyright (C) 2013-2014 Jens Axboe
 */
#include <linux/kernel.h>
#include <linux/threads.h>
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/cpu.h>

#include <linux/blk-mq.h>
#include "blk.h"
#include "blk-mq.h"

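/*
 * Map @cpu round-robin onto one of @nr_queues hardware queues, shifted by
 * the map's queue_offset.
 */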
static int cpu_to_queue_index(struct blk_mq_queue_map *qmap,
			      unsigned int nr_queues, const int cpu)
{
	return qmap->queue_offset + (cpu % nr_queues);
}

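/*
 * Return the first SMT sibling of @cpu, so that hyperthread siblings can
 * end up on the same hardware queue; fall back to @cpu itself if the
 * sibling mask is empty.
 */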
static int get_first_sibling(unsigned int cpu)
{
	unsigned int ret;

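	/* cpumask_first() returns >= nr_cpu_ids if the mask is empty */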
	ret = cpumask_first(topology_sibling_cpumask(cpu));
	if (ret < nr_cpu_ids)
		return ret;

	return cpu;
}

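/*
 * Create a default CPU-to-queue mapping: the first nr_queues CPUs each get
 * their own queue, and every remaining CPU inherits the queue of its first
 * SMT sibling, falling back to round-robin when it is its own first
 * sibling. E.g., assuming 8 CPUs, 4 queues, queue_offset 0, and sibling
 * pairs (0,4), (1,5), (2,6), (3,7): CPUs 0-3 map to queues 0-3, and
 * CPUs 4-7 inherit queues 0-3 from their siblings.
 */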
int blk_mq_map_queues(struct blk_mq_queue_map *qmap)
{
	unsigned int *map = qmap->mq_map;
	unsigned int nr_queues = qmap->nr_queues;
	unsigned int cpu, first_sibling;

	for_each_possible_cpu(cpu) {
		/*
		 * First do sequential mapping between CPUs and queues.
		 * If we still have CPUs to map and the topology has
		 * multiple threads per core, map sibling threads to the
		 * same queue as a performance optimization.
		 */
		if (cpu < nr_queues) {
			map[cpu] = cpu_to_queue_index(qmap, nr_queues, cpu);
		} else {
			first_sibling = get_first_sibling(cpu);
			if (first_sibling == cpu)
				map[cpu] = cpu_to_queue_index(qmap, nr_queues, cpu);
			else
				map[cpu] = map[first_sibling];
		}
	}

	return 0;
}
EXPORT_SYMBOL_GPL(blk_mq_map_queues);

/*
 * We have no quick way of doing reverse lookups. This is only used at
 * queue init time, so runtime isn't important.
 */
int blk_mq_hw_queue_to_node(struct blk_mq_queue_map *qmap, unsigned int index)
{
	int i;

	for_each_possible_cpu(i) {
		if (index == qmap->mq_map[i])
			return local_memory_node(cpu_to_node(i));
	}

	return NUMA_NO_NODE;
}