hc
2024-05-10 9999e48639b3cecb08ffb37358bcba3b48161b29
kernel/block/blk-mq-cpumap.c
....@@ -1,3 +1,4 @@
1
+// SPDX-License-Identifier: GPL-2.0
12 /*
23 * CPU <-> hardware queue mapping helpers
34 *
....@@ -14,9 +15,10 @@
1415 #include "blk.h"
1516 #include "blk-mq.h"
1617
17
-static int cpu_to_queue_index(unsigned int nr_queues, const int cpu)
18
+static int queue_index(struct blk_mq_queue_map *qmap,
19
+ unsigned int nr_queues, const int q)
1820 {
19
- return cpu % nr_queues;
21
+ return qmap->queue_offset + (q % nr_queues);
2022 }
2123
2224 static int get_first_sibling(unsigned int cpu)
....@@ -30,25 +32,40 @@
3032 return cpu;
3133 }
3234
33
-int blk_mq_map_queues(struct blk_mq_tag_set *set)
35
+int blk_mq_map_queues(struct blk_mq_queue_map *qmap)
3436 {
35
- unsigned int *map = set->mq_map;
36
- unsigned int nr_queues = set->nr_hw_queues;
37
- unsigned int cpu, first_sibling;
37
+ unsigned int *map = qmap->mq_map;
38
+ unsigned int nr_queues = qmap->nr_queues;
39
+ unsigned int cpu, first_sibling, q = 0;
40
+
41
+ for_each_possible_cpu(cpu)
42
+ map[cpu] = -1;
43
+
44
+ /*
45
+ * Spread queues among present CPUs first to minimize the
46
+ * count of dead queues, which are mapped by all non-present CPUs
47
+ */
48
+ for_each_present_cpu(cpu) {
49
+ if (q >= nr_queues)
50
+ break;
51
+ map[cpu] = queue_index(qmap, nr_queues, q++);
52
+ }
3853
3954 for_each_possible_cpu(cpu) {
55
+ if (map[cpu] != -1)
56
+ continue;
4057 /*
4158 * First do sequential mapping between CPUs and queues.
4259 * In case we still have CPUs to map, and we have some number of
43
- * threads per cores then map sibling threads to the same queue for
44
- * performace optimizations.
60
+ * threads per cores then map sibling threads to the same queue
61
+ * for performance optimizations.
4562 */
46
- if (cpu < nr_queues) {
47
- map[cpu] = cpu_to_queue_index(nr_queues, cpu);
63
+ if (q < nr_queues) {
64
+ map[cpu] = queue_index(qmap, nr_queues, q++);
4865 } else {
4966 first_sibling = get_first_sibling(cpu);
5067 if (first_sibling == cpu)
51
- map[cpu] = cpu_to_queue_index(nr_queues, cpu);
68
+ map[cpu] = queue_index(qmap, nr_queues, q++);
5269 else
5370 map[cpu] = map[first_sibling];
5471 }
....@@ -58,17 +75,21 @@
5875 }
5976 EXPORT_SYMBOL_GPL(blk_mq_map_queues);
6077
61
-/*
78
+/**
79
+ * blk_mq_hw_queue_to_node - Look up the memory node for a hardware queue index
80
+ * @qmap: CPU to hardware queue map.
81
+ * @index: hardware queue index.
82
+ *
6283 * We have no quick way of doing reverse lookups. This is only used at
6384 * queue init time, so runtime isn't important.
6485 */
65
-int blk_mq_hw_queue_to_node(unsigned int *mq_map, unsigned int index)
86
+int blk_mq_hw_queue_to_node(struct blk_mq_queue_map *qmap, unsigned int index)
6687 {
6788 int i;
6889
6990 for_each_possible_cpu(i) {
70
- if (index == mq_map[i])
71
- return local_memory_node(cpu_to_node(i));
91
+ if (index == qmap->mq_map[i])
92
+ return cpu_to_node(i);
7293 }
7394
7495 return NUMA_NO_NODE;