@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
 /*
  * CPU <-> hardware queue mapping helpers
  *
@@ -14,9 +15,10 @@
 #include "blk.h"
 #include "blk-mq.h"
 
-static int cpu_to_queue_index(unsigned int nr_queues, const int cpu)
+static int queue_index(struct blk_mq_queue_map *qmap,
+                       unsigned int nr_queues, const int q)
 {
-        return cpu % nr_queues;
+        return qmap->queue_offset + (q % nr_queues);
 }
 
 static int get_first_sibling(unsigned int cpu)
@@ -30,25 +32,40 @@
         return cpu;
 }
 
-int blk_mq_map_queues(struct blk_mq_tag_set *set)
+int blk_mq_map_queues(struct blk_mq_queue_map *qmap)
 {
-        unsigned int *map = set->mq_map;
-        unsigned int nr_queues = set->nr_hw_queues;
-        unsigned int cpu, first_sibling;
+        unsigned int *map = qmap->mq_map;
+        unsigned int nr_queues = qmap->nr_queues;
+        unsigned int cpu, first_sibling, q = 0;
+
+        for_each_possible_cpu(cpu)
+                map[cpu] = -1;
+
+        /*
+         * Spread queues among present CPUs first for minimizing
+         * count of dead queues which are mapped by all un-present CPUs
+         */
+        for_each_present_cpu(cpu) {
+                if (q >= nr_queues)
+                        break;
+                map[cpu] = queue_index(qmap, nr_queues, q++);
+        }
 
         for_each_possible_cpu(cpu) {
+                if (map[cpu] != -1)
+                        continue;
                 /*
                  * First do sequential mapping between CPUs and queues.
                  * In case we still have CPUs to map, and we have some number of
-                 * threads per cores then map sibling threads to the same queue for
-                 * performace optimizations.
+                 * threads per cores then map sibling threads to the same queue
+                 * for performance optimizations.
                  */
-                if (cpu < nr_queues) {
-                        map[cpu] = cpu_to_queue_index(nr_queues, cpu);
+                if (q < nr_queues) {
+                        map[cpu] = queue_index(qmap, nr_queues, q++);
                 } else {
                         first_sibling = get_first_sibling(cpu);
                         if (first_sibling == cpu)
-                                map[cpu] = cpu_to_queue_index(nr_queues, cpu);
+                                map[cpu] = queue_index(qmap, nr_queues, q++);
                         else
                                 map[cpu] = map[first_sibling];
                 }
@@ -58,17 +75,21 @@
 }
 EXPORT_SYMBOL_GPL(blk_mq_map_queues);
 
-/*
+/**
+ * blk_mq_hw_queue_to_node - Look up the memory node for a hardware queue index
+ * @qmap: CPU to hardware queue map.
+ * @index: hardware queue index.
+ *
  * We have no quick way of doing reverse lookups. This is only used at
  * queue init time, so runtime isn't important.
  */
-int blk_mq_hw_queue_to_node(unsigned int *mq_map, unsigned int index)
+int blk_mq_hw_queue_to_node(struct blk_mq_queue_map *qmap, unsigned int index)
 {
         int i;
 
         for_each_possible_cpu(i) {
-                if (index == mq_map[i])
-                        return local_memory_node(cpu_to_node(i));
+                if (index == qmap->mq_map[i])
+                        return cpu_to_node(i);
         }
 
         return NUMA_NO_NODE;