// SPDX-License-Identifier: GPL-2.0
/*
* CPU <-> hardware queue mapping helpers
*
* Copyright (C) 2013-2014 Jens Axboe
*/
#include <linux/kernel.h>
#include <linux/threads.h>
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/cpu.h>
#include <linux/group_cpus.h>
#include <linux/device/bus.h>
#include <linux/sched/isolation.h>

#include "blk.h"
#include "blk-mq.h"

static unsigned int blk_mq_num_queues(const struct cpumask *mask,
				      unsigned int max_queues)
{
	unsigned int num;

	num = cpumask_weight(mask);
	return min_not_zero(num, max_queues);
}

/**
 * blk_mq_num_possible_queues - Calc nr of queues for multiqueue devices
 * @max_queues:	The maximum number of queues the hardware/driver
 *		supports. If max_queues is 0, the argument is
 *		ignored.
 *
 * Calculates the number of queues to be used for a multiqueue
 * device based on the number of possible CPUs.
 */
unsigned int blk_mq_num_possible_queues(unsigned int max_queues)
{
	return blk_mq_num_queues(cpu_possible_mask, max_queues);
}
EXPORT_SYMBOL_GPL(blk_mq_num_possible_queues);
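
/*
 * Illustrative sketch, not part of this file: a driver would typically
 * size its tag set from the possible-CPU count so that the CPU-to-queue
 * mapping stays stable across CPU hotplug, e.g.
 *
 *	set->nr_hw_queues = blk_mq_num_possible_queues(hw_max_queues);
 *
 * where 'set' is the driver's struct blk_mq_tag_set and 'hw_max_queues'
 * is a hypothetical per-device queue limit (0 for no limit).
 */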

/**
 * blk_mq_num_online_queues - Calc nr of queues for multiqueue devices
 * @max_queues:	The maximum number of queues the hardware/driver
 *		supports. If max_queues is 0, the argument is
 *		ignored.
 *
 * Calculates the number of queues to be used for a multiqueue
 * device based on the number of online CPUs.
 */
unsigned int blk_mq_num_online_queues(unsigned int max_queues)
{
	return blk_mq_num_queues(cpu_online_mask, max_queues);
}
EXPORT_SYMBOL_GPL(blk_mq_num_online_queues);
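
/*
 * Illustrative sketch, not part of this file: the online variant suits
 * resources sized for the CPUs currently running, e.g. capping how many
 * IRQ vectors a driver requests:
 *
 *	nr_vecs = blk_mq_num_online_queues(max_hw_vectors);
 *
 * 'max_hw_vectors' is a hypothetical hardware limit on the vector count.
 */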

/**
 * blk_mq_map_queues - Create a CPU to hardware queue mapping
 * @qmap:	CPU to hardware queue map
 *
 * Spread the possible CPUs evenly over the queues using
 * group_cpus_evenly(). If the group allocation fails, fall back to
 * mapping every CPU to the first queue.
 */
void blk_mq_map_queues(struct blk_mq_queue_map *qmap)
{
	const struct cpumask *masks;
	unsigned int queue, cpu, nr_masks;

	masks = group_cpus_evenly(qmap->nr_queues, &nr_masks);
	if (!masks) {
		for_each_possible_cpu(cpu)
			qmap->mq_map[cpu] = qmap->queue_offset;
		return;
	}

	for (queue = 0; queue < qmap->nr_queues; queue++) {
		for_each_cpu(cpu, &masks[queue % nr_masks])
			qmap->mq_map[cpu] = qmap->queue_offset + queue;
	}
	kfree(masks);
}
EXPORT_SYMBOL_GPL(blk_mq_map_queues);
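
/*
 * Worked example, assuming a machine with 8 possible CPUs and
 * qmap->nr_queues = 2: group_cpus_evenly() typically returns two groups
 * of four CPUs, so (ignoring queue_offset) mq_map ends up as
 *
 *	cpu:    0  1  2  3  4  5  6  7
 *	queue:  0  0  0  0  1  1  1  1
 *
 * The exact grouping depends on the CPU topology.
 */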

/**
 * blk_mq_hw_queue_to_node - Look up the memory node for a hardware queue index
 * @qmap:	CPU to hardware queue map.
 * @index:	hardware queue index.
 *
 * We have no quick way of doing reverse lookups. This is only used at
 * queue init time, so runtime isn't important.
 */
int blk_mq_hw_queue_to_node(struct blk_mq_queue_map *qmap, unsigned int index)
{
	int i;

	for_each_possible_cpu(i) {
		if (index == qmap->mq_map[i])
			return cpu_to_node(i);
	}

	return NUMA_NO_NODE;
}
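
/*
 * Illustrative sketch, not part of this file: queue init code can use the
 * reverse lookup to place per-queue data on the queue's home node:
 *
 *	int node = blk_mq_hw_queue_to_node(qmap, hctx_idx);
 *	data = kzalloc_node(size, GFP_KERNEL, node);
 *
 * kzalloc_node() accepts NUMA_NO_NODE, so the not-found case needs no
 * special handling. 'hctx_idx', 'data' and 'size' are placeholders.
 */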

/**
 * blk_mq_map_hw_queues - Create a CPU to hardware queue mapping
 * @qmap:	CPU to hardware queue map
 * @dev:	The device to map queues for
 * @offset:	Queue offset to use for the device
 *
 * Create a CPU to hardware queue mapping in @qmap. The struct bus_type
 * irq_get_affinity callback will be used to retrieve the affinity. If the
 * bus does not provide the callback, or no affinity mask is available for
 * a queue, fall back to the generic mapping.
 */
void blk_mq_map_hw_queues(struct blk_mq_queue_map *qmap,
			  struct device *dev, unsigned int offset)
{
	const struct cpumask *mask;
	unsigned int queue, cpu;

	if (!dev->bus->irq_get_affinity)
		goto fallback;

	for (queue = 0; queue < qmap->nr_queues; queue++) {
		mask = dev->bus->irq_get_affinity(dev, queue + offset);
		if (!mask)
			goto fallback;

		for_each_cpu(cpu, mask)
			qmap->mq_map[cpu] = qmap->queue_offset + queue;
	}

	return;

fallback:
	blk_mq_map_queues(qmap);
}
EXPORT_SYMBOL_GPL(blk_mq_map_hw_queues);
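
/*
 * Illustrative sketch, not part of this file: a PCI driver that spreads
 * its IRQs with PCI_IRQ_AFFINITY can reuse that affinity here from its
 * ->map_queues() callback (all 'foo' names are hypothetical):
 *
 *	static void foo_map_queues(struct blk_mq_tag_set *set)
 *	{
 *		struct foo_dev *foo = set->driver_data;
 *
 *		blk_mq_map_hw_queues(&set->map[HCTX_TYPE_DEFAULT],
 *				     &foo->pdev->dev, foo->first_io_vector);
 *	}
 */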