Book a Demo!
CoCalc Logo Icon
StoreFeaturesDocsShareSupportNewsAboutPoliciesSign UpSign In
torvalds
GitHub Repository: torvalds/linux
Path: blob/master/block/blk-mq-sysfs.c
29264 views
1
// SPDX-License-Identifier: GPL-2.0
2
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/backing-dev.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/mm.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/sysfs.h>
#include <linux/workqueue.h>
#include <linux/smp.h>

#include "blk.h"
#include "blk-mq.h"
15
16
static void blk_mq_sysfs_release(struct kobject *kobj)
17
{
18
struct blk_mq_ctxs *ctxs = container_of(kobj, struct blk_mq_ctxs, kobj);
19
20
free_percpu(ctxs->queue_ctx);
21
kfree(ctxs);
22
}
23
24
/*
 * Release handler for a per-CPU software context kobject.  The ctx itself
 * lives inside the per-CPU area owned by blk_mq_ctxs, so nothing is freed
 * here; we only drop the parent reference taken in blk_mq_sysfs_init().
 */
static void blk_mq_ctx_sysfs_release(struct kobject *kobj)
{
	struct blk_mq_ctx *ctx = container_of(kobj, struct blk_mq_ctx, kobj);

	/* ctx->ctxs won't be released until all ctx are freed */
	kobject_put(&ctx->ctxs->kobj);
}
31
32
static void blk_mq_hw_sysfs_release(struct kobject *kobj)
33
{
34
struct blk_mq_hw_ctx *hctx = container_of(kobj, struct blk_mq_hw_ctx,
35
kobj);
36
37
sbitmap_free(&hctx->ctx_map);
38
free_cpumask_var(hctx->cpumask);
39
kfree(hctx->ctxs);
40
kfree(hctx);
41
}
42
43
/*
 * One sysfs attribute of a hardware queue.  @show formats the value into
 * the page buffer; there is no store callback, so every hctx attribute is
 * read-only.
 */
struct blk_mq_hw_ctx_sysfs_entry {
	struct attribute attr;
	ssize_t (*show)(struct blk_mq_hw_ctx *, char *);
};
47
48
static ssize_t blk_mq_hw_sysfs_show(struct kobject *kobj,
49
struct attribute *attr, char *page)
50
{
51
struct blk_mq_hw_ctx_sysfs_entry *entry;
52
struct blk_mq_hw_ctx *hctx;
53
struct request_queue *q;
54
ssize_t res;
55
56
entry = container_of(attr, struct blk_mq_hw_ctx_sysfs_entry, attr);
57
hctx = container_of(kobj, struct blk_mq_hw_ctx, kobj);
58
q = hctx->queue;
59
60
if (!entry->show)
61
return -EIO;
62
63
mutex_lock(&q->elevator_lock);
64
res = entry->show(hctx, page);
65
mutex_unlock(&q->elevator_lock);
66
return res;
67
}
68
69
static ssize_t blk_mq_hw_sysfs_nr_tags_show(struct blk_mq_hw_ctx *hctx,
70
char *page)
71
{
72
return sprintf(page, "%u\n", hctx->tags->nr_tags);
73
}
74
75
static ssize_t blk_mq_hw_sysfs_nr_reserved_tags_show(struct blk_mq_hw_ctx *hctx,
76
char *page)
77
{
78
return sprintf(page, "%u\n", hctx->tags->nr_reserved_tags);
79
}
80
81
/*
 * "cpu_list" attribute: the CPUs mapped to this hardware queue as a
 * comma-separated list (e.g. "0, 1, 2\n").  Output that would overflow
 * the page is truncated.
 */
static ssize_t blk_mq_hw_sysfs_cpus_show(struct blk_mq_hw_ctx *hctx, char *page)
{
	/* Reserve the last byte of the page for the trailing newline. */
	const size_t size = PAGE_SIZE - 1;
	unsigned int i, first = 1;
	int ret = 0, pos = 0;

	for_each_cpu(i, hctx->cpumask) {
		if (first)
			ret = snprintf(pos + page, size - pos, "%u", i);
		else
			ret = snprintf(pos + page, size - pos, ", %u", i);

		/*
		 * snprintf() returns the would-be length, so ret >=
		 * remaining space means this CPU was truncated: stop
		 * without advancing pos past valid output.
		 */
		if (ret >= size - pos)
			break;

		first = 0;
		pos += ret;
	}

	/* size + 1 == PAGE_SIZE: the newline may use the reserved byte. */
	ret = snprintf(pos + page, size + 1 - pos, "\n");
	return pos + ret;
}
103
104
/* Read-only attribute: tag count of the hctx's tag set. */
static struct blk_mq_hw_ctx_sysfs_entry blk_mq_hw_sysfs_nr_tags = {
	.attr = {.name = "nr_tags", .mode = 0444 },
	.show = blk_mq_hw_sysfs_nr_tags_show,
};
/* Read-only attribute: reserved tag count of the hctx's tag set. */
static struct blk_mq_hw_ctx_sysfs_entry blk_mq_hw_sysfs_nr_reserved_tags = {
	.attr = {.name = "nr_reserved_tags", .mode = 0444 },
	.show = blk_mq_hw_sysfs_nr_reserved_tags_show,
};
/* Read-only attribute: comma-separated list of CPUs mapped to the hctx. */
static struct blk_mq_hw_ctx_sysfs_entry blk_mq_hw_sysfs_cpus = {
	.attr = {.name = "cpu_list", .mode = 0444 },
	.show = blk_mq_hw_sysfs_cpus_show,
};

/* Attributes created for every hardware queue directory. */
static struct attribute *default_hw_ctx_attrs[] = {
	&blk_mq_hw_sysfs_nr_tags.attr,
	&blk_mq_hw_sysfs_nr_reserved_tags.attr,
	&blk_mq_hw_sysfs_cpus.attr,
	NULL,
};
/* Generates default_hw_ctx_group/default_hw_ctx_groups for the ktype. */
ATTRIBUTE_GROUPS(default_hw_ctx);

static const struct sysfs_ops blk_mq_hw_sysfs_ops = {
	.show	= blk_mq_hw_sysfs_show,
};

/* ktype of q->mq_kobj (the "mq" directory / blk_mq_ctxs container). */
static const struct kobj_type blk_mq_ktype = {
	.release	= blk_mq_sysfs_release,
};

/* ktype of a per-CPU software context ("cpuN") kobject. */
static const struct kobj_type blk_mq_ctx_ktype = {
	.release	= blk_mq_ctx_sysfs_release,
};

/* ktype of a hardware queue kobject: carries the attribute files. */
static const struct kobj_type blk_mq_hw_ktype = {
	.sysfs_ops	= &blk_mq_hw_sysfs_ops,
	.default_groups = default_hw_ctx_groups,
	.release	= blk_mq_hw_sysfs_release,
};
142
143
/*
 * Remove a hardware queue's kobject and its per-CPU ctx children from
 * sysfs.  Safe to call on a hctx that was never — or only partially —
 * registered, since state_in_sysfs is checked before each kobject_del().
 */
static void blk_mq_unregister_hctx(struct blk_mq_hw_ctx *hctx)
{
	struct blk_mq_ctx *ctx;
	int i;

	/* hctxs without mapped software contexts were never registered. */
	if (!hctx->nr_ctx)
		return;

	hctx_for_each_ctx(hctx, ctx, i)
		if (ctx->kobj.state_in_sysfs)
			kobject_del(&ctx->kobj);

	if (hctx->kobj.state_in_sysfs)
		kobject_del(&hctx->kobj);
}
158
159
/*
 * Add a hardware queue to sysfs: a directory named after its queue number
 * under the queue's mq kobject, plus one "cpuN" child per mapped software
 * context.  On failure everything added so far is deleted again.
 *
 * Returns 0 on success or a negative errno from kobject_add().
 */
static int blk_mq_register_hctx(struct blk_mq_hw_ctx *hctx)
{
	struct request_queue *q = hctx->queue;
	struct blk_mq_ctx *ctx;
	int i, j, ret;

	/* hctxs without mapped software contexts stay invisible in sysfs. */
	if (!hctx->nr_ctx)
		return 0;

	ret = kobject_add(&hctx->kobj, q->mq_kobj, "%u", hctx->queue_num);
	if (ret)
		return ret;

	hctx_for_each_ctx(hctx, ctx, i) {
		ret = kobject_add(&ctx->kobj, &hctx->kobj, "cpu%u", ctx->cpu);
		if (ret)
			goto out;
	}

	return 0;
out:
	/* Unwind: delete only the ctx kobjects added before the failure. */
	hctx_for_each_ctx(hctx, ctx, j) {
		if (j < i)
			kobject_del(&ctx->kobj);
	}
	kobject_del(&hctx->kobj);
	return ret;
}
187
188
/*
 * Bind a hardware queue's kobject to the hw ktype.  The kobject is only
 * added to sysfs later, by blk_mq_register_hctx().
 */
void blk_mq_hctx_kobj_init(struct blk_mq_hw_ctx *hctx)
{
	kobject_init(&hctx->kobj, &blk_mq_hw_ktype);
}
192
193
/*
 * Drop the references taken in blk_mq_sysfs_init(): one mq_kobj reference
 * per possible-CPU ctx, plus the final put of mq_kobj itself.  Once the
 * last reference is gone, blk_mq_sysfs_release() frees the per-CPU
 * context array.
 */
void blk_mq_sysfs_deinit(struct request_queue *q)
{
	struct blk_mq_ctx *ctx;
	int cpu;

	for_each_possible_cpu(cpu) {
		ctx = per_cpu_ptr(q->queue_ctx, cpu);
		kobject_put(&ctx->kobj);
	}
	kobject_put(q->mq_kobj);
}
204
205
/*
 * Initialise the queue's mq kobject and one kobject per possible-CPU
 * software context.  Each ctx pins a reference on mq_kobj so the shared
 * container outlives every per-CPU ctx; that reference is dropped in
 * blk_mq_ctx_sysfs_release() when the ctx kobject is released.
 */
void blk_mq_sysfs_init(struct request_queue *q)
{
	struct blk_mq_ctx *ctx;
	int cpu;

	kobject_init(q->mq_kobj, &blk_mq_ktype);

	for_each_possible_cpu(cpu) {
		ctx = per_cpu_ptr(q->queue_ctx, cpu);

		/* One mq_kobj reference per ctx; paired with deinit/release. */
		kobject_get(q->mq_kobj);
		kobject_init(&ctx->kobj, &blk_mq_ctx_ktype);
	}
}
219
220
/*
 * Publish the disk's multiqueue hierarchy in sysfs: add the "mq"
 * directory under the disk device, announce it with a uevent, then
 * register every hardware queue beneath it.  The hctx walk runs under
 * tag_set->tag_list_lock (NOTE(review): presumably to keep the hctx set
 * stable against concurrent updates — confirm against blk-mq core).
 *
 * Returns 0 on success; on failure the partially built hierarchy is torn
 * down and a negative errno is returned.
 */
int blk_mq_sysfs_register(struct gendisk *disk)
{
	struct request_queue *q = disk->queue;
	struct blk_mq_hw_ctx *hctx;
	unsigned long i, j;
	int ret;

	ret = kobject_add(q->mq_kobj, &disk_to_dev(disk)->kobj, "mq");
	if (ret < 0)
		return ret;

	kobject_uevent(q->mq_kobj, KOBJ_ADD);

	mutex_lock(&q->tag_set->tag_list_lock);
	queue_for_each_hw_ctx(q, hctx, i) {
		ret = blk_mq_register_hctx(hctx);
		if (ret)
			goto out_unreg;
	}
	mutex_unlock(&q->tag_set->tag_list_lock);
	return 0;

out_unreg:
	/* Unwind: unregister only the hctxs added before the failure. */
	queue_for_each_hw_ctx(q, hctx, j) {
		if (j < i)
			blk_mq_unregister_hctx(hctx);
	}
	mutex_unlock(&q->tag_set->tag_list_lock);

	kobject_uevent(q->mq_kobj, KOBJ_REMOVE);
	kobject_del(q->mq_kobj);
	return ret;
}
253
254
/*
 * Inverse of blk_mq_sysfs_register(): remove every hardware queue under
 * tag_set->tag_list_lock, then announce removal and delete the "mq"
 * directory itself.
 */
void blk_mq_sysfs_unregister(struct gendisk *disk)
{
	struct request_queue *q = disk->queue;
	struct blk_mq_hw_ctx *hctx;
	unsigned long i;

	mutex_lock(&q->tag_set->tag_list_lock);
	queue_for_each_hw_ctx(q, hctx, i)
		blk_mq_unregister_hctx(hctx);
	mutex_unlock(&q->tag_set->tag_list_lock);

	kobject_uevent(q->mq_kobj, KOBJ_REMOVE);
	kobject_del(q->mq_kobj);
}
268
269
/*
 * Remove all hardware queue directories from sysfs while leaving the
 * "mq" directory itself in place.  A no-op when the queue was never
 * registered in sysfs.
 */
void blk_mq_sysfs_unregister_hctxs(struct request_queue *q)
{
	struct blk_mq_hw_ctx *hctx;
	unsigned long i;

	if (!blk_queue_registered(q))
		return;

	queue_for_each_hw_ctx(q, hctx, i)
		blk_mq_unregister_hctx(hctx);
}
280
281
int blk_mq_sysfs_register_hctxs(struct request_queue *q)
282
{
283
struct blk_mq_hw_ctx *hctx;
284
unsigned long i;
285
int ret = 0;
286
287
if (!blk_queue_registered(q))
288
goto out;
289
290
queue_for_each_hw_ctx(q, hctx, i) {
291
ret = blk_mq_register_hctx(hctx);
292
if (ret)
293
break;
294
}
295
296
out:
297
return ret;
298
}
299
300