/* block/blk-mq-sysfs.c */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/backing-dev.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/mm.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/workqueue.h>
#include <linux/smp.h>

#include <linux/blk-mq.h>
#include "blk-mq.h"
#include "blk-mq-tag.h"

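/*
 * The kobjects managed here are embedded in the request_queue and in the
 * per-CPU/per-hardware contexts, whose memory is freed by the blk-mq core,
 * so there is nothing for the release callback to do.
 */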
static void blk_mq_sysfs_release(struct kobject *kobj)
{
}

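/*
 * Typed sysfs attributes: each entry pairs a generic struct attribute with
 * show/store callbacks that take the software (blk_mq_ctx) or hardware
 * (blk_mq_hw_ctx) context directly, so individual handlers never deal with
 * raw kobjects.
 */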
struct blk_mq_ctx_sysfs_entry {
        struct attribute attr;
        ssize_t (*show)(struct blk_mq_ctx *, char *);
        ssize_t (*store)(struct blk_mq_ctx *, const char *, size_t);
};

struct blk_mq_hw_ctx_sysfs_entry {
        struct attribute attr;
        ssize_t (*show)(struct blk_mq_hw_ctx *, char *);
        ssize_t (*store)(struct blk_mq_hw_ctx *, const char *, size_t);
};

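/*
 * Generic show/store wrappers: recover the typed entry and context via
 * container_of(), then call the handler under q->sysfs_lock.  Attributes
 * without a handler return -EIO; a dying queue returns -ENOENT.
 */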
static ssize_t blk_mq_sysfs_show(struct kobject *kobj, struct attribute *attr,
                                 char *page)
{
        struct blk_mq_ctx_sysfs_entry *entry;
        struct blk_mq_ctx *ctx;
        struct request_queue *q;
        ssize_t res;

        entry = container_of(attr, struct blk_mq_ctx_sysfs_entry, attr);
        ctx = container_of(kobj, struct blk_mq_ctx, kobj);
        q = ctx->queue;

        if (!entry->show)
                return -EIO;

        res = -ENOENT;
        mutex_lock(&q->sysfs_lock);
        if (!blk_queue_dying(q))
                res = entry->show(ctx, page);
        mutex_unlock(&q->sysfs_lock);
        return res;
}

static ssize_t blk_mq_sysfs_store(struct kobject *kobj, struct attribute *attr,
                                  const char *page, size_t length)
{
        struct blk_mq_ctx_sysfs_entry *entry;
        struct blk_mq_ctx *ctx;
        struct request_queue *q;
        ssize_t res;

        entry = container_of(attr, struct blk_mq_ctx_sysfs_entry, attr);
        ctx = container_of(kobj, struct blk_mq_ctx, kobj);
        q = ctx->queue;

        if (!entry->store)
                return -EIO;

        res = -ENOENT;
        mutex_lock(&q->sysfs_lock);
        if (!blk_queue_dying(q))
                res = entry->store(ctx, page, length);
        mutex_unlock(&q->sysfs_lock);
        return res;
}

static ssize_t blk_mq_hw_sysfs_show(struct kobject *kobj,
                                    struct attribute *attr, char *page)
{
        struct blk_mq_hw_ctx_sysfs_entry *entry;
        struct blk_mq_hw_ctx *hctx;
        struct request_queue *q;
        ssize_t res;

        entry = container_of(attr, struct blk_mq_hw_ctx_sysfs_entry, attr);
        hctx = container_of(kobj, struct blk_mq_hw_ctx, kobj);
        q = hctx->queue;

        if (!entry->show)
                return -EIO;

        res = -ENOENT;
        mutex_lock(&q->sysfs_lock);
        if (!blk_queue_dying(q))
                res = entry->show(hctx, page);
        mutex_unlock(&q->sysfs_lock);
        return res;
}

static ssize_t blk_mq_hw_sysfs_store(struct kobject *kobj,
                                     struct attribute *attr, const char *page,
                                     size_t length)
{
        struct blk_mq_hw_ctx_sysfs_entry *entry;
        struct blk_mq_hw_ctx *hctx;
        struct request_queue *q;
        ssize_t res;

        entry = container_of(attr, struct blk_mq_hw_ctx_sysfs_entry, attr);
        hctx = container_of(kobj, struct blk_mq_hw_ctx, kobj);
        q = hctx->queue;

        if (!entry->store)
                return -EIO;

        res = -ENOENT;
        mutex_lock(&q->sysfs_lock);
        if (!blk_queue_dying(q))
                res = entry->store(hctx, page, length);
        mutex_unlock(&q->sysfs_lock);
        return res;
}

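/*
 * Per-CPU software context statistics, shown in each hardware context's
 * cpu<N> directory.  Dispatched and completed are kept as two-element
 * arrays and printed as a pair of counts.
 */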
static ssize_t blk_mq_sysfs_dispatched_show(struct blk_mq_ctx *ctx, char *page)
{
        return sprintf(page, "%lu %lu\n", ctx->rq_dispatched[1],
                                ctx->rq_dispatched[0]);
}

static ssize_t blk_mq_sysfs_merged_show(struct blk_mq_ctx *ctx, char *page)
{
        return sprintf(page, "%lu\n", ctx->rq_merged);
}

static ssize_t blk_mq_sysfs_completed_show(struct blk_mq_ctx *ctx, char *page)
{
        return sprintf(page, "%lu %lu\n", ctx->rq_completed[1],
                                ctx->rq_completed[0]);
}

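/*
 * Dump the requests on @list as one pointer per line, keeping the output
 * within a single page; if the list is too long to fit, the tail is
 * replaced by a "..." marker.
 */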
static ssize_t sysfs_list_show(char *page, struct list_head *list, char *msg)
{
        struct request *rq;
        int len = snprintf(page, PAGE_SIZE - 1, "%s:\n", msg);

        list_for_each_entry(rq, list, queuelist) {
                const int rq_len = 2 * sizeof(rq) + 2;

                /* if the output will be truncated */
                if (PAGE_SIZE - 1 < len + rq_len) {
                        /* backspacing if it can't hold '\t...\n' */
                        if (PAGE_SIZE - 1 < len + 5)
                                len -= rq_len;
                        len += snprintf(page + len, PAGE_SIZE - 1 - len,
                                        "\t...\n");
                        break;
                }
                len += snprintf(page + len, PAGE_SIZE - 1 - len,
                                "\t%p\n", rq);
        }

        return len;
}

static ssize_t blk_mq_sysfs_rq_list_show(struct blk_mq_ctx *ctx, char *page)
{
        ssize_t ret;

        spin_lock(&ctx->lock);
        ret = sysfs_list_show(page, &ctx->rq_list, "CTX pending");
        spin_unlock(&ctx->lock);

        return ret;
}

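/* io_poll statistics: how often polling was invoked and how often it succeeded. */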
static ssize_t blk_mq_hw_sysfs_poll_show(struct blk_mq_hw_ctx *hctx, char *page)
{
        return sprintf(page, "invoked=%lu, success=%lu\n", hctx->poll_invoked, hctx->poll_success);
}

static ssize_t blk_mq_hw_sysfs_queued_show(struct blk_mq_hw_ctx *hctx,
                                           char *page)
{
        return sprintf(page, "%lu\n", hctx->queued);
}

static ssize_t blk_mq_hw_sysfs_run_show(struct blk_mq_hw_ctx *hctx, char *page)
{
        return sprintf(page, "%lu\n", hctx->run);
}

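/*
 * Histogram of dispatch batch sizes: the first row counts runs that
 * dispatched no requests, and each following row is labelled with the
 * power-of-two lower bound of its bucket.
 */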
static ssize_t blk_mq_hw_sysfs_dispatched_show(struct blk_mq_hw_ctx *hctx,
                                               char *page)
{
        char *start_page = page;
        int i;

        page += sprintf(page, "%8u\t%lu\n", 0U, hctx->dispatched[0]);

        for (i = 1; i < BLK_MQ_MAX_DISPATCH_ORDER; i++) {
                unsigned long d = 1U << (i - 1);

                page += sprintf(page, "%8lu\t%lu\n", d, hctx->dispatched[i]);
        }

        return page - start_page;
}

static ssize_t blk_mq_hw_sysfs_rq_list_show(struct blk_mq_hw_ctx *hctx,
                                            char *page)
{
        ssize_t ret;

        spin_lock(&hctx->lock);
        ret = sysfs_list_show(page, &hctx->dispatch, "HCTX pending");
        spin_unlock(&hctx->lock);

        return ret;
}

static ssize_t blk_mq_hw_sysfs_tags_show(struct blk_mq_hw_ctx *hctx, char *page)
{
        return blk_mq_tag_sysfs_show(hctx->tags, page);
}

static ssize_t blk_mq_hw_sysfs_active_show(struct blk_mq_hw_ctx *hctx, char *page)
{
        return sprintf(page, "%u\n", atomic_read(&hctx->nr_active));
}

static ssize_t blk_mq_hw_sysfs_cpus_show(struct blk_mq_hw_ctx *hctx, char *page)
{
        unsigned int i, first = 1;
        ssize_t ret = 0;

        for_each_cpu(i, hctx->cpumask) {
                if (first)
                        ret += sprintf(ret + page, "%u", i);
                else
                        ret += sprintf(ret + page, ", %u", i);

                first = 0;
        }

        ret += sprintf(ret + page, "\n");
        return ret;
}

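/*
 * Attribute tables.  Everything exported here is read-only (S_IRUGO), so
 * only the ->show handlers are wired up.
 */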
static struct blk_mq_ctx_sysfs_entry blk_mq_sysfs_dispatched = {
        .attr = {.name = "dispatched", .mode = S_IRUGO },
        .show = blk_mq_sysfs_dispatched_show,
};
static struct blk_mq_ctx_sysfs_entry blk_mq_sysfs_merged = {
        .attr = {.name = "merged", .mode = S_IRUGO },
        .show = blk_mq_sysfs_merged_show,
};
static struct blk_mq_ctx_sysfs_entry blk_mq_sysfs_completed = {
        .attr = {.name = "completed", .mode = S_IRUGO },
        .show = blk_mq_sysfs_completed_show,
};
static struct blk_mq_ctx_sysfs_entry blk_mq_sysfs_rq_list = {
        .attr = {.name = "rq_list", .mode = S_IRUGO },
        .show = blk_mq_sysfs_rq_list_show,
};

static struct attribute *default_ctx_attrs[] = {
        &blk_mq_sysfs_dispatched.attr,
        &blk_mq_sysfs_merged.attr,
        &blk_mq_sysfs_completed.attr,
        &blk_mq_sysfs_rq_list.attr,
        NULL,
};

static struct blk_mq_hw_ctx_sysfs_entry blk_mq_hw_sysfs_queued = {
        .attr = {.name = "queued", .mode = S_IRUGO },
        .show = blk_mq_hw_sysfs_queued_show,
};
static struct blk_mq_hw_ctx_sysfs_entry blk_mq_hw_sysfs_run = {
        .attr = {.name = "run", .mode = S_IRUGO },
        .show = blk_mq_hw_sysfs_run_show,
};
static struct blk_mq_hw_ctx_sysfs_entry blk_mq_hw_sysfs_dispatched = {
        .attr = {.name = "dispatched", .mode = S_IRUGO },
        .show = blk_mq_hw_sysfs_dispatched_show,
};
static struct blk_mq_hw_ctx_sysfs_entry blk_mq_hw_sysfs_active = {
        .attr = {.name = "active", .mode = S_IRUGO },
        .show = blk_mq_hw_sysfs_active_show,
};
static struct blk_mq_hw_ctx_sysfs_entry blk_mq_hw_sysfs_pending = {
        .attr = {.name = "pending", .mode = S_IRUGO },
        .show = blk_mq_hw_sysfs_rq_list_show,
};
static struct blk_mq_hw_ctx_sysfs_entry blk_mq_hw_sysfs_tags = {
        .attr = {.name = "tags", .mode = S_IRUGO },
        .show = blk_mq_hw_sysfs_tags_show,
};
static struct blk_mq_hw_ctx_sysfs_entry blk_mq_hw_sysfs_cpus = {
        .attr = {.name = "cpu_list", .mode = S_IRUGO },
        .show = blk_mq_hw_sysfs_cpus_show,
};
static struct blk_mq_hw_ctx_sysfs_entry blk_mq_hw_sysfs_poll = {
        .attr = {.name = "io_poll", .mode = S_IRUGO },
        .show = blk_mq_hw_sysfs_poll_show,
};

static struct attribute *default_hw_ctx_attrs[] = {
        &blk_mq_hw_sysfs_queued.attr,
        &blk_mq_hw_sysfs_run.attr,
        &blk_mq_hw_sysfs_dispatched.attr,
        &blk_mq_hw_sysfs_pending.attr,
        &blk_mq_hw_sysfs_tags.attr,
        &blk_mq_hw_sysfs_cpus.attr,
        &blk_mq_hw_sysfs_active.attr,
        &blk_mq_hw_sysfs_poll.attr,
        NULL,
};

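/*
 * kobj_type glue: the queue's mq kobject and the per-CPU ctx kobjects use
 * the ctx sysfs ops, while the hardware context kobjects get their own.
 * The default attribute arrays populate each directory automatically when
 * the kobject is added.
 */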
static const struct sysfs_ops blk_mq_sysfs_ops = {
        .show   = blk_mq_sysfs_show,
        .store  = blk_mq_sysfs_store,
};

static const struct sysfs_ops blk_mq_hw_sysfs_ops = {
        .show   = blk_mq_hw_sysfs_show,
        .store  = blk_mq_hw_sysfs_store,
};

static struct kobj_type blk_mq_ktype = {
        .sysfs_ops      = &blk_mq_sysfs_ops,
        .release        = blk_mq_sysfs_release,
};

static struct kobj_type blk_mq_ctx_ktype = {
        .sysfs_ops      = &blk_mq_sysfs_ops,
        .default_attrs  = default_ctx_attrs,
        .release        = blk_mq_sysfs_release,
};

static struct kobj_type blk_mq_hw_ktype = {
        .sysfs_ops      = &blk_mq_hw_sysfs_ops,
        .default_attrs  = default_hw_ctx_attrs,
        .release        = blk_mq_sysfs_release,
};

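/*
 * Register/unregister the sysfs directory of a single hardware context:
 * the hctx kobject itself (named after its queue number) plus one cpu<N>
 * kobject per mapped software context.  Hardware contexts with no mapped
 * CPUs are skipped.
 */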
static void blk_mq_unregister_hctx(struct blk_mq_hw_ctx *hctx)
{
        struct blk_mq_ctx *ctx;
        int i;

        if (!hctx->nr_ctx)
                return;

        hctx_for_each_ctx(hctx, ctx, i)
                kobject_del(&ctx->kobj);

        kobject_del(&hctx->kobj);
}

static int blk_mq_register_hctx(struct blk_mq_hw_ctx *hctx)
{
        struct request_queue *q = hctx->queue;
        struct blk_mq_ctx *ctx;
        int i, ret;

        if (!hctx->nr_ctx)
                return 0;

        ret = kobject_add(&hctx->kobj, &q->mq_kobj, "%u", hctx->queue_num);
        if (ret)
                return ret;

        hctx_for_each_ctx(hctx, ctx, i) {
                ret = kobject_add(&ctx->kobj, &hctx->kobj, "cpu%u", ctx->cpu);
                if (ret)
                        break;
        }

        return ret;
}

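/*
 * Tear down in the reverse order of registration: unregister every hctx,
 * drop the ctx and hctx kobject references, remove and drop the mq kobject,
 * then release the device reference taken in blk_mq_register_disk().
 */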
static void __blk_mq_unregister_disk(struct gendisk *disk)
{
        struct request_queue *q = disk->queue;
        struct blk_mq_hw_ctx *hctx;
        struct blk_mq_ctx *ctx;
        int i, j;

        queue_for_each_hw_ctx(q, hctx, i) {
                blk_mq_unregister_hctx(hctx);

                hctx_for_each_ctx(hctx, ctx, j)
                        kobject_put(&ctx->kobj);

                kobject_put(&hctx->kobj);
        }

        kobject_uevent(&q->mq_kobj, KOBJ_REMOVE);
        kobject_del(&q->mq_kobj);
        kobject_put(&q->mq_kobj);

        kobject_put(&disk_to_dev(disk)->kobj);

        q->mq_sysfs_init_done = false;
}

void blk_mq_unregister_disk(struct gendisk *disk)
{
        blk_mq_disable_hotplug();
        __blk_mq_unregister_disk(disk);
        blk_mq_enable_hotplug();
}

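/*
 * Kobject initialisation: blk_mq_hctx_kobj_init() lets the blk-mq core set
 * up each hardware context's kobject; the mq kobject and the per-CPU ctx
 * kobjects are initialised here just before registration.
 */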
void blk_mq_hctx_kobj_init(struct blk_mq_hw_ctx *hctx)
{
        kobject_init(&hctx->kobj, &blk_mq_hw_ktype);
}

static void blk_mq_sysfs_init(struct request_queue *q)
{
        struct blk_mq_ctx *ctx;
        int cpu;

        kobject_init(&q->mq_kobj, &blk_mq_ktype);

        for_each_possible_cpu(cpu) {
                ctx = per_cpu_ptr(q->queue_ctx, cpu);
                kobject_init(&ctx->kobj, &blk_mq_ctx_ktype);
        }
}

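/*
 * Create the "mq" directory under the disk's device kobject (taking a
 * reference on it), announce it with a KOBJ_ADD uevent and register every
 * hardware context.  CPU hotplug is disabled across the walk so the
 * ctx<->hctx mapping cannot change underneath us; on failure everything
 * registered so far is torn down again.
 */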
int blk_mq_register_disk(struct gendisk *disk)
{
        struct device *dev = disk_to_dev(disk);
        struct request_queue *q = disk->queue;
        struct blk_mq_hw_ctx *hctx;
        int ret, i;

        blk_mq_disable_hotplug();

        blk_mq_sysfs_init(q);

        ret = kobject_add(&q->mq_kobj, kobject_get(&dev->kobj), "%s", "mq");
        if (ret < 0)
                goto out;

        kobject_uevent(&q->mq_kobj, KOBJ_ADD);

        queue_for_each_hw_ctx(q, hctx, i) {
                ret = blk_mq_register_hctx(hctx);
                if (ret)
                        break;
        }

        if (ret)
                __blk_mq_unregister_disk(disk);
        else
                q->mq_sysfs_init_done = true;
out:
        blk_mq_enable_hotplug();

        return ret;
}
EXPORT_SYMBOL_GPL(blk_mq_register_disk);

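/*
 * blk_mq_sysfs_unregister()/blk_mq_sysfs_register() remove or re-add the
 * per-hctx directories when the hardware context mapping changes after the
 * initial registration (for example across a queue reinit).  They are
 * no-ops until blk_mq_register_disk() has completed (mq_sysfs_init_done).
 */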
void blk_mq_sysfs_unregister(struct request_queue *q)
{
        struct blk_mq_hw_ctx *hctx;
        int i;

        if (!q->mq_sysfs_init_done)
                return;

        queue_for_each_hw_ctx(q, hctx, i)
                blk_mq_unregister_hctx(hctx);
}

int blk_mq_sysfs_register(struct request_queue *q)
{
        struct blk_mq_hw_ctx *hctx;
        int i, ret = 0;

        if (!q->mq_sysfs_init_done)
                return ret;

        queue_for_each_hw_ctx(q, hctx, i) {
                ret = blk_mq_register_hctx(hctx);
                if (ret)
                        break;
        }

        return ret;
}