blkcg: restructure blkio_group configuration setting
[GitHub/mt8127/android_kernel_alcatel_ttab.git] / block / blk-cgroup.c
31e4c28d
VG
1/*
2 * Common Block IO controller cgroup interface
3 *
4 * Based on ideas and code from CFQ, CFS and BFQ:
5 * Copyright (C) 2003 Jens Axboe <axboe@kernel.dk>
6 *
7 * Copyright (C) 2008 Fabio Checconi <fabio@gandalf.sssup.it>
8 * Paolo Valente <paolo.valente@unimore.it>
9 *
10 * Copyright (C) 2009 Vivek Goyal <vgoyal@redhat.com>
11 * Nauman Rafique <nauman@google.com>
12 */
13#include <linux/ioprio.h>
22084190
VG
14#include <linux/seq_file.h>
15#include <linux/kdev_t.h>
9d6a986c 16#include <linux/module.h>
accee785 17#include <linux/err.h>
9195291e 18#include <linux/blkdev.h>
5a0e3ad6 19#include <linux/slab.h>
34d0f179 20#include <linux/genhd.h>
72e06c25 21#include <linux/delay.h>
9a9e8a26 22#include <linux/atomic.h>
72e06c25 23#include "blk-cgroup.h"
5efd6113 24#include "blk.h"
3e252066 25
84c124da
DS
26#define MAX_KEY_LEN 100
27
3e252066
VG
28static DEFINE_SPINLOCK(blkio_list_lock);
29static LIST_HEAD(blkio_list);
b1c35769 30
923adde1
TH
31static DEFINE_MUTEX(all_q_mutex);
32static LIST_HEAD(all_q_list);
33
1cd9e039
VG
34/* List of groups pending per cpu stats allocation */
35static DEFINE_SPINLOCK(alloc_list_lock);
36static LIST_HEAD(alloc_list);
37
38static void blkio_stat_alloc_fn(struct work_struct *);
39static DECLARE_DELAYED_WORK(blkio_stat_alloc_work, blkio_stat_alloc_fn);
40
31e4c28d 41struct blkio_cgroup blkio_root_cgroup = { .weight = 2*BLKIO_WEIGHT_DEFAULT };
9d6a986c
VG
42EXPORT_SYMBOL_GPL(blkio_root_cgroup);
43
035d10b2
TH
44static struct blkio_policy_type *blkio_policy[BLKIO_NR_POLICIES];
45
31e4c28d
VG
46struct blkio_cgroup *cgroup_to_blkio_cgroup(struct cgroup *cgroup)
47{
48 return container_of(cgroup_subsys_state(cgroup, blkio_subsys_id),
49 struct blkio_cgroup, css);
50}
9d6a986c 51EXPORT_SYMBOL_GPL(cgroup_to_blkio_cgroup);
31e4c28d 52
4f85cb96 53static struct blkio_cgroup *task_blkio_cgroup(struct task_struct *tsk)
70087dc3
VG
54{
55 return container_of(task_subsys_state(tsk, blkio_subsys_id),
56 struct blkio_cgroup, css);
57}
4f85cb96
TH
58
59struct blkio_cgroup *bio_blkio_cgroup(struct bio *bio)
60{
61 if (bio && bio->bi_css)
62 return container_of(bio->bi_css, struct blkio_cgroup, css);
63 return task_blkio_cgroup(current);
64}
65EXPORT_SYMBOL_GPL(bio_blkio_cgroup);
70087dc3 66
c1768268
TH
67static inline void blkio_update_group_weight(struct blkio_group *blkg,
68 int plid, unsigned int weight)
062a644d
VG
69{
70 struct blkio_policy_type *blkiop;
71
72 list_for_each_entry(blkiop, &blkio_list, list) {
73 /* If this policy does not own the blkg, do not send updates */
c1768268 74 if (blkiop->plid != plid)
062a644d
VG
75 continue;
76 if (blkiop->ops.blkio_update_group_weight_fn)
ca32aefc 77 blkiop->ops.blkio_update_group_weight_fn(blkg->q,
fe071437 78 blkg, weight);
062a644d
VG
79 }
80}
81
c1768268 82static inline void blkio_update_group_bps(struct blkio_group *blkg, int plid,
3a8b31d3 83 u64 bps, int rw)
4c9eefa1
VG
84{
85 struct blkio_policy_type *blkiop;
86
87 list_for_each_entry(blkiop, &blkio_list, list) {
88
89 /* If this policy does not own the blkg, do not send updates */
c1768268 90 if (blkiop->plid != plid)
4c9eefa1
VG
91 continue;
92
3a8b31d3 93 if (rw == READ && blkiop->ops.blkio_update_group_read_bps_fn)
ca32aefc 94 blkiop->ops.blkio_update_group_read_bps_fn(blkg->q,
fe071437 95 blkg, bps);
4c9eefa1 96
3a8b31d3 97 if (rw == WRITE && blkiop->ops.blkio_update_group_write_bps_fn)
ca32aefc 98 blkiop->ops.blkio_update_group_write_bps_fn(blkg->q,
fe071437 99 blkg, bps);
4c9eefa1
VG
100 }
101}
102
3a8b31d3
TH
103static inline void blkio_update_group_iops(struct blkio_group *blkg, int plid,
104 u64 iops, int rw)
7702e8f4
VG
105{
106 struct blkio_policy_type *blkiop;
107
108 list_for_each_entry(blkiop, &blkio_list, list) {
109
110 /* If this policy does not own the blkg, do not send updates */
c1768268 111 if (blkiop->plid != plid)
7702e8f4
VG
112 continue;
113
3a8b31d3 114 if (rw == READ && blkiop->ops.blkio_update_group_read_iops_fn)
ca32aefc 115 blkiop->ops.blkio_update_group_read_iops_fn(blkg->q,
fe071437 116 blkg, iops);
7702e8f4 117
3a8b31d3 118 if (rw == WRITE && blkiop->ops.blkio_update_group_write_iops_fn)
ca32aefc 119 blkiop->ops.blkio_update_group_write_iops_fn(blkg->q,
fe071437 120 blkg, iops);
7702e8f4
VG
121 }
122}
123
cdc1184c 124#ifdef CONFIG_DEBUG_BLK_CGROUP
edf1b879 125/* This should be called with the queue_lock held. */
812df48d 126static void blkio_set_start_group_wait_time(struct blkio_group *blkg,
c1768268
TH
127 struct blkio_policy_type *pol,
128 struct blkio_group *curr_blkg)
812df48d 129{
c1768268 130 struct blkg_policy_data *pd = blkg->pd[pol->plid];
549d3aa8
TH
131
132 if (blkio_blkg_waiting(&pd->stats))
812df48d
DS
133 return;
134 if (blkg == curr_blkg)
135 return;
549d3aa8
TH
136 pd->stats.start_group_wait_time = sched_clock();
137 blkio_mark_blkg_waiting(&pd->stats);
812df48d
DS
138}
139
edf1b879 140/* This should be called with the queue_lock held. */
812df48d
DS
141static void blkio_update_group_wait_time(struct blkio_group_stats *stats)
142{
143 unsigned long long now;
144
145 if (!blkio_blkg_waiting(stats))
146 return;
147
148 now = sched_clock();
149 if (time_after64(now, stats->start_group_wait_time))
edcb0722
TH
150 blkg_stat_add(&stats->group_wait_time,
151 now - stats->start_group_wait_time);
812df48d
DS
152 blkio_clear_blkg_waiting(stats);
153}
154
edf1b879 155/* This should be called with the queue_lock held. */
812df48d
DS
156static void blkio_end_empty_time(struct blkio_group_stats *stats)
157{
158 unsigned long long now;
159
160 if (!blkio_blkg_empty(stats))
161 return;
162
163 now = sched_clock();
164 if (time_after64(now, stats->start_empty_time))
edcb0722
TH
165 blkg_stat_add(&stats->empty_time,
166 now - stats->start_empty_time);
812df48d
DS
167 blkio_clear_blkg_empty(stats);
168}
169
c1768268
TH
170void blkiocg_update_set_idle_time_stats(struct blkio_group *blkg,
171 struct blkio_policy_type *pol)
812df48d 172{
edf1b879 173 struct blkio_group_stats *stats = &blkg->pd[pol->plid]->stats;
812df48d 174
edf1b879
TH
175 lockdep_assert_held(blkg->q->queue_lock);
176 BUG_ON(blkio_blkg_idling(stats));
177
178 stats->start_idle_time = sched_clock();
179 blkio_mark_blkg_idling(stats);
812df48d
DS
180}
181EXPORT_SYMBOL_GPL(blkiocg_update_set_idle_time_stats);
182
c1768268
TH
183void blkiocg_update_idle_time_stats(struct blkio_group *blkg,
184 struct blkio_policy_type *pol)
812df48d 185{
edf1b879
TH
186 struct blkio_group_stats *stats = &blkg->pd[pol->plid]->stats;
187
188 lockdep_assert_held(blkg->q->queue_lock);
812df48d 189
812df48d 190 if (blkio_blkg_idling(stats)) {
edf1b879
TH
191 unsigned long long now = sched_clock();
192
edcb0722
TH
193 if (time_after64(now, stats->start_idle_time))
194 blkg_stat_add(&stats->idle_time,
195 now - stats->start_idle_time);
812df48d
DS
196 blkio_clear_blkg_idling(stats);
197 }
812df48d
DS
198}
199EXPORT_SYMBOL_GPL(blkiocg_update_idle_time_stats);
200
c1768268
TH
201void blkiocg_update_avg_queue_size_stats(struct blkio_group *blkg,
202 struct blkio_policy_type *pol)
cdc1184c 203{
edf1b879 204 struct blkio_group_stats *stats = &blkg->pd[pol->plid]->stats;
cdc1184c 205
edf1b879
TH
206 lockdep_assert_held(blkg->q->queue_lock);
207
edcb0722
TH
208 blkg_stat_add(&stats->avg_queue_size_sum,
209 blkg_rwstat_sum(&stats->queued));
210 blkg_stat_add(&stats->avg_queue_size_samples, 1);
812df48d 211 blkio_update_group_wait_time(stats);
cdc1184c 212}
a11cdaa7
DS
213EXPORT_SYMBOL_GPL(blkiocg_update_avg_queue_size_stats);
214
c1768268
TH
215void blkiocg_set_start_empty_time(struct blkio_group *blkg,
216 struct blkio_policy_type *pol)
28baf442 217{
edf1b879 218 struct blkio_group_stats *stats = &blkg->pd[pol->plid]->stats;
28baf442 219
edf1b879 220 lockdep_assert_held(blkg->q->queue_lock);
28baf442 221
edcb0722 222 if (blkg_rwstat_sum(&stats->queued))
28baf442 223 return;
28baf442
DS
224
225 /*
e5ff082e
VG
226 * The group is already marked empty. This can happen if a cfqq got a
227 * new request in the parent group and moved to this group while being
228 * added to the service tree. Just ignore the event and move on.
28baf442 229 */
edf1b879 230 if (blkio_blkg_empty(stats))
e5ff082e 231 return;
e5ff082e 232
28baf442
DS
233 stats->start_empty_time = sched_clock();
234 blkio_mark_blkg_empty(stats);
28baf442
DS
235}
236EXPORT_SYMBOL_GPL(blkiocg_set_start_empty_time);
237
a11cdaa7 238void blkiocg_update_dequeue_stats(struct blkio_group *blkg,
c1768268
TH
239 struct blkio_policy_type *pol,
240 unsigned long dequeue)
a11cdaa7 241{
c1768268 242 struct blkg_policy_data *pd = blkg->pd[pol->plid];
549d3aa8 243
edf1b879
TH
244 lockdep_assert_held(blkg->q->queue_lock);
245
edcb0722 246 blkg_stat_add(&pd->stats.dequeue, dequeue);
a11cdaa7
DS
247}
248EXPORT_SYMBOL_GPL(blkiocg_update_dequeue_stats);
812df48d
DS
249#else
250static inline void blkio_set_start_group_wait_time(struct blkio_group *blkg,
c1768268
TH
251 struct blkio_policy_type *pol,
252 struct blkio_group *curr_blkg) { }
253static inline void blkio_end_empty_time(struct blkio_group_stats *stats) { }
cdc1184c
DS
254#endif
255
a11cdaa7 256void blkiocg_update_io_add_stats(struct blkio_group *blkg,
c1768268
TH
257 struct blkio_policy_type *pol,
258 struct blkio_group *curr_blkg, bool direction,
259 bool sync)
cdc1184c 260{
edf1b879 261 struct blkio_group_stats *stats = &blkg->pd[pol->plid]->stats;
edcb0722 262 int rw = (direction ? REQ_WRITE : 0) | (sync ? REQ_SYNC : 0);
edf1b879
TH
263
264 lockdep_assert_held(blkg->q->queue_lock);
265
edcb0722 266 blkg_rwstat_add(&stats->queued, rw, 1);
edf1b879 267 blkio_end_empty_time(stats);
c1768268 268 blkio_set_start_group_wait_time(blkg, pol, curr_blkg);
cdc1184c 269}
a11cdaa7 270EXPORT_SYMBOL_GPL(blkiocg_update_io_add_stats);
cdc1184c 271
a11cdaa7 272void blkiocg_update_io_remove_stats(struct blkio_group *blkg,
c1768268
TH
273 struct blkio_policy_type *pol,
274 bool direction, bool sync)
cdc1184c 275{
edf1b879 276 struct blkio_group_stats *stats = &blkg->pd[pol->plid]->stats;
edcb0722 277 int rw = (direction ? REQ_WRITE : 0) | (sync ? REQ_SYNC : 0);
edf1b879
TH
278
279 lockdep_assert_held(blkg->q->queue_lock);
cdc1184c 280
edcb0722 281 blkg_rwstat_add(&stats->queued, rw, -1);
cdc1184c 282}
a11cdaa7 283EXPORT_SYMBOL_GPL(blkiocg_update_io_remove_stats);
cdc1184c 284
c1768268
TH
285void blkiocg_update_timeslice_used(struct blkio_group *blkg,
286 struct blkio_policy_type *pol,
287 unsigned long time,
288 unsigned long unaccounted_time)
22084190 289{
edf1b879
TH
290 struct blkio_group_stats *stats = &blkg->pd[pol->plid]->stats;
291
292 lockdep_assert_held(blkg->q->queue_lock);
303a3acb 293
edcb0722 294 blkg_stat_add(&stats->time, time);
a23e6869 295#ifdef CONFIG_DEBUG_BLK_CGROUP
edcb0722 296 blkg_stat_add(&stats->unaccounted_time, unaccounted_time);
a23e6869 297#endif
22084190 298}
303a3acb 299EXPORT_SYMBOL_GPL(blkiocg_update_timeslice_used);
22084190 300
5624a4e4
VG
301/*
302 * should be called under rcu read lock or queue lock to make sure blkg pointer
303 * is valid.
304 */
84c124da 305void blkiocg_update_dispatch_stats(struct blkio_group *blkg,
c1768268
TH
306 struct blkio_policy_type *pol,
307 uint64_t bytes, bool direction, bool sync)
9195291e 308{
edcb0722 309 int rw = (direction ? REQ_WRITE : 0) | (sync ? REQ_SYNC : 0);
c1768268 310 struct blkg_policy_data *pd = blkg->pd[pol->plid];
5624a4e4 311 struct blkio_group_stats_cpu *stats_cpu;
575969a0
VG
312 unsigned long flags;
313
1cd9e039
VG
314 /* If per cpu stats are not allocated yet, don't do any accounting. */
315 if (pd->stats_cpu == NULL)
316 return;
317
575969a0
VG
318 /*
319 * Disabling interrupts to provide mutual exclusion between two
320 * writes on the same CPU. It probably is not needed for 64-bit. Not
321 * optimizing that case yet.
322 */
323 local_irq_save(flags);
9195291e 324
549d3aa8 325 stats_cpu = this_cpu_ptr(pd->stats_cpu);
5624a4e4 326
edcb0722
TH
327 blkg_stat_add(&stats_cpu->sectors, bytes >> 9);
328 blkg_rwstat_add(&stats_cpu->serviced, rw, 1);
329 blkg_rwstat_add(&stats_cpu->service_bytes, rw, bytes);
330
575969a0 331 local_irq_restore(flags);
9195291e 332}
84c124da 333EXPORT_SYMBOL_GPL(blkiocg_update_dispatch_stats);
9195291e 334
84c124da 335void blkiocg_update_completion_stats(struct blkio_group *blkg,
c1768268
TH
336 struct blkio_policy_type *pol,
337 uint64_t start_time,
338 uint64_t io_start_time, bool direction,
339 bool sync)
9195291e 340{
edf1b879 341 struct blkio_group_stats *stats = &blkg->pd[pol->plid]->stats;
9195291e 342 unsigned long long now = sched_clock();
edcb0722 343 int rw = (direction ? REQ_WRITE : 0) | (sync ? REQ_SYNC : 0);
9195291e 344
edf1b879
TH
345 lockdep_assert_held(blkg->q->queue_lock);
346
84c124da 347 if (time_after64(now, io_start_time))
edcb0722 348 blkg_rwstat_add(&stats->service_time, rw, now - io_start_time);
84c124da 349 if (time_after64(io_start_time, start_time))
edcb0722
TH
350 blkg_rwstat_add(&stats->wait_time, rw,
351 io_start_time - start_time);
9195291e 352}
84c124da 353EXPORT_SYMBOL_GPL(blkiocg_update_completion_stats);
9195291e 354
317389a7 355/* Merged stats are per cpu. */
c1768268
TH
356void blkiocg_update_io_merged_stats(struct blkio_group *blkg,
357 struct blkio_policy_type *pol,
358 bool direction, bool sync)
812d4026 359{
edf1b879 360 struct blkio_group_stats *stats = &blkg->pd[pol->plid]->stats;
edcb0722 361 int rw = (direction ? REQ_WRITE : 0) | (sync ? REQ_SYNC : 0);
edf1b879
TH
362
363 lockdep_assert_held(blkg->q->queue_lock);
812d4026 364
edcb0722 365 blkg_rwstat_add(&stats->merged, rw, 1);
812d4026
DS
366}
367EXPORT_SYMBOL_GPL(blkiocg_update_io_merged_stats);
368
1cd9e039
VG
369/*
370 * Worker for allocating per cpu stat for blk groups. This is scheduled on
371 * the system_nrt_wq once there are some groups on the alloc_list waiting
372 * for allocation.
373 */
374static void blkio_stat_alloc_fn(struct work_struct *work)
375{
376 static void *pcpu_stats[BLKIO_NR_POLICIES];
377 struct delayed_work *dwork = to_delayed_work(work);
378 struct blkio_group *blkg;
379 int i;
380 bool empty = false;
381
382alloc_stats:
383 for (i = 0; i < BLKIO_NR_POLICIES; i++) {
384 if (pcpu_stats[i] != NULL)
385 continue;
386
387 pcpu_stats[i] = alloc_percpu(struct blkio_group_stats_cpu);
388
389 /* Allocation failed. Try again after some time. */
390 if (pcpu_stats[i] == NULL) {
391 queue_delayed_work(system_nrt_wq, dwork,
392 msecs_to_jiffies(10));
393 return;
394 }
395 }
396
397 spin_lock_irq(&blkio_list_lock);
398 spin_lock(&alloc_list_lock);
399
400 /* cgroup got deleted or queue exited. */
401 if (!list_empty(&alloc_list)) {
402 blkg = list_first_entry(&alloc_list, struct blkio_group,
403 alloc_node);
404 for (i = 0; i < BLKIO_NR_POLICIES; i++) {
405 struct blkg_policy_data *pd = blkg->pd[i];
406
407 if (blkio_policy[i] && pd && !pd->stats_cpu)
408 swap(pd->stats_cpu, pcpu_stats[i]);
409 }
410
411 list_del_init(&blkg->alloc_node);
412 }
413
414 empty = list_empty(&alloc_list);
415
416 spin_unlock(&alloc_list_lock);
417 spin_unlock_irq(&blkio_list_lock);
418
419 if (!empty)
420 goto alloc_stats;
421}
422
0381411e
TH
423/**
424 * blkg_free - free a blkg
425 * @blkg: blkg to free
426 *
427 * Free @blkg which may be partially allocated.
428 */
429static void blkg_free(struct blkio_group *blkg)
430{
e8989fae 431 int i;
549d3aa8
TH
432
433 if (!blkg)
434 return;
435
e8989fae
TH
436 for (i = 0; i < BLKIO_NR_POLICIES; i++) {
437 struct blkg_policy_data *pd = blkg->pd[i];
438
439 if (pd) {
440 free_percpu(pd->stats_cpu);
441 kfree(pd);
442 }
0381411e 443 }
e8989fae 444
549d3aa8 445 kfree(blkg);
0381411e
TH
446}
447
448/**
449 * blkg_alloc - allocate a blkg
450 * @blkcg: block cgroup the new blkg is associated with
451 * @q: request_queue the new blkg is associated with
0381411e 452 *
e8989fae 453 * Allocate a new blkg associating @blkcg and @q.
0381411e
TH
454 */
455static struct blkio_group *blkg_alloc(struct blkio_cgroup *blkcg,
e8989fae 456 struct request_queue *q)
0381411e
TH
457{
458 struct blkio_group *blkg;
e8989fae 459 int i;
0381411e
TH
460
461 /* alloc and init base part */
462 blkg = kzalloc_node(sizeof(*blkg), GFP_ATOMIC, q->node);
463 if (!blkg)
464 return NULL;
465
c875f4d0 466 blkg->q = q;
e8989fae 467 INIT_LIST_HEAD(&blkg->q_node);
1cd9e039 468 INIT_LIST_HEAD(&blkg->alloc_node);
0381411e 469 blkg->blkcg = blkcg;
1adaf3dd 470 blkg->refcnt = 1;
0381411e
TH
471 cgroup_path(blkcg->css.cgroup, blkg->path, sizeof(blkg->path));
472
e8989fae
TH
473 for (i = 0; i < BLKIO_NR_POLICIES; i++) {
474 struct blkio_policy_type *pol = blkio_policy[i];
475 struct blkg_policy_data *pd;
0381411e 476
e8989fae
TH
477 if (!pol)
478 continue;
479
480 /* alloc per-policy data and attach it to blkg */
481 pd = kzalloc_node(sizeof(*pd) + pol->pdata_size, GFP_ATOMIC,
482 q->node);
483 if (!pd) {
484 blkg_free(blkg);
485 return NULL;
486 }
549d3aa8 487
e8989fae
TH
488 blkg->pd[i] = pd;
489 pd->blkg = blkg;
0381411e
TH
490 }
491
549d3aa8 492 /* invoke per-policy init */
e8989fae
TH
493 for (i = 0; i < BLKIO_NR_POLICIES; i++) {
494 struct blkio_policy_type *pol = blkio_policy[i];
495
496 if (pol)
497 pol->ops.blkio_init_group_fn(blkg);
498 }
499
0381411e
TH
500 return blkg;
501}
502
cd1604fa
TH
503struct blkio_group *blkg_lookup_create(struct blkio_cgroup *blkcg,
504 struct request_queue *q,
cd1604fa
TH
505 bool for_root)
506 __releases(q->queue_lock) __acquires(q->queue_lock)
5624a4e4 507{
1cd9e039 508 struct blkio_group *blkg;
5624a4e4 509
cd1604fa
TH
510 WARN_ON_ONCE(!rcu_read_lock_held());
511 lockdep_assert_held(q->queue_lock);
512
513 /*
514 * This could be the first entry point of blkcg implementation and
515 * we shouldn't allow anything to go through for a bypassing queue.
516 * The following can be removed if blkg lookup is guaranteed to
517 * fail on a bypassing queue.
518 */
519 if (unlikely(blk_queue_bypass(q)) && !for_root)
520 return ERR_PTR(blk_queue_dead(q) ? -EINVAL : -EBUSY);
521
e8989fae 522 blkg = blkg_lookup(blkcg, q);
cd1604fa
TH
523 if (blkg)
524 return blkg;
525
7ee9c562 526 /* blkg holds a reference to blkcg */
cd1604fa
TH
527 if (!css_tryget(&blkcg->css))
528 return ERR_PTR(-EINVAL);
529
530 /*
531 * Allocate and initialize.
cd1604fa 532 */
1cd9e039 533 blkg = blkg_alloc(blkcg, q);
cd1604fa
TH
534
535 /* did alloc fail? */
1cd9e039 536 if (unlikely(!blkg)) {
cd1604fa
TH
537 blkg = ERR_PTR(-ENOMEM);
538 goto out;
539 }
540
541 /* insert */
542 spin_lock(&blkcg->lock);
31e4c28d 543 hlist_add_head_rcu(&blkg->blkcg_node, &blkcg->blkg_list);
e8989fae 544 list_add(&blkg->q_node, &q->blkg_list);
cd1604fa 545 spin_unlock(&blkcg->lock);
1cd9e039
VG
546
547 spin_lock(&alloc_list_lock);
548 list_add(&blkg->alloc_node, &alloc_list);
549 /* Queue per cpu stat allocation from worker thread. */
550 queue_delayed_work(system_nrt_wq, &blkio_stat_alloc_work, 0);
551 spin_unlock(&alloc_list_lock);
cd1604fa 552out:
cd1604fa 553 return blkg;
31e4c28d 554}
cd1604fa 555EXPORT_SYMBOL_GPL(blkg_lookup_create);
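/*
 * A minimal usage sketch (illustrative only, not part of this file): the
 * locking blkg_lookup_create() expects from its callers -- rcu_read_lock()
 * plus the queue_lock held -- and the ERR_PTR() check on the result.  The
 * wrapper name below is hypothetical; the in-tree caller following this
 * pattern is blkg_conf_prep() further down.
 */
static struct blkio_group *example_get_blkg(struct blkio_cgroup *blkcg,
					    struct request_queue *q)
{
	struct blkio_group *blkg;

	rcu_read_lock();
	spin_lock_irq(q->queue_lock);
	blkg = blkg_lookup_create(blkcg, q, false);	/* may return ERR_PTR() */
	spin_unlock_irq(q->queue_lock);
	rcu_read_unlock();

	return IS_ERR(blkg) ? NULL : blkg;
}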
31e4c28d 556
31e4c28d 557/* called under rcu_read_lock(). */
cd1604fa 558struct blkio_group *blkg_lookup(struct blkio_cgroup *blkcg,
e8989fae 559 struct request_queue *q)
31e4c28d
VG
560{
561 struct blkio_group *blkg;
562 struct hlist_node *n;
31e4c28d 563
ca32aefc 564 hlist_for_each_entry_rcu(blkg, n, &blkcg->blkg_list, blkcg_node)
e8989fae 565 if (blkg->q == q)
31e4c28d 566 return blkg;
31e4c28d
VG
567 return NULL;
568}
cd1604fa 569EXPORT_SYMBOL_GPL(blkg_lookup);
31e4c28d 570
e8989fae 571static void blkg_destroy(struct blkio_group *blkg)
03aa264a
TH
572{
573 struct request_queue *q = blkg->q;
9f13ef67 574 struct blkio_cgroup *blkcg = blkg->blkcg;
03aa264a
TH
575
576 lockdep_assert_held(q->queue_lock);
9f13ef67 577 lockdep_assert_held(&blkcg->lock);
03aa264a
TH
578
579 /* Something is wrong if we are trying to remove the same group twice */
e8989fae 580 WARN_ON_ONCE(list_empty(&blkg->q_node));
9f13ef67 581 WARN_ON_ONCE(hlist_unhashed(&blkg->blkcg_node));
e8989fae 582 list_del_init(&blkg->q_node);
9f13ef67 583 hlist_del_init_rcu(&blkg->blkcg_node);
03aa264a 584
1cd9e039
VG
585 spin_lock(&alloc_list_lock);
586 list_del_init(&blkg->alloc_node);
587 spin_unlock(&alloc_list_lock);
588
03aa264a
TH
589 /*
590 * Put the reference taken at the time of creation so that when all
591 * queues are gone, group can be destroyed.
592 */
593 blkg_put(blkg);
594}
595
e8989fae
TH
596/*
597 * XXX: This updates blkg policy data in-place for root blkg, which is
598 * necessary across elevator switch and policy registration as root blkgs
599 * aren't shot down. This broken and racy implementation is temporary.
600 * Eventually, blkg shoot down will be replaced by proper in-place update.
601 */
602void update_root_blkg_pd(struct request_queue *q, enum blkio_policy_id plid)
603{
604 struct blkio_policy_type *pol = blkio_policy[plid];
605 struct blkio_group *blkg = blkg_lookup(&blkio_root_cgroup, q);
606 struct blkg_policy_data *pd;
607
608 if (!blkg)
609 return;
610
611 kfree(blkg->pd[plid]);
612 blkg->pd[plid] = NULL;
613
614 if (!pol)
615 return;
616
617 pd = kzalloc(sizeof(*pd) + pol->pdata_size, GFP_KERNEL);
618 WARN_ON_ONCE(!pd);
619
620 pd->stats_cpu = alloc_percpu(struct blkio_group_stats_cpu);
621 WARN_ON_ONCE(!pd->stats_cpu);
622
623 blkg->pd[plid] = pd;
624 pd->blkg = blkg;
625 pol->ops.blkio_init_group_fn(blkg);
626}
627EXPORT_SYMBOL_GPL(update_root_blkg_pd);
628
9f13ef67
TH
629/**
630 * blkg_destroy_all - destroy all blkgs associated with a request_queue
631 * @q: request_queue of interest
632 * @destroy_root: whether to destroy root blkg or not
633 *
634 * Destroy blkgs associated with @q. If @destroy_root is %true, all are
635 * destroyed; otherwise, root blkg is left alone.
636 */
e8989fae 637void blkg_destroy_all(struct request_queue *q, bool destroy_root)
72e06c25 638{
03aa264a 639 struct blkio_group *blkg, *n;
72e06c25 640
9f13ef67 641 spin_lock_irq(q->queue_lock);
72e06c25 642
9f13ef67
TH
643 list_for_each_entry_safe(blkg, n, &q->blkg_list, q_node) {
644 struct blkio_cgroup *blkcg = blkg->blkcg;
72e06c25 645
9f13ef67
TH
646 /* skip root? */
647 if (!destroy_root && blkg->blkcg == &blkio_root_cgroup)
648 continue;
72e06c25 649
9f13ef67
TH
650 spin_lock(&blkcg->lock);
651 blkg_destroy(blkg);
652 spin_unlock(&blkcg->lock);
72e06c25 653 }
9f13ef67
TH
654
655 spin_unlock_irq(q->queue_lock);
72e06c25 656}
03aa264a 657EXPORT_SYMBOL_GPL(blkg_destroy_all);
72e06c25 658
1adaf3dd
TH
659static void blkg_rcu_free(struct rcu_head *rcu_head)
660{
661 blkg_free(container_of(rcu_head, struct blkio_group, rcu_head));
662}
663
664void __blkg_release(struct blkio_group *blkg)
665{
666 /* release the extra blkcg reference this blkg has been holding */
667 css_put(&blkg->blkcg->css);
668
669 /*
670 * A group is freed in rcu manner. But having an rcu lock does not
671 * mean that one can access all the fields of blkg and assume these
672 * are valid. For example, don't try to follow throtl_data and
673 * request queue links.
674 *
675 * Having a reference to blkg under an rcu allows access to only
676 * values local to groups like group stats and group rate limits.
677 */
678 call_rcu(&blkg->rcu_head, blkg_rcu_free);
679}
680EXPORT_SYMBOL_GPL(__blkg_release);
681
c1768268 682static void blkio_reset_stats_cpu(struct blkio_group *blkg, int plid)
f0bdc8cd 683{
c1768268 684 struct blkg_policy_data *pd = blkg->pd[plid];
997a026c 685 int cpu;
1cd9e039
VG
686
687 if (pd->stats_cpu == NULL)
688 return;
997a026c
TH
689
690 for_each_possible_cpu(cpu) {
691 struct blkio_group_stats_cpu *sc =
692 per_cpu_ptr(pd->stats_cpu, cpu);
693
edcb0722
TH
694 blkg_rwstat_reset(&sc->service_bytes);
695 blkg_rwstat_reset(&sc->serviced);
696 blkg_stat_reset(&sc->sectors);
f0bdc8cd
VG
697 }
698}
699
303a3acb 700static int
84c124da 701blkiocg_reset_stats(struct cgroup *cgroup, struct cftype *cftype, u64 val)
303a3acb 702{
997a026c 703 struct blkio_cgroup *blkcg = cgroup_to_blkio_cgroup(cgroup);
303a3acb
DS
704 struct blkio_group *blkg;
705 struct hlist_node *n;
303a3acb 706
e8989fae 707 spin_lock(&blkio_list_lock);
303a3acb 708 spin_lock_irq(&blkcg->lock);
997a026c
TH
709
710 /*
711 * Note that stat reset is racy - it doesn't synchronize against
712 * stat updates. This is a debug feature which shouldn't exist
713 * anyway. If you get hit by a race, retry.
714 */
303a3acb 715 hlist_for_each_entry(blkg, n, &blkcg->blkg_list, blkcg_node) {
e8989fae 716 struct blkio_policy_type *pol;
549d3aa8 717
e8989fae
TH
718 list_for_each_entry(pol, &blkio_list, list) {
719 struct blkg_policy_data *pd = blkg->pd[pol->plid];
997a026c
TH
720 struct blkio_group_stats *stats = &pd->stats;
721
722 /* queued stats shouldn't be cleared */
edcb0722
TH
723 blkg_rwstat_reset(&stats->merged);
724 blkg_rwstat_reset(&stats->service_time);
725 blkg_rwstat_reset(&stats->wait_time);
726 blkg_stat_reset(&stats->time);
812df48d 727#ifdef CONFIG_DEBUG_BLK_CGROUP
edcb0722
TH
728 blkg_stat_reset(&stats->unaccounted_time);
729 blkg_stat_reset(&stats->avg_queue_size_sum);
730 blkg_stat_reset(&stats->avg_queue_size_samples);
731 blkg_stat_reset(&stats->dequeue);
732 blkg_stat_reset(&stats->group_wait_time);
733 blkg_stat_reset(&stats->idle_time);
734 blkg_stat_reset(&stats->empty_time);
812df48d 735#endif
e8989fae
TH
736 blkio_reset_stats_cpu(blkg, pol->plid);
737 }
303a3acb 738 }
f0bdc8cd 739
303a3acb 740 spin_unlock_irq(&blkcg->lock);
e8989fae 741 spin_unlock(&blkio_list_lock);
303a3acb
DS
742 return 0;
743}
744
d3d32e69 745static const char *blkg_dev_name(struct blkio_group *blkg)
303a3acb 746{
d3d32e69
TH
747 /* some drivers (floppy) instantiate a queue w/o disk registered */
748 if (blkg->q->backing_dev_info.dev)
749 return dev_name(blkg->q->backing_dev_info.dev);
750 return NULL;
303a3acb
DS
751}
752
d3d32e69
TH
753/**
754 * blkcg_print_blkgs - helper for printing per-blkg data
755 * @sf: seq_file to print to
756 * @blkcg: blkcg of interest
757 * @prfill: fill function to print out a blkg
758 * @pol: policy in question
759 * @data: data to be passed to @prfill
760 * @show_total: to print out sum of prfill return values or not
761 *
762 * This function invokes @prfill on each blkg of @blkcg if pd for the
763 * policy specified by @pol exists. @prfill is invoked with @sf, the
764 * policy data and @data. If @show_total is %true, the sum of the return
765 * values from @prfill is printed with "Total" label at the end.
766 *
767 * This is to be used to construct print functions for
768 * cftype->read_seq_string method.
769 */
770static void blkcg_print_blkgs(struct seq_file *sf, struct blkio_cgroup *blkcg,
771 u64 (*prfill)(struct seq_file *,
772 struct blkg_policy_data *, int),
773 int pol, int data, bool show_total)
5624a4e4 774{
d3d32e69
TH
775 struct blkio_group *blkg;
776 struct hlist_node *n;
777 u64 total = 0;
5624a4e4 778
d3d32e69
TH
779 spin_lock_irq(&blkcg->lock);
780 hlist_for_each_entry(blkg, n, &blkcg->blkg_list, blkcg_node)
781 if (blkg->pd[pol])
782 total += prfill(sf, blkg->pd[pol], data);
783 spin_unlock_irq(&blkcg->lock);
784
785 if (show_total)
786 seq_printf(sf, "Total %llu\n", (unsigned long long)total);
787}
788
789/**
790 * __blkg_prfill_u64 - prfill helper for a single u64 value
791 * @sf: seq_file to print to
792 * @pd: policy data of interest
793 * @v: value to print
794 *
795 * Print @v to @sf for the device associated with @pd.
796 */
797static u64 __blkg_prfill_u64(struct seq_file *sf, struct blkg_policy_data *pd,
798 u64 v)
799{
800 const char *dname = blkg_dev_name(pd->blkg);
801
802 if (!dname)
803 return 0;
804
805 seq_printf(sf, "%s %llu\n", dname, (unsigned long long)v);
806 return v;
807}
808
809/**
810 * __blkg_prfill_rwstat - prfill helper for a blkg_rwstat
811 * @sf: seq_file to print to
812 * @pd: policy data of interest
813 * @rwstat: rwstat to print
814 *
815 * Print @rwstat to @sf for the device associated with @pd.
816 */
817static u64 __blkg_prfill_rwstat(struct seq_file *sf,
818 struct blkg_policy_data *pd,
819 const struct blkg_rwstat *rwstat)
820{
821 static const char *rwstr[] = {
822 [BLKG_RWSTAT_READ] = "Read",
823 [BLKG_RWSTAT_WRITE] = "Write",
824 [BLKG_RWSTAT_SYNC] = "Sync",
825 [BLKG_RWSTAT_ASYNC] = "Async",
826 };
827 const char *dname = blkg_dev_name(pd->blkg);
828 u64 v;
829 int i;
830
831 if (!dname)
832 return 0;
833
834 for (i = 0; i < BLKG_RWSTAT_NR; i++)
835 seq_printf(sf, "%s %s %llu\n", dname, rwstr[i],
836 (unsigned long long)rwstat->cnt[i]);
837
838 v = rwstat->cnt[BLKG_RWSTAT_READ] + rwstat->cnt[BLKG_RWSTAT_WRITE];
839 seq_printf(sf, "%s Total %llu\n", dname, (unsigned long long)v);
840 return v;
841}
842
843static u64 blkg_prfill_stat(struct seq_file *sf, struct blkg_policy_data *pd,
844 int off)
845{
846 return __blkg_prfill_u64(sf, pd,
847 blkg_stat_read((void *)&pd->stats + off));
848}
849
850static u64 blkg_prfill_rwstat(struct seq_file *sf, struct blkg_policy_data *pd,
851 int off)
852{
853 struct blkg_rwstat rwstat = blkg_rwstat_read((void *)&pd->stats + off);
854
855 return __blkg_prfill_rwstat(sf, pd, &rwstat);
856}
857
858/* print blkg_stat specified by BLKCG_STAT_PRIV() */
859static int blkcg_print_stat(struct cgroup *cgrp, struct cftype *cft,
860 struct seq_file *sf)
861{
862 struct blkio_cgroup *blkcg = cgroup_to_blkio_cgroup(cgrp);
863
864 blkcg_print_blkgs(sf, blkcg, blkg_prfill_stat,
865 BLKCG_STAT_POL(cft->private),
866 BLKCG_STAT_OFF(cft->private), false);
867 return 0;
868}
869
870/* print blkg_rwstat specified by BLKCG_STAT_PRIV() */
871static int blkcg_print_rwstat(struct cgroup *cgrp, struct cftype *cft,
872 struct seq_file *sf)
873{
874 struct blkio_cgroup *blkcg = cgroup_to_blkio_cgroup(cgrp);
875
876 blkcg_print_blkgs(sf, blkcg, blkg_prfill_rwstat,
877 BLKCG_STAT_POL(cft->private),
878 BLKCG_STAT_OFF(cft->private), true);
879 return 0;
880}
881
882static u64 blkg_prfill_cpu_stat(struct seq_file *sf,
883 struct blkg_policy_data *pd, int off)
884{
885 u64 v = 0;
886 int cpu;
1cd9e039 887
5624a4e4 888 for_each_possible_cpu(cpu) {
d3d32e69 889 struct blkio_group_stats_cpu *sc =
edcb0722 890 per_cpu_ptr(pd->stats_cpu, cpu);
edcb0722 891
d3d32e69 892 v += blkg_stat_read((void *)sc + off);
5624a4e4
VG
893 }
894
d3d32e69 895 return __blkg_prfill_u64(sf, pd, v);
5624a4e4
VG
896}
897
d3d32e69
TH
898static u64 blkg_prfill_cpu_rwstat(struct seq_file *sf,
899 struct blkg_policy_data *pd, int off)
5624a4e4 900{
d3d32e69
TH
901 struct blkg_rwstat rwstat = { }, tmp;
902 int i, cpu;
903
904 for_each_possible_cpu(cpu) {
905 struct blkio_group_stats_cpu *sc =
906 per_cpu_ptr(pd->stats_cpu, cpu);
5624a4e4 907
d3d32e69
TH
908 tmp = blkg_rwstat_read((void *)sc + off);
909 for (i = 0; i < BLKG_RWSTAT_NR; i++)
910 rwstat.cnt[i] += tmp.cnt[i];
5624a4e4
VG
911 }
912
d3d32e69
TH
913 return __blkg_prfill_rwstat(sf, pd, &rwstat);
914}
5624a4e4 915
d3d32e69
TH
916/* print per-cpu blkg_stat specified by BLKCG_STAT_PRIV() */
917static int blkcg_print_cpu_stat(struct cgroup *cgrp, struct cftype *cft,
918 struct seq_file *sf)
919{
920 struct blkio_cgroup *blkcg = cgroup_to_blkio_cgroup(cgrp);
921
922 blkcg_print_blkgs(sf, blkcg, blkg_prfill_cpu_stat,
923 BLKCG_STAT_POL(cft->private),
924 BLKCG_STAT_OFF(cft->private), false);
925 return 0;
5624a4e4
VG
926}
927
d3d32e69
TH
928/* print per-cpu blkg_rwstat specified by BLKCG_STAT_PRIV() */
929static int blkcg_print_cpu_rwstat(struct cgroup *cgrp, struct cftype *cft,
930 struct seq_file *sf)
303a3acb 931{
d3d32e69 932 struct blkio_cgroup *blkcg = cgroup_to_blkio_cgroup(cgrp);
edcb0722 933
d3d32e69
TH
934 blkcg_print_blkgs(sf, blkcg, blkg_prfill_cpu_rwstat,
935 BLKCG_STAT_POL(cft->private),
936 BLKCG_STAT_OFF(cft->private), true);
937 return 0;
938}
303a3acb 939
d3d32e69
TH
940#ifdef CONFIG_DEBUG_BLK_CGROUP
941static u64 blkg_prfill_avg_queue_size(struct seq_file *sf,
942 struct blkg_policy_data *pd, int off)
943{
944 u64 samples = blkg_stat_read(&pd->stats.avg_queue_size_samples);
945 u64 v = 0;
c4c76a05 946
d3d32e69
TH
947 if (samples) {
948 v = blkg_stat_read(&pd->stats.avg_queue_size_sum);
949 do_div(v, samples);
edcb0722 950 }
d3d32e69
TH
951 __blkg_prfill_u64(sf, pd, v);
952 return 0;
953}
c4c76a05 954
d3d32e69
TH
955/* print avg_queue_size */
956static int blkcg_print_avg_queue_size(struct cgroup *cgrp, struct cftype *cft,
957 struct seq_file *sf)
958{
959 struct blkio_cgroup *blkcg = cgroup_to_blkio_cgroup(cgrp);
c4c76a05 960
d3d32e69
TH
961 blkcg_print_blkgs(sf, blkcg, blkg_prfill_avg_queue_size,
962 BLKIO_POLICY_PROP, 0, false);
963 return 0;
303a3acb 964}
d3d32e69 965#endif /* CONFIG_DEBUG_BLK_CGROUP */
303a3acb 966
3a8b31d3
TH
967struct blkg_conf_ctx {
968 struct gendisk *disk;
969 struct blkio_group *blkg;
970 u64 v;
971};
972
973/**
974 * blkg_conf_prep - parse and prepare for per-blkg config update
975 * @blkcg: target block cgroup
976 * @input: input string
977 * @ctx: blkg_conf_ctx to be filled
978 *
979 * Parse per-blkg config update from @input and initialize @ctx with the
980 * result. @ctx->blkg points to the blkg to be updated and @ctx->v the new
981 * value. This function returns with RCU read locked and must be paired
982 * with blkg_conf_finish().
983 */
984static int blkg_conf_prep(struct blkio_cgroup *blkcg, const char *input,
985 struct blkg_conf_ctx *ctx)
986 __acquires(rcu)
34d0f179 987{
3a8b31d3
TH
988 struct gendisk *disk;
989 struct blkio_group *blkg;
990 char *buf, *s[4], *p, *major_s, *minor_s;
d11bb446 991 unsigned long major, minor;
ece84241
TH
992 int i = 0, ret = -EINVAL;
993 int part;
34d0f179 994 dev_t dev;
d11bb446 995 u64 temp;
34d0f179 996
3a8b31d3
TH
997 buf = kstrdup(input, GFP_KERNEL);
998 if (!buf)
999 return -ENOMEM;
1000
34d0f179
GJ
1001 memset(s, 0, sizeof(s));
1002
1003 while ((p = strsep(&buf, " ")) != NULL) {
1004 if (!*p)
1005 continue;
1006
1007 s[i++] = p;
1008
1009 /* Prevent inputting too many fields */
1010 if (i == 3)
1011 break;
1012 }
1013
1014 if (i != 2)
ece84241 1015 goto out;
34d0f179
GJ
1016
1017 p = strsep(&s[0], ":");
1018 if (p != NULL)
1019 major_s = p;
1020 else
ece84241 1021 goto out;
34d0f179
GJ
1022
1023 minor_s = s[0];
1024 if (!minor_s)
ece84241 1025 goto out;
34d0f179 1026
ece84241
TH
1027 if (strict_strtoul(major_s, 10, &major))
1028 goto out;
34d0f179 1029
ece84241
TH
1030 if (strict_strtoul(minor_s, 10, &minor))
1031 goto out;
34d0f179
GJ
1032
1033 dev = MKDEV(major, minor);
1034
ece84241
TH
1035 if (strict_strtoull(s[1], 10, &temp))
1036 goto out;
34d0f179 1037
e56da7e2 1038 disk = get_gendisk(dev, &part);
4bfd482e 1039 if (!disk || part)
e56da7e2 1040 goto out;
e56da7e2
TH
1041
1042 rcu_read_lock();
1043
4bfd482e 1044 spin_lock_irq(disk->queue->queue_lock);
aaec55a0 1045 blkg = blkg_lookup_create(blkcg, disk->queue, false);
4bfd482e 1046 spin_unlock_irq(disk->queue->queue_lock);
e56da7e2 1047
4bfd482e
TH
1048 if (IS_ERR(blkg)) {
1049 ret = PTR_ERR(blkg);
3a8b31d3
TH
1050 rcu_read_unlock();
1051 put_disk(disk);
1052 /*
1053 * If the queue was bypassing, we should retry. Do so after a
1054 * short msleep(). It isn't strictly necessary but the queue
1055 * can be bypassing for some time and it's always nice to
1056 * avoid busy looping.
1057 */
1058 if (ret == -EBUSY) {
1059 msleep(10);
1060 ret = restart_syscall();
7702e8f4 1061 }
3a8b31d3 1062 goto out;
062a644d 1063 }
3a8b31d3
TH
1064
1065 ctx->disk = disk;
1066 ctx->blkg = blkg;
1067 ctx->v = temp;
ece84241
TH
1068 ret = 0;
1069out:
3a8b31d3 1070 kfree(buf);
ece84241 1071 return ret;
34d0f179
GJ
1072}
1073
3a8b31d3
TH
1074/**
1075 * blkg_conf_finish - finish up per-blkg config update
1076 * @ctx: blkg_conf_ctx initialized by blkg_conf_prep()
1077 *
1078 * Finish up after per-blkg config update. This function must be paired
1079 * with blkg_conf_prep().
1080 */
1081static void blkg_conf_finish(struct blkg_conf_ctx *ctx)
1082 __releases(rcu)
34d0f179 1083{
3a8b31d3
TH
1084 rcu_read_unlock();
1085 put_disk(ctx->disk);
34d0f179
GJ
1086}
1087
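/*
 * A minimal sketch of the blkg_conf_prep()/blkg_conf_finish() pairing
 * described above (illustrative only; the real users are the blkcg_set_*()
 * handlers below, which also validate the value).  The function name is
 * hypothetical and BLKIO_POLICY_PROP is used only as an example policy id.
 */
static int example_set_conf(struct blkio_cgroup *blkcg, const char *buf)
{
	struct blkg_conf_ctx ctx;
	int ret;

	ret = blkg_conf_prep(blkcg, buf, &ctx);	/* parses "MAJ:MIN VAL" */
	if (ret)
		return ret;

	/* ctx.blkg and ctx.v are valid here; RCU read lock is held */
	if (ctx.blkg->pd[BLKIO_POLICY_PROP])
		ctx.blkg->pd[BLKIO_POLICY_PROP]->conf.weight = ctx.v;

	blkg_conf_finish(&ctx);			/* drops RCU, puts the disk */
	return 0;
}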
c4682aec
TH
1088/* for propio conf */
1089static u64 blkg_prfill_weight_device(struct seq_file *sf,
1090 struct blkg_policy_data *pd, int off)
34d0f179 1091{
c4682aec
TH
1092 if (!pd->conf.weight)
1093 return 0;
1094 return __blkg_prfill_u64(sf, pd, pd->conf.weight);
062a644d 1095}
34d0f179 1096
c4682aec
TH
1097static int blkcg_print_weight_device(struct cgroup *cgrp, struct cftype *cft,
1098 struct seq_file *sf)
34d0f179 1099{
c4682aec
TH
1100 blkcg_print_blkgs(sf, cgroup_to_blkio_cgroup(cgrp),
1101 blkg_prfill_weight_device, BLKIO_POLICY_PROP, 0,
1102 false);
1103 return 0;
062a644d
VG
1104}
1105
c4682aec
TH
1106static int blkcg_print_weight(struct cgroup *cgrp, struct cftype *cft,
1107 struct seq_file *sf)
062a644d 1108{
c4682aec 1109 seq_printf(sf, "%u\n", cgroup_to_blkio_cgroup(cgrp)->weight);
062a644d
VG
1110 return 0;
1111}
1112
3a8b31d3
TH
1113static int blkcg_set_weight_device(struct cgroup *cgrp, struct cftype *cft,
1114 const char *buf)
1115{
1116 struct blkio_cgroup *blkcg = cgroup_to_blkio_cgroup(cgrp);
1117 struct blkg_policy_data *pd;
1118 struct blkg_conf_ctx ctx;
1119 int ret;
1120
1121 ret = blkg_conf_prep(blkcg, buf, &ctx);
1122 if (ret)
1123 return ret;
1124
1125 ret = -EINVAL;
1126 pd = ctx.blkg->pd[BLKIO_POLICY_PROP];
1127 if (pd && (!ctx.v || (ctx.v >= BLKIO_WEIGHT_MIN &&
1128 ctx.v <= BLKIO_WEIGHT_MAX))) {
1129 pd->conf.weight = ctx.v;
1130 blkio_update_group_weight(ctx.blkg, BLKIO_POLICY_PROP,
1131 ctx.v ?: blkcg->weight);
1132 ret = 0;
1133 }
1134
1135 blkg_conf_finish(&ctx);
1136 return ret;
1137}
1138
627f29f4 1139static int blkcg_set_weight(struct cgroup *cgrp, struct cftype *cft, u64 val)
062a644d 1140{
627f29f4 1141 struct blkio_cgroup *blkcg = cgroup_to_blkio_cgroup(cgrp);
062a644d
VG
1142 struct blkio_group *blkg;
1143 struct hlist_node *n;
062a644d
VG
1144
1145 if (val < BLKIO_WEIGHT_MIN || val > BLKIO_WEIGHT_MAX)
1146 return -EINVAL;
1147
1148 spin_lock(&blkio_list_lock);
1149 spin_lock_irq(&blkcg->lock);
1150 blkcg->weight = (unsigned int)val;
1151
549d3aa8 1152 hlist_for_each_entry(blkg, n, &blkcg->blkg_list, blkcg_node) {
627f29f4 1153 struct blkg_policy_data *pd = blkg->pd[BLKIO_POLICY_PROP];
549d3aa8 1154
627f29f4
TH
1155 if (pd && !pd->conf.weight)
1156 blkio_update_group_weight(blkg, BLKIO_POLICY_PROP,
1157 blkcg->weight);
549d3aa8 1158 }
062a644d 1159
062a644d
VG
1160 spin_unlock_irq(&blkcg->lock);
1161 spin_unlock(&blkio_list_lock);
1162 return 0;
1163}
1164
c4682aec
TH
1165/* for blk-throttle conf */
1166#ifdef CONFIG_BLK_DEV_THROTTLING
1167static u64 blkg_prfill_conf_u64(struct seq_file *sf,
1168 struct blkg_policy_data *pd, int off)
1169{
1170 u64 v = *(u64 *)((void *)&pd->conf + off);
1171
1172 if (!v)
1173 return 0;
1174 return __blkg_prfill_u64(sf, pd, v);
1175}
062a644d 1176
c4682aec
TH
1177static int blkcg_print_conf_u64(struct cgroup *cgrp, struct cftype *cft,
1178 struct seq_file *sf)
1179{
c4682aec
TH
1180 blkcg_print_blkgs(sf, cgroup_to_blkio_cgroup(cgrp),
1181 blkg_prfill_conf_u64, BLKIO_POLICY_THROTL,
3a8b31d3 1182 cft->private, false);
062a644d
VG
1183 return 0;
1184}
3a8b31d3
TH
1185
1186static int blkcg_set_conf_u64(struct cgroup *cgrp, struct cftype *cft,
1187 const char *buf, int rw,
1188 void (*update)(struct blkio_group *, int, u64, int))
1189{
1190 struct blkio_cgroup *blkcg = cgroup_to_blkio_cgroup(cgrp);
1191 struct blkg_policy_data *pd;
1192 struct blkg_conf_ctx ctx;
1193 int ret;
1194
1195 ret = blkg_conf_prep(blkcg, buf, &ctx);
1196 if (ret)
1197 return ret;
1198
1199 ret = -EINVAL;
1200 pd = ctx.blkg->pd[BLKIO_POLICY_THROTL];
1201 if (pd) {
1202 *(u64 *)((void *)&pd->conf + cft->private) = ctx.v;
1203 update(ctx.blkg, BLKIO_POLICY_THROTL, ctx.v ?: -1, rw);
1204 ret = 0;
1205 }
1206
1207 blkg_conf_finish(&ctx);
1208 return ret;
1209}
1210
1211static int blkcg_set_conf_bps_r(struct cgroup *cgrp, struct cftype *cft,
1212 const char *buf)
1213{
1214 return blkcg_set_conf_u64(cgrp, cft, buf, READ, blkio_update_group_bps);
1215}
1216
1217static int blkcg_set_conf_bps_w(struct cgroup *cgrp, struct cftype *cft,
1218 const char *buf)
1219{
1220 return blkcg_set_conf_u64(cgrp, cft, buf, WRITE, blkio_update_group_bps);
1221}
1222
1223static int blkcg_set_conf_iops_r(struct cgroup *cgrp, struct cftype *cft,
1224 const char *buf)
1225{
1226 return blkcg_set_conf_u64(cgrp, cft, buf, READ, blkio_update_group_iops);
1227}
1228
1229static int blkcg_set_conf_iops_w(struct cgroup *cgrp, struct cftype *cft,
1230 const char *buf)
1231{
1232 return blkcg_set_conf_u64(cgrp, cft, buf, WRITE, blkio_update_group_iops);
1233}
c4682aec 1234#endif
062a644d 1235
31e4c28d 1236struct cftype blkio_files[] = {
34d0f179
GJ
1237 {
1238 .name = "weight_device",
c4682aec 1239 .read_seq_string = blkcg_print_weight_device,
3a8b31d3 1240 .write_string = blkcg_set_weight_device,
34d0f179
GJ
1241 .max_write_len = 256,
1242 },
31e4c28d
VG
1243 {
1244 .name = "weight",
c4682aec 1245 .read_seq_string = blkcg_print_weight,
627f29f4 1246 .write_u64 = blkcg_set_weight,
31e4c28d 1247 },
22084190
VG
1248 {
1249 .name = "time",
d3d32e69
TH
1250 .private = BLKCG_STAT_PRIV(BLKIO_POLICY_PROP,
1251 offsetof(struct blkio_group_stats, time)),
1252 .read_seq_string = blkcg_print_stat,
22084190
VG
1253 },
1254 {
1255 .name = "sectors",
d3d32e69
TH
1256 .private = BLKCG_STAT_PRIV(BLKIO_POLICY_PROP,
1257 offsetof(struct blkio_group_stats_cpu, sectors)),
1258 .read_seq_string = blkcg_print_cpu_stat,
303a3acb
DS
1259 },
1260 {
1261 .name = "io_service_bytes",
d3d32e69
TH
1262 .private = BLKCG_STAT_PRIV(BLKIO_POLICY_PROP,
1263 offsetof(struct blkio_group_stats_cpu, service_bytes)),
1264 .read_seq_string = blkcg_print_cpu_rwstat,
303a3acb
DS
1265 },
1266 {
1267 .name = "io_serviced",
d3d32e69
TH
1268 .private = BLKCG_STAT_PRIV(BLKIO_POLICY_PROP,
1269 offsetof(struct blkio_group_stats_cpu, serviced)),
1270 .read_seq_string = blkcg_print_cpu_rwstat,
303a3acb
DS
1271 },
1272 {
1273 .name = "io_service_time",
d3d32e69
TH
1274 .private = BLKCG_STAT_PRIV(BLKIO_POLICY_PROP,
1275 offsetof(struct blkio_group_stats, service_time)),
1276 .read_seq_string = blkcg_print_rwstat,
303a3acb
DS
1277 },
1278 {
1279 .name = "io_wait_time",
d3d32e69
TH
1280 .private = BLKCG_STAT_PRIV(BLKIO_POLICY_PROP,
1281 offsetof(struct blkio_group_stats, wait_time)),
1282 .read_seq_string = blkcg_print_rwstat,
84c124da 1283 },
812d4026
DS
1284 {
1285 .name = "io_merged",
d3d32e69
TH
1286 .private = BLKCG_STAT_PRIV(BLKIO_POLICY_PROP,
1287 offsetof(struct blkio_group_stats, merged)),
1288 .read_seq_string = blkcg_print_rwstat,
812d4026 1289 },
cdc1184c
DS
1290 {
1291 .name = "io_queued",
d3d32e69
TH
1292 .private = BLKCG_STAT_PRIV(BLKIO_POLICY_PROP,
1293 offsetof(struct blkio_group_stats, queued)),
1294 .read_seq_string = blkcg_print_rwstat,
cdc1184c 1295 },
84c124da
DS
1296 {
1297 .name = "reset_stats",
1298 .write_u64 = blkiocg_reset_stats,
22084190 1299 },
13f98250
VG
1300#ifdef CONFIG_BLK_DEV_THROTTLING
1301 {
1302 .name = "throttle.read_bps_device",
3a8b31d3 1303 .private = offsetof(struct blkio_group_conf, bps[READ]),
c4682aec 1304 .read_seq_string = blkcg_print_conf_u64,
3a8b31d3 1305 .write_string = blkcg_set_conf_bps_r,
13f98250
VG
1306 .max_write_len = 256,
1307 },
1308
1309 {
1310 .name = "throttle.write_bps_device",
3a8b31d3 1311 .private = offsetof(struct blkio_group_conf, bps[WRITE]),
c4682aec 1312 .read_seq_string = blkcg_print_conf_u64,
3a8b31d3 1313 .write_string = blkcg_set_conf_bps_w,
13f98250
VG
1314 .max_write_len = 256,
1315 },
1316
1317 {
1318 .name = "throttle.read_iops_device",
3a8b31d3 1319 .private = offsetof(struct blkio_group_conf, iops[READ]),
c4682aec 1320 .read_seq_string = blkcg_print_conf_u64,
3a8b31d3 1321 .write_string = blkcg_set_conf_iops_r,
13f98250
VG
1322 .max_write_len = 256,
1323 },
1324
1325 {
1326 .name = "throttle.write_iops_device",
3a8b31d3 1327 .private = offsetof(struct blkio_group_conf, iops[WRITE]),
c4682aec 1328 .read_seq_string = blkcg_print_conf_u64,
3a8b31d3 1329 .write_string = blkcg_set_conf_iops_w,
13f98250
VG
1330 .max_write_len = 256,
1331 },
1332 {
1333 .name = "throttle.io_service_bytes",
d3d32e69
TH
1334 .private = BLKCG_STAT_PRIV(BLKIO_POLICY_THROTL,
1335 offsetof(struct blkio_group_stats_cpu, service_bytes)),
1336 .read_seq_string = blkcg_print_cpu_rwstat,
13f98250
VG
1337 },
1338 {
1339 .name = "throttle.io_serviced",
d3d32e69
TH
1340 .private = BLKCG_STAT_PRIV(BLKIO_POLICY_THROTL,
1341 offsetof(struct blkio_group_stats_cpu, serviced)),
1342 .read_seq_string = blkcg_print_cpu_rwstat,
13f98250
VG
1343 },
1344#endif /* CONFIG_BLK_DEV_THROTTLING */
1345
22084190 1346#ifdef CONFIG_DEBUG_BLK_CGROUP
cdc1184c
DS
1347 {
1348 .name = "avg_queue_size",
d3d32e69 1349 .read_seq_string = blkcg_print_avg_queue_size,
cdc1184c 1350 },
812df48d
DS
1351 {
1352 .name = "group_wait_time",
d3d32e69
TH
1353 .private = BLKCG_STAT_PRIV(BLKIO_POLICY_PROP,
1354 offsetof(struct blkio_group_stats, group_wait_time)),
1355 .read_seq_string = blkcg_print_stat,
812df48d
DS
1356 },
1357 {
1358 .name = "idle_time",
d3d32e69
TH
1359 .private = BLKCG_STAT_PRIV(BLKIO_POLICY_PROP,
1360 offsetof(struct blkio_group_stats, idle_time)),
1361 .read_seq_string = blkcg_print_stat,
812df48d
DS
1362 },
1363 {
1364 .name = "empty_time",
d3d32e69
TH
1365 .private = BLKCG_STAT_PRIV(BLKIO_POLICY_PROP,
1366 offsetof(struct blkio_group_stats, empty_time)),
1367 .read_seq_string = blkcg_print_stat,
812df48d 1368 },
cdc1184c 1369 {
22084190 1370 .name = "dequeue",
d3d32e69
TH
1371 .private = BLKCG_STAT_PRIV(BLKIO_POLICY_PROP,
1372 offsetof(struct blkio_group_stats, dequeue)),
1373 .read_seq_string = blkcg_print_stat,
cdc1184c 1374 },
9026e521
JT
1375 {
1376 .name = "unaccounted_time",
d3d32e69
TH
1377 .private = BLKCG_STAT_PRIV(BLKIO_POLICY_PROP,
1378 offsetof(struct blkio_group_stats, unaccounted_time)),
1379 .read_seq_string = blkcg_print_stat,
9026e521 1380 },
22084190 1381#endif
4baf6e33 1382 { } /* terminate */
31e4c28d
VG
1383};
1384
9f13ef67
TH
1385/**
1386 * blkiocg_pre_destroy - cgroup pre_destroy callback
9f13ef67
TH
1387 * @cgroup: cgroup of interest
1388 *
1389 * This function is called when @cgroup is about to go away and is responsible
1390 * for shooting down all blkgs associated with @cgroup. blkgs should be
1391 * removed while holding both q and blkcg locks. As blkcg lock is nested
1392 * inside q lock, this function performs reverse double lock dancing.
1393 *
1394 * This is the blkcg counterpart of ioc_release_fn().
1395 */
959d851c 1396static int blkiocg_pre_destroy(struct cgroup *cgroup)
31e4c28d
VG
1397{
1398 struct blkio_cgroup *blkcg = cgroup_to_blkio_cgroup(cgroup);
b1c35769 1399
9f13ef67 1400 spin_lock_irq(&blkcg->lock);
7ee9c562 1401
9f13ef67
TH
1402 while (!hlist_empty(&blkcg->blkg_list)) {
1403 struct blkio_group *blkg = hlist_entry(blkcg->blkg_list.first,
1404 struct blkio_group, blkcg_node);
c875f4d0 1405 struct request_queue *q = blkg->q;
b1c35769 1406
9f13ef67
TH
1407 if (spin_trylock(q->queue_lock)) {
1408 blkg_destroy(blkg);
1409 spin_unlock(q->queue_lock);
1410 } else {
1411 spin_unlock_irq(&blkcg->lock);
9f13ef67 1412 cpu_relax();
a5567932 1413 spin_lock_irq(&blkcg->lock);
0f3942a3 1414 }
9f13ef67 1415 }
b1c35769 1416
9f13ef67 1417 spin_unlock_irq(&blkcg->lock);
7ee9c562
TH
1418 return 0;
1419}
1420
959d851c 1421static void blkiocg_destroy(struct cgroup *cgroup)
7ee9c562
TH
1422{
1423 struct blkio_cgroup *blkcg = cgroup_to_blkio_cgroup(cgroup);
1424
67523c48
BB
1425 if (blkcg != &blkio_root_cgroup)
1426 kfree(blkcg);
31e4c28d
VG
1427}
1428
761b3ef5 1429static struct cgroup_subsys_state *blkiocg_create(struct cgroup *cgroup)
31e4c28d 1430{
9a9e8a26 1431 static atomic64_t id_seq = ATOMIC64_INIT(0);
0341509f
LZ
1432 struct blkio_cgroup *blkcg;
1433 struct cgroup *parent = cgroup->parent;
31e4c28d 1434
0341509f 1435 if (!parent) {
31e4c28d
VG
1436 blkcg = &blkio_root_cgroup;
1437 goto done;
1438 }
1439
31e4c28d
VG
1440 blkcg = kzalloc(sizeof(*blkcg), GFP_KERNEL);
1441 if (!blkcg)
1442 return ERR_PTR(-ENOMEM);
1443
1444 blkcg->weight = BLKIO_WEIGHT_DEFAULT;
9a9e8a26 1445 blkcg->id = atomic64_inc_return(&id_seq); /* root is 0, start from 1 */
31e4c28d
VG
1446done:
1447 spin_lock_init(&blkcg->lock);
1448 INIT_HLIST_HEAD(&blkcg->blkg_list);
1449
1450 return &blkcg->css;
1451}
1452
5efd6113
TH
1453/**
1454 * blkcg_init_queue - initialize blkcg part of request queue
1455 * @q: request_queue to initialize
1456 *
1457 * Called from blk_alloc_queue_node(). Responsible for initializing blkcg
1458 * part of new request_queue @q.
1459 *
1460 * RETURNS:
1461 * 0 on success, -errno on failure.
1462 */
1463int blkcg_init_queue(struct request_queue *q)
1464{
923adde1
TH
1465 int ret;
1466
5efd6113
TH
1467 might_sleep();
1468
923adde1
TH
1469 ret = blk_throtl_init(q);
1470 if (ret)
1471 return ret;
1472
1473 mutex_lock(&all_q_mutex);
1474 INIT_LIST_HEAD(&q->all_q_node);
1475 list_add_tail(&q->all_q_node, &all_q_list);
1476 mutex_unlock(&all_q_mutex);
1477
1478 return 0;
5efd6113
TH
1479}
1480
1481/**
1482 * blkcg_drain_queue - drain blkcg part of request_queue
1483 * @q: request_queue to drain
1484 *
1485 * Called from blk_drain_queue(). Responsible for draining blkcg part.
1486 */
1487void blkcg_drain_queue(struct request_queue *q)
1488{
1489 lockdep_assert_held(q->queue_lock);
1490
1491 blk_throtl_drain(q);
1492}
1493
1494/**
1495 * blkcg_exit_queue - exit and release blkcg part of request_queue
1496 * @q: request_queue being released
1497 *
1498 * Called from blk_release_queue(). Responsible for exiting blkcg part.
1499 */
1500void blkcg_exit_queue(struct request_queue *q)
1501{
923adde1
TH
1502 mutex_lock(&all_q_mutex);
1503 list_del_init(&q->all_q_node);
1504 mutex_unlock(&all_q_mutex);
1505
e8989fae
TH
1506 blkg_destroy_all(q, true);
1507
5efd6113
TH
1508 blk_throtl_exit(q);
1509}
1510
31e4c28d
VG
1511/*
1512 * We cannot support shared io contexts, as we have no means to support
1513 * two tasks with the same ioc in two different groups without major rework
1514 * of the main cic data structures. For now we allow a task to change
1515 * its cgroup only if it's the only owner of its ioc.
1516 */
761b3ef5 1517static int blkiocg_can_attach(struct cgroup *cgrp, struct cgroup_taskset *tset)
31e4c28d 1518{
bb9d97b6 1519 struct task_struct *task;
31e4c28d
VG
1520 struct io_context *ioc;
1521 int ret = 0;
1522
1523 /* task_lock() is needed to avoid races with exit_io_context() */
bb9d97b6
TH
1524 cgroup_taskset_for_each(task, cgrp, tset) {
1525 task_lock(task);
1526 ioc = task->io_context;
1527 if (ioc && atomic_read(&ioc->nr_tasks) > 1)
1528 ret = -EINVAL;
1529 task_unlock(task);
1530 if (ret)
1531 break;
1532 }
31e4c28d
VG
1533 return ret;
1534}
1535
923adde1
TH
1536static void blkcg_bypass_start(void)
1537 __acquires(&all_q_mutex)
1538{
1539 struct request_queue *q;
1540
1541 mutex_lock(&all_q_mutex);
1542
1543 list_for_each_entry(q, &all_q_list, all_q_node) {
1544 blk_queue_bypass_start(q);
e8989fae 1545 blkg_destroy_all(q, false);
923adde1
TH
1546 }
1547}
1548
1549static void blkcg_bypass_end(void)
1550 __releases(&all_q_mutex)
1551{
1552 struct request_queue *q;
1553
1554 list_for_each_entry(q, &all_q_list, all_q_node)
1555 blk_queue_bypass_end(q);
1556
1557 mutex_unlock(&all_q_mutex);
1558}
1559
676f7c8f
TH
1560struct cgroup_subsys blkio_subsys = {
1561 .name = "blkio",
1562 .create = blkiocg_create,
1563 .can_attach = blkiocg_can_attach,
959d851c 1564 .pre_destroy = blkiocg_pre_destroy,
676f7c8f 1565 .destroy = blkiocg_destroy,
676f7c8f 1566 .subsys_id = blkio_subsys_id,
4baf6e33 1567 .base_cftypes = blkio_files,
676f7c8f
TH
1568 .module = THIS_MODULE,
1569};
1570EXPORT_SYMBOL_GPL(blkio_subsys);
1571
3e252066
VG
1572void blkio_policy_register(struct blkio_policy_type *blkiop)
1573{
e8989fae
TH
1574 struct request_queue *q;
1575
923adde1 1576 blkcg_bypass_start();
3e252066 1577 spin_lock(&blkio_list_lock);
035d10b2
TH
1578
1579 BUG_ON(blkio_policy[blkiop->plid]);
1580 blkio_policy[blkiop->plid] = blkiop;
3e252066 1581 list_add_tail(&blkiop->list, &blkio_list);
035d10b2 1582
3e252066 1583 spin_unlock(&blkio_list_lock);
e8989fae
TH
1584 list_for_each_entry(q, &all_q_list, all_q_node)
1585 update_root_blkg_pd(q, blkiop->plid);
923adde1 1586 blkcg_bypass_end();
3e252066
VG
1587}
1588EXPORT_SYMBOL_GPL(blkio_policy_register);
1589
1590void blkio_policy_unregister(struct blkio_policy_type *blkiop)
1591{
e8989fae
TH
1592 struct request_queue *q;
1593
923adde1 1594 blkcg_bypass_start();
3e252066 1595 spin_lock(&blkio_list_lock);
035d10b2
TH
1596
1597 BUG_ON(blkio_policy[blkiop->plid] != blkiop);
1598 blkio_policy[blkiop->plid] = NULL;
3e252066 1599 list_del_init(&blkiop->list);
035d10b2 1600
3e252066 1601 spin_unlock(&blkio_list_lock);
e8989fae
TH
1602 list_for_each_entry(q, &all_q_list, all_q_node)
1603 update_root_blkg_pd(q, blkiop->plid);
923adde1 1604 blkcg_bypass_end();
3e252066
VG
1605}
1606EXPORT_SYMBOL_GPL(blkio_policy_unregister);
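/*
 * A minimal sketch of how a policy might plug into the hooks above
 * (illustrative only).  The example policy, its per-group data and init
 * function are hypothetical; a real policy supplies its own blkio_policy_id
 * and keeps per-policy data in the pdata_size bytes reserved behind
 * blkg->pd[plid].
 */
struct example_grp_data {
	unsigned int weight;
};

static void example_init_group(struct blkio_group *blkg)
{
	/* initialize the per-policy data reserved after blkg_policy_data */
}

static struct blkio_policy_type blkio_policy_example = {
	.ops = {
		.blkio_init_group_fn	= example_init_group,
	},
	.plid		= BLKIO_POLICY_PROP,	/* stand-in; use the policy's own id */
	.pdata_size	= sizeof(struct example_grp_data),
};

static int __init example_init(void)
{
	blkio_policy_register(&blkio_policy_example);
	return 0;
}
module_init(example_init);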