io-controller: Document for blkio.weight_device
block/blk-cgroup.c

/*
 * Common Block IO controller cgroup interface
 *
 * Based on ideas and code from CFQ, CFS and BFQ:
 * Copyright (C) 2003 Jens Axboe <axboe@kernel.dk>
 *
 * Copyright (C) 2008 Fabio Checconi <fabio@gandalf.sssup.it>
 *		      Paolo Valente <paolo.valente@unimore.it>
 *
 * Copyright (C) 2009 Vivek Goyal <vgoyal@redhat.com>
 *		      Nauman Rafique <nauman@google.com>
 */
#include <linux/ioprio.h>
#include <linux/seq_file.h>
#include <linux/kdev_t.h>
#include <linux/module.h>
#include <linux/err.h>
#include <linux/blkdev.h>
#include "blk-cgroup.h"
#include <linux/genhd.h>

#define MAX_KEY_LEN 100

static DEFINE_SPINLOCK(blkio_list_lock);
static LIST_HEAD(blkio_list);

struct blkio_cgroup blkio_root_cgroup = { .weight = 2*BLKIO_WEIGHT_DEFAULT };
EXPORT_SYMBOL_GPL(blkio_root_cgroup);

static struct cgroup_subsys_state *blkiocg_create(struct cgroup_subsys *,
						  struct cgroup *);
static int blkiocg_can_attach(struct cgroup_subsys *, struct cgroup *,
			      struct task_struct *, bool);
static void blkiocg_attach(struct cgroup_subsys *, struct cgroup *,
			   struct cgroup *, struct task_struct *, bool);
static void blkiocg_destroy(struct cgroup_subsys *, struct cgroup *);
static int blkiocg_populate(struct cgroup_subsys *, struct cgroup *);

struct cgroup_subsys blkio_subsys = {
	.name = "blkio",
	.create = blkiocg_create,
	.can_attach = blkiocg_can_attach,
	.attach = blkiocg_attach,
	.destroy = blkiocg_destroy,
	.populate = blkiocg_populate,
#ifdef CONFIG_BLK_CGROUP
	/* note: blkio_subsys_id is otherwise defined in blk-cgroup.h */
	.subsys_id = blkio_subsys_id,
#endif
	.use_id = 1,
	.module = THIS_MODULE,
};
EXPORT_SYMBOL_GPL(blkio_subsys);

static inline void blkio_policy_insert_node(struct blkio_cgroup *blkcg,
					    struct blkio_policy_node *pn)
{
	list_add(&pn->node, &blkcg->policy_list);
}

/* Must be called with blkcg->lock held */
static inline void blkio_policy_delete_node(struct blkio_policy_node *pn)
{
	list_del(&pn->node);
}

/* Must be called with blkcg->lock held */
static struct blkio_policy_node *
blkio_policy_search_node(const struct blkio_cgroup *blkcg, dev_t dev)
{
	struct blkio_policy_node *pn;

	list_for_each_entry(pn, &blkcg->policy_list, node) {
		if (pn->dev == dev)
			return pn;
	}

	return NULL;
}

struct blkio_cgroup *cgroup_to_blkio_cgroup(struct cgroup *cgroup)
{
	return container_of(cgroup_subsys_state(cgroup, blkio_subsys_id),
			    struct blkio_cgroup, css);
}
EXPORT_SYMBOL_GPL(cgroup_to_blkio_cgroup);

void blkio_group_init(struct blkio_group *blkg)
{
	spin_lock_init(&blkg->stats_lock);
}
EXPORT_SYMBOL_GPL(blkio_group_init);

/*
 * Add to the appropriate stat variable depending on the request type.
 * This should be called with the blkg->stats_lock held.
 */
static void blkio_add_stat(uint64_t *stat, uint64_t add, bool direction,
			   bool sync)
{
	if (direction)
		stat[BLKIO_STAT_WRITE] += add;
	else
		stat[BLKIO_STAT_READ] += add;
	if (sync)
		stat[BLKIO_STAT_SYNC] += add;
	else
		stat[BLKIO_STAT_ASYNC] += add;
}

/*
 * Decrements the appropriate stat variable if non-zero depending on the
 * request type. Panics on value being zero.
 * This should be called with the blkg->stats_lock held.
 */
static void blkio_check_and_dec_stat(uint64_t *stat, bool direction, bool sync)
{
	if (direction) {
		BUG_ON(stat[BLKIO_STAT_WRITE] == 0);
		stat[BLKIO_STAT_WRITE]--;
	} else {
		BUG_ON(stat[BLKIO_STAT_READ] == 0);
		stat[BLKIO_STAT_READ]--;
	}
	if (sync) {
		BUG_ON(stat[BLKIO_STAT_SYNC] == 0);
		stat[BLKIO_STAT_SYNC]--;
	} else {
		BUG_ON(stat[BLKIO_STAT_ASYNC] == 0);
		stat[BLKIO_STAT_ASYNC]--;
	}
}

#ifdef CONFIG_DEBUG_BLK_CGROUP
/* This should be called with the blkg->stats_lock held. */
static void blkio_set_start_group_wait_time(struct blkio_group *blkg,
					    struct blkio_group *curr_blkg)
{
	if (blkio_blkg_waiting(&blkg->stats))
		return;
	if (blkg == curr_blkg)
		return;
	blkg->stats.start_group_wait_time = sched_clock();
	blkio_mark_blkg_waiting(&blkg->stats);
}

/* This should be called with the blkg->stats_lock held. */
static void blkio_update_group_wait_time(struct blkio_group_stats *stats)
{
	unsigned long long now;

	if (!blkio_blkg_waiting(stats))
		return;

	now = sched_clock();
	if (time_after64(now, stats->start_group_wait_time))
		stats->group_wait_time += now - stats->start_group_wait_time;
	blkio_clear_blkg_waiting(stats);
}

/* This should be called with the blkg->stats_lock held. */
static void blkio_end_empty_time(struct blkio_group_stats *stats)
{
	unsigned long long now;

	if (!blkio_blkg_empty(stats))
		return;

	now = sched_clock();
	if (time_after64(now, stats->start_empty_time))
		stats->empty_time += now - stats->start_empty_time;
	blkio_clear_blkg_empty(stats);
}

void blkiocg_update_set_idle_time_stats(struct blkio_group *blkg)
{
	unsigned long flags;

	spin_lock_irqsave(&blkg->stats_lock, flags);
	BUG_ON(blkio_blkg_idling(&blkg->stats));
	blkg->stats.start_idle_time = sched_clock();
	blkio_mark_blkg_idling(&blkg->stats);
	spin_unlock_irqrestore(&blkg->stats_lock, flags);
}
EXPORT_SYMBOL_GPL(blkiocg_update_set_idle_time_stats);

void blkiocg_update_idle_time_stats(struct blkio_group *blkg)
{
	unsigned long flags;
	unsigned long long now;
	struct blkio_group_stats *stats;

	spin_lock_irqsave(&blkg->stats_lock, flags);
	stats = &blkg->stats;
	if (blkio_blkg_idling(stats)) {
		now = sched_clock();
		if (time_after64(now, stats->start_idle_time))
			stats->idle_time += now - stats->start_idle_time;
		blkio_clear_blkg_idling(stats);
	}
	spin_unlock_irqrestore(&blkg->stats_lock, flags);
}
EXPORT_SYMBOL_GPL(blkiocg_update_idle_time_stats);

void blkiocg_update_set_active_queue_stats(struct blkio_group *blkg)
{
	unsigned long flags;
	struct blkio_group_stats *stats;

	spin_lock_irqsave(&blkg->stats_lock, flags);
	stats = &blkg->stats;
	stats->avg_queue_size_sum +=
			stats->stat_arr[BLKIO_STAT_QUEUED][BLKIO_STAT_READ] +
			stats->stat_arr[BLKIO_STAT_QUEUED][BLKIO_STAT_WRITE];
	stats->avg_queue_size_samples++;
	blkio_update_group_wait_time(stats);
	spin_unlock_irqrestore(&blkg->stats_lock, flags);
}
EXPORT_SYMBOL_GPL(blkiocg_update_set_active_queue_stats);
#else
static inline void blkio_set_start_group_wait_time(struct blkio_group *blkg,
					struct blkio_group *curr_blkg) {}
static inline void blkio_end_empty_time(struct blkio_group_stats *stats) {}
#endif

void blkiocg_update_request_add_stats(struct blkio_group *blkg,
			struct blkio_group *curr_blkg, bool direction,
			bool sync)
{
	unsigned long flags;

	spin_lock_irqsave(&blkg->stats_lock, flags);
	blkio_add_stat(blkg->stats.stat_arr[BLKIO_STAT_QUEUED], 1, direction,
			sync);
	blkio_end_empty_time(&blkg->stats);
	blkio_set_start_group_wait_time(blkg, curr_blkg);
	spin_unlock_irqrestore(&blkg->stats_lock, flags);
}
EXPORT_SYMBOL_GPL(blkiocg_update_request_add_stats);

void blkiocg_update_request_remove_stats(struct blkio_group *blkg,
						bool direction, bool sync)
{
	unsigned long flags;

	spin_lock_irqsave(&blkg->stats_lock, flags);
	blkio_check_and_dec_stat(blkg->stats.stat_arr[BLKIO_STAT_QUEUED],
					direction, sync);
	spin_unlock_irqrestore(&blkg->stats_lock, flags);
}
EXPORT_SYMBOL_GPL(blkiocg_update_request_remove_stats);

void blkiocg_update_timeslice_used(struct blkio_group *blkg, unsigned long time)
{
	unsigned long flags;

	spin_lock_irqsave(&blkg->stats_lock, flags);
	blkg->stats.time += time;
	spin_unlock_irqrestore(&blkg->stats_lock, flags);
}
EXPORT_SYMBOL_GPL(blkiocg_update_timeslice_used);

void blkiocg_set_start_empty_time(struct blkio_group *blkg, bool ignore)
{
	unsigned long flags;
	struct blkio_group_stats *stats;

	spin_lock_irqsave(&blkg->stats_lock, flags);
	stats = &blkg->stats;

	if (stats->stat_arr[BLKIO_STAT_QUEUED][BLKIO_STAT_READ] ||
			stats->stat_arr[BLKIO_STAT_QUEUED][BLKIO_STAT_WRITE]) {
		spin_unlock_irqrestore(&blkg->stats_lock, flags);
		return;
	}

	/*
	 * If ignore is set, we do not panic on the empty flag being set
	 * already. This is to avoid cases where there are superfluous
	 * timeslice complete events (e.g., forced_dispatch in CFQ) when no
	 * IOs are served, which could trigger the empty check incorrectly.
	 */
	BUG_ON(!ignore && blkio_blkg_empty(stats));
	stats->start_empty_time = sched_clock();
	blkio_mark_blkg_empty(stats);
	spin_unlock_irqrestore(&blkg->stats_lock, flags);
}
EXPORT_SYMBOL_GPL(blkiocg_set_start_empty_time);

void blkiocg_update_dispatch_stats(struct blkio_group *blkg,
				uint64_t bytes, bool direction, bool sync)
{
	struct blkio_group_stats *stats;
	unsigned long flags;

	spin_lock_irqsave(&blkg->stats_lock, flags);
	stats = &blkg->stats;
	stats->sectors += bytes >> 9;
	blkio_add_stat(stats->stat_arr[BLKIO_STAT_SERVICED], 1, direction,
			sync);
	blkio_add_stat(stats->stat_arr[BLKIO_STAT_SERVICE_BYTES], bytes,
			direction, sync);
	spin_unlock_irqrestore(&blkg->stats_lock, flags);
}
EXPORT_SYMBOL_GPL(blkiocg_update_dispatch_stats);

void blkiocg_update_completion_stats(struct blkio_group *blkg,
	uint64_t start_time, uint64_t io_start_time, bool direction, bool sync)
{
	struct blkio_group_stats *stats;
	unsigned long flags;
	unsigned long long now = sched_clock();

	spin_lock_irqsave(&blkg->stats_lock, flags);
	stats = &blkg->stats;
	if (time_after64(now, io_start_time))
		blkio_add_stat(stats->stat_arr[BLKIO_STAT_SERVICE_TIME],
				now - io_start_time, direction, sync);
	if (time_after64(io_start_time, start_time))
		blkio_add_stat(stats->stat_arr[BLKIO_STAT_WAIT_TIME],
				io_start_time - start_time, direction, sync);
	spin_unlock_irqrestore(&blkg->stats_lock, flags);
}
EXPORT_SYMBOL_GPL(blkiocg_update_completion_stats);

void blkiocg_update_io_merged_stats(struct blkio_group *blkg, bool direction,
					bool sync)
{
	unsigned long flags;

	spin_lock_irqsave(&blkg->stats_lock, flags);
	blkio_add_stat(blkg->stats.stat_arr[BLKIO_STAT_MERGED], 1, direction,
			sync);
	spin_unlock_irqrestore(&blkg->stats_lock, flags);
}
EXPORT_SYMBOL_GPL(blkiocg_update_io_merged_stats);

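/*
 * Link a blkio_group into the cgroup's blkg_list under blkcg->lock and
 * record the opaque per-policy key and the device number it serves.
 */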
void blkiocg_add_blkio_group(struct blkio_cgroup *blkcg,
			struct blkio_group *blkg, void *key, dev_t dev)
{
	unsigned long flags;

	spin_lock_irqsave(&blkcg->lock, flags);
	rcu_assign_pointer(blkg->key, key);
	blkg->blkcg_id = css_id(&blkcg->css);
	hlist_add_head_rcu(&blkg->blkcg_node, &blkcg->blkg_list);
	spin_unlock_irqrestore(&blkcg->lock, flags);
#ifdef CONFIG_DEBUG_BLK_CGROUP
	/* Need to take css reference ? */
	cgroup_path(blkcg->css.cgroup, blkg->path, sizeof(blkg->path));
#endif
	blkg->dev = dev;
}
EXPORT_SYMBOL_GPL(blkiocg_add_blkio_group);

static void __blkiocg_del_blkio_group(struct blkio_group *blkg)
{
	hlist_del_init_rcu(&blkg->blkcg_node);
	blkg->blkcg_id = 0;
}

/*
 * Returns 0 if the blkio_group was still on the cgroup list. Otherwise
 * returns 1, indicating that the blkio_group was already unhashed by the
 * time we got to it.
 */
int blkiocg_del_blkio_group(struct blkio_group *blkg)
{
	struct blkio_cgroup *blkcg;
	unsigned long flags;
	struct cgroup_subsys_state *css;
	int ret = 1;

	rcu_read_lock();
	css = css_lookup(&blkio_subsys, blkg->blkcg_id);
	if (!css)
		goto out;

	blkcg = container_of(css, struct blkio_cgroup, css);
	spin_lock_irqsave(&blkcg->lock, flags);
	if (!hlist_unhashed(&blkg->blkcg_node)) {
		__blkiocg_del_blkio_group(blkg);
		ret = 0;
	}
	spin_unlock_irqrestore(&blkcg->lock, flags);
out:
	rcu_read_unlock();
	return ret;
}
EXPORT_SYMBOL_GPL(blkiocg_del_blkio_group);

/* called under rcu_read_lock(). */
struct blkio_group *blkiocg_lookup_group(struct blkio_cgroup *blkcg, void *key)
{
	struct blkio_group *blkg;
	struct hlist_node *n;
	void *__key;

	hlist_for_each_entry_rcu(blkg, n, &blkcg->blkg_list, blkcg_node) {
		__key = blkg->key;
		if (__key == key)
			return blkg;
	}

	return NULL;
}
EXPORT_SYMBOL_GPL(blkiocg_lookup_group);

#define SHOW_FUNCTION(__VAR)						\
static u64 blkiocg_##__VAR##_read(struct cgroup *cgroup,		\
				  struct cftype *cftype)		\
{									\
	struct blkio_cgroup *blkcg;					\
									\
	blkcg = cgroup_to_blkio_cgroup(cgroup);				\
	return (u64)blkcg->__VAR;					\
}

SHOW_FUNCTION(weight);
#undef SHOW_FUNCTION

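/*
 * Update the cgroup's default weight. The new weight is propagated to every
 * blkio_group in the cgroup except those with a per-device override (a
 * matching blkio_policy_node), which keep their per-device weight.
 */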
static int
blkiocg_weight_write(struct cgroup *cgroup, struct cftype *cftype, u64 val)
{
	struct blkio_cgroup *blkcg;
	struct blkio_group *blkg;
	struct hlist_node *n;
	struct blkio_policy_type *blkiop;
	struct blkio_policy_node *pn;

	if (val < BLKIO_WEIGHT_MIN || val > BLKIO_WEIGHT_MAX)
		return -EINVAL;

	blkcg = cgroup_to_blkio_cgroup(cgroup);
	spin_lock(&blkio_list_lock);
	spin_lock_irq(&blkcg->lock);
	blkcg->weight = (unsigned int)val;

	hlist_for_each_entry(blkg, n, &blkcg->blkg_list, blkcg_node) {
		pn = blkio_policy_search_node(blkcg, blkg->dev);

		if (pn)
			continue;

		list_for_each_entry(blkiop, &blkio_list, list)
			blkiop->ops.blkio_update_group_weight_fn(blkg,
					blkcg->weight);
	}
	spin_unlock_irq(&blkcg->lock);
	spin_unlock(&blkio_list_lock);
	return 0;
}

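/*
 * Reset all per-group statistics for the cgroup. The currently queued
 * request counts and the in-progress idling/waiting/empty state are carried
 * over (with their start times reset to now) so that accounting of requests
 * already in flight stays consistent.
 */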
static int
blkiocg_reset_stats(struct cgroup *cgroup, struct cftype *cftype, u64 val)
{
	struct blkio_cgroup *blkcg;
	struct blkio_group *blkg;
	struct blkio_group_stats *stats;
	struct hlist_node *n;
	uint64_t queued[BLKIO_STAT_TOTAL];
	int i;
#ifdef CONFIG_DEBUG_BLK_CGROUP
	bool idling, waiting, empty;
	unsigned long long now = sched_clock();
#endif

	blkcg = cgroup_to_blkio_cgroup(cgroup);
	spin_lock_irq(&blkcg->lock);
	hlist_for_each_entry(blkg, n, &blkcg->blkg_list, blkcg_node) {
		spin_lock(&blkg->stats_lock);
		stats = &blkg->stats;
#ifdef CONFIG_DEBUG_BLK_CGROUP
		idling = blkio_blkg_idling(stats);
		waiting = blkio_blkg_waiting(stats);
		empty = blkio_blkg_empty(stats);
#endif
		for (i = 0; i < BLKIO_STAT_TOTAL; i++)
			queued[i] = stats->stat_arr[BLKIO_STAT_QUEUED][i];
		memset(stats, 0, sizeof(struct blkio_group_stats));
		for (i = 0; i < BLKIO_STAT_TOTAL; i++)
			stats->stat_arr[BLKIO_STAT_QUEUED][i] = queued[i];
#ifdef CONFIG_DEBUG_BLK_CGROUP
		if (idling) {
			blkio_mark_blkg_idling(stats);
			stats->start_idle_time = now;
		}
		if (waiting) {
			blkio_mark_blkg_waiting(stats);
			stats->start_group_wait_time = now;
		}
		if (empty) {
			blkio_mark_blkg_empty(stats);
			stats->start_empty_time = now;
		}
#endif
		spin_unlock(&blkg->stats_lock);
	}
	spin_unlock_irq(&blkcg->lock);
	return 0;
}

static void blkio_get_key_name(enum stat_sub_type type, dev_t dev, char *str,
				int chars_left, bool diskname_only)
{
	snprintf(str, chars_left, "%d:%d", MAJOR(dev), MINOR(dev));
	chars_left -= strlen(str);
	if (chars_left <= 0) {
		printk(KERN_WARNING
			"Possibly incorrect cgroup stat display format");
		return;
	}
	if (diskname_only)
		return;
	switch (type) {
	case BLKIO_STAT_READ:
		strlcat(str, " Read", chars_left);
		break;
	case BLKIO_STAT_WRITE:
		strlcat(str, " Write", chars_left);
		break;
	case BLKIO_STAT_SYNC:
		strlcat(str, " Sync", chars_left);
		break;
	case BLKIO_STAT_ASYNC:
		strlcat(str, " Async", chars_left);
		break;
	case BLKIO_STAT_TOTAL:
		strlcat(str, " Total", chars_left);
		break;
	default:
		strlcat(str, " Invalid", chars_left);
	}
}

static uint64_t blkio_fill_stat(char *str, int chars_left, uint64_t val,
				struct cgroup_map_cb *cb, dev_t dev)
{
	blkio_get_key_name(0, dev, str, chars_left, true);
	cb->fill(cb, str, val);
	return val;
}

/* This should be called with blkg->stats_lock held */
static uint64_t blkio_get_stat(struct blkio_group *blkg,
		struct cgroup_map_cb *cb, dev_t dev, enum stat_type type)
{
	uint64_t disk_total;
	char key_str[MAX_KEY_LEN];
	enum stat_sub_type sub_type;

	if (type == BLKIO_STAT_TIME)
		return blkio_fill_stat(key_str, MAX_KEY_LEN - 1,
					blkg->stats.time, cb, dev);
	if (type == BLKIO_STAT_SECTORS)
		return blkio_fill_stat(key_str, MAX_KEY_LEN - 1,
					blkg->stats.sectors, cb, dev);
#ifdef CONFIG_DEBUG_BLK_CGROUP
	if (type == BLKIO_STAT_AVG_QUEUE_SIZE) {
		uint64_t sum = blkg->stats.avg_queue_size_sum;
		uint64_t samples = blkg->stats.avg_queue_size_samples;
		if (samples)
			do_div(sum, samples);
		else
			sum = 0;
		return blkio_fill_stat(key_str, MAX_KEY_LEN - 1, sum, cb, dev);
	}
	if (type == BLKIO_STAT_GROUP_WAIT_TIME)
		return blkio_fill_stat(key_str, MAX_KEY_LEN - 1,
					blkg->stats.group_wait_time, cb, dev);
	if (type == BLKIO_STAT_IDLE_TIME)
		return blkio_fill_stat(key_str, MAX_KEY_LEN - 1,
					blkg->stats.idle_time, cb, dev);
	if (type == BLKIO_STAT_EMPTY_TIME)
		return blkio_fill_stat(key_str, MAX_KEY_LEN - 1,
					blkg->stats.empty_time, cb, dev);
	if (type == BLKIO_STAT_DEQUEUE)
		return blkio_fill_stat(key_str, MAX_KEY_LEN - 1,
					blkg->stats.dequeue, cb, dev);
#endif

	for (sub_type = BLKIO_STAT_READ; sub_type < BLKIO_STAT_TOTAL;
			sub_type++) {
		blkio_get_key_name(sub_type, dev, key_str, MAX_KEY_LEN, false);
		cb->fill(cb, key_str, blkg->stats.stat_arr[type][sub_type]);
	}
	disk_total = blkg->stats.stat_arr[type][BLKIO_STAT_READ] +
			blkg->stats.stat_arr[type][BLKIO_STAT_WRITE];
	blkio_get_key_name(BLKIO_STAT_TOTAL, dev, key_str, MAX_KEY_LEN, false);
	cb->fill(cb, key_str, disk_total);
	return disk_total;
}

#define SHOW_FUNCTION_PER_GROUP(__VAR, type, show_total)		\
static int blkiocg_##__VAR##_read(struct cgroup *cgroup,		\
		struct cftype *cftype, struct cgroup_map_cb *cb)	\
{									\
	struct blkio_cgroup *blkcg;					\
	struct blkio_group *blkg;					\
	struct hlist_node *n;						\
	uint64_t cgroup_total = 0;					\
									\
	if (!cgroup_lock_live_group(cgroup))				\
		return -ENODEV;						\
									\
	blkcg = cgroup_to_blkio_cgroup(cgroup);				\
	rcu_read_lock();						\
	hlist_for_each_entry_rcu(blkg, n, &blkcg->blkg_list, blkcg_node) {\
		if (blkg->dev) {					\
			spin_lock_irq(&blkg->stats_lock);		\
			cgroup_total += blkio_get_stat(blkg, cb,	\
						blkg->dev, type);	\
			spin_unlock_irq(&blkg->stats_lock);		\
		}							\
	}								\
	if (show_total)							\
		cb->fill(cb, "Total", cgroup_total);			\
	rcu_read_unlock();						\
	cgroup_unlock();						\
	return 0;							\
}

SHOW_FUNCTION_PER_GROUP(time, BLKIO_STAT_TIME, 0);
SHOW_FUNCTION_PER_GROUP(sectors, BLKIO_STAT_SECTORS, 0);
SHOW_FUNCTION_PER_GROUP(io_service_bytes, BLKIO_STAT_SERVICE_BYTES, 1);
SHOW_FUNCTION_PER_GROUP(io_serviced, BLKIO_STAT_SERVICED, 1);
SHOW_FUNCTION_PER_GROUP(io_service_time, BLKIO_STAT_SERVICE_TIME, 1);
SHOW_FUNCTION_PER_GROUP(io_wait_time, BLKIO_STAT_WAIT_TIME, 1);
SHOW_FUNCTION_PER_GROUP(io_merged, BLKIO_STAT_MERGED, 1);
SHOW_FUNCTION_PER_GROUP(io_queued, BLKIO_STAT_QUEUED, 1);
#ifdef CONFIG_DEBUG_BLK_CGROUP
SHOW_FUNCTION_PER_GROUP(dequeue, BLKIO_STAT_DEQUEUE, 0);
SHOW_FUNCTION_PER_GROUP(avg_queue_size, BLKIO_STAT_AVG_QUEUE_SIZE, 0);
SHOW_FUNCTION_PER_GROUP(group_wait_time, BLKIO_STAT_GROUP_WAIT_TIME, 0);
SHOW_FUNCTION_PER_GROUP(idle_time, BLKIO_STAT_IDLE_TIME, 0);
SHOW_FUNCTION_PER_GROUP(empty_time, BLKIO_STAT_EMPTY_TIME, 0);
#endif
#undef SHOW_FUNCTION_PER_GROUP

#ifdef CONFIG_DEBUG_BLK_CGROUP
void blkiocg_update_dequeue_stats(struct blkio_group *blkg,
			unsigned long dequeue)
{
	blkg->stats.dequeue += dequeue;
}
EXPORT_SYMBOL_GPL(blkiocg_update_dequeue_stats);
#endif

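/*
 * Sanity-check a user-supplied device number: it must refer to an existing
 * whole disk, not a partition.
 */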
static int blkio_check_dev_num(dev_t dev)
{
	int part = 0;
	struct gendisk *disk;

	disk = get_gendisk(dev, &part);
	if (!disk || part)
		return -ENODEV;

	return 0;
}

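/*
 * Parse one "<major>:<minor> <weight>" rule from user space into *newpn.
 * A weight of 0 is accepted and means "remove the per-device rule".
 */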
static int blkio_policy_parse_and_set(char *buf,
				      struct blkio_policy_node *newpn)
{
	char *s[4], *p, *major_s = NULL, *minor_s = NULL;
	int ret;
	unsigned long major, minor, temp;
	int i = 0;
	dev_t dev;

	memset(s, 0, sizeof(s));

	while ((p = strsep(&buf, " ")) != NULL) {
		if (!*p)
			continue;

		s[i++] = p;

		/* Stop early to prevent parsing more fields than we need */
		if (i == 3)
			break;
	}

	if (i != 2)
		return -EINVAL;

	p = strsep(&s[0], ":");
	if (p != NULL)
		major_s = p;
	else
		return -EINVAL;

	minor_s = s[0];
	if (!minor_s)
		return -EINVAL;

	ret = strict_strtoul(major_s, 10, &major);
	if (ret)
		return -EINVAL;

	ret = strict_strtoul(minor_s, 10, &minor);
	if (ret)
		return -EINVAL;

	dev = MKDEV(major, minor);

	ret = blkio_check_dev_num(dev);
	if (ret)
		return ret;

	newpn->dev = dev;

	if (s[1] == NULL)
		return -EINVAL;

	ret = strict_strtoul(s[1], 10, &temp);
	if (ret || (temp < BLKIO_WEIGHT_MIN && temp > 0) ||
	    temp > BLKIO_WEIGHT_MAX)
		return -EINVAL;

	newpn->weight = temp;

	return 0;
}

unsigned int blkcg_get_weight(struct blkio_cgroup *blkcg,
			      dev_t dev)
{
	struct blkio_policy_node *pn;

	pn = blkio_policy_search_node(blkcg, dev);
	if (pn)
		return pn->weight;
	else
		return blkcg->weight;
}
EXPORT_SYMBOL_GPL(blkcg_get_weight);

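/*
 * Write handler for the blkio.weight_device cgroup file. Each write is one
 * "<major>:<minor> <weight>" rule, for example (device numbers are
 * illustrative only):
 *
 *	echo "8:16 300" > blkio.weight_device
 *
 * A weight of 0 removes the per-device rule, after which the group falls
 * back to the default blkio.weight. The current rules can be read back
 * from the same file.
 */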
static int blkiocg_weight_device_write(struct cgroup *cgrp, struct cftype *cft,
				       const char *buffer)
{
	int ret = 0;
	char *buf;
	struct blkio_policy_node *newpn, *pn;
	struct blkio_cgroup *blkcg;
	struct blkio_group *blkg;
	int keep_newpn = 0;
	struct hlist_node *n;
	struct blkio_policy_type *blkiop;

	buf = kstrdup(buffer, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	newpn = kzalloc(sizeof(*newpn), GFP_KERNEL);
	if (!newpn) {
		ret = -ENOMEM;
		goto free_buf;
	}

	ret = blkio_policy_parse_and_set(buf, newpn);
	if (ret)
		goto free_newpn;

	blkcg = cgroup_to_blkio_cgroup(cgrp);

	spin_lock_irq(&blkcg->lock);

	pn = blkio_policy_search_node(blkcg, newpn->dev);
	if (!pn) {
		if (newpn->weight != 0) {
			blkio_policy_insert_node(blkcg, newpn);
			keep_newpn = 1;
		}
		spin_unlock_irq(&blkcg->lock);
		goto update_io_group;
	}

	if (newpn->weight == 0) {
		/* weight == 0 means deleting a specific weight */
		blkio_policy_delete_node(pn);
		spin_unlock_irq(&blkcg->lock);
		goto update_io_group;
	}
	spin_unlock_irq(&blkcg->lock);

	pn->weight = newpn->weight;

update_io_group:
	/* update weight for each cfqg */
	spin_lock(&blkio_list_lock);
	spin_lock_irq(&blkcg->lock);

	hlist_for_each_entry(blkg, n, &blkcg->blkg_list, blkcg_node) {
		if (newpn->dev == blkg->dev) {
			list_for_each_entry(blkiop, &blkio_list, list)
				blkiop->ops.blkio_update_group_weight_fn(blkg,
						newpn->weight ?
						newpn->weight :
						blkcg->weight);
		}
	}

	spin_unlock_irq(&blkcg->lock);
	spin_unlock(&blkio_list_lock);

free_newpn:
	if (!keep_newpn)
		kfree(newpn);
free_buf:
	kfree(buf);
	return ret;
}

static int blkiocg_weight_device_read(struct cgroup *cgrp, struct cftype *cft,
				      struct seq_file *m)
{
	struct blkio_cgroup *blkcg;
	struct blkio_policy_node *pn;

	seq_printf(m, "dev\tweight\n");

	blkcg = cgroup_to_blkio_cgroup(cgrp);
	if (list_empty(&blkcg->policy_list))
		goto out;

	spin_lock_irq(&blkcg->lock);
	list_for_each_entry(pn, &blkcg->policy_list, node) {
		seq_printf(m, "%u:%u\t%u\n", MAJOR(pn->dev),
			   MINOR(pn->dev), pn->weight);
	}
	spin_unlock_irq(&blkcg->lock);

out:
	return 0;
}

struct cftype blkio_files[] = {
	{
		.name = "weight_device",
		.read_seq_string = blkiocg_weight_device_read,
		.write_string = blkiocg_weight_device_write,
		.max_write_len = 256,
	},
	{
		.name = "weight",
		.read_u64 = blkiocg_weight_read,
		.write_u64 = blkiocg_weight_write,
	},
	{
		.name = "time",
		.read_map = blkiocg_time_read,
	},
	{
		.name = "sectors",
		.read_map = blkiocg_sectors_read,
	},
	{
		.name = "io_service_bytes",
		.read_map = blkiocg_io_service_bytes_read,
	},
	{
		.name = "io_serviced",
		.read_map = blkiocg_io_serviced_read,
	},
	{
		.name = "io_service_time",
		.read_map = blkiocg_io_service_time_read,
	},
	{
		.name = "io_wait_time",
		.read_map = blkiocg_io_wait_time_read,
	},
	{
		.name = "io_merged",
		.read_map = blkiocg_io_merged_read,
	},
	{
		.name = "io_queued",
		.read_map = blkiocg_io_queued_read,
	},
	{
		.name = "reset_stats",
		.write_u64 = blkiocg_reset_stats,
	},
#ifdef CONFIG_DEBUG_BLK_CGROUP
	{
		.name = "avg_queue_size",
		.read_map = blkiocg_avg_queue_size_read,
	},
	{
		.name = "group_wait_time",
		.read_map = blkiocg_group_wait_time_read,
	},
	{
		.name = "idle_time",
		.read_map = blkiocg_idle_time_read,
	},
	{
		.name = "empty_time",
		.read_map = blkiocg_empty_time_read,
	},
	{
		.name = "dequeue",
		.read_map = blkiocg_dequeue_read,
	},
#endif
};

static int blkiocg_populate(struct cgroup_subsys *subsys, struct cgroup *cgroup)
{
	return cgroup_add_files(cgroup, subsys, blkio_files,
				ARRAY_SIZE(blkio_files));
}

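/*
 * cgroup removal: unhash every blkio_group from the cgroup, let the
 * registered policies unlink their per-group state, then free the
 * per-device policy nodes and finally the blkio_cgroup itself.
 */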
static void blkiocg_destroy(struct cgroup_subsys *subsys, struct cgroup *cgroup)
{
	struct blkio_cgroup *blkcg = cgroup_to_blkio_cgroup(cgroup);
	unsigned long flags;
	struct blkio_group *blkg;
	void *key;
	struct blkio_policy_type *blkiop;
	struct blkio_policy_node *pn, *pntmp;

	rcu_read_lock();
remove_entry:
	spin_lock_irqsave(&blkcg->lock, flags);

	if (hlist_empty(&blkcg->blkg_list)) {
		spin_unlock_irqrestore(&blkcg->lock, flags);
		goto done;
	}

	blkg = hlist_entry(blkcg->blkg_list.first, struct blkio_group,
				blkcg_node);
	key = rcu_dereference(blkg->key);
	__blkiocg_del_blkio_group(blkg);

	spin_unlock_irqrestore(&blkcg->lock, flags);

	/*
	 * This blkio_group is being unlinked as the associated cgroup is
	 * going away. Let all the IO controlling policies know about this
	 * event.
	 *
	 * Currently this is a static call to one io controlling policy. Once
	 * we have more policies in place, we need some dynamic registration
	 * of callback functions.
	 */
	spin_lock(&blkio_list_lock);
	list_for_each_entry(blkiop, &blkio_list, list)
		blkiop->ops.blkio_unlink_group_fn(key, blkg);
	spin_unlock(&blkio_list_lock);
	goto remove_entry;

done:
	list_for_each_entry_safe(pn, pntmp, &blkcg->policy_list, node) {
		blkio_policy_delete_node(pn);
		kfree(pn);
	}
	free_css_id(&blkio_subsys, &blkcg->css);
	rcu_read_unlock();
	if (blkcg != &blkio_root_cgroup)
		kfree(blkcg);
}

static struct cgroup_subsys_state *
blkiocg_create(struct cgroup_subsys *subsys, struct cgroup *cgroup)
{
	struct blkio_cgroup *blkcg, *parent_blkcg;

	if (!cgroup->parent) {
		blkcg = &blkio_root_cgroup;
		goto done;
	}

	/* Currently we do not support hierarchies deeper than two levels (0,1) */
	parent_blkcg = cgroup_to_blkio_cgroup(cgroup->parent);
	if (css_depth(&parent_blkcg->css) > 0)
		return ERR_PTR(-EINVAL);

	blkcg = kzalloc(sizeof(*blkcg), GFP_KERNEL);
	if (!blkcg)
		return ERR_PTR(-ENOMEM);

	blkcg->weight = BLKIO_WEIGHT_DEFAULT;
done:
	spin_lock_init(&blkcg->lock);
	INIT_HLIST_HEAD(&blkcg->blkg_list);

	INIT_LIST_HEAD(&blkcg->policy_list);
	return &blkcg->css;
}

/*
 * We cannot support shared io contexts, as we have no means to support
 * two tasks with the same ioc in two different groups without major rework
 * of the main cic data structures. For now we allow a task to change
 * its cgroup only if it's the only owner of its ioc.
 */
static int blkiocg_can_attach(struct cgroup_subsys *subsys,
				struct cgroup *cgroup, struct task_struct *tsk,
				bool threadgroup)
{
	struct io_context *ioc;
	int ret = 0;

	/* task_lock() is needed to avoid races with exit_io_context() */
	task_lock(tsk);
	ioc = tsk->io_context;
	if (ioc && atomic_read(&ioc->nr_tasks) > 1)
		ret = -EINVAL;
	task_unlock(tsk);

	return ret;
}

static void blkiocg_attach(struct cgroup_subsys *subsys, struct cgroup *cgroup,
				struct cgroup *prev, struct task_struct *tsk,
				bool threadgroup)
{
	struct io_context *ioc;

	task_lock(tsk);
	ioc = tsk->io_context;
	if (ioc)
		ioc->cgroup_changed = 1;
	task_unlock(tsk);
}

void blkio_policy_register(struct blkio_policy_type *blkiop)
{
	spin_lock(&blkio_list_lock);
	list_add_tail(&blkiop->list, &blkio_list);
	spin_unlock(&blkio_list_lock);
}
EXPORT_SYMBOL_GPL(blkio_policy_register);

void blkio_policy_unregister(struct blkio_policy_type *blkiop)
{
	spin_lock(&blkio_list_lock);
	list_del_init(&blkiop->list);
	spin_unlock(&blkio_list_lock);
}
EXPORT_SYMBOL_GPL(blkio_policy_unregister);

static int __init init_cgroup_blkio(void)
{
	return cgroup_load_subsys(&blkio_subsys);
}

static void __exit exit_cgroup_blkio(void)
{
	cgroup_unload_subsys(&blkio_subsys);
}

module_init(init_cgroup_blkio);
module_exit(exit_cgroup_blkio);
MODULE_LICENSE("GPL");