/*
 * Interface for controlling IO bandwidth on a request queue
 *
 * Copyright (C) 2010 Vivek Goyal <vgoyal@redhat.com>
 */
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/blkdev.h>
#include <linux/bio.h>
#include <linux/blktrace_api.h>
#include "blk-cgroup.h"

/* Max dispatch from a group in 1 round */
static int throtl_grp_quantum = 8;

/* Total max dispatch from all groups in one round */
static int throtl_quantum = 32;

/* Throttling is performed over 100ms slice and after that slice is renewed */
static unsigned long throtl_slice = HZ/10;	/* 100 ms */
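/*
 * Example (illustrative, assuming HZ=1000): throtl_slice is then 100
 * jiffies, so a group limited to 1048576 bytes/sec may dispatch roughly
 * bps * throtl_slice / HZ = 1048576 * 100 / 1000 = 104857 bytes before
 * the slice arithmetic below makes it wait.
 */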
static struct blkcg_policy blkcg_policy_throtl;

/* A workqueue to queue throttle related work */
static struct workqueue_struct *kthrotld_workqueue;

struct throtl_service_queue {
	/*
	 * Bios queued directly to this service_queue or dispatched from
	 * children throtl_grp's.
	 */
	struct bio_list		bio_lists[2];	/* queued bios [READ/WRITE] */
	unsigned int		nr_queued[2];	/* number of queued bios */

	/*
	 * RB tree of active children throtl_grp's, which are sorted by
	 * their ->disptime.
	 */
	struct rb_root		pending_tree;	/* RB tree of active tgs */
	struct rb_node		*first_pending;	/* first node in the tree */
	unsigned int		nr_pending;	/* # queued in the tree */
	unsigned long		first_pending_disptime;	/* disptime of the first tg */
};

enum tg_state_flags {
	THROTL_TG_PENDING	= 1 << 0,	/* on parent's pending tree */
	THROTL_TG_WAS_EMPTY	= 1 << 1,	/* bio_lists[] became non-empty */
};

#define rb_entry_tg(node)	rb_entry((node), struct throtl_grp, rb_node)

/* Per-cpu group stats */
struct tg_stats_cpu {
	/* total bytes transferred */
	struct blkg_rwstat		service_bytes;
	/* total IOs serviced, post merge */
	struct blkg_rwstat		serviced;
};

struct throtl_grp {
	/* must be the first member */
	struct blkg_policy_data pd;

	/* active throtl group service_queue member */
	struct rb_node rb_node;

	/* throtl_data this group belongs to */
	struct throtl_data *td;

	/* this group's service queue */
	struct throtl_service_queue service_queue;

	/*
	 * Dispatch time in jiffies. This is the estimated time when group
	 * will unthrottle and is ready to dispatch more bio. It is used as
	 * key to sort active groups in service tree.
	 */
	unsigned long disptime;

	unsigned int flags;

	/* bytes per second rate limits */
	uint64_t bps[2];

	/* IOPS limits */
	unsigned int iops[2];

	/* Number of bytes dispatched in current slice */
	uint64_t bytes_disp[2];
	/* Number of bio's dispatched in current slice */
	unsigned int io_disp[2];

	/* When did we start a new slice */
	unsigned long slice_start[2];
	unsigned long slice_end[2];

	/* Per cpu stats pointer */
	struct tg_stats_cpu __percpu *stats_cpu;

	/* List of tgs waiting for per cpu stats memory to be allocated */
	struct list_head stats_alloc_node;
};

struct throtl_data
{
	/* service tree for active throtl groups */
	struct throtl_service_queue service_queue;

	struct request_queue *queue;

	/* Total Number of queued bios on READ and WRITE lists */
	unsigned int nr_queued[2];

	/* number of total undestroyed groups */
	unsigned int nr_undestroyed_grps;

	/* Work for dispatching throttled bios */
	struct delayed_work dispatch_work;
};

/* list and work item to allocate percpu group stats */
static DEFINE_SPINLOCK(tg_stats_alloc_lock);
static LIST_HEAD(tg_stats_alloc_list);

static void tg_stats_alloc_fn(struct work_struct *);
static DECLARE_DELAYED_WORK(tg_stats_alloc_work, tg_stats_alloc_fn);

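/*
 * Conversion helpers between the three views of a group: the policy data
 * (pd) embedded in a blkcg_gq (blkg), and the throtl_grp that carries that
 * pd as its first member.
 */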
static inline struct throtl_grp *pd_to_tg(struct blkg_policy_data *pd)
{
	return pd ? container_of(pd, struct throtl_grp, pd) : NULL;
}

static inline struct throtl_grp *blkg_to_tg(struct blkcg_gq *blkg)
{
	return pd_to_tg(blkg_to_pd(blkg, &blkcg_policy_throtl));
}

static inline struct blkcg_gq *tg_to_blkg(struct throtl_grp *tg)
{
	return pd_to_blkg(&tg->pd);
}

static inline struct throtl_grp *td_root_tg(struct throtl_data *td)
{
	return blkg_to_tg(td->queue->root_blkg);
}

#define throtl_log_tg(tg, fmt, args...)	do {				\
	char __pbuf[128];						\
									\
	blkg_path(tg_to_blkg(tg), __pbuf, sizeof(__pbuf));		\
	blk_add_trace_msg((tg)->td->queue, "throtl %s " fmt, __pbuf, ##args); \
} while (0)

#define throtl_log(td, fmt, args...)	\
	blk_add_trace_msg((td)->queue, "throtl " fmt, ##args)

/*
 * Worker for allocating per cpu stat for tgs. This is scheduled on the
 * system_wq once there are some groups on the alloc_list waiting for
 * allocation.
 */
static void tg_stats_alloc_fn(struct work_struct *work)
{
	static struct tg_stats_cpu *stats_cpu;	/* this fn is non-reentrant */
	struct delayed_work *dwork = to_delayed_work(work);
	bool empty = false;

alloc_stats:
	if (!stats_cpu) {
		stats_cpu = alloc_percpu(struct tg_stats_cpu);
		if (!stats_cpu) {
			/* allocation failed, try again after some time */
			schedule_delayed_work(dwork, msecs_to_jiffies(10));
			return;
		}
	}

	spin_lock_irq(&tg_stats_alloc_lock);

	if (!list_empty(&tg_stats_alloc_list)) {
		struct throtl_grp *tg = list_first_entry(&tg_stats_alloc_list,
							 struct throtl_grp,
							 stats_alloc_node);
		swap(tg->stats_cpu, stats_cpu);
		list_del_init(&tg->stats_alloc_node);
	}

	empty = list_empty(&tg_stats_alloc_list);
	spin_unlock_irq(&tg_stats_alloc_lock);
	if (!empty)
		goto alloc_stats;
}

/* init a service_queue, assumes the caller zeroed it */
static void throtl_service_queue_init(struct throtl_service_queue *sq)
{
	bio_list_init(&sq->bio_lists[0]);
	bio_list_init(&sq->bio_lists[1]);
	sq->pending_tree = RB_ROOT;
}

static void throtl_pd_init(struct blkcg_gq *blkg)
{
	struct throtl_grp *tg = blkg_to_tg(blkg);
	unsigned long flags;

	throtl_service_queue_init(&tg->service_queue);
	RB_CLEAR_NODE(&tg->rb_node);
	tg->td = blkg->q->td;

	tg->bps[READ] = -1;
	tg->bps[WRITE] = -1;
	tg->iops[READ] = -1;
	tg->iops[WRITE] = -1;

	/*
	 * Ugh... We need to perform per-cpu allocation for tg->stats_cpu
	 * but percpu allocator can't be called from IO path. Queue tg on
	 * tg_stats_alloc_list and allocate from work item.
	 */
	spin_lock_irqsave(&tg_stats_alloc_lock, flags);
	list_add(&tg->stats_alloc_node, &tg_stats_alloc_list);
	schedule_delayed_work(&tg_stats_alloc_work, 0);
	spin_unlock_irqrestore(&tg_stats_alloc_lock, flags);
}

static void throtl_pd_exit(struct blkcg_gq *blkg)
{
	struct throtl_grp *tg = blkg_to_tg(blkg);
	unsigned long flags;

	spin_lock_irqsave(&tg_stats_alloc_lock, flags);
	list_del_init(&tg->stats_alloc_node);
	spin_unlock_irqrestore(&tg_stats_alloc_lock, flags);

	free_percpu(tg->stats_cpu);
}

static void throtl_pd_reset_stats(struct blkcg_gq *blkg)
{
	struct throtl_grp *tg = blkg_to_tg(blkg);
	int cpu;

	if (tg->stats_cpu == NULL)
		return;

	for_each_possible_cpu(cpu) {
		struct tg_stats_cpu *sc = per_cpu_ptr(tg->stats_cpu, cpu);

		blkg_rwstat_reset(&sc->service_bytes);
		blkg_rwstat_reset(&sc->serviced);
	}
}

static struct throtl_grp *throtl_lookup_tg(struct throtl_data *td,
					    struct blkcg *blkcg)
{
	/*
	 * This is the common case when there are no blkcgs. Avoid lookup
	 * in this case
	 */
	if (blkcg == &blkcg_root)
		return td_root_tg(td);

	return blkg_to_tg(blkg_lookup(blkcg, td->queue));
}

static struct throtl_grp *throtl_lookup_create_tg(struct throtl_data *td,
						   struct blkcg *blkcg)
{
	struct request_queue *q = td->queue;
	struct throtl_grp *tg = NULL;

	/*
	 * This is the common case when there are no blkcgs. Avoid lookup
	 * in this case
	 */
	if (blkcg == &blkcg_root) {
		tg = td_root_tg(td);
	} else {
		struct blkcg_gq *blkg;

		blkg = blkg_lookup_create(blkcg, q);

		/* if %NULL and @q is alive, fall back to root_tg */
		if (!IS_ERR(blkg))
			tg = blkg_to_tg(blkg);
		else if (!blk_queue_dying(q))
			tg = td_root_tg(td);
	}

	return tg;
}

static struct throtl_grp *
throtl_rb_first(struct throtl_service_queue *parent_sq)
{
	/* Service tree is empty */
	if (!parent_sq->nr_pending)
		return NULL;

	if (!parent_sq->first_pending)
		parent_sq->first_pending = rb_first(&parent_sq->pending_tree);

	if (parent_sq->first_pending)
		return rb_entry_tg(parent_sq->first_pending);

	return NULL;
}

static void rb_erase_init(struct rb_node *n, struct rb_root *root)
{
	rb_erase(n, root);
	RB_CLEAR_NODE(n);
}

static void throtl_rb_erase(struct rb_node *n,
			    struct throtl_service_queue *parent_sq)
{
	if (parent_sq->first_pending == n)
		parent_sq->first_pending = NULL;
	rb_erase_init(n, &parent_sq->pending_tree);
	--parent_sq->nr_pending;
}

static void update_min_dispatch_time(struct throtl_service_queue *parent_sq)
{
	struct throtl_grp *tg;

	tg = throtl_rb_first(parent_sq);
	if (!tg)
		return;

	parent_sq->first_pending_disptime = tg->disptime;
}
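/*
 * Insert @tg into @parent_sq's pending_tree, keyed by tg->disptime.  When
 * the new node becomes the leftmost (earliest) entry, cache it in
 * first_pending so throtl_rb_first() stays cheap.
 */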
static void tg_service_queue_add(struct throtl_grp *tg,
				 struct throtl_service_queue *parent_sq)
{
	struct rb_node **node = &parent_sq->pending_tree.rb_node;
	struct rb_node *parent = NULL;
	struct throtl_grp *__tg;
	unsigned long key = tg->disptime;
	int left = 1;

	while (*node != NULL) {
		parent = *node;
		__tg = rb_entry_tg(parent);

		if (time_before(key, __tg->disptime))
			node = &parent->rb_left;
		else {
			node = &parent->rb_right;
			left = 0;
		}
	}

	if (left)
		parent_sq->first_pending = &tg->rb_node;

	rb_link_node(&tg->rb_node, parent, node);
	rb_insert_color(&tg->rb_node, &parent_sq->pending_tree);
}

static void __throtl_enqueue_tg(struct throtl_grp *tg,
				struct throtl_service_queue *parent_sq)
{
	tg_service_queue_add(tg, parent_sq);
	tg->flags |= THROTL_TG_PENDING;
	parent_sq->nr_pending++;
}

static void throtl_enqueue_tg(struct throtl_grp *tg,
			      struct throtl_service_queue *parent_sq)
{
	if (!(tg->flags & THROTL_TG_PENDING))
		__throtl_enqueue_tg(tg, parent_sq);
}

static void __throtl_dequeue_tg(struct throtl_grp *tg,
				struct throtl_service_queue *parent_sq)
{
	throtl_rb_erase(&tg->rb_node, parent_sq);
	tg->flags &= ~THROTL_TG_PENDING;
}

static void throtl_dequeue_tg(struct throtl_grp *tg,
			      struct throtl_service_queue *parent_sq)
{
	if (tg->flags & THROTL_TG_PENDING)
		__throtl_dequeue_tg(tg, parent_sq);
}

/* Call with queue lock held */
static void throtl_schedule_delayed_work(struct throtl_data *td,
					  unsigned long delay)
{
	struct delayed_work *dwork = &td->dispatch_work;

	mod_delayed_work(kthrotld_workqueue, dwork, delay);
	throtl_log(td, "schedule work. delay=%lu jiffies=%lu", delay, jiffies);
}
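/*
 * Arm dispatch_work to fire when the earliest pending group becomes
 * eligible again, or immediately if its disptime has already passed.
 */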
static void throtl_schedule_next_dispatch(struct throtl_data *td)
{
	struct throtl_service_queue *sq = &td->service_queue;

	/* any pending children left? */
	if (!sq->nr_pending)
		return;

	update_min_dispatch_time(sq);

	if (time_before_eq(sq->first_pending_disptime, jiffies))
		throtl_schedule_delayed_work(td, 0);
	else
		throtl_schedule_delayed_work(td, sq->first_pending_disptime - jiffies);
}

static inline void throtl_start_new_slice(struct throtl_grp *tg, bool rw)
{
	tg->bytes_disp[rw] = 0;
	tg->io_disp[rw] = 0;
	tg->slice_start[rw] = jiffies;
	tg->slice_end[rw] = jiffies + throtl_slice;
	throtl_log_tg(tg, "[%c] new slice start=%lu end=%lu jiffies=%lu",
		      rw == READ ? 'R' : 'W', tg->slice_start[rw],
		      tg->slice_end[rw], jiffies);
}

static inline void throtl_set_slice_end(struct throtl_grp *tg, bool rw,
					unsigned long jiffy_end)
{
	tg->slice_end[rw] = roundup(jiffy_end, throtl_slice);
}

static inline void throtl_extend_slice(struct throtl_grp *tg, bool rw,
				       unsigned long jiffy_end)
{
	tg->slice_end[rw] = roundup(jiffy_end, throtl_slice);
	throtl_log_tg(tg, "[%c] extend slice start=%lu end=%lu jiffies=%lu",
		      rw == READ ? 'R' : 'W', tg->slice_start[rw],
		      tg->slice_end[rw], jiffies);
}

/* Determine if previously allocated or extended slice is complete or not */
static bool throtl_slice_used(struct throtl_grp *tg, bool rw)
{
	if (time_in_range(jiffies, tg->slice_start[rw], tg->slice_end[rw]))
		return false;

	return true;
}

/* Trim the used slices and adjust slice start accordingly */
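/*
 * Example (illustrative, assuming HZ=1000 and bps[rw] = 1048576): if 250
 * jiffies have elapsed since slice_start, nr_slices = 2 and bytes_trim =
 * 1048576 * 100 * 2 / 1000 = 209715, so about 200 KiB of already-charged
 * dispatch is forgotten and slice_start advances by 200 jiffies.
 */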
static inline void throtl_trim_slice(struct throtl_grp *tg, bool rw)
{
	unsigned long nr_slices, time_elapsed, io_trim;
	u64 bytes_trim, tmp;

	BUG_ON(time_before(tg->slice_end[rw], tg->slice_start[rw]));

	/*
	 * If bps are unlimited (-1), then time slices don't get
	 * renewed. Don't try to trim the slice if slice is used. A new
	 * slice will start when appropriate.
	 */
	if (throtl_slice_used(tg, rw))
		return;

	/*
	 * A bio has been dispatched. Also adjust slice_end. It might happen
	 * that initially cgroup limit was very low resulting in high
	 * slice_end, but later limit was bumped up and bio was dispatched
	 * sooner, then we need to reduce slice_end. A high bogus slice_end
	 * is bad because it does not allow new slice to start.
	 */
	throtl_set_slice_end(tg, rw, jiffies + throtl_slice);

	time_elapsed = jiffies - tg->slice_start[rw];

	nr_slices = time_elapsed / throtl_slice;
	if (!nr_slices)
		return;

	tmp = tg->bps[rw] * throtl_slice * nr_slices;
	do_div(tmp, HZ);
	bytes_trim = tmp;

	io_trim = (tg->iops[rw] * throtl_slice * nr_slices)/HZ;

	if (!bytes_trim && !io_trim)
		return;

	if (tg->bytes_disp[rw] >= bytes_trim)
		tg->bytes_disp[rw] -= bytes_trim;
	else
		tg->bytes_disp[rw] = 0;

	if (tg->io_disp[rw] >= io_trim)
		tg->io_disp[rw] -= io_trim;
	else
		tg->io_disp[rw] = 0;

	tg->slice_start[rw] += nr_slices * throtl_slice;

	throtl_log_tg(tg, "[%c] trim slice nr=%lu bytes=%llu io=%lu"
			" start=%lu end=%lu jiffies=%lu",
			rw == READ ? 'R' : 'W', nr_slices, bytes_trim, io_trim,
			tg->slice_start[rw], tg->slice_end[rw], jiffies);
}
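/*
 * Check whether one more IO fits within @tg's iops limit for this slice.
 * Example (illustrative, assuming HZ=1000 and iops[rw] = 10): with 10 IOs
 * already charged, the next one has to wait roughly (10 + 1) * 1000 / 10 + 1
 * jiffies counted from slice_start, i.e. a bit over a second minus what
 * has already elapsed.
 */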
static bool tg_with_in_iops_limit(struct throtl_grp *tg, struct bio *bio,
				  unsigned long *wait)
{
	bool rw = bio_data_dir(bio);
	unsigned int io_allowed;
	unsigned long jiffy_elapsed, jiffy_wait, jiffy_elapsed_rnd;
	u64 tmp;

	jiffy_elapsed = jiffy_elapsed_rnd = jiffies - tg->slice_start[rw];

	/* Slice has just started. Consider one slice interval */
	if (!jiffy_elapsed)
		jiffy_elapsed_rnd = throtl_slice;

	jiffy_elapsed_rnd = roundup(jiffy_elapsed_rnd, throtl_slice);

	/*
	 * jiffy_elapsed_rnd should not be a big value as minimum iops can be
	 * 1 then at max jiffy elapsed should be equivalent of 1 second as we
	 * will allow dispatch after 1 second and after that slice should
	 * have been trimmed.
	 */
	tmp = (u64)tg->iops[rw] * jiffy_elapsed_rnd;
	do_div(tmp, HZ);

	if (tmp > UINT_MAX)
		io_allowed = UINT_MAX;
	else
		io_allowed = tmp;

	if (tg->io_disp[rw] + 1 <= io_allowed) {
		if (wait)
			*wait = 0;
		return true;
	}

	/* Calc approx time to dispatch */
	jiffy_wait = ((tg->io_disp[rw] + 1) * HZ)/tg->iops[rw] + 1;

	if (jiffy_wait > jiffy_elapsed)
		jiffy_wait = jiffy_wait - jiffy_elapsed;
	else
		jiffy_wait = 1;

	if (wait)
		*wait = jiffy_wait;
	return false;
}
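/*
 * Check whether @bio fits within @tg's bytes-per-second limit for this
 * slice.  Example (illustrative, assuming HZ=1000, bps[rw] = 1048576 and a
 * rounded elapsed time of 100 jiffies): bytes_allowed = 1048576 * 100 /
 * 1000 = 104857, and any excess waits roughly extra_bytes * 1000 / 1048576
 * jiffies.
 */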
static bool tg_with_in_bps_limit(struct throtl_grp *tg, struct bio *bio,
				 unsigned long *wait)
{
	bool rw = bio_data_dir(bio);
	u64 bytes_allowed, extra_bytes, tmp;
	unsigned long jiffy_elapsed, jiffy_wait, jiffy_elapsed_rnd;

	jiffy_elapsed = jiffy_elapsed_rnd = jiffies - tg->slice_start[rw];

	/* Slice has just started. Consider one slice interval */
	if (!jiffy_elapsed)
		jiffy_elapsed_rnd = throtl_slice;

	jiffy_elapsed_rnd = roundup(jiffy_elapsed_rnd, throtl_slice);

	tmp = tg->bps[rw] * jiffy_elapsed_rnd;
	do_div(tmp, HZ);
	bytes_allowed = tmp;

	if (tg->bytes_disp[rw] + bio->bi_size <= bytes_allowed) {
		if (wait)
			*wait = 0;
		return true;
	}

	/* Calc approx time to dispatch */
	extra_bytes = tg->bytes_disp[rw] + bio->bi_size - bytes_allowed;
	jiffy_wait = div64_u64(extra_bytes * HZ, tg->bps[rw]);

	if (!jiffy_wait)
		jiffy_wait = 1;

	/*
	 * This wait time is without taking into consideration the rounding
	 * up we did. Add that time also.
	 */
	jiffy_wait = jiffy_wait + (jiffy_elapsed_rnd - jiffy_elapsed);
	if (wait)
		*wait = jiffy_wait;
	return false;
}

static bool tg_no_rule_group(struct throtl_grp *tg, bool rw) {
	if (tg->bps[rw] == -1 && tg->iops[rw] == -1)
		return true;

	return false;
}

/*
 * Returns whether one can dispatch a bio or not. Also returns approx number
 * of jiffies to wait before this bio is within IO rate and can be dispatched
 */
static bool tg_may_dispatch(struct throtl_grp *tg, struct bio *bio,
			    unsigned long *wait)
{
	bool rw = bio_data_dir(bio);
	unsigned long bps_wait = 0, iops_wait = 0, max_wait = 0;

	/*
	 * Currently whole state machine of group depends on first bio
	 * queued in the group bio list. So one should not be calling
	 * this function with a different bio if there are other bios
	 * queued.
	 */
	BUG_ON(tg->service_queue.nr_queued[rw] &&
	       bio != bio_list_peek(&tg->service_queue.bio_lists[rw]));

	/* If tg->bps = -1, then BW is unlimited */
	if (tg->bps[rw] == -1 && tg->iops[rw] == -1) {
		if (wait)
			*wait = 0;
		return true;
	}

	/*
	 * If previous slice expired, start a new one otherwise renew/extend
	 * existing slice to make sure it is at least throtl_slice interval
	 * long since now.
	 */
	if (throtl_slice_used(tg, rw))
		throtl_start_new_slice(tg, rw);
	else {
		if (time_before(tg->slice_end[rw], jiffies + throtl_slice))
			throtl_extend_slice(tg, rw, jiffies + throtl_slice);
	}

	if (tg_with_in_bps_limit(tg, bio, &bps_wait) &&
	    tg_with_in_iops_limit(tg, bio, &iops_wait)) {
		if (wait)
			*wait = 0;
		return true;
	}

	max_wait = max(bps_wait, iops_wait);

	if (wait)
		*wait = max_wait;

	if (time_before(tg->slice_end[rw], jiffies + max_wait))
		throtl_extend_slice(tg, rw, jiffies + max_wait);

	return false;
}

static void throtl_update_dispatch_stats(struct blkcg_gq *blkg, u64 bytes,
					 int rw)
{
	struct throtl_grp *tg = blkg_to_tg(blkg);
	struct tg_stats_cpu *stats_cpu;
	unsigned long flags;

	/* If per cpu stats are not allocated yet, don't do any accounting. */
	if (tg->stats_cpu == NULL)
		return;

	/*
	 * Disabling interrupts to provide mutual exclusion between two
	 * writes on same cpu. It probably is not needed for 64bit. Not
	 * optimizing that case yet.
	 */
	local_irq_save(flags);

	stats_cpu = this_cpu_ptr(tg->stats_cpu);

	blkg_rwstat_add(&stats_cpu->serviced, rw, 1);
	blkg_rwstat_add(&stats_cpu->service_bytes, rw, bytes);

	local_irq_restore(flags);
}

static void throtl_charge_bio(struct throtl_grp *tg, struct bio *bio)
{
	bool rw = bio_data_dir(bio);

	/* Charge the bio to the group */
	tg->bytes_disp[rw] += bio->bi_size;
	tg->io_disp[rw]++;

	throtl_update_dispatch_stats(tg_to_blkg(tg), bio->bi_size, bio->bi_rw);
}

static void throtl_add_bio_tg(struct bio *bio, struct throtl_grp *tg,
			      struct throtl_service_queue *parent_sq)
{
	struct throtl_service_queue *sq = &tg->service_queue;
	bool rw = bio_data_dir(bio);

	/*
	 * If @tg doesn't currently have any bios queued in the same
	 * direction, queueing @bio can change when @tg should be
	 * dispatched. Mark that @tg was empty. This is automatically
	 * cleared on the next tg_update_disptime().
	 */
	if (!sq->nr_queued[rw])
		tg->flags |= THROTL_TG_WAS_EMPTY;

	bio_list_add(&sq->bio_lists[rw], bio);
	/* Take a bio reference on tg */
	blkg_get(tg_to_blkg(tg));

	sq->nr_queued[rw]++;
	tg->td->nr_queued[rw]++;
	throtl_enqueue_tg(tg, parent_sq);
}
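/*
 * Recompute when @tg may next dispatch: peek the first queued bio in each
 * direction, take the smaller of the two waits and re-sort @tg in
 * @parent_sq's tree under the new disptime.
 */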
static void tg_update_disptime(struct throtl_grp *tg,
			       struct throtl_service_queue *parent_sq)
{
	struct throtl_service_queue *sq = &tg->service_queue;
	unsigned long read_wait = -1, write_wait = -1, min_wait = -1, disptime;
	struct bio *bio;

	if ((bio = bio_list_peek(&sq->bio_lists[READ])))
		tg_may_dispatch(tg, bio, &read_wait);

	if ((bio = bio_list_peek(&sq->bio_lists[WRITE])))
		tg_may_dispatch(tg, bio, &write_wait);

	min_wait = min(read_wait, write_wait);
	disptime = jiffies + min_wait;

	/* Update dispatch time */
	throtl_dequeue_tg(tg, parent_sq);
	tg->disptime = disptime;
	throtl_enqueue_tg(tg, parent_sq);

	/* see throtl_add_bio_tg() */
	tg->flags &= ~THROTL_TG_WAS_EMPTY;
}

static void tg_dispatch_one_bio(struct throtl_grp *tg, bool rw,
				struct throtl_service_queue *parent_sq)
{
	struct throtl_service_queue *sq = &tg->service_queue;
	struct bio *bio;

	bio = bio_list_pop(&sq->bio_lists[rw]);
	sq->nr_queued[rw]--;
	/* Drop bio reference on blkg */
	blkg_put(tg_to_blkg(tg));

	BUG_ON(tg->td->nr_queued[rw] <= 0);
	tg->td->nr_queued[rw]--;

	throtl_charge_bio(tg, bio);
	bio_list_add(&parent_sq->bio_lists[rw], bio);
	bio->bi_rw |= REQ_THROTTLED;

	throtl_trim_slice(tg, rw);
}
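/*
 * Dispatch up to throtl_grp_quantum bios from @tg into @parent_sq,
 * favouring reads roughly 3:1; with the default quantum of 8 that is at
 * most 6 reads and 2 writes per call.
 */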
static int throtl_dispatch_tg(struct throtl_grp *tg,
			      struct throtl_service_queue *parent_sq)
{
	struct throtl_service_queue *sq = &tg->service_queue;
	unsigned int nr_reads = 0, nr_writes = 0;
	unsigned int max_nr_reads = throtl_grp_quantum*3/4;
	unsigned int max_nr_writes = throtl_grp_quantum - max_nr_reads;
	struct bio *bio;

	/* Try to dispatch 75% READS and 25% WRITES */

	while ((bio = bio_list_peek(&sq->bio_lists[READ])) &&
	       tg_may_dispatch(tg, bio, NULL)) {

		tg_dispatch_one_bio(tg, bio_data_dir(bio), parent_sq);
		nr_reads++;

		if (nr_reads >= max_nr_reads)
			break;
	}

	while ((bio = bio_list_peek(&sq->bio_lists[WRITE])) &&
	       tg_may_dispatch(tg, bio, NULL)) {

		tg_dispatch_one_bio(tg, bio_data_dir(bio), parent_sq);
		nr_writes++;

		if (nr_writes >= max_nr_writes)
			break;
	}

	return nr_reads + nr_writes;
}

static int throtl_select_dispatch(struct throtl_service_queue *parent_sq)
{
	unsigned int nr_disp = 0;

	while (1) {
		struct throtl_grp *tg = throtl_rb_first(parent_sq);
		struct throtl_service_queue *sq = &tg->service_queue;

		if (!tg)
			break;

		if (time_before(jiffies, tg->disptime))
			break;

		throtl_dequeue_tg(tg, parent_sq);

		nr_disp += throtl_dispatch_tg(tg, parent_sq);

		if (sq->nr_queued[0] || sq->nr_queued[1])
			tg_update_disptime(tg, parent_sq);

		if (nr_disp >= throtl_quantum)
			break;
	}

	return nr_disp;
}

/* work function to dispatch throttled bios */
void blk_throtl_dispatch_work_fn(struct work_struct *work)
{
	struct throtl_data *td = container_of(to_delayed_work(work),
					      struct throtl_data, dispatch_work);
	struct throtl_service_queue *sq = &td->service_queue;
	struct request_queue *q = td->queue;
	unsigned int nr_disp = 0;
	struct bio_list bio_list_on_stack;
	struct bio *bio;
	struct blk_plug plug;
	int rw;

	spin_lock_irq(q->queue_lock);

	bio_list_init(&bio_list_on_stack);

	throtl_log(td, "dispatch nr_queued=%u read=%u write=%u",
		   td->nr_queued[READ] + td->nr_queued[WRITE],
		   td->nr_queued[READ], td->nr_queued[WRITE]);

	nr_disp = throtl_select_dispatch(sq);

	if (nr_disp) {
		for (rw = READ; rw <= WRITE; rw++) {
			bio_list_merge(&bio_list_on_stack, &sq->bio_lists[rw]);
			bio_list_init(&sq->bio_lists[rw]);
		}
		throtl_log(td, "bios disp=%u", nr_disp);
	}

	throtl_schedule_next_dispatch(td);

	spin_unlock_irq(q->queue_lock);

	/*
	 * If we dispatched some requests, unplug the queue to make sure
	 * immediate dispatch.
	 */
	if (nr_disp) {
		blk_start_plug(&plug);
		while ((bio = bio_list_pop(&bio_list_on_stack)))
			generic_make_request(bio);
		blk_finish_plug(&plug);
	}
}

static u64 tg_prfill_cpu_rwstat(struct seq_file *sf,
				struct blkg_policy_data *pd, int off)
{
	struct throtl_grp *tg = pd_to_tg(pd);
	struct blkg_rwstat rwstat = { }, tmp;
	int i, cpu;

	for_each_possible_cpu(cpu) {
		struct tg_stats_cpu *sc = per_cpu_ptr(tg->stats_cpu, cpu);

		tmp = blkg_rwstat_read((void *)sc + off);
		for (i = 0; i < BLKG_RWSTAT_NR; i++)
			rwstat.cnt[i] += tmp.cnt[i];
	}

	return __blkg_prfill_rwstat(sf, pd, &rwstat);
}

static int tg_print_cpu_rwstat(struct cgroup *cgrp, struct cftype *cft,
			       struct seq_file *sf)
{
	struct blkcg *blkcg = cgroup_to_blkcg(cgrp);

	blkcg_print_blkgs(sf, blkcg, tg_prfill_cpu_rwstat, &blkcg_policy_throtl,
			  cft->private, true);
	return 0;
}

static u64 tg_prfill_conf_u64(struct seq_file *sf, struct blkg_policy_data *pd,
			      int off)
{
	struct throtl_grp *tg = pd_to_tg(pd);
	u64 v = *(u64 *)((void *)tg + off);

	if (v == -1)
		return 0;
	return __blkg_prfill_u64(sf, pd, v);
}

static u64 tg_prfill_conf_uint(struct seq_file *sf, struct blkg_policy_data *pd,
			       int off)
{
	struct throtl_grp *tg = pd_to_tg(pd);
	unsigned int v = *(unsigned int *)((void *)tg + off);

	if (v == -1)
		return 0;
	return __blkg_prfill_u64(sf, pd, v);
}

static int tg_print_conf_u64(struct cgroup *cgrp, struct cftype *cft,
			     struct seq_file *sf)
{
	blkcg_print_blkgs(sf, cgroup_to_blkcg(cgrp), tg_prfill_conf_u64,
			  &blkcg_policy_throtl, cft->private, false);
	return 0;
}

static int tg_print_conf_uint(struct cgroup *cgrp, struct cftype *cft,
			      struct seq_file *sf)
{
	blkcg_print_blkgs(sf, cgroup_to_blkcg(cgrp), tg_prfill_conf_uint,
			  &blkcg_policy_throtl, cft->private, false);
	return 0;
}

static int tg_set_conf(struct cgroup *cgrp, struct cftype *cft, const char *buf,
		       bool is_u64)
{
	struct blkcg *blkcg = cgroup_to_blkcg(cgrp);
	struct blkg_conf_ctx ctx;
	struct throtl_grp *tg;
	struct throtl_data *td;
	int ret;

	ret = blkg_conf_prep(blkcg, &blkcg_policy_throtl, buf, &ctx);
	if (ret)
		return ret;

	tg = blkg_to_tg(ctx.blkg);
	td = ctx.blkg->q->td;

	if (!ctx.v)
		ctx.v = -1;

	if (is_u64)
		*(u64 *)((void *)tg + cft->private) = ctx.v;
	else
		*(unsigned int *)((void *)tg + cft->private) = ctx.v;

	throtl_log_tg(tg, "limit change rbps=%llu wbps=%llu riops=%u wiops=%u",
		      tg->bps[READ], tg->bps[WRITE],
		      tg->iops[READ], tg->iops[WRITE]);

	/*
	 * We're already holding queue_lock and know @tg is valid. Let's
	 * apply the new config directly.
	 *
	 * Restart the slices for both READ and WRITE. It might happen
	 * that a group's limits are dropped suddenly and we don't want to
	 * account recently dispatched IO with the new low rate.
	 */
	throtl_start_new_slice(tg, 0);
	throtl_start_new_slice(tg, 1);

	if (tg->flags & THROTL_TG_PENDING) {
		tg_update_disptime(tg, &td->service_queue);
		throtl_schedule_next_dispatch(td);
	}

	blkg_conf_finish(&ctx);
	return 0;
}

static int tg_set_conf_u64(struct cgroup *cgrp, struct cftype *cft,
			   const char *buf)
{
	return tg_set_conf(cgrp, cft, buf, true);
}

static int tg_set_conf_uint(struct cgroup *cgrp, struct cftype *cft,
			    const char *buf)
{
	return tg_set_conf(cgrp, cft, buf, false);
}
static struct cftype throtl_files[] = {
	{
		.name = "throttle.read_bps_device",
		.private = offsetof(struct throtl_grp, bps[READ]),
		.read_seq_string = tg_print_conf_u64,
		.write_string = tg_set_conf_u64,
		.max_write_len = 256,
	},
	{
		.name = "throttle.write_bps_device",
		.private = offsetof(struct throtl_grp, bps[WRITE]),
		.read_seq_string = tg_print_conf_u64,
		.write_string = tg_set_conf_u64,
		.max_write_len = 256,
	},
	{
		.name = "throttle.read_iops_device",
		.private = offsetof(struct throtl_grp, iops[READ]),
		.read_seq_string = tg_print_conf_uint,
		.write_string = tg_set_conf_uint,
		.max_write_len = 256,
	},
	{
		.name = "throttle.write_iops_device",
		.private = offsetof(struct throtl_grp, iops[WRITE]),
		.read_seq_string = tg_print_conf_uint,
		.write_string = tg_set_conf_uint,
		.max_write_len = 256,
	},
	{
		.name = "throttle.io_service_bytes",
		.private = offsetof(struct tg_stats_cpu, service_bytes),
		.read_seq_string = tg_print_cpu_rwstat,
	},
	{
		.name = "throttle.io_serviced",
		.private = offsetof(struct tg_stats_cpu, serviced),
		.read_seq_string = tg_print_cpu_rwstat,
	},
	{ }	/* terminate */
};

static void throtl_shutdown_wq(struct request_queue *q)
{
	struct throtl_data *td = q->td;

	cancel_delayed_work_sync(&td->dispatch_work);
}

static struct blkcg_policy blkcg_policy_throtl = {
	.pd_size		= sizeof(struct throtl_grp),
	.cftypes		= throtl_files,

	.pd_init_fn		= throtl_pd_init,
	.pd_exit_fn		= throtl_pd_exit,
	.pd_reset_stats_fn	= throtl_pd_reset_stats,
};
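/*
 * Main entry point, called from the block layer for each incoming bio.
 * Returns true if @bio has been queued for delayed dispatch and the caller
 * must not issue it now, false if it may proceed immediately.  Bios that
 * carry REQ_THROTTLED have already been throttled once and pass straight
 * through.
 */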
bool blk_throtl_bio(struct request_queue *q, struct bio *bio)
{
	struct throtl_data *td = q->td;
	struct throtl_grp *tg;
	struct throtl_service_queue *sq;
	bool rw = bio_data_dir(bio);
	struct blkcg *blkcg;
	bool throttled = false;

	if (bio->bi_rw & REQ_THROTTLED) {
		bio->bi_rw &= ~REQ_THROTTLED;
		goto out;
	}

	/*
	 * A throtl_grp pointer retrieved under rcu can be used to access
	 * basic fields like stats and io rates. If a group has no rules,
	 * just update the dispatch stats in lockless manner and return.
	 */
	rcu_read_lock();
	blkcg = bio_blkcg(bio);
	tg = throtl_lookup_tg(td, blkcg);
	if (tg) {
		if (tg_no_rule_group(tg, rw)) {
			throtl_update_dispatch_stats(tg_to_blkg(tg),
						     bio->bi_size, bio->bi_rw);
			goto out_unlock_rcu;
		}
	}

	/*
	 * Either group has not been allocated yet or it is not an unlimited
	 * IO group
	 */
	spin_lock_irq(q->queue_lock);
	tg = throtl_lookup_create_tg(td, blkcg);
	if (unlikely(!tg))
		goto out_unlock;

	sq = &tg->service_queue;

	/* throtl is FIFO - if other bios are already queued, should queue */
	if (sq->nr_queued[rw])
		goto queue_bio;

	/* Bio is within rate limit of group */
	if (tg_may_dispatch(tg, bio, NULL)) {
		throtl_charge_bio(tg, bio);

		/*
		 * We need to trim slice even when bios are not being queued
		 * otherwise it might happen that a bio is not queued for
		 * a long time and slice keeps on extending and trim is not
		 * called for a long time. Now if limits are reduced suddenly
		 * we take into account all the IO dispatched so far at the
		 * new low rate and newly queued IO gets a really long dispatch
		 * time.
		 *
		 * So keep on trimming slice even if bio is not queued.
		 */
		throtl_trim_slice(tg, rw);
		goto out_unlock;
	}

queue_bio:
	throtl_log_tg(tg, "[%c] bio. bdisp=%llu sz=%u bps=%llu"
			" iodisp=%u iops=%u queued=%d/%d",
			rw == READ ? 'R' : 'W',
			tg->bytes_disp[rw], bio->bi_size, tg->bps[rw],
			tg->io_disp[rw], tg->iops[rw],
			sq->nr_queued[READ], sq->nr_queued[WRITE]);

	bio_associate_current(bio);
	throtl_add_bio_tg(bio, tg, &q->td->service_queue);
	throttled = true;

	/* update @tg's dispatch time if @tg was empty before @bio */
	if (tg->flags & THROTL_TG_WAS_EMPTY) {
		tg_update_disptime(tg, &td->service_queue);
		throtl_schedule_next_dispatch(td);
	}

out_unlock:
	spin_unlock_irq(q->queue_lock);
out_unlock_rcu:
	rcu_read_unlock();
out:
	return throttled;
}

/**
 * blk_throtl_drain - drain throttled bios
 * @q: request_queue to drain throttled bios for
 *
 * Dispatch all currently throttled bios on @q through ->make_request_fn().
 */
void blk_throtl_drain(struct request_queue *q)
	__releases(q->queue_lock) __acquires(q->queue_lock)
{
	struct throtl_data *td = q->td;
	struct throtl_service_queue *parent_sq = &td->service_queue;
	struct throtl_grp *tg;
	struct bio *bio;
	int rw;

	queue_lockdep_assert_held(q);

	while ((tg = throtl_rb_first(parent_sq))) {
		struct throtl_service_queue *sq = &tg->service_queue;

		throtl_dequeue_tg(tg, parent_sq);

		while ((bio = bio_list_peek(&sq->bio_lists[READ])))
			tg_dispatch_one_bio(tg, bio_data_dir(bio), parent_sq);
		while ((bio = bio_list_peek(&sq->bio_lists[WRITE])))
			tg_dispatch_one_bio(tg, bio_data_dir(bio), parent_sq);
	}
	spin_unlock_irq(q->queue_lock);

	for (rw = READ; rw <= WRITE; rw++)
		while ((bio = bio_list_pop(&parent_sq->bio_lists[rw])))
			generic_make_request(bio);

	spin_lock_irq(q->queue_lock);
}

int blk_throtl_init(struct request_queue *q)
{
	struct throtl_data *td;
	int ret;

	td = kzalloc_node(sizeof(*td), GFP_KERNEL, q->node);
	if (!td)
		return -ENOMEM;

	INIT_DELAYED_WORK(&td->dispatch_work, blk_throtl_dispatch_work_fn);
	throtl_service_queue_init(&td->service_queue);

	q->td = td;
	td->queue = q;

	/* activate policy */
	ret = blkcg_activate_policy(q, &blkcg_policy_throtl);
	if (ret)
		kfree(td);
	return ret;
}

void blk_throtl_exit(struct request_queue *q)
{
	throtl_shutdown_wq(q);
	blkcg_deactivate_policy(q, &blkcg_policy_throtl);
}

static int __init throtl_init(void)
{
	kthrotld_workqueue = alloc_workqueue("kthrotld", WQ_MEM_RECLAIM, 0);
	if (!kthrotld_workqueue)
		panic("Failed to create kthrotld\n");

	return blkcg_policy_register(&blkcg_policy_throtl);
}

module_init(throtl_init);