block: make block cgroup policies follow bio task association
author    Tejun Heo <tj@kernel.org>    Mon, 5 Mar 2012 21:15:28 +0000 (13:15 -0800)
committer Jens Axboe <axboe@kernel.dk>    Tue, 6 Mar 2012 20:27:24 +0000 (21:27 +0100)
Implement bio_blkio_cgroup(), which returns the blkcg associated with
a bio if one exists and %current's blkcg otherwise, and use it in
blk-throttle and the cfq-iosched proportional IO (propio) policy.
This makes both cgroup policies honor the bio's task association
instead of always assuming %current.

As nobody is using bio_set_task() yet, this doesn't introduce any
behavior change.

Signed-off-by: Tejun Heo <tj@kernel.org>
Cc: Vivek Goyal <vgoyal@redhat.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
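
[Editor's note] The lookup rule bio_blkio_cgroup() implements is simple:
an explicit association stored on the bio wins, and the submitter's
cgroup is the fallback.  Below is a minimal userspace sketch of that
rule; the struct and function names are illustrative stand-ins, not the
kernel's types.

	#include <stdio.h>

	/* Stand-in for struct blkio_cgroup. */
	struct blkcg {
		const char *name;
	};

	/* Stand-in for struct bio; only the association field matters here. */
	struct bio_like {
		struct blkcg *css;	/* set by bio_set_task(), may be NULL */
	};

	static struct blkcg current_blkcg = { "blkcg-of-current" };

	/* Mirrors bio_blkio_cgroup(): honor the bio's association if present,
	 * otherwise fall back to %current's blkcg (the old behavior). */
	static struct blkcg *bio_owner(const struct bio_like *bio)
	{
		if (bio && bio->css)
			return bio->css;
		return &current_blkcg;
	}

	int main(void)
	{
		struct blkcg issuer = { "blkcg-of-original-issuer" };
		struct bio_like tagged = { &issuer };
		struct bio_like untagged = { NULL };

		printf("%s\n", bio_owner(&tagged)->name);   /* blkcg-of-original-issuer */
		printf("%s\n", bio_owner(&untagged)->name); /* blkcg-of-current */
		return 0;
	}

Until bio_set_task() gains users, every bio takes the fallback path,
which is why the patch is behavior-neutral.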
block/blk-cgroup.c
block/blk-cgroup.h
block/blk-throttle.c
block/cfq-iosched.c

diff --git a/block/blk-cgroup.c b/block/blk-cgroup.c
index 27d39a810cb6efa883f7a78d464c9d3250b71dd9..ee962f327ba543cf1f0e1aa6db7734294b5b2dcb 100644
--- a/block/blk-cgroup.c
+++ b/block/blk-cgroup.c
@@ -71,12 +71,19 @@ struct blkio_cgroup *cgroup_to_blkio_cgroup(struct cgroup *cgroup)
 }
 EXPORT_SYMBOL_GPL(cgroup_to_blkio_cgroup);
 
-struct blkio_cgroup *task_blkio_cgroup(struct task_struct *tsk)
+static struct blkio_cgroup *task_blkio_cgroup(struct task_struct *tsk)
 {
        return container_of(task_subsys_state(tsk, blkio_subsys_id),
                            struct blkio_cgroup, css);
 }
-EXPORT_SYMBOL_GPL(task_blkio_cgroup);
+
+struct blkio_cgroup *bio_blkio_cgroup(struct bio *bio)
+{
+       if (bio && bio->bi_css)
+               return container_of(bio->bi_css, struct blkio_cgroup, css);
+       return task_blkio_cgroup(current);
+}
+EXPORT_SYMBOL_GPL(bio_blkio_cgroup);
 
 static inline void blkio_update_group_weight(struct blkio_group *blkg,
                                             int plid, unsigned int weight)
diff --git a/block/blk-cgroup.h b/block/blk-cgroup.h
index 66eaefefcbd2ff2ce96b7c1143b4c81b04be5aab..98cd8533378f68f21fd9c9a8de1915c7fb2da8cf 100644
--- a/block/blk-cgroup.h
+++ b/block/blk-cgroup.h
@@ -379,7 +379,7 @@ static inline void blkiocg_set_start_empty_time(struct blkio_group *blkg,
 #ifdef CONFIG_BLK_CGROUP
 extern struct blkio_cgroup blkio_root_cgroup;
 extern struct blkio_cgroup *cgroup_to_blkio_cgroup(struct cgroup *cgroup);
-extern struct blkio_cgroup *task_blkio_cgroup(struct task_struct *tsk);
+extern struct blkio_cgroup *bio_blkio_cgroup(struct bio *bio);
 extern struct blkio_group *blkg_lookup(struct blkio_cgroup *blkcg,
                                       struct request_queue *q);
 struct blkio_group *blkg_lookup_create(struct blkio_cgroup *blkcg,
@@ -413,7 +413,7 @@ struct cgroup;
 static inline struct blkio_cgroup *
 cgroup_to_blkio_cgroup(struct cgroup *cgroup) { return NULL; }
 static inline struct blkio_cgroup *
-task_blkio_cgroup(struct task_struct *tsk) { return NULL; }
+bio_blkio_cgroup(struct bio *bio) { return NULL; }
 
 static inline struct blkio_group *blkg_lookup(struct blkio_cgroup *blkcg,
                                              void *key) { return NULL; }
diff --git a/block/blk-throttle.c b/block/blk-throttle.c
index bfa5168249eba067f2342ab2cf51a0f94f04e73f..08b7ab292a803846fcc338618f37dc66c2bc6b75 100644
--- a/block/blk-throttle.c
+++ b/block/blk-throttle.c
@@ -900,7 +900,7 @@ bool blk_throtl_bio(struct request_queue *q, struct bio *bio)
         * just update the dispatch stats in lockless manner and return.
         */
        rcu_read_lock();
-       blkcg = task_blkio_cgroup(current);
+       blkcg = bio_blkio_cgroup(bio);
        tg = throtl_lookup_tg(td, blkcg);
        if (tg) {
                if (tg_no_rule_group(tg, rw)) {
diff --git a/block/cfq-iosched.c b/block/cfq-iosched.c
index abac87337d706f9398e2e46e7899f72d32d5b561..f2387b50f82e05a62e55e04e6d95b18c7723f31f 100644
--- a/block/cfq-iosched.c
+++ b/block/cfq-iosched.c
@@ -467,8 +467,9 @@ static inline int cfqg_busy_async_queues(struct cfq_data *cfqd,
 }
 
 static void cfq_dispatch_insert(struct request_queue *, struct request *);
-static struct cfq_queue *cfq_get_queue(struct cfq_data *, bool,
-                                      struct io_context *, gfp_t);
+static struct cfq_queue *cfq_get_queue(struct cfq_data *cfqd, bool is_sync,
+                                      struct io_context *ioc, struct bio *bio,
+                                      gfp_t gfp_mask);
 
 static inline struct cfq_io_cq *icq_to_cic(struct io_cq *icq)
 {
@@ -2601,7 +2602,7 @@ static void cfq_init_prio_data(struct cfq_queue *cfqq, struct io_context *ioc)
        cfq_clear_cfqq_prio_changed(cfqq);
 }
 
-static void changed_ioprio(struct cfq_io_cq *cic)
+static void changed_ioprio(struct cfq_io_cq *cic, struct bio *bio)
 {
        struct cfq_data *cfqd = cic_to_cfqd(cic);
        struct cfq_queue *cfqq;
@@ -2613,7 +2614,7 @@ static void changed_ioprio(struct cfq_io_cq *cic)
        if (cfqq) {
                struct cfq_queue *new_cfqq;
                new_cfqq = cfq_get_queue(cfqd, BLK_RW_ASYNC, cic->icq.ioc,
-                                               GFP_ATOMIC);
+                                        bio, GFP_ATOMIC);
                if (new_cfqq) {
                        cic->cfqq[BLK_RW_ASYNC] = new_cfqq;
                        cfq_put_queue(cfqq);
@@ -2671,7 +2672,7 @@ static void changed_cgroup(struct cfq_io_cq *cic)
 
 static struct cfq_queue *
 cfq_find_alloc_queue(struct cfq_data *cfqd, bool is_sync,
-                    struct io_context *ioc, gfp_t gfp_mask)
+                    struct io_context *ioc, struct bio *bio, gfp_t gfp_mask)
 {
        struct blkio_cgroup *blkcg;
        struct cfq_queue *cfqq, *new_cfqq = NULL;
@@ -2681,7 +2682,7 @@ cfq_find_alloc_queue(struct cfq_data *cfqd, bool is_sync,
 retry:
        rcu_read_lock();
 
-       blkcg = task_blkio_cgroup(current);
+       blkcg = bio_blkio_cgroup(bio);
 
        cfqg = cfq_lookup_create_cfqg(cfqd, blkcg);
 
@@ -2746,7 +2747,7 @@ cfq_async_queue_prio(struct cfq_data *cfqd, int ioprio_class, int ioprio)
 
 static struct cfq_queue *
 cfq_get_queue(struct cfq_data *cfqd, bool is_sync, struct io_context *ioc,
-             gfp_t gfp_mask)
+             struct bio *bio, gfp_t gfp_mask)
 {
        const int ioprio = task_ioprio(ioc);
        const int ioprio_class = task_ioprio_class(ioc);
@@ -2759,7 +2760,7 @@ cfq_get_queue(struct cfq_data *cfqd, bool is_sync, struct io_context *ioc,
        }
 
        if (!cfqq)
-               cfqq = cfq_find_alloc_queue(cfqd, is_sync, ioc, gfp_mask);
+               cfqq = cfq_find_alloc_queue(cfqd, is_sync, ioc, bio, gfp_mask);
 
        /*
         * pin the queue now that it's allocated, scheduler exit will prune it
@@ -3316,7 +3317,7 @@ cfq_set_request(struct request_queue *q, struct request *rq, struct bio *bio,
        /* handle changed notifications */
        changed = icq_get_changed(&cic->icq);
        if (unlikely(changed & ICQ_IOPRIO_CHANGED))
-               changed_ioprio(cic);
+               changed_ioprio(cic, bio);
 #ifdef CONFIG_CFQ_GROUP_IOSCHED
        if (unlikely(changed & ICQ_CGROUP_CHANGED))
                changed_cgroup(cic);
@@ -3325,7 +3326,7 @@ cfq_set_request(struct request_queue *q, struct request *rq, struct bio *bio,
 new_queue:
        cfqq = cic_to_cfqq(cic, is_sync);
        if (!cfqq || cfqq == &cfqd->oom_cfqq) {
-               cfqq = cfq_get_queue(cfqd, is_sync, cic->icq.ioc, gfp_mask);
+               cfqq = cfq_get_queue(cfqd, is_sync, cic->icq.ioc, bio, gfp_mask);
                cic_set_cfqq(cic, cfqq, is_sync);
        } else {
                /*