blkcg: implement per-blkg request allocation
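
With request lists allocated per blkg (one per blkcg - request_queue
pair) instead of a single list embedded in the queue, a nr_requests
update written through sysfs has to walk every request_list hanging
off the queue.  The queue_requests_store() hunks below do that with a
blk_queue_for_each_rl() iterator.  A minimal sketch of what such an
iterator could look like; the real definition would live in
block/blk.h, which this diff does not show, and the
__blk_queue_next_rl() helper name is an assumption:

	/*
	 * Sketch only: visit every request_list of @q, starting with the
	 * root request_list embedded in the queue.  __blk_queue_next_rl()
	 * is assumed to step through the per-blkg lists and return NULL
	 * after the last one.
	 */
	#define blk_queue_for_each_rl(rl, q)	\
		for ((rl) = &(q)->root_rl; (rl); (rl) = __blk_queue_next_rl((rl), (q)))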
diff --git a/block/blk-sysfs.c b/block/blk-sysfs.c
index cf150011d808bc71fee8ff894387de0581235f56..9628b291f96057a42cbf6a5492bd7480fe7e93da 100644
--- a/block/blk-sysfs.c
+++ b/block/blk-sysfs.c
@@ -9,6 +9,7 @@
 #include <linux/blktrace_api.h>
 
 #include "blk.h"
+#include "blk-cgroup.h"
 
 struct queue_sysfs_entry {
        struct attribute attr;
@@ -39,7 +40,7 @@ static ssize_t queue_requests_show(struct request_queue *q, char *page)
 static ssize_t
 queue_requests_store(struct request_queue *q, const char *page, size_t count)
 {
-       struct request_list *rl = &q->rq;
+       struct request_list *rl;
        unsigned long nr;
        int ret;
 
@@ -54,6 +55,9 @@ queue_requests_store(struct request_queue *q, const char *page, size_t count)
        q->nr_requests = nr;
        blk_queue_congestion_threshold(q);
 
+       /* congestion isn't cgroup aware and follows root blkcg for now */
+       rl = &q->root_rl;
+
        if (rl->count[BLK_RW_SYNC] >= queue_congestion_on_threshold(q))
                blk_set_queue_congested(q, BLK_RW_SYNC);
        else if (rl->count[BLK_RW_SYNC] < queue_congestion_off_threshold(q))
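
The switch from the old &q->rq to &q->root_rl implies a matching
rename of the request_list embedded in struct request_queue: it now
serves the root blkcg only, and the queue-wide congestion state keeps
following it.  A sketch of the assumed include/linux/blkdev.h side
(not part of this file's diff):

	struct request_queue {
		/* ... */
		/*
		 * The embedded request_list serves the root blkcg; other
		 * blkcgs are assumed to get their own request_lists, one
		 * per blkg.  This field used to be "struct request_list rq".
		 */
		struct request_list	root_rl;
		/* ... */
	};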
@@ -64,19 +68,22 @@ queue_requests_store(struct request_queue *q, const char *page, size_t count)
        else if (rl->count[BLK_RW_ASYNC] < queue_congestion_off_threshold(q))
                blk_clear_queue_congested(q, BLK_RW_ASYNC);
 
-       if (rl->count[BLK_RW_SYNC] >= q->nr_requests) {
-               blk_set_queue_full(q, BLK_RW_SYNC);
-       } else {
-               blk_clear_queue_full(q, BLK_RW_SYNC);
-               wake_up(&rl->wait[BLK_RW_SYNC]);
+       blk_queue_for_each_rl(rl, q) {
+               if (rl->count[BLK_RW_SYNC] >= q->nr_requests) {
+                       blk_set_rl_full(rl, BLK_RW_SYNC);
+               } else {
+                       blk_clear_rl_full(rl, BLK_RW_SYNC);
+                       wake_up(&rl->wait[BLK_RW_SYNC]);
+               }
+
+               if (rl->count[BLK_RW_ASYNC] >= q->nr_requests) {
+                       blk_set_rl_full(rl, BLK_RW_ASYNC);
+               } else {
+                       blk_clear_rl_full(rl, BLK_RW_ASYNC);
+                       wake_up(&rl->wait[BLK_RW_ASYNC]);
+               }
        }
 
-       if (rl->count[BLK_RW_ASYNC] >= q->nr_requests) {
-               blk_set_queue_full(q, BLK_RW_ASYNC);
-       } else {
-               blk_clear_queue_full(q, BLK_RW_ASYNC);
-               wake_up(&rl->wait[BLK_RW_ASYNC]);
-       }
        spin_unlock_irq(q->queue_lock);
        return ret;
 }
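
blk_set_queue_full() and blk_clear_queue_full() flipped queue-wide
flags, which cannot describe fullness once every request_list fills
up independently.  The blk_set_rl_full()/blk_clear_rl_full() helpers
used in the loop above presumably keep that state in per-request_list
flags instead.  A minimal sketch under that assumption (the
BLK_RL_*FULL flag names and the rl->flags field are hypothetical):

	#define BLK_RL_SYNCFULL		(1U << 0)
	#define BLK_RL_ASYNCFULL	(1U << 1)

	/* sketch: mark/unmark one request_list as full for @sync requests */
	static inline void blk_set_rl_full(struct request_list *rl, bool sync)
	{
		rl->flags |= sync ? BLK_RL_SYNCFULL : BLK_RL_ASYNCFULL;
	}

	static inline void blk_clear_rl_full(struct request_list *rl, bool sync)
	{
		rl->flags &= ~(sync ? BLK_RL_SYNCFULL : BLK_RL_ASYNCFULL);
	}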
@@ -475,10 +482,11 @@ static void blk_release_queue(struct kobject *kobj)
 {
        struct request_queue *q =
                container_of(kobj, struct request_queue, kobj);
-       struct request_list *rl = &q->rq;
 
        blk_sync_queue(q);
 
+       blkcg_exit_queue(q);
+
        if (q->elevator) {
                spin_lock_irq(q->queue_lock);
                ioc_clear_queue(q);
@@ -486,15 +494,11 @@ static void blk_release_queue(struct kobject *kobj)
                elevator_exit(q->elevator);
        }
 
-       blk_throtl_exit(q);
-
-       if (rl->rq_pool)
-               mempool_destroy(rl->rq_pool);
+       blk_exit_rl(&q->root_rl);
 
        if (q->queue_tags)
                __blk_queue_free_tags(q);
 
-       blk_throtl_release(q);
        blk_trace_shutdown(q);
 
        bdi_destroy(&q->backing_dev_info);
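
The open-coded mempool teardown removed above is folded into
blk_exit_rl(), and blk_release_queue() frees only the root
request_list; per-blkg lists are presumably torn down together with
their blkgs by the new blkcg_exit_queue() call, which would also
explain why the direct blk_throtl_exit()/blk_throtl_release() calls
drop out.  A sketch of blk_exit_rl() that matches the removed lines
(the real definition would sit in block/blk-core.c, not shown here):

	/* sketch: same behaviour as the open-coded teardown it replaces */
	void blk_exit_rl(struct request_list *rl)
	{
		if (rl->rq_pool)
			mempool_destroy(rl->rq_pool);
	}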