Merge branch 'for-3.9/core' of git://git.kernel.dk/linux-block
authorLinus Torvalds <torvalds@linux-foundation.org>
Thu, 28 Feb 2013 20:52:24 +0000 (12:52 -0800)
committerLinus Torvalds <torvalds@linux-foundation.org>
Thu, 28 Feb 2013 20:52:24 +0000 (12:52 -0800)
Pull block IO core bits from Jens Axboe:
 "Below are the core block IO bits for 3.9.  It was delayed a few days
  since my workstation kept crashing every 2-8h after pulling it into
  current -git, but turns out it is a bug in the new pstate code (divide
  by zero, will report separately).  In any case, it contains:

   - The big cfq/blkcg update from Tejun and Vivek.

   - Additional block and writeback tracepoints from Tejun.

   - Improvement of the should sort (based on queues) logic in the plug
     flushing.

   - _io() variants of the wait_for_completion() interface, using
     io_schedule() instead of schedule() to contribute to io wait
     properly.

   - Various little fixes.

  You'll get two trivial merge conflicts, which should be easy enough to
  fix up"

Fix up the trivial conflicts due to hlist traversal cleanups (commit
b67bfe0d42ca: "hlist: drop the node parameter from iterators").

* 'for-3.9/core' of git://git.kernel.dk/linux-block: (39 commits)
  block: remove redundant check to bd_openers()
  block: use i_size_write() in bd_set_size()
  cfq: fix lock imbalance with failed allocations
  drivers/block/swim3.c: fix null pointer dereference
  block: don't select PERCPU_RWSEM
  block: account iowait time when waiting for completion of IO request
  sched: add wait_for_completion_io[_timeout]
  writeback: add more tracepoints
  block: add block_{touch|dirty}_buffer tracepoint
  buffer: make touch_buffer() an exported function
  block: add @req to bio_{front|back}_merge tracepoints
  block: add missing block_bio_complete() tracepoint
  block: Remove should_sort judgement when flush blk_plug
  block,elevator: use new hashtable implementation
  cfq-iosched: add hierarchical cfq_group statistics
  cfq-iosched: collect stats from dead cfqgs
  cfq-iosched: separate out cfqg_stats_reset() from cfq_pd_reset_stats()
  blkcg: make blkcg_print_blkgs() grab q locks instead of blkcg lock
  block: RCU free request_queue
  blkcg: implement blkg_[rw]stat_recursive_sum() and blkg_[rw]stat_merge()
  ...

14 files changed:
1  2 
Documentation/cgroups/blkio-controller.txt
block/blk-cgroup.c
block/blk-core.c
block/blk-exec.c
block/cfq-iosched.c
block/elevator.c
drivers/md/dm.c
drivers/md/raid5.c
fs/block_dev.c
fs/buffer.c
include/linux/elevator.h
kernel/sched/core.c
kernel/trace/blktrace.c
mm/page-writeback.c

index 8bdebb6781e1445ff629fb4dd06ffbcf97257f38,87ea95d1f533ead81e8c25adcdabcf98ab1bd431..b2b9837f9dd3475be841d41ed52dd7715e936d4b
@@@ -414,13 -518,17 +517,16 @@@ void blkcg_print_blkgs(struct seq_file 
                       bool show_total)
  {
        struct blkcg_gq *blkg;
 -      struct hlist_node *n;
        u64 total = 0;
  
-       spin_lock_irq(&blkcg->lock);
-       hlist_for_each_entry(blkg, &blkcg->blkg_list, blkcg_node)
+       rcu_read_lock();
 -      hlist_for_each_entry_rcu(blkg, n, &blkcg->blkg_list, blkcg_node) {
++      hlist_for_each_entry_rcu(blkg, &blkcg->blkg_list, blkcg_node) {
+               spin_lock_irq(blkg->q->queue_lock);
                if (blkcg_policy_enabled(blkg->q, pol))
                        total += prfill(sf, blkg->pd[pol->plid], data);
-       spin_unlock_irq(&blkcg->lock);
+               spin_unlock_irq(blkg->q->queue_lock);
+       }
+       rcu_read_unlock();
  
        if (show_total)
                seq_printf(sf, "Total %llu\n", (unsigned long long)total);
Simple merge
Simple merge
index ec52807cdd0949ec842319303d4dc3e8aa27c273,1bf9307e8f56730c6de401b78331d646c063292c..4f0ade74cfd04a1c48f22218a6c4369517efa88b
@@@ -1440,13 -1703,25 +1702,25 @@@ static int __cfq_set_weight(struct cgro
                return -EINVAL;
  
        spin_lock_irq(&blkcg->lock);
-       blkcg->cfq_weight = (unsigned int)val;
+       if (!is_leaf_weight)
+               blkcg->cfq_weight = val;
+       else
+               blkcg->cfq_leaf_weight = val;
  
 -      hlist_for_each_entry(blkg, n, &blkcg->blkg_list, blkcg_node) {
 +      hlist_for_each_entry(blkg, &blkcg->blkg_list, blkcg_node) {
                struct cfq_group *cfqg = blkg_to_cfqg(blkg);
  
-               if (cfqg && !cfqg->dev_weight)
-                       cfqg->new_weight = blkcg->cfq_weight;
+               if (!cfqg)
+                       continue;
+               if (!is_leaf_weight) {
+                       if (!cfqg->dev_weight)
+                               cfqg->new_weight = blkcg->cfq_weight;
+               } else {
+                       if (!cfqg->dev_leaf_weight)
+                               cfqg->new_leaf_weight = blkcg->cfq_leaf_weight;
+               }
        }
  
        spin_unlock_irq(&blkcg->lock);
index d0acb31cc083fca5340b9f719e0ec640607ce1eb,11683bb10b7b75e6f94d1124d91fe549a8934a65..a0ffdd943c98aa5e0f39f102b98f1bc4cf9777f5
@@@ -287,11 -252,10 +273,10 @@@ static void elv_rqhash_reposition(struc
  static struct request *elv_rqhash_find(struct request_queue *q, sector_t offset)
  {
        struct elevator_queue *e = q->elevator;
-       struct hlist_head *hash_list = &e->hash[ELV_HASH_FN(offset)];
 -      struct hlist_node *entry, *next;
 +      struct hlist_node *next;
        struct request *rq;
  
-       hlist_for_each_entry_safe(rq, next, hash_list, hash) {
 -      hash_for_each_possible_safe(e->hash, rq, entry, next, hash, offset) {
++      hash_for_each_possible_safe(e->hash, rq, next, hash, offset) {
                BUG_ON(!ELV_ON_HASH(rq));
  
                if (unlikely(!rq_mergeable(rq))) {
diff --cc drivers/md/dm.c
Simple merge
Simple merge
diff --cc fs/block_dev.c
Simple merge
diff --cc fs/buffer.c
Simple merge
Simple merge
Simple merge
Simple merge
Simple merge