return;
cpu = part_stat_lock();
+ part = disk_map_sector_rcu(rq->rq_disk, blk_rq_pos(rq));
- if (!new_io) {
- part = rq->part;
+ if (!new_io)
part_stat_inc(cpu, part, merges[rw]);
- } else {
- part = disk_map_sector_rcu(rq->rq_disk, blk_rq_pos(rq));
+ else {
part_round_stats(cpu, part);
part_inc_in_flight(part, rw);
- rq->part = part;
}
part_stat_unlock();
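
With this hunk, merge and new-I/O accounting goes back to resolving the partition by sector on every call instead of reading a cached rq->part. For reference, a sketch of the resulting function; the function name and the lines outside the hunk are assumed from block/blk-core.c of this period:

	static void drive_stat_acct(struct request *rq, int new_io)
	{
		struct hd_struct *part;
		int rw = rq_data_dir(rq);
		int cpu;

		if (!blk_do_io_stat(rq))
			return;

		cpu = part_stat_lock();
		/* per-call lookup; safe because part_stat_lock() takes the RCU read lock */
		part = disk_map_sector_rcu(rq->rq_disk, blk_rq_pos(rq));

		if (!new_io)
			part_stat_inc(cpu, part, merges[rw]);
		else {
			part_round_stats(cpu, part);
			part_inc_in_flight(part, rw);
		}

		part_stat_unlock();
	}
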
rq->ref_count = 1;
rq->start_time = jiffies;
set_start_time_ns(rq);
- rq->part = NULL;
}
EXPORT_SYMBOL(blk_rq_init);
rl->starved[is_sync] = 0;
priv = !test_bit(QUEUE_FLAG_ELVSWITCH, &q->queue_flags);
- if (priv) {
+ if (priv)
rl->elvpriv++;
- /*
- * Don't do stats for non-priv requests
- */
- if (blk_queue_io_stat(q))
- rw_flags |= REQ_IO_STAT;
- }
-
+ if (blk_queue_io_stat(q))
+ rw_flags |= REQ_IO_STAT;
spin_unlock_irq(q->queue_lock);
rq = blk_alloc_request(q, rw_flags, priv, gfp_mask);
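
Note the behavioral change in this hunk: REQ_IO_STAT is now set whenever queue I/O stats are enabled, not only for elevator-private requests, so the old "Don't do stats for non-priv requests" special case disappears. The flag is consumed by blk_do_io_stat(); a sketch of that helper as it is assumed to read in block/blk.h of this era:

	static inline int blk_do_io_stat(struct request *rq)
	{
		return rq->rq_disk &&
		       (rq->cmd_flags & REQ_IO_STAT) &&
		       (rq->cmd_type == REQ_TYPE_FS ||
		        (rq->cmd_flags & REQ_DISCARD));
	}
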
int cpu;
cpu = part_stat_lock();
- part = req->part;
+ part = disk_map_sector_rcu(req->rq_disk, blk_rq_pos(req));
part_stat_add(cpu, part, sectors[rw], bytes >> 9);
part_stat_unlock();
}
int cpu;
cpu = part_stat_lock();
- part = req->part;
+ part = disk_map_sector_rcu(req->rq_disk, blk_rq_pos(req));
part_stat_inc(cpu, part, ios[rw]);
part_stat_add(cpu, part, ticks[rw], duration);
int cpu;
cpu = part_stat_lock();
- part = req->part;
+ part = disk_map_sector_rcu(req->rq_disk, blk_rq_pos(req));
part_round_stats(cpu, part);
part_dec_in_flight(part, rq_data_dir(req));
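
The three accounting sites above all restore the same pattern: the sector-to-partition lookup happens inside a part_stat_lock()/part_stat_unlock() pair, which is what makes disk_map_sector_rcu() safe to call. Quoted for context from include/linux/genhd.h of this era:

	#define part_stat_lock()	({ rcu_read_lock(); get_cpu(); })
	#define part_stat_unlock()	do { put_cpu(); rcu_read_unlock(); } while (0)

The hd_struct returned by the lookup therefore cannot be freed before part_stat_unlock(), since partition teardown defers the actual free with call_rcu().
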
int blk_dev_init(void);
+void elv_quiesce_start(struct request_queue *q);
+void elv_quiesce_end(struct request_queue *q);
+
/*
* Return the threshold (number of used requests) at which the queue is
* considered to be congested. It includes a little hysteresis to keep the
{
struct disk_part_tbl *ptbl =
container_of(head, struct disk_part_tbl, rcu_head);
- struct gendisk *disk = ptbl->disk;
- struct request_queue *q = disk->queue;
- unsigned long flags;
kfree(ptbl);
-
- spin_lock_irqsave(q->queue_lock, flags);
- elv_quiesce_end(q);
- spin_unlock_irqrestore(q->queue_lock, flags);
}
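
With the elevator quiesce removed, the RCU callback reduces to a bare kfree(): the grace period alone guarantees that no reader still holds a pointer into the old table by the time the callback runs.
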
/**
struct disk_part_tbl *new_ptbl)
{
struct disk_part_tbl *old_ptbl = disk->part_tbl;
- struct request_queue *q = disk->queue;
rcu_assign_pointer(disk->part_tbl, new_ptbl);
if (old_ptbl) {
rcu_assign_pointer(old_ptbl->last_lookup, NULL);
-
- spin_lock_irq(q->queue_lock);
- elv_quiesce_start(q);
- spin_unlock_irq(q->queue_lock);
-
call_rcu(&old_ptbl->rcu_head, disk_free_ptbl_rcu_cb);
}
}
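
The writer side is thus back to pairing rcu_assign_pointer() with call_rcu() and nothing else. The matching reader is disk_map_sector_rcu(), which walks the table entirely under the RCU read lock; quoted for context from block/genhd.c of this era (sector_in_part() is a local helper there):

	struct hd_struct *disk_map_sector_rcu(struct gendisk *disk, sector_t sector)
	{
		struct disk_part_tbl *ptbl;
		struct hd_struct *part;
		int i;

		ptbl = rcu_dereference(disk->part_tbl);

		/* fast path: the last partition hit, cached in the table */
		part = rcu_dereference(ptbl->last_lookup);
		if (part && sector_in_part(part, sector))
			return part;

		for (i = 1; i < ptbl->len; i++) {
			part = rcu_dereference(ptbl->part[i]);

			if (part && sector_in_part(part, sector)) {
				rcu_assign_pointer(ptbl->last_lookup, part);
				return part;
			}
		}
		return &disk->part0;
	}
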
return -ENOMEM;
new_ptbl->len = target;
- new_ptbl->disk = disk;
for (i = 0; i < len; i++)
rcu_assign_pointer(new_ptbl->part[i], old_ptbl->part[i]);
static void delete_partition_rcu_cb(struct rcu_head *head)
{
struct hd_struct *part = container_of(head, struct hd_struct, rcu_head);
- struct gendisk *disk = part_to_disk(part);
- struct request_queue *q = disk->queue;
- unsigned long flags;
part->start_sect = 0;
part->nr_sects = 0;
part_stat_set_all(part, 0);
put_device(part_to_dev(part));
-
- spin_lock_irqsave(q->queue_lock, flags);
- elv_quiesce_end(q);
- spin_unlock_irqrestore(q->queue_lock, flags);
}
void delete_partition(struct gendisk *disk, int partno)
{
struct disk_part_tbl *ptbl = disk->part_tbl;
struct hd_struct *part;
- struct request_queue *q = disk->queue;
if (partno >= ptbl->len)
return;
kobject_put(part->holder_dir);
device_del(part_to_dev(part));
- spin_lock_irq(q->queue_lock);
- elv_quiesce_start(q);
- spin_unlock_irq(q->queue_lock);
-
call_rcu(&part->rcu_head, delete_partition_rcu_cb);
}
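
delete_partition() follows the standard RCU deletion pattern again: unpublish the pointers, tear down the visible device, then defer the final cleanup past a grace period. The lines elided between the length check and kobject_put() are assumed to look like this in fs/partitions/check.c of this period:

	part = ptbl->part[partno];
	if (!part)
		return;

	blk_free_devt(part_devt(part));
	/* unpublish before the grace period starts */
	rcu_assign_pointer(ptbl->part[partno], NULL);
	rcu_assign_pointer(ptbl->last_lookup, NULL);
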
void *elevator_private3;
struct gendisk *rq_disk;
- struct hd_struct *part;
unsigned long start_time;
#ifdef CONFIG_BLK_CGROUP
unsigned long long start_time_ns;
extern int elv_set_request(struct request_queue *, struct request *, gfp_t);
extern void elv_put_request(struct request_queue *, struct request *);
extern void elv_drain_elevator(struct request_queue *);
-extern void elv_quiesce_start(struct request_queue *);
-extern void elv_quiesce_end(struct request_queue *);
/*
* io scheduler registration
struct rcu_head rcu_head;
int len;
struct hd_struct __rcu *last_lookup;
- struct gendisk *disk;
struct hd_struct __rcu *part[];
};
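
With the cached hd_struct pointer gone from struct request and the disk back-pointer gone from disk_part_tbl, a request can no longer hold a partition reference across a partition-table swap; every accounting site resolves the partition from the sector, under the RCU read lock, at the moment it needs it.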