atomic_set(&io->pending, 0);
kcryptd_queue_io(io);
- return 0;
+ return DM_MAPIO_SUBMITTED;
}
static int crypt_status(struct dm_target *ti, status_type_t type,
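
[Editor's note] The crypt target queues the io to its workqueue and owns it from here on, hence DM_MAPIO_SUBMITTED. The constant definitions themselves are not in these hunks; the full patch adds them to a dm-private header (such as drivers/md/dm.h). A sketch, with the values inferred from the literals they replace:

    /* Return values from target map functions (values inferred). */
    #define DM_MAPIO_SUBMITTED	0	/* target has taken ownership of the bio */
    #define DM_MAPIO_REMAPPED	1	/* bio rewritten in place; caller submits it */

    /* Return values from target end_io functions (value inferred). */
    #define DM_ENDIO_INCOMPLETE	1	/* io not complete; target will finish it */
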
bio->bi_bdev = lc->dev->bdev;
bio->bi_sector = lc->start + (bio->bi_sector - ti->begin);
- return 1;
+ return DM_MAPIO_REMAPPED;
}
static int linear_status(struct dm_target *ti, status_type_t type,
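
[Editor's note] dm-linear is the minimal remapping case: rewrite bi_bdev/bi_sector and hand the bio back to the core. A simplified sketch of the consumer of these return codes, modeled on __map_bio() in drivers/md/dm.c of this era (an assumption, not part of these hunks):

    static void __map_bio(struct dm_target *ti, struct bio *clone,
                          struct target_io *tio)
    {
            int r = ti->type->map(ti, clone, &tio->info);

            if (r == DM_MAPIO_REMAPPED)
                    /* target rewrote bi_bdev/bi_sector; submit the clone */
                    generic_make_request(clone);
            else if (r < 0)
                    /* target signalled failure; complete the clone with it */
                    bio_io_error(clone, clone->bi_size);

            /* DM_MAPIO_SUBMITTED: the target took the bio; nothing more to do */
    }
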
static int map_io(struct multipath *m, struct bio *bio, struct mpath_io *mpio,
unsigned was_queued)
{
- int r = 1;
+ int r = DM_MAPIO_REMAPPED;
unsigned long flags;
struct pgpath *pgpath;
!m->queue_io)
queue_work(kmultipathd, &m->process_queued_ios);
pgpath = NULL;
- r = 0;
+ r = DM_MAPIO_SUBMITTED;
} else if (!pgpath)
r = -EIO; /* Failed */
else
r = map_io(m, bio, mpio, 1);
if (r < 0)
bio_endio(bio, bio->bi_size, r);
- else if (r == 1)
+ else if (r == DM_MAPIO_REMAPPED)
generic_make_request(bio);
bio = next;
queue_work(kmultipathd, &m->process_queued_ios);
spin_unlock_irqrestore(&m->lock, flags);
- return 1; /* io not complete */
+ return DM_ENDIO_INCOMPLETE; /* io not complete */
}
static int multipath_end_io(struct dm_target *ti, struct bio *bio,
if (ps->type->end_io)
ps->type->end_io(ps, &pgpath->path);
}
- if (r <= 0)
+ if (r != DM_ENDIO_INCOMPLETE)
mempool_free(mpio, m->mpio_pool);
return r;
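
[Editor's note] Multipath exercises all three map outcomes (queue for the daemon, remap to a path, fail with -EIO) and is the main user of the end_io side: do_end_io() returns DM_ENDIO_INCOMPLETE when the io will be retried on another path, so the per-io mpio state may only be freed once the io is really finished, hence the `r != DM_ENDIO_INCOMPLETE` test above. A simplified sketch of the end_io consumer, modeled on clone_endio() in drivers/md/dm.c (an assumption, not part of these hunks):

    static int clone_endio(struct bio *bio, unsigned int done, int error)
    {
            struct target_io *tio = bio->bi_private;
            struct dm_io *io = tio->io;
            dm_endio_fn endio = tio->ti->type->end_io;
            int r = 0;

            if (endio) {
                    r = endio(tio->ti, bio, error, &tio->info);
                    if (r < 0)
                            error = r;	/* target overrides the status */
                    else if (r == DM_ENDIO_INCOMPLETE)
                            /* target keeps the io, e.g. retry on another path */
                            return 1;
            }

            /* io really is finished: drop per-io state, complete the original */
            free_tio(io->md, tio);
            dec_pending(io, error);
            bio_put(bio);
            return r;
    }
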
if (rw == WRITE) {
queue_bio(ms, bio, rw);
- return 0;
+ return DM_MAPIO_SUBMITTED;
}
r = ms->rh.log->type->in_sync(ms->rh.log,
return r;
if (r == -EWOULDBLOCK) /* FIXME: ugly */
- r = 0;
+ r = DM_MAPIO_SUBMITTED;
/*
* We don't want to fast track a recovery just for a read
if (!r) {
/* Pass this io over to the daemon */
queue_bio(ms, bio, rw);
- return 0;
+ return DM_MAPIO_SUBMITTED;
}
m = choose_mirror(ms, bio->bi_sector);
return -EIO;
map_bio(ms, m, bio);
- return 1;
+ return DM_MAPIO_REMAPPED;
}
static int mirror_end_io(struct dm_target *ti, struct bio *bio,
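
[Editor's note] One subtlety in the mirror read path: r carries first an errno from the dirty log's in_sync() method and afterwards a map return code. The FIXME'd conversion only works because DM_MAPIO_SUBMITTED is 0, which the subsequent "not in sync" test relies on. An annotated paraphrase of the hunk above:

    r = ms->rh.log->type->in_sync(ms->rh.log,
                                  bio_to_region(&ms->rh, bio), 0);
    if (r == -EWOULDBLOCK)		/* log can't answer without blocking */
            r = DM_MAPIO_SUBMITTED;	/* == 0, read below as "not in sync" */

    if (!r) {
            /* not known to be in sync: hand the read to the daemon */
            queue_bio(ms, bio, rw);
            return DM_MAPIO_SUBMITTED;
    }
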
{
struct exception *e;
struct dm_snapshot *s = (struct dm_snapshot *) ti->private;
- int r = 1;
+ int r = DM_MAPIO_REMAPPED;
chunk_t chunk;
struct pending_exception *pe = NULL;
remap_exception(s, &pe->e, bio);
bio_list_add(&pe->snapshot_bios, bio);
- r = 0;
+ r = DM_MAPIO_SUBMITTED;
if (!pe->started) {
/* this is protected by snap->lock */
*---------------------------------------------------------------*/
static int __origin_write(struct list_head *snapshots, struct bio *bio)
{
- int r = 1, first = 0;
+ int r = DM_MAPIO_REMAPPED, first = 0;
struct dm_snapshot *snap;
struct exception *e;
struct pending_exception *pe, *next_pe, *primary_pe = NULL;
bio_list_add(&primary_pe->origin_bios, bio);
- r = 0;
+ r = DM_MAPIO_SUBMITTED;
}
if (!pe->primary_pe) {
static int do_origin(struct dm_dev *origin, struct bio *bio)
{
struct origin *o;
- int r = 1;
+ int r = DM_MAPIO_REMAPPED;
down_read(&_origins_lock);
o = __lookup_origin(origin->bdev);
return -EOPNOTSUPP;
/* Only tell snapshots if this is a write */
- return (bio_rw(bio) == WRITE) ? do_origin(dev, bio) : 1;
+ return (bio_rw(bio) == WRITE) ? do_origin(dev, bio) : DM_MAPIO_REMAPPED;
}
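
[Editor's note] In both snapshot_map() and __origin_write(), DM_MAPIO_SUBMITTED means the bio was parked on a pending exception (pe->snapshot_bios, primary_pe->origin_bios) until kcopyd finishes copying the chunk; nothing has completed the io yet. A sketch of how such parked bios are resubmitted afterwards, modeled on the flush helper in dm-snap.c (an assumption, not shown in these hunks):

    static void flush_bios(struct bio *bio)
    {
            struct bio *n;

            /* walk the singly linked bi_next chain, resubmitting each bio */
            while (bio) {
                    n = bio->bi_next;
                    bio->bi_next = NULL;
                    generic_make_request(bio);
                    bio = n;
            }
    }
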
#define min_not_zero(l, r) (l == 0) ? r : ((r == 0) ? l : min(l, r))
bio->bi_bdev = sc->stripe[stripe].dev->bdev;
bio->bi_sector = sc->stripe[stripe].physical_start +
(chunk << sc->chunk_shift) + (offset & sc->chunk_mask);
- return 1;
+ return DM_MAPIO_REMAPPED;
}
static int stripe_status(struct dm_target *ti,
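
[Editor's note] The stripe hunk only renames the return code, but the remap arithmetic above deserves a worked example. With hypothetical numbers: a chunk_size of 8 sectors (so chunk_shift = 3, chunk_mask = 7), 2 stripes, and a relative offset of 21 sectors:

    sector_t offset = 21;			/* bio->bi_sector - ti->begin */
    sector_t chunk  = offset >> 3;		/* = 2, the logical chunk */
    uint32_t stripe = sector_div(chunk, 2);	/* stripe = 0, chunk becomes 1 */

    /*
     * bi_sector = physical_start[0] + (1 << 3) + (21 & 7)
     *           = physical_start[0] + 8 + 5 = physical_start[0] + 13
     */

sector_div() divides chunk in place and returns the remainder, so chunk ends up as the per-device chunk index while the remainder selects the stripe device.
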
bio_endio(bio, bio->bi_size, 0);
/* accepted bio, don't make new request */
- return 0;
+ return DM_MAPIO_SUBMITTED;
}
static struct target_type zero_target = {
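
[Editor's note] dm-zero is the degenerate case: it completes the bio itself (the bio_endio() above) and returns DM_MAPIO_SUBMITTED so the core makes no further request. Summing up the contract as these hunks use it:

    /*
     * map():    < 0                 core fails the io with that errno
     *           DM_MAPIO_SUBMITTED  target owns the bio: queued it, or
     *                               already completed it (as dm-zero does)
     *           DM_MAPIO_REMAPPED   core submits the rewritten bio
     *
     * end_io(): < 0                 override the completion status
     *           0                   done; core completes the bio
     *           DM_ENDIO_INCOMPLETE target keeps the io (multipath retry)
     */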