#endif
block = CURRENT->sector + cnt;
if ((int)block > floppy->blocks) {
- end_request(CURRENT, 0);
+ __blk_end_request_cur(CURRENT, -EIO);
goto repeat;
}
if ((rq_data_dir(CURRENT) != READ) && (rq_data_dir(CURRENT) != WRITE)) {
printk(KERN_WARNING "do_fd_request: unknown command\n");
- end_request(CURRENT, 0);
+ __blk_end_request_cur(CURRENT, -EIO);
goto repeat;
}
if (get_track(drive, track) == -1) {
- end_request(CURRENT, 0);
+ __blk_end_request_cur(CURRENT, -EIO);
goto repeat;
}
/* keep the drive spinning while writes are scheduled */
if (!fd_motor_on(drive)) {
- end_request(CURRENT, 0);
+ __blk_end_request_cur(CURRENT, -EIO);
goto repeat;
}
/*
CURRENT->nr_sectors -= CURRENT->current_nr_sectors;
CURRENT->sector += CURRENT->current_nr_sectors;
- end_request(CURRENT, 1);
+ __blk_end_request_cur(CURRENT, 0);
goto repeat;
}
CURRENT->errors++;
if (CURRENT->errors >= MAX_ERRORS) {
printk(KERN_ERR "fd%d: too many errors.\n", SelectedDrive );
- end_request(CURRENT, 0);
+ __blk_end_request_cur(CURRENT, -EIO);
}
else if (CURRENT->errors == RECALIBRATE_ERRORS) {
printk(KERN_WARNING "fd%d: recalibrating\n", SelectedDrive );
/* all sectors finished */
CURRENT->nr_sectors -= CURRENT->current_nr_sectors;
CURRENT->sector += CURRENT->current_nr_sectors;
- end_request(CURRENT, 1);
+ __blk_end_request_cur(CURRENT, 0);
redo_fd_request();
return;
}
/* all sectors finished */
CURRENT->nr_sectors -= CURRENT->current_nr_sectors;
CURRENT->sector += CURRENT->current_nr_sectors;
- end_request(CURRENT, 1);
+ __blk_end_request_cur(CURRENT, 0);
redo_fd_request();
}
return;
if (!UD.connected) {
/* drive not connected */
printk(KERN_ERR "Unknown Device: fd%d\n", drive );
- end_request(CURRENT, 0);
+ __blk_end_request_cur(CURRENT, -EIO);
goto repeat;
}
/* user supplied disk type */
if (--type >= NUM_DISK_MINORS) {
printk(KERN_WARNING "fd%d: invalid disk format", drive );
- end_request(CURRENT, 0);
+ __blk_end_request_cur(CURRENT, -EIO);
goto repeat;
}
if (minor2disktype[type].drive_types > DriveType) {
printk(KERN_WARNING "fd%d: unsupported disk format", drive );
- end_request(CURRENT, 0);
+ __blk_end_request_cur(CURRENT, -EIO);
goto repeat;
}
type = minor2disktype[type].index;
}
if (CURRENT->sector + 1 > UDT->blocks) {
- end_request(CURRENT, 0);
+ __blk_end_request_cur(CURRENT, -EIO);
goto repeat;
}
if (req != NULL) {
struct hd_i_struct *disk = req->rq_disk->private_data;
if (++req->errors >= MAX_ERRORS || (hd_error & BBD_ERR)) {
- end_request(req, 0);
+ __blk_end_request_cur(req, -EIO);
disk->special_op = disk->recalibrate = 1;
} else if (req->errors % RESET_FREQ == 0)
reset = 1;
req->buffer+512);
#endif
if (req->current_nr_sectors <= 0)
- end_request(req, 1);
+ __blk_end_request_cur(req, 0);
if (i > 0) {
SET_HANDLER(&read_intr);
return;
--req->current_nr_sectors;
req->buffer += 512;
if (!i || (req->bio && req->current_nr_sectors <= 0))
- end_request(req, 1);
+ __blk_end_request_cur(req, 0);
if (i > 0) {
SET_HANDLER(&write_intr);
outsw(HD_DATA, req->buffer, 256);
#ifdef DEBUG
printk("%s: too many errors\n", name);
#endif
- end_request(CURRENT, 0);
+ __blk_end_request_cur(CURRENT, -EIO);
}
hd_request();
spin_unlock_irq(hd_queue->queue_lock);
}
if (disk->head > 16) {
printk("%s: cannot handle device with more than 16 heads - giving up\n", req->rq_disk->disk_name);
- end_request(req, 0);
+ __blk_end_request_cur(req, -EIO);
}
disk->special_op = 0;
return 1;
((block+nsect) > get_capacity(req->rq_disk))) {
printk("%s: bad access: block=%d, count=%d\n",
req->rq_disk->disk_name, block, nsect);
- end_request(req, 0);
+ __blk_end_request_cur(req, -EIO);
goto repeat;
}
break;
default:
printk("unknown hd-command\n");
- end_request(req, 0);
+ __blk_end_request_cur(req, -EIO);
break;
}
}
if (req != NULL)
if (++req->errors >= MG_MAX_ERRORS ||
host->error == MG_ERR_TIMEOUT)
- end_request(req, 0);
+ __blk_end_request_cur(req, -EIO);
}
static unsigned int mg_out(struct mg_host *host,
if (req->current_nr_sectors <= 0) {
MG_DBG("remain : %d sects\n", remains);
- end_request(req, 1);
+ __blk_end_request_cur(req, 0);
if (remains > 0)
req = elv_next_request(host->breq);
}
if (req->current_nr_sectors <= 0) {
MG_DBG("remain : %d sects\n", remains);
- end_request(req, 1);
+ __blk_end_request_cur(req, 0);
if (remains > 0)
req = elv_next_request(host->breq);
}
/* let know if current segment done */
if (req->current_nr_sectors <= 0)
- end_request(req, 1);
+ __blk_end_request_cur(req, 0);
/* set handler if read remains */
if (i > 0) {
/* let know if current segment or all done */
if (!i || (req->bio && req->current_nr_sectors <= 0))
- end_request(req, 1);
+ __blk_end_request_cur(req, 0);
/* write 1 sector and set handler if remains */
if (i > 0) {
default:
printk(KERN_WARNING "%s:%d unknown command\n",
__func__, __LINE__);
- end_request(req, 0);
+ __blk_end_request_cur(req, -EIO);
break;
}
}
default:
printk(KERN_WARNING "%s:%d unknown command\n",
__func__, __LINE__);
- end_request(req, 0);
+ __blk_end_request_cur(req, -EIO);
break;
}
return MG_ERR_NONE;
"%s: bad access: sector=%d, count=%d\n",
req->rq_disk->disk_name,
sect_num, sect_cnt);
- end_request(req, 0);
+ __blk_end_request_cur(req, -EIO);
continue;
}
ps_set_intr(do_pcd_read, NULL, 0, nice);
return;
} else
- end_request(pcd_req, 0);
+ __blk_end_request_cur(pcd_req, -EIO);
}
}
-static inline void next_request(int success)
+static inline void next_request(int err)
{
unsigned long saved_flags;
spin_lock_irqsave(&pcd_lock, saved_flags);
- end_request(pcd_req, success);
+ __blk_end_request_cur(pcd_req, err);
pcd_busy = 0;
do_pcd_request(pcd_queue);
spin_unlock_irqrestore(&pcd_lock, saved_flags);
if (pcd_command(pcd_current, rd_cmd, 2048, "read block")) {
pcd_bufblk = -1;
- next_request(0);
+ next_request(-EIO);
return;
}
pcd_retries = 0;
pcd_transfer();
if (!pcd_count) {
- next_request(1);
+ next_request(0);
return;
}
return;
}
pcd_bufblk = -1;
- next_request(0);
+ next_request(-EIO);
return;
}
pd_claimed = 0;
phase = NULL;
spin_lock_irqsave(&pd_lock, saved_flags);
- end_request(pd_req, res);
+ __blk_end_request_cur(pd_req,
+ res == Ok ? 0 : -EIO);
pd_req = elv_next_request(pd_queue);
if (!pd_req)
stop = 1;
if (pd_count)
return 0;
spin_lock_irqsave(&pd_lock, saved_flags);
- end_request(pd_req, 1);
+ __blk_end_request_cur(pd_req, 0);
pd_count = pd_req->current_nr_sectors;
pd_buf = pd_req->buffer;
spin_unlock_irqrestore(&pd_lock, saved_flags);
static struct request_queue *pf_queue;
-static void pf_end_request(int uptodate)
+static void pf_end_request(int err)
{
if (pf_req) {
- end_request(pf_req, uptodate);
+ __blk_end_request_cur(pf_req, err);
pf_req = NULL;
}
}
pf_count = pf_req->current_nr_sectors;
if (pf_block + pf_count > get_capacity(pf_req->rq_disk)) {
- pf_end_request(0);
+ pf_end_request(-EIO);
goto repeat;
}
pi_do_claimed(pf_current->pi, do_pf_write);
else {
pf_busy = 0;
- pf_end_request(0);
+ pf_end_request(-EIO);
goto repeat;
}
}
return 1;
if (!pf_count) {
spin_lock_irqsave(&pf_spin_lock, saved_flags);
- pf_end_request(1);
+ pf_end_request(0);
pf_req = elv_next_request(pf_queue);
spin_unlock_irqrestore(&pf_spin_lock, saved_flags);
if (!pf_req)
return 0;
}
-static inline void next_request(int success)
+static inline void next_request(int err)
{
unsigned long saved_flags;
spin_lock_irqsave(&pf_spin_lock, saved_flags);
- pf_end_request(success);
+ pf_end_request(err);
pf_busy = 0;
do_pf_request(pf_queue);
spin_unlock_irqrestore(&pf_spin_lock, saved_flags);
pi_do_claimed(pf_current->pi, do_pf_read_start);
return;
}
- next_request(0);
+ next_request(-EIO);
return;
}
pf_mask = STAT_DRQ;
pi_do_claimed(pf_current->pi, do_pf_read_start);
return;
}
- next_request(0);
+ next_request(-EIO);
return;
}
pi_read_block(pf_current->pi, pf_buf, 512);
break;
}
pi_disconnect(pf_current->pi);
- next_request(1);
+ next_request(0);
}
static void do_pf_write(void)
pi_do_claimed(pf_current->pi, do_pf_write_start);
return;
}
- next_request(0);
+ next_request(-EIO);
return;
}
pi_do_claimed(pf_current->pi, do_pf_write_start);
return;
}
- next_request(0);
+ next_request(-EIO);
return;
}
pi_write_block(pf_current->pi, pf_buf, 512);
pi_do_claimed(pf_current->pi, do_pf_write_start);
return;
}
- next_request(0);
+ next_request(-EIO);
return;
}
pi_disconnect(pf_current->pi);
- next_request(1);
+ next_request(0);
}
static int __init pf_init(void)
if (res) {
dev_err(&dev->sbd.core, "%s:%u: %s failed %d\n", __func__,
__LINE__, op, res);
- end_request(req, 0);
+ __blk_end_request_cur(req, -EIO);
return 0;
}
if (res) {
dev_err(&dev->sbd.core, "%s:%u: sync cache failed 0x%llx\n",
__func__, __LINE__, res);
- end_request(req, 0);
+ __blk_end_request_cur(req, -EIO);
return 0;
}
break;
} else {
blk_dump_rq_flags(req, DEVICE_NAME " bad request");
- end_request(req, 0);
+ __blk_end_request_cur(req, -EIO);
continue;
}
}
fs = req->rq_disk->private_data;
if (req->sector < 0 || req->sector >= fs->total_secs) {
- end_request(req, 0);
+ __blk_end_request_cur(req, -EIO);
continue;
}
if (req->current_nr_sectors == 0) {
- end_request(req, 1);
+ __blk_end_request_cur(req, 0);
continue;
}
if (!fs->disk_in) {
- end_request(req, 0);
+ __blk_end_request_cur(req, -EIO);
continue;
}
if (rq_data_dir(req) == WRITE) {
if (fs->write_protected) {
- end_request(req, 0);
+ __blk_end_request_cur(req, -EIO);
continue;
}
}
switch (rq_data_dir(req)) {
case WRITE:
/* NOT IMPLEMENTED */
- end_request(req, 0);
+ __blk_end_request_cur(req, -EIO);
break;
case READ:
if (floppy_read_sectors(fs, req->sector,
req->current_nr_sectors,
req->buffer)) {
- end_request(req, 0);
+ __blk_end_request_cur(req, -EIO);
continue;
}
req->nr_sectors -= req->current_nr_sectors;
req->sector += req->current_nr_sectors;
req->buffer += req->current_nr_sectors * 512;
- end_request(req, 1);
+ __blk_end_request_cur(req, 0);
break;
}
}
#endif
if (req->sector < 0 || req->sector >= fs->total_secs) {
- end_request(req, 0);
+ __blk_end_request_cur(req, -EIO);
continue;
}
if (req->current_nr_sectors == 0) {
- end_request(req, 1);
+ __blk_end_request_cur(req, 0);
continue;
}
if (fs->ejected) {
- end_request(req, 0);
+ __blk_end_request_cur(req, -EIO);
continue;
}
if (fs->write_prot < 0)
fs->write_prot = swim3_readbit(fs, WRITE_PROT);
if (fs->write_prot) {
- end_request(req, 0);
+ __blk_end_request_cur(req, -EIO);
continue;
}
}
case do_transfer:
if (fs->cur_cyl != fs->req_cyl) {
if (fs->retries > 5) {
- end_request(fd_req, 0);
+ __blk_end_request_cur(fd_req, -EIO);
fs->state = idle;
return;
}
out_8(&sw->intr_enable, 0);
fs->cur_cyl = -1;
if (fs->retries > 5) {
- end_request(fd_req, 0);
+ __blk_end_request_cur(fd_req, -EIO);
fs->state = idle;
start_request(fs);
} else {
out_8(&sw->select, RELAX);
out_8(&sw->intr_enable, 0);
printk(KERN_ERR "swim3: seek timeout\n");
- end_request(fd_req, 0);
+ __blk_end_request_cur(fd_req, -EIO);
fs->state = idle;
start_request(fs);
}
return;
}
printk(KERN_ERR "swim3: seek settle timeout\n");
- end_request(fd_req, 0);
+ __blk_end_request_cur(fd_req, -EIO);
fs->state = idle;
start_request(fs);
}
fd_req->current_nr_sectors -= s;
printk(KERN_ERR "swim3: timeout %sing sector %ld\n",
(rq_data_dir(fd_req)==WRITE? "writ": "read"), (long)fd_req->sector);
- end_request(fd_req, 0);
+ __blk_end_request_cur(fd_req, -EIO);
fs->state = idle;
start_request(fs);
}
printk(KERN_ERR "swim3: seen sector but cyl=ff?\n");
fs->cur_cyl = -1;
if (fs->retries > 5) {
- end_request(fd_req, 0);
+ __blk_end_request_cur(fd_req, -EIO);
fs->state = idle;
start_request(fs);
} else {
printk("swim3: error %sing block %ld (err=%x)\n",
rq_data_dir(fd_req) == WRITE? "writ": "read",
(long)fd_req->sector, err);
- end_request(fd_req, 0);
+ __blk_end_request_cur(fd_req, -EIO);
fs->state = idle;
}
} else {
printk(KERN_ERR "swim3: fd dma: stat=%x resid=%d\n", stat, resid);
printk(KERN_ERR " state=%d, dir=%x, intr=%x, err=%x\n",
fs->state, rq_data_dir(fd_req), intr, err);
- end_request(fd_req, 0);
+ __blk_end_request_cur(fd_req, -EIO);
fs->state = idle;
start_request(fs);
break;
fd_req->current_nr_sectors -= fs->scount;
fd_req->buffer += fs->scount * 512;
if (fd_req->current_nr_sectors <= 0) {
- end_request(fd_req, 1);
+ __blk_end_request_cur(fd_req, 0);
fs->state = idle;
} else {
fs->req_sector += fs->scount;
int retry;
if (!blk_fs_request(req)) {
- end_request(req, 0);
+ __blk_end_request_cur(req, -EIO);
continue;
}
if (block + count > get_capacity(req->rq_disk)) {
- end_request(req, 0);
+ __blk_end_request_cur(req, -EIO);
continue;
}
if (rw != READ && rw != WRITE) {
printk("do_xd_request: unknown request\n");
- end_request(req, 0);
+ __blk_end_request_cur(req, -EIO);
continue;
}
- for (retry = 0; (retry < XD_RETRIES) && !res; retry++)
+ res = -EIO;
+ for (retry = 0; (retry < XD_RETRIES) && res; retry++)
res = xd_readwrite(rw, disk, req->buffer, block, count);
- end_request(req, res); /* wrap up, 0 = fail, 1 = success */
+ /* wrap up, 0 = success, -errno = fail */
+ __blk_end_request_cur(req, res);
}
}
printk("xd%c: %s timeout, recalibrating drive\n",'a'+drive,(operation == READ ? "read" : "write"));
xd_recalibrate(drive);
spin_lock_irq(&xd_lock);
- return (0);
+ return -EIO;
case 2:
if (sense[0] & 0x30) {
printk("xd%c: %s - ",'a'+drive,(operation == READ ? "reading" : "writing"));
else
printk(" - no valid disk address\n");
spin_lock_irq(&xd_lock);
- return (0);
+ return -EIO;
}
if (xd_dma_buffer)
for (i=0; i < (temp * 0x200); i++)
count -= temp, buffer += temp * 0x200, block += temp;
}
spin_lock_irq(&xd_lock);
- return (1);
+ return 0;
}
/* xd_recalibrate: recalibrate a given drive and reset controller if necessary */
while ((req = elv_next_request(rq)) != NULL) {
info = req->rq_disk->private_data;
if (!blk_fs_request(req)) {
- end_request(req, 0);
+ __blk_end_request_cur(req, -EIO);
continue;
}
while ((req = elv_next_request(q)) != NULL) {
if (blk_fs_request(req))
break;
- end_request(req, 0);
+ __blk_end_request_cur(req, -EIO);
}
return req;
}
/* Drop all pending requests */
while ((req = elv_next_request(ace->queue)) != NULL)
- end_request(req, 0);
+ __blk_end_request_cur(req, -EIO);
/* Drop back to IDLE state and notify waiters */
ace->fsm_state = ACE_FSM_STATE_IDLE;
if (start + len > z2ram_size) {
printk( KERN_ERR DEVICE_NAME ": bad access: block=%lu, count=%u\n",
req->sector, req->current_nr_sectors);
- end_request(req, 0);
+ __blk_end_request_cur(req, -EIO);
continue;
}
while (len) {
start += size;
len -= size;
}
- end_request(req, 1);
+ __blk_end_request_cur(req, 0);
}
}
while ((req = elv_next_request(rq)) != NULL) {
if (!blk_fs_request(req)) {
printk(KERN_DEBUG "GDROM: Non-fs request ignored\n");
- end_request(req, 0);
+ __blk_end_request_cur(req, -EIO);
}
if (rq_data_dir(req) != READ) {
printk(KERN_NOTICE "GDROM: Read only device -");
printk(" write request ignored\n");
- end_request(req, 0);
+ __blk_end_request_cur(req, -EIO);
}
if (req->nr_sectors)
gdrom_request_handler_dma(req);
else
- end_request(req, 0);
+ __blk_end_request_cur(req, -EIO);
}
}
break;
}
} else
- end_request(req, 0);
+ __blk_end_request_cur(req, -EIO);
}
};
if (req->cmd_type == REQ_TYPE_LINUX_BLOCK &&
req->cmd[0] == REQ_LB_OP_DISCARD)
- return !tr->discard(dev, block, nsect);
+ return tr->discard(dev, block, nsect);
if (!blk_fs_request(req))
- return 0;
+ return -EIO;
if (req->sector + req->current_nr_sectors > get_capacity(req->rq_disk))
- return 0;
+ return -EIO;
switch(rq_data_dir(req)) {
case READ:
for (; nsect > 0; nsect--, block++, buf += tr->blksize)
if (tr->readsect(dev, block, buf))
- return 0;
- return 1;
+ return -EIO;
+ return 0;
case WRITE:
if (!tr->writesect)
- return 0;
+ return -EIO;
for (; nsect > 0; nsect--, block++, buf += tr->blksize)
if (tr->writesect(dev, block, buf))
- return 0;
- return 1;
+ return -EIO;
+ return 0;
default:
printk(KERN_NOTICE "Unknown request %u\n", rq_data_dir(req));
- return 0;
+ return -EIO;
}
}
while (!kthread_should_stop()) {
struct request *req;
struct mtd_blktrans_dev *dev;
- int res = 0;
+ int res;
req = elv_next_request(rq);
spin_lock_irq(rq->queue_lock);
- end_request(req, res);
+ __blk_end_request_cur(req, res);
}
spin_unlock_irq(rq->queue_lock);
size_t len = req->current_nr_sectors << 9;
if ((offset + len) > jdp->dsize) {
- end_request(req, 0);
+ __blk_end_request_cur(req, -EIO);
continue;
}
if (rq_data_dir(req) != READ) {
printk(KERN_ERR "jsfd: write\n");
- end_request(req, 0);
+ __blk_end_request_cur(req, -EIO);
continue;
}
if ((jdp->dbase & 0xff000000) != 0x20000000) {
printk(KERN_ERR "jsfd: bad base %x\n", (int)jdp->dbase);
- end_request(req, 0);
+ __blk_end_request_cur(req, -EIO);
continue;
}
jsfd_read(req->buffer, jdp->dbase + offset, len);
- end_request(req, 1);
+ __blk_end_request_cur(req, 0);
}
}
* blk_update_request() completes given number of bytes and updates
* the request without completing it.
*
- * blk_end_request() and friends. __blk_end_request() and
- * end_request() must be called with the request queue spinlock
- * acquired.
+ * blk_end_request() and friends. __blk_end_request() must be called
+ * with the request queue spinlock acquired.
*
* Several drivers define their own end_request and call
* blk_end_request() for parts of the original function.
BUG_ON(pending);
}
+/**
+ * blk_end_request_cur - Helper function to finish the current request chunk.
+ * @rq: the request to finish the current chunk for
+ * @error: %0 for success, < %0 for error
+ *
+ * Description:
+ * Complete the current consecutively mapped chunk from @rq.
+ */
+static inline void blk_end_request_cur(struct request *rq, int error)
+{
+ blk_end_request(rq, error, rq->hard_cur_sectors << 9);
+}
+
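/*
 * Editor's illustrative sketch, not part of the patch: how a driver might
 * use the new unlocked helper. my_complete() and the "ok" flag are made-up
 * names for the example. Unlike __blk_end_request_cur(), this variant does
 * not require the caller to hold q->queue_lock.
 */
static void my_complete(struct request *rq, bool ok)
{
	/* finish the current chunk: rq->hard_cur_sectors << 9 bytes */
	blk_end_request_cur(rq, ok ? 0 : -EIO);
}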
/**
* __blk_end_request - Helper function for drivers to complete the request.
* @rq: the request being processed
}
/**
- * end_request - end I/O on the current segment of the request
- * @rq: the request being processed
- * @uptodate: error value or %0/%1 uptodate flag
+ * __blk_end_request_cur - Helper function to finish the current request chunk.
+ * @rq: the request to finish the current chunk for
+ * @error: %0 for success, < %0 for error
*
* Description:
- * Ends I/O on the current segment of a request. If that is the only
- * remaining segment, the request is also completed and freed.
- *
- * This is a remnant of how older block drivers handled I/O completions.
- * Modern drivers typically end I/O on the full request in one go, unless
- * they have a residual value to account for. For that case this function
- * isn't really useful, unless the residual just happens to be the
- * full current segment. In other words, don't use this function in new
- * code. Use blk_end_request() or __blk_end_request() to end a request.
- **/
-static inline void end_request(struct request *rq, int uptodate)
+ * Complete the current consecutively mapped chunk from @rq. Must
+ * be called with queue lock held.
+ */
+static inline void __blk_end_request_cur(struct request *rq, int error)
{
- int error = 0;
-
- if (uptodate <= 0)
- error = uptodate ? uptodate : -EIO;
-
- __blk_end_bidi_request(rq, error, rq->hard_cur_sectors << 9, 0);
+ __blk_end_request(rq, error, rq->hard_cur_sectors << 9);
}
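/*
 * Editor's illustrative sketch, not part of the patch: the typical conversion
 * pattern in an old-style request function, which the block layer calls with
 * q->queue_lock already held. do_my_request() and my_transfer() are
 * hypothetical names; my_transfer() is assumed to return 0 on success.
 */
static void do_my_request(struct request_queue *q)
{
	struct request *req;

	while ((req = elv_next_request(q)) != NULL) {
		if (my_transfer(req))
			/* was: end_request(req, 0); */
			__blk_end_request_cur(req, -EIO);
		else
			/* was: end_request(req, 1); */
			__blk_end_request_cur(req, 0);
	}
}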
extern void blk_complete_request(struct request *);