{
struct tcm_vhost_cmd *tv_cmd = container_of(se_cmd,
struct tcm_vhost_cmd, tvc_se_cmd);
- struct se_session *se_sess = se_cmd->se_sess;
+ struct se_session *se_sess = tv_cmd->tvc_nexus->tvn_se_sess;
int i;
if (tv_cmd->tvc_sgl_count) {
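/*
 * In the iov-to-sgl mapping path below: sg_init_table() zeroes the command's
 * scatterlist up front and get_user_pages_fast() can fail after only part of
 * it has been populated, so the cleanup skips NULL pages and releases only
 * the pages that were actually pinned.
 */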
ret = vhost_scsi_map_to_sgl(cmd, sg, sgl_count, &iov[i],
cmd->tvc_upages, write);
if (ret < 0) {
- for (i = 0; i < cmd->tvc_sgl_count; i++)
- put_page(sg_page(&cmd->tvc_sgl[i]));
-
+ for (i = 0; i < cmd->tvc_sgl_count; i++) {
+ struct page *page = sg_page(&cmd->tvc_sgl[i]);
+ if (page)
+ put_page(page);
+ }
cmd->tvc_sgl_count = 0;
return ret;
}
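/* The T10 protection-information scatterlist (tvc_prot_sgl) gets the same
 * NULL-aware unwind when its mapping fails below. */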
ret = vhost_scsi_map_to_sgl(cmd, prot_sg, prot_sgl_count, &iov[i],
cmd->tvc_upages, write);
if (ret < 0) {
- for (i = 0; i < cmd->tvc_prot_sgl_count; i++)
- put_page(sg_page(&cmd->tvc_prot_sgl[i]));
-
+ for (i = 0; i < cmd->tvc_prot_sgl_count; i++) {
+ struct page *page = sg_page(&cmd->tvc_prot_sgl[i]);
+ if (page)
+ put_page(page);
+ }
cmd->tvc_prot_sgl_count = 0;
return ret;
}
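/*
 * The hunks below rework the vhost_scsi_handle_vq() error paths: instead of
 * break/goto, a malformed or unmappable request now gets a
 * VIRTIO_SCSI_S_BAD_TARGET response via vhost_scsi_send_bad_target() and the
 * loop continues with the next descriptor, so a single bad request no longer
 * stops processing of the whole virtqueue.
 */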
if (unlikely(vq->iov[0].iov_len < req_size)) {
pr_err("Expecting virtio-scsi header: %zu, got %zu\n",
req_size, vq->iov[0].iov_len);
- break;
+ vhost_scsi_send_bad_target(vs, vq, head, out);
+ continue;
}
ret = memcpy_fromiovecend(req, &vq->iov[0], 0, req_size);
if (unlikely(ret)) {
vq_err(vq, "Faulted on virtio_scsi_cmd_req\n");
- break;
+ vhost_scsi_send_bad_target(vs, vq, head, out);
+ continue;
}
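/* When the request carries T10 PI metadata, the pi_bytesout/pi_bytesin
 * counts must be consistent with the data direction before prot_bytes is
 * taken from the request. */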
if (v_req_pi.pi_bytesout) {
if (data_direction != DMA_TO_DEVICE) {
vq_err(vq, "Received non zero do_pi_niov"
", but wrong data_direction\n");
- goto err_cmd;
+ vhost_scsi_send_bad_target(vs, vq, head, out);
+ continue;
}
prot_bytes = vhost32_to_cpu(vq, v_req_pi.pi_bytesout);
} else if (v_req_pi.pi_bytesin) {
if (data_direction != DMA_FROM_DEVICE) {
vq_err(vq, "Received non zero di_pi_niov"
", but wrong data_direction\n");
- goto err_cmd;
+ vhost_scsi_send_bad_target(vs, vq, head, out);
+ continue;
}
prot_bytes = vhost32_to_cpu(vq, v_req_pi.pi_bytesin);
}
vq_err(vq, "Received SCSI CDB with command_size: %d that"
" exceeds SCSI_MAX_VARLEN_CDB_SIZE: %d\n",
scsi_command_size(cdb), TCM_VHOST_MAX_CDB_SIZE);
- goto err_cmd;
+ vhost_scsi_send_bad_target(vs, vq, head, out);
+ continue;
}
cmd = vhost_scsi_get_tag(vq, tpg, cdb, tag, lun, task_attr,
exp_data_len + prot_bytes, data_direction);
if (IS_ERR(cmd)) {
vq_err(vq, "vhost_scsi_get_tag failed %ld\n",
PTR_ERR(cmd));
- goto err_cmd;
+ vhost_scsi_send_bad_target(vs, vq, head, out);
+ continue;
}
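/*
 * From here on a command has been allocated by vhost_scsi_get_tag(), so the
 * failure paths below release it via tcm_vhost_release_cmd() before
 * reporting the bad target; otherwise the command and its session tag would
 * be leaked.
 */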
pr_debug("Allocated tv_cmd: %p exp_data_len: %d, data_direction"
if (unlikely(ret)) {
vq_err(vq, "Failed to map iov to"
" prot_sgl\n");
- goto err_free;
+ tcm_vhost_release_cmd(&cmd->tvc_se_cmd);
+ vhost_scsi_send_bad_target(vs, vq, head, out);
+ continue;
}
}
if (data_direction != DMA_NONE) {
ret = vhost_scsi_map_iov_to_sgl(cmd, &vq->iov[data_first], data_num,
data_direction == DMA_FROM_DEVICE);
if (unlikely(ret)) {
vq_err(vq, "Failed to map iov to sgl\n");
- goto err_free;
+ tcm_vhost_release_cmd(&cmd->tvc_se_cmd);
+ vhost_scsi_send_bad_target(vs, vq, head, out);
+ continue;
}
}
/*
 * Dispatch tv_cmd descriptor for cmwq execution in process context
 * provided by tcm_vhost_workqueue.
 */
INIT_WORK(&cmd->work, tcm_vhost_submission_work);
queue_work(tcm_vhost_workqueue, &cmd->work);
}
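/* With every failure handled inline above, the shared err_free/err_cmd exit
 * paths below are dropped; only the common unlock at the "out" label
 * remains. */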
-
- mutex_unlock(&vq->mutex);
- return;
-
-err_free:
- vhost_scsi_free_cmd(cmd);
-err_cmd:
- vhost_scsi_send_bad_target(vs, vq, head, out);
out:
mutex_unlock(&vq->mutex);
}