/*
 * Virtio SCSI HBA driver
 *
 * Copyright IBM Corp. 2010
 * Copyright Red Hat, Inc. 2011
 *
 * Authors:
 *  Stefan Hajnoczi   <stefanha@linux.vnet.ibm.com>
 *  Paolo Bonzini   <pbonzini@redhat.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 */
16 #include <linux/module.h>
17 #include <linux/slab.h>
18 #include <linux/mempool.h>
19 #include <linux/virtio.h>
20 #include <linux/virtio_ids.h>
21 #include <linux/virtio_config.h>
22 #include <linux/virtio_scsi.h>
23 #include <scsi/scsi_host.h>
24 #include <scsi/scsi_device.h>
25 #include <scsi/scsi_cmnd.h>
27 #define VIRTIO_SCSI_MEMPOOL_SZ 64
29 /* Command queue element */
30 struct virtio_scsi_cmd
{
32 struct completion
*comp
;
34 struct virtio_scsi_cmd_req cmd
;
35 struct virtio_scsi_ctrl_tmf_req tmf
;
36 struct virtio_scsi_ctrl_an_req an
;
39 struct virtio_scsi_cmd_resp cmd
;
40 struct virtio_scsi_ctrl_tmf_resp tmf
;
41 struct virtio_scsi_ctrl_an_resp an
;
42 struct virtio_scsi_event evt
;
44 } ____cacheline_aligned_in_smp
;
46 /* Driver instance state */
48 /* Protects ctrl_vq, req_vq and sg[] */
51 struct virtio_device
*vdev
;
52 struct virtqueue
*ctrl_vq
;
53 struct virtqueue
*event_vq
;
54 struct virtqueue
*req_vq
;
56 /* For sglist construction when adding commands to the virtqueue. */
57 struct scatterlist sg
[];
/* Slab cache for struct virtio_scsi_cmd, and the mempool built on top of it
 * that guarantees forward progress for allocations under memory pressure. */
static struct kmem_cache *virtscsi_cmd_cache;
static mempool_t *virtscsi_cmd_pool;
63 static inline struct Scsi_Host
*virtio_scsi_host(struct virtio_device
*vdev
)
68 static void virtscsi_compute_resid(struct scsi_cmnd
*sc
, u32 resid
)
73 if (!scsi_bidi_cmnd(sc
)) {
74 scsi_set_resid(sc
, resid
);
78 scsi_in(sc
)->resid
= min(resid
, scsi_in(sc
)->length
);
79 scsi_out(sc
)->resid
= resid
- scsi_in(sc
)->resid
;
83 * virtscsi_complete_cmd - finish a scsi_cmd and invoke scsi_done
85 * Called with vq_lock held.
87 static void virtscsi_complete_cmd(void *buf
)
89 struct virtio_scsi_cmd
*cmd
= buf
;
90 struct scsi_cmnd
*sc
= cmd
->sc
;
91 struct virtio_scsi_cmd_resp
*resp
= &cmd
->resp
.cmd
;
93 dev_dbg(&sc
->device
->sdev_gendev
,
94 "cmd %p response %u status %#02x sense_len %u\n",
95 sc
, resp
->response
, resp
->status
, resp
->sense_len
);
97 sc
->result
= resp
->status
;
98 virtscsi_compute_resid(sc
, resp
->resid
);
99 switch (resp
->response
) {
100 case VIRTIO_SCSI_S_OK
:
101 set_host_byte(sc
, DID_OK
);
103 case VIRTIO_SCSI_S_OVERRUN
:
104 set_host_byte(sc
, DID_ERROR
);
106 case VIRTIO_SCSI_S_ABORTED
:
107 set_host_byte(sc
, DID_ABORT
);
109 case VIRTIO_SCSI_S_BAD_TARGET
:
110 set_host_byte(sc
, DID_BAD_TARGET
);
112 case VIRTIO_SCSI_S_RESET
:
113 set_host_byte(sc
, DID_RESET
);
115 case VIRTIO_SCSI_S_BUSY
:
116 set_host_byte(sc
, DID_BUS_BUSY
);
118 case VIRTIO_SCSI_S_TRANSPORT_FAILURE
:
119 set_host_byte(sc
, DID_TRANSPORT_DISRUPTED
);
121 case VIRTIO_SCSI_S_TARGET_FAILURE
:
122 set_host_byte(sc
, DID_TARGET_FAILURE
);
124 case VIRTIO_SCSI_S_NEXUS_FAILURE
:
125 set_host_byte(sc
, DID_NEXUS_FAILURE
);
128 scmd_printk(KERN_WARNING
, sc
, "Unknown response %d",
131 case VIRTIO_SCSI_S_FAILURE
:
132 set_host_byte(sc
, DID_ERROR
);
136 WARN_ON(resp
->sense_len
> VIRTIO_SCSI_SENSE_SIZE
);
137 if (sc
->sense_buffer
) {
138 memcpy(sc
->sense_buffer
, resp
->sense
,
139 min_t(u32
, resp
->sense_len
, VIRTIO_SCSI_SENSE_SIZE
));
141 set_driver_byte(sc
, DRIVER_SENSE
);
144 mempool_free(cmd
, virtscsi_cmd_pool
);
148 static void virtscsi_vq_done(struct virtqueue
*vq
, void (*fn
)(void *buf
))
150 struct Scsi_Host
*sh
= virtio_scsi_host(vq
->vdev
);
151 struct virtio_scsi
*vscsi
= shost_priv(sh
);
156 spin_lock_irqsave(&vscsi
->vq_lock
, flags
);
159 virtqueue_disable_cb(vq
);
160 while ((buf
= virtqueue_get_buf(vq
, &len
)) != NULL
)
162 } while (!virtqueue_enable_cb(vq
));
164 spin_unlock_irqrestore(&vscsi
->vq_lock
, flags
);
167 static void virtscsi_req_done(struct virtqueue
*vq
)
169 virtscsi_vq_done(vq
, virtscsi_complete_cmd
);
172 static void virtscsi_complete_free(void *buf
)
174 struct virtio_scsi_cmd
*cmd
= buf
;
177 complete_all(cmd
->comp
);
178 mempool_free(cmd
, virtscsi_cmd_pool
);
181 static void virtscsi_ctrl_done(struct virtqueue
*vq
)
183 virtscsi_vq_done(vq
, virtscsi_complete_free
);
186 static void virtscsi_event_done(struct virtqueue
*vq
)
188 virtscsi_vq_done(vq
, virtscsi_complete_free
);
191 static void virtscsi_map_sgl(struct scatterlist
*sg
, unsigned int *p_idx
,
192 struct scsi_data_buffer
*sdb
)
194 struct sg_table
*table
= &sdb
->table
;
195 struct scatterlist
*sg_elem
;
196 unsigned int idx
= *p_idx
;
199 for_each_sg(table
->sgl
, sg_elem
, table
->nents
, i
)
200 sg_set_buf(&sg
[idx
++], sg_virt(sg_elem
), sg_elem
->length
);
206 * virtscsi_map_cmd - map a scsi_cmd to a virtqueue scatterlist
207 * @vscsi : virtio_scsi state
208 * @cmd : command structure
209 * @out_num : number of read-only elements
210 * @in_num : number of write-only elements
211 * @req_size : size of the request buffer
212 * @resp_size : size of the response buffer
214 * Called with vq_lock held.
216 static void virtscsi_map_cmd(struct virtio_scsi
*vscsi
,
217 struct virtio_scsi_cmd
*cmd
,
218 unsigned *out_num
, unsigned *in_num
,
219 size_t req_size
, size_t resp_size
)
221 struct scsi_cmnd
*sc
= cmd
->sc
;
222 struct scatterlist
*sg
= vscsi
->sg
;
223 unsigned int idx
= 0;
226 struct Scsi_Host
*shost
= virtio_scsi_host(vscsi
->vdev
);
227 BUG_ON(scsi_sg_count(sc
) > shost
->sg_tablesize
);
229 /* TODO: check feature bit and fail if unsupported? */
230 BUG_ON(sc
->sc_data_direction
== DMA_BIDIRECTIONAL
);
233 /* Request header. */
234 sg_set_buf(&sg
[idx
++], &cmd
->req
, req_size
);
236 /* Data-out buffer. */
237 if (sc
&& sc
->sc_data_direction
!= DMA_FROM_DEVICE
)
238 virtscsi_map_sgl(sg
, &idx
, scsi_out(sc
));
242 /* Response header. */
243 sg_set_buf(&sg
[idx
++], &cmd
->resp
, resp_size
);
246 if (sc
&& sc
->sc_data_direction
!= DMA_TO_DEVICE
)
247 virtscsi_map_sgl(sg
, &idx
, scsi_in(sc
));
249 *in_num
= idx
- *out_num
;
252 static int virtscsi_kick_cmd(struct virtio_scsi
*vscsi
, struct virtqueue
*vq
,
253 struct virtio_scsi_cmd
*cmd
,
254 size_t req_size
, size_t resp_size
, gfp_t gfp
)
256 unsigned int out_num
, in_num
;
260 spin_lock_irqsave(&vscsi
->vq_lock
, flags
);
262 virtscsi_map_cmd(vscsi
, cmd
, &out_num
, &in_num
, req_size
, resp_size
);
264 ret
= virtqueue_add_buf(vq
, vscsi
->sg
, out_num
, in_num
, cmd
, gfp
);
268 spin_unlock_irqrestore(&vscsi
->vq_lock
, flags
);
272 static int virtscsi_queuecommand(struct Scsi_Host
*sh
, struct scsi_cmnd
*sc
)
274 struct virtio_scsi
*vscsi
= shost_priv(sh
);
275 struct virtio_scsi_cmd
*cmd
;
278 dev_dbg(&sc
->device
->sdev_gendev
,
279 "cmd %p CDB: %#02x\n", sc
, sc
->cmnd
[0]);
281 ret
= SCSI_MLQUEUE_HOST_BUSY
;
282 cmd
= mempool_alloc(virtscsi_cmd_pool
, GFP_ATOMIC
);
286 memset(cmd
, 0, sizeof(*cmd
));
288 cmd
->req
.cmd
= (struct virtio_scsi_cmd_req
){
290 .lun
[1] = sc
->device
->id
,
291 .lun
[2] = (sc
->device
->lun
>> 8) | 0x40,
292 .lun
[3] = sc
->device
->lun
& 0xff,
293 .tag
= (unsigned long)sc
,
294 .task_attr
= VIRTIO_SCSI_S_SIMPLE
,
299 BUG_ON(sc
->cmd_len
> VIRTIO_SCSI_CDB_SIZE
);
300 memcpy(cmd
->req
.cmd
.cdb
, sc
->cmnd
, sc
->cmd_len
);
302 if (virtscsi_kick_cmd(vscsi
, vscsi
->req_vq
, cmd
,
303 sizeof cmd
->req
.cmd
, sizeof cmd
->resp
.cmd
,
311 static int virtscsi_tmf(struct virtio_scsi
*vscsi
, struct virtio_scsi_cmd
*cmd
)
313 DECLARE_COMPLETION_ONSTACK(comp
);
317 ret
= virtscsi_kick_cmd(vscsi
, vscsi
->ctrl_vq
, cmd
,
318 sizeof cmd
->req
.tmf
, sizeof cmd
->resp
.tmf
,
323 wait_for_completion(&comp
);
324 if (cmd
->resp
.tmf
.response
!= VIRTIO_SCSI_S_OK
&&
325 cmd
->resp
.tmf
.response
!= VIRTIO_SCSI_S_FUNCTION_SUCCEEDED
)
331 static int virtscsi_device_reset(struct scsi_cmnd
*sc
)
333 struct virtio_scsi
*vscsi
= shost_priv(sc
->device
->host
);
334 struct virtio_scsi_cmd
*cmd
;
336 sdev_printk(KERN_INFO
, sc
->device
, "device reset\n");
337 cmd
= mempool_alloc(virtscsi_cmd_pool
, GFP_NOIO
);
341 memset(cmd
, 0, sizeof(*cmd
));
343 cmd
->req
.tmf
= (struct virtio_scsi_ctrl_tmf_req
){
344 .type
= VIRTIO_SCSI_T_TMF
,
345 .subtype
= VIRTIO_SCSI_T_TMF_LOGICAL_UNIT_RESET
,
347 .lun
[1] = sc
->device
->id
,
348 .lun
[2] = (sc
->device
->lun
>> 8) | 0x40,
349 .lun
[3] = sc
->device
->lun
& 0xff,
351 return virtscsi_tmf(vscsi
, cmd
);
354 static int virtscsi_abort(struct scsi_cmnd
*sc
)
356 struct virtio_scsi
*vscsi
= shost_priv(sc
->device
->host
);
357 struct virtio_scsi_cmd
*cmd
;
359 scmd_printk(KERN_INFO
, sc
, "abort\n");
360 cmd
= mempool_alloc(virtscsi_cmd_pool
, GFP_NOIO
);
364 memset(cmd
, 0, sizeof(*cmd
));
366 cmd
->req
.tmf
= (struct virtio_scsi_ctrl_tmf_req
){
367 .type
= VIRTIO_SCSI_T_TMF
,
368 .subtype
= VIRTIO_SCSI_T_TMF_ABORT_TASK
,
370 .lun
[1] = sc
->device
->id
,
371 .lun
[2] = (sc
->device
->lun
>> 8) | 0x40,
372 .lun
[3] = sc
->device
->lun
& 0xff,
373 .tag
= (unsigned long)sc
,
375 return virtscsi_tmf(vscsi
, cmd
);
378 static struct scsi_host_template virtscsi_host_template
= {
379 .module
= THIS_MODULE
,
380 .name
= "Virtio SCSI HBA",
381 .proc_name
= "virtio_scsi",
382 .queuecommand
= virtscsi_queuecommand
,
384 .eh_abort_handler
= virtscsi_abort
,
385 .eh_device_reset_handler
= virtscsi_device_reset
,
388 .dma_boundary
= UINT_MAX
,
389 .use_clustering
= ENABLE_CLUSTERING
,
/* Read field @fld from the device's virtio-scsi config space.  A GCC
 * statement expression yields the value with the field's own type. */
#define virtscsi_config_get(vdev, fld) \
	({ \
		typeof(((struct virtio_scsi_config *)0)->fld) __val; \
		vdev->config->get(vdev, \
				  offsetof(struct virtio_scsi_config, fld), \
				  &__val, sizeof(__val)); \
		__val; \
	})
/* Write @val into field @fld of the device's virtio-scsi config space. */
#define virtscsi_config_set(vdev, fld, val) \
	(void)({ \
		typeof(((struct virtio_scsi_config *)0)->fld) __val = (val); \
		vdev->config->set(vdev, \
				  offsetof(struct virtio_scsi_config, fld), \
				  &__val, sizeof(__val)); \
	})
409 static int virtscsi_init(struct virtio_device
*vdev
,
410 struct virtio_scsi
*vscsi
)
413 struct virtqueue
*vqs
[3];
414 vq_callback_t
*callbacks
[] = {
419 const char *names
[] = {
425 /* Discover virtqueues and write information to configuration. */
426 err
= vdev
->config
->find_vqs(vdev
, 3, vqs
, callbacks
, names
);
430 vscsi
->ctrl_vq
= vqs
[0];
431 vscsi
->event_vq
= vqs
[1];
432 vscsi
->req_vq
= vqs
[2];
434 virtscsi_config_set(vdev
, cdb_size
, VIRTIO_SCSI_CDB_SIZE
);
435 virtscsi_config_set(vdev
, sense_size
, VIRTIO_SCSI_SENSE_SIZE
);
439 static int __devinit
virtscsi_probe(struct virtio_device
*vdev
)
441 struct Scsi_Host
*shost
;
442 struct virtio_scsi
*vscsi
;
447 /* We need to know how many segments before we allocate.
448 * We need an extra sg elements at head and tail.
450 sg_elems
= virtscsi_config_get(vdev
, seg_max
) ?: 1;
452 /* Allocate memory and link the structs together. */
453 shost
= scsi_host_alloc(&virtscsi_host_template
,
454 sizeof(*vscsi
) + sizeof(vscsi
->sg
[0]) * (sg_elems
+ 2));
459 shost
->sg_tablesize
= sg_elems
;
460 vscsi
= shost_priv(shost
);
464 /* Random initializations. */
465 spin_lock_init(&vscsi
->vq_lock
);
466 sg_init_table(vscsi
->sg
, sg_elems
+ 2);
468 err
= virtscsi_init(vdev
, vscsi
);
470 goto virtscsi_init_failed
;
472 cmd_per_lun
= virtscsi_config_get(vdev
, cmd_per_lun
) ?: 1;
473 shost
->cmd_per_lun
= min_t(u32
, cmd_per_lun
, shost
->can_queue
);
474 shost
->max_sectors
= virtscsi_config_get(vdev
, max_sectors
) ?: 0xFFFF;
475 shost
->max_lun
= virtscsi_config_get(vdev
, max_lun
) + 1;
476 shost
->max_id
= virtscsi_config_get(vdev
, max_target
) + 1;
477 shost
->max_channel
= 0;
478 shost
->max_cmd_len
= VIRTIO_SCSI_CDB_SIZE
;
479 err
= scsi_add_host(shost
, &vdev
->dev
);
481 goto scsi_add_host_failed
;
483 scsi_scan_host(shost
);
487 scsi_add_host_failed
:
488 vdev
->config
->del_vqs(vdev
);
489 virtscsi_init_failed
:
490 scsi_host_put(shost
);
494 static void virtscsi_remove_vqs(struct virtio_device
*vdev
)
496 /* Stop all the virtqueues. */
497 vdev
->config
->reset(vdev
);
499 vdev
->config
->del_vqs(vdev
);
502 static void __devexit
virtscsi_remove(struct virtio_device
*vdev
)
504 struct Scsi_Host
*shost
= virtio_scsi_host(vdev
);
506 scsi_remove_host(shost
);
508 virtscsi_remove_vqs(vdev
);
509 scsi_host_put(shost
);
/* PM freeze: virtqueues must be torn down before suspend and rebuilt in
 * restore.  NOTE(review): upstream guards this pair with #ifdef CONFIG_PM —
 * confirm the guard survives in the full file. */
static int virtscsi_freeze(struct virtio_device *vdev)
{
	virtscsi_remove_vqs(vdev);
	return 0;
}
/* PM restore: re-discover virtqueues after resume. */
static int virtscsi_restore(struct virtio_device *vdev)
{
	struct Scsi_Host *sh = virtio_scsi_host(vdev);
	struct virtio_scsi *vscsi = shost_priv(sh);

	return virtscsi_init(vdev, vscsi);
}
528 static struct virtio_device_id id_table
[] = {
529 { VIRTIO_ID_SCSI
, VIRTIO_DEV_ANY_ID
},
533 static struct virtio_driver virtio_scsi_driver
= {
534 .driver
.name
= KBUILD_MODNAME
,
535 .driver
.owner
= THIS_MODULE
,
536 .id_table
= id_table
,
537 .probe
= virtscsi_probe
,
539 .freeze
= virtscsi_freeze
,
540 .restore
= virtscsi_restore
,
542 .remove
= __devexit_p(virtscsi_remove
),
545 static int __init
init(void)
549 virtscsi_cmd_cache
= KMEM_CACHE(virtio_scsi_cmd
, 0);
550 if (!virtscsi_cmd_cache
) {
551 printk(KERN_ERR
"kmem_cache_create() for "
552 "virtscsi_cmd_cache failed\n");
558 mempool_create_slab_pool(VIRTIO_SCSI_MEMPOOL_SZ
,
560 if (!virtscsi_cmd_pool
) {
561 printk(KERN_ERR
"mempool_create() for"
562 "virtscsi_cmd_pool failed\n");
565 ret
= register_virtio_driver(&virtio_scsi_driver
);
572 if (virtscsi_cmd_pool
) {
573 mempool_destroy(virtscsi_cmd_pool
);
574 virtscsi_cmd_pool
= NULL
;
576 if (virtscsi_cmd_cache
) {
577 kmem_cache_destroy(virtscsi_cmd_cache
);
578 virtscsi_cmd_cache
= NULL
;
583 static void __exit
fini(void)
585 unregister_virtio_driver(&virtio_scsi_driver
);
586 mempool_destroy(virtscsi_cmd_pool
);
587 kmem_cache_destroy(virtscsi_cmd_cache
);
592 MODULE_DEVICE_TABLE(virtio
, id_table
);
593 MODULE_DESCRIPTION("Virtio SCSI HBA driver");
594 MODULE_LICENSE("GPL");