Staging: hv: storvsc_drv: Directly assign the driver name
drivers/staging/hv/storvsc_drv.c
/*
 * Copyright (c) 2009, Microsoft Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
 * Place - Suite 330, Boston, MA 02111-1307 USA.
 *
 * Authors:
 *   Haiyang Zhang <haiyangz@microsoft.com>
 *   Hank Janssen <hjanssen@microsoft.com>
 *   K. Y. Srinivasan <kys@microsoft.com>
 */
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/device.h>
#include <linux/blkdev.h>
#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_tcq.h>
#include <scsi/scsi_eh.h>
#include <scsi/scsi_devinfo.h>
#include <scsi/scsi_dbg.h>
#include "hv_api.h"
#include "logging.h"
#include "version_info.h"
#include "vmbus.h"
#include "storvsc_api.h"
#include "vstorage.h"
#include "channel.h"

static int storvsc_ringbuffer_size = STORVSC_RING_BUFFER_SIZE;

module_param(storvsc_ringbuffer_size, int, S_IRUGO);
MODULE_PARM_DESC(storvsc_ringbuffer_size, "Ring buffer size (bytes)");

static const char *driver_name = "storvsc";

/* {ba6163d9-04a1-4d29-b605-72e2ffb1dc7f} */
static const struct hv_guid gStorVscDeviceType = {
	.data = {
		0xd9, 0x63, 0x61, 0xba, 0xa1, 0x04, 0x29, 0x4d,
		0xb6, 0x05, 0x72, 0xe2, 0xff, 0xb1, 0xdc, 0x7f
	}
};

struct hv_host_device {
	struct hv_device *dev;
	struct kmem_cache *request_pool;
	unsigned int port;
	unsigned char path;
	unsigned char target;
};

struct storvsc_cmd_request {
	struct list_head entry;
	struct scsi_cmnd *cmd;

	unsigned int bounce_sgl_count;
	struct scatterlist *bounce_sgl;

	struct hv_storvsc_request request;
};

/*
 * storvsc_initialize - Main entry point
 */
static int storvsc_initialize(struct hv_driver *driver)
{
	struct storvsc_driver *stor_driver;

	stor_driver = hvdr_to_stordr(driver);

	/* Make sure we are at least 2 pages since 1 page is used for control */

	driver->name = driver_name;
	memcpy(&driver->dev_type, &gStorVscDeviceType,
	       sizeof(struct hv_guid));

	return 0;
}

static int storvsc_device_alloc(struct scsi_device *sdevice)
{
	/*
	 * This enables luns to be located sparsely. Otherwise, we may not
	 * discover them.
	 */
	sdevice->sdev_bflags |= BLIST_SPARSELUN | BLIST_LARGELUN;
	return 0;
}

static int storvsc_merge_bvec(struct request_queue *q,
			      struct bvec_merge_data *bmd, struct bio_vec *bvec)
{
	/* checking done by caller. */
	return bvec->bv_len;
}

static int storvsc_device_configure(struct scsi_device *sdevice)
{
	scsi_adjust_queue_depth(sdevice, MSG_SIMPLE_TAG,
				STORVSC_MAX_IO_REQUESTS);

	DPRINT_INFO(STORVSC_DRV, "sdev (%p) - setting max segment size to %ld",
		    sdevice, PAGE_SIZE);
	blk_queue_max_segment_size(sdevice->request_queue, PAGE_SIZE);

	DPRINT_INFO(STORVSC_DRV, "sdev (%p) - adding merge bio vec routine",
		    sdevice);
	blk_queue_merge_bvec(sdevice->request_queue, storvsc_merge_bvec);

	blk_queue_bounce_limit(sdevice->request_queue, BLK_BOUNCE_ANY);

	return 0;
}

static void destroy_bounce_buffer(struct scatterlist *sgl,
				  unsigned int sg_count)
{
	int i;
	struct page *page_buf;

	for (i = 0; i < sg_count; i++) {
		page_buf = sg_page((&sgl[i]));
		if (page_buf != NULL)
			__free_page(page_buf);
	}

	kfree(sgl);
}

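/*
 * Check whether the scatterlist needs to be bounced: returns -1 if the
 * list already describes a gap-free run of pages (or has fewer than two
 * entries), otherwise the index of the first entry that has a hole.
 */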
static int do_bounce_buffer(struct scatterlist *sgl, unsigned int sg_count)
{
	int i;

	/* No need to check */
	if (sg_count < 2)
		return -1;

	/* We have at least 2 sg entries */
	for (i = 0; i < sg_count; i++) {
		if (i == 0) {
			/* make sure 1st one does not have hole */
			if (sgl[i].offset + sgl[i].length != PAGE_SIZE)
				return i;
		} else if (i == sg_count - 1) {
			/* make sure last one does not have hole */
			if (sgl[i].offset != 0)
				return i;
		} else {
			/* make sure no hole in the middle */
			if (sgl[i].length != PAGE_SIZE || sgl[i].offset != 0)
				return i;
		}
	}
	return -1;
}

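/*
 * Allocate a bounce scatterlist with enough single pages to cover len
 * bytes. Returns NULL on allocation failure; any pages already allocated
 * are released through destroy_bounce_buffer().
 */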
static struct scatterlist *create_bounce_buffer(struct scatterlist *sgl,
						unsigned int sg_count,
						unsigned int len)
{
	int i;
	int num_pages;
	struct scatterlist *bounce_sgl;
	struct page *page_buf;

	num_pages = ALIGN(len, PAGE_SIZE) >> PAGE_SHIFT;

	bounce_sgl = kcalloc(num_pages, sizeof(struct scatterlist), GFP_ATOMIC);
	if (!bounce_sgl)
		return NULL;

	for (i = 0; i < num_pages; i++) {
		page_buf = alloc_page(GFP_ATOMIC);
		if (!page_buf)
			goto cleanup;
		sg_set_page(&bounce_sgl[i], page_buf, 0, 0);
	}

	return bounce_sgl;

cleanup:
	destroy_bounce_buffer(bounce_sgl, num_pages);
	return NULL;
}

/* Assume the original sgl has enough room */
static unsigned int copy_from_bounce_buffer(struct scatterlist *orig_sgl,
					    struct scatterlist *bounce_sgl,
					    unsigned int orig_sgl_count)
{
	int i;
	int j = 0;
	unsigned long src, dest;
	unsigned int srclen, destlen, copylen;
	unsigned int total_copied = 0;
	unsigned long bounce_addr = 0;
	unsigned long dest_addr = 0;
	unsigned long flags;

	local_irq_save(flags);

	for (i = 0; i < orig_sgl_count; i++) {
		dest_addr = (unsigned long)kmap_atomic(sg_page((&orig_sgl[i])),
					KM_IRQ0) + orig_sgl[i].offset;
		dest = dest_addr;
		destlen = orig_sgl[i].length;

		if (bounce_addr == 0)
			bounce_addr =
			(unsigned long)kmap_atomic(sg_page((&bounce_sgl[j])),
						   KM_IRQ0);

		while (destlen) {
			src = bounce_addr + bounce_sgl[j].offset;
			srclen = bounce_sgl[j].length - bounce_sgl[j].offset;

			copylen = min(srclen, destlen);
			memcpy((void *)dest, (void *)src, copylen);

			total_copied += copylen;
			bounce_sgl[j].offset += copylen;
			destlen -= copylen;
			dest += copylen;

			if (bounce_sgl[j].offset == bounce_sgl[j].length) {
				/* full */
				kunmap_atomic((void *)bounce_addr, KM_IRQ0);
				j++;

				/* if we need to use another bounce buffer */
				if (destlen || i != orig_sgl_count - 1)
					bounce_addr =
					(unsigned long)kmap_atomic(
					sg_page((&bounce_sgl[j])), KM_IRQ0);
			} else if (destlen == 0 && i == orig_sgl_count - 1) {
				/* unmap the last bounce that is < PAGE_SIZE */
				kunmap_atomic((void *)bounce_addr, KM_IRQ0);
			}
		}

		kunmap_atomic((void *)(dest_addr - orig_sgl[i].offset),
			      KM_IRQ0);
	}

	local_irq_restore(flags);

	return total_copied;
}

/*
 * Assume the bounce_sgl has enough room, i.e. it was built by
 * create_bounce_buffer().
 */
static unsigned int copy_to_bounce_buffer(struct scatterlist *orig_sgl,
					  struct scatterlist *bounce_sgl,
					  unsigned int orig_sgl_count)
{
	int i;
	int j = 0;
	unsigned long src, dest;
	unsigned int srclen, destlen, copylen;
	unsigned int total_copied = 0;
	unsigned long bounce_addr = 0;
	unsigned long src_addr = 0;
	unsigned long flags;

	local_irq_save(flags);

	for (i = 0; i < orig_sgl_count; i++) {
		src_addr = (unsigned long)kmap_atomic(sg_page((&orig_sgl[i])),
					KM_IRQ0) + orig_sgl[i].offset;
		src = src_addr;
		srclen = orig_sgl[i].length;

		if (bounce_addr == 0)
			bounce_addr =
			(unsigned long)kmap_atomic(sg_page((&bounce_sgl[j])),
						   KM_IRQ0);

		while (srclen) {
			/* assume bounce offset always == 0 */
			dest = bounce_addr + bounce_sgl[j].length;
			destlen = PAGE_SIZE - bounce_sgl[j].length;

			copylen = min(srclen, destlen);
			memcpy((void *)dest, (void *)src, copylen);

			total_copied += copylen;
			bounce_sgl[j].length += copylen;
			srclen -= copylen;
			src += copylen;

			if (bounce_sgl[j].length == PAGE_SIZE) {
				/* full..move to next entry */
				kunmap_atomic((void *)bounce_addr, KM_IRQ0);
				j++;

				/* if we need to use another bounce buffer */
				if (srclen || i != orig_sgl_count - 1)
					bounce_addr =
					(unsigned long)kmap_atomic(
					sg_page((&bounce_sgl[j])), KM_IRQ0);

			} else if (srclen == 0 && i == orig_sgl_count - 1) {
				/* unmap the last bounce that is < PAGE_SIZE */
				kunmap_atomic((void *)bounce_addr, KM_IRQ0);
			}
		}

		kunmap_atomic((void *)(src_addr - orig_sgl[i].offset), KM_IRQ0);
	}

	local_irq_restore(flags);

	return total_copied;
}

/*
 * storvsc_remove - Callback when our device is removed
 */
static int storvsc_remove(struct hv_device *dev)
{
	struct Scsi_Host *host = dev_get_drvdata(&dev->device);
	struct hv_host_device *host_dev =
			(struct hv_host_device *)host->hostdata;

	/*
	 * Call to the vsc driver to let it know that the device is being
	 * removed
	 */
	storvsc_dev_remove(dev);

	if (host_dev->request_pool) {
		kmem_cache_destroy(host_dev->request_pool);
		host_dev->request_pool = NULL;
	}

	DPRINT_INFO(STORVSC, "removing host adapter (%p)...", host);
	scsi_remove_host(host);

	DPRINT_INFO(STORVSC, "releasing host adapter (%p)...", host);
	scsi_host_put(host);
	return 0;
}

static int storvsc_get_chs(struct scsi_device *sdev, struct block_device *bdev,
			   sector_t capacity, int *info)
{
	sector_t nsect = capacity;
	sector_t cylinders = nsect;
	int heads, sectors_pt;

	/*
	 * We are making up these values; let us keep it simple.
	 */
	heads = 0xff;
	sectors_pt = 0x3f;	/* Sectors per track */
	sector_div(cylinders, heads * sectors_pt);
	if ((sector_t)(cylinders + 1) * heads * sectors_pt < nsect)
		cylinders = 0xffff;

	info[0] = heads;
	info[1] = sectors_pt;
	info[2] = (int)cylinders;

	DPRINT_INFO(STORVSC_DRV, "CHS (%d, %d, %d)", (int)cylinders, heads,
		    sectors_pt);

	return 0;
}

static int storvsc_host_reset(struct hv_device *device)
{
	struct storvsc_device *stor_device;
	struct hv_storvsc_request *request;
	struct vstor_packet *vstor_packet;
	int ret, t;

	DPRINT_INFO(STORVSC, "resetting host adapter...");

	stor_device = get_stor_device(device);
	if (!stor_device)
		return -1;

	request = &stor_device->reset_request;
	vstor_packet = &request->vstor_packet;

	init_completion(&request->wait_event);

	vstor_packet->operation = VSTOR_OPERATION_RESET_BUS;
	vstor_packet->flags = REQUEST_COMPLETION_FLAG;
	vstor_packet->vm_srb.path_id = stor_device->path_id;

	ret = vmbus_sendpacket(device->channel, vstor_packet,
			       sizeof(struct vstor_packet),
			       (unsigned long)&stor_device->reset_request,
			       VM_PKT_DATA_INBAND,
			       VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);
	if (ret != 0)
		goto cleanup;

	t = wait_for_completion_timeout(&request->wait_event, HZ);
	if (t == 0) {
		ret = -ETIMEDOUT;
		goto cleanup;
	}

	DPRINT_INFO(STORVSC, "host adapter reset completed");

	/*
	 * At this point, all outstanding requests in the adapter
	 * should have been flushed out and returned to us
	 */

cleanup:
	put_stor_device(device);
	return ret;
}

/*
 * storvsc_host_reset_handler - Reset the scsi HBA
 */
static int storvsc_host_reset_handler(struct scsi_cmnd *scmnd)
{
	int ret;
	struct hv_host_device *host_dev =
		(struct hv_host_device *)scmnd->device->host->hostdata;
	struct hv_device *dev = host_dev->dev;

	DPRINT_INFO(STORVSC_DRV, "sdev (%p) dev obj (%p) - host resetting...",
		    scmnd->device, dev);

	/* Invokes the vsc to reset the host/bus */
	ret = storvsc_host_reset(dev);
	if (ret != 0)
		return ret;

	DPRINT_INFO(STORVSC_DRV, "sdev (%p) dev obj (%p) - host reset",
		    scmnd->device, dev);

	return ret;
}

/*
 * storvsc_commmand_completion - Command completion processing
 */
static void storvsc_commmand_completion(struct hv_storvsc_request *request)
{
	struct storvsc_cmd_request *cmd_request =
		(struct storvsc_cmd_request *)request->context;
	struct scsi_cmnd *scmnd = cmd_request->cmd;
	struct hv_host_device *host_dev =
		(struct hv_host_device *)scmnd->device->host->hostdata;
	void (*scsi_done_fn)(struct scsi_cmnd *);
	struct scsi_sense_hdr sense_hdr;
	struct vmscsi_request *vm_srb;

	if (cmd_request->bounce_sgl_count) {
		/* FIXME: We can optimize on writes by just skipping this */
		copy_from_bounce_buffer(scsi_sglist(scmnd),
					cmd_request->bounce_sgl,
					scsi_sg_count(scmnd));
		destroy_bounce_buffer(cmd_request->bounce_sgl,
				      cmd_request->bounce_sgl_count);
	}

	vm_srb = &request->vstor_packet.vm_srb;
	scmnd->result = vm_srb->scsi_status;

	if (scmnd->result) {
		if (scsi_normalize_sense(scmnd->sense_buffer,
					 SCSI_SENSE_BUFFERSIZE, &sense_hdr))
			scsi_print_sense_hdr("storvsc", &sense_hdr);
	}

	scsi_set_resid(scmnd,
		       request->data_buffer.len -
		       vm_srb->data_transfer_length);

	scsi_done_fn = scmnd->scsi_done;

	scmnd->host_scribble = NULL;
	scmnd->scsi_done = NULL;

	/* !!DO NOT MODIFY the scmnd after this call */
	scsi_done_fn(scmnd);

	kmem_cache_free(host_dev->request_pool, cmd_request);
}

/*
 * storvsc_queuecommand - Initiate command processing
 */
static int storvsc_queuecommand_lck(struct scsi_cmnd *scmnd,
				    void (*done)(struct scsi_cmnd *))
{
	int ret;
	struct hv_host_device *host_dev =
		(struct hv_host_device *)scmnd->device->host->hostdata;
	struct hv_device *dev = host_dev->dev;
	struct hv_storvsc_request *request;
	struct storvsc_cmd_request *cmd_request;
	unsigned int request_size = 0;
	int i;
	struct scatterlist *sgl;
	unsigned int sg_count = 0;
	struct vmscsi_request *vm_srb;

	/* If retrying, no need to prep the cmd */
	if (scmnd->host_scribble) {
		cmd_request =
			(struct storvsc_cmd_request *)scmnd->host_scribble;
		DPRINT_INFO(STORVSC_DRV, "retrying scmnd %p cmd_request %p",
			    scmnd, cmd_request);

		goto retry_request;
	}

	scmnd->scsi_done = done;

	request_size = sizeof(struct storvsc_cmd_request);

	cmd_request = kmem_cache_zalloc(host_dev->request_pool,
					GFP_ATOMIC);
	if (!cmd_request) {
		scmnd->scsi_done = NULL;
		return SCSI_MLQUEUE_DEVICE_BUSY;
	}

	/* Setup the cmd request */
	cmd_request->bounce_sgl_count = 0;
	cmd_request->bounce_sgl = NULL;
	cmd_request->cmd = scmnd;

	scmnd->host_scribble = (unsigned char *)cmd_request;

	request = &cmd_request->request;
	vm_srb = &request->vstor_packet.vm_srb;

	/* Build the SRB */
	switch (scmnd->sc_data_direction) {
	case DMA_TO_DEVICE:
		vm_srb->data_in = WRITE_TYPE;
		break;
	case DMA_FROM_DEVICE:
		vm_srb->data_in = READ_TYPE;
		break;
	default:
		vm_srb->data_in = UNKNOWN_TYPE;
		break;
	}

	request->on_io_completion = storvsc_commmand_completion;
	request->context = cmd_request;/* scmnd; */

	vm_srb->port_number = host_dev->port;
	vm_srb->path_id = scmnd->device->channel;
	vm_srb->target_id = scmnd->device->id;
	vm_srb->lun = scmnd->device->lun;

	vm_srb->cdb_length = scmnd->cmd_len;

	memcpy(vm_srb->cdb, scmnd->cmnd, vm_srb->cdb_length);

	request->sense_buffer = scmnd->sense_buffer;

	request->data_buffer.len = scsi_bufflen(scmnd);
	if (scsi_sg_count(scmnd)) {
		sgl = (struct scatterlist *)scsi_sglist(scmnd);
		sg_count = scsi_sg_count(scmnd);

		/* check if we need to bounce the sgl */
		if (do_bounce_buffer(sgl, scsi_sg_count(scmnd)) != -1) {
			cmd_request->bounce_sgl =
				create_bounce_buffer(sgl, scsi_sg_count(scmnd),
						     scsi_bufflen(scmnd));
			if (!cmd_request->bounce_sgl) {
				scmnd->scsi_done = NULL;
				scmnd->host_scribble = NULL;
				kmem_cache_free(host_dev->request_pool,
						cmd_request);

				return SCSI_MLQUEUE_HOST_BUSY;
			}

			cmd_request->bounce_sgl_count =
				ALIGN(scsi_bufflen(scmnd), PAGE_SIZE) >>
					PAGE_SHIFT;

			/*
			 * FIXME: We can optimize on reads by just skipping
			 * this
			 */
			copy_to_bounce_buffer(sgl, cmd_request->bounce_sgl,
					      scsi_sg_count(scmnd));

			sgl = cmd_request->bounce_sgl;
			sg_count = cmd_request->bounce_sgl_count;
		}

		request->data_buffer.offset = sgl[0].offset;

		for (i = 0; i < sg_count; i++)
			request->data_buffer.pfn_array[i] =
				page_to_pfn(sg_page((&sgl[i])));

	} else if (scsi_sglist(scmnd)) {
		request->data_buffer.offset =
			virt_to_phys(scsi_sglist(scmnd)) & (PAGE_SIZE-1);
		request->data_buffer.pfn_array[0] =
			virt_to_phys(scsi_sglist(scmnd)) >> PAGE_SHIFT;
	}

retry_request:
	/* Invokes the vsc to start an IO */
	ret = storvsc_do_io(dev, &cmd_request->request);

	if (ret == -1) {
		/* no more space */

		if (cmd_request->bounce_sgl_count) {
			/*
			 * FIXME: We can optimize on writes by just skipping
			 * this
			 */
			copy_from_bounce_buffer(scsi_sglist(scmnd),
						cmd_request->bounce_sgl,
						scsi_sg_count(scmnd));
			destroy_bounce_buffer(cmd_request->bounce_sgl,
					      cmd_request->bounce_sgl_count);
		}

		kmem_cache_free(host_dev->request_pool, cmd_request);

		scmnd->scsi_done = NULL;
		scmnd->host_scribble = NULL;

		ret = SCSI_MLQUEUE_DEVICE_BUSY;
	}

	return ret;
}

static DEF_SCSI_QCMD(storvsc_queuecommand)

/* Scsi driver */
static struct scsi_host_template scsi_driver = {
	.module = THIS_MODULE,
	.name = "storvsc_host_t",
	.bios_param = storvsc_get_chs,
	.queuecommand = storvsc_queuecommand,
	.eh_host_reset_handler = storvsc_host_reset_handler,
	.slave_alloc = storvsc_device_alloc,
	.slave_configure = storvsc_device_configure,
	.cmd_per_lun = 1,
	/* 64 max_queue * 1 target */
	.can_queue = STORVSC_MAX_IO_REQUESTS*STORVSC_MAX_TARGETS,
	.this_id = -1,
	/* no use setting to 0 since ll_blk_rw reset it to 1 */
	/* currently 32 */
	.sg_tablesize = MAX_MULTIPAGE_BUFFER_COUNT,
	/*
	 * ENABLE_CLUSTERING allows multiple physically contig bio_vecs to
	 * merge into 1 sg element. If set, we must limit the max_segment_size
	 * to PAGE_SIZE, otherwise we may get 1 sg element that represents
	 * multiple physically contig pfns (ie sg[x].length > PAGE_SIZE).
	 */
	.use_clustering = ENABLE_CLUSTERING,
	/* Make sure we don't get a sg segment that crosses a page boundary */
	.dma_boundary = PAGE_SIZE-1,
};

/*
 * storvsc_probe - Add a new device for this driver
 */
static int storvsc_probe(struct hv_device *device)
{
	int ret;
	struct Scsi_Host *host;
	struct hv_host_device *host_dev;
	struct storvsc_device_info device_info;

	host = scsi_host_alloc(&scsi_driver,
			       sizeof(struct hv_host_device));
	if (!host)
		return -ENOMEM;

	dev_set_drvdata(&device->device, host);

	host_dev = (struct hv_host_device *)host->hostdata;
	memset(host_dev, 0, sizeof(struct hv_host_device));

	host_dev->port = host->host_no;
	host_dev->dev = device;

	host_dev->request_pool =
		kmem_cache_create(dev_name(&device->device),
				  sizeof(struct storvsc_cmd_request), 0,
				  SLAB_HWCACHE_ALIGN, NULL);

	if (!host_dev->request_pool) {
		scsi_host_put(host);
		return -ENOMEM;
	}

	device_info.port_number = host->host_no;
	device_info.ring_buffer_size = storvsc_ringbuffer_size;
	/* Call to the vsc driver to add the device */
	ret = storvsc_dev_add(device, (void *)&device_info);

	if (ret != 0) {
		kmem_cache_destroy(host_dev->request_pool);
		scsi_host_put(host);
		return -1;
	}

	host_dev->path = device_info.path_id;
	host_dev->target = device_info.target_id;

	/* max # of devices per target */
	host->max_lun = STORVSC_MAX_LUNS_PER_TARGET;
	/* max # of targets per channel */
	host->max_id = STORVSC_MAX_TARGETS;
	/* max # of channels */
	host->max_channel = STORVSC_MAX_CHANNELS - 1;

	/* Register the HBA and start the scsi bus scan */
	ret = scsi_add_host(host, &device->device);
	if (ret != 0) {

		storvsc_dev_remove(device);

		kmem_cache_destroy(host_dev->request_pool);
		scsi_host_put(host);
		return -1;
	}

	scsi_scan_host(host);
	return ret;
}

/* The one and only one */
static struct storvsc_driver storvsc_drv = {
	.base.probe = storvsc_probe,
	.base.remove = storvsc_remove,
};

/*
 * storvsc_drv_init - StorVsc driver initialization.
 */
static int storvsc_drv_init(void)
{
	int ret;
	struct storvsc_driver *storvsc_drv_obj = &storvsc_drv;
	struct hv_driver *drv = &storvsc_drv.base;
	u32 max_outstanding_req_per_channel;

	/*
	 * Divide the ring buffer data size (which is 1 page less
	 * than the ring buffer size since that page is reserved for
	 * the ring buffer indices) by the max request size (which is
	 * vmbus_channel_packet_multipage_buffer + struct vstor_packet + u64)
	 */
	max_outstanding_req_per_channel =
		((storvsc_ringbuffer_size - PAGE_SIZE) /
		 ALIGN(MAX_MULTIPAGE_BUFFER_PACKET +
		       sizeof(struct vstor_packet) + sizeof(u64),
		       sizeof(u64)));

	/* Callback to client driver to complete the initialization */
	storvsc_initialize(&storvsc_drv_obj->base);

	if (max_outstanding_req_per_channel <
	    STORVSC_MAX_IO_REQUESTS)
		return -1;

	drv->driver.name = driver_name;

	/* The driver belongs to vmbus */
	ret = vmbus_child_driver_register(&drv->driver);

	return ret;
}

static int storvsc_drv_exit_cb(struct device *dev, void *data)
{
	struct device **curr = (struct device **)data;
	*curr = dev;
	return 1;	/* stop iterating */
}

static void storvsc_drv_exit(void)
{
	struct hv_driver *drv = &storvsc_drv.base;
	struct device *current_dev = NULL;
	int ret;

	while (1) {
		current_dev = NULL;

		/* Get the device */
		ret = driver_for_each_device(&drv->driver, NULL,
					     (void *)&current_dev,
					     storvsc_drv_exit_cb);

		if (current_dev == NULL)
			break;

		/* Initiate removal from the top-down */
		device_unregister(current_dev);
	}

	vmbus_child_driver_unregister(&drv->driver);
	return;
}

static int __init storvsc_init(void)
{
	int ret;

	DPRINT_INFO(STORVSC_DRV, "Storvsc initializing....");
	ret = storvsc_drv_init();
	return ret;
}

static void __exit storvsc_exit(void)
{
	storvsc_drv_exit();
}

MODULE_LICENSE("GPL");
MODULE_VERSION(HV_DRV_VERSION);
MODULE_DESCRIPTION("Microsoft Hyper-V virtual storage driver");
module_init(storvsc_init);
module_exit(storvsc_exit);