/*
 * Copyright (c) 2009, Microsoft Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
 * Place - Suite 330, Boston, MA 02111-1307 USA.
 *
 * Authors:
 *   Haiyang Zhang <haiyangz@microsoft.com>
 *   Hank Janssen  <hjanssen@microsoft.com>
 */
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/device.h>
#include <linux/blkdev.h>
#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_tcq.h>
#include <scsi/scsi_eh.h>
#include <scsi/scsi_devinfo.h>
#include <scsi/scsi_dbg.h>
#include "hv_api.h"
#include "logging.h"
#include "version_info.h"
#include "vmbus.h"
#include "storvsc_api.h"

struct host_device_context {
        /* must be 1st field
         * FIXME this is a bug */
        /* point back to our device context */
        struct hv_device *device_ctx;
        struct kmem_cache *request_pool;
        unsigned int port;
        unsigned char path;
        unsigned char target;
};

struct storvsc_cmd_request {
        struct list_head entry;
        struct scsi_cmnd *cmd;

        unsigned int bounce_sgl_count;
        struct scatterlist *bounce_sgl;

        struct hv_storvsc_request request;
        /* !!!DO NOT ADD ANYTHING BELOW HERE!!! */
        /* The extension buffer falls right here and is pointed to by
         * request.Extension;
         * Which sounds like a very bad design... */
};
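
/*
 * Layout note: each object allocated from request_pool is
 * sizeof(struct storvsc_cmd_request) + request_ext_size bytes (see the
 * kmem_cache_create() call in storvsc_probe()), so the per-request
 * extension buffer is co-allocated immediately after this struct;
 * storvsc_queuecommand() points request->extension at that trailing area
 * instead of doing a second allocation.  This is why nothing may be added
 * below the request field.
 */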

/* Static decl */
static int storvsc_probe(struct device *dev);
static int storvsc_queuecommand(struct Scsi_Host *shost,
                                struct scsi_cmnd *scmnd);
static int storvsc_device_alloc(struct scsi_device *);
static int storvsc_device_configure(struct scsi_device *);
static int storvsc_host_reset_handler(struct scsi_cmnd *scmnd);
static int storvsc_remove(struct device *dev);

static struct scatterlist *create_bounce_buffer(struct scatterlist *sgl,
                                                unsigned int sg_count,
                                                unsigned int len);
static void destroy_bounce_buffer(struct scatterlist *sgl,
                                  unsigned int sg_count);
static int do_bounce_buffer(struct scatterlist *sgl, unsigned int sg_count);
static unsigned int copy_from_bounce_buffer(struct scatterlist *orig_sgl,
                                            struct scatterlist *bounce_sgl,
                                            unsigned int orig_sgl_count);
static unsigned int copy_to_bounce_buffer(struct scatterlist *orig_sgl,
                                          struct scatterlist *bounce_sgl,
                                          unsigned int orig_sgl_count);

static int storvsc_get_chs(struct scsi_device *sdev, struct block_device *bdev,
                           sector_t capacity, int *info);


static int storvsc_ringbuffer_size = STORVSC_RING_BUFFER_SIZE;
module_param(storvsc_ringbuffer_size, int, S_IRUGO);
MODULE_PARM_DESC(storvsc_ringbuffer_size, "Ring buffer size (bytes)");

/* The one and only one */
static struct storvsc_driver_object g_storvsc_drv;

/* Scsi driver */
static struct scsi_host_template scsi_driver = {
        .module = THIS_MODULE,
        .name = "storvsc_host_t",
        .bios_param = storvsc_get_chs,
        .queuecommand = storvsc_queuecommand,
        .eh_host_reset_handler = storvsc_host_reset_handler,
        .slave_alloc = storvsc_device_alloc,
        .slave_configure = storvsc_device_configure,
        .cmd_per_lun = 1,
        /* 64 max_queue * 1 target */
        .can_queue = STORVSC_MAX_IO_REQUESTS * STORVSC_MAX_TARGETS,
        .this_id = -1,
        /* no use setting to 0 since ll_blk_rw reset it to 1 */
        /* currently 32 */
        .sg_tablesize = MAX_MULTIPAGE_BUFFER_COUNT,
        /*
         * ENABLE_CLUSTERING allows multiple physically contiguous bio_vecs
         * to merge into one sg element.  If set, we must limit
         * max_segment_size to PAGE_SIZE, otherwise we may get one sg element
         * that represents multiple physically contiguous pfns
         * (i.e. sg[x].length > PAGE_SIZE).
         */
        .use_clustering = ENABLE_CLUSTERING,
        /* Make sure we don't get an sg segment that crosses a page boundary */
        .dma_boundary = PAGE_SIZE - 1,
};
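
/*
 * Why the page-granular limits above: storvsc_queuecommand() maps each sg
 * element to a single entry in the VMBus multipage buffer's pfn_array
 * (one page frame per element), so no element may span a page boundary.
 * dma_boundary, the PAGE_SIZE max_segment_size set in
 * storvsc_device_configure(), and the bounce-buffer path below all exist
 * to enforce that invariant.
 */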

/*
 * storvsc_drv_init - StorVsc driver initialization.
 */
static int storvsc_drv_init(int (*drv_init)(struct hv_driver *drv))
{
        int ret;
        struct storvsc_driver_object *storvsc_drv_obj = &g_storvsc_drv;
        struct hv_driver *drv = &g_storvsc_drv.base;

        storvsc_drv_obj->ring_buffer_size = storvsc_ringbuffer_size;

        /* Callback to client driver to complete the initialization */
        drv_init(&storvsc_drv_obj->base);

        drv->priv = storvsc_drv_obj;

        DPRINT_INFO(STORVSC_DRV,
                    "request extension size %u, max outstanding reqs %u",
                    storvsc_drv_obj->request_ext_size,
                    storvsc_drv_obj->max_outstanding_req_per_channel);

        if (storvsc_drv_obj->max_outstanding_req_per_channel <
            STORVSC_MAX_IO_REQUESTS) {
                DPRINT_ERR(STORVSC_DRV,
                           "The number of outstanding io requests (%d) "
                           "is larger than that supported (%d) internally.",
                           STORVSC_MAX_IO_REQUESTS,
                           storvsc_drv_obj->max_outstanding_req_per_channel);
                return -1;
        }

        drv->driver.name = storvsc_drv_obj->base.name;

        drv->driver.probe = storvsc_probe;
        drv->driver.remove = storvsc_remove;

        /* The driver belongs to vmbus */
        ret = vmbus_child_driver_register(&drv->driver);

        return ret;
}
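
/*
 * Initialization flow: storvsc_init() passes stor_vsc_initialize() in as
 * drv_init above, which is expected to fill in the hv_driver base (name,
 * dev_add/dev_rm, on_io_request, request_ext_size, ...) before this file
 * registers the driver with vmbus.  The same base callbacks drive
 * probe/remove and the I/O path below.
 */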

static int storvsc_drv_exit_cb(struct device *dev, void *data)
{
        struct device **curr = (struct device **)data;
        *curr = dev;
        return 1;       /* stop iterating */
}

static void storvsc_drv_exit(void)
{
        struct storvsc_driver_object *storvsc_drv_obj = &g_storvsc_drv;
        struct hv_driver *drv = &g_storvsc_drv.base;
        struct device *current_dev = NULL;
        int ret;

        while (1) {
                current_dev = NULL;

                /* Get the device */
                ret = driver_for_each_device(&drv->driver, NULL,
                                             (void *)&current_dev,
                                             storvsc_drv_exit_cb);

                if (ret)
                        DPRINT_WARN(STORVSC_DRV,
                                    "driver_for_each_device returned %d", ret);

                if (current_dev == NULL)
                        break;

                /* Initiate removal from the top-down */
                device_unregister(current_dev);
        }

        if (storvsc_drv_obj->base.cleanup)
                storvsc_drv_obj->base.cleanup(&storvsc_drv_obj->base);

        vmbus_child_driver_unregister(&drv->driver);
        return;
}

/*
 * storvsc_probe - Add a new device for this driver
 */
static int storvsc_probe(struct device *device)
{
        int ret;
        struct hv_driver *drv =
                drv_to_hv_drv(device->driver);
        struct storvsc_driver_object *storvsc_drv_obj = drv->priv;
        struct hv_device *device_obj = device_to_hv_device(device);
        struct Scsi_Host *host;
        struct host_device_context *host_device_ctx;
        struct storvsc_device_info device_info;

        if (!storvsc_drv_obj->base.dev_add)
                return -1;

        host = scsi_host_alloc(&scsi_driver,
                               sizeof(struct host_device_context));
        if (!host) {
                DPRINT_ERR(STORVSC_DRV, "unable to allocate scsi host object");
                return -ENOMEM;
        }

        dev_set_drvdata(device, host);

        host_device_ctx = (struct host_device_context *)host->hostdata;
        memset(host_device_ctx, 0, sizeof(struct host_device_context));

        host_device_ctx->port = host->host_no;
        host_device_ctx->device_ctx = device_obj;

        host_device_ctx->request_pool =
                kmem_cache_create(dev_name(&device_obj->device),
                                  sizeof(struct storvsc_cmd_request) +
                                  storvsc_drv_obj->request_ext_size, 0,
                                  SLAB_HWCACHE_ALIGN, NULL);

        if (!host_device_ctx->request_pool) {
                scsi_host_put(host);
                return -ENOMEM;
        }

        device_info.port_number = host->host_no;
        /* Call to the vsc driver to add the device */
        ret = storvsc_drv_obj->base.dev_add(device_obj,
                                            (void *)&device_info);
        if (ret != 0) {
                DPRINT_ERR(STORVSC_DRV, "unable to add scsi vsc device");
                kmem_cache_destroy(host_device_ctx->request_pool);
                scsi_host_put(host);
                return -1;
        }

        /* host_device_ctx->port = device_info.PortNumber; */
        host_device_ctx->path = device_info.path_id;
        host_device_ctx->target = device_info.target_id;

        /* max # of devices per target */
        host->max_lun = STORVSC_MAX_LUNS_PER_TARGET;
        /* max # of targets per channel */
        host->max_id = STORVSC_MAX_TARGETS;
        /* max # of channels */
        host->max_channel = STORVSC_MAX_CHANNELS - 1;

        /* Register the HBA and start the scsi bus scan */
        ret = scsi_add_host(host, device);
        if (ret != 0) {
                DPRINT_ERR(STORVSC_DRV, "unable to add scsi host device");

                storvsc_drv_obj->base.dev_rm(device_obj);

                kmem_cache_destroy(host_device_ctx->request_pool);
                scsi_host_put(host);
                return -1;
        }

        scsi_scan_host(host);
        return ret;
}

/*
 * storvsc_remove - Callback when our device is removed
 */
static int storvsc_remove(struct device *device)
{
        int ret;
        struct hv_driver *drv =
                drv_to_hv_drv(device->driver);
        struct storvsc_driver_object *storvsc_drv_obj = drv->priv;
        struct hv_device *device_obj = device_to_hv_device(device);
        struct Scsi_Host *host = dev_get_drvdata(device);
        struct host_device_context *host_device_ctx =
                (struct host_device_context *)host->hostdata;


        if (!storvsc_drv_obj->base.dev_rm)
                return -1;

        /*
         * Call to the vsc driver to let it know that the device is being
         * removed
         */
        ret = storvsc_drv_obj->base.dev_rm(device_obj);
        if (ret != 0) {
                /* TODO: */
                DPRINT_ERR(STORVSC, "unable to remove vsc device (ret %d)",
                           ret);
        }

        if (host_device_ctx->request_pool) {
                kmem_cache_destroy(host_device_ctx->request_pool);
                host_device_ctx->request_pool = NULL;
        }

        DPRINT_INFO(STORVSC, "removing host adapter (%p)...", host);
        scsi_remove_host(host);

        DPRINT_INFO(STORVSC, "releasing host adapter (%p)...", host);
        scsi_host_put(host);
        return ret;
}

/*
 * storvsc_commmand_completion - Command completion processing
 */
static void storvsc_commmand_completion(struct hv_storvsc_request *request)
{
        struct storvsc_cmd_request *cmd_request =
                (struct storvsc_cmd_request *)request->context;
        struct scsi_cmnd *scmnd = cmd_request->cmd;
        struct host_device_context *host_device_ctx =
                (struct host_device_context *)scmnd->device->host->hostdata;
        void (*scsi_done_fn)(struct scsi_cmnd *);
        struct scsi_sense_hdr sense_hdr;

        /* ASSERT(request == &cmd_request->request); */
        /* ASSERT(scmnd); */
        /* ASSERT((unsigned long)scmnd->host_scribble == */
        /*        (unsigned long)cmd_request); */
        /* ASSERT(scmnd->scsi_done); */

        if (cmd_request->bounce_sgl_count) {
                /* using bounce buffer */
                /* printk("copy_from_bounce_buffer\n"); */

                /* FIXME: We can optimize on writes by just skipping this */
                copy_from_bounce_buffer(scsi_sglist(scmnd),
                                        cmd_request->bounce_sgl,
                                        scsi_sg_count(scmnd));
                destroy_bounce_buffer(cmd_request->bounce_sgl,
                                      cmd_request->bounce_sgl_count);
        }

        scmnd->result = request->status;

        if (scmnd->result) {
                if (scsi_normalize_sense(scmnd->sense_buffer,
                                         request->sense_buffer_size,
                                         &sense_hdr))
                        scsi_print_sense_hdr("storvsc", &sense_hdr);
        }

        /* ASSERT(request->BytesXfer <= request->data_buffer.Length); */
        scsi_set_resid(scmnd,
                       request->data_buffer.len - request->bytes_xfer);

        scsi_done_fn = scmnd->scsi_done;

        scmnd->host_scribble = NULL;
        scmnd->scsi_done = NULL;

        /* !!DO NOT MODIFY the scmnd after this call */
        scsi_done_fn(scmnd);

        kmem_cache_free(host_device_ctx->request_pool, cmd_request);
}

static int do_bounce_buffer(struct scatterlist *sgl, unsigned int sg_count)
{
        int i;

        /* No need to check */
        if (sg_count < 2)
                return -1;

        /* We have at least 2 sg entries */
        for (i = 0; i < sg_count; i++) {
                if (i == 0) {
                        /* make sure the 1st one does not have a hole */
                        if (sgl[i].offset + sgl[i].length != PAGE_SIZE)
                                return i;
                } else if (i == sg_count - 1) {
                        /* make sure the last one does not have a hole */
                        if (sgl[i].offset != 0)
                                return i;
                } else {
                        /* make sure there is no hole in the middle */
                        if (sgl[i].length != PAGE_SIZE || sgl[i].offset != 0)
                                return i;
                }
        }
        return -1;
}
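
/*
 * do_bounce_buffer() returns the index of the first sg element with a
 * "hole" (a gap between consecutive elements within a page), or -1 if the
 * list is already page-contiguous.  Because the VMBus multipage buffer
 * carries only one starting offset, one length and a flat pfn_array, the
 * data must run contiguously from the first element's offset through the
 * last element's length; e.g. a middle element with offset 0 but length
 * PAGE_SIZE/2 leaves a gap and forces the bounce-buffer path.
 */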

static struct scatterlist *create_bounce_buffer(struct scatterlist *sgl,
                                                unsigned int sg_count,
                                                unsigned int len)
{
        int i;
        int num_pages;
        struct scatterlist *bounce_sgl;
        struct page *page_buf;

        num_pages = ALIGN(len, PAGE_SIZE) >> PAGE_SHIFT;

        bounce_sgl = kcalloc(num_pages, sizeof(struct scatterlist),
                             GFP_ATOMIC);
        if (!bounce_sgl)
                return NULL;

        for (i = 0; i < num_pages; i++) {
                page_buf = alloc_page(GFP_ATOMIC);
                if (!page_buf)
                        goto cleanup;
                sg_set_page(&bounce_sgl[i], page_buf, 0, 0);
        }

        return bounce_sgl;

cleanup:
        destroy_bounce_buffer(bounce_sgl, num_pages);
        return NULL;
}

static void destroy_bounce_buffer(struct scatterlist *sgl,
                                  unsigned int sg_count)
{
        int i;
        struct page *page_buf;

        for (i = 0; i < sg_count; i++) {
                page_buf = sg_page((&sgl[i]));
                if (page_buf != NULL)
                        __free_page(page_buf);
        }

        kfree(sgl);
}

/* Assume the bounce_sgl has enough room, i.e. it came from
 * create_bounce_buffer() */
static unsigned int copy_to_bounce_buffer(struct scatterlist *orig_sgl,
                                          struct scatterlist *bounce_sgl,
                                          unsigned int orig_sgl_count)
{
        int i;
        int j = 0;
        unsigned long src, dest;
        unsigned int srclen, destlen, copylen;
        unsigned int total_copied = 0;
        unsigned long bounce_addr = 0;
        unsigned long src_addr = 0;
        unsigned long flags;

        local_irq_save(flags);

        for (i = 0; i < orig_sgl_count; i++) {
                src_addr = (unsigned long)kmap_atomic(sg_page((&orig_sgl[i])),
                                                      KM_IRQ0)
                           + orig_sgl[i].offset;
                src = src_addr;
                srclen = orig_sgl[i].length;

                /* ASSERT(orig_sgl[i].offset + orig_sgl[i].length <= PAGE_SIZE); */

                if (bounce_addr == 0)
                        bounce_addr = (unsigned long)kmap_atomic(sg_page((&bounce_sgl[j])), KM_IRQ0);

                while (srclen) {
                        /* assume bounce offset always == 0 */
                        dest = bounce_addr + bounce_sgl[j].length;
                        destlen = PAGE_SIZE - bounce_sgl[j].length;

                        copylen = min(srclen, destlen);
                        memcpy((void *)dest, (void *)src, copylen);

                        total_copied += copylen;
                        bounce_sgl[j].length += copylen;
                        srclen -= copylen;
                        src += copylen;

                        if (bounce_sgl[j].length == PAGE_SIZE) {
                                /* full ... move to the next entry */
                                kunmap_atomic((void *)bounce_addr, KM_IRQ0);
                                j++;

                                /* if we need to use another bounce buffer */
                                if (srclen || i != orig_sgl_count - 1)
                                        bounce_addr = (unsigned long)kmap_atomic(sg_page((&bounce_sgl[j])), KM_IRQ0);
                        } else if (srclen == 0 && i == orig_sgl_count - 1) {
                                /* unmap the last bounce that is < PAGE_SIZE */
                                kunmap_atomic((void *)bounce_addr, KM_IRQ0);
                        }
                }

                kunmap_atomic((void *)(src_addr - orig_sgl[i].offset),
                              KM_IRQ0);
        }

        local_irq_restore(flags);

        return total_copied;
}
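
/*
 * Note on cursors: copy_to_bounce_buffer() reuses bounce_sgl[j].length as
 * the write position within bounce page j, which is why
 * create_bounce_buffer() initializes every element with length 0.  The
 * copy uses the KM_IRQ0 atomic kmap slot, so interrupts are disabled
 * around it with local_irq_save()/local_irq_restore().
 */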

/* Assume the original sgl has enough room */
static unsigned int copy_from_bounce_buffer(struct scatterlist *orig_sgl,
                                            struct scatterlist *bounce_sgl,
                                            unsigned int orig_sgl_count)
{
        int i;
        int j = 0;
        unsigned long src, dest;
        unsigned int srclen, destlen, copylen;
        unsigned int total_copied = 0;
        unsigned long bounce_addr = 0;
        unsigned long dest_addr = 0;
        unsigned long flags;

        local_irq_save(flags);

        for (i = 0; i < orig_sgl_count; i++) {
                dest_addr = (unsigned long)kmap_atomic(sg_page((&orig_sgl[i])),
                                                       KM_IRQ0)
                            + orig_sgl[i].offset;
                dest = dest_addr;
                destlen = orig_sgl[i].length;
                /* ASSERT(orig_sgl[i].offset + orig_sgl[i].length <= PAGE_SIZE); */

                if (bounce_addr == 0)
                        bounce_addr = (unsigned long)kmap_atomic(sg_page((&bounce_sgl[j])), KM_IRQ0);

                while (destlen) {
                        src = bounce_addr + bounce_sgl[j].offset;
                        srclen = bounce_sgl[j].length - bounce_sgl[j].offset;

                        copylen = min(srclen, destlen);
                        memcpy((void *)dest, (void *)src, copylen);

                        total_copied += copylen;
                        bounce_sgl[j].offset += copylen;
                        destlen -= copylen;
                        dest += copylen;

                        if (bounce_sgl[j].offset == bounce_sgl[j].length) {
                                /* full */
                                kunmap_atomic((void *)bounce_addr, KM_IRQ0);
                                j++;

                                /* if we need to use another bounce buffer */
                                if (destlen || i != orig_sgl_count - 1)
                                        bounce_addr = (unsigned long)kmap_atomic(sg_page((&bounce_sgl[j])), KM_IRQ0);
                        } else if (destlen == 0 && i == orig_sgl_count - 1) {
                                /* unmap the last bounce that is < PAGE_SIZE */
                                kunmap_atomic((void *)bounce_addr, KM_IRQ0);
                        }
                }

                kunmap_atomic((void *)(dest_addr - orig_sgl[i].offset),
                              KM_IRQ0);
        }

        local_irq_restore(flags);

        return total_copied;
}
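
/*
 * copy_from_bounce_buffer() mirrors the routine above but advances
 * bounce_sgl[j].offset as its read cursor, consuming the lengths that
 * copy_to_bounce_buffer() established at submit time.  A given bounce sgl
 * is therefore single-use: after one copy-out every offset equals its
 * length, and the sgl is destroyed in storvsc_commmand_completion() (or in
 * the queue-full path of storvsc_queuecommand_lck()).
 */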

/*
 * storvsc_queuecommand - Initiate command processing
 */
static int storvsc_queuecommand_lck(struct scsi_cmnd *scmnd,
                                    void (*done)(struct scsi_cmnd *))
{
        int ret;
        struct host_device_context *host_device_ctx =
                (struct host_device_context *)scmnd->device->host->hostdata;
        struct hv_device *device_ctx = host_device_ctx->device_ctx;
        struct hv_driver *drv =
                drv_to_hv_drv(device_ctx->device.driver);
        struct storvsc_driver_object *storvsc_drv_obj = drv->priv;
        struct hv_storvsc_request *request;
        struct storvsc_cmd_request *cmd_request;
        unsigned int request_size = 0;
        int i;
        struct scatterlist *sgl;
        unsigned int sg_count = 0;

        DPRINT_DBG(STORVSC_DRV, "scmnd %p dir %d, use_sg %d buf %p len %d "
                   "queue depth %d tagged %d", scmnd, scmnd->sc_data_direction,
                   scsi_sg_count(scmnd), scsi_sglist(scmnd),
                   scsi_bufflen(scmnd), scmnd->device->queue_depth,
                   scmnd->device->tagged_supported);

        /* If retrying, no need to prep the cmd */
        if (scmnd->host_scribble) {
                /* ASSERT(scmnd->scsi_done != NULL); */

                cmd_request =
                        (struct storvsc_cmd_request *)scmnd->host_scribble;
                DPRINT_INFO(STORVSC_DRV, "retrying scmnd %p cmd_request %p",
                            scmnd, cmd_request);

                goto retry_request;
        }

        /* ASSERT(scmnd->scsi_done == NULL); */
        /* ASSERT(scmnd->host_scribble == NULL); */

        scmnd->scsi_done = done;

        request_size = sizeof(struct storvsc_cmd_request);

        cmd_request = kmem_cache_alloc(host_device_ctx->request_pool,
                                       GFP_ATOMIC);
        if (!cmd_request) {
                DPRINT_ERR(STORVSC_DRV, "scmnd (%p) - unable to allocate "
                           "storvsc_cmd_request...marking queue busy", scmnd);
                scmnd->scsi_done = NULL;
                return SCSI_MLQUEUE_DEVICE_BUSY;
        }

        /* Setup the cmd request */
        cmd_request->bounce_sgl_count = 0;
        cmd_request->bounce_sgl = NULL;
        cmd_request->cmd = scmnd;

        scmnd->host_scribble = (unsigned char *)cmd_request;

        request = &cmd_request->request;

        request->extension =
                (void *)((unsigned long)cmd_request + request_size);
        DPRINT_DBG(STORVSC_DRV, "req %p size %d ext %d", request, request_size,
                   storvsc_drv_obj->request_ext_size);

        /* Build the SRB */
        switch (scmnd->sc_data_direction) {
        case DMA_TO_DEVICE:
                request->type = WRITE_TYPE;
                break;
        case DMA_FROM_DEVICE:
                request->type = READ_TYPE;
                break;
        default:
                request->type = UNKNOWN_TYPE;
                break;
        }

        request->on_io_completion = storvsc_commmand_completion;
        request->context = cmd_request;  /* scmnd; */

        /* request->PortId = scmnd->device->channel; */
        request->host = host_device_ctx->port;
        request->bus = scmnd->device->channel;
        request->target_id = scmnd->device->id;
        request->lun_id = scmnd->device->lun;

        /* ASSERT(scmnd->cmd_len <= 16); */
        request->cdb_len = scmnd->cmd_len;
        request->cdb = scmnd->cmnd;

        request->sense_buffer = scmnd->sense_buffer;
        request->sense_buffer_size = SCSI_SENSE_BUFFERSIZE;


        request->data_buffer.len = scsi_bufflen(scmnd);
        if (scsi_sg_count(scmnd)) {
                sgl = (struct scatterlist *)scsi_sglist(scmnd);
                sg_count = scsi_sg_count(scmnd);

                /* check if we need to bounce the sgl */
                if (do_bounce_buffer(sgl, scsi_sg_count(scmnd)) != -1) {
                        DPRINT_INFO(STORVSC_DRV,
                                    "need to bounce buffer for this scmnd %p",
                                    scmnd);
                        cmd_request->bounce_sgl =
                                create_bounce_buffer(sgl, scsi_sg_count(scmnd),
                                                     scsi_bufflen(scmnd));
                        if (!cmd_request->bounce_sgl) {
                                DPRINT_ERR(STORVSC_DRV,
                                           "unable to create bounce buffer for "
                                           "this scmnd %p", scmnd);

                                scmnd->scsi_done = NULL;
                                scmnd->host_scribble = NULL;
                                kmem_cache_free(host_device_ctx->request_pool,
                                                cmd_request);

                                return SCSI_MLQUEUE_HOST_BUSY;
                        }

                        cmd_request->bounce_sgl_count =
                                ALIGN(scsi_bufflen(scmnd), PAGE_SIZE) >>
                                PAGE_SHIFT;

                        /*
                         * FIXME: We can optimize on reads by just skipping
                         * this
                         */
                        copy_to_bounce_buffer(sgl, cmd_request->bounce_sgl,
                                              scsi_sg_count(scmnd));

                        sgl = cmd_request->bounce_sgl;
                        sg_count = cmd_request->bounce_sgl_count;
                }

                request->data_buffer.offset = sgl[0].offset;

                for (i = 0; i < sg_count; i++) {
                        DPRINT_DBG(STORVSC_DRV, "sgl[%d] len %d offset %d\n",
                                   i, sgl[i].length, sgl[i].offset);
                        request->data_buffer.pfn_array[i] =
                                page_to_pfn(sg_page((&sgl[i])));
                }
        } else if (scsi_sglist(scmnd)) {
                /* ASSERT(scsi_bufflen(scmnd) <= PAGE_SIZE); */
                request->data_buffer.offset =
                        virt_to_phys(scsi_sglist(scmnd)) & (PAGE_SIZE - 1);
                request->data_buffer.pfn_array[0] =
                        virt_to_phys(scsi_sglist(scmnd)) >> PAGE_SHIFT;
        }

retry_request:
        /* Invokes the vsc to start an IO */
        ret = storvsc_drv_obj->on_io_request(device_ctx,
                                             &cmd_request->request);
        if (ret == -1) {
                /* no more space */
                DPRINT_ERR(STORVSC_DRV,
                           "scmnd (%p) - queue FULL...marking queue busy",
                           scmnd);

                if (cmd_request->bounce_sgl_count) {
                        /*
                         * FIXME: We can optimize on writes by just skipping
                         * this
                         */
                        copy_from_bounce_buffer(scsi_sglist(scmnd),
                                                cmd_request->bounce_sgl,
                                                scsi_sg_count(scmnd));
                        destroy_bounce_buffer(cmd_request->bounce_sgl,
                                              cmd_request->bounce_sgl_count);
                }

                kmem_cache_free(host_device_ctx->request_pool, cmd_request);

                scmnd->scsi_done = NULL;
                scmnd->host_scribble = NULL;

                ret = SCSI_MLQUEUE_DEVICE_BUSY;
        }

        return ret;
}

static DEF_SCSI_QCMD(storvsc_queuecommand)

static int storvsc_merge_bvec(struct request_queue *q,
                              struct bvec_merge_data *bmd,
                              struct bio_vec *bvec)
{
        /* checking done by caller. */
        return bvec->bv_len;
}
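
/*
 * Returning bvec->bv_len unconditionally tells the block layer the whole
 * bio_vec may be merged; the real per-segment limit is imposed instead by
 * blk_queue_max_segment_size(..., PAGE_SIZE) in storvsc_device_configure()
 * below, which keeps every sg element within one page frame.
 */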

/*
 * storvsc_device_alloc - Set device flags for the specified scsi device
 */
static int storvsc_device_alloc(struct scsi_device *sdevice)
{
        DPRINT_DBG(STORVSC_DRV, "sdev (%p) - setting device flag to %d",
                   sdevice, BLIST_SPARSELUN);
        /*
         * This enables luns to be located sparsely. Otherwise, we may not
         * discover them.
         */
        sdevice->sdev_bflags |= BLIST_SPARSELUN | BLIST_LARGELUN;
        return 0;
}

/*
 * storvsc_device_configure - Configure the specified scsi device
 */
static int storvsc_device_configure(struct scsi_device *sdevice)
{
        DPRINT_INFO(STORVSC_DRV, "sdev (%p) - curr queue depth %d", sdevice,
                    sdevice->queue_depth);

        DPRINT_INFO(STORVSC_DRV, "sdev (%p) - setting queue depth to %d",
                    sdevice, STORVSC_MAX_IO_REQUESTS);
        scsi_adjust_queue_depth(sdevice, MSG_SIMPLE_TAG,
                                STORVSC_MAX_IO_REQUESTS);

        DPRINT_INFO(STORVSC_DRV, "sdev (%p) - setting max segment size to %ld",
                    sdevice, PAGE_SIZE);
        blk_queue_max_segment_size(sdevice->request_queue, PAGE_SIZE);

        DPRINT_INFO(STORVSC_DRV, "sdev (%p) - adding merge bio vec routine",
                    sdevice);
        blk_queue_merge_bvec(sdevice->request_queue, storvsc_merge_bvec);

        blk_queue_bounce_limit(sdevice->request_queue, BLK_BOUNCE_ANY);
        /* sdevice->timeout = (2000 * HZ); */ /* (75 * HZ); */

        return 0;
}

/*
 * storvsc_host_reset_handler - Reset the scsi HBA
 */
static int storvsc_host_reset_handler(struct scsi_cmnd *scmnd)
{
        int ret;
        struct host_device_context *host_device_ctx =
                (struct host_device_context *)scmnd->device->host->hostdata;
        struct hv_device *device_ctx = host_device_ctx->device_ctx;

        DPRINT_INFO(STORVSC_DRV, "sdev (%p) dev obj (%p) - host resetting...",
                    scmnd->device, device_ctx);

        /* Invokes the vsc to reset the host/bus */
        ret = stor_vsc_on_host_reset(device_ctx);
        if (ret != 0)
                return ret;

        DPRINT_INFO(STORVSC_DRV, "sdev (%p) dev obj (%p) - host reset done",
                    scmnd->device, device_ctx);

        return ret;
}

static int storvsc_get_chs(struct scsi_device *sdev, struct block_device *bdev,
                           sector_t capacity, int *info)
{
        sector_t total_sectors = capacity;
        sector_t cylinder_times_heads = 0;
        sector_t temp = 0;

        int sectors_per_track = 0;
        int heads = 0;
        int cylinders = 0;
        int rem = 0;

        if (total_sectors > (65535 * 16 * 255))
                total_sectors = (65535 * 16 * 255);

        if (total_sectors >= (65535 * 16 * 63)) {
                sectors_per_track = 255;
                heads = 16;

                cylinder_times_heads = total_sectors;
                /* sector_div stores the quotient in cylinder_times_heads */
                rem = sector_div(cylinder_times_heads, sectors_per_track);
        } else {
                sectors_per_track = 17;

                cylinder_times_heads = total_sectors;
                /* sector_div stores the quotient in cylinder_times_heads */
                rem = sector_div(cylinder_times_heads, sectors_per_track);

                temp = cylinder_times_heads + 1023;
                /* sector_div stores the quotient in temp */
                rem = sector_div(temp, 1024);

                heads = temp;

                if (heads < 4)
                        heads = 4;

                if (cylinder_times_heads >= (heads * 1024) || (heads > 16)) {
                        sectors_per_track = 31;
                        heads = 16;

                        cylinder_times_heads = total_sectors;
                        /*
                         * sector_div stores the quotient in
                         * cylinder_times_heads
                         */
                        rem = sector_div(cylinder_times_heads,
                                         sectors_per_track);
                }

                if (cylinder_times_heads >= (heads * 1024)) {
                        sectors_per_track = 63;
                        heads = 16;

                        cylinder_times_heads = total_sectors;
                        /*
                         * sector_div stores the quotient in
                         * cylinder_times_heads
                         */
                        rem = sector_div(cylinder_times_heads,
                                         sectors_per_track);
                }
        }

        temp = cylinder_times_heads;
        /* sector_div stores the quotient in temp */
        rem = sector_div(temp, heads);
        cylinders = temp;

        info[0] = heads;
        info[1] = sectors_per_track;
        info[2] = cylinders;

        DPRINT_INFO(STORVSC_DRV, "CHS (%d, %d, %d)", cylinders, heads,
                    sectors_per_track);

        return 0;
}
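
/*
 * Worked example (hypothetical 4 GiB disk, 8388608 512-byte sectors):
 * 8388608 < 65535*16*63, so the else branch runs.  With 17 sectors/track
 * the heads estimate comes out to 482 (> 16), so the geometry is redone
 * with 31 and then 63 sectors/track, settling on heads = 16,
 * sectors_per_track = 63, cylinders = 8388608 / 63 / 16 = 8322, i.e.
 * CHS (8322, 16, 63), which addresses 8388576 of the 8388608 sectors.
 */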

static int __init storvsc_init(void)
{
        int ret;

        DPRINT_INFO(STORVSC_DRV, "Storvsc initializing....");
        ret = storvsc_drv_init(stor_vsc_initialize);
        return ret;
}

static void __exit storvsc_exit(void)
{
        storvsc_drv_exit();
}

MODULE_LICENSE("GPL");
MODULE_VERSION(HV_DRV_VERSION);
MODULE_DESCRIPTION("Microsoft Hyper-V virtual storage driver");
module_init(storvsc_init);
module_exit(storvsc_exit);