/*
 * Staging: hv: add the Hyper-V virtual storage driver
 * drivers/staging/hv/storvsc_drv.c
 * (imported from GitHub/mt8127/android_kernel_alcatel_ttab.git, commit bef4a34a, HJ)
 */
1/*
2 *
3 * Copyright (c) 2009, Microsoft Corporation.
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms and conditions of the GNU General Public License,
7 * version 2, as published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 *
14 * You should have received a copy of the GNU General Public License along with
15 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
16 * Place - Suite 330, Boston, MA 02111-1307 USA.
17 *
18 * Authors:
19 * Haiyang Zhang <haiyangz@microsoft.com>
20 * Hank Janssen <hjanssen@microsoft.com>
21 *
22 */
23
24
25#include <linux/init.h>
26#include <linux/module.h>
27#include <linux/device.h>
28#include <linux/blkdev.h>
29
30#include <scsi/scsi.h>
31#include <scsi/scsi_cmnd.h>
32#include <scsi/scsi_host.h>
33#include <scsi/scsi_device.h>
34#include <scsi/scsi_tcq.h>
35#include <scsi/scsi_eh.h>
36#include <scsi/scsi_devinfo.h>
37
38#ifdef KERNEL_2_6_5
39#else
40#include <scsi/scsi_dbg.h>
41#endif
42
43#include "logging.h"
44#include "vmbus.h"
45
46#include "StorVscApi.h"
47
48//
49// #defines
50//
51
52//
53// Data types
54//
/*
 * Per-HBA private data, stored in the Scsi_Host hostdata area
 * (allocated by storvsc_probe() via scsi_host_alloc()).
 */
struct host_device_context {
	struct work_struct host_rescan_work; /* must be 1st field: the 2.6.27
						rescan callback casts the
						work_struct pointer back to
						this struct */
	struct device_context *device_ctx; /* point back to our device context */
#ifdef KERNEL_2_6_27
	struct kmem_cache *request_pool;   /* pool of storvsc_cmd_request + VSC extension */
#else
	kmem_cache_t *request_pool;
#endif
	unsigned int port;    /* SCSI host number (set to host->host_no in probe) */
	unsigned char path;   /* PathId reported by the VSC on OnDeviceAdd */
	unsigned char target; /* TargetId reported by the VSC on OnDeviceAdd */
};
67
/*
 * Per-command bookkeeping, allocated from host_device_ctx->request_pool.
 * The pool object size is sizeof(struct storvsc_cmd_request) +
 * storvsc_drv_obj->RequestExtSize, so the VSC's extension buffer lives
 * immediately after this struct (request.Extension is pointed at it in
 * storvsc_queuecommand()).
 */
struct storvsc_cmd_request {
	struct list_head entry;
	struct scsi_cmnd *cmd;          /* originating midlayer command */

	unsigned int bounce_sgl_count;  /* 0 when no bounce buffer is in use */
	struct scatterlist *bounce_sgl; /* bounce pages for sgls with holes */

	STORVSC_REQUEST request;
	/* !!!DO NOT ADD ANYTHING BELOW HERE!!! */
	/* The extension buffer falls right here and is pointed to by
	   request.Extension */
};
79
/*
 * Driver-wide context; one static instance (g_storvsc_drv) exists.
 */
struct storvsc_driver_context {
	/* !! These must be the first 2 fields !!
	   code casts a driver_context* (from driver_to_driver_context())
	   directly to storvsc_driver_context* */
	struct driver_context drv_ctx;
	STORVSC_DRIVER_OBJECT drv_obj;
};
85
// Static decl
static int storvsc_probe(struct device *dev);
static int storvsc_queuecommand(struct scsi_cmnd *scmnd, void (*done)(struct scsi_cmnd *));
static int storvsc_device_alloc(struct scsi_device *);
static int storvsc_device_configure(struct scsi_device *);
static int storvsc_host_reset_handler(struct scsi_cmnd *scmnd);
#ifdef KERNEL_2_6_27
static void storvsc_host_rescan_callback(struct work_struct *work);
#else
static void storvsc_host_rescan_callback(void* context);
#endif
static void storvsc_host_rescan(DEVICE_OBJECT* device_obj);
static int storvsc_remove(struct device *dev);

// Bounce-buffer helpers: used when a scatterlist has "holes" (entries
// that do not tile whole pages) which cannot be described to the host.
static struct scatterlist *create_bounce_buffer(struct scatterlist *sgl, unsigned int sg_count, unsigned int len);
static void destroy_bounce_buffer(struct scatterlist *sgl, unsigned int sg_count);
static int do_bounce_buffer(struct scatterlist *sgl, unsigned int sg_count);
static unsigned int copy_from_bounce_buffer(struct scatterlist *orig_sgl, struct scatterlist *bounce_sgl, unsigned int orig_sgl_count);
static unsigned int copy_to_bounce_buffer(struct scatterlist *orig_sgl, struct scatterlist *bounce_sgl, unsigned int orig_sgl_count);

// NOTE(review): not defined in this chunk of the file; presumably
// defined further down — confirm before touching.
static int storvsc_report_luns(struct scsi_device *sdev, unsigned int luns[], unsigned int *lun_count);
static int storvsc_get_chs(struct scsi_device *sdev, struct block_device * bdev, sector_t capacity, int *info);


// Ring buffer size for each channel (default; copied into the driver
// object in storvsc_drv_init()).
static int storvsc_ringbuffer_size = STORVSC_RING_BUFFER_SIZE;

// The one and only one
static struct storvsc_driver_context g_storvsc_drv;
114
// Scsi host template used for every HBA created by storvsc_probe()
static struct scsi_host_template scsi_driver = {
	.module = THIS_MODULE,
	.name = "storvsc_host_t",
	.bios_param = storvsc_get_chs,
	.queuecommand = storvsc_queuecommand,
	.eh_host_reset_handler = storvsc_host_reset_handler,
	.slave_alloc = storvsc_device_alloc,
	.slave_configure = storvsc_device_configure,
	.cmd_per_lun = 1,
	// storvsc_drv_init() refuses to load unless the channel supports at
	// least STORVSC_MAX_IO_REQUESTS outstanding requests
	.can_queue = STORVSC_MAX_IO_REQUESTS*STORVSC_MAX_TARGETS, // 64 max_queue * 1 target
	.this_id = -1,
	// no use setting to 0 since ll_blk_rw reset it to 1
	.sg_tablesize = MAX_MULTIPAGE_BUFFER_COUNT,// currently 32
	// ENABLE_CLUSTERING allows multiple physically contig bio_vecs to merge into 1 sg element.
	// If set, we must limit the max_segment_size to PAGE_SIZE, otherwise we may get 1 sg
	// element that represents multiple physically contig pfns (ie sg[x].length > PAGE_SIZE);
	// storvsc_device_configure() applies that limit per device.
	.use_clustering = ENABLE_CLUSTERING,
	// Make sure we dont get a sg segment that crosses a page boundary
	.dma_boundary = PAGE_SIZE-1,
};
136
137
/*++

Name: storvsc_drv_init()

Desc: StorVsc driver initialization.  Wires the driver object to the
      vmbus, lets the client driver (pfn_drv_init) finish filling in the
      driver object, then registers this driver as a vmbus child driver.
      Returns 0 on success, -1 if the channel supports fewer outstanding
      requests than the template's can_queue assumes.

--*/
int storvsc_drv_init(PFN_DRIVERINITIALIZE pfn_drv_init)
{
	int ret=0;
	STORVSC_DRIVER_OBJECT *storvsc_drv_obj=&g_storvsc_drv.drv_obj;
	struct driver_context *drv_ctx=&g_storvsc_drv.drv_ctx;

	DPRINT_ENTER(STORVSC_DRV);

	vmbus_get_interface(&storvsc_drv_obj->Base.VmbusChannelInterface);

	storvsc_drv_obj->RingBufferSize = storvsc_ringbuffer_size;
	storvsc_drv_obj->OnHostRescan = storvsc_host_rescan;

	// Callback to client driver to complete the initialization
	pfn_drv_init(&storvsc_drv_obj->Base);

	DPRINT_INFO(STORVSC_DRV, "request extension size %u, max outstanding reqs %u", storvsc_drv_obj->RequestExtSize, storvsc_drv_obj->MaxOutstandingRequestsPerChannel);

	// Refuse to load if we could queue more I/O than a single channel
	// can have in flight (can_queue is STORVSC_MAX_IO_REQUESTS based)
	if (storvsc_drv_obj->MaxOutstandingRequestsPerChannel < STORVSC_MAX_IO_REQUESTS)
	{
		DPRINT_ERR(STORVSC_DRV, "The number of outstanding io requests (%d) is larger than that supported (%d) internally.",
			STORVSC_MAX_IO_REQUESTS, storvsc_drv_obj->MaxOutstandingRequestsPerChannel);
		return -1;
	}

	drv_ctx->driver.name = storvsc_drv_obj->Base.name;
	memcpy(&drv_ctx->class_id, &storvsc_drv_obj->Base.deviceType, sizeof(GUID));

#if defined(KERNEL_2_6_5) || defined(KERNEL_2_6_9)
	drv_ctx->driver.probe = storvsc_probe;
	drv_ctx->driver.remove = storvsc_remove;
#else
	drv_ctx->probe = storvsc_probe;
	drv_ctx->remove = storvsc_remove;
#endif

	// The driver belongs to vmbus
	vmbus_child_driver_register(drv_ctx);

	DPRINT_EXIT(STORVSC_DRV);

	return ret;
}
188
189
/*
 * driver_for_each_device() callback used by storvsc_drv_exit(): record
 * the device in *data and stop the iteration after the first hit.
 */
static int storvsc_drv_exit_cb(struct device *dev, void *data)
{
	struct device **found = data;

	*found = dev;
	return 1; /* non-zero return stops driver_for_each_device() */
}
196
/*++

Name: storvsc_drv_exit()

Desc: Driver teardown.  Unregisters every device still bound to this
      driver (top-down via device_unregister()), gives the VSC a chance
      to clean up, then unregisters the driver from the vmbus.

--*/
void storvsc_drv_exit(void)
{
	STORVSC_DRIVER_OBJECT *storvsc_drv_obj=&g_storvsc_drv.drv_obj;
	struct driver_context *drv_ctx=&g_storvsc_drv.drv_ctx;

	struct device *current_dev=NULL;

#if defined(KERNEL_2_6_5) || defined(KERNEL_2_6_9)
	// Those kernels lack driver_for_each_device(); emulate it with an
	// open-coded walk of the driver's device list.  NOTE(review): this
	// statement-shaped macro ignores the start argument and return value
	// of the real API — it is only valid for the single use below.
#define driver_for_each_device(drv, start, data, fn) \
	struct list_head *ptr, *n; \
	list_for_each_safe(ptr, n, &((drv)->devices)) {\
		struct device *curr_dev;\
		curr_dev = list_entry(ptr, struct device, driver_list);\
		fn(curr_dev, data);\
	}
#endif // KERNEL_2_6_9

	DPRINT_ENTER(STORVSC_DRV);

	while (1)
	{
		current_dev = NULL;

		// Get the device (storvsc_drv_exit_cb stores the first device
		// found into current_dev and stops the iteration)
		driver_for_each_device(&drv_ctx->driver, NULL, (void*)&current_dev, storvsc_drv_exit_cb);

		if (current_dev == NULL)
			break;

		// Initiate removal from the top-down
		device_unregister(current_dev);
	}

	if (storvsc_drv_obj->Base.OnCleanup)
		storvsc_drv_obj->Base.OnCleanup(&storvsc_drv_obj->Base);

	vmbus_child_driver_unregister(drv_ctx);

	DPRINT_EXIT(STORVSC_DRV);

	return;
}
246
/*++

Name: storvsc_probe()

Desc: Add a new device for this driver.  Allocates a Scsi_Host whose
      hostdata is our host_device_context, creates the per-command
      request pool, asks the VSC to add the device, registers the host
      with the SCSI midlayer, and kicks off the bus scan.  Returns 0 on
      success; on failure undoes everything done so far.

--*/
static int storvsc_probe(struct device *device)
{
	int ret=0;

	struct driver_context *driver_ctx = driver_to_driver_context(device->driver);
	struct storvsc_driver_context *storvsc_drv_ctx = (struct storvsc_driver_context*)driver_ctx;
	STORVSC_DRIVER_OBJECT* storvsc_drv_obj = &storvsc_drv_ctx->drv_obj;

	struct device_context *device_ctx = device_to_device_context(device);
	DEVICE_OBJECT* device_obj = &device_ctx->device_obj;

	struct Scsi_Host *host;
	struct host_device_context *host_device_ctx;
	STORVSC_DEVICE_INFO device_info; // filled in by OnDeviceAdd below

	DPRINT_ENTER(STORVSC_DRV);

	if (!storvsc_drv_obj->Base.OnDeviceAdd)
		return -1;

	// hostdata carries our per-HBA context
	host = scsi_host_alloc(&scsi_driver, sizeof(struct host_device_context));
	if (!host)
	{
		DPRINT_ERR(STORVSC_DRV, "unable to allocate scsi host object");
		return -ENOMEM;
	}

	device->driver_data = host;

	host_device_ctx = (struct host_device_context*)host->hostdata;
	memset(host_device_ctx, 0, sizeof(struct host_device_context));

	host_device_ctx->port = host->host_no;
	host_device_ctx->device_ctx = device_ctx;

	// Pre-2.6.27 INIT_WORK takes the callback argument explicitly; on
	// 2.6.27 the callback recovers it by casting the work_struct (which
	// is why host_rescan_work must stay the first field).
#if defined(KERNEL_2_6_5) || defined(KERNEL_2_6_9)
#elif defined(KERNEL_2_6_27)
	INIT_WORK(&host_device_ctx->host_rescan_work, storvsc_host_rescan_callback);
#else
	INIT_WORK(&host_device_ctx->host_rescan_work, storvsc_host_rescan_callback, device_obj);
#endif

	// Pool objects are a storvsc_cmd_request followed immediately by the
	// VSC's per-request extension buffer (RequestExtSize bytes).
#if defined(KERNEL_2_6_27)
	host_device_ctx->request_pool =
		kmem_cache_create
		(device_ctx->device.bus_id,
		 sizeof(struct storvsc_cmd_request) + storvsc_drv_obj->RequestExtSize,
		 0,
		 SLAB_HWCACHE_ALIGN, NULL);
#else
	host_device_ctx->request_pool =
		kmem_cache_create
		(device_ctx->device.bus_id,
		 sizeof(struct storvsc_cmd_request) + storvsc_drv_obj->RequestExtSize,
		 0,
		 SLAB_HWCACHE_ALIGN, NULL, NULL);
#endif

	if (!host_device_ctx->request_pool)
	{
		scsi_host_put(host);
		DPRINT_EXIT(STORVSC_DRV);

		return -ENOMEM;
	}

	device_info.PortNumber = host->host_no;
	// Call to the vsc driver to add the device
	ret = storvsc_drv_obj->Base.OnDeviceAdd(device_obj, (void*)&device_info);
	if (ret != 0)
	{
		DPRINT_ERR(STORVSC_DRV, "unable to add scsi vsc device");
		kmem_cache_destroy(host_device_ctx->request_pool);
		scsi_host_put(host);
		DPRINT_EXIT(STORVSC_DRV);

		return -1;
	}

	// OnDeviceAdd filled in the rest of device_info
	//host_device_ctx->port = device_info.PortNumber;
	host_device_ctx->path = device_info.PathId;
	host_device_ctx->target = device_info.TargetId;

	host->max_lun = STORVSC_MAX_LUNS_PER_TARGET;	// max # of devices per target
	host->max_id = STORVSC_MAX_TARGETS;		// max # of targets per channel
	host->max_channel = STORVSC_MAX_CHANNELS -1;	// max # of channels

	// Register the HBA and start the scsi bus scan
	ret = scsi_add_host(host, device);
	if (ret != 0)
	{
		DPRINT_ERR(STORVSC_DRV, "unable to add scsi host device");

		storvsc_drv_obj->Base.OnDeviceRemove(device_obj);

		kmem_cache_destroy(host_device_ctx->request_pool);
		scsi_host_put(host);
		DPRINT_EXIT(STORVSC_DRV);

		return -1;
	}

	scsi_scan_host(host);

	DPRINT_EXIT(STORVSC_DRV);

	return ret;
}
362
363
/*++

Name: storvsc_remove()

Desc: Callback when our device is removed.  Tells the VSC to remove the
      device, destroys the request pool, then removes and releases the
      SCSI host.

--*/
static int storvsc_remove(struct device *device)
{
	int ret=0;

	struct driver_context *driver_ctx = driver_to_driver_context(device->driver);
	struct storvsc_driver_context *storvsc_drv_ctx = (struct storvsc_driver_context*)driver_ctx;
	STORVSC_DRIVER_OBJECT* storvsc_drv_obj = &storvsc_drv_ctx->drv_obj;

	struct device_context *device_ctx = device_to_device_context(device);
	DEVICE_OBJECT* device_obj = &device_ctx->device_obj;

	// storvsc_probe() stored the Scsi_Host pointer here
	struct Scsi_Host *host = (struct Scsi_Host *)device->driver_data;
	struct host_device_context *host_device_ctx=(struct host_device_context*)host->hostdata;


	DPRINT_ENTER(STORVSC_DRV);

	if (!storvsc_drv_obj->Base.OnDeviceRemove)
	{
		DPRINT_EXIT(STORVSC_DRV);
		return -1;
	}

	// Call to the vsc driver to let it know that the device is being removed
	ret = storvsc_drv_obj->Base.OnDeviceRemove(device_obj);
	if (ret != 0)
	{
		// TODO:
		DPRINT_ERR(STORVSC, "unable to remove vsc device (ret %d)", ret);
	}

	// NOTE(review): the request pool is destroyed before
	// scsi_remove_host(); presumably OnDeviceRemove() guarantees no
	// commands are still outstanding at this point — confirm, otherwise
	// a late completion would free into a destroyed cache.
	if (host_device_ctx->request_pool)
	{
		kmem_cache_destroy(host_device_ctx->request_pool);
		host_device_ctx->request_pool = NULL;
	}

	DPRINT_INFO(STORVSC, "removing host adapter (%p)...", host);
	scsi_remove_host(host);

	DPRINT_INFO(STORVSC, "releasing host adapter (%p)...", host);
	scsi_host_put(host);

	DPRINT_EXIT(STORVSC_DRV);

	return ret;
}
418
/*++

Name: storvsc_commmand_completion()

Desc: Command completion processing.  Invoked by the VSC when an I/O
      finishes: copies data back out of any bounce buffer, propagates
      status/sense/residual to the scsi_cmnd, completes it via the saved
      scsi_done, and returns the request to the pool.

--*/
static void storvsc_commmand_completion(STORVSC_REQUEST* request)
{
	struct storvsc_cmd_request *cmd_request = (struct storvsc_cmd_request*)request->Context;
	struct scsi_cmnd *scmnd = cmd_request->cmd;
	struct host_device_context *host_device_ctx = (struct host_device_context*)scmnd->device->host->hostdata;
	void (*scsi_done_fn)(struct scsi_cmnd *);
#if defined(KERNEL_2_6_5) || defined(KERNEL_2_6_9)
#else
	struct scsi_sense_hdr sense_hdr;
#endif

	ASSERT(request == &cmd_request->request);
	ASSERT((unsigned long)scmnd->host_scribble == (unsigned long)cmd_request);
	ASSERT(scmnd);
	ASSERT(scmnd->scsi_done);

	DPRINT_ENTER(STORVSC_DRV);

	if (cmd_request->bounce_sgl_count)// using bounce buffer
	{
		//printk("copy_from_bounce_buffer\n");

		// FIXME: We can optimize on writes by just skipping this
#ifdef KERNEL_2_6_27
		copy_from_bounce_buffer(scsi_sglist(scmnd), cmd_request->bounce_sgl, scsi_sg_count(scmnd));
#else
		copy_from_bounce_buffer(scmnd->request_buffer, cmd_request->bounce_sgl, scmnd->use_sg);
#endif
		destroy_bounce_buffer(cmd_request->bounce_sgl, cmd_request->bounce_sgl_count);
	}

	scmnd->result = request->Status;

	if (scmnd->result)
	{
		// On failure, decode and log the sense data (newer kernels only)
#if defined(KERNEL_2_6_5) || defined(KERNEL_2_6_9)
		DPRINT_INFO(STORVSC_DRV, "scsi result nonzero - %d", scmnd->result);
#else
		if (scsi_normalize_sense(scmnd->sense_buffer, request->SenseBufferSize, &sense_hdr))
		{
			scsi_print_sense_hdr("storvsc", &sense_hdr);
		}
#endif
	}

	// Residual = bytes requested minus bytes actually transferred
	ASSERT(request->BytesXfer <= request->DataBuffer.Length);
#ifdef KERNEL_2_6_27
	scsi_set_resid(scmnd, request->DataBuffer.Length - request->BytesXfer);
#else
	scmnd->resid = request->DataBuffer.Length - request->BytesXfer;
#endif

	// Clear our bookkeeping before completing: a retry in
	// storvsc_queuecommand() keys off host_scribble being non-NULL
	scsi_done_fn = scmnd->scsi_done;

	scmnd->host_scribble = NULL;
	scmnd->scsi_done = NULL;

	// !!DO NOT MODIFY the scmnd after this call
	scsi_done_fn(scmnd);

	kmem_cache_free(host_device_ctx->request_pool, cmd_request);

	DPRINT_EXIT(STORVSC_DRV);
}
490
491static int do_bounce_buffer(struct scatterlist *sgl, unsigned int sg_count)
492{
493 int i=0;
494
495 // No need to check
496 if (sg_count < 2)
497 return -1;
498
499 // We have at least 2 sg entries
500 for ( i=0; i<sg_count; i++ )
501 {
502 if (i == 0) // make sure 1st one does not have hole
503 {
504 if (sgl[i].offset + sgl[i].length != PAGE_SIZE)
505 return i;
506 }
507 else if (i == sg_count - 1) // make sure last one does not have hole
508 {
509 if (sgl[i].offset != 0)
510 return i;
511 }
512 else // make sure no hole in the middle
513 {
514 if (sgl[i].length != PAGE_SIZE || sgl[i].offset != 0)
515 {
516 return i;
517 }
518 }
519 }
520 return -1;
521}
522
523static struct scatterlist *create_bounce_buffer(struct scatterlist *sgl, unsigned int sg_count, unsigned int len)
524{
525 int i;
526 int num_pages=0;
527 struct scatterlist* bounce_sgl;
528 struct page *page_buf;
529
530 num_pages = ALIGN_UP(len, PAGE_SIZE) >> PAGE_SHIFT;
531
532 bounce_sgl = kzalloc(num_pages * sizeof(struct scatterlist), GFP_ATOMIC);
533 if (!bounce_sgl)
534 {
535 return NULL;
536 }
537
538 for(i=0; i<num_pages; i++)
539 {
540 page_buf = alloc_page(GFP_ATOMIC);
541 if (!page_buf)
542 {
543 goto cleanup;
544 }
545#ifdef KERNEL_2_6_27
546 sg_set_page(&bounce_sgl[i], page_buf, 0, 0);
547#else
548 bounce_sgl[i].page = page_buf;
549 bounce_sgl[i].offset = 0;
550 bounce_sgl[i].length = 0;
551#endif
552 }
553
554 return bounce_sgl;
555
556cleanup:
557 destroy_bounce_buffer(bounce_sgl, num_pages);
558 return NULL;
559}
560
561static void destroy_bounce_buffer(struct scatterlist *sgl, unsigned int sg_count)
562{
563 int i;
564 struct page *page_buf;
565
566 for (i=0; i<sg_count; i++)
567 {
568#ifdef KERNEL_2_6_27
569 if ((page_buf = sg_page((&sgl[i]))) != NULL)
570#else
571 if ((page_buf = sgl[i].page) != NULL)
572#endif
573
574 {
575 __free_page(page_buf);
576 }
577 }
578
579 kfree(sgl);
580}
581
// Copy the data described by orig_sgl into the bounce pages, packing it
// contiguously (bounce entries are assumed to start with length 0 and
// offset 0, i.e. freshly built by create_bounce_buffer()).  Runs with
// local irqs disabled because both mappings use the KM_IRQ0 kmap slot.
// Returns the number of bytes copied.
// Assume the bounce_sgl has enough room ie using the create_bounce_buffer()
static unsigned int copy_to_bounce_buffer(struct scatterlist *orig_sgl, struct scatterlist *bounce_sgl, unsigned int orig_sgl_count)
{
	int i=0,j=0;                    // i: source entry, j: bounce entry
	unsigned long src, dest;
	unsigned int srclen, destlen, copylen;
	unsigned int total_copied=0;
	unsigned long bounce_addr=0;
	unsigned long src_addr=0;
	unsigned long flags;

	local_irq_save(flags);

	for (i=0; i<orig_sgl_count; i++)
	{
#ifdef KERNEL_2_6_27
		src_addr = (unsigned long)kmap_atomic(sg_page((&orig_sgl[i])), KM_IRQ0) + orig_sgl[i].offset;
#else
		src_addr = (unsigned long)kmap_atomic(orig_sgl[i].page, KM_IRQ0) + orig_sgl[i].offset;
#endif
		src = src_addr;
		srclen = orig_sgl[i].length;

		//if (PageHighMem(orig_sgl[i].page))
		//	printk("HighMem page detected - addr %p", (void*)src);

		ASSERT(orig_sgl[i].offset + orig_sgl[i].length <= PAGE_SIZE);

		// Map the first bounce page once; subsequent pages are mapped
		// as each one fills up below.
		if (j == 0)
		{
#ifdef KERNEL_2_6_27
			bounce_addr = (unsigned long)kmap_atomic(sg_page((&bounce_sgl[j])), KM_IRQ0);
#else
			bounce_addr = (unsigned long)kmap_atomic(bounce_sgl[j].page, KM_IRQ0);
#endif
		}

		while (srclen)
		{
			// assume bounce offset always == 0; length tracks how
			// much of the bounce page is already filled
			dest = bounce_addr + bounce_sgl[j].length;
			destlen = PAGE_SIZE - bounce_sgl[j].length;

			copylen = MIN(srclen, destlen);
			memcpy((void*)dest, (void*)src, copylen);

			total_copied += copylen;
			bounce_sgl[j].length += copylen;
			srclen -= copylen;
			src += copylen;

			if (bounce_sgl[j].length == PAGE_SIZE) // full..move to next entry
			{
				kunmap_atomic((void*)bounce_addr, KM_IRQ0);
				j++;

				// if we need to use another bounce buffer
				if (srclen || i != orig_sgl_count -1)
				{
#ifdef KERNEL_2_6_27
					bounce_addr = (unsigned long)kmap_atomic(sg_page((&bounce_sgl[j])), KM_IRQ0);
#else
					bounce_addr = (unsigned long)kmap_atomic(bounce_sgl[j].page, KM_IRQ0);
#endif
				}
			}
			else if (srclen == 0 && i == orig_sgl_count -1) // unmap the last bounce that is < PAGE_SIZE
			{
				kunmap_atomic((void*)bounce_addr, KM_IRQ0);
			}
		}

		// unmap the source page (subtract the offset added above)
		kunmap_atomic((void*)(src_addr - orig_sgl[i].offset), KM_IRQ0);
	}

	local_irq_restore(flags);

	return total_copied;
}
661
// Copy data back from the bounce pages into the original scatterlist
// after I/O completes.  Consumes bounce_sgl[j].offset as a read cursor,
// so each bounce list can only be drained once.  Runs with local irqs
// disabled because both mappings use the KM_IRQ0 kmap slot.  Returns
// the number of bytes copied.
// Assume the original sgl has enough room
static unsigned int copy_from_bounce_buffer(struct scatterlist *orig_sgl, struct scatterlist *bounce_sgl, unsigned int orig_sgl_count)
{
	int i=0,j=0;                    // i: destination entry, j: bounce entry
	unsigned long src, dest;
	unsigned int srclen, destlen, copylen;
	unsigned int total_copied=0;
	unsigned long bounce_addr=0;
	unsigned long dest_addr=0;
	unsigned long flags;

	local_irq_save(flags);

	for (i=0; i<orig_sgl_count; i++)
	{
#ifdef KERNEL_2_6_27
		dest_addr = (unsigned long)kmap_atomic(sg_page((&orig_sgl[i])), KM_IRQ0) + orig_sgl[i].offset;
#else
		dest_addr = (unsigned long)kmap_atomic(orig_sgl[i].page, KM_IRQ0) + orig_sgl[i].offset;
#endif
		dest = dest_addr;
		destlen = orig_sgl[i].length;
		ASSERT(orig_sgl[i].offset + orig_sgl[i].length <= PAGE_SIZE);

		// Map the first bounce page once; later pages are mapped as
		// each one is drained below.
		if (j == 0)
		{
#ifdef KERNEL_2_6_27
			bounce_addr = (unsigned long)kmap_atomic(sg_page((&bounce_sgl[j])), KM_IRQ0);
#else
			bounce_addr = (unsigned long)kmap_atomic(bounce_sgl[j].page, KM_IRQ0);
#endif
		}

		while (destlen)
		{
			// offset tracks how much of this bounce page has
			// already been copied out
			src = bounce_addr + bounce_sgl[j].offset;
			srclen = bounce_sgl[j].length - bounce_sgl[j].offset;

			copylen = MIN(srclen, destlen);
			memcpy((void*)dest, (void*)src, copylen);

			total_copied += copylen;
			bounce_sgl[j].offset += copylen;
			destlen -= copylen;
			dest += copylen;

			if (bounce_sgl[j].offset == bounce_sgl[j].length) // full
			{
				kunmap_atomic((void*)bounce_addr, KM_IRQ0);
				j++;

				// if we need to use another bounce buffer
				if (destlen || i != orig_sgl_count -1)
				{
#ifdef KERNEL_2_6_27
					bounce_addr = (unsigned long)kmap_atomic(sg_page((&bounce_sgl[j])), KM_IRQ0);
#else
					bounce_addr = (unsigned long)kmap_atomic(bounce_sgl[j].page, KM_IRQ0);
#endif
				}
			}
			else if (destlen == 0 && i == orig_sgl_count -1) // unmap the last bounce that is < PAGE_SIZE
			{
				kunmap_atomic((void*)bounce_addr, KM_IRQ0);
			}
		}

		// unmap the destination page (subtract the offset added above)
		kunmap_atomic((void*)(dest_addr - orig_sgl[i].offset), KM_IRQ0);
	}

	local_irq_restore(flags);

	return total_copied;
}
736
737
/*++

Name: storvsc_queuecommand()

Desc: Initiate command processing.  Builds a STORVSC_REQUEST from the
      scsi_cmnd (bouncing the scatterlist if it has holes the host
      cannot describe) and hands it to the VSC via OnIORequest.  Returns
      0 on success or a SCSI_MLQUEUE_* busy code so the midlayer retries
      later; a retried command is recognized via host_scribble and
      resubmitted without being rebuilt.

--*/
static int storvsc_queuecommand(struct scsi_cmnd *scmnd, void (*done)(struct scsi_cmnd *))
{
	int ret=0;
	struct host_device_context *host_device_ctx = (struct host_device_context*)scmnd->device->host->hostdata;
	struct device_context *device_ctx=host_device_ctx->device_ctx;
	struct driver_context *driver_ctx = driver_to_driver_context(device_ctx->device.driver);
	struct storvsc_driver_context *storvsc_drv_ctx = (struct storvsc_driver_context*)driver_ctx;
	STORVSC_DRIVER_OBJECT* storvsc_drv_obj = &storvsc_drv_ctx->drv_obj;

	STORVSC_REQUEST *request;
	struct storvsc_cmd_request *cmd_request;
	unsigned int request_size=0;
	int i;
	struct scatterlist *sgl;

	DPRINT_ENTER(STORVSC_DRV);

#ifdef KERNEL_2_6_27
	DPRINT_DBG(STORVSC_DRV, "scmnd %p dir %d, use_sg %d buf %p len %d queue depth %d tagged %d",
		scmnd,
		scmnd->sc_data_direction,
		scsi_sg_count(scmnd),
		scsi_sglist(scmnd),
		scsi_bufflen(scmnd),
		scmnd->device->queue_depth,
		scmnd->device->tagged_supported);
#else
	DPRINT_DBG(STORVSC_DRV, "scmnd %p dir %d, use_sg %d buf %p len %d queue depth %d tagged %d",
		scmnd,
		scmnd->sc_data_direction,
		scmnd->use_sg,
		scmnd->request_buffer,
		scmnd->request_bufflen,
		scmnd->device->queue_depth,
		scmnd->device->tagged_supported);
#endif

	// If retrying, no need to prep the cmd (host_scribble still holds
	// the fully built cmd_request from the previous attempt)
	if (scmnd->host_scribble)
	{
		ASSERT(scmnd->scsi_done != NULL);

		cmd_request = (struct storvsc_cmd_request* )scmnd->host_scribble;
		DPRINT_INFO(STORVSC_DRV, "retrying scmnd %p cmd_request %p", scmnd, cmd_request);

		goto retry_request;
	}

	ASSERT(scmnd->scsi_done == NULL);
	ASSERT(scmnd->host_scribble == NULL);

	scmnd->scsi_done = done;

	request_size = sizeof(struct storvsc_cmd_request);

	cmd_request = kmem_cache_alloc(host_device_ctx->request_pool, GFP_ATOMIC);
	if (!cmd_request)
	{
		DPRINT_ERR(STORVSC_DRV, "scmnd (%p) - unable to allocate storvsc_cmd_request...marking queue busy", scmnd);

		scmnd->scsi_done = NULL;
		return SCSI_MLQUEUE_DEVICE_BUSY;
	}

	// Setup the cmd request
	cmd_request->bounce_sgl_count = 0;
	cmd_request->bounce_sgl = NULL;
	cmd_request->cmd = scmnd;

	scmnd->host_scribble = (unsigned char*)cmd_request;

	request = &cmd_request->request;

	// The VSC extension buffer sits directly after the cmd_request in
	// the pool object (see struct storvsc_cmd_request)
	request->Extension = (void*)((unsigned long)cmd_request + request_size);
	DPRINT_DBG(STORVSC_DRV, "req %p size %d ext %d", request, request_size, storvsc_drv_obj->RequestExtSize);

	// Build the SRB
	switch(scmnd->sc_data_direction)
	{
	case DMA_TO_DEVICE:
		request->Type = WRITE_TYPE;
		break;
	case DMA_FROM_DEVICE:
		request->Type = READ_TYPE;
		break;
	default:
		request->Type = UNKNOWN_TYPE;
		break;
	}

	request->OnIOCompletion = storvsc_commmand_completion;
	request->Context = cmd_request;//scmnd;

	//request->PortId = scmnd->device->channel;
	request->Host = host_device_ctx->port;
	request->Bus = scmnd->device->channel;
	request->TargetId = scmnd->device->id;
	request->LunId = scmnd->device->lun;

	ASSERT(scmnd->cmd_len <= 16);
	request->CdbLen = scmnd->cmd_len;
	request->Cdb = scmnd->cmnd;

	request->SenseBuffer = scmnd->sense_buffer;
	request->SenseBufferSize = SCSI_SENSE_BUFFERSIZE;


	// Describe the data buffer as a page-frame list.  Three cases:
	// scatterlist (possibly bounced), single linear buffer, no data.
#ifdef KERNEL_2_6_27
	request->DataBuffer.Length = scsi_bufflen(scmnd);
	if (scsi_sg_count(scmnd))
#else
	request->DataBuffer.Length = scmnd->request_bufflen;
	if (scmnd->use_sg)
#endif
	{
#ifdef KERNEL_2_6_27
		sgl = (struct scatterlist*)scsi_sglist(scmnd);
#else
		sgl = (struct scatterlist*)(scmnd->request_buffer);
#endif

		// check if we need to bounce the sgl (entries with holes
		// cannot be expressed in the PFN array below)
#ifdef KERNEL_2_6_27
		if (do_bounce_buffer(sgl, scsi_sg_count(scmnd)) != -1)
#else
		if (do_bounce_buffer(sgl, scmnd->use_sg) != -1)
#endif
		{
			DPRINT_INFO(STORVSC_DRV, "need to bounce buffer for this scmnd %p", scmnd);
#ifdef KERNEL_2_6_27
			cmd_request->bounce_sgl = create_bounce_buffer(sgl, scsi_sg_count(scmnd), scsi_bufflen(scmnd));
#else
			cmd_request->bounce_sgl = create_bounce_buffer(
				sgl,
				scmnd->use_sg, scmnd->request_bufflen);
#endif
			if (!cmd_request->bounce_sgl)
			{
				DPRINT_ERR(STORVSC_DRV, "unable to create bounce buffer for this scmnd %p", scmnd);

				scmnd->scsi_done = NULL;
				scmnd->host_scribble = NULL;
				kmem_cache_free(host_device_ctx->request_pool, cmd_request);

				return SCSI_MLQUEUE_HOST_BUSY;
			}

#ifdef KERNEL_2_6_27
			cmd_request->bounce_sgl_count = ALIGN_UP(scsi_bufflen(scmnd), PAGE_SIZE) >> PAGE_SHIFT;
#else
			cmd_request->bounce_sgl_count = ALIGN_UP(scmnd->request_bufflen, PAGE_SIZE) >> PAGE_SHIFT;
#endif

			//printk("bouncing buffer allocated %p original buffer %p\n", bounce_sgl, sgl);
			//printk("copy_to_bounce_buffer\n");
			// FIXME: We can optimize on reads by just skipping this
#ifdef KERNEL_2_6_27
			copy_to_bounce_buffer(sgl, cmd_request->bounce_sgl, scsi_sg_count(scmnd));
#else
			copy_to_bounce_buffer(sgl, cmd_request->bounce_sgl, scmnd->use_sg);
#endif

			// describe the bounce pages to the host instead
			sgl = cmd_request->bounce_sgl;
		}

		request->DataBuffer.Offset = sgl[0].offset;

#ifdef KERNEL_2_6_27
		for (i = 0; i < scsi_sg_count(scmnd); i++ )
#else
		for (i = 0; i < scmnd->use_sg; i++ )
#endif
		{
			DPRINT_DBG(STORVSC_DRV, "sgl[%d] len %d offset %d \n", i, sgl[i].length, sgl[i].offset);
#ifdef KERNEL_2_6_27
			request->DataBuffer.PfnArray[i] = page_to_pfn(sg_page((&sgl[i])));
#else
			request->DataBuffer.PfnArray[i] = page_to_pfn(sgl[i].page);
#endif
		}
	}

	// Single linear buffer: must fit within one page (see ASSERT)
#ifdef KERNEL_2_6_27
	else if (scsi_sglist(scmnd))
	{
		ASSERT(scsi_bufflen(scmnd) <= PAGE_SIZE);
		request->DataBuffer.Offset = virt_to_phys(scsi_sglist(scmnd)) & (PAGE_SIZE-1);
		request->DataBuffer.PfnArray[0] = virt_to_phys(scsi_sglist(scmnd)) >> PAGE_SHIFT;
	}
	else
	{
		ASSERT(scsi_bufflen(scmnd) == 0);
	}
#else
	else if (scmnd->request_buffer)
	{
		ASSERT(scmnd->request_bufflen <= PAGE_SIZE);
		request->DataBuffer.Offset = virt_to_phys(scmnd->request_buffer) & (PAGE_SIZE-1);
		request->DataBuffer.PfnArray[0] = virt_to_phys(scmnd->request_buffer) >> PAGE_SHIFT;
	}
	else
	{
		ASSERT(scmnd->request_bufflen == 0);
	}
#endif

retry_request:

	// Invokes the vsc to start an IO
	ret = storvsc_drv_obj->OnIORequest(&device_ctx->device_obj, &cmd_request->request);
	if (ret == -1) // no more space
	{
		DPRINT_ERR(STORVSC_DRV, "scmnd (%p) - queue FULL...marking queue busy", scmnd);

		// Undo the bounce/alloc work so the midlayer can resubmit
		// the command from scratch later
		if (cmd_request->bounce_sgl_count)
		{
			// FIXME: We can optimize on writes by just skipping this
#ifdef KERNEL_2_6_27
			copy_from_bounce_buffer(scsi_sglist(scmnd), cmd_request->bounce_sgl, scsi_sg_count(scmnd));
#else
			copy_from_bounce_buffer(
				scmnd->request_buffer,
				cmd_request->bounce_sgl,
				scmnd->use_sg);
#endif
			destroy_bounce_buffer(cmd_request->bounce_sgl, cmd_request->bounce_sgl_count);
		}

		kmem_cache_free(host_device_ctx->request_pool, cmd_request);

		scmnd->scsi_done = NULL;
		scmnd->host_scribble = NULL;

		ret = SCSI_MLQUEUE_DEVICE_BUSY;
	}

	DPRINT_EXIT(STORVSC_DRV);

	return ret;
}
985
#ifdef KERNEL_2_6_27
// bvec merge callback installed by storvsc_device_configure().  On
// 2.6.27 the needed checks are done by the caller, so accept everything.
static int storvsc_merge_bvec(struct request_queue *q, struct bvec_merge_data *bmd, struct bio_vec *bvec)
{
	return bvec->bv_len; //checking done by caller.
}
#else
// bvec merge callback: reject additions that would create a "hole" in
// the middle of a multi-page bio, since such scatterlists would later
// force a bounce-buffer copy (see do_bounce_buffer()).  Returning the
// bvec length accepts the addition; returning 0 rejects it.
static int storvsc_merge_bvec(struct request_queue *q, struct bio *bio, struct bio_vec *bvec)
{
	// Check if we are adding a new bvec
	if (bio->bi_vcnt > 0)
	{
		//printk("storvsc_merge_bvec() - cnt %u offset %u len %u\n", bio->bi_vcnt, bvec->bv_offset, bvec->bv_len);

		struct bio_vec *prev = &bio->bi_io_vec[bio->bi_vcnt - 1];
		if (bvec == prev)
			return bvec->bv_len; // success - growing the existing tail bvec

		// Adding new bvec. Make sure the prev one is a complete page
		if (prev->bv_len == PAGE_SIZE && prev->bv_offset == 0)
		{
			return bvec->bv_len; // success
		}
		else
		{
			// Dont reject if the new bvec starts off from the prev one since
			// they will be merged into 1 bvec or blk_rq_map_sg() will merge them into 1 sg element
			if ((bvec->bv_page == prev->bv_page) &&
				(bvec->bv_offset == prev->bv_offset + prev->bv_len))
			{
				return bvec->bv_len; // success
			}
			else
			{
				DPRINT_INFO(STORVSC_DRV, "detected holes in bio request (%p) - cnt %u offset %u len %u", bio, bio->bi_vcnt, bvec->bv_offset, bvec->bv_len);
				return 0; // dont add the bvec to this bio since we dont allow holes in the middle of a multi-pages bio
			}
		}
	}

	return bvec->bv_len; // success - first bvec of the bio
}

#endif
1030
/*++

Name: storvsc_device_alloc()

Desc: slave_alloc hook - called by the SCSI midlayer when a new
      scsi_device is created for this host, before it is configured.
      (The original header here wrongly named storvsc_device_configure.)

--*/
static int storvsc_device_alloc(struct scsi_device *sdevice)
{
#ifdef KERNEL_2_6_5
#else
	DPRINT_DBG(STORVSC_DRV, "sdev (%p) - setting device flag to %d", sdevice, BLIST_SPARSELUN);
	// This enables luns to be located sparsely. Otherwise, we may not discovered them.
	sdevice->sdev_bflags |= BLIST_SPARSELUN | BLIST_LARGELUN;
#endif
	return 0;
}
1048
1049static int storvsc_device_configure(struct scsi_device *sdevice)
1050{
1051 DPRINT_INFO(STORVSC_DRV, "sdev (%p) - curr queue depth %d", sdevice, sdevice->queue_depth);
1052
1053 DPRINT_INFO(STORVSC_DRV, "sdev (%p) - setting queue depth to %d", sdevice, STORVSC_MAX_IO_REQUESTS);
1054 scsi_adjust_queue_depth(sdevice, MSG_SIMPLE_TAG, STORVSC_MAX_IO_REQUESTS);
1055
1056 DPRINT_INFO(STORVSC_DRV, "sdev (%p) - setting max segment size to %d", sdevice, PAGE_SIZE);
1057 blk_queue_max_segment_size(sdevice->request_queue, PAGE_SIZE);
1058
1059 DPRINT_INFO(STORVSC_DRV, "sdev (%p) - adding merge bio vec routine", sdevice);
1060 blk_queue_merge_bvec(sdevice->request_queue, storvsc_merge_bvec);
1061
1062 blk_queue_bounce_limit(sdevice->request_queue, BLK_BOUNCE_ANY);
1063 //sdevice->timeout = (2000 * HZ);//(75 * HZ);
1064
1065 return 0;
1066}
1067
1068/*++
1069
1070Name: storvsc_host_reset_handler()
1071
1072Desc: Reset the scsi HBA
1073
1074--*/
1075static int storvsc_host_reset_handler(struct scsi_cmnd *scmnd)
1076{
1077 int ret=SUCCESS;
1078 struct host_device_context *host_device_ctx = (struct host_device_context*)scmnd->device->host->hostdata;
1079 struct device_context *device_ctx = host_device_ctx->device_ctx;
1080 struct driver_context *driver_ctx = driver_to_driver_context(device_ctx->device.driver);
1081 struct storvsc_driver_context *storvsc_drv_ctx = (struct storvsc_driver_context*)driver_ctx;
1082
1083 STORVSC_DRIVER_OBJECT *storvsc_drv_obj = &storvsc_drv_ctx->drv_obj;
1084
1085 DPRINT_ENTER(STORVSC_DRV);
1086
1087 DPRINT_INFO(STORVSC_DRV, "sdev (%p) dev obj (%p) - host resetting...", scmnd->device, &device_ctx->device_obj);
1088
1089 // Invokes the vsc to reset the host/bus
1090 ASSERT(storvsc_drv_obj->OnHostReset);
1091 ret = storvsc_drv_obj->OnHostReset(&device_ctx->device_obj);
1092 if (ret != 0)
1093 {
1094 DPRINT_EXIT(STORVSC_DRV);
1095 return ret;
1096 }
1097
1098 DPRINT_INFO(STORVSC_DRV, "sdev (%p) dev obj (%p) - host reseted", scmnd->device, &device_ctx->device_obj);
1099
1100 DPRINT_EXIT(STORVSC_DRV);
1101
1102 return ret;
1103}
1104
1105/*++
1106
1107Name: storvsc_host_rescan
1108
1109Desc: Rescan the scsi HBA
1110
1111--*/
#if defined(KERNEL_2_6_5) || defined(KERNEL_2_6_9)
#else

#ifdef KERNEL_2_6_27
// Work-queue callback: add newly reported luns and remove vanished ones.
// NOTE(review): the cast below is only valid because host_rescan_work is
// the FIRST field of struct host_device_context (see its declaration).
static void storvsc_host_rescan_callback(struct work_struct *work)
{
	DEVICE_OBJECT* device_obj =
		&((struct host_device_context*)work)->device_ctx->device_obj;
#else
// Pre-2.6.27 workqueue callbacks receive the context pointer directly.
static void storvsc_host_rescan_callback(void* context)
{

	DEVICE_OBJECT* device_obj = (DEVICE_OBJECT*)context;
#endif
	struct device_context* device_ctx = to_device_context(device_obj);
	struct Scsi_Host *host = (struct Scsi_Host *)device_ctx->device.driver_data;
	struct scsi_device *sdev;
	struct host_device_context *host_device_ctx;
	struct scsi_device **sdevs_remove_list;	// devices to delete, collected first
	unsigned int sdevs_count=0;
	unsigned int found;
	unsigned int i;
	unsigned int lun_count=0;
	unsigned int *lun_list;			// luns currently reported by the target

	DPRINT_ENTER(STORVSC_DRV);

	host_device_ctx = (struct host_device_context*)host->hostdata;
	// GFP_ATOMIC - presumably this can run with locks held or in a
	// context that must not sleep; TODO confirm against the workqueue path.
	lun_list = kzalloc(sizeof(unsigned int)*STORVSC_MAX_LUNS_PER_TARGET, GFP_ATOMIC);
	if (!lun_list)
	{
		DPRINT_ERR(STORVSC_DRV, "unable to allocate lun list");
		return;
	}

	sdevs_remove_list = kzalloc(sizeof(void*)*STORVSC_MAX_LUNS_PER_TARGET, GFP_ATOMIC);
	if (!sdevs_remove_list)
	{
		kfree(lun_list);
		DPRINT_ERR(STORVSC_DRV, "unable to allocate lun remove list");
		return;
	}

	// NOTE(review): extra args here have no matching format specifiers;
	// harmless but they are never printed.
	DPRINT_INFO(STORVSC_DRV, "rescanning host for new scsi devices...", device_obj, host_device_ctx->target, host_device_ctx->path);

	// Rescan for new device
	scsi_scan_target(&host->shost_gendev, host_device_ctx->path, host_device_ctx->target, SCAN_WILD_CARD, 1);

	DPRINT_INFO(STORVSC_DRV, "rescanning host for removed scsi device...");

	// Use the 1st device to send the report luns cmd
	// (storvsc_report_luns is defined later in this file - relies on an
	// implicit declaration here; NOTE(review): a forward prototype would be cleaner)
	shost_for_each_device(sdev, host)
	{
		lun_count=STORVSC_MAX_LUNS_PER_TARGET;
		storvsc_report_luns(sdev, lun_list, &lun_count);

		DPRINT_INFO(STORVSC_DRV, "report luns on scsi device (%p) found %u luns ", sdev, lun_count);
		DPRINT_INFO(STORVSC_DRV, "existing luns on scsi device (%p) host (%d)", sdev, host->host_no);

		// Drop the reference held by the iterator before breaking out early
		scsi_device_put(sdev);
		break;
	}

	for (i=0; i<lun_count; i++)
	{
		DPRINT_INFO(STORVSC_DRV, "%d) lun %u", i, lun_list[i]);
	}

	// Rescan for devices that may have been removed.
	// We do not have to worry that new devices may have been added since
	// this callback is serialized by the workqueue ie add/remove are done here.
	shost_for_each_device(sdev, host)
	{
		// See if this device is still here
		found = 0;
		for (i=0; i<lun_count; i++)
		{
			if (sdev->lun == lun_list[i])
			{
				found = 1;
				break;
			}
		}
		// Not in the reported lun list - mark for removal (removal is
		// deferred so we do not mutate the list while iterating it)
		if (!found)
		{
			DPRINT_INFO(STORVSC_DRV, "lun (%u) does not exists", sdev->lun);
			sdevs_remove_list[sdevs_count++] = sdev;
		}
	}

	// Now remove the devices
	for (i=0; i< sdevs_count; i++)
	{
		DPRINT_INFO(STORVSC_DRV, "removing scsi device (%p) lun (%u)...",
			sdevs_remove_list[i], sdevs_remove_list[i]->lun);

		// make sure it is not removed from underneath us
		if (!scsi_device_get(sdevs_remove_list[i]))
		{
			scsi_remove_device(sdevs_remove_list[i]);
			scsi_device_put(sdevs_remove_list[i]);
		}
	}

	DPRINT_INFO(STORVSC_DRV, "rescan completed on dev obj (%p) target (%u) bus (%u)", device_obj, host_device_ctx->target, host_device_ctx->path);

	kfree(lun_list);
	kfree(sdevs_remove_list);

	DPRINT_EXIT(STORVSC_DRV);
}
1223
1224static int storvsc_report_luns(struct scsi_device *sdev, unsigned int luns[], unsigned int *lun_count)
1225{
1226 int i,j;
1227 unsigned int lun=0;
1228 unsigned int num_luns;
1229 int result;
1230 unsigned char *data;
1231#if defined(KERNEL_2_6_5) || defined(KERNEL_2_6_9)
1232#else
1233 struct scsi_sense_hdr sshdr;
1234#endif
1235 unsigned char cmd[16]={0};
1236 unsigned int report_len = 8*(STORVSC_MAX_LUNS_PER_TARGET+1); // Add 1 to cover the report_lun header
1237 unsigned long long *report_luns;
1238 const unsigned int in_lun_count = *lun_count;
1239
1240 *lun_count = 0;
1241
1242 report_luns = kzalloc(report_len, GFP_ATOMIC);
1243 if (!report_luns)
1244 {
1245 return -ENOMEM;
1246 }
1247
1248 cmd[0] = REPORT_LUNS;
1249
1250 // cmd length
1251 *(unsigned int*)&cmd[6] = cpu_to_be32(report_len);
1252
1253 result = scsi_execute_req(sdev, cmd, DMA_FROM_DEVICE, (unsigned char*)report_luns, report_len, &sshdr, 30*HZ, 3);
1254 if (result != 0)
1255 {
1256 kfree(report_luns);
1257 return -EBUSY;
1258 }
1259
1260 // get the length from the first four bytes
1261 report_len = be32_to_cpu(*(unsigned int*)&report_luns[0]);
1262
1263 num_luns = (report_len / sizeof(unsigned long long));
1264 if (num_luns > in_lun_count)
1265 {
1266 kfree(report_luns);
1267 return -EINVAL;
1268 }
1269
1270 *lun_count = num_luns;
1271
1272 DPRINT_DBG(STORVSC_DRV, "report luns on scsi device (%p) found %u luns ", sdev, num_luns);
1273
1274 // lun id starts at 1
1275 for (i=1; i< num_luns+1; i++)
1276 {
1277 lun = 0;
1278 data = (unsigned char*)&report_luns[i];
1279 for (j = 0; j < sizeof(lun); j += 2)
1280 {
1281 lun = lun | (((data[j] << 8) | data[j + 1]) << (j * 8));
1282 }
1283
1284 luns[i-1] = lun;
1285 }
1286
1287 kfree(report_luns);
1288 return 0;
1289}
1290#endif // KERNEL_2_6_9
1291
1292static void storvsc_host_rescan(DEVICE_OBJECT* device_obj)
1293{
1294 struct device_context* device_ctx = to_device_context(device_obj);
1295 struct Scsi_Host *host = (struct Scsi_Host *)device_ctx->device.driver_data;
1296 struct host_device_context *host_device_ctx;
1297
1298 DPRINT_ENTER(STORVSC_DRV);
1299#if defined(KERNEL_2_6_5) || defined(KERNEL_2_6_9)
1300 DPRINT_ERR(STORVSC_DRV, "rescan not supported on 2.6.9 kernels!! You will need to reboot if you have added or removed the scsi lun device");
1301#else
1302
1303 host_device_ctx = (struct host_device_context*)host->hostdata;
1304
1305 DPRINT_INFO(STORVSC_DRV, "initiating rescan on dev obj (%p) target (%u) bus (%u)...", device_obj, host_device_ctx->target, host_device_ctx->path);
1306
1307 // We need to queue this since the scanning may block and the caller may be in an intr context
1308 //scsi_queue_work(host, &host_device_ctx->host_rescan_work);
1309 schedule_work(&host_device_ctx->host_rescan_work);
1310#endif // KERNEL_2_6_9
1311 DPRINT_EXIT(STORVSC_DRV);
1312}
1313
1314static int storvsc_get_chs(struct scsi_device *sdev, struct block_device * bdev, sector_t capacity, int *info)
1315{
1316 sector_t total_sectors = capacity;
1317 sector_t cylinder_times_heads=0;
1318 sector_t temp=0;
1319
1320 int sectors_per_track=0;
1321 int heads=0;
1322 int cylinders=0;
1323 int rem=0;
1324
1325 if (total_sectors > (65535 * 16 * 255)) {
1326 total_sectors = (65535 * 16 * 255);
1327 }
1328
1329 if (total_sectors >= (65535 * 16 * 63)) {
1330 sectors_per_track = 255;
1331 heads = 16;
1332
1333 cylinder_times_heads = total_sectors;
1334 rem = sector_div(cylinder_times_heads, sectors_per_track); // sector_div stores the quotient in cylinder_times_heads
1335 }
1336 else
1337 {
1338 sectors_per_track = 17;
1339
1340 cylinder_times_heads = total_sectors;
1341 rem = sector_div(cylinder_times_heads, sectors_per_track); // sector_div stores the quotient in cylinder_times_heads
1342
1343 temp = cylinder_times_heads + 1023;
1344 rem = sector_div(temp, 1024); // sector_div stores the quotient in temp
1345
1346 heads = temp;
1347
1348 if (heads < 4) {
1349 heads = 4;
1350 }
1351
1352 if (cylinder_times_heads >= (heads * 1024) || (heads > 16)) {
1353 sectors_per_track = 31;
1354 heads = 16;
1355
1356 cylinder_times_heads = total_sectors;
1357 rem = sector_div(cylinder_times_heads, sectors_per_track); // sector_div stores the quotient in cylinder_times_heads
1358 }
1359
1360 if (cylinder_times_heads >= (heads * 1024)) {
1361 sectors_per_track = 63;
1362 heads = 16;
1363
1364 cylinder_times_heads = total_sectors;
1365 rem = sector_div(cylinder_times_heads, sectors_per_track); // sector_div stores the quotient in cylinder_times_heads
1366 }
1367 }
1368
1369 temp = cylinder_times_heads;
1370 rem = sector_div(temp, heads); // sector_div stores the quotient in temp
1371 cylinders = temp;
1372
1373 info[0] = heads;
1374 info[1] = sectors_per_track;
1375 info[2] = cylinders;
1376
1377 DPRINT_INFO(STORVSC_DRV, "CHS (%d, %d, %d)", cylinders, heads, sectors_per_track);
1378
1379 return 0;
1380}
1381
// Module license declaration (affects tainting and symbol availability)
MODULE_LICENSE("GPL");

1384static int __init storvsc_init(void)
1385{
1386 int ret;
1387
1388 DPRINT_ENTER(STORVSC_DRV);
1389
1390 DPRINT_INFO(STORVSC_DRV, "Storvsc initializing....");
1391
1392 ret = storvsc_drv_init(StorVscInitialize);
1393
1394 DPRINT_EXIT(STORVSC_DRV);
1395
1396 return ret;
1397}
1398
1399static void __exit storvsc_exit(void)
1400{
1401 DPRINT_ENTER(STORVSC_DRV);
1402
1403 storvsc_drv_exit();
1404
1405 DPRINT_ENTER(STORVSC_DRV);
1406}
1407
// Ring buffer size is tunable at module load time; S_IRUGO exposes it
// read-only in sysfs.
module_param(storvsc_ringbuffer_size, int, S_IRUGO);

module_init(storvsc_init);
module_exit(storvsc_exit);

// eof