Commit | Line | Data |
---|---|---|
bef4a34a | 1 | /* |
bef4a34a HJ |
2 | * Copyright (c) 2009, Microsoft Corporation. |
3 | * | |
4 | * This program is free software; you can redistribute it and/or modify it | |
5 | * under the terms and conditions of the GNU General Public License, | |
6 | * version 2, as published by the Free Software Foundation. | |
7 | * | |
8 | * This program is distributed in the hope it will be useful, but WITHOUT | |
9 | * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or | |
10 | * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for | |
11 | * more details. | |
12 | * | |
13 | * You should have received a copy of the GNU General Public License along with | |
14 | * this program; if not, write to the Free Software Foundation, Inc., 59 Temple | |
15 | * Place - Suite 330, Boston, MA 02111-1307 USA. | |
16 | * | |
17 | * Authors: | |
18 | * Haiyang Zhang <haiyangz@microsoft.com> | |
19 | * Hank Janssen <hjanssen@microsoft.com> | |
bef4a34a | 20 | */ |
bef4a34a | 21 | #include <linux/init.h> |
5a0e3ad6 | 22 | #include <linux/slab.h> |
bef4a34a HJ |
23 | #include <linux/module.h> |
24 | #include <linux/device.h> | |
25 | #include <linux/blkdev.h> | |
bef4a34a HJ |
26 | #include <scsi/scsi.h> |
27 | #include <scsi/scsi_cmnd.h> | |
28 | #include <scsi/scsi_host.h> | |
29 | #include <scsi/scsi_device.h> | |
30 | #include <scsi/scsi_tcq.h> | |
31 | #include <scsi/scsi_eh.h> | |
32 | #include <scsi/scsi_devinfo.h> | |
bef4a34a | 33 | #include <scsi/scsi_dbg.h> |
e3fe0bb6 | 34 | #include "hv_api.h" |
645954c5 | 35 | #include "logging.h" |
2d82f6c7 | 36 | #include "version_info.h" |
870cde80 | 37 | #include "vmbus.h" |
bb969793 | 38 | #include "storvsc_api.h" |
af3043c6 S |
39 | #include "vstorage.h" |
40 | #include "channel.h" | |
bef4a34a | 41 | |
bef4a34a | 42 | |
6dec2442 S |
43 | static const char *g_driver_name = "storvsc"; |
44 | ||
45 | /* {ba6163d9-04a1-4d29-b605-72e2ffb1dc7f} */ | |
46 | static const struct hv_guid gStorVscDeviceType = { | |
47 | .data = { | |
48 | 0xd9, 0x63, 0x61, 0xba, 0xa1, 0x04, 0x29, 0x4d, | |
49 | 0xb6, 0x05, 0x72, 0xe2, 0xff, 0xb1, 0xdc, 0x7f | |
50 | } | |
51 | }; | |
52 | ||
bef4a34a | 53 | struct host_device_context { |
ff568d3a GKH |
54 | /* must be 1st field |
55 | * FIXME this is a bug */ | |
ff568d3a | 56 | /* point back to our device context */ |
6bad88da | 57 | struct hv_device *device_ctx; |
ff568d3a GKH |
58 | struct kmem_cache *request_pool; |
59 | unsigned int port; | |
60 | unsigned char path; | |
61 | unsigned char target; | |
bef4a34a HJ |
62 | }; |
63 | ||
64 | struct storvsc_cmd_request { | |
ff568d3a GKH |
65 | struct list_head entry; |
66 | struct scsi_cmnd *cmd; | |
bef4a34a HJ |
67 | |
68 | unsigned int bounce_sgl_count; | |
ff568d3a | 69 | struct scatterlist *bounce_sgl; |
bef4a34a | 70 | |
0b3f6834 | 71 | struct hv_storvsc_request request; |
bef4a34a HJ |
72 | }; |
73 | ||
bef4a34a | 74 | |
6dec2442 S |
75 | /* |
76 | * stor_vsc_initialize - Main entry point | |
77 | */ | |
2b8b3582 | 78 | static int stor_vsc_initialize(struct hv_driver *driver) |
6dec2442 S |
79 | { |
80 | struct storvsc_driver_object *stor_driver; | |
81 | ||
ced01b0d | 82 | stor_driver = hvdr_to_stordr(driver); |
6dec2442 | 83 | |
d7a1bdb9 S |
84 | DPRINT_DBG(STORVSC, |
85 | "sizeof(struct hv_storvsc_request)=%zd " | |
6dec2442 S |
86 | "sizeof(struct vstor_packet)=%zd, " |
87 | "sizeof(struct vmscsi_request)=%zd", | |
88 | sizeof(struct hv_storvsc_request), | |
6dec2442 S |
89 | sizeof(struct vstor_packet), |
90 | sizeof(struct vmscsi_request)); | |
91 | ||
92 | /* Make sure we are at least 2 pages since 1 page is used for control */ | |
93 | ||
94 | driver->name = g_driver_name; | |
95 | memcpy(&driver->dev_type, &gStorVscDeviceType, | |
96 | sizeof(struct hv_guid)); | |
97 | ||
6dec2442 S |
98 | |
99 | /* | |
100 | * Divide the ring buffer data size (which is 1 page less | |
101 | * than the ring buffer size since that page is reserved for | |
102 | * the ring buffer indices) by the max request size (which is | |
103 | * vmbus_channel_packet_multipage_buffer + struct vstor_packet + u64) | |
104 | */ | |
105 | stor_driver->max_outstanding_req_per_channel = | |
106 | ((stor_driver->ring_buffer_size - PAGE_SIZE) / | |
107 | ALIGN(MAX_MULTIPAGE_BUFFER_PACKET + | |
108 | sizeof(struct vstor_packet) + sizeof(u64), | |
109 | sizeof(u64))); | |
110 | ||
111 | DPRINT_INFO(STORVSC, "max io %u, currently %u\n", | |
112 | stor_driver->max_outstanding_req_per_channel, | |
113 | STORVSC_MAX_IO_REQUESTS); | |
114 | ||
115 | /* Setup the dispatch table */ | |
116 | stor_driver->base.dev_add = stor_vsc_on_device_add; | |
117 | stor_driver->base.dev_rm = stor_vsc_on_device_remove; | |
118 | stor_driver->base.cleanup = stor_vsc_on_cleanup; | |
119 | ||
120 | stor_driver->on_io_request = stor_vsc_on_io_request; | |
121 | ||
122 | return 0; | |
123 | } | |
124 | ||
454f18a9 | 125 | /* Static decl */ |
bef4a34a | 126 | static int storvsc_probe(struct device *dev); |
f281233d | 127 | static int storvsc_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *scmnd); |
bef4a34a HJ |
128 | static int storvsc_device_alloc(struct scsi_device *); |
129 | static int storvsc_device_configure(struct scsi_device *); | |
130 | static int storvsc_host_reset_handler(struct scsi_cmnd *scmnd); | |
bef4a34a HJ |
131 | static int storvsc_remove(struct device *dev); |
132 | ||
ff568d3a GKH |
133 | static struct scatterlist *create_bounce_buffer(struct scatterlist *sgl, |
134 | unsigned int sg_count, | |
135 | unsigned int len); | |
136 | static void destroy_bounce_buffer(struct scatterlist *sgl, | |
137 | unsigned int sg_count); | |
bef4a34a | 138 | static int do_bounce_buffer(struct scatterlist *sgl, unsigned int sg_count); |
ff568d3a GKH |
139 | static unsigned int copy_from_bounce_buffer(struct scatterlist *orig_sgl, |
140 | struct scatterlist *bounce_sgl, | |
141 | unsigned int orig_sgl_count); | |
142 | static unsigned int copy_to_bounce_buffer(struct scatterlist *orig_sgl, | |
143 | struct scatterlist *bounce_sgl, | |
144 | unsigned int orig_sgl_count); | |
bef4a34a | 145 | |
ff568d3a GKH |
146 | static int storvsc_get_chs(struct scsi_device *sdev, struct block_device *bdev, |
147 | sector_t capacity, int *info); | |
bef4a34a HJ |
148 | |
149 | ||
150 | static int storvsc_ringbuffer_size = STORVSC_RING_BUFFER_SIZE; | |
3afc7cc3 SH |
151 | module_param(storvsc_ringbuffer_size, int, S_IRUGO); |
152 | MODULE_PARM_DESC(storvsc_ringbuffer_size, "Ring buffer size (bytes)"); | |
bef4a34a | 153 | |
454f18a9 | 154 | /* The one and only one */ |
4af27d70 | 155 | static struct storvsc_driver_object g_storvsc_drv; |
bef4a34a | 156 | |
454f18a9 | 157 | /* Scsi driver */ |
bef4a34a | 158 | static struct scsi_host_template scsi_driver = { |
ff568d3a GKH |
159 | .module = THIS_MODULE, |
160 | .name = "storvsc_host_t", | |
161 | .bios_param = storvsc_get_chs, | |
162 | .queuecommand = storvsc_queuecommand, | |
163 | .eh_host_reset_handler = storvsc_host_reset_handler, | |
164 | .slave_alloc = storvsc_device_alloc, | |
165 | .slave_configure = storvsc_device_configure, | |
166 | .cmd_per_lun = 1, | |
167 | /* 64 max_queue * 1 target */ | |
0686e4f4 | 168 | .can_queue = STORVSC_MAX_IO_REQUESTS*STORVSC_MAX_TARGETS, |
ff568d3a | 169 | .this_id = -1, |
454f18a9 | 170 | /* no use setting to 0 since ll_blk_rw reset it to 1 */ |
ff568d3a GKH |
171 | /* currently 32 */ |
172 | .sg_tablesize = MAX_MULTIPAGE_BUFFER_COUNT, | |
173 | /* | |
174 | * ENABLE_CLUSTERING allows mutiple physically contig bio_vecs to merge | |
175 | * into 1 sg element. If set, we must limit the max_segment_size to | |
176 | * PAGE_SIZE, otherwise we may get 1 sg element that represents | |
177 | * multiple | |
178 | */ | |
454f18a9 | 179 | /* physically contig pfns (ie sg[x].length > PAGE_SIZE). */ |
ff568d3a | 180 | .use_clustering = ENABLE_CLUSTERING, |
454f18a9 | 181 | /* Make sure we dont get a sg segment crosses a page boundary */ |
ff568d3a | 182 | .dma_boundary = PAGE_SIZE-1, |
bef4a34a HJ |
183 | }; |
184 | ||
185 | ||
3e189519 | 186 | /* |
ff568d3a GKH |
187 | * storvsc_drv_init - StorVsc driver initialization. |
188 | */ | |
db085777 | 189 | static int storvsc_drv_init(void) |
bef4a34a | 190 | { |
ff568d3a | 191 | int ret; |
4af27d70 S |
192 | struct storvsc_driver_object *storvsc_drv_obj = &g_storvsc_drv; |
193 | struct hv_driver *drv = &g_storvsc_drv.base; | |
bef4a34a | 194 | |
8a046024 | 195 | storvsc_drv_obj->ring_buffer_size = storvsc_ringbuffer_size; |
bef4a34a | 196 | |
454f18a9 | 197 | /* Callback to client driver to complete the initialization */ |
db085777 | 198 | stor_vsc_initialize(&storvsc_drv_obj->base); |
bef4a34a | 199 | |
150f9398 S |
200 | drv->priv = storvsc_drv_obj; |
201 | ||
ff568d3a | 202 | DPRINT_INFO(STORVSC_DRV, |
1e05d88e | 203 | "max outstanding reqs %u", |
8a046024 | 204 | storvsc_drv_obj->max_outstanding_req_per_channel); |
ff568d3a | 205 | |
8a046024 | 206 | if (storvsc_drv_obj->max_outstanding_req_per_channel < |
ff568d3a GKH |
207 | STORVSC_MAX_IO_REQUESTS) { |
208 | DPRINT_ERR(STORVSC_DRV, | |
209 | "The number of outstanding io requests (%d) " | |
210 | "is larger than that supported (%d) internally.", | |
211 | STORVSC_MAX_IO_REQUESTS, | |
8a046024 | 212 | storvsc_drv_obj->max_outstanding_req_per_channel); |
bef4a34a HJ |
213 | return -1; |
214 | } | |
215 | ||
150f9398 | 216 | drv->driver.name = storvsc_drv_obj->base.name; |
bef4a34a | 217 | |
150f9398 S |
218 | drv->driver.probe = storvsc_probe; |
219 | drv->driver.remove = storvsc_remove; | |
bef4a34a | 220 | |
454f18a9 | 221 | /* The driver belongs to vmbus */ |
150f9398 | 222 | ret = vmbus_child_driver_register(&drv->driver); |
bef4a34a | 223 | |
bef4a34a HJ |
224 | return ret; |
225 | } | |
226 | ||
af3043c6 | 227 | |
12cb12ef | 228 | static int stor_vsc_on_host_reset(struct hv_device *device) |
af3043c6 S |
229 | { |
230 | struct storvsc_device *stor_device; | |
d7a1bdb9 | 231 | struct hv_storvsc_request *request; |
af3043c6 | 232 | struct vstor_packet *vstor_packet; |
93958465 | 233 | int ret, t; |
af3043c6 S |
234 | |
235 | DPRINT_INFO(STORVSC, "resetting host adapter..."); | |
236 | ||
237 | stor_device = get_stor_device(device); | |
238 | if (!stor_device) { | |
239 | DPRINT_ERR(STORVSC, "unable to get stor device..." | |
240 | "device being destroyed?"); | |
241 | return -1; | |
242 | } | |
243 | ||
244 | request = &stor_device->reset_request; | |
245 | vstor_packet = &request->vstor_packet; | |
246 | ||
93958465 | 247 | init_completion(&request->wait_event); |
af3043c6 S |
248 | |
249 | vstor_packet->operation = VSTOR_OPERATION_RESET_BUS; | |
250 | vstor_packet->flags = REQUEST_COMPLETION_FLAG; | |
251 | vstor_packet->vm_srb.path_id = stor_device->path_id; | |
252 | ||
af3043c6 S |
253 | ret = vmbus_sendpacket(device->channel, vstor_packet, |
254 | sizeof(struct vstor_packet), | |
255 | (unsigned long)&stor_device->reset_request, | |
256 | VM_PKT_DATA_INBAND, | |
257 | VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED); | |
258 | if (ret != 0) { | |
259 | DPRINT_ERR(STORVSC, "Unable to send reset packet %p ret %d", | |
260 | vstor_packet, ret); | |
261 | goto cleanup; | |
262 | } | |
263 | ||
93958465 S |
264 | t = wait_for_completion_timeout(&request->wait_event, HZ); |
265 | if (t == 0) { | |
af3043c6 S |
266 | ret = -ETIMEDOUT; |
267 | goto cleanup; | |
268 | } | |
269 | ||
270 | DPRINT_INFO(STORVSC, "host adapter reset completed"); | |
271 | ||
272 | /* | |
273 | * At this point, all outstanding requests in the adapter | |
274 | * should have been flushed out and return to us | |
275 | */ | |
276 | ||
277 | cleanup: | |
278 | put_stor_device(device); | |
279 | return ret; | |
280 | } | |
281 | ||
bef4a34a HJ |
/*
 * driver_for_each_device() callback: record the first device visited in
 * *data and return non-zero so the iteration stops immediately.
 */
static int storvsc_drv_exit_cb(struct device *dev, void *data)
{
	*(struct device **)data = dev;
	return 1;	/* stop iterating */
}
288 | ||
bd1de709 | 289 | static void storvsc_drv_exit(void) |
bef4a34a | 290 | { |
4af27d70 S |
291 | struct storvsc_driver_object *storvsc_drv_obj = &g_storvsc_drv; |
292 | struct hv_driver *drv = &g_storvsc_drv.base; | |
ff568d3a | 293 | struct device *current_dev = NULL; |
2295ba2e | 294 | int ret; |
bef4a34a | 295 | |
ff568d3a | 296 | while (1) { |
bef4a34a HJ |
297 | current_dev = NULL; |
298 | ||
454f18a9 | 299 | /* Get the device */ |
150f9398 | 300 | ret = driver_for_each_device(&drv->driver, NULL, |
2295ba2e BP |
301 | (void *) ¤t_dev, |
302 | storvsc_drv_exit_cb); | |
303 | ||
304 | if (ret) | |
305 | DPRINT_WARN(STORVSC_DRV, | |
306 | "driver_for_each_device returned %d", ret); | |
bef4a34a HJ |
307 | |
308 | if (current_dev == NULL) | |
309 | break; | |
310 | ||
454f18a9 | 311 | /* Initiate removal from the top-down */ |
bef4a34a HJ |
312 | device_unregister(current_dev); |
313 | } | |
314 | ||
ca623ad3 HZ |
315 | if (storvsc_drv_obj->base.cleanup) |
316 | storvsc_drv_obj->base.cleanup(&storvsc_drv_obj->base); | |
bef4a34a | 317 | |
150f9398 | 318 | vmbus_child_driver_unregister(&drv->driver); |
bef4a34a HJ |
319 | return; |
320 | } | |
321 | ||
3e189519 | 322 | /* |
ff568d3a GKH |
323 | * storvsc_probe - Add a new device for this driver |
324 | */ | |
bef4a34a HJ |
325 | static int storvsc_probe(struct device *device) |
326 | { | |
ff568d3a | 327 | int ret; |
150f9398 S |
328 | struct hv_driver *drv = |
329 | drv_to_hv_drv(device->driver); | |
4af27d70 | 330 | struct storvsc_driver_object *storvsc_drv_obj = drv->priv; |
6bad88da | 331 | struct hv_device *device_obj = device_to_hv_device(device); |
bef4a34a HJ |
332 | struct Scsi_Host *host; |
333 | struct host_device_context *host_device_ctx; | |
9f0c7d2c | 334 | struct storvsc_device_info device_info; |
bef4a34a | 335 | |
ca623ad3 | 336 | if (!storvsc_drv_obj->base.dev_add) |
bef4a34a HJ |
337 | return -1; |
338 | ||
ff568d3a GKH |
339 | host = scsi_host_alloc(&scsi_driver, |
340 | sizeof(struct host_device_context)); | |
341 | if (!host) { | |
bef4a34a HJ |
342 | DPRINT_ERR(STORVSC_DRV, "unable to allocate scsi host object"); |
343 | return -ENOMEM; | |
344 | } | |
345 | ||
0883c52b | 346 | dev_set_drvdata(device, host); |
bef4a34a | 347 | |
ff568d3a | 348 | host_device_ctx = (struct host_device_context *)host->hostdata; |
bef4a34a HJ |
349 | memset(host_device_ctx, 0, sizeof(struct host_device_context)); |
350 | ||
351 | host_device_ctx->port = host->host_no; | |
6bad88da | 352 | host_device_ctx->device_ctx = device_obj; |
bef4a34a | 353 | |
bef4a34a | 354 | host_device_ctx->request_pool = |
6bad88da | 355 | kmem_cache_create(dev_name(&device_obj->device), |
1e05d88e | 356 | sizeof(struct storvsc_cmd_request), 0, |
ff568d3a GKH |
357 | SLAB_HWCACHE_ALIGN, NULL); |
358 | ||
359 | if (!host_device_ctx->request_pool) { | |
bef4a34a | 360 | scsi_host_put(host); |
bef4a34a HJ |
361 | return -ENOMEM; |
362 | } | |
363 | ||
8a046024 | 364 | device_info.port_number = host->host_no; |
454f18a9 | 365 | /* Call to the vsc driver to add the device */ |
ca623ad3 | 366 | ret = storvsc_drv_obj->base.dev_add(device_obj, |
ff568d3a GKH |
367 | (void *)&device_info); |
368 | if (ret != 0) { | |
bef4a34a HJ |
369 | DPRINT_ERR(STORVSC_DRV, "unable to add scsi vsc device"); |
370 | kmem_cache_destroy(host_device_ctx->request_pool); | |
371 | scsi_host_put(host); | |
bef4a34a HJ |
372 | return -1; |
373 | } | |
374 | ||
454f18a9 | 375 | /* host_device_ctx->port = device_info.PortNumber; */ |
8a046024 HJ |
376 | host_device_ctx->path = device_info.path_id; |
377 | host_device_ctx->target = device_info.target_id; | |
bef4a34a | 378 | |
ff568d3a GKH |
379 | /* max # of devices per target */ |
380 | host->max_lun = STORVSC_MAX_LUNS_PER_TARGET; | |
381 | /* max # of targets per channel */ | |
382 | host->max_id = STORVSC_MAX_TARGETS; | |
383 | /* max # of channels */ | |
384 | host->max_channel = STORVSC_MAX_CHANNELS - 1; | |
bef4a34a | 385 | |
454f18a9 | 386 | /* Register the HBA and start the scsi bus scan */ |
bef4a34a | 387 | ret = scsi_add_host(host, device); |
ff568d3a | 388 | if (ret != 0) { |
bef4a34a HJ |
389 | DPRINT_ERR(STORVSC_DRV, "unable to add scsi host device"); |
390 | ||
ca623ad3 | 391 | storvsc_drv_obj->base.dev_rm(device_obj); |
bef4a34a HJ |
392 | |
393 | kmem_cache_destroy(host_device_ctx->request_pool); | |
394 | scsi_host_put(host); | |
bef4a34a HJ |
395 | return -1; |
396 | } | |
397 | ||
398 | scsi_scan_host(host); | |
bef4a34a HJ |
399 | return ret; |
400 | } | |
401 | ||
3e189519 | 402 | /* |
ff568d3a GKH |
403 | * storvsc_remove - Callback when our device is removed |
404 | */ | |
bef4a34a HJ |
405 | static int storvsc_remove(struct device *device) |
406 | { | |
ff568d3a | 407 | int ret; |
150f9398 S |
408 | struct hv_driver *drv = |
409 | drv_to_hv_drv(device->driver); | |
4af27d70 | 410 | struct storvsc_driver_object *storvsc_drv_obj = drv->priv; |
6bad88da | 411 | struct hv_device *device_obj = device_to_hv_device(device); |
0883c52b | 412 | struct Scsi_Host *host = dev_get_drvdata(device); |
ff568d3a GKH |
413 | struct host_device_context *host_device_ctx = |
414 | (struct host_device_context *)host->hostdata; | |
bef4a34a HJ |
415 | |
416 | ||
ca623ad3 | 417 | if (!storvsc_drv_obj->base.dev_rm) |
bef4a34a | 418 | return -1; |
bef4a34a | 419 | |
ff568d3a GKH |
420 | /* |
421 | * Call to the vsc driver to let it know that the device is being | |
422 | * removed | |
423 | */ | |
ca623ad3 | 424 | ret = storvsc_drv_obj->base.dev_rm(device_obj); |
ff568d3a | 425 | if (ret != 0) { |
454f18a9 | 426 | /* TODO: */ |
ff568d3a GKH |
427 | DPRINT_ERR(STORVSC, "unable to remove vsc device (ret %d)", |
428 | ret); | |
bef4a34a HJ |
429 | } |
430 | ||
ff568d3a | 431 | if (host_device_ctx->request_pool) { |
bef4a34a HJ |
432 | kmem_cache_destroy(host_device_ctx->request_pool); |
433 | host_device_ctx->request_pool = NULL; | |
434 | } | |
435 | ||
436 | DPRINT_INFO(STORVSC, "removing host adapter (%p)...", host); | |
437 | scsi_remove_host(host); | |
438 | ||
439 | DPRINT_INFO(STORVSC, "releasing host adapter (%p)...", host); | |
440 | scsi_host_put(host); | |
bef4a34a HJ |
441 | return ret; |
442 | } | |
443 | ||
3e189519 | 444 | /* |
ff568d3a GKH |
445 | * storvsc_commmand_completion - Command completion processing |
446 | */ | |
0b3f6834 | 447 | static void storvsc_commmand_completion(struct hv_storvsc_request *request) |
bef4a34a | 448 | { |
ff568d3a | 449 | struct storvsc_cmd_request *cmd_request = |
d7a1bdb9 | 450 | (struct storvsc_cmd_request *)request->context; |
bef4a34a | 451 | struct scsi_cmnd *scmnd = cmd_request->cmd; |
ff568d3a GKH |
452 | struct host_device_context *host_device_ctx = |
453 | (struct host_device_context *)scmnd->device->host->hostdata; | |
bef4a34a | 454 | void (*scsi_done_fn)(struct scsi_cmnd *); |
bef4a34a | 455 | struct scsi_sense_hdr sense_hdr; |
6dc3f0a7 | 456 | struct vmscsi_request *vm_srb; |
bef4a34a | 457 | |
b856e738 BP |
458 | /* ASSERT(request == &cmd_request->request); */ |
459 | /* ASSERT(scmnd); */ | |
460 | /* ASSERT((unsigned long)scmnd->host_scribble == */ | |
461 | /* (unsigned long)cmd_request); */ | |
462 | /* ASSERT(scmnd->scsi_done); */ | |
bef4a34a | 463 | |
ff568d3a GKH |
464 | if (cmd_request->bounce_sgl_count) { |
465 | /* using bounce buffer */ | |
454f18a9 | 466 | /* printk("copy_from_bounce_buffer\n"); */ |
bef4a34a | 467 | |
454f18a9 | 468 | /* FIXME: We can optimize on writes by just skipping this */ |
ff568d3a GKH |
469 | copy_from_bounce_buffer(scsi_sglist(scmnd), |
470 | cmd_request->bounce_sgl, | |
471 | scsi_sg_count(scmnd)); | |
472 | destroy_bounce_buffer(cmd_request->bounce_sgl, | |
473 | cmd_request->bounce_sgl_count); | |
bef4a34a HJ |
474 | } |
475 | ||
d7a1bdb9 | 476 | vm_srb = &request->vstor_packet.vm_srb; |
6dc3f0a7 | 477 | scmnd->result = vm_srb->scsi_status; |
bef4a34a | 478 | |
ff568d3a GKH |
479 | if (scmnd->result) { |
480 | if (scsi_normalize_sense(scmnd->sense_buffer, | |
6f461cc4 | 481 | SCSI_SENSE_BUFFERSIZE, &sense_hdr)) |
bef4a34a | 482 | scsi_print_sense_hdr("storvsc", &sense_hdr); |
bef4a34a HJ |
483 | } |
484 | ||
8a046024 HJ |
485 | /* ASSERT(request->BytesXfer <= request->data_buffer.Length); */ |
486 | scsi_set_resid(scmnd, | |
d7a1bdb9 | 487 | request->data_buffer.len - |
e9e936c6 | 488 | vm_srb->data_transfer_length); |
bef4a34a HJ |
489 | |
490 | scsi_done_fn = scmnd->scsi_done; | |
491 | ||
492 | scmnd->host_scribble = NULL; | |
493 | scmnd->scsi_done = NULL; | |
494 | ||
454f18a9 | 495 | /* !!DO NOT MODIFY the scmnd after this call */ |
bef4a34a HJ |
496 | scsi_done_fn(scmnd); |
497 | ||
498 | kmem_cache_free(host_device_ctx->request_pool, cmd_request); | |
bef4a34a HJ |
499 | } |
500 | ||
501 | static int do_bounce_buffer(struct scatterlist *sgl, unsigned int sg_count) | |
502 | { | |
ff568d3a | 503 | int i; |
bef4a34a | 504 | |
454f18a9 | 505 | /* No need to check */ |
bef4a34a HJ |
506 | if (sg_count < 2) |
507 | return -1; | |
508 | ||
454f18a9 | 509 | /* We have at least 2 sg entries */ |
ff568d3a GKH |
510 | for (i = 0; i < sg_count; i++) { |
511 | if (i == 0) { | |
512 | /* make sure 1st one does not have hole */ | |
bef4a34a HJ |
513 | if (sgl[i].offset + sgl[i].length != PAGE_SIZE) |
514 | return i; | |
ff568d3a GKH |
515 | } else if (i == sg_count - 1) { |
516 | /* make sure last one does not have hole */ | |
bef4a34a HJ |
517 | if (sgl[i].offset != 0) |
518 | return i; | |
ff568d3a GKH |
519 | } else { |
520 | /* make sure no hole in the middle */ | |
bef4a34a | 521 | if (sgl[i].length != PAGE_SIZE || sgl[i].offset != 0) |
bef4a34a | 522 | return i; |
bef4a34a HJ |
523 | } |
524 | } | |
525 | return -1; | |
526 | } | |
527 | ||
ff568d3a GKH |
528 | static struct scatterlist *create_bounce_buffer(struct scatterlist *sgl, |
529 | unsigned int sg_count, | |
530 | unsigned int len) | |
bef4a34a HJ |
531 | { |
532 | int i; | |
ff568d3a GKH |
533 | int num_pages; |
534 | struct scatterlist *bounce_sgl; | |
bef4a34a HJ |
535 | struct page *page_buf; |
536 | ||
73509681 | 537 | num_pages = ALIGN(len, PAGE_SIZE) >> PAGE_SHIFT; |
bef4a34a | 538 | |
06da0bc8 | 539 | bounce_sgl = kcalloc(num_pages, sizeof(struct scatterlist), GFP_ATOMIC); |
bef4a34a | 540 | if (!bounce_sgl) |
bef4a34a | 541 | return NULL; |
bef4a34a | 542 | |
ff568d3a | 543 | for (i = 0; i < num_pages; i++) { |
bef4a34a HJ |
544 | page_buf = alloc_page(GFP_ATOMIC); |
545 | if (!page_buf) | |
bef4a34a | 546 | goto cleanup; |
bef4a34a | 547 | sg_set_page(&bounce_sgl[i], page_buf, 0, 0); |
bef4a34a HJ |
548 | } |
549 | ||
550 | return bounce_sgl; | |
551 | ||
552 | cleanup: | |
553 | destroy_bounce_buffer(bounce_sgl, num_pages); | |
554 | return NULL; | |
555 | } | |
556 | ||
ff568d3a GKH |
557 | static void destroy_bounce_buffer(struct scatterlist *sgl, |
558 | unsigned int sg_count) | |
bef4a34a HJ |
559 | { |
560 | int i; | |
561 | struct page *page_buf; | |
562 | ||
ff568d3a GKH |
563 | for (i = 0; i < sg_count; i++) { |
564 | page_buf = sg_page((&sgl[i])); | |
565 | if (page_buf != NULL) | |
bef4a34a | 566 | __free_page(page_buf); |
bef4a34a HJ |
567 | } |
568 | ||
569 | kfree(sgl); | |
570 | } | |
571 | ||
454f18a9 | 572 | /* Assume the bounce_sgl has enough room ie using the create_bounce_buffer() */ |
ff568d3a GKH |
573 | static unsigned int copy_to_bounce_buffer(struct scatterlist *orig_sgl, |
574 | struct scatterlist *bounce_sgl, | |
575 | unsigned int orig_sgl_count) | |
bef4a34a | 576 | { |
ff568d3a GKH |
577 | int i; |
578 | int j = 0; | |
bef4a34a HJ |
579 | unsigned long src, dest; |
580 | unsigned int srclen, destlen, copylen; | |
ff568d3a GKH |
581 | unsigned int total_copied = 0; |
582 | unsigned long bounce_addr = 0; | |
583 | unsigned long src_addr = 0; | |
bef4a34a HJ |
584 | unsigned long flags; |
585 | ||
586 | local_irq_save(flags); | |
587 | ||
ff568d3a GKH |
588 | for (i = 0; i < orig_sgl_count; i++) { |
589 | src_addr = (unsigned long)kmap_atomic(sg_page((&orig_sgl[i])), | |
590 | KM_IRQ0) + orig_sgl[i].offset; | |
bef4a34a HJ |
591 | src = src_addr; |
592 | srclen = orig_sgl[i].length; | |
593 | ||
b856e738 | 594 | /* ASSERT(orig_sgl[i].offset + orig_sgl[i].length <= PAGE_SIZE); */ |
bef4a34a | 595 | |
0c47a70a | 596 | if (bounce_addr == 0) |
bef4a34a | 597 | bounce_addr = (unsigned long)kmap_atomic(sg_page((&bounce_sgl[j])), KM_IRQ0); |
bef4a34a | 598 | |
ff568d3a | 599 | while (srclen) { |
454f18a9 | 600 | /* assume bounce offset always == 0 */ |
bef4a34a HJ |
601 | dest = bounce_addr + bounce_sgl[j].length; |
602 | destlen = PAGE_SIZE - bounce_sgl[j].length; | |
603 | ||
fc6a4b26 | 604 | copylen = min(srclen, destlen); |
ff568d3a | 605 | memcpy((void *)dest, (void *)src, copylen); |
bef4a34a HJ |
606 | |
607 | total_copied += copylen; | |
608 | bounce_sgl[j].length += copylen; | |
609 | srclen -= copylen; | |
610 | src += copylen; | |
611 | ||
ff568d3a GKH |
612 | if (bounce_sgl[j].length == PAGE_SIZE) { |
613 | /* full..move to next entry */ | |
614 | kunmap_atomic((void *)bounce_addr, KM_IRQ0); | |
bef4a34a HJ |
615 | j++; |
616 | ||
454f18a9 | 617 | /* if we need to use another bounce buffer */ |
ff568d3a | 618 | if (srclen || i != orig_sgl_count - 1) |
bef4a34a | 619 | bounce_addr = (unsigned long)kmap_atomic(sg_page((&bounce_sgl[j])), KM_IRQ0); |
ff568d3a GKH |
620 | } else if (srclen == 0 && i == orig_sgl_count - 1) { |
621 | /* unmap the last bounce that is < PAGE_SIZE */ | |
622 | kunmap_atomic((void *)bounce_addr, KM_IRQ0); | |
bef4a34a HJ |
623 | } |
624 | } | |
625 | ||
ff568d3a | 626 | kunmap_atomic((void *)(src_addr - orig_sgl[i].offset), KM_IRQ0); |
bef4a34a HJ |
627 | } |
628 | ||
629 | local_irq_restore(flags); | |
630 | ||
631 | return total_copied; | |
632 | } | |
633 | ||
454f18a9 | 634 | /* Assume the original sgl has enough room */ |
ff568d3a GKH |
635 | static unsigned int copy_from_bounce_buffer(struct scatterlist *orig_sgl, |
636 | struct scatterlist *bounce_sgl, | |
637 | unsigned int orig_sgl_count) | |
bef4a34a | 638 | { |
ff568d3a GKH |
639 | int i; |
640 | int j = 0; | |
bef4a34a HJ |
641 | unsigned long src, dest; |
642 | unsigned int srclen, destlen, copylen; | |
ff568d3a GKH |
643 | unsigned int total_copied = 0; |
644 | unsigned long bounce_addr = 0; | |
645 | unsigned long dest_addr = 0; | |
bef4a34a HJ |
646 | unsigned long flags; |
647 | ||
648 | local_irq_save(flags); | |
649 | ||
ff568d3a GKH |
650 | for (i = 0; i < orig_sgl_count; i++) { |
651 | dest_addr = (unsigned long)kmap_atomic(sg_page((&orig_sgl[i])), | |
652 | KM_IRQ0) + orig_sgl[i].offset; | |
bef4a34a HJ |
653 | dest = dest_addr; |
654 | destlen = orig_sgl[i].length; | |
b856e738 | 655 | /* ASSERT(orig_sgl[i].offset + orig_sgl[i].length <= PAGE_SIZE); */ |
bef4a34a | 656 | |
0c47a70a | 657 | if (bounce_addr == 0) |
bef4a34a | 658 | bounce_addr = (unsigned long)kmap_atomic(sg_page((&bounce_sgl[j])), KM_IRQ0); |
bef4a34a | 659 | |
ff568d3a | 660 | while (destlen) { |
bef4a34a HJ |
661 | src = bounce_addr + bounce_sgl[j].offset; |
662 | srclen = bounce_sgl[j].length - bounce_sgl[j].offset; | |
663 | ||
fc6a4b26 | 664 | copylen = min(srclen, destlen); |
ff568d3a | 665 | memcpy((void *)dest, (void *)src, copylen); |
bef4a34a HJ |
666 | |
667 | total_copied += copylen; | |
668 | bounce_sgl[j].offset += copylen; | |
669 | destlen -= copylen; | |
670 | dest += copylen; | |
671 | ||
ff568d3a GKH |
672 | if (bounce_sgl[j].offset == bounce_sgl[j].length) { |
673 | /* full */ | |
674 | kunmap_atomic((void *)bounce_addr, KM_IRQ0); | |
bef4a34a HJ |
675 | j++; |
676 | ||
454f18a9 | 677 | /* if we need to use another bounce buffer */ |
ff568d3a | 678 | if (destlen || i != orig_sgl_count - 1) |
bef4a34a | 679 | bounce_addr = (unsigned long)kmap_atomic(sg_page((&bounce_sgl[j])), KM_IRQ0); |
ff568d3a GKH |
680 | } else if (destlen == 0 && i == orig_sgl_count - 1) { |
681 | /* unmap the last bounce that is < PAGE_SIZE */ | |
682 | kunmap_atomic((void *)bounce_addr, KM_IRQ0); | |
bef4a34a HJ |
683 | } |
684 | } | |
685 | ||
ff568d3a GKH |
686 | kunmap_atomic((void *)(dest_addr - orig_sgl[i].offset), |
687 | KM_IRQ0); | |
bef4a34a HJ |
688 | } |
689 | ||
690 | local_irq_restore(flags); | |
691 | ||
692 | return total_copied; | |
693 | } | |
694 | ||
3e189519 | 695 | /* |
ff568d3a GKH |
696 | * storvsc_queuecommand - Initiate command processing |
697 | */ | |
f281233d | 698 | static int storvsc_queuecommand_lck(struct scsi_cmnd *scmnd, |
ff568d3a | 699 | void (*done)(struct scsi_cmnd *)) |
bef4a34a | 700 | { |
ff568d3a GKH |
701 | int ret; |
702 | struct host_device_context *host_device_ctx = | |
703 | (struct host_device_context *)scmnd->device->host->hostdata; | |
6bad88da | 704 | struct hv_device *device_ctx = host_device_ctx->device_ctx; |
150f9398 S |
705 | struct hv_driver *drv = |
706 | drv_to_hv_drv(device_ctx->device.driver); | |
4af27d70 | 707 | struct storvsc_driver_object *storvsc_drv_obj = drv->priv; |
0b3f6834 | 708 | struct hv_storvsc_request *request; |
bef4a34a | 709 | struct storvsc_cmd_request *cmd_request; |
ff568d3a | 710 | unsigned int request_size = 0; |
bef4a34a HJ |
711 | int i; |
712 | struct scatterlist *sgl; | |
77c5ceaf | 713 | unsigned int sg_count = 0; |
12fbd416 | 714 | struct vmscsi_request *vm_srb; |
bef4a34a | 715 | |
ff568d3a GKH |
716 | DPRINT_DBG(STORVSC_DRV, "scmnd %p dir %d, use_sg %d buf %p len %d " |
717 | "queue depth %d tagged %d", scmnd, scmnd->sc_data_direction, | |
718 | scsi_sg_count(scmnd), scsi_sglist(scmnd), | |
719 | scsi_bufflen(scmnd), scmnd->device->queue_depth, | |
720 | scmnd->device->tagged_supported); | |
bef4a34a | 721 | |
454f18a9 | 722 | /* If retrying, no need to prep the cmd */ |
ff568d3a | 723 | if (scmnd->host_scribble) { |
b856e738 | 724 | /* ASSERT(scmnd->scsi_done != NULL); */ |
bef4a34a | 725 | |
ff568d3a GKH |
726 | cmd_request = |
727 | (struct storvsc_cmd_request *)scmnd->host_scribble; | |
728 | DPRINT_INFO(STORVSC_DRV, "retrying scmnd %p cmd_request %p", | |
729 | scmnd, cmd_request); | |
bef4a34a HJ |
730 | |
731 | goto retry_request; | |
732 | } | |
733 | ||
b856e738 BP |
734 | /* ASSERT(scmnd->scsi_done == NULL); */ |
735 | /* ASSERT(scmnd->host_scribble == NULL); */ | |
bef4a34a HJ |
736 | |
737 | scmnd->scsi_done = done; | |
738 | ||
739 | request_size = sizeof(struct storvsc_cmd_request); | |
740 | ||
c7a9a484 | 741 | cmd_request = kmem_cache_zalloc(host_device_ctx->request_pool, |
ff568d3a GKH |
742 | GFP_ATOMIC); |
743 | if (!cmd_request) { | |
744 | DPRINT_ERR(STORVSC_DRV, "scmnd (%p) - unable to allocate " | |
745 | "storvsc_cmd_request...marking queue busy", scmnd); | |
bef4a34a HJ |
746 | scmnd->scsi_done = NULL; |
747 | return SCSI_MLQUEUE_DEVICE_BUSY; | |
748 | } | |
749 | ||
454f18a9 | 750 | /* Setup the cmd request */ |
bef4a34a HJ |
751 | cmd_request->bounce_sgl_count = 0; |
752 | cmd_request->bounce_sgl = NULL; | |
753 | cmd_request->cmd = scmnd; | |
754 | ||
ff568d3a | 755 | scmnd->host_scribble = (unsigned char *)cmd_request; |
bef4a34a HJ |
756 | |
757 | request = &cmd_request->request; | |
d7a1bdb9 | 758 | vm_srb = &request->vstor_packet.vm_srb; |
bef4a34a | 759 | |
1e05d88e | 760 | DPRINT_DBG(STORVSC_DRV, "req %p size %d", request, request_size); |
bef4a34a | 761 | |
454f18a9 | 762 | /* Build the SRB */ |
ff568d3a | 763 | switch (scmnd->sc_data_direction) { |
bef4a34a | 764 | case DMA_TO_DEVICE: |
12fbd416 | 765 | vm_srb->data_in = WRITE_TYPE; |
bef4a34a HJ |
766 | break; |
767 | case DMA_FROM_DEVICE: | |
12fbd416 | 768 | vm_srb->data_in = READ_TYPE; |
bef4a34a HJ |
769 | break; |
770 | default: | |
12fbd416 | 771 | vm_srb->data_in = UNKNOWN_TYPE; |
bef4a34a HJ |
772 | break; |
773 | } | |
774 | ||
d7a1bdb9 S |
775 | request->on_io_completion = storvsc_commmand_completion; |
776 | request->context = cmd_request;/* scmnd; */ | |
bef4a34a | 777 | |
454f18a9 | 778 | /* request->PortId = scmnd->device->channel; */ |
124661de | 779 | vm_srb->port_number = host_device_ctx->port; |
735625fe | 780 | vm_srb->path_id = scmnd->device->channel; |
b4dba0a3 | 781 | vm_srb->target_id = scmnd->device->id; |
fc3967b0 | 782 | vm_srb->lun = scmnd->device->lun; |
bef4a34a | 783 | |
b856e738 | 784 | /* ASSERT(scmnd->cmd_len <= 16); */ |
473f9409 | 785 | vm_srb->cdb_length = scmnd->cmd_len; |
373dd8a9 S |
786 | |
787 | memcpy(vm_srb->cdb, scmnd->cmnd, vm_srb->cdb_length); | |
bef4a34a | 788 | |
d7a1bdb9 | 789 | request->sense_buffer = scmnd->sense_buffer; |
bef4a34a HJ |
790 | |
791 | ||
d7a1bdb9 | 792 | request->data_buffer.len = scsi_bufflen(scmnd); |
ff568d3a GKH |
793 | if (scsi_sg_count(scmnd)) { |
794 | sgl = (struct scatterlist *)scsi_sglist(scmnd); | |
77c5ceaf | 795 | sg_count = scsi_sg_count(scmnd); |
bef4a34a | 796 | |
454f18a9 | 797 | /* check if we need to bounce the sgl */ |
ff568d3a GKH |
798 | if (do_bounce_buffer(sgl, scsi_sg_count(scmnd)) != -1) { |
799 | DPRINT_INFO(STORVSC_DRV, | |
800 | "need to bounce buffer for this scmnd %p", | |
801 | scmnd); | |
802 | cmd_request->bounce_sgl = | |
803 | create_bounce_buffer(sgl, scsi_sg_count(scmnd), | |
804 | scsi_bufflen(scmnd)); | |
805 | if (!cmd_request->bounce_sgl) { | |
806 | DPRINT_ERR(STORVSC_DRV, | |
807 | "unable to create bounce buffer for " | |
808 | "this scmnd %p", scmnd); | |
bef4a34a HJ |
809 | |
810 | scmnd->scsi_done = NULL; | |
811 | scmnd->host_scribble = NULL; | |
ff568d3a GKH |
812 | kmem_cache_free(host_device_ctx->request_pool, |
813 | cmd_request); | |
bef4a34a HJ |
814 | |
815 | return SCSI_MLQUEUE_HOST_BUSY; | |
816 | } | |
817 | ||
ff568d3a | 818 | cmd_request->bounce_sgl_count = |
73509681 | 819 | ALIGN(scsi_bufflen(scmnd), PAGE_SIZE) >> |
ff568d3a | 820 | PAGE_SHIFT; |
bef4a34a | 821 | |
ff568d3a GKH |
822 | /* |
823 | * FIXME: We can optimize on reads by just skipping | |
824 | * this | |
825 | */ | |
826 | copy_to_bounce_buffer(sgl, cmd_request->bounce_sgl, | |
827 | scsi_sg_count(scmnd)); | |
bef4a34a HJ |
828 | |
829 | sgl = cmd_request->bounce_sgl; | |
77c5ceaf | 830 | sg_count = cmd_request->bounce_sgl_count; |
bef4a34a HJ |
831 | } |
832 | ||
d7a1bdb9 | 833 | request->data_buffer.offset = sgl[0].offset; |
bef4a34a | 834 | |
77c5ceaf | 835 | for (i = 0; i < sg_count; i++) { |
0686e4f4 | 836 | DPRINT_DBG(STORVSC_DRV, "sgl[%d] len %d offset %d\n", |
ff568d3a | 837 | i, sgl[i].length, sgl[i].offset); |
d7a1bdb9 | 838 | request->data_buffer.pfn_array[i] = |
77c5ceaf | 839 | page_to_pfn(sg_page((&sgl[i]))); |
bef4a34a | 840 | } |
ff568d3a | 841 | } else if (scsi_sglist(scmnd)) { |
b856e738 | 842 | /* ASSERT(scsi_bufflen(scmnd) <= PAGE_SIZE); */ |
d7a1bdb9 | 843 | request->data_buffer.offset = |
ff568d3a | 844 | virt_to_phys(scsi_sglist(scmnd)) & (PAGE_SIZE-1); |
d7a1bdb9 | 845 | request->data_buffer.pfn_array[0] = |
ff568d3a | 846 | virt_to_phys(scsi_sglist(scmnd)) >> PAGE_SHIFT; |
bef4a34a | 847 | } |
bef4a34a HJ |
848 | |
849 | retry_request: | |
454f18a9 | 850 | /* Invokes the vsc to start an IO */ |
6bad88da | 851 | ret = storvsc_drv_obj->on_io_request(device_ctx, |
ff568d3a GKH |
852 | &cmd_request->request); |
853 | if (ret == -1) { | |
854 | /* no more space */ | |
855 | DPRINT_ERR(STORVSC_DRV, | |
856 | "scmnd (%p) - queue FULL...marking queue busy", | |
857 | scmnd); | |
858 | ||
859 | if (cmd_request->bounce_sgl_count) { | |
860 | /* | |
861 | * FIXME: We can optimize on writes by just skipping | |
862 | * this | |
863 | */ | |
864 | copy_from_bounce_buffer(scsi_sglist(scmnd), | |
865 | cmd_request->bounce_sgl, | |
866 | scsi_sg_count(scmnd)); | |
867 | destroy_bounce_buffer(cmd_request->bounce_sgl, | |
868 | cmd_request->bounce_sgl_count); | |
bef4a34a HJ |
869 | } |
870 | ||
871 | kmem_cache_free(host_device_ctx->request_pool, cmd_request); | |
872 | ||
873 | scmnd->scsi_done = NULL; | |
874 | scmnd->host_scribble = NULL; | |
875 | ||
876 | ret = SCSI_MLQUEUE_DEVICE_BUSY; | |
877 | } | |
878 | ||
bef4a34a HJ |
879 | return ret; |
880 | } | |
881 | ||
/*
 * DEF_SCSI_QCMD generates the storvsc_queuecommand() entry point handed to
 * the SCSI midlayer, wrapping the lock-free implementation above with the
 * midlayer's standard locking boilerplate.
 */
static DEF_SCSI_QCMD(storvsc_queuecommand)

ff568d3a GKH |
884 | static int storvsc_merge_bvec(struct request_queue *q, |
885 | struct bvec_merge_data *bmd, struct bio_vec *bvec) | |
bef4a34a | 886 | { |
ff568d3a GKH |
887 | /* checking done by caller. */ |
888 | return bvec->bv_len; | |
bef4a34a | 889 | } |
bef4a34a | 890 | |
/*
 * storvsc_device_alloc - Per-device setup when a scsi device is attached
 */
static int storvsc_device_alloc(struct scsi_device *sdevice)
{
	DPRINT_DBG(STORVSC_DRV, "sdev (%p) - setting device flag to %d",
		   sdevice, BLIST_SPARSELUN);
	/*
	 * Allow LUNs to be numbered sparsely (and large-LUN addressing).
	 * Without these flags the midlayer's scan may stop at the first
	 * gap in LUN numbering and never discover some LUNs.
	 */
	sdevice->sdev_bflags |= BLIST_SPARSELUN | BLIST_LARGELUN;
	return 0;
}
905 | ||
/*
 * storvsc_device_configure - Configure queueing limits for a scsi device
 */
static int storvsc_device_configure(struct scsi_device *sdevice)
{
	DPRINT_INFO(STORVSC_DRV, "sdev (%p) - curr queue depth %d", sdevice,
		    sdevice->queue_depth);

	DPRINT_INFO(STORVSC_DRV, "sdev (%p) - setting queue depth to %d",
		    sdevice, STORVSC_MAX_IO_REQUESTS);
	scsi_adjust_queue_depth(sdevice, MSG_SIMPLE_TAG,
				STORVSC_MAX_IO_REQUESTS);

	DPRINT_INFO(STORVSC_DRV, "sdev (%p) - setting max segment size to %ld",
		    sdevice, PAGE_SIZE);
	/*
	 * Requests are translated page-by-page into pfn_array entries in
	 * the queuecommand path, so cap each segment at one page.
	 */
	blk_queue_max_segment_size(sdevice->request_queue, PAGE_SIZE);

	DPRINT_INFO(STORVSC_DRV, "sdev (%p) - adding merge bio vec routine",
		    sdevice);
	blk_queue_merge_bvec(sdevice->request_queue, storvsc_merge_bvec);

	/* No addressing restrictions: block-layer bouncing is unnecessary */
	blk_queue_bounce_limit(sdevice->request_queue, BLK_BOUNCE_ANY);
	/* sdevice->timeout = (2000 * HZ);//(75 * HZ); */

	return 0;
}
929 | ||
3e189519 | 930 | /* |
ff568d3a GKH |
931 | * storvsc_host_reset_handler - Reset the scsi HBA |
932 | */ | |
bef4a34a HJ |
933 | static int storvsc_host_reset_handler(struct scsi_cmnd *scmnd) |
934 | { | |
ff568d3a GKH |
935 | int ret; |
936 | struct host_device_context *host_device_ctx = | |
937 | (struct host_device_context *)scmnd->device->host->hostdata; | |
6bad88da | 938 | struct hv_device *device_ctx = host_device_ctx->device_ctx; |
bef4a34a | 939 | |
ff568d3a | 940 | DPRINT_INFO(STORVSC_DRV, "sdev (%p) dev obj (%p) - host resetting...", |
6bad88da | 941 | scmnd->device, device_ctx); |
bef4a34a | 942 | |
454f18a9 | 943 | /* Invokes the vsc to reset the host/bus */ |
6bad88da | 944 | ret = stor_vsc_on_host_reset(device_ctx); |
83c720ea | 945 | if (ret != 0) |
bef4a34a | 946 | return ret; |
bef4a34a | 947 | |
ff568d3a | 948 | DPRINT_INFO(STORVSC_DRV, "sdev (%p) dev obj (%p) - host reseted", |
6bad88da | 949 | scmnd->device, device_ctx); |
bef4a34a | 950 | |
bef4a34a HJ |
951 | return ret; |
952 | } | |
953 | ||
ff568d3a GKH |
954 | static int storvsc_get_chs(struct scsi_device *sdev, struct block_device * bdev, |
955 | sector_t capacity, int *info) | |
bef4a34a HJ |
956 | { |
957 | sector_t total_sectors = capacity; | |
ff568d3a GKH |
958 | sector_t cylinder_times_heads = 0; |
959 | sector_t temp = 0; | |
bef4a34a | 960 | |
ff568d3a GKH |
961 | int sectors_per_track = 0; |
962 | int heads = 0; | |
963 | int cylinders = 0; | |
964 | int rem = 0; | |
bef4a34a | 965 | |
ff568d3a GKH |
966 | if (total_sectors > (65535 * 16 * 255)) |
967 | total_sectors = (65535 * 16 * 255); | |
bef4a34a | 968 | |
ff568d3a GKH |
969 | if (total_sectors >= (65535 * 16 * 63)) { |
970 | sectors_per_track = 255; | |
971 | heads = 16; | |
bef4a34a HJ |
972 | |
973 | cylinder_times_heads = total_sectors; | |
ff568d3a GKH |
974 | /* sector_div stores the quotient in cylinder_times_heads */ |
975 | rem = sector_div(cylinder_times_heads, sectors_per_track); | |
976 | } else { | |
977 | sectors_per_track = 17; | |
bef4a34a HJ |
978 | |
979 | cylinder_times_heads = total_sectors; | |
ff568d3a GKH |
980 | /* sector_div stores the quotient in cylinder_times_heads */ |
981 | rem = sector_div(cylinder_times_heads, sectors_per_track); | |
bef4a34a HJ |
982 | |
983 | temp = cylinder_times_heads + 1023; | |
ff568d3a GKH |
984 | /* sector_div stores the quotient in temp */ |
985 | rem = sector_div(temp, 1024); | |
bef4a34a HJ |
986 | |
987 | heads = temp; | |
988 | ||
ff568d3a GKH |
989 | if (heads < 4) |
990 | heads = 4; | |
bef4a34a | 991 | |
ff568d3a GKH |
992 | if (cylinder_times_heads >= (heads * 1024) || (heads > 16)) { |
993 | sectors_per_track = 31; | |
994 | heads = 16; | |
bef4a34a HJ |
995 | |
996 | cylinder_times_heads = total_sectors; | |
ff568d3a GKH |
997 | /* |
998 | * sector_div stores the quotient in | |
999 | * cylinder_times_heads | |
1000 | */ | |
1001 | rem = sector_div(cylinder_times_heads, | |
1002 | sectors_per_track); | |
1003 | } | |
bef4a34a | 1004 | |
ff568d3a GKH |
1005 | if (cylinder_times_heads >= (heads * 1024)) { |
1006 | sectors_per_track = 63; | |
1007 | heads = 16; | |
bef4a34a HJ |
1008 | |
1009 | cylinder_times_heads = total_sectors; | |
ff568d3a GKH |
1010 | /* |
1011 | * sector_div stores the quotient in | |
1012 | * cylinder_times_heads | |
1013 | */ | |
1014 | rem = sector_div(cylinder_times_heads, | |
1015 | sectors_per_track); | |
1016 | } | |
1017 | } | |
bef4a34a HJ |
1018 | |
1019 | temp = cylinder_times_heads; | |
ff568d3a GKH |
1020 | /* sector_div stores the quotient in temp */ |
1021 | rem = sector_div(temp, heads); | |
bef4a34a HJ |
1022 | cylinders = temp; |
1023 | ||
1024 | info[0] = heads; | |
ff568d3a GKH |
1025 | info[1] = sectors_per_track; |
1026 | info[2] = cylinders; | |
bef4a34a | 1027 | |
ff568d3a GKH |
1028 | DPRINT_INFO(STORVSC_DRV, "CHS (%d, %d, %d)", cylinders, heads, |
1029 | sectors_per_track); | |
bef4a34a HJ |
1030 | |
1031 | return 0; | |
1032 | } | |
1033 | ||
bef4a34a HJ |
1034 | static int __init storvsc_init(void) |
1035 | { | |
1036 | int ret; | |
1037 | ||
bef4a34a | 1038 | DPRINT_INFO(STORVSC_DRV, "Storvsc initializing...."); |
db085777 | 1039 | ret = storvsc_drv_init(); |
bef4a34a HJ |
1040 | return ret; |
1041 | } | |
1042 | ||
/* Module exit point: tear down and unregister the storvsc driver. */
static void __exit storvsc_exit(void)
{
	storvsc_drv_exit();
}
1047 | ||
ff568d3a | 1048 | MODULE_LICENSE("GPL"); |
26c14cc1 | 1049 | MODULE_VERSION(HV_DRV_VERSION); |
3afc7cc3 | 1050 | MODULE_DESCRIPTION("Microsoft Hyper-V virtual storage driver"); |
bef4a34a HJ |
1051 | module_init(storvsc_init); |
1052 | module_exit(storvsc_exit); |