/*
 * File...........: linux/drivers/s390/block/dasd.c
 * Author(s)......: Holger Smolinski <Holger.Smolinski@de.ibm.com>
 *		    Horst Hummel <Horst.Hummel@de.ibm.com>
 *		    Carsten Otte <Cotte@de.ibm.com>
 *		    Martin Schwidefsky <schwidefsky@de.ibm.com>
 * Bugreports.to..: <Linux390@de.ibm.com>
 * (C) IBM Corporation, IBM Deutschland Entwicklung GmbH, 1999-2001
 *
 */

#include <linux/kmod.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/ctype.h>
#include <linux/major.h>
#include <linux/slab.h>
#include <linux/buffer_head.h>
#include <linux/hdreg.h>

#include <asm/ccwdev.h>
#include <asm/ebcdic.h>
#include <asm/idals.h>
#include <asm/todclk.h>

/* This is ugly... */
#define PRINTK_HEADER "dasd:"

#include "dasd_int.h"
/*
 * SECTION: Constant definitions to be used within this file
 */
#define DASD_CHANQ_MAX_SIZE 4

/*
 * SECTION: exported variables of dasd.c
 */
debug_info_t *dasd_debug_area;
struct dasd_discipline *dasd_diag_discipline_pointer;
void dasd_int_handler(struct ccw_device *, unsigned long, struct irb *);

MODULE_AUTHOR("Holger Smolinski <Holger.Smolinski@de.ibm.com>");
MODULE_DESCRIPTION("Linux on S/390 DASD device driver,"
		   " Copyright 2000 IBM Corporation");
MODULE_SUPPORTED_DEVICE("dasd");
MODULE_LICENSE("GPL");

/*
 * SECTION: prototypes for static functions of dasd.c
 */
static int dasd_alloc_queue(struct dasd_block *);
static void dasd_setup_queue(struct dasd_block *);
static void dasd_free_queue(struct dasd_block *);
static void dasd_flush_request_queue(struct dasd_block *);
static int dasd_flush_block_queue(struct dasd_block *);
static void dasd_device_tasklet(struct dasd_device *);
static void dasd_block_tasklet(struct dasd_block *);
static void do_kick_device(struct work_struct *);
static void dasd_return_cqr_cb(struct dasd_ccw_req *, void *);

/*
 * SECTION: Operations on the device structure.
 */
static wait_queue_head_t dasd_init_waitq;
static wait_queue_head_t dasd_flush_wq;
static wait_queue_head_t generic_waitq;

/*
 * Allocate memory for a new device structure.
 */
struct dasd_device *dasd_alloc_device(void)
{
	struct dasd_device *device;

	device = kzalloc(sizeof(struct dasd_device), GFP_ATOMIC);
	if (!device)
		return ERR_PTR(-ENOMEM);

	/* Get two pages for normal block device operations. */
	device->ccw_mem = (void *) __get_free_pages(GFP_ATOMIC | GFP_DMA, 1);
	if (!device->ccw_mem) {
		kfree(device);
		return ERR_PTR(-ENOMEM);
	}
	/* Get one page for error recovery. */
	device->erp_mem = (void *) get_zeroed_page(GFP_ATOMIC | GFP_DMA);
	if (!device->erp_mem) {
		free_pages((unsigned long) device->ccw_mem, 1);
		kfree(device);
		return ERR_PTR(-ENOMEM);
	}

	dasd_init_chunklist(&device->ccw_chunks, device->ccw_mem, PAGE_SIZE*2);
	dasd_init_chunklist(&device->erp_chunks, device->erp_mem, PAGE_SIZE);
	spin_lock_init(&device->mem_lock);
	atomic_set(&device->tasklet_scheduled, 0);
	tasklet_init(&device->tasklet,
		     (void (*)(unsigned long)) dasd_device_tasklet,
		     (unsigned long) device);
	INIT_LIST_HEAD(&device->ccw_queue);
	init_timer(&device->timer);
	INIT_WORK(&device->kick_work, do_kick_device);
	device->state = DASD_STATE_NEW;
	device->target = DASD_STATE_NEW;

	return device;
}

/*
 * Free memory of a device structure.
 */
void dasd_free_device(struct dasd_device *device)
{
	kfree(device->private);
	free_page((unsigned long) device->erp_mem);
	free_pages((unsigned long) device->ccw_mem, 1);
	kfree(device);
}

/*
 * Allocate memory for a new block structure.
 */
struct dasd_block *dasd_alloc_block(void)
{
	struct dasd_block *block;

	block = kzalloc(sizeof(*block), GFP_ATOMIC);
	if (!block)
		return ERR_PTR(-ENOMEM);
	/* open_count = 0 means device online but not in use */
	atomic_set(&block->open_count, -1);

	spin_lock_init(&block->request_queue_lock);
	atomic_set(&block->tasklet_scheduled, 0);
	tasklet_init(&block->tasklet,
		     (void (*)(unsigned long)) dasd_block_tasklet,
		     (unsigned long) block);
	INIT_LIST_HEAD(&block->ccw_queue);
	spin_lock_init(&block->queue_lock);
	init_timer(&block->timer);

	return block;
}

/*
 * Free memory of a block structure.
 */
void dasd_free_block(struct dasd_block *block)
{
	kfree(block);
}

/*
 * Make a new device known to the system.
 */
static int dasd_state_new_to_known(struct dasd_device *device)
{
	int rc;

	/*
	 * As long as the device is not in state DASD_STATE_NEW we want to
	 * keep the reference count > 0.
	 */
	dasd_get_device(device);

	if (device->block) {
		rc = dasd_alloc_queue(device->block);
		if (rc) {
			dasd_put_device(device);
			return rc;
		}
	}
	device->state = DASD_STATE_KNOWN;
	return 0;
}

/*
 * Let the system forget about a device.
 */
static int dasd_state_known_to_new(struct dasd_device *device)
{
	/* Disable extended error reporting for this device. */
	dasd_eer_disable(device);
	/* Forget the discipline information. */
	if (device->discipline) {
		if (device->discipline->uncheck_device)
			device->discipline->uncheck_device(device);
		module_put(device->discipline->owner);
	}
	device->discipline = NULL;
	if (device->base_discipline)
		module_put(device->base_discipline->owner);
	device->base_discipline = NULL;
	device->state = DASD_STATE_NEW;

	if (device->block)
		dasd_free_queue(device->block);

	/* Give up reference we took in dasd_state_new_to_known. */
	dasd_put_device(device);
	return 0;
}

/*
 * Request the irq line for the device.
 */
static int dasd_state_known_to_basic(struct dasd_device *device)
{
	int rc;

	/* Allocate and register gendisk structure. */
	if (device->block) {
		rc = dasd_gendisk_alloc(device->block);
		if (rc)
			return rc;
	}
	/* register 'device' debug area, used for all DBF_DEV_XXX calls */
	device->debug_area = debug_register(device->cdev->dev.bus_id, 1, 1,
					    8 * sizeof(long));
	debug_register_view(device->debug_area, &debug_sprintf_view);
	debug_set_level(device->debug_area, DBF_WARNING);
	DBF_DEV_EVENT(DBF_EMERG, device, "%s", "debug area created");

	device->state = DASD_STATE_BASIC;
	return 0;
}

/*
 * Release the irq line for the device. Terminate any running i/o.
 */
static int dasd_state_basic_to_known(struct dasd_device *device)
{
	int rc;
	if (device->block) {
		dasd_gendisk_free(device->block);
		dasd_block_clear_timer(device->block);
	}
	rc = dasd_flush_device_queue(device);
	if (rc)
		return rc;
	dasd_device_clear_timer(device);

	DBF_DEV_EVENT(DBF_EMERG, device, "%p debug area deleted", device);
	if (device->debug_area != NULL) {
		debug_unregister(device->debug_area);
		device->debug_area = NULL;
	}
	device->state = DASD_STATE_KNOWN;
	return 0;
}

/*
 * Do the initial analysis. The do_analysis function may return
 * -EAGAIN in which case the device keeps the state DASD_STATE_BASIC
 * until the discipline decides to continue the startup sequence
 * by calling the function dasd_change_state. The eckd discipline
 * uses this to start a ccw that detects the format. The completion
 * interrupt for this detection ccw uses the kernel event daemon to
 * trigger the call to dasd_change_state. All this is done in the
 * discipline code, see dasd_eckd.c.
 * After the analysis ccw is done (do_analysis returned 0) the block
 * device is set up.
 * In case the analysis returns an error, the device setup is stopped
 * (a fake disk was already added to allow formatting).
 */
static int dasd_state_basic_to_ready(struct dasd_device *device)
{
	int rc;
	struct dasd_block *block;

	rc = 0;
	block = device->block;
	/* make disk known with correct capacity */
	if (block) {
		if (block->base->discipline->do_analysis != NULL)
			rc = block->base->discipline->do_analysis(block);
		if (rc) {
			if (rc != -EAGAIN)
				device->state = DASD_STATE_UNFMT;
			return rc;
		}
		dasd_setup_queue(block);
		set_capacity(block->gdp,
			     block->blocks << block->s2b_shift);
		device->state = DASD_STATE_READY;
		rc = dasd_scan_partitions(block);
		if (rc)
			device->state = DASD_STATE_BASIC;
	} else {
		device->state = DASD_STATE_READY;
	}
	return rc;
}

/*
 * Remove device from block device layer. Destroy dirty buffers.
 * Forget format information. Check if the target level is basic
 * and if it is create fake disk for formatting.
 */
static int dasd_state_ready_to_basic(struct dasd_device *device)
{
	int rc;

	device->state = DASD_STATE_BASIC;
	if (device->block) {
		struct dasd_block *block = device->block;
		rc = dasd_flush_block_queue(block);
		if (rc) {
			device->state = DASD_STATE_READY;
			return rc;
		}
		dasd_destroy_partitions(block);
		dasd_flush_request_queue(block);
		block->blocks = 0;
		block->bp_block = 0;
		block->s2b_shift = 0;
	}
	return 0;
}

/*
 * Back to basic.
 */
static int dasd_state_unfmt_to_basic(struct dasd_device *device)
{
	device->state = DASD_STATE_BASIC;
	return 0;
}

/*
 * Make the device online and schedule the bottom half to start
 * the requeueing of requests from the linux request queue to the
 * ccw queue.
 */
static int
dasd_state_ready_to_online(struct dasd_device * device)
{
	int rc;

	if (device->discipline->ready_to_online) {
		rc = device->discipline->ready_to_online(device);
		if (rc)
			return rc;
	}
	device->state = DASD_STATE_ONLINE;
	if (device->block)
		dasd_schedule_block_bh(device->block);
	return 0;
}

/*
 * Stop the requeueing of requests again.
 */
static int dasd_state_online_to_ready(struct dasd_device *device)
{
	int rc;

	if (device->discipline->online_to_ready) {
		rc = device->discipline->online_to_ready(device);
		if (rc)
			return rc;
	}
	device->state = DASD_STATE_READY;
	return 0;
}

/*
 * Device startup state changes.
 */
static int dasd_increase_state(struct dasd_device *device)
{
	int rc;

	rc = 0;
	if (device->state == DASD_STATE_NEW &&
	    device->target >= DASD_STATE_KNOWN)
		rc = dasd_state_new_to_known(device);

	if (!rc &&
	    device->state == DASD_STATE_KNOWN &&
	    device->target >= DASD_STATE_BASIC)
		rc = dasd_state_known_to_basic(device);

	if (!rc &&
	    device->state == DASD_STATE_BASIC &&
	    device->target >= DASD_STATE_READY)
		rc = dasd_state_basic_to_ready(device);

	if (!rc &&
	    device->state == DASD_STATE_UNFMT &&
	    device->target > DASD_STATE_UNFMT)
		rc = -EPERM;

	if (!rc &&
	    device->state == DASD_STATE_READY &&
	    device->target >= DASD_STATE_ONLINE)
		rc = dasd_state_ready_to_online(device);

	return rc;
}

/*
 * Device shutdown state changes.
 */
static int dasd_decrease_state(struct dasd_device *device)
{
	int rc;

	rc = 0;
	if (device->state == DASD_STATE_ONLINE &&
	    device->target <= DASD_STATE_READY)
		rc = dasd_state_online_to_ready(device);

	if (!rc &&
	    device->state == DASD_STATE_READY &&
	    device->target <= DASD_STATE_BASIC)
		rc = dasd_state_ready_to_basic(device);

	if (!rc &&
	    device->state == DASD_STATE_UNFMT &&
	    device->target <= DASD_STATE_BASIC)
		rc = dasd_state_unfmt_to_basic(device);

	if (!rc &&
	    device->state == DASD_STATE_BASIC &&
	    device->target <= DASD_STATE_KNOWN)
		rc = dasd_state_basic_to_known(device);

	if (!rc &&
	    device->state == DASD_STATE_KNOWN &&
	    device->target <= DASD_STATE_NEW)
		rc = dasd_state_known_to_new(device);

	return rc;
}

/*
 * This is the main startup/shutdown routine.
 */
static void dasd_change_state(struct dasd_device *device)
{
	int rc;

	if (device->state == device->target)
		/* Already where we want to go today... */
		return;
	if (device->state < device->target)
		rc = dasd_increase_state(device);
	else
		rc = dasd_decrease_state(device);
	if (rc && rc != -EAGAIN)
		device->target = device->state;

	if (device->state == device->target)
		wake_up(&dasd_init_waitq);

	/* let user-space know that the device status changed */
	kobject_uevent(&device->cdev->dev.kobj, KOBJ_CHANGE);
}

/*
 * Kick starter for devices that did not complete the startup/shutdown
 * procedure or were sleeping because of a pending state.
 * dasd_kick_device will schedule a call to do_kick_device to the kernel
 * event daemon.
 */
static void do_kick_device(struct work_struct *work)
{
	struct dasd_device *device = container_of(work, struct dasd_device, kick_work);
	dasd_change_state(device);
	dasd_schedule_device_bh(device);
	dasd_put_device(device);
}

void dasd_kick_device(struct dasd_device *device)
{
	dasd_get_device(device);
	/* queue call to dasd_kick_device to the kernel event daemon. */
	schedule_work(&device->kick_work);
}

/*
 * Set the target state for a device and start the state change.
 */
void dasd_set_target_state(struct dasd_device *device, int target)
{
	/* If we are in probeonly mode stop at DASD_STATE_READY. */
	if (dasd_probeonly && target > DASD_STATE_READY)
		target = DASD_STATE_READY;
	if (device->target != target) {
		if (device->state == target)
			wake_up(&dasd_init_waitq);
		device->target = target;
	}
	if (device->state != device->target)
		dasd_change_state(device);
}

/*
 * Enable a device and wait until it has reached its target state.
 */
static inline int _wait_for_device(struct dasd_device *device)
{
	return (device->state == device->target);
}

void dasd_enable_device(struct dasd_device *device)
{
	dasd_set_target_state(device, DASD_STATE_ONLINE);
	if (device->state <= DASD_STATE_KNOWN)
		/* No discipline for device found. */
		dasd_set_target_state(device, DASD_STATE_NEW);
	/* Now wait for the devices to come up. */
	wait_event(dasd_init_waitq, _wait_for_device(device));
}

/*
 * SECTION: device operation (interrupt handler, start i/o, term i/o ...)
 */
#ifdef CONFIG_DASD_PROFILE

struct dasd_profile_info_t dasd_global_profile;
unsigned int dasd_profile_level = DASD_PROFILE_OFF;

/*
 * Increments counter in global and local profiling structures.
 */
#define dasd_profile_counter(value, counter, block) \
{ \
	int index; \
	for (index = 0; index < 31 && value >> (2+index); index++); \
	dasd_global_profile.counter[index]++; \
	block->profile.counter[index]++; \
}
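
/*
 * Editorial note, not part of the original source: the macro above keeps a
 * coarse log2 histogram. The loop leaves 'index' as the smallest value for
 * which (value >> (2 + index)) becomes zero, capped at 31. For example, a
 * request of 512 sectors yields index 8 (512 >> 10 == 0), so
 * dasd_io_secs[8] is incremented in both the global and the per-block
 * profile.
 */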

/*
 * Add profiling information for cqr before execution.
 */
static void dasd_profile_start(struct dasd_block *block,
			       struct dasd_ccw_req *cqr,
			       struct request *req)
{
	struct list_head *l;
	unsigned int counter;

	if (dasd_profile_level != DASD_PROFILE_ON)
		return;

	/* count the length of the chanq for statistics */
	counter = 0;
	list_for_each(l, &block->ccw_queue)
		if (++counter >= 31)
			break;
	dasd_global_profile.dasd_io_nr_req[counter]++;
	block->profile.dasd_io_nr_req[counter]++;
}

/*
 * Add profiling information for cqr after execution.
 */
static void dasd_profile_end(struct dasd_block *block,
			     struct dasd_ccw_req *cqr,
			     struct request *req)
{
	long strtime, irqtime, endtime, tottime;	/* in microseconds */
	long tottimeps, sectors;

	if (dasd_profile_level != DASD_PROFILE_ON)
		return;

	sectors = req->nr_sectors;
	if (!cqr->buildclk || !cqr->startclk ||
	    !cqr->stopclk || !cqr->endclk ||
	    !sectors)
		return;

	strtime = ((cqr->startclk - cqr->buildclk) >> 12);
	irqtime = ((cqr->stopclk - cqr->startclk) >> 12);
	endtime = ((cqr->endclk - cqr->stopclk) >> 12);
	tottime = ((cqr->endclk - cqr->buildclk) >> 12);
	tottimeps = tottime / sectors;

	if (!dasd_global_profile.dasd_io_reqs)
		memset(&dasd_global_profile, 0,
		       sizeof(struct dasd_profile_info_t));
	dasd_global_profile.dasd_io_reqs++;
	dasd_global_profile.dasd_io_sects += sectors;

	if (!block->profile.dasd_io_reqs)
		memset(&block->profile, 0,
		       sizeof(struct dasd_profile_info_t));
	block->profile.dasd_io_reqs++;
	block->profile.dasd_io_sects += sectors;

	dasd_profile_counter(sectors, dasd_io_secs, block);
	dasd_profile_counter(tottime, dasd_io_times, block);
	dasd_profile_counter(tottimeps, dasd_io_timps, block);
	dasd_profile_counter(strtime, dasd_io_time1, block);
	dasd_profile_counter(irqtime, dasd_io_time2, block);
	dasd_profile_counter(irqtime / sectors, dasd_io_time2ps, block);
	dasd_profile_counter(endtime, dasd_io_time3, block);
}
#else
#define dasd_profile_start(block, cqr, req) do {} while (0)
#define dasd_profile_end(block, cqr, req) do {} while (0)
#endif /* CONFIG_DASD_PROFILE */

/*
 * Allocate memory for a channel program with 'cplength' channel
 * command words and 'datasize' additional space. There are two
 * variants: 1) dasd_kmalloc_request uses kmalloc to get the needed
 * memory and 2) dasd_smalloc_request uses the static ccw memory
 * that gets allocated for each device.
 */
struct dasd_ccw_req *dasd_kmalloc_request(char *magic, int cplength,
					  int datasize,
					  struct dasd_device *device)
{
	struct dasd_ccw_req *cqr;

	/* Sanity checks */
	BUG_ON( magic == NULL || datasize > PAGE_SIZE ||
	     (cplength*sizeof(struct ccw1)) > PAGE_SIZE);

	cqr = kzalloc(sizeof(struct dasd_ccw_req), GFP_ATOMIC);
	if (cqr == NULL)
		return ERR_PTR(-ENOMEM);
	cqr->cpaddr = NULL;
	if (cplength > 0) {
		cqr->cpaddr = kcalloc(cplength, sizeof(struct ccw1),
				      GFP_ATOMIC | GFP_DMA);
		if (cqr->cpaddr == NULL) {
			kfree(cqr);
			return ERR_PTR(-ENOMEM);
		}
	}
	cqr->data = NULL;
	if (datasize > 0) {
		cqr->data = kzalloc(datasize, GFP_ATOMIC | GFP_DMA);
		if (cqr->data == NULL) {
			kfree(cqr->cpaddr);
			kfree(cqr);
			return ERR_PTR(-ENOMEM);
		}
	}
	strncpy((char *) &cqr->magic, magic, 4);
	ASCEBC((char *) &cqr->magic, 4);
	set_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags);
	dasd_get_device(device);
	return cqr;
}

struct dasd_ccw_req *dasd_smalloc_request(char *magic, int cplength,
					  int datasize,
					  struct dasd_device *device)
{
	unsigned long flags;
	struct dasd_ccw_req *cqr;
	char *data;
	int size;

	/* Sanity checks */
	BUG_ON( magic == NULL || datasize > PAGE_SIZE ||
	     (cplength*sizeof(struct ccw1)) > PAGE_SIZE);

	size = (sizeof(struct dasd_ccw_req) + 7L) & -8L;
	if (cplength > 0)
		size += cplength * sizeof(struct ccw1);
	if (datasize > 0)
		size += datasize;
	spin_lock_irqsave(&device->mem_lock, flags);
	cqr = (struct dasd_ccw_req *)
		dasd_alloc_chunk(&device->ccw_chunks, size);
	spin_unlock_irqrestore(&device->mem_lock, flags);
	if (cqr == NULL)
		return ERR_PTR(-ENOMEM);
	memset(cqr, 0, sizeof(struct dasd_ccw_req));
	data = (char *) cqr + ((sizeof(struct dasd_ccw_req) + 7L) & -8L);
	cqr->cpaddr = NULL;
	if (cplength > 0) {
		cqr->cpaddr = (struct ccw1 *) data;
		data += cplength*sizeof(struct ccw1);
		memset(cqr->cpaddr, 0, cplength*sizeof(struct ccw1));
	}
	cqr->data = NULL;
	if (datasize > 0) {
		cqr->data = data;
		memset(cqr->data, 0, datasize);
	}
	strncpy((char *) &cqr->magic, magic, 4);
	ASCEBC((char *) &cqr->magic, 4);
	set_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags);
	dasd_get_device(device);
	return cqr;
}
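
/*
 * Editorial usage sketch, not part of the original source: requests obtained
 * with dasd_kmalloc_request must be released with dasd_kfree_request, and
 * requests obtained with dasd_smalloc_request with dasd_sfree_request, since
 * the latter pair operates on the per-device ccw_chunks pool set up in
 * dasd_alloc_device. A minimal, hypothetical caller looks like this:
 *
 *	cqr = dasd_smalloc_request(magic, cplength, datasize, device);
 *	if (IS_ERR(cqr))
 *		return PTR_ERR(cqr);
 *	... fill cqr->cpaddr (channel program) and cqr->data ...
 *	dasd_sfree_request(cqr, device);
 */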

/*
 * Free memory of a channel program. This function needs to free all the
 * idal lists that might have been created by dasd_set_cda and the
 * struct dasd_ccw_req itself.
 */
void dasd_kfree_request(struct dasd_ccw_req *cqr, struct dasd_device *device)
{
#ifdef CONFIG_64BIT
	struct ccw1 *ccw;

	/* Clear any idals used for the request. */
	ccw = cqr->cpaddr;
	do {
		clear_normalized_cda(ccw);
	} while (ccw++->flags & (CCW_FLAG_CC | CCW_FLAG_DC));
#endif
	kfree(cqr->cpaddr);
	kfree(cqr->data);
	kfree(cqr);
	dasd_put_device(device);
}

void dasd_sfree_request(struct dasd_ccw_req *cqr, struct dasd_device *device)
{
	unsigned long flags;

	spin_lock_irqsave(&device->mem_lock, flags);
	dasd_free_chunk(&device->ccw_chunks, cqr);
	spin_unlock_irqrestore(&device->mem_lock, flags);
	dasd_put_device(device);
}

/*
 * Check discipline magic in cqr.
 */
static inline int dasd_check_cqr(struct dasd_ccw_req *cqr)
{
	struct dasd_device *device;

	if (cqr == NULL)
		return -EINVAL;
	device = cqr->startdev;
	if (strncmp((char *) &cqr->magic, device->discipline->ebcname, 4)) {
		DEV_MESSAGE(KERN_WARNING, device,
			    " dasd_ccw_req 0x%08x magic doesn't match"
			    " discipline 0x%08x",
			    cqr->magic,
			    *(unsigned int *) device->discipline->name);
		return -EINVAL;
	}
	return 0;
}

/*
 * Terminate the current i/o and set the request to clear_pending.
 * Timer keeps device running.
 * ccw_device_clear can fail if the i/o subsystem
 * is in a bad mood.
 */
int dasd_term_IO(struct dasd_ccw_req *cqr)
{
	struct dasd_device *device;
	int retries, rc;

	/* Check the cqr */
	rc = dasd_check_cqr(cqr);
	if (rc)
		return rc;
	retries = 0;
	device = (struct dasd_device *) cqr->startdev;
	while ((retries < 5) && (cqr->status == DASD_CQR_IN_IO)) {
		rc = ccw_device_clear(device->cdev, (long) cqr);
		switch (rc) {
		case 0:	/* termination successful */
			cqr->retries--;
			cqr->status = DASD_CQR_CLEAR_PENDING;
			cqr->stopclk = get_clock();
			cqr->starttime = 0;
			DBF_DEV_EVENT(DBF_DEBUG, device,
				      "terminate cqr %p successful",
				      cqr);
			break;
		case -ENODEV:
			DBF_DEV_EVENT(DBF_ERR, device, "%s",
				      "device gone, retry");
			break;
		case -EIO:
			DBF_DEV_EVENT(DBF_ERR, device, "%s",
				      "I/O error, retry");
			break;
		case -EINVAL:
		case -EBUSY:
			DBF_DEV_EVENT(DBF_ERR, device, "%s",
				      "device busy, retry later");
			break;
		default:
			DEV_MESSAGE(KERN_ERR, device,
				    "line %d unknown RC=%d, please "
				    "report to linux390@de.ibm.com",
				    __LINE__, rc);
			BUG();
			break;
		}
		retries++;
	}
	dasd_schedule_device_bh(device);
	return rc;
}

/*
 * Start the i/o. This start_IO can fail if the channel is really busy.
 * In that case set up a timer to start the request later.
 */
int dasd_start_IO(struct dasd_ccw_req *cqr)
{
	struct dasd_device *device;
	int rc;

	/* Check the cqr */
	rc = dasd_check_cqr(cqr);
	if (rc)
		return rc;
	device = (struct dasd_device *) cqr->startdev;
	if (cqr->retries < 0) {
		DEV_MESSAGE(KERN_DEBUG, device,
			    "start_IO: request %p (%02x/%i) - no retry left.",
			    cqr, cqr->status, cqr->retries);
		cqr->status = DASD_CQR_ERROR;
		return -EIO;
	}
	cqr->startclk = get_clock();
	cqr->starttime = jiffies;
	cqr->retries--;
	rc = ccw_device_start(device->cdev, cqr->cpaddr, (long) cqr,
			      cqr->lpm, 0);
	switch (rc) {
	case 0:
		cqr->status = DASD_CQR_IN_IO;
		DBF_DEV_EVENT(DBF_DEBUG, device,
			      "start_IO: request %p started successful",
			      cqr);
		break;
	case -EBUSY:
		DBF_DEV_EVENT(DBF_ERR, device, "%s",
			      "start_IO: device busy, retry later");
		break;
	case -ETIMEDOUT:
		DBF_DEV_EVENT(DBF_ERR, device, "%s",
			      "start_IO: request timeout, retry later");
		break;
	case -EACCES:
		/* -EACCES indicates that the request used only a
		 * subset of the available paths and all these
		 * paths are gone.
		 * Do a retry with all available paths.
		 */
		cqr->lpm = LPM_ANYPATH;
		DBF_DEV_EVENT(DBF_ERR, device, "%s",
			      "start_IO: selected pathes gone,"
			      " retry on all pathes");
		break;
	case -ENODEV:
	case -EIO:
		DBF_DEV_EVENT(DBF_ERR, device, "%s",
			      "start_IO: device gone, retry");
		break;
	default:
		DEV_MESSAGE(KERN_ERR, device,
			    "line %d unknown RC=%d, please report"
			    " to linux390@de.ibm.com", __LINE__, rc);
		BUG();
		break;
	}
	return rc;
}

/*
 * Timeout function for dasd devices. This is used for different purposes
 * 1) missing interrupt handler for normal operation
 * 2) delayed start of request where start_IO failed with -EBUSY
 * 3) timeout for missing state change interrupts
 * The head of the ccw queue will have status DASD_CQR_IN_IO for 1),
 * DASD_CQR_QUEUED for 2) and 3).
 */
static void dasd_device_timeout(unsigned long ptr)
{
	unsigned long flags;
	struct dasd_device *device;

	device = (struct dasd_device *) ptr;
	spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
	/* re-activate request queue */
	device->stopped &= ~DASD_STOPPED_PENDING;
	spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
	dasd_schedule_device_bh(device);
}

/*
 * Setup timeout for a device in jiffies.
 */
void dasd_device_set_timer(struct dasd_device *device, int expires)
{
	if (expires == 0) {
		if (timer_pending(&device->timer))
			del_timer(&device->timer);
		return;
	}
	if (timer_pending(&device->timer)) {
		if (mod_timer(&device->timer, jiffies + expires))
			return;
	}
	device->timer.function = dasd_device_timeout;
	device->timer.data = (unsigned long) device;
	device->timer.expires = jiffies + expires;
	add_timer(&device->timer);
}

/*
 * Clear timeout for a device.
 */
void dasd_device_clear_timer(struct dasd_device *device)
{
	if (timer_pending(&device->timer))
		del_timer(&device->timer);
}

static void dasd_handle_killed_request(struct ccw_device *cdev,
				       unsigned long intparm)
{
	struct dasd_ccw_req *cqr;
	struct dasd_device *device;

	if (!intparm)
		return;
	cqr = (struct dasd_ccw_req *) intparm;
	if (cqr->status != DASD_CQR_IN_IO) {
		MESSAGE(KERN_DEBUG,
			"invalid status in handle_killed_request: "
			"bus_id %s, status %02x",
			cdev->dev.bus_id, cqr->status);
		return;
	}

	device = (struct dasd_device *) cqr->startdev;
	if (device == NULL ||
	    device != dasd_device_from_cdev_locked(cdev) ||
	    strncmp(device->discipline->ebcname, (char *) &cqr->magic, 4)) {
		MESSAGE(KERN_DEBUG, "invalid device in request: bus_id %s",
			cdev->dev.bus_id);
		return;
	}

	/* Schedule request to be retried. */
	cqr->status = DASD_CQR_QUEUED;

	dasd_device_clear_timer(device);
	dasd_schedule_device_bh(device);
	dasd_put_device(device);
}

void dasd_generic_handle_state_change(struct dasd_device *device)
{
	/* First of all start sense subsystem status request. */
	dasd_eer_snss(device);

	device->stopped &= ~DASD_STOPPED_PENDING;
	dasd_schedule_device_bh(device);
	if (device->block)
		dasd_schedule_block_bh(device->block);
}

/*
 * Interrupt handler for "normal" ssch-io based dasd devices.
 */
void dasd_int_handler(struct ccw_device *cdev, unsigned long intparm,
		      struct irb *irb)
{
	struct dasd_ccw_req *cqr, *next;
	struct dasd_device *device;
	unsigned long long now;
	int expires;

	if (IS_ERR(irb)) {
		switch (PTR_ERR(irb)) {
		case -EIO:
			break;
		case -ETIMEDOUT:
			printk(KERN_WARNING"%s(%s): request timed out\n",
			       __func__, cdev->dev.bus_id);
			break;
		default:
			printk(KERN_WARNING"%s(%s): unknown error %ld\n",
			       __func__, cdev->dev.bus_id, PTR_ERR(irb));
		}
		dasd_handle_killed_request(cdev, intparm);
		return;
	}

	now = get_clock();

	DBF_EVENT(DBF_ERR, "Interrupt: bus_id %s CS/DS %04x ip %08x",
		  cdev->dev.bus_id, ((irb->scsw.cstat<<8)|irb->scsw.dstat),
		  (unsigned int) intparm);

	/* check for unsolicited interrupts */
	cqr = (struct dasd_ccw_req *) intparm;
	if (!cqr || ((irb->scsw.cc == 1) &&
		     (irb->scsw.fctl & SCSW_FCTL_START_FUNC) &&
		     (irb->scsw.stctl & SCSW_STCTL_STATUS_PEND)) ) {
		if (cqr && cqr->status == DASD_CQR_IN_IO)
			cqr->status = DASD_CQR_QUEUED;
		device = dasd_device_from_cdev_locked(cdev);
		if (!IS_ERR(device)) {
			dasd_device_clear_timer(device);
			device->discipline->handle_unsolicited_interrupt(device,
									 irb);
			dasd_put_device(device);
		}
		return;
	}

	device = (struct dasd_device *) cqr->startdev;
	if (!device ||
	    strncmp(device->discipline->ebcname, (char *) &cqr->magic, 4)) {
		MESSAGE(KERN_DEBUG, "invalid device in request: bus_id %s",
			cdev->dev.bus_id);
		return;
	}

	/* Check for clear pending */
	if (cqr->status == DASD_CQR_CLEAR_PENDING &&
	    irb->scsw.fctl & SCSW_FCTL_CLEAR_FUNC) {
		cqr->status = DASD_CQR_CLEARED;
		dasd_device_clear_timer(device);
		wake_up(&dasd_flush_wq);
		dasd_schedule_device_bh(device);
		return;
	}

	/* check status - the request might have been killed by dyn detach */
	if (cqr->status != DASD_CQR_IN_IO) {
		MESSAGE(KERN_DEBUG,
			"invalid status: bus_id %s, status %02x",
			cdev->dev.bus_id, cqr->status);
		return;
	}
	DBF_DEV_EVENT(DBF_DEBUG, device, "Int: CS/DS 0x%04x for cqr %p",
		      ((irb->scsw.cstat << 8) | irb->scsw.dstat), cqr);
	next = NULL;
	expires = 0;
	if (irb->scsw.dstat == (DEV_STAT_CHN_END | DEV_STAT_DEV_END) &&
	    irb->scsw.cstat == 0 && !irb->esw.esw0.erw.cons) {
		/* request was completed successfully */
		cqr->status = DASD_CQR_SUCCESS;
		cqr->stopclk = now;
		/* Start first request on queue if possible -> fast_io. */
		if (cqr->devlist.next != &device->ccw_queue) {
			next = list_entry(cqr->devlist.next,
					  struct dasd_ccw_req, devlist);
		}
	} else { /* error */
		memcpy(&cqr->irb, irb, sizeof(struct irb));
		if (device->features & DASD_FEATURE_ERPLOG) {
			dasd_log_sense(cqr, irb);
		}
		/*
		 * If we don't want complex ERP for this request, then just
		 * reset this and retry it in the fastpath
		 */
		if (!test_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags) &&
		    cqr->retries > 0) {
			DEV_MESSAGE(KERN_DEBUG, device,
				    "default ERP in fastpath (%i retries left)",
				    cqr->retries);
			cqr->lpm = LPM_ANYPATH;
			cqr->status = DASD_CQR_QUEUED;
			next = cqr;
		} else
			cqr->status = DASD_CQR_ERROR;
	}
	if (next && (next->status == DASD_CQR_QUEUED) &&
	    (!device->stopped)) {
		if (device->discipline->start_IO(next) == 0)
			expires = next->expires;
		else
			DEV_MESSAGE(KERN_DEBUG, device, "%s",
				    "Interrupt fastpath "
				    "failed!");
	}
	if (expires != 0)
		dasd_device_set_timer(device, expires);
	else
		dasd_device_clear_timer(device);
	dasd_schedule_device_bh(device);
}

/*
 * If we have an error on a dasd_block layer request then we cancel
 * and return all further requests from the same dasd_block as well.
 */
static void __dasd_device_recovery(struct dasd_device *device,
				   struct dasd_ccw_req *ref_cqr)
{
	struct list_head *l, *n;
	struct dasd_ccw_req *cqr;

	/*
	 * only requeue request that came from the dasd_block layer
	 */
	if (!ref_cqr->block)
		return;

	list_for_each_safe(l, n, &device->ccw_queue) {
		cqr = list_entry(l, struct dasd_ccw_req, devlist);
		if (cqr->status == DASD_CQR_QUEUED &&
		    ref_cqr->block == cqr->block) {
			cqr->status = DASD_CQR_CLEARED;
		}
	}
};

/*
 * Remove those ccw requests from the queue that need to be returned
 * to the upper layer.
 */
static void __dasd_device_process_ccw_queue(struct dasd_device *device,
					    struct list_head *final_queue)
{
	struct list_head *l, *n;
	struct dasd_ccw_req *cqr;

	/* Process request with final status. */
	list_for_each_safe(l, n, &device->ccw_queue) {
		cqr = list_entry(l, struct dasd_ccw_req, devlist);

		/* Stop list processing at the first non-final request. */
		if (cqr->status == DASD_CQR_QUEUED ||
		    cqr->status == DASD_CQR_IN_IO ||
		    cqr->status == DASD_CQR_CLEAR_PENDING)
			break;
		if (cqr->status == DASD_CQR_ERROR) {
			__dasd_device_recovery(device, cqr);
		}
		/* Rechain finished requests to final queue */
		list_move_tail(&cqr->devlist, final_queue);
	}
}

/*
 * the cqrs from the final queue are returned to the upper layer
 * by setting a dasd_block state and calling the callback function
 */
static void __dasd_device_process_final_queue(struct dasd_device *device,
					      struct list_head *final_queue)
{
	struct list_head *l, *n;
	struct dasd_ccw_req *cqr;
	struct dasd_block *block;
	void (*callback)(struct dasd_ccw_req *, void *data);
	void *callback_data;

	list_for_each_safe(l, n, final_queue) {
		cqr = list_entry(l, struct dasd_ccw_req, devlist);
		list_del_init(&cqr->devlist);
		block = cqr->block;
		callback = cqr->callback;
		callback_data = cqr->callback_data;
		if (block)
			spin_lock_bh(&block->queue_lock);
		switch (cqr->status) {
		case DASD_CQR_SUCCESS:
			cqr->status = DASD_CQR_DONE;
			break;
		case DASD_CQR_ERROR:
			cqr->status = DASD_CQR_NEED_ERP;
			break;
		case DASD_CQR_CLEARED:
			cqr->status = DASD_CQR_TERMINATED;
			break;
		default:
			DEV_MESSAGE(KERN_ERR, device,
				    "wrong cqr status in __dasd_process_final_queue "
				    "for cqr %p, status %x",
				    cqr, cqr->status);
			BUG();
		}
		if (cqr->callback != NULL)
			(callback)(cqr, callback_data);
		if (block)
			spin_unlock_bh(&block->queue_lock);
	}
}

/*
 * Take a look at the first request on the ccw queue and check
 * if it reached its expire time. If so, terminate the IO.
 */
static void __dasd_device_check_expire(struct dasd_device *device)
{
	struct dasd_ccw_req *cqr;

	if (list_empty(&device->ccw_queue))
		return;
	cqr = list_entry(device->ccw_queue.next, struct dasd_ccw_req, devlist);
	if ((cqr->status == DASD_CQR_IN_IO && cqr->expires != 0) &&
	    (time_after_eq(jiffies, cqr->expires + cqr->starttime))) {
		if (device->discipline->term_IO(cqr) != 0) {
			/* Hmpf, try again in 5 sec */
			DEV_MESSAGE(KERN_ERR, device,
				    "internal error - timeout (%is) expired "
				    "for cqr %p, termination failed, "
				    "retrying in 5s",
				    (cqr->expires/HZ), cqr);
			cqr->expires += 5*HZ;
			dasd_device_set_timer(device, 5*HZ);
		} else {
			DEV_MESSAGE(KERN_ERR, device,
				    "internal error - timeout (%is) expired "
				    "for cqr %p (%i retries left)",
				    (cqr->expires/HZ), cqr, cqr->retries);
		}
	}
}

/*
 * Take a look at the first request on the ccw queue and check
 * if it needs to be started.
 */
static void __dasd_device_start_head(struct dasd_device *device)
{
	struct dasd_ccw_req *cqr;
	int rc;

	if (list_empty(&device->ccw_queue))
		return;
	cqr = list_entry(device->ccw_queue.next, struct dasd_ccw_req, devlist);
	if (cqr->status != DASD_CQR_QUEUED)
		return;
	/* when device is stopped, return request to previous layer */
	if (device->stopped) {
		cqr->status = DASD_CQR_CLEARED;
		dasd_schedule_device_bh(device);
		return;
	}

	rc = device->discipline->start_IO(cqr);
	if (rc == 0)
		dasd_device_set_timer(device, cqr->expires);
	else if (rc == -EACCES) {
		dasd_schedule_device_bh(device);
	} else
		/* Hmpf, try again in 1/2 sec */
		dasd_device_set_timer(device, 50);
}

/*
 * Go through all requests on the dasd_device request queue,
 * terminate them on the cdev if necessary, and return them to the
 * submitting layer via callback.
 * Note:
 * Make sure that all 'submitting layers' still exist when
 * this function is called! In other words, when 'device' is a base
 * device then all block layer requests must have been removed before
 * via dasd_flush_block_queue.
 */
int dasd_flush_device_queue(struct dasd_device *device)
{
	struct dasd_ccw_req *cqr, *n;
	int rc;
	struct list_head flush_queue;

	INIT_LIST_HEAD(&flush_queue);
	spin_lock_irq(get_ccwdev_lock(device->cdev));
	rc = 0;
	list_for_each_entry_safe(cqr, n, &device->ccw_queue, devlist) {
		/* Check status and move request to flush_queue */
		switch (cqr->status) {
		case DASD_CQR_IN_IO:
			rc = device->discipline->term_IO(cqr);
			if (rc) {
				/* unable to terminate request */
				DEV_MESSAGE(KERN_ERR, device,
					    "dasd flush ccw_queue is unable "
					    " to terminate request %p",
					    cqr);
				/* stop flush processing */
				goto finished;
			}
			break;
		case DASD_CQR_QUEUED:
			cqr->stopclk = get_clock();
			cqr->status = DASD_CQR_CLEARED;
			break;
		default: /* no need to modify the others */
			break;
		}
		list_move_tail(&cqr->devlist, &flush_queue);
	}
finished:
	spin_unlock_irq(get_ccwdev_lock(device->cdev));
	/*
	 * After this point all requests must be in state CLEAR_PENDING,
	 * CLEARED, SUCCESS or ERROR. Now wait for CLEAR_PENDING to become
	 * one of the others.
	 */
	list_for_each_entry_safe(cqr, n, &flush_queue, devlist)
		wait_event(dasd_flush_wq,
			   (cqr->status != DASD_CQR_CLEAR_PENDING));
	/*
	 * Now set each request back to TERMINATED, DONE or NEED_ERP
	 * and call the callback function of flushed requests
	 */
	__dasd_device_process_final_queue(device, &flush_queue);
	return rc;
}

/*
 * Acquire the device lock and process queues for the device.
 */
static void dasd_device_tasklet(struct dasd_device *device)
{
	struct list_head final_queue;

	atomic_set (&device->tasklet_scheduled, 0);
	INIT_LIST_HEAD(&final_queue);
	spin_lock_irq(get_ccwdev_lock(device->cdev));
	/* Check expire time of first request on the ccw queue. */
	__dasd_device_check_expire(device);
	/* find final requests on ccw queue */
	__dasd_device_process_ccw_queue(device, &final_queue);
	spin_unlock_irq(get_ccwdev_lock(device->cdev));
	/* Now call the callback function of requests with final status */
	__dasd_device_process_final_queue(device, &final_queue);
	spin_lock_irq(get_ccwdev_lock(device->cdev));
	/* Now check if the head of the ccw queue needs to be started. */
	__dasd_device_start_head(device);
	spin_unlock_irq(get_ccwdev_lock(device->cdev));
	dasd_put_device(device);
}

/*
 * Schedules a call to dasd_tasklet over the device tasklet.
 */
void dasd_schedule_device_bh(struct dasd_device *device)
{
	/* Protect against rescheduling. */
	if (atomic_cmpxchg (&device->tasklet_scheduled, 0, 1) != 0)
		return;
	dasd_get_device(device);
	tasklet_hi_schedule(&device->tasklet);
}

/*
 * Queue a request to the head of the device ccw_queue.
 * Start the I/O if possible.
 */
void dasd_add_request_head(struct dasd_ccw_req *cqr)
{
	struct dasd_device *device;
	unsigned long flags;

	device = cqr->startdev;
	spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
	cqr->status = DASD_CQR_QUEUED;
	list_add(&cqr->devlist, &device->ccw_queue);
	/* let the bh start the request to keep them in order */
	dasd_schedule_device_bh(device);
	spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
}

/*
 * Queue a request to the tail of the device ccw_queue.
 * Start the I/O if possible.
 */
void dasd_add_request_tail(struct dasd_ccw_req *cqr)
{
	struct dasd_device *device;
	unsigned long flags;

	device = cqr->startdev;
	spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
	cqr->status = DASD_CQR_QUEUED;
	list_add_tail(&cqr->devlist, &device->ccw_queue);
	/* let the bh start the request to keep them in order */
	dasd_schedule_device_bh(device);
	spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
}

/*
 * Wakeup helper for the 'sleep_on' functions.
 */
static void dasd_wakeup_cb(struct dasd_ccw_req *cqr, void *data)
{
	wake_up((wait_queue_head_t *) data);
}

static inline int _wait_for_wakeup(struct dasd_ccw_req *cqr)
{
	struct dasd_device *device;
	int rc;

	device = cqr->startdev;
	spin_lock_irq(get_ccwdev_lock(device->cdev));
	rc = ((cqr->status == DASD_CQR_DONE ||
	       cqr->status == DASD_CQR_NEED_ERP ||
	       cqr->status == DASD_CQR_TERMINATED) &&
	      list_empty(&cqr->devlist));
	spin_unlock_irq(get_ccwdev_lock(device->cdev));
	return rc;
}

/*
 * Queue a request to the tail of the device ccw_queue and wait for
 * its completion.
 */
int dasd_sleep_on(struct dasd_ccw_req *cqr)
{
	struct dasd_device *device;
	int rc;

	device = cqr->startdev;

	cqr->callback = dasd_wakeup_cb;
	cqr->callback_data = (void *) &generic_waitq;
	dasd_add_request_tail(cqr);
	wait_event(generic_waitq, _wait_for_wakeup(cqr));

	/* Request status is either done or failed. */
	rc = (cqr->status == DASD_CQR_DONE) ? 0 : -EIO;
	return rc;
}
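
/*
 * Editorial usage sketch, not part of the original source; the pattern below
 * mirrors how discipline drivers typically use the synchronous interface
 * (field names as used elsewhere in this file):
 *
 *	cqr->startdev = device;
 *	cqr->buildclk = get_clock();
 *	cqr->status = DASD_CQR_FILLED;
 *	rc = dasd_sleep_on(cqr);
 *	dasd_sfree_request(cqr, device);
 *
 * dasd_sleep_on returns 0 only if the request completed with DASD_CQR_DONE,
 * otherwise -EIO.
 */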

/*
 * Queue a request to the tail of the device ccw_queue and wait
 * interruptible for its completion.
 */
int dasd_sleep_on_interruptible(struct dasd_ccw_req *cqr)
{
	struct dasd_device *device;
	int rc;

	device = cqr->startdev;
	cqr->callback = dasd_wakeup_cb;
	cqr->callback_data = (void *) &generic_waitq;
	dasd_add_request_tail(cqr);
	rc = wait_event_interruptible(generic_waitq, _wait_for_wakeup(cqr));
	if (rc == -ERESTARTSYS) {
		dasd_cancel_req(cqr);
		/* wait (non-interruptible) for final status */
		wait_event(generic_waitq, _wait_for_wakeup(cqr));
	}
	rc = (cqr->status == DASD_CQR_DONE) ? 0 : -EIO;
	return rc;
}

/*
 * Whoa nelly now it gets really hairy. For some functions (e.g. steal lock
 * for eckd devices) the currently running request has to be terminated
 * and be put back to status queued, before the special request is added
 * to the head of the queue. Then the special request is waited on normally.
 */
static inline int _dasd_term_running_cqr(struct dasd_device *device)
{
	struct dasd_ccw_req *cqr;

	if (list_empty(&device->ccw_queue))
		return 0;
	cqr = list_entry(device->ccw_queue.next, struct dasd_ccw_req, devlist);
	return device->discipline->term_IO(cqr);
}

int dasd_sleep_on_immediatly(struct dasd_ccw_req *cqr)
{
	struct dasd_device *device;
	int rc;

	device = cqr->startdev;
	spin_lock_irq(get_ccwdev_lock(device->cdev));
	rc = _dasd_term_running_cqr(device);
	if (rc) {
		spin_unlock_irq(get_ccwdev_lock(device->cdev));
		return rc;
	}

	cqr->callback = dasd_wakeup_cb;
	cqr->callback_data = (void *) &generic_waitq;
	cqr->status = DASD_CQR_QUEUED;
	list_add(&cqr->devlist, &device->ccw_queue);

	/* let the bh start the request to keep them in order */
	dasd_schedule_device_bh(device);

	spin_unlock_irq(get_ccwdev_lock(device->cdev));

	wait_event(generic_waitq, _wait_for_wakeup(cqr));

	/* Request status is either done or failed. */
	rc = (cqr->status == DASD_CQR_DONE) ? 0 : -EIO;
	return rc;
}

/*
 * Cancels a request that was started with dasd_sleep_on_req.
 * This is useful to timeout requests. The request will be
 * terminated if it is currently in i/o.
 * Returns 1 if the request has been terminated.
 *	   0 if there was no need to terminate the request (not started yet)
 *	   negative error code if termination failed
 * Cancellation of a request is an asynchronous operation! The calling
 * function has to wait until the request is properly returned via callback.
 */
int dasd_cancel_req(struct dasd_ccw_req *cqr)
{
	struct dasd_device *device = cqr->startdev;
	unsigned long flags;
	int rc;

	rc = 0;
	spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
	switch (cqr->status) {
	case DASD_CQR_QUEUED:
		/* request was not started - just set to cleared */
		cqr->status = DASD_CQR_CLEARED;
		break;
	case DASD_CQR_IN_IO:
		/* request in IO - terminate IO and release again */
		rc = device->discipline->term_IO(cqr);
		if (rc) {
			DEV_MESSAGE(KERN_ERR, device,
				    "dasd_cancel_req is unable "
				    " to terminate request %p, rc = %d",
				    cqr, rc);
		} else {
			cqr->stopclk = get_clock();
			rc = 1;
		}
		break;
	default: /* already finished or clear pending - do nothing */
		break;
	}
	spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
	dasd_schedule_device_bh(device);
	return rc;
}


/*
 * SECTION: Operations of the dasd_block layer.
 */

/*
 * Timeout function for dasd_block. This is used when the block layer
 * is waiting for something that may not come reliably (e.g. a state
 * change interrupt).
 */
static void dasd_block_timeout(unsigned long ptr)
{
	unsigned long flags;
	struct dasd_block *block;

	block = (struct dasd_block *) ptr;
	spin_lock_irqsave(get_ccwdev_lock(block->base->cdev), flags);
	/* re-activate request queue */
	block->base->stopped &= ~DASD_STOPPED_PENDING;
	spin_unlock_irqrestore(get_ccwdev_lock(block->base->cdev), flags);
	dasd_schedule_block_bh(block);
}

/*
 * Setup timeout for a dasd_block in jiffies.
 */
void dasd_block_set_timer(struct dasd_block *block, int expires)
{
	if (expires == 0) {
		if (timer_pending(&block->timer))
			del_timer(&block->timer);
		return;
	}
	if (timer_pending(&block->timer)) {
		if (mod_timer(&block->timer, jiffies + expires))
			return;
	}
	block->timer.function = dasd_block_timeout;
	block->timer.data = (unsigned long) block;
	block->timer.expires = jiffies + expires;
	add_timer(&block->timer);
}

/*
 * Clear timeout for a dasd_block.
 */
void dasd_block_clear_timer(struct dasd_block *block)
{
	if (timer_pending(&block->timer))
		del_timer(&block->timer);
}

/*
 * posts the buffer_cache about a finalized request
 */
static inline void dasd_end_request(struct request *req, int error)
{
	if (__blk_end_request(req, error, blk_rq_bytes(req)))
		BUG();
}

/*
 * Process finished error recovery ccw.
 */
static inline void __dasd_block_process_erp(struct dasd_block *block,
					    struct dasd_ccw_req *cqr)
{
	dasd_erp_fn_t erp_fn;
	struct dasd_device *device = block->base;

	if (cqr->status == DASD_CQR_DONE)
		DBF_DEV_EVENT(DBF_NOTICE, device, "%s", "ERP successful");
	else
		DEV_MESSAGE(KERN_ERR, device, "%s", "ERP unsuccessful");
	erp_fn = device->discipline->erp_postaction(cqr);
	erp_fn(cqr);
}

/*
 * Fetch requests from the block device queue.
 */
static void __dasd_process_request_queue(struct dasd_block *block)
{
	struct request_queue *queue;
	struct request *req;
	struct dasd_ccw_req *cqr;
	struct dasd_device *basedev;
	unsigned long flags;
	queue = block->request_queue;
	basedev = block->base;
	/* No queue ? Then there is nothing to do. */
	if (queue == NULL)
		return;

	/*
	 * We requeue requests from the block device queue to the ccw
	 * queue only in two states. In state DASD_STATE_READY the
	 * partition detection is done and we need to requeue requests
	 * for that. State DASD_STATE_ONLINE is normal block device
	 * operation.
	 */
	if (basedev->state < DASD_STATE_READY)
		return;
	/* Now we try to fetch requests from the request queue */
	while (!blk_queue_plugged(queue) &&
	       elv_next_request(queue)) {

		req = elv_next_request(queue);

		if (basedev->features & DASD_FEATURE_READONLY &&
		    rq_data_dir(req) == WRITE) {
			DBF_DEV_EVENT(DBF_ERR, basedev,
				      "Rejecting write request %p",
				      req);
			blkdev_dequeue_request(req);
			dasd_end_request(req, -EIO);
			continue;
		}
		cqr = basedev->discipline->build_cp(basedev, block, req);
		if (IS_ERR(cqr)) {
			if (PTR_ERR(cqr) == -EBUSY)
				break;	/* normal end condition */
			if (PTR_ERR(cqr) == -ENOMEM)
				break;	/* terminate request queue loop */
			if (PTR_ERR(cqr) == -EAGAIN) {
				/*
				 * The current request cannot be built right
				 * now, we have to try later. If this request
				 * is the head-of-queue we stop the device
				 * for 1/2 second.
				 */
				if (!list_empty(&block->ccw_queue))
					break;
				spin_lock_irqsave(get_ccwdev_lock(basedev->cdev), flags);
				basedev->stopped |= DASD_STOPPED_PENDING;
				spin_unlock_irqrestore(get_ccwdev_lock(basedev->cdev), flags);
				dasd_block_set_timer(block, HZ/2);
				break;
			}
			DBF_DEV_EVENT(DBF_ERR, basedev,
				      "CCW creation failed (rc=%ld) "
				      "on request %p",
				      PTR_ERR(cqr), req);
			blkdev_dequeue_request(req);
			dasd_end_request(req, -EIO);
			continue;
		}
		/*
		 * Note: callback is set to dasd_return_cqr_cb in
		 * __dasd_block_start_head to cover erp requests as well
		 */
		cqr->callback_data = (void *) req;
		cqr->status = DASD_CQR_FILLED;
		blkdev_dequeue_request(req);
		list_add_tail(&cqr->blocklist, &block->ccw_queue);
		dasd_profile_start(block, cqr, req);
	}
}
1701static void __dasd_cleanup_cqr(struct dasd_ccw_req *cqr)
1702{
1703 struct request *req;
1704 int status;
4c4e2148 1705 int error = 0;
8e09f215
SW
1706
1707 req = (struct request *) cqr->callback_data;
1708 dasd_profile_end(cqr->block, cqr, req);
fe6b8e76 1709 status = cqr->block->base->discipline->free_cp(cqr, req);
4c4e2148
KU
1710 if (status <= 0)
1711 error = status ? status : -EIO;
1712 dasd_end_request(req, error);
8e09f215
SW
1713}
1714
1715/*
1716 * Process ccw request queue.
1717 */
1718static void __dasd_process_block_ccw_queue(struct dasd_block *block,
1719 struct list_head *final_queue)
1720{
1721 struct list_head *l, *n;
1722 struct dasd_ccw_req *cqr;
1723 dasd_erp_fn_t erp_fn;
1724 unsigned long flags;
1725 struct dasd_device *base = block->base;
1726
1727restart:
1728 /* Process request with final status. */
1729 list_for_each_safe(l, n, &block->ccw_queue) {
1730 cqr = list_entry(l, struct dasd_ccw_req, blocklist);
1731 if (cqr->status != DASD_CQR_DONE &&
1732 cqr->status != DASD_CQR_FAILED &&
1733 cqr->status != DASD_CQR_NEED_ERP &&
1734 cqr->status != DASD_CQR_TERMINATED)
1735 continue;
1736
1737 if (cqr->status == DASD_CQR_TERMINATED) {
1738 base->discipline->handle_terminated_request(cqr);
1739 goto restart;
1740 }
1741
1742 /* Process requests that may be recovered */
1743 if (cqr->status == DASD_CQR_NEED_ERP) {
6c5f57c7
SH
1744 erp_fn = base->discipline->erp_action(cqr);
1745 erp_fn(cqr);
8e09f215
SW
1746 goto restart;
1747 }
1748
1749 /* First of all call extended error reporting. */
1750 if (dasd_eer_enabled(base) &&
1751 cqr->status == DASD_CQR_FAILED) {
1752 dasd_eer_write(base, cqr, DASD_EER_FATALERROR);
1753
1754 /* restart request */
1755 cqr->status = DASD_CQR_FILLED;
1756 cqr->retries = 255;
1757 spin_lock_irqsave(get_ccwdev_lock(base->cdev), flags);
1758 base->stopped |= DASD_STOPPED_QUIESCE;
1759 spin_unlock_irqrestore(get_ccwdev_lock(base->cdev),
1760 flags);
1761 goto restart;
1762 }
1763
1764 /* Process finished ERP request. */
1765 if (cqr->refers) {
1766 __dasd_block_process_erp(block, cqr);
1767 goto restart;
1768 }
1769
1770 /* Rechain finished requests to final queue */
1771 cqr->endclk = get_clock();
1772 list_move_tail(&cqr->blocklist, final_queue);
1773 }
1774}
1775
1776static void dasd_return_cqr_cb(struct dasd_ccw_req *cqr, void *data)
1777{
1778 dasd_schedule_block_bh(cqr->block);
1779}
1780
1781static void __dasd_block_start_head(struct dasd_block *block)
1782{
1783 struct dasd_ccw_req *cqr;
1784
1785 if (list_empty(&block->ccw_queue))
1786 return;
1787	/* We always begin with the first requests on the queue, as some
1788	 * previously started requests have to be enqueued on a
1789 * dasd_device again for error recovery.
1790 */
1791 list_for_each_entry(cqr, &block->ccw_queue, blocklist) {
1792 if (cqr->status != DASD_CQR_FILLED)
1793 continue;
1794 /* Non-temporary stop condition will trigger fail fast */
1795 if (block->base->stopped & ~DASD_STOPPED_PENDING &&
1796 test_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags) &&
1797 (!dasd_eer_enabled(block->base))) {
1798 cqr->status = DASD_CQR_FAILED;
1799 dasd_schedule_block_bh(block);
1800 continue;
1801 }
1802 /* Don't try to start requests if device is stopped */
1803 if (block->base->stopped)
1804 return;
1805
1806		/* just a fail-safe check; this should not happen */
1807 if (!cqr->startdev)
1808 cqr->startdev = block->base;
1809
1810 /* make sure that the requests we submit find their way back */
1811 cqr->callback = dasd_return_cqr_cb;
1812
1813 dasd_add_request_tail(cqr);
1814 }
1815}
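/*
 * Added commentary: the fail-fast test above can be read as a single
 * predicate.  This restatement is illustrative only; the helper name is
 * made up and the driver does not define such a function.
 */
#if 0
static int example_should_fail_fast(struct dasd_block *block,
				    struct dasd_ccw_req *cqr)
{
	/* only non-temporary stop conditions (anything but PENDING) count */
	return (block->base->stopped & ~DASD_STOPPED_PENDING) &&
		test_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags) &&
		!dasd_eer_enabled(block->base);
}
#endif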
1816
1817/*
1818 * Central dasd_block layer routine. Takes requests from the generic
1819 * block layer request queue, creates ccw requests, enqueues them on
1820 * a dasd_device and processes ccw requests that have been returned.
1821 */
1822static void dasd_block_tasklet(struct dasd_block *block)
1823{
1824 struct list_head final_queue;
1825 struct list_head *l, *n;
1826 struct dasd_ccw_req *cqr;
1827
1828 atomic_set(&block->tasklet_scheduled, 0);
1829 INIT_LIST_HEAD(&final_queue);
1830 spin_lock(&block->queue_lock);
1831 /* Finish off requests on ccw queue */
1832 __dasd_process_block_ccw_queue(block, &final_queue);
1833 spin_unlock(&block->queue_lock);
1834 /* Now call the callback function of requests with final status */
1835 spin_lock_irq(&block->request_queue_lock);
1836 list_for_each_safe(l, n, &final_queue) {
1837 cqr = list_entry(l, struct dasd_ccw_req, blocklist);
1838 list_del_init(&cqr->blocklist);
1839 __dasd_cleanup_cqr(cqr);
1840 }
1841 spin_lock(&block->queue_lock);
1842 /* Get new request from the block device request queue */
1843 __dasd_process_request_queue(block);
1844 /* Now check if the head of the ccw queue needs to be started. */
1845 __dasd_block_start_head(block);
1846 spin_unlock(&block->queue_lock);
1847 spin_unlock_irq(&block->request_queue_lock);
1848 dasd_put_device(block->base);
1849}
1850
1851static void _dasd_wake_block_flush_cb(struct dasd_ccw_req *cqr, void *data)
1852{
1853 wake_up(&dasd_flush_wq);
1854}
1855
1856/*
1857	 * Go through all requests on the dasd_block request queue, cancel them
1858 * on the respective dasd_device, and return them to the generic
1859 * block layer.
1860 */
1861static int dasd_flush_block_queue(struct dasd_block *block)
1862{
1863 struct dasd_ccw_req *cqr, *n;
1864 int rc, i;
1865 struct list_head flush_queue;
1866
1867 INIT_LIST_HEAD(&flush_queue);
1868 spin_lock_bh(&block->queue_lock);
1869 rc = 0;
1870restart:
1871 list_for_each_entry_safe(cqr, n, &block->ccw_queue, blocklist) {
1872		/* if this request is currently owned by a dasd_device, cancel it */
1873 if (cqr->status >= DASD_CQR_QUEUED)
1874 rc = dasd_cancel_req(cqr);
1875 if (rc < 0)
1876 break;
1877 /* Rechain request (including erp chain) so it won't be
1878 * touched by the dasd_block_tasklet anymore.
1879 * Replace the callback so we notice when the request
1880 * is returned from the dasd_device layer.
1881 */
1882 cqr->callback = _dasd_wake_block_flush_cb;
1883 for (i = 0; cqr != NULL; cqr = cqr->refers, i++)
1884 list_move_tail(&cqr->blocklist, &flush_queue);
1885 if (i > 1)
1886 /* moved more than one request - need to restart */
1887 goto restart;
1888 }
1889 spin_unlock_bh(&block->queue_lock);
1890 /* Now call the callback function of flushed requests */
1891restart_cb:
1892 list_for_each_entry_safe(cqr, n, &flush_queue, blocklist) {
1893 wait_event(dasd_flush_wq, (cqr->status < DASD_CQR_QUEUED));
1894 /* Process finished ERP request. */
1895 if (cqr->refers) {
1896 __dasd_block_process_erp(block, cqr);
1897 /* restart list_for_xx loop since dasd_process_erp
1898 * might remove multiple elements */
1899 goto restart_cb;
1900 }
1901 /* call the callback function */
1902 cqr->endclk = get_clock();
1903 list_del_init(&cqr->blocklist);
1904 __dasd_cleanup_cqr(cqr);
1da177e4 1905 }
1da177e4
LT
1906 return rc;
1907}
1908
1909/*
8e09f215
SW
1910	 * Schedules a run of the block tasklet (dasd_block_tasklet) for this block.
1911 */
1912void dasd_schedule_block_bh(struct dasd_block *block)
1913{
1914 /* Protect against rescheduling. */
1915 if (atomic_cmpxchg(&block->tasklet_scheduled, 0, 1) != 0)
1916 return;
1917	/* life cycle of block is bound to its base device */
1918 dasd_get_device(block->base);
1919 tasklet_hi_schedule(&block->tasklet);
1920}
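/*
 * Added commentary: dasd_schedule_block_bh() is called from interrupt
 * context via dasd_return_cqr_cb() above; the atomic_cmpxchg() collapses
 * concurrent wake-ups into a single tasklet run, and the reference taken
 * on block->base here is dropped at the end of dasd_block_tasklet().
 */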
1921
1922
1923/*
1924 * SECTION: external block device operations
1925 * (request queue handling, open, release, etc.)
1da177e4
LT
1926 */
1927
1928/*
1929 * Dasd request queue function. Called from ll_rw_blk.c
1930 */
8e09f215 1931static void do_dasd_request(struct request_queue *queue)
1da177e4 1932{
8e09f215 1933 struct dasd_block *block;
1da177e4 1934
8e09f215
SW
1935 block = queue->queuedata;
1936 spin_lock(&block->queue_lock);
1da177e4 1937 /* Get new request from the block device request queue */
8e09f215 1938 __dasd_process_request_queue(block);
1da177e4 1939 /* Now check if the head of the ccw queue needs to be started. */
8e09f215
SW
1940 __dasd_block_start_head(block);
1941 spin_unlock(&block->queue_lock);
1da177e4
LT
1942}
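/*
 * Added commentary: unlike dasd_block_tasklet(), do_dasd_request() does not
 * take block->request_queue_lock itself; the block layer already holds that
 * lock when it calls the request function, since it is the spinlock passed
 * to blk_init_queue() in dasd_alloc_queue() below.
 */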
1943
1944/*
1945 * Allocate and initialize request queue and default I/O scheduler.
1946 */
8e09f215 1947static int dasd_alloc_queue(struct dasd_block *block)
1da177e4
LT
1948{
1949 int rc;
1950
8e09f215
SW
1951 block->request_queue = blk_init_queue(do_dasd_request,
1952 &block->request_queue_lock);
1953 if (block->request_queue == NULL)
1da177e4
LT
1954 return -ENOMEM;
1955
8e09f215 1956 block->request_queue->queuedata = block;
1da177e4 1957
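	/*
	 * Added commentary: blk_init_queue() attaches the build-time default
	 * elevator; it is torn down here and replaced with "deadline", which
	 * in practice suits DASD better than schedulers that idle the queue
	 * waiting for adjacent requests.
	 */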
8e09f215 1958 elevator_exit(block->request_queue->elevator);
08a8a0c5 1959 block->request_queue->elevator = NULL;
8e09f215 1960 rc = elevator_init(block->request_queue, "deadline");
1da177e4 1961 if (rc) {
8e09f215 1962 blk_cleanup_queue(block->request_queue);
1da177e4
LT
1963 return rc;
1964 }
1965 return 0;
1966}
1967
1968/*
1969	 * Configure the limits of an already allocated request queue.
1970 */
8e09f215 1971static void dasd_setup_queue(struct dasd_block *block)
1da177e4
LT
1972{
1973 int max;
1974
8e09f215
SW
1975 blk_queue_hardsect_size(block->request_queue, block->bp_block);
1976 max = block->base->discipline->max_blocks << block->s2b_shift;
1977 blk_queue_max_sectors(block->request_queue, max);
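	/*
	 * Worked example (hypothetical numbers): with bp_block = 4096 the
	 * sectors-per-block shift s2b_shift is 3 (4096 / 512 = 8 sectors),
	 * so a discipline reporting max_blocks = 64 would limit requests to
	 * 64 << 3 = 512 sectors, i.e. 256 KiB per request.
	 */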
1978 blk_queue_max_phys_segments(block->request_queue, -1L);
1979 blk_queue_max_hw_segments(block->request_queue, -1L);
1980 blk_queue_max_segment_size(block->request_queue, -1L);
1981 blk_queue_segment_boundary(block->request_queue, -1L);
1982 blk_queue_ordered(block->request_queue, QUEUE_ORDERED_DRAIN, NULL);
1da177e4
LT
1983}
1984
1985/*
1986 * Deactivate and free request queue.
1987 */
8e09f215 1988static void dasd_free_queue(struct dasd_block *block)
1da177e4 1989{
8e09f215
SW
1990 if (block->request_queue) {
1991 blk_cleanup_queue(block->request_queue);
1992 block->request_queue = NULL;
1da177e4
LT
1993 }
1994}
1995
1996/*
1998	 * Flush requests on the request queue.
1998 */
8e09f215 1999static void dasd_flush_request_queue(struct dasd_block *block)
1da177e4
LT
2000{
2001 struct request *req;
2002
8e09f215 2003 if (!block->request_queue)
1da177e4 2004 return;
138c014d 2005
8e09f215
SW
2006 spin_lock_irq(&block->request_queue_lock);
2007 while ((req = elv_next_request(block->request_queue))) {
1da177e4 2008 blkdev_dequeue_request(req);
4c4e2148 2009 dasd_end_request(req, -EIO);
1da177e4 2010 }
8e09f215 2011 spin_unlock_irq(&block->request_queue_lock);
1da177e4
LT
2012}
2013
8e09f215 2014static int dasd_open(struct inode *inp, struct file *filp)
1da177e4
LT
2015{
2016 struct gendisk *disk = inp->i_bdev->bd_disk;
8e09f215
SW
2017 struct dasd_block *block = disk->private_data;
2018 struct dasd_device *base = block->base;
1da177e4
LT
2019 int rc;
2020
8e09f215
SW
2021 atomic_inc(&block->open_count);
2022 if (test_bit(DASD_FLAG_OFFLINE, &base->flags)) {
1da177e4
LT
2023 rc = -ENODEV;
2024 goto unlock;
2025 }
2026
8e09f215 2027 if (!try_module_get(base->discipline->owner)) {
1da177e4
LT
2028 rc = -EINVAL;
2029 goto unlock;
2030 }
2031
2032 if (dasd_probeonly) {
8e09f215 2033 DEV_MESSAGE(KERN_INFO, base, "%s",
1da177e4
LT
2034 "No access to device due to probeonly mode");
2035 rc = -EPERM;
2036 goto out;
2037 }
2038
8e09f215
SW
2039 if (base->state <= DASD_STATE_BASIC) {
2040 DBF_DEV_EVENT(DBF_ERR, base, " %s",
1da177e4
LT
2041 " Cannot open unrecognized device");
2042 rc = -ENODEV;
2043 goto out;
2044 }
2045
2046 return 0;
2047
2048out:
8e09f215 2049 module_put(base->discipline->owner);
1da177e4 2050unlock:
8e09f215 2051 atomic_dec(&block->open_count);
1da177e4
LT
2052 return rc;
2053}
2054
8e09f215 2055static int dasd_release(struct inode *inp, struct file *filp)
1da177e4
LT
2056{
2057 struct gendisk *disk = inp->i_bdev->bd_disk;
8e09f215 2058 struct dasd_block *block = disk->private_data;
1da177e4 2059
8e09f215
SW
2060 atomic_dec(&block->open_count);
2061 module_put(block->base->discipline->owner);
1da177e4
LT
2062 return 0;
2063}
2064
a885c8c4
CH
2065/*
2066 * Return disk geometry.
2067 */
8e09f215 2068static int dasd_getgeo(struct block_device *bdev, struct hd_geometry *geo)
a885c8c4 2069{
8e09f215
SW
2070 struct dasd_block *block;
2071 struct dasd_device *base;
a885c8c4 2072
8e09f215
SW
2073	block = bdev->bd_disk->private_data;
2074	if (!block)
2075		return -ENODEV;
2076	base = block->base;
2077
8e09f215
SW
2078 if (!base->discipline ||
2079 !base->discipline->fill_geometry)
a885c8c4
CH
2080 return -EINVAL;
2081
8e09f215
SW
2082 base->discipline->fill_geometry(block, geo);
2083 geo->start = get_start_sect(bdev) >> block->s2b_shift;
a885c8c4
CH
2084 return 0;
2085}
2086
1da177e4
LT
2087struct block_device_operations
2088dasd_device_operations = {
2089 .owner = THIS_MODULE,
2090 .open = dasd_open,
2091 .release = dasd_release,
2092 .ioctl = dasd_ioctl,
8262037f 2093 .compat_ioctl = dasd_compat_ioctl,
a885c8c4 2094 .getgeo = dasd_getgeo,
1da177e4
LT
2095};
2096
8e09f215
SW
2097/*******************************************************************************
2098 * end of block device operations
2099 */
1da177e4
LT
2100
2101static void
2102dasd_exit(void)
2103{
2104#ifdef CONFIG_PROC_FS
2105 dasd_proc_exit();
2106#endif
20c64468 2107 dasd_eer_exit();
6bb0e010
HH
2108 if (dasd_page_cache != NULL) {
2109 kmem_cache_destroy(dasd_page_cache);
2110 dasd_page_cache = NULL;
2111 }
1da177e4
LT
2112 dasd_gendisk_exit();
2113 dasd_devmap_exit();
1da177e4
LT
2114 if (dasd_debug_area != NULL) {
2115 debug_unregister(dasd_debug_area);
2116 dasd_debug_area = NULL;
2117 }
2118}
2119
2120/*
2121 * SECTION: common functions for ccw_driver use
2122 */
2123
1c01b8a5
HH
2124/*
2125	 * Initial attempt at a probe function. This can be simplified once
2126 * the other detection code is gone.
2127 */
8e09f215
SW
2128int dasd_generic_probe(struct ccw_device *cdev,
2129 struct dasd_discipline *discipline)
1da177e4
LT
2130{
2131 int ret;
2132
40545573
HH
2133 ret = ccw_device_set_options(cdev, CCWDEV_DO_PATHGROUP);
2134 if (ret) {
2135 printk(KERN_WARNING
2136 "dasd_generic_probe: could not set ccw-device options "
2137 "for %s\n", cdev->dev.bus_id);
2138 return ret;
2139 }
1da177e4
LT
2140 ret = dasd_add_sysfs_files(cdev);
2141 if (ret) {
2142 printk(KERN_WARNING
2143 "dasd_generic_probe: could not add sysfs entries "
2144 "for %s\n", cdev->dev.bus_id);
40545573 2145 return ret;
1da177e4 2146 }
40545573 2147 cdev->handler = &dasd_int_handler;
1da177e4 2148
40545573
HH
2149 /*
2150 * Automatically online either all dasd devices (dasd_autodetect)
2151 * or all devices specified with dasd= parameters during
2152 * initial probe.
2153 */
2154 if ((dasd_get_feature(cdev, DASD_FEATURE_INITIAL_ONLINE) > 0 ) ||
2155 (dasd_autodetect && dasd_busid_known(cdev->dev.bus_id) != 0))
2156 ret = ccw_device_set_online(cdev);
2157 if (ret)
2158 printk(KERN_WARNING
de3e0da1
SH
2159 "dasd_generic_probe: could not initially "
2160 "online ccw-device %s; return code: %d\n",
2161 cdev->dev.bus_id, ret);
2162 return 0;
1da177e4
LT
2163}
2164
1c01b8a5
HH
2165/*
2166 * This will one day be called from a global not_oper handler.
2167 * It is also used by driver_unregister during module unload.
2168 */
8e09f215 2169void dasd_generic_remove(struct ccw_device *cdev)
1da177e4
LT
2170{
2171 struct dasd_device *device;
8e09f215 2172 struct dasd_block *block;
1da177e4 2173
59afda78
HH
2174 cdev->handler = NULL;
2175
1da177e4
LT
2176 dasd_remove_sysfs_files(cdev);
2177 device = dasd_device_from_cdev(cdev);
2178 if (IS_ERR(device))
2179 return;
2180 if (test_and_set_bit(DASD_FLAG_OFFLINE, &device->flags)) {
2181 /* Already doing offline processing */
2182 dasd_put_device(device);
2183 return;
2184 }
2185 /*
2186	 * This device is removed unconditionally. Set the offline
2187	 * flag to prevent dasd_open from opening it while it is
2188	 * not quite down yet.
2189 */
2190 dasd_set_target_state(device, DASD_STATE_NEW);
2191 /* dasd_delete_device destroys the device reference. */
8e09f215
SW
2192 block = device->block;
2193 device->block = NULL;
1da177e4 2194 dasd_delete_device(device);
8e09f215
SW
2195 /*
2196	 * the life cycle of the block is bound to the device, so delete it
2197	 * after the device has been safely removed
2198 */
2199 if (block)
2200 dasd_free_block(block);
1da177e4
LT
2201}
2202
1c01b8a5
HH
2203/*
2204	 * Activate a device. This is called from dasd_{eckd,fba}_probe() either when
1da177e4 2205	 * the device is detected for the first time and is supposed to be used,
1c01b8a5
HH
2206	 * or when the user has started activation through sysfs.
2207 */
8e09f215
SW
2208int dasd_generic_set_online(struct ccw_device *cdev,
2209 struct dasd_discipline *base_discipline)
1da177e4 2210{
aa88861f 2211 struct dasd_discipline *discipline;
1da177e4 2212 struct dasd_device *device;
c6eb7b77 2213 int rc;
f24acd45 2214
40545573
HH
2215 /* first online clears initial online feature flag */
2216 dasd_set_feature(cdev, DASD_FEATURE_INITIAL_ONLINE, 0);
1da177e4
LT
2217 device = dasd_create_device(cdev);
2218 if (IS_ERR(device))
2219 return PTR_ERR(device);
2220
aa88861f 2221 discipline = base_discipline;
c6eb7b77 2222 if (device->features & DASD_FEATURE_USEDIAG) {
1da177e4
LT
2223 if (!dasd_diag_discipline_pointer) {
2224 printk (KERN_WARNING
2225 "dasd_generic couldn't online device %s "
2226 "- discipline DIAG not available\n",
2227 cdev->dev.bus_id);
2228 dasd_delete_device(device);
2229 return -ENODEV;
2230 }
2231 discipline = dasd_diag_discipline_pointer;
2232 }
aa88861f
PO
2233 if (!try_module_get(base_discipline->owner)) {
2234 dasd_delete_device(device);
2235 return -EINVAL;
2236 }
2237 if (!try_module_get(discipline->owner)) {
2238 module_put(base_discipline->owner);
2239 dasd_delete_device(device);
2240 return -EINVAL;
2241 }
2242 device->base_discipline = base_discipline;
1da177e4
LT
2243 device->discipline = discipline;
2244
8e09f215 2245 /* check_device will allocate block device if necessary */
1da177e4
LT
2246 rc = discipline->check_device(device);
2247 if (rc) {
2248 printk (KERN_WARNING
2249 "dasd_generic couldn't online device %s "
2250 "with discipline %s rc=%i\n",
2251 cdev->dev.bus_id, discipline->name, rc);
aa88861f
PO
2252 module_put(discipline->owner);
2253 module_put(base_discipline->owner);
1da177e4
LT
2254 dasd_delete_device(device);
2255 return rc;
2256 }
2257
2258 dasd_set_target_state(device, DASD_STATE_ONLINE);
2259 if (device->state <= DASD_STATE_KNOWN) {
2260 printk (KERN_WARNING
2261 "dasd_generic discipline not found for %s\n",
2262 cdev->dev.bus_id);
2263 rc = -ENODEV;
2264 dasd_set_target_state(device, DASD_STATE_NEW);
8e09f215
SW
2265 if (device->block)
2266 dasd_free_block(device->block);
1da177e4
LT
2267 dasd_delete_device(device);
2268 } else
2269 pr_debug("dasd_generic device %s found\n",
2270 cdev->dev.bus_id);
2271
2272	/* FIXME: we have to wait for the root device, but we don't want to
2273	 * wait for each device individually; we want to wait for all at once. */
2274 wait_event(dasd_init_waitq, _wait_for_device(device));
2275
2276 dasd_put_device(device);
2277
2278 return rc;
2279}
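/*
 * Usage sketch (illustrative): a discipline driver wires the generic
 * online/offline helpers into its struct ccw_driver callbacks.  The names
 * example_discipline and example_set_online are made up; dasd_eckd and
 * dasd_fba do the equivalent with their own discipline structures.
 */
#if 0
static struct dasd_discipline example_discipline;

static int example_set_online(struct ccw_device *cdev)
{
	return dasd_generic_set_online(cdev, &example_discipline);
}
#endif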
2280
8e09f215 2281int dasd_generic_set_offline(struct ccw_device *cdev)
1da177e4
LT
2282{
2283 struct dasd_device *device;
8e09f215 2284 struct dasd_block *block;
dafd87aa 2285 int max_count, open_count;
1da177e4
LT
2286
2287 device = dasd_device_from_cdev(cdev);
2288 if (IS_ERR(device))
2289 return PTR_ERR(device);
2290 if (test_and_set_bit(DASD_FLAG_OFFLINE, &device->flags)) {
2291 /* Already doing offline processing */
2292 dasd_put_device(device);
2293 return 0;
2294 }
2295 /*
2296 * We must make sure that this device is currently not in use.
2297	 * The open_count is increased for every opener; that includes
2298 * the blkdev_get in dasd_scan_partitions. We are only interested
2299 * in the other openers.
2300 */
8e09f215 2301 if (device->block) {
a806170e
HC
2302 max_count = device->block->bdev ? 0 : -1;
2303 open_count = atomic_read(&device->block->open_count);
8e09f215
SW
2304 if (open_count > max_count) {
2305 if (open_count > 0)
2306 printk(KERN_WARNING "Can't offline dasd "
2307 "device with open count = %i.\n",
2308 open_count);
2309 else
2310 printk(KERN_WARNING "%s",
2311 "Can't offline dasd device due "
2312 "to internal use\n");
2313 clear_bit(DASD_FLAG_OFFLINE, &device->flags);
2314 dasd_put_device(device);
2315 return -EBUSY;
2316 }
1da177e4
LT
2317 }
2318 dasd_set_target_state(device, DASD_STATE_NEW);
2319 /* dasd_delete_device destroys the device reference. */
8e09f215
SW
2320 block = device->block;
2321 device->block = NULL;
1da177e4 2322 dasd_delete_device(device);
8e09f215
SW
2323 /*
2324	 * the life cycle of the block is bound to the device, so delete it
2325	 * after the device has been safely removed
2326 */
2327 if (block)
2328 dasd_free_block(block);
1da177e4
LT
2329 return 0;
2330}
2331
8e09f215 2332int dasd_generic_notify(struct ccw_device *cdev, int event)
1da177e4
LT
2333{
2334 struct dasd_device *device;
2335 struct dasd_ccw_req *cqr;
2336 unsigned long flags;
2337 int ret;
2338
2339 device = dasd_device_from_cdev(cdev);
2340 if (IS_ERR(device))
2341 return 0;
2342 spin_lock_irqsave(get_ccwdev_lock(cdev), flags);
2343 ret = 0;
2344 switch (event) {
2345 case CIO_GONE:
2346 case CIO_NO_PATH:
20c64468
SW
2347 /* First of all call extended error reporting. */
2348 dasd_eer_write(device, NULL, DASD_EER_NOPATH);
2349
1da177e4
LT
2350 if (device->state < DASD_STATE_BASIC)
2351 break;
2352 /* Device is active. We want to keep it. */
8e09f215
SW
2353 list_for_each_entry(cqr, &device->ccw_queue, devlist)
2354 if (cqr->status == DASD_CQR_IN_IO) {
2355 cqr->status = DASD_CQR_QUEUED;
2356 cqr->retries++;
2357 }
2358 device->stopped |= DASD_STOPPED_DC_WAIT;
2359 dasd_device_clear_timer(device);
2360 dasd_schedule_device_bh(device);
1da177e4
LT
2361 ret = 1;
2362 break;
2363 case CIO_OPER:
2364 /* FIXME: add a sanity check. */
8e09f215
SW
2365 device->stopped &= ~DASD_STOPPED_DC_WAIT;
2366 dasd_schedule_device_bh(device);
2367 if (device->block)
2368 dasd_schedule_block_bh(device->block);
1da177e4
LT
2369 ret = 1;
2370 break;
2371 }
2372 spin_unlock_irqrestore(get_ccwdev_lock(cdev), flags);
2373 dasd_put_device(device);
2374 return ret;
2375}
2376
763968e2
HC
2377static struct dasd_ccw_req *dasd_generic_build_rdc(struct dasd_device *device,
2378 void *rdc_buffer,
2379 int rdc_buffer_size,
2380 char *magic)
17283b56
CH
2381{
2382 struct dasd_ccw_req *cqr;
2383 struct ccw1 *ccw;
2384
2385 cqr = dasd_smalloc_request(magic, 1 /* RDC */, rdc_buffer_size, device);
2386
2387 if (IS_ERR(cqr)) {
2388 DEV_MESSAGE(KERN_WARNING, device, "%s",
2389 "Could not allocate RDC request");
2390 return cqr;
2391 }
2392
2393 ccw = cqr->cpaddr;
2394 ccw->cmd_code = CCW_CMD_RDC;
2395 ccw->cda = (__u32)(addr_t)rdc_buffer;
2396 ccw->count = rdc_buffer_size;
2397
8e09f215
SW
2398 cqr->startdev = device;
2399 cqr->memdev = device;
17283b56
CH
2400 cqr->expires = 10*HZ;
2401 clear_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags);
2402 cqr->retries = 2;
2403 cqr->buildclk = get_clock();
2404 cqr->status = DASD_CQR_FILLED;
2405 return cqr;
2406}
2407
2408
2409int dasd_generic_read_dev_chars(struct dasd_device *device, char *magic,
2410 void **rdc_buffer, int rdc_buffer_size)
2411{
2412 int ret;
2413 struct dasd_ccw_req *cqr;
2414
2415 cqr = dasd_generic_build_rdc(device, *rdc_buffer, rdc_buffer_size,
2416 magic);
2417 if (IS_ERR(cqr))
2418 return PTR_ERR(cqr);
2419
2420 ret = dasd_sleep_on(cqr);
8e09f215 2421 dasd_sfree_request(cqr, cqr->memdev);
17283b56
CH
2422 return ret;
2423}
aaff0f64 2424EXPORT_SYMBOL_GPL(dasd_generic_read_dev_chars);
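/*
 * Usage sketch (illustrative, not taken from a real discipline): a
 * discipline's check_device routine might fetch the Read Device
 * Characteristics data through the helper exported above.  The magic
 * string "EXMP" and the 64-byte buffer size are assumptions made for
 * this example only.
 */
#if 0
static int example_read_rdc(struct dasd_device *device, void *rdc_data)
{
	return dasd_generic_read_dev_chars(device, "EXMP", &rdc_data, 64);
}
#endif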
20c64468 2425
8e09f215 2426static int __init dasd_init(void)
1da177e4
LT
2427{
2428 int rc;
2429
2430 init_waitqueue_head(&dasd_init_waitq);
8f61701b 2431 init_waitqueue_head(&dasd_flush_wq);
c80ee724 2432 init_waitqueue_head(&generic_waitq);
1da177e4
LT
2433
2434 /* register 'common' DASD debug area, used for all DBF_XXX calls */
361f494d 2435 dasd_debug_area = debug_register("dasd", 1, 1, 8 * sizeof(long));
1da177e4
LT
2436 if (dasd_debug_area == NULL) {
2437 rc = -ENOMEM;
2438 goto failed;
2439 }
2440 debug_register_view(dasd_debug_area, &debug_sprintf_view);
b0035f12 2441 debug_set_level(dasd_debug_area, DBF_WARNING);
1da177e4
LT
2442
2443 DBF_EVENT(DBF_EMERG, "%s", "debug area created");
2444
2445 dasd_diag_discipline_pointer = NULL;
2446
1da177e4
LT
2447 rc = dasd_devmap_init();
2448 if (rc)
2449 goto failed;
2450 rc = dasd_gendisk_init();
2451 if (rc)
2452 goto failed;
2453 rc = dasd_parse();
2454 if (rc)
2455 goto failed;
20c64468
SW
2456 rc = dasd_eer_init();
2457 if (rc)
2458 goto failed;
1da177e4
LT
2459#ifdef CONFIG_PROC_FS
2460 rc = dasd_proc_init();
2461 if (rc)
2462 goto failed;
2463#endif
2464
2465 return 0;
2466failed:
2467 MESSAGE(KERN_INFO, "%s", "initialization not performed due to errors");
2468 dasd_exit();
2469 return rc;
2470}
2471
2472module_init(dasd_init);
2473module_exit(dasd_exit);
2474
2475EXPORT_SYMBOL(dasd_debug_area);
2476EXPORT_SYMBOL(dasd_diag_discipline_pointer);
2477
2478EXPORT_SYMBOL(dasd_add_request_head);
2479EXPORT_SYMBOL(dasd_add_request_tail);
2480EXPORT_SYMBOL(dasd_cancel_req);
8e09f215
SW
2481EXPORT_SYMBOL(dasd_device_clear_timer);
2482EXPORT_SYMBOL(dasd_block_clear_timer);
1da177e4
LT
2483EXPORT_SYMBOL(dasd_enable_device);
2484EXPORT_SYMBOL(dasd_int_handler);
2485EXPORT_SYMBOL(dasd_kfree_request);
2486EXPORT_SYMBOL(dasd_kick_device);
2487EXPORT_SYMBOL(dasd_kmalloc_request);
8e09f215
SW
2488EXPORT_SYMBOL(dasd_schedule_device_bh);
2489EXPORT_SYMBOL(dasd_schedule_block_bh);
1da177e4 2490EXPORT_SYMBOL(dasd_set_target_state);
8e09f215
SW
2491EXPORT_SYMBOL(dasd_device_set_timer);
2492EXPORT_SYMBOL(dasd_block_set_timer);
1da177e4
LT
2493EXPORT_SYMBOL(dasd_sfree_request);
2494EXPORT_SYMBOL(dasd_sleep_on);
2495EXPORT_SYMBOL(dasd_sleep_on_immediatly);
2496EXPORT_SYMBOL(dasd_sleep_on_interruptible);
2497EXPORT_SYMBOL(dasd_smalloc_request);
2498EXPORT_SYMBOL(dasd_start_IO);
2499EXPORT_SYMBOL(dasd_term_IO);
2500
2501EXPORT_SYMBOL_GPL(dasd_generic_probe);
2502EXPORT_SYMBOL_GPL(dasd_generic_remove);
2503EXPORT_SYMBOL_GPL(dasd_generic_notify);
2504EXPORT_SYMBOL_GPL(dasd_generic_set_online);
2505EXPORT_SYMBOL_GPL(dasd_generic_set_offline);
8e09f215
SW
2506EXPORT_SYMBOL_GPL(dasd_generic_handle_state_change);
2507EXPORT_SYMBOL_GPL(dasd_flush_device_queue);
2508EXPORT_SYMBOL_GPL(dasd_alloc_block);
2509EXPORT_SYMBOL_GPL(dasd_free_block);