1 /*
2 * File...........: linux/drivers/s390/block/dasd_eckd.c
3 * Author(s)......: Holger Smolinski <Holger.Smolinski@de.ibm.com>
4 * Horst Hummel <Horst.Hummel@de.ibm.com>
5 * Carsten Otte <Cotte@de.ibm.com>
6 * Martin Schwidefsky <schwidefsky@de.ibm.com>
7 * Bugreports.to..: <Linux390@de.ibm.com>
8 * (C) IBM Corporation, IBM Deutschland Entwicklung GmbH, 1999,2000
9 *
10 */
11
12 #include <linux/stddef.h>
13 #include <linux/kernel.h>
14 #include <linux/slab.h>
15 #include <linux/hdreg.h> /* HDIO_GETGEO */
16 #include <linux/bio.h>
17 #include <linux/module.h>
18 #include <linux/init.h>
19
20 #include <asm/debug.h>
21 #include <asm/idals.h>
22 #include <asm/ebcdic.h>
23 #include <asm/io.h>
24 #include <asm/todclk.h>
25 #include <asm/uaccess.h>
26 #include <asm/cio.h>
27 #include <asm/ccwdev.h>
28
29 #include "dasd_int.h"
30 #include "dasd_eckd.h"
31
32 #ifdef PRINTK_HEADER
33 #undef PRINTK_HEADER
34 #endif /* PRINTK_HEADER */
35 #define PRINTK_HEADER "dasd(eckd):"
36
37 #define ECKD_C0(i) (i->home_bytes)
38 #define ECKD_F(i) (i->formula)
39 #define ECKD_F1(i) (ECKD_F(i)==0x01?(i->factors.f_0x01.f1):\
40 (i->factors.f_0x02.f1))
41 #define ECKD_F2(i) (ECKD_F(i)==0x01?(i->factors.f_0x01.f2):\
42 (i->factors.f_0x02.f2))
43 #define ECKD_F3(i) (ECKD_F(i)==0x01?(i->factors.f_0x01.f3):\
44 (i->factors.f_0x02.f3))
45 #define ECKD_F4(i) (ECKD_F(i)==0x02?(i->factors.f_0x02.f4):0)
46 #define ECKD_F5(i) (ECKD_F(i)==0x02?(i->factors.f_0x02.f5):0)
47 #define ECKD_F6(i) (i->factor6)
48 #define ECKD_F7(i) (i->factor7)
49 #define ECKD_F8(i) (i->factor8)
50
51 MODULE_LICENSE("GPL");
52
53 static struct dasd_discipline dasd_eckd_discipline;
54
55 struct dasd_eckd_private {
56 struct dasd_eckd_characteristics rdc_data;
57 struct dasd_eckd_confdata conf_data;
58 struct dasd_eckd_path path_data;
59 struct eckd_count count_area[5];
60 int init_cqr_status;
61 int uses_cdl;
62 struct attrib_data_t attrib; /* e.g. cache operations */
63 };
64
65 /* The ccw bus type uses this table to find devices that it sends to
66 * dasd_eckd_probe */
67 static struct ccw_device_id dasd_eckd_ids[] = {
68 { CCW_DEVICE_DEVTYPE (0x3990, 0, 0x3390, 0), .driver_info = 0x1},
69 { CCW_DEVICE_DEVTYPE (0x2105, 0, 0x3390, 0), .driver_info = 0x2},
70 { CCW_DEVICE_DEVTYPE (0x3880, 0, 0x3390, 0), .driver_info = 0x3},
71 { CCW_DEVICE_DEVTYPE (0x3990, 0, 0x3380, 0), .driver_info = 0x4},
72 { CCW_DEVICE_DEVTYPE (0x2105, 0, 0x3380, 0), .driver_info = 0x5},
73 { CCW_DEVICE_DEVTYPE (0x9343, 0, 0x9345, 0), .driver_info = 0x6},
74 { CCW_DEVICE_DEVTYPE (0x2107, 0, 0x3390, 0), .driver_info = 0x7},
75 { CCW_DEVICE_DEVTYPE (0x2107, 0, 0x3380, 0), .driver_info = 0x8},
76 { CCW_DEVICE_DEVTYPE (0x1750, 0, 0x3390, 0), .driver_info = 0x9},
77 { CCW_DEVICE_DEVTYPE (0x1750, 0, 0x3380, 0), .driver_info = 0xa},
78 { /* end of list */ },
79 };
80
81 MODULE_DEVICE_TABLE(ccw, dasd_eckd_ids);
82
83 static struct ccw_driver dasd_eckd_driver; /* see below */
84
85 /* Initial attempt at a probe function. This can be simplified once
86 * the other detection code is gone. */
87 static int
88 dasd_eckd_probe (struct ccw_device *cdev)
89 {
90 int ret;
91
92 /* set ECKD specific ccw-device options */
93 ret = ccw_device_set_options(cdev, CCWDEV_ALLOW_FORCE);
94 if (ret) {
95 printk(KERN_WARNING
96 "dasd_eckd_probe: could not set ccw-device options "
97 "for %s\n", cdev->dev.bus_id);
98 return ret;
99 }
100 ret = dasd_generic_probe(cdev, &dasd_eckd_discipline);
101 return ret;
102 }
103
104 static int
105 dasd_eckd_set_online(struct ccw_device *cdev)
106 {
107 return dasd_generic_set_online(cdev, &dasd_eckd_discipline);
108 }
109
110 static struct ccw_driver dasd_eckd_driver = {
111 .name = "dasd-eckd",
112 .owner = THIS_MODULE,
113 .ids = dasd_eckd_ids,
114 .probe = dasd_eckd_probe,
115 .remove = dasd_generic_remove,
116 .set_offline = dasd_generic_set_offline,
117 .set_online = dasd_eckd_set_online,
118 .notify = dasd_generic_notify,
119 };
120
121 static const int sizes_trk0[] = { 28, 148, 84 };
122 #define LABEL_SIZE 140
123
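/*
 * Helper: round 'no' up to the next multiple of 'mult';
 * e.g. round_up_multiple(10, 4) returns 12.
 */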
124 static inline unsigned int
125 round_up_multiple(unsigned int no, unsigned int mult)
126 {
127 int rem = no % mult;
128 return (rem ? no - rem + mult : no);
129 }
130
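/*
 * Helper: divide d1 by d2 and round the quotient up;
 * e.g. ceil_quot(7, 3) returns 3.
 */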
131 static inline unsigned int
132 ceil_quot(unsigned int d1, unsigned int d2)
133 {
134 return (d1 + (d2 - 1)) / d2;
135 }
136
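/*
 * Number of records (blocks) with key length kl and data length dl that
 * fit on one track of the given device type. Worked example (added for
 * illustration, assuming a 3390 with kl = 0 and dl = 4096): dn becomes
 * ceil_quot(4102, 232) + 1 = 19, so the result is
 * 1729 / (10 + 9 + ceil_quot(4096 + 6 * 19, 34)) = 1729 / 143 = 12
 * blocks per track.
 */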
137 static unsigned int
138 recs_per_track(struct dasd_eckd_characteristics * rdc,
139 unsigned int kl, unsigned int dl)
140 {
141 int dn, kn;
142
143 switch (rdc->dev_type) {
144 case 0x3380:
145 if (kl)
146 return 1499 / (15 + 7 + ceil_quot(kl + 12, 32) +
147 ceil_quot(dl + 12, 32));
148 else
149 return 1499 / (15 + ceil_quot(dl + 12, 32));
150 case 0x3390:
151 dn = ceil_quot(dl + 6, 232) + 1;
152 if (kl) {
153 kn = ceil_quot(kl + 6, 232) + 1;
154 return 1729 / (10 + 9 + ceil_quot(kl + 6 * kn, 34) +
155 9 + ceil_quot(dl + 6 * dn, 34));
156 } else
157 return 1729 / (10 + 9 + ceil_quot(dl + 6 * dn, 34));
158 case 0x9345:
159 dn = ceil_quot(dl + 6, 232) + 1;
160 if (kl) {
161 kn = ceil_quot(kl + 6, 232) + 1;
162 return 1420 / (18 + 7 + ceil_quot(kl + 6 * kn, 34) +
163 ceil_quot(dl + 6 * dn, 34));
164 } else
165 return 1420 / (18 + 7 + ceil_quot(dl + 6 * dn, 34));
166 }
167 return 0;
168 }
169
170 static int
171 check_XRC (struct ccw1 *de_ccw,
172 struct DE_eckd_data *data,
173 struct dasd_device *device)
174 {
175 struct dasd_eckd_private *private;
176 int rc;
177
178 private = (struct dasd_eckd_private *) device->private;
179 if (!private->rdc_data.facilities.XRC_supported)
180 return 0;
181
182 /* switch on System Time Stamp - needed for XRC Support */
183 data->ga_extended |= 0x08; /* switch on 'Time Stamp Valid' */
184 data->ga_extended |= 0x02; /* switch on 'Extended Parameter' */
185
186 rc = get_sync_clock(&data->ep_sys_time);
187 /* Ignore return code if sync clock is switched off. */
188 if (rc == -ENOSYS || rc == -EACCES)
189 rc = 0;
190
191 de_ccw->count = sizeof (struct DE_eckd_data);
192 de_ccw->flags |= CCW_FLAG_SLI;
193 return rc;
194 }
195
196 static int
197 define_extent(struct ccw1 * ccw, struct DE_eckd_data * data, int trk,
198 int totrk, int cmd, struct dasd_device * device)
199 {
200 struct dasd_eckd_private *private;
201 struct ch_t geo, beg, end;
202 int rc = 0;
203
204 private = (struct dasd_eckd_private *) device->private;
205
206 ccw->cmd_code = DASD_ECKD_CCW_DEFINE_EXTENT;
207 ccw->flags = 0;
208 ccw->count = 16;
209 ccw->cda = (__u32) __pa(data);
210
211 memset(data, 0, sizeof (struct DE_eckd_data));
212 switch (cmd) {
213 case DASD_ECKD_CCW_READ_HOME_ADDRESS:
214 case DASD_ECKD_CCW_READ_RECORD_ZERO:
215 case DASD_ECKD_CCW_READ:
216 case DASD_ECKD_CCW_READ_MT:
217 case DASD_ECKD_CCW_READ_CKD:
218 case DASD_ECKD_CCW_READ_CKD_MT:
219 case DASD_ECKD_CCW_READ_KD:
220 case DASD_ECKD_CCW_READ_KD_MT:
221 case DASD_ECKD_CCW_READ_COUNT:
222 data->mask.perm = 0x1;
223 data->attributes.operation = private->attrib.operation;
224 break;
225 case DASD_ECKD_CCW_WRITE:
226 case DASD_ECKD_CCW_WRITE_MT:
227 case DASD_ECKD_CCW_WRITE_KD:
228 case DASD_ECKD_CCW_WRITE_KD_MT:
229 data->mask.perm = 0x02;
230 data->attributes.operation = private->attrib.operation;
231 rc = check_XRC (ccw, data, device);
232 break;
233 case DASD_ECKD_CCW_WRITE_CKD:
234 case DASD_ECKD_CCW_WRITE_CKD_MT:
235 data->attributes.operation = DASD_BYPASS_CACHE;
236 rc = check_XRC (ccw, data, device);
237 break;
238 case DASD_ECKD_CCW_ERASE:
239 case DASD_ECKD_CCW_WRITE_HOME_ADDRESS:
240 case DASD_ECKD_CCW_WRITE_RECORD_ZERO:
241 data->mask.perm = 0x3;
242 data->mask.auth = 0x1;
243 data->attributes.operation = DASD_BYPASS_CACHE;
244 rc = check_XRC (ccw, data, device);
245 break;
246 default:
247 DEV_MESSAGE(KERN_ERR, device, "unknown opcode 0x%x", cmd);
248 break;
249 }
250
251 data->attributes.mode = 0x3; /* ECKD */
252
253 if ((private->rdc_data.cu_type == 0x2105 ||
254 private->rdc_data.cu_type == 0x2107 ||
255 private->rdc_data.cu_type == 0x1750)
256 && !(private->uses_cdl && trk < 2))
257 data->ga_extended |= 0x40; /* Regular Data Format Mode */
258
259 geo.cyl = private->rdc_data.no_cyl;
260 geo.head = private->rdc_data.trk_per_cyl;
261 beg.cyl = trk / geo.head;
262 beg.head = trk % geo.head;
263 end.cyl = totrk / geo.head;
264 end.head = totrk % geo.head;
265
266 /* check for sequential prestage - enhance cylinder range */
267 if (data->attributes.operation == DASD_SEQ_PRESTAGE ||
268 data->attributes.operation == DASD_SEQ_ACCESS) {
269
270 if (end.cyl + private->attrib.nr_cyl < geo.cyl)
271 end.cyl += private->attrib.nr_cyl;
272 else
273 end.cyl = (geo.cyl - 1);
274 }
275
276 data->beg_ext.cyl = beg.cyl;
277 data->beg_ext.head = beg.head;
278 data->end_ext.cyl = end.cyl;
279 data->end_ext.head = end.head;
280 return rc;
281 }
282
283 static void
284 locate_record(struct ccw1 *ccw, struct LO_eckd_data *data, int trk,
285 int rec_on_trk, int no_rec, int cmd,
286 struct dasd_device * device, int reclen)
287 {
288 struct dasd_eckd_private *private;
289 int sector;
290 int dn, d;
291
292 private = (struct dasd_eckd_private *) device->private;
293
294 DBF_DEV_EVENT(DBF_INFO, device,
295 "Locate: trk %d, rec %d, no_rec %d, cmd %d, reclen %d",
296 trk, rec_on_trk, no_rec, cmd, reclen);
297
298 ccw->cmd_code = DASD_ECKD_CCW_LOCATE_RECORD;
299 ccw->flags = 0;
300 ccw->count = 16;
301 ccw->cda = (__u32) __pa(data);
302
303 memset(data, 0, sizeof (struct LO_eckd_data));
304 sector = 0;
305 if (rec_on_trk) {
306 switch (private->rdc_data.dev_type) {
307 case 0x3390:
308 dn = ceil_quot(reclen + 6, 232);
309 d = 9 + ceil_quot(reclen + 6 * (dn + 1), 34);
310 sector = (49 + (rec_on_trk - 1) * (10 + d)) / 8;
311 break;
312 case 0x3380:
313 d = 7 + ceil_quot(reclen + 12, 32);
314 sector = (39 + (rec_on_trk - 1) * (8 + d)) / 7;
315 break;
316 }
317 }
318 data->sector = sector;
319 data->count = no_rec;
320 switch (cmd) {
321 case DASD_ECKD_CCW_WRITE_HOME_ADDRESS:
322 data->operation.orientation = 0x3;
323 data->operation.operation = 0x03;
324 break;
325 case DASD_ECKD_CCW_READ_HOME_ADDRESS:
326 data->operation.orientation = 0x3;
327 data->operation.operation = 0x16;
328 break;
329 case DASD_ECKD_CCW_WRITE_RECORD_ZERO:
330 data->operation.orientation = 0x1;
331 data->operation.operation = 0x03;
332 data->count++;
333 break;
334 case DASD_ECKD_CCW_READ_RECORD_ZERO:
335 data->operation.orientation = 0x3;
336 data->operation.operation = 0x16;
337 data->count++;
338 break;
339 case DASD_ECKD_CCW_WRITE:
340 case DASD_ECKD_CCW_WRITE_MT:
341 case DASD_ECKD_CCW_WRITE_KD:
342 case DASD_ECKD_CCW_WRITE_KD_MT:
343 data->auxiliary.last_bytes_used = 0x1;
344 data->length = reclen;
345 data->operation.operation = 0x01;
346 break;
347 case DASD_ECKD_CCW_WRITE_CKD:
348 case DASD_ECKD_CCW_WRITE_CKD_MT:
349 data->auxiliary.last_bytes_used = 0x1;
350 data->length = reclen;
351 data->operation.operation = 0x03;
352 break;
353 case DASD_ECKD_CCW_READ:
354 case DASD_ECKD_CCW_READ_MT:
355 case DASD_ECKD_CCW_READ_KD:
356 case DASD_ECKD_CCW_READ_KD_MT:
357 data->auxiliary.last_bytes_used = 0x1;
358 data->length = reclen;
359 data->operation.operation = 0x06;
360 break;
361 case DASD_ECKD_CCW_READ_CKD:
362 case DASD_ECKD_CCW_READ_CKD_MT:
363 data->auxiliary.last_bytes_used = 0x1;
364 data->length = reclen;
365 data->operation.operation = 0x16;
366 break;
367 case DASD_ECKD_CCW_READ_COUNT:
368 data->operation.operation = 0x06;
369 break;
370 case DASD_ECKD_CCW_ERASE:
371 data->length = reclen;
372 data->auxiliary.last_bytes_used = 0x1;
373 data->operation.operation = 0x0b;
374 break;
375 default:
376 DEV_MESSAGE(KERN_ERR, device, "unknown opcode 0x%x", cmd);
377 }
378 data->seek_addr.cyl = data->search_arg.cyl =
379 trk / private->rdc_data.trk_per_cyl;
380 data->seek_addr.head = data->search_arg.head =
381 trk % private->rdc_data.trk_per_cyl;
382 data->search_arg.record = rec_on_trk;
383 }
384
385 /*
386 * Returns 1 if the block is one of the special blocks that need
387 * to be read/written with the KD variant of the command.
388 * That is DASD_ECKD_READ_KD_MT instead of DASD_ECKD_READ_MT and
389 * DASD_ECKD_WRITE_KD_MT instead of DASD_ECKD_WRITE_MT.
390 * Luckily the KD variants differ only by one bit (0x08) from the
391 * normal variant. So don't wonder about code like:
392 * if (dasd_eckd_cdl_special(blk_per_trk, recid))
393 * ccw->cmd_code |= 0x8;
394 */
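/*
 * Illustration (assuming 12 blocks per track, e.g. 4kB blocks on a
 * 3390): recids 0-2 (the cdl records on track 0) and 12-23 (all of
 * track 1) are special, recids 3-11 and 24 onwards are not.
 */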
395 static inline int
396 dasd_eckd_cdl_special(int blk_per_trk, int recid)
397 {
398 if (recid < 3)
399 return 1;
400 if (recid < blk_per_trk)
401 return 0;
402 if (recid < 2 * blk_per_trk)
403 return 1;
404 return 0;
405 }
406
407 /*
408 * Returns the record size for the special blocks of the cdl format.
409 * Only returns something useful if dasd_eckd_cdl_special is true
410 * for the recid.
411 */
412 static inline int
413 dasd_eckd_cdl_reclen(int recid)
414 {
415 if (recid < 3)
416 return sizes_trk0[recid];
417 return LABEL_SIZE;
418 }
419
420 /*
421 * Generate device unique id that specifies the physical device.
422 */
423 static int
424 dasd_eckd_generate_uid(struct dasd_device *device, struct dasd_uid *uid)
425 {
426 struct dasd_eckd_private *private;
427 struct dasd_eckd_confdata *confdata;
428
429 private = (struct dasd_eckd_private *) device->private;
430 if (!private)
431 return -ENODEV;
432 confdata = &private->conf_data;
433 if (!confdata)
434 return -ENODEV;
435
436 memset(uid, 0, sizeof(struct dasd_uid));
437 memcpy(uid->vendor, confdata->ned1.HDA_manufacturer,
438 sizeof(uid->vendor) - 1);
439 EBCASC(uid->vendor, sizeof(uid->vendor) - 1);
440 memcpy(uid->serial, confdata->ned1.HDA_location,
441 sizeof(uid->serial) - 1);
442 EBCASC(uid->serial, sizeof(uid->serial) - 1);
443 uid->ssid = confdata->neq.subsystemID;
444 if (confdata->ned2.sneq.flags == 0x40) {
445 uid->alias = 1;
446 uid->unit_addr = confdata->ned2.sneq.base_unit_addr;
447 } else
448 uid->unit_addr = confdata->ned1.unit_addr;
449
450 return 0;
451 }
452
453 static int
454 dasd_eckd_read_conf(struct dasd_device *device)
455 {
456 void *conf_data;
457 int conf_len, conf_data_saved;
458 int rc;
459 __u8 lpm;
460 struct dasd_eckd_private *private;
461 struct dasd_eckd_path *path_data;
462
463 private = (struct dasd_eckd_private *) device->private;
464 path_data = (struct dasd_eckd_path *) &private->path_data;
465 path_data->opm = ccw_device_get_path_mask(device->cdev);
466 lpm = 0x80;
467 conf_data_saved = 0;
468
469 /* get configuration data per operational path */
470 for (lpm = 0x80; lpm; lpm>>= 1) {
471 if (lpm & path_data->opm){
472 rc = read_conf_data_lpm(device->cdev, &conf_data,
473 &conf_len, lpm);
474 if (rc && rc != -EOPNOTSUPP) { /* -EOPNOTSUPP is ok */
475 MESSAGE(KERN_WARNING,
476 "Read configuration data returned "
477 "error %d", rc);
478 return rc;
479 }
480 if (conf_data == NULL) {
481 MESSAGE(KERN_WARNING, "%s", "No configuration "
482 "data retrieved");
483 continue; /* no error */
484 }
485 if (conf_len != sizeof (struct dasd_eckd_confdata)) {
486 MESSAGE(KERN_WARNING,
487 "sizes of configuration data mismatch "
488 "%d (read) vs %ld (expected)",
489 conf_len,
490 sizeof (struct dasd_eckd_confdata));
491 kfree(conf_data);
492 continue; /* no error */
493 }
494 /* save first valid configuration data */
495 if (!conf_data_saved){
496 memcpy(&private->conf_data, conf_data,
497 sizeof (struct dasd_eckd_confdata));
498 conf_data_saved++;
499 }
500 switch (((char *)conf_data)[242] & 0x07){
501 case 0x02:
502 path_data->npm |= lpm;
503 break;
504 case 0x03:
505 path_data->ppm |= lpm;
506 break;
507 }
508 kfree(conf_data);
509 }
510 }
511 return 0;
512 }
513
514 /*
515 * Build CP for Perform Subsystem Function - SSC.
516 */
517 static struct dasd_ccw_req *
518 dasd_eckd_build_psf_ssc(struct dasd_device *device)
519 {
520 struct dasd_ccw_req *cqr;
521 struct dasd_psf_ssc_data *psf_ssc_data;
522 struct ccw1 *ccw;
523
524 cqr = dasd_smalloc_request("ECKD", 1 /* PSF */ ,
525 sizeof(struct dasd_psf_ssc_data),
526 device);
527
528 if (IS_ERR(cqr)) {
529 DEV_MESSAGE(KERN_WARNING, device, "%s",
530 "Could not allocate PSF-SSC request");
531 return cqr;
532 }
533 psf_ssc_data = (struct dasd_psf_ssc_data *)cqr->data;
534 psf_ssc_data->order = PSF_ORDER_SSC;
535 psf_ssc_data->suborder = 0x08;
536
537 ccw = cqr->cpaddr;
538 ccw->cmd_code = DASD_ECKD_CCW_PSF;
539 ccw->cda = (__u32)(addr_t)psf_ssc_data;
540 ccw->count = 66;
541
542 cqr->device = device;
543 cqr->expires = 10*HZ;
544 cqr->buildclk = get_clock();
545 cqr->status = DASD_CQR_FILLED;
546 return cqr;
547 }
548
549 /*
550 * Perform Subsystem Function.
551 * It is necessary to trigger CIO for channel revalidation since this
552 * call might change behaviour of DASD devices.
553 */
554 static int
555 dasd_eckd_psf_ssc(struct dasd_device *device)
556 {
557 struct dasd_ccw_req *cqr;
558 int rc;
559
560 cqr = dasd_eckd_build_psf_ssc(device);
561 if (IS_ERR(cqr))
562 return PTR_ERR(cqr);
563
564 rc = dasd_sleep_on(cqr);
565 if (!rc)
566 /* trigger CIO to reprobe devices */
567 css_schedule_reprobe();
568 dasd_sfree_request(cqr, cqr->device);
569 return rc;
570 }
571
572 /*
573 * Validate the storage server of the current device.
574 */
575 static int
576 dasd_eckd_validate_server(struct dasd_device *device, struct dasd_uid *uid)
577 {
578 int rc;
579
580 /* Currently PAV is the only reason to 'validate' server on LPAR */
581 if (dasd_nopav || MACHINE_IS_VM)
582 return 0;
583
584 rc = dasd_eckd_psf_ssc(device);
585 /* maybe the requested feature is not available on the server,
586 * therefore just report the error and go ahead */
587 DEV_MESSAGE(KERN_INFO, device,
588 "PSF-SSC on storage subsystem %s.%s.%04x returned rc=%d",
589 uid->vendor, uid->serial, uid->ssid, rc);
590 /* RE-Read Configuration Data */
591 return dasd_eckd_read_conf(device);
592 }
593
594 /*
595 * Check device characteristics.
596 * If the device is accessible using ECKD discipline, the device is enabled.
597 */
598 static int
599 dasd_eckd_check_characteristics(struct dasd_device *device)
600 {
601 struct dasd_eckd_private *private;
602 struct dasd_uid uid;
603 void *rdc_data;
604 int rc;
605
606 private = (struct dasd_eckd_private *) device->private;
607 if (private == NULL) {
608 private = kzalloc(sizeof(struct dasd_eckd_private),
609 GFP_KERNEL | GFP_DMA);
610 if (private == NULL) {
611 DEV_MESSAGE(KERN_WARNING, device, "%s",
612 "memory allocation failed for private "
613 "data");
614 return -ENOMEM;
615 }
616 device->private = (void *) private;
617 }
618 /* Invalidate status of initial analysis. */
619 private->init_cqr_status = -1;
620 /* Set default cache operations. */
621 private->attrib.operation = DASD_NORMAL_CACHE;
622 private->attrib.nr_cyl = 0;
623
624 /* Read Configuration Data */
625 rc = dasd_eckd_read_conf(device);
626 if (rc)
627 return rc;
628
629 /* Generate device unique id and register in devmap */
630 rc = dasd_eckd_generate_uid(device, &uid);
631 if (rc)
632 return rc;
633 rc = dasd_set_uid(device->cdev, &uid);
634 if (rc == 1) /* new server found */
635 rc = dasd_eckd_validate_server(device, &uid);
636 if (rc)
637 return rc;
638
639 /* Read Device Characteristics */
640 rdc_data = (void *) &(private->rdc_data);
641 memset(rdc_data, 0, sizeof(private->rdc_data));
642 rc = read_dev_chars(device->cdev, &rdc_data, 64);
643 if (rc)
644 DEV_MESSAGE(KERN_WARNING, device,
645 "Read device characteristics returned "
646 "rc=%d", rc);
647
648 DEV_MESSAGE(KERN_INFO, device,
649 "%04X/%02X(CU:%04X/%02X) Cyl:%d Head:%d Sec:%d",
650 private->rdc_data.dev_type,
651 private->rdc_data.dev_model,
652 private->rdc_data.cu_type,
653 private->rdc_data.cu_model.model,
654 private->rdc_data.no_cyl,
655 private->rdc_data.trk_per_cyl,
656 private->rdc_data.sec_per_trk);
657 return rc;
658 }
659
660 static struct dasd_ccw_req *
661 dasd_eckd_analysis_ccw(struct dasd_device *device)
662 {
663 struct dasd_eckd_private *private;
664 struct eckd_count *count_data;
665 struct LO_eckd_data *LO_data;
666 struct dasd_ccw_req *cqr;
667 struct ccw1 *ccw;
668 int cplength, datasize;
669 int i;
670
671 private = (struct dasd_eckd_private *) device->private;
672
673 cplength = 8;
674 datasize = sizeof(struct DE_eckd_data) + 2*sizeof(struct LO_eckd_data);
675 cqr = dasd_smalloc_request(dasd_eckd_discipline.name,
676 cplength, datasize, device);
677 if (IS_ERR(cqr))
678 return cqr;
679 ccw = cqr->cpaddr;
680 /* Define extent for the first 3 tracks. */
681 define_extent(ccw++, cqr->data, 0, 2,
682 DASD_ECKD_CCW_READ_COUNT, device);
683 LO_data = cqr->data + sizeof (struct DE_eckd_data);
684 /* Locate record for the first 4 records on track 0. */
685 ccw[-1].flags |= CCW_FLAG_CC;
686 locate_record(ccw++, LO_data++, 0, 0, 4,
687 DASD_ECKD_CCW_READ_COUNT, device, 0);
688
689 count_data = private->count_area;
690 for (i = 0; i < 4; i++) {
691 ccw[-1].flags |= CCW_FLAG_CC;
692 ccw->cmd_code = DASD_ECKD_CCW_READ_COUNT;
693 ccw->flags = 0;
694 ccw->count = 8;
695 ccw->cda = (__u32)(addr_t) count_data;
696 ccw++;
697 count_data++;
698 }
699
700 /* Locate record for the first record on track 2. */
701 ccw[-1].flags |= CCW_FLAG_CC;
702 locate_record(ccw++, LO_data++, 2, 0, 1,
703 DASD_ECKD_CCW_READ_COUNT, device, 0);
704 /* Read count ccw. */
705 ccw[-1].flags |= CCW_FLAG_CC;
706 ccw->cmd_code = DASD_ECKD_CCW_READ_COUNT;
707 ccw->flags = 0;
708 ccw->count = 8;
709 ccw->cda = (__u32)(addr_t) count_data;
710
711 cqr->device = device;
712 cqr->retries = 0;
713 cqr->buildclk = get_clock();
714 cqr->status = DASD_CQR_FILLED;
715 return cqr;
716 }
717
718 /*
719 * This is the callback function for the init_analysis cqr. It saves
720 * the status of the initial analysis ccw before it frees it and kicks
721 * the device to continue the startup sequence. This will call
722 * dasd_eckd_do_analysis again (if the device has not been marked
723 * for deletion in the meantime).
724 */
725 static void
726 dasd_eckd_analysis_callback(struct dasd_ccw_req *init_cqr, void *data)
727 {
728 struct dasd_eckd_private *private;
729 struct dasd_device *device;
730
731 device = init_cqr->device;
732 private = (struct dasd_eckd_private *) device->private;
733 private->init_cqr_status = init_cqr->status;
734 dasd_sfree_request(init_cqr, device);
735 dasd_kick_device(device);
736 }
737
738 static int
739 dasd_eckd_start_analysis(struct dasd_device *device)
740 {
741 struct dasd_eckd_private *private;
742 struct dasd_ccw_req *init_cqr;
743
744 private = (struct dasd_eckd_private *) device->private;
745 init_cqr = dasd_eckd_analysis_ccw(device);
746 if (IS_ERR(init_cqr))
747 return PTR_ERR(init_cqr);
748 init_cqr->callback = dasd_eckd_analysis_callback;
749 init_cqr->callback_data = NULL;
750 init_cqr->expires = 5*HZ;
751 dasd_add_request_head(init_cqr);
752 return -EAGAIN;
753 }
754
755 static int
756 dasd_eckd_end_analysis(struct dasd_device *device)
757 {
758 struct dasd_eckd_private *private;
759 struct eckd_count *count_area;
760 unsigned int sb, blk_per_trk;
761 int status, i;
762
763 private = (struct dasd_eckd_private *) device->private;
764 status = private->init_cqr_status;
765 private->init_cqr_status = -1;
766 if (status != DASD_CQR_DONE) {
767 DEV_MESSAGE(KERN_WARNING, device, "%s",
768 "volume analysis returned unformatted disk");
769 return -EMEDIUMTYPE;
770 }
771
772 private->uses_cdl = 1;
773 /* Calculate number of blocks/records per track. */
774 blk_per_trk = recs_per_track(&private->rdc_data, 0, device->bp_block);
775 /* Check Track 0 for Compatible Disk Layout */
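/* The loop below keeps the cdl assumption only if the first three
 * records on track 0 have a 4-byte key and data lengths of
 * dasd_eckd_cdl_reclen(i) - 4 bytes, i.e. 24, 144 and 80 bytes. */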
776 count_area = NULL;
777 for (i = 0; i < 3; i++) {
778 if (private->count_area[i].kl != 4 ||
779 private->count_area[i].dl != dasd_eckd_cdl_reclen(i) - 4) {
780 private->uses_cdl = 0;
781 break;
782 }
783 }
784 if (i == 3)
785 count_area = &private->count_area[4];
786
787 if (private->uses_cdl == 0) {
788 for (i = 0; i < 5; i++) {
789 if ((private->count_area[i].kl != 0) ||
790 (private->count_area[i].dl !=
791 private->count_area[0].dl))
792 break;
793 }
794 if (i == 5)
795 count_area = &private->count_area[0];
796 } else {
797 if (private->count_area[3].record == 1)
798 DEV_MESSAGE(KERN_WARNING, device, "%s",
799 "Trk 0: no records after VTOC!");
800 }
801 if (count_area != NULL && count_area->kl == 0) {
802 /* we found nothing violating our disk layout */
803 if (dasd_check_blocksize(count_area->dl) == 0)
804 device->bp_block = count_area->dl;
805 }
806 if (device->bp_block == 0) {
807 DEV_MESSAGE(KERN_WARNING, device, "%s",
808 "Volume has incompatible disk layout");
809 return -EMEDIUMTYPE;
810 }
811 device->s2b_shift = 0; /* bits to shift 512 to get a block */
812 for (sb = 512; sb < device->bp_block; sb = sb << 1)
813 device->s2b_shift++;
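/* e.g. a block size of 4096 gives s2b_shift = 3, since 4096 = 512 << 3 */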
814
815 blk_per_trk = recs_per_track(&private->rdc_data, 0, device->bp_block);
816 device->blocks = (private->rdc_data.no_cyl *
817 private->rdc_data.trk_per_cyl *
818 blk_per_trk);
819
820 DEV_MESSAGE(KERN_INFO, device,
821 "(%dkB blks): %dkB at %dkB/trk %s",
822 (device->bp_block >> 10),
823 ((private->rdc_data.no_cyl *
824 private->rdc_data.trk_per_cyl *
825 blk_per_trk * (device->bp_block >> 9)) >> 1),
826 ((blk_per_trk * device->bp_block) >> 10),
827 private->uses_cdl ?
828 "compatible disk layout" : "linux disk layout");
829
830 return 0;
831 }
832
833 static int
834 dasd_eckd_do_analysis(struct dasd_device *device)
835 {
836 struct dasd_eckd_private *private;
837
838 private = (struct dasd_eckd_private *) device->private;
839 if (private->init_cqr_status < 0)
840 return dasd_eckd_start_analysis(device);
841 else
842 return dasd_eckd_end_analysis(device);
843 }
844
845 static int
846 dasd_eckd_fill_geometry(struct dasd_device *device, struct hd_geometry *geo)
847 {
848 struct dasd_eckd_private *private;
849
850 private = (struct dasd_eckd_private *) device->private;
851 if (dasd_check_blocksize(device->bp_block) == 0) {
852 geo->sectors = recs_per_track(&private->rdc_data,
853 0, device->bp_block);
854 }
855 geo->cylinders = private->rdc_data.no_cyl;
856 geo->heads = private->rdc_data.trk_per_cyl;
857 return 0;
858 }
859
860 static struct dasd_ccw_req *
861 dasd_eckd_format_device(struct dasd_device * device,
862 struct format_data_t * fdata)
863 {
864 struct dasd_eckd_private *private;
865 struct dasd_ccw_req *fcp;
866 struct eckd_count *ect;
867 struct ccw1 *ccw;
868 void *data;
869 int rpt, cyl, head;
870 int cplength, datasize;
871 int i;
872
873 private = (struct dasd_eckd_private *) device->private;
874 rpt = recs_per_track(&private->rdc_data, 0, fdata->blksize);
875 cyl = fdata->start_unit / private->rdc_data.trk_per_cyl;
876 head = fdata->start_unit % private->rdc_data.trk_per_cyl;
877
878 /* Sanity checks. */
879 if (fdata->start_unit >=
880 (private->rdc_data.no_cyl * private->rdc_data.trk_per_cyl)) {
881 DEV_MESSAGE(KERN_INFO, device, "Track no %d too big!",
882 fdata->start_unit);
883 return ERR_PTR(-EINVAL);
884 }
885 if (fdata->start_unit > fdata->stop_unit) {
886 DEV_MESSAGE(KERN_INFO, device, "Track %d reached! ending.",
887 fdata->start_unit);
888 return ERR_PTR(-EINVAL);
889 }
890 if (dasd_check_blocksize(fdata->blksize) != 0) {
891 DEV_MESSAGE(KERN_WARNING, device,
892 "Invalid blocksize %d...terminating!",
893 fdata->blksize);
894 return ERR_PTR(-EINVAL);
895 }
896
897 /*
898 * fdata->intensity is a bit string that tells us what to do:
899 * Bit 0: write record zero
900 * Bit 1: write home address, currently not supported
901 * Bit 2: invalidate tracks
902 * Bit 3: use OS/390 compatible disk layout (cdl)
903 * Only some bit combinations make sense.
904 */
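/*
 * Bit 0 is the least significant bit, so e.g. an intensity of 0x09
 * means "write record zero" (0x01) plus "use cdl" (0x08).
 */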
905 switch (fdata->intensity) {
906 case 0x00: /* Normal format */
907 case 0x08: /* Normal format, use cdl. */
908 cplength = 2 + rpt;
909 datasize = sizeof(struct DE_eckd_data) +
910 sizeof(struct LO_eckd_data) +
911 rpt * sizeof(struct eckd_count);
912 break;
913 case 0x01: /* Write record zero and format track. */
914 case 0x09: /* Write record zero and format track, use cdl. */
915 cplength = 3 + rpt;
916 datasize = sizeof(struct DE_eckd_data) +
917 sizeof(struct LO_eckd_data) +
918 sizeof(struct eckd_count) +
919 rpt * sizeof(struct eckd_count);
920 break;
921 case 0x04: /* Invalidate track. */
922 case 0x0c: /* Invalidate track, use cdl. */
923 cplength = 3;
924 datasize = sizeof(struct DE_eckd_data) +
925 sizeof(struct LO_eckd_data) +
926 sizeof(struct eckd_count);
927 break;
928 default:
929 DEV_MESSAGE(KERN_WARNING, device, "Invalid flags 0x%x.",
930 fdata->intensity);
931 return ERR_PTR(-EINVAL);
932 }
933 /* Allocate the format ccw request. */
934 fcp = dasd_smalloc_request(dasd_eckd_discipline.name,
935 cplength, datasize, device);
936 if (IS_ERR(fcp))
937 return fcp;
938
939 data = fcp->data;
940 ccw = fcp->cpaddr;
941
942 switch (fdata->intensity & ~0x08) {
943 case 0x00: /* Normal format. */
944 define_extent(ccw++, (struct DE_eckd_data *) data,
945 fdata->start_unit, fdata->start_unit,
946 DASD_ECKD_CCW_WRITE_CKD, device);
947 data += sizeof(struct DE_eckd_data);
948 ccw[-1].flags |= CCW_FLAG_CC;
949 locate_record(ccw++, (struct LO_eckd_data *) data,
950 fdata->start_unit, 0, rpt,
951 DASD_ECKD_CCW_WRITE_CKD, device,
952 fdata->blksize);
953 data += sizeof(struct LO_eckd_data);
954 break;
955 case 0x01: /* Write record zero + format track. */
956 define_extent(ccw++, (struct DE_eckd_data *) data,
957 fdata->start_unit, fdata->start_unit,
958 DASD_ECKD_CCW_WRITE_RECORD_ZERO,
959 device);
960 data += sizeof(struct DE_eckd_data);
961 ccw[-1].flags |= CCW_FLAG_CC;
962 locate_record(ccw++, (struct LO_eckd_data *) data,
963 fdata->start_unit, 0, rpt + 1,
964 DASD_ECKD_CCW_WRITE_RECORD_ZERO, device,
965 device->bp_block);
966 data += sizeof(struct LO_eckd_data);
967 break;
968 case 0x04: /* Invalidate track. */
969 define_extent(ccw++, (struct DE_eckd_data *) data,
970 fdata->start_unit, fdata->start_unit,
971 DASD_ECKD_CCW_WRITE_CKD, device);
972 data += sizeof(struct DE_eckd_data);
973 ccw[-1].flags |= CCW_FLAG_CC;
974 locate_record(ccw++, (struct LO_eckd_data *) data,
975 fdata->start_unit, 0, 1,
976 DASD_ECKD_CCW_WRITE_CKD, device, 8);
977 data += sizeof(struct LO_eckd_data);
978 break;
979 }
980 if (fdata->intensity & 0x01) { /* write record zero */
981 ect = (struct eckd_count *) data;
982 data += sizeof(struct eckd_count);
983 ect->cyl = cyl;
984 ect->head = head;
985 ect->record = 0;
986 ect->kl = 0;
987 ect->dl = 8;
988 ccw[-1].flags |= CCW_FLAG_CC;
989 ccw->cmd_code = DASD_ECKD_CCW_WRITE_RECORD_ZERO;
990 ccw->flags = CCW_FLAG_SLI;
991 ccw->count = 8;
992 ccw->cda = (__u32)(addr_t) ect;
993 ccw++;
994 }
995 if ((fdata->intensity & ~0x08) & 0x04) { /* erase track */
996 ect = (struct eckd_count *) data;
997 data += sizeof(struct eckd_count);
998 ect->cyl = cyl;
999 ect->head = head;
1000 ect->record = 1;
1001 ect->kl = 0;
1002 ect->dl = 0;
1003 ccw[-1].flags |= CCW_FLAG_CC;
1004 ccw->cmd_code = DASD_ECKD_CCW_WRITE_CKD;
1005 ccw->flags = CCW_FLAG_SLI;
1006 ccw->count = 8;
1007 ccw->cda = (__u32)(addr_t) ect;
1008 } else { /* write remaining records */
1009 for (i = 0; i < rpt; i++) {
1010 ect = (struct eckd_count *) data;
1011 data += sizeof(struct eckd_count);
1012 ect->cyl = cyl;
1013 ect->head = head;
1014 ect->record = i + 1;
1015 ect->kl = 0;
1016 ect->dl = fdata->blksize;
1017 /* Check for special tracks 0-1 when formatting CDL */
1018 if ((fdata->intensity & 0x08) &&
1019 fdata->start_unit == 0) {
1020 if (i < 3) {
1021 ect->kl = 4;
1022 ect->dl = sizes_trk0[i] - 4;
1023 }
1024 }
1025 if ((fdata->intensity & 0x08) &&
1026 fdata->start_unit == 1) {
1027 ect->kl = 44;
1028 ect->dl = LABEL_SIZE - 44;
1029 }
1030 ccw[-1].flags |= CCW_FLAG_CC;
1031 ccw->cmd_code = DASD_ECKD_CCW_WRITE_CKD;
1032 ccw->flags = CCW_FLAG_SLI;
1033 ccw->count = 8;
1034 ccw->cda = (__u32)(addr_t) ect;
1035 ccw++;
1036 }
1037 }
1038 fcp->device = device;
1039 fcp->retries = 2; /* set retry counter to enable ERP */
1040 fcp->buildclk = get_clock();
1041 fcp->status = DASD_CQR_FILLED;
1042 return fcp;
1043 }
1044
1045 static dasd_era_t
1046 dasd_eckd_examine_error(struct dasd_ccw_req * cqr, struct irb * irb)
1047 {
1048 struct dasd_device *device = (struct dasd_device *) cqr->device;
1049 struct ccw_device *cdev = device->cdev;
1050
1051 if (irb->scsw.cstat == 0x00 &&
1052 irb->scsw.dstat == (DEV_STAT_CHN_END | DEV_STAT_DEV_END))
1053 return dasd_era_none;
1054
1055 switch (cdev->id.cu_type) {
1056 case 0x3990:
1057 case 0x2105:
1058 case 0x2107:
1059 case 0x1750:
1060 return dasd_3990_erp_examine(cqr, irb);
1061 case 0x9343:
1062 return dasd_9343_erp_examine(cqr, irb);
1063 case 0x3880:
1064 default:
1065 DEV_MESSAGE(KERN_WARNING, device, "%s",
1066 "default (unknown CU type) - RECOVERABLE return");
1067 return dasd_era_recover;
1068 }
1069 }
1070
1071 static dasd_erp_fn_t
1072 dasd_eckd_erp_action(struct dasd_ccw_req * cqr)
1073 {
1074 struct dasd_device *device = (struct dasd_device *) cqr->device;
1075 struct ccw_device *cdev = device->cdev;
1076
1077 switch (cdev->id.cu_type) {
1078 case 0x3990:
1079 case 0x2105:
1080 case 0x2107:
1081 case 0x1750:
1082 return dasd_3990_erp_action;
1083 case 0x9343:
1084 case 0x3880:
1085 default:
1086 return dasd_default_erp_action;
1087 }
1088 }
1089
1090 static dasd_erp_fn_t
1091 dasd_eckd_erp_postaction(struct dasd_ccw_req * cqr)
1092 {
1093 return dasd_default_erp_postaction;
1094 }
1095
1096 static struct dasd_ccw_req *
1097 dasd_eckd_build_cp(struct dasd_device * device, struct request *req)
1098 {
1099 struct dasd_eckd_private *private;
1100 unsigned long *idaws;
1101 struct LO_eckd_data *LO_data;
1102 struct dasd_ccw_req *cqr;
1103 struct ccw1 *ccw;
1104 struct bio *bio;
1105 struct bio_vec *bv;
1106 char *dst;
1107 unsigned int blksize, blk_per_trk, off;
1108 int count, cidaw, cplength, datasize;
1109 sector_t recid, first_rec, last_rec;
1110 sector_t first_trk, last_trk;
1111 unsigned int first_offs, last_offs;
1112 unsigned char cmd, rcmd;
1113 int i;
1114
1115 private = (struct dasd_eckd_private *) device->private;
1116 if (rq_data_dir(req) == READ)
1117 cmd = DASD_ECKD_CCW_READ_MT;
1118 else if (rq_data_dir(req) == WRITE)
1119 cmd = DASD_ECKD_CCW_WRITE_MT;
1120 else
1121 return ERR_PTR(-EINVAL);
1122 /* Calculate number of blocks/records per track. */
1123 blksize = device->bp_block;
1124 blk_per_trk = recs_per_track(&private->rdc_data, 0, blksize);
1125 /* Calculate record id of first and last block. */
1126 first_rec = first_trk = req->sector >> device->s2b_shift;
1127 first_offs = sector_div(first_trk, blk_per_trk);
1128 last_rec = last_trk =
1129 (req->sector + req->nr_sectors - 1) >> device->s2b_shift;
1130 last_offs = sector_div(last_trk, blk_per_trk);
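/*
 * Worked example (added for illustration, assuming 4kB blocks, i.e.
 * s2b_shift = 3, and 12 blocks per 3390 track): req->sector = 200
 * maps to block 25, which is block offset 1 on track 2, so
 * first_trk = 2 and first_offs = 1.
 */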
1131 /* Check struct bio and count the number of blocks for the request. */
1132 count = 0;
1133 cidaw = 0;
1134 rq_for_each_bio(bio, req) {
1135 bio_for_each_segment(bv, bio, i) {
1136 if (bv->bv_len & (blksize - 1))
1137 /* Eckd can only do full blocks. */
1138 return ERR_PTR(-EINVAL);
1139 count += bv->bv_len >> (device->s2b_shift + 9);
1140 #if defined(CONFIG_64BIT)
1141 if (idal_is_needed (page_address(bv->bv_page),
1142 bv->bv_len))
1143 cidaw += bv->bv_len >> (device->s2b_shift + 9);
1144 #endif
1145 }
1146 }
1147 /* Paranoia. */
1148 if (count != last_rec - first_rec + 1)
1149 return ERR_PTR(-EINVAL);
1150 /* 1x define extent + 1x locate record + number of blocks */
1151 cplength = 2 + count;
1152 /* 1x define extent + 1x locate record + cidaws*sizeof(long) */
1153 datasize = sizeof(struct DE_eckd_data) + sizeof(struct LO_eckd_data) +
1154 cidaw * sizeof(unsigned long);
1155 /* Find out the number of additional locate record ccws for cdl. */
1156 if (private->uses_cdl && first_rec < 2*blk_per_trk) {
1157 if (last_rec >= 2*blk_per_trk)
1158 count = 2*blk_per_trk - first_rec;
1159 cplength += count;
1160 datasize += count*sizeof(struct LO_eckd_data);
1161 }
1162 /* Allocate the ccw request. */
1163 cqr = dasd_smalloc_request(dasd_eckd_discipline.name,
1164 cplength, datasize, device);
1165 if (IS_ERR(cqr))
1166 return cqr;
1167 ccw = cqr->cpaddr;
1168 /* First ccw is define extent. */
1169 if (define_extent(ccw++, cqr->data, first_trk,
1170 last_trk, cmd, device) == -EAGAIN) {
1171 /* Clock not in sync and XRC is enabled. Try again later. */
1172 dasd_sfree_request(cqr, device);
1173 return ERR_PTR(-EAGAIN);
1174 }
1175 /* Build locate_record + read/write ccws. */
1176 idaws = (unsigned long *) (cqr->data + sizeof(struct DE_eckd_data));
1177 LO_data = (struct LO_eckd_data *) (idaws + cidaw);
1178 recid = first_rec;
1179 if (private->uses_cdl == 0 || recid > 2*blk_per_trk) {
1180 /* Only standard blocks so there is just one locate record. */
1181 ccw[-1].flags |= CCW_FLAG_CC;
1182 locate_record(ccw++, LO_data++, first_trk, first_offs + 1,
1183 last_rec - recid + 1, cmd, device, blksize);
1184 }
1185 rq_for_each_bio(bio, req) bio_for_each_segment(bv, bio, i) {
1186 dst = page_address(bv->bv_page) + bv->bv_offset;
1187 if (dasd_page_cache) {
1188 char *copy = kmem_cache_alloc(dasd_page_cache,
1189 GFP_DMA | __GFP_NOWARN);
1190 if (copy && rq_data_dir(req) == WRITE)
1191 memcpy(copy + bv->bv_offset, dst, bv->bv_len);
1192 if (copy)
1193 dst = copy + bv->bv_offset;
1194 }
1195 for (off = 0; off < bv->bv_len; off += blksize) {
1196 sector_t trkid = recid;
1197 unsigned int recoffs = sector_div(trkid, blk_per_trk);
1198 rcmd = cmd;
1199 count = blksize;
1200 /* Locate record for cdl special block ? */
1201 if (private->uses_cdl && recid < 2*blk_per_trk) {
1202 if (dasd_eckd_cdl_special(blk_per_trk, recid)){
1203 rcmd |= 0x8;
1204 count = dasd_eckd_cdl_reclen(recid);
1205 if (count < blksize &&
1206 rq_data_dir(req) == READ)
1207 memset(dst + count, 0xe5,
1208 blksize - count);
1209 }
1210 ccw[-1].flags |= CCW_FLAG_CC;
1211 locate_record(ccw++, LO_data++,
1212 trkid, recoffs + 1,
1213 1, rcmd, device, count);
1214 }
1215 /* Locate record for standard blocks ? */
1216 if (private->uses_cdl && recid == 2*blk_per_trk) {
1217 ccw[-1].flags |= CCW_FLAG_CC;
1218 locate_record(ccw++, LO_data++,
1219 trkid, recoffs + 1,
1220 last_rec - recid + 1,
1221 cmd, device, count);
1222 }
1223 /* Read/write ccw. */
1224 ccw[-1].flags |= CCW_FLAG_CC;
1225 ccw->cmd_code = rcmd;
1226 ccw->count = count;
1227 if (idal_is_needed(dst, blksize)) {
1228 ccw->cda = (__u32)(addr_t) idaws;
1229 ccw->flags = CCW_FLAG_IDA;
1230 idaws = idal_create_words(idaws, dst, blksize);
1231 } else {
1232 ccw->cda = (__u32)(addr_t) dst;
1233 ccw->flags = 0;
1234 }
1235 ccw++;
1236 dst += blksize;
1237 recid++;
1238 }
1239 }
1240 if (req->cmd_flags & REQ_FAILFAST)
1241 set_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags);
1242 cqr->device = device;
1243 cqr->expires = 5 * 60 * HZ; /* 5 minutes */
1244 cqr->lpm = private->path_data.ppm;
1245 cqr->retries = 256;
1246 cqr->buildclk = get_clock();
1247 cqr->status = DASD_CQR_FILLED;
1248 return cqr;
1249 }
1250
1251 static int
1252 dasd_eckd_free_cp(struct dasd_ccw_req *cqr, struct request *req)
1253 {
1254 struct dasd_eckd_private *private;
1255 struct ccw1 *ccw;
1256 struct bio *bio;
1257 struct bio_vec *bv;
1258 char *dst, *cda;
1259 unsigned int blksize, blk_per_trk, off;
1260 sector_t recid;
1261 int i, status;
1262
1263 if (!dasd_page_cache)
1264 goto out;
1265 private = (struct dasd_eckd_private *) cqr->device->private;
1266 blksize = cqr->device->bp_block;
1267 blk_per_trk = recs_per_track(&private->rdc_data, 0, blksize);
1268 recid = req->sector >> cqr->device->s2b_shift;
1269 ccw = cqr->cpaddr;
1270 /* Skip over define extent & locate record. */
1271 ccw++;
1272 if (private->uses_cdl == 0 || recid > 2*blk_per_trk)
1273 ccw++;
1274 rq_for_each_bio(bio, req) bio_for_each_segment(bv, bio, i) {
1275 dst = page_address(bv->bv_page) + bv->bv_offset;
1276 for (off = 0; off < bv->bv_len; off += blksize) {
1277 /* Skip locate record. */
1278 if (private->uses_cdl && recid <= 2*blk_per_trk)
1279 ccw++;
1280 if (dst) {
1281 if (ccw->flags & CCW_FLAG_IDA)
1282 cda = *((char **)((addr_t) ccw->cda));
1283 else
1284 cda = (char *)((addr_t) ccw->cda);
1285 if (dst != cda) {
1286 if (rq_data_dir(req) == READ)
1287 memcpy(dst, cda, bv->bv_len);
1288 kmem_cache_free(dasd_page_cache,
1289 (void *)((addr_t)cda & PAGE_MASK));
1290 }
1291 dst = NULL;
1292 }
1293 ccw++;
1294 recid++;
1295 }
1296 }
1297 out:
1298 status = cqr->status == DASD_CQR_DONE;
1299 dasd_sfree_request(cqr, cqr->device);
1300 return status;
1301 }
1302
1303 static int
1304 dasd_eckd_fill_info(struct dasd_device * device,
1305 struct dasd_information2_t * info)
1306 {
1307 struct dasd_eckd_private *private;
1308
1309 private = (struct dasd_eckd_private *) device->private;
1310 info->label_block = 2;
1311 info->FBA_layout = private->uses_cdl ? 0 : 1;
1312 info->format = private->uses_cdl ? DASD_FORMAT_CDL : DASD_FORMAT_LDL;
1313 info->characteristics_size = sizeof(struct dasd_eckd_characteristics);
1314 memcpy(info->characteristics, &private->rdc_data,
1315 sizeof(struct dasd_eckd_characteristics));
1316 info->confdata_size = sizeof (struct dasd_eckd_confdata);
1317 memcpy(info->configuration_data, &private->conf_data,
1318 sizeof (struct dasd_eckd_confdata));
1319 return 0;
1320 }
1321
1322 /*
1323 * SECTION: ioctl functions for eckd devices.
1324 */
1325
1326 /*
1327 * Release device ioctl.
1328 * Builds a channel program to release a device that was
1329 * previously reserved (see dasd_eckd_reserve).
1330 */
1331 static int
1332 dasd_eckd_release(struct dasd_device *device)
1333 {
1334 struct dasd_ccw_req *cqr;
1335 int rc;
1336
1337 if (!capable(CAP_SYS_ADMIN))
1338 return -EACCES;
1339
1340 cqr = dasd_smalloc_request(dasd_eckd_discipline.name,
1341 1, 32, device);
1342 if (IS_ERR(cqr)) {
1343 DEV_MESSAGE(KERN_WARNING, device, "%s",
1344 "Could not allocate initialization request");
1345 return PTR_ERR(cqr);
1346 }
1347 cqr->cpaddr->cmd_code = DASD_ECKD_CCW_RELEASE;
1348 cqr->cpaddr->flags |= CCW_FLAG_SLI;
1349 cqr->cpaddr->count = 32;
1350 cqr->cpaddr->cda = (__u32)(addr_t) cqr->data;
1351 cqr->device = device;
1352 clear_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags);
1353 set_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags);
1354 cqr->retries = 2; /* set retry counter to enable basic ERP */
1355 cqr->expires = 2 * HZ;
1356 cqr->buildclk = get_clock();
1357 cqr->status = DASD_CQR_FILLED;
1358
1359 rc = dasd_sleep_on_immediatly(cqr);
1360
1361 dasd_sfree_request(cqr, cqr->device);
1362 return rc;
1363 }
1364
1365 /*
1366 * Reserve device ioctl.
1367 * Options are set to 'synchronous wait for interrupt' and
1368 * 'timeout the request'. This leads to a terminate IO if
1369 * the interrupt is outstanding for a certain time.
1370 */
1371 static int
1372 dasd_eckd_reserve(struct dasd_device *device)
1373 {
1374 struct dasd_ccw_req *cqr;
1375 int rc;
1376
1377 if (!capable(CAP_SYS_ADMIN))
1378 return -EACCES;
1379
1380 cqr = dasd_smalloc_request(dasd_eckd_discipline.name,
1381 1, 32, device);
1382 if (IS_ERR(cqr)) {
1383 DEV_MESSAGE(KERN_WARNING, device, "%s",
1384 "Could not allocate initialization request");
1385 return PTR_ERR(cqr);
1386 }
1387 cqr->cpaddr->cmd_code = DASD_ECKD_CCW_RESERVE;
1388 cqr->cpaddr->flags |= CCW_FLAG_SLI;
1389 cqr->cpaddr->count = 32;
1390 cqr->cpaddr->cda = (__u32)(addr_t) cqr->data;
1391 cqr->device = device;
1392 clear_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags);
1393 set_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags);
1394 cqr->retries = 2; /* set retry counter to enable basic ERP */
1395 cqr->expires = 2 * HZ;
1396 cqr->buildclk = get_clock();
1397 cqr->status = DASD_CQR_FILLED;
1398
1399 rc = dasd_sleep_on_immediatly(cqr);
1400
1401 dasd_sfree_request(cqr, cqr->device);
1402 return rc;
1403 }
1404
1405 /*
1406 * Steal lock ioctl - unconditional reserve device.
1407 * Builds a channel program to break a device's reservation.
1408 * (unconditional reserve)
1409 */
1410 static int
1411 dasd_eckd_steal_lock(struct dasd_device *device)
1412 {
1413 struct dasd_ccw_req *cqr;
1414 int rc;
1415
1416 if (!capable(CAP_SYS_ADMIN))
1417 return -EACCES;
1418
1419 cqr = dasd_smalloc_request(dasd_eckd_discipline.name,
1420 1, 32, device);
1421 if (IS_ERR(cqr)) {
1422 DEV_MESSAGE(KERN_WARNING, device, "%s",
1423 "Could not allocate initialization request");
1424 return PTR_ERR(cqr);
1425 }
1426 cqr->cpaddr->cmd_code = DASD_ECKD_CCW_SLCK;
1427 cqr->cpaddr->flags |= CCW_FLAG_SLI;
1428 cqr->cpaddr->count = 32;
1429 cqr->cpaddr->cda = (__u32)(addr_t) cqr->data;
1430 cqr->device = device;
1431 clear_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags);
1432 set_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags);
1433 cqr->retries = 2; /* set retry counter to enable basic ERP */
1434 cqr->expires = 2 * HZ;
1435 cqr->buildclk = get_clock();
1436 cqr->status = DASD_CQR_FILLED;
1437
1438 rc = dasd_sleep_on_immediatly(cqr);
1439
1440 dasd_sfree_request(cqr, cqr->device);
1441 return rc;
1442 }
1443
1444 /*
1445 * Read performance statistics
1446 */
1447 static int
1448 dasd_eckd_performance(struct dasd_device *device, void __user *argp)
1449 {
1450 struct dasd_psf_prssd_data *prssdp;
1451 struct dasd_rssd_perf_stats_t *stats;
1452 struct dasd_ccw_req *cqr;
1453 struct ccw1 *ccw;
1454 int rc;
1455
1456 cqr = dasd_smalloc_request(dasd_eckd_discipline.name,
1457 1 /* PSF */ + 1 /* RSSD */ ,
1458 (sizeof (struct dasd_psf_prssd_data) +
1459 sizeof (struct dasd_rssd_perf_stats_t)),
1460 device);
1461 if (IS_ERR(cqr)) {
1462 DEV_MESSAGE(KERN_WARNING, device, "%s",
1463 "Could not allocate initialization request");
1464 return PTR_ERR(cqr);
1465 }
1466 cqr->device = device;
1467 cqr->retries = 0;
1468 cqr->expires = 10 * HZ;
1469
1470 /* Prepare for Read Subsystem Data */
1471 prssdp = (struct dasd_psf_prssd_data *) cqr->data;
1472 memset(prssdp, 0, sizeof (struct dasd_psf_prssd_data));
1473 prssdp->order = PSF_ORDER_PRSSD;
1474 prssdp->suborder = 0x01; /* Performance Statistics */
1475 prssdp->varies[1] = 0x01; /* Perf Statistics for the Subsystem */
1476
1477 ccw = cqr->cpaddr;
1478 ccw->cmd_code = DASD_ECKD_CCW_PSF;
1479 ccw->count = sizeof (struct dasd_psf_prssd_data);
1480 ccw->flags |= CCW_FLAG_CC;
1481 ccw->cda = (__u32)(addr_t) prssdp;
1482
1483 /* Read Subsystem Data - Performance Statistics */
1484 stats = (struct dasd_rssd_perf_stats_t *) (prssdp + 1);
1485 memset(stats, 0, sizeof (struct dasd_rssd_perf_stats_t));
1486
1487 ccw++;
1488 ccw->cmd_code = DASD_ECKD_CCW_RSSD;
1489 ccw->count = sizeof (struct dasd_rssd_perf_stats_t);
1490 ccw->cda = (__u32)(addr_t) stats;
1491
1492 cqr->buildclk = get_clock();
1493 cqr->status = DASD_CQR_FILLED;
1494 rc = dasd_sleep_on(cqr);
1495 if (rc == 0) {
1496 /* Prepare for Read Subsystem Data */
1497 prssdp = (struct dasd_psf_prssd_data *) cqr->data;
1498 stats = (struct dasd_rssd_perf_stats_t *) (prssdp + 1);
1499 if (copy_to_user(argp, stats,
1500 sizeof(struct dasd_rssd_perf_stats_t)))
1501 rc = -EFAULT;
1502 }
1503 dasd_sfree_request(cqr, cqr->device);
1504 return rc;
1505 }
1506
1507 /*
1508 * Get attributes (cache operations)
1509 * Returns the cache attributes used in Define Extent (DE).
1510 */
1511 static int
1512 dasd_eckd_get_attrib(struct dasd_device *device, void __user *argp)
1513 {
1514 struct dasd_eckd_private *private =
1515 (struct dasd_eckd_private *)device->private;
1516 struct attrib_data_t attrib = private->attrib;
1517 int rc;
1518
1519 if (!capable(CAP_SYS_ADMIN))
1520 return -EACCES;
1521 if (!argp)
1522 return -EINVAL;
1523
1524 rc = 0;
1525 if (copy_to_user(argp, (long *) &attrib,
1526 sizeof (struct attrib_data_t)))
1527 rc = -EFAULT;
1528
1529 return rc;
1530 }
1531
1532 /*
1533 * Set attributes (cache operations)
1534 * Stores the attributes for cache operation to be used in Define Extent (DE).
1535 */
1536 static int
1537 dasd_eckd_set_attrib(struct dasd_device *device, void __user *argp)
1538 {
1539 struct dasd_eckd_private *private =
1540 (struct dasd_eckd_private *)device->private;
1541 struct attrib_data_t attrib;
1542
1543 if (!capable(CAP_SYS_ADMIN))
1544 return -EACCES;
1545 if (!argp)
1546 return -EINVAL;
1547
1548 if (copy_from_user(&attrib, argp, sizeof(struct attrib_data_t)))
1549 return -EFAULT;
1550 private->attrib = attrib;
1551
1552 DEV_MESSAGE(KERN_INFO, device,
1553 "cache operation mode set to %x (%i cylinder prestage)",
1554 private->attrib.operation, private->attrib.nr_cyl);
1555 return 0;
1556 }
1557
1558 static int
1559 dasd_eckd_ioctl(struct dasd_device *device, unsigned int cmd, void __user *argp)
1560 {
1561 switch (cmd) {
1562 case BIODASDGATTR:
1563 return dasd_eckd_get_attrib(device, argp);
1564 case BIODASDSATTR:
1565 return dasd_eckd_set_attrib(device, argp);
1566 case BIODASDPSRD:
1567 return dasd_eckd_performance(device, argp);
1568 case BIODASDRLSE:
1569 return dasd_eckd_release(device);
1570 case BIODASDRSRV:
1571 return dasd_eckd_reserve(device);
1572 case BIODASDSLCK:
1573 return dasd_eckd_steal_lock(device);
1574 default:
1575 return -ENOIOCTLCMD;
1576 }
1577 }
1578
1579 /*
1580 * Dump the range of CCWs into 'page' buffer
1581 * and return number of printed chars.
1582 */
1583 static int
1584 dasd_eckd_dump_ccw_range(struct ccw1 *from, struct ccw1 *to, char *page)
1585 {
1586 int len, count;
1587 char *datap;
1588
1589 len = 0;
1590 while (from <= to) {
1591 len += sprintf(page + len, KERN_ERR PRINTK_HEADER
1592 " CCW %p: %08X %08X DAT:",
1593 from, ((int *) from)[0], ((int *) from)[1]);
1594
1595 /* get pointer to data (consider IDALs) */
1596 if (from->flags & CCW_FLAG_IDA)
1597 datap = (char *) *((addr_t *) (addr_t) from->cda);
1598 else
1599 datap = (char *) ((addr_t) from->cda);
1600
1601 /* dump data (max 32 bytes) */
1602 for (count = 0; count < from->count && count < 32; count++) {
1603 if (count % 8 == 0) len += sprintf(page + len, " ");
1604 if (count % 4 == 0) len += sprintf(page + len, " ");
1605 len += sprintf(page + len, "%02x", datap[count]);
1606 }
1607 len += sprintf(page + len, "\n");
1608 from++;
1609 }
1610 return len;
1611 }
1612
1613 /*
1614 * Print sense data and related channel program.
1615 * It is printed in parts because the printk buffer is only 1024 bytes.
1616 */
1617 static void
1618 dasd_eckd_dump_sense(struct dasd_device *device, struct dasd_ccw_req * req,
1619 struct irb *irb)
1620 {
1621 char *page;
1622 struct ccw1 *first, *last, *fail, *from, *to;
1623 int len, sl, sct;
1624
1625 page = (char *) get_zeroed_page(GFP_ATOMIC);
1626 if (page == NULL) {
1627 DEV_MESSAGE(KERN_ERR, device, " %s",
1628 "No memory to dump sense data");
1629 return;
1630 }
1631 /* dump the sense data */
1632 len = sprintf(page, KERN_ERR PRINTK_HEADER
1633 " I/O status report for device %s:\n",
1634 device->cdev->dev.bus_id);
1635 len += sprintf(page + len, KERN_ERR PRINTK_HEADER
1636 " in req: %p CS: 0x%02X DS: 0x%02X\n", req,
1637 irb->scsw.cstat, irb->scsw.dstat);
1638 len += sprintf(page + len, KERN_ERR PRINTK_HEADER
1639 " device %s: Failing CCW: %p\n",
1640 device->cdev->dev.bus_id,
1641 (void *) (addr_t) irb->scsw.cpa);
1642 if (irb->esw.esw0.erw.cons) {
1643 for (sl = 0; sl < 4; sl++) {
1644 len += sprintf(page + len, KERN_ERR PRINTK_HEADER
1645 " Sense(hex) %2d-%2d:",
1646 (8 * sl), ((8 * sl) + 7));
1647
1648 for (sct = 0; sct < 8; sct++) {
1649 len += sprintf(page + len, " %02x",
1650 irb->ecw[8 * sl + sct]);
1651 }
1652 len += sprintf(page + len, "\n");
1653 }
1654
1655 if (irb->ecw[27] & DASD_SENSE_BIT_0) {
1656 /* 24 Byte Sense Data */
1657 sprintf(page + len, KERN_ERR PRINTK_HEADER
1658 " 24 Byte: %x MSG %x, "
1659 "%s MSGb to SYSOP\n",
1660 irb->ecw[7] >> 4, irb->ecw[7] & 0x0f,
1661 irb->ecw[1] & 0x10 ? "" : "no");
1662 } else {
1663 /* 32 Byte Sense Data */
1664 sprintf(page + len, KERN_ERR PRINTK_HEADER
1665 " 32 Byte: Format: %x "
1666 "Exception class %x\n",
1667 irb->ecw[6] & 0x0f, irb->ecw[22] >> 4);
1668 }
1669 } else {
1670 sprintf(page + len, KERN_ERR PRINTK_HEADER
1671 " SORRY - NO VALID SENSE AVAILABLE\n");
1672 }
1673 printk("%s", page);
1674
1675 /* dump the Channel Program (max 140 Bytes per line) */
1676 /* Count CCW and print first CCWs (maximum 1024 % 140 = 7) */
1677 first = req->cpaddr;
1678 for (last = first; last->flags & (CCW_FLAG_CC | CCW_FLAG_DC); last++);
1679 to = min(first + 6, last);
1680 len = sprintf(page, KERN_ERR PRINTK_HEADER
1681 " Related CP in req: %p\n", req);
1682 dasd_eckd_dump_ccw_range(first, to, page + len);
1683 printk("%s", page);
1684
1685 /* print failing CCW area (maximum 4) */
1686 /* scsw->cda is either valid or zero */
1687 len = 0;
1688 from = ++to;
1689 fail = (struct ccw1 *)(addr_t) irb->scsw.cpa; /* failing CCW */
1690 if (from < fail - 2) {
1691 from = fail - 2; /* there is a gap - print header */
1692 len += sprintf(page, KERN_ERR PRINTK_HEADER "......\n");
1693 }
1694 to = min(fail + 1, last);
1695 len += dasd_eckd_dump_ccw_range(from, to, page + len);
1696
1697 /* print last CCWs (maximum 2) */
1698 from = max(from, ++to);
1699 if (from < last - 1) {
1700 from = last - 1; /* there is a gap - print header */
1701 len += sprintf(page + len, KERN_ERR PRINTK_HEADER "......\n");
1702 }
1703 len += dasd_eckd_dump_ccw_range(from, last, page + len);
1704 if (len > 0)
1705 printk("%s", page);
1706 free_page((unsigned long) page);
1707 }
1708
1709 /*
1710 * max_blocks is dependent on the amount of storage that is available
1711 * in the static io buffer for each device. Currently each device has
1712 * 8192 bytes (=2 pages). For 64 bit one dasd_mchunkt_t structure has
1713 * 24 bytes, the struct dasd_ccw_req has 136 bytes and each block can use
1714 * up to 16 bytes (8 for the ccw and 8 for the idal pointer). In
1715 * addition we have one define extent ccw + 16 bytes of data and one
1716 * locate record ccw + 16 bytes of data. That makes:
1717 * (8192 - 24 - 136 - 8 - 16 - 8 - 16) / 16 = 499 blocks at maximum.
1718 * We want to fit two into the available memory so that we can immediately
1719 * start the next request if one finishes off. That makes 249.5 blocks
1720 * for one request. Give a little safety and the result is 240.
1721 */
1722 static struct dasd_discipline dasd_eckd_discipline = {
1723 .owner = THIS_MODULE,
1724 .name = "ECKD",
1725 .ebcname = "ECKD",
1726 .max_blocks = 240,
1727 .check_device = dasd_eckd_check_characteristics,
1728 .do_analysis = dasd_eckd_do_analysis,
1729 .fill_geometry = dasd_eckd_fill_geometry,
1730 .start_IO = dasd_start_IO,
1731 .term_IO = dasd_term_IO,
1732 .format_device = dasd_eckd_format_device,
1733 .examine_error = dasd_eckd_examine_error,
1734 .erp_action = dasd_eckd_erp_action,
1735 .erp_postaction = dasd_eckd_erp_postaction,
1736 .build_cp = dasd_eckd_build_cp,
1737 .free_cp = dasd_eckd_free_cp,
1738 .dump_sense = dasd_eckd_dump_sense,
1739 .fill_info = dasd_eckd_fill_info,
1740 .ioctl = dasd_eckd_ioctl,
1741 };
1742
1743 static int __init
1744 dasd_eckd_init(void)
1745 {
1746 ASCEBC(dasd_eckd_discipline.ebcname, 4);
1747 return ccw_driver_register(&dasd_eckd_driver);
1748 }
1749
1750 static void __exit
1751 dasd_eckd_cleanup(void)
1752 {
1753 ccw_driver_unregister(&dasd_eckd_driver);
1754 }
1755
1756 module_init(dasd_eckd_init);
1757 module_exit(dasd_eckd_cleanup);