[S390] dasd: optimize cpu usage in goodcase
[GitHub/mt8127/android_kernel_alcatel_ttab.git] / drivers / s390 / block / dasd_alias.c
CommitLineData
8e09f215
SW
1/*
2 * PAV alias management for the DASD ECKD discipline
3 *
4 * Copyright IBM Corporation, 2007
5 * Author(s): Stefan Weinhuber <wein@de.ibm.com>
6 */
7
fc19f381
SH
8#define KMSG_COMPONENT "dasd"
9
8e09f215
SW
10#include <linux/list.h>
11#include <asm/ebcdic.h>
12#include "dasd_int.h"
13#include "dasd_eckd.h"
14
15#ifdef PRINTK_HEADER
16#undef PRINTK_HEADER
17#endif /* PRINTK_HEADER */
18#define PRINTK_HEADER "dasd(eckd):"
19
20
21/*
22 * General concept of alias management:
23 * - PAV and DASD alias management is specific to the eckd discipline.
24 * - A device is connected to an lcu as long as the device exists.
 * dasd_alias_make_device_known_to_lcu will be called when the
26 * device is checked by the eckd discipline and
27 * dasd_alias_disconnect_device_from_lcu will be called
28 * before the device is deleted.
29 * - The dasd_alias_add_device / dasd_alias_remove_device
30 * functions mark the point when a device is 'ready for service'.
31 * - A summary unit check is a rare occasion, but it is mandatory to
32 * support it. It requires some complex recovery actions before the
33 * devices can be used again (see dasd_alias_handle_summary_unit_check).
34 * - dasd_alias_get_start_dev will find an alias device that can be used
35 * instead of the base device and does some (very simple) load balancing.
36 * This is the function that gets called for each I/O, so when improving
37 * something, this function should get faster or better, the rest has just
38 * to be correct.
39 */
40
41
/* worker functions and helpers defined further down in this file */
static void summary_unit_check_handling_work(struct work_struct *);
static void lcu_update_work(struct work_struct *);
static int _schedule_lcu_update(struct alias_lcu *, struct dasd_device *);

/* root of the alias management tree: list of all known storage servers */
static struct alias_root aliastree = {
	.serverlist = LIST_HEAD_INIT(aliastree.serverlist),
	.lock = __SPIN_LOCK_UNLOCKED(aliastree.lock),
};
50
51static struct alias_server *_find_server(struct dasd_uid *uid)
52{
53 struct alias_server *pos;
54 list_for_each_entry(pos, &aliastree.serverlist, server) {
55 if (!strncmp(pos->uid.vendor, uid->vendor,
56 sizeof(uid->vendor))
57 && !strncmp(pos->uid.serial, uid->serial,
58 sizeof(uid->serial)))
59 return pos;
60 };
61 return NULL;
62}
63
64static struct alias_lcu *_find_lcu(struct alias_server *server,
65 struct dasd_uid *uid)
66{
67 struct alias_lcu *pos;
68 list_for_each_entry(pos, &server->lculist, lcu) {
69 if (pos->uid.ssid == uid->ssid)
70 return pos;
71 };
72 return NULL;
73}
74
75static struct alias_pav_group *_find_group(struct alias_lcu *lcu,
76 struct dasd_uid *uid)
77{
78 struct alias_pav_group *pos;
79 __u8 search_unit_addr;
80
81 /* for hyper pav there is only one group */
82 if (lcu->pav == HYPER_PAV) {
83 if (list_empty(&lcu->grouplist))
84 return NULL;
85 else
86 return list_first_entry(&lcu->grouplist,
87 struct alias_pav_group, group);
88 }
89
90 /* for base pav we have to find the group that matches the base */
91 if (uid->type == UA_BASE_DEVICE)
92 search_unit_addr = uid->real_unit_addr;
93 else
94 search_unit_addr = uid->base_unit_addr;
95 list_for_each_entry(pos, &lcu->grouplist, group) {
4abb08c2
SW
96 if (pos->uid.base_unit_addr == search_unit_addr &&
97 !strncmp(pos->uid.vduit, uid->vduit, sizeof(uid->vduit)))
8e09f215
SW
98 return pos;
99 };
100 return NULL;
101}
102
103static struct alias_server *_allocate_server(struct dasd_uid *uid)
104{
105 struct alias_server *server;
106
107 server = kzalloc(sizeof(*server), GFP_KERNEL);
108 if (!server)
109 return ERR_PTR(-ENOMEM);
110 memcpy(server->uid.vendor, uid->vendor, sizeof(uid->vendor));
111 memcpy(server->uid.serial, uid->serial, sizeof(uid->serial));
112 INIT_LIST_HEAD(&server->server);
113 INIT_LIST_HEAD(&server->lculist);
114 return server;
115}
116
/* Release an alias_server allocated by _allocate_server(). */
static void _free_server(struct alias_server *server)
{
	kfree(server);
}
121
/*
 * Allocate and initialize an alias_lcu, including the DMA-capable
 * buffers it needs later on: the unit address configuration buffer
 * (uac) and a pre-allocated reset-summary-unit-check request
 * (rsu_cqr), so that no allocation is required when a summary unit
 * check has to be handled. Returns ERR_PTR(-ENOMEM) on any failure.
 */
static struct alias_lcu *_allocate_lcu(struct dasd_uid *uid)
{
	struct alias_lcu *lcu;

	lcu = kzalloc(sizeof(*lcu), GFP_KERNEL);
	if (!lcu)
		return ERR_PTR(-ENOMEM);
	/* GFP_DMA: these buffers are used as channel program data */
	lcu->uac = kzalloc(sizeof(*(lcu->uac)), GFP_KERNEL | GFP_DMA);
	if (!lcu->uac)
		goto out_err1;
	lcu->rsu_cqr = kzalloc(sizeof(*lcu->rsu_cqr), GFP_KERNEL | GFP_DMA);
	if (!lcu->rsu_cqr)
		goto out_err2;
	lcu->rsu_cqr->cpaddr = kzalloc(sizeof(struct ccw1),
				       GFP_KERNEL | GFP_DMA);
	if (!lcu->rsu_cqr->cpaddr)
		goto out_err3;
	lcu->rsu_cqr->data = kzalloc(16, GFP_KERNEL | GFP_DMA);
	if (!lcu->rsu_cqr->data)
		goto out_err4;

	/* an lcu is identified by vendor, serial and ssid */
	memcpy(lcu->uid.vendor, uid->vendor, sizeof(uid->vendor));
	memcpy(lcu->uid.serial, uid->serial, sizeof(uid->serial));
	lcu->uid.ssid = uid->ssid;
	lcu->pav = NO_PAV;
	/* a fresh lcu always needs its unit address configuration read */
	lcu->flags = NEED_UAC_UPDATE | UPDATE_PENDING;
	INIT_LIST_HEAD(&lcu->lcu);
	INIT_LIST_HEAD(&lcu->inactive_devices);
	INIT_LIST_HEAD(&lcu->active_devices);
	INIT_LIST_HEAD(&lcu->grouplist);
	INIT_WORK(&lcu->suc_data.worker, summary_unit_check_handling_work);
	INIT_DELAYED_WORK(&lcu->ruac_data.dwork, lcu_update_work);
	spin_lock_init(&lcu->lock);
	return lcu;

	/* unwind in reverse allocation order */
out_err4:
	kfree(lcu->rsu_cqr->cpaddr);
out_err3:
	kfree(lcu->rsu_cqr);
out_err2:
	kfree(lcu->uac);
out_err1:
	kfree(lcu);
	return ERR_PTR(-ENOMEM);
}
167
/*
 * Release an alias_lcu and all the embedded buffers allocated by
 * _allocate_lcu(). Callers only free an lcu once it holds no devices
 * and has been removed from its server's lculist.
 */
static void _free_lcu(struct alias_lcu *lcu)
{
	kfree(lcu->rsu_cqr->data);
	kfree(lcu->rsu_cqr->cpaddr);
	kfree(lcu->rsu_cqr);
	kfree(lcu->uac);
	kfree(lcu);
}
176
177/*
178 * This is the function that will allocate all the server and lcu data,
179 * so this function must be called first for a new device.
180 * If the return value is 1, the lcu was already known before, if it
181 * is 0, this is a new lcu.
182 * Negative return code indicates that something went wrong (e.g. -ENOMEM)
183 */
184int dasd_alias_make_device_known_to_lcu(struct dasd_device *device)
185{
186 struct dasd_eckd_private *private;
187 unsigned long flags;
188 struct alias_server *server, *newserver;
189 struct alias_lcu *lcu, *newlcu;
190 int is_lcu_known;
191 struct dasd_uid *uid;
192
193 private = (struct dasd_eckd_private *) device->private;
194 uid = &private->uid;
195 spin_lock_irqsave(&aliastree.lock, flags);
196 is_lcu_known = 1;
197 server = _find_server(uid);
198 if (!server) {
199 spin_unlock_irqrestore(&aliastree.lock, flags);
200 newserver = _allocate_server(uid);
201 if (IS_ERR(newserver))
202 return PTR_ERR(newserver);
203 spin_lock_irqsave(&aliastree.lock, flags);
204 server = _find_server(uid);
205 if (!server) {
206 list_add(&newserver->server, &aliastree.serverlist);
207 server = newserver;
208 is_lcu_known = 0;
209 } else {
210 /* someone was faster */
211 _free_server(newserver);
212 }
213 }
214
215 lcu = _find_lcu(server, uid);
216 if (!lcu) {
217 spin_unlock_irqrestore(&aliastree.lock, flags);
218 newlcu = _allocate_lcu(uid);
219 if (IS_ERR(newlcu))
220 return PTR_ERR(lcu);
221 spin_lock_irqsave(&aliastree.lock, flags);
222 lcu = _find_lcu(server, uid);
223 if (!lcu) {
224 list_add(&newlcu->lcu, &server->lculist);
225 lcu = newlcu;
226 is_lcu_known = 0;
227 } else {
228 /* someone was faster */
229 _free_lcu(newlcu);
230 }
231 is_lcu_known = 0;
232 }
233 spin_lock(&lcu->lock);
234 list_add(&device->alias_list, &lcu->inactive_devices);
235 private->lcu = lcu;
236 spin_unlock(&lcu->lock);
237 spin_unlock_irqrestore(&aliastree.lock, flags);
238
239 return is_lcu_known;
240}
241
/*
 * This function removes a device from the scope of alias management.
 * The complicated part is to make sure that it is not in use by
 * any of the workers. If necessary cancel the work.
 */
void dasd_alias_disconnect_device_from_lcu(struct dasd_device *device)
{
	struct dasd_eckd_private *private;
	unsigned long flags;
	struct alias_lcu *lcu;
	struct alias_server *server;
	int was_pending;

	private = (struct dasd_eckd_private *) device->private;
	lcu = private->lcu;
	spin_lock_irqsave(&lcu->lock, flags);
	list_del_init(&device->alias_list);
	/* make sure that the workers don't use this device */
	if (device == lcu->suc_data.device) {
		/*
		 * cancel_work_sync may sleep and the worker itself takes
		 * lcu->lock, so the lock must be dropped around the call;
		 * re-check afterwards because the worker may have finished
		 * (and cleared the field) while the lock was dropped.
		 */
		spin_unlock_irqrestore(&lcu->lock, flags);
		cancel_work_sync(&lcu->suc_data.worker);
		spin_lock_irqsave(&lcu->lock, flags);
		if (device == lcu->suc_data.device)
			lcu->suc_data.device = NULL;
	}
	was_pending = 0;
	if (device == lcu->ruac_data.device) {
		/* same drop-lock/cancel/re-check pattern for the
		 * delayed uac-update worker */
		spin_unlock_irqrestore(&lcu->lock, flags);
		was_pending = 1;
		cancel_delayed_work_sync(&lcu->ruac_data.dwork);
		spin_lock_irqsave(&lcu->lock, flags);
		if (device == lcu->ruac_data.device)
			lcu->ruac_data.device = NULL;
	}
	private->lcu = NULL;
	spin_unlock_irqrestore(&lcu->lock, flags);

	/* free lcu and server structures if they are no longer in use */
	spin_lock_irqsave(&aliastree.lock, flags);
	spin_lock(&lcu->lock);
	if (list_empty(&lcu->grouplist) &&
	    list_empty(&lcu->active_devices) &&
	    list_empty(&lcu->inactive_devices)) {
		list_del(&lcu->lcu);
		spin_unlock(&lcu->lock);
		_free_lcu(lcu);
		lcu = NULL;
	} else {
		/* an update we cancelled above must be rescheduled */
		if (was_pending)
			_schedule_lcu_update(lcu, NULL);
		spin_unlock(&lcu->lock);
	}
	server = _find_server(&private->uid);
	if (server && list_empty(&server->lculist)) {
		list_del(&server->server);
		_free_server(server);
	}
	spin_unlock_irqrestore(&aliastree.lock, flags);
}
300
301/*
302 * This function assumes that the unit address configuration stored
303 * in the lcu is up to date and will update the device uid before
304 * adding it to a pav group.
305 */
306static int _add_device_to_lcu(struct alias_lcu *lcu,
307 struct dasd_device *device)
308{
309
310 struct dasd_eckd_private *private;
311 struct alias_pav_group *group;
312 struct dasd_uid *uid;
313
314 private = (struct dasd_eckd_private *) device->private;
315 uid = &private->uid;
316 uid->type = lcu->uac->unit[uid->real_unit_addr].ua_type;
317 uid->base_unit_addr = lcu->uac->unit[uid->real_unit_addr].base_ua;
318 dasd_set_uid(device->cdev, &private->uid);
319
320 /* if we have no PAV anyway, we don't need to bother with PAV groups */
321 if (lcu->pav == NO_PAV) {
322 list_move(&device->alias_list, &lcu->active_devices);
323 return 0;
324 }
325
326 group = _find_group(lcu, uid);
327 if (!group) {
328 group = kzalloc(sizeof(*group), GFP_ATOMIC);
329 if (!group)
330 return -ENOMEM;
331 memcpy(group->uid.vendor, uid->vendor, sizeof(uid->vendor));
332 memcpy(group->uid.serial, uid->serial, sizeof(uid->serial));
333 group->uid.ssid = uid->ssid;
334 if (uid->type == UA_BASE_DEVICE)
335 group->uid.base_unit_addr = uid->real_unit_addr;
336 else
337 group->uid.base_unit_addr = uid->base_unit_addr;
4abb08c2 338 memcpy(group->uid.vduit, uid->vduit, sizeof(uid->vduit));
8e09f215
SW
339 INIT_LIST_HEAD(&group->group);
340 INIT_LIST_HEAD(&group->baselist);
341 INIT_LIST_HEAD(&group->aliaslist);
342 list_add(&group->group, &lcu->grouplist);
343 }
344 if (uid->type == UA_BASE_DEVICE)
345 list_move(&device->alias_list, &group->baselist);
346 else
347 list_move(&device->alias_list, &group->aliaslist);
348 private->pavgroup = group;
349 return 0;
350};
351
352static void _remove_device_from_lcu(struct alias_lcu *lcu,
353 struct dasd_device *device)
354{
355 struct dasd_eckd_private *private;
356 struct alias_pav_group *group;
357
358 private = (struct dasd_eckd_private *) device->private;
359 list_move(&device->alias_list, &lcu->inactive_devices);
360 group = private->pavgroup;
361 if (!group)
362 return;
363 private->pavgroup = NULL;
364 if (list_empty(&group->baselist) && list_empty(&group->aliaslist)) {
365 list_del(&group->group);
366 kfree(group);
367 return;
368 }
369 if (group->next == device)
370 group->next = NULL;
371};
372
/*
 * Issue a Perform Subsystem Function / Read Subsystem Data channel
 * program (suborder 0x0e) on 'device' to read the unit address
 * configuration of the lcu into lcu->uac. Sleeps until the request
 * completes. Returns 0 on success or a negative error code; on
 * failure NEED_UAC_UPDATE is set again so the update gets retried.
 */
static int read_unit_address_configuration(struct dasd_device *device,
					   struct alias_lcu *lcu)
{
	struct dasd_psf_prssd_data *prssdp;
	struct dasd_ccw_req *cqr;
	struct ccw1 *ccw;
	int rc;
	unsigned long flags;

	/* two CCWs: one for the PSF order, one to read the data */
	cqr = dasd_kmalloc_request(DASD_ECKD_MAGIC, 1 /* PSF */	+ 1 /* RSSD */,
				   (sizeof(struct dasd_psf_prssd_data)),
				   device);
	if (IS_ERR(cqr))
		return PTR_ERR(cqr);
	cqr->startdev = device;
	cqr->memdev = device;
	clear_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags);
	cqr->retries = 10;
	cqr->expires = 20 * HZ;

	/* Prepare for Read Subsystem Data */
	prssdp = (struct dasd_psf_prssd_data *) cqr->data;
	memset(prssdp, 0, sizeof(struct dasd_psf_prssd_data));
	prssdp->order = PSF_ORDER_PRSSD;
	prssdp->suborder = 0x0e;	/* Read unit address configuration */
	/* all other bytes of prssdp must be zero */

	ccw = cqr->cpaddr;
	ccw->cmd_code = DASD_ECKD_CCW_PSF;
	ccw->count = sizeof(struct dasd_psf_prssd_data);
	ccw->flags |= CCW_FLAG_CC;	/* command-chain to the RSSD ccw */
	ccw->cda = (__u32)(addr_t) prssdp;

	/* Read Subsystem Data - feature codes */
	memset(lcu->uac, 0, sizeof(*(lcu->uac)));

	ccw++;
	ccw->cmd_code = DASD_ECKD_CCW_RSSD;
	ccw->count = sizeof(*(lcu->uac));
	ccw->cda = (__u32)(addr_t) lcu->uac;

	cqr->buildclk = get_clock();
	cqr->status = DASD_CQR_FILLED;

	/* need to unset flag here to detect race with summary unit check */
	spin_lock_irqsave(&lcu->lock, flags);
	lcu->flags &= ~NEED_UAC_UPDATE;
	spin_unlock_irqrestore(&lcu->lock, flags);

	do {
		rc = dasd_sleep_on(cqr);
	} while (rc && (cqr->retries > 0));
	if (rc) {
		/* request failed for good; make sure we retry later */
		spin_lock_irqsave(&lcu->lock, flags);
		lcu->flags |= NEED_UAC_UPDATE;
		spin_unlock_irqrestore(&lcu->lock, flags);
	}
	dasd_kfree_request(cqr, cqr->memdev);
	return rc;
}
433
/*
 * Rebuild the pav group structure of the lcu: first move all devices
 * back to the active list and delete all groups, then re-read the
 * unit address configuration via 'refdev', and finally sort each
 * device into its (possibly new) group. Runs in the lcu_update
 * worker, so sleeping is allowed.
 */
static int _lcu_update(struct dasd_device *refdev, struct alias_lcu *lcu)
{
	unsigned long flags;
	struct alias_pav_group *pavgroup, *tempgroup;
	struct dasd_device *device, *tempdev;
	int i, rc;
	struct dasd_eckd_private *private;

	/* tear down all pav groups; devices go back to the active list */
	spin_lock_irqsave(&lcu->lock, flags);
	list_for_each_entry_safe(pavgroup, tempgroup, &lcu->grouplist, group) {
		list_for_each_entry_safe(device, tempdev, &pavgroup->baselist,
					 alias_list) {
			list_move(&device->alias_list, &lcu->active_devices);
			private = (struct dasd_eckd_private *) device->private;
			private->pavgroup = NULL;
		}
		list_for_each_entry_safe(device, tempdev, &pavgroup->aliaslist,
					 alias_list) {
			list_move(&device->alias_list, &lcu->active_devices);
			private = (struct dasd_eckd_private *) device->private;
			private->pavgroup = NULL;
		}
		list_del(&pavgroup->group);
		kfree(pavgroup);
	}
	/* drop the lock: reading the configuration does I/O and sleeps */
	spin_unlock_irqrestore(&lcu->lock, flags);

	rc = read_unit_address_configuration(refdev, lcu);
	if (rc)
		return rc;

	spin_lock_irqsave(&lcu->lock, flags);
	/* derive the PAV mode from the first alias unit address found */
	lcu->pav = NO_PAV;
	for (i = 0; i < MAX_DEVICES_PER_LCU; ++i) {
		switch (lcu->uac->unit[i].ua_type) {
		case UA_BASE_PAV_ALIAS:
			lcu->pav = BASE_PAV;
			break;
		case UA_HYPER_PAV_ALIAS:
			lcu->pav = HYPER_PAV;
			break;
		}
		if (lcu->pav != NO_PAV)
			break;
	}

	/* sort all devices into their (possibly new) pav groups */
	list_for_each_entry_safe(device, tempdev, &lcu->active_devices,
				 alias_list) {
		_add_device_to_lcu(lcu, device);
	}
	spin_unlock_irqrestore(&lcu->lock, flags);
	return 0;
}
487
/*
 * Worker for the delayed ruac (read unit address configuration) work:
 * performs the actual lcu update and either clears UPDATE_PENDING on
 * success or reschedules itself for a retry in 30 seconds.
 */
static void lcu_update_work(struct work_struct *work)
{
	struct alias_lcu *lcu;
	struct read_uac_work_data *ruac_data;
	struct dasd_device *device;
	unsigned long flags;
	int rc;

	ruac_data = container_of(work, struct read_uac_work_data, dwork.work);
	lcu = container_of(ruac_data, struct alias_lcu, ruac_data);
	device = ruac_data->device;
	rc = _lcu_update(device, lcu);
	/*
	 * Need to check flags again, as there could have been another
	 * prepare_update or a new device while we were still
	 * processing the data
	 */
	spin_lock_irqsave(&lcu->lock, flags);
	if (rc || (lcu->flags & NEED_UAC_UPDATE)) {
		DBF_DEV_EVENT(DBF_WARNING, device, "could not update"
			    " alias data in lcu (rc = %d), retry later", rc);
		schedule_delayed_work(&lcu->ruac_data.dwork, 30*HZ);
	} else {
		lcu->ruac_data.device = NULL;
		lcu->flags &= ~UPDATE_PENDING;
	}
	spin_unlock_irqrestore(&lcu->lock, flags);
}
516
517static int _schedule_lcu_update(struct alias_lcu *lcu,
518 struct dasd_device *device)
519{
520 struct dasd_device *usedev = NULL;
521 struct alias_pav_group *group;
522
523 lcu->flags |= NEED_UAC_UPDATE;
524 if (lcu->ruac_data.device) {
525 /* already scheduled or running */
526 return 0;
527 }
528 if (device && !list_empty(&device->alias_list))
529 usedev = device;
530
531 if (!usedev && !list_empty(&lcu->grouplist)) {
532 group = list_first_entry(&lcu->grouplist,
533 struct alias_pav_group, group);
534 if (!list_empty(&group->baselist))
535 usedev = list_first_entry(&group->baselist,
536 struct dasd_device,
537 alias_list);
538 else if (!list_empty(&group->aliaslist))
539 usedev = list_first_entry(&group->aliaslist,
540 struct dasd_device,
541 alias_list);
542 }
543 if (!usedev && !list_empty(&lcu->active_devices)) {
544 usedev = list_first_entry(&lcu->active_devices,
545 struct dasd_device, alias_list);
546 }
547 /*
548 * if we haven't found a proper device yet, give up for now, the next
549 * device that will be set active will trigger an lcu update
550 */
551 if (!usedev)
552 return -EINVAL;
553 lcu->ruac_data.device = usedev;
554 schedule_delayed_work(&lcu->ruac_data.dwork, 0);
555 return 0;
556}
557
558int dasd_alias_add_device(struct dasd_device *device)
559{
560 struct dasd_eckd_private *private;
561 struct alias_lcu *lcu;
562 unsigned long flags;
563 int rc;
564
565 private = (struct dasd_eckd_private *) device->private;
566 lcu = private->lcu;
567 rc = 0;
568 spin_lock_irqsave(&lcu->lock, flags);
569 if (!(lcu->flags & UPDATE_PENDING)) {
570 rc = _add_device_to_lcu(lcu, device);
571 if (rc)
572 lcu->flags |= UPDATE_PENDING;
573 }
574 if (lcu->flags & UPDATE_PENDING) {
575 list_move(&device->alias_list, &lcu->active_devices);
576 _schedule_lcu_update(lcu, device);
577 }
578 spin_unlock_irqrestore(&lcu->lock, flags);
579 return rc;
580}
581
582int dasd_alias_remove_device(struct dasd_device *device)
583{
584 struct dasd_eckd_private *private;
585 struct alias_lcu *lcu;
586 unsigned long flags;
587
588 private = (struct dasd_eckd_private *) device->private;
589 lcu = private->lcu;
590 spin_lock_irqsave(&lcu->lock, flags);
591 _remove_device_from_lcu(lcu, device);
592 spin_unlock_irqrestore(&lcu->lock, flags);
593 return 0;
594}
595
/*
 * Find an alias device that can be used instead of base_device for
 * the next I/O (simple round-robin over the aliases of the group).
 * This is the per-I/O hot path. Returns NULL if no suitable alias
 * exists or the alias configuration is currently being updated.
 */
struct dasd_device *dasd_alias_get_start_dev(struct dasd_device *base_device)
{

	struct dasd_device *alias_device;
	struct alias_pav_group *group;
	struct alias_lcu *lcu;
	struct dasd_eckd_private *private, *alias_priv;
	unsigned long flags;

	private = (struct dasd_eckd_private *) base_device->private;
	group = private->pavgroup;
	lcu = private->lcu;
	if (!group || !lcu)
		return NULL;
	/*
	 * NOTE(review): lcu->pav and lcu->flags are read without holding
	 * lcu->lock — presumably a deliberate fast-path trade-off, since
	 * a stale value only sends one I/O to the base device; confirm.
	 */
	if (lcu->pav == NO_PAV ||
	    lcu->flags & (NEED_UAC_UPDATE | UPDATE_PENDING))
		return NULL;

	spin_lock_irqsave(&lcu->lock, flags);
	alias_device = group->next;
	if (!alias_device) {
		if (list_empty(&group->aliaslist)) {
			spin_unlock_irqrestore(&lcu->lock, flags);
			return NULL;
		} else {
			alias_device = list_first_entry(&group->aliaslist,
							struct dasd_device,
							alias_list);
		}
	}
	/* advance the round-robin pointer, wrapping at the end of the list */
	if (list_is_last(&alias_device->alias_list, &group->aliaslist))
		group->next = list_first_entry(&group->aliaslist,
					       struct dasd_device, alias_list);
	else
		group->next = list_first_entry(&alias_device->alias_list,
					       struct dasd_device, alias_list);
	spin_unlock_irqrestore(&lcu->lock, flags);
	alias_priv = (struct dasd_eckd_private *) alias_device->private;
	/* only use the alias if it is less loaded than the base device */
	if ((alias_priv->count < private->count) && !alias_device->stopped)
		return alias_device;
	else
		return NULL;
}
639
/*
 * Summary unit check handling depends on the way alias devices
 * are handled so it is done here rather then in dasd_eckd.c
 */

/*
 * Issue a Reset Summary Unit Check ccw on 'device', reusing the
 * pre-allocated rsu_cqr of the lcu (so no allocation is needed here).
 * The reason code of the original summary unit check is passed as the
 * first data byte. Sleeps until the request completes.
 */
static int reset_summary_unit_check(struct alias_lcu *lcu,
				    struct dasd_device *device,
				    char reason)
{
	struct dasd_ccw_req *cqr;
	int rc = 0;
	struct ccw1 *ccw;

	cqr = lcu->rsu_cqr;
	strncpy((char *) &cqr->magic, "ECKD", 4);
	ASCEBC((char *) &cqr->magic, 4);
	ccw = cqr->cpaddr;
	ccw->cmd_code = DASD_ECKD_CCW_RSCK;
	ccw->flags = 0 ;
	ccw->count = 16;
	ccw->cda = (__u32)(addr_t) cqr->data;
	/* first data byte carries the reason code of the unit check */
	((char *)cqr->data)[0] = reason;

	clear_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags);
	cqr->retries = 255;	/* set retry counter to enable basic ERP */
	cqr->startdev = device;
	cqr->memdev = device;
	cqr->block = NULL;
	cqr->expires = 5 * HZ;
	cqr->buildclk = get_clock();
	cqr->status = DASD_CQR_FILLED;

	rc = dasd_sleep_on_immediatly(cqr);
	return rc;
}
674
/*
 * Kick the request queues of all base devices on the lcu so that
 * processing resumes after the devices have been unstopped.
 * Called with lcu->lock held (see summary_unit_check_handling_work).
 */
static void _restart_all_base_devices_on_lcu(struct alias_lcu *lcu)
{
	struct alias_pav_group *pavgroup;
	struct dasd_device *device;
	struct dasd_eckd_private *private;

	/* active and inactive list can contain alias as well as base devices */
	list_for_each_entry(device, &lcu->active_devices, alias_list) {
		private = (struct dasd_eckd_private *) device->private;
		if (private->uid.type != UA_BASE_DEVICE)
			continue;
		dasd_schedule_block_bh(device->block);
		dasd_schedule_device_bh(device);
	}
	list_for_each_entry(device, &lcu->inactive_devices, alias_list) {
		private = (struct dasd_eckd_private *) device->private;
		if (private->uid.type != UA_BASE_DEVICE)
			continue;
		dasd_schedule_block_bh(device->block);
		dasd_schedule_device_bh(device);
	}
	/* devices on a group's baselist are base devices by construction */
	list_for_each_entry(pavgroup, &lcu->grouplist, group) {
		list_for_each_entry(device, &pavgroup->baselist, alias_list) {
			dasd_schedule_block_bh(device->block);
			dasd_schedule_device_bh(device);
		}
	}
}
703
/*
 * Flush the request queues of all alias devices of the lcu and move
 * the devices to the lcu's active list. Used during summary unit
 * check handling (see summary_unit_check_handling_work).
 */
static void flush_all_alias_devices_on_lcu(struct alias_lcu *lcu)
{
	struct alias_pav_group *pavgroup;
	struct dasd_device *device, *temp;
	struct dasd_eckd_private *private;
	int rc;
	unsigned long flags;
	LIST_HEAD(active);

	/*
	 * Problem here is that dasd_flush_device_queue may wait
	 * for termination of a request to complete. We can't keep
	 * the lcu lock during that time, so we must assume that
	 * the lists may have changed.
	 * Idea: first gather all active alias devices in a separate list,
	 * then flush the first element of this list unlocked, and afterwards
	 * check if it is still on the list before moving it to the
	 * active_devices list.
	 */

	spin_lock_irqsave(&lcu->lock, flags);
	list_for_each_entry_safe(device, temp, &lcu->active_devices,
				 alias_list) {
		private = (struct dasd_eckd_private *) device->private;
		if (private->uid.type == UA_BASE_DEVICE)
			continue;
		list_move(&device->alias_list, &active);
	}

	list_for_each_entry(pavgroup, &lcu->grouplist, group) {
		list_splice_init(&pavgroup->aliaslist, &active);
	}
	while (!list_empty(&active)) {
		device = list_first_entry(&active, struct dasd_device,
					  alias_list);
		/* flushing may sleep, so the lock has to be dropped */
		spin_unlock_irqrestore(&lcu->lock, flags);
		rc = dasd_flush_device_queue(device);
		spin_lock_irqsave(&lcu->lock, flags);
		/*
		 * only move device around if it wasn't moved away while we
		 * were waiting for the flush
		 */
		if (device == list_first_entry(&active,
					       struct dasd_device, alias_list))
			list_move(&device->alias_list, &lcu->active_devices);
	}
	spin_unlock_irqrestore(&lcu->lock, flags);
}
752
a806170e
HC
753static void __stop_device_on_lcu(struct dasd_device *device,
754 struct dasd_device *pos)
755{
756 /* If pos == device then device is already locked! */
757 if (pos == device) {
758 pos->stopped |= DASD_STOPPED_SU;
759 return;
760 }
761 spin_lock(get_ccwdev_lock(pos->cdev));
762 pos->stopped |= DASD_STOPPED_SU;
763 spin_unlock(get_ccwdev_lock(pos->cdev));
764}
765
8e09f215
SW
/*
 * This function is called in interrupt context, so the
 * cdev lock for device is already locked!
 */
static void _stop_all_devices_on_lcu(struct alias_lcu *lcu,
				     struct dasd_device *device)
{
	struct alias_pav_group *pavgroup;
	struct dasd_device *pos;

	/* set DASD_STOPPED_SU on every device anywhere on this lcu */
	list_for_each_entry(pos, &lcu->active_devices, alias_list)
		__stop_device_on_lcu(device, pos);
	list_for_each_entry(pos, &lcu->inactive_devices, alias_list)
		__stop_device_on_lcu(device, pos);
	list_for_each_entry(pavgroup, &lcu->grouplist, group) {
		list_for_each_entry(pos, &pavgroup->baselist, alias_list)
			__stop_device_on_lcu(device, pos);
		list_for_each_entry(pos, &pavgroup->aliaslist, alias_list)
			__stop_device_on_lcu(device, pos);
	}
}
787
788static void _unstop_all_devices_on_lcu(struct alias_lcu *lcu)
789{
790 struct alias_pav_group *pavgroup;
791 struct dasd_device *device;
792 unsigned long flags;
793
794 list_for_each_entry(device, &lcu->active_devices, alias_list) {
795 spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
796 device->stopped &= ~DASD_STOPPED_SU;
797 spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
798 }
799
800 list_for_each_entry(device, &lcu->inactive_devices, alias_list) {
801 spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
802 device->stopped &= ~DASD_STOPPED_SU;
803 spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
804 }
805
806 list_for_each_entry(pavgroup, &lcu->grouplist, group) {
807 list_for_each_entry(device, &pavgroup->baselist, alias_list) {
808 spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
809 device->stopped &= ~DASD_STOPPED_SU;
810 spin_unlock_irqrestore(get_ccwdev_lock(device->cdev),
811 flags);
812 }
813 list_for_each_entry(device, &pavgroup->aliaslist, alias_list) {
814 spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
815 device->stopped &= ~DASD_STOPPED_SU;
816 spin_unlock_irqrestore(get_ccwdev_lock(device->cdev),
817 flags);
818 }
819 }
820}
821
/*
 * Worker for summary unit check handling: flush all alias devices,
 * reset the summary unit check on the reporting device, restart the
 * base devices and finally trigger a re-read of the alias
 * configuration.
 */
static void summary_unit_check_handling_work(struct work_struct *work)
{
	struct alias_lcu *lcu;
	struct summary_unit_check_work_data *suc_data;
	unsigned long flags;
	struct dasd_device *device;

	suc_data = container_of(work, struct summary_unit_check_work_data,
				worker);
	lcu = container_of(suc_data, struct alias_lcu, suc_data);
	device = suc_data->device;

	/* 1. flush alias devices */
	flush_all_alias_devices_on_lcu(lcu);

	/* 2. reset summary unit check */
	spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
	/* unstop the reporting device first so the reset cqr can run */
	device->stopped &= ~(DASD_STOPPED_SU | DASD_STOPPED_PENDING);
	spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
	reset_summary_unit_check(lcu, device, suc_data->reason);

	spin_lock_irqsave(&lcu->lock, flags);
	_unstop_all_devices_on_lcu(lcu);
	_restart_all_base_devices_on_lcu(lcu);
	/* 3. read new alias configuration */
	_schedule_lcu_update(lcu, device);
	/* allow the next summary unit check to be scheduled */
	lcu->suc_data.device = NULL;
	spin_unlock_irqrestore(&lcu->lock, flags);
}
851
/*
 * note: this will be called from int handler context (cdev locked)
 */
void dasd_alias_handle_summary_unit_check(struct dasd_device *device,
					  struct irb *irb)
{
	struct alias_lcu *lcu;
	char reason;
	struct dasd_eckd_private *private;
	char *sense;

	private = (struct dasd_eckd_private *) device->private;

	/* the reason code is taken from byte 8 of the sense data */
	sense = dasd_get_sense(irb);
	if (sense) {
		reason = sense[8];
		DBF_DEV_EVENT(DBF_NOTICE, device, "%s %x",
			    "eckd handle summary unit check: reason", reason);
	} else {
		DBF_DEV_EVENT(DBF_WARNING, device, "%s",
			    "eckd handle summary unit check:"
			    " no reason code available");
		return;
	}

	lcu = private->lcu;
	if (!lcu) {
		DBF_DEV_EVENT(DBF_WARNING, device, "%s",
			    "device not ready to handle summary"
			    " unit check (no lcu structure)");
		return;
	}
	/* int handler context: plain spin_lock is sufficient here */
	spin_lock(&lcu->lock);
	_stop_all_devices_on_lcu(lcu, device);
	/* prepare for lcu_update */
	private->lcu->flags |= NEED_UAC_UPDATE | UPDATE_PENDING;
	/* If this device is about to be removed just return and wait for
	 * the next interrupt on a different device
	 */
	if (list_empty(&device->alias_list)) {
		DBF_DEV_EVENT(DBF_WARNING, device, "%s",
			    "device is in offline processing,"
			    " don't do summary unit check handling");
		spin_unlock(&lcu->lock);
		return;
	}
	if (lcu->suc_data.device) {
		/* already scheduled or running */
		DBF_DEV_EVENT(DBF_WARNING, device, "%s",
			    "previous instance of summary unit check worker"
			    " still pending");
		spin_unlock(&lcu->lock);
		return ;
	}
	lcu->suc_data.reason = reason;
	lcu->suc_data.device = device;
	spin_unlock(&lcu->lock);
	schedule_work(&lcu->suc_data.worker);
};