drivers/scsi/device_handler/scsi_dh_rdac.c
1 /*
2 * Engenio/LSI RDAC SCSI Device Handler
3 *
4 * Copyright (C) 2005 Mike Christie. All rights reserved.
5 * Copyright (C) Chandra Seetharaman, IBM Corp. 2007
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License as published by
9 * the Free Software Foundation; either version 2 of the License, or
10 * (at your option) any later version.
11 *
12 * This program is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 * GNU General Public License for more details.
16 *
17 * You should have received a copy of the GNU General Public License
18 * along with this program; if not, write to the Free Software
19 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
20 *
21 */
22 #include <scsi/scsi.h>
23 #include <scsi/scsi_eh.h>
24 #include <scsi/scsi_dh.h>
25 #include <linux/workqueue.h>
26
27 #define RDAC_NAME "rdac"
28 #define RDAC_RETRY_COUNT 5
29
30 /*
31 * LSI mode page stuff
32 *
33 * These struct definitions and the forming of the
34 * mode page were taken from the LSI RDAC 2.4 GPL'd
35 * driver, and then converted to Linux conventions.
36 */
37 #define RDAC_QUIESCENCE_TIME 20
38 /*
39 * Page Codes
40 */
41 #define RDAC_PAGE_CODE_REDUNDANT_CONTROLLER 0x2c
42
43 /*
44 * Controller modes definitions
45 */
46 #define RDAC_MODE_TRANSFER_SPECIFIED_LUNS 0x02
47
48 /*
49 * RDAC Options field
50 */
51 #define RDAC_FORCED_QUIESENCE 0x02
52
53 #define RDAC_TIMEOUT (60 * HZ)
54 #define RDAC_RETRIES 3
55
56 struct rdac_mode_6_hdr {
57 u8 data_len;
58 u8 medium_type;
59 u8 device_params;
60 u8 block_desc_len;
61 };
62
63 struct rdac_mode_10_hdr {
64 u16 data_len;
65 u8 medium_type;
66 u8 device_params;
67 u16 reserved;
68 u16 block_desc_len;
69 };
70
71 struct rdac_mode_common {
72 u8 controller_serial[16];
73 u8 alt_controller_serial[16];
74 u8 rdac_mode[2];
75 u8 alt_rdac_mode[2];
76 u8 quiescence_timeout;
77 u8 rdac_options;
78 };
79
80 struct rdac_pg_legacy {
81 struct rdac_mode_6_hdr hdr;
82 u8 page_code;
83 u8 page_len;
84 struct rdac_mode_common common;
85 #define MODE6_MAX_LUN 32
86 u8 lun_table[MODE6_MAX_LUN];
87 u8 reserved2[32];
88 u8 reserved3;
89 u8 reserved4;
90 };
91
92 struct rdac_pg_expanded {
93 struct rdac_mode_10_hdr hdr;
94 u8 page_code;
95 u8 subpage_code;
96 u8 page_len[2];
97 struct rdac_mode_common common;
98 u8 lun_table[256];
99 u8 reserved3;
100 u8 reserved4;
101 };
102
103 struct c9_inquiry {
104 u8 peripheral_info;
105 u8 page_code; /* 0xC9 */
106 u8 reserved1;
107 u8 page_len;
108 u8 page_id[4]; /* "vace" */
109 u8 avte_cvp;
110 u8 path_prio;
111 u8 reserved2[38];
112 };
113
114 #define SUBSYS_ID_LEN 16
115 #define SLOT_ID_LEN 2
116 #define ARRAY_LABEL_LEN 31
117
118 struct c4_inquiry {
119 u8 peripheral_info;
120 u8 page_code; /* 0xC4 */
121 u8 reserved1;
122 u8 page_len;
123 u8 page_id[4]; /* "subs" */
124 u8 subsys_id[SUBSYS_ID_LEN];
125 u8 revision[4];
126 u8 slot_id[SLOT_ID_LEN];
127 u8 reserved[2];
128 };
129
130 struct rdac_controller {
131 u8 subsys_id[SUBSYS_ID_LEN];
132 u8 slot_id[SLOT_ID_LEN];
133 int use_ms10;
134 struct kref kref;
135 struct list_head node; /* list of all controllers */
136 union {
137 struct rdac_pg_legacy legacy;
138 struct rdac_pg_expanded expanded;
139 } mode_select;
140 u8 index;
141 u8 array_name[ARRAY_LABEL_LEN];
142 spinlock_t ms_lock;
143 int ms_queued;
144 struct work_struct ms_work;
145 struct scsi_device *ms_sdev;
146 struct list_head ms_head;
147 };
148
149 struct c8_inquiry {
150 u8 peripheral_info;
151 u8 page_code; /* 0xC8 */
152 u8 reserved1;
153 u8 page_len;
154 u8 page_id[4]; /* "edid" */
155 u8 reserved2[3];
156 u8 vol_uniq_id_len;
157 u8 vol_uniq_id[16];
158 u8 vol_user_label_len;
159 u8 vol_user_label[60];
160 u8 array_uniq_id_len;
161 u8 array_unique_id[16];
162 u8 array_user_label_len;
163 u8 array_user_label[60];
164 u8 lun[8];
165 };
166
167 struct c2_inquiry {
168 u8 peripheral_info;
169 u8 page_code; /* 0xC2 */
170 u8 reserved1;
171 u8 page_len;
172 u8 page_id[4]; /* "swr4" */
173 u8 sw_version[3];
174 u8 sw_date[3];
175 u8 features_enabled;
176 u8 max_lun_supported;
177 u8 partitions[239]; /* Total allocation length should be 0xFF */
178 };
179
180 struct rdac_dh_data {
181 struct rdac_controller *ctlr;
182 #define UNINITIALIZED_LUN (1 << 8)
183 unsigned lun;
184 #define RDAC_STATE_ACTIVE 0
185 #define RDAC_STATE_PASSIVE 1
186 unsigned char state;
187
188 #define RDAC_LUN_UNOWNED 0
189 #define RDAC_LUN_OWNED 1
190 #define RDAC_LUN_AVT 2
191 char lun_state;
192 unsigned char sense[SCSI_SENSE_BUFFERSIZE];
193 union {
194 struct c2_inquiry c2;
195 struct c4_inquiry c4;
196 struct c8_inquiry c8;
197 struct c9_inquiry c9;
198 } inq;
199 };
200
201 static const char *lun_state[] =
202 {
203 "unowned",
204 "owned",
205 "owned (AVT mode)",
206 };
207
208 struct rdac_queue_data {
209 struct list_head entry;
210 struct rdac_dh_data *h;
211 activate_complete callback_fn;
212 void *callback_data;
213 };
214
215 static LIST_HEAD(ctlr_list);
216 static DEFINE_SPINLOCK(list_lock);
217 static struct workqueue_struct *kmpath_rdacd;
218 static void send_mode_select(struct work_struct *work);
219
220 /*
221 * Module parameter to control rdac debug logging: two bits for each
222 * type of logging, with only two types defined for now.  More types
223 * can be added later if required.
224 */
225 static int rdac_logging = 1;
226 module_param(rdac_logging, int, S_IRUGO|S_IWUSR);
227 MODULE_PARM_DESC(rdac_logging, "A bit mask of rdac logging levels; "
228 "default is 1 - failover logging enabled, "
229 "set it to 0xF to enable all the logs");
230
231 #define RDAC_LOG_FAILOVER 0
232 #define RDAC_LOG_SENSE 2
233
234 #define RDAC_LOG_BITS 2
235
236 #define RDAC_LOG_LEVEL(SHIFT) \
237 ((rdac_logging >> (SHIFT)) & ((1 << (RDAC_LOG_BITS)) - 1))
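/*
 * Illustration (not in the original source): with rdac_logging = 0x5,
 * RDAC_LOG_LEVEL(RDAC_LOG_FAILOVER) and RDAC_LOG_LEVEL(RDAC_LOG_SENSE)
 * both evaluate to 1; the default value of 1 enables failover logging
 * only (bits 1:0), leaving sense logging (bits 3:2) disabled.
 */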
238
239 #define RDAC_LOG(SHIFT, sdev, f, arg...) \
240 do { \
241 if (unlikely(RDAC_LOG_LEVEL(SHIFT))) \
242 sdev_printk(KERN_INFO, sdev, RDAC_NAME ": " f "\n", ## arg); \
243 } while (0)
244
245 static inline struct rdac_dh_data *get_rdac_data(struct scsi_device *sdev)
246 {
247 struct scsi_dh_data *scsi_dh_data = sdev->scsi_dh_data;
248 BUG_ON(scsi_dh_data == NULL);
249 return ((struct rdac_dh_data *) scsi_dh_data->buf);
250 }
251
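/*
 * Build a block-layer packet command (REQ_TYPE_BLOCK_PC) request for
 * RDAC management I/O: the kernel buffer is mapped with
 * blk_rq_map_kern() and the request is marked failfast with
 * RDAC_RETRIES/RDAC_TIMEOUT, presumably so a dead path does not stall
 * failover handling.
 */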
252 static struct request *get_rdac_req(struct scsi_device *sdev,
253 void *buffer, unsigned buflen, int rw)
254 {
255 struct request *rq;
256 struct request_queue *q = sdev->request_queue;
257
258 rq = blk_get_request(q, rw, GFP_NOIO);
259
260 if (!rq) {
261 sdev_printk(KERN_INFO, sdev,
262 "get_rdac_req: blk_get_request failed.\n");
263 return NULL;
264 }
265
266 if (buflen && blk_rq_map_kern(q, rq, buffer, buflen, GFP_NOIO)) {
267 blk_put_request(rq);
268 sdev_printk(KERN_INFO, sdev,
269 "get_rdac_req: blk_rq_map_kern failed.\n");
270 return NULL;
271 }
272
273 rq->cmd_type = REQ_TYPE_BLOCK_PC;
274 rq->cmd_flags |= REQ_FAILFAST_DEV | REQ_FAILFAST_TRANSPORT |
275 REQ_FAILFAST_DRIVER;
276 rq->retries = RDAC_RETRIES;
277 rq->timeout = RDAC_TIMEOUT;
278
279 return rq;
280 }
281
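/*
 * Prepare the MODE SELECT request that transfers LUN ownership: fill in
 * the redundant-controller mode page (0x2c; the expanded form sets the
 * SPF bit and subpage 0x1) in ctlr->mode_select, then wrap it in a
 * MODE SELECT(6) or MODE SELECT(10) CDB depending on ctlr->use_ms10.
 */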
282 static struct request *rdac_failover_get(struct scsi_device *sdev,
283 struct rdac_dh_data *h)
284 {
285 struct request *rq;
286 struct rdac_mode_common *common;
287 unsigned data_size;
288
289 if (h->ctlr->use_ms10) {
290 struct rdac_pg_expanded *rdac_pg;
291
292 data_size = sizeof(struct rdac_pg_expanded);
293 rdac_pg = &h->ctlr->mode_select.expanded;
294 memset(rdac_pg, 0, data_size);
295 common = &rdac_pg->common;
296 rdac_pg->page_code = RDAC_PAGE_CODE_REDUNDANT_CONTROLLER + 0x40;
297 rdac_pg->subpage_code = 0x1;
298 rdac_pg->page_len[0] = 0x01;
299 rdac_pg->page_len[1] = 0x28;
300 } else {
301 struct rdac_pg_legacy *rdac_pg;
302
303 data_size = sizeof(struct rdac_pg_legacy);
304 rdac_pg = &h->ctlr->mode_select.legacy;
305 memset(rdac_pg, 0, data_size);
306 common = &rdac_pg->common;
307 rdac_pg->page_code = RDAC_PAGE_CODE_REDUNDANT_CONTROLLER;
308 rdac_pg->page_len = 0x68;
309 }
310 common->rdac_mode[1] = RDAC_MODE_TRANSFER_SPECIFIED_LUNS;
311 common->quiescence_timeout = RDAC_QUIESCENCE_TIME;
312 common->rdac_options = RDAC_FORCED_QUIESENCE;
313
314 /* get request for block layer packet command */
315 rq = get_rdac_req(sdev, &h->ctlr->mode_select, data_size, WRITE);
316 if (!rq)
317 return NULL;
318
319 /* Prepare the command. */
320 if (h->ctlr->use_ms10) {
321 rq->cmd[0] = MODE_SELECT_10;
322 rq->cmd[7] = data_size >> 8;
323 rq->cmd[8] = data_size & 0xff;
324 } else {
325 rq->cmd[0] = MODE_SELECT;
326 rq->cmd[4] = data_size;
327 }
328 rq->cmd_len = COMMAND_SIZE(rq->cmd[0]);
329
330 rq->sense = h->sense;
331 memset(rq->sense, 0, SCSI_SENSE_BUFFERSIZE);
332 rq->sense_len = 0;
333
334 return rq;
335 }
336
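/*
 * kref release callback: flush any pending mode-select work, unlink the
 * controller from ctlr_list and free it.
 */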
337 static void release_controller(struct kref *kref)
338 {
339 struct rdac_controller *ctlr;
340 ctlr = container_of(kref, struct rdac_controller, kref);
341
342 flush_workqueue(kmpath_rdacd);
343 spin_lock(&list_lock);
344 list_del(&ctlr->node);
345 spin_unlock(&list_lock);
346 kfree(ctlr);
347 }
348
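/*
 * Find the controller matching subsys_id/slot_id on ctlr_list (taking a
 * reference), or allocate and register a new entry.  GFP_ATOMIC is used
 * because list_lock is held across the allocation.
 */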
349 static struct rdac_controller *get_controller(u8 *subsys_id, u8 *slot_id,
350 char *array_name)
351 {
352 struct rdac_controller *ctlr, *tmp;
353
354 spin_lock(&list_lock);
355
356 list_for_each_entry(tmp, &ctlr_list, node) {
357 if ((memcmp(tmp->subsys_id, subsys_id, SUBSYS_ID_LEN) == 0) &&
358 (memcmp(tmp->slot_id, slot_id, SLOT_ID_LEN) == 0)) {
359 kref_get(&tmp->kref);
360 spin_unlock(&list_lock);
361 return tmp;
362 }
363 }
364 ctlr = kmalloc(sizeof(*ctlr), GFP_ATOMIC);
365 if (!ctlr)
366 goto done;
367
368 /* initialize fields of controller */
369 memcpy(ctlr->subsys_id, subsys_id, SUBSYS_ID_LEN);
370 memcpy(ctlr->slot_id, slot_id, SLOT_ID_LEN);
371 memcpy(ctlr->array_name, array_name, ARRAY_LABEL_LEN);
372
373 /* update the controller index */
374 if (slot_id[1] == 0x31)
375 ctlr->index = 0;
376 else
377 ctlr->index = 1;
378
379 kref_init(&ctlr->kref);
380 ctlr->use_ms10 = -1;
381 ctlr->ms_queued = 0;
382 ctlr->ms_sdev = NULL;
383 spin_lock_init(&ctlr->ms_lock);
384 INIT_WORK(&ctlr->ms_work, send_mode_select);
385 INIT_LIST_HEAD(&ctlr->ms_head);
386 list_add(&ctlr->node, &ctlr_list);
387 done:
388 spin_unlock(&list_lock);
389 return ctlr;
390 }
391
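/*
 * Issue an EVPD INQUIRY for the given vendor-specific page into h->inq
 * and map a block-layer -EIO to SCSI_DH_IO.
 */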
392 static int submit_inquiry(struct scsi_device *sdev, int page_code,
393 unsigned int len, struct rdac_dh_data *h)
394 {
395 struct request *rq;
396 struct request_queue *q = sdev->request_queue;
397 int err = SCSI_DH_RES_TEMP_UNAVAIL;
398
399 rq = get_rdac_req(sdev, &h->inq, len, READ);
400 if (!rq)
401 goto done;
402
403 /* Prepare the command. */
404 rq->cmd[0] = INQUIRY;
405 rq->cmd[1] = 1;
406 rq->cmd[2] = page_code;
407 rq->cmd[4] = len;
408 rq->cmd_len = COMMAND_SIZE(INQUIRY);
409
410 rq->sense = h->sense;
411 memset(rq->sense, 0, SCSI_SENSE_BUFFERSIZE);
412 rq->sense_len = 0;
413
414 err = blk_execute_rq(q, NULL, rq, 1);
415 if (err == -EIO)
416 err = SCSI_DH_IO;
417
418 blk_put_request(rq);
419 done:
420 return err;
421 }
422
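/*
 * INQUIRY page 0xC8 ("edid"): extract the LUN number (only the last
 * byte of the 8-byte LUN field is used) and the array user label, which
 * is apparently stored two bytes per character, so only every second
 * byte is copied into array_name.
 */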
423 static int get_lun_info(struct scsi_device *sdev, struct rdac_dh_data *h,
424 char *array_name)
425 {
426 int err, i;
427 struct c8_inquiry *inqp;
428
429 err = submit_inquiry(sdev, 0xC8, sizeof(struct c8_inquiry), h);
430 if (err == SCSI_DH_OK) {
431 inqp = &h->inq.c8;
432 if (inqp->page_code != 0xc8)
433 return SCSI_DH_NOSYS;
434 if (inqp->page_id[0] != 'e' || inqp->page_id[1] != 'd' ||
435 inqp->page_id[2] != 'i' || inqp->page_id[3] != 'd')
436 return SCSI_DH_NOSYS;
437 h->lun = inqp->lun[7]; /* Uses only the last byte */
438
439 for (i = 0; i < ARRAY_LABEL_LEN - 1; ++i)
440 *(array_name+i) = inqp->array_user_label[(2*i)+1];
441
442 *(array_name+ARRAY_LABEL_LEN-1) = '\0';
443 }
444 return err;
445 }
446
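/*
 * INQUIRY page 0xC9 ("vace"): bit 7 of avte_cvp indicates AVT mode and
 * bit 0 indicates the LUN is owned by this controller.  A LUN that is
 * neither remains RDAC_LUN_UNOWNED and the path is marked passive.
 */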
447 static int check_ownership(struct scsi_device *sdev, struct rdac_dh_data *h)
448 {
449 int err;
450 struct c9_inquiry *inqp;
451
452 h->lun_state = RDAC_LUN_UNOWNED;
453 h->state = RDAC_STATE_ACTIVE;
454 err = submit_inquiry(sdev, 0xC9, sizeof(struct c9_inquiry), h);
455 if (err == SCSI_DH_OK) {
456 inqp = &h->inq.c9;
457 if ((inqp->avte_cvp >> 7) == 0x1) {
458 /* LUN in AVT mode */
459 sdev_printk(KERN_NOTICE, sdev,
460 "%s: AVT mode detected\n",
461 RDAC_NAME);
462 h->lun_state = RDAC_LUN_AVT;
463 } else if ((inqp->avte_cvp & 0x1) != 0) {
464 /* LUN was owned by the controller */
465 h->lun_state = RDAC_LUN_OWNED;
466 }
467 }
468
469 if (h->lun_state == RDAC_LUN_UNOWNED)
470 h->state = RDAC_STATE_PASSIVE;
471
472 return err;
473 }
474
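/*
 * INQUIRY page 0xC4 ("subs"): use the subsystem and slot IDs to look up
 * (or allocate) the shared rdac_controller for this device.
 */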
475 static int initialize_controller(struct scsi_device *sdev,
476 struct rdac_dh_data *h, char *array_name)
477 {
478 int err;
479 struct c4_inquiry *inqp;
480
481 err = submit_inquiry(sdev, 0xC4, sizeof(struct c4_inquiry), h);
482 if (err == SCSI_DH_OK) {
483 inqp = &h->inq.c4;
484 h->ctlr = get_controller(inqp->subsys_id, inqp->slot_id,
485 array_name);
486 if (!h->ctlr)
487 err = SCSI_DH_RES_TEMP_UNAVAIL;
488 }
489 return err;
490 }
491
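/*
 * INQUIRY page 0xC2 ("swr4"): choose MODE SELECT(10) when the array
 * supports more LUNs than fit in the legacy 32-entry mode-6 lun_table.
 */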
492 static int set_mode_select(struct scsi_device *sdev, struct rdac_dh_data *h)
493 {
494 int err;
495 struct c2_inquiry *inqp;
496
497 err = submit_inquiry(sdev, 0xC2, sizeof(struct c2_inquiry), h);
498 if (err == SCSI_DH_OK) {
499 inqp = &h->inq.c2;
500 /*
501 * If more than MODE6_MAX_LUN luns are supported, use
502 * mode select 10
503 */
504 if (inqp->max_lun_supported >= MODE6_MAX_LUN)
505 h->ctlr->use_ms10 = 1;
506 else
507 h->ctlr->use_ms10 = 0;
508 }
509 return err;
510 }
511
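/*
 * Decode the sense data from a failed MODE SELECT and decide whether
 * the failover attempt is worth retrying (unit attention, LUN becoming
 * ready, command lock contention, ...) or is a hard SCSI_DH_IO error.
 */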
512 static int mode_select_handle_sense(struct scsi_device *sdev,
513 unsigned char *sensebuf)
514 {
515 struct scsi_sense_hdr sense_hdr;
516 int err = SCSI_DH_IO, ret;
517 struct rdac_dh_data *h = get_rdac_data(sdev);
518
519 ret = scsi_normalize_sense(sensebuf, SCSI_SENSE_BUFFERSIZE, &sense_hdr);
520 if (!ret)
521 goto done;
522
523 switch (sense_hdr.sense_key) {
524 case NO_SENSE:
525 case ABORTED_COMMAND:
526 case UNIT_ATTENTION:
527 err = SCSI_DH_RETRY;
528 break;
529 case NOT_READY:
530 if (sense_hdr.asc == 0x04 && sense_hdr.ascq == 0x01)
531 /* LUN Not Ready and is in the Process of Becoming
532 * Ready
533 */
534 err = SCSI_DH_RETRY;
535 break;
536 case ILLEGAL_REQUEST:
537 if (sense_hdr.asc == 0x91 && sense_hdr.ascq == 0x36)
538 /*
539 * Command Lock contention
540 */
541 err = SCSI_DH_RETRY;
542 break;
543 default:
544 break;
545 }
546
547 RDAC_LOG(RDAC_LOG_FAILOVER, sdev, "array %s, ctlr %d, "
548 "MODE_SELECT returned with sense %02x/%02x/%02x",
549 (char *) h->ctlr->array_name, h->ctlr->index,
550 sense_hdr.sense_key, sense_hdr.asc, sense_hdr.ascq);
551
552 done:
553 return err;
554 }
555
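/*
 * Workqueue handler: drain the controller's ms_head queue, mark each
 * queued LUN (0x81) in the mode page lun_table, issue the MODE SELECT
 * with up to RDAC_RETRY_COUNT retries, and complete the callers'
 * activate callbacks with the result.
 */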
556 static void send_mode_select(struct work_struct *work)
557 {
558 struct rdac_controller *ctlr =
559 container_of(work, struct rdac_controller, ms_work);
560 struct request *rq;
561 struct scsi_device *sdev = ctlr->ms_sdev;
562 struct rdac_dh_data *h = get_rdac_data(sdev);
563 struct request_queue *q = sdev->request_queue;
564 int err, retry_cnt = RDAC_RETRY_COUNT;
565 struct rdac_queue_data *tmp, *qdata;
566 LIST_HEAD(list);
567 u8 *lun_table;
568
569 spin_lock(&ctlr->ms_lock);
570 list_splice_init(&ctlr->ms_head, &list);
571 ctlr->ms_queued = 0;
572 ctlr->ms_sdev = NULL;
573 spin_unlock(&ctlr->ms_lock);
574
575 if (ctlr->use_ms10)
576 lun_table = ctlr->mode_select.expanded.lun_table;
577 else
578 lun_table = ctlr->mode_select.legacy.lun_table;
579
580 retry:
581 err = SCSI_DH_RES_TEMP_UNAVAIL;
582 rq = rdac_failover_get(sdev, h);
583 if (!rq)
584 goto done;
585
586 list_for_each_entry(qdata, &list, entry) {
587 lun_table[qdata->h->lun] = 0x81;
588 }
589
590 RDAC_LOG(RDAC_LOG_FAILOVER, sdev, "array %s, ctlr %d, "
591 "%s MODE_SELECT command",
592 (char *) h->ctlr->array_name, h->ctlr->index,
593 (retry_cnt == RDAC_RETRY_COUNT) ? "queueing" : "retrying");
594
595 err = blk_execute_rq(q, NULL, rq, 1);
596 blk_put_request(rq);
597 if (err != SCSI_DH_OK) {
598 err = mode_select_handle_sense(sdev, h->sense);
599 if (err == SCSI_DH_RETRY && retry_cnt--)
600 goto retry;
601 }
602 if (err == SCSI_DH_OK) {
603 h->state = RDAC_STATE_ACTIVE;
604 RDAC_LOG(RDAC_LOG_FAILOVER, sdev, "array %s, ctlr %d, "
605 "MODE_SELECT completed",
606 (char *) h->ctlr->array_name, h->ctlr->index);
607 }
608
609 done:
610 list_for_each_entry_safe(qdata, tmp, &list, entry) {
611 list_del(&qdata->entry);
612 if (err == SCSI_DH_OK)
613 qdata->h->state = RDAC_STATE_ACTIVE;
614 if (qdata->callback_fn)
615 qdata->callback_fn(qdata->callback_data, err);
616 kfree(qdata);
617 }
618 return;
619 }
620
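/*
 * Queue an activation request on the controller.  Requests for LUNs on
 * the same controller are coalesced: only the first schedules the
 * mode-select work; later ones are picked up when the work item splices
 * ms_head and fills in the shared lun_table.
 */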
621 static int queue_mode_select(struct scsi_device *sdev,
622 activate_complete fn, void *data)
623 {
624 struct rdac_queue_data *qdata;
625 struct rdac_controller *ctlr;
626
627 qdata = kzalloc(sizeof(*qdata), GFP_KERNEL);
628 if (!qdata)
629 return SCSI_DH_RETRY;
630
631 qdata->h = get_rdac_data(sdev);
632 qdata->callback_fn = fn;
633 qdata->callback_data = data;
634
635 ctlr = qdata->h->ctlr;
636 spin_lock(&ctlr->ms_lock);
637 list_add_tail(&qdata->entry, &ctlr->ms_head);
638 if (!ctlr->ms_queued) {
639 ctlr->ms_queued = 1;
640 ctlr->ms_sdev = sdev;
641 queue_work(kmpath_rdacd, &ctlr->ms_work);
642 }
643 spin_unlock(&ctlr->ms_lock);
644 return SCSI_DH_OK;
645 }
646
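/*
 * scsi_dh activate entry point: re-read ownership and, if the LUN is
 * unowned, queue a mode select to take it over; otherwise complete the
 * caller's callback immediately with the current status.
 */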
647 static int rdac_activate(struct scsi_device *sdev,
648 activate_complete fn, void *data)
649 {
650 struct rdac_dh_data *h = get_rdac_data(sdev);
651 int err = SCSI_DH_OK;
652
653 err = check_ownership(sdev, h);
654 if (err != SCSI_DH_OK)
655 goto done;
656
657 if (h->lun_state == RDAC_LUN_UNOWNED) {
658 err = queue_mode_select(sdev, fn, data);
659 if (err == SCSI_DH_OK)
660 return 0;
661 }
662 done:
663 if (fn)
664 fn(data, err);
665 return 0;
666 }
667
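/* Fail I/O quietly while the path is passive. */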
668 static int rdac_prep_fn(struct scsi_device *sdev, struct request *req)
669 {
670 struct rdac_dh_data *h = get_rdac_data(sdev);
671 int ret = BLKPREP_OK;
672
673 if (h->state != RDAC_STATE_ACTIVE) {
674 ret = BLKPREP_KILL;
675 req->cmd_flags |= REQ_QUIET;
676 }
677 return ret;
678
679 }
680
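/*
 * Map RDAC-specific sense data to multipath actions: requeue transient
 * "not ready"/"quiescence" conditions (ADD_TO_MLQUEUE), return SUCCESS
 * to fail or bypass the path when the controller does not own the LUN,
 * and leave everything else to the midlayer.
 */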
681 static int rdac_check_sense(struct scsi_device *sdev,
682 struct scsi_sense_hdr *sense_hdr)
683 {
684 struct rdac_dh_data *h = get_rdac_data(sdev);
685
686 RDAC_LOG(RDAC_LOG_SENSE, sdev, "array %s, ctlr %d, "
687 "I/O returned with sense %02x/%02x/%02x",
688 (char *) h->ctlr->array_name, h->ctlr->index,
689 sense_hdr->sense_key, sense_hdr->asc, sense_hdr->ascq);
690
691 switch (sense_hdr->sense_key) {
692 case NOT_READY:
693 if (sense_hdr->asc == 0x04 && sense_hdr->ascq == 0x01)
694 /* LUN Not Ready - Logical Unit Not Ready and is in
695 * the process of becoming ready
696 * Just retry.
697 */
698 return ADD_TO_MLQUEUE;
699 if (sense_hdr->asc == 0x04 && sense_hdr->ascq == 0x81)
700 /* LUN Not Ready - Storage firmware incompatible
701 * Manual code synchronisation required.
702 *
703 * Nothing we can do here. Try to bypass the path.
704 */
705 return SUCCESS;
706 if (sense_hdr->asc == 0x04 && sense_hdr->ascq == 0xA1)
707 /* LUN Not Ready - Quiescence in progress
708 *
709 * Just retry and wait.
710 */
711 return ADD_TO_MLQUEUE;
712 if (sense_hdr->asc == 0xA1 && sense_hdr->ascq == 0x02)
713 /* LUN Not Ready - Quiescence in progress
714 * or has been achieved
715 * Just retry.
716 */
717 return ADD_TO_MLQUEUE;
718 break;
719 case ILLEGAL_REQUEST:
720 if (sense_hdr->asc == 0x94 && sense_hdr->ascq == 0x01) {
721 /* Invalid Request - Current Logical Unit Ownership.
722 * Controller is not the current owner of the LUN;
723 * fail the path so that the other path can be used.
724 */
725 h->state = RDAC_STATE_PASSIVE;
726 return SUCCESS;
727 }
728 break;
729 case UNIT_ATTENTION:
730 if (sense_hdr->asc == 0x29 && sense_hdr->ascq == 0x00)
731 /*
732 * Power On, Reset, or Bus Device Reset, just retry.
733 */
734 return ADD_TO_MLQUEUE;
735 if (sense_hdr->asc == 0x8b && sense_hdr->ascq == 0x02)
736 /*
737 * Quiescence in progress, just retry.
738 */
739 return ADD_TO_MLQUEUE;
740 break;
741 }
742 /* success just means we do not care what scsi-ml does */
743 return SCSI_RETURN_NOT_HANDLED;
744 }
745
746 static const struct scsi_dh_devlist rdac_dev_list[] = {
747 {"IBM", "1722"},
748 {"IBM", "1724"},
749 {"IBM", "1726"},
750 {"IBM", "1742"},
751 {"IBM", "1745"},
752 {"IBM", "1746"},
753 {"IBM", "1814"},
754 {"IBM", "1815"},
755 {"IBM", "1818"},
756 {"IBM", "3526"},
757 {"SGI", "TP9400"},
758 {"SGI", "TP9500"},
759 {"SGI", "IS"},
760 {"STK", "OPENstorage D280"},
761 {"SUN", "CSM200_R"},
762 {"SUN", "LCSM100_I"},
763 {"SUN", "LCSM100_S"},
764 {"SUN", "LCSM100_E"},
765 {"SUN", "LCSM100_F"},
766 {"DELL", "MD3000"},
767 {"DELL", "MD3000i"},
768 {"DELL", "MD32xx"},
769 {"DELL", "MD32xxi"},
770 {"LSI", "INF-01-00"},
771 {"ENGENIO", "INF-01-00"},
772 {"STK", "FLEXLINE 380"},
773 {"SUN", "CSM100_R_FC"},
774 {NULL, NULL},
775 };
776
777 static int rdac_bus_attach(struct scsi_device *sdev);
778 static void rdac_bus_detach(struct scsi_device *sdev);
779
780 static struct scsi_device_handler rdac_dh = {
781 .name = RDAC_NAME,
782 .module = THIS_MODULE,
783 .devlist = rdac_dev_list,
784 .prep_fn = rdac_prep_fn,
785 .check_sense = rdac_check_sense,
786 .attach = rdac_bus_attach,
787 .detach = rdac_bus_detach,
788 .activate = rdac_activate,
789 };
790
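/*
 * Attach: allocate the per-device handler data, discover the LUN,
 * controller and mode-select format via the vendor INQUIRY pages
 * (0xC8, 0xC4, 0xC9, 0xC2), then publish the data in
 * sdev->scsi_dh_data under the request queue lock.
 */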
791 static int rdac_bus_attach(struct scsi_device *sdev)
792 {
793 struct scsi_dh_data *scsi_dh_data;
794 struct rdac_dh_data *h;
795 unsigned long flags;
796 int err;
797 char array_name[ARRAY_LABEL_LEN];
798
799 scsi_dh_data = kzalloc(sizeof(struct scsi_device_handler *)
800 + sizeof(*h), GFP_KERNEL);
801 if (!scsi_dh_data) {
802 sdev_printk(KERN_ERR, sdev, "%s: Attach failed\n",
803 RDAC_NAME);
804 return 0;
805 }
806
807 scsi_dh_data->scsi_dh = &rdac_dh;
808 h = (struct rdac_dh_data *) scsi_dh_data->buf;
809 h->lun = UNINITIALIZED_LUN;
810 h->state = RDAC_STATE_ACTIVE;
811
812 err = get_lun_info(sdev, h, array_name);
813 if (err != SCSI_DH_OK)
814 goto failed;
815
816 err = initialize_controller(sdev, h, array_name);
817 if (err != SCSI_DH_OK)
818 goto failed;
819
820 err = check_ownership(sdev, h);
821 if (err != SCSI_DH_OK)
822 goto clean_ctlr;
823
824 err = set_mode_select(sdev, h);
825 if (err != SCSI_DH_OK)
826 goto clean_ctlr;
827
828 if (!try_module_get(THIS_MODULE))
829 goto clean_ctlr;
830
831 spin_lock_irqsave(sdev->request_queue->queue_lock, flags);
832 sdev->scsi_dh_data = scsi_dh_data;
833 spin_unlock_irqrestore(sdev->request_queue->queue_lock, flags);
834
835 sdev_printk(KERN_NOTICE, sdev,
836 "%s: LUN %d (%s)\n",
837 RDAC_NAME, h->lun, lun_state[(int)h->lun_state]);
838
839 return 0;
840
841 clean_ctlr:
842 kref_put(&h->ctlr->kref, release_controller);
843
844 failed:
845 kfree(scsi_dh_data);
846 sdev_printk(KERN_ERR, sdev, "%s: not attached\n",
847 RDAC_NAME);
848 return -EINVAL;
849 }
850
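/*
 * Detach: unhook the handler data from the sdev under the queue lock,
 * drop the controller reference and free the per-device data.
 */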
851 static void rdac_bus_detach(struct scsi_device *sdev)
852 {
853 struct scsi_dh_data *scsi_dh_data;
854 struct rdac_dh_data *h;
855 unsigned long flags;
856
857 spin_lock_irqsave(sdev->request_queue->queue_lock, flags);
858 scsi_dh_data = sdev->scsi_dh_data;
859 sdev->scsi_dh_data = NULL;
860 spin_unlock_irqrestore(sdev->request_queue->queue_lock, flags);
861
862 h = (struct rdac_dh_data *) scsi_dh_data->buf;
863 if (h->ctlr)
864 kref_put(&h->ctlr->kref, release_controller);
865 kfree(scsi_dh_data);
866 module_put(THIS_MODULE);
867 sdev_printk(KERN_NOTICE, sdev, "%s: Detached\n", RDAC_NAME);
868 }
869
870
871
872 static int __init rdac_init(void)
873 {
874 int r;
875
876 r = scsi_register_device_handler(&rdac_dh);
877 if (r != 0) {
878 printk(KERN_ERR "Failed to register scsi device handler.\n");
879 goto done;
880 }
881
882 /*
883 * Create workqueue to handle mode selects for rdac
884 */
885 kmpath_rdacd = create_singlethread_workqueue("kmpath_rdacd");
886 if (!kmpath_rdacd) {
887 scsi_unregister_device_handler(&rdac_dh);
888 printk(KERN_ERR "kmpath_rdacd creation failed.\n");
r = -EINVAL;
889 }
890 done:
891 return r;
892 }
893
894 static void __exit rdac_exit(void)
895 {
896 destroy_workqueue(kmpath_rdacd);
897 scsi_unregister_device_handler(&rdac_dh);
898 }
899
900 module_init(rdac_init);
901 module_exit(rdac_exit);
902
903 MODULE_DESCRIPTION("Multipath LSI/Engenio RDAC driver");
904 MODULE_AUTHOR("Mike Christie, Chandra Seetharaman");
905 MODULE_LICENSE("GPL");