[SCSI] ips: Mode Sense (Caching Page ) fix
[GitHub/mt8127/android_kernel_alcatel_ttab.git] / drivers / scsi / dpt_i2o.c
CommitLineData
1da177e4
LT
1/***************************************************************************
2 dpti.c - description
3 -------------------
4 begin : Thu Sep 7 2000
5 copyright : (C) 2000 by Adaptec
6
7 July 30, 2001 First version being submitted
8 for inclusion in the kernel. V2.4
9
10 See Documentation/scsi/dpti.txt for history, notes, license info
11 and credits
12 ***************************************************************************/
13
14/***************************************************************************
15 * *
16 * This program is free software; you can redistribute it and/or modify *
17 * it under the terms of the GNU General Public License as published by *
18 * the Free Software Foundation; either version 2 of the License, or *
19 * (at your option) any later version. *
20 * *
21 ***************************************************************************/
22/***************************************************************************
23 * Sat Dec 20 2003 Go Taniguchi <go@turbolinux.co.jp>
24 - Support 2.6 kernel and DMA-mapping
25 - ioctl fix for raid tools
26 - use schedule_timeout in long long loop
27 **************************************************************************/
28
29/*#define DEBUG 1 */
30/*#define UARTDELAY 1 */
31
32/* On the real kernel ADDR32 should always be zero for 2.4. GFP_HIGH allocates
33 high pages. Keep the macro around because of the broken unmerged ia64 tree */
34
35#define ADDR32 (0)
36
1da177e4
LT
37#include <linux/module.h>
38
39MODULE_AUTHOR("Deanna Bonds, with _lots_ of help from Mark Salyzyn");
40MODULE_DESCRIPTION("Adaptec I2O RAID Driver");
41
42////////////////////////////////////////////////////////////////
43
44#include <linux/ioctl.h> /* For SCSI-Passthrough */
45#include <asm/uaccess.h>
46
47#include <linux/stat.h>
48#include <linux/slab.h> /* for kmalloc() */
49#include <linux/config.h> /* for CONFIG_PCI */
50#include <linux/pci.h> /* for PCI support */
51#include <linux/proc_fs.h>
52#include <linux/blkdev.h>
53#include <linux/delay.h> /* for udelay */
54#include <linux/interrupt.h>
55#include <linux/kernel.h> /* for printk */
56#include <linux/sched.h>
57#include <linux/reboot.h>
58#include <linux/spinlock.h>
59#include <linux/smp_lock.h>
60
61#include <linux/timer.h>
62#include <linux/string.h>
63#include <linux/ioport.h>
0b950672 64#include <linux/mutex.h>
1da177e4
LT
65
66#include <asm/processor.h> /* for boot_cpu_data */
67#include <asm/pgtable.h>
68#include <asm/io.h> /* for virt_to_bus, etc. */
69
70#include <scsi/scsi.h>
71#include <scsi/scsi_cmnd.h>
72#include <scsi/scsi_device.h>
73#include <scsi/scsi_host.h>
74#include <scsi/scsi_tcq.h>
75
76#include "dpt/dptsig.h"
77#include "dpti.h"
78
/*============================================================================
 * Create a binary signature - this is read by dptsig
 * Needed for our management apps
 *============================================================================
 */
static dpt_sig_S DPTI_sig = {
	{'d', 'P', 't', 'S', 'i', 'G'}, SIG_VERSION,
#ifdef __i386__
	PROC_INTEL, PROC_386 | PROC_486 | PROC_PENTIUM | PROC_SEXIUM,
#elif defined(__ia64__)
	PROC_INTEL, PROC_IA64,
#elif defined(__sparc__)
	PROC_ULTRASPARC, PROC_ULTRASPARC,
#elif defined(__alpha__)
	PROC_ALPHA, PROC_ALPHA,
#else
	(-1),(-1),	/* unknown architecture: processor family/type not reported */
#endif
	FT_HBADRVR, 0, OEM_DPT, OS_LINUX, CAP_OVERLAP, DEV_ALL,
	ADF_ALL_SC5, 0, 0, DPT_VERSION, DPT_REVISION, DPT_SUBREVISION,
	DPT_MONTH, DPT_DAY, DPT_YEAR, "Adaptec Linux I2O RAID Driver"
};
101
102
103
104
/*============================================================================
 * Globals
 *============================================================================
 */

/* Serializes all mutation/traversal of the global HBA chain below. */
static DEFINE_MUTEX(adpt_configuration_lock);

/* I2O system table shared by all IOPs; (re)built by adpt_i2o_build_sys_table(). */
static struct i2o_sys_tbl *sys_tbl = NULL;
static int sys_tbl_ind = 0;
static int sys_tbl_len = 0;

/* Singly-linked list of all installed controllers, guarded by
 * adpt_configuration_lock. */
static adpt_hba* hba_chain = NULL;
static int hba_count = 0;

/* Entry points for the management/pass-through character device node. */
static struct file_operations adpt_fops = {
	.ioctl = adpt_ioctl,
	.open = adpt_open,
	.release = adpt_close
};

#ifdef REBOOT_NOTIFIER
/* Quiesces the IOPs on restart/halt/power-off (see adpt_reboot_event). */
static struct notifier_block adpt_reboot_notifier =
{
	 adpt_reboot_event,
	 NULL,
	 0
};
#endif

/* Structures and definitions for synchronous message posting.
 * See adpt_i2o_post_wait() for description
 * */
struct adpt_i2o_post_wait_data
{
	int status;	/* completion status filled in when the reply arrives */
	u32 id;		/* matches the context id carried in the posted message */
	adpt_wait_queue_head_t *wq;	/* waiter to wake on completion */
	struct adpt_i2o_post_wait_data *next;
};

/* Pending synchronous posts, protected by adpt_post_wait_lock. */
static struct adpt_i2o_post_wait_data *adpt_post_wait_queue = NULL;
static u32 adpt_post_wait_id = 0;
static DEFINE_SPINLOCK(adpt_post_wait_lock);
148
149
150/*============================================================================
151 * Functions
152 *============================================================================
153 */
154
155static u8 adpt_read_blink_led(adpt_hba* host)
156{
157 if(host->FwDebugBLEDflag_P != 0) {
158 if( readb(host->FwDebugBLEDflag_P) == 0xbc ){
159 return readb(host->FwDebugBLEDvalue_P);
160 }
161 }
162 return 0;
163}
164
165/*============================================================================
166 * Scsi host template interface functions
167 *============================================================================
168 */
169
/* PCI IDs this driver claims: the plain DPT I2O card and the Raptor. */
static struct pci_device_id dptids[] = {
	{ PCI_DPT_VENDOR_ID, PCI_DPT_DEVICE_ID, PCI_ANY_ID, PCI_ANY_ID,},
	{ PCI_DPT_VENDOR_ID, PCI_DPT_RAPTOR_DEVICE_ID, PCI_ANY_ID, PCI_ANY_ID,},
	{ 0, }
};
MODULE_DEVICE_TABLE(pci,dptids);
176
177static int adpt_detect(struct scsi_host_template* sht)
178{
179 struct pci_dev *pDev = NULL;
180 adpt_hba* pHba;
181
182 adpt_init();
183
184 PINFO("Detecting Adaptec I2O RAID controllers...\n");
185
186 /* search for all Adatpec I2O RAID cards */
187 while ((pDev = pci_find_device( PCI_DPT_VENDOR_ID, PCI_ANY_ID, pDev))) {
188 if(pDev->device == PCI_DPT_DEVICE_ID ||
189 pDev->device == PCI_DPT_RAPTOR_DEVICE_ID){
190 if(adpt_install_hba(sht, pDev) ){
191 PERROR("Could not Init an I2O RAID device\n");
192 PERROR("Will not try to detect others.\n");
193 return hba_count-1;
194 }
195 }
196 }
197
198 /* In INIT state, Activate IOPs */
199 for (pHba = hba_chain; pHba; pHba = pHba->next) {
200 // Activate does get status , init outbound, and get hrt
201 if (adpt_i2o_activate_hba(pHba) < 0) {
202 adpt_i2o_delete_hba(pHba);
203 }
204 }
205
206
207 /* Active IOPs in HOLD state */
208
209rebuild_sys_tab:
210 if (hba_chain == NULL)
211 return 0;
212
213 /*
214 * If build_sys_table fails, we kill everything and bail
215 * as we can't init the IOPs w/o a system table
216 */
217 if (adpt_i2o_build_sys_table() < 0) {
218 adpt_i2o_sys_shutdown();
219 return 0;
220 }
221
222 PDEBUG("HBA's in HOLD state\n");
223
224 /* If IOP don't get online, we need to rebuild the System table */
225 for (pHba = hba_chain; pHba; pHba = pHba->next) {
226 if (adpt_i2o_online_hba(pHba) < 0) {
227 adpt_i2o_delete_hba(pHba);
228 goto rebuild_sys_tab;
229 }
230 }
231
232 /* Active IOPs now in OPERATIONAL state */
233 PDEBUG("HBA's in OPERATIONAL state\n");
234
235 printk("dpti: If you have a lot of devices this could take a few minutes.\n");
236 for (pHba = hba_chain; pHba; pHba = pHba->next) {
237 printk(KERN_INFO"%s: Reading the hardware resource table.\n", pHba->name);
238 if (adpt_i2o_lct_get(pHba) < 0){
239 adpt_i2o_delete_hba(pHba);
240 continue;
241 }
242
243 if (adpt_i2o_parse_lct(pHba) < 0){
244 adpt_i2o_delete_hba(pHba);
245 continue;
246 }
247 adpt_inquiry(pHba);
248 }
249
250 for (pHba = hba_chain; pHba; pHba = pHba->next) {
251 if( adpt_scsi_register(pHba,sht) < 0){
252 adpt_i2o_delete_hba(pHba);
253 continue;
254 }
255 pHba->initialized = TRUE;
256 pHba->state &= ~DPTI_STATE_RESET;
257 }
258
259 // Register our control device node
260 // nodes will need to be created in /dev to access this
261 // the nodes can not be created from within the driver
262 if (hba_count && register_chrdev(DPTI_I2O_MAJOR, DPT_DRIVER, &adpt_fops)) {
263 adpt_i2o_sys_shutdown();
264 return 0;
265 }
266 return hba_count;
267}
268
269
/*
 * scsi_unregister will be called AFTER we return.
 */
static int adpt_release(struct Scsi_Host *host)
{
	/* Tear down controller state, then hand the host back to the mid-layer. */
	adpt_hba* pHba = (adpt_hba*) host->hostdata[0];
//	adpt_i2o_quiesce_hba(pHba);
	adpt_i2o_delete_hba(pHba);
	scsi_unregister(host);
	return 0;
}
281
282
/*
 * Issue a standard SCSI INQUIRY to the controller itself (Adaptec-private
 * SCSI_EXEC with the Interpret bit set) and use the reply to build the
 * pHba->detail description string shown by adpt_info() and /proc.
 */
static void adpt_inquiry(adpt_hba* pHba)
{
	u32 msg[14];
	u32 *mptr;
	u32 *lenptr;
	int direction;
	int scsidir;
	u32 len;
	u32 reqlen;
	u8* buf;
	u8 scb[16];
	s32 rcode;

	memset(msg, 0, sizeof(msg));
	/* ADDR32 keeps the DMA buffer 32-bit addressable (no-op here). */
	buf = (u8*)kmalloc(80,GFP_KERNEL|ADDR32);
	if(!buf){
		printk(KERN_ERR"%s: Could not allocate buffer\n",pHba->name);
		return;
	}
	memset((void*)buf, 0, 36);

	len = 36;		/* standard INQUIRY allocation length */
	direction = 0x00000000;
	scsidir =0x40000000;	// DATA IN  (iop<--dev)

	reqlen = 14;		// SINGLE SGE
	/* Stick the headers on */
	msg[0] = reqlen<<16 | SGL_OFFSET_12;
	msg[1] = (0xff<<24|HOST_TID<<12|ADAPTER_TID);
	msg[2] = 0;
	msg[3]  = 0;
	// Adaptec/DPT Private stuff
	msg[4] = I2O_CMD_SCSI_EXEC|DPT_ORGANIZATION_ID<<16;
	msg[5] = ADAPTER_TID | 1<<16 /* Interpret*/;
	/* Direction, disconnect ok | sense data | simple queue , CDBLen */
	// I2O_SCB_FLAG_ENABLE_DISCONNECT |
	// I2O_SCB_FLAG_SIMPLE_QUEUE_TAG |
	// I2O_SCB_FLAG_SENSE_DATA_IN_MESSAGE;
	msg[6] = scsidir|0x20a00000| 6 /* cmd len*/;

	mptr=msg+7;

	memset(scb, 0, sizeof(scb));
	// Write SCSI command into the message - always 16 byte block
	scb[0] = INQUIRY;
	scb[1] = 0;
	scb[2] = 0;
	scb[3] = 0;
	scb[4] = 36;	/* must agree with 'len' above */
	scb[5] = 0;
	// Don't care about the rest of scb

	memcpy(mptr, scb, sizeof(scb));
	mptr+=4;
	lenptr=mptr++;		/* Remember me - fill in when we know */

	/* Now fill in the SGList and command */
	*lenptr = len;
	*mptr++ = 0xD0000000|direction|len;	/* single, last SG element */
	*mptr++ = virt_to_bus(buf);

	// Send it on it's way
	rcode = adpt_i2o_post_wait(pHba, msg, reqlen<<2, 120);
	if (rcode != 0) {
		sprintf(pHba->detail, "Adaptec I2O RAID");
		printk(KERN_INFO "%s: Inquiry Error (%d)\n",pHba->name,rcode);
		/* On timeout/interrupt the IOP may still DMA into buf later,
		 * so the buffer is deliberately leaked rather than freed. */
		if (rcode != -ETIME && rcode != -EINTR)
			kfree(buf);
	} else {
		/* Splice vendor/model/firmware fields from the INQUIRY data. */
		memset(pHba->detail, 0, sizeof(pHba->detail));
		memcpy(&(pHba->detail), "Vendor: Adaptec ", 16);
		memcpy(&(pHba->detail[16]), " Model: ", 8);
		memcpy(&(pHba->detail[24]), (u8*) &buf[16], 16);
		memcpy(&(pHba->detail[40]), " FW: ", 4);
		memcpy(&(pHba->detail[44]), (u8*) &buf[32], 4);
		pHba->detail[48] = '\0';	/* precautionary */
		kfree(buf);
	}
	adpt_i2o_status_get(pHba);
	return ;
}
364
365
366static int adpt_slave_configure(struct scsi_device * device)
367{
368 struct Scsi_Host *host = device->host;
369 adpt_hba* pHba;
370
371 pHba = (adpt_hba *) host->hostdata[0];
372
373 if (host->can_queue && device->tagged_supported) {
374 scsi_adjust_queue_depth(device, MSG_SIMPLE_TAG,
375 host->can_queue - 1);
376 } else {
377 scsi_adjust_queue_depth(device, 0, 1);
378 }
379 return 0;
380}
381
/*
 * queuecommand entry point: translate a mid-layer SCSI command into an
 * I2O message and post it to the IOP (via adpt_scsi_to_i2o).
 * Returns 0 when the command was consumed, 1 to ask the mid-layer to
 * retry later, or FAILED when the command cannot be handled.
 */
static int adpt_queue(struct scsi_cmnd * cmd, void (*done) (struct scsi_cmnd *))
{
	adpt_hba* pHba = NULL;
	struct adpt_device* pDev = NULL;	/* dpt per device information */

	cmd->scsi_done = done;
	/*
	 * SCSI REQUEST_SENSE commands will be executed automatically by the
	 * Host Adapter for any errors, so they should not be executed
	 * explicitly unless the Sense Data is zero indicating that no error
	 * occurred.
	 */

	if ((cmd->cmnd[0] == REQUEST_SENSE) && (cmd->sense_buffer[0] != 0)) {
		cmd->result = (DID_OK << 16);
		cmd->scsi_done(cmd);
		return 0;
	}

	pHba = (adpt_hba*)cmd->device->host->hostdata[0];
	if (!pHba) {
		return FAILED;
	}

	rmb();	/* make sure we see the latest pHba->state */
	/*
	 * TODO: I need to block here if I am processing ioctl cmds
	 * but if the outstanding cmds all finish before the ioctl,
	 * the scsi-core will not know to start sending cmds to me again.
	 * I need to a way to restart the scsi-cores queues or should I block
	 * calling scsi_done on the outstanding cmds instead
	 * for now we don't set the IOCTL state
	 */
	if(((pHba->state) & DPTI_STATE_IOCTL) || ((pHba->state) & DPTI_STATE_RESET)) {
		/* Controller busy resetting: ask the mid-layer to retry later. */
		pHba->host->last_reset = jiffies;
		pHba->host->resetting = 1;
		return 1;
	}

	// TODO if the cmd->device if offline then I may need to issue a bus rescan
	// followed by a get_lct to see if the device is there anymore
	if((pDev = (struct adpt_device*) (cmd->device->hostdata)) == NULL) {
		/*
		 * First command request for this device.  Set up a pointer
		 * to the device structure.  This should be a TEST_UNIT_READY
		 * command from scan_scsis_single.
		 */
		if ((pDev = adpt_find_device(pHba, (u32)cmd->device->channel, (u32)cmd->device->id, (u32)cmd->device->lun)) == NULL) {
			// TODO: if any luns are at this bus, scsi id then fake a TEST_UNIT_READY and INQUIRY response
			// with type 7F (for all luns less than the max for this bus,id) so the lun scan will continue.
			cmd->result = (DID_NO_CONNECT << 16);
			cmd->scsi_done(cmd);
			return 0;
		}
		cmd->device->hostdata = pDev;
	}
	pDev->pScsi_dev = cmd->device;

	/*
	 * If we are being called from when the device is being reset,
	 * delay processing of the command until later.
	 */
	if (pDev->state & DPTI_DEV_RESET ) {
		return FAILED;
	}
	return adpt_scsi_to_i2o(pHba, cmd, pDev);
}
449
450static int adpt_bios_param(struct scsi_device *sdev, struct block_device *dev,
451 sector_t capacity, int geom[])
452{
453 int heads=-1;
454 int sectors=-1;
455 int cylinders=-1;
456
457 // *** First lets set the default geometry ****
458
459 // If the capacity is less than ox2000
460 if (capacity < 0x2000 ) { // floppy
461 heads = 18;
462 sectors = 2;
463 }
464 // else if between 0x2000 and 0x20000
465 else if (capacity < 0x20000) {
466 heads = 64;
467 sectors = 32;
468 }
469 // else if between 0x20000 and 0x40000
470 else if (capacity < 0x40000) {
471 heads = 65;
472 sectors = 63;
473 }
474 // else if between 0x4000 and 0x80000
475 else if (capacity < 0x80000) {
476 heads = 128;
477 sectors = 63;
478 }
479 // else if greater than 0x80000
480 else {
481 heads = 255;
482 sectors = 63;
483 }
484 cylinders = sector_div(capacity, heads * sectors);
485
486 // Special case if CDROM
487 if(sdev->type == 5) { // CDROM
488 heads = 252;
489 sectors = 63;
490 cylinders = 1111;
491 }
492
493 geom[0] = heads;
494 geom[1] = sectors;
495 geom[2] = cylinders;
496
497 PDEBUG("adpt_bios_param: exit\n");
498 return 0;
499}
500
501
502static const char *adpt_info(struct Scsi_Host *host)
503{
504 adpt_hba* pHba;
505
506 pHba = (adpt_hba *) host->hostdata[0];
507 return (char *) (pHba->detail);
508}
509
/*
 * /proc/scsi read handler: emit driver/adapter info and the device list
 * for the requested host.  Implements the classic proc_scsiread windowing
 * protocol -- 'begin' tracks where the emitted text logically starts,
 * 'pos' the logical end; only the [offset, offset+length) window is
 * returned via *start/len.  Writes (inout == TRUE) are not supported.
 */
static int adpt_proc_info(struct Scsi_Host *host, char *buffer, char **start, off_t offset,
		  int length, int inout)
{
	struct adpt_device* d;
	int id;
	int chan;
	int len = 0;
	int begin = 0;
	int pos = 0;
	adpt_hba* pHba;
	int unit;

	*start = buffer;
	if (inout == TRUE) {
		/*
		 * The user has done a write and wants us to take the
		 * data in the buffer and do something with it.
		 * proc_scsiwrite calls us with inout = 1
		 *
		 * Read data from buffer (writing to us) - NOT SUPPORTED
		 */
		return -EINVAL;
	}

	/*
	 * inout = 0 means the user has done a read and wants information
	 * returned, so we write information about the cards into the buffer
	 * proc_scsiread() calls us with inout = 0
	 */

	// Find HBA (host bus adapter) we are looking for
	mutex_lock(&adpt_configuration_lock);
	for (pHba = hba_chain; pHba; pHba = pHba->next) {
		if (pHba->host == host) {
			break;	/* found adapter */
		}
	}
	mutex_unlock(&adpt_configuration_lock);
	if (pHba == NULL) {
		return 0;
	}
	host = pHba->host;

	len  = sprintf(buffer    , "Adaptec I2O RAID Driver Version: %s\n\n", DPT_I2O_VERSION);
	len += sprintf(buffer+len, "%s\n", pHba->detail);
	len += sprintf(buffer+len, "SCSI Host=scsi%d  Control Node=/dev/%s  irq=%d\n", 
			pHba->host->host_no, pHba->name, host->irq);
	len += sprintf(buffer+len, "\tpost fifo size  = %d\n\treply fifo size = %d\n\tsg table size   = %d\n\n",
			host->can_queue, (int) pHba->reply_fifo_size , host->sg_tablesize);

	pos = begin + len;

	/* CHECKPOINT */
	if(pos > offset + length) {
		goto stop_output;
	}
	if(pos <= offset) {
		/*
		 * If we haven't even written to where we last left
		 * off (the last time we were called), reset the 
		 * beginning pointer.
		 */
		len = 0;
		begin = pos;
	}
	len +=  sprintf(buffer+len, "Devices:\n");
	for(chan = 0; chan < MAX_CHANNEL; chan++) {
		for(id = 0; id < MAX_ID; id++) {
			d = pHba->channel[chan].device[id];
			while(d){
				len += sprintf(buffer+len,"\t%-24.24s", d->pScsi_dev->vendor);
				len += sprintf(buffer+len," Rev: %-8.8s\n", d->pScsi_dev->rev);
				pos = begin + len;


				/* CHECKPOINT */
				if(pos > offset + length) {
					goto stop_output;
				}
				if(pos <= offset) {
					len = 0;
					begin = pos;
				}

				unit = d->pI2o_dev->lct_data.tid;
				len += sprintf(buffer+len, "\tTID=%d, (Channel=%d, Target=%d, Lun=%d)  (%s)\n\n",
					       unit, (int)d->scsi_channel, (int)d->scsi_id, (int)d->scsi_lun, 
					       scsi_device_online(d->pScsi_dev)? "online":"offline"); 
				pos = begin + len;

				/* CHECKPOINT */
				if(pos > offset + length) {
					goto stop_output;
				}
				if(pos <= offset) {
					len = 0;
					begin = pos;
				}
				
				d = d->next_lun;
			}
		}
	}

	/*
	 * begin is where we last checked our position with regards to offset
	 * begin is always less than offset.  len is relative to begin.  It
	 * is the number of bytes written past begin
	 *
	 */
stop_output:
	/* stop the output and calculate the correct length */
	*(buffer + len) = '\0';

	*start = buffer + (offset - begin);	/* Start of wanted data */
	len -= (offset - begin);
	if(len > length) {
		len = length;
	} else if(len < 0){
		len = 0;
		**start = '\0';
	}
	return len;
}
634
635
636/*===========================================================================
637 * Error Handling routines
638 *===========================================================================
639 */
640
/*
 * eh_abort_handler: post an I2O SCSI_ABORT for the given in-flight
 * command and wait (FOREVER) for the IOP to acknowledge it.
 * Returns SUCCESS when the abort completed, FAILED otherwise.
 */
static int adpt_abort(struct scsi_cmnd * cmd)
{
	adpt_hba* pHba = NULL;	/* host bus adapter structure */
	struct adpt_device* dptdevice;	/* dpt per device information */
	u32 msg[5];
	int rcode;

	if(cmd->serial_number == 0){
		return FAILED;	/* command never made it to the adapter */
	}
	pHba = (adpt_hba*) cmd->device->host->hostdata[0];
	printk(KERN_INFO"%s: Trying to Abort cmd=%ld\n",pHba->name, cmd->serial_number);
	if ((dptdevice = (void*) (cmd->device->hostdata)) == NULL) {
		printk(KERN_ERR "%s: Unable to abort: No device in cmnd\n",pHba->name);
		return FAILED;
	}

	memset(msg, 0, sizeof(msg));
	msg[0] = FIVE_WORD_MSG_SIZE|SGL_OFFSET_0;
	msg[1] = I2O_CMD_SCSI_ABORT<<24|HOST_TID<<12|dptdevice->tid;
	msg[2] = 0;
	msg[3]= 0; 
	/* NOTE(review): casting the scsi_cmnd pointer to u32 truncates it on
	 * 64-bit kernels; this context must match whatever was posted with
	 * the original command -- confirm against adpt_scsi_to_i2o(). */
	msg[4] = (u32)cmd;
	if (pHba->host)
		spin_lock_irq(pHba->host->host_lock);
	rcode = adpt_i2o_post_wait(pHba, msg, sizeof(msg), FOREVER);
	if (pHba->host)
		spin_unlock_irq(pHba->host->host_lock);
	if (rcode != 0) {
		if(rcode == -EOPNOTSUPP ){
			printk(KERN_INFO"%s: Abort cmd not supported\n",pHba->name);
			return FAILED;
		}
		printk(KERN_INFO"%s: Abort cmd=%ld failed.\n",pHba->name, cmd->serial_number);
		return FAILED;
	} 
	printk(KERN_INFO"%s: Abort cmd=%ld complete.\n",pHba->name, cmd->serial_number);
	return SUCCESS;
}
680
681
#define I2O_DEVICE_RESET 0x27
// This is the same for BLK and SCSI devices
// NOTE this is wrong in the i2o.h definitions
// This is not currently supported by our adapter but we issue it anyway
/*
 * eh_device_reset_handler: post an I2O DEVICE_RESET to the target's TID
 * and wait for the reply.  While the request is outstanding the device
 * is flagged DPTI_DEV_RESET so adpt_queue() defers new commands to it.
 */
static int adpt_device_reset(struct scsi_cmnd* cmd)
{
	adpt_hba* pHba;
	u32 msg[4];
	u32 rcode;
	int old_state;
	struct adpt_device* d = cmd->device->hostdata;

	pHba = (void*) cmd->device->host->hostdata[0];
	printk(KERN_INFO"%s: Trying to reset device\n",pHba->name);
	if (!d) {
		printk(KERN_INFO"%s: Reset Device: Device Not found\n",pHba->name);
		return FAILED;
	}
	memset(msg, 0, sizeof(msg));
	msg[0] = FOUR_WORD_MSG_SIZE|SGL_OFFSET_0;
	msg[1] = (I2O_DEVICE_RESET<<24|HOST_TID<<12|d->tid);
	msg[2] = 0;
	msg[3] = 0;

	if (pHba->host)
		spin_lock_irq(pHba->host->host_lock);
	/* Mark resetting, post, then restore whatever state we found. */
	old_state = d->state;
	d->state |= DPTI_DEV_RESET;
	rcode = adpt_i2o_post_wait(pHba, msg,sizeof(msg), FOREVER);
	d->state = old_state;
	if (pHba->host)
		spin_unlock_irq(pHba->host->host_lock);
	if (rcode != 0) {
		if(rcode == -EOPNOTSUPP ){
			printk(KERN_INFO"%s: Device reset not supported\n",pHba->name);
			return FAILED;
		}
		printk(KERN_INFO"%s: Device reset failed\n",pHba->name);
		return FAILED;
	} else {
		printk(KERN_INFO"%s: Device reset successful\n",pHba->name);
		return SUCCESS;
	}
}
726
727
#define I2O_HBA_BUS_RESET 0x87
// This version of bus reset is called by the eh_error handler
/*
 * eh_bus_reset_handler: post an I2O BUS_RESET to the TID of the channel
 * the failed command lives on and wait for the reply.
 */
static int adpt_bus_reset(struct scsi_cmnd* cmd)
{
	adpt_hba* pHba;
	u32 msg[4];
	u32 rcode;

	pHba = (adpt_hba*)cmd->device->host->hostdata[0];
	memset(msg, 0, sizeof(msg));
	printk(KERN_WARNING"%s: Bus reset: SCSI Bus %d: tid: %d\n",pHba->name, cmd->device->channel,pHba->channel[cmd->device->channel].tid );
	msg[0] = FOUR_WORD_MSG_SIZE|SGL_OFFSET_0;
	msg[1] = (I2O_HBA_BUS_RESET<<24|HOST_TID<<12|pHba->channel[cmd->device->channel].tid);
	msg[2] = 0;
	msg[3] = 0;
	if (pHba->host)
		spin_lock_irq(pHba->host->host_lock);
	rcode = adpt_i2o_post_wait(pHba, msg,sizeof(msg), FOREVER);
	if (pHba->host)
		spin_unlock_irq(pHba->host->host_lock);
	if (rcode != 0) {
		printk(KERN_WARNING"%s: Bus reset failed.\n",pHba->name);
		return FAILED;
	} else {
		printk(KERN_WARNING"%s: Bus reset success.\n",pHba->name);
		return SUCCESS;
	}
}
756
757// This version of reset is called by the eh_error_handler
df0ae249 758static int __adpt_reset(struct scsi_cmnd* cmd)
1da177e4
LT
759{
760 adpt_hba* pHba;
761 int rcode;
762 pHba = (adpt_hba*)cmd->device->host->hostdata[0];
763 printk(KERN_WARNING"%s: Hba Reset: scsi id %d: tid: %d\n",pHba->name,cmd->device->channel,pHba->channel[cmd->device->channel].tid );
764 rcode = adpt_hba_reset(pHba);
765 if(rcode == 0){
766 printk(KERN_WARNING"%s: HBA reset complete\n",pHba->name);
767 return SUCCESS;
768 } else {
769 printk(KERN_WARNING"%s: HBA reset failed (%x)\n",pHba->name, rcode);
770 return FAILED;
771 }
772}
773
df0ae249
JG
774static int adpt_reset(struct scsi_cmnd* cmd)
775{
776 int rc;
777
778 spin_lock_irq(cmd->device->host->host_lock);
779 rc = __adpt_reset(cmd);
780 spin_unlock_irq(cmd->device->host->host_lock);
781
782 return rc;
783}
784
1da177e4
LT
// This version of reset is called by the ioctls and indirectly from eh_error_handler via adpt_reset
/*
 * Re-run the full IOP bring-up sequence (activate -> sys table -> online
 * -> LCT reparse).  DPTI_STATE_RESET blocks normal queuing for the
 * duration.  On any failure the HBA is deleted and the error returned;
 * on success any SCBs that were in flight are failed back to the
 * mid-layer for retry.
 */
static int adpt_hba_reset(adpt_hba* pHba)
{
	int rcode;

	pHba->state |= DPTI_STATE_RESET;

	// Activate does get status , init outbound, and get hrt
	if ((rcode=adpt_i2o_activate_hba(pHba)) < 0) {
		printk(KERN_ERR "%s: Could not activate\n", pHba->name);
		adpt_i2o_delete_hba(pHba);
		return rcode;
	}

	if ((rcode=adpt_i2o_build_sys_table()) < 0) {
		adpt_i2o_delete_hba(pHba);
		return rcode;
	}
	PDEBUG("%s: in HOLD state\n",pHba->name);

	if ((rcode=adpt_i2o_online_hba(pHba)) < 0) {
		adpt_i2o_delete_hba(pHba);	
		return rcode;
	}
	PDEBUG("%s: in OPERATIONAL state\n",pHba->name);

	if ((rcode=adpt_i2o_lct_get(pHba)) < 0){
		adpt_i2o_delete_hba(pHba);
		return rcode;
	}

	if ((rcode=adpt_i2o_reparse_lct(pHba)) < 0){
		adpt_i2o_delete_hba(pHba);
		return rcode;
	}
	pHba->state &= ~DPTI_STATE_RESET;

	adpt_fail_posted_scbs(pHba);
	return 0;	/* return success */
}
825
826/*===========================================================================
827 *
828 *===========================================================================
829 */
830
831
/*
 * Shut down every controller: delete all HBAs from the chain and release
 * any leftover synchronous post-wait records.  Called on detect failure,
 * chrdev registration failure, and (optionally) reboot notification.
 */
static void adpt_i2o_sys_shutdown(void)
{
	adpt_hba *pHba, *pNext;
	struct adpt_i2o_post_wait_data *p1, *old;

	 printk(KERN_INFO"Shutting down Adaptec I2O controllers.\n");
	 printk(KERN_INFO"   This could take a few minutes if there are many devices attached\n");
	/* Delete all IOPs from the controller chain */
	/* They should have already been released by the
	 * scsi-core
	 */
	for (pHba = hba_chain; pHba; pHba = pNext) {
		pNext = pHba->next;	/* delete frees pHba */
		adpt_i2o_delete_hba(pHba);
	}

	/* Remove any timedout entries from the wait queue.  */
//	spin_lock_irqsave(&adpt_post_wait_lock, flags);
	/* Nothing should be outstanding at this point so just
	 * free them 
	 */
	for(p1 = adpt_post_wait_queue; p1;) {
		old = p1;
		p1 = p1->next;
		kfree(old);
	}
//	spin_unlock_irqrestore(&adpt_post_wait_lock, flags);
	adpt_post_wait_queue = NULL;

	 printk(KERN_INFO "Adaptec I2O controllers down.\n");
}
863
864/*
865 * reboot/shutdown notification.
866 *
867 * - Quiesce each IOP in the system
868 *
869 */
870
#ifdef REBOOT_NOTIFIER
/* Reboot notifier: quiesce the IOPs only for events that take the
 * system down; ignore everything else. */
static int adpt_reboot_event(struct notifier_block *n, ulong code, void *p)
{
	switch (code) {
	case SYS_RESTART:
	case SYS_HALT:
	case SYS_POWER_OFF:
		adpt_i2o_sys_shutdown();
		break;
	default:
		break;
	}
	return NOTIFY_DONE;
}
#endif
883
884
/*
 * Probe and register one PCI controller: enable the device, set the DMA
 * mask (64-bit preferred, 32-bit fallback), map its BAR(s) -- Raptor
 * split-BAR cards use BAR1 for the message unit -- allocate the adpt_hba,
 * link it onto hba_chain, and hook the shared interrupt.
 * Returns 0 on success, negative errno on failure; on failure every
 * resource acquired so far is released (in reverse order).
 */
static int adpt_install_hba(struct scsi_host_template* sht, struct pci_dev* pDev)
{

	adpt_hba* pHba = NULL;
	adpt_hba* p = NULL;
	ulong base_addr0_phys = 0;
	ulong base_addr1_phys = 0;
	u32 hba_map0_area_size = 0;
	u32 hba_map1_area_size = 0;
	void __iomem *base_addr_virt = NULL;
	void __iomem *msg_addr_virt = NULL;

	int raptorFlag = FALSE;

	if(pci_enable_device(pDev)) {
		return -EINVAL;
	}
	pci_set_master(pDev);
	/* Prefer 64-bit DMA addressing; fall back to 32-bit. */
	if (pci_set_dma_mask(pDev, 0xffffffffffffffffULL) &&
	    pci_set_dma_mask(pDev, 0xffffffffULL))
		return -EINVAL;

	base_addr0_phys = pci_resource_start(pDev,0);
	hba_map0_area_size = pci_resource_len(pDev,0);

	// Check if standard PCI card or single BAR Raptor
	if(pDev->device == PCI_DPT_DEVICE_ID){
		if(pDev->subsystem_device >=0xc032 && pDev->subsystem_device <= 0xc03b){
			// Raptor card with this device id needs 4M
			hba_map0_area_size = 0x400000;
		} else { // Not Raptor - it is a PCI card
			if(hba_map0_area_size > 0x100000 ){ 
				hba_map0_area_size = 0x100000;
			}
		}
	} else {// Raptor split BAR config
		// Use BAR1 in this configuration
		base_addr1_phys = pci_resource_start(pDev,1);
		hba_map1_area_size = pci_resource_len(pDev,1);
		raptorFlag = TRUE;
	}

	if (pci_request_regions(pDev, "dpt_i2o")) {
		PERROR("dpti: adpt_config_hba: pci request region failed\n");
		return -EINVAL;
	}
	base_addr_virt = ioremap(base_addr0_phys,hba_map0_area_size);
	if (!base_addr_virt) {
		pci_release_regions(pDev);
		PERROR("dpti: adpt_config_hba: io remap failed\n");
		return -EINVAL;
	}

        if(raptorFlag == TRUE) {
		msg_addr_virt = ioremap(base_addr1_phys, hba_map1_area_size );
		if (!msg_addr_virt) {
			PERROR("dpti: adpt_config_hba: io remap failed on BAR1\n");
			iounmap(base_addr_virt);
			pci_release_regions(pDev);
			return -EINVAL;
		}
	} else {
		msg_addr_virt = base_addr_virt;
	}
	
	// Allocate and zero the data structure
	pHba = kmalloc(sizeof(adpt_hba), GFP_KERNEL);
	if( pHba == NULL) {
		if(msg_addr_virt != base_addr_virt){
			iounmap(msg_addr_virt);
		}
		iounmap(base_addr_virt);
		pci_release_regions(pDev);
		return -ENOMEM;
	}
	memset(pHba, 0, sizeof(adpt_hba));

	/* Append to the global chain under the configuration lock. */
	mutex_lock(&adpt_configuration_lock);

	if(hba_chain != NULL){
		for(p = hba_chain; p->next; p = p->next);
		p->next = pHba;
	} else {
		hba_chain = pHba;
	}
	pHba->next = NULL;
	pHba->unit = hba_count;
	sprintf(pHba->name, "dpti%d", hba_count);
	hba_count++;
	
	mutex_unlock(&adpt_configuration_lock);

	pHba->pDev = pDev;
	pHba->base_addr_phys = base_addr0_phys;

	// Set up the Virtual Base Address of the I2O Device
	pHba->base_addr_virt = base_addr_virt;
	pHba->msg_addr_virt = msg_addr_virt;
	pHba->irq_mask = base_addr_virt+0x30;	/* register offsets within BAR0 */
	pHba->post_port = base_addr_virt+0x40;
	pHba->reply_port = base_addr_virt+0x44;

	pHba->hrt = NULL;
	pHba->lct = NULL;
	pHba->lct_size = 0;
	pHba->status_block = NULL;
	pHba->post_count = 0;
	pHba->state = DPTI_STATE_RESET;	/* blocks queuing until adpt_detect finishes */
	pHba->pDev = pDev;
	pHba->devices = NULL;

	// Initializing the spinlocks
	spin_lock_init(&pHba->state_lock);
	spin_lock_init(&adpt_post_wait_lock);

	if(raptorFlag == 0){
		printk(KERN_INFO"Adaptec I2O RAID controller %d at %p size=%x irq=%d\n", 
			hba_count-1, base_addr_virt, hba_map0_area_size, pDev->irq);
	} else {
		printk(KERN_INFO"Adaptec I2O RAID controller %d irq=%d\n",hba_count-1, pDev->irq);
		printk(KERN_INFO"     BAR0 %p - size= %x\n",base_addr_virt,hba_map0_area_size);
		printk(KERN_INFO"     BAR1 %p - size= %x\n",msg_addr_virt,hba_map1_area_size);
	}

	if (request_irq (pDev->irq, adpt_isr, SA_SHIRQ, pHba->name, pHba)) {
		printk(KERN_ERR"%s: Couldn't register IRQ %d\n", pHba->name, pDev->irq);
		/* delete_hba unwinds the mappings, regions and chain linkage */
		adpt_i2o_delete_hba(pHba);
		return -EINVAL;
	}

	return 0;
}
1017
1018
/*
 * Unlink one controller from the global chain and free everything it
 * owns: IRQ, MMIO mappings, PCI regions, firmware tables, per-device
 * records, and finally the adpt_hba itself.  Takes (and releases) the
 * configuration mutex, so callers must not hold it.  When the last HBA
 * goes away the management chrdev is unregistered too.
 */
static void adpt_i2o_delete_hba(adpt_hba* pHba)
{
	adpt_hba* p1;
	adpt_hba* p2;
	struct i2o_device* d;
	struct i2o_device* next;
	int i;
	int j;
	struct adpt_device* pDev;
	struct adpt_device* pNext;


	mutex_lock(&adpt_configuration_lock);
	// scsi_unregister calls our adpt_release which
	// does a quiese
	if(pHba->host){
		free_irq(pHba->host->irq, pHba);
	}
	p2 = NULL;
	/* Walk the chain keeping a trailing pointer so we can unlink. */
	for( p1 = hba_chain; p1; p2 = p1,p1=p1->next){
		if(p1 == pHba) {
			if(p2) {
				p2->next = p1->next;
			} else {
				hba_chain = p1->next;
			}
			break;
		}
	}

	hba_count--;
	mutex_unlock(&adpt_configuration_lock);

	iounmap(pHba->base_addr_virt);
	pci_release_regions(pHba->pDev);
	if(pHba->msg_addr_virt != pHba->base_addr_virt){
		iounmap(pHba->msg_addr_virt);
	}
	kfree(pHba->hrt);
	kfree(pHba->lct);
	kfree(pHba->status_block);
	kfree(pHba->reply_pool);

	/* Free the flat i2o_device list, then the per-channel lun chains. */
	for(d = pHba->devices; d ; d = next){
		next = d->next;
		kfree(d);
	}
	for(i = 0 ; i < pHba->top_scsi_channel ; i++){
		for(j = 0; j < MAX_ID; j++){
			if(pHba->channel[i].device[j] != NULL){
				for(pDev = pHba->channel[i].device[j]; pDev; pDev = pNext){
					pNext = pDev->next_lun;
					kfree(pDev);
				}
			}
		}
	}
	kfree(pHba);

	if(hba_count <= 0){
		unregister_chrdev(DPTI_I2O_MAJOR, DPT_DRIVER);   
	}
}
1082
1083
/*
 * One-time driver init: print the banner and (when compiled in) hook
 * the reboot notifier.  Called from adpt_detect() before PCI scanning.
 */
static int adpt_init(void)
{
	printk("Loading Adaptec I2O RAID: Version " DPT_I2O_VERSION "\n");
#ifdef REBOOT_NOTIFIER
	register_reboot_notifier(&adpt_reboot_notifier);
#endif

	return 0;
}
1093
1094
1095static struct adpt_device* adpt_find_device(adpt_hba* pHba, u32 chan, u32 id, u32 lun)
1096{
1097 struct adpt_device* d;
1098
1099 if(chan < 0 || chan >= MAX_CHANNEL)
1100 return NULL;
1101
1102 if( pHba->channel[chan].device == NULL){
1103 printk(KERN_DEBUG"Adaptec I2O RAID: Trying to find device before they are allocated\n");
1104 return NULL;
1105 }
1106
1107 d = pHba->channel[chan].device[id];
1108 if(!d || d->tid == 0) {
1109 return NULL;
1110 }
1111
1112 /* If it is the only lun at that address then this should match*/
1113 if(d->scsi_lun == lun){
1114 return d;
1115 }
1116
1117 /* else we need to look through all the luns */
1118 for(d=d->next_lun ; d ; d = d->next_lun){
1119 if(d->scsi_lun == lun){
1120 return d;
1121 }
1122 }
1123 return NULL;
1124}
1125
1126
/*
 * Post an I2O message and sleep until its reply arrives or @timeout
 * (seconds; 0 = wait forever) expires.
 *
 * A wait-queue entry is linked into the global adpt_post_wait_queue and
 * its 15-bit id is stamped into msg[2] (with bit 31 marking "post wait")
 * so the ISR path, adpt_i2o_post_wait_complete(), can find and wake us.
 *
 * Returns the completion status from the reply, -EOPNOTSUPP when the IOP
 * reports the function unsupported, -ETIMEDOUT/-ETIME on timeout, or
 * -ENOMEM.  On -ETIMEDOUT the wait_data entry is deliberately leaked
 * (see comment below) because the IOP may still complete it later.
 */
static int adpt_i2o_post_wait(adpt_hba* pHba, u32* msg, int len, int timeout)
{
	// I used my own version of the WAIT_QUEUE_HEAD
	// to handle some version differences
	// When embedded in the kernel this could go back to the vanilla one
	ADPT_DECLARE_WAIT_QUEUE_HEAD(adpt_wq_i2o_post);
	int status = 0;
	ulong flags = 0;
	struct adpt_i2o_post_wait_data *p1, *p2;
	struct adpt_i2o_post_wait_data *wait_data =
		kmalloc(sizeof(struct adpt_i2o_post_wait_data),GFP_KERNEL);
	DECLARE_WAITQUEUE(wait, current);

	if (!wait_data)
		return -ENOMEM;

	/*
	 * The spin locking is needed to keep anyone from playing
	 * with the queue pointers and id while we do the same
	 */
	spin_lock_irqsave(&adpt_post_wait_lock, flags);
	// TODO we need a MORE unique way of getting ids
	// to support async LCT get
	wait_data->next = adpt_post_wait_queue;
	adpt_post_wait_queue = wait_data;
	adpt_post_wait_id++;
	adpt_post_wait_id &= 0x7fff;
	wait_data->id = adpt_post_wait_id;
	spin_unlock_irqrestore(&adpt_post_wait_lock, flags);

	wait_data->wq = &adpt_wq_i2o_post;
	wait_data->status = -ETIMEDOUT;

	add_wait_queue(&adpt_wq_i2o_post, &wait);

	/* Tag the transaction context: bit 31 = post-wait, low 15 bits = id. */
	msg[2] |= 0x80000000 | ((u32)wait_data->id);
	timeout *= HZ;
	if((status = adpt_i2o_post_this(pHba, msg, len)) == 0){
		set_current_state(TASK_INTERRUPTIBLE);
		/* Drop the host lock while sleeping so the ISR can run. */
		if(pHba->host)
			spin_unlock_irq(pHba->host->host_lock);
		if (!timeout)
			schedule();
		else{
			timeout = schedule_timeout(timeout);
			if (timeout == 0) {
				// I/O issued, but cannot get result in
				// specified time. Freeing resources is
				// dangerous.
				status = -ETIME;
			}
		}
		if(pHba->host)
			spin_lock_irq(pHba->host->host_lock);
	}
	remove_wait_queue(&adpt_wq_i2o_post, &wait);

	if(status == -ETIMEDOUT){
		printk(KERN_INFO"dpti%d: POST WAIT TIMEOUT\n",pHba->unit);
		// We will have to free the wait_data memory during shutdown
		return status;
	}

	/* Remove the entry from the queue. */
	p2 = NULL;
	spin_lock_irqsave(&adpt_post_wait_lock, flags);
	for(p1 = adpt_post_wait_queue; p1; p2 = p1, p1 = p1->next) {
		if(p1 == wait_data) {
			if(p1->status == I2O_DETAIL_STATUS_UNSUPPORTED_FUNCTION ) {
				status = -EOPNOTSUPP;
			}
			if(p2) {
				p2->next = p1->next;
			} else {
				adpt_post_wait_queue = p1->next;
			}
			break;
		}
	}
	spin_unlock_irqrestore(&adpt_post_wait_lock, flags);

	kfree(wait_data);

	return status;
}
1212
1213
1214static s32 adpt_i2o_post_this(adpt_hba* pHba, u32* data, int len)
1215{
1216
1217 u32 m = EMPTY_QUEUE;
1218 u32 __iomem *msg;
1219 ulong timeout = jiffies + 30*HZ;
1220 do {
1221 rmb();
1222 m = readl(pHba->post_port);
1223 if (m != EMPTY_QUEUE) {
1224 break;
1225 }
1226 if(time_after(jiffies,timeout)){
1227 printk(KERN_WARNING"dpti%d: Timeout waiting for message frame!\n", pHba->unit);
1228 return -ETIMEDOUT;
1229 }
a9a3047d 1230 schedule_timeout_uninterruptible(1);
1da177e4
LT
1231 } while(m == EMPTY_QUEUE);
1232
1233 msg = pHba->msg_addr_virt + m;
1234 memcpy_toio(msg, data, len);
1235 wmb();
1236
1237 //post message
1238 writel(m, pHba->post_port);
1239 wmb();
1240
1241 return 0;
1242}
1243
1244
1245static void adpt_i2o_post_wait_complete(u32 context, int status)
1246{
1247 struct adpt_i2o_post_wait_data *p1 = NULL;
1248 /*
1249 * We need to search through the adpt_post_wait
1250 * queue to see if the given message is still
1251 * outstanding. If not, it means that the IOP
1252 * took longer to respond to the message than we
1253 * had allowed and timer has already expired.
1254 * Not much we can do about that except log
1255 * it for debug purposes, increase timeout, and recompile
1256 *
1257 * Lock needed to keep anyone from moving queue pointers
1258 * around while we're looking through them.
1259 */
1260
1261 context &= 0x7fff;
1262
1263 spin_lock(&adpt_post_wait_lock);
1264 for(p1 = adpt_post_wait_queue; p1; p1 = p1->next) {
1265 if(p1->id == context) {
1266 p1->status = status;
1267 spin_unlock(&adpt_post_wait_lock);
1268 wake_up_interruptible(p1->wq);
1269 return;
1270 }
1271 }
1272 spin_unlock(&adpt_post_wait_lock);
1273 // If this happens we lose commands that probably really completed
1274 printk(KERN_DEBUG"dpti: Could Not find task %d in wait queue\n",context);
1275 printk(KERN_DEBUG" Tasks in wait queue:\n");
1276 for(p1 = adpt_post_wait_queue; p1; p1 = p1->next) {
1277 printk(KERN_DEBUG" %d\n",p1->id);
1278 }
1279 return;
1280}
1281
1282static s32 adpt_i2o_reset_hba(adpt_hba* pHba)
1283{
1284 u32 msg[8];
1285 u8* status;
1286 u32 m = EMPTY_QUEUE ;
1287 ulong timeout = jiffies + (TMOUT_IOPRESET*HZ);
1288
1289 if(pHba->initialized == FALSE) { // First time reset should be quick
1290 timeout = jiffies + (25*HZ);
1291 } else {
1292 adpt_i2o_quiesce_hba(pHba);
1293 }
1294
1295 do {
1296 rmb();
1297 m = readl(pHba->post_port);
1298 if (m != EMPTY_QUEUE) {
1299 break;
1300 }
1301 if(time_after(jiffies,timeout)){
1302 printk(KERN_WARNING"Timeout waiting for message!\n");
1303 return -ETIMEDOUT;
1304 }
a9a3047d 1305 schedule_timeout_uninterruptible(1);
1da177e4
LT
1306 } while (m == EMPTY_QUEUE);
1307
1308 status = (u8*)kmalloc(4, GFP_KERNEL|ADDR32);
1309 if(status == NULL) {
1310 adpt_send_nop(pHba, m);
1311 printk(KERN_ERR"IOP reset failed - no free memory.\n");
1312 return -ENOMEM;
1313 }
1314 memset(status,0,4);
1315
1316 msg[0]=EIGHT_WORD_MSG_SIZE|SGL_OFFSET_0;
1317 msg[1]=I2O_CMD_ADAPTER_RESET<<24|HOST_TID<<12|ADAPTER_TID;
1318 msg[2]=0;
1319 msg[3]=0;
1320 msg[4]=0;
1321 msg[5]=0;
1322 msg[6]=virt_to_bus(status);
1323 msg[7]=0;
1324
1325 memcpy_toio(pHba->msg_addr_virt+m, msg, sizeof(msg));
1326 wmb();
1327 writel(m, pHba->post_port);
1328 wmb();
1329
1330 while(*status == 0){
1331 if(time_after(jiffies,timeout)){
1332 printk(KERN_WARNING"%s: IOP Reset Timeout\n",pHba->name);
1333 kfree(status);
1334 return -ETIMEDOUT;
1335 }
1336 rmb();
a9a3047d 1337 schedule_timeout_uninterruptible(1);
1da177e4
LT
1338 }
1339
1340 if(*status == 0x01 /*I2O_EXEC_IOP_RESET_IN_PROGRESS*/) {
1341 PDEBUG("%s: Reset in progress...\n", pHba->name);
1342 // Here we wait for message frame to become available
1343 // indicated that reset has finished
1344 do {
1345 rmb();
1346 m = readl(pHba->post_port);
1347 if (m != EMPTY_QUEUE) {
1348 break;
1349 }
1350 if(time_after(jiffies,timeout)){
1351 printk(KERN_ERR "%s:Timeout waiting for IOP Reset.\n",pHba->name);
1352 return -ETIMEDOUT;
1353 }
a9a3047d 1354 schedule_timeout_uninterruptible(1);
1da177e4
LT
1355 } while (m == EMPTY_QUEUE);
1356 // Flush the offset
1357 adpt_send_nop(pHba, m);
1358 }
1359 adpt_i2o_status_get(pHba);
1360 if(*status == 0x02 ||
1361 pHba->status_block->iop_state != ADAPTER_STATE_RESET) {
1362 printk(KERN_WARNING"%s: Reset reject, trying to clear\n",
1363 pHba->name);
1364 } else {
1365 PDEBUG("%s: Reset completed.\n", pHba->name);
1366 }
1367
1368 kfree(status);
1369#ifdef UARTDELAY
1370 // This delay is to allow someone attached to the card through the debug UART to
1371 // set up the dump levels that they want before the rest of the initialization sequence
1372 adpt_delay(20000);
1373#endif
1374 return 0;
1375}
1376
1377
1378static int adpt_i2o_parse_lct(adpt_hba* pHba)
1379{
1380 int i;
1381 int max;
1382 int tid;
1383 struct i2o_device *d;
1384 i2o_lct *lct = pHba->lct;
1385 u8 bus_no = 0;
1386 s16 scsi_id;
1387 s16 scsi_lun;
1388 u32 buf[10]; // larger than 7, or 8 ...
1389 struct adpt_device* pDev;
1390
1391 if (lct == NULL) {
1392 printk(KERN_ERR "%s: LCT is empty???\n",pHba->name);
1393 return -1;
1394 }
1395
1396 max = lct->table_size;
1397 max -= 3;
1398 max /= 9;
1399
1400 for(i=0;i<max;i++) {
1401 if( lct->lct_entry[i].user_tid != 0xfff){
1402 /*
1403 * If we have hidden devices, we need to inform the upper layers about
1404 * the possible maximum id reference to handle device access when
1405 * an array is disassembled. This code has no other purpose but to
1406 * allow us future access to devices that are currently hidden
1407 * behind arrays, hotspares or have not been configured (JBOD mode).
1408 */
1409 if( lct->lct_entry[i].class_id != I2O_CLASS_RANDOM_BLOCK_STORAGE &&
1410 lct->lct_entry[i].class_id != I2O_CLASS_SCSI_PERIPHERAL &&
1411 lct->lct_entry[i].class_id != I2O_CLASS_FIBRE_CHANNEL_PERIPHERAL ){
1412 continue;
1413 }
1414 tid = lct->lct_entry[i].tid;
1415 // I2O_DPT_DEVICE_INFO_GROUP_NO;
1416 if(adpt_i2o_query_scalar(pHba, tid, 0x8000, -1, buf, 32)<0) {
1417 continue;
1418 }
1419 bus_no = buf[0]>>16;
1420 scsi_id = buf[1];
1421 scsi_lun = (buf[2]>>8 )&0xff;
1422 if(bus_no >= MAX_CHANNEL) { // Something wrong skip it
1423 printk(KERN_WARNING"%s: Channel number %d out of range \n", pHba->name, bus_no);
1424 continue;
1425 }
1426 if (scsi_id >= MAX_ID){
1427 printk(KERN_WARNING"%s: SCSI ID %d out of range \n", pHba->name, bus_no);
1428 continue;
1429 }
1430 if(bus_no > pHba->top_scsi_channel){
1431 pHba->top_scsi_channel = bus_no;
1432 }
1433 if(scsi_id > pHba->top_scsi_id){
1434 pHba->top_scsi_id = scsi_id;
1435 }
1436 if(scsi_lun > pHba->top_scsi_lun){
1437 pHba->top_scsi_lun = scsi_lun;
1438 }
1439 continue;
1440 }
1441 d = (struct i2o_device *)kmalloc(sizeof(struct i2o_device), GFP_KERNEL);
1442 if(d==NULL)
1443 {
1444 printk(KERN_CRIT"%s: Out of memory for I2O device data.\n",pHba->name);
1445 return -ENOMEM;
1446 }
1447
1c2fb3f3 1448 d->controller = pHba;
1da177e4
LT
1449 d->next = NULL;
1450
1451 memcpy(&d->lct_data, &lct->lct_entry[i], sizeof(i2o_lct_entry));
1452
1453 d->flags = 0;
1454 tid = d->lct_data.tid;
1455 adpt_i2o_report_hba_unit(pHba, d);
1456 adpt_i2o_install_device(pHba, d);
1457 }
1458 bus_no = 0;
1459 for(d = pHba->devices; d ; d = d->next) {
1460 if(d->lct_data.class_id == I2O_CLASS_BUS_ADAPTER_PORT ||
1461 d->lct_data.class_id == I2O_CLASS_FIBRE_CHANNEL_PORT){
1462 tid = d->lct_data.tid;
1463 // TODO get the bus_no from hrt-but for now they are in order
1464 //bus_no =
1465 if(bus_no > pHba->top_scsi_channel){
1466 pHba->top_scsi_channel = bus_no;
1467 }
1468 pHba->channel[bus_no].type = d->lct_data.class_id;
1469 pHba->channel[bus_no].tid = tid;
1470 if(adpt_i2o_query_scalar(pHba, tid, 0x0200, -1, buf, 28)>=0)
1471 {
1472 pHba->channel[bus_no].scsi_id = buf[1];
1473 PDEBUG("Bus %d - SCSI ID %d.\n", bus_no, buf[1]);
1474 }
1475 // TODO remove - this is just until we get from hrt
1476 bus_no++;
1477 if(bus_no >= MAX_CHANNEL) { // Something wrong skip it
1478 printk(KERN_WARNING"%s: Channel number %d out of range - LCT\n", pHba->name, bus_no);
1479 break;
1480 }
1481 }
1482 }
1483
1484 // Setup adpt_device table
1485 for(d = pHba->devices; d ; d = d->next) {
1486 if(d->lct_data.class_id == I2O_CLASS_RANDOM_BLOCK_STORAGE ||
1487 d->lct_data.class_id == I2O_CLASS_SCSI_PERIPHERAL ||
1488 d->lct_data.class_id == I2O_CLASS_FIBRE_CHANNEL_PERIPHERAL ){
1489
1490 tid = d->lct_data.tid;
1491 scsi_id = -1;
1492 // I2O_DPT_DEVICE_INFO_GROUP_NO;
1493 if(adpt_i2o_query_scalar(pHba, tid, 0x8000, -1, buf, 32)>=0) {
1494 bus_no = buf[0]>>16;
1495 scsi_id = buf[1];
1496 scsi_lun = (buf[2]>>8 )&0xff;
1497 if(bus_no >= MAX_CHANNEL) { // Something wrong skip it
1498 continue;
1499 }
1500 if (scsi_id >= MAX_ID) {
1501 continue;
1502 }
1503 if( pHba->channel[bus_no].device[scsi_id] == NULL){
1504 pDev = kmalloc(sizeof(struct adpt_device),GFP_KERNEL);
1505 if(pDev == NULL) {
1506 return -ENOMEM;
1507 }
1508 pHba->channel[bus_no].device[scsi_id] = pDev;
1509 memset(pDev,0,sizeof(struct adpt_device));
1510 } else {
1511 for( pDev = pHba->channel[bus_no].device[scsi_id];
1512 pDev->next_lun; pDev = pDev->next_lun){
1513 }
1514 pDev->next_lun = kmalloc(sizeof(struct adpt_device),GFP_KERNEL);
1515 if(pDev->next_lun == NULL) {
1516 return -ENOMEM;
1517 }
1518 memset(pDev->next_lun,0,sizeof(struct adpt_device));
1519 pDev = pDev->next_lun;
1520 }
1521 pDev->tid = tid;
1522 pDev->scsi_channel = bus_no;
1523 pDev->scsi_id = scsi_id;
1524 pDev->scsi_lun = scsi_lun;
1525 pDev->pI2o_dev = d;
1526 d->owner = pDev;
1527 pDev->type = (buf[0])&0xff;
1528 pDev->flags = (buf[0]>>8)&0xff;
1529 if(scsi_id > pHba->top_scsi_id){
1530 pHba->top_scsi_id = scsi_id;
1531 }
1532 if(scsi_lun > pHba->top_scsi_lun){
1533 pHba->top_scsi_lun = scsi_lun;
1534 }
1535 }
1536 if(scsi_id == -1){
1537 printk(KERN_WARNING"Could not find SCSI ID for %s\n",
1538 d->lct_data.identity_tag);
1539 }
1540 }
1541 }
1542 return 0;
1543}
1544
1545
1546/*
1547 * Each I2O controller has a chain of devices on it - these match
1548 * the useful parts of the LCT of the board.
1549 */
1550
1551static int adpt_i2o_install_device(adpt_hba* pHba, struct i2o_device *d)
1552{
0b950672 1553 mutex_lock(&adpt_configuration_lock);
1da177e4
LT
1554 d->controller=pHba;
1555 d->owner=NULL;
1556 d->next=pHba->devices;
1557 d->prev=NULL;
1558 if (pHba->devices != NULL){
1559 pHba->devices->prev=d;
1560 }
1561 pHba->devices=d;
1562 *d->dev_name = 0;
1563
0b950672 1564 mutex_unlock(&adpt_configuration_lock);
1da177e4
LT
1565 return 0;
1566}
1567
1568static int adpt_open(struct inode *inode, struct file *file)
1569{
1570 int minor;
1571 adpt_hba* pHba;
1572
1573 //TODO check for root access
1574 //
1575 minor = iminor(inode);
1576 if (minor >= hba_count) {
1577 return -ENXIO;
1578 }
0b950672 1579 mutex_lock(&adpt_configuration_lock);
1da177e4
LT
1580 for (pHba = hba_chain; pHba; pHba = pHba->next) {
1581 if (pHba->unit == minor) {
1582 break; /* found adapter */
1583 }
1584 }
1585 if (pHba == NULL) {
0b950672 1586 mutex_unlock(&adpt_configuration_lock);
1da177e4
LT
1587 return -ENXIO;
1588 }
1589
1590// if(pHba->in_use){
0b950672 1591 // mutex_unlock(&adpt_configuration_lock);
1da177e4
LT
1592// return -EBUSY;
1593// }
1594
1595 pHba->in_use = 1;
0b950672 1596 mutex_unlock(&adpt_configuration_lock);
1da177e4
LT
1597
1598 return 0;
1599}
1600
1601static int adpt_close(struct inode *inode, struct file *file)
1602{
1603 int minor;
1604 adpt_hba* pHba;
1605
1606 minor = iminor(inode);
1607 if (minor >= hba_count) {
1608 return -ENXIO;
1609 }
0b950672 1610 mutex_lock(&adpt_configuration_lock);
1da177e4
LT
1611 for (pHba = hba_chain; pHba; pHba = pHba->next) {
1612 if (pHba->unit == minor) {
1613 break; /* found adapter */
1614 }
1615 }
0b950672 1616 mutex_unlock(&adpt_configuration_lock);
1da177e4
LT
1617 if (pHba == NULL) {
1618 return -ENXIO;
1619 }
1620
1621 pHba->in_use = 0;
1622
1623 return 0;
1624}
1625
1626
/*
 * Handle the I2OUSRCMD ioctl: forward a user-assembled I2O message frame
 * to the controller and copy the reply (plus any scatter/gather data)
 * back to user space.
 *
 * User SG entries are replaced by kernel bounce buffers (32-bit
 * addressable via ADDR32); OUT buffers are pre-filled from user space,
 * IN buffers copied back afterwards.  msg[2]/msg[3] are overwritten with
 * the driver's IOCTL context and kernel reply pointer; the user's
 * original two context words are restored into the reply before copyout.
 *
 * NOTE(review): the "TODO 64bit fix" casts below assume 32-bit kernel
 * pointers fit in a u32 — known limitation of this era of the driver.
 */
static int adpt_i2o_passthru(adpt_hba* pHba, u32 __user *arg)
{
	u32 msg[MAX_MESSAGE_SIZE];
	u32* reply = NULL;
	u32 size = 0;
	u32 reply_size = 0;
	u32 __user *user_msg = arg;
	u32 __user * user_reply = NULL;
	void *sg_list[pHba->sg_tablesize];
	u32 sg_offset = 0;
	u32 sg_count = 0;
	int sg_index = 0;
	u32 i = 0;
	u32 rcode = 0;
	void *p = NULL;
	ulong flags = 0;

	memset(&msg, 0, MAX_MESSAGE_SIZE*4);
	// get user msg size in u32s
	if(get_user(size, &user_msg[0])){
		return -EFAULT;
	}
	size = size>>16;

	user_reply = &user_msg[size];
	if(size > MAX_MESSAGE_SIZE){
		return -EFAULT;
	}
	size *= 4; // Convert to bytes

	/* Copy in the user's I2O command */
	if(copy_from_user(msg, user_msg, size)) {
		return -EFAULT;
	}
	get_user(reply_size, &user_reply[0]);
	reply_size = reply_size>>16;
	if(reply_size > REPLY_FRAME_SIZE){
		reply_size = REPLY_FRAME_SIZE;
	}
	reply_size *= 4;
	reply = kmalloc(REPLY_FRAME_SIZE*4, GFP_KERNEL);
	if(reply == NULL) {
		printk(KERN_WARNING"%s: Could not allocate reply buffer\n",pHba->name);
		return -ENOMEM;
	}
	memset(reply,0,REPLY_FRAME_SIZE*4);
	/* SGL offset lives in bits 4..7 of the message header word. */
	sg_offset = (msg[0]>>4)&0xf;
	msg[2] = 0x40000000; // IOCTL context
	msg[3] = (u32)reply;
	memset(sg_list,0, sizeof(sg_list[0])*pHba->sg_tablesize);
	if(sg_offset) {
		// TODO 64bit fix
		struct sg_simple_element *sg = (struct sg_simple_element*) (msg+sg_offset);
		sg_count = (size - sg_offset*4) / sizeof(struct sg_simple_element);
		if (sg_count > pHba->sg_tablesize){
			printk(KERN_DEBUG"%s:IOCTL SG List too large (%u)\n", pHba->name,sg_count);
			kfree (reply);
			return -EINVAL;
		}

		/* Swap each user SG address for a kernel bounce buffer. */
		for(i = 0; i < sg_count; i++) {
			int sg_size;

			if (!(sg[i].flag_count & 0x10000000 /*I2O_SGL_FLAGS_SIMPLE_ADDRESS_ELEMENT*/)) {
				printk(KERN_DEBUG"%s:Bad SG element %d - not simple (%x)\n",pHba->name,i, sg[i].flag_count);
				rcode = -EINVAL;
				goto cleanup;
			}
			sg_size = sg[i].flag_count & 0xffffff;
			/* Allocate memory for the transfer */
			p = kmalloc(sg_size, GFP_KERNEL|ADDR32);
			if(!p) {
				printk(KERN_DEBUG"%s: Could not allocate SG buffer - size = %d buffer number %d of %d\n",
						pHba->name,sg_size,i,sg_count);
				rcode = -ENOMEM;
				goto cleanup;
			}
			sg_list[sg_index++] = p; // sglist indexed with input frame, not our internal frame.
			/* Copy in the user's SG buffer if necessary */
			if(sg[i].flag_count & 0x04000000 /*I2O_SGL_FLAGS_DIR*/) {
				// TODO 64bit fix
				if (copy_from_user(p,(void __user *)sg[i].addr_bus, sg_size)) {
					printk(KERN_DEBUG"%s: Could not copy SG buf %d FROM user\n",pHba->name,i);
					rcode = -EFAULT;
					goto cleanup;
				}
			}
			//TODO 64bit fix
			sg[i].addr_bus = (u32)virt_to_bus(p);
		}
	}

	/* Post synchronously; retry forever on -ETIMEDOUT. */
	do {
		if(pHba->host)
			spin_lock_irqsave(pHba->host->host_lock, flags);
		// This state stops any new commands from enterring the
		// controller while processing the ioctl
//		pHba->state |= DPTI_STATE_IOCTL;
//		We can't set this now - The scsi subsystem sets host_blocked and
//		the queue empties and stops.  We need a way to restart the queue
		rcode = adpt_i2o_post_wait(pHba, msg, size, FOREVER);
		if (rcode != 0)
			printk("adpt_i2o_passthru: post wait failed %d %p\n",
					rcode, reply);
//		pHba->state &= ~DPTI_STATE_IOCTL;
		if(pHba->host)
			spin_unlock_irqrestore(pHba->host->host_lock, flags);
	} while(rcode == -ETIMEDOUT);

	if(rcode){
		goto cleanup;
	}

	if(sg_offset) {
		/* Copy back the Scatter Gather buffers back to user space */
		u32 j;
		// TODO 64bit fix
		struct sg_simple_element* sg;
		int sg_size;

		// re-acquire the original message to handle correctly the sg copy operation
		memset(&msg, 0, MAX_MESSAGE_SIZE*4);
		// get user msg size in u32s
		if(get_user(size, &user_msg[0])){
			rcode = -EFAULT;
			goto cleanup;
		}
		size = size>>16;
		size *= 4;
		/* Copy in the user's I2O command */
		if (copy_from_user (msg, user_msg, size)) {
			rcode = -EFAULT;
			goto cleanup;
		}
		sg_count = (size - sg_offset*4) / sizeof(struct sg_simple_element);

		// TODO 64bit fix
		sg = (struct sg_simple_element*)(msg + sg_offset);
		for (j = 0; j < sg_count; j++) {
			/* Copy out the SG list to user's buffer if necessary */
			if(! (sg[j].flag_count & 0x4000000 /*I2O_SGL_FLAGS_DIR*/)) {
				sg_size = sg[j].flag_count & 0xffffff;
				// TODO 64bit fix
				if (copy_to_user((void __user *)sg[j].addr_bus,sg_list[j], sg_size)) {
					printk(KERN_WARNING"%s: Could not copy %p TO user %x\n",pHba->name, sg_list[j], sg[j].addr_bus);
					rcode = -EFAULT;
					goto cleanup;
				}
			}
		}
	}

	/* Copy back the reply to user space */
	if (reply_size) {
		// we wrote our own values for context - now restore the user supplied ones
		if(copy_from_user(reply+2, user_msg+2, sizeof(u32)*2)) {
			printk(KERN_WARNING"%s: Could not copy message context FROM user\n",pHba->name);
			rcode = -EFAULT;
		}
		if(copy_to_user(user_reply, reply, reply_size)) {
			printk(KERN_WARNING"%s: Could not copy reply TO user\n",pHba->name);
			rcode = -EFAULT;
		}
	}


cleanup:
	/* On -ETIME/-EINTR the IOP may still DMA into these buffers, so
	 * they are deliberately not freed (see post_wait comments). */
	if (rcode != -ETIME && rcode != -EINTR)
		kfree (reply);
	while(sg_index) {
		if(sg_list[--sg_index]) {
			if (rcode != -ETIME && rcode != -EINTR)
				kfree(sg_list[sg_index]);
		}
	}
	return rcode;
}
1804
1805
/*
 * This routine returns information about the system.  This does not affect
 * any logic and if the info is wrong - it doesn't matter.
 */
1810
/* Get all the info we can not get from kernel services.
 * Fills a sysInfo_S for the DPT_SYSINFO ioctl and copies it to user
 * space.  Returns 0 on success, -EFAULT if the copyout fails. */
static int adpt_system_info(void __user *buffer)
{
	sysInfo_S si;

	memset(&si, 0, sizeof(si));

	si.osType = OS_LINUX;
	/* OS version fields are reported as zero here. */
	si.osMajorVersion = 0;
	si.osMinorVersion = 0;
	si.osRevision = 0;
	si.busType = SI_PCI_BUS;
	si.processorFamily = DPTI_sig.dsProcessorFamily;

	/* Processor type is filled in per architecture; 0xff = unknown. */
#if defined __i386__
	adpt_i386_info(&si);
#elif defined (__ia64__)
	adpt_ia64_info(&si);
#elif defined(__sparc__)
	adpt_sparc_info(&si);
#elif defined (__alpha__)
	adpt_alpha_info(&si);
#else
	si.processorType = 0xff ;
#endif
	if(copy_to_user(buffer, &si, sizeof(si))){
		printk(KERN_WARNING"dpti: Could not copy buffer TO user\n");
		return -EFAULT;
	}

	return 0;
}
1843
#if defined __ia64__
/* Report the CPU type for the DPT_SYSINFO ioctl on IA-64. */
static void adpt_ia64_info(sysInfo_S* si)
{
	// This is all the info we need for now
	// We will add more info as our new
	// management utility requires it
	si->processorType = PROC_IA64;
}
#endif
1853
1854
#if defined __sparc__
/* Report the CPU type for the DPT_SYSINFO ioctl on SPARC. */
static void adpt_sparc_info(sysInfo_S* si)
{
	// This is all the info we need for now
	// We will add more info as our new
	// management utility requires it
	si->processorType = PROC_ULTRASPARC;
}
#endif
1864
#if defined __alpha__
/* Report the CPU type for the DPT_SYSINFO ioctl on Alpha. */
static void adpt_alpha_info(sysInfo_S* si)
{
	// This is all the info we need for now
	// We will add more info as our new
	// management utility requires it
	si->processorType = PROC_ALPHA;
}
#endif
1874
#if defined __i386__

/*
 * Report the x86 CPU family for the DPT_SYSINFO ioctl.  Anything other
 * than a 386 or 486 (including unrecognised families) is reported as a
 * Pentium-class processor, matching the original switch's default.
 */
static void adpt_i386_info(sysInfo_S* si)
{
	if (boot_cpu_data.x86 == CPU_386)
		si->processorType = PROC_386;
	else if (boot_cpu_data.x86 == CPU_486)
		si->processorType = PROC_486;
	else	/* CPU_586 and everything else */
		si->processorType = PROC_PENTIUM;
}

#endif
1899
1900
/*
 * Management ioctl entry point for the driver's character device.
 *
 * Resolves the minor number to an adapter, waits out any in-flight
 * controller reset, then dispatches on @cmd: DPT_SIGNATURE,
 * I2OUSRCMD (message passthru), DPT_CTRLINFO, DPT_SYSINFO,
 * DPT_BLINKLED, I2ORESETCMD, I2ORESCANCMD.  Unknown commands get
 * -EINVAL; missing adapters get -ENXIO.
 */
static int adpt_ioctl(struct inode *inode, struct file *file, uint cmd,
	      ulong arg)
{
	int minor;
	int error = 0;
	adpt_hba* pHba;
	ulong flags = 0;
	void __user *argp = (void __user *)arg;

	minor = iminor(inode);
	if (minor >= DPTI_MAX_HBA){
		return -ENXIO;
	}
	mutex_lock(&adpt_configuration_lock);
	for (pHba = hba_chain; pHba; pHba = pHba->next) {
		if (pHba->unit == minor) {
			break;	/* found adapter */
		}
	}
	mutex_unlock(&adpt_configuration_lock);
	if(pHba == NULL){
		return -ENXIO;
	}

	/* Sleep in 2-jiffy steps until any in-progress reset completes. */
	while((volatile u32) pHba->state & DPTI_STATE_RESET )
		schedule_timeout_uninterruptible(2);

	switch (cmd) {
	// TODO: handle 3 cases:
	case DPT_SIGNATURE:
		if (copy_to_user(argp, &DPTI_sig, sizeof(DPTI_sig))) {
			return -EFAULT;
		}
		break;
	case I2OUSRCMD:
		return adpt_i2o_passthru(pHba, argp);

	case DPT_CTRLINFO:{
		drvrHBAinfo_S HbaInfo;

#define FLG_OSD_PCI_VALID 0x0001
#define FLG_OSD_DMA	  0x0002
#define FLG_OSD_I2O	  0x0004
		memset(&HbaInfo, 0, sizeof(HbaInfo));
		HbaInfo.drvrHBAnum = pHba->unit;
		HbaInfo.baseAddr = (ulong) pHba->base_addr_phys;
		HbaInfo.blinkState = adpt_read_blink_led(pHba);
		HbaInfo.pciBusNum = pHba->pDev->bus->number;
		HbaInfo.pciDeviceNum=PCI_SLOT(pHba->pDev->devfn);
		HbaInfo.Interrupt = pHba->pDev->irq;
		HbaInfo.hbaFlags = FLG_OSD_PCI_VALID | FLG_OSD_DMA | FLG_OSD_I2O;
		if(copy_to_user(argp, &HbaInfo, sizeof(HbaInfo))){
			printk(KERN_WARNING"%s: Could not copy HbaInfo TO user\n",pHba->name);
			return -EFAULT;
		}
		break;
		}
	case DPT_SYSINFO:
		return adpt_system_info(argp);
	case DPT_BLINKLED:{
		u32 value;
		value = (u32)adpt_read_blink_led(pHba);
		if (copy_to_user(argp, &value, sizeof(value))) {
			return -EFAULT;
		}
		break;
		}
	case I2ORESETCMD:
		/* Reset must run with the host lock held. */
		if(pHba->host)
			spin_lock_irqsave(pHba->host->host_lock, flags);
		adpt_hba_reset(pHba);
		if(pHba->host)
			spin_unlock_irqrestore(pHba->host->host_lock, flags);
		break;
	case I2ORESCANCMD:
		adpt_rescan(pHba);
		break;
	default:
		return -EINVAL;
	}

	return error;
}
1984
1985
/*
 * Interrupt handler: drain the reply FIFO while the pending bit is set.
 *
 * Each reply frame is classified by its transaction context word:
 *  - bit 30 (0x40000000): IOCTL passthru — the frame is copied into the
 *    caller's kernel reply buffer (pointer stored in the reply);
 *  - bit 31 (0x80000000): post-wait message — the detail status is
 *    extracted and the sleeper is woken via adpt_i2o_post_wait_complete();
 *  - otherwise: a SCSI command completion handed to adpt_i2o_to_scsi().
 * MSG_FAIL frames carry the original MFA: its context is recovered from
 * the stale frame, which is then returned to the IOP with a NOP.
 */
static irqreturn_t adpt_isr(int irq, void *dev_id, struct pt_regs *regs)
{
	struct scsi_cmnd* cmd;
	adpt_hba* pHba = dev_id;
	u32 m;
	void __iomem *reply;
	u32 status=0;
	u32 context;
	ulong flags = 0;
	int handled = 0;

	if (pHba == NULL){
		printk(KERN_WARNING"adpt_isr: NULL dev_id\n");
		return IRQ_NONE;
	}
	if(pHba->host)
		spin_lock_irqsave(pHba->host->host_lock, flags);

	while( readl(pHba->irq_mask) & I2O_INTERRUPT_PENDING_B) {
		m = readl(pHba->reply_port);
		if(m == EMPTY_QUEUE){
			// Try twice then give up
			rmb();
			m = readl(pHba->reply_port);
			if(m == EMPTY_QUEUE){
				// This really should not happen
				printk(KERN_ERR"dpti: Could not get reply frame\n");
				goto out;
			}
		}
		reply = bus_to_virt(m);

		if (readl(reply) & MSG_FAIL) {
			u32 old_m = readl(reply+28);
			void __iomem *msg;
			u32 old_context;
			PDEBUG("%s: Failed message\n",pHba->name);
			if(old_m >= 0x100000){
				printk(KERN_ERR"%s: Bad preserved MFA (%x)- dropping frame\n",pHba->name,old_m);
				writel(m,pHba->reply_port);
				continue;
			}
			// Transaction context is 0 in failed reply frame
			msg = pHba->msg_addr_virt + old_m;
			old_context = readl(msg+12);
			writel(old_context, reply+12);
			adpt_send_nop(pHba, old_m);
		}
		context = readl(reply+8);
		if(context & 0x40000000){ // IOCTL
			void *p = (void *)readl(reply+12);
			if( p != NULL) {
				memcpy_fromio(p, reply, REPLY_FRAME_SIZE * 4);
			}
			// All IOCTLs will also be post wait
		}
		if(context & 0x80000000){ // Post wait message
			status = readl(reply+16);
			if(status >> 24){
				status &= 0xffff; /* Get detail status */
			} else {
				status = I2O_POST_WAIT_OK;
			}
			/* A SCSI pointer here would mean a mixed-up context. */
			if(!(context & 0x40000000)) {
				cmd = (struct scsi_cmnd*) readl(reply+12);
				if(cmd != NULL) {
					printk(KERN_WARNING"%s: Apparent SCSI cmd in Post Wait Context - cmd=%p context=%x\n", pHba->name, cmd, context);
				}
			}
			adpt_i2o_post_wait_complete(context, status);
		} else { // SCSI message
			cmd = (struct scsi_cmnd*) readl(reply+12);
			if(cmd != NULL){
				if(cmd->serial_number != 0) { // If not timedout
					adpt_i2o_to_scsi(reply, cmd);
				}
			}
		}
		/* Return the frame to the IOP's reply free-list. */
		writel(m, pHba->reply_port);
		wmb();
		rmb();
	}
	handled = 1;
out:	if(pHba->host)
		spin_unlock_irqrestore(pHba->host->host_lock, flags);
	return IRQ_RETVAL(handled);
}
2073
/*
 * Build an I2O_CMD_SCSI_EXEC private message for @cmd and post it to the
 * IOP.  The 16-byte CDB slot is filled from cmd->cmnd, the SCB flag word
 * encodes transfer direction and queueing, and the data buffer
 * (scatter/gather or single) is DMA-mapped into a simple SGL.
 *
 * Returns the result of adpt_i2o_post_this(): 0 when queued, negative on
 * failure.  Unsupported data directions complete the command immediately
 * with INITIATOR_ERROR and return 0.
 */
static s32 adpt_scsi_to_i2o(adpt_hba* pHba, struct scsi_cmnd* cmd, struct adpt_device* d)
{
	int i;
	u32 msg[MAX_MESSAGE_SIZE];
	u32* mptr;
	u32 *lenptr;
	int direction;
	int scsidir;
	u32 len;
	u32 reqlen;
	s32 rcode;

	memset(msg, 0 , sizeof(msg));
	len = cmd->request_bufflen;
	direction = 0x00000000;

	scsidir = 0x00000000; // DATA NO XFER
	if(len) {
		/*
		 * Set SCBFlags to indicate if data is being transferred
		 * in or out, or no data transfer
		 * Note: Do not have to verify index is less than 0 since
		 * cmd->cmnd[0] is an unsigned char
		 */
		switch(cmd->sc_data_direction){
		case DMA_FROM_DEVICE:
			scsidir =0x40000000; // DATA IN (iop<--dev)
			break;
		case DMA_TO_DEVICE:
			direction=0x04000000; // SGL OUT
			scsidir =0x80000000; // DATA OUT (iop-->dev)
			break;
		case DMA_NONE:
			break;
		case DMA_BIDIRECTIONAL:
			scsidir =0x40000000; // DATA IN (iop<--dev)
			// Assume In - and continue;
			break;
		default:
			printk(KERN_WARNING"%s: scsi opcode 0x%x not supported.\n",
			     pHba->name, cmd->cmnd[0]);
			cmd->result = (DID_OK <<16) | (INITIATOR_ERROR << 8);
			cmd->scsi_done(cmd);
			return 	0;
		}
	}
	// msg[0] is set later
	// I2O_CMD_SCSI_EXEC
	msg[1] = ((0xff<<24)|(HOST_TID<<12)|d->tid);
	msg[2] = 0;
	msg[3] = (u32)cmd;	/* We want the SCSI control block back */
	// Our cards use the transaction context as the tag for queueing
	// Adaptec/DPT Private stuff
	msg[4] = I2O_CMD_SCSI_EXEC|(DPT_ORGANIZATION_ID<<16);
	msg[5] = d->tid;
	/* Direction, disconnect ok | sense data | simple queue , CDBLen */
	// I2O_SCB_FLAG_ENABLE_DISCONNECT |
	// I2O_SCB_FLAG_SIMPLE_QUEUE_TAG |
	// I2O_SCB_FLAG_SENSE_DATA_IN_MESSAGE;
	msg[6] = scsidir|0x20a00000|cmd->cmd_len;

	mptr=msg+7;

	// Write SCSI command into the message - always 16 byte block
	memset(mptr, 0, 16);
	memcpy(mptr, cmd->cmnd, cmd->cmd_len);
	mptr+=4;
	lenptr=mptr++;		/* Remember me - fill in when we know */
	reqlen = 14;		// SINGLE SGE
	/* Now fill in the SGList and command */
	if(cmd->use_sg) {
		struct scatterlist *sg = (struct scatterlist *)cmd->request_buffer;
		int sg_count = pci_map_sg(pHba->pDev, sg, cmd->use_sg,
				cmd->sc_data_direction);


		/* One simple SGE per mapped segment; sum gives total length. */
		len = 0;
		for(i = 0 ; i < sg_count; i++) {
			*mptr++ = direction|0x10000000|sg_dma_len(sg);
			len+=sg_dma_len(sg);
			*mptr++ = sg_dma_address(sg);
			sg++;
		}
		/* Make this an end of list */
		mptr[-2] = direction|0xD0000000|sg_dma_len(sg-1);
		reqlen = mptr - msg;
		*lenptr = len;

		if(cmd->underflow && len != cmd->underflow){
			printk(KERN_WARNING"Cmd len %08X Cmd underflow %08X\n",
				len, cmd->underflow);
		}
	} else {
		*lenptr = len = cmd->request_bufflen;
		if(len == 0) {
			reqlen = 12;
		} else {
			*mptr++ = 0xD0000000|direction|cmd->request_bufflen;
			*mptr++ = pci_map_single(pHba->pDev,
				cmd->request_buffer,
				cmd->request_bufflen,
				cmd->sc_data_direction);
		}
	}

	/* Stick the headers on */
	msg[0] = reqlen<<16 | ((reqlen > 12) ? SGL_OFFSET_12 : SGL_OFFSET_0);

	// Send it on it's way
	rcode = adpt_i2o_post_this(pHba, msg, reqlen<<2);
	if (rcode == 0) {
		return 0;
	}
	return rcode;
}
2189
2190
2191static s32 adpt_scsi_register(adpt_hba* pHba,struct scsi_host_template * sht)
2192{
2193 struct Scsi_Host *host = NULL;
2194
2195 host = scsi_register(sht, sizeof(adpt_hba*));
2196 if (host == NULL) {
2197 printk ("%s: scsi_register returned NULL\n",pHba->name);
2198 return -1;
2199 }
2200 host->hostdata[0] = (unsigned long)pHba;
2201 pHba->host = host;
2202
2203 host->irq = pHba->pDev->irq;
2204 /* no IO ports, so don't have to set host->io_port and
2205 * host->n_io_port
2206 */
2207 host->io_port = 0;
2208 host->n_io_port = 0;
2209 /* see comments in hosts.h */
2210 host->max_id = 16;
2211 host->max_lun = 256;
2212 host->max_channel = pHba->top_scsi_channel + 1;
2213 host->cmd_per_lun = 1;
2214 host->unique_id = (uint) pHba;
2215 host->sg_tablesize = pHba->sg_tablesize;
2216 host->can_queue = pHba->post_fifo_size;
2217
2218 return 0;
2219}
2220
2221
1c2fb3f3 2222static s32 adpt_i2o_to_scsi(void __iomem *reply, struct scsi_cmnd* cmd)
1da177e4
LT
2223{
2224 adpt_hba* pHba;
2225 u32 hba_status;
2226 u32 dev_status;
2227 u32 reply_flags = readl(reply) & 0xff00; // Leave it shifted up 8 bits
2228 // I know this would look cleaner if I just read bytes
2229 // but the model I have been using for all the rest of the
2230 // io is in 4 byte words - so I keep that model
2231 u16 detailed_status = readl(reply+16) &0xffff;
2232 dev_status = (detailed_status & 0xff);
2233 hba_status = detailed_status >> 8;
2234
2235 // calculate resid for sg
2236 cmd->resid = cmd->request_bufflen - readl(reply+5);
2237
2238 pHba = (adpt_hba*) cmd->device->host->hostdata[0];
2239
2240 cmd->sense_buffer[0] = '\0'; // initialize sense valid flag to false
2241
2242 if(!(reply_flags & MSG_FAIL)) {
2243 switch(detailed_status & I2O_SCSI_DSC_MASK) {
2244 case I2O_SCSI_DSC_SUCCESS:
2245 cmd->result = (DID_OK << 16);
2246 // handle underflow
2247 if(readl(reply+5) < cmd->underflow ) {
2248 cmd->result = (DID_ERROR <<16);
2249 printk(KERN_WARNING"%s: SCSI CMD underflow\n",pHba->name);
2250 }
2251 break;
2252 case I2O_SCSI_DSC_REQUEST_ABORTED:
2253 cmd->result = (DID_ABORT << 16);
2254 break;
2255 case I2O_SCSI_DSC_PATH_INVALID:
2256 case I2O_SCSI_DSC_DEVICE_NOT_PRESENT:
2257 case I2O_SCSI_DSC_SELECTION_TIMEOUT:
2258 case I2O_SCSI_DSC_COMMAND_TIMEOUT:
2259 case I2O_SCSI_DSC_NO_ADAPTER:
2260 case I2O_SCSI_DSC_RESOURCE_UNAVAILABLE:
2261 printk(KERN_WARNING"%s: SCSI Timeout-Device (%d,%d,%d) hba status=0x%x, dev status=0x%x, cmd=0x%x\n",
2262 pHba->name, (u32)cmd->device->channel, (u32)cmd->device->id, (u32)cmd->device->lun, hba_status, dev_status, cmd->cmnd[0]);
2263 cmd->result = (DID_TIME_OUT << 16);
2264 break;
2265 case I2O_SCSI_DSC_ADAPTER_BUSY:
2266 case I2O_SCSI_DSC_BUS_BUSY:
2267 cmd->result = (DID_BUS_BUSY << 16);
2268 break;
2269 case I2O_SCSI_DSC_SCSI_BUS_RESET:
2270 case I2O_SCSI_DSC_BDR_MESSAGE_SENT:
2271 cmd->result = (DID_RESET << 16);
2272 break;
2273 case I2O_SCSI_DSC_PARITY_ERROR_FAILURE:
2274 printk(KERN_WARNING"%s: SCSI CMD parity error\n",pHba->name);
2275 cmd->result = (DID_PARITY << 16);
2276 break;
2277 case I2O_SCSI_DSC_UNABLE_TO_ABORT:
2278 case I2O_SCSI_DSC_COMPLETE_WITH_ERROR:
2279 case I2O_SCSI_DSC_UNABLE_TO_TERMINATE:
2280 case I2O_SCSI_DSC_MR_MESSAGE_RECEIVED:
2281 case I2O_SCSI_DSC_AUTOSENSE_FAILED:
2282 case I2O_SCSI_DSC_DATA_OVERRUN:
2283 case I2O_SCSI_DSC_UNEXPECTED_BUS_FREE:
2284 case I2O_SCSI_DSC_SEQUENCE_FAILURE:
2285 case I2O_SCSI_DSC_REQUEST_LENGTH_ERROR:
2286 case I2O_SCSI_DSC_PROVIDE_FAILURE:
2287 case I2O_SCSI_DSC_REQUEST_TERMINATED:
2288 case I2O_SCSI_DSC_IDE_MESSAGE_SENT:
2289 case I2O_SCSI_DSC_UNACKNOWLEDGED_EVENT:
2290 case I2O_SCSI_DSC_MESSAGE_RECEIVED:
2291 case I2O_SCSI_DSC_INVALID_CDB:
2292 case I2O_SCSI_DSC_LUN_INVALID:
2293 case I2O_SCSI_DSC_SCSI_TID_INVALID:
2294 case I2O_SCSI_DSC_FUNCTION_UNAVAILABLE:
2295 case I2O_SCSI_DSC_NO_NEXUS:
2296 case I2O_SCSI_DSC_CDB_RECEIVED:
2297 case I2O_SCSI_DSC_LUN_ALREADY_ENABLED:
2298 case I2O_SCSI_DSC_QUEUE_FROZEN:
2299 case I2O_SCSI_DSC_REQUEST_INVALID:
2300 default:
2301 printk(KERN_WARNING"%s: SCSI error %0x-Device(%d,%d,%d) hba_status=0x%x, dev_status=0x%x, cmd=0x%x\n",
2302 pHba->name, detailed_status & I2O_SCSI_DSC_MASK, (u32)cmd->device->channel, (u32)cmd->device->id, (u32)cmd->device->lun,
2303 hba_status, dev_status, cmd->cmnd[0]);
2304 cmd->result = (DID_ERROR << 16);
2305 break;
2306 }
2307
2308 // copy over the request sense data if it was a check
2309 // condition status
2310 if(dev_status == 0x02 /*CHECK_CONDITION*/) {
2311 u32 len = sizeof(cmd->sense_buffer);
2312 len = (len > 40) ? 40 : len;
2313 // Copy over the sense data
1c2fb3f3 2314 memcpy_fromio(cmd->sense_buffer, (reply+28) , len);
1da177e4
LT
2315 if(cmd->sense_buffer[0] == 0x70 /* class 7 */ &&
2316 cmd->sense_buffer[2] == DATA_PROTECT ){
2317 /* This is to handle an array failed */
2318 cmd->result = (DID_TIME_OUT << 16);
2319 printk(KERN_WARNING"%s: SCSI Data Protect-Device (%d,%d,%d) hba_status=0x%x, dev_status=0x%x, cmd=0x%x\n",
2320 pHba->name, (u32)cmd->device->channel, (u32)cmd->device->id, (u32)cmd->device->lun,
2321 hba_status, dev_status, cmd->cmnd[0]);
2322
2323 }
2324 }
2325 } else {
2326 /* In this condtion we could not talk to the tid
2327 * the card rejected it. We should signal a retry
2328 * for a limitted number of retries.
2329 */
2330 cmd->result = (DID_TIME_OUT << 16);
2331 printk(KERN_WARNING"%s: I2O MSG_FAIL - Device (%d,%d,%d) tid=%d, cmd=0x%x\n",
2332 pHba->name, (u32)cmd->device->channel, (u32)cmd->device->id, (u32)cmd->device->lun,
2333 ((struct adpt_device*)(cmd->device->hostdata))->tid, cmd->cmnd[0]);
2334 }
2335
2336 cmd->result |= (dev_status);
2337
2338 if(cmd->scsi_done != NULL){
2339 cmd->scsi_done(cmd);
2340 }
2341 return cmd->result;
2342}
2343
2344
2345static s32 adpt_rescan(adpt_hba* pHba)
2346{
2347 s32 rcode;
2348 ulong flags = 0;
2349
2350 if(pHba->host)
2351 spin_lock_irqsave(pHba->host->host_lock, flags);
2352 if ((rcode=adpt_i2o_lct_get(pHba)) < 0)
2353 goto out;
2354 if ((rcode=adpt_i2o_reparse_lct(pHba)) < 0)
2355 goto out;
2356 rcode = 0;
2357out: if(pHba->host)
2358 spin_unlock_irqrestore(pHba->host->host_lock, flags);
2359 return rcode;
2360}
2361
2362
2363static s32 adpt_i2o_reparse_lct(adpt_hba* pHba)
2364{
2365 int i;
2366 int max;
2367 int tid;
2368 struct i2o_device *d;
2369 i2o_lct *lct = pHba->lct;
2370 u8 bus_no = 0;
2371 s16 scsi_id;
2372 s16 scsi_lun;
2373 u32 buf[10]; // at least 8 u32's
2374 struct adpt_device* pDev = NULL;
2375 struct i2o_device* pI2o_dev = NULL;
2376
2377 if (lct == NULL) {
2378 printk(KERN_ERR "%s: LCT is empty???\n",pHba->name);
2379 return -1;
2380 }
2381
2382 max = lct->table_size;
2383 max -= 3;
2384 max /= 9;
2385
2386 // Mark each drive as unscanned
2387 for (d = pHba->devices; d; d = d->next) {
2388 pDev =(struct adpt_device*) d->owner;
2389 if(!pDev){
2390 continue;
2391 }
2392 pDev->state |= DPTI_DEV_UNSCANNED;
2393 }
2394
2395 printk(KERN_INFO "%s: LCT has %d entries.\n", pHba->name,max);
2396
2397 for(i=0;i<max;i++) {
2398 if( lct->lct_entry[i].user_tid != 0xfff){
2399 continue;
2400 }
2401
2402 if( lct->lct_entry[i].class_id == I2O_CLASS_RANDOM_BLOCK_STORAGE ||
2403 lct->lct_entry[i].class_id == I2O_CLASS_SCSI_PERIPHERAL ||
2404 lct->lct_entry[i].class_id == I2O_CLASS_FIBRE_CHANNEL_PERIPHERAL ){
2405 tid = lct->lct_entry[i].tid;
2406 if(adpt_i2o_query_scalar(pHba, tid, 0x8000, -1, buf, 32)<0) {
2407 printk(KERN_ERR"%s: Could not query device\n",pHba->name);
2408 continue;
2409 }
2410 bus_no = buf[0]>>16;
2411 scsi_id = buf[1];
2412 scsi_lun = (buf[2]>>8 )&0xff;
2413 pDev = pHba->channel[bus_no].device[scsi_id];
2414 /* da lun */
2415 while(pDev) {
2416 if(pDev->scsi_lun == scsi_lun) {
2417 break;
2418 }
2419 pDev = pDev->next_lun;
2420 }
2421 if(!pDev ) { // Something new add it
2422 d = (struct i2o_device *)kmalloc(sizeof(struct i2o_device), GFP_KERNEL);
2423 if(d==NULL)
2424 {
2425 printk(KERN_CRIT "Out of memory for I2O device data.\n");
2426 return -ENOMEM;
2427 }
2428
1c2fb3f3 2429 d->controller = pHba;
1da177e4
LT
2430 d->next = NULL;
2431
2432 memcpy(&d->lct_data, &lct->lct_entry[i], sizeof(i2o_lct_entry));
2433
2434 d->flags = 0;
2435 adpt_i2o_report_hba_unit(pHba, d);
2436 adpt_i2o_install_device(pHba, d);
2437
2438 if(bus_no >= MAX_CHANNEL) { // Something wrong skip it
2439 printk(KERN_WARNING"%s: Channel number %d out of range \n", pHba->name, bus_no);
2440 continue;
2441 }
2442 pDev = pHba->channel[bus_no].device[scsi_id];
2443 if( pDev == NULL){
2444 pDev = kmalloc(sizeof(struct adpt_device),GFP_KERNEL);
2445 if(pDev == NULL) {
2446 return -ENOMEM;
2447 }
2448 pHba->channel[bus_no].device[scsi_id] = pDev;
2449 } else {
2450 while (pDev->next_lun) {
2451 pDev = pDev->next_lun;
2452 }
2453 pDev = pDev->next_lun = kmalloc(sizeof(struct adpt_device),GFP_KERNEL);
2454 if(pDev == NULL) {
2455 return -ENOMEM;
2456 }
2457 }
2458 memset(pDev,0,sizeof(struct adpt_device));
2459 pDev->tid = d->lct_data.tid;
2460 pDev->scsi_channel = bus_no;
2461 pDev->scsi_id = scsi_id;
2462 pDev->scsi_lun = scsi_lun;
2463 pDev->pI2o_dev = d;
2464 d->owner = pDev;
2465 pDev->type = (buf[0])&0xff;
2466 pDev->flags = (buf[0]>>8)&0xff;
2467 // Too late, SCSI system has made up it's mind, but what the hey ...
2468 if(scsi_id > pHba->top_scsi_id){
2469 pHba->top_scsi_id = scsi_id;
2470 }
2471 if(scsi_lun > pHba->top_scsi_lun){
2472 pHba->top_scsi_lun = scsi_lun;
2473 }
2474 continue;
2475 } // end of new i2o device
2476
2477 // We found an old device - check it
2478 while(pDev) {
2479 if(pDev->scsi_lun == scsi_lun) {
2480 if(!scsi_device_online(pDev->pScsi_dev)) {
2481 printk(KERN_WARNING"%s: Setting device (%d,%d,%d) back online\n",
2482 pHba->name,bus_no,scsi_id,scsi_lun);
2483 if (pDev->pScsi_dev) {
2484 scsi_device_set_state(pDev->pScsi_dev, SDEV_RUNNING);
2485 }
2486 }
2487 d = pDev->pI2o_dev;
2488 if(d->lct_data.tid != tid) { // something changed
2489 pDev->tid = tid;
2490 memcpy(&d->lct_data, &lct->lct_entry[i], sizeof(i2o_lct_entry));
2491 if (pDev->pScsi_dev) {
2492 pDev->pScsi_dev->changed = TRUE;
2493 pDev->pScsi_dev->removable = TRUE;
2494 }
2495 }
2496 // Found it - mark it scanned
2497 pDev->state = DPTI_DEV_ONLINE;
2498 break;
2499 }
2500 pDev = pDev->next_lun;
2501 }
2502 }
2503 }
2504 for (pI2o_dev = pHba->devices; pI2o_dev; pI2o_dev = pI2o_dev->next) {
2505 pDev =(struct adpt_device*) pI2o_dev->owner;
2506 if(!pDev){
2507 continue;
2508 }
2509 // Drive offline drives that previously existed but could not be found
2510 // in the LCT table
2511 if (pDev->state & DPTI_DEV_UNSCANNED){
2512 pDev->state = DPTI_DEV_OFFLINE;
2513 printk(KERN_WARNING"%s: Device (%d,%d,%d) offline\n",pHba->name,pDev->scsi_channel,pDev->scsi_id,pDev->scsi_lun);
2514 if (pDev->pScsi_dev) {
2515 scsi_device_set_state(pDev->pScsi_dev, SDEV_OFFLINE);
2516 }
2517 }
2518 }
2519 return 0;
2520}
2521
/*
 * Complete every command still queued on any device of this host with
 * (DID_OK << 16) | (QUEUE_FULL << 1), so the midlayer re-issues them.
 * Used when commands posted to the IOP will never get a reply (e.g.
 * after an adapter reset).
 */
static void adpt_fail_posted_scbs(adpt_hba* pHba)
{
	struct scsi_cmnd* cmd = NULL;
	struct scsi_device* d = NULL;

	shost_for_each_device(d, pHba->host) {
		unsigned long flags;
		spin_lock_irqsave(&d->list_lock, flags);
		list_for_each_entry(cmd, &d->cmd_list, list) {
			if(cmd->serial_number == 0){
				/* presumably not yet started - skip; confirm
				 * serial_number==0 means inactive here */
				continue;
			}
			cmd->result = (DID_OK << 16) | (QUEUE_FULL <<1);
			/* NOTE(review): scsi_done is invoked with list_lock
			 * held - confirm that is safe for this completion */
			cmd->scsi_done(cmd);
		}
		spin_unlock_irqrestore(&d->list_lock, flags);
	}
}
2540
2541
2542/*============================================================================
2543 * Routines from i2o subsystem
2544 *============================================================================
2545 */
2546
2547
2548
/*
 * Bring an I2O controller into HOLD state. See the spec.
 *
 * An already-initialized controller is queried (and reset if it does
 * not answer, or if it is in any active/failed state) until it reports
 * RESET; a fresh controller is simply reset.  The outbound queue is
 * then initialized and the HRT fetched, which leaves the IOP in HOLD.
 *
 * Returns 0 on success, -1 or the reset error code on failure.
 */
static int adpt_i2o_activate_hba(adpt_hba* pHba)
{
	int rcode;

	if(pHba->initialized ) {
		if (adpt_i2o_status_get(pHba) < 0) {
			/* No status reply - reset and ask again. */
			if((rcode = adpt_i2o_reset_hba(pHba)) != 0){
				printk(KERN_WARNING"%s: Could NOT reset.\n", pHba->name);
				return rcode;
			}
			if (adpt_i2o_status_get(pHba) < 0) {
				printk(KERN_INFO "HBA not responding.\n");
				return -1;
			}
		}

		/* A FAULTED IOP is unrecoverable here. */
		if(pHba->status_block->iop_state == ADAPTER_STATE_FAULTED) {
			printk(KERN_CRIT "%s: hardware fault\n", pHba->name);
			return -1;
		}

		/* Any other non-RESET state: force it back to RESET. */
		if (pHba->status_block->iop_state == ADAPTER_STATE_READY ||
		    pHba->status_block->iop_state == ADAPTER_STATE_OPERATIONAL ||
		    pHba->status_block->iop_state == ADAPTER_STATE_HOLD ||
		    pHba->status_block->iop_state == ADAPTER_STATE_FAILED) {
			adpt_i2o_reset_hba(pHba);
			if (adpt_i2o_status_get(pHba) < 0 || pHba->status_block->iop_state != ADAPTER_STATE_RESET) {
				printk(KERN_ERR "%s: Failed to initialize.\n", pHba->name);
				return -1;
			}
		}
	} else {
		/* First activation: a plain reset is sufficient. */
		if((rcode = adpt_i2o_reset_hba(pHba)) != 0){
			printk(KERN_WARNING"%s: Could NOT reset.\n", pHba->name);
			return rcode;
		}

	}

	if (adpt_i2o_init_outbound_q(pHba) < 0) {
		return -1;
	}

	/* In HOLD state */

	if (adpt_i2o_hrt_get(pHba) < 0) {
		return -1;
	}

	return 0;
}
2603
2604/*
2605 * Bring a controller online into OPERATIONAL state.
2606 */
2607
2608static int adpt_i2o_online_hba(adpt_hba* pHba)
2609{
2610 if (adpt_i2o_systab_send(pHba) < 0) {
2611 adpt_i2o_delete_hba(pHba);
2612 return -1;
2613 }
2614 /* In READY state */
2615
2616 if (adpt_i2o_enable_hba(pHba) < 0) {
2617 adpt_i2o_delete_hba(pHba);
2618 return -1;
2619 }
2620
2621 /* In OPERATIONAL state */
2622 return 0;
2623}
2624
2625static s32 adpt_send_nop(adpt_hba*pHba,u32 m)
2626{
2627 u32 __iomem *msg;
2628 ulong timeout = jiffies + 5*HZ;
2629
2630 while(m == EMPTY_QUEUE){
2631 rmb();
2632 m = readl(pHba->post_port);
2633 if(m != EMPTY_QUEUE){
2634 break;
2635 }
2636 if(time_after(jiffies,timeout)){
2637 printk(KERN_ERR "%s: Timeout waiting for message frame!\n",pHba->name);
2638 return 2;
2639 }
a9a3047d 2640 schedule_timeout_uninterruptible(1);
1da177e4
LT
2641 }
2642 msg = (u32 __iomem *)(pHba->msg_addr_virt + m);
2643 writel( THREE_WORD_MSG_SIZE | SGL_OFFSET_0,&msg[0]);
2644 writel( I2O_CMD_UTIL_NOP << 24 | HOST_TID << 12 | 0,&msg[1]);
2645 writel( 0,&msg[2]);
2646 wmb();
2647
2648 writel(m, pHba->post_port);
2649 wmb();
2650 return 0;
2651}
2652
/*
 * Initialize the IOP's outbound (reply) queue: send
 * I2O_CMD_OUTBOUND_INIT, poll a 4-byte status cell the IOP writes its
 * progress into, then hand the IOP a fresh pool of reply frames.
 *
 * Returns 0 on success; -ETIMEDOUT, -ENOMEM, -1 or -2 on failure.
 */
static s32 adpt_i2o_init_outbound_q(adpt_hba* pHba)
{
	u8 *status;
	u32 __iomem *msg = NULL;
	int i;
	ulong timeout = jiffies + TMOUT_INITOUTBOUND*HZ;
	u32* ptr;
	u32 outbound_frame;  // This had to be a 32 bit address
	u32 m;

	/* Claim an inbound message frame, polling until one is free. */
	do {
		rmb();
		m = readl(pHba->post_port);
		if (m != EMPTY_QUEUE) {
			break;
		}

		if(time_after(jiffies,timeout)){
			printk(KERN_WARNING"%s: Timeout waiting for message frame\n",pHba->name);
			return -ETIMEDOUT;
		}
		schedule_timeout_uninterruptible(1);
	} while(m == EMPTY_QUEUE);

	msg=(u32 __iomem *)(pHba->msg_addr_virt+m);

	/* 4-byte cell the IOP DMAs its progress/result into (must be
	 * 32-bit addressable, hence ADDR32). */
	status = kmalloc(4,GFP_KERNEL|ADDR32);
	if (status==NULL) {
		adpt_send_nop(pHba, m);		/* give the claimed frame back */
		printk(KERN_WARNING"%s: IOP reset failed - no free memory.\n",
			pHba->name);
		return -ENOMEM;
	}
	memset(status, 0, 4);

	writel(EIGHT_WORD_MSG_SIZE| SGL_OFFSET_6, &msg[0]);
	writel(I2O_CMD_OUTBOUND_INIT<<24 | HOST_TID<<12 | ADAPTER_TID, &msg[1]);
	writel(0, &msg[2]);
	writel(0x0106, &msg[3]);	/* Transaction context */
	writel(4096, &msg[4]);		/* Host page frame size */
	writel((REPLY_FRAME_SIZE)<<16|0x80, &msg[5]);	/* Outbound msg frame size and Initcode */
	writel(0xD0000004, &msg[6]);		/* Simple SG LE, EOB */
	writel(virt_to_bus(status), &msg[7]);

	writel(m, pHba->post_port);
	wmb();

	// Wait for the reply status to come back
	do {
		if (*status) {
			if (*status != 0x01 /*I2O_EXEC_OUTBOUND_INIT_IN_PROGRESS*/) {
				break;
			}
		}
		rmb();
		if(time_after(jiffies,timeout)){
			printk(KERN_WARNING"%s: Timeout Initializing\n",pHba->name);
			/* NOTE(review): 'status' is leaked on this path. */
			return -ETIMEDOUT;
		}
		schedule_timeout_uninterruptible(1);
	} while (1);

	// If the command was successful, fill the fifo with our reply
	// message packets
	if(*status != 0x04 /*I2O_EXEC_OUTBOUND_INIT_COMPLETE*/) {
		kfree(status);
		return -2;
	}
	kfree(status);

	/* Replace any previous reply pool with a fresh, zeroed one and
	 * post every frame in it to the IOP's reply FIFO. */
	kfree(pHba->reply_pool);

	pHba->reply_pool = (u32*)kmalloc(pHba->reply_fifo_size * REPLY_FRAME_SIZE * 4, GFP_KERNEL|ADDR32);
	if(!pHba->reply_pool){
		printk(KERN_ERR"%s: Could not allocate reply pool\n",pHba->name);
		return -1;
	}
	memset(pHba->reply_pool, 0 , pHba->reply_fifo_size * REPLY_FRAME_SIZE * 4);

	ptr = pHba->reply_pool;
	for(i = 0; i < pHba->reply_fifo_size; i++) {
		outbound_frame = (u32)virt_to_bus(ptr);
		writel(outbound_frame, pHba->reply_port);
		wmb();
		ptr += REPLY_FRAME_SIZE;
	}
	adpt_i2o_status_get(pHba);
	return 0;
}
2742
2743
2744/*
2745 * I2O System Table. Contains information about
2746 * all the IOPs in the system. Used to inform IOPs
2747 * about each other's existence.
2748 *
2749 * sys_tbl_ver is the CurrentChangeIndicator that is
2750 * used by IOPs to track changes.
2751 */
2752
2753
2754
/*
 * Issue I2O_CMD_STATUS_GET and poll until the IOP has DMAed its status
 * block into pHba->status_block, then derive the inbound/outbound
 * queue depths and the SG table size this driver will use.
 *
 * Returns 0 on success, -ENOMEM or -ETIMEDOUT on failure.
 */
static s32 adpt_i2o_status_get(adpt_hba* pHba)
{
	ulong timeout;
	u32 m;
	u32 __iomem *msg;
	u8 *status_block=NULL;
	ulong status_block_bus;

	/* Allocated once, reused on later calls (32-bit addressable). */
	if(pHba->status_block == NULL) {
		pHba->status_block = (i2o_status_block*)
			kmalloc(sizeof(i2o_status_block),GFP_KERNEL|ADDR32);
		if(pHba->status_block == NULL) {
			printk(KERN_ERR
			"dpti%d: Get Status Block failed; Out of memory. \n",
			pHba->unit);
			return -ENOMEM;
		}
	}
	memset(pHba->status_block, 0, sizeof(i2o_status_block));
	status_block = (u8*)(pHba->status_block);
	status_block_bus = virt_to_bus(pHba->status_block);
	timeout = jiffies+TMOUT_GETSTATUS*HZ;
	/* Claim an inbound message frame. */
	do {
		rmb();
		m = readl(pHba->post_port);
		if (m != EMPTY_QUEUE) {
			break;
		}
		if(time_after(jiffies,timeout)){
			printk(KERN_ERR "%s: Timeout waiting for message !\n",
					pHba->name);
			return -ETIMEDOUT;
		}
		schedule_timeout_uninterruptible(1);
	} while(m==EMPTY_QUEUE);


	msg=(u32 __iomem *)(pHba->msg_addr_virt+m);

	writel(NINE_WORD_MSG_SIZE|SGL_OFFSET_0, &msg[0]);
	writel(I2O_CMD_STATUS_GET<<24|HOST_TID<<12|ADAPTER_TID, &msg[1]);
	writel(1, &msg[2]);
	writel(0, &msg[3]);
	writel(0, &msg[4]);
	writel(0, &msg[5]);
	writel(((u32)status_block_bus)&0xffffffff, &msg[6]);
	writel(0, &msg[7]);
	writel(sizeof(i2o_status_block), &msg[8]); // 88 bytes

	//post message
	writel(m, pHba->post_port);
	wmb();

	/* The IOP writes the last byte of the 88-byte block when its DMA
	 * is complete; poll that byte as the completion flag. */
	while(status_block[87]!=0xff){
		if(time_after(jiffies,timeout)){
			printk(KERN_ERR"dpti%d: Get status timeout.\n",
				pHba->unit);
			return -ETIMEDOUT;
		}
		rmb();
		schedule_timeout_uninterruptible(1);
	}

	// Set up our number of outbound and inbound messages
	pHba->post_fifo_size = pHba->status_block->max_inbound_frames;
	if (pHba->post_fifo_size > MAX_TO_IOP_MESSAGES) {
		pHba->post_fifo_size = MAX_TO_IOP_MESSAGES;
	}

	pHba->reply_fifo_size = pHba->status_block->max_outbound_frames;
	if (pHba->reply_fifo_size > MAX_FROM_IOP_MESSAGES) {
		pHba->reply_fifo_size = MAX_FROM_IOP_MESSAGES;
	}

	// Calculate the Scatter Gather list size
	pHba->sg_tablesize = (pHba->status_block->inbound_frame_size * 4 -40)/ sizeof(struct sg_simple_element);
	if (pHba->sg_tablesize > SG_LIST_ELEMENTS) {
		pHba->sg_tablesize = SG_LIST_ELEMENTS;
	}


#ifdef DEBUG
	printk("dpti%d: State = ",pHba->unit);
	switch(pHba->status_block->iop_state) {
		case 0x01:
			printk("INIT\n");
			break;
		case 0x02:
			printk("RESET\n");
			break;
		case 0x04:
			printk("HOLD\n");
			break;
		case 0x05:
			printk("READY\n");
			break;
		case 0x08:
			printk("OPERATIONAL\n");
			break;
		case 0x10:
			printk("FAILED\n");
			break;
		case 0x11:
			printk("FAULTED\n");
			break;
		default:
			printk("%x (unknown!!)\n",pHba->status_block->iop_state);
	}
#endif
	return 0;
}
2866
/*
 * Get the IOP's Logical Configuration Table
 *
 * Allocates pHba->lct (growing and retrying if the IOP reports a
 * larger table than allocated) and fills it via I2O_CMD_LCT_NOTIFY.
 * Afterwards the private DPT scalar group 0x8000 is queried to locate
 * the firmware debug buffer inside the adapter's BAR space.
 *
 * Returns 0 on success, -ENOMEM or the post_wait status on failure.
 */
static int adpt_i2o_lct_get(adpt_hba* pHba)
{
	u32 msg[8];
	int ret;
	u32 buf[16];

	if ((pHba->lct_size == 0) || (pHba->lct == NULL)){
		pHba->lct_size = pHba->status_block->expected_lct_size;
	}
	do {
		if (pHba->lct == NULL) {
			pHba->lct = kmalloc(pHba->lct_size, GFP_KERNEL|ADDR32);
			if(pHba->lct == NULL) {
				printk(KERN_CRIT "%s: Lct Get failed. Out of memory.\n",
					pHba->name);
				return -ENOMEM;
			}
		}
		memset(pHba->lct, 0, pHba->lct_size);

		msg[0] = EIGHT_WORD_MSG_SIZE|SGL_OFFSET_6;
		msg[1] = I2O_CMD_LCT_NOTIFY<<24 | HOST_TID<<12 | ADAPTER_TID;
		msg[2] = 0;
		msg[3] = 0;
		msg[4] = 0xFFFFFFFF;	/* All devices */
		msg[5] = 0x00000000;	/* Report now */
		msg[6] = 0xD0000000|pHba->lct_size;
		msg[7] = virt_to_bus(pHba->lct);

		if ((ret=adpt_i2o_post_wait(pHba, msg, sizeof(msg), 360))) {
			printk(KERN_ERR "%s: LCT Get failed (status=%#10x.\n",
				pHba->name, ret);
			printk(KERN_ERR"Adaptec: Error Reading Hardware.\n");
			return ret;
		}

		/* IOP wanted more room: grow the buffer and retry. */
		if ((pHba->lct->table_size << 2) > pHba->lct_size) {
			pHba->lct_size = pHba->lct->table_size << 2;
			kfree(pHba->lct);
			pHba->lct = NULL;
		}
	} while (pHba->lct == NULL);

	PDEBUG("%s: Hardware resource table read.\n", pHba->name);


	// I2O_DPT_EXEC_IOP_BUFFERS_GROUP_NO;
	if(adpt_i2o_query_scalar(pHba, 0 , 0x8000, -1, buf, sizeof(buf))>=0) {
		pHba->FwDebugBufferSize = buf[1];
		pHba->FwDebugBuffer_P = pHba->base_addr_virt + buf[0];
		pHba->FwDebugFlags_P = pHba->FwDebugBuffer_P + FW_DEBUG_FLAGS_OFFSET;
		pHba->FwDebugBLEDvalue_P = pHba->FwDebugBuffer_P + FW_DEBUG_BLED_OFFSET;
		pHba->FwDebugBLEDflag_P = pHba->FwDebugBLEDvalue_P + 1;
		pHba->FwDebugStrLength_P = pHba->FwDebugBuffer_P + FW_DEBUG_STR_LENGTH_OFFSET;
		pHba->FwDebugBuffer_P += buf[2];
		pHba->FwDebugFlags = 0;
	}

	return 0;
}
2930
/*
 * Build the module-global I2O system table describing every HBA we
 * drive, so each IOP can be told about its peers via SysTabSet.
 * The old table is freed and reallocated; adapters whose status block
 * cannot be refreshed are skipped (num_entries is reduced).
 *
 * Returns 0 on success, -ENOMEM if the table cannot be allocated.
 */
static int adpt_i2o_build_sys_table(void)
{
	adpt_hba* pHba = NULL;
	int count = 0;

	sys_tbl_len = sizeof(struct i2o_sys_tbl) +	// Header + IOPs
				(hba_count) * sizeof(struct i2o_sys_tbl_entry);

	kfree(sys_tbl);

	sys_tbl = kmalloc(sys_tbl_len, GFP_KERNEL|ADDR32);
	if(!sys_tbl) {
		printk(KERN_WARNING "SysTab Set failed. Out of memory.\n");
		return -ENOMEM;
	}
	memset(sys_tbl, 0, sys_tbl_len);

	sys_tbl->num_entries = hba_count;
	sys_tbl->version = I2OVERSION;
	sys_tbl->change_ind = sys_tbl_ind++;

	for(pHba = hba_chain; pHba; pHba = pHba->next) {
		// Get updated Status Block so we have the latest information
		if (adpt_i2o_status_get(pHba)) {
			sys_tbl->num_entries--;
			continue; // try next one
		}

		sys_tbl->iops[count].org_id = pHba->status_block->org_id;
		sys_tbl->iops[count].iop_id = pHba->unit + 2;
		sys_tbl->iops[count].seg_num = 0;
		sys_tbl->iops[count].i2o_version = pHba->status_block->i2o_version;
		sys_tbl->iops[count].iop_state = pHba->status_block->iop_state;
		sys_tbl->iops[count].msg_type = pHba->status_block->msg_type;
		sys_tbl->iops[count].frame_size = pHba->status_block->inbound_frame_size;
		sys_tbl->iops[count].last_changed = sys_tbl_ind - 1; // ??
		sys_tbl->iops[count].iop_capabilities = pHba->status_block->iop_capabilities;
		/* 64-bit bus address of the inbound post port, split low/high */
		sys_tbl->iops[count].inbound_low = (u32)virt_to_bus(pHba->post_port);
		sys_tbl->iops[count].inbound_high = (u32)((u64)virt_to_bus(pHba->post_port)>>32);

		count++;
	}

#ifdef DEBUG
{
	u32 *table = (u32*)sys_tbl;
	printk(KERN_DEBUG"sys_tbl_len=%d in 32bit words\n",(sys_tbl_len >>2));
	for(count = 0; count < (sys_tbl_len >>2); count++) {
		printk(KERN_INFO "sys_tbl[%d] = %0#10x\n",
			count, table[count]);
	}
}
#endif

	return 0;
}
2987
2988
2989/*
2990 * Dump the information block associated with a given unit (TID)
2991 */
2992
2993static void adpt_i2o_report_hba_unit(adpt_hba* pHba, struct i2o_device *d)
2994{
2995 char buf[64];
2996 int unit = d->lct_data.tid;
2997
2998 printk(KERN_INFO "TID %3.3d ", unit);
2999
3000 if(adpt_i2o_query_scalar(pHba, unit, 0xF100, 3, buf, 16)>=0)
3001 {
3002 buf[16]=0;
3003 printk(" Vendor: %-12.12s", buf);
3004 }
3005 if(adpt_i2o_query_scalar(pHba, unit, 0xF100, 4, buf, 16)>=0)
3006 {
3007 buf[16]=0;
3008 printk(" Device: %-12.12s", buf);
3009 }
3010 if(adpt_i2o_query_scalar(pHba, unit, 0xF100, 6, buf, 8)>=0)
3011 {
3012 buf[8]=0;
3013 printk(" Rev: %-12.12s\n", buf);
3014 }
3015#ifdef DEBUG
3016 printk(KERN_INFO "\tClass: %.21s\n", adpt_i2o_get_class_name(d->lct_data.class_id));
3017 printk(KERN_INFO "\tSubclass: 0x%04X\n", d->lct_data.sub_class);
3018 printk(KERN_INFO "\tFlags: ");
3019
3020 if(d->lct_data.device_flags&(1<<0))
3021 printk("C"); // ConfigDialog requested
3022 if(d->lct_data.device_flags&(1<<1))
3023 printk("U"); // Multi-user capable
3024 if(!(d->lct_data.device_flags&(1<<4)))
3025 printk("P"); // Peer service enabled!
3026 if(!(d->lct_data.device_flags&(1<<5)))
3027 printk("M"); // Mgmt service enabled!
3028 printk("\n");
3029#endif
3030}
3031
3032#ifdef DEBUG
3033/*
3034 * Do i2o class name lookup
3035 */
3036static const char *adpt_i2o_get_class_name(int class)
3037{
3038 int idx = 16;
3039 static char *i2o_class_name[] = {
3040 "Executive",
3041 "Device Driver Module",
3042 "Block Device",
3043 "Tape Device",
3044 "LAN Interface",
3045 "WAN Interface",
3046 "Fibre Channel Port",
3047 "Fibre Channel Device",
3048 "SCSI Device",
3049 "ATE Port",
3050 "ATE Device",
3051 "Floppy Controller",
3052 "Floppy Device",
3053 "Secondary Bus Port",
3054 "Peer Transport Agent",
3055 "Peer Transport",
3056 "Unknown"
3057 };
3058
3059 switch(class&0xFFF) {
3060 case I2O_CLASS_EXECUTIVE:
3061 idx = 0; break;
3062 case I2O_CLASS_DDM:
3063 idx = 1; break;
3064 case I2O_CLASS_RANDOM_BLOCK_STORAGE:
3065 idx = 2; break;
3066 case I2O_CLASS_SEQUENTIAL_STORAGE:
3067 idx = 3; break;
3068 case I2O_CLASS_LAN:
3069 idx = 4; break;
3070 case I2O_CLASS_WAN:
3071 idx = 5; break;
3072 case I2O_CLASS_FIBRE_CHANNEL_PORT:
3073 idx = 6; break;
3074 case I2O_CLASS_FIBRE_CHANNEL_PERIPHERAL:
3075 idx = 7; break;
3076 case I2O_CLASS_SCSI_PERIPHERAL:
3077 idx = 8; break;
3078 case I2O_CLASS_ATE_PORT:
3079 idx = 9; break;
3080 case I2O_CLASS_ATE_PERIPHERAL:
3081 idx = 10; break;
3082 case I2O_CLASS_FLOPPY_CONTROLLER:
3083 idx = 11; break;
3084 case I2O_CLASS_FLOPPY_DEVICE:
3085 idx = 12; break;
3086 case I2O_CLASS_BUS_ADAPTER_PORT:
3087 idx = 13; break;
3088 case I2O_CLASS_PEER_TRANSPORT_AGENT:
3089 idx = 14; break;
3090 case I2O_CLASS_PEER_TRANSPORT:
3091 idx = 15; break;
3092 }
3093 return i2o_class_name[idx];
3094}
3095#endif
3096
3097
/*
 * Fetch the IOP's Hardware Resource Table into pHba->hrt, growing the
 * buffer and retrying if the IOP reports a larger table than we
 * allocated.
 *
 * Returns 0 on success, -ENOMEM or the post_wait status on failure.
 */
static s32 adpt_i2o_hrt_get(adpt_hba* pHba)
{
	u32 msg[6];
	int ret, size = sizeof(i2o_hrt);

	do {
		if (pHba->hrt == NULL) {
			pHba->hrt=kmalloc(size, GFP_KERNEL|ADDR32);
			if (pHba->hrt == NULL) {
				printk(KERN_CRIT "%s: Hrt Get failed; Out of memory.\n", pHba->name);
				return -ENOMEM;
			}
		}

		msg[0]= SIX_WORD_MSG_SIZE| SGL_OFFSET_4;
		msg[1]= I2O_CMD_HRT_GET<<24 | HOST_TID<<12 | ADAPTER_TID;
		msg[2]= 0;
		msg[3]= 0;
		msg[4]= (0xD0000000 | size);    /* Simple transaction */
		msg[5]= virt_to_bus(pHba->hrt);   /* Dump it here */

		if ((ret = adpt_i2o_post_wait(pHba, msg, sizeof(msg),20))) {
			printk(KERN_ERR "%s: Unable to get HRT (status=%#10x)\n", pHba->name, ret);
			return ret;
		}

		/* Table larger than our buffer - grow and retry. */
		if (pHba->hrt->num_entries * pHba->hrt->entry_len << 2 > size) {
			size = pHba->hrt->num_entries * pHba->hrt->entry_len << 2;
			kfree(pHba->hrt);
			pHba->hrt = NULL;
		}
	} while(pHba->hrt == NULL);
	return 0;
}
3132
3133/*
3134 * Query one scalar group value or a whole scalar group.
3135 */
3136static int adpt_i2o_query_scalar(adpt_hba* pHba, int tid,
3137 int group, int field, void *buf, int buflen)
3138{
3139 u16 opblk[] = { 1, 0, I2O_PARAMS_FIELD_GET, group, 1, field };
3140 u8 *resblk;
3141
3142 int size;
3143
3144 /* 8 bytes for header */
3145 resblk = kmalloc(sizeof(u8) * (8+buflen), GFP_KERNEL|ADDR32);
3146 if (resblk == NULL) {
3147 printk(KERN_CRIT "%s: query scalar failed; Out of memory.\n", pHba->name);
3148 return -ENOMEM;
3149 }
3150
3151 if (field == -1) /* whole group */
3152 opblk[4] = -1;
3153
3154 size = adpt_i2o_issue_params(I2O_CMD_UTIL_PARAMS_GET, pHba, tid,
3155 opblk, sizeof(opblk), resblk, sizeof(u8)*(8+buflen));
3156 if (size == -ETIME) {
3157 printk(KERN_WARNING "%s: issue params failed; Timed out.\n", pHba->name);
3158 return -ETIME;
3159 } else if (size == -EINTR) {
3160 printk(KERN_WARNING "%s: issue params failed; Interrupted.\n", pHba->name);
3161 return -EINTR;
3162 }
3163
3164 memcpy(buf, resblk+8, buflen); /* cut off header */
3165
3166 kfree(resblk);
3167 if (size < 0)
3168 return size;
3169
3170 return buflen;
3171}
3172
3173
/* Issue UTIL_PARAMS_GET or UTIL_PARAMS_SET
 *
 * This function can be used for all UtilParamsGet/Set operations.
 * The OperationBlock is given in opblk-buffer,
 * and results are returned in resblk-buffer.
 * Note that the minimum sized resblk is 8 bytes and contains
 * ResultCount, ErrorInfoSize, BlockStatus and BlockSize.
 *
 * Returns the number of bytes used in resblk on success, or a
 * negative DetailedStatus / BlockStatus value on failure.
 */
static int adpt_i2o_issue_params(int cmd, adpt_hba* pHba, int tid,
		  void *opblk, int oplen, void *resblk, int reslen)
{
	u32 msg[9];
	u32 *res = (u32 *)resblk;
	int wait_status;

	msg[0] = NINE_WORD_MSG_SIZE | SGL_OFFSET_5;
	msg[1] = cmd << 24 | HOST_TID << 12 | tid;
	msg[2] = 0;
	msg[3] = 0;
	msg[4] = 0;
	msg[5] = 0x54000000 | oplen;	/* OperationBlock */
	msg[6] = virt_to_bus(opblk);
	msg[7] = 0xD0000000 | reslen;	/* ResultBlock */
	msg[8] = virt_to_bus(resblk);

	if ((wait_status = adpt_i2o_post_wait(pHba, msg, sizeof(msg), 20))) {
		printk("adpt_i2o_issue_params: post_wait failed (%p)\n", resblk);
   		return wait_status; 	/* -DetailedStatus */
	}

	if (res[1]&0x00FF0000) { 	/* BlockStatus != SUCCESS */
		printk(KERN_WARNING "%s: %s - Error:\n ErrorInfoSize = 0x%02x, "
			"BlockStatus = 0x%02x, BlockSize = 0x%04x\n",
			pHba->name,
			(cmd == I2O_CMD_UTIL_PARAMS_SET) ? "PARAMS_SET"
							 : "PARAMS_GET",
			res[1]>>24, (res[1]>>16)&0xFF, res[1]&0xFFFF);
		return -((res[1] >> 16) & 0xFF); /* -BlockStatus */
	}

	 return 4 + ((res[1] & 0x0000FFFF) << 2); /* bytes used in resblk */
}
3216
3217
3218static s32 adpt_i2o_quiesce_hba(adpt_hba* pHba)
3219{
3220 u32 msg[4];
3221 int ret;
3222
3223 adpt_i2o_status_get(pHba);
3224
3225 /* SysQuiesce discarded if IOP not in READY or OPERATIONAL state */
3226
3227 if((pHba->status_block->iop_state != ADAPTER_STATE_READY) &&
3228 (pHba->status_block->iop_state != ADAPTER_STATE_OPERATIONAL)){
3229 return 0;
3230 }
3231
3232 msg[0] = FOUR_WORD_MSG_SIZE|SGL_OFFSET_0;
3233 msg[1] = I2O_CMD_SYS_QUIESCE<<24|HOST_TID<<12|ADAPTER_TID;
3234 msg[2] = 0;
3235 msg[3] = 0;
3236
3237 if((ret = adpt_i2o_post_wait(pHba, msg, sizeof(msg), 240))) {
3238 printk(KERN_INFO"dpti%d: Unable to quiesce (status=%#x).\n",
3239 pHba->unit, -ret);
3240 } else {
3241 printk(KERN_INFO"dpti%d: Quiesced.\n",pHba->unit);
3242 }
3243
3244 adpt_i2o_status_get(pHba);
3245 return ret;
3246}
3247
3248
3249/*
3250 * Enable IOP. Allows the IOP to resume external operations.
3251 */
3252static int adpt_i2o_enable_hba(adpt_hba* pHba)
3253{
3254 u32 msg[4];
3255 int ret;
3256
3257 adpt_i2o_status_get(pHba);
3258 if(!pHba->status_block){
3259 return -ENOMEM;
3260 }
3261 /* Enable only allowed on READY state */
3262 if(pHba->status_block->iop_state == ADAPTER_STATE_OPERATIONAL)
3263 return 0;
3264
3265 if(pHba->status_block->iop_state != ADAPTER_STATE_READY)
3266 return -EINVAL;
3267
3268 msg[0]=FOUR_WORD_MSG_SIZE|SGL_OFFSET_0;
3269 msg[1]=I2O_CMD_SYS_ENABLE<<24|HOST_TID<<12|ADAPTER_TID;
3270 msg[2]= 0;
3271 msg[3]= 0;
3272
3273 if ((ret = adpt_i2o_post_wait(pHba, msg, sizeof(msg), 240))) {
3274 printk(KERN_WARNING"%s: Could not enable (status=%#10x).\n",
3275 pHba->name, ret);
3276 } else {
3277 PDEBUG("%s: Enabled.\n", pHba->name);
3278 }
3279
3280 adpt_i2o_status_get(pHba);
3281 return ret;
3282}
3283
3284
/*
 * Push the host-built system table to the IOP via I2O_CMD_SYS_TAB_SET
 * so it learns the host's IOP numbering.  Returns 0 on success or the
 * negative status from adpt_i2o_post_wait().
 *
 * NOTE(review): uses the file-scope sys_tbl/sys_tbl_len buffer, which
 * is presumably built elsewhere before this is called — confirm the
 * build step always precedes this send.
 */
static int adpt_i2o_systab_send(adpt_hba* pHba)
{
	u32 msg[12];
	int ret;

	msg[0] = I2O_MESSAGE_SIZE(12) | SGL_OFFSET_6;
	msg[1] = I2O_CMD_SYS_TAB_SET<<24 | HOST_TID<<12 | ADAPTER_TID;
	msg[2] = 0;
	msg[3] = 0;
	/* IOP_ID for this host is unit+2; host number 0 in the upper half */
	msg[4] = (0<<16) | ((pHba->unit+2) << 12); /* Host 0 IOP ID (unit + 2) */
	msg[5] = 0; /* Segment 0 */

	/*
	 * Provide three SGL-elements:
	 * System table (SysTab), Private memory space declaration and
	 * Private i/o space declaration
	 */
	msg[6] = 0x54000000 | sys_tbl_len;	/* SGL: SysTab buffer length */
	msg[7] = virt_to_phys(sys_tbl);		/* physical address of SysTab */
	msg[8] = 0x54000000 | 0;		/* empty private-memory declaration */
	msg[9] = 0;
	msg[10] = 0xD4000000 | 0;		/* empty (end-marked) private-i/o declaration */
	msg[11] = 0;

	if ((ret=adpt_i2o_post_wait(pHba, msg, sizeof(msg), 120))) {
		printk(KERN_INFO "%s: Unable to set SysTab (status=%#10x).\n",
			pHba->name, ret);
	}
#ifdef DEBUG
	else {
		PINFO("%s: SysTab set.\n", pHba->name);
	}
#endif

	return ret;
 }
3321
3322
3323/*============================================================================
3324 *
3325 *============================================================================
3326 */
3327
3328
3329#ifdef UARTDELAY
3330
/*
 * Busy-wait for roughly 'millisec' milliseconds, one udelay(1000) at a
 * time.  Only compiled for the UARTDELAY debug path, where sleeping is
 * not an option.
 *
 * Fix: the original declared this "static static", a duplicate
 * storage-class specifier that is a compile error whenever UARTDELAY
 * is actually defined.
 */
static void adpt_delay(int millisec)
{
	int i;
	for (i = 0; i < millisec; i++) {
		udelay(1000); /* delay for one millisecond */
	}
}
3338
3339#endif
3340
/* SCSI mid-layer host template: names, entry points and queueing limits
 * for the dpt_i2o adapter driver. */
static struct scsi_host_template driver_template = {
	.name			= "dpt_i2o",
	.proc_name		= "dpt_i2o",	/* /proc/scsi/dpt_i2o */
	.proc_info		= adpt_proc_info,
	.detect			= adpt_detect,	/* old-style detect/release model */
	.release		= adpt_release,
	.info			= adpt_info,
	.queuecommand		= adpt_queue,
	/* error-recovery escalation chain: abort -> device -> bus -> host */
	.eh_abort_handler	= adpt_abort,
	.eh_device_reset_handler = adpt_device_reset,
	.eh_bus_reset_handler	= adpt_bus_reset,
	.eh_host_reset_handler	= adpt_reset,
	.bios_param		= adpt_bios_param,
	.slave_configure	= adpt_slave_configure,
	.can_queue		= MAX_TO_IOP_MESSAGES,	/* outstanding cmds bounded by IOP inbound queue */
	.this_id		= 7,	/* conventional host SCSI ID */
	.cmd_per_lun		= 1,
	.use_clustering		= ENABLE_CLUSTERING,
};
3360#include "scsi_module.c"
3361MODULE_LICENSE("GPL");