[SCSI] drivers/scsi: Use ARRAY_SIZE macro
1 /* qlogicpti.c: Performance Technologies QlogicISP sbus card driver.
2 *
3 * Copyright (C) 1996 David S. Miller (davem@caipfs.rutgers.edu)
4 *
5 * A lot of this driver was directly stolen from Erik H. Moe's PCI
6 * Qlogic ISP driver. Mucho kudos to him for this code.
7 *
8 * An even bigger kudos to John Grana at Performance Technologies
9 * for providing me with the hardware to write this driver, you rule
10 * John you really do.
11 *
12 * May, 2, 1997: Added support for QLGC,isp --jj
13 */
14
15 #include <linux/kernel.h>
16 #include <linux/delay.h>
17 #include <linux/types.h>
18 #include <linux/string.h>
19 #include <linux/slab.h>
20 #include <linux/blkdev.h>
21 #include <linux/proc_fs.h>
22 #include <linux/stat.h>
23 #include <linux/init.h>
24 #include <linux/spinlock.h>
25 #include <linux/interrupt.h>
26 #include <linux/module.h>
27 #include <linux/jiffies.h>
28
29 #include <asm/byteorder.h>
30
31 #include "qlogicpti.h"
32
33 #include <asm/sbus.h>
34 #include <asm/dma.h>
35 #include <asm/system.h>
36 #include <asm/ptrace.h>
37 #include <asm/pgtable.h>
38 #include <asm/oplib.h>
39 #include <asm/io.h>
40 #include <asm/irq.h>
41
42 #include <scsi/scsi.h>
43 #include <scsi/scsi_cmnd.h>
44 #include <scsi/scsi_device.h>
45 #include <scsi/scsi_eh.h>
46 #include <scsi/scsi_request.h>
47 #include <scsi/scsi_tcq.h>
48 #include <scsi/scsi_host.h>
49
50
51
52 #define MAX_TARGETS 16
53 #define MAX_LUNS 8 /* 32 for 1.31 F/W */
54
55 #define DEFAULT_LOOP_COUNT 10000
56
57 #include "qlogicpti_asm.c"
58
59 static struct qlogicpti *qptichain = NULL;
60 static DEFINE_SPINLOCK(qptichain_lock);
61 static int qptis_running = 0;
62
63 #define PACKB(a, b) (((a)<<4)|(b))
64
65 static const u_char mbox_param[] = {
66 PACKB(1, 1), /* MBOX_NO_OP */
67 PACKB(5, 5), /* MBOX_LOAD_RAM */
68 PACKB(2, 0), /* MBOX_EXEC_FIRMWARE */
69 PACKB(5, 5), /* MBOX_DUMP_RAM */
70 PACKB(3, 3), /* MBOX_WRITE_RAM_WORD */
71 PACKB(2, 3), /* MBOX_READ_RAM_WORD */
72 PACKB(6, 6), /* MBOX_MAILBOX_REG_TEST */
73 PACKB(2, 3), /* MBOX_VERIFY_CHECKSUM */
74 PACKB(1, 3), /* MBOX_ABOUT_FIRMWARE */
75 PACKB(0, 0), /* 0x0009 */
76 PACKB(0, 0), /* 0x000a */
77 PACKB(0, 0), /* 0x000b */
78 PACKB(0, 0), /* 0x000c */
79 PACKB(0, 0), /* 0x000d */
80 PACKB(1, 2), /* MBOX_CHECK_FIRMWARE */
81 PACKB(0, 0), /* 0x000f */
82 PACKB(5, 5), /* MBOX_INIT_REQ_QUEUE */
83 PACKB(6, 6), /* MBOX_INIT_RES_QUEUE */
84 PACKB(4, 4), /* MBOX_EXECUTE_IOCB */
85 PACKB(2, 2), /* MBOX_WAKE_UP */
86 PACKB(1, 6), /* MBOX_STOP_FIRMWARE */
87 PACKB(4, 4), /* MBOX_ABORT */
88 PACKB(2, 2), /* MBOX_ABORT_DEVICE */
89 PACKB(3, 3), /* MBOX_ABORT_TARGET */
90 PACKB(2, 2), /* MBOX_BUS_RESET */
91 PACKB(2, 3), /* MBOX_STOP_QUEUE */
92 PACKB(2, 3), /* MBOX_START_QUEUE */
93 PACKB(2, 3), /* MBOX_SINGLE_STEP_QUEUE */
94 PACKB(2, 3), /* MBOX_ABORT_QUEUE */
95 PACKB(2, 4), /* MBOX_GET_DEV_QUEUE_STATUS */
96 PACKB(0, 0), /* 0x001e */
97 PACKB(1, 3), /* MBOX_GET_FIRMWARE_STATUS */
98 PACKB(1, 2), /* MBOX_GET_INIT_SCSI_ID */
99 PACKB(1, 2), /* MBOX_GET_SELECT_TIMEOUT */
100 PACKB(1, 3), /* MBOX_GET_RETRY_COUNT */
101 PACKB(1, 2), /* MBOX_GET_TAG_AGE_LIMIT */
102 PACKB(1, 2), /* MBOX_GET_CLOCK_RATE */
103 PACKB(1, 2), /* MBOX_GET_ACT_NEG_STATE */
104 PACKB(1, 2), /* MBOX_GET_ASYNC_DATA_SETUP_TIME */
105 PACKB(1, 3), /* MBOX_GET_SBUS_PARAMS */
106 PACKB(2, 4), /* MBOX_GET_TARGET_PARAMS */
107 PACKB(2, 4), /* MBOX_GET_DEV_QUEUE_PARAMS */
108 PACKB(0, 0), /* 0x002a */
109 PACKB(0, 0), /* 0x002b */
110 PACKB(0, 0), /* 0x002c */
111 PACKB(0, 0), /* 0x002d */
112 PACKB(0, 0), /* 0x002e */
113 PACKB(0, 0), /* 0x002f */
114 PACKB(2, 2), /* MBOX_SET_INIT_SCSI_ID */
115 PACKB(2, 2), /* MBOX_SET_SELECT_TIMEOUT */
116 PACKB(3, 3), /* MBOX_SET_RETRY_COUNT */
117 PACKB(2, 2), /* MBOX_SET_TAG_AGE_LIMIT */
118 PACKB(2, 2), /* MBOX_SET_CLOCK_RATE */
119 PACKB(2, 2), /* MBOX_SET_ACTIVE_NEG_STATE */
120 PACKB(2, 2), /* MBOX_SET_ASYNC_DATA_SETUP_TIME */
121 PACKB(3, 3), /* MBOX_SET_SBUS_CONTROL_PARAMS */
122 PACKB(4, 4), /* MBOX_SET_TARGET_PARAMS */
123 PACKB(4, 4), /* MBOX_SET_DEV_QUEUE_PARAMS */
124 PACKB(0, 0), /* 0x003a */
125 PACKB(0, 0), /* 0x003b */
126 PACKB(0, 0), /* 0x003c */
127 PACKB(0, 0), /* 0x003d */
128 PACKB(0, 0), /* 0x003e */
129 PACKB(0, 0), /* 0x003f */
130 PACKB(0, 0), /* 0x0040 */
131 PACKB(0, 0), /* 0x0041 */
132 PACKB(0, 0) /* 0x0042 */
133 };
134
135 #define MAX_MBOX_COMMAND ARRAY_SIZE(mbox_param)
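
/* Editor's note, an illustrative sketch that is not part of the original
 * driver: each mbox_param[] entry packs, via PACKB(), the number of mailbox
 * registers the host must write (high nibble) and the number the ISP hands
 * back (low nibble) for that opcode.  qlogicpti_mbox_command() peels these
 * nibbles apart with ">> 4" and "& 0xf" to drive its two fall-through
 * switches.  Hypothetical helpers showing the decode:
 */
#if 0	/* example only */
static inline unsigned int mbox_in_regs(unsigned int opcode)
{
	return mbox_param[opcode] >> 4;		/* MBOX regs written by host */
}

static inline unsigned int mbox_out_regs(unsigned int opcode)
{
	return mbox_param[opcode] & 0xf;	/* MBOX regs read back */
}
#endif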
136
137 /* queue lengths _must_ be a power of two: */
138 #define QUEUE_DEPTH(in, out, ql) ((in - out) & (ql))
139 #define REQ_QUEUE_DEPTH(in, out) QUEUE_DEPTH(in, out, \
140 QLOGICPTI_REQ_QUEUE_LEN)
141 #define RES_QUEUE_DEPTH(in, out) QUEUE_DEPTH(in, out, RES_QUEUE_LEN)
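
/* Editor's worked example (illustrative only): because the queue length is
 * used as a bit mask, the depth computation survives wrap-around.  With a
 * mask of 255, in = 3 and out = 250 give (3 - 250) & 255 = 9, which is the
 * six slots 250..255 plus the three slots 0..2 still outstanding.
 */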
142
143 static inline void qlogicpti_enable_irqs(struct qlogicpti *qpti)
144 {
145 sbus_writew(SBUS_CTRL_ERIRQ | SBUS_CTRL_GENAB,
146 qpti->qregs + SBUS_CTRL);
147 }
148
149 static inline void qlogicpti_disable_irqs(struct qlogicpti *qpti)
150 {
151 sbus_writew(0, qpti->qregs + SBUS_CTRL);
152 }
153
154 static inline void set_sbus_cfg1(struct qlogicpti *qpti)
155 {
156 u16 val;
157 u8 bursts = qpti->bursts;
158
159 #if 0 /* It appears that at least PTI cards do not support
160 * 64-byte bursts and that setting the B64 bit actually
161 * is a nop and the chip ends up using the smallest burst
162 * size. -DaveM
163 */
164 if (sbus_can_burst64(qpti->sdev) && (bursts & DMA_BURST64)) {
165 val = (SBUS_CFG1_BENAB | SBUS_CFG1_B64);
166 } else
167 #endif
168 if (bursts & DMA_BURST32) {
169 val = (SBUS_CFG1_BENAB | SBUS_CFG1_B32);
170 } else if (bursts & DMA_BURST16) {
171 val = (SBUS_CFG1_BENAB | SBUS_CFG1_B16);
172 } else if (bursts & DMA_BURST8) {
173 val = (SBUS_CFG1_BENAB | SBUS_CFG1_B8);
174 } else {
175 val = 0; /* No sbus bursts for you... */
176 }
177 sbus_writew(val, qpti->qregs + SBUS_CFG1);
178 }
179
180 static int qlogicpti_mbox_command(struct qlogicpti *qpti, u_short param[], int force)
181 {
182 int loop_count;
183 u16 tmp;
184
185 if (mbox_param[param[0]] == 0)
186 return 1;
187
188 /* Set SBUS semaphore. */
189 tmp = sbus_readw(qpti->qregs + SBUS_SEMAPHORE);
190 tmp |= SBUS_SEMAPHORE_LCK;
191 sbus_writew(tmp, qpti->qregs + SBUS_SEMAPHORE);
192
193 /* Wait for host IRQ bit to clear. */
194 loop_count = DEFAULT_LOOP_COUNT;
195 while (--loop_count && (sbus_readw(qpti->qregs + HCCTRL) & HCCTRL_HIRQ)) {
196 barrier();
197 cpu_relax();
198 }
199 if (!loop_count)
200 printk(KERN_EMERG "qlogicpti: mbox_command loop timeout #1\n");
201
202 /* Write mailbox command registers. */
203 switch (mbox_param[param[0]] >> 4) {
204 case 6: sbus_writew(param[5], qpti->qregs + MBOX5);
205 case 5: sbus_writew(param[4], qpti->qregs + MBOX4);
206 case 4: sbus_writew(param[3], qpti->qregs + MBOX3);
207 case 3: sbus_writew(param[2], qpti->qregs + MBOX2);
208 case 2: sbus_writew(param[1], qpti->qregs + MBOX1);
209 case 1: sbus_writew(param[0], qpti->qregs + MBOX0);
210 }
211
212 /* Clear RISC interrupt. */
213 tmp = sbus_readw(qpti->qregs + HCCTRL);
214 tmp |= HCCTRL_CRIRQ;
215 sbus_writew(tmp, qpti->qregs + HCCTRL);
216
217 /* Clear SBUS semaphore. */
218 sbus_writew(0, qpti->qregs + SBUS_SEMAPHORE);
219
220 /* Set HOST interrupt. */
221 tmp = sbus_readw(qpti->qregs + HCCTRL);
222 tmp |= HCCTRL_SHIRQ;
223 sbus_writew(tmp, qpti->qregs + HCCTRL);
224
225 /* Wait for HOST interrupt clears. */
226 loop_count = DEFAULT_LOOP_COUNT;
227 while (--loop_count &&
228 (sbus_readw(qpti->qregs + HCCTRL) & HCCTRL_CRIRQ))
229 udelay(20);
230 if (!loop_count)
231 printk(KERN_EMERG "qlogicpti: mbox_command[%04x] loop timeout #2\n",
232 param[0]);
233
234 /* Wait for SBUS semaphore to get set. */
235 loop_count = DEFAULT_LOOP_COUNT;
236 while (--loop_count &&
237 !(sbus_readw(qpti->qregs + SBUS_SEMAPHORE) & SBUS_SEMAPHORE_LCK)) {
238 udelay(20);
239
240 /* Workaround for some buggy chips. */
241 if (sbus_readw(qpti->qregs + MBOX0) & 0x4000)
242 break;
243 }
244 if (!loop_count)
245 printk(KERN_EMERG "qlogicpti: mbox_command[%04x] loop timeout #3\n",
246 param[0]);
247
248 /* Wait for MBOX busy condition to go away. */
249 loop_count = DEFAULT_LOOP_COUNT;
250 while (--loop_count && (sbus_readw(qpti->qregs + MBOX0) == 0x04))
251 udelay(20);
252 if (!loop_count)
253 printk(KERN_EMERG "qlogicpti: mbox_command[%04x] loop timeout #4\n",
254 param[0]);
255
256 /* Read back output parameters. */
257 switch (mbox_param[param[0]] & 0xf) {
258 case 6: param[5] = sbus_readw(qpti->qregs + MBOX5);
259 case 5: param[4] = sbus_readw(qpti->qregs + MBOX4);
260 case 4: param[3] = sbus_readw(qpti->qregs + MBOX3);
261 case 3: param[2] = sbus_readw(qpti->qregs + MBOX2);
262 case 2: param[1] = sbus_readw(qpti->qregs + MBOX1);
263 case 1: param[0] = sbus_readw(qpti->qregs + MBOX0);
264 }
265
266 /* Clear RISC interrupt. */
267 tmp = sbus_readw(qpti->qregs + HCCTRL);
268 tmp |= HCCTRL_CRIRQ;
269 sbus_writew(tmp, qpti->qregs + HCCTRL);
270
271 /* Release SBUS semaphore. */
272 tmp = sbus_readw(qpti->qregs + SBUS_SEMAPHORE);
273 tmp &= ~(SBUS_SEMAPHORE_LCK);
274 sbus_writew(tmp, qpti->qregs + SBUS_SEMAPHORE);
275
276 /* We're done. */
277 return 0;
278 }
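
/* Editor's illustrative sketch, not part of the original driver: a typical
 * caller loads param[0] with the opcode plus any inputs, then reads the
 * results back out of the same array, as the probe code below does.  The
 * function name here is hypothetical.
 */
#if 0	/* example only */
static void example_report_firmware_rev(struct qlogicpti *qpti)
{
	u_short param[6];

	param[0] = MBOX_ABOUT_FIRMWARE;	/* PACKB(1, 3): one in, three out */
	if (!qlogicpti_mbox_command(qpti, param, 1) &&
	    param[0] == MBOX_COMMAND_COMPLETE)
		printk("qlogicpti%d: firmware rev %d.%d.%d\n", qpti->qpti_id,
		       param[1], param[2], param[3]);
}
#endif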
279
280 static inline void qlogicpti_set_hostdev_defaults(struct qlogicpti *qpti)
281 {
282 int i;
283
284 qpti->host_param.initiator_scsi_id = qpti->scsi_id;
285 qpti->host_param.bus_reset_delay = 3;
286 qpti->host_param.retry_count = 0;
287 qpti->host_param.retry_delay = 5;
288 qpti->host_param.async_data_setup_time = 3;
289 qpti->host_param.req_ack_active_negation = 1;
290 qpti->host_param.data_line_active_negation = 1;
291 qpti->host_param.data_dma_burst_enable = 1;
292 qpti->host_param.command_dma_burst_enable = 1;
293 qpti->host_param.tag_aging = 8;
294 qpti->host_param.selection_timeout = 250;
295 qpti->host_param.max_queue_depth = 256;
296
297 for(i = 0; i < MAX_TARGETS; i++) {
298 /*
299 * disconnect, parity, arq, reneg on reset, and, oddly enough
300 * tags...the midlayer's notion of tagged support has to match
301 * our device settings, and since we base whether we enable a
302 * tag on a per-cmnd basis upon what the midlayer sez, we
303 * actually enable the capability here.
304 */
305 qpti->dev_param[i].device_flags = 0xcd;
306 qpti->dev_param[i].execution_throttle = 16;
307 if (qpti->ultra) {
308 qpti->dev_param[i].synchronous_period = 12;
309 qpti->dev_param[i].synchronous_offset = 8;
310 } else {
311 qpti->dev_param[i].synchronous_period = 25;
312 qpti->dev_param[i].synchronous_offset = 12;
313 }
314 qpti->dev_param[i].device_enable = 1;
315 }
316 /* this is very important to set! */
317 qpti->sbits = 1 << qpti->scsi_id;
318 }
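
/* Editor's note: sbits appears to be a bitmap of targets whose INQUIRY data
 * has already been examined; seeding it with our own initiator ID means
 * ourdone()/qlogicpti_queuecommand_slow() below never wait on a probe of
 * the host adapter itself before switching to the fast queuecommand path.
 */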
319
320 static int qlogicpti_reset_hardware(struct Scsi_Host *host)
321 {
322 struct qlogicpti *qpti = (struct qlogicpti *) host->hostdata;
323 u_short param[6];
324 unsigned short risc_code_addr;
325 int loop_count, i;
326 unsigned long flags;
327
328 risc_code_addr = 0x1000; /* all load addresses are at 0x1000 */
329
330 spin_lock_irqsave(host->host_lock, flags);
331
332 sbus_writew(HCCTRL_PAUSE, qpti->qregs + HCCTRL);
333
334 /* Only reset the scsi bus if it is not free. */
335 if (sbus_readw(qpti->qregs + CPU_PCTRL) & CPU_PCTRL_BSY) {
336 sbus_writew(CPU_ORIDE_RMOD, qpti->qregs + CPU_ORIDE);
337 sbus_writew(CPU_CMD_BRESET, qpti->qregs + CPU_CMD);
338 udelay(400);
339 }
340
341 sbus_writew(SBUS_CTRL_RESET, qpti->qregs + SBUS_CTRL);
342 sbus_writew((DMA_CTRL_CCLEAR | DMA_CTRL_CIRQ), qpti->qregs + CMD_DMA_CTRL);
343 sbus_writew((DMA_CTRL_CCLEAR | DMA_CTRL_CIRQ), qpti->qregs + DATA_DMA_CTRL);
344
345 loop_count = DEFAULT_LOOP_COUNT;
346 while (--loop_count && ((sbus_readw(qpti->qregs + MBOX0) & 0xff) == 0x04))
347 udelay(20);
348 if (!loop_count)
349 printk(KERN_EMERG "qlogicpti: reset_hardware loop timeout\n");
350
351 sbus_writew(HCCTRL_PAUSE, qpti->qregs + HCCTRL);
352 set_sbus_cfg1(qpti);
353 qlogicpti_enable_irqs(qpti);
354
355 if (sbus_readw(qpti->qregs + RISC_PSR) & RISC_PSR_ULTRA) {
356 qpti->ultra = 1;
357 sbus_writew((RISC_MTREG_P0ULTRA | RISC_MTREG_P1ULTRA),
358 qpti->qregs + RISC_MTREG);
359 } else {
360 qpti->ultra = 0;
361 sbus_writew((RISC_MTREG_P0DFLT | RISC_MTREG_P1DFLT),
362 qpti->qregs + RISC_MTREG);
363 }
364
365 /* reset adapter and per-device default values. */
366 /* do it after finding out whether we're ultra mode capable */
367 qlogicpti_set_hostdev_defaults(qpti);
368
369 /* Release the RISC processor. */
370 sbus_writew(HCCTRL_REL, qpti->qregs + HCCTRL);
371
372 /* Get RISC to start executing the firmware code. */
373 param[0] = MBOX_EXEC_FIRMWARE;
374 param[1] = risc_code_addr;
375 if (qlogicpti_mbox_command(qpti, param, 1)) {
376 printk(KERN_EMERG "qlogicpti%d: Cannot execute ISP firmware.\n",
377 qpti->qpti_id);
378 spin_unlock_irqrestore(host->host_lock, flags);
379 return 1;
380 }
381
382 /* Set initiator scsi ID. */
383 param[0] = MBOX_SET_INIT_SCSI_ID;
384 param[1] = qpti->host_param.initiator_scsi_id;
385 if (qlogicpti_mbox_command(qpti, param, 1) ||
386 (param[0] != MBOX_COMMAND_COMPLETE)) {
387 printk(KERN_EMERG "qlogicpti%d: Cannot set initiator SCSI ID.\n",
388 qpti->qpti_id);
389 spin_unlock_irqrestore(host->host_lock, flags);
390 return 1;
391 }
392
393 /* Initialize state of the queues, both hw and sw. */
394 qpti->req_in_ptr = qpti->res_out_ptr = 0;
395
396 param[0] = MBOX_INIT_RES_QUEUE;
397 param[1] = RES_QUEUE_LEN + 1;
398 param[2] = (u_short) (qpti->res_dvma >> 16);
399 param[3] = (u_short) (qpti->res_dvma & 0xffff);
400 param[4] = param[5] = 0;
401 if (qlogicpti_mbox_command(qpti, param, 1)) {
402 printk(KERN_EMERG "qlogicpti%d: Cannot init response queue.\n",
403 qpti->qpti_id);
404 spin_unlock_irqrestore(host->host_lock, flags);
405 return 1;
406 }
407
408 param[0] = MBOX_INIT_REQ_QUEUE;
409 param[1] = QLOGICPTI_REQ_QUEUE_LEN + 1;
410 param[2] = (u_short) (qpti->req_dvma >> 16);
411 param[3] = (u_short) (qpti->req_dvma & 0xffff);
412 param[4] = param[5] = 0;
413 if (qlogicpti_mbox_command(qpti, param, 1)) {
414 printk(KERN_EMERG "qlogicpti%d: Cannot init request queue.\n",
415 qpti->qpti_id);
416 spin_unlock_irqrestore(host->host_lock, flags);
417 return 1;
418 }
419
420 param[0] = MBOX_SET_RETRY_COUNT;
421 param[1] = qpti->host_param.retry_count;
422 param[2] = qpti->host_param.retry_delay;
423 qlogicpti_mbox_command(qpti, param, 0);
424
425 param[0] = MBOX_SET_TAG_AGE_LIMIT;
426 param[1] = qpti->host_param.tag_aging;
427 qlogicpti_mbox_command(qpti, param, 0);
428
429 for (i = 0; i < MAX_TARGETS; i++) {
430 param[0] = MBOX_GET_DEV_QUEUE_PARAMS;
431 param[1] = (i << 8);
432 qlogicpti_mbox_command(qpti, param, 0);
433 }
434
435 param[0] = MBOX_GET_FIRMWARE_STATUS;
436 qlogicpti_mbox_command(qpti, param, 0);
437
438 param[0] = MBOX_SET_SELECT_TIMEOUT;
439 param[1] = qpti->host_param.selection_timeout;
440 qlogicpti_mbox_command(qpti, param, 0);
441
442 for (i = 0; i < MAX_TARGETS; i++) {
443 param[0] = MBOX_SET_TARGET_PARAMS;
444 param[1] = (i << 8);
445 param[2] = (qpti->dev_param[i].device_flags << 8);
446 /*
447 * Since we're now loading 1.31 f/w, force narrow/async.
448 */
449 param[2] |= 0xc0;
450 param[3] = 0; /* no offset, we do not have sync mode yet */
451 qlogicpti_mbox_command(qpti, param, 0);
452 }
453
454 /*
455 * Always (sigh) do an initial bus reset (kicks f/w).
456 */
457 param[0] = MBOX_BUS_RESET;
458 param[1] = qpti->host_param.bus_reset_delay;
459 qlogicpti_mbox_command(qpti, param, 0);
460 qpti->send_marker = 1;
461
462 spin_unlock_irqrestore(host->host_lock, flags);
463 return 0;
464 }
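
/* Editor's worked example (illustrative address): the mailbox registers are
 * 16 bits wide, so the 32-bit queue DVMA addresses above are split in two,
 * e.g. a res_dvma of 0x0fee4000 is passed as param[2] = 0x0fee (high half)
 * and param[3] = 0x4000 (low half).
 */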
465
466 #define PTI_RESET_LIMIT 400
467
468 static int __init qlogicpti_load_firmware(struct qlogicpti *qpti)
469 {
470 struct Scsi_Host *host = qpti->qhost;
471 unsigned short csum = 0;
472 unsigned short param[6];
473 unsigned short *risc_code, risc_code_addr, risc_code_length;
474 unsigned long flags;
475 int i, timeout;
476
477 risc_code = &sbus_risc_code01[0];
478 risc_code_addr = 0x1000; /* all f/w modules load at 0x1000 */
479 risc_code_length = sbus_risc_code_length01;
480
481 spin_lock_irqsave(host->host_lock, flags);
482
483 /* Verify the checksum twice, one before loading it, and once
484 * afterwards via the mailbox commands.
485 */
486 for (i = 0; i < risc_code_length; i++)
487 csum += risc_code[i];
488 if (csum) {
489 spin_unlock_irqrestore(host->host_lock, flags);
490 printk(KERN_EMERG "qlogicpti%d: Aieee, firmware checksum failed!",
491 qpti->qpti_id);
492 return 1;
493 }
494 sbus_writew(SBUS_CTRL_RESET, qpti->qregs + SBUS_CTRL);
495 sbus_writew((DMA_CTRL_CCLEAR | DMA_CTRL_CIRQ), qpti->qregs + CMD_DMA_CTRL);
496 sbus_writew((DMA_CTRL_CCLEAR | DMA_CTRL_CIRQ), qpti->qregs + DATA_DMA_CTRL);
497 timeout = PTI_RESET_LIMIT;
498 while (--timeout && (sbus_readw(qpti->qregs + SBUS_CTRL) & SBUS_CTRL_RESET))
499 udelay(20);
500 if (!timeout) {
501 spin_unlock_irqrestore(host->host_lock, flags);
502 printk(KERN_EMERG "qlogicpti%d: Cannot reset the ISP.", qpti->qpti_id);
503 return 1;
504 }
505
506 sbus_writew(HCCTRL_RESET, qpti->qregs + HCCTRL);
507 mdelay(1);
508
509 sbus_writew((SBUS_CTRL_GENAB | SBUS_CTRL_ERIRQ), qpti->qregs + SBUS_CTRL);
510 set_sbus_cfg1(qpti);
511 sbus_writew(0, qpti->qregs + SBUS_SEMAPHORE);
512
513 if (sbus_readw(qpti->qregs + RISC_PSR) & RISC_PSR_ULTRA) {
514 qpti->ultra = 1;
515 sbus_writew((RISC_MTREG_P0ULTRA | RISC_MTREG_P1ULTRA),
516 qpti->qregs + RISC_MTREG);
517 } else {
518 qpti->ultra = 0;
519 sbus_writew((RISC_MTREG_P0DFLT | RISC_MTREG_P1DFLT),
520 qpti->qregs + RISC_MTREG);
521 }
522
523 sbus_writew(HCCTRL_REL, qpti->qregs + HCCTRL);
524
525 /* Pin lines are only stable while RISC is paused. */
526 sbus_writew(HCCTRL_PAUSE, qpti->qregs + HCCTRL);
527 if (sbus_readw(qpti->qregs + CPU_PDIFF) & CPU_PDIFF_MODE)
528 qpti->differential = 1;
529 else
530 qpti->differential = 0;
531 sbus_writew(HCCTRL_REL, qpti->qregs + HCCTRL);
532
533 /* This shouldn't be necessary- we've reset things so we should be
534 running from the ROM now.. */
535
536 param[0] = MBOX_STOP_FIRMWARE;
537 param[1] = param[2] = param[3] = param[4] = param[5] = 0;
538 if (qlogicpti_mbox_command(qpti, param, 1)) {
539 printk(KERN_EMERG "qlogicpti%d: Cannot stop firmware for reload.\n",
540 qpti->qpti_id);
541 spin_unlock_irqrestore(host->host_lock, flags);
542 return 1;
543 }
544
545 /* Load it up.. */
546 for (i = 0; i < risc_code_length; i++) {
547 param[0] = MBOX_WRITE_RAM_WORD;
548 param[1] = risc_code_addr + i;
549 param[2] = risc_code[i];
550 if (qlogicpti_mbox_command(qpti, param, 1) ||
551 param[0] != MBOX_COMMAND_COMPLETE) {
552 printk("qlogicpti%d: Firmware dload failed, I'm bolixed!\n",
553 qpti->qpti_id);
554 spin_unlock_irqrestore(host->host_lock, flags);
555 return 1;
556 }
557 }
558
559 /* Reset the ISP again. */
560 sbus_writew(HCCTRL_RESET, qpti->qregs + HCCTRL);
561 mdelay(1);
562
563 qlogicpti_enable_irqs(qpti);
564 sbus_writew(0, qpti->qregs + SBUS_SEMAPHORE);
565 sbus_writew(HCCTRL_REL, qpti->qregs + HCCTRL);
566
567 /* Ask ISP to verify the checksum of the new code. */
568 param[0] = MBOX_VERIFY_CHECKSUM;
569 param[1] = risc_code_addr;
570 if (qlogicpti_mbox_command(qpti, param, 1) ||
571 (param[0] != MBOX_COMMAND_COMPLETE)) {
572 printk(KERN_EMERG "qlogicpti%d: New firmware csum failure!\n",
573 qpti->qpti_id);
574 spin_unlock_irqrestore(host->host_lock, flags);
575 return 1;
576 }
577
578 /* Start using newly downloaded firmware. */
579 param[0] = MBOX_EXEC_FIRMWARE;
580 param[1] = risc_code_addr;
581 qlogicpti_mbox_command(qpti, param, 1);
582
583 param[0] = MBOX_ABOUT_FIRMWARE;
584 if (qlogicpti_mbox_command(qpti, param, 1) ||
585 (param[0] != MBOX_COMMAND_COMPLETE)) {
586 printk(KERN_EMERG "qlogicpti%d: AboutFirmware cmd fails.\n",
587 qpti->qpti_id);
588 spin_unlock_irqrestore(host->host_lock, flags);
589 return 1;
590 }
591
592 /* Snag the major and minor revisions from the result. */
593 qpti->fware_majrev = param[1];
594 qpti->fware_minrev = param[2];
595 qpti->fware_micrev = param[3];
596
597 /* Set the clock rate */
598 param[0] = MBOX_SET_CLOCK_RATE;
599 param[1] = qpti->clock;
600 if (qlogicpti_mbox_command(qpti, param, 1) ||
601 (param[0] != MBOX_COMMAND_COMPLETE)) {
602 printk(KERN_EMERG "qlogicpti%d: could not set clock rate.\n",
603 qpti->qpti_id);
604 spin_unlock_irqrestore(host->host_lock, flags);
605 return 1;
606 }
607
608 if (qpti->is_pti != 0) {
609 /* Load scsi initiator ID and interrupt level into sbus static ram. */
610 param[0] = MBOX_WRITE_RAM_WORD;
611 param[1] = 0xff80;
612 param[2] = (unsigned short) qpti->scsi_id;
613 qlogicpti_mbox_command(qpti, param, 1);
614
615 param[0] = MBOX_WRITE_RAM_WORD;
616 param[1] = 0xff00;
617 param[2] = (unsigned short) 3;
618 qlogicpti_mbox_command(qpti, param, 1);
619 }
620
621 spin_unlock_irqrestore(host->host_lock, flags);
622 return 0;
623 }
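
/* Editor's illustrative sketch, not part of the original driver: the
 * firmware image is protected by a simple 16-bit additive checksum; summing
 * every word modulo 2^16 must yield zero, which is exactly what the loop at
 * the top of qlogicpti_load_firmware() checks before the download.
 */
#if 0	/* example only */
static int fw_image_checksum_ok(const unsigned short *words, unsigned int nwords)
{
	unsigned short csum = 0;
	unsigned int i;

	for (i = 0; i < nwords; i++)
		csum += words[i];	/* image carries a word zeroing the sum */
	return csum == 0;
}
#endif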
624
625 static int qlogicpti_verify_tmon(struct qlogicpti *qpti)
626 {
627 int curstat = sbus_readb(qpti->sreg);
628
629 curstat &= 0xf0;
630 if (!(curstat & SREG_FUSE) && (qpti->swsreg & SREG_FUSE))
631 printk("qlogicpti%d: Fuse returned to normal state.\n", qpti->qpti_id);
632 if (!(curstat & SREG_TPOWER) && (qpti->swsreg & SREG_TPOWER))
633 printk("qlogicpti%d: termpwr back to normal state.\n", qpti->qpti_id);
634 if (curstat != qpti->swsreg) {
635 int error = 0;
636 if (curstat & SREG_FUSE) {
637 error++;
638 printk("qlogicpti%d: Fuse is open!\n", qpti->qpti_id);
639 }
640 if (curstat & SREG_TPOWER) {
641 error++;
642 printk("qlogicpti%d: termpwr failure\n", qpti->qpti_id);
643 }
644 if (qpti->differential &&
645 (curstat & SREG_DSENSE) != SREG_DSENSE) {
646 error++;
647 printk("qlogicpti%d: You have a single ended device on a "
648 "differential bus! Please fix!\n", qpti->qpti_id);
649 }
650 qpti->swsreg = curstat;
651 return error;
652 }
653 return 0;
654 }
655
656 static irqreturn_t qpti_intr(int irq, void *dev_id, struct pt_regs *regs);
657
658 static void __init qpti_chain_add(struct qlogicpti *qpti)
659 {
660 spin_lock_irq(&qptichain_lock);
661 if (qptichain != NULL) {
662 struct qlogicpti *qlink = qptichain;
663
664 while(qlink->next)
665 qlink = qlink->next;
666 qlink->next = qpti;
667 } else {
668 qptichain = qpti;
669 }
670 qpti->next = NULL;
671 spin_unlock_irq(&qptichain_lock);
672 }
673
674 static void __init qpti_chain_del(struct qlogicpti *qpti)
675 {
676 spin_lock_irq(&qptichain_lock);
677 if (qptichain == qpti) {
678 qptichain = qpti->next;
679 } else {
680 struct qlogicpti *qlink = qptichain;
681 while(qlink->next != qpti)
682 qlink = qlink->next;
683 qlink->next = qpti->next;
684 }
685 qpti->next = NULL;
686 spin_unlock_irq(&qptichain_lock);
687 }
688
689 static int __init qpti_map_regs(struct qlogicpti *qpti)
690 {
691 struct sbus_dev *sdev = qpti->sdev;
692
693 qpti->qregs = sbus_ioremap(&sdev->resource[0], 0,
694 sdev->reg_addrs[0].reg_size,
695 "PTI Qlogic/ISP");
696 if (!qpti->qregs) {
697 printk("PTI: Qlogic/ISP registers are unmappable\n");
698 return -1;
699 }
700 if (qpti->is_pti) {
701 qpti->sreg = sbus_ioremap(&sdev->resource[0], (16 * 4096),
702 sizeof(unsigned char),
703 "PTI Qlogic/ISP statreg");
704 if (!qpti->sreg) {
705 printk("PTI: Qlogic/ISP status register is unmappable\n");
706 return -1;
707 }
708 }
709 return 0;
710 }
711
712 static int __init qpti_register_irq(struct qlogicpti *qpti)
713 {
714 struct sbus_dev *sdev = qpti->sdev;
715
716 qpti->qhost->irq = qpti->irq = sdev->irqs[0];
717
718 /* We used to try various overly-clever things to
719 * reduce the interrupt processing overhead on
720 * sun4c/sun4m when multiple PTI's shared the
721 * same IRQ. It was too complex and messy to
722 * sanely maintain.
723 */
724 if (request_irq(qpti->irq, qpti_intr,
725 SA_SHIRQ, "Qlogic/PTI", qpti))
726 goto fail;
727
728 printk("qpti%d: IRQ %s ", qpti->qpti_id, __irq_itoa(qpti->irq));
729
730 return 0;
731
732 fail:
733 printk("qpti%d: Cannot acquire irq line\n", qpti->qpti_id);
734 return -1;
735 }
736
737 static void __init qpti_get_scsi_id(struct qlogicpti *qpti)
738 {
739 qpti->scsi_id = prom_getintdefault(qpti->prom_node,
740 "initiator-id",
741 -1);
742 if (qpti->scsi_id == -1)
743 qpti->scsi_id = prom_getintdefault(qpti->prom_node,
744 "scsi-initiator-id",
745 -1);
746 if (qpti->scsi_id == -1)
747 qpti->scsi_id =
748 prom_getintdefault(qpti->sdev->bus->prom_node,
749 "scsi-initiator-id", 7);
750 qpti->qhost->this_id = qpti->scsi_id;
751 qpti->qhost->max_sectors = 64;
752
753 printk("SCSI ID %d ", qpti->scsi_id);
754 }
755
756 static void qpti_get_bursts(struct qlogicpti *qpti)
757 {
758 struct sbus_dev *sdev = qpti->sdev;
759 u8 bursts, bmask;
760
761 bursts = prom_getintdefault(qpti->prom_node, "burst-sizes", 0xff);
762 bmask = prom_getintdefault(sdev->bus->prom_node,
763 "burst-sizes", 0xff);
764 if (bmask != 0xff)
765 bursts &= bmask;
766 if (bursts == 0xff ||
767 (bursts & DMA_BURST16) == 0 ||
768 (bursts & DMA_BURST32) == 0)
769 bursts = (DMA_BURST32 - 1);
770
771 qpti->bursts = bursts;
772 }
773
774 static void qpti_get_clock(struct qlogicpti *qpti)
775 {
776 unsigned int cfreq;
777
778 /* Check for what the clock input to this card is.
779 * Default to 40Mhz.
780 */
781 cfreq = prom_getintdefault(qpti->prom_node,"clock-frequency",40000000);
782 qpti->clock = (cfreq + 500000)/1000000;
783 if (qpti->clock == 0) /* bullshit */
784 qpti->clock = 40;
785 }
786
787 /* The request and response queues must each be aligned
788 * on a page boundary.
789 */
790 static int __init qpti_map_queues(struct qlogicpti *qpti)
791 {
792 struct sbus_dev *sdev = qpti->sdev;
793
794 #define QSIZE(entries) (((entries) + 1) * QUEUE_ENTRY_LEN)
795 qpti->res_cpu = sbus_alloc_consistent(sdev,
796 QSIZE(RES_QUEUE_LEN),
797 &qpti->res_dvma);
798 if (qpti->res_cpu == NULL ||
799 qpti->res_dvma == 0) {
800 printk("QPTI: Cannot map response queue.\n");
801 return -1;
802 }
803
804 qpti->req_cpu = sbus_alloc_consistent(sdev,
805 QSIZE(QLOGICPTI_REQ_QUEUE_LEN),
806 &qpti->req_dvma);
807 if (qpti->req_cpu == NULL ||
808 qpti->req_dvma == 0) {
809 sbus_free_consistent(sdev, QSIZE(RES_QUEUE_LEN),
810 qpti->res_cpu, qpti->res_dvma);
811 printk("QPTI: Cannot map request queue.\n");
812 return -1;
813 }
814 memset(qpti->res_cpu, 0, QSIZE(RES_QUEUE_LEN));
815 memset(qpti->req_cpu, 0, QSIZE(QLOGICPTI_REQ_QUEUE_LEN));
816 return 0;
817 }
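
/* Editor's note (entry size and queue length assumed from qlogicpti.h of
 * this era, not shown here): each ring is allocated with one entry more than
 * its length so the "(in - out) & LEN" masking works.  Assuming 64-byte
 * queue entries and a response queue length of 255, QSIZE(RES_QUEUE_LEN) =
 * 256 * 64 = 16 KB, which sbus_alloc_consistent() hands back page aligned
 * as the comment above requires.
 */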
818
819 /* Detect all PTI Qlogic ISP's in the machine. */
820 static int __init qlogicpti_detect(struct scsi_host_template *tpnt)
821 {
822 struct qlogicpti *qpti;
823 struct Scsi_Host *qpti_host;
824 struct sbus_bus *sbus;
825 struct sbus_dev *sdev;
826 int nqptis = 0, nqptis_in_use = 0;
827
828 tpnt->proc_name = "qlogicpti";
829 for_each_sbus(sbus) {
830 for_each_sbusdev(sdev, sbus) {
831 /* Is this a red snapper? */
832 if (strcmp(sdev->prom_name, "ptisp") &&
833 strcmp(sdev->prom_name, "PTI,ptisp") &&
834 strcmp(sdev->prom_name, "QLGC,isp") &&
835 strcmp(sdev->prom_name, "SUNW,isp"))
836 continue;
837
838 /* Sometimes Antares cards come up not completely
839 * setup, and we get a report of a zero IRQ.
840 * Skip over them in such cases so we survive.
841 */
842 if (sdev->irqs[0] == 0) {
843 printk("qpti%d: Adapter reports no interrupt, "
844 "skipping over this card.", nqptis);
845 continue;
846 }
847
848 /* Yep, register and allocate software state. */
849 qpti_host = scsi_register(tpnt, sizeof(struct qlogicpti));
850 if (!qpti_host) {
851 printk("QPTI: Cannot register PTI Qlogic ISP SCSI host");
852 continue;
853 }
854 qpti = (struct qlogicpti *) qpti_host->hostdata;
855
856 /* We are wide capable, 16 targets. */
857 qpti_host->max_id = MAX_TARGETS;
858
859 /* Setup back pointers and misc. state. */
860 qpti->qhost = qpti_host;
861 qpti->sdev = sdev;
862 qpti->qpti_id = nqptis++;
863 qpti->prom_node = sdev->prom_node;
864 prom_getstring(qpti->prom_node, "name",
865 qpti->prom_name,
866 sizeof(qpti->prom_name));
867
868 /* This is not correct, actually. There's a switch
869 * on the PTI cards that puts them into "emulation"
870 * mode, i.e., they report themselves as QLGC,isp
871 * instead of PTI,ptisp. The only real substantive
872 * difference between non-pti and pti cards is
873 * the tmon register. Which is possibly even
874 * there for Qlogic cards, but non-functional.
875 */
876 qpti->is_pti = (strcmp (qpti->prom_name, "QLGC,isp") != 0);
877
878 qpti_chain_add(qpti);
879 if (qpti_map_regs(qpti) < 0)
880 goto fail_unlink;
881
882 if (qpti_register_irq(qpti) < 0)
883 goto fail_unmap_regs;
884
885 qpti_get_scsi_id(qpti);
886 qpti_get_bursts(qpti);
887 qpti_get_clock(qpti);
888
889 /* Clear out scsi_cmnd array. */
890 memset(qpti->cmd_slots, 0, sizeof(qpti->cmd_slots));
891
892 if (qpti_map_queues(qpti) < 0)
893 goto fail_free_irq;
894
895 /* Load the firmware. */
896 if (qlogicpti_load_firmware(qpti))
897 goto fail_unmap_queues;
898 if (qpti->is_pti) {
899 /* Check the PTI status reg. */
900 if (qlogicpti_verify_tmon(qpti))
901 goto fail_unmap_queues;
902 }
903
904 /* Reset the ISP and init res/req queues. */
905 if (qlogicpti_reset_hardware(qpti_host))
906 goto fail_unmap_queues;
907
908 printk("(Firmware v%d.%d.%d)", qpti->fware_majrev,
909 qpti->fware_minrev, qpti->fware_micrev);
910 {
911 char buffer[60];
912
913 prom_getstring (qpti->prom_node,
914 "isp-fcode", buffer, 60);
915 if (buffer[0])
916 printk("(Firmware %s)", buffer);
917 if (prom_getbool(qpti->prom_node, "differential"))
918 qpti->differential = 1;
919 }
920
921 printk (" [%s Wide, using %s interface]\n",
922 (qpti->ultra ? "Ultra" : "Fast"),
923 (qpti->differential ? "differential" : "single ended"));
924
925 nqptis_in_use++;
926 continue;
927
928 fail_unmap_queues:
929 #define QSIZE(entries) (((entries) + 1) * QUEUE_ENTRY_LEN)
930 sbus_free_consistent(qpti->sdev,
931 QSIZE(RES_QUEUE_LEN),
932 qpti->res_cpu, qpti->res_dvma);
933 sbus_free_consistent(qpti->sdev,
934 QSIZE(QLOGICPTI_REQ_QUEUE_LEN),
935 qpti->req_cpu, qpti->req_dvma);
936 #undef QSIZE
937 fail_free_irq:
938 free_irq(qpti->irq, qpti);
939
940 fail_unmap_regs:
941 sbus_iounmap(qpti->qregs,
942 qpti->sdev->reg_addrs[0].reg_size);
943 if (qpti->is_pti)
944 sbus_iounmap(qpti->sreg, sizeof(unsigned char));
945 fail_unlink:
946 qpti_chain_del(qpti);
947 scsi_unregister(qpti->qhost);
948 }
949 }
950 if (nqptis)
951 printk("QPTI: Total of %d PTI Qlogic/ISP hosts found, %d actually in use.\n",
952 nqptis, nqptis_in_use);
953 qptis_running = nqptis_in_use;
954 return nqptis;
955 }
956
957 static int qlogicpti_release(struct Scsi_Host *host)
958 {
959 struct qlogicpti *qpti = (struct qlogicpti *) host->hostdata;
960
961 /* Remove visibility from IRQ handlers. */
962 qpti_chain_del(qpti);
963
964 /* Shut up the card. */
965 sbus_writew(0, qpti->qregs + SBUS_CTRL);
966
967 /* Free IRQ handler and unmap Qlogic,ISP and PTI status regs. */
968 free_irq(qpti->irq, qpti);
969
970 #define QSIZE(entries) (((entries) + 1) * QUEUE_ENTRY_LEN)
971 sbus_free_consistent(qpti->sdev,
972 QSIZE(RES_QUEUE_LEN),
973 qpti->res_cpu, qpti->res_dvma);
974 sbus_free_consistent(qpti->sdev,
975 QSIZE(QLOGICPTI_REQ_QUEUE_LEN),
976 qpti->req_cpu, qpti->req_dvma);
977 #undef QSIZE
978
979 sbus_iounmap(qpti->qregs, qpti->sdev->reg_addrs[0].reg_size);
980 if (qpti->is_pti)
981 sbus_iounmap(qpti->sreg, sizeof(unsigned char));
982
983 return 0;
984 }
985
986 const char *qlogicpti_info(struct Scsi_Host *host)
987 {
988 static char buf[80];
989 struct qlogicpti *qpti = (struct qlogicpti *) host->hostdata;
990
991 sprintf(buf, "PTI Qlogic,ISP SBUS SCSI irq %s regs at %p",
992 __irq_itoa(qpti->qhost->irq), qpti->qregs);
993 return buf;
994 }
995
996 /* I am a certified frobtronicist. */
997 static inline void marker_frob(struct Command_Entry *cmd)
998 {
999 struct Marker_Entry *marker = (struct Marker_Entry *) cmd;
1000
1001 memset(marker, 0, sizeof(struct Marker_Entry));
1002 marker->hdr.entry_cnt = 1;
1003 marker->hdr.entry_type = ENTRY_MARKER;
1004 marker->modifier = SYNC_ALL;
1005 marker->rsvd = 0;
1006 }
1007
1008 static inline void cmd_frob(struct Command_Entry *cmd, struct scsi_cmnd *Cmnd,
1009 struct qlogicpti *qpti)
1010 {
1011 memset(cmd, 0, sizeof(struct Command_Entry));
1012 cmd->hdr.entry_cnt = 1;
1013 cmd->hdr.entry_type = ENTRY_COMMAND;
1014 cmd->target_id = Cmnd->device->id;
1015 cmd->target_lun = Cmnd->device->lun;
1016 cmd->cdb_length = Cmnd->cmd_len;
1017 cmd->control_flags = 0;
1018 if (Cmnd->device->tagged_supported) {
1019 if (qpti->cmd_count[Cmnd->device->id] == 0)
1020 qpti->tag_ages[Cmnd->device->id] = jiffies;
1021 if (time_after(jiffies, qpti->tag_ages[Cmnd->device->id] + (5*HZ))) {
1022 cmd->control_flags = CFLAG_ORDERED_TAG;
1023 qpti->tag_ages[Cmnd->device->id] = jiffies;
1024 } else
1025 cmd->control_flags = CFLAG_SIMPLE_TAG;
1026 }
1027 if ((Cmnd->cmnd[0] == WRITE_6) ||
1028 (Cmnd->cmnd[0] == WRITE_10) ||
1029 (Cmnd->cmnd[0] == WRITE_12))
1030 cmd->control_flags |= CFLAG_WRITE;
1031 else
1032 cmd->control_flags |= CFLAG_READ;
1033 cmd->time_out = 30;
1034 memcpy(cmd->cdb, Cmnd->cmnd, Cmnd->cmd_len);
1035 }
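
/* Editor's note: for targets that accept tagged commands the driver issues
 * SIMPLE tags, but at least once every 5 seconds (5*HZ jiffies) per target
 * it substitutes an ORDERED tag so commands the drive keeps reordering
 * cannot be starved indefinitely; the timestamp restarts whenever that
 * target's queue drains (cmd_count reaches zero).
 */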
1036
1037 /* Do it to it baby. */
1038 static inline int load_cmd(struct scsi_cmnd *Cmnd, struct Command_Entry *cmd,
1039 struct qlogicpti *qpti, u_int in_ptr, u_int out_ptr)
1040 {
1041 struct dataseg *ds;
1042 struct scatterlist *sg;
1043 int i, n;
1044
1045 if (Cmnd->use_sg) {
1046 int sg_count;
1047
1048 sg = (struct scatterlist *) Cmnd->buffer;
1049 sg_count = sbus_map_sg(qpti->sdev, sg, Cmnd->use_sg, Cmnd->sc_data_direction);
1050
1051 ds = cmd->dataseg;
1052 cmd->segment_cnt = sg_count;
1053
1054 /* Fill in first four sg entries: */
1055 n = sg_count;
1056 if (n > 4)
1057 n = 4;
1058 for (i = 0; i < n; i++, sg++) {
1059 ds[i].d_base = sg_dma_address(sg);
1060 ds[i].d_count = sg_dma_len(sg);
1061 }
1062 sg_count -= 4;
1063 while (sg_count > 0) {
1064 struct Continuation_Entry *cont;
1065
1066 ++cmd->hdr.entry_cnt;
1067 cont = (struct Continuation_Entry *) &qpti->req_cpu[in_ptr];
1068 in_ptr = NEXT_REQ_PTR(in_ptr);
1069 if (in_ptr == out_ptr)
1070 return -1;
1071
1072 cont->hdr.entry_type = ENTRY_CONTINUATION;
1073 cont->hdr.entry_cnt = 0;
1074 cont->hdr.sys_def_1 = 0;
1075 cont->hdr.flags = 0;
1076 cont->reserved = 0;
1077 ds = cont->dataseg;
1078 n = sg_count;
1079 if (n > 7)
1080 n = 7;
1081 for (i = 0; i < n; i++, sg++) {
1082 ds[i].d_base = sg_dma_address(sg);
1083 ds[i].d_count = sg_dma_len(sg);
1084 }
1085 sg_count -= n;
1086 }
1087 } else if (Cmnd->request_bufflen) {
1088 Cmnd->SCp.ptr = (char *)(unsigned long)
1089 sbus_map_single(qpti->sdev,
1090 Cmnd->request_buffer,
1091 Cmnd->request_bufflen,
1092 Cmnd->sc_data_direction);
1093
1094 cmd->dataseg[0].d_base = (u32) ((unsigned long)Cmnd->SCp.ptr);
1095 cmd->dataseg[0].d_count = Cmnd->request_bufflen;
1096 cmd->segment_cnt = 1;
1097 } else {
1098 cmd->dataseg[0].d_base = 0;
1099 cmd->dataseg[0].d_count = 0;
1100 cmd->segment_cnt = 1; /* Shouldn't this be 0? */
1101 }
1102
1103 /* Committed, record Scsi_Cmd so we can find it later. */
1104 cmd->handle = in_ptr;
1105 qpti->cmd_slots[in_ptr] = Cmnd;
1106
1107 qpti->cmd_count[Cmnd->device->id]++;
1108 sbus_writew(in_ptr, qpti->qregs + MBOX4);
1109 qpti->req_in_ptr = in_ptr;
1110
1111 return in_ptr;
1112 }
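
/* Editor's worked example: the Command_Entry itself carries the first four
 * scatter-gather slots and each Continuation_Entry carries seven more, so a
 * 15-segment request uses the command entry (4) plus two continuation
 * entries (7 + 4) and finishes with hdr.entry_cnt == 3.  The ring index is
 * stored in cmd->handle so the completion path can find the scsi_cmnd again
 * in cmd_slots[].
 */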
1113
1114 static inline void update_can_queue(struct Scsi_Host *host, u_int in_ptr, u_int out_ptr)
1115 {
1116 /* Temporary workaround until bug is found and fixed (one bug has been found
1117 already, but fixing it makes things even worse) -jj */
1118 int num_free = QLOGICPTI_REQ_QUEUE_LEN - REQ_QUEUE_DEPTH(in_ptr, out_ptr) - 64;
1119 host->can_queue = host->host_busy + num_free;
1120 host->sg_tablesize = QLOGICPTI_MAX_SG(num_free);
1121 }
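
/* Editor's worked example (queue length assumed to be 255): with in = 10
 * and out = 200 the request ring holds (10 - 200) & 255 = 66 entries, so
 * num_free = 255 - 66 - 64 = 125 and can_queue becomes host_busy + 125;
 * the extra 64-slot margin is the workaround mentioned above.
 */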
1122
1123 static unsigned int scsi_rbuf_get(struct scsi_cmnd *cmd, unsigned char **buf_out)
1124 {
1125 unsigned char *buf;
1126 unsigned int buflen;
1127
1128 if (cmd->use_sg) {
1129 struct scatterlist *sg;
1130
1131 sg = (struct scatterlist *) cmd->request_buffer;
1132 buf = kmap_atomic(sg->page, KM_IRQ0) + sg->offset;
1133 buflen = sg->length;
1134 } else {
1135 buf = cmd->request_buffer;
1136 buflen = cmd->request_bufflen;
1137 }
1138
1139 *buf_out = buf;
1140 return buflen;
1141 }
1142
1143 static void scsi_rbuf_put(struct scsi_cmnd *cmd, unsigned char *buf)
1144 {
1145 if (cmd->use_sg) {
1146 struct scatterlist *sg;
1147
1148 sg = (struct scatterlist *) cmd->request_buffer;
1149 kunmap_atomic(buf - sg->offset, KM_IRQ0);
1150 }
1151 }
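
/* Editor's note: scsi_rbuf_get() returns kmap_atomic(sg->page) + sg->offset
 * for scatter-gather commands, so scsi_rbuf_put() subtracts sg->offset again
 * to recover the exact address kmap_atomic() handed out before unmapping;
 * the two must always be called as a pair under KM_IRQ0.
 */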
1152
1153 /*
1154 * Until we scan the entire bus with inquiries, go through this fella...
1155 */
1156 static void ourdone(struct scsi_cmnd *Cmnd)
1157 {
1158 struct qlogicpti *qpti = (struct qlogicpti *) Cmnd->device->host->hostdata;
1159 int tgt = Cmnd->device->id;
1160 void (*done) (struct scsi_cmnd *);
1161
1162 /* This grot added by DaveM, blame him for ugliness.
1163 * The issue is that in the 2.3.x driver we use the
1164 * host_scribble portion of the scsi command as a
1165 * completion linked list at interrupt service time,
1166 * so we have to store the done function pointer elsewhere.
1167 */
1168 done = (void (*)(struct scsi_cmnd *))
1169 (((unsigned long) Cmnd->SCp.Message)
1170 #ifdef __sparc_v9__
1171 | ((unsigned long) Cmnd->SCp.Status << 32UL)
1172 #endif
1173 );
1174
1175 if ((qpti->sbits & (1 << tgt)) == 0) {
1176 int ok = host_byte(Cmnd->result) == DID_OK;
1177 if (Cmnd->cmnd[0] == 0x12 && ok) {
1178 unsigned char *iqd;
1179 unsigned int iqd_len;
1180
1181 iqd_len = scsi_rbuf_get(Cmnd, &iqd);
1182
1183 /* tags handled in midlayer */
1184 /* enable sync mode? */
1185 if (iqd[7] & 0x10) {
1186 qpti->dev_param[tgt].device_flags |= 0x10;
1187 } else {
1188 qpti->dev_param[tgt].synchronous_offset = 0;
1189 qpti->dev_param[tgt].synchronous_period = 0;
1190 }
1191 /* are we wide capable? */
1192 if (iqd[7] & 0x20) {
1193 qpti->dev_param[tgt].device_flags |= 0x20;
1194 }
1195
1196 scsi_rbuf_put(Cmnd, iqd);
1197
1198 qpti->sbits |= (1 << tgt);
1199 } else if (!ok) {
1200 qpti->sbits |= (1 << tgt);
1201 }
1202 }
1203 done(Cmnd);
1204 }
1205
1206 static int qlogicpti_queuecommand(struct scsi_cmnd *Cmnd, void (*done)(struct scsi_cmnd *));
1207
1208 static int qlogicpti_queuecommand_slow(struct scsi_cmnd *Cmnd,
1209 void (*done)(struct scsi_cmnd *))
1210 {
1211 struct qlogicpti *qpti = (struct qlogicpti *) Cmnd->device->host->hostdata;
1212
1213 /*
1214 * done checking this host adapter?
1215 * If not, then rewrite the command
1216 * to finish through ourdone so we
1217 * can peek at Inquiry data results.
1218 */
1219 if (qpti->sbits && qpti->sbits != 0xffff) {
1220 /* See the comment in ourdone() above about this ugliness... */
1221 Cmnd->SCp.Message = ((unsigned long)done) & 0xffffffff;
1222 #ifdef CONFIG_SPARC64
1223 Cmnd->SCp.Status = ((unsigned long)done >> 32UL) & 0xffffffff;
1224 #endif
1225 return qlogicpti_queuecommand(Cmnd, ourdone);
1226 }
1227
1228 /*
1229 * We've peeked at all targets for this bus- time
1230 * to set parameters for devices for real now.
1231 */
1232 if (qpti->sbits == 0xffff) {
1233 int i;
1234 for(i = 0; i < MAX_TARGETS; i++) {
1235 u_short param[6];
1236 param[0] = MBOX_SET_TARGET_PARAMS;
1237 param[1] = (i << 8);
1238 param[2] = (qpti->dev_param[i].device_flags << 8);
1239 if (qpti->dev_param[i].device_flags & 0x10) {
1240 param[3] = (qpti->dev_param[i].synchronous_offset << 8) |
1241 qpti->dev_param[i].synchronous_period;
1242 } else {
1243 param[3] = 0;
1244 }
1245 (void) qlogicpti_mbox_command(qpti, param, 0);
1246 }
1247 /*
1248 * set to zero so any traverse through ourdone
1249 * doesn't start the whole process again,
1250 */
1251 qpti->sbits = 0;
1252 }
1253
1254 /* check to see if we're done with all adapters... */
1255 for (qpti = qptichain; qpti != NULL; qpti = qpti->next) {
1256 if (qpti->sbits) {
1257 break;
1258 }
1259 }
1260
1261 /*
1262 * if we hit the end of the chain w/o finding adapters still
1263 * capability-configuring, then we're done with all adapters
1264 * and can rock on..
1265 */
1266 if (qpti == NULL)
1267 Cmnd->device->host->hostt->queuecommand = qlogicpti_queuecommand;
1268
1269 return qlogicpti_queuecommand(Cmnd, done);
1270 }
1271
1272 /*
1273 * The middle SCSI layer ensures that queuecommand never gets invoked
1274 * concurrently with itself or the interrupt handler (though the
1275 * interrupt handler may call this routine as part of
1276 * request-completion handling).
1277 *
1278 * "This code must fly." -davem
1279 */
1280 static int qlogicpti_queuecommand(struct scsi_cmnd *Cmnd, void (*done)(struct scsi_cmnd *))
1281 {
1282 struct Scsi_Host *host = Cmnd->device->host;
1283 struct qlogicpti *qpti = (struct qlogicpti *) host->hostdata;
1284 struct Command_Entry *cmd;
1285 u_int out_ptr;
1286 int in_ptr;
1287
1288 Cmnd->scsi_done = done;
1289
1290 in_ptr = qpti->req_in_ptr;
1291 cmd = (struct Command_Entry *) &qpti->req_cpu[in_ptr];
1292 out_ptr = sbus_readw(qpti->qregs + MBOX4);
1293 in_ptr = NEXT_REQ_PTR(in_ptr);
1294 if (in_ptr == out_ptr)
1295 goto toss_command;
1296
1297 if (qpti->send_marker) {
1298 marker_frob(cmd);
1299 qpti->send_marker = 0;
1300 if (NEXT_REQ_PTR(in_ptr) == out_ptr) {
1301 sbus_writew(in_ptr, qpti->qregs + MBOX4);
1302 qpti->req_in_ptr = in_ptr;
1303 goto toss_command;
1304 }
1305 cmd = (struct Command_Entry *) &qpti->req_cpu[in_ptr];
1306 in_ptr = NEXT_REQ_PTR(in_ptr);
1307 }
1308 cmd_frob(cmd, Cmnd, qpti);
1309 if ((in_ptr = load_cmd(Cmnd, cmd, qpti, in_ptr, out_ptr)) == -1)
1310 goto toss_command;
1311
1312 update_can_queue(host, in_ptr, out_ptr);
1313
1314 return 0;
1315
1316 toss_command:
1317 printk(KERN_EMERG "qlogicpti%d: request queue overflow\n",
1318 qpti->qpti_id);
1319
1320 /* Unfortunately, unless you use the new EH code, which
1321 * we don't, the midlayer will ignore the return value,
1322 * which is insane. We pick up the pieces like this.
1323 */
1324 Cmnd->result = DID_BUS_BUSY;
1325 done(Cmnd);
1326 return 1;
1327 }
1328
1329 static int qlogicpti_return_status(struct Status_Entry *sts, int id)
1330 {
1331 int host_status = DID_ERROR;
1332
1333 switch (sts->completion_status) {
1334 case CS_COMPLETE:
1335 host_status = DID_OK;
1336 break;
1337 case CS_INCOMPLETE:
1338 if (!(sts->state_flags & SF_GOT_BUS))
1339 host_status = DID_NO_CONNECT;
1340 else if (!(sts->state_flags & SF_GOT_TARGET))
1341 host_status = DID_BAD_TARGET;
1342 else if (!(sts->state_flags & SF_SENT_CDB))
1343 host_status = DID_ERROR;
1344 else if (!(sts->state_flags & SF_TRANSFERRED_DATA))
1345 host_status = DID_ERROR;
1346 else if (!(sts->state_flags & SF_GOT_STATUS))
1347 host_status = DID_ERROR;
1348 else if (!(sts->state_flags & SF_GOT_SENSE))
1349 host_status = DID_ERROR;
1350 break;
1351 case CS_DMA_ERROR:
1352 case CS_TRANSPORT_ERROR:
1353 host_status = DID_ERROR;
1354 break;
1355 case CS_RESET_OCCURRED:
1356 case CS_BUS_RESET:
1357 host_status = DID_RESET;
1358 break;
1359 case CS_ABORTED:
1360 host_status = DID_ABORT;
1361 break;
1362 case CS_TIMEOUT:
1363 host_status = DID_TIME_OUT;
1364 break;
1365 case CS_DATA_OVERRUN:
1366 case CS_COMMAND_OVERRUN:
1367 case CS_STATUS_OVERRUN:
1368 case CS_BAD_MESSAGE:
1369 case CS_NO_MESSAGE_OUT:
1370 case CS_EXT_ID_FAILED:
1371 case CS_IDE_MSG_FAILED:
1372 case CS_ABORT_MSG_FAILED:
1373 case CS_NOP_MSG_FAILED:
1374 case CS_PARITY_ERROR_MSG_FAILED:
1375 case CS_DEVICE_RESET_MSG_FAILED:
1376 case CS_ID_MSG_FAILED:
1377 case CS_UNEXP_BUS_FREE:
1378 host_status = DID_ERROR;
1379 break;
1380 case CS_DATA_UNDERRUN:
1381 host_status = DID_OK;
1382 break;
1383 default:
1384 printk(KERN_EMERG "qpti%d: unknown completion status 0x%04x\n",
1385 id, sts->completion_status);
1386 host_status = DID_ERROR;
1387 break;
1388 }
1389
1390 return (sts->scsi_status & STATUS_MASK) | (host_status << 16);
1391 }
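
/* Editor's worked example: the midlayer result word keeps the host byte in
 * bits 16-23 and the masked SCSI status in the low byte.  A command that
 * completes with CHECK CONDITION (status 0x02) under CS_COMPLETE returns
 * 0x00000002, while a CS_TIMEOUT returns DID_TIME_OUT << 16, i.e.
 * 0x00030000 with DID_TIME_OUT being 0x03 in scsi.h of this era.
 */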
1392
1393 static struct scsi_cmnd *qlogicpti_intr_handler(struct qlogicpti *qpti)
1394 {
1395 struct scsi_cmnd *Cmnd, *done_queue = NULL;
1396 struct Status_Entry *sts;
1397 u_int in_ptr, out_ptr;
1398
1399 if (!(sbus_readw(qpti->qregs + SBUS_STAT) & SBUS_STAT_RINT))
1400 return NULL;
1401
1402 in_ptr = sbus_readw(qpti->qregs + MBOX5);
1403 sbus_writew(HCCTRL_CRIRQ, qpti->qregs + HCCTRL);
1404 if (sbus_readw(qpti->qregs + SBUS_SEMAPHORE) & SBUS_SEMAPHORE_LCK) {
1405 switch (sbus_readw(qpti->qregs + MBOX0)) {
1406 case ASYNC_SCSI_BUS_RESET:
1407 case EXECUTION_TIMEOUT_RESET:
1408 qpti->send_marker = 1;
1409 break;
1410 case INVALID_COMMAND:
1411 case HOST_INTERFACE_ERROR:
1412 case COMMAND_ERROR:
1413 case COMMAND_PARAM_ERROR:
1414 break;
1415 };
1416 sbus_writew(0, qpti->qregs + SBUS_SEMAPHORE);
1417 }
1418
1419 /* This looks like a network driver! */
1420 out_ptr = qpti->res_out_ptr;
1421 while (out_ptr != in_ptr) {
1422 u_int cmd_slot;
1423
1424 sts = (struct Status_Entry *) &qpti->res_cpu[out_ptr];
1425 out_ptr = NEXT_RES_PTR(out_ptr);
1426
1427 /* We store an index in the handle, not the pointer in
1428 * some form. This avoids problems due to the fact
1429 * that the handle provided is only 32-bits. -DaveM
1430 */
1431 cmd_slot = sts->handle;
1432 Cmnd = qpti->cmd_slots[cmd_slot];
1433 qpti->cmd_slots[cmd_slot] = NULL;
1434
1435 if (sts->completion_status == CS_RESET_OCCURRED ||
1436 sts->completion_status == CS_ABORTED ||
1437 (sts->status_flags & STF_BUS_RESET))
1438 qpti->send_marker = 1;
1439
1440 if (sts->state_flags & SF_GOT_SENSE)
1441 memcpy(Cmnd->sense_buffer, sts->req_sense_data,
1442 sizeof(Cmnd->sense_buffer));
1443
1444 if (sts->hdr.entry_type == ENTRY_STATUS)
1445 Cmnd->result =
1446 qlogicpti_return_status(sts, qpti->qpti_id);
1447 else
1448 Cmnd->result = DID_ERROR << 16;
1449
1450 if (Cmnd->use_sg) {
1451 sbus_unmap_sg(qpti->sdev,
1452 (struct scatterlist *)Cmnd->buffer,
1453 Cmnd->use_sg,
1454 Cmnd->sc_data_direction);
1455 } else {
1456 sbus_unmap_single(qpti->sdev,
1457 (__u32)((unsigned long)Cmnd->SCp.ptr),
1458 Cmnd->request_bufflen,
1459 Cmnd->sc_data_direction);
1460 }
1461 qpti->cmd_count[Cmnd->device->id]--;
1462 sbus_writew(out_ptr, qpti->qregs + MBOX5);
1463 Cmnd->host_scribble = (unsigned char *) done_queue;
1464 done_queue = Cmnd;
1465 }
1466 qpti->res_out_ptr = out_ptr;
1467
1468 return done_queue;
1469 }
1470
1471 static irqreturn_t qpti_intr(int irq, void *dev_id, struct pt_regs *regs)
1472 {
1473 struct qlogicpti *qpti = dev_id;
1474 unsigned long flags;
1475 struct scsi_cmnd *dq;
1476
1477 spin_lock_irqsave(qpti->qhost->host_lock, flags);
1478 dq = qlogicpti_intr_handler(qpti);
1479
1480 if (dq != NULL) {
1481 do {
1482 struct scsi_cmnd *next;
1483
1484 next = (struct scsi_cmnd *) dq->host_scribble;
1485 dq->scsi_done(dq);
1486 dq = next;
1487 } while (dq != NULL);
1488 }
1489 spin_unlock_irqrestore(qpti->qhost->host_lock, flags);
1490
1491 return IRQ_HANDLED;
1492 }
1493
1494 static int qlogicpti_abort(struct scsi_cmnd *Cmnd)
1495 {
1496 u_short param[6];
1497 struct Scsi_Host *host = Cmnd->device->host;
1498 struct qlogicpti *qpti = (struct qlogicpti *) host->hostdata;
1499 int return_status = SUCCESS;
1500 u32 cmd_cookie;
1501 int i;
1502
1503 printk(KERN_WARNING "qlogicpti : Aborting cmd for tgt[%d] lun[%d]\n",
1504 (int)Cmnd->device->id, (int)Cmnd->device->lun);
1505
1506 qlogicpti_disable_irqs(qpti);
1507
1508 /* Find the 32-bit cookie we gave to the firmware for
1509 * this command.
1510 */
1511 for (i = 0; i < QLOGICPTI_REQ_QUEUE_LEN + 1; i++)
1512 if (qpti->cmd_slots[i] == Cmnd)
1513 break;
1514 cmd_cookie = i;
1515
1516 param[0] = MBOX_ABORT;
1517 param[1] = (((u_short) Cmnd->device->id) << 8) | Cmnd->device->lun;
1518 param[2] = cmd_cookie >> 16;
1519 param[3] = cmd_cookie & 0xffff;
1520 if (qlogicpti_mbox_command(qpti, param, 0) ||
1521 (param[0] != MBOX_COMMAND_COMPLETE)) {
1522 printk(KERN_EMERG "qlogicpti : scsi abort failure: %x\n", param[0]);
1523 return_status = FAILED;
1524 }
1525
1526 qlogicpti_enable_irqs(qpti);
1527
1528 return return_status;
1529 }
1530
1531 static int qlogicpti_reset(struct scsi_cmnd *Cmnd)
1532 {
1533 u_short param[6];
1534 struct Scsi_Host *host = Cmnd->device->host;
1535 struct qlogicpti *qpti = (struct qlogicpti *) host->hostdata;
1536 int return_status = SUCCESS;
1537
1538 printk(KERN_WARNING "qlogicpti : Resetting SCSI bus!\n");
1539
1540 qlogicpti_disable_irqs(qpti);
1541
1542 param[0] = MBOX_BUS_RESET;
1543 param[1] = qpti->host_param.bus_reset_delay;
1544 if (qlogicpti_mbox_command(qpti, param, 0) ||
1545 (param[0] != MBOX_COMMAND_COMPLETE)) {
1546 printk(KERN_EMERG "qlogicisp : scsi bus reset failure: %x\n", param[0]);
1547 return_status = FAILED;
1548 }
1549
1550 qlogicpti_enable_irqs(qpti);
1551
1552 return return_status;
1553 }
1554
1555 static struct scsi_host_template driver_template = {
1556 .detect = qlogicpti_detect,
1557 .release = qlogicpti_release,
1558 .info = qlogicpti_info,
1559 .queuecommand = qlogicpti_queuecommand_slow,
1560 .eh_abort_handler = qlogicpti_abort,
1561 .eh_bus_reset_handler = qlogicpti_reset,
1562 .can_queue = QLOGICPTI_REQ_QUEUE_LEN,
1563 .this_id = 7,
1564 .sg_tablesize = QLOGICPTI_MAX_SG(QLOGICPTI_REQ_QUEUE_LEN),
1565 .cmd_per_lun = 1,
1566 .use_clustering = ENABLE_CLUSTERING,
1567 };
1568
1569
1570 #include "scsi_module.c"
1571
1572 MODULE_LICENSE("GPL");
1573