1 /* qlogicpti.c: Performance Technologies QlogicISP sbus card driver.
2 *
3 * Copyright (C) 1996, 2006, 2008 David S. Miller (davem@davemloft.net)
4 *
5 * A lot of this driver was directly stolen from Erik H. Moe's PCI
6 * Qlogic ISP driver. Mucho kudos to him for this code.
7 *
8 * An even bigger kudos to John Grana at Performance Technologies
9 * for providing me with the hardware to write this driver, you rule
10 * John you really do.
11 *
12 * May 2, 1997: Added support for QLGC,isp --jj
13 */
14
15 #include <linux/kernel.h>
16 #include <linux/delay.h>
17 #include <linux/types.h>
18 #include <linux/string.h>
19 #include <linux/gfp.h>
20 #include <linux/blkdev.h>
21 #include <linux/proc_fs.h>
22 #include <linux/stat.h>
23 #include <linux/init.h>
24 #include <linux/spinlock.h>
25 #include <linux/interrupt.h>
26 #include <linux/module.h>
27 #include <linux/jiffies.h>
28 #include <linux/dma-mapping.h>
29 #include <linux/of.h>
30 #include <linux/of_device.h>
31 #include <linux/firmware.h>
32
33 #include <asm/byteorder.h>
34
35 #include "qlogicpti.h"
36
37 #include <asm/dma.h>
38 #include <asm/ptrace.h>
39 #include <asm/pgtable.h>
40 #include <asm/oplib.h>
41 #include <asm/io.h>
42 #include <asm/irq.h>
43
44 #include <scsi/scsi.h>
45 #include <scsi/scsi_cmnd.h>
46 #include <scsi/scsi_device.h>
47 #include <scsi/scsi_eh.h>
48 #include <scsi/scsi_tcq.h>
49 #include <scsi/scsi_host.h>
50
51 #define MAX_TARGETS 16
52 #define MAX_LUNS 8 /* 32 for 1.31 F/W */
53
54 #define DEFAULT_LOOP_COUNT 10000
55
56 static struct qlogicpti *qptichain = NULL;
57 static DEFINE_SPINLOCK(qptichain_lock);
58
59 #define PACKB(a, b) (((a)<<4)|(b))
60
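/* Each mbox_param[] entry packs, for the corresponding mailbox opcode, the
 * number of MBOXn registers written to the chip (high nibble) and the number
 * read back as results (low nibble); qlogicpti_mbox_command() switches on
 * ">> 4" for the outgoing registers and on "& 0xf" for the readback. A
 * PACKB(0, 0) slot marks an opcode treated as unsupported (the mailbox
 * routine just returns 1 for it). For example, MBOX_SET_RETRY_COUNT is
 * PACKB(3, 3): write MBOX0..MBOX2, read back MBOX0..MBOX2.
 */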
61 static const u_char mbox_param[] = {
62 PACKB(1, 1), /* MBOX_NO_OP */
63 PACKB(5, 5), /* MBOX_LOAD_RAM */
64 PACKB(2, 0), /* MBOX_EXEC_FIRMWARE */
65 PACKB(5, 5), /* MBOX_DUMP_RAM */
66 PACKB(3, 3), /* MBOX_WRITE_RAM_WORD */
67 PACKB(2, 3), /* MBOX_READ_RAM_WORD */
68 PACKB(6, 6), /* MBOX_MAILBOX_REG_TEST */
69 PACKB(2, 3), /* MBOX_VERIFY_CHECKSUM */
70 PACKB(1, 3), /* MBOX_ABOUT_FIRMWARE */
71 PACKB(0, 0), /* 0x0009 */
72 PACKB(0, 0), /* 0x000a */
73 PACKB(0, 0), /* 0x000b */
74 PACKB(0, 0), /* 0x000c */
75 PACKB(0, 0), /* 0x000d */
76 PACKB(1, 2), /* MBOX_CHECK_FIRMWARE */
77 PACKB(0, 0), /* 0x000f */
78 PACKB(5, 5), /* MBOX_INIT_REQ_QUEUE */
79 PACKB(6, 6), /* MBOX_INIT_RES_QUEUE */
80 PACKB(4, 4), /* MBOX_EXECUTE_IOCB */
81 PACKB(2, 2), /* MBOX_WAKE_UP */
82 PACKB(1, 6), /* MBOX_STOP_FIRMWARE */
83 PACKB(4, 4), /* MBOX_ABORT */
84 PACKB(2, 2), /* MBOX_ABORT_DEVICE */
85 PACKB(3, 3), /* MBOX_ABORT_TARGET */
86 PACKB(2, 2), /* MBOX_BUS_RESET */
87 PACKB(2, 3), /* MBOX_STOP_QUEUE */
88 PACKB(2, 3), /* MBOX_START_QUEUE */
89 PACKB(2, 3), /* MBOX_SINGLE_STEP_QUEUE */
90 PACKB(2, 3), /* MBOX_ABORT_QUEUE */
91 PACKB(2, 4), /* MBOX_GET_DEV_QUEUE_STATUS */
92 PACKB(0, 0), /* 0x001e */
93 PACKB(1, 3), /* MBOX_GET_FIRMWARE_STATUS */
94 PACKB(1, 2), /* MBOX_GET_INIT_SCSI_ID */
95 PACKB(1, 2), /* MBOX_GET_SELECT_TIMEOUT */
96 PACKB(1, 3), /* MBOX_GET_RETRY_COUNT */
97 PACKB(1, 2), /* MBOX_GET_TAG_AGE_LIMIT */
98 PACKB(1, 2), /* MBOX_GET_CLOCK_RATE */
99 PACKB(1, 2), /* MBOX_GET_ACT_NEG_STATE */
100 PACKB(1, 2), /* MBOX_GET_ASYNC_DATA_SETUP_TIME */
101 PACKB(1, 3), /* MBOX_GET_SBUS_PARAMS */
102 PACKB(2, 4), /* MBOX_GET_TARGET_PARAMS */
103 PACKB(2, 4), /* MBOX_GET_DEV_QUEUE_PARAMS */
104 PACKB(0, 0), /* 0x002a */
105 PACKB(0, 0), /* 0x002b */
106 PACKB(0, 0), /* 0x002c */
107 PACKB(0, 0), /* 0x002d */
108 PACKB(0, 0), /* 0x002e */
109 PACKB(0, 0), /* 0x002f */
110 PACKB(2, 2), /* MBOX_SET_INIT_SCSI_ID */
111 PACKB(2, 2), /* MBOX_SET_SELECT_TIMEOUT */
112 PACKB(3, 3), /* MBOX_SET_RETRY_COUNT */
113 PACKB(2, 2), /* MBOX_SET_TAG_AGE_LIMIT */
114 PACKB(2, 2), /* MBOX_SET_CLOCK_RATE */
115 PACKB(2, 2), /* MBOX_SET_ACTIVE_NEG_STATE */
116 PACKB(2, 2), /* MBOX_SET_ASYNC_DATA_SETUP_TIME */
117 PACKB(3, 3), /* MBOX_SET_SBUS_CONTROL_PARAMS */
118 PACKB(4, 4), /* MBOX_SET_TARGET_PARAMS */
119 PACKB(4, 4), /* MBOX_SET_DEV_QUEUE_PARAMS */
120 PACKB(0, 0), /* 0x003a */
121 PACKB(0, 0), /* 0x003b */
122 PACKB(0, 0), /* 0x003c */
123 PACKB(0, 0), /* 0x003d */
124 PACKB(0, 0), /* 0x003e */
125 PACKB(0, 0), /* 0x003f */
126 PACKB(0, 0), /* 0x0040 */
127 PACKB(0, 0), /* 0x0041 */
128 PACKB(0, 0) /* 0x0042 */
129 };
130
131 #define MAX_MBOX_COMMAND ARRAY_SIZE(mbox_param)
132
133 /* queue lengths _must_ be a power of two: */
134 #define QUEUE_DEPTH(in, out, ql) ((in - out) & (ql))
135 #define REQ_QUEUE_DEPTH(in, out) QUEUE_DEPTH(in, out, \
136 QLOGICPTI_REQ_QUEUE_LEN)
137 #define RES_QUEUE_DEPTH(in, out) QUEUE_DEPTH(in, out, RES_QUEUE_LEN)
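/* The in/out pointers are free-running ring indices and the *_QUEUE_LEN
 * values above are used as wrap masks, hence the power-of-two requirement.
 * A quick worked example, assuming a 256-entry ring (mask 255):
 * in = 3, out = 250 gives (3 - 250) & 255 == 9 entries in flight.
 */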
138
139 static inline void qlogicpti_enable_irqs(struct qlogicpti *qpti)
140 {
141 sbus_writew(SBUS_CTRL_ERIRQ | SBUS_CTRL_GENAB,
142 qpti->qregs + SBUS_CTRL);
143 }
144
145 static inline void qlogicpti_disable_irqs(struct qlogicpti *qpti)
146 {
147 sbus_writew(0, qpti->qregs + SBUS_CTRL);
148 }
149
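/* Program SBUS_CFG1 with the largest DMA burst size that both this card and
 * its SBus parent advertise (32, 16 or 8 bytes), or disable bursting
 * entirely. 64-byte bursts are deliberately left out; see the #if 0 note
 * below.
 */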
150 static inline void set_sbus_cfg1(struct qlogicpti *qpti)
151 {
152 u16 val;
153 u8 bursts = qpti->bursts;
154
155 #if 0 /* It appears that at least PTI cards do not support
156 * 64-byte bursts and that setting the B64 bit actually
157 * is a nop and the chip ends up using the smallest burst
158 * size. -DaveM
159 */
160 if (sbus_can_burst64() && (bursts & DMA_BURST64)) {
161 val = (SBUS_CFG1_BENAB | SBUS_CFG1_B64);
162 } else
163 #endif
164 if (bursts & DMA_BURST32) {
165 val = (SBUS_CFG1_BENAB | SBUS_CFG1_B32);
166 } else if (bursts & DMA_BURST16) {
167 val = (SBUS_CFG1_BENAB | SBUS_CFG1_B16);
168 } else if (bursts & DMA_BURST8) {
169 val = (SBUS_CFG1_BENAB | SBUS_CFG1_B8);
170 } else {
171 val = 0; /* No sbus bursts for you... */
172 }
173 sbus_writew(val, qpti->qregs + SBUS_CFG1);
174 }
175
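/* Synchronous mailbox handshake with the RISC firmware: lock the SBUS
 * semaphore, wait for any previous host interrupt to drain, write the
 * outgoing MBOXn registers (the switches below fall through on purpose,
 * highest register first), then clear the RISC interrupt, drop the
 * semaphore and raise the host interrupt. Completion is polled for in
 * three bounded loops (host interrupt cleared, semaphore set again by the
 * chip, MBOX0 no longer reading "busy") before the result registers are
 * read back. Timeouts only log; the command is not retried here.
 */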
176 static int qlogicpti_mbox_command(struct qlogicpti *qpti, u_short param[], int force)
177 {
178 int loop_count;
179 u16 tmp;
180
181 if (mbox_param[param[0]] == 0)
182 return 1;
183
184 /* Set SBUS semaphore. */
185 tmp = sbus_readw(qpti->qregs + SBUS_SEMAPHORE);
186 tmp |= SBUS_SEMAPHORE_LCK;
187 sbus_writew(tmp, qpti->qregs + SBUS_SEMAPHORE);
188
189 /* Wait for host IRQ bit to clear. */
190 loop_count = DEFAULT_LOOP_COUNT;
191 while (--loop_count && (sbus_readw(qpti->qregs + HCCTRL) & HCCTRL_HIRQ)) {
192 barrier();
193 cpu_relax();
194 }
195 if (!loop_count)
196 printk(KERN_EMERG "qlogicpti%d: mbox_command loop timeout #1\n",
197 qpti->qpti_id);
198
199 /* Write mailbox command registers. */
200 switch (mbox_param[param[0]] >> 4) {
201 case 6: sbus_writew(param[5], qpti->qregs + MBOX5);
202 case 5: sbus_writew(param[4], qpti->qregs + MBOX4);
203 case 4: sbus_writew(param[3], qpti->qregs + MBOX3);
204 case 3: sbus_writew(param[2], qpti->qregs + MBOX2);
205 case 2: sbus_writew(param[1], qpti->qregs + MBOX1);
206 case 1: sbus_writew(param[0], qpti->qregs + MBOX0);
207 }
208
209 /* Clear RISC interrupt. */
210 tmp = sbus_readw(qpti->qregs + HCCTRL);
211 tmp |= HCCTRL_CRIRQ;
212 sbus_writew(tmp, qpti->qregs + HCCTRL);
213
214 /* Clear SBUS semaphore. */
215 sbus_writew(0, qpti->qregs + SBUS_SEMAPHORE);
216
217 /* Set HOST interrupt. */
218 tmp = sbus_readw(qpti->qregs + HCCTRL);
219 tmp |= HCCTRL_SHIRQ;
220 sbus_writew(tmp, qpti->qregs + HCCTRL);
221
222 /* Wait for HOST interrupt clears. */
223 loop_count = DEFAULT_LOOP_COUNT;
224 while (--loop_count &&
225 (sbus_readw(qpti->qregs + HCCTRL) & HCCTRL_CRIRQ))
226 udelay(20);
227 if (!loop_count)
228 printk(KERN_EMERG "qlogicpti%d: mbox_command[%04x] loop timeout #2\n",
229 qpti->qpti_id, param[0]);
230
231 /* Wait for SBUS semaphore to get set. */
232 loop_count = DEFAULT_LOOP_COUNT;
233 while (--loop_count &&
234 !(sbus_readw(qpti->qregs + SBUS_SEMAPHORE) & SBUS_SEMAPHORE_LCK)) {
235 udelay(20);
236
237 /* Workaround for some buggy chips. */
238 if (sbus_readw(qpti->qregs + MBOX0) & 0x4000)
239 break;
240 }
241 if (!loop_count)
242 printk(KERN_EMERG "qlogicpti%d: mbox_command[%04x] loop timeout #3\n",
243 qpti->qpti_id, param[0]);
244
245 /* Wait for MBOX busy condition to go away. */
246 loop_count = DEFAULT_LOOP_COUNT;
247 while (--loop_count && (sbus_readw(qpti->qregs + MBOX0) == 0x04))
248 udelay(20);
249 if (!loop_count)
250 printk(KERN_EMERG "qlogicpti%d: mbox_command[%04x] loop timeout #4\n",
251 qpti->qpti_id, param[0]);
252
253 /* Read back output parameters. */
254 switch (mbox_param[param[0]] & 0xf) {
255 case 6: param[5] = sbus_readw(qpti->qregs + MBOX5);
256 case 5: param[4] = sbus_readw(qpti->qregs + MBOX4);
257 case 4: param[3] = sbus_readw(qpti->qregs + MBOX3);
258 case 3: param[2] = sbus_readw(qpti->qregs + MBOX2);
259 case 2: param[1] = sbus_readw(qpti->qregs + MBOX1);
260 case 1: param[0] = sbus_readw(qpti->qregs + MBOX0);
261 }
262
263 /* Clear RISC interrupt. */
264 tmp = sbus_readw(qpti->qregs + HCCTRL);
265 tmp |= HCCTRL_CRIRQ;
266 sbus_writew(tmp, qpti->qregs + HCCTRL);
267
268 /* Release SBUS semaphore. */
269 tmp = sbus_readw(qpti->qregs + SBUS_SEMAPHORE);
270 tmp &= ~(SBUS_SEMAPHORE_LCK);
271 sbus_writew(tmp, qpti->qregs + SBUS_SEMAPHORE);
272
273 /* We're done. */
274 return 0;
275 }
276
277 static inline void qlogicpti_set_hostdev_defaults(struct qlogicpti *qpti)
278 {
279 int i;
280
281 qpti->host_param.initiator_scsi_id = qpti->scsi_id;
282 qpti->host_param.bus_reset_delay = 3;
283 qpti->host_param.retry_count = 0;
284 qpti->host_param.retry_delay = 5;
285 qpti->host_param.async_data_setup_time = 3;
286 qpti->host_param.req_ack_active_negation = 1;
287 qpti->host_param.data_line_active_negation = 1;
288 qpti->host_param.data_dma_burst_enable = 1;
289 qpti->host_param.command_dma_burst_enable = 1;
290 qpti->host_param.tag_aging = 8;
291 qpti->host_param.selection_timeout = 250;
292 qpti->host_param.max_queue_depth = 256;
293
294 for(i = 0; i < MAX_TARGETS; i++) {
295 /*
296 * Enable disconnect, parity, ARQ, renegotiate-on-reset and, oddly
297 * enough, tags... the midlayer's notion of tagged support has to
298 * match our device settings, and since we decide whether to tag each
299 * command based on what the midlayer says, we actually enable the
300 * capability here.
301 */
302 qpti->dev_param[i].device_flags = 0xcd;
303 qpti->dev_param[i].execution_throttle = 16;
304 if (qpti->ultra) {
305 qpti->dev_param[i].synchronous_period = 12;
306 qpti->dev_param[i].synchronous_offset = 8;
307 } else {
308 qpti->dev_param[i].synchronous_period = 25;
309 qpti->dev_param[i].synchronous_offset = 12;
310 }
311 qpti->dev_param[i].device_enable = 1;
312 }
313 }
314
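/* Bring the ISP to a known state and (re)start the already-loaded firmware:
 * pause the RISC, reset the SCSI bus only if it is currently busy, reset the
 * SBus interface and both DMA channels, reprogram burst sizes and the RISC
 * memory timing (ultra vs. default), reload default host/target parameters,
 * release the RISC and issue MBOX_EXEC_FIRMWARE at 0x1000, then set the
 * initiator ID, hand the request/response rings to the firmware, push the
 * per-target parameters (forced narrow/async for now) and finish with a bus
 * reset that also schedules a marker entry.
 */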
315 static int qlogicpti_reset_hardware(struct Scsi_Host *host)
316 {
317 struct qlogicpti *qpti = (struct qlogicpti *) host->hostdata;
318 u_short param[6];
319 unsigned short risc_code_addr;
320 int loop_count, i;
321 unsigned long flags;
322
323 risc_code_addr = 0x1000; /* all load addresses are at 0x1000 */
324
325 spin_lock_irqsave(host->host_lock, flags);
326
327 sbus_writew(HCCTRL_PAUSE, qpti->qregs + HCCTRL);
328
329 /* Only reset the scsi bus if it is not free. */
330 if (sbus_readw(qpti->qregs + CPU_PCTRL) & CPU_PCTRL_BSY) {
331 sbus_writew(CPU_ORIDE_RMOD, qpti->qregs + CPU_ORIDE);
332 sbus_writew(CPU_CMD_BRESET, qpti->qregs + CPU_CMD);
333 udelay(400);
334 }
335
336 sbus_writew(SBUS_CTRL_RESET, qpti->qregs + SBUS_CTRL);
337 sbus_writew((DMA_CTRL_CCLEAR | DMA_CTRL_CIRQ), qpti->qregs + CMD_DMA_CTRL);
338 sbus_writew((DMA_CTRL_CCLEAR | DMA_CTRL_CIRQ), qpti->qregs + DATA_DMA_CTRL);
339
340 loop_count = DEFAULT_LOOP_COUNT;
341 while (--loop_count && ((sbus_readw(qpti->qregs + MBOX0) & 0xff) == 0x04))
342 udelay(20);
343 if (!loop_count)
344 printk(KERN_EMERG "qlogicpti%d: reset_hardware loop timeout\n",
345 qpti->qpti_id);
346
347 sbus_writew(HCCTRL_PAUSE, qpti->qregs + HCCTRL);
348 set_sbus_cfg1(qpti);
349 qlogicpti_enable_irqs(qpti);
350
351 if (sbus_readw(qpti->qregs + RISC_PSR) & RISC_PSR_ULTRA) {
352 qpti->ultra = 1;
353 sbus_writew((RISC_MTREG_P0ULTRA | RISC_MTREG_P1ULTRA),
354 qpti->qregs + RISC_MTREG);
355 } else {
356 qpti->ultra = 0;
357 sbus_writew((RISC_MTREG_P0DFLT | RISC_MTREG_P1DFLT),
358 qpti->qregs + RISC_MTREG);
359 }
360
361 /* reset adapter and per-device default values. */
362 /* do it after finding out whether we're ultra mode capable */
363 qlogicpti_set_hostdev_defaults(qpti);
364
365 /* Release the RISC processor. */
366 sbus_writew(HCCTRL_REL, qpti->qregs + HCCTRL);
367
368 /* Get RISC to start executing the firmware code. */
369 param[0] = MBOX_EXEC_FIRMWARE;
370 param[1] = risc_code_addr;
371 if (qlogicpti_mbox_command(qpti, param, 1)) {
372 printk(KERN_EMERG "qlogicpti%d: Cannot execute ISP firmware.\n",
373 qpti->qpti_id);
374 spin_unlock_irqrestore(host->host_lock, flags);
375 return 1;
376 }
377
378 /* Set initiator scsi ID. */
379 param[0] = MBOX_SET_INIT_SCSI_ID;
380 param[1] = qpti->host_param.initiator_scsi_id;
381 if (qlogicpti_mbox_command(qpti, param, 1) ||
382 (param[0] != MBOX_COMMAND_COMPLETE)) {
383 printk(KERN_EMERG "qlogicpti%d: Cannot set initiator SCSI ID.\n",
384 qpti->qpti_id);
385 spin_unlock_irqrestore(host->host_lock, flags);
386 return 1;
387 }
388
389 /* Initialize state of the queues, both hw and sw. */
390 qpti->req_in_ptr = qpti->res_out_ptr = 0;
391
392 param[0] = MBOX_INIT_RES_QUEUE;
393 param[1] = RES_QUEUE_LEN + 1;
394 param[2] = (u_short) (qpti->res_dvma >> 16);
395 param[3] = (u_short) (qpti->res_dvma & 0xffff);
396 param[4] = param[5] = 0;
397 if (qlogicpti_mbox_command(qpti, param, 1)) {
398 printk(KERN_EMERG "qlogicpti%d: Cannot init response queue.\n",
399 qpti->qpti_id);
400 spin_unlock_irqrestore(host->host_lock, flags);
401 return 1;
402 }
403
404 param[0] = MBOX_INIT_REQ_QUEUE;
405 param[1] = QLOGICPTI_REQ_QUEUE_LEN + 1;
406 param[2] = (u_short) (qpti->req_dvma >> 16);
407 param[3] = (u_short) (qpti->req_dvma & 0xffff);
408 param[4] = param[5] = 0;
409 if (qlogicpti_mbox_command(qpti, param, 1)) {
410 printk(KERN_EMERG "qlogicpti%d: Cannot init request queue.\n",
411 qpti->qpti_id);
412 spin_unlock_irqrestore(host->host_lock, flags);
413 return 1;
414 }
415
416 param[0] = MBOX_SET_RETRY_COUNT;
417 param[1] = qpti->host_param.retry_count;
418 param[2] = qpti->host_param.retry_delay;
419 qlogicpti_mbox_command(qpti, param, 0);
420
421 param[0] = MBOX_SET_TAG_AGE_LIMIT;
422 param[1] = qpti->host_param.tag_aging;
423 qlogicpti_mbox_command(qpti, param, 0);
424
425 for (i = 0; i < MAX_TARGETS; i++) {
426 param[0] = MBOX_GET_DEV_QUEUE_PARAMS;
427 param[1] = (i << 8);
428 qlogicpti_mbox_command(qpti, param, 0);
429 }
430
431 param[0] = MBOX_GET_FIRMWARE_STATUS;
432 qlogicpti_mbox_command(qpti, param, 0);
433
434 param[0] = MBOX_SET_SELECT_TIMEOUT;
435 param[1] = qpti->host_param.selection_timeout;
436 qlogicpti_mbox_command(qpti, param, 0);
437
438 for (i = 0; i < MAX_TARGETS; i++) {
439 param[0] = MBOX_SET_TARGET_PARAMS;
440 param[1] = (i << 8);
441 param[2] = (qpti->dev_param[i].device_flags << 8);
442 /*
443 * Since we're now loading 1.31 f/w, force narrow/async.
444 */
445 param[2] |= 0xc0;
446 param[3] = 0; /* no offset, we do not have sync mode yet */
447 qlogicpti_mbox_command(qpti, param, 0);
448 }
449
450 /*
451 * Always (sigh) do an initial bus reset (kicks f/w).
452 */
453 param[0] = MBOX_BUS_RESET;
454 param[1] = qpti->host_param.bus_reset_delay;
455 qlogicpti_mbox_command(qpti, param, 0);
456 qpti->send_marker = 1;
457
458 spin_unlock_irqrestore(host->host_lock, flags);
459 return 0;
460 }
461
462 #define PTI_RESET_LIMIT 400
463
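/* Download and start the ISP firmware image (qlogic/isp1000.bin): verify its
 * 16-bit checksum on the host, hard-reset the ISP, stop whatever firmware is
 * running from ROM, write the image one word at a time to address 0x1000 via
 * MBOX_WRITE_RAM_WORD, have the chip verify the checksum itself, start the
 * new code with MBOX_EXEC_FIRMWARE and record the reported version. On PTI
 * boards the SCSI initiator ID and interrupt level are also stashed in the
 * card's static RAM.
 */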
464 static int qlogicpti_load_firmware(struct qlogicpti *qpti)
465 {
466 const struct firmware *fw;
467 const char fwname[] = "qlogic/isp1000.bin";
468 const __le16 *fw_data;
469 struct Scsi_Host *host = qpti->qhost;
470 unsigned short csum = 0;
471 unsigned short param[6];
472 unsigned short risc_code_addr, risc_code_length;
473 int err;
474 unsigned long flags;
475 int i, timeout;
476
477 err = request_firmware(&fw, fwname, &qpti->op->dev);
478 if (err) {
479 printk(KERN_ERR "Failed to load image \"%s\" err %d\n",
480 fwname, err);
481 return err;
482 }
483 if (fw->size % 2) {
484 printk(KERN_ERR "Bogus length %zu in image \"%s\"\n",
485 fw->size, fwname);
486 err = -EINVAL;
487 goto outfirm;
488 }
489 fw_data = (const __le16 *)&fw->data[0];
490 risc_code_addr = 0x1000; /* all f/w modules load at 0x1000 */
491 risc_code_length = fw->size / 2;
492
493 spin_lock_irqsave(host->host_lock, flags);
494
495 /* Verify the checksum twice: once on the host before loading it (the
496 * 16-bit sum of all firmware words must come out to zero), and once
497 * afterwards via the mailbox commands. */
498 for (i = 0; i < risc_code_length; i++)
499 csum += __le16_to_cpu(fw_data[i]);
500 if (csum) {
501 printk(KERN_EMERG "qlogicpti%d: Aieee, firmware checksum failed!",
502 qpti->qpti_id);
503 err = 1;
504 goto out;
505 }
506 sbus_writew(SBUS_CTRL_RESET, qpti->qregs + SBUS_CTRL);
507 sbus_writew((DMA_CTRL_CCLEAR | DMA_CTRL_CIRQ), qpti->qregs + CMD_DMA_CTRL);
508 sbus_writew((DMA_CTRL_CCLEAR | DMA_CTRL_CIRQ), qpti->qregs + DATA_DMA_CTRL);
509 timeout = PTI_RESET_LIMIT;
510 while (--timeout && (sbus_readw(qpti->qregs + SBUS_CTRL) & SBUS_CTRL_RESET))
511 udelay(20);
512 if (!timeout) {
513 printk(KERN_EMERG "qlogicpti%d: Cannot reset the ISP.", qpti->qpti_id);
514 err = 1;
515 goto out;
516 }
517
518 sbus_writew(HCCTRL_RESET, qpti->qregs + HCCTRL);
519 mdelay(1);
520
521 sbus_writew((SBUS_CTRL_GENAB | SBUS_CTRL_ERIRQ), qpti->qregs + SBUS_CTRL);
522 set_sbus_cfg1(qpti);
523 sbus_writew(0, qpti->qregs + SBUS_SEMAPHORE);
524
525 if (sbus_readw(qpti->qregs + RISC_PSR) & RISC_PSR_ULTRA) {
526 qpti->ultra = 1;
527 sbus_writew((RISC_MTREG_P0ULTRA | RISC_MTREG_P1ULTRA),
528 qpti->qregs + RISC_MTREG);
529 } else {
530 qpti->ultra = 0;
531 sbus_writew((RISC_MTREG_P0DFLT | RISC_MTREG_P1DFLT),
532 qpti->qregs + RISC_MTREG);
533 }
534
535 sbus_writew(HCCTRL_REL, qpti->qregs + HCCTRL);
536
537 /* Pin lines are only stable while RISC is paused. */
538 sbus_writew(HCCTRL_PAUSE, qpti->qregs + HCCTRL);
539 if (sbus_readw(qpti->qregs + CPU_PDIFF) & CPU_PDIFF_MODE)
540 qpti->differential = 1;
541 else
542 qpti->differential = 0;
543 sbus_writew(HCCTRL_REL, qpti->qregs + HCCTRL);
544
545 /* This shouldn't be necessary -- we've reset things, so we should be
546 running from the ROM now. */
547
548 param[0] = MBOX_STOP_FIRMWARE;
549 param[1] = param[2] = param[3] = param[4] = param[5] = 0;
550 if (qlogicpti_mbox_command(qpti, param, 1)) {
551 printk(KERN_EMERG "qlogicpti%d: Cannot stop firmware for reload.\n",
552 qpti->qpti_id);
553 err = 1;
554 goto out;
555 }
556
557 /* Load it up.. */
558 for (i = 0; i < risc_code_length; i++) {
559 param[0] = MBOX_WRITE_RAM_WORD;
560 param[1] = risc_code_addr + i;
561 param[2] = __le16_to_cpu(fw_data[i]);
562 if (qlogicpti_mbox_command(qpti, param, 1) ||
563 param[0] != MBOX_COMMAND_COMPLETE) {
564 printk("qlogicpti%d: Firmware dload failed, I'm bolixed!\n",
565 qpti->qpti_id);
566 err = 1;
567 goto out;
568 }
569 }
570
571 /* Reset the ISP again. */
572 sbus_writew(HCCTRL_RESET, qpti->qregs + HCCTRL);
573 mdelay(1);
574
575 qlogicpti_enable_irqs(qpti);
576 sbus_writew(0, qpti->qregs + SBUS_SEMAPHORE);
577 sbus_writew(HCCTRL_REL, qpti->qregs + HCCTRL);
578
579 /* Ask ISP to verify the checksum of the new code. */
580 param[0] = MBOX_VERIFY_CHECKSUM;
581 param[1] = risc_code_addr;
582 if (qlogicpti_mbox_command(qpti, param, 1) ||
583 (param[0] != MBOX_COMMAND_COMPLETE)) {
584 printk(KERN_EMERG "qlogicpti%d: New firmware csum failure!\n",
585 qpti->qpti_id);
586 err = 1;
587 goto out;
588 }
589
590 /* Start using newly downloaded firmware. */
591 param[0] = MBOX_EXEC_FIRMWARE;
592 param[1] = risc_code_addr;
593 qlogicpti_mbox_command(qpti, param, 1);
594
595 param[0] = MBOX_ABOUT_FIRMWARE;
596 if (qlogicpti_mbox_command(qpti, param, 1) ||
597 (param[0] != MBOX_COMMAND_COMPLETE)) {
598 printk(KERN_EMERG "qlogicpti%d: AboutFirmware cmd fails.\n",
599 qpti->qpti_id);
600 err = 1;
601 goto out;
602 }
603
604 /* Snag the major, minor and micro revisions from the result. */
605 qpti->fware_majrev = param[1];
606 qpti->fware_minrev = param[2];
607 qpti->fware_micrev = param[3];
608
609 /* Set the clock rate */
610 param[0] = MBOX_SET_CLOCK_RATE;
611 param[1] = qpti->clock;
612 if (qlogicpti_mbox_command(qpti, param, 1) ||
613 (param[0] != MBOX_COMMAND_COMPLETE)) {
614 printk(KERN_EMERG "qlogicpti%d: could not set clock rate.\n",
615 qpti->qpti_id);
616 err = 1;
617 goto out;
618 }
619
620 if (qpti->is_pti != 0) {
621 /* Load scsi initiator ID and interrupt level into sbus static ram. */
622 param[0] = MBOX_WRITE_RAM_WORD;
623 param[1] = 0xff80;
624 param[2] = (unsigned short) qpti->scsi_id;
625 qlogicpti_mbox_command(qpti, param, 1);
626
627 param[0] = MBOX_WRITE_RAM_WORD;
628 param[1] = 0xff00;
629 param[2] = (unsigned short) 3;
630 qlogicpti_mbox_command(qpti, param, 1);
631 }
632
633 out:
634 spin_unlock_irqrestore(host->host_lock, flags);
635 outfirm:
636 release_firmware(fw);
637 return err;
638 }
639
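/* Sample the PTI board's status register and compare it with the last
 * snapshot in qpti->swsreg: note when the fuse or termination power returns
 * to normal, complain about an open fuse, a termpwr failure, or a
 * single-ended device sitting on a differential bus, and return the number
 * of error conditions currently present (0 means all clear).
 */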
640 static int qlogicpti_verify_tmon(struct qlogicpti *qpti)
641 {
642 int curstat = sbus_readb(qpti->sreg);
643
644 curstat &= 0xf0;
645 if (!(curstat & SREG_FUSE) && (qpti->swsreg & SREG_FUSE))
646 printk("qlogicpti%d: Fuse returned to normal state.\n", qpti->qpti_id);
647 if (!(curstat & SREG_TPOWER) && (qpti->swsreg & SREG_TPOWER))
648 printk("qlogicpti%d: termpwr back to normal state.\n", qpti->qpti_id);
649 if (curstat != qpti->swsreg) {
650 int error = 0;
651 if (curstat & SREG_FUSE) {
652 error++;
653 printk("qlogicpti%d: Fuse is open!\n", qpti->qpti_id);
654 }
655 if (curstat & SREG_TPOWER) {
656 error++;
657 printk("qlogicpti%d: termpwr failure\n", qpti->qpti_id);
658 }
659 if (qpti->differential &&
660 (curstat & SREG_DSENSE) != SREG_DSENSE) {
661 error++;
662 printk("qlogicpti%d: You have a single ended device on a "
663 "differential bus! Please fix!\n", qpti->qpti_id);
664 }
665 qpti->swsreg = curstat;
666 return error;
667 }
668 return 0;
669 }
670
671 static irqreturn_t qpti_intr(int irq, void *dev_id);
672
673 static void qpti_chain_add(struct qlogicpti *qpti)
674 {
675 spin_lock_irq(&qptichain_lock);
676 if (qptichain != NULL) {
677 struct qlogicpti *qlink = qptichain;
678
679 while(qlink->next)
680 qlink = qlink->next;
681 qlink->next = qpti;
682 } else {
683 qptichain = qpti;
684 }
685 qpti->next = NULL;
686 spin_unlock_irq(&qptichain_lock);
687 }
688
689 static void qpti_chain_del(struct qlogicpti *qpti)
690 {
691 spin_lock_irq(&qptichain_lock);
692 if (qptichain == qpti) {
693 qptichain = qpti->next;
694 } else {
695 struct qlogicpti *qlink = qptichain;
696 while(qlink->next != qpti)
697 qlink = qlink->next;
698 qlink->next = qpti->next;
699 }
700 qpti->next = NULL;
701 spin_unlock_irq(&qptichain_lock);
702 }
703
704 static int qpti_map_regs(struct qlogicpti *qpti)
705 {
706 struct platform_device *op = qpti->op;
707
708 qpti->qregs = of_ioremap(&op->resource[0], 0,
709 resource_size(&op->resource[0]),
710 "PTI Qlogic/ISP");
711 if (!qpti->qregs) {
712 printk("PTI: Qlogic/ISP registers are unmappable\n");
713 return -1;
714 }
715 if (qpti->is_pti) {
716 qpti->sreg = of_ioremap(&op->resource[0], (16 * 4096),
717 sizeof(unsigned char),
718 "PTI Qlogic/ISP statreg");
719 if (!qpti->sreg) {
720 printk("PTI: Qlogic/ISP status register is unmappable\n");
721 return -1;
722 }
723 }
724 return 0;
725 }
726
727 static int qpti_register_irq(struct qlogicpti *qpti)
728 {
729 struct platform_device *op = qpti->op;
730
731 qpti->qhost->irq = qpti->irq = op->archdata.irqs[0];
732
733 /* We used to try various overly-clever things to
734 * reduce the interrupt processing overhead on
735 * sun4c/sun4m when multiple PTI's shared the
736 * same IRQ. It was too complex and messy to
737 * sanely maintain.
738 */
739 if (request_irq(qpti->irq, qpti_intr,
740 IRQF_SHARED, "QlogicPTI", qpti))
741 goto fail;
742
743 printk("qlogicpti%d: IRQ %d ", qpti->qpti_id, qpti->irq);
744
745 return 0;
746
747 fail:
748 printk("qlogicpti%d: Cannot acquire irq line\n", qpti->qpti_id);
749 return -1;
750 }
751
752 static void qpti_get_scsi_id(struct qlogicpti *qpti)
753 {
754 struct platform_device *op = qpti->op;
755 struct device_node *dp;
756
757 dp = op->dev.of_node;
758
759 qpti->scsi_id = of_getintprop_default(dp, "initiator-id", -1);
760 if (qpti->scsi_id == -1)
761 qpti->scsi_id = of_getintprop_default(dp, "scsi-initiator-id",
762 -1);
763 if (qpti->scsi_id == -1)
764 qpti->scsi_id =
765 of_getintprop_default(dp->parent,
766 "scsi-initiator-id", 7);
767 qpti->qhost->this_id = qpti->scsi_id;
768 qpti->qhost->max_sectors = 64;
769
770 printk("SCSI ID %d ", qpti->scsi_id);
771 }
772
773 static void qpti_get_bursts(struct qlogicpti *qpti)
774 {
775 struct platform_device *op = qpti->op;
776 u8 bursts, bmask;
777
778 bursts = of_getintprop_default(op->dev.of_node, "burst-sizes", 0xff);
779 bmask = of_getintprop_default(op->dev.of_node->parent, "burst-sizes", 0xff);
780 if (bmask != 0xff)
781 bursts &= bmask;
782 if (bursts == 0xff ||
783 (bursts & DMA_BURST16) == 0 ||
784 (bursts & DMA_BURST32) == 0)
785 bursts = (DMA_BURST32 - 1);
786
787 qpti->bursts = bursts;
788 }
789
790 static void qpti_get_clock(struct qlogicpti *qpti)
791 {
792 unsigned int cfreq;
793
794 /* Determine the clock input to this card, rounded to the nearest MHz.
795 * Default to 40MHz.
796 */
797 cfreq = prom_getintdefault(qpti->prom_node,"clock-frequency",40000000);
798 qpti->clock = (cfreq + 500000)/1000000;
799 if (qpti->clock == 0) /* bullshit */
800 qpti->clock = 40;
801 }
802
803 /* The request and response queues must each be aligned
804 * on a page boundary.
805 */
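/* Each ring is sized for LEN + 1 fixed-size entries (QUEUE_ENTRY_LEN bytes
 * each), matching the LEN + 1 slot count handed to MBOX_INIT_REQ_QUEUE and
 * MBOX_INIT_RES_QUEUE in qlogicpti_reset_hardware(). dma_alloc_coherent()
 * should hand back at least page-aligned memory for allocations of this
 * size, which is what the alignment requirement above relies on.
 */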
806 static int qpti_map_queues(struct qlogicpti *qpti)
807 {
808 struct platform_device *op = qpti->op;
809
810 #define QSIZE(entries) (((entries) + 1) * QUEUE_ENTRY_LEN)
811 qpti->res_cpu = dma_alloc_coherent(&op->dev,
812 QSIZE(RES_QUEUE_LEN),
813 &qpti->res_dvma, GFP_ATOMIC);
814 if (qpti->res_cpu == NULL ||
815 qpti->res_dvma == 0) {
816 printk("QPTI: Cannot map response queue.\n");
817 return -1;
818 }
819
820 qpti->req_cpu = dma_alloc_coherent(&op->dev,
821 QSIZE(QLOGICPTI_REQ_QUEUE_LEN),
822 &qpti->req_dvma, GFP_ATOMIC);
823 if (qpti->req_cpu == NULL ||
824 qpti->req_dvma == 0) {
825 dma_free_coherent(&op->dev, QSIZE(RES_QUEUE_LEN),
826 qpti->res_cpu, qpti->res_dvma);
827 printk("QPTI: Cannot map request queue.\n");
828 return -1;
829 }
830 memset(qpti->res_cpu, 0, QSIZE(RES_QUEUE_LEN));
831 memset(qpti->req_cpu, 0, QSIZE(QLOGICPTI_REQ_QUEUE_LEN));
832 return 0;
833 }
834
835 const char *qlogicpti_info(struct Scsi_Host *host)
836 {
837 static char buf[80];
838 struct qlogicpti *qpti = (struct qlogicpti *) host->hostdata;
839
840 sprintf(buf, "PTI Qlogic,ISP SBUS SCSI irq %d regs at %p",
841 qpti->qhost->irq, qpti->qregs);
842 return buf;
843 }
844
845 /* I am a certified frobtronicist. */
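/* A marker IOCB (SYNC_ALL) asks the firmware to resynchronize its notion of
 * outstanding commands. The driver sets qpti->send_marker after any bus
 * reset or reset/abort notification, and queuecommand() then emits one of
 * these before the next real Command_Entry.
 */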
846 static inline void marker_frob(struct Command_Entry *cmd)
847 {
848 struct Marker_Entry *marker = (struct Marker_Entry *) cmd;
849
850 memset(marker, 0, sizeof(struct Marker_Entry));
851 marker->hdr.entry_cnt = 1;
852 marker->hdr.entry_type = ENTRY_MARKER;
853 marker->modifier = SYNC_ALL;
854 marker->rsvd = 0;
855 }
856
857 static inline void cmd_frob(struct Command_Entry *cmd, struct scsi_cmnd *Cmnd,
858 struct qlogicpti *qpti)
859 {
860 memset(cmd, 0, sizeof(struct Command_Entry));
861 cmd->hdr.entry_cnt = 1;
862 cmd->hdr.entry_type = ENTRY_COMMAND;
863 cmd->target_id = Cmnd->device->id;
864 cmd->target_lun = Cmnd->device->lun;
865 cmd->cdb_length = Cmnd->cmd_len;
866 cmd->control_flags = 0;
867 if (Cmnd->device->tagged_supported) {
868 if (qpti->cmd_count[Cmnd->device->id] == 0)
869 qpti->tag_ages[Cmnd->device->id] = jiffies;
870 if (time_after(jiffies, qpti->tag_ages[Cmnd->device->id] + (5*HZ))) {
871 cmd->control_flags = CFLAG_ORDERED_TAG;
872 qpti->tag_ages[Cmnd->device->id] = jiffies;
873 } else
874 cmd->control_flags = CFLAG_SIMPLE_TAG;
875 }
876 if ((Cmnd->cmnd[0] == WRITE_6) ||
877 (Cmnd->cmnd[0] == WRITE_10) ||
878 (Cmnd->cmnd[0] == WRITE_12))
879 cmd->control_flags |= CFLAG_WRITE;
880 else
881 cmd->control_flags |= CFLAG_READ;
882 cmd->time_out = Cmnd->request->timeout/HZ;
883 memcpy(cmd->cdb, Cmnd->cmnd, Cmnd->cmd_len);
884 }
885
886 /* Do it to it baby. */
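/* Attach the data segments and commit the request: the first four
 * scatter-gather elements live in the Command_Entry itself, any remainder is
 * spilled into Continuation_Entry IOCBs (seven segments each) pulled from
 * the same request ring. Returns -1 if the ring would overflow, otherwise
 * records the scsi_cmnd in cmd_slots[] (the ring index doubles as the 32-bit
 * handle) and publishes the new in-pointer to the chip via MBOX4.
 */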
887 static inline int load_cmd(struct scsi_cmnd *Cmnd, struct Command_Entry *cmd,
888 struct qlogicpti *qpti, u_int in_ptr, u_int out_ptr)
889 {
890 struct dataseg *ds;
891 struct scatterlist *sg, *s;
892 int i, n;
893
894 if (scsi_bufflen(Cmnd)) {
895 int sg_count;
896
897 sg = scsi_sglist(Cmnd);
898 sg_count = dma_map_sg(&qpti->op->dev, sg,
899 scsi_sg_count(Cmnd),
900 Cmnd->sc_data_direction);
901
902 ds = cmd->dataseg;
903 cmd->segment_cnt = sg_count;
904
905 /* Fill in first four sg entries: */
906 n = sg_count;
907 if (n > 4)
908 n = 4;
909 for_each_sg(sg, s, n, i) {
910 ds[i].d_base = sg_dma_address(s);
911 ds[i].d_count = sg_dma_len(s);
912 }
913 sg_count -= 4;
914 sg = s;
915 while (sg_count > 0) {
916 struct Continuation_Entry *cont;
917
918 ++cmd->hdr.entry_cnt;
919 cont = (struct Continuation_Entry *) &qpti->req_cpu[in_ptr];
920 in_ptr = NEXT_REQ_PTR(in_ptr);
921 if (in_ptr == out_ptr)
922 return -1;
923
924 cont->hdr.entry_type = ENTRY_CONTINUATION;
925 cont->hdr.entry_cnt = 0;
926 cont->hdr.sys_def_1 = 0;
927 cont->hdr.flags = 0;
928 cont->reserved = 0;
929 ds = cont->dataseg;
930 n = sg_count;
931 if (n > 7)
932 n = 7;
933 for_each_sg(sg, s, n, i) {
934 ds[i].d_base = sg_dma_address(s);
935 ds[i].d_count = sg_dma_len(s);
936 }
937 sg_count -= n;
938 sg = s;
939 }
940 } else {
941 cmd->dataseg[0].d_base = 0;
942 cmd->dataseg[0].d_count = 0;
943 cmd->segment_cnt = 1; /* Shouldn't this be 0? */
944 }
945
946 /* Committed, record Scsi_Cmd so we can find it later. */
947 cmd->handle = in_ptr;
948 qpti->cmd_slots[in_ptr] = Cmnd;
949
950 qpti->cmd_count[Cmnd->device->id]++;
951 sbus_writew(in_ptr, qpti->qregs + MBOX4);
952 qpti->req_in_ptr = in_ptr;
953
954 return in_ptr;
955 }
956
957 static inline void update_can_queue(struct Scsi_Host *host, u_int in_ptr, u_int out_ptr)
958 {
959 /* Temporary workaround until bug is found and fixed (one bug has been found
960 already, but fixing it makes things even worse) -jj */
961 int num_free = QLOGICPTI_REQ_QUEUE_LEN - REQ_QUEUE_DEPTH(in_ptr, out_ptr) - 64;
962 host->can_queue = host->host_busy + num_free;
963 host->sg_tablesize = QLOGICPTI_MAX_SG(num_free);
964 }
965
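/* Per-target negotiation setup: device_flags bit 0x10 enables synchronous
 * transfers and bit 0x20 wide transfers, mirroring what the midlayer reports
 * in sdev->sdtr and sdev->wdtr; the result is pushed to the firmware with
 * MBOX_SET_TARGET_PARAMS, with param[3] carrying the offset/period pair only
 * when sync is enabled.
 */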
966 static int qlogicpti_slave_configure(struct scsi_device *sdev)
967 {
968 struct qlogicpti *qpti = shost_priv(sdev->host);
969 int tgt = sdev->id;
970 u_short param[6];
971
972 /* tags handled in midlayer */
973 /* enable sync mode? */
974 if (sdev->sdtr) {
975 qpti->dev_param[tgt].device_flags |= 0x10;
976 } else {
977 qpti->dev_param[tgt].synchronous_offset = 0;
978 qpti->dev_param[tgt].synchronous_period = 0;
979 }
980 /* are we wide capable? */
981 if (sdev->wdtr)
982 qpti->dev_param[tgt].device_flags |= 0x20;
983
984 param[0] = MBOX_SET_TARGET_PARAMS;
985 param[1] = (tgt << 8);
986 param[2] = (qpti->dev_param[tgt].device_flags << 8);
987 if (qpti->dev_param[tgt].device_flags & 0x10) {
988 param[3] = (qpti->dev_param[tgt].synchronous_offset << 8) |
989 qpti->dev_param[tgt].synchronous_period;
990 } else {
991 param[3] = 0;
992 }
993 qlogicpti_mbox_command(qpti, param, 0);
994 return 0;
995 }
996
997 /*
998 * The middle SCSI layer ensures that queuecommand never gets invoked
999 * concurrently with itself or the interrupt handler (though the
1000 * interrupt handler may call this routine as part of
1001 * request-completion handling).
1002 *
1003 * "This code must fly." -davem
1004 */
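/* Queueing itself is a ring insertion: grab the next request slot (the
 * firmware's consumer index is read back from MBOX4), emit a pending marker
 * IOCB first if a bus reset occurred, build the Command_Entry, let
 * load_cmd() attach the scatter-gather list and publish the new in-pointer,
 * then adjust host->can_queue based on the remaining ring space.
 */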
1005 static int qlogicpti_queuecommand_lck(struct scsi_cmnd *Cmnd, void (*done)(struct scsi_cmnd *))
1006 {
1007 struct Scsi_Host *host = Cmnd->device->host;
1008 struct qlogicpti *qpti = (struct qlogicpti *) host->hostdata;
1009 struct Command_Entry *cmd;
1010 u_int out_ptr;
1011 int in_ptr;
1012
1013 Cmnd->scsi_done = done;
1014
1015 in_ptr = qpti->req_in_ptr;
1016 cmd = (struct Command_Entry *) &qpti->req_cpu[in_ptr];
1017 out_ptr = sbus_readw(qpti->qregs + MBOX4);
1018 in_ptr = NEXT_REQ_PTR(in_ptr);
1019 if (in_ptr == out_ptr)
1020 goto toss_command;
1021
1022 if (qpti->send_marker) {
1023 marker_frob(cmd);
1024 qpti->send_marker = 0;
1025 if (NEXT_REQ_PTR(in_ptr) == out_ptr) {
1026 sbus_writew(in_ptr, qpti->qregs + MBOX4);
1027 qpti->req_in_ptr = in_ptr;
1028 goto toss_command;
1029 }
1030 cmd = (struct Command_Entry *) &qpti->req_cpu[in_ptr];
1031 in_ptr = NEXT_REQ_PTR(in_ptr);
1032 }
1033 cmd_frob(cmd, Cmnd, qpti);
1034 if ((in_ptr = load_cmd(Cmnd, cmd, qpti, in_ptr, out_ptr)) == -1)
1035 goto toss_command;
1036
1037 update_can_queue(host, in_ptr, out_ptr);
1038
1039 return 0;
1040
1041 toss_command:
1042 printk(KERN_EMERG "qlogicpti%d: request queue overflow\n",
1043 qpti->qpti_id);
1044
1045 /* Unfortunately, unless you use the new EH code, which
1046 * we don't, the midlayer will ignore the return value,
1047 * which is insane. We pick up the pieces like this.
1048 */
1049 Cmnd->result = DID_BUS_BUSY << 16;
1050 done(Cmnd);
1051 return 1;
1052 }
1053
1054 static DEF_SCSI_QCMD(qlogicpti_queuecommand)
1055
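/* Translate the ISP completion status into a midlayer result word: the SCSI
 * status byte goes in the low byte and the DID_* host code in bits 16-23,
 * matching the host_byte() convention used by the SCSI core.
 */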
1056 static int qlogicpti_return_status(struct Status_Entry *sts, int id)
1057 {
1058 int host_status = DID_ERROR;
1059
1060 switch (sts->completion_status) {
1061 case CS_COMPLETE:
1062 host_status = DID_OK;
1063 break;
1064 case CS_INCOMPLETE:
1065 if (!(sts->state_flags & SF_GOT_BUS))
1066 host_status = DID_NO_CONNECT;
1067 else if (!(sts->state_flags & SF_GOT_TARGET))
1068 host_status = DID_BAD_TARGET;
1069 else if (!(sts->state_flags & SF_SENT_CDB))
1070 host_status = DID_ERROR;
1071 else if (!(sts->state_flags & SF_TRANSFERRED_DATA))
1072 host_status = DID_ERROR;
1073 else if (!(sts->state_flags & SF_GOT_STATUS))
1074 host_status = DID_ERROR;
1075 else if (!(sts->state_flags & SF_GOT_SENSE))
1076 host_status = DID_ERROR;
1077 break;
1078 case CS_DMA_ERROR:
1079 case CS_TRANSPORT_ERROR:
1080 host_status = DID_ERROR;
1081 break;
1082 case CS_RESET_OCCURRED:
1083 case CS_BUS_RESET:
1084 host_status = DID_RESET;
1085 break;
1086 case CS_ABORTED:
1087 host_status = DID_ABORT;
1088 break;
1089 case CS_TIMEOUT:
1090 host_status = DID_TIME_OUT;
1091 break;
1092 case CS_DATA_OVERRUN:
1093 case CS_COMMAND_OVERRUN:
1094 case CS_STATUS_OVERRUN:
1095 case CS_BAD_MESSAGE:
1096 case CS_NO_MESSAGE_OUT:
1097 case CS_EXT_ID_FAILED:
1098 case CS_IDE_MSG_FAILED:
1099 case CS_ABORT_MSG_FAILED:
1100 case CS_NOP_MSG_FAILED:
1101 case CS_PARITY_ERROR_MSG_FAILED:
1102 case CS_DEVICE_RESET_MSG_FAILED:
1103 case CS_ID_MSG_FAILED:
1104 case CS_UNEXP_BUS_FREE:
1105 host_status = DID_ERROR;
1106 break;
1107 case CS_DATA_UNDERRUN:
1108 host_status = DID_OK;
1109 break;
1110 default:
1111 printk(KERN_EMERG "qlogicpti%d: unknown completion status 0x%04x\n",
1112 id, sts->completion_status);
1113 host_status = DID_ERROR;
1114 break;
1115 }
1116
1117 return (sts->scsi_status & STATUS_MASK) | (host_status << 16);
1118 }
1119
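/* Drain the response ring: walk Status_Entry slots from res_out_ptr up to
 * the producer index the firmware left in MBOX5, look each entry's handle up
 * in cmd_slots[], copy back sense data, unmap DMA and chain the finished
 * commands through host_scribble. qpti_intr() then walks that chain and
 * calls ->scsi_done() for each command under the host lock.
 */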
1120 static struct scsi_cmnd *qlogicpti_intr_handler(struct qlogicpti *qpti)
1121 {
1122 struct scsi_cmnd *Cmnd, *done_queue = NULL;
1123 struct Status_Entry *sts;
1124 u_int in_ptr, out_ptr;
1125
1126 if (!(sbus_readw(qpti->qregs + SBUS_STAT) & SBUS_STAT_RINT))
1127 return NULL;
1128
1129 in_ptr = sbus_readw(qpti->qregs + MBOX5);
1130 sbus_writew(HCCTRL_CRIRQ, qpti->qregs + HCCTRL);
1131 if (sbus_readw(qpti->qregs + SBUS_SEMAPHORE) & SBUS_SEMAPHORE_LCK) {
1132 switch (sbus_readw(qpti->qregs + MBOX0)) {
1133 case ASYNC_SCSI_BUS_RESET:
1134 case EXECUTION_TIMEOUT_RESET:
1135 qpti->send_marker = 1;
1136 break;
1137 case INVALID_COMMAND:
1138 case HOST_INTERFACE_ERROR:
1139 case COMMAND_ERROR:
1140 case COMMAND_PARAM_ERROR:
1141 break;
1142 }
1143 sbus_writew(0, qpti->qregs + SBUS_SEMAPHORE);
1144 }
1145
1146 /* This looks like a network driver! */
1147 out_ptr = qpti->res_out_ptr;
1148 while (out_ptr != in_ptr) {
1149 u_int cmd_slot;
1150
1151 sts = (struct Status_Entry *) &qpti->res_cpu[out_ptr];
1152 out_ptr = NEXT_RES_PTR(out_ptr);
1153
1154 /* We store an index in the handle, not the pointer in
1155 * some form. This avoids problems due to the fact
1156 * that the handle provided is only 32-bits. -DaveM
1157 */
1158 cmd_slot = sts->handle;
1159 Cmnd = qpti->cmd_slots[cmd_slot];
1160 qpti->cmd_slots[cmd_slot] = NULL;
1161
1162 if (sts->completion_status == CS_RESET_OCCURRED ||
1163 sts->completion_status == CS_ABORTED ||
1164 (sts->status_flags & STF_BUS_RESET))
1165 qpti->send_marker = 1;
1166
1167 if (sts->state_flags & SF_GOT_SENSE)
1168 memcpy(Cmnd->sense_buffer, sts->req_sense_data,
1169 SCSI_SENSE_BUFFERSIZE);
1170
1171 if (sts->hdr.entry_type == ENTRY_STATUS)
1172 Cmnd->result =
1173 qlogicpti_return_status(sts, qpti->qpti_id);
1174 else
1175 Cmnd->result = DID_ERROR << 16;
1176
1177 if (scsi_bufflen(Cmnd))
1178 dma_unmap_sg(&qpti->op->dev,
1179 scsi_sglist(Cmnd), scsi_sg_count(Cmnd),
1180 Cmnd->sc_data_direction);
1181
1182 qpti->cmd_count[Cmnd->device->id]--;
1183 sbus_writew(out_ptr, qpti->qregs + MBOX5);
1184 Cmnd->host_scribble = (unsigned char *) done_queue;
1185 done_queue = Cmnd;
1186 }
1187 qpti->res_out_ptr = out_ptr;
1188
1189 return done_queue;
1190 }
1191
1192 static irqreturn_t qpti_intr(int irq, void *dev_id)
1193 {
1194 struct qlogicpti *qpti = dev_id;
1195 unsigned long flags;
1196 struct scsi_cmnd *dq;
1197
1198 spin_lock_irqsave(qpti->qhost->host_lock, flags);
1199 dq = qlogicpti_intr_handler(qpti);
1200
1201 if (dq != NULL) {
1202 do {
1203 struct scsi_cmnd *next;
1204
1205 next = (struct scsi_cmnd *) dq->host_scribble;
1206 dq->scsi_done(dq);
1207 dq = next;
1208 } while (dq != NULL);
1209 }
1210 spin_unlock_irqrestore(qpti->qhost->host_lock, flags);
1211
1212 return IRQ_HANDLED;
1213 }
1214
1215 static int qlogicpti_abort(struct scsi_cmnd *Cmnd)
1216 {
1217 u_short param[6];
1218 struct Scsi_Host *host = Cmnd->device->host;
1219 struct qlogicpti *qpti = (struct qlogicpti *) host->hostdata;
1220 int return_status = SUCCESS;
1221 u32 cmd_cookie;
1222 int i;
1223
1224 printk(KERN_WARNING "qlogicpti%d: Aborting cmd for tgt[%d] lun[%d]\n",
1225 qpti->qpti_id, (int)Cmnd->device->id, (int)Cmnd->device->lun);
1226
1227 qlogicpti_disable_irqs(qpti);
1228
1229 /* Find the 32-bit cookie we gave to the firmware for
1230 * this command.
1231 */
1232 for (i = 0; i < QLOGICPTI_REQ_QUEUE_LEN + 1; i++)
1233 if (qpti->cmd_slots[i] == Cmnd)
1234 break;
1235 cmd_cookie = i;
1236
1237 param[0] = MBOX_ABORT;
1238 param[1] = (((u_short) Cmnd->device->id) << 8) | Cmnd->device->lun;
1239 param[2] = cmd_cookie >> 16;
1240 param[3] = cmd_cookie & 0xffff;
1241 if (qlogicpti_mbox_command(qpti, param, 0) ||
1242 (param[0] != MBOX_COMMAND_COMPLETE)) {
1243 printk(KERN_EMERG "qlogicpti%d: scsi abort failure: %x\n",
1244 qpti->qpti_id, param[0]);
1245 return_status = FAILED;
1246 }
1247
1248 qlogicpti_enable_irqs(qpti);
1249
1250 return return_status;
1251 }
1252
1253 static int qlogicpti_reset(struct scsi_cmnd *Cmnd)
1254 {
1255 u_short param[6];
1256 struct Scsi_Host *host = Cmnd->device->host;
1257 struct qlogicpti *qpti = (struct qlogicpti *) host->hostdata;
1258 int return_status = SUCCESS;
1259
1260 printk(KERN_WARNING "qlogicpti%d: Resetting SCSI bus!\n",
1261 qpti->qpti_id);
1262
1263 qlogicpti_disable_irqs(qpti);
1264
1265 param[0] = MBOX_BUS_RESET;
1266 param[1] = qpti->host_param.bus_reset_delay;
1267 if (qlogicpti_mbox_command(qpti, param, 0) ||
1268 (param[0] != MBOX_COMMAND_COMPLETE)) {
1269 printk(KERN_EMERG "qlogicisp%d: scsi bus reset failure: %x\n",
1270 qpti->qpti_id, param[0]);
1271 return_status = FAILED;
1272 }
1273
1274 qlogicpti_enable_irqs(qpti);
1275
1276 return return_status;
1277 }
1278
1279 static struct scsi_host_template qpti_template = {
1280 .module = THIS_MODULE,
1281 .name = "qlogicpti",
1282 .info = qlogicpti_info,
1283 .queuecommand = qlogicpti_queuecommand,
1284 .slave_configure = qlogicpti_slave_configure,
1285 .eh_abort_handler = qlogicpti_abort,
1286 .eh_bus_reset_handler = qlogicpti_reset,
1287 .can_queue = QLOGICPTI_REQ_QUEUE_LEN,
1288 .this_id = 7,
1289 .sg_tablesize = QLOGICPTI_MAX_SG(QLOGICPTI_REQ_QUEUE_LEN),
1290 .cmd_per_lun = 1,
1291 .use_clustering = ENABLE_CLUSTERING,
1292 };
1293
1294 static const struct of_device_id qpti_match[];
1295 static int qpti_sbus_probe(struct platform_device *op)
1296 {
1297 struct device_node *dp = op->dev.of_node;
1298 struct Scsi_Host *host;
1299 struct qlogicpti *qpti;
1300 static int nqptis;
1301 const char *fcode;
1302
1303 /* Sometimes Antares cards come up not completely
1304 * set up, and we get a report of a zero IRQ.
1305 */
1306 if (op->archdata.irqs[0] == 0)
1307 return -ENODEV;
1308
1309 host = scsi_host_alloc(&qpti_template, sizeof(struct qlogicpti));
1310 if (!host)
1311 return -ENOMEM;
1312
1313 qpti = shost_priv(host);
1314
1315 host->max_id = MAX_TARGETS;
1316 qpti->qhost = host;
1317 qpti->op = op;
1318 qpti->qpti_id = nqptis;
1319 strcpy(qpti->prom_name, op->dev.of_node->name);
1320 qpti->is_pti = strcmp(qpti->prom_name, "QLGC,isp");
1321
1322 if (qpti_map_regs(qpti) < 0)
1323 goto fail_unlink;
1324
1325 if (qpti_register_irq(qpti) < 0)
1326 goto fail_unmap_regs;
1327
1328 qpti_get_scsi_id(qpti);
1329 qpti_get_bursts(qpti);
1330 qpti_get_clock(qpti);
1331
1332 /* Clear out scsi_cmnd array. */
1333 memset(qpti->cmd_slots, 0, sizeof(qpti->cmd_slots));
1334
1335 if (qpti_map_queues(qpti) < 0)
1336 goto fail_free_irq;
1337
1338 /* Load the firmware. */
1339 if (qlogicpti_load_firmware(qpti))
1340 goto fail_unmap_queues;
1341 if (qpti->is_pti) {
1342 /* Check the PTI status reg. */
1343 if (qlogicpti_verify_tmon(qpti))
1344 goto fail_unmap_queues;
1345 }
1346
1347 /* Reset the ISP and init res/req queues. */
1348 if (qlogicpti_reset_hardware(host))
1349 goto fail_unmap_queues;
1350
1351 printk("(Firmware v%d.%d.%d)", qpti->fware_majrev,
1352 qpti->fware_minrev, qpti->fware_micrev);
1353
1354 fcode = of_get_property(dp, "isp-fcode", NULL);
1355 if (fcode && fcode[0])
1356 printk("(FCode %s)", fcode);
1357 if (of_find_property(dp, "differential", NULL) != NULL)
1358 qpti->differential = 1;
1359
1360 printk("\nqlogicpti%d: [%s Wide, using %s interface]\n",
1361 qpti->qpti_id,
1362 (qpti->ultra ? "Ultra" : "Fast"),
1363 (qpti->differential ? "differential" : "single ended"));
1364
1365 if (scsi_add_host(host, &op->dev)) {
1366 printk("qlogicpti%d: Failed scsi_add_host\n", qpti->qpti_id);
1367 goto fail_unmap_queues;
1368 }
1369
1370 dev_set_drvdata(&op->dev, qpti);
1371
1372 qpti_chain_add(qpti);
1373
1374 scsi_scan_host(host);
1375 nqptis++;
1376
1377 return 0;
1378
1379 fail_unmap_queues:
1380 #define QSIZE(entries) (((entries) + 1) * QUEUE_ENTRY_LEN)
1381 dma_free_coherent(&op->dev,
1382 QSIZE(RES_QUEUE_LEN),
1383 qpti->res_cpu, qpti->res_dvma);
1384 dma_free_coherent(&op->dev,
1385 QSIZE(QLOGICPTI_REQ_QUEUE_LEN),
1386 qpti->req_cpu, qpti->req_dvma);
1387 #undef QSIZE
1388
1389 fail_unmap_regs:
1390 of_iounmap(&op->resource[0], qpti->qregs,
1391 resource_size(&op->resource[0]));
1392 if (qpti->is_pti)
1393 of_iounmap(&op->resource[0], qpti->sreg,
1394 sizeof(unsigned char));
1395
1396 fail_free_irq:
1397 free_irq(qpti->irq, qpti);
1398
1399 fail_unlink:
1400 scsi_host_put(host);
1401
1402 return -ENODEV;
1403 }
1404
1405 static int qpti_sbus_remove(struct platform_device *op)
1406 {
1407 struct qlogicpti *qpti = dev_get_drvdata(&op->dev);
1408
1409 qpti_chain_del(qpti);
1410
1411 scsi_remove_host(qpti->qhost);
1412
1413 /* Shut up the card. */
1414 sbus_writew(0, qpti->qregs + SBUS_CTRL);
1415
1416 /* Free IRQ handler and unmap Qlogic,ISP and PTI status regs. */
1417 free_irq(qpti->irq, qpti);
1418
1419 #define QSIZE(entries) (((entries) + 1) * QUEUE_ENTRY_LEN)
1420 dma_free_coherent(&op->dev,
1421 QSIZE(RES_QUEUE_LEN),
1422 qpti->res_cpu, qpti->res_dvma);
1423 dma_free_coherent(&op->dev,
1424 QSIZE(QLOGICPTI_REQ_QUEUE_LEN),
1425 qpti->req_cpu, qpti->req_dvma);
1426 #undef QSIZE
1427
1428 of_iounmap(&op->resource[0], qpti->qregs,
1429 resource_size(&op->resource[0]));
1430 if (qpti->is_pti)
1431 of_iounmap(&op->resource[0], qpti->sreg, sizeof(unsigned char));
1432
1433 scsi_host_put(qpti->qhost);
1434
1435 return 0;
1436 }
1437
1438 static const struct of_device_id qpti_match[] = {
1439 {
1440 .name = "ptisp",
1441 },
1442 {
1443 .name = "PTI,ptisp",
1444 },
1445 {
1446 .name = "QLGC,isp",
1447 },
1448 {
1449 .name = "SUNW,isp",
1450 },
1451 {},
1452 };
1453 MODULE_DEVICE_TABLE(of, qpti_match);
1454
1455 static struct platform_driver qpti_sbus_driver = {
1456 .driver = {
1457 .name = "qpti",
1458 .owner = THIS_MODULE,
1459 .of_match_table = qpti_match,
1460 },
1461 .probe = qpti_sbus_probe,
1462 .remove = qpti_sbus_remove,
1463 };
1464
1465 static int __init qpti_init(void)
1466 {
1467 return platform_driver_register(&qpti_sbus_driver);
1468 }
1469
1470 static void __exit qpti_exit(void)
1471 {
1472 platform_driver_unregister(&qpti_sbus_driver);
1473 }
1474
1475 MODULE_DESCRIPTION("QlogicISP SBUS driver");
1476 MODULE_AUTHOR("David S. Miller (davem@davemloft.net)");
1477 MODULE_LICENSE("GPL");
1478 MODULE_VERSION("2.1");
1479 MODULE_FIRMWARE("qlogic/isp1000.bin");
1480
1481 module_init(qpti_init);
1482 module_exit(qpti_exit);