/*
 * HighPoint RR3xxx/4xxx controller driver for Linux
 * Copyright (C) 2006-2012 HighPoint Technologies, Inc. All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; version 2 of the License.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * Please report bugs/comments/suggestions to linux@highpoint-tech.com
 *
 * For more information, visit http://www.highpoint-tech.com
 */
#include <linux/module.h>
#include <linux/types.h>
#include <linux/string.h>
#include <linux/kernel.h>
#include <linux/pci.h>
#include <linux/interrupt.h>
#include <linux/errno.h>
#include <linux/delay.h>
#include <linux/timer.h>
#include <linux/spinlock.h>
#include <linux/gfp.h>
#include <asm/uaccess.h>
#include <asm/io.h>
#include <asm/div64.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi.h>
#include <scsi/scsi_tcq.h>
#include <scsi/scsi_host.h>

#include "hptiop.h"

MODULE_AUTHOR("HighPoint Technologies, Inc.");
MODULE_DESCRIPTION("HighPoint RocketRAID 3xxx/4xxx Controller Driver");

static char driver_name[] = "hptiop";
static const char driver_name_long[] = "RocketRAID 3xxx/4xxx Controller driver";
static const char driver_ver[] = "v1.8";

static int iop_send_sync_msg(struct hptiop_hba *hba, u32 msg, u32 millisec);
static void hptiop_finish_scsi_req(struct hptiop_hba *hba, u32 tag,
				   struct hpt_iop_request_scsi_command *req);
static void hptiop_host_request_callback_itl(struct hptiop_hba *hba, u32 tag);
static void hptiop_iop_request_callback_itl(struct hptiop_hba *hba, u32 tag);
static void hptiop_message_callback(struct hptiop_hba *hba, u32 msg);

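/*
 * Wait for the ITL (Intel-based) IOP to become ready: poll the inbound
 * queue for up to @millisec ms.  Any value other than IOPMU_QUEUE_EMPTY
 * means the IOP is up; the value read is bounced back via the outbound
 * queue to complete the handshake.
 */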
static int iop_wait_ready_itl(struct hptiop_hba *hba, u32 millisec)
{
	u32 req = 0;
	int i;

	for (i = 0; i < millisec; i++) {
		req = readl(&hba->u.itl.iop->inbound_queue);
		if (req != IOPMU_QUEUE_EMPTY)
			break;
		msleep(1);
	}

	if (req != IOPMU_QUEUE_EMPTY) {
		writel(req, &hba->u.itl.iop->outbound_queue);
		readl(&hba->u.itl.iop->outbound_intstatus);
		return 0;
	}

	return -1;
}

static int iop_wait_ready_mv(struct hptiop_hba *hba, u32 millisec)
{
	return iop_send_sync_msg(hba, IOPMU_INBOUND_MSG0_NOP, millisec);
}

static int iop_wait_ready_mvfrey(struct hptiop_hba *hba, u32 millisec)
{
	return iop_send_sync_msg(hba, IOPMU_INBOUND_MSG0_NOP, millisec);
}

static void hptiop_request_callback_itl(struct hptiop_hba *hba, u32 tag)
{
	if (tag & IOPMU_QUEUE_ADDR_HOST_BIT)
		hptiop_host_request_callback_itl(hba,
				tag & ~IOPMU_QUEUE_ADDR_HOST_BIT);
	else
		hptiop_iop_request_callback_itl(hba, tag);
}

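/*
 * Drain the ITL outbound queue.  Synchronous requests are completed by
 * setting the request's context field to non-zero (iop_send_sync_request_itl
 * polls for this); everything else goes through the normal callback path.
 */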
static void hptiop_drain_outbound_queue_itl(struct hptiop_hba *hba)
{
	u32 req;

	while ((req = readl(&hba->u.itl.iop->outbound_queue)) !=
			IOPMU_QUEUE_EMPTY) {

		if (req & IOPMU_QUEUE_MASK_HOST_BITS)
			hptiop_request_callback_itl(hba, req);
		else {
			struct hpt_iop_request_header __iomem *p;

			p = (struct hpt_iop_request_header __iomem *)
				((char __iomem *)hba->u.itl.iop + req);

			if (readl(&p->flags) & IOP_REQUEST_FLAG_SYNC_REQUEST) {
				if (readl(&p->context))
					hptiop_request_callback_itl(hba, req);
				else
					writel(1, &p->context);
			}
			else
				hptiop_request_callback_itl(hba, req);
		}
	}
}

static int iop_intr_itl(struct hptiop_hba *hba)
{
	struct hpt_iopmu_itl __iomem *iop = hba->u.itl.iop;
	void __iomem *plx = hba->u.itl.plx;
	u32 status;
	int ret = 0;

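	/*
	 * Boards with device IDs 0x44xx sit behind a PLX PCIe bridge (see
	 * hptiop_map_pci_bar_itl); the 0x11C5C/0x11C60 offsets below are
	 * vendor-provided magic for checking and clearing its interrupt.
	 */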
	if (plx && readl(plx + 0x11C5C) & 0xf)
		writel(1, plx + 0x11C60);

	status = readl(&iop->outbound_intstatus);

	if (status & IOPMU_OUTBOUND_INT_MSG0) {
		u32 msg = readl(&iop->outbound_msgaddr0);

		dprintk("received outbound msg %x\n", msg);
		writel(IOPMU_OUTBOUND_INT_MSG0, &iop->outbound_intstatus);
		hptiop_message_callback(hba, msg);
		ret = 1;
	}

	if (status & IOPMU_OUTBOUND_INT_POSTQUEUE) {
		hptiop_drain_outbound_queue_itl(hba);
		ret = 1;
	}

	return ret;
}

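/*
 * Pop one 8-byte entry from the MV (Marvell-based) outbound ring, or
 * return 0 if the ring is empty; callers rely on valid entries being
 * non-zero to terminate their drain loops.
 */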
static u64 mv_outbound_read(struct hpt_iopmu_mv __iomem *mu)
{
	u32 outbound_tail = readl(&mu->outbound_tail);
	u32 outbound_head = readl(&mu->outbound_head);

	if (outbound_tail != outbound_head) {
		u64 p;

		memcpy_fromio(&p, &mu->outbound_q[mu->outbound_tail], 8);
		outbound_tail++;

		if (outbound_tail == MVIOP_QUEUE_LEN)
			outbound_tail = 0;
		writel(outbound_tail, &mu->outbound_tail);
		return p;
	} else
		return 0;
}

static void mv_inbound_write(u64 p, struct hptiop_hba *hba)
{
	u32 inbound_head = readl(&hba->u.mv.mu->inbound_head);
	u32 head = inbound_head + 1;

	if (head == MVIOP_QUEUE_LEN)
		head = 0;

	memcpy_toio(&hba->u.mv.mu->inbound_q[inbound_head], &p, 8);
	writel(head, &hba->u.mv.mu->inbound_head);
	writel(MVIOP_MU_INBOUND_INT_POSTQUEUE,
	       &hba->u.mv.regs->inbound_doorbell);
}

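/*
 * MV completion tags pack the request type into bits 5-7 and the request
 * index into bits 8 and up; MVIOP_MU_QUEUE_REQUEST_RESULT_BIT flags a
 * successful completion.
 */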
static void hptiop_request_callback_mv(struct hptiop_hba *hba, u64 tag)
{
	u32 req_type = (tag >> 5) & 0x7;
	struct hpt_iop_request_scsi_command *req;

	dprintk("hptiop_request_callback_mv: tag=%llx\n", tag);

	BUG_ON((tag & MVIOP_MU_QUEUE_REQUEST_RETURN_CONTEXT) == 0);

	switch (req_type) {
	case IOP_REQUEST_TYPE_GET_CONFIG:
	case IOP_REQUEST_TYPE_SET_CONFIG:
		hba->msg_done = 1;
		break;

	case IOP_REQUEST_TYPE_SCSI_COMMAND:
		req = hba->reqs[tag >> 8].req_virt;
		if (likely(tag & MVIOP_MU_QUEUE_REQUEST_RESULT_BIT))
			req->header.result = cpu_to_le32(IOP_RESULT_SUCCESS);

		hptiop_finish_scsi_req(hba, tag >> 8, req);
		break;

	default:
		break;
	}
}

static int iop_intr_mv(struct hptiop_hba *hba)
{
	u32 status;
	int ret = 0;

	status = readl(&hba->u.mv.regs->outbound_doorbell);
	writel(~status, &hba->u.mv.regs->outbound_doorbell);

	if (status & MVIOP_MU_OUTBOUND_INT_MSG) {
		u32 msg;
		msg = readl(&hba->u.mv.mu->outbound_msg);
		dprintk("received outbound msg %x\n", msg);
		hptiop_message_callback(hba, msg);
		ret = 1;
	}

	if (status & MVIOP_MU_OUTBOUND_INT_POSTQUEUE) {
		u64 tag;

		while ((tag = mv_outbound_read(hba->u.mv.mu)))
			hptiop_request_callback_mv(hba, tag);
		ret = 1;
	}

	return ret;
}

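/*
 * MVFrey completion tags keep the request type in the low 4 bits and the
 * request index in bits 4-11; IOPMU_QUEUE_REQUEST_RESULT_BIT flags a
 * successful completion.
 */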
static void hptiop_request_callback_mvfrey(struct hptiop_hba *hba, u32 _tag)
{
	u32 req_type = _tag & 0xf;
	struct hpt_iop_request_scsi_command *req;

	switch (req_type) {
	case IOP_REQUEST_TYPE_GET_CONFIG:
	case IOP_REQUEST_TYPE_SET_CONFIG:
		hba->msg_done = 1;
		break;

	case IOP_REQUEST_TYPE_SCSI_COMMAND:
		req = hba->reqs[(_tag >> 4) & 0xff].req_virt;
		if (likely(_tag & IOPMU_QUEUE_REQUEST_RESULT_BIT))
			req->header.result = IOP_RESULT_SUCCESS;
		hptiop_finish_scsi_req(hba, (_tag >> 4) & 0xff, req);
		break;

	default:
		break;
	}
}

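/*
 * MVFrey interrupt handler: mask the PCIe function interrupt while
 * running (once the HBA is initialized), acknowledge the doorbell and ISR
 * causes, then drain the outbound completion list until the
 * firmware-updated shadow pointer (*outlist_cptr) stops moving.
 */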
static int iop_intr_mvfrey(struct hptiop_hba *hba)
{
	u32 _tag, status, cptr, cur_rptr;
	int ret = 0;

	if (hba->initialized)
		writel(0, &(hba->u.mvfrey.mu->pcie_f0_int_enable));

	status = readl(&(hba->u.mvfrey.mu->f0_doorbell));
	if (status) {
		writel(status, &(hba->u.mvfrey.mu->f0_doorbell));
		if (status & CPU_TO_F0_DRBL_MSG_BIT) {
			u32 msg = readl(&(hba->u.mvfrey.mu->cpu_to_f0_msg_a));
			dprintk("received outbound msg %x\n", msg);
			hptiop_message_callback(hba, msg);
		}
		ret = 1;
	}

	status = readl(&(hba->u.mvfrey.mu->isr_cause));
	if (status) {
		writel(status, &(hba->u.mvfrey.mu->isr_cause));
		do {
			cptr = *hba->u.mvfrey.outlist_cptr & 0xff;
			cur_rptr = hba->u.mvfrey.outlist_rptr;
			while (cur_rptr != cptr) {
				cur_rptr++;
				if (cur_rptr == hba->u.mvfrey.list_count)
					cur_rptr = 0;

				_tag = hba->u.mvfrey.outlist[cur_rptr].val;
				BUG_ON(!(_tag & IOPMU_QUEUE_MASK_HOST_BITS));
				hptiop_request_callback_mvfrey(hba, _tag);
				ret = 1;
			}
			hba->u.mvfrey.outlist_rptr = cur_rptr;
		} while (cptr != (*hba->u.mvfrey.outlist_cptr & 0xff));
	}

	if (hba->initialized)
		writel(0x1010, &(hba->u.mvfrey.mu->pcie_f0_int_enable));

	return ret;
}

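/*
 * Post a request to the ITL IOP and busy-wait for completion: the context
 * field is cleared before posting, and the outbound drain path sets it
 * non-zero once the IOP has finished the request.
 */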
static int iop_send_sync_request_itl(struct hptiop_hba *hba,
				     void __iomem *_req, u32 millisec)
{
	struct hpt_iop_request_header __iomem *req = _req;
	u32 i;

	writel(readl(&req->flags) | IOP_REQUEST_FLAG_SYNC_REQUEST, &req->flags);
	writel(0, &req->context);
	writel((unsigned long)req - (unsigned long)hba->u.itl.iop,
	       &hba->u.itl.iop->inbound_queue);
	readl(&hba->u.itl.iop->outbound_intstatus);

	for (i = 0; i < millisec; i++) {
		iop_intr_itl(hba);
		if (readl(&req->context))
			return 0;
		msleep(1);
	}

	return -1;
}

static int iop_send_sync_request_mv(struct hptiop_hba *hba,
				    u32 size_bits, u32 millisec)
{
	struct hpt_iop_request_header *reqhdr = hba->u.mv.internal_req;
	u32 i;

	hba->msg_done = 0;
	reqhdr->flags |= cpu_to_le32(IOP_REQUEST_FLAG_SYNC_REQUEST);
	mv_inbound_write(hba->u.mv.internal_req_phy |
			MVIOP_MU_QUEUE_ADDR_HOST_BIT | size_bits, hba);

	for (i = 0; i < millisec; i++) {
		iop_intr_mv(hba);
		if (hba->msg_done)
			return 0;
		msleep(1);
	}
	return -1;
}

static int iop_send_sync_request_mvfrey(struct hptiop_hba *hba,
					u32 size_bits, u32 millisec)
{
	struct hpt_iop_request_header *reqhdr =
		hba->u.mvfrey.internal_req.req_virt;
	u32 i;

	hba->msg_done = 0;
	reqhdr->flags |= cpu_to_le32(IOP_REQUEST_FLAG_SYNC_REQUEST);
	hba->ops->post_req(hba, &(hba->u.mvfrey.internal_req));

	for (i = 0; i < millisec; i++) {
		iop_intr_mvfrey(hba);
		if (hba->msg_done)
			break;
		msleep(1);
	}
	return hba->msg_done ? 0 : -1;
}

static void hptiop_post_msg_itl(struct hptiop_hba *hba, u32 msg)
{
	writel(msg, &hba->u.itl.iop->inbound_msgaddr0);
	readl(&hba->u.itl.iop->outbound_intstatus);
}

static void hptiop_post_msg_mv(struct hptiop_hba *hba, u32 msg)
{
	writel(msg, &hba->u.mv.mu->inbound_msg);
	writel(MVIOP_MU_INBOUND_INT_MSG, &hba->u.mv.regs->inbound_doorbell);
	readl(&hba->u.mv.regs->inbound_doorbell);
}

static void hptiop_post_msg_mvfrey(struct hptiop_hba *hba, u32 msg)
{
	writel(msg, &(hba->u.mvfrey.mu->f0_to_cpu_msg_a));
	readl(&(hba->u.mvfrey.mu->f0_to_cpu_msg_a));
}

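/*
 * Send a message-register command and poll for hba->msg_done.  Interrupts
 * are disabled for the duration and the per-family iop_intr handler is
 * invoked by hand under host_lock; this path sleeps (msleep), so it must
 * only be called from process context.
 */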
static int iop_send_sync_msg(struct hptiop_hba *hba, u32 msg, u32 millisec)
{
	u32 i;

	hba->msg_done = 0;
	hba->ops->disable_intr(hba);
	hba->ops->post_msg(hba, msg);

	for (i = 0; i < millisec; i++) {
		spin_lock_irq(hba->host->host_lock);
		hba->ops->iop_intr(hba);
		spin_unlock_irq(hba->host->host_lock);
		if (hba->msg_done)
			break;
		msleep(1);
	}

	hba->ops->enable_intr(hba);
	return hba->msg_done ? 0 : -1;
}

static int iop_get_config_itl(struct hptiop_hba *hba,
			      struct hpt_iop_request_get_config *config)
{
	u32 req32;
	struct hpt_iop_request_get_config __iomem *req;

	req32 = readl(&hba->u.itl.iop->inbound_queue);
	if (req32 == IOPMU_QUEUE_EMPTY)
		return -1;

	req = (struct hpt_iop_request_get_config __iomem *)
		((unsigned long)hba->u.itl.iop + req32);

	writel(0, &req->header.flags);
	writel(IOP_REQUEST_TYPE_GET_CONFIG, &req->header.type);
	writel(sizeof(struct hpt_iop_request_get_config), &req->header.size);
	writel(IOP_RESULT_PENDING, &req->header.result);

	if (iop_send_sync_request_itl(hba, req, 20000)) {
		dprintk("Get config send cmd failed\n");
		return -1;
	}

	memcpy_fromio(config, req, sizeof(*config));
	writel(req32, &hba->u.itl.iop->outbound_queue);
	return 0;
}

static int iop_get_config_mv(struct hptiop_hba *hba,
			     struct hpt_iop_request_get_config *config)
{
	struct hpt_iop_request_get_config *req = hba->u.mv.internal_req;

	req->header.flags = cpu_to_le32(IOP_REQUEST_FLAG_OUTPUT_CONTEXT);
	req->header.type = cpu_to_le32(IOP_REQUEST_TYPE_GET_CONFIG);
	req->header.size =
		cpu_to_le32(sizeof(struct hpt_iop_request_get_config));
	req->header.result = cpu_to_le32(IOP_RESULT_PENDING);
	req->header.context = cpu_to_le32(IOP_REQUEST_TYPE_GET_CONFIG<<5);
	req->header.context_hi32 = 0;

	if (iop_send_sync_request_mv(hba, 0, 20000)) {
		dprintk("Get config send cmd failed\n");
		return -1;
	}

	memcpy(config, req, sizeof(struct hpt_iop_request_get_config));
	return 0;
}

static int iop_get_config_mvfrey(struct hptiop_hba *hba,
				 struct hpt_iop_request_get_config *config)
{
	struct hpt_iop_request_get_config *info = hba->u.mvfrey.config;

	if (info->header.size != sizeof(struct hpt_iop_request_get_config) ||
	    info->header.type != IOP_REQUEST_TYPE_GET_CONFIG)
		return -1;

	config->interface_version = info->interface_version;
	config->firmware_version = info->firmware_version;
	config->max_requests = info->max_requests;
	config->request_size = info->request_size;
	config->max_sg_count = info->max_sg_count;
	config->data_transfer_length = info->data_transfer_length;
	config->alignment_mask = info->alignment_mask;
	config->max_devices = info->max_devices;
	config->sdram_size = info->sdram_size;

	return 0;
}

static int iop_set_config_itl(struct hptiop_hba *hba,
			      struct hpt_iop_request_set_config *config)
{
	u32 req32;
	struct hpt_iop_request_set_config __iomem *req;

	req32 = readl(&hba->u.itl.iop->inbound_queue);
	if (req32 == IOPMU_QUEUE_EMPTY)
		return -1;

	req = (struct hpt_iop_request_set_config __iomem *)
		((unsigned long)hba->u.itl.iop + req32);

	memcpy_toio((u8 __iomem *)req + sizeof(struct hpt_iop_request_header),
		(u8 *)config + sizeof(struct hpt_iop_request_header),
		sizeof(struct hpt_iop_request_set_config) -
			sizeof(struct hpt_iop_request_header));

	writel(0, &req->header.flags);
	writel(IOP_REQUEST_TYPE_SET_CONFIG, &req->header.type);
	writel(sizeof(struct hpt_iop_request_set_config), &req->header.size);
	writel(IOP_RESULT_PENDING, &req->header.result);

	if (iop_send_sync_request_itl(hba, req, 20000)) {
		dprintk("Set config send cmd failed\n");
		return -1;
	}

	writel(req32, &hba->u.itl.iop->outbound_queue);
	return 0;
}

static int iop_set_config_mv(struct hptiop_hba *hba,
			     struct hpt_iop_request_set_config *config)
{
	struct hpt_iop_request_set_config *req = hba->u.mv.internal_req;

	memcpy(req, config, sizeof(struct hpt_iop_request_set_config));
	req->header.flags = cpu_to_le32(IOP_REQUEST_FLAG_OUTPUT_CONTEXT);
	req->header.type = cpu_to_le32(IOP_REQUEST_TYPE_SET_CONFIG);
	req->header.size =
		cpu_to_le32(sizeof(struct hpt_iop_request_set_config));
	req->header.result = cpu_to_le32(IOP_RESULT_PENDING);
	req->header.context = cpu_to_le32(IOP_REQUEST_TYPE_SET_CONFIG<<5);
	req->header.context_hi32 = 0;

	if (iop_send_sync_request_mv(hba, 0, 20000)) {
		dprintk("Set config send cmd failed\n");
		return -1;
	}

	return 0;
}

static int iop_set_config_mvfrey(struct hptiop_hba *hba,
				 struct hpt_iop_request_set_config *config)
{
	struct hpt_iop_request_set_config *req =
		hba->u.mvfrey.internal_req.req_virt;

	memcpy(req, config, sizeof(struct hpt_iop_request_set_config));
	req->header.flags = cpu_to_le32(IOP_REQUEST_FLAG_OUTPUT_CONTEXT);
	req->header.type = cpu_to_le32(IOP_REQUEST_TYPE_SET_CONFIG);
	req->header.size =
		cpu_to_le32(sizeof(struct hpt_iop_request_set_config));
	req->header.result = cpu_to_le32(IOP_RESULT_PENDING);
	req->header.context = cpu_to_le32(IOP_REQUEST_TYPE_SET_CONFIG<<5);
	req->header.context_hi32 = 0;

	if (iop_send_sync_request_mvfrey(hba, 0, 20000)) {
		dprintk("Set config send cmd failed\n");
		return -1;
	}

	return 0;
}

static void hptiop_enable_intr_itl(struct hptiop_hba *hba)
{
	writel(~(IOPMU_OUTBOUND_INT_POSTQUEUE | IOPMU_OUTBOUND_INT_MSG0),
	       &hba->u.itl.iop->outbound_intmask);
}

static void hptiop_enable_intr_mv(struct hptiop_hba *hba)
{
	writel(MVIOP_MU_OUTBOUND_INT_POSTQUEUE | MVIOP_MU_OUTBOUND_INT_MSG,
	       &hba->u.mv.regs->outbound_intmask);
}

static void hptiop_enable_intr_mvfrey(struct hptiop_hba *hba)
{
	writel(CPU_TO_F0_DRBL_MSG_BIT, &(hba->u.mvfrey.mu->f0_doorbell_enable));
	writel(0x1, &(hba->u.mvfrey.mu->isr_enable));
	writel(0x1010, &(hba->u.mvfrey.mu->pcie_f0_int_enable));
}

static int hptiop_initialize_iop(struct hptiop_hba *hba)
{
	/* enable interrupts */
	hba->ops->enable_intr(hba);

	hba->initialized = 1;

	/* start background tasks */
	if (iop_send_sync_msg(hba,
			IOPMU_INBOUND_MSG0_START_BACKGROUND_TASK, 5000)) {
		printk(KERN_ERR "scsi%d: fail to start background task\n",
			hba->host->host_no);
		return -1;
	}
	return 0;
}

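/*
 * Map one memory BAR of the adapter.  Returns the ioremap()ed virtual
 * address or NULL on failure; callers are responsible for iounmap().
 */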
static void __iomem *hptiop_map_pci_bar(struct hptiop_hba *hba, int index)
{
	u32 mem_base_phy, length;
	void __iomem *mem_base_virt;

	struct pci_dev *pcidev = hba->pcidev;

	if (!(pci_resource_flags(pcidev, index) & IORESOURCE_MEM)) {
		printk(KERN_ERR "scsi%d: pci resource invalid\n",
			hba->host->host_no);
		return NULL;
	}

	mem_base_phy = pci_resource_start(pcidev, index);
	length = pci_resource_len(pcidev, index);
	mem_base_virt = ioremap(mem_base_phy, length);

	if (!mem_base_virt) {
		printk(KERN_ERR "scsi%d: Fail to ioremap memory space\n",
			hba->host->host_no);
		return NULL;
	}
	return mem_base_virt;
}

static int hptiop_map_pci_bar_itl(struct hptiop_hba *hba)
{
	struct pci_dev *pcidev = hba->pcidev;
	hba->u.itl.iop = hptiop_map_pci_bar(hba, 0);
	if (hba->u.itl.iop == NULL)
		return -1;
	if ((pcidev->device & 0xff00) == 0x4400) {
		hba->u.itl.plx = hba->u.itl.iop;
		hba->u.itl.iop = hptiop_map_pci_bar(hba, 2);
		if (hba->u.itl.iop == NULL) {
			iounmap(hba->u.itl.plx);
			return -1;
		}
	}
	return 0;
}

static void hptiop_unmap_pci_bar_itl(struct hptiop_hba *hba)
{
	if (hba->u.itl.plx)
		iounmap(hba->u.itl.plx);
	iounmap(hba->u.itl.iop);
}

static int hptiop_map_pci_bar_mv(struct hptiop_hba *hba)
{
	hba->u.mv.regs = hptiop_map_pci_bar(hba, 0);
	if (hba->u.mv.regs == NULL)
		return -1;

	hba->u.mv.mu = hptiop_map_pci_bar(hba, 2);
	if (hba->u.mv.mu == NULL) {
		iounmap(hba->u.mv.regs);
		return -1;
	}

	return 0;
}

static int hptiop_map_pci_bar_mvfrey(struct hptiop_hba *hba)
{
	hba->u.mvfrey.config = hptiop_map_pci_bar(hba, 0);
	if (hba->u.mvfrey.config == NULL)
		return -1;

	hba->u.mvfrey.mu = hptiop_map_pci_bar(hba, 2);
	if (hba->u.mvfrey.mu == NULL) {
		iounmap(hba->u.mvfrey.config);
		return -1;
	}

	return 0;
}

static void hptiop_unmap_pci_bar_mv(struct hptiop_hba *hba)
{
	iounmap(hba->u.mv.regs);
	iounmap(hba->u.mv.mu);
}

static void hptiop_unmap_pci_bar_mvfrey(struct hptiop_hba *hba)
{
	iounmap(hba->u.mvfrey.config);
	iounmap(hba->u.mvfrey.mu);
}

static void hptiop_message_callback(struct hptiop_hba *hba, u32 msg)
{
	dprintk("iop message 0x%x\n", msg);

	if (msg == IOPMU_INBOUND_MSG0_NOP ||
	    msg == IOPMU_INBOUND_MSG0_RESET_COMM)
		hba->msg_done = 1;

	if (!hba->initialized)
		return;

	if (msg == IOPMU_INBOUND_MSG0_RESET) {
		atomic_set(&hba->resetting, 0);
		wake_up(&hba->reset_wq);
	}
	else if (msg <= IOPMU_INBOUND_MSG0_MAX)
		hba->msg_done = 1;
}

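/*
 * Request slots live on a singly-linked free list threaded through
 * struct hptiop_request; callers (queuecommand and the completion paths)
 * run under host_lock, so no extra locking is needed here.
 */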
static struct hptiop_request *get_req(struct hptiop_hba *hba)
{
	struct hptiop_request *ret;

	dprintk("get_req : req=%p\n", hba->req_list);

	ret = hba->req_list;
	if (ret)
		hba->req_list = ret->next;

	return ret;
}

static void free_req(struct hptiop_hba *hba, struct hptiop_request *req)
{
	dprintk("free_req(%d, %p)\n", req->index, req);
	req->next = hba->req_list;
	hba->req_list = req;
}

static void hptiop_finish_scsi_req(struct hptiop_hba *hba, u32 tag,
				   struct hpt_iop_request_scsi_command *req)
{
	struct scsi_cmnd *scp;

	dprintk("hptiop_finish_scsi_req: req=%p, type=%d, "
			"result=%d, context=0x%x tag=%d\n",
			req, req->header.type, req->header.result,
			req->header.context, tag);

	BUG_ON(!req->header.result);
	BUG_ON(req->header.type != cpu_to_le32(IOP_REQUEST_TYPE_SCSI_COMMAND));

	scp = hba->reqs[tag].scp;

	if (HPT_SCP(scp)->mapped)
		scsi_dma_unmap(scp);

	switch (le32_to_cpu(req->header.result)) {
	case IOP_RESULT_SUCCESS:
		scsi_set_resid(scp,
			scsi_bufflen(scp) - le32_to_cpu(req->dataxfer_length));
		scp->result = (DID_OK<<16);
		break;
	case IOP_RESULT_BAD_TARGET:
		scp->result = (DID_BAD_TARGET<<16);
		break;
	case IOP_RESULT_BUSY:
		scp->result = (DID_BUS_BUSY<<16);
		break;
	case IOP_RESULT_RESET:
		scp->result = (DID_RESET<<16);
		break;
	case IOP_RESULT_FAIL:
		scp->result = (DID_ERROR<<16);
		break;
	case IOP_RESULT_INVALID_REQUEST:
		scp->result = (DID_ABORT<<16);
		break;
	case IOP_RESULT_CHECK_CONDITION:
		scsi_set_resid(scp,
			scsi_bufflen(scp) - le32_to_cpu(req->dataxfer_length));
		scp->result = SAM_STAT_CHECK_CONDITION;
		memcpy(scp->sense_buffer, &req->sg_list,
		       min_t(size_t, SCSI_SENSE_BUFFERSIZE,
			     le32_to_cpu(req->dataxfer_length)));
		goto skip_resid;

	default:
		scp->result = DRIVER_INVALID << 24 | DID_ABORT << 16;
		break;
	}

	scsi_set_resid(scp,
		scsi_bufflen(scp) - le32_to_cpu(req->dataxfer_length));

skip_resid:
	dprintk("scsi_done(%p)\n", scp);
	scp->scsi_done(scp);
	free_req(hba, &hba->reqs[tag]);
}

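/*
 * On the v2 ITL interface the completion tag itself carries
 * IOPMU_QUEUE_REQUEST_RESULT_BIT on success; older firmware reports the
 * result only in the request header.
 */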
static void hptiop_host_request_callback_itl(struct hptiop_hba *hba, u32 _tag)
{
	struct hpt_iop_request_scsi_command *req;
	u32 tag;

	if (hba->iopintf_v2) {
		tag = _tag & ~IOPMU_QUEUE_REQUEST_RESULT_BIT;
		req = hba->reqs[tag].req_virt;
		if (likely(_tag & IOPMU_QUEUE_REQUEST_RESULT_BIT))
			req->header.result = cpu_to_le32(IOP_RESULT_SUCCESS);
	} else {
		tag = _tag;
		req = hba->reqs[tag].req_virt;
	}

	hptiop_finish_scsi_req(hba, tag, req);
}

static void hptiop_iop_request_callback_itl(struct hptiop_hba *hba, u32 tag)
{
	struct hpt_iop_request_header __iomem *req;
	struct hpt_iop_request_ioctl_command __iomem *p;
	struct hpt_ioctl_k *arg;

	req = (struct hpt_iop_request_header __iomem *)
		((unsigned long)hba->u.itl.iop + tag);
	dprintk("hptiop_iop_request_callback_itl: req=%p, type=%d, "
			"result=%d, context=0x%x tag=%d\n",
			req, readl(&req->type), readl(&req->result),
			readl(&req->context), tag);

	BUG_ON(!readl(&req->result));
	BUG_ON(readl(&req->type) != IOP_REQUEST_TYPE_IOCTL_COMMAND);

	p = (struct hpt_iop_request_ioctl_command __iomem *)req;
	arg = (struct hpt_ioctl_k *)(unsigned long)
		(readl(&req->context) |
			((u64)readl(&req->context_hi32)<<32));

	if (readl(&req->result) == IOP_RESULT_SUCCESS) {
		arg->result = HPT_IOCTL_RESULT_OK;

		if (arg->outbuf_size)
			memcpy_fromio(arg->outbuf,
				&p->buf[(readl(&p->inbuf_size) + 3) & ~3],
				arg->outbuf_size);

		if (arg->bytes_returned)
			*arg->bytes_returned = arg->outbuf_size;
	}
	else
		arg->result = HPT_IOCTL_RESULT_FAILED;

	arg->done(arg);
	writel(tag, &hba->u.itl.iop->outbound_queue);
}

static irqreturn_t hptiop_intr(int irq, void *dev_id)
{
	struct hptiop_hba *hba = dev_id;
	int handled;
	unsigned long flags;

	spin_lock_irqsave(hba->host->host_lock, flags);
	handled = hba->ops->iop_intr(hba);
	spin_unlock_irqrestore(hba->host->host_lock, flags);

	return handled;
}

static int hptiop_buildsgl(struct scsi_cmnd *scp, struct hpt_iopsg *psg)
{
	struct Scsi_Host *host = scp->device->host;
	struct hptiop_hba *hba = (struct hptiop_hba *)host->hostdata;
	struct scatterlist *sg;
	int idx, nseg;

	nseg = scsi_dma_map(scp);
	BUG_ON(nseg < 0);
	if (!nseg)
		return 0;

	HPT_SCP(scp)->sgcnt = nseg;
	HPT_SCP(scp)->mapped = 1;

	BUG_ON(HPT_SCP(scp)->sgcnt > hba->max_sg_descriptors);

	scsi_for_each_sg(scp, sg, HPT_SCP(scp)->sgcnt, idx) {
		psg[idx].pci_address = cpu_to_le64(sg_dma_address(sg)) |
			hba->ops->host_phy_flag;
		psg[idx].size = cpu_to_le32(sg_dma_len(sg));
		psg[idx].eot = (idx == HPT_SCP(scp)->sgcnt - 1) ?
			cpu_to_le32(1) : 0;
	}
	return HPT_SCP(scp)->sgcnt;
}

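/*
 * Post a request to the ITL inbound queue.  On the v2 interface the low
 * bits of the posted address carry a request-size hint (<256 bytes,
 * <512 bytes, or larger) alongside the host-address flag.
 */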
static void hptiop_post_req_itl(struct hptiop_hba *hba,
				struct hptiop_request *_req)
{
	struct hpt_iop_request_header *reqhdr = _req->req_virt;

	reqhdr->context = cpu_to_le32(IOPMU_QUEUE_ADDR_HOST_BIT |
				      (u32)_req->index);
	reqhdr->context_hi32 = 0;

	if (hba->iopintf_v2) {
		u32 size, size_bits;

		size = le32_to_cpu(reqhdr->size);
		if (size < 256)
			size_bits = IOPMU_QUEUE_REQUEST_SIZE_BIT;
		else if (size < 512)
			size_bits = IOPMU_QUEUE_ADDR_HOST_BIT;
		else
			size_bits = IOPMU_QUEUE_REQUEST_SIZE_BIT |
				IOPMU_QUEUE_ADDR_HOST_BIT;
		writel(_req->req_shifted_phy | size_bits,
		       &hba->u.itl.iop->inbound_queue);
	} else
		writel(_req->req_shifted_phy | IOPMU_QUEUE_ADDR_HOST_BIT,
		       &hba->u.itl.iop->inbound_queue);
}

static void hptiop_post_req_mv(struct hptiop_hba *hba,
			       struct hptiop_request *_req)
{
	struct hpt_iop_request_header *reqhdr = _req->req_virt;
	u32 size, size_bit;

	reqhdr->context = cpu_to_le32(_req->index<<8 |
				      IOP_REQUEST_TYPE_SCSI_COMMAND<<5);
	reqhdr->context_hi32 = 0;
	size = le32_to_cpu(reqhdr->size);

	if (size <= 256)
		size_bit = 0;
	else if (size <= 256*2)
		size_bit = 1;
	else if (size <= 256*3)
		size_bit = 2;
	else
		size_bit = 3;

	mv_inbound_write((_req->req_shifted_phy << 5) |
		MVIOP_MU_QUEUE_ADDR_HOST_BIT | size_bit, hba);
}

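/*
 * Queue a request on the MVFrey inbound list.  The write pointer wraps at
 * list_count and flips CL_POINTER_TOGGLE on each wrap-around; the request's
 * physical address travels shifted right by 5, which is why all requests
 * are 32-byte aligned.
 */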
static void hptiop_post_req_mvfrey(struct hptiop_hba *hba,
				   struct hptiop_request *_req)
{
	struct hpt_iop_request_header *reqhdr = _req->req_virt;
	u32 index;

	reqhdr->flags |= cpu_to_le32(IOP_REQUEST_FLAG_OUTPUT_CONTEXT |
			IOP_REQUEST_FLAG_ADDR_BITS |
			((_req->req_shifted_phy >> 11) & 0xffff0000));
	reqhdr->context = cpu_to_le32(IOPMU_QUEUE_ADDR_HOST_BIT |
			(_req->index << 4) | reqhdr->type);
	reqhdr->context_hi32 = cpu_to_le32((_req->req_shifted_phy << 5) &
			0xffffffff);

	hba->u.mvfrey.inlist_wptr++;
	index = hba->u.mvfrey.inlist_wptr & 0x3fff;

	if (index == hba->u.mvfrey.list_count) {
		index = 0;
		hba->u.mvfrey.inlist_wptr &= ~0x3fff;
		hba->u.mvfrey.inlist_wptr ^= CL_POINTER_TOGGLE;
	}

	hba->u.mvfrey.inlist[index].addr =
		(dma_addr_t)_req->req_shifted_phy << 5;
	hba->u.mvfrey.inlist[index].intrfc_len = (reqhdr->size + 3) / 4;
	writel(hba->u.mvfrey.inlist_wptr,
	       &(hba->u.mvfrey.mu->inbound_write_ptr));
	readl(&(hba->u.mvfrey.mu->inbound_write_ptr));
}

static int hptiop_reset_comm_itl(struct hptiop_hba *hba)
{
	return 0;
}

static int hptiop_reset_comm_mv(struct hptiop_hba *hba)
{
	return 0;
}

static int hptiop_reset_comm_mvfrey(struct hptiop_hba *hba)
{
	u32 list_count = hba->u.mvfrey.list_count;

	if (iop_send_sync_msg(hba, IOPMU_INBOUND_MSG0_RESET_COMM, 3000))
		return -1;

	/* wait 100ms for MCU ready */
	msleep(100);

	writel(cpu_to_le32(hba->u.mvfrey.inlist_phy & 0xffffffff),
	       &(hba->u.mvfrey.mu->inbound_base));
	writel(cpu_to_le32((hba->u.mvfrey.inlist_phy >> 16) >> 16),
	       &(hba->u.mvfrey.mu->inbound_base_high));

	writel(cpu_to_le32(hba->u.mvfrey.outlist_phy & 0xffffffff),
	       &(hba->u.mvfrey.mu->outbound_base));
	writel(cpu_to_le32((hba->u.mvfrey.outlist_phy >> 16) >> 16),
	       &(hba->u.mvfrey.mu->outbound_base_high));

	writel(cpu_to_le32(hba->u.mvfrey.outlist_cptr_phy & 0xffffffff),
	       &(hba->u.mvfrey.mu->outbound_shadow_base));
	writel(cpu_to_le32((hba->u.mvfrey.outlist_cptr_phy >> 16) >> 16),
	       &(hba->u.mvfrey.mu->outbound_shadow_base_high));

	hba->u.mvfrey.inlist_wptr = (list_count - 1) | CL_POINTER_TOGGLE;
	*hba->u.mvfrey.outlist_cptr = (list_count - 1) | CL_POINTER_TOGGLE;
	hba->u.mvfrey.outlist_rptr = list_count - 1;
	return 0;
}

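/*
 * queuecommand: grab a free request slot, build the S/G table, fill in
 * the IOP request header and CDB, and hand the request to the per-family
 * post_req hook.  Returns SCSI_MLQUEUE_HOST_BUSY when no slot is free.
 */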
static int hptiop_queuecommand_lck(struct scsi_cmnd *scp,
				   void (*done)(struct scsi_cmnd *))
{
	struct Scsi_Host *host = scp->device->host;
	struct hptiop_hba *hba = (struct hptiop_hba *)host->hostdata;
	struct hpt_iop_request_scsi_command *req;
	int sg_count = 0;
	struct hptiop_request *_req;

	BUG_ON(!done);
	scp->scsi_done = done;

	_req = get_req(hba);
	if (_req == NULL) {
		dprintk("hptiop_queuecmd : no free req\n");
		return SCSI_MLQUEUE_HOST_BUSY;
	}

	_req->scp = scp;

	dprintk("hptiop_queuecmd(scp=%p) %d/%d/%d/%d cdb=(%08x-%08x-%08x-%08x) "
			"req_index=%d, req=%p\n",
			scp,
			host->host_no, scp->device->channel,
			scp->device->id, scp->device->lun,
			cpu_to_be32(((u32 *)scp->cmnd)[0]),
			cpu_to_be32(((u32 *)scp->cmnd)[1]),
			cpu_to_be32(((u32 *)scp->cmnd)[2]),
			cpu_to_be32(((u32 *)scp->cmnd)[3]),
			_req->index, _req->req_virt);

	scp->result = 0;

	if (scp->device->channel || scp->device->lun ||
	    scp->device->id > hba->max_devices) {
		scp->result = DID_BAD_TARGET << 16;
		free_req(hba, _req);
		goto cmd_done;
	}

	req = _req->req_virt;

	/* build S/G table */
	sg_count = hptiop_buildsgl(scp, req->sg_list);
	if (!sg_count)
		HPT_SCP(scp)->mapped = 0;

	req->header.flags = cpu_to_le32(IOP_REQUEST_FLAG_OUTPUT_CONTEXT);
	req->header.type = cpu_to_le32(IOP_REQUEST_TYPE_SCSI_COMMAND);
	req->header.result = cpu_to_le32(IOP_RESULT_PENDING);
	req->dataxfer_length = cpu_to_le32(scsi_bufflen(scp));
	req->channel = scp->device->channel;
	req->target = scp->device->id;
	req->lun = scp->device->lun;
	req->header.size = cpu_to_le32(
				sizeof(struct hpt_iop_request_scsi_command)
				- sizeof(struct hpt_iopsg)
				+ sg_count * sizeof(struct hpt_iopsg));

	memcpy(req->cdb, scp->cmnd, sizeof(req->cdb));
	hba->ops->post_req(hba, _req);
	return 0;

cmd_done:
	dprintk("scsi_done(scp=%p)\n", scp);
	scp->scsi_done(scp);
	return 0;
}

static DEF_SCSI_QCMD(hptiop_queuecommand)

static const char *hptiop_info(struct Scsi_Host *host)
{
	return driver_name_long;
}

static int hptiop_reset_hba(struct hptiop_hba *hba)
{
	if (atomic_xchg(&hba->resetting, 1) == 0) {
		atomic_inc(&hba->reset_count);
		hba->ops->post_msg(hba, IOPMU_INBOUND_MSG0_RESET);
	}

	wait_event_timeout(hba->reset_wq,
			   atomic_read(&hba->resetting) == 0, 60 * HZ);

	if (atomic_read(&hba->resetting)) {
		/* IOP is in unknown state, abort reset */
		printk(KERN_ERR "scsi%d: reset failed\n", hba->host->host_no);
		return -1;
	}

	if (iop_send_sync_msg(hba,
			IOPMU_INBOUND_MSG0_START_BACKGROUND_TASK, 5000)) {
		dprintk("scsi%d: fail to start background task\n",
			hba->host->host_no);
	}

	return 0;
}

static int hptiop_reset(struct scsi_cmnd *scp)
{
	struct Scsi_Host *host = scp->device->host;
	struct hptiop_hba *hba = (struct hptiop_hba *)host->hostdata;

	printk(KERN_WARNING "hptiop_reset(%d/%d/%d) scp=%p\n",
	       scp->device->host->host_no, scp->device->channel,
	       scp->device->id, scp);

	return hptiop_reset_hba(hba) ? FAILED : SUCCESS;
}

static int hptiop_adjust_disk_queue_depth(struct scsi_device *sdev,
					  int queue_depth, int reason)
{
	struct hptiop_hba *hba = (struct hptiop_hba *)sdev->host->hostdata;

	if (reason != SCSI_QDEPTH_DEFAULT)
		return -EOPNOTSUPP;

	if (queue_depth > hba->max_requests)
		queue_depth = hba->max_requests;
	scsi_adjust_queue_depth(sdev, MSG_ORDERED_TAG, queue_depth);
	return queue_depth;
}

static ssize_t hptiop_show_version(struct device *dev,
				   struct device_attribute *attr, char *buf)
{
	return snprintf(buf, PAGE_SIZE, "%s\n", driver_ver);
}

static ssize_t hptiop_show_fw_version(struct device *dev,
				      struct device_attribute *attr, char *buf)
{
	struct Scsi_Host *host = class_to_shost(dev);
	struct hptiop_hba *hba = (struct hptiop_hba *)host->hostdata;

	return snprintf(buf, PAGE_SIZE, "%d.%d.%d.%d\n",
			hba->firmware_version >> 24,
			(hba->firmware_version >> 16) & 0xff,
			(hba->firmware_version >> 8) & 0xff,
			hba->firmware_version & 0xff);
}

static struct device_attribute hptiop_attr_version = {
	.attr = {
		.name = "driver-version",
		.mode = S_IRUGO,
	},
	.show = hptiop_show_version,
};

static struct device_attribute hptiop_attr_fw_version = {
	.attr = {
		.name = "firmware-version",
		.mode = S_IRUGO,
	},
	.show = hptiop_show_fw_version,
};

static struct device_attribute *hptiop_attrs[] = {
	&hptiop_attr_version,
	&hptiop_attr_fw_version,
	NULL
};

static struct scsi_host_template driver_template = {
	.module                  = THIS_MODULE,
	.name                    = driver_name,
	.queuecommand            = hptiop_queuecommand,
	.eh_device_reset_handler = hptiop_reset,
	.eh_bus_reset_handler    = hptiop_reset,
	.info                    = hptiop_info,
	.emulated                = 0,
	.use_clustering          = ENABLE_CLUSTERING,
	.proc_name               = driver_name,
	.shost_attrs             = hptiop_attrs,
	.this_id                 = -1,
	.change_queue_depth      = hptiop_adjust_disk_queue_depth,
};

static int hptiop_internal_memalloc_itl(struct hptiop_hba *hba)
{
	return 0;
}

static int hptiop_internal_memalloc_mv(struct hptiop_hba *hba)
{
	hba->u.mv.internal_req = dma_alloc_coherent(&hba->pcidev->dev,
			0x800, &hba->u.mv.internal_req_phy, GFP_KERNEL);
	if (hba->u.mv.internal_req)
		return 0;
	else
		return -1;
}

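/*
 * Carve the MVFrey DMA area out of a single coherent allocation:
 * 0x800 bytes for the internal request, then the inbound list, the
 * outbound list, and finally the firmware-written shadow copy of the
 * outbound pointer.
 */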
static int hptiop_internal_memalloc_mvfrey(struct hptiop_hba *hba)
{
	u32 list_count = readl(&hba->u.mvfrey.mu->inbound_conf_ctl);
	char *p;
	dma_addr_t phy;

	BUG_ON(hba->max_request_size == 0);

	if (list_count == 0) {
		BUG_ON(1);
		return -1;
	}

	list_count >>= 16;

	hba->u.mvfrey.list_count = list_count;
	hba->u.mvfrey.internal_mem_size = 0x800 +
			list_count * sizeof(struct mvfrey_inlist_entry) +
			list_count * sizeof(struct mvfrey_outlist_entry) +
			sizeof(int);

	p = dma_alloc_coherent(&hba->pcidev->dev,
			hba->u.mvfrey.internal_mem_size, &phy, GFP_KERNEL);
	if (!p)
		return -1;

	hba->u.mvfrey.internal_req.req_virt = p;
	hba->u.mvfrey.internal_req.req_shifted_phy = phy >> 5;
	hba->u.mvfrey.internal_req.scp = NULL;
	hba->u.mvfrey.internal_req.next = NULL;

	p += 0x800;
	phy += 0x800;

	hba->u.mvfrey.inlist = (struct mvfrey_inlist_entry *)p;
	hba->u.mvfrey.inlist_phy = phy;

	p += list_count * sizeof(struct mvfrey_inlist_entry);
	phy += list_count * sizeof(struct mvfrey_inlist_entry);

	hba->u.mvfrey.outlist = (struct mvfrey_outlist_entry *)p;
	hba->u.mvfrey.outlist_phy = phy;

	p += list_count * sizeof(struct mvfrey_outlist_entry);
	phy += list_count * sizeof(struct mvfrey_outlist_entry);

	hba->u.mvfrey.outlist_cptr = (__le32 *)p;
	hba->u.mvfrey.outlist_cptr_phy = phy;

	return 0;
}

static int hptiop_internal_memfree_itl(struct hptiop_hba *hba)
{
	return 0;
}

static int hptiop_internal_memfree_mv(struct hptiop_hba *hba)
{
	if (hba->u.mv.internal_req) {
		dma_free_coherent(&hba->pcidev->dev, 0x800,
			hba->u.mv.internal_req, hba->u.mv.internal_req_phy);
		return 0;
	} else
		return -1;
}

static int hptiop_internal_memfree_mvfrey(struct hptiop_hba *hba)
{
	if (hba->u.mvfrey.internal_req.req_virt) {
		dma_free_coherent(&hba->pcidev->dev,
			hba->u.mvfrey.internal_mem_size,
			hba->u.mvfrey.internal_req.req_virt,
			(dma_addr_t)
			hba->u.mvfrey.internal_req.req_shifted_phy << 5);
		return 0;
	} else
		return -1;
}

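/*
 * PCI probe: enable the device, set the DMA mask, map the BARs, wait for
 * the firmware, exchange get_config/set_config, allocate the request
 * pool, hook the interrupt and register the SCSI host.  Error paths
 * unwind in reverse order through the labels at the bottom.
 */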
static int hptiop_probe(struct pci_dev *pcidev, const struct pci_device_id *id)
{
	struct Scsi_Host *host = NULL;
	struct hptiop_hba *hba;
	struct hptiop_adapter_ops *iop_ops;
	struct hpt_iop_request_get_config iop_config;
	struct hpt_iop_request_set_config set_config;
	dma_addr_t start_phy;
	void *start_virt;
	u32 offset, i, req_size;

	dprintk("hptiop_probe(%p)\n", pcidev);

	if (pci_enable_device(pcidev)) {
		printk(KERN_ERR "hptiop: fail to enable pci device\n");
		return -ENODEV;
	}

	printk(KERN_INFO "adapter at PCI %d:%d:%d, IRQ %d\n",
		pcidev->bus->number, pcidev->devfn >> 3, pcidev->devfn & 7,
		pcidev->irq);

	pci_set_master(pcidev);

	/* Enable 64bit DMA if possible */
	iop_ops = (struct hptiop_adapter_ops *)id->driver_data;
	if (pci_set_dma_mask(pcidev, DMA_BIT_MASK(iop_ops->hw_dma_bit_mask))) {
		if (pci_set_dma_mask(pcidev, DMA_BIT_MASK(32))) {
			printk(KERN_ERR "hptiop: fail to set dma_mask\n");
			goto disable_pci_device;
		}
	}

	if (pci_request_regions(pcidev, driver_name)) {
		printk(KERN_ERR "hptiop: pci_request_regions failed\n");
		goto disable_pci_device;
	}

	host = scsi_host_alloc(&driver_template, sizeof(struct hptiop_hba));
	if (!host) {
		printk(KERN_ERR "hptiop: fail to alloc scsi host\n");
		goto free_pci_regions;
	}

	hba = (struct hptiop_hba *)host->hostdata;

	hba->ops = iop_ops;
	hba->pcidev = pcidev;
	hba->host = host;
	hba->initialized = 0;
	hba->iopintf_v2 = 0;

	atomic_set(&hba->resetting, 0);
	atomic_set(&hba->reset_count, 0);

	init_waitqueue_head(&hba->reset_wq);
	init_waitqueue_head(&hba->ioctl_wq);

	host->max_lun = 1;
	host->max_channel = 0;
	host->io_port = 0;
	host->n_io_port = 0;
	host->irq = pcidev->irq;

	if (hba->ops->map_pci_bar(hba))
		goto free_scsi_host;

	if (hba->ops->iop_wait_ready(hba, 20000)) {
		printk(KERN_ERR "scsi%d: firmware not ready\n",
			hba->host->host_no);
		goto unmap_pci_bar;
	}

	if (hba->ops->family == MV_BASED_IOP) {
		if (hba->ops->internal_memalloc(hba)) {
			printk(KERN_ERR "scsi%d: internal_memalloc failed\n",
				hba->host->host_no);
			goto unmap_pci_bar;
		}
	}

	if (hba->ops->get_config(hba, &iop_config)) {
		printk(KERN_ERR "scsi%d: get config failed\n",
			hba->host->host_no);
		goto unmap_pci_bar;
	}

	hba->max_requests = min(le32_to_cpu(iop_config.max_requests),
				HPTIOP_MAX_REQUESTS);
	hba->max_devices = le32_to_cpu(iop_config.max_devices);
	hba->max_request_size = le32_to_cpu(iop_config.request_size);
	hba->max_sg_descriptors = le32_to_cpu(iop_config.max_sg_count);
	hba->firmware_version = le32_to_cpu(iop_config.firmware_version);
	hba->interface_version = le32_to_cpu(iop_config.interface_version);
	hba->sdram_size = le32_to_cpu(iop_config.sdram_size);

	if (hba->ops->family == MVFREY_BASED_IOP) {
		if (hba->ops->internal_memalloc(hba)) {
			printk(KERN_ERR "scsi%d: internal_memalloc failed\n",
				hba->host->host_no);
			goto unmap_pci_bar;
		}
		if (hba->ops->reset_comm(hba)) {
			printk(KERN_ERR "scsi%d: reset comm failed\n",
				hba->host->host_no);
			goto unmap_pci_bar;
		}
	}

	if (hba->firmware_version > 0x01020000 ||
	    hba->interface_version > 0x01020000)
		hba->iopintf_v2 = 1;

	host->max_sectors = le32_to_cpu(iop_config.data_transfer_length) >> 9;
	host->max_id = le32_to_cpu(iop_config.max_devices);
	host->sg_tablesize = le32_to_cpu(iop_config.max_sg_count);
	host->can_queue = le32_to_cpu(iop_config.max_requests);
	host->cmd_per_lun = le32_to_cpu(iop_config.max_requests);
	host->max_cmd_len = 16;

	req_size = sizeof(struct hpt_iop_request_scsi_command)
		+ sizeof(struct hpt_iopsg) * (hba->max_sg_descriptors - 1);
	if ((req_size & 0x1f) != 0)
		req_size = (req_size + 0x1f) & ~0x1f;

	memset(&set_config, 0, sizeof(struct hpt_iop_request_set_config));
	set_config.iop_id = cpu_to_le32(host->host_no);
	set_config.vbus_id = cpu_to_le16(host->host_no);
	set_config.max_host_request_size = cpu_to_le16(req_size);

	if (hba->ops->set_config(hba, &set_config)) {
		printk(KERN_ERR "scsi%d: set config failed\n",
			hba->host->host_no);
		goto unmap_pci_bar;
	}

	pci_set_drvdata(pcidev, host);

	if (request_irq(pcidev->irq, hptiop_intr, IRQF_SHARED,
			driver_name, hba)) {
		printk(KERN_ERR "scsi%d: request irq %d failed\n",
			hba->host->host_no, pcidev->irq);
		goto unmap_pci_bar;
	}

	/* Allocate request mem */

	dprintk("req_size=%d, max_requests=%d\n", req_size, hba->max_requests);

	hba->req_size = req_size;
	start_virt = dma_alloc_coherent(&pcidev->dev,
			hba->req_size*hba->max_requests + 0x20,
			&start_phy, GFP_KERNEL);

	if (!start_virt) {
		printk(KERN_ERR "scsi%d: fail to alloc request mem\n",
			hba->host->host_no);
		goto free_request_irq;
	}

	hba->dma_coherent = start_virt;
	hba->dma_coherent_handle = start_phy;

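	/*
	 * Align the request pool to 32 bytes: req_shifted_phy stores the
	 * physical address shifted right by 5, so the low 5 bits must be
	 * zero.
	 */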
	if ((start_phy & 0x1f) != 0) {
		offset = ((start_phy + 0x1f) & ~0x1f) - start_phy;
		start_phy += offset;
		start_virt += offset;
	}

	hba->req_list = NULL;
	for (i = 0; i < hba->max_requests; i++) {
		hba->reqs[i].next = NULL;
		hba->reqs[i].req_virt = start_virt;
		hba->reqs[i].req_shifted_phy = start_phy >> 5;
		hba->reqs[i].index = i;
		free_req(hba, &hba->reqs[i]);
		start_virt = (char *)start_virt + hba->req_size;
		start_phy = start_phy + hba->req_size;
	}

	/* Enable Interrupt and start background task */
	if (hptiop_initialize_iop(hba))
		goto free_request_mem;

	if (scsi_add_host(host, &pcidev->dev)) {
		printk(KERN_ERR "scsi%d: scsi_add_host failed\n",
			hba->host->host_no);
		goto free_request_mem;
	}

	scsi_scan_host(host);

	dprintk("scsi%d: hptiop_probe successfully\n", hba->host->host_no);
	return 0;

free_request_mem:
	dma_free_coherent(&hba->pcidev->dev,
			hba->req_size * hba->max_requests + 0x20,
			hba->dma_coherent, hba->dma_coherent_handle);

free_request_irq:
	free_irq(hba->pcidev->irq, hba);

unmap_pci_bar:
	hba->ops->internal_memfree(hba);

	hba->ops->unmap_pci_bar(hba);

free_scsi_host:
	scsi_host_put(host);

free_pci_regions:
	pci_release_regions(pcidev);

disable_pci_device:
	pci_disable_device(pcidev);

	dprintk("scsi%d: hptiop_probe fail\n", host ? host->host_no : 0);
	return -ENODEV;
}

static void hptiop_shutdown(struct pci_dev *pcidev)
{
	struct Scsi_Host *host = pci_get_drvdata(pcidev);
	struct hptiop_hba *hba = (struct hptiop_hba *)host->hostdata;

	dprintk("hptiop_shutdown(%p)\n", hba);

	/* stop the iop */
	if (iop_send_sync_msg(hba, IOPMU_INBOUND_MSG0_SHUTDOWN, 60000))
		printk(KERN_ERR "scsi%d: shutdown the iop timeout\n",
			hba->host->host_no);

	/* disable all outbound interrupts */
	hba->ops->disable_intr(hba);
}

static void hptiop_disable_intr_itl(struct hptiop_hba *hba)
{
	u32 int_mask;

	int_mask = readl(&hba->u.itl.iop->outbound_intmask);
	writel(int_mask |
		IOPMU_OUTBOUND_INT_MSG0 | IOPMU_OUTBOUND_INT_POSTQUEUE,
		&hba->u.itl.iop->outbound_intmask);
	readl(&hba->u.itl.iop->outbound_intmask);
}

static void hptiop_disable_intr_mv(struct hptiop_hba *hba)
{
	writel(0, &hba->u.mv.regs->outbound_intmask);
	readl(&hba->u.mv.regs->outbound_intmask);
}

static void hptiop_disable_intr_mvfrey(struct hptiop_hba *hba)
{
	writel(0, &(hba->u.mvfrey.mu->f0_doorbell_enable));
	readl(&(hba->u.mvfrey.mu->f0_doorbell_enable));
	writel(0, &(hba->u.mvfrey.mu->isr_enable));
	readl(&(hba->u.mvfrey.mu->isr_enable));
	writel(0, &(hba->u.mvfrey.mu->pcie_f0_int_enable));
	readl(&(hba->u.mvfrey.mu->pcie_f0_int_enable));
}

static void hptiop_remove(struct pci_dev *pcidev)
{
	struct Scsi_Host *host = pci_get_drvdata(pcidev);
	struct hptiop_hba *hba = (struct hptiop_hba *)host->hostdata;

	dprintk("scsi%d: hptiop_remove\n", hba->host->host_no);

	scsi_remove_host(host);

	hptiop_shutdown(pcidev);

	free_irq(hba->pcidev->irq, hba);

	dma_free_coherent(&hba->pcidev->dev,
			hba->req_size * hba->max_requests + 0x20,
			hba->dma_coherent,
			hba->dma_coherent_handle);

	hba->ops->internal_memfree(hba);

	hba->ops->unmap_pci_bar(hba);

	pci_release_regions(hba->pcidev);
	pci_set_drvdata(hba->pcidev, NULL);
	pci_disable_device(hba->pcidev);

	scsi_host_put(host);
}

static struct hptiop_adapter_ops hptiop_itl_ops = {
	.family            = INTEL_BASED_IOP,
	.iop_wait_ready    = iop_wait_ready_itl,
	.internal_memalloc = hptiop_internal_memalloc_itl,
	.internal_memfree  = hptiop_internal_memfree_itl,
	.map_pci_bar       = hptiop_map_pci_bar_itl,
	.unmap_pci_bar     = hptiop_unmap_pci_bar_itl,
	.enable_intr       = hptiop_enable_intr_itl,
	.disable_intr      = hptiop_disable_intr_itl,
	.get_config        = iop_get_config_itl,
	.set_config        = iop_set_config_itl,
	.iop_intr          = iop_intr_itl,
	.post_msg          = hptiop_post_msg_itl,
	.post_req          = hptiop_post_req_itl,
	.hw_dma_bit_mask   = 64,
	.reset_comm        = hptiop_reset_comm_itl,
	.host_phy_flag     = cpu_to_le64(0),
};

static struct hptiop_adapter_ops hptiop_mv_ops = {
	.family            = MV_BASED_IOP,
	.iop_wait_ready    = iop_wait_ready_mv,
	.internal_memalloc = hptiop_internal_memalloc_mv,
	.internal_memfree  = hptiop_internal_memfree_mv,
	.map_pci_bar       = hptiop_map_pci_bar_mv,
	.unmap_pci_bar     = hptiop_unmap_pci_bar_mv,
	.enable_intr       = hptiop_enable_intr_mv,
	.disable_intr      = hptiop_disable_intr_mv,
	.get_config        = iop_get_config_mv,
	.set_config        = iop_set_config_mv,
	.iop_intr          = iop_intr_mv,
	.post_msg          = hptiop_post_msg_mv,
	.post_req          = hptiop_post_req_mv,
	.hw_dma_bit_mask   = 33,
	.reset_comm        = hptiop_reset_comm_mv,
	.host_phy_flag     = cpu_to_le64(0),
};

static struct hptiop_adapter_ops hptiop_mvfrey_ops = {
	.family            = MVFREY_BASED_IOP,
	.iop_wait_ready    = iop_wait_ready_mvfrey,
	.internal_memalloc = hptiop_internal_memalloc_mvfrey,
	.internal_memfree  = hptiop_internal_memfree_mvfrey,
	.map_pci_bar       = hptiop_map_pci_bar_mvfrey,
	.unmap_pci_bar     = hptiop_unmap_pci_bar_mvfrey,
	.enable_intr       = hptiop_enable_intr_mvfrey,
	.disable_intr      = hptiop_disable_intr_mvfrey,
	.get_config        = iop_get_config_mvfrey,
	.set_config        = iop_set_config_mvfrey,
	.iop_intr          = iop_intr_mvfrey,
	.post_msg          = hptiop_post_msg_mvfrey,
	.post_req          = hptiop_post_req_mvfrey,
	.hw_dma_bit_mask   = 64,
	.reset_comm        = hptiop_reset_comm_mvfrey,
	.host_phy_flag     = cpu_to_le64(1),
};

static struct pci_device_id hptiop_id_table[] = {
	{ PCI_VDEVICE(TTI, 0x3220), (kernel_ulong_t)&hptiop_itl_ops },
	{ PCI_VDEVICE(TTI, 0x3320), (kernel_ulong_t)&hptiop_itl_ops },
	{ PCI_VDEVICE(TTI, 0x3410), (kernel_ulong_t)&hptiop_itl_ops },
	{ PCI_VDEVICE(TTI, 0x3510), (kernel_ulong_t)&hptiop_itl_ops },
	{ PCI_VDEVICE(TTI, 0x3511), (kernel_ulong_t)&hptiop_itl_ops },
	{ PCI_VDEVICE(TTI, 0x3520), (kernel_ulong_t)&hptiop_itl_ops },
	{ PCI_VDEVICE(TTI, 0x3521), (kernel_ulong_t)&hptiop_itl_ops },
	{ PCI_VDEVICE(TTI, 0x3522), (kernel_ulong_t)&hptiop_itl_ops },
	{ PCI_VDEVICE(TTI, 0x3530), (kernel_ulong_t)&hptiop_itl_ops },
	{ PCI_VDEVICE(TTI, 0x3540), (kernel_ulong_t)&hptiop_itl_ops },
	{ PCI_VDEVICE(TTI, 0x3560), (kernel_ulong_t)&hptiop_itl_ops },
	{ PCI_VDEVICE(TTI, 0x4210), (kernel_ulong_t)&hptiop_itl_ops },
	{ PCI_VDEVICE(TTI, 0x4211), (kernel_ulong_t)&hptiop_itl_ops },
	{ PCI_VDEVICE(TTI, 0x4310), (kernel_ulong_t)&hptiop_itl_ops },
	{ PCI_VDEVICE(TTI, 0x4311), (kernel_ulong_t)&hptiop_itl_ops },
	{ PCI_VDEVICE(TTI, 0x4320), (kernel_ulong_t)&hptiop_itl_ops },
	{ PCI_VDEVICE(TTI, 0x4321), (kernel_ulong_t)&hptiop_itl_ops },
	{ PCI_VDEVICE(TTI, 0x4322), (kernel_ulong_t)&hptiop_itl_ops },
	{ PCI_VDEVICE(TTI, 0x4400), (kernel_ulong_t)&hptiop_itl_ops },
	{ PCI_VDEVICE(TTI, 0x3120), (kernel_ulong_t)&hptiop_mv_ops },
	{ PCI_VDEVICE(TTI, 0x3122), (kernel_ulong_t)&hptiop_mv_ops },
	{ PCI_VDEVICE(TTI, 0x3020), (kernel_ulong_t)&hptiop_mv_ops },
	{ PCI_VDEVICE(TTI, 0x4520), (kernel_ulong_t)&hptiop_mvfrey_ops },
	{ PCI_VDEVICE(TTI, 0x4522), (kernel_ulong_t)&hptiop_mvfrey_ops },
	{},
};

MODULE_DEVICE_TABLE(pci, hptiop_id_table);

static struct pci_driver hptiop_pci_driver = {
	.name     = driver_name,
	.id_table = hptiop_id_table,
	.probe    = hptiop_probe,
	.remove   = hptiop_remove,
	.shutdown = hptiop_shutdown,
};

static int __init hptiop_module_init(void)
{
	printk(KERN_INFO "%s %s\n", driver_name_long, driver_ver);
	return pci_register_driver(&hptiop_pci_driver);
}

static void __exit hptiop_module_exit(void)
{
	pci_unregister_driver(&hptiop_pci_driver);
}

module_init(hptiop_module_init);
module_exit(hptiop_module_exit);

MODULE_LICENSE("GPL");