[SCSI] qla2xxx: Clear error status after uncorrectable non-fatal errors.
mt8127/android_kernel_alcatel_ttab.git: drivers/scsi/qla2xxx/qla_os.c
1da177e4 1/*
fa90c54f 2 * QLogic Fibre Channel HBA Driver
01e58d8e 3 * Copyright (c) 2003-2008 QLogic Corporation
1da177e4 4 *
fa90c54f 5 * See LICENSE.qla2xxx for copyright and licensing details.
1da177e4
LT
6 */
7#include "qla_def.h"
8
9#include <linux/moduleparam.h>
10#include <linux/vmalloc.h>
1da177e4 11#include <linux/delay.h>
39a11240 12#include <linux/kthread.h>
e1e82b6f 13#include <linux/mutex.h>
3420d36c 14#include <linux/kobject.h>
5a0e3ad6 15#include <linux/slab.h>
1da177e4
LT
16
17#include <scsi/scsi_tcq.h>
18#include <scsi/scsicam.h>
19#include <scsi/scsi_transport.h>
20#include <scsi/scsi_transport_fc.h>
21
22/*
23 * Driver version
24 */
25char qla2x00_version_str[40];
26
27/*
28 * SRB allocation cache
29 */
e18b890b 30static struct kmem_cache *srb_cachep;
1da177e4 31
a9083016
GM
32/*
33 * CT6 CTX allocation cache
34 */
35static struct kmem_cache *ctx_cachep;
36
1da177e4
LT
37int ql2xlogintimeout = 20;
38module_param(ql2xlogintimeout, int, S_IRUGO|S_IRUSR);
39MODULE_PARM_DESC(ql2xlogintimeout,
40 "Login timeout value in seconds.");
41
a7b61842 42int qlport_down_retry;
1da177e4
LT
43module_param(qlport_down_retry, int, S_IRUGO|S_IRUSR);
44MODULE_PARM_DESC(qlport_down_retry,
900d9f98 45 "Maximum number of command retries to a port that returns "
1da177e4
LT
46 "a PORT-DOWN status.");
47
1da177e4
LT
48int ql2xplogiabsentdevice;
49module_param(ql2xplogiabsentdevice, int, S_IRUGO|S_IWUSR);
50MODULE_PARM_DESC(ql2xplogiabsentdevice,
51 "Option to enable PLOGI to devices that are not present after "
900d9f98 52 "a Fabric scan. This is needed for several broken switches. "
1da177e4
LT
53 "Default is 0 - no PLOGI. 1 - perfom PLOGI.");
54
1da177e4
LT
55int ql2xloginretrycount = 0;
56module_param(ql2xloginretrycount, int, S_IRUGO|S_IRUSR);
57MODULE_PARM_DESC(ql2xloginretrycount,
58 "Specify an alternate value for the NVRAM login retry count.");
59
a7a167bf
AV
60int ql2xallocfwdump = 1;
61module_param(ql2xallocfwdump, int, S_IRUGO|S_IRUSR);
62MODULE_PARM_DESC(ql2xallocfwdump,
63 "Option to enable allocation of memory for a firmware dump "
64 "during HBA initialization. Memory allocation requirements "
65 "vary by ISP type. Default is 1 - allocate memory.");
66
11010fec 67int ql2xextended_error_logging;
27d94035 68module_param(ql2xextended_error_logging, int, S_IRUGO|S_IWUSR);
11010fec 69MODULE_PARM_DESC(ql2xextended_error_logging,
0181944f
AV
70 "Option to enable extended error logging, "
71 "Default is 0 - no logging. 1 - log errors.");
72
a9083016
GM
73int ql2xshiftctondsd = 6;
74module_param(ql2xshiftctondsd, int, S_IRUGO|S_IRUSR);
75MODULE_PARM_DESC(ql2xshiftctondsd,
76 "Set to control shifting of command type processing "
77 "based on total number of SG elements.");
78
1da177e4
LT
79static void qla2x00_free_device(scsi_qla_host_t *);
80
7e47e5ca 81int ql2xfdmienable=1;
cca5335c
AV
82module_param(ql2xfdmienable, int, S_IRUGO|S_IRUSR);
83MODULE_PARM_DESC(ql2xfdmienable,
7794a5af
FW
84 "Enables FDMI registrations. "
85 "0 - no FDMI. Default is 1 - perform FDMI.");
cca5335c 86
df7baa50
AV
87#define MAX_Q_DEPTH 32
88static int ql2xmaxqdepth = MAX_Q_DEPTH;
89module_param(ql2xmaxqdepth, int, S_IRUGO|S_IWUSR);
90MODULE_PARM_DESC(ql2xmaxqdepth,
91 "Maximum queue depth to report for target devices.");
92
e5896bd5
AV
93int ql2xiidmaenable=1;
94module_param(ql2xiidmaenable, int, S_IRUGO|S_IRUSR);
95MODULE_PARM_DESC(ql2xiidmaenable,
96 "Enables iIDMA settings "
97 "Default is 1 - perform iIDMA. 0 - no iIDMA.");
98
73208dfd
AC
99int ql2xmaxqueues = 1;
100module_param(ql2xmaxqueues, int, S_IRUGO|S_IRUSR);
101MODULE_PARM_DESC(ql2xmaxqueues,
102 "Enables MQ settings "
103 "Default is 1 for single queue. Set it to number \
104 of queues in MQ mode.");
68ca949c
AC
105
106int ql2xmultique_tag;
107module_param(ql2xmultique_tag, int, S_IRUGO|S_IRUSR);
108MODULE_PARM_DESC(ql2xmultique_tag,
109 "Enables CPU affinity settings for the driver "
110 "Default is 0 for no affinity of request and response IO. "
111 "Set it to 1 to turn on the cpu affinity.");
e337d907
AV
112
113int ql2xfwloadbin;
114module_param(ql2xfwloadbin, int, S_IRUGO|S_IRUSR);
115MODULE_PARM_DESC(ql2xfwloadbin,
116 "Option to specify location from which to load ISP firmware:\n"
117 " 2 -- load firmware via the request_firmware() (hotplug)\n"
118 " interface.\n"
119 " 1 -- load firmware from flash.\n"
120 " 0 -- use default semantics.\n");
121
ae97c91e
AV
122int ql2xetsenable;
123module_param(ql2xetsenable, int, S_IRUGO|S_IRUSR);
124MODULE_PARM_DESC(ql2xetsenable,
125 "Enables firmware ETS burst."
126 "Default is 0 - skip ETS enablement.");
127
a9083016
GM
128int ql2xdbwr;
129module_param(ql2xdbwr, int, S_IRUGO|S_IRUSR);
130MODULE_PARM_DESC(ql2xdbwr,
131 "Option to specify scheme for request queue posting\n"
132 " 0 -- Regular doorbell.\n"
133 " 1 -- CAMRAM doorbell (faster).\n");
134
135int ql2xdontresethba;
136module_param(ql2xdontresethba, int, S_IRUGO|S_IRUSR);
137MODULE_PARM_DESC(ql2xdontresethba,
138 "Option to specify reset behaviour\n"
139 " 0 (Default) -- Reset on failure.\n"
140 " 1 -- Do not reset on failure.\n");
141
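/*
 * Example of overriding a few of the parameters above at load time
 * (hypothetical values, for illustration only):
 *
 *     modprobe qla2xxx ql2xlogintimeout=30 ql2xextended_error_logging=1 \
 *             ql2xmaxqdepth=64
 */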
142
1da177e4 143/*
fa2a1ce5 144 * SCSI host template entry points
1da177e4
LT
145 */
146static int qla2xxx_slave_configure(struct scsi_device * device);
f4f051eb 147static int qla2xxx_slave_alloc(struct scsi_device *);
1e99e33a
AV
148static int qla2xxx_scan_finished(struct Scsi_Host *, unsigned long time);
149static void qla2xxx_scan_start(struct Scsi_Host *);
f4f051eb 150static void qla2xxx_slave_destroy(struct scsi_device *);
a5326f86 151static int qla2xxx_queuecommand(struct scsi_cmnd *cmd,
fca29703 152 void (*fn)(struct scsi_cmnd *));
1da177e4
LT
153static int qla2xxx_eh_abort(struct scsi_cmnd *);
154static int qla2xxx_eh_device_reset(struct scsi_cmnd *);
523ec773 155static int qla2xxx_eh_target_reset(struct scsi_cmnd *);
1da177e4
LT
156static int qla2xxx_eh_bus_reset(struct scsi_cmnd *);
157static int qla2xxx_eh_host_reset(struct scsi_cmnd *);
1da177e4 158
e881a172 159static int qla2x00_change_queue_depth(struct scsi_device *, int, int);
ce7e4af7
AV
160static int qla2x00_change_queue_type(struct scsi_device *, int);
161
a5326f86 162struct scsi_host_template qla2xxx_driver_template = {
1da177e4 163 .module = THIS_MODULE,
cb63067a 164 .name = QLA2XXX_DRIVER_NAME,
a5326f86 165 .queuecommand = qla2xxx_queuecommand,
fca29703
AV
166
167 .eh_abort_handler = qla2xxx_eh_abort,
168 .eh_device_reset_handler = qla2xxx_eh_device_reset,
523ec773 169 .eh_target_reset_handler = qla2xxx_eh_target_reset,
fca29703
AV
170 .eh_bus_reset_handler = qla2xxx_eh_bus_reset,
171 .eh_host_reset_handler = qla2xxx_eh_host_reset,
172
173 .slave_configure = qla2xxx_slave_configure,
174
175 .slave_alloc = qla2xxx_slave_alloc,
176 .slave_destroy = qla2xxx_slave_destroy,
ed677086
AV
177 .scan_finished = qla2xxx_scan_finished,
178 .scan_start = qla2xxx_scan_start,
ce7e4af7
AV
179 .change_queue_depth = qla2x00_change_queue_depth,
180 .change_queue_type = qla2x00_change_queue_type,
fca29703
AV
181 .this_id = -1,
182 .cmd_per_lun = 3,
183 .use_clustering = ENABLE_CLUSTERING,
184 .sg_tablesize = SG_ALL,
185
186 .max_sectors = 0xFFFF,
afb046e2 187 .shost_attrs = qla2x00_host_attrs,
fca29703
AV
188};
189
1da177e4 190static struct scsi_transport_template *qla2xxx_transport_template = NULL;
2c3dfe3f 191struct scsi_transport_template *qla2xxx_transport_vport_template = NULL;
1da177e4 192
1da177e4
LT
193/* TODO Convert to inlines
194 *
195 * Timer routines
196 */
1da177e4 197
2c3dfe3f 198__inline__ void
e315cd28 199qla2x00_start_timer(scsi_qla_host_t *vha, void *func, unsigned long interval)
1da177e4 200{
e315cd28
AC
201 init_timer(&vha->timer);
202 vha->timer.expires = jiffies + interval * HZ;
203 vha->timer.data = (unsigned long)vha;
204 vha->timer.function = (void (*)(unsigned long))func;
205 add_timer(&vha->timer);
206 vha->timer_active = 1;
1da177e4
LT
207}
208
209static inline void
e315cd28 210qla2x00_restart_timer(scsi_qla_host_t *vha, unsigned long interval)
1da177e4 211{
a9083016
GM
212 /* Currently used for 82XX only. */
213 if (vha->device_flags & DFLG_DEV_FAILED)
214 return;
215
e315cd28 216 mod_timer(&vha->timer, jiffies + interval * HZ);
1da177e4
LT
217}
218
a824ebb3 219static __inline__ void
e315cd28 220qla2x00_stop_timer(scsi_qla_host_t *vha)
1da177e4 221{
e315cd28
AC
222 del_timer_sync(&vha->timer);
223 vha->timer_active = 0;
1da177e4
LT
224}
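/*
 * Typical usage in this driver (sketch): qla2x00_start_timer(vha,
 * qla2x00_timer, WATCH_INTERVAL) arms the per-host watchdog at probe time,
 * qla2x00_restart_timer() re-arms it from the timer callback, and
 * qla2x00_stop_timer() tears it down when the host is removed.
 */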
225
1da177e4
LT
226static int qla2x00_do_dpc(void *data);
227
228static void qla2x00_rst_aen(scsi_qla_host_t *);
229
73208dfd
AC
230static int qla2x00_mem_alloc(struct qla_hw_data *, uint16_t, uint16_t,
231 struct req_que **, struct rsp_que **);
e315cd28
AC
232static void qla2x00_mem_free(struct qla_hw_data *);
233static void qla2x00_sp_free_dma(srb_t *);
1da177e4 234
1da177e4 235/* -------------------------------------------------------------------------- */
73208dfd
AC
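/*
 * Allocate the request/response queue pointer arrays, sized by
 * ha->max_req_queues and ha->max_rsp_queues.  Note the mixed return
 * convention: 1 on success, -ENOMEM on allocation failure.
 */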
236static int qla2x00_alloc_queues(struct qla_hw_data *ha)
237{
2afa19a9 238 ha->req_q_map = kzalloc(sizeof(struct req_que *) * ha->max_req_queues,
73208dfd
AC
239 GFP_KERNEL);
240 if (!ha->req_q_map) {
241 qla_printk(KERN_WARNING, ha,
242 "Unable to allocate memory for request queue ptrs\n");
243 goto fail_req_map;
244 }
245
2afa19a9 246 ha->rsp_q_map = kzalloc(sizeof(struct rsp_que *) * ha->max_rsp_queues,
73208dfd
AC
247 GFP_KERNEL);
248 if (!ha->rsp_q_map) {
249 qla_printk(KERN_WARNING, ha,
250 "Unable to allocate memory for response queue ptrs\n");
251 goto fail_rsp_map;
252 }
253 set_bit(0, ha->rsp_qid_map);
254 set_bit(0, ha->req_qid_map);
255 return 1;
256
257fail_rsp_map:
258 kfree(ha->req_q_map);
259 ha->req_q_map = NULL;
260fail_req_map:
261 return -ENOMEM;
262}
263
2afa19a9 264static void qla2x00_free_req_que(struct qla_hw_data *ha, struct req_que *req)
73208dfd 265{
73208dfd
AC
266 if (req && req->ring)
267 dma_free_coherent(&ha->pdev->dev,
268 (req->length + 1) * sizeof(request_t),
269 req->ring, req->dma);
270
271 kfree(req);
272 req = NULL;
273}
274
2afa19a9
AC
275static void qla2x00_free_rsp_que(struct qla_hw_data *ha, struct rsp_que *rsp)
276{
277 if (rsp && rsp->ring)
278 dma_free_coherent(&ha->pdev->dev,
279 (rsp->length + 1) * sizeof(response_t),
280 rsp->ring, rsp->dma);
281
282 kfree(rsp);
283 rsp = NULL;
284}
285
73208dfd
AC
286static void qla2x00_free_queues(struct qla_hw_data *ha)
287{
288 struct req_que *req;
289 struct rsp_que *rsp;
290 int cnt;
291
2afa19a9 292 for (cnt = 0; cnt < ha->max_req_queues; cnt++) {
73208dfd 293 req = ha->req_q_map[cnt];
2afa19a9 294 qla2x00_free_req_que(ha, req);
73208dfd 295 }
73208dfd
AC
296 kfree(ha->req_q_map);
297 ha->req_q_map = NULL;
2afa19a9
AC
298
299 for (cnt = 0; cnt < ha->max_rsp_queues; cnt++) {
300 rsp = ha->rsp_q_map[cnt];
301 qla2x00_free_rsp_que(ha, rsp);
302 }
303 kfree(ha->rsp_q_map);
304 ha->rsp_q_map = NULL;
73208dfd
AC
305}
306
68ca949c
AC
307static int qla25xx_setup_mode(struct scsi_qla_host *vha)
308{
309 uint16_t options = 0;
310 int ques, req, ret;
311 struct qla_hw_data *ha = vha->hw;
312
7163ea81
AC
313 if (!(ha->fw_attributes & BIT_6)) {
314 qla_printk(KERN_INFO, ha,
315 "Firmware is not multi-queue capable\n");
316 goto fail;
317 }
68ca949c 318 if (ql2xmultique_tag) {
68ca949c
AC
319 /* create a request queue for IO */
320 options |= BIT_7;
321 req = qla25xx_create_req_que(ha, options, 0, 0, -1,
322 QLA_DEFAULT_QUE_QOS);
323 if (!req) {
324 qla_printk(KERN_WARNING, ha,
325 "Can't create request queue\n");
326 goto fail;
327 }
7163ea81 328 ha->wq = create_workqueue("qla2xxx_wq");
68ca949c
AC
329 vha->req = ha->req_q_map[req];
330 options |= BIT_1;
331 for (ques = 1; ques < ha->max_rsp_queues; ques++) {
332 ret = qla25xx_create_rsp_que(ha, options, 0, 0, req);
333 if (!ret) {
334 qla_printk(KERN_WARNING, ha,
335 "Response Queue create failed\n");
336 goto fail2;
337 }
338 }
7163ea81
AC
339 ha->flags.cpu_affinity_enabled = 1;
340
68ca949c
AC
341 DEBUG2(qla_printk(KERN_INFO, ha,
342 "CPU affinity mode enabled, no. of response"
343 " queues:%d, no. of request queues:%d\n",
344 ha->max_rsp_queues, ha->max_req_queues));
345 }
346 return 0;
347fail2:
348 qla25xx_delete_queues(vha);
7163ea81
AC
349 destroy_workqueue(ha->wq);
350 ha->wq = NULL;
68ca949c
AC
351fail:
352 ha->mqenable = 0;
7163ea81
AC
353 kfree(ha->req_q_map);
354 kfree(ha->rsp_q_map);
355 ha->max_req_queues = ha->max_rsp_queues = 1;
68ca949c
AC
356 return 1;
357}
358
1da177e4 359static char *
e315cd28 360qla2x00_pci_info_str(struct scsi_qla_host *vha, char *str)
1da177e4 361{
e315cd28 362 struct qla_hw_data *ha = vha->hw;
1da177e4
LT
363 static char *pci_bus_modes[] = {
364 "33", "66", "100", "133",
365 };
366 uint16_t pci_bus;
367
368 strcpy(str, "PCI");
369 pci_bus = (ha->pci_attr & (BIT_9 | BIT_10)) >> 9;
370 if (pci_bus) {
371 strcat(str, "-X (");
372 strcat(str, pci_bus_modes[pci_bus]);
373 } else {
374 pci_bus = (ha->pci_attr & BIT_8) >> 8;
375 strcat(str, " (");
376 strcat(str, pci_bus_modes[pci_bus]);
377 }
378 strcat(str, " MHz)");
379
380 return (str);
381}
382
fca29703 383static char *
e315cd28 384qla24xx_pci_info_str(struct scsi_qla_host *vha, char *str)
fca29703
AV
385{
386 static char *pci_bus_modes[] = { "33", "66", "100", "133", };
e315cd28 387 struct qla_hw_data *ha = vha->hw;
fca29703
AV
388 uint32_t pci_bus;
389 int pcie_reg;
390
391 pcie_reg = pci_find_capability(ha->pdev, PCI_CAP_ID_EXP);
392 if (pcie_reg) {
393 char lwstr[6];
394 uint16_t pcie_lstat, lspeed, lwidth;
395
396 pcie_reg += 0x12;
397 pci_read_config_word(ha->pdev, pcie_reg, &pcie_lstat);
398 lspeed = pcie_lstat & (BIT_0 | BIT_1 | BIT_2 | BIT_3);
399 lwidth = (pcie_lstat &
400 (BIT_4 | BIT_5 | BIT_6 | BIT_7 | BIT_8 | BIT_9)) >> 4;
401
402 strcpy(str, "PCIe (");
403 if (lspeed == 1)
c87a0d8c 404 strcat(str, "2.5GT/s ");
c3a2f0df 405 else if (lspeed == 2)
c87a0d8c 406 strcat(str, "5.0GT/s ");
fca29703
AV
407 else
408 strcat(str, "<unknown> ");
409 snprintf(lwstr, sizeof(lwstr), "x%d)", lwidth);
410 strcat(str, lwstr);
411
412 return str;
413 }
414
415 strcpy(str, "PCI");
416 pci_bus = (ha->pci_attr & CSRX_PCIX_BUS_MODE_MASK) >> 8;
417 if (pci_bus == 0 || pci_bus == 8) {
418 strcat(str, " (");
419 strcat(str, pci_bus_modes[pci_bus >> 3]);
420 } else {
421 strcat(str, "-X ");
422 if (pci_bus & BIT_2)
423 strcat(str, "Mode 2");
424 else
425 strcat(str, "Mode 1");
426 strcat(str, " (");
427 strcat(str, pci_bus_modes[pci_bus & ~BIT_2]);
428 }
429 strcat(str, " MHz)");
430
431 return str;
432}
433
e5f82ab8 434static char *
e315cd28 435qla2x00_fw_version_str(struct scsi_qla_host *vha, char *str)
1da177e4
LT
436{
437 char un_str[10];
e315cd28 438 struct qla_hw_data *ha = vha->hw;
fa2a1ce5 439
1da177e4
LT
440 sprintf(str, "%d.%02d.%02d ", ha->fw_major_version,
441 ha->fw_minor_version,
442 ha->fw_subminor_version);
443
444 if (ha->fw_attributes & BIT_9) {
445 strcat(str, "FLX");
446 return (str);
447 }
448
449 switch (ha->fw_attributes & 0xFF) {
450 case 0x7:
451 strcat(str, "EF");
452 break;
453 case 0x17:
454 strcat(str, "TP");
455 break;
456 case 0x37:
457 strcat(str, "IP");
458 break;
459 case 0x77:
460 strcat(str, "VI");
461 break;
462 default:
463 sprintf(un_str, "(%x)", ha->fw_attributes);
464 strcat(str, un_str);
465 break;
466 }
467 if (ha->fw_attributes & 0x100)
468 strcat(str, "X");
469
470 return (str);
471}
472
e5f82ab8 473static char *
e315cd28 474qla24xx_fw_version_str(struct scsi_qla_host *vha, char *str)
fca29703 475{
e315cd28 476 struct qla_hw_data *ha = vha->hw;
f0883ac6 477
3a03eb79
AV
478 sprintf(str, "%d.%02d.%02d (%x)", ha->fw_major_version,
479 ha->fw_minor_version, ha->fw_subminor_version, ha->fw_attributes);
fca29703 480 return str;
fca29703
AV
481}
482
483static inline srb_t *
e315cd28 484qla2x00_get_new_sp(scsi_qla_host_t *vha, fc_port_t *fcport,
fca29703
AV
485 struct scsi_cmnd *cmd, void (*done)(struct scsi_cmnd *))
486{
487 srb_t *sp;
e315cd28 488 struct qla_hw_data *ha = vha->hw;
fca29703
AV
489
490 sp = mempool_alloc(ha->srb_mempool, GFP_ATOMIC);
491 if (!sp)
492 return sp;
493
fca29703
AV
494 sp->fcport = fcport;
495 sp->cmd = cmd;
496 sp->flags = 0;
497 CMD_SP(cmd) = (void *)sp;
498 cmd->scsi_done = done;
cf53b069 499 sp->ctx = NULL;
fca29703
AV
500
501 return sp;
502}
503
1da177e4 504static int
a5326f86 505qla2xxx_queuecommand(struct scsi_cmnd *cmd, void (*done)(struct scsi_cmnd *))
fca29703 506{
e315cd28 507 scsi_qla_host_t *vha = shost_priv(cmd->device->host);
fca29703 508 fc_port_t *fcport = (struct fc_port *) cmd->device->hostdata;
19a7b4ae 509 struct fc_rport *rport = starget_to_rport(scsi_target(cmd->device));
e315cd28
AC
510 struct qla_hw_data *ha = vha->hw;
511 struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
fca29703
AV
512 srb_t *sp;
513 int rval;
514
85880801
AV
515 if (ha->flags.eeh_busy) {
516 if (ha->flags.pci_channel_io_perm_failure)
b9b12f73 517 cmd->result = DID_NO_CONNECT << 16;
85880801
AV
518 else
519 cmd->result = DID_REQUEUE << 16;
14e660e6
SJ
520 goto qc24_fail_command;
521 }
522
19a7b4ae
JSEC
523 rval = fc_remote_port_chkready(rport);
524 if (rval) {
525 cmd->result = rval;
fca29703
AV
526 goto qc24_fail_command;
527 }
528
387f96b4 529 /* Close window on fcport/rport state-transitioning. */
7b594131
MC
530 if (fcport->drport)
531 goto qc24_target_busy;
387f96b4 532
fca29703
AV
533 if (atomic_read(&fcport->state) != FCS_ONLINE) {
534 if (atomic_read(&fcport->state) == FCS_DEVICE_DEAD ||
e315cd28 535 atomic_read(&base_vha->loop_state) == LOOP_DEAD) {
fca29703
AV
536 cmd->result = DID_NO_CONNECT << 16;
537 goto qc24_fail_command;
538 }
7b594131 539 goto qc24_target_busy;
fca29703
AV
540 }
541
e315cd28 542 spin_unlock_irq(vha->host->host_lock);
fca29703 543
e315cd28 544 sp = qla2x00_get_new_sp(base_vha, fcport, cmd, done);
fca29703
AV
545 if (!sp)
546 goto qc24_host_busy_lock;
547
e315cd28 548 rval = ha->isp_ops->start_scsi(sp);
fca29703
AV
549 if (rval != QLA_SUCCESS)
550 goto qc24_host_busy_free_sp;
551
e315cd28 552 spin_lock_irq(vha->host->host_lock);
fca29703
AV
553
554 return 0;
555
556qc24_host_busy_free_sp:
e315cd28
AC
557 qla2x00_sp_free_dma(sp);
558 mempool_free(sp, ha->srb_mempool);
fca29703
AV
559
560qc24_host_busy_lock:
e315cd28 561 spin_lock_irq(vha->host->host_lock);
fca29703
AV
562 return SCSI_MLQUEUE_HOST_BUSY;
563
7b594131
MC
564qc24_target_busy:
565 return SCSI_MLQUEUE_TARGET_BUSY;
566
fca29703
AV
567qc24_fail_command:
568 done(cmd);
569
570 return 0;
571}
572
573
1da177e4
LT
574/*
575 * qla2x00_eh_wait_on_command
 576 * Waits for the command to be returned by the firmware for some
 577 * maximum amount of time.
578 *
579 * Input:
1da177e4 580 * cmd = Scsi Command to wait on.
1da177e4
LT
581 *
582 * Return:
583 * Not Found : 0
584 * Found : 1
585 */
586static int
e315cd28 587qla2x00_eh_wait_on_command(struct scsi_cmnd *cmd)
1da177e4 588{
fe74c71f
AV
589#define ABORT_POLLING_PERIOD 1000
590#define ABORT_WAIT_ITER ((10 * 1000) / (ABORT_POLLING_PERIOD))
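 /* Poll CMD_SP(cmd) every ABORT_POLLING_PERIOD ms, i.e. for roughly 10 seconds total. */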
f4f051eb 591 unsigned long wait_iter = ABORT_WAIT_ITER;
85880801
AV
592 scsi_qla_host_t *vha = shost_priv(cmd->device->host);
593 struct qla_hw_data *ha = vha->hw;
f4f051eb 594 int ret = QLA_SUCCESS;
1da177e4 595
85880801
AV
596 if (unlikely(pci_channel_offline(ha->pdev)) || ha->flags.eeh_busy) {
597 DEBUG17(qla_printk(KERN_WARNING, ha, "return:eh_wait\n"));
598 return ret;
599 }
600
d970432c 601 while (CMD_SP(cmd) && wait_iter--) {
fe74c71f 602 msleep(ABORT_POLLING_PERIOD);
f4f051eb
AV
603 }
604 if (CMD_SP(cmd))
605 ret = QLA_FUNCTION_FAILED;
1da177e4 606
f4f051eb 607 return ret;
1da177e4
LT
608}
609
610/*
611 * qla2x00_wait_for_hba_online
fa2a1ce5 612 * Wait until the HBA is online after going through
 613 * at most MAX_RETRIES_OF_ISP_ABORT retries, or until the
 614 * HBA is finally disabled, i.e. marked offline.
615 *
616 * Input:
617 * ha - pointer to host adapter structure
fa2a1ce5
AV
618 *
619 * Note:
1da177e4
LT
620 * Does context switching-Release SPIN_LOCK
621 * (if any) before calling this routine.
622 *
623 * Return:
624 * Success (Adapter is online) : 0
625 * Failed (Adapter is offline/disabled) : 1
626 */
854165f4 627int
e315cd28 628qla2x00_wait_for_hba_online(scsi_qla_host_t *vha)
1da177e4 629{
fca29703
AV
630 int return_status;
631 unsigned long wait_online;
e315cd28
AC
632 struct qla_hw_data *ha = vha->hw;
633 scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev);
1da177e4 634
fa2a1ce5 635 wait_online = jiffies + (MAX_LOOP_TIMEOUT * HZ);
e315cd28
AC
636 while (((test_bit(ISP_ABORT_NEEDED, &base_vha->dpc_flags)) ||
637 test_bit(ABORT_ISP_ACTIVE, &base_vha->dpc_flags) ||
638 test_bit(ISP_ABORT_RETRY, &base_vha->dpc_flags) ||
639 ha->dpc_active) && time_before(jiffies, wait_online)) {
1da177e4
LT
640
641 msleep(1000);
642 }
e315cd28 643 if (base_vha->flags.online)
fa2a1ce5 644 return_status = QLA_SUCCESS;
1da177e4
LT
645 else
646 return_status = QLA_FUNCTION_FAILED;
647
1da177e4
LT
648 return (return_status);
649}
650
2533cf67
LC
651int
652qla2x00_wait_for_chip_reset(scsi_qla_host_t *vha)
653{
654 int return_status;
655 unsigned long wait_reset;
656 struct qla_hw_data *ha = vha->hw;
657 scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev);
658
659 wait_reset = jiffies + (MAX_LOOP_TIMEOUT * HZ);
660 while (((test_bit(ISP_ABORT_NEEDED, &base_vha->dpc_flags)) ||
661 test_bit(ABORT_ISP_ACTIVE, &base_vha->dpc_flags) ||
662 test_bit(ISP_ABORT_RETRY, &base_vha->dpc_flags) ||
663 ha->dpc_active) && time_before(jiffies, wait_reset)) {
664
665 msleep(1000);
666
667 if (!test_bit(ISP_ABORT_NEEDED, &base_vha->dpc_flags) &&
668 ha->flags.chip_reset_done)
669 break;
670 }
671 if (ha->flags.chip_reset_done)
672 return_status = QLA_SUCCESS;
673 else
674 return_status = QLA_FUNCTION_FAILED;
675
676 return return_status;
677}
678
1da177e4
LT
679/*
680 * qla2x00_wait_for_loop_ready
 681 * Wait up to MAX_LOOP_TIMEOUT (5 min) for the loop
fa2a1ce5 682 * to reach the LOOP_READY state.
1da177e4
LT
683 * Input:
684 * ha - pointer to host adapter structure
fa2a1ce5
AV
685 *
686 * Note:
1da177e4
LT
687 * Does context switching-Release SPIN_LOCK
688 * (if any) before calling this routine.
fa2a1ce5 689 *
1da177e4
LT
690 *
691 * Return:
692 * Success (LOOP_READY) : 0
693 * Failed (LOOP_NOT_READY) : 1
694 */
fa2a1ce5 695static inline int
e315cd28 696qla2x00_wait_for_loop_ready(scsi_qla_host_t *vha)
1da177e4
LT
697{
698 int return_status = QLA_SUCCESS;
699 unsigned long loop_timeout ;
e315cd28
AC
700 struct qla_hw_data *ha = vha->hw;
701 scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev);
1da177e4
LT
702
703 /* wait for 5 min at the max for loop to be ready */
fa2a1ce5 704 loop_timeout = jiffies + (MAX_LOOP_TIMEOUT * HZ);
1da177e4 705
e315cd28
AC
706 while ((!atomic_read(&base_vha->loop_down_timer) &&
707 atomic_read(&base_vha->loop_state) == LOOP_DOWN) ||
708 atomic_read(&base_vha->loop_state) != LOOP_READY) {
709 if (atomic_read(&base_vha->loop_state) == LOOP_DEAD) {
57680080
RA
710 return_status = QLA_FUNCTION_FAILED;
711 break;
712 }
1da177e4
LT
713 msleep(1000);
714 if (time_after_eq(jiffies, loop_timeout)) {
715 return_status = QLA_FUNCTION_FAILED;
716 break;
717 }
718 }
fa2a1ce5 719 return (return_status);
1da177e4
LT
720}
721
722/**************************************************************************
723* qla2xxx_eh_abort
724*
725* Description:
726* The abort function will abort the specified command.
727*
728* Input:
729* cmd = Linux SCSI command packet to be aborted.
730*
731* Returns:
732* Either SUCCESS or FAILED.
733*
734* Note:
2ea00202 735* Only return FAILED if command not returned by firmware.
1da177e4 736**************************************************************************/
e5f82ab8 737static int
1da177e4
LT
738qla2xxx_eh_abort(struct scsi_cmnd *cmd)
739{
e315cd28 740 scsi_qla_host_t *vha = shost_priv(cmd->device->host);
f4f051eb 741 srb_t *sp;
17d98630 742 int ret, i;
f4f051eb
AV
743 unsigned int id, lun;
744 unsigned long serial;
18e144d3 745 unsigned long flags;
2ea00202 746 int wait = 0;
e315cd28 747 struct qla_hw_data *ha = vha->hw;
67c2e93a 748 struct req_que *req = vha->req;
17d98630 749 srb_t *spt;
1da177e4 750
65d430fa 751 fc_block_scsi_eh(cmd);
07db5183 752
f4f051eb 753 if (!CMD_SP(cmd))
2ea00202 754 return SUCCESS;
1da177e4 755
2ea00202 756 ret = SUCCESS;
1da177e4 757
f4f051eb
AV
758 id = cmd->device->id;
759 lun = cmd->device->lun;
760 serial = cmd->serial_number;
17d98630
AC
761 spt = (srb_t *) CMD_SP(cmd);
762 if (!spt)
763 return SUCCESS;
1da177e4 764
f4f051eb 765 /* Check active list for the command. */
e315cd28 766 spin_lock_irqsave(&ha->hardware_lock, flags);
17d98630
AC
767 for (i = 1; i < MAX_OUTSTANDING_COMMANDS; i++) {
768 sp = req->outstanding_cmds[i];
1da177e4 769
17d98630
AC
770 if (sp == NULL)
771 continue;
a9083016 772 if ((sp->ctx) && !(sp->flags & SRB_FCP_CMND_DMA_VALID))
cf53b069 773 continue;
17d98630
AC
774 if (sp->cmd != cmd)
775 continue;
1da177e4 776
17d98630
AC
777 DEBUG2(printk("%s(%ld): aborting sp %p from RISC."
778 " pid=%ld.\n", __func__, vha->host_no, sp, serial));
779
780 spin_unlock_irqrestore(&ha->hardware_lock, flags);
2afa19a9 781 if (ha->isp_ops->abort_command(sp)) {
17d98630
AC
782 DEBUG2(printk("%s(%ld): abort_command "
783 "mbx failed.\n", __func__, vha->host_no));
2ac4b64f 784 ret = FAILED;
17d98630
AC
785 } else {
786 DEBUG3(printk("%s(%ld): abort_command "
787 "mbx success.\n", __func__, vha->host_no));
788 wait = 1;
73208dfd 789 }
17d98630
AC
790 spin_lock_irqsave(&ha->hardware_lock, flags);
791 break;
f4f051eb 792 }
e315cd28 793 spin_unlock_irqrestore(&ha->hardware_lock, flags);
1da177e4 794
f4f051eb 795 /* Wait for the command to be returned. */
2ea00202 796 if (wait) {
e315cd28 797 if (qla2x00_eh_wait_on_command(cmd) != QLA_SUCCESS) {
fa2a1ce5 798 qla_printk(KERN_ERR, ha,
f4f051eb 799 "scsi(%ld:%d:%d): Abort handler timed out -- %lx "
e315cd28 800 "%x.\n", vha->host_no, id, lun, serial, ret);
2ea00202 801 ret = FAILED;
f4f051eb 802 }
1da177e4 803 }
1da177e4 804
fa2a1ce5 805 qla_printk(KERN_INFO, ha,
2ea00202 806 "scsi(%ld:%d:%d): Abort command issued -- %d %lx %x.\n",
e315cd28 807 vha->host_no, id, lun, wait, serial, ret);
1da177e4 808
f4f051eb
AV
809 return ret;
810}
1da177e4 811
523ec773
AV
812enum nexus_wait_type {
813 WAIT_HOST = 0,
814 WAIT_TARGET,
815 WAIT_LUN,
816};
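/*
 * Granularity used by qla2x00_eh_wait_for_pending_commands(): wait for all
 * outstanding commands on the host, on a single target, or on a single LUN.
 */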
817
f4f051eb 818static int
e315cd28 819qla2x00_eh_wait_for_pending_commands(scsi_qla_host_t *vha, unsigned int t,
17d98630 820 unsigned int l, srb_t *sp, enum nexus_wait_type type)
f4f051eb 821{
17d98630 822 int cnt, match, status;
18e144d3 823 unsigned long flags;
e315cd28 824 struct qla_hw_data *ha = vha->hw;
73208dfd 825 struct req_que *req;
1da177e4 826
523ec773 827 status = QLA_SUCCESS;
17d98630
AC
828 if (!sp)
829 return status;
830
e315cd28 831 spin_lock_irqsave(&ha->hardware_lock, flags);
67c2e93a 832 req = vha->req;
17d98630
AC
833 for (cnt = 1; status == QLA_SUCCESS &&
834 cnt < MAX_OUTSTANDING_COMMANDS; cnt++) {
835 sp = req->outstanding_cmds[cnt];
836 if (!sp)
523ec773 837 continue;
cf53b069
AV
838 if (sp->ctx)
839 continue;
17d98630
AC
840 if (vha->vp_idx != sp->fcport->vha->vp_idx)
841 continue;
842 match = 0;
843 switch (type) {
844 case WAIT_HOST:
845 match = 1;
846 break;
847 case WAIT_TARGET:
848 match = sp->cmd->device->id == t;
849 break;
850 case WAIT_LUN:
851 match = (sp->cmd->device->id == t &&
852 sp->cmd->device->lun == l);
853 break;
73208dfd 854 }
17d98630
AC
855 if (!match)
856 continue;
857
858 spin_unlock_irqrestore(&ha->hardware_lock, flags);
859 status = qla2x00_eh_wait_on_command(sp->cmd);
860 spin_lock_irqsave(&ha->hardware_lock, flags);
1da177e4 861 }
e315cd28 862 spin_unlock_irqrestore(&ha->hardware_lock, flags);
523ec773
AV
863
864 return status;
1da177e4
LT
865}
866
a9083016
GM
867void qla82xx_wait_for_pending_commands(scsi_qla_host_t *vha)
868{
869 int cnt;
870 srb_t *sp;
871 struct req_que *req = vha->req;
872
873 DEBUG2(qla_printk(KERN_INFO, vha->hw,
874 "Waiting for pending commands\n"));
875 for (cnt = 1; cnt < MAX_OUTSTANDING_COMMANDS; cnt++) {
876 sp = req->outstanding_cmds[cnt];
877 if (qla2x00_eh_wait_for_pending_commands(vha, 0, 0,
878 sp, WAIT_HOST) == QLA_SUCCESS) {
879 DEBUG2(qla_printk(KERN_INFO, vha->hw,
880 "Done wait for pending commands\n"));
881 }
882 }
883}
884
523ec773
AV
885static char *reset_errors[] = {
886 "HBA not online",
887 "HBA not ready",
888 "Task management failed",
889 "Waiting for command completions",
890};
1da177e4 891
e5f82ab8 892static int
523ec773 893__qla2xxx_eh_generic_reset(char *name, enum nexus_wait_type type,
2afa19a9 894 struct scsi_cmnd *cmd, int (*do_reset)(struct fc_port *, unsigned int, int))
1da177e4 895{
e315cd28 896 scsi_qla_host_t *vha = shost_priv(cmd->device->host);
bdf79621 897 fc_port_t *fcport = (struct fc_port *) cmd->device->hostdata;
523ec773 898 int err;
1da177e4 899
65d430fa 900 fc_block_scsi_eh(cmd);
07db5183 901
b0328bee 902 if (!fcport)
523ec773 903 return FAILED;
1da177e4 904
e315cd28
AC
905 qla_printk(KERN_INFO, vha->hw, "scsi(%ld:%d:%d): %s RESET ISSUED.\n",
906 vha->host_no, cmd->device->id, cmd->device->lun, name);
1da177e4 907
523ec773 908 err = 0;
e315cd28 909 if (qla2x00_wait_for_hba_online(vha) != QLA_SUCCESS)
523ec773
AV
910 goto eh_reset_failed;
911 err = 1;
e315cd28 912 if (qla2x00_wait_for_loop_ready(vha) != QLA_SUCCESS)
523ec773
AV
913 goto eh_reset_failed;
914 err = 2;
2afa19a9
AC
915 if (do_reset(fcport, cmd->device->lun, cmd->request->cpu + 1)
916 != QLA_SUCCESS)
523ec773
AV
917 goto eh_reset_failed;
918 err = 3;
e315cd28 919 if (qla2x00_eh_wait_for_pending_commands(vha, cmd->device->id,
17d98630 920 cmd->device->lun, (srb_t *) CMD_SP(cmd), type) != QLA_SUCCESS)
523ec773
AV
921 goto eh_reset_failed;
922
e315cd28
AC
923 qla_printk(KERN_INFO, vha->hw, "scsi(%ld:%d:%d): %s RESET SUCCEEDED.\n",
924 vha->host_no, cmd->device->id, cmd->device->lun, name);
523ec773
AV
925
926 return SUCCESS;
927
928 eh_reset_failed:
e315cd28
AC
929 qla_printk(KERN_INFO, vha->hw, "scsi(%ld:%d:%d): %s RESET FAILED: %s.\n"
930 , vha->host_no, cmd->device->id, cmd->device->lun, name,
523ec773
AV
931 reset_errors[err]);
932 return FAILED;
933}
1da177e4 934
523ec773
AV
935static int
936qla2xxx_eh_device_reset(struct scsi_cmnd *cmd)
937{
e315cd28
AC
938 scsi_qla_host_t *vha = shost_priv(cmd->device->host);
939 struct qla_hw_data *ha = vha->hw;
1da177e4 940
523ec773
AV
941 return __qla2xxx_eh_generic_reset("DEVICE", WAIT_LUN, cmd,
942 ha->isp_ops->lun_reset);
1da177e4
LT
943}
944
1da177e4 945static int
523ec773 946qla2xxx_eh_target_reset(struct scsi_cmnd *cmd)
1da177e4 947{
e315cd28
AC
948 scsi_qla_host_t *vha = shost_priv(cmd->device->host);
949 struct qla_hw_data *ha = vha->hw;
1da177e4 950
523ec773
AV
951 return __qla2xxx_eh_generic_reset("TARGET", WAIT_TARGET, cmd,
952 ha->isp_ops->target_reset);
1da177e4
LT
953}
954
1da177e4
LT
955/**************************************************************************
956* qla2xxx_eh_bus_reset
957*
958* Description:
959* The bus reset function will reset the bus and abort any executing
960* commands.
961*
962* Input:
 963* cmd = Linux SCSI command packet of the command that caused the
964* bus reset.
965*
966* Returns:
967* SUCCESS/FAILURE (defined as macro in scsi.h).
968*
969**************************************************************************/
e5f82ab8 970static int
1da177e4
LT
971qla2xxx_eh_bus_reset(struct scsi_cmnd *cmd)
972{
e315cd28 973 scsi_qla_host_t *vha = shost_priv(cmd->device->host);
bdf79621 974 fc_port_t *fcport = (struct fc_port *) cmd->device->hostdata;
2c3dfe3f 975 int ret = FAILED;
f4f051eb
AV
976 unsigned int id, lun;
977 unsigned long serial;
17d98630 978 srb_t *sp = (srb_t *) CMD_SP(cmd);
f4f051eb 979
65d430fa 980 fc_block_scsi_eh(cmd);
07db5183 981
f4f051eb
AV
982 id = cmd->device->id;
983 lun = cmd->device->lun;
984 serial = cmd->serial_number;
1da177e4 985
b0328bee 986 if (!fcport)
f4f051eb 987 return ret;
1da177e4 988
e315cd28 989 qla_printk(KERN_INFO, vha->hw,
749af3d5 990 "scsi(%ld:%d:%d): BUS RESET ISSUED.\n", vha->host_no, id, lun);
1da177e4 991
e315cd28 992 if (qla2x00_wait_for_hba_online(vha) != QLA_SUCCESS) {
1da177e4 993 DEBUG2(printk("%s failed:board disabled\n",__func__));
f4f051eb 994 goto eh_bus_reset_done;
1da177e4
LT
995 }
996
e315cd28
AC
997 if (qla2x00_wait_for_loop_ready(vha) == QLA_SUCCESS) {
998 if (qla2x00_loop_reset(vha) == QLA_SUCCESS)
f4f051eb 999 ret = SUCCESS;
1da177e4 1000 }
f4f051eb
AV
1001 if (ret == FAILED)
1002 goto eh_bus_reset_done;
1da177e4 1003
9a41a62b 1004 /* Flush outstanding commands. */
17d98630 1005 if (qla2x00_eh_wait_for_pending_commands(vha, 0, 0, sp, WAIT_HOST) !=
523ec773 1006 QLA_SUCCESS)
9a41a62b 1007 ret = FAILED;
1da177e4 1008
f4f051eb 1009eh_bus_reset_done:
e315cd28 1010 qla_printk(KERN_INFO, vha->hw, "%s: reset %s\n", __func__,
f4f051eb 1011 (ret == FAILED) ? "failed" : "succeeded");
1da177e4 1012
f4f051eb 1013 return ret;
1da177e4
LT
1014}
1015
1016/**************************************************************************
1017* qla2xxx_eh_host_reset
1018*
1019* Description:
1020* The reset function will reset the Adapter.
1021*
1022* Input:
 1023* cmd = Linux SCSI command packet of the command that caused the
1024* adapter reset.
1025*
1026* Returns:
1027* Either SUCCESS or FAILED.
1028*
1029* Note:
1030**************************************************************************/
e5f82ab8 1031static int
1da177e4
LT
1032qla2xxx_eh_host_reset(struct scsi_cmnd *cmd)
1033{
e315cd28 1034 scsi_qla_host_t *vha = shost_priv(cmd->device->host);
bdf79621 1035 fc_port_t *fcport = (struct fc_port *) cmd->device->hostdata;
e315cd28 1036 struct qla_hw_data *ha = vha->hw;
2c3dfe3f 1037 int ret = FAILED;
f4f051eb
AV
1038 unsigned int id, lun;
1039 unsigned long serial;
17d98630 1040 srb_t *sp = (srb_t *) CMD_SP(cmd);
e315cd28 1041 scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev);
1da177e4 1042
65d430fa 1043 fc_block_scsi_eh(cmd);
07db5183 1044
f4f051eb
AV
1045 id = cmd->device->id;
1046 lun = cmd->device->lun;
1047 serial = cmd->serial_number;
1048
b0328bee 1049 if (!fcport)
f4f051eb 1050 return ret;
1da177e4 1051
1da177e4 1052 qla_printk(KERN_INFO, ha,
e315cd28 1053 "scsi(%ld:%d:%d): ADAPTER RESET ISSUED.\n", vha->host_no, id, lun);
1da177e4 1054
e315cd28 1055 if (qla2x00_wait_for_hba_online(vha) != QLA_SUCCESS)
f4f051eb 1056 goto eh_host_reset_lock;
1da177e4
LT
1057
1058 /*
 1059 * FIXME: the dpc thread may be active and processing
fa2a1ce5 1060 * loop_resync, so wait a while for it to
 1061 * complete before issuing the big hammer. Otherwise
 1062 * it may cause I/O failures, as the big hammer marks the
 1063 * devices as lost, kicking off the port_down_timer
 1064 * while dpc is stuck waiting for the mailbox to complete.
1065 */
e315cd28
AC
1066 qla2x00_wait_for_loop_ready(vha);
1067 if (vha != base_vha) {
1068 if (qla2x00_vp_abort_isp(vha))
f4f051eb 1069 goto eh_host_reset_lock;
e315cd28 1070 } else {
a9083016
GM
1071 if (IS_QLA82XX(vha->hw)) {
1072 if (!qla82xx_fcoe_ctx_reset(vha)) {
1073 /* Ctx reset success */
1074 ret = SUCCESS;
1075 goto eh_host_reset_lock;
1076 }
1077 /* fall thru if ctx reset failed */
1078 }
68ca949c
AC
1079 if (ha->wq)
1080 flush_workqueue(ha->wq);
1081
e315cd28 1082 set_bit(ABORT_ISP_ACTIVE, &base_vha->dpc_flags);
a9083016 1083 if (ha->isp_ops->abort_isp(base_vha)) {
e315cd28
AC
1084 clear_bit(ABORT_ISP_ACTIVE, &base_vha->dpc_flags);
1085 /* failed. schedule dpc to try */
1086 set_bit(ISP_ABORT_NEEDED, &base_vha->dpc_flags);
1087
1088 if (qla2x00_wait_for_hba_online(vha) != QLA_SUCCESS)
1089 goto eh_host_reset_lock;
1090 }
1091 clear_bit(ABORT_ISP_ACTIVE, &base_vha->dpc_flags);
fa2a1ce5 1092 }
1da177e4 1093
e315cd28 1094 /* Wait for commands to be returned to the OS. */
17d98630 1095 if (qla2x00_eh_wait_for_pending_commands(vha, 0, 0, sp, WAIT_HOST) ==
e315cd28 1096 QLA_SUCCESS)
f4f051eb 1097 ret = SUCCESS;
1da177e4 1098
f4f051eb 1099eh_host_reset_lock:
f4f051eb
AV
1100 qla_printk(KERN_INFO, ha, "%s: reset %s\n", __func__,
 1101 (ret == FAILED) ? "failed" : "succeeded");
1da177e4 1102
f4f051eb
AV
1103 return ret;
1104}
1da177e4
LT
1105
1106/*
1107* qla2x00_loop_reset
1108* Issue loop reset.
1109*
1110* Input:
1111* ha = adapter block pointer.
1112*
1113* Returns:
1114* 0 = success
1115*/
a4722cf2 1116int
e315cd28 1117qla2x00_loop_reset(scsi_qla_host_t *vha)
1da177e4 1118{
0c8c39af 1119 int ret;
bdf79621 1120 struct fc_port *fcport;
e315cd28 1121 struct qla_hw_data *ha = vha->hw;
1da177e4 1122
55e5ed27
AV
1123 if (ha->flags.enable_target_reset) {
1124 list_for_each_entry(fcport, &vha->vp_fcports, list) {
1125 if (fcport->port_type != FCT_TARGET)
1126 continue;
1127
1128 ret = ha->isp_ops->target_reset(fcport, 0, 0);
1129 if (ret != QLA_SUCCESS) {
1130 DEBUG2_3(printk("%s(%ld): bus_reset failed: "
1131 "target_reset=%d d_id=%x.\n", __func__,
1132 vha->host_no, ret, fcport->d_id.b24));
1133 }
1134 }
1135 }
1136
a9083016 1137 if (ha->flags.enable_lip_full_login && !IS_QLA8XXX_TYPE(ha)) {
e315cd28 1138 ret = qla2x00_full_login_lip(vha);
0c8c39af 1139 if (ret != QLA_SUCCESS) {
749af3d5 1140 DEBUG2_3(printk("%s(%ld): failed: "
e315cd28 1141 "full_login_lip=%d.\n", __func__, vha->host_no,
0c8c39af 1142 ret));
749af3d5
AC
1143 }
1144 atomic_set(&vha->loop_state, LOOP_DOWN);
1145 atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME);
1146 qla2x00_mark_all_devices_lost(vha, 0);
1147 qla2x00_wait_for_loop_ready(vha);
0c8c39af
AV
1148 }
1149
0d6e61bc 1150 if (ha->flags.enable_lip_reset) {
e315cd28 1151 ret = qla2x00_lip_reset(vha);
0c8c39af 1152 if (ret != QLA_SUCCESS) {
749af3d5 1153 DEBUG2_3(printk("%s(%ld): failed: "
e315cd28
AC
1154 "lip_reset=%d.\n", __func__, vha->host_no, ret));
1155 } else
1156 qla2x00_wait_for_loop_ready(vha);
1da177e4
LT
1157 }
1158
1da177e4 1159 /* Issue marker command only when we are going to start the I/O */
e315cd28 1160 vha->marker_needed = 1;
1da177e4 1161
0c8c39af 1162 return QLA_SUCCESS;
1da177e4
LT
1163}
1164
df4bf0bb 1165void
e315cd28 1166qla2x00_abort_all_cmds(scsi_qla_host_t *vha, int res)
df4bf0bb 1167{
73208dfd 1168 int que, cnt;
df4bf0bb
AV
1169 unsigned long flags;
1170 srb_t *sp;
ac280b67 1171 struct srb_ctx *ctx;
e315cd28 1172 struct qla_hw_data *ha = vha->hw;
73208dfd 1173 struct req_que *req;
df4bf0bb
AV
1174
1175 spin_lock_irqsave(&ha->hardware_lock, flags);
2afa19a9 1176 for (que = 0; que < ha->max_req_queues; que++) {
29bdccbe 1177 req = ha->req_q_map[que];
73208dfd
AC
1178 if (!req)
1179 continue;
1180 for (cnt = 1; cnt < MAX_OUTSTANDING_COMMANDS; cnt++) {
1181 sp = req->outstanding_cmds[cnt];
e612d465 1182 if (sp) {
73208dfd 1183 req->outstanding_cmds[cnt] = NULL;
a9083016
GM
1184 if (!sp->ctx ||
1185 (sp->flags & SRB_FCP_CMND_DMA_VALID)) {
ac280b67
AV
1186 sp->cmd->result = res;
1187 qla2x00_sp_compl(ha, sp);
1188 } else {
1189 ctx = sp->ctx;
6c452a45
AV
1190 if (ctx->type == SRB_LOGIN_CMD ||
1191 ctx->type == SRB_LOGOUT_CMD) {
db3ad7f8
GM
1192 del_timer_sync(&ctx->timer);
1193 ctx->free(sp);
1194 } else {
6c452a45
AV
1195 struct srb_bsg *sp_bsg =
1196 (struct srb_bsg *)sp->ctx;
1197 struct fc_bsg_job *bsg_job =
1198 sp_bsg->bsg_job;
1199
1200 if (bsg_job->request->msgcode
1201 == FC_BSG_HST_CT)
db3ad7f8 1202 kfree(sp->fcport);
6c452a45
AV
1203 bsg_job->req->errors = 0;
1204 bsg_job->reply->result = res;
1205 bsg_job->job_done(
1206 sp_bsg->bsg_job);
db3ad7f8 1207 kfree(sp->ctx);
6c452a45
AV
1208 mempool_free(sp,
1209 ha->srb_mempool);
db3ad7f8 1210 }
ac280b67 1211 }
73208dfd 1212 }
df4bf0bb
AV
1213 }
1214 }
1215 spin_unlock_irqrestore(&ha->hardware_lock, flags);
1216}
1217
f4f051eb
AV
1218static int
1219qla2xxx_slave_alloc(struct scsi_device *sdev)
1da177e4 1220{
bdf79621 1221 struct fc_rport *rport = starget_to_rport(scsi_target(sdev));
1da177e4 1222
19a7b4ae 1223 if (!rport || fc_remote_port_chkready(rport))
f4f051eb 1224 return -ENXIO;
bdf79621 1225
19a7b4ae 1226 sdev->hostdata = *(fc_port_t **)rport->dd_data;
1da177e4 1227
f4f051eb
AV
1228 return 0;
1229}
1da177e4 1230
f4f051eb
AV
1231static int
1232qla2xxx_slave_configure(struct scsi_device *sdev)
1233{
e315cd28
AC
1234 scsi_qla_host_t *vha = shost_priv(sdev->host);
1235 struct qla_hw_data *ha = vha->hw;
8482e118 1236 struct fc_rport *rport = starget_to_rport(sdev->sdev_target);
2afa19a9 1237 struct req_que *req = vha->req;
8482e118 1238
f4f051eb 1239 if (sdev->tagged_supported)
73208dfd 1240 scsi_activate_tcq(sdev, req->max_q_depth);
f4f051eb 1241 else
73208dfd 1242 scsi_deactivate_tcq(sdev, req->max_q_depth);
1da177e4 1243
85821c90 1244 rport->dev_loss_tmo = ha->port_down_retry_count;
8482e118 1245
f4f051eb
AV
1246 return 0;
1247}
1da177e4 1248
f4f051eb
AV
1249static void
1250qla2xxx_slave_destroy(struct scsi_device *sdev)
1251{
1252 sdev->hostdata = NULL;
1da177e4
LT
1253}
1254
c45dd305
GM
1255static void qla2x00_handle_queue_full(struct scsi_device *sdev, int qdepth)
1256{
1257 fc_port_t *fcport = (struct fc_port *) sdev->hostdata;
1258
1259 if (!scsi_track_queue_full(sdev, qdepth))
1260 return;
1261
1262 DEBUG2(qla_printk(KERN_INFO, fcport->vha->hw,
1263 "scsi(%ld:%d:%d:%d): Queue depth adjusted-down to %d.\n",
1264 fcport->vha->host_no, sdev->channel, sdev->id, sdev->lun,
1265 sdev->queue_depth));
1266}
1267
1268static void qla2x00_adjust_sdev_qdepth_up(struct scsi_device *sdev, int qdepth)
1269{
1270 fc_port_t *fcport = sdev->hostdata;
1271 struct scsi_qla_host *vha = fcport->vha;
1272 struct qla_hw_data *ha = vha->hw;
1273 struct req_que *req = NULL;
1274
1275 req = vha->req;
1276 if (!req)
1277 return;
1278
1279 if (req->max_q_depth <= sdev->queue_depth || req->max_q_depth < qdepth)
1280 return;
1281
1282 if (sdev->ordered_tags)
1283 scsi_adjust_queue_depth(sdev, MSG_ORDERED_TAG, qdepth);
1284 else
1285 scsi_adjust_queue_depth(sdev, MSG_SIMPLE_TAG, qdepth);
1286
1287 DEBUG2(qla_printk(KERN_INFO, ha,
1288 "scsi(%ld:%d:%d:%d): Queue depth adjusted-up to %d.\n",
1289 fcport->vha->host_no, sdev->channel, sdev->id, sdev->lun,
1290 sdev->queue_depth));
1291}
1292
ce7e4af7 1293static int
e881a172 1294qla2x00_change_queue_depth(struct scsi_device *sdev, int qdepth, int reason)
ce7e4af7 1295{
c45dd305
GM
1296 switch (reason) {
1297 case SCSI_QDEPTH_DEFAULT:
1298 scsi_adjust_queue_depth(sdev, scsi_get_tag_type(sdev), qdepth);
1299 break;
1300 case SCSI_QDEPTH_QFULL:
1301 qla2x00_handle_queue_full(sdev, qdepth);
1302 break;
1303 case SCSI_QDEPTH_RAMP_UP:
1304 qla2x00_adjust_sdev_qdepth_up(sdev, qdepth);
1305 break;
1306 default:
08002af2 1307 return -EOPNOTSUPP;
c45dd305 1308 }
e881a172 1309
ce7e4af7
AV
1310 return sdev->queue_depth;
1311}
1312
1313static int
1314qla2x00_change_queue_type(struct scsi_device *sdev, int tag_type)
1315{
1316 if (sdev->tagged_supported) {
1317 scsi_set_tag_type(sdev, tag_type);
1318 if (tag_type)
1319 scsi_activate_tcq(sdev, sdev->queue_depth);
1320 else
1321 scsi_deactivate_tcq(sdev, sdev->queue_depth);
1322 } else
1323 tag_type = 0;
1324
1325 return tag_type;
1326}
1327
1da177e4
LT
1328/**
1329 * qla2x00_config_dma_addressing() - Configure OS DMA addressing method.
1330 * @ha: HA context
1331 *
1332 * At exit, the @ha's flags.enable_64bit_addressing set to indicated
1333 * supported addressing method.
1334 */
1335static void
53303c42 1336qla2x00_config_dma_addressing(struct qla_hw_data *ha)
1da177e4 1337{
7524f9b9 1338 /* Assume a 32bit DMA mask. */
1da177e4 1339 ha->flags.enable_64bit_addressing = 0;
1da177e4 1340
6a35528a 1341 if (!dma_set_mask(&ha->pdev->dev, DMA_BIT_MASK(64))) {
7524f9b9
AV
1342 /* Any upper-dword bits set? */
1343 if (MSD(dma_get_required_mask(&ha->pdev->dev)) &&
6a35528a 1344 !pci_set_consistent_dma_mask(ha->pdev, DMA_BIT_MASK(64))) {
7524f9b9 1345 /* Ok, a 64bit DMA mask is applicable. */
1da177e4 1346 ha->flags.enable_64bit_addressing = 1;
fd34f556
AV
1347 ha->isp_ops->calc_req_entries = qla2x00_calc_iocbs_64;
1348 ha->isp_ops->build_iocbs = qla2x00_build_scsi_iocbs_64;
7524f9b9 1349 return;
1da177e4 1350 }
1da177e4 1351 }
7524f9b9 1352
284901a9
YH
1353 dma_set_mask(&ha->pdev->dev, DMA_BIT_MASK(32));
1354 pci_set_consistent_dma_mask(ha->pdev, DMA_BIT_MASK(32));
1da177e4
LT
1355}
1356
fd34f556 1357static void
e315cd28 1358qla2x00_enable_intrs(struct qla_hw_data *ha)
fd34f556
AV
1359{
1360 unsigned long flags = 0;
1361 struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
1362
1363 spin_lock_irqsave(&ha->hardware_lock, flags);
1364 ha->interrupts_on = 1;
1365 /* enable risc and host interrupts */
1366 WRT_REG_WORD(&reg->ictrl, ICR_EN_INT | ICR_EN_RISC);
1367 RD_REG_WORD(&reg->ictrl);
1368 spin_unlock_irqrestore(&ha->hardware_lock, flags);
1369
1370}
1371
1372static void
e315cd28 1373qla2x00_disable_intrs(struct qla_hw_data *ha)
fd34f556
AV
1374{
1375 unsigned long flags = 0;
1376 struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
1377
1378 spin_lock_irqsave(&ha->hardware_lock, flags);
1379 ha->interrupts_on = 0;
1380 /* disable risc and host interrupts */
1381 WRT_REG_WORD(&reg->ictrl, 0);
1382 RD_REG_WORD(&reg->ictrl);
1383 spin_unlock_irqrestore(&ha->hardware_lock, flags);
1384}
1385
1386static void
e315cd28 1387qla24xx_enable_intrs(struct qla_hw_data *ha)
fd34f556
AV
1388{
1389 unsigned long flags = 0;
1390 struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
1391
1392 spin_lock_irqsave(&ha->hardware_lock, flags);
1393 ha->interrupts_on = 1;
1394 WRT_REG_DWORD(&reg->ictrl, ICRX_EN_RISC_INT);
1395 RD_REG_DWORD(&reg->ictrl);
1396 spin_unlock_irqrestore(&ha->hardware_lock, flags);
1397}
1398
1399static void
e315cd28 1400qla24xx_disable_intrs(struct qla_hw_data *ha)
fd34f556
AV
1401{
1402 unsigned long flags = 0;
1403 struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
1404
124f85e6
AV
1405 if (IS_NOPOLLING_TYPE(ha))
1406 return;
fd34f556
AV
1407 spin_lock_irqsave(&ha->hardware_lock, flags);
1408 ha->interrupts_on = 0;
1409 WRT_REG_DWORD(&reg->ictrl, 0);
1410 RD_REG_DWORD(&reg->ictrl);
1411 spin_unlock_irqrestore(&ha->hardware_lock, flags);
1412}
1413
1414static struct isp_operations qla2100_isp_ops = {
1415 .pci_config = qla2100_pci_config,
1416 .reset_chip = qla2x00_reset_chip,
1417 .chip_diag = qla2x00_chip_diag,
1418 .config_rings = qla2x00_config_rings,
1419 .reset_adapter = qla2x00_reset_adapter,
1420 .nvram_config = qla2x00_nvram_config,
1421 .update_fw_options = qla2x00_update_fw_options,
1422 .load_risc = qla2x00_load_risc,
1423 .pci_info_str = qla2x00_pci_info_str,
1424 .fw_version_str = qla2x00_fw_version_str,
1425 .intr_handler = qla2100_intr_handler,
1426 .enable_intrs = qla2x00_enable_intrs,
1427 .disable_intrs = qla2x00_disable_intrs,
1428 .abort_command = qla2x00_abort_command,
523ec773
AV
1429 .target_reset = qla2x00_abort_target,
1430 .lun_reset = qla2x00_lun_reset,
fd34f556
AV
1431 .fabric_login = qla2x00_login_fabric,
1432 .fabric_logout = qla2x00_fabric_logout,
1433 .calc_req_entries = qla2x00_calc_iocbs_32,
1434 .build_iocbs = qla2x00_build_scsi_iocbs_32,
1435 .prep_ms_iocb = qla2x00_prep_ms_iocb,
1436 .prep_ms_fdmi_iocb = qla2x00_prep_ms_fdmi_iocb,
1437 .read_nvram = qla2x00_read_nvram_data,
1438 .write_nvram = qla2x00_write_nvram_data,
1439 .fw_dump = qla2100_fw_dump,
1440 .beacon_on = NULL,
1441 .beacon_off = NULL,
1442 .beacon_blink = NULL,
1443 .read_optrom = qla2x00_read_optrom_data,
1444 .write_optrom = qla2x00_write_optrom_data,
1445 .get_flash_version = qla2x00_get_flash_version,
e315cd28 1446 .start_scsi = qla2x00_start_scsi,
a9083016 1447 .abort_isp = qla2x00_abort_isp,
fd34f556
AV
1448};
1449
1450static struct isp_operations qla2300_isp_ops = {
1451 .pci_config = qla2300_pci_config,
1452 .reset_chip = qla2x00_reset_chip,
1453 .chip_diag = qla2x00_chip_diag,
1454 .config_rings = qla2x00_config_rings,
1455 .reset_adapter = qla2x00_reset_adapter,
1456 .nvram_config = qla2x00_nvram_config,
1457 .update_fw_options = qla2x00_update_fw_options,
1458 .load_risc = qla2x00_load_risc,
1459 .pci_info_str = qla2x00_pci_info_str,
1460 .fw_version_str = qla2x00_fw_version_str,
1461 .intr_handler = qla2300_intr_handler,
1462 .enable_intrs = qla2x00_enable_intrs,
1463 .disable_intrs = qla2x00_disable_intrs,
1464 .abort_command = qla2x00_abort_command,
523ec773
AV
1465 .target_reset = qla2x00_abort_target,
1466 .lun_reset = qla2x00_lun_reset,
fd34f556
AV
1467 .fabric_login = qla2x00_login_fabric,
1468 .fabric_logout = qla2x00_fabric_logout,
1469 .calc_req_entries = qla2x00_calc_iocbs_32,
1470 .build_iocbs = qla2x00_build_scsi_iocbs_32,
1471 .prep_ms_iocb = qla2x00_prep_ms_iocb,
1472 .prep_ms_fdmi_iocb = qla2x00_prep_ms_fdmi_iocb,
1473 .read_nvram = qla2x00_read_nvram_data,
1474 .write_nvram = qla2x00_write_nvram_data,
1475 .fw_dump = qla2300_fw_dump,
1476 .beacon_on = qla2x00_beacon_on,
1477 .beacon_off = qla2x00_beacon_off,
1478 .beacon_blink = qla2x00_beacon_blink,
1479 .read_optrom = qla2x00_read_optrom_data,
1480 .write_optrom = qla2x00_write_optrom_data,
1481 .get_flash_version = qla2x00_get_flash_version,
e315cd28 1482 .start_scsi = qla2x00_start_scsi,
a9083016 1483 .abort_isp = qla2x00_abort_isp,
fd34f556
AV
1484};
1485
1486static struct isp_operations qla24xx_isp_ops = {
1487 .pci_config = qla24xx_pci_config,
1488 .reset_chip = qla24xx_reset_chip,
1489 .chip_diag = qla24xx_chip_diag,
1490 .config_rings = qla24xx_config_rings,
1491 .reset_adapter = qla24xx_reset_adapter,
1492 .nvram_config = qla24xx_nvram_config,
1493 .update_fw_options = qla24xx_update_fw_options,
1494 .load_risc = qla24xx_load_risc,
1495 .pci_info_str = qla24xx_pci_info_str,
1496 .fw_version_str = qla24xx_fw_version_str,
1497 .intr_handler = qla24xx_intr_handler,
1498 .enable_intrs = qla24xx_enable_intrs,
1499 .disable_intrs = qla24xx_disable_intrs,
1500 .abort_command = qla24xx_abort_command,
523ec773
AV
1501 .target_reset = qla24xx_abort_target,
1502 .lun_reset = qla24xx_lun_reset,
fd34f556
AV
1503 .fabric_login = qla24xx_login_fabric,
1504 .fabric_logout = qla24xx_fabric_logout,
1505 .calc_req_entries = NULL,
1506 .build_iocbs = NULL,
1507 .prep_ms_iocb = qla24xx_prep_ms_iocb,
1508 .prep_ms_fdmi_iocb = qla24xx_prep_ms_fdmi_iocb,
1509 .read_nvram = qla24xx_read_nvram_data,
1510 .write_nvram = qla24xx_write_nvram_data,
1511 .fw_dump = qla24xx_fw_dump,
1512 .beacon_on = qla24xx_beacon_on,
1513 .beacon_off = qla24xx_beacon_off,
1514 .beacon_blink = qla24xx_beacon_blink,
1515 .read_optrom = qla24xx_read_optrom_data,
1516 .write_optrom = qla24xx_write_optrom_data,
1517 .get_flash_version = qla24xx_get_flash_version,
e315cd28 1518 .start_scsi = qla24xx_start_scsi,
a9083016 1519 .abort_isp = qla2x00_abort_isp,
fd34f556
AV
1520};
1521
c3a2f0df
AV
1522static struct isp_operations qla25xx_isp_ops = {
1523 .pci_config = qla25xx_pci_config,
1524 .reset_chip = qla24xx_reset_chip,
1525 .chip_diag = qla24xx_chip_diag,
1526 .config_rings = qla24xx_config_rings,
1527 .reset_adapter = qla24xx_reset_adapter,
1528 .nvram_config = qla24xx_nvram_config,
1529 .update_fw_options = qla24xx_update_fw_options,
1530 .load_risc = qla24xx_load_risc,
1531 .pci_info_str = qla24xx_pci_info_str,
1532 .fw_version_str = qla24xx_fw_version_str,
1533 .intr_handler = qla24xx_intr_handler,
1534 .enable_intrs = qla24xx_enable_intrs,
1535 .disable_intrs = qla24xx_disable_intrs,
1536 .abort_command = qla24xx_abort_command,
523ec773
AV
1537 .target_reset = qla24xx_abort_target,
1538 .lun_reset = qla24xx_lun_reset,
c3a2f0df
AV
1539 .fabric_login = qla24xx_login_fabric,
1540 .fabric_logout = qla24xx_fabric_logout,
1541 .calc_req_entries = NULL,
1542 .build_iocbs = NULL,
1543 .prep_ms_iocb = qla24xx_prep_ms_iocb,
1544 .prep_ms_fdmi_iocb = qla24xx_prep_ms_fdmi_iocb,
1545 .read_nvram = qla25xx_read_nvram_data,
1546 .write_nvram = qla25xx_write_nvram_data,
1547 .fw_dump = qla25xx_fw_dump,
1548 .beacon_on = qla24xx_beacon_on,
1549 .beacon_off = qla24xx_beacon_off,
1550 .beacon_blink = qla24xx_beacon_blink,
338c9161 1551 .read_optrom = qla25xx_read_optrom_data,
c3a2f0df
AV
1552 .write_optrom = qla24xx_write_optrom_data,
1553 .get_flash_version = qla24xx_get_flash_version,
e315cd28 1554 .start_scsi = qla24xx_start_scsi,
a9083016 1555 .abort_isp = qla2x00_abort_isp,
c3a2f0df
AV
1556};
1557
3a03eb79
AV
1558static struct isp_operations qla81xx_isp_ops = {
1559 .pci_config = qla25xx_pci_config,
1560 .reset_chip = qla24xx_reset_chip,
1561 .chip_diag = qla24xx_chip_diag,
1562 .config_rings = qla24xx_config_rings,
1563 .reset_adapter = qla24xx_reset_adapter,
1564 .nvram_config = qla81xx_nvram_config,
1565 .update_fw_options = qla81xx_update_fw_options,
eaac30be 1566 .load_risc = qla81xx_load_risc,
3a03eb79
AV
1567 .pci_info_str = qla24xx_pci_info_str,
1568 .fw_version_str = qla24xx_fw_version_str,
1569 .intr_handler = qla24xx_intr_handler,
1570 .enable_intrs = qla24xx_enable_intrs,
1571 .disable_intrs = qla24xx_disable_intrs,
1572 .abort_command = qla24xx_abort_command,
1573 .target_reset = qla24xx_abort_target,
1574 .lun_reset = qla24xx_lun_reset,
1575 .fabric_login = qla24xx_login_fabric,
1576 .fabric_logout = qla24xx_fabric_logout,
1577 .calc_req_entries = NULL,
1578 .build_iocbs = NULL,
1579 .prep_ms_iocb = qla24xx_prep_ms_iocb,
1580 .prep_ms_fdmi_iocb = qla24xx_prep_ms_fdmi_iocb,
3d79038f
AV
1581 .read_nvram = NULL,
1582 .write_nvram = NULL,
3a03eb79
AV
1583 .fw_dump = qla81xx_fw_dump,
1584 .beacon_on = qla24xx_beacon_on,
1585 .beacon_off = qla24xx_beacon_off,
1586 .beacon_blink = qla24xx_beacon_blink,
1587 .read_optrom = qla25xx_read_optrom_data,
1588 .write_optrom = qla24xx_write_optrom_data,
1589 .get_flash_version = qla24xx_get_flash_version,
1590 .start_scsi = qla24xx_start_scsi,
a9083016
GM
1591 .abort_isp = qla2x00_abort_isp,
1592};
1593
1594static struct isp_operations qla82xx_isp_ops = {
1595 .pci_config = qla82xx_pci_config,
1596 .reset_chip = qla82xx_reset_chip,
1597 .chip_diag = qla24xx_chip_diag,
1598 .config_rings = qla82xx_config_rings,
1599 .reset_adapter = qla24xx_reset_adapter,
1600 .nvram_config = qla81xx_nvram_config,
1601 .update_fw_options = qla24xx_update_fw_options,
1602 .load_risc = qla82xx_load_risc,
1603 .pci_info_str = qla82xx_pci_info_str,
1604 .fw_version_str = qla24xx_fw_version_str,
1605 .intr_handler = qla82xx_intr_handler,
1606 .enable_intrs = qla82xx_enable_intrs,
1607 .disable_intrs = qla82xx_disable_intrs,
1608 .abort_command = qla24xx_abort_command,
1609 .target_reset = qla24xx_abort_target,
1610 .lun_reset = qla24xx_lun_reset,
1611 .fabric_login = qla24xx_login_fabric,
1612 .fabric_logout = qla24xx_fabric_logout,
1613 .calc_req_entries = NULL,
1614 .build_iocbs = NULL,
1615 .prep_ms_iocb = qla24xx_prep_ms_iocb,
1616 .prep_ms_fdmi_iocb = qla24xx_prep_ms_fdmi_iocb,
1617 .read_nvram = qla24xx_read_nvram_data,
1618 .write_nvram = qla24xx_write_nvram_data,
1619 .fw_dump = qla24xx_fw_dump,
1620 .beacon_on = qla24xx_beacon_on,
1621 .beacon_off = qla24xx_beacon_off,
1622 .beacon_blink = qla24xx_beacon_blink,
1623 .read_optrom = qla82xx_read_optrom_data,
1624 .write_optrom = qla82xx_write_optrom_data,
1625 .get_flash_version = qla24xx_get_flash_version,
1626 .start_scsi = qla82xx_start_scsi,
1627 .abort_isp = qla82xx_abort_isp,
3a03eb79
AV
1628};
1629
ea5b6382 1630static inline void
e315cd28 1631qla2x00_set_isp_flags(struct qla_hw_data *ha)
ea5b6382
AV
1632{
1633 ha->device_type = DT_EXTENDED_IDS;
1634 switch (ha->pdev->device) {
1635 case PCI_DEVICE_ID_QLOGIC_ISP2100:
1636 ha->device_type |= DT_ISP2100;
1637 ha->device_type &= ~DT_EXTENDED_IDS;
441d1072 1638 ha->fw_srisc_address = RISC_START_ADDRESS_2100;
ea5b6382
AV
1639 break;
1640 case PCI_DEVICE_ID_QLOGIC_ISP2200:
1641 ha->device_type |= DT_ISP2200;
1642 ha->device_type &= ~DT_EXTENDED_IDS;
441d1072 1643 ha->fw_srisc_address = RISC_START_ADDRESS_2100;
ea5b6382
AV
1644 break;
1645 case PCI_DEVICE_ID_QLOGIC_ISP2300:
1646 ha->device_type |= DT_ISP2300;
4a59f71d 1647 ha->device_type |= DT_ZIO_SUPPORTED;
441d1072 1648 ha->fw_srisc_address = RISC_START_ADDRESS_2300;
ea5b6382
AV
1649 break;
1650 case PCI_DEVICE_ID_QLOGIC_ISP2312:
1651 ha->device_type |= DT_ISP2312;
4a59f71d 1652 ha->device_type |= DT_ZIO_SUPPORTED;
441d1072 1653 ha->fw_srisc_address = RISC_START_ADDRESS_2300;
ea5b6382
AV
1654 break;
1655 case PCI_DEVICE_ID_QLOGIC_ISP2322:
1656 ha->device_type |= DT_ISP2322;
4a59f71d 1657 ha->device_type |= DT_ZIO_SUPPORTED;
ea5b6382
AV
1658 if (ha->pdev->subsystem_vendor == 0x1028 &&
1659 ha->pdev->subsystem_device == 0x0170)
1660 ha->device_type |= DT_OEM_001;
441d1072 1661 ha->fw_srisc_address = RISC_START_ADDRESS_2300;
ea5b6382
AV
1662 break;
1663 case PCI_DEVICE_ID_QLOGIC_ISP6312:
1664 ha->device_type |= DT_ISP6312;
441d1072 1665 ha->fw_srisc_address = RISC_START_ADDRESS_2300;
ea5b6382
AV
1666 break;
1667 case PCI_DEVICE_ID_QLOGIC_ISP6322:
1668 ha->device_type |= DT_ISP6322;
441d1072 1669 ha->fw_srisc_address = RISC_START_ADDRESS_2300;
ea5b6382
AV
1670 break;
1671 case PCI_DEVICE_ID_QLOGIC_ISP2422:
1672 ha->device_type |= DT_ISP2422;
4a59f71d 1673 ha->device_type |= DT_ZIO_SUPPORTED;
e428924c 1674 ha->device_type |= DT_FWI2;
c76f2c01 1675 ha->device_type |= DT_IIDMA;
441d1072 1676 ha->fw_srisc_address = RISC_START_ADDRESS_2400;
ea5b6382
AV
1677 break;
1678 case PCI_DEVICE_ID_QLOGIC_ISP2432:
1679 ha->device_type |= DT_ISP2432;
4a59f71d 1680 ha->device_type |= DT_ZIO_SUPPORTED;
e428924c 1681 ha->device_type |= DT_FWI2;
c76f2c01 1682 ha->device_type |= DT_IIDMA;
441d1072 1683 ha->fw_srisc_address = RISC_START_ADDRESS_2400;
ea5b6382 1684 break;
4d4df193
HK
1685 case PCI_DEVICE_ID_QLOGIC_ISP8432:
1686 ha->device_type |= DT_ISP8432;
1687 ha->device_type |= DT_ZIO_SUPPORTED;
1688 ha->device_type |= DT_FWI2;
1689 ha->device_type |= DT_IIDMA;
1690 ha->fw_srisc_address = RISC_START_ADDRESS_2400;
1691 break;
044cc6c8
AV
1692 case PCI_DEVICE_ID_QLOGIC_ISP5422:
1693 ha->device_type |= DT_ISP5422;
e428924c 1694 ha->device_type |= DT_FWI2;
441d1072 1695 ha->fw_srisc_address = RISC_START_ADDRESS_2400;
ea5b6382 1696 break;
044cc6c8
AV
1697 case PCI_DEVICE_ID_QLOGIC_ISP5432:
1698 ha->device_type |= DT_ISP5432;
e428924c 1699 ha->device_type |= DT_FWI2;
441d1072 1700 ha->fw_srisc_address = RISC_START_ADDRESS_2400;
ea5b6382 1701 break;
c3a2f0df
AV
1702 case PCI_DEVICE_ID_QLOGIC_ISP2532:
1703 ha->device_type |= DT_ISP2532;
1704 ha->device_type |= DT_ZIO_SUPPORTED;
1705 ha->device_type |= DT_FWI2;
1706 ha->device_type |= DT_IIDMA;
441d1072 1707 ha->fw_srisc_address = RISC_START_ADDRESS_2400;
ea5b6382 1708 break;
3a03eb79
AV
1709 case PCI_DEVICE_ID_QLOGIC_ISP8001:
1710 ha->device_type |= DT_ISP8001;
1711 ha->device_type |= DT_ZIO_SUPPORTED;
1712 ha->device_type |= DT_FWI2;
1713 ha->device_type |= DT_IIDMA;
1714 ha->fw_srisc_address = RISC_START_ADDRESS_2400;
1715 break;
a9083016
GM
1716 case PCI_DEVICE_ID_QLOGIC_ISP8021:
1717 ha->device_type |= DT_ISP8021;
1718 ha->device_type |= DT_ZIO_SUPPORTED;
1719 ha->device_type |= DT_FWI2;
1720 ha->fw_srisc_address = RISC_START_ADDRESS_2400;
1721 /* Initialize 82XX ISP flags */
1722 qla82xx_init_flags(ha);
1723 break;
ea5b6382 1724 }
e5b68a61 1725
a9083016
GM
1726 if (IS_QLA82XX(ha))
1727 ha->port_no = !(ha->portnum & 1);
1728 else
1729 /* Get adapter physical port no from interrupt pin register. */
1730 pci_read_config_byte(ha->pdev, PCI_INTERRUPT_PIN, &ha->port_no);
1731
e5b68a61
AC
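	/*
	 * Note: on non-ISP8021 parts this assumes the interrupt pin register
	 * reads 1 (INTA) on the first physical port, so an odd value maps to
	 * port 0.
	 */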
1732 if (ha->port_no & 1)
1733 ha->flags.port0 = 1;
1734 else
1735 ha->flags.port0 = 0;
ea5b6382
AV
1736}
1737
1da177e4 1738static int
e315cd28 1739qla2x00_iospace_config(struct qla_hw_data *ha)
1da177e4 1740{
3776541d 1741 resource_size_t pio;
73208dfd 1742 uint16_t msix;
68ca949c 1743 int cpus;
1da177e4 1744
a9083016
GM
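	/* ISP82XX register regions are mapped by its own helper; skip the
	 * legacy PIO/MMIO setup below. */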
1745 if (IS_QLA82XX(ha))
1746 return qla82xx_iospace_config(ha);
1747
285d0321
AV
1748 if (pci_request_selected_regions(ha->pdev, ha->bars,
1749 QLA2XXX_DRIVER_NAME)) {
1750 qla_printk(KERN_WARNING, ha,
1751 "Failed to reserve PIO/MMIO regions (%s)\n",
1752 pci_name(ha->pdev));
1753
1754 goto iospace_error_exit;
1755 }
1756 if (!(ha->bars & 1))
1757 goto skip_pio;
1758
1da177e4
LT
1759 /* We only need PIO for Flash operations on ISP2312 v2 chips. */
1760 pio = pci_resource_start(ha->pdev, 0);
3776541d
AV
1761 if (pci_resource_flags(ha->pdev, 0) & IORESOURCE_IO) {
1762 if (pci_resource_len(ha->pdev, 0) < MIN_IOBASE_LEN) {
1da177e4
LT
1763 qla_printk(KERN_WARNING, ha,
1764 "Invalid PCI I/O region size (%s)...\n",
1765 pci_name(ha->pdev));
1766 pio = 0;
1767 }
1768 } else {
1769 qla_printk(KERN_WARNING, ha,
1770 "region #0 not a PIO resource (%s)...\n",
1771 pci_name(ha->pdev));
1772 pio = 0;
1773 }
285d0321 1774 ha->pio_address = pio;
1da177e4 1775
285d0321 1776skip_pio:
1da177e4 1777 /* Use MMIO operations for all accesses. */
3776541d 1778 if (!(pci_resource_flags(ha->pdev, 1) & IORESOURCE_MEM)) {
1da177e4 1779 qla_printk(KERN_ERR, ha,
3776541d 1780 "region #1 not an MMIO resource (%s), aborting\n",
1da177e4
LT
1781 pci_name(ha->pdev));
1782 goto iospace_error_exit;
1783 }
3776541d 1784 if (pci_resource_len(ha->pdev, 1) < MIN_IOBASE_LEN) {
1da177e4
LT
1785 qla_printk(KERN_ERR, ha,
1786 "Invalid PCI mem region size (%s), aborting\n",
1787 pci_name(ha->pdev));
1788 goto iospace_error_exit;
1789 }
1790
3776541d 1791 ha->iobase = ioremap(pci_resource_start(ha->pdev, 1), MIN_IOBASE_LEN);
1da177e4
LT
1792 if (!ha->iobase) {
1793 qla_printk(KERN_ERR, ha,
1794 "cannot remap MMIO (%s), aborting\n", pci_name(ha->pdev));
1795
1796 goto iospace_error_exit;
1797 }
1798
73208dfd 1799 /* Determine queue resources */
2afa19a9 1800 ha->max_req_queues = ha->max_rsp_queues = 1;
d84a47c2
MH
1801 if ((ql2xmaxqueues <= 1 && !ql2xmultique_tag) ||
1802 (ql2xmaxqueues > 1 && ql2xmultique_tag) ||
2afa19a9 1803 (!IS_QLA25XX(ha) && !IS_QLA81XX(ha)))
17d98630 1804 goto mqiobase_exit;
d84a47c2 1805
17d98630
AC
1806 ha->mqiobase = ioremap(pci_resource_start(ha->pdev, 3),
1807 pci_resource_len(ha->pdev, 3));
1808 if (ha->mqiobase) {
1809 /* Read MSIX vector size of the board */
1810 pci_read_config_word(ha->pdev, QLA_PCI_MSIX_CONTROL, &msix);
1811 ha->msix_count = msix;
68ca949c
AC
1812 /* Max queues are bounded by available msix vectors */
1813 /* queue 0 uses two msix vectors */
1814 if (ql2xmultique_tag) {
1815 cpus = num_online_cpus();
27dc9c5a 1816 ha->max_rsp_queues = (ha->msix_count - 1 > cpus) ?
68ca949c
AC
1817 (cpus + 1) : (ha->msix_count - 1);
1818 ha->max_req_queues = 2;
1819 } else if (ql2xmaxqueues > 1) {
2afa19a9
AC
1820 ha->max_req_queues = ql2xmaxqueues > QLA_MQ_SIZE ?
1821 QLA_MQ_SIZE : ql2xmaxqueues;
1822 DEBUG2(qla_printk(KERN_INFO, ha, "QoS mode set, max no"
1823 " of request queues:%d\n", ha->max_req_queues));
1824 }
68ca949c
AC
1825 qla_printk(KERN_INFO, ha,
1826 "MSI-X vector count: %d\n", msix);
2afa19a9
AC
1827 } else
1828 qla_printk(KERN_INFO, ha, "BAR 3 not enabled\n");
17d98630
AC
1829
1830mqiobase_exit:
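	/* Queue 0 consumes two MSI-X vectors (default + rsp queue 0) and each
	 * extra response queue one more, hence max_rsp_queues + 1. */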
2afa19a9 1831 ha->msix_count = ha->max_rsp_queues + 1;
1da177e4
LT
1832 return (0);
1833
1834iospace_error_exit:
1835 return (-ENOMEM);
1836}
1837
1e99e33a
AV
1838static void
1839qla2xxx_scan_start(struct Scsi_Host *shost)
1840{
e315cd28 1841 scsi_qla_host_t *vha = shost_priv(shost);
1e99e33a 1842
cbc8eb67
AV
1843 if (vha->hw->flags.running_gold_fw)
1844 return;
1845
e315cd28
AC
1846 set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
1847 set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);
1848 set_bit(RSCN_UPDATE, &vha->dpc_flags);
1849 set_bit(NPIV_CONFIG_NEEDED, &vha->dpc_flags);
1e99e33a
AV
1850}
1851
1852static int
1853qla2xxx_scan_finished(struct Scsi_Host *shost, unsigned long time)
1854{
e315cd28 1855 scsi_qla_host_t *vha = shost_priv(shost);
1e99e33a 1856
e315cd28 1857 if (!vha->host)
1e99e33a 1858 return 1;
e315cd28 1859 if (time > vha->hw->loop_reset_delay * HZ)
1e99e33a
AV
1860 return 1;
1861
e315cd28 1862 return atomic_read(&vha->loop_state) == LOOP_READY;
1e99e33a
AV
1863}
1864
1da177e4
LT
1865/*
1866 * PCI driver interface
1867 */
7ee61397
AV
1868static int __devinit
1869qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
1da177e4 1870{
a1541d5a 1871 int ret = -ENODEV;
1da177e4 1872 struct Scsi_Host *host;
e315cd28
AC
1873 scsi_qla_host_t *base_vha = NULL;
1874 struct qla_hw_data *ha;
29856e28 1875 char pci_info[30];
1da177e4 1876 char fw_str[30];
5433383e 1877 struct scsi_host_template *sht;
c51da4ec 1878 int bars, max_id, mem_only = 0;
e315cd28 1879 uint16_t req_length = 0, rsp_length = 0;
73208dfd
AC
1880 struct req_que *req = NULL;
1881 struct rsp_que *rsp = NULL;
1da177e4 1882
285d0321 1883 bars = pci_select_bars(pdev, IORESOURCE_MEM | IORESOURCE_IO);
a5326f86 1884 sht = &qla2xxx_driver_template;
5433383e 1885 if (pdev->device == PCI_DEVICE_ID_QLOGIC_ISP2422 ||
8bc69e7d 1886 pdev->device == PCI_DEVICE_ID_QLOGIC_ISP2432 ||
4d4df193 1887 pdev->device == PCI_DEVICE_ID_QLOGIC_ISP8432 ||
8bc69e7d 1888 pdev->device == PCI_DEVICE_ID_QLOGIC_ISP5422 ||
c3a2f0df 1889 pdev->device == PCI_DEVICE_ID_QLOGIC_ISP5432 ||
3a03eb79 1890 pdev->device == PCI_DEVICE_ID_QLOGIC_ISP2532 ||
a9083016
GM
1891 pdev->device == PCI_DEVICE_ID_QLOGIC_ISP8001 ||
1892 pdev->device == PCI_DEVICE_ID_QLOGIC_ISP8021) {
285d0321 1893 bars = pci_select_bars(pdev, IORESOURCE_MEM);
09483916 1894 mem_only = 1;
285d0321
AV
1895 }
1896
09483916
BH
1897 if (mem_only) {
1898 if (pci_enable_device_mem(pdev))
1899 goto probe_out;
1900 } else {
1901 if (pci_enable_device(pdev))
1902 goto probe_out;
1903 }
285d0321 1904
0927678f
JB
1905 /* This may fail but that's ok */
1906 pci_enable_pcie_error_reporting(pdev);
285d0321 1907
e315cd28
AC
1908 ha = kzalloc(sizeof(struct qla_hw_data), GFP_KERNEL);
1909 if (!ha) {
1910 DEBUG(printk("Unable to allocate memory for ha\n"));
1911 goto probe_out;
1da177e4 1912 }
e315cd28 1913 ha->pdev = pdev;
1da177e4
LT
1914
1915 /* Clear our data area */
285d0321 1916 ha->bars = bars;
09483916 1917 ha->mem_only = mem_only;
df4bf0bb 1918 spin_lock_init(&ha->hardware_lock);
1da177e4 1919
ea5b6382
AV
1920 /* Set ISP-type information. */
1921 qla2x00_set_isp_flags(ha);
ca79cf66
DG
1922
1923 /* Set EEH reset type to fundamental if required by hba */
1924	if (IS_QLA24XX(ha) || IS_QLA25XX(ha) || IS_QLA81XX(ha)) {
1925 pdev->needs_freset = 1;
ca79cf66
DG
1926 }
1927
1da177e4
LT
1928 /* Configure PCI I/O space */
1929 ret = qla2x00_iospace_config(ha);
a1541d5a 1930 if (ret)
e315cd28 1931 goto probe_hw_failed;
1da177e4 1932
1da177e4 1933 qla_printk(KERN_INFO, ha,
5433383e
AV
1934 "Found an ISP%04X, irq %d, iobase 0x%p\n", pdev->device, pdev->irq,
1935 ha->iobase);
1da177e4 1936
1da177e4 1937 ha->prev_topology = 0;
fca29703 1938 ha->init_cb_size = sizeof(init_cb_t);
d8b45213 1939 ha->link_data_rate = PORT_SPEED_UNKNOWN;
854165f4 1940 ha->optrom_size = OPTROM_SIZE_2300;
1da177e4 1941
abbd8870 1942 /* Assign ISP specific operations. */
e315cd28 1943 max_id = MAX_TARGETS_2200;
1da177e4 1944 if (IS_QLA2100(ha)) {
e315cd28 1945 max_id = MAX_TARGETS_2100;
1da177e4 1946 ha->mbx_count = MAILBOX_REGISTER_COUNT_2100;
e315cd28
AC
1947 req_length = REQUEST_ENTRY_CNT_2100;
1948 rsp_length = RESPONSE_ENTRY_CNT_2100;
1949 ha->max_loop_id = SNS_LAST_LOOP_ID_2100;
abbd8870 1950 ha->gid_list_info_size = 4;
3a03eb79
AV
1951 ha->flash_conf_off = ~0;
1952 ha->flash_data_off = ~0;
1953 ha->nvram_conf_off = ~0;
1954 ha->nvram_data_off = ~0;
fd34f556 1955 ha->isp_ops = &qla2100_isp_ops;
1da177e4 1956 } else if (IS_QLA2200(ha)) {
1da177e4 1957 ha->mbx_count = MAILBOX_REGISTER_COUNT;
e315cd28
AC
1958 req_length = REQUEST_ENTRY_CNT_2200;
1959 rsp_length = RESPONSE_ENTRY_CNT_2100;
1960 ha->max_loop_id = SNS_LAST_LOOP_ID_2100;
abbd8870 1961 ha->gid_list_info_size = 4;
3a03eb79
AV
1962 ha->flash_conf_off = ~0;
1963 ha->flash_data_off = ~0;
1964 ha->nvram_conf_off = ~0;
1965 ha->nvram_data_off = ~0;
fd34f556 1966 ha->isp_ops = &qla2100_isp_ops;
fca29703 1967 } else if (IS_QLA23XX(ha)) {
1da177e4 1968 ha->mbx_count = MAILBOX_REGISTER_COUNT;
e315cd28
AC
1969 req_length = REQUEST_ENTRY_CNT_2200;
1970 rsp_length = RESPONSE_ENTRY_CNT_2300;
1971 ha->max_loop_id = SNS_LAST_LOOP_ID_2300;
abbd8870 1972 ha->gid_list_info_size = 6;
854165f4
AV
1973 if (IS_QLA2322(ha) || IS_QLA6322(ha))
1974 ha->optrom_size = OPTROM_SIZE_2322;
3a03eb79
AV
1975 ha->flash_conf_off = ~0;
1976 ha->flash_data_off = ~0;
1977 ha->nvram_conf_off = ~0;
1978 ha->nvram_data_off = ~0;
fd34f556 1979 ha->isp_ops = &qla2300_isp_ops;
4d4df193 1980 } else if (IS_QLA24XX_TYPE(ha)) {
fca29703 1981 ha->mbx_count = MAILBOX_REGISTER_COUNT;
e315cd28
AC
1982 req_length = REQUEST_ENTRY_CNT_24XX;
1983 rsp_length = RESPONSE_ENTRY_CNT_2300;
1984 ha->max_loop_id = SNS_LAST_LOOP_ID_2300;
2c3dfe3f 1985 ha->init_cb_size = sizeof(struct mid_init_cb_24xx);
fca29703 1986 ha->gid_list_info_size = 8;
854165f4 1987 ha->optrom_size = OPTROM_SIZE_24XX;
73208dfd 1988 ha->nvram_npiv_size = QLA_MAX_VPORTS_QLA24XX;
fd34f556 1989 ha->isp_ops = &qla24xx_isp_ops;
3a03eb79
AV
1990 ha->flash_conf_off = FARX_ACCESS_FLASH_CONF;
1991 ha->flash_data_off = FARX_ACCESS_FLASH_DATA;
1992 ha->nvram_conf_off = FARX_ACCESS_NVRAM_CONF;
1993 ha->nvram_data_off = FARX_ACCESS_NVRAM_DATA;
c3a2f0df 1994 } else if (IS_QLA25XX(ha)) {
c3a2f0df 1995 ha->mbx_count = MAILBOX_REGISTER_COUNT;
e315cd28
AC
1996 req_length = REQUEST_ENTRY_CNT_24XX;
1997 rsp_length = RESPONSE_ENTRY_CNT_2300;
1998 ha->max_loop_id = SNS_LAST_LOOP_ID_2300;
c3a2f0df 1999 ha->init_cb_size = sizeof(struct mid_init_cb_24xx);
c3a2f0df
AV
2000 ha->gid_list_info_size = 8;
2001 ha->optrom_size = OPTROM_SIZE_25XX;
73208dfd 2002 ha->nvram_npiv_size = QLA_MAX_VPORTS_QLA25XX;
c3a2f0df 2003 ha->isp_ops = &qla25xx_isp_ops;
3a03eb79
AV
2004 ha->flash_conf_off = FARX_ACCESS_FLASH_CONF;
2005 ha->flash_data_off = FARX_ACCESS_FLASH_DATA;
2006 ha->nvram_conf_off = FARX_ACCESS_NVRAM_CONF;
2007 ha->nvram_data_off = FARX_ACCESS_NVRAM_DATA;
2008 } else if (IS_QLA81XX(ha)) {
2009 ha->mbx_count = MAILBOX_REGISTER_COUNT;
2010 req_length = REQUEST_ENTRY_CNT_24XX;
2011 rsp_length = RESPONSE_ENTRY_CNT_2300;
2012 ha->max_loop_id = SNS_LAST_LOOP_ID_2300;
2013 ha->init_cb_size = sizeof(struct mid_init_cb_81xx);
2014 ha->gid_list_info_size = 8;
2015 ha->optrom_size = OPTROM_SIZE_81XX;
40859ae5 2016 ha->nvram_npiv_size = QLA_MAX_VPORTS_QLA25XX;
3a03eb79
AV
2017 ha->isp_ops = &qla81xx_isp_ops;
2018 ha->flash_conf_off = FARX_ACCESS_FLASH_CONF_81XX;
2019 ha->flash_data_off = FARX_ACCESS_FLASH_DATA_81XX;
2020 ha->nvram_conf_off = ~0;
2021 ha->nvram_data_off = ~0;
a9083016
GM
2022 } else if (IS_QLA82XX(ha)) {
2023 ha->mbx_count = MAILBOX_REGISTER_COUNT;
2024 req_length = REQUEST_ENTRY_CNT_82XX;
2025 rsp_length = RESPONSE_ENTRY_CNT_82XX;
2026 ha->max_loop_id = SNS_LAST_LOOP_ID_2300;
2027 ha->init_cb_size = sizeof(struct mid_init_cb_81xx);
2028 ha->gid_list_info_size = 8;
2029 ha->optrom_size = OPTROM_SIZE_82XX;
2030 ha->isp_ops = &qla82xx_isp_ops;
2031 ha->flash_conf_off = FARX_ACCESS_FLASH_CONF;
2032 ha->flash_data_off = FARX_ACCESS_FLASH_DATA;
2033 ha->nvram_conf_off = FARX_ACCESS_NVRAM_CONF;
2034 ha->nvram_data_off = FARX_ACCESS_NVRAM_DATA;
1da177e4 2035 }
1da177e4 2036
6c2f527c 2037 mutex_init(&ha->vport_lock);
0b05a1f0
MB
2038 init_completion(&ha->mbx_cmd_comp);
2039 complete(&ha->mbx_cmd_comp);
2040 init_completion(&ha->mbx_intr_comp);
1da177e4 2041
2c3dfe3f 2042 set_bit(0, (unsigned long *) ha->vp_idx_map);
1da177e4 2043
53303c42 2044 qla2x00_config_dma_addressing(ha);
73208dfd 2045 ret = qla2x00_mem_alloc(ha, req_length, rsp_length, &req, &rsp);
e315cd28 2046	if (ret != 1) {
1da177e4
LT
2047 qla_printk(KERN_WARNING, ha,
2048 "[ERROR] Failed to allocate memory for adapter\n");
2049
e315cd28
AC
2050 goto probe_hw_failed;
2051 }
2052
73208dfd 2053 req->max_q_depth = MAX_Q_DEPTH;
e315cd28 2054 if (ql2xmaxqdepth != 0 && ql2xmaxqdepth <= 0xffffU)
73208dfd
AC
2055 req->max_q_depth = ql2xmaxqdepth;
2056
e315cd28
AC
2057
2058 base_vha = qla2x00_create_host(sht, ha);
2059 if (!base_vha) {
2060 qla_printk(KERN_WARNING, ha,
2061 "[ERROR] Failed to allocate memory for scsi_host\n");
2062
a1541d5a 2063 ret = -ENOMEM;
6e9f21f3 2064 qla2x00_mem_free(ha);
2afa19a9
AC
2065 qla2x00_free_req_que(ha, req);
2066 qla2x00_free_rsp_que(ha, rsp);
e315cd28 2067 goto probe_hw_failed;
1da177e4
LT
2068 }
2069
e315cd28
AC
2070 pci_set_drvdata(pdev, base_vha);
2071
e315cd28 2072 host = base_vha->host;
2afa19a9 2073 base_vha->req = req;
73208dfd
AC
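	/* The extra 128 above the request-ring length is assumed to leave
	 * headroom for internally generated commands. */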
2074 host->can_queue = req->length + 128;
2075 if (IS_QLA2XXX_MIDTYPE(ha))
e315cd28 2076 base_vha->mgmt_svr_loop_id = 10 + base_vha->vp_idx;
73208dfd 2077 else
e315cd28
AC
2078 base_vha->mgmt_svr_loop_id = MANAGEMENT_SERVER +
2079 base_vha->vp_idx;
e315cd28
AC
2080 if (IS_QLA2100(ha))
2081 host->sg_tablesize = 32;
2082 host->max_id = max_id;
2083 host->this_id = 255;
2084 host->cmd_per_lun = 3;
2085 host->unique_id = host->host_no;
2086 host->max_cmd_len = MAX_CMDSZ;
2087 host->max_channel = MAX_BUSES - 1;
2088 host->max_lun = MAX_LUNS;
2089 host->transportt = qla2xxx_transport_template;
9a069e19 2090 sht->vendor_id = (SCSI_NL_VID_TYPE_PCI | PCI_VENDOR_ID_QLOGIC);
e315cd28 2091
73208dfd
AC
2092 /* Set up the irqs */
2093 ret = qla2x00_request_irqs(ha, rsp);
2094 if (ret)
6e9f21f3 2095 goto probe_init_failed;
90a86fc0
JC
2096
2097 pci_save_state(pdev);
2098
73208dfd 2099 /* Alloc arrays of request and response ring ptrs */
7163ea81 2100que_init:
73208dfd
AC
2101 if (!qla2x00_alloc_queues(ha)) {
2102 qla_printk(KERN_WARNING, ha,
2103 "[ERROR] Failed to allocate memory for queue"
2104 " pointers\n");
6e9f21f3 2105 goto probe_init_failed;
73208dfd 2106 }
a9083016 2107
73208dfd
AC
2108 ha->rsp_q_map[0] = rsp;
2109 ha->req_q_map[0] = req;
2afa19a9
AC
2110 rsp->req = req;
2111 req->rsp = rsp;
2112 set_bit(0, ha->req_qid_map);
2113 set_bit(0, ha->rsp_qid_map);
08029990
AV
2114 /* FWI2-capable only. */
2115 req->req_q_in = &ha->iobase->isp24.req_q_in;
2116 req->req_q_out = &ha->iobase->isp24.req_q_out;
2117 rsp->rsp_q_in = &ha->iobase->isp24.rsp_q_in;
2118 rsp->rsp_q_out = &ha->iobase->isp24.rsp_q_out;
17d98630 2119 if (ha->mqenable) {
08029990
AV
2120 req->req_q_in = &ha->mqiobase->isp25mq.req_q_in;
2121 req->req_q_out = &ha->mqiobase->isp25mq.req_q_out;
2122 rsp->rsp_q_in = &ha->mqiobase->isp25mq.rsp_q_in;
2123 rsp->rsp_q_out = &ha->mqiobase->isp25mq.rsp_q_out;
17d98630
AC
2124 }
2125
a9083016
GM
2126 if (IS_QLA82XX(ha)) {
2127 req->req_q_out = &ha->iobase->isp82.req_q_out[0];
2128 rsp->rsp_q_in = &ha->iobase->isp82.rsp_q_in[0];
2129 rsp->rsp_q_out = &ha->iobase->isp82.rsp_q_out[0];
2130 }
2131
e315cd28 2132 if (qla2x00_initialize_adapter(base_vha)) {
1da177e4
LT
2133 qla_printk(KERN_WARNING, ha,
2134 "Failed to initialize adapter\n");
2135
2136 DEBUG2(printk("scsi(%ld): Failed to initialize adapter - "
2137 "Adapter flags %x.\n",
e315cd28 2138 base_vha->host_no, base_vha->device_flags));
1da177e4 2139
a9083016
GM
2140 if (IS_QLA82XX(ha)) {
2141 qla82xx_idc_lock(ha);
2142 qla82xx_wr_32(ha, QLA82XX_CRB_DEV_STATE,
2143 QLA82XX_DEV_FAILED);
2144 qla82xx_idc_unlock(ha);
2145 qla_printk(KERN_INFO, ha, "HW State: FAILED\n");
2146 }
2147
a1541d5a 2148 ret = -ENODEV;
1da177e4
LT
2149 goto probe_failed;
2150 }
2151
7163ea81
AC
2152 if (ha->mqenable) {
2153 if (qla25xx_setup_mode(base_vha)) {
68ca949c
AC
2154 qla_printk(KERN_WARNING, ha,
2155 "Can't create queues, falling back to single"
2156 " queue mode\n");
7163ea81
AC
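			/* Re-run queue setup so the adapter keeps working
			 * in single-queue mode. */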
2157 goto que_init;
2158 }
2159 }
68ca949c 2160
cbc8eb67
AV
2161 if (ha->flags.running_gold_fw)
2162 goto skip_dpc;
2163
1da177e4
LT
2164 /*
2165	 * Start up the kernel thread for this host adapter
2166 */
39a11240 2167 ha->dpc_thread = kthread_create(qla2x00_do_dpc, ha,
e315cd28 2168 "%s_dpc", base_vha->host_str);
39a11240 2169 if (IS_ERR(ha->dpc_thread)) {
1da177e4
LT
2170 qla_printk(KERN_WARNING, ha,
2171 "Unable to start DPC thread!\n");
39a11240 2172 ret = PTR_ERR(ha->dpc_thread);
1da177e4
LT
2173 goto probe_failed;
2174 }
1da177e4 2175
cbc8eb67 2176skip_dpc:
e315cd28
AC
2177 list_add_tail(&base_vha->list, &ha->vp_list);
2178 base_vha->host->irq = ha->pdev->irq;
1da177e4
LT
2179
2180	/* Initialize the timer. */
e315cd28 2181 qla2x00_start_timer(base_vha, qla2x00_timer, WATCH_INTERVAL);
1da177e4
LT
2182
2183 DEBUG2(printk("DEBUG: detect hba %ld at address = %p\n",
e315cd28 2184 base_vha->host_no, ha));
d19044c3 2185
a9083016
GM
2186 ha->isp_ops->enable_intrs(ha);
2187
a1541d5a
AV
2188 ret = scsi_add_host(host, &pdev->dev);
2189 if (ret)
2190 goto probe_failed;
2191
1486400f
MR
2192 base_vha->flags.init_done = 1;
2193 base_vha->flags.online = 1;
2194
1e99e33a
AV
2195 scsi_scan_host(host);
2196
e315cd28 2197 qla2x00_alloc_sysfs_attr(base_vha);
a1541d5a 2198
e315cd28 2199 qla2x00_init_host_attr(base_vha);
a1541d5a 2200
e315cd28 2201 qla2x00_dfs_setup(base_vha);
df613b96 2202
1da177e4
LT
2203 qla_printk(KERN_INFO, ha, "\n"
2204 " QLogic Fibre Channel HBA Driver: %s\n"
2205 " QLogic %s - %s\n"
5433383e
AV
2206 " ISP%04X: %s @ %s hdma%c, host#=%ld, fw=%s\n",
2207 qla2x00_version_str, ha->model_number,
e315cd28
AC
2208 ha->model_desc ? ha->model_desc : "", pdev->device,
2209 ha->isp_ops->pci_info_str(base_vha, pci_info), pci_name(pdev),
2210 ha->flags.enable_64bit_addressing ? '+' : '-', base_vha->host_no,
2211 ha->isp_ops->fw_version_str(base_vha, fw_str));
1da177e4 2212
1da177e4
LT
2213 return 0;
2214
6e9f21f3 2215probe_init_failed:
2afa19a9
AC
2216 qla2x00_free_req_que(ha, req);
2217 qla2x00_free_rsp_que(ha, rsp);
2218 ha->max_req_queues = ha->max_rsp_queues = 0;
6e9f21f3 2219
1da177e4 2220probe_failed:
b9978769
AV
2221 if (base_vha->timer_active)
2222 qla2x00_stop_timer(base_vha);
2223 base_vha->flags.online = 0;
2224 if (ha->dpc_thread) {
2225 struct task_struct *t = ha->dpc_thread;
2226
2227 ha->dpc_thread = NULL;
2228 kthread_stop(t);
2229 }
2230
e315cd28 2231 qla2x00_free_device(base_vha);
1da177e4 2232
e315cd28 2233 scsi_host_put(base_vha->host);
1da177e4 2234
e315cd28 2235probe_hw_failed:
a9083016
GM
2236 if (IS_QLA82XX(ha)) {
2237 qla82xx_idc_lock(ha);
2238 qla82xx_clear_drv_active(ha);
2239 qla82xx_idc_unlock(ha);
2240 iounmap((device_reg_t __iomem *)ha->nx_pcibase);
2241 if (!ql2xdbwr)
2242 iounmap((device_reg_t __iomem *)ha->nxdb_wr_ptr);
2243 } else {
2244 if (ha->iobase)
2245 iounmap(ha->iobase);
2246 }
e315cd28
AC
2247 pci_release_selected_regions(ha->pdev, ha->bars);
2248 kfree(ha);
2249 ha = NULL;
1da177e4 2250
a1541d5a 2251probe_out:
e315cd28 2252 pci_disable_device(pdev);
a1541d5a 2253 return ret;
1da177e4 2254}
1da177e4 2255
4c993f76 2256static void
7ee61397 2257qla2x00_remove_one(struct pci_dev *pdev)
1da177e4 2258{
e315cd28
AC
2259 scsi_qla_host_t *base_vha, *vha, *temp;
2260 struct qla_hw_data *ha;
2261
2262 base_vha = pci_get_drvdata(pdev);
2263 ha = base_vha->hw;
2264
2265 list_for_each_entry_safe(vha, temp, &ha->vp_list, list) {
2266 if (vha && vha->fc_vport)
2267 fc_vport_terminate(vha->fc_vport);
2268 }
1da177e4 2269
e315cd28 2270 set_bit(UNLOADING, &base_vha->dpc_flags);
1da177e4 2271
b9978769
AV
2272 qla2x00_abort_all_cmds(base_vha, DID_NO_CONNECT << 16);
2273
e315cd28 2274 qla2x00_dfs_remove(base_vha);
c795c1e4 2275
e315cd28 2276 qla84xx_put_chip(base_vha);
c795c1e4 2277
b9978769
AV
2278 /* Disable timer */
2279 if (base_vha->timer_active)
2280 qla2x00_stop_timer(base_vha);
2281
2282 base_vha->flags.online = 0;
2283
68ca949c
AC
2284 /* Flush the work queue and remove it */
2285 if (ha->wq) {
2286 flush_workqueue(ha->wq);
2287 destroy_workqueue(ha->wq);
2288 ha->wq = NULL;
2289 }
2290
b9978769
AV
2291 /* Kill the kernel thread for this host */
2292 if (ha->dpc_thread) {
2293 struct task_struct *t = ha->dpc_thread;
2294
2295 /*
2296 * qla2xxx_wake_dpc checks for ->dpc_thread
2297 * so we need to zero it out.
2298 */
2299 ha->dpc_thread = NULL;
2300 kthread_stop(t);
2301 }
2302
e315cd28 2303 qla2x00_free_sysfs_attr(base_vha);
df613b96 2304
e315cd28 2305 fc_remove_host(base_vha->host);
4d4df193 2306
e315cd28 2307 scsi_remove_host(base_vha->host);
1da177e4 2308
e315cd28 2309 qla2x00_free_device(base_vha);
bdf79621 2310
e315cd28 2311 scsi_host_put(base_vha->host);
1da177e4 2312
a9083016
GM
2313 if (IS_QLA82XX(ha)) {
2314 iounmap((device_reg_t __iomem *)ha->nx_pcibase);
2315 if (!ql2xdbwr)
2316 iounmap((device_reg_t __iomem *)ha->nxdb_wr_ptr);
2317 } else {
2318 if (ha->iobase)
2319 iounmap(ha->iobase);
1da177e4 2320
a9083016
GM
2321 if (ha->mqiobase)
2322 iounmap(ha->mqiobase);
2323 }
73208dfd 2324
e315cd28
AC
2325 pci_release_selected_regions(ha->pdev, ha->bars);
2326 kfree(ha);
2327 ha = NULL;
1da177e4 2328
90a86fc0
JC
2329 pci_disable_pcie_error_reporting(pdev);
2330
665db93b 2331 pci_disable_device(pdev);
1da177e4
LT
2332 pci_set_drvdata(pdev, NULL);
2333}
1da177e4
LT
2334
2335static void
e315cd28 2336qla2x00_free_device(scsi_qla_host_t *vha)
1da177e4 2337{
e315cd28 2338 struct qla_hw_data *ha = vha->hw;
1da177e4 2339
85880801
AV
2340 qla2x00_abort_all_cmds(vha, DID_NO_CONNECT << 16);
2341
2342 /* Disable timer */
2343 if (vha->timer_active)
2344 qla2x00_stop_timer(vha);
2345
2346 /* Kill the kernel thread for this host */
2347 if (ha->dpc_thread) {
2348 struct task_struct *t = ha->dpc_thread;
2349
2350 /*
2351 * qla2xxx_wake_dpc checks for ->dpc_thread
2352 * so we need to zero it out.
2353 */
2354 ha->dpc_thread = NULL;
2355 kthread_stop(t);
2356 }
2357
2afa19a9
AC
2358 qla25xx_delete_queues(vha);
2359
df613b96 2360 if (ha->flags.fce_enabled)
e315cd28 2361 qla2x00_disable_fce_trace(vha, NULL, NULL);
df613b96 2362
a7a167bf 2363 if (ha->eft)
e315cd28 2364 qla2x00_disable_eft_trace(vha);
a7a167bf 2365
f6ef3b18 2366 /* Stop currently executing firmware. */
e315cd28 2367 qla2x00_try_to_stop_firmware(vha);
1da177e4 2368
85880801
AV
2369 vha->flags.online = 0;
2370
f6ef3b18 2371 /* turn-off interrupts on the card */
a9083016
GM
2372 if (ha->interrupts_on) {
2373 vha->flags.init_done = 0;
fd34f556 2374 ha->isp_ops->disable_intrs(ha);
a9083016 2375 }
f6ef3b18 2376
e315cd28 2377 qla2x00_free_irqs(vha);
1da177e4 2378
e315cd28 2379 qla2x00_mem_free(ha);
73208dfd
AC
2380
2381 qla2x00_free_queues(ha);
1da177e4
LT
2382}
2383
d97994dc 2384static inline void
e315cd28 2385qla2x00_schedule_rport_del(struct scsi_qla_host *vha, fc_port_t *fcport,
d97994dc
AV
2386 int defer)
2387{
d97994dc 2388 struct fc_rport *rport;
67becc00 2389 scsi_qla_host_t *base_vha;
d97994dc
AV
2390
2391 if (!fcport->rport)
2392 return;
2393
2394 rport = fcport->rport;
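	/*
	 * When deferred, stash the rport and let the base port's DPC thread
	 * perform the actual fc_remote_port_delete() instead of doing it in
	 * the caller's context.
	 */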
2395 if (defer) {
67becc00 2396 base_vha = pci_get_drvdata(vha->hw->pdev);
e315cd28 2397 spin_lock_irq(vha->host->host_lock);
d97994dc 2398 fcport->drport = rport;
e315cd28 2399 spin_unlock_irq(vha->host->host_lock);
67becc00
AV
2400 set_bit(FCPORT_UPDATE_NEEDED, &base_vha->dpc_flags);
2401 qla2xxx_wake_dpc(base_vha);
5f3a9a20 2402 } else
d97994dc 2403 fc_remote_port_delete(rport);
d97994dc
AV
2404}
2405
1da177e4
LT
2406/*
2407 * qla2x00_mark_device_lost - Updates fcport state when the device goes offline.
2408 *
2409 * Input: ha = adapter block pointer. fcport = port structure pointer.
2410 *
2411 * Return: None.
2412 *
2413 * Context:
2414 */
e315cd28 2415void qla2x00_mark_device_lost(scsi_qla_host_t *vha, fc_port_t *fcport,
d97994dc 2416 int do_login, int defer)
1da177e4 2417{
2c3dfe3f 2418 if (atomic_read(&fcport->state) == FCS_ONLINE &&
e315cd28
AC
2419 vha->vp_idx == fcport->vp_idx) {
2420 atomic_set(&fcport->state, FCS_DEVICE_LOST);
2421 qla2x00_schedule_rport_del(vha, fcport, defer);
2422 }
fa2a1ce5 2423 /*
1da177e4
LT
2424 * We may need to retry the login, so don't change the state of the
2425 * port but do the retries.
2426 */
2427 if (atomic_read(&fcport->state) != FCS_DEVICE_DEAD)
2428 atomic_set(&fcport->state, FCS_DEVICE_LOST);
2429
2430 if (!do_login)
2431 return;
2432
2433 if (fcport->login_retry == 0) {
e315cd28
AC
2434 fcport->login_retry = vha->hw->login_retry_count;
2435 set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
1da177e4
LT
2436
2437 DEBUG(printk("scsi(%ld): Port login retry: "
2438 "%02x%02x%02x%02x%02x%02x%02x%02x, "
2439 "id = 0x%04x retry cnt=%d\n",
e315cd28 2440 vha->host_no,
1da177e4
LT
2441 fcport->port_name[0],
2442 fcport->port_name[1],
2443 fcport->port_name[2],
2444 fcport->port_name[3],
2445 fcport->port_name[4],
2446 fcport->port_name[5],
2447 fcport->port_name[6],
2448 fcport->port_name[7],
2449 fcport->loop_id,
2450 fcport->login_retry));
2451 }
2452}
2453
2454/*
2455 * qla2x00_mark_all_devices_lost
2456 * Updates fcport state when device goes offline.
2457 *
2458 * Input:
2459 * ha = adapter block pointer.
2460 * fcport = port structure pointer.
2461 *
2462 * Return:
2463 * None.
2464 *
2465 * Context:
2466 */
2467void
e315cd28 2468qla2x00_mark_all_devices_lost(scsi_qla_host_t *vha, int defer)
1da177e4
LT
2469{
2470 fc_port_t *fcport;
2471
e315cd28 2472 list_for_each_entry(fcport, &vha->vp_fcports, list) {
0d6e61bc 2473 if (vha->vp_idx != 0 && vha->vp_idx != fcport->vp_idx)
1da177e4 2474 continue;
0d6e61bc 2475
1da177e4
LT
2476 /*
2477 * No point in marking the device as lost, if the device is
2478 * already DEAD.
2479 */
2480 if (atomic_read(&fcport->state) == FCS_DEVICE_DEAD)
2481 continue;
e315cd28 2482 if (atomic_read(&fcport->state) == FCS_ONLINE) {
0d6e61bc
AV
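			/* Deferred deletions are always scheduled; immediate
			 * ones only for ports owned by this vport. */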
2483 if (defer)
2484 qla2x00_schedule_rport_del(vha, fcport, defer);
2485 else if (vha->vp_idx == fcport->vp_idx)
2486 qla2x00_schedule_rport_del(vha, fcport, defer);
2487 }
2488 atomic_set(&fcport->state, FCS_DEVICE_LOST);
1da177e4
LT
2489 }
2490}
2491
2492/*
2493* qla2x00_mem_alloc
2494* Allocates adapter memory.
2495*
2496* Returns:
2497* 1 = success.
e8711085 2498* -ENOMEM = failure.
1da177e4 2499*/
e8711085 2500static int
73208dfd
AC
2501qla2x00_mem_alloc(struct qla_hw_data *ha, uint16_t req_len, uint16_t rsp_len,
2502 struct req_que **req, struct rsp_que **rsp)
1da177e4
LT
2503{
2504 char name[16];
1da177e4 2505
e8711085 2506 ha->init_cb = dma_alloc_coherent(&ha->pdev->dev, ha->init_cb_size,
e315cd28 2507 &ha->init_cb_dma, GFP_KERNEL);
e8711085 2508 if (!ha->init_cb)
e315cd28 2509 goto fail;
e8711085 2510
e315cd28
AC
2511 ha->gid_list = dma_alloc_coherent(&ha->pdev->dev, GID_LIST_SIZE,
2512 &ha->gid_list_dma, GFP_KERNEL);
2513 if (!ha->gid_list)
e8711085 2514 goto fail_free_init_cb;
1da177e4 2515
e8711085
AV
2516 ha->srb_mempool = mempool_create_slab_pool(SRB_MIN_REQ, srb_cachep);
2517 if (!ha->srb_mempool)
e315cd28 2518 goto fail_free_gid_list;
e8711085 2519
a9083016
GM
2520 if (IS_QLA82XX(ha)) {
2521 /* Allocate cache for CT6 Ctx. */
2522 if (!ctx_cachep) {
2523 ctx_cachep = kmem_cache_create("qla2xxx_ctx",
2524 sizeof(struct ct6_dsd), 0,
2525 SLAB_HWCACHE_ALIGN, NULL);
2526 if (!ctx_cachep)
2527 goto fail_free_gid_list;
2528 }
2529 ha->ctx_mempool = mempool_create_slab_pool(SRB_MIN_REQ,
2530 ctx_cachep);
2531 if (!ha->ctx_mempool)
2532 goto fail_free_srb_mempool;
2533 }
2534
e8711085
AV
2535 /* Get memory for cached NVRAM */
2536 ha->nvram = kzalloc(MAX_NVRAM_SIZE, GFP_KERNEL);
2537 if (!ha->nvram)
a9083016 2538 goto fail_free_ctx_mempool;
e8711085 2539
e315cd28
AC
2540 snprintf(name, sizeof(name), "%s_%d", QLA2XXX_DRIVER_NAME,
2541 ha->pdev->device);
2542 ha->s_dma_pool = dma_pool_create(name, &ha->pdev->dev,
2543 DMA_POOL_SIZE, 8, 0);
2544 if (!ha->s_dma_pool)
2545 goto fail_free_nvram;
2546
a9083016
GM
2547 if (IS_QLA82XX(ha)) {
2548 ha->dl_dma_pool = dma_pool_create(name, &ha->pdev->dev,
2549 DSD_LIST_DMA_POOL_SIZE, 8, 0);
2550 if (!ha->dl_dma_pool) {
2551 qla_printk(KERN_WARNING, ha,
2552 "Memory Allocation failed - dl_dma_pool\n");
2553 goto fail_s_dma_pool;
2554 }
2555
2556 ha->fcp_cmnd_dma_pool = dma_pool_create(name, &ha->pdev->dev,
2557 FCP_CMND_DMA_POOL_SIZE, 8, 0);
2558 if (!ha->fcp_cmnd_dma_pool) {
2559 qla_printk(KERN_WARNING, ha,
2560 "Memory Allocation failed - fcp_cmnd_dma_pool\n");
2561 goto fail_dl_dma_pool;
2562 }
2563 }
2564
e8711085
AV
2565 /* Allocate memory for SNS commands */
2566 if (IS_QLA2100(ha) || IS_QLA2200(ha)) {
e315cd28 2567 /* Get consistent memory allocated for SNS commands */
e8711085 2568 ha->sns_cmd = dma_alloc_coherent(&ha->pdev->dev,
e315cd28 2569 sizeof(struct sns_cmd_pkt), &ha->sns_cmd_dma, GFP_KERNEL);
e8711085 2570 if (!ha->sns_cmd)
e315cd28 2571 goto fail_dma_pool;
e8711085 2572 } else {
e315cd28 2573 /* Get consistent memory allocated for MS IOCB */
e8711085 2574 ha->ms_iocb = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL,
e315cd28 2575 &ha->ms_iocb_dma);
e8711085 2576 if (!ha->ms_iocb)
e315cd28
AC
2577 goto fail_dma_pool;
2578 /* Get consistent memory allocated for CT SNS commands */
e8711085 2579 ha->ct_sns = dma_alloc_coherent(&ha->pdev->dev,
e315cd28 2580 sizeof(struct ct_sns_pkt), &ha->ct_sns_dma, GFP_KERNEL);
e8711085
AV
2581 if (!ha->ct_sns)
2582 goto fail_free_ms_iocb;
1da177e4
LT
2583 }
2584
e315cd28 2585 /* Allocate memory for request ring */
73208dfd
AC
2586 *req = kzalloc(sizeof(struct req_que), GFP_KERNEL);
2587 if (!*req) {
e315cd28
AC
2588 DEBUG(printk("Unable to allocate memory for req\n"));
2589 goto fail_req;
2590 }
73208dfd
AC
2591 (*req)->length = req_len;
2592 (*req)->ring = dma_alloc_coherent(&ha->pdev->dev,
2593 ((*req)->length + 1) * sizeof(request_t),
2594 &(*req)->dma, GFP_KERNEL);
2595 if (!(*req)->ring) {
e315cd28
AC
2596 DEBUG(printk("Unable to allocate memory for req_ring\n"));
2597 goto fail_req_ring;
2598 }
2599 /* Allocate memory for response ring */
73208dfd
AC
2600 *rsp = kzalloc(sizeof(struct rsp_que), GFP_KERNEL);
2601 if (!*rsp) {
2602 qla_printk(KERN_WARNING, ha,
2603 "Unable to allocate memory for rsp\n");
e315cd28
AC
2604 goto fail_rsp;
2605 }
73208dfd
AC
2606 (*rsp)->hw = ha;
2607 (*rsp)->length = rsp_len;
2608 (*rsp)->ring = dma_alloc_coherent(&ha->pdev->dev,
2609 ((*rsp)->length + 1) * sizeof(response_t),
2610 &(*rsp)->dma, GFP_KERNEL);
2611 if (!(*rsp)->ring) {
2612 qla_printk(KERN_WARNING, ha,
2613 "Unable to allocate memory for rsp_ring\n");
e315cd28
AC
2614 goto fail_rsp_ring;
2615 }
73208dfd
AC
2616 (*req)->rsp = *rsp;
2617 (*rsp)->req = *req;
2618 /* Allocate memory for NVRAM data for vports */
2619 if (ha->nvram_npiv_size) {
2620 ha->npiv_info = kzalloc(sizeof(struct qla_npiv_entry) *
2621 ha->nvram_npiv_size, GFP_KERNEL);
2622 if (!ha->npiv_info) {
2623 qla_printk(KERN_WARNING, ha,
2624 "Unable to allocate memory for npiv info\n");
2625 goto fail_npiv_info;
2626 }
2627 } else
2628 ha->npiv_info = NULL;
e8711085 2629
b64b0e8f 2630 /* Get consistent memory allocated for EX-INIT-CB. */
a9083016 2631 if (IS_QLA8XXX_TYPE(ha)) {
b64b0e8f
AV
2632 ha->ex_init_cb = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL,
2633 &ha->ex_init_cb_dma);
2634 if (!ha->ex_init_cb)
2635 goto fail_ex_init_cb;
2636 }
2637
a9083016
GM
2638 INIT_LIST_HEAD(&ha->gbl_dsd_list);
2639
e315cd28
AC
2640 INIT_LIST_HEAD(&ha->vp_list);
2641 return 1;
2642
b64b0e8f
AV
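/* The fail_* labels below unwind the allocations above in reverse order. */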
2643fail_ex_init_cb:
2644 kfree(ha->npiv_info);
73208dfd
AC
2645fail_npiv_info:
2646 dma_free_coherent(&ha->pdev->dev, ((*rsp)->length + 1) *
2647 sizeof(response_t), (*rsp)->ring, (*rsp)->dma);
2648 (*rsp)->ring = NULL;
2649 (*rsp)->dma = 0;
e315cd28 2650fail_rsp_ring:
73208dfd 2651 kfree(*rsp);
e315cd28 2652fail_rsp:
73208dfd
AC
2653 dma_free_coherent(&ha->pdev->dev, ((*req)->length + 1) *
2654 sizeof(request_t), (*req)->ring, (*req)->dma);
2655 (*req)->ring = NULL;
2656 (*req)->dma = 0;
e315cd28 2657fail_req_ring:
73208dfd 2658 kfree(*req);
e315cd28
AC
2659fail_req:
2660 dma_free_coherent(&ha->pdev->dev, sizeof(struct ct_sns_pkt),
2661 ha->ct_sns, ha->ct_sns_dma);
2662 ha->ct_sns = NULL;
2663 ha->ct_sns_dma = 0;
e8711085
AV
2664fail_free_ms_iocb:
2665 dma_pool_free(ha->s_dma_pool, ha->ms_iocb, ha->ms_iocb_dma);
2666 ha->ms_iocb = NULL;
2667 ha->ms_iocb_dma = 0;
e315cd28 2668fail_dma_pool:
a9083016
GM
2669 if (IS_QLA82XX(ha)) {
2670 dma_pool_destroy(ha->fcp_cmnd_dma_pool);
2671 ha->fcp_cmnd_dma_pool = NULL;
2672 }
2673fail_dl_dma_pool:
2674 if (IS_QLA82XX(ha)) {
2675 dma_pool_destroy(ha->dl_dma_pool);
2676 ha->dl_dma_pool = NULL;
2677 }
2678fail_s_dma_pool:
e315cd28
AC
2679 dma_pool_destroy(ha->s_dma_pool);
2680 ha->s_dma_pool = NULL;
e8711085
AV
2681fail_free_nvram:
2682 kfree(ha->nvram);
2683 ha->nvram = NULL;
a9083016
GM
2684fail_free_ctx_mempool:
2685 mempool_destroy(ha->ctx_mempool);
2686 ha->ctx_mempool = NULL;
e8711085
AV
2687fail_free_srb_mempool:
2688 mempool_destroy(ha->srb_mempool);
2689 ha->srb_mempool = NULL;
e8711085
AV
2690fail_free_gid_list:
2691 dma_free_coherent(&ha->pdev->dev, GID_LIST_SIZE, ha->gid_list,
e315cd28 2692 ha->gid_list_dma);
e8711085
AV
2693 ha->gid_list = NULL;
2694 ha->gid_list_dma = 0;
e315cd28
AC
2695fail_free_init_cb:
2696 dma_free_coherent(&ha->pdev->dev, ha->init_cb_size, ha->init_cb,
2697 ha->init_cb_dma);
2698 ha->init_cb = NULL;
2699 ha->init_cb_dma = 0;
e8711085 2700fail:
e315cd28 2701 DEBUG(printk("%s: Memory allocation failure\n", __func__));
e8711085 2702 return -ENOMEM;
1da177e4
LT
2703}
2704
2705/*
2706* qla2x00_mem_free
2707* Frees all adapter allocated memory.
2708*
2709* Input:
2710* ha = adapter block pointer.
2711*/
a824ebb3 2712static void
e315cd28 2713qla2x00_mem_free(struct qla_hw_data *ha)
1da177e4 2714{
e8711085
AV
2715 if (ha->srb_mempool)
2716 mempool_destroy(ha->srb_mempool);
1da177e4 2717
df613b96
AV
2718 if (ha->fce)
2719 dma_free_coherent(&ha->pdev->dev, FCE_SIZE, ha->fce,
e315cd28 2720 ha->fce_dma);
df613b96 2721
a7a167bf
AV
2722 if (ha->fw_dump) {
2723 if (ha->eft)
2724 dma_free_coherent(&ha->pdev->dev,
e315cd28 2725 ntohl(ha->fw_dump->eft_size), ha->eft, ha->eft_dma);
a7a167bf
AV
2726 vfree(ha->fw_dump);
2727 }
2728
11bbc1d8
AV
2729 if (ha->dcbx_tlv)
2730 dma_free_coherent(&ha->pdev->dev, DCBX_TLV_DATA_SIZE,
2731 ha->dcbx_tlv, ha->dcbx_tlv_dma);
2732
ce0423f4
AV
2733 if (ha->xgmac_data)
2734 dma_free_coherent(&ha->pdev->dev, XGMAC_DATA_SIZE,
2735 ha->xgmac_data, ha->xgmac_data_dma);
2736
1da177e4
LT
2737 if (ha->sns_cmd)
2738 dma_free_coherent(&ha->pdev->dev, sizeof(struct sns_cmd_pkt),
e315cd28 2739 ha->sns_cmd, ha->sns_cmd_dma);
1da177e4
LT
2740
2741 if (ha->ct_sns)
2742 dma_free_coherent(&ha->pdev->dev, sizeof(struct ct_sns_pkt),
e315cd28 2743 ha->ct_sns, ha->ct_sns_dma);
1da177e4 2744
88729e53
AV
2745 if (ha->sfp_data)
2746 dma_pool_free(ha->s_dma_pool, ha->sfp_data, ha->sfp_data_dma);
2747
ad0ecd61
JC
2748 if (ha->edc_data)
2749 dma_pool_free(ha->s_dma_pool, ha->edc_data, ha->edc_data_dma);
2750
1da177e4
LT
2751 if (ha->ms_iocb)
2752 dma_pool_free(ha->s_dma_pool, ha->ms_iocb, ha->ms_iocb_dma);
2753
b64b0e8f 2754 if (ha->ex_init_cb)
a9083016
GM
2755 dma_pool_free(ha->s_dma_pool,
2756 ha->ex_init_cb, ha->ex_init_cb_dma);
b64b0e8f 2757
1da177e4
LT
2758 if (ha->s_dma_pool)
2759 dma_pool_destroy(ha->s_dma_pool);
2760
1da177e4
LT
2761 if (ha->gid_list)
2762 dma_free_coherent(&ha->pdev->dev, GID_LIST_SIZE, ha->gid_list,
e315cd28 2763 ha->gid_list_dma);
1da177e4 2764
a9083016
GM
2765 if (IS_QLA82XX(ha)) {
2766 if (!list_empty(&ha->gbl_dsd_list)) {
2767 struct dsd_dma *dsd_ptr, *tdsd_ptr;
2768
2769 /* clean up allocated prev pool */
2770 list_for_each_entry_safe(dsd_ptr,
2771 tdsd_ptr, &ha->gbl_dsd_list, list) {
2772 dma_pool_free(ha->dl_dma_pool,
2773 dsd_ptr->dsd_addr, dsd_ptr->dsd_list_dma);
2774 list_del(&dsd_ptr->list);
2775 kfree(dsd_ptr);
2776 }
2777 }
2778 }
2779
2780 if (ha->dl_dma_pool)
2781 dma_pool_destroy(ha->dl_dma_pool);
2782
2783 if (ha->fcp_cmnd_dma_pool)
2784 dma_pool_destroy(ha->fcp_cmnd_dma_pool);
2785
2786 if (ha->ctx_mempool)
2787 mempool_destroy(ha->ctx_mempool);
2788
e315cd28
AC
2789 if (ha->init_cb)
2790 dma_free_coherent(&ha->pdev->dev, ha->init_cb_size,
a9083016 2791 ha->init_cb, ha->init_cb_dma);
e315cd28
AC
2792 vfree(ha->optrom_buffer);
2793 kfree(ha->nvram);
73208dfd 2794 kfree(ha->npiv_info);
1da177e4 2795
e8711085 2796 ha->srb_mempool = NULL;
a9083016 2797 ha->ctx_mempool = NULL;
a7a167bf
AV
2798 ha->eft = NULL;
2799 ha->eft_dma = 0;
1da177e4
LT
2800 ha->sns_cmd = NULL;
2801 ha->sns_cmd_dma = 0;
2802 ha->ct_sns = NULL;
2803 ha->ct_sns_dma = 0;
2804 ha->ms_iocb = NULL;
2805 ha->ms_iocb_dma = 0;
1da177e4
LT
2806 ha->init_cb = NULL;
2807 ha->init_cb_dma = 0;
b64b0e8f
AV
2808 ha->ex_init_cb = NULL;
2809 ha->ex_init_cb_dma = 0;
1da177e4
LT
2810
2811 ha->s_dma_pool = NULL;
a9083016
GM
2812 ha->dl_dma_pool = NULL;
2813 ha->fcp_cmnd_dma_pool = NULL;
1da177e4 2814
1da177e4
LT
2815 ha->gid_list = NULL;
2816 ha->gid_list_dma = 0;
2817
e315cd28
AC
2818 ha->fw_dump = NULL;
2819 ha->fw_dumped = 0;
2820 ha->fw_dump_reading = 0;
e315cd28 2821}
1da177e4 2822
e315cd28
AC
2823struct scsi_qla_host *qla2x00_create_host(struct scsi_host_template *sht,
2824 struct qla_hw_data *ha)
2825{
2826 struct Scsi_Host *host;
2827 struct scsi_qla_host *vha = NULL;
854165f4 2828
e315cd28
AC
2829 host = scsi_host_alloc(sht, sizeof(scsi_qla_host_t));
2830 if (host == NULL) {
2831 printk(KERN_WARNING
2832 "qla2xxx: Couldn't allocate host from scsi layer!\n");
2833 goto fail;
2834 }
2835
2836 /* Clear our data area */
2837 vha = shost_priv(host);
2838 memset(vha, 0, sizeof(scsi_qla_host_t));
2839
2840 vha->host = host;
2841 vha->host_no = host->host_no;
2842 vha->hw = ha;
2843
2844 INIT_LIST_HEAD(&vha->vp_fcports);
2845 INIT_LIST_HEAD(&vha->work_list);
2846 INIT_LIST_HEAD(&vha->list);
2847
f999f4c1
AV
2848 spin_lock_init(&vha->work_lock);
2849
e315cd28
AC
2850 sprintf(vha->host_str, "%s_%ld", QLA2XXX_DRIVER_NAME, vha->host_no);
2851 return vha;
2852
2853fail:
2854 return vha;
1da177e4
LT
2855}
2856
01ef66bb 2857static struct qla_work_evt *
f999f4c1 2858qla2x00_alloc_work(struct scsi_qla_host *vha, enum qla_work_type type)
0971de7f
AV
2859{
2860 struct qla_work_evt *e;
2861
f999f4c1 2862 e = kzalloc(sizeof(struct qla_work_evt), GFP_ATOMIC);
0971de7f
AV
2863 if (!e)
2864 return NULL;
2865
2866 INIT_LIST_HEAD(&e->list);
2867 e->type = type;
2868 e->flags = QLA_EVT_FLAG_FREE;
2869 return e;
2870}
2871
01ef66bb 2872static int
f999f4c1 2873qla2x00_post_work(struct scsi_qla_host *vha, struct qla_work_evt *e)
0971de7f 2874{
f999f4c1 2875 unsigned long flags;
0971de7f 2876
f999f4c1 2877 spin_lock_irqsave(&vha->work_lock, flags);
e315cd28 2878 list_add_tail(&e->list, &vha->work_list);
f999f4c1 2879 spin_unlock_irqrestore(&vha->work_lock, flags);
e315cd28 2880 qla2xxx_wake_dpc(vha);
f999f4c1 2881
0971de7f
AV
2882 return QLA_SUCCESS;
2883}
2884
2885int
e315cd28 2886qla2x00_post_aen_work(struct scsi_qla_host *vha, enum fc_host_event_code code,
0971de7f
AV
2887 u32 data)
2888{
2889 struct qla_work_evt *e;
2890
f999f4c1 2891 e = qla2x00_alloc_work(vha, QLA_EVT_AEN);
0971de7f
AV
2892 if (!e)
2893 return QLA_FUNCTION_FAILED;
2894
2895 e->u.aen.code = code;
2896 e->u.aen.data = data;
f999f4c1 2897 return qla2x00_post_work(vha, e);
0971de7f
AV
2898}
2899
8a659571
AV
2900int
2901qla2x00_post_idc_ack_work(struct scsi_qla_host *vha, uint16_t *mb)
2902{
2903 struct qla_work_evt *e;
2904
f999f4c1 2905 e = qla2x00_alloc_work(vha, QLA_EVT_IDC_ACK);
8a659571
AV
2906 if (!e)
2907 return QLA_FUNCTION_FAILED;
2908
2909 memcpy(e->u.idc_ack.mb, mb, QLA_IDC_ACK_REGS * sizeof(uint16_t));
f999f4c1 2910 return qla2x00_post_work(vha, e);
8a659571
AV
2911}
2912
ac280b67
AV
2913#define qla2x00_post_async_work(name, type) \
2914int qla2x00_post_async_##name##_work( \
2915 struct scsi_qla_host *vha, \
2916 fc_port_t *fcport, uint16_t *data) \
2917{ \
2918 struct qla_work_evt *e; \
2919 \
2920 e = qla2x00_alloc_work(vha, type); \
2921 if (!e) \
2922 return QLA_FUNCTION_FAILED; \
2923 \
2924 e->u.logio.fcport = fcport; \
2925 if (data) { \
2926 e->u.logio.data[0] = data[0]; \
2927 e->u.logio.data[1] = data[1]; \
2928 } \
2929 return qla2x00_post_work(vha, e); \
2930}
2931
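/*
 * Each invocation below generates a posting helper; e.g. the "login" line
 * expands to qla2x00_post_async_login_work(vha, fcport, data), which queues
 * a QLA_EVT_ASYNC_LOGIN event for the DPC thread.
 */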
2932qla2x00_post_async_work(login, QLA_EVT_ASYNC_LOGIN);
2933qla2x00_post_async_work(login_done, QLA_EVT_ASYNC_LOGIN_DONE);
2934qla2x00_post_async_work(logout, QLA_EVT_ASYNC_LOGOUT);
2935qla2x00_post_async_work(logout_done, QLA_EVT_ASYNC_LOGOUT_DONE);
2936
3420d36c
AV
2937int
2938qla2x00_post_uevent_work(struct scsi_qla_host *vha, u32 code)
2939{
2940 struct qla_work_evt *e;
2941
2942 e = qla2x00_alloc_work(vha, QLA_EVT_UEVENT);
2943 if (!e)
2944 return QLA_FUNCTION_FAILED;
2945
2946 e->u.uevent.code = code;
2947 return qla2x00_post_work(vha, e);
2948}
2949
2950static void
2951qla2x00_uevent_emit(struct scsi_qla_host *vha, u32 code)
2952{
2953 char event_string[40];
2954 char *envp[] = { event_string, NULL };
2955
2956 switch (code) {
2957 case QLA_UEVENT_CODE_FW_DUMP:
2958 snprintf(event_string, sizeof(event_string), "FW_DUMP=%ld",
2959 vha->host_no);
2960 break;
2961 default:
2962 /* do nothing */
2963 break;
2964 }
2965 kobject_uevent_env(&vha->hw->pdev->dev.kobj, KOBJ_CHANGE, envp);
2966}
2967
ac280b67 2968void
e315cd28 2969qla2x00_do_work(struct scsi_qla_host *vha)
0971de7f 2970{
f999f4c1
AV
2971 struct qla_work_evt *e, *tmp;
2972 unsigned long flags;
2973 LIST_HEAD(work);
0971de7f 2974
f999f4c1
AV
2975 spin_lock_irqsave(&vha->work_lock, flags);
2976 list_splice_init(&vha->work_list, &work);
2977 spin_unlock_irqrestore(&vha->work_lock, flags);
2978
2979 list_for_each_entry_safe(e, tmp, &work, list) {
0971de7f 2980 list_del_init(&e->list);
0971de7f
AV
2981
2982 switch (e->type) {
2983 case QLA_EVT_AEN:
e315cd28 2984 fc_host_post_event(vha->host, fc_get_event_number(),
0971de7f
AV
2985 e->u.aen.code, e->u.aen.data);
2986 break;
8a659571
AV
2987 case QLA_EVT_IDC_ACK:
2988 qla81xx_idc_ack(vha, e->u.idc_ack.mb);
2989 break;
ac280b67
AV
2990 case QLA_EVT_ASYNC_LOGIN:
2991 qla2x00_async_login(vha, e->u.logio.fcport,
2992 e->u.logio.data);
2993 break;
2994 case QLA_EVT_ASYNC_LOGIN_DONE:
2995 qla2x00_async_login_done(vha, e->u.logio.fcport,
2996 e->u.logio.data);
2997 break;
2998 case QLA_EVT_ASYNC_LOGOUT:
2999 qla2x00_async_logout(vha, e->u.logio.fcport);
3000 break;
3001 case QLA_EVT_ASYNC_LOGOUT_DONE:
3002 qla2x00_async_logout_done(vha, e->u.logio.fcport,
3003 e->u.logio.data);
3004 break;
3420d36c
AV
3005 case QLA_EVT_UEVENT:
3006 qla2x00_uevent_emit(vha, e->u.uevent.code);
3007 break;
0971de7f
AV
3008 }
3009 if (e->flags & QLA_EVT_FLAG_FREE)
3010 kfree(e);
e315cd28 3011 }
e315cd28 3012}
f999f4c1 3013
e315cd28
AC
3014/* Relogin to all the fcports of a vport
3015 * Context: dpc thread
3016 */
3017void qla2x00_relogin(struct scsi_qla_host *vha)
3018{
3019 fc_port_t *fcport;
c6b2fca8 3020 int status;
e315cd28
AC
3021 uint16_t next_loopid = 0;
3022 struct qla_hw_data *ha = vha->hw;
ac280b67 3023 uint16_t data[2];
e315cd28
AC
3024
3025 list_for_each_entry(fcport, &vha->vp_fcports, list) {
3026 /*
3027 * If the port is not ONLINE then try to login
3028 * to it if we haven't run out of retries.
3029 */
3030 if (atomic_read(&fcport->state) !=
3031 FCS_ONLINE && fcport->login_retry) {
3032
ac280b67 3033 fcport->login_retry--;
e315cd28 3034 if (fcport->flags & FCF_FABRIC_DEVICE) {
f08b7251 3035 if (fcport->flags & FCF_FCP2_DEVICE)
e315cd28
AC
3036 ha->isp_ops->fabric_logout(vha,
3037 fcport->loop_id,
3038 fcport->d_id.b.domain,
3039 fcport->d_id.b.area,
3040 fcport->d_id.b.al_pa);
3041
ac280b67
AV
3042 if (IS_ALOGIO_CAPABLE(ha)) {
3043 data[0] = 0;
3044 data[1] = QLA_LOGIO_LOGIN_RETRIED;
3045 status = qla2x00_post_async_login_work(
3046 vha, fcport, data);
3047 if (status == QLA_SUCCESS)
3048 continue;
3049 /* Attempt a retry. */
3050 status = 1;
3051 } else
3052 status = qla2x00_fabric_login(vha,
3053 fcport, &next_loopid);
e315cd28
AC
3054 } else
3055 status = qla2x00_local_device_login(vha,
3056 fcport);
3057
e315cd28
AC
3058 if (status == QLA_SUCCESS) {
3059 fcport->old_loop_id = fcport->loop_id;
3060
3061 DEBUG(printk("scsi(%ld): port login OK: logged "
3062 "in ID 0x%x\n", vha->host_no, fcport->loop_id));
3063
3064 qla2x00_update_fcport(vha, fcport);
3065
3066 } else if (status == 1) {
3067 set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
3068 /* retry the login again */
3069 DEBUG(printk("scsi(%ld): Retrying"
3070 " %d login again loop_id 0x%x\n",
3071 vha->host_no, fcport->login_retry,
3072 fcport->loop_id));
3073 } else {
3074 fcport->login_retry = 0;
3075 }
3076
3077 if (fcport->login_retry == 0 && status != QLA_SUCCESS)
3078 fcport->loop_id = FC_NO_LOOP_ID;
3079 }
3080 if (test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags))
3081 break;
0971de7f 3082 }
0971de7f
AV
3083}
3084
1da177e4
LT
3085/**************************************************************************
3086* qla2x00_do_dpc
3087* This kernel thread is a task that is scheduled by the interrupt handler
3088* to perform the background processing for interrupts.
3089*
3090* Notes:
3091* This task always runs in the context of a kernel thread. It
3092* is kicked off by the driver's detect code and starts up
3093* one instance per adapter. It immediately goes to sleep and waits for
3094* some fibre event. When either the interrupt handler or
3095* the timer routine detects an event, it sets one of the task
3096* bits and then wakes us up.
3097**************************************************************************/
3098static int
3099qla2x00_do_dpc(void *data)
3100{
2c3dfe3f 3101 int rval;
e315cd28
AC
3102 scsi_qla_host_t *base_vha;
3103 struct qla_hw_data *ha;
1da177e4 3104
e315cd28
AC
3105 ha = (struct qla_hw_data *)data;
3106 base_vha = pci_get_drvdata(ha->pdev);
1da177e4 3107
1da177e4
LT
3108 set_user_nice(current, -20);
3109
39a11240 3110 while (!kthread_should_stop()) {
1da177e4
LT
3111 DEBUG3(printk("qla2x00: DPC handler sleeping\n"));
3112
39a11240
CH
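		/* Sleep until qla2xxx_wake_dpc() wakes this thread up via
		 * wake_up_process(). */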
3113 set_current_state(TASK_INTERRUPTIBLE);
3114 schedule();
3115 __set_current_state(TASK_RUNNING);
1da177e4
LT
3116
3117 DEBUG3(printk("qla2x00: DPC handler waking up\n"));
3118
3119 /* Initialization not yet finished. Don't do anything yet. */
e315cd28 3120 if (!base_vha->flags.init_done)
1da177e4
LT
3121 continue;
3122
85880801
AV
3123 if (ha->flags.eeh_busy) {
3124 DEBUG17(qla_printk(KERN_WARNING, ha,
3125 "qla2x00_do_dpc: dpc_flags: %lx\n",
3126 base_vha->dpc_flags));
3127 continue;
3128 }
3129
e315cd28 3130 DEBUG3(printk("scsi(%ld): DPC handler\n", base_vha->host_no));
1da177e4
LT
3131
3132 ha->dpc_active = 1;
3133
1da177e4 3134 if (ha->flags.mbox_busy) {
1da177e4
LT
3135 ha->dpc_active = 0;
3136 continue;
3137 }
3138
e315cd28 3139 qla2x00_do_work(base_vha);
0971de7f 3140
a9083016
GM
3141 if (IS_QLA82XX(ha)) {
3142 if (test_and_clear_bit(ISP_UNRECOVERABLE,
3143 &base_vha->dpc_flags)) {
3144 qla82xx_idc_lock(ha);
3145 qla82xx_wr_32(ha, QLA82XX_CRB_DEV_STATE,
3146 QLA82XX_DEV_FAILED);
3147 qla82xx_idc_unlock(ha);
3148 qla_printk(KERN_INFO, ha,
3149 "HW State: FAILED\n");
3150 qla82xx_device_state_handler(base_vha);
3151 continue;
3152 }
3153
3154 if (test_and_clear_bit(FCOE_CTX_RESET_NEEDED,
3155 &base_vha->dpc_flags)) {
3156
3157 DEBUG(printk(KERN_INFO
3158 "scsi(%ld): dpc: sched "
3159 "qla82xx_fcoe_ctx_reset ha = %p\n",
3160 base_vha->host_no, ha));
3161 if (!(test_and_set_bit(ABORT_ISP_ACTIVE,
3162 &base_vha->dpc_flags))) {
3163 if (qla82xx_fcoe_ctx_reset(base_vha)) {
3164 /* FCoE-ctx reset failed.
3165 * Escalate to chip-reset
3166 */
3167 set_bit(ISP_ABORT_NEEDED,
3168 &base_vha->dpc_flags);
3169 }
3170 clear_bit(ABORT_ISP_ACTIVE,
3171 &base_vha->dpc_flags);
3172 }
3173
3174 DEBUG(printk("scsi(%ld): dpc:"
3175 " qla82xx_fcoe_ctx_reset end\n",
3176 base_vha->host_no));
3177 }
3178 }
3179
e315cd28
AC
3180 if (test_and_clear_bit(ISP_ABORT_NEEDED,
3181 &base_vha->dpc_flags)) {
1da177e4
LT
3182
3183 DEBUG(printk("scsi(%ld): dpc: sched "
3184 "qla2x00_abort_isp ha = %p\n",
e315cd28 3185 base_vha->host_no, ha));
1da177e4 3186 if (!(test_and_set_bit(ABORT_ISP_ACTIVE,
e315cd28 3187 &base_vha->dpc_flags))) {
1da177e4 3188
a9083016 3189 if (ha->isp_ops->abort_isp(base_vha)) {
1da177e4
LT
3190 /* failed. retry later */
3191 set_bit(ISP_ABORT_NEEDED,
e315cd28 3192 &base_vha->dpc_flags);
99363ef8 3193 }
e315cd28
AC
3194 clear_bit(ABORT_ISP_ACTIVE,
3195 &base_vha->dpc_flags);
99363ef8
SJ
3196 }
3197
1da177e4 3198 DEBUG(printk("scsi(%ld): dpc: qla2x00_abort_isp end\n",
e315cd28 3199 base_vha->host_no));
1da177e4
LT
3200 }
3201
e315cd28
AC
3202 if (test_bit(FCPORT_UPDATE_NEEDED, &base_vha->dpc_flags)) {
3203 qla2x00_update_fcports(base_vha);
3204 clear_bit(FCPORT_UPDATE_NEEDED, &base_vha->dpc_flags);
c9c5ced9 3205 }
d97994dc 3206
e315cd28
AC
3207 if (test_and_clear_bit(RESET_MARKER_NEEDED,
3208 &base_vha->dpc_flags) &&
3209 (!(test_and_set_bit(RESET_ACTIVE, &base_vha->dpc_flags)))) {
1da177e4
LT
3210
3211 DEBUG(printk("scsi(%ld): qla2x00_reset_marker()\n",
e315cd28 3212 base_vha->host_no));
1da177e4 3213
e315cd28
AC
3214 qla2x00_rst_aen(base_vha);
3215 clear_bit(RESET_ACTIVE, &base_vha->dpc_flags);
1da177e4
LT
3216 }
3217
3218 /* Retry each device up to login retry count */
e315cd28
AC
3219 if ((test_and_clear_bit(RELOGIN_NEEDED,
3220 &base_vha->dpc_flags)) &&
3221 !test_bit(LOOP_RESYNC_NEEDED, &base_vha->dpc_flags) &&
3222 atomic_read(&base_vha->loop_state) != LOOP_DOWN) {
1da177e4
LT
3223
3224 DEBUG(printk("scsi(%ld): qla2x00_port_login()\n",
e315cd28
AC
3225 base_vha->host_no));
3226 qla2x00_relogin(base_vha);
3227
1da177e4 3228 DEBUG(printk("scsi(%ld): qla2x00_port_login - end\n",
e315cd28 3229 base_vha->host_no));
1da177e4
LT
3230 }
3231
e315cd28
AC
3232 if (test_and_clear_bit(LOOP_RESYNC_NEEDED,
3233 &base_vha->dpc_flags)) {
1da177e4
LT
3234
3235 DEBUG(printk("scsi(%ld): qla2x00_loop_resync()\n",
e315cd28 3236 base_vha->host_no));
1da177e4
LT
3237
3238 if (!(test_and_set_bit(LOOP_RESYNC_ACTIVE,
e315cd28 3239 &base_vha->dpc_flags))) {
1da177e4 3240
e315cd28 3241 rval = qla2x00_loop_resync(base_vha);
1da177e4 3242
e315cd28
AC
3243 clear_bit(LOOP_RESYNC_ACTIVE,
3244 &base_vha->dpc_flags);
1da177e4
LT
3245 }
3246
3247 DEBUG(printk("scsi(%ld): qla2x00_loop_resync - end\n",
e315cd28 3248 base_vha->host_no));
1da177e4
LT
3249 }
3250
e315cd28
AC
3251 if (test_bit(NPIV_CONFIG_NEEDED, &base_vha->dpc_flags) &&
3252 atomic_read(&base_vha->loop_state) == LOOP_READY) {
3253 clear_bit(NPIV_CONFIG_NEEDED, &base_vha->dpc_flags);
3254 qla2xxx_flash_npiv_conf(base_vha);
272976ca
AV
3255 }
3256
1da177e4 3257 if (!ha->interrupts_on)
fd34f556 3258 ha->isp_ops->enable_intrs(ha);
1da177e4 3259
e315cd28
AC
3260 if (test_and_clear_bit(BEACON_BLINK_NEEDED,
3261 &base_vha->dpc_flags))
3262 ha->isp_ops->beacon_blink(base_vha);
f6df144c 3263
e315cd28 3264 qla2x00_do_dpc_all_vps(base_vha);
2c3dfe3f 3265
1da177e4
LT
3266 ha->dpc_active = 0;
3267	} /* End of while (!kthread_should_stop()) */
3268
e315cd28 3269 DEBUG(printk("scsi(%ld): DPC handler exiting\n", base_vha->host_no));
1da177e4
LT
3270
3271 /*
3272 * Make sure that nobody tries to wake us up again.
3273 */
1da177e4
LT
3274 ha->dpc_active = 0;
3275
ac280b67
AV
3276 /* Cleanup any residual CTX SRBs. */
3277 qla2x00_abort_all_cmds(base_vha, DID_NO_CONNECT << 16);
3278
39a11240
CH
3279 return 0;
3280}
3281
3282void
e315cd28 3283qla2xxx_wake_dpc(struct scsi_qla_host *vha)
39a11240 3284{
e315cd28 3285 struct qla_hw_data *ha = vha->hw;
c795c1e4
AV
3286 struct task_struct *t = ha->dpc_thread;
3287
e315cd28 3288 if (!test_bit(UNLOADING, &vha->dpc_flags) && t)
c795c1e4 3289 wake_up_process(t);
1da177e4
LT
3290}
3291
1da177e4
LT
3292/*
3293* qla2x00_rst_aen
3294* Processes asynchronous reset.
3295*
3296* Input:
3297* ha = adapter block pointer.
3298*/
3299static void
e315cd28 3300qla2x00_rst_aen(scsi_qla_host_t *vha)
1da177e4 3301{
e315cd28
AC
3302 if (vha->flags.online && !vha->flags.reset_active &&
3303 !atomic_read(&vha->loop_down_timer) &&
3304 !(test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags))) {
1da177e4 3305 do {
e315cd28 3306 clear_bit(RESET_MARKER_NEEDED, &vha->dpc_flags);
1da177e4
LT
3307
3308 /*
3309 * Issue marker command only when we are going to start
3310 * the I/O.
3311 */
e315cd28
AC
3312 vha->marker_needed = 1;
3313 } while (!atomic_read(&vha->loop_down_timer) &&
3314 (test_bit(RESET_MARKER_NEEDED, &vha->dpc_flags)));
1da177e4
LT
3315 }
3316}
3317
f4f051eb 3318static void
e315cd28 3319qla2x00_sp_free_dma(srb_t *sp)
f4f051eb
AV
3320{
3321 struct scsi_cmnd *cmd = sp->cmd;
3322
3323 if (sp->flags & SRB_DMA_VALID) {
385d70b4 3324 scsi_dma_unmap(cmd);
f4f051eb
AV
3325 sp->flags &= ~SRB_DMA_VALID;
3326 }
fca29703 3327 CMD_SP(cmd) = NULL;
f4f051eb
AV
3328}
3329
3330void
73208dfd 3331qla2x00_sp_compl(struct qla_hw_data *ha, srb_t *sp)
f4f051eb
AV
3332{
3333 struct scsi_cmnd *cmd = sp->cmd;
3334
e315cd28 3335 qla2x00_sp_free_dma(sp);
f4f051eb 3336
a9083016
GM
3337 if (sp->flags & SRB_FCP_CMND_DMA_VALID) {
3338 struct ct6_dsd *ctx = sp->ctx;
3339 dma_pool_free(ha->fcp_cmnd_dma_pool, ctx->fcp_cmnd,
3340 ctx->fcp_cmnd_dma);
3341 list_splice(&ctx->dsd_list, &ha->gbl_dsd_list);
3342 ha->gbl_dsd_inuse -= ctx->dsd_use_cnt;
3343 ha->gbl_dsd_avail += ctx->dsd_use_cnt;
3344 mempool_free(sp->ctx, ha->ctx_mempool);
3345 sp->ctx = NULL;
3346 }
f4f051eb 3347
a9083016 3348 mempool_free(sp, ha->srb_mempool);
f4f051eb
AV
3349 cmd->scsi_done(cmd);
3350}
bdf79621 3351
1da177e4
LT
3352/**************************************************************************
3353* qla2x00_timer
3354*
3355* Description:
3356* One second timer
3357*
3358* Context: Interrupt
3359***************************************************************************/
2c3dfe3f 3360void
e315cd28 3361qla2x00_timer(scsi_qla_host_t *vha)
1da177e4 3362{
1da177e4
LT
3363 unsigned long cpu_flags = 0;
3364 fc_port_t *fcport;
1da177e4
LT
3365 int start_dpc = 0;
3366 int index;
3367 srb_t *sp;
f4f051eb 3368 int t;
85880801 3369 uint16_t w;
e315cd28 3370 struct qla_hw_data *ha = vha->hw;
73208dfd 3371 struct req_que *req;
85880801 3372
a9083016
GM
3373 if (IS_QLA82XX(ha))
3374 qla82xx_watchdog(vha);
3375
85880801
AV
3376 /* Hardware read to raise pending EEH errors during mailbox waits. */
3377 if (!pci_channel_offline(ha->pdev))
3378 pci_read_config_word(ha->pdev, PCI_VENDOR_ID, &w);
1da177e4
LT
3379 /*
3380 * Ports - Port down timer.
3381 *
3382	 * Whenever a port is in the LOST state we start decrementing its port
3383	 * down timer every second until it reaches zero. Once it reaches zero
fa2a1ce5 3384	 * the port is marked DEAD.
1da177e4
LT
3385 */
3386 t = 0;
e315cd28 3387 list_for_each_entry(fcport, &vha->vp_fcports, list) {
1da177e4
LT
3388 if (fcport->port_type != FCT_TARGET)
3389 continue;
3390
3391 if (atomic_read(&fcport->state) == FCS_DEVICE_LOST) {
3392
3393 if (atomic_read(&fcport->port_down_timer) == 0)
3394 continue;
3395
fa2a1ce5 3396 if (atomic_dec_and_test(&fcport->port_down_timer) != 0)
1da177e4 3397 atomic_set(&fcport->state, FCS_DEVICE_DEAD);
fa2a1ce5 3398
1da177e4 3399 DEBUG(printk("scsi(%ld): fcport-%d - port retry count: "
fca29703 3400 "%d remaining\n",
e315cd28 3401 vha->host_no,
1da177e4
LT
3402 t, atomic_read(&fcport->port_down_timer)));
3403 }
3404 t++;
3405 } /* End of for fcport */
3406
1da177e4
LT
3407
3408 /* Loop down handler. */
e315cd28
AC
3409 if (atomic_read(&vha->loop_down_timer) > 0 &&
3410 !(test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags))
3411 && vha->flags.online) {
1da177e4 3412
e315cd28
AC
3413 if (atomic_read(&vha->loop_down_timer) ==
3414 vha->loop_down_abort_time) {
1da177e4
LT
3415
3416 DEBUG(printk("scsi(%ld): Loop Down - aborting the "
3417 "queues before time expire\n",
e315cd28 3418 vha->host_no));
1da177e4 3419
e315cd28
AC
3420 if (!IS_QLA2100(ha) && vha->link_down_timeout)
3421 atomic_set(&vha->loop_state, LOOP_DEAD);
1da177e4 3422
f08b7251
AV
3423 /*
3424 * Schedule an ISP abort to return any FCP2-device
3425 * commands.
3426 */
2c3dfe3f 3427 /* NPIV - scan physical port only */
e315cd28 3428 if (!vha->vp_idx) {
2c3dfe3f
SJ
3429 spin_lock_irqsave(&ha->hardware_lock,
3430 cpu_flags);
73208dfd 3431 req = ha->req_q_map[0];
2c3dfe3f
SJ
3432 for (index = 1;
3433 index < MAX_OUTSTANDING_COMMANDS;
3434 index++) {
3435 fc_port_t *sfcp;
3436
e315cd28 3437 sp = req->outstanding_cmds[index];
2c3dfe3f
SJ
3438 if (!sp)
3439 continue;
cf53b069
AV
3440 if (sp->ctx)
3441 continue;
2c3dfe3f 3442 sfcp = sp->fcport;
f08b7251 3443 if (!(sfcp->flags & FCF_FCP2_DEVICE))
2c3dfe3f 3444 continue;
bdf79621 3445
2c3dfe3f 3446 set_bit(ISP_ABORT_NEEDED,
e315cd28 3447 &vha->dpc_flags);
2c3dfe3f
SJ
3448 break;
3449 }
3450 spin_unlock_irqrestore(&ha->hardware_lock,
e315cd28 3451 cpu_flags);
1da177e4 3452 }
1da177e4
LT
3453 start_dpc++;
3454 }
3455
3456 /* if the loop has been down for 4 minutes, reinit adapter */
e315cd28 3457 if (atomic_dec_and_test(&vha->loop_down_timer) != 0) {
0d6e61bc 3458 if (!(vha->device_flags & DFLG_NO_CABLE)) {
1da177e4
LT
3459 DEBUG(printk("scsi(%ld): Loop down - "
3460 "aborting ISP.\n",
e315cd28 3461 vha->host_no));
1da177e4
LT
3462 qla_printk(KERN_WARNING, ha,
3463 "Loop down - aborting ISP.\n");
3464
e315cd28 3465 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
1da177e4
LT
3466 }
3467 }
fca29703 3468 DEBUG3(printk("scsi(%ld): Loop Down - seconds remaining %d\n",
e315cd28
AC
3469 vha->host_no,
3470 atomic_read(&vha->loop_down_timer)));
1da177e4
LT
3471 }
3472
f6df144c
AV
3473 /* Check if beacon LED needs to be blinked */
3474 if (ha->beacon_blink_led == 1) {
e315cd28 3475 set_bit(BEACON_BLINK_NEEDED, &vha->dpc_flags);
f6df144c
AV
3476 start_dpc++;
3477 }
3478
550bf57d 3479 /* Process any deferred work. */
e315cd28 3480 if (!list_empty(&vha->work_list))
550bf57d
AV
3481 start_dpc++;
3482
1da177e4 3483 /* Schedule the DPC routine if needed */
e315cd28
AC
3484 if ((test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags) ||
3485 test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags) ||
3486 test_bit(FCPORT_UPDATE_NEEDED, &vha->dpc_flags) ||
1da177e4 3487 start_dpc ||
e315cd28
AC
3488 test_bit(RESET_MARKER_NEEDED, &vha->dpc_flags) ||
3489 test_bit(BEACON_BLINK_NEEDED, &vha->dpc_flags) ||
a9083016
GM
3490 test_bit(ISP_UNRECOVERABLE, &vha->dpc_flags) ||
3491 test_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags) ||
e315cd28
AC
3492 test_bit(VP_DPC_NEEDED, &vha->dpc_flags) ||
3493 test_bit(RELOGIN_NEEDED, &vha->dpc_flags)))
3494 qla2xxx_wake_dpc(vha);
1da177e4 3495
e315cd28 3496 qla2x00_restart_timer(vha, WATCH_INTERVAL);
1da177e4
LT
3497}
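/*
 * Re-arm note (assumption, sketch only): qla2x00_restart_timer() is defined
 * elsewhere in this file; a one-second watchdog like this is typically
 * re-armed by pushing the timer's expiry forward, e.g.
 *
 *	mod_timer(&vha->timer, jiffies + interval * HZ);
 *
 * so that qla2x00_timer() runs again one WATCH_INTERVAL later.
 */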
3498
5433383e
AV
3499/* Firmware interface routines. */
3500
a9083016 3501#define FW_BLOBS 8
5433383e
AV
3502#define FW_ISP21XX 0
3503#define FW_ISP22XX 1
3504#define FW_ISP2300 2
3505#define FW_ISP2322 3
48c02fde 3506#define FW_ISP24XX 4
c3a2f0df 3507#define FW_ISP25XX 5
3a03eb79 3508#define FW_ISP81XX 6
a9083016 3509#define FW_ISP82XX 7
5433383e 3510
bb8ee499
AV
3511#define FW_FILE_ISP21XX "ql2100_fw.bin"
3512#define FW_FILE_ISP22XX "ql2200_fw.bin"
3513#define FW_FILE_ISP2300 "ql2300_fw.bin"
3514#define FW_FILE_ISP2322 "ql2322_fw.bin"
3515#define FW_FILE_ISP24XX "ql2400_fw.bin"
c3a2f0df 3516#define FW_FILE_ISP25XX "ql2500_fw.bin"
3a03eb79 3517#define FW_FILE_ISP81XX "ql8100_fw.bin"
a9083016 3518#define FW_FILE_ISP82XX "ql8200_fw.bin"
bb8ee499 3519
e1e82b6f 3520static DEFINE_MUTEX(qla_fw_lock);
5433383e
AV
3521
3522static struct fw_blob qla_fw_blobs[FW_BLOBS] = {
bb8ee499
AV
3523 { .name = FW_FILE_ISP21XX, .segs = { 0x1000, 0 }, },
3524 { .name = FW_FILE_ISP22XX, .segs = { 0x1000, 0 }, },
3525 { .name = FW_FILE_ISP2300, .segs = { 0x800, 0 }, },
3526 { .name = FW_FILE_ISP2322, .segs = { 0x800, 0x1c000, 0x1e000, 0 }, },
3527 { .name = FW_FILE_ISP24XX, },
c3a2f0df 3528 { .name = FW_FILE_ISP25XX, },
3a03eb79 3529 { .name = FW_FILE_ISP81XX, },
a9083016 3530 { .name = FW_FILE_ISP82XX, },
5433383e
AV
3531};
3532
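/*
 * qla2x00_request_firmware() maps the ISP type to one of the qla_fw_blobs[]
 * entries above and loads the image on first use with request_firmware(),
 * caching the result in blob->fw under qla_fw_lock so later callers reuse it.
 */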
3533struct fw_blob *
e315cd28 3534qla2x00_request_firmware(scsi_qla_host_t *vha)
5433383e 3535{
e315cd28 3536 struct qla_hw_data *ha = vha->hw;
5433383e
AV
3537 struct fw_blob *blob;
3538
3539 blob = NULL;
3540 if (IS_QLA2100(ha)) {
3541 blob = &qla_fw_blobs[FW_ISP21XX];
3542 } else if (IS_QLA2200(ha)) {
3543 blob = &qla_fw_blobs[FW_ISP22XX];
48c02fde 3544 } else if (IS_QLA2300(ha) || IS_QLA2312(ha) || IS_QLA6312(ha)) {
5433383e 3545 blob = &qla_fw_blobs[FW_ISP2300];
48c02fde 3546 } else if (IS_QLA2322(ha) || IS_QLA6322(ha)) {
5433383e 3547 blob = &qla_fw_blobs[FW_ISP2322];
4d4df193 3548 } else if (IS_QLA24XX_TYPE(ha)) {
5433383e 3549 blob = &qla_fw_blobs[FW_ISP24XX];
c3a2f0df
AV
3550 } else if (IS_QLA25XX(ha)) {
3551 blob = &qla_fw_blobs[FW_ISP25XX];
3a03eb79
AV
3552 } else if (IS_QLA81XX(ha)) {
3553 blob = &qla_fw_blobs[FW_ISP81XX];
a9083016
GM
3554 } else if (IS_QLA82XX(ha)) {
3555 blob = &qla_fw_blobs[FW_ISP82XX];
5433383e
AV
3556 }
3557
e1e82b6f 3558 mutex_lock(&qla_fw_lock);
5433383e
AV
3559 if (blob->fw)
3560 goto out;
3561
3562 if (request_firmware(&blob->fw, blob->name, &ha->pdev->dev)) {
3563 DEBUG2(printk("scsi(%ld): Failed to load firmware image "
e315cd28 3564 "(%s).\n", vha->host_no, blob->name));
5433383e
AV
3565 blob->fw = NULL;
3566 blob = NULL;
3567 goto out;
3568 }
3569
3570out:
e1e82b6f 3571 mutex_unlock(&qla_fw_lock);
5433383e
AV
3572 return blob;
3573}
3574
3575static void
3576qla2x00_release_firmware(void)
3577{
3578 int idx;
3579
e1e82b6f 3580 mutex_lock(&qla_fw_lock);
5433383e
AV
3581 for (idx = 0; idx < FW_BLOBS; idx++)
3582 if (qla_fw_blobs[idx].fw)
3583 release_firmware(qla_fw_blobs[idx].fw);
e1e82b6f 3584 mutex_unlock(&qla_fw_lock);
5433383e
AV
3585}
3586
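/*
 * PCI AER error-recovery callbacks. The PCI core calls them in order:
 * error_detected() when an error is reported, mmio_enabled() if MMIO access
 * can be restored without a reset, slot_reset() after the slot/link has been
 * reset, and resume() once normal I/O may restart. They are wired into
 * qla2xxx_pci_driver below through its .err_handler field.
 */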
14e660e6
SJ
3587static pci_ers_result_t
3588qla2xxx_pci_error_detected(struct pci_dev *pdev, pci_channel_state_t state)
3589{
85880801
AV
3590 scsi_qla_host_t *vha = pci_get_drvdata(pdev);
3591 struct qla_hw_data *ha = vha->hw;
3592
3593 DEBUG2(qla_printk(KERN_WARNING, ha, "error_detected:state %x\n",
3594 state));
b9b12f73 3595
14e660e6
SJ
3596 switch (state) {
3597 case pci_channel_io_normal:
85880801 3598 ha->flags.eeh_busy = 0;
14e660e6
SJ
3599 return PCI_ERS_RESULT_CAN_RECOVER;
3600 case pci_channel_io_frozen:
85880801 3601 ha->flags.eeh_busy = 1;
90a86fc0 3602 qla2x00_free_irqs(vha);
14e660e6
SJ
3603 pci_disable_device(pdev);
3604 return PCI_ERS_RESULT_NEED_RESET;
3605 case pci_channel_io_perm_failure:
85880801
AV
3606 ha->flags.pci_channel_io_perm_failure = 1;
3607 qla2x00_abort_all_cmds(vha, DID_NO_CONNECT << 16);
14e660e6
SJ
3608 return PCI_ERS_RESULT_DISCONNECT;
3609 }
3610 return PCI_ERS_RESULT_NEED_RESET;
3611}
3612
3613static pci_ers_result_t
3614qla2xxx_pci_mmio_enabled(struct pci_dev *pdev)
3615{
3616 int risc_paused = 0;
3617 uint32_t stat;
3618 unsigned long flags;
e315cd28
AC
3619 scsi_qla_host_t *base_vha = pci_get_drvdata(pdev);
3620 struct qla_hw_data *ha = base_vha->hw;
14e660e6
SJ
3621 struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
3622 struct device_reg_24xx __iomem *reg24 = &ha->iobase->isp24;
3623
3624 spin_lock_irqsave(&ha->hardware_lock, flags);
3625 if (IS_QLA2100(ha) || IS_QLA2200(ha)){
3626 stat = RD_REG_DWORD(&reg->hccr);
3627 if (stat & HCCR_RISC_PAUSE)
3628 risc_paused = 1;
3629 } else if (IS_QLA23XX(ha)) {
3630 stat = RD_REG_DWORD(&reg->u.isp2300.host_status);
3631 if (stat & HSR_RISC_PAUSED)
3632 risc_paused = 1;
3633 } else if (IS_FWI2_CAPABLE(ha)) {
3634 stat = RD_REG_DWORD(&reg24->host_status);
3635 if (stat & HSRX_RISC_PAUSED)
3636 risc_paused = 1;
3637 }
3638 spin_unlock_irqrestore(&ha->hardware_lock, flags);
3639
3640 if (risc_paused) {
3641 qla_printk(KERN_INFO, ha, "RISC paused -- mmio_enabled, "
3642 "dumping firmware!\n");
e315cd28 3643 ha->isp_ops->fw_dump(base_vha, 0);
14e660e6
SJ
3644
3645 return PCI_ERS_RESULT_NEED_RESET;
3646 } else
3647 return PCI_ERS_RESULT_RECOVERED;
3648}
3649
3650static pci_ers_result_t
3651qla2xxx_pci_slot_reset(struct pci_dev *pdev)
3652{
3653 pci_ers_result_t ret = PCI_ERS_RESULT_DISCONNECT;
e315cd28
AC
3654 scsi_qla_host_t *base_vha = pci_get_drvdata(pdev);
3655 struct qla_hw_data *ha = base_vha->hw;
90a86fc0
JC
3656 struct rsp_que *rsp;
3657 int rc, retries = 10;
09483916 3658
85880801
AV
3659 DEBUG17(qla_printk(KERN_WARNING, ha, "slot_reset\n"));
3660
90a86fc0
JC
3661 /* Workaround: the qla2xxx driver, which accesses hardware early in
3662 * the recovery path, needs the error state to be pci_channel_io_normal.
3663 * Otherwise mailbox commands time out.
3664 */
3665 pdev->error_state = pci_channel_io_normal;
3666
3667 pci_restore_state(pdev);
3668
8c1496bd
RL
3669 /* pci_restore_state() clears the device's saved_state flag, so save
3670 * the state again to keep a valid saved state for later restores.
3671 */
3672 pci_save_state(pdev);
3673
09483916
BH
3674 if (ha->mem_only)
3675 rc = pci_enable_device_mem(pdev);
3676 else
3677 rc = pci_enable_device(pdev);
14e660e6 3678
09483916 3679 if (rc) {
14e660e6
SJ
3680 qla_printk(KERN_WARNING, ha,
3681 "Can't re-enable PCI device after reset.\n");
14e660e6
SJ
3682 return ret;
3683 }
14e660e6 3684
90a86fc0
JC
3685 rsp = ha->rsp_q_map[0];
3686 if (qla2x00_request_irqs(ha, rsp))
3687 return ret;
3688
e315cd28 3689 if (ha->isp_ops->pci_config(base_vha))
14e660e6
SJ
3690 return ret;
3691
90a86fc0
JC
3692 while (ha->flags.mbox_busy && retries--)
3693 msleep(1000);
85880801 3694
e315cd28 3695 set_bit(ABORT_ISP_ACTIVE, &base_vha->dpc_flags);
a9083016 3696 if (ha->isp_ops->abort_isp(base_vha) == QLA_SUCCESS)
14e660e6 3697 ret = PCI_ERS_RESULT_RECOVERED;
e315cd28 3698 clear_bit(ABORT_ISP_ACTIVE, &base_vha->dpc_flags);
14e660e6 3699
90a86fc0 3700
85880801
AV
3701 DEBUG17(qla_printk(KERN_WARNING, ha,
3702 "slot_reset-return:ret=%x\n", ret));
3703
14e660e6
SJ
3704 return ret;
3705}
3706
3707static void
3708qla2xxx_pci_resume(struct pci_dev *pdev)
3709{
e315cd28
AC
3710 scsi_qla_host_t *base_vha = pci_get_drvdata(pdev);
3711 struct qla_hw_data *ha = base_vha->hw;
14e660e6
SJ
3712 int ret;
3713
85880801
AV
3714 DEBUG17(qla_printk(KERN_WARNING, ha, "pci_resume\n"));
3715
e315cd28 3716 ret = qla2x00_wait_for_hba_online(base_vha);
14e660e6
SJ
3717 if (ret != QLA_SUCCESS) {
3718 qla_printk(KERN_ERR, ha,
3719 "the device failed to resume I/O "
3720 "from slot/link_reset");
3721 }
85880801 3722
3e46f031
LC
3723 pci_cleanup_aer_uncorrect_error_status(pdev);
3724
85880801 3725 ha->flags.eeh_busy = 0;
14e660e6
SJ
3726}
3727
3728static struct pci_error_handlers qla2xxx_err_handler = {
3729 .error_detected = qla2xxx_pci_error_detected,
3730 .mmio_enabled = qla2xxx_pci_mmio_enabled,
3731 .slot_reset = qla2xxx_pci_slot_reset,
3732 .resume = qla2xxx_pci_resume,
3733};
3734
5433383e 3735static struct pci_device_id qla2xxx_pci_tbl[] = {
47f5e069
AV
3736 { PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP2100) },
3737 { PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP2200) },
3738 { PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP2300) },
3739 { PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP2312) },
3740 { PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP2322) },
3741 { PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP6312) },
3742 { PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP6322) },
3743 { PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP2422) },
3744 { PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP2432) },
4d4df193 3745 { PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP8432) },
47f5e069
AV
3746 { PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP5422) },
3747 { PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP5432) },
c3a2f0df 3748 { PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP2532) },
3a03eb79 3749 { PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP8001) },
a9083016 3750 { PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP8021) },
5433383e
AV
3751 { 0 },
3752};
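/*
 * Exporting the ID table with MODULE_DEVICE_TABLE() below lets userspace
 * (udev/modprobe) autoload this driver when a matching QLogic PCI device is
 * discovered.
 */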
3753MODULE_DEVICE_TABLE(pci, qla2xxx_pci_tbl);
3754
fca29703 3755static struct pci_driver qla2xxx_pci_driver = {
cb63067a 3756 .name = QLA2XXX_DRIVER_NAME,
0a21ef1e
JB
3757 .driver = {
3758 .owner = THIS_MODULE,
3759 },
fca29703 3760 .id_table = qla2xxx_pci_tbl,
7ee61397 3761 .probe = qla2x00_probe_one,
4c993f76 3762 .remove = qla2x00_remove_one,
14e660e6 3763 .err_handler = &qla2xxx_err_handler,
fca29703
AV
3764};
3765
1da177e4
LT
3766/**
3767 * qla2x00_module_init - Module initialization.
3768 **/
3769static int __init
3770qla2x00_module_init(void)
3771{
fca29703
AV
3772 int ret = 0;
3773
1da177e4 3774 /* Allocate cache for SRBs. */
354d6b21 3775 srb_cachep = kmem_cache_create("qla2xxx_srbs", sizeof(srb_t), 0,
20c2df83 3776 SLAB_HWCACHE_ALIGN, NULL);
1da177e4
LT
3777 if (srb_cachep == NULL) {
3778 printk(KERN_ERR
3779 "qla2xxx: Unable to allocate SRB cache...Failing load!\n");
3780 return -ENOMEM;
3781 }
3782
3783 /* Derive version string. */
3784 strcpy(qla2x00_version_str, QLA2XXX_VERSION);
11010fec 3785 if (ql2xextended_error_logging)
0181944f
AV
3786 strcat(qla2x00_version_str, "-debug");
3787
1c97a12a
AV
3788 qla2xxx_transport_template =
3789 fc_attach_transport(&qla2xxx_transport_functions);
2c3dfe3f
SJ
3790 if (!qla2xxx_transport_template) {
3791 kmem_cache_destroy(srb_cachep);
1da177e4 3792 return -ENODEV;
2c3dfe3f
SJ
3793 }
3794 qla2xxx_transport_vport_template =
3795 fc_attach_transport(&qla2xxx_transport_vport_functions);
3796 if (!qla2xxx_transport_vport_template) {
3797 kmem_cache_destroy(srb_cachep);
3798 fc_release_transport(qla2xxx_transport_template);
1da177e4 3799 return -ENODEV;
2c3dfe3f 3800 }
1da177e4 3801
fd9a29f0
AV
3802 printk(KERN_INFO "QLogic Fibre Channel HBA Driver: %s\n",
3803 qla2x00_version_str);
7ee61397 3804 ret = pci_register_driver(&qla2xxx_pci_driver);
fca29703
AV
3805 if (ret) {
3806 kmem_cache_destroy(srb_cachep);
3807 fc_release_transport(qla2xxx_transport_template);
2c3dfe3f 3808 fc_release_transport(qla2xxx_transport_vport_template);
fca29703
AV
3809 }
3810 return ret;
1da177e4
LT
3811}
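/*
 * Usage sketch (assumption about a typical deployment, not taken from this
 * file): the "-debug" version-string suffix above is only derived at load
 * time, so extended error logging is normally requested when the module is
 * inserted, e.g.
 *
 *	modprobe qla2xxx ql2xextended_error_logging=1
 */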
3812
3813/**
3814 * qla2x00_module_exit - Module cleanup.
3815 **/
3816static void __exit
3817qla2x00_module_exit(void)
3818{
7ee61397 3819 pci_unregister_driver(&qla2xxx_pci_driver);
5433383e 3820 qla2x00_release_firmware();
354d6b21 3821 kmem_cache_destroy(srb_cachep);
a9083016
GM
3822 if (ctx_cachep)
3823 kmem_cache_destroy(ctx_cachep);
1da177e4 3824 fc_release_transport(qla2xxx_transport_template);
2c3dfe3f 3825 fc_release_transport(qla2xxx_transport_vport_template);
1da177e4
LT
3826}
3827
3828module_init(qla2x00_module_init);
3829module_exit(qla2x00_module_exit);
3830
3831MODULE_AUTHOR("QLogic Corporation");
3832MODULE_DESCRIPTION("QLogic Fibre Channel HBA Driver");
3833MODULE_LICENSE("GPL");
3834MODULE_VERSION(QLA2XXX_VERSION);
bb8ee499
AV
3835MODULE_FIRMWARE(FW_FILE_ISP21XX);
3836MODULE_FIRMWARE(FW_FILE_ISP22XX);
3837MODULE_FIRMWARE(FW_FILE_ISP2300);
3838MODULE_FIRMWARE(FW_FILE_ISP2322);
3839MODULE_FIRMWARE(FW_FILE_ISP24XX);
61623fc3 3840MODULE_FIRMWARE(FW_FILE_ISP25XX);