drivers/scsi/qla2xxx/qla_attr.c
1 /*
2 * QLogic Fibre Channel HBA Driver
3 * Copyright (c) 2003-2008 QLogic Corporation
4 *
5 * See LICENSE.qla2xxx for copyright and licensing details.
6 */
7 #include "qla_def.h"
8
9 #include <linux/kthread.h>
10 #include <linux/vmalloc.h>
11 #include <linux/delay.h>
12
13 static int qla24xx_vport_disable(struct fc_vport *, bool);
14
15 /* SYSFS attributes --------------------------------------------------------- */
16
17 static ssize_t
18 qla2x00_sysfs_read_fw_dump(struct kobject *kobj,
19 struct bin_attribute *bin_attr,
20 char *buf, loff_t off, size_t count)
21 {
22 struct scsi_qla_host *vha = shost_priv(dev_to_shost(container_of(kobj,
23 struct device, kobj)));
24 struct qla_hw_data *ha = vha->hw;
25
26 if (ha->fw_dump_reading == 0)
27 return 0;
28
29 return memory_read_from_buffer(buf, count, &off, ha->fw_dump,
30 ha->fw_dump_len);
31 }
32
33 static ssize_t
34 qla2x00_sysfs_write_fw_dump(struct kobject *kobj,
35 struct bin_attribute *bin_attr,
36 char *buf, loff_t off, size_t count)
37 {
38 struct scsi_qla_host *vha = shost_priv(dev_to_shost(container_of(kobj,
39 struct device, kobj)));
40 struct qla_hw_data *ha = vha->hw;
41 int reading;
42
43 if (off != 0)
44 return (0);
45
46 reading = simple_strtol(buf, NULL, 10);
47 switch (reading) {
48 case 0:
49 if (!ha->fw_dump_reading)
50 break;
51
52 qla_printk(KERN_INFO, ha,
53 "Firmware dump cleared on (%ld).\n", vha->host_no);
54
55 ha->fw_dump_reading = 0;
56 ha->fw_dumped = 0;
57 break;
58 case 1:
59 if (ha->fw_dumped && !ha->fw_dump_reading) {
60 ha->fw_dump_reading = 1;
61
62 qla_printk(KERN_INFO, ha,
63 "Raw firmware dump ready for read on (%ld).\n",
64 vha->host_no);
65 }
66 break;
67 case 2:
68 qla2x00_alloc_fw_dump(vha);
69 break;
70 case 3:
71 qla2x00_system_error(vha);
72 break;
73 }
74 return (count);
75 }
76
77 static struct bin_attribute sysfs_fw_dump_attr = {
78 .attr = {
79 .name = "fw_dump",
80 .mode = S_IRUSR | S_IWUSR,
81 },
82 .size = 0,
83 .read = qla2x00_sysfs_read_fw_dump,
84 .write = qla2x00_sysfs_write_fw_dump,
85 };
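/*
 * Typical user-space interaction with the fw_dump attribute defined above
 * (the sysfs path shown is illustrative; the exact location depends on
 * where the Scsi_Host device sits in sysfs):
 *
 *	echo 1 > /sys/class/scsi_host/hostN/device/fw_dump	# expose dump
 *	cat /sys/class/scsi_host/hostN/device/fw_dump > fw.dump
 *	echo 0 > /sys/class/scsi_host/hostN/device/fw_dump	# clear dump
 *
 * Writing 2 or 3 invokes qla2x00_alloc_fw_dump() or qla2x00_system_error()
 * respectively, as handled in the switch statement above.
 */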
86
87 static ssize_t
88 qla2x00_sysfs_read_nvram(struct kobject *kobj,
89 struct bin_attribute *bin_attr,
90 char *buf, loff_t off, size_t count)
91 {
92 struct scsi_qla_host *vha = shost_priv(dev_to_shost(container_of(kobj,
93 struct device, kobj)));
94 struct qla_hw_data *ha = vha->hw;
95
96 if (!capable(CAP_SYS_ADMIN))
97 return 0;
98
99 if (IS_NOCACHE_VPD_TYPE(ha))
100 ha->isp_ops->read_optrom(vha, ha->vpd, ha->flt_region_nvram << 2,
101 ha->nvram_size);
102 return memory_read_from_buffer(buf, count, &off, ha->nvram,
103 ha->nvram_size);
104 }
105
106 static ssize_t
107 qla2x00_sysfs_write_nvram(struct kobject *kobj,
108 struct bin_attribute *bin_attr,
109 char *buf, loff_t off, size_t count)
110 {
111 struct scsi_qla_host *vha = shost_priv(dev_to_shost(container_of(kobj,
112 struct device, kobj)));
113 struct qla_hw_data *ha = vha->hw;
114 uint16_t cnt;
115
116 if (!capable(CAP_SYS_ADMIN) || off != 0 || count != ha->nvram_size ||
117 !ha->isp_ops->write_nvram)
118 return 0;
119
120 /* Checksum NVRAM. */
121 if (IS_FWI2_CAPABLE(ha)) {
122 uint32_t *iter;
123 uint32_t chksum;
124
125 iter = (uint32_t *)buf;
126 chksum = 0;
127 for (cnt = 0; cnt < ((count >> 2) - 1); cnt++)
128 chksum += le32_to_cpu(*iter++);
129 chksum = ~chksum + 1;
130 *iter = cpu_to_le32(chksum);
131 } else {
132 uint8_t *iter;
133 uint8_t chksum;
134
135 iter = (uint8_t *)buf;
136 chksum = 0;
137 for (cnt = 0; cnt < count - 1; cnt++)
138 chksum += *iter++;
139 chksum = ~chksum + 1;
140 *iter = chksum;
141 }
142
143 if (qla2x00_wait_for_hba_online(vha) != QLA_SUCCESS) {
144 qla_printk(KERN_WARNING, ha,
145 "HBA not online, failing NVRAM update.\n");
146 return -EAGAIN;
147 }
148
149 /* Write NVRAM. */
150 ha->isp_ops->write_nvram(vha, (uint8_t *)buf, ha->nvram_base, count);
151 ha->isp_ops->read_nvram(vha, (uint8_t *)ha->nvram, ha->nvram_base,
152 count);
153
154 /* NVRAM settings take effect immediately. */
155 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
156 qla2xxx_wake_dpc(vha);
157 qla2x00_wait_for_chip_reset(vha);
158
159 return (count);
160 }
161
162 static struct bin_attribute sysfs_nvram_attr = {
163 .attr = {
164 .name = "nvram",
165 .mode = S_IRUSR | S_IWUSR,
166 },
167 .size = 512,
168 .read = qla2x00_sysfs_read_nvram,
169 .write = qla2x00_sysfs_write_nvram,
170 };
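/*
 * The FWI2 branch of the write handler above patches the final 32-bit word
 * so that the little-endian sum of all words in the NVRAM image is zero
 * (mod 2^32).  A hypothetical helper that checks this invariant is sketched
 * below; it is illustration only and not part of the driver.
 */
#if 0	/* illustrative sketch, not compiled */
static int qla2x00_nvram_image_csum_ok(const __le32 *image, size_t words)
{
	uint32_t sum = 0;
	size_t i;

	for (i = 0; i < words; i++)
		sum += le32_to_cpu(image[i]);	/* includes the checksum word */

	return sum == 0;
}
#endif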
171
172 static ssize_t
173 qla2x00_sysfs_read_optrom(struct kobject *kobj,
174 struct bin_attribute *bin_attr,
175 char *buf, loff_t off, size_t count)
176 {
177 struct scsi_qla_host *vha = shost_priv(dev_to_shost(container_of(kobj,
178 struct device, kobj)));
179 struct qla_hw_data *ha = vha->hw;
180
181 if (ha->optrom_state != QLA_SREADING)
182 return 0;
183
184 return memory_read_from_buffer(buf, count, &off, ha->optrom_buffer,
185 ha->optrom_region_size);
186 }
187
188 static ssize_t
189 qla2x00_sysfs_write_optrom(struct kobject *kobj,
190 struct bin_attribute *bin_attr,
191 char *buf, loff_t off, size_t count)
192 {
193 struct scsi_qla_host *vha = shost_priv(dev_to_shost(container_of(kobj,
194 struct device, kobj)));
195 struct qla_hw_data *ha = vha->hw;
196
197 if (ha->optrom_state != QLA_SWRITING)
198 return -EINVAL;
199 if (off > ha->optrom_region_size)
200 return -ERANGE;
201 if (off + count > ha->optrom_region_size)
202 count = ha->optrom_region_size - off;
203
204 memcpy(&ha->optrom_buffer[off], buf, count);
205
206 return count;
207 }
208
209 static struct bin_attribute sysfs_optrom_attr = {
210 .attr = {
211 .name = "optrom",
212 .mode = S_IRUSR | S_IWUSR,
213 },
214 .size = 0,
215 .read = qla2x00_sysfs_read_optrom,
216 .write = qla2x00_sysfs_write_optrom,
217 };
218
219 static ssize_t
220 qla2x00_sysfs_write_optrom_ctl(struct kobject *kobj,
221 struct bin_attribute *bin_attr,
222 char *buf, loff_t off, size_t count)
223 {
224 struct scsi_qla_host *vha = shost_priv(dev_to_shost(container_of(kobj,
225 struct device, kobj)));
226 struct qla_hw_data *ha = vha->hw;
227
228 uint32_t start = 0;
229 uint32_t size = ha->optrom_size;
230 int val, valid;
231
232 if (off)
233 return 0;
234
235 if (sscanf(buf, "%d:%x:%x", &val, &start, &size) < 1)
236 return -EINVAL;
237 if (start > ha->optrom_size)
238 return -EINVAL;
239
240 switch (val) {
241 case 0:
242 if (ha->optrom_state != QLA_SREADING &&
243 ha->optrom_state != QLA_SWRITING)
244 break;
245
246 ha->optrom_state = QLA_SWAITING;
247
248 DEBUG2(qla_printk(KERN_INFO, ha,
249 "Freeing flash region allocation -- 0x%x bytes.\n",
250 ha->optrom_region_size));
251
252 vfree(ha->optrom_buffer);
253 ha->optrom_buffer = NULL;
254 break;
255 case 1:
256 if (ha->optrom_state != QLA_SWAITING)
257 break;
258
259 ha->optrom_region_start = start;
260 ha->optrom_region_size = start + size > ha->optrom_size ?
261 ha->optrom_size - start : size;
262
263 ha->optrom_state = QLA_SREADING;
264 ha->optrom_buffer = vmalloc(ha->optrom_region_size);
265 if (ha->optrom_buffer == NULL) {
266 qla_printk(KERN_WARNING, ha,
267 "Unable to allocate memory for optrom retrieval "
268 "(%x).\n", ha->optrom_region_size);
269
270 ha->optrom_state = QLA_SWAITING;
271 return count;
272 }
273
274 DEBUG2(qla_printk(KERN_INFO, ha,
275 "Reading flash region -- 0x%x/0x%x.\n",
276 ha->optrom_region_start, ha->optrom_region_size));
277
278 memset(ha->optrom_buffer, 0, ha->optrom_region_size);
279 ha->isp_ops->read_optrom(vha, ha->optrom_buffer,
280 ha->optrom_region_start, ha->optrom_region_size);
281 break;
282 case 2:
283 if (ha->optrom_state != QLA_SWAITING)
284 break;
285
286 /*
287 * We need to be more restrictive on which FLASH regions are
288 * allowed to be updated via user-space. Regions accessible
289 * via this method include:
290 *
291 * ISP21xx/ISP22xx/ISP23xx type boards:
292 *
293 * 0x000000 -> 0x020000 -- Boot code.
294 *
295 * ISP2322/ISP24xx type boards:
296 *
297 * 0x000000 -> 0x07ffff -- Boot code.
298 * 0x080000 -> 0x0fffff -- Firmware.
299 *
300 * ISP25xx type boards:
301 *
302 * 0x000000 -> 0x07ffff -- Boot code.
303 * 0x080000 -> 0x0fffff -- Firmware.
304 * 0x120000 -> 0x12ffff -- VPD and HBA parameters.
305 */
306 valid = 0;
307 if (ha->optrom_size == OPTROM_SIZE_2300 && start == 0)
308 valid = 1;
309 else if (start == (ha->flt_region_boot * 4) ||
310 start == (ha->flt_region_fw * 4))
311 valid = 1;
312 else if (IS_QLA25XX(ha) || IS_QLA81XX(ha))
313 valid = 1;
314 if (!valid) {
315 qla_printk(KERN_WARNING, ha,
316 "Invalid start region 0x%x/0x%x.\n", start, size);
317 return -EINVAL;
318 }
319
320 ha->optrom_region_start = start;
321 ha->optrom_region_size = start + size > ha->optrom_size ?
322 ha->optrom_size - start : size;
323
324 ha->optrom_state = QLA_SWRITING;
325 ha->optrom_buffer = vmalloc(ha->optrom_region_size);
326 if (ha->optrom_buffer == NULL) {
327 qla_printk(KERN_WARNING, ha,
328 "Unable to allocate memory for optrom update "
329 "(%x).\n", ha->optrom_region_size);
330
331 ha->optrom_state = QLA_SWAITING;
332 return count;
333 }
334
335 DEBUG2(qla_printk(KERN_INFO, ha,
336 "Staging flash region write -- 0x%x/0x%x.\n",
337 ha->optrom_region_start, ha->optrom_region_size));
338
339 memset(ha->optrom_buffer, 0, ha->optrom_region_size);
340 break;
341 case 3:
342 if (ha->optrom_state != QLA_SWRITING)
343 break;
344
345 if (qla2x00_wait_for_hba_online(vha) != QLA_SUCCESS) {
346 qla_printk(KERN_WARNING, ha,
347 "HBA not online, failing flash update.\n");
348 return -EAGAIN;
349 }
350
351 DEBUG2(qla_printk(KERN_INFO, ha,
352 "Writing flash region -- 0x%x/0x%x.\n",
353 ha->optrom_region_start, ha->optrom_region_size));
354
355 ha->isp_ops->write_optrom(vha, ha->optrom_buffer,
356 ha->optrom_region_start, ha->optrom_region_size);
357 break;
358 default:
359 count = -EINVAL;
360 }
361 return count;
362 }
363
364 static struct bin_attribute sysfs_optrom_ctl_attr = {
365 .attr = {
366 .name = "optrom_ctl",
367 .mode = S_IWUSR,
368 },
369 .size = 0,
370 .write = qla2x00_sysfs_write_optrom_ctl,
371 };
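/*
 * The optrom/optrom_ctl pair implements a small state machine
 * (QLA_SWAITING -> QLA_SREADING/QLA_SWRITING and back).  An illustrative
 * user-space flow, with assumed region values and relative sysfs paths:
 *
 *	# read 0x20000 bytes of flash starting at offset 0
 *	echo "1:0:20000" > optrom_ctl		# stage a read buffer
 *	dd if=optrom of=region.bin bs=64k	# data from ..._read_optrom
 *	echo 0 > optrom_ctl			# free the staging buffer
 *
 *	# update the same region
 *	echo "2:0:20000" > optrom_ctl		# stage a write buffer
 *	dd if=region.bin of=optrom bs=64k	# copied into the buffer
 *	echo "3:0:20000" > optrom_ctl		# burn buffer to flash
 *	echo 0 > optrom_ctl			# release the buffer
 */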
372
373 static ssize_t
374 qla2x00_sysfs_read_vpd(struct kobject *kobj,
375 struct bin_attribute *bin_attr,
376 char *buf, loff_t off, size_t count)
377 {
378 struct scsi_qla_host *vha = shost_priv(dev_to_shost(container_of(kobj,
379 struct device, kobj)));
380 struct qla_hw_data *ha = vha->hw;
381
382 if (!capable(CAP_SYS_ADMIN))
383 return 0;
384
385 if (IS_NOCACHE_VPD_TYPE(ha))
386 ha->isp_ops->read_optrom(vha, ha->vpd, ha->flt_region_vpd << 2,
387 ha->vpd_size);
388 return memory_read_from_buffer(buf, count, &off, ha->vpd, ha->vpd_size);
389 }
390
391 static ssize_t
392 qla2x00_sysfs_write_vpd(struct kobject *kobj,
393 struct bin_attribute *bin_attr,
394 char *buf, loff_t off, size_t count)
395 {
396 struct scsi_qla_host *vha = shost_priv(dev_to_shost(container_of(kobj,
397 struct device, kobj)));
398 struct qla_hw_data *ha = vha->hw;
399 uint8_t *tmp_data;
400
401 if (!capable(CAP_SYS_ADMIN) || off != 0 || count != ha->vpd_size ||
402 !ha->isp_ops->write_nvram)
403 return 0;
404
405 if (qla2x00_wait_for_hba_online(vha) != QLA_SUCCESS) {
406 qla_printk(KERN_WARNING, ha,
407 "HBA not online, failing VPD update.\n");
408 return -EAGAIN;
409 }
410
411 /* Write NVRAM. */
412 ha->isp_ops->write_nvram(vha, (uint8_t *)buf, ha->vpd_base, count);
413 ha->isp_ops->read_nvram(vha, (uint8_t *)ha->vpd, ha->vpd_base, count);
414
415 /* Update flash version information for 4Gb & above. */
416 if (!IS_FWI2_CAPABLE(ha))
417 goto done;
418
419 tmp_data = vmalloc(256);
420 if (!tmp_data) {
421 qla_printk(KERN_WARNING, ha,
422 "Unable to allocate memory for VPD information update.\n");
423 goto done;
424 }
425 ha->isp_ops->get_flash_version(vha, tmp_data);
426 vfree(tmp_data);
427 done:
428 return count;
429 }
430
431 static struct bin_attribute sysfs_vpd_attr = {
432 .attr = {
433 .name = "vpd",
434 .mode = S_IRUSR | S_IWUSR,
435 },
436 .size = 0,
437 .read = qla2x00_sysfs_read_vpd,
438 .write = qla2x00_sysfs_write_vpd,
439 };
440
441 static ssize_t
442 qla2x00_sysfs_read_sfp(struct kobject *kobj,
443 struct bin_attribute *bin_attr,
444 char *buf, loff_t off, size_t count)
445 {
446 struct scsi_qla_host *vha = shost_priv(dev_to_shost(container_of(kobj,
447 struct device, kobj)));
448 struct qla_hw_data *ha = vha->hw;
449 uint16_t iter, addr, offset;
450 int rval;
451
452 if (!capable(CAP_SYS_ADMIN) || off != 0 || count != SFP_DEV_SIZE * 2)
453 return 0;
454
455 if (ha->sfp_data)
456 goto do_read;
457
458 ha->sfp_data = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL,
459 &ha->sfp_data_dma);
460 if (!ha->sfp_data) {
461 qla_printk(KERN_WARNING, ha,
462 "Unable to allocate memory for SFP read-data.\n");
463 return 0;
464 }
465
466 do_read:
467 memset(ha->sfp_data, 0, SFP_BLOCK_SIZE);
468 addr = 0xa0;
469 for (iter = 0, offset = 0; iter < (SFP_DEV_SIZE * 2) / SFP_BLOCK_SIZE;
470 iter++, offset += SFP_BLOCK_SIZE) {
471 if (iter == 4) {
472 /* Skip to next device address. */
473 addr = 0xa2;
474 offset = 0;
475 }
476
477 rval = qla2x00_read_sfp(vha, ha->sfp_data_dma, addr, offset,
478 SFP_BLOCK_SIZE);
479 if (rval != QLA_SUCCESS) {
480 qla_printk(KERN_WARNING, ha,
481 "Unable to read SFP data (%x/%x/%x).\n", rval,
482 addr, offset);
483 count = 0;
484 break;
485 }
486 memcpy(buf, ha->sfp_data, SFP_BLOCK_SIZE);
487 buf += SFP_BLOCK_SIZE;
488 }
489
490 return count;
491 }
492
493 static struct bin_attribute sysfs_sfp_attr = {
494 .attr = {
495 .name = "sfp",
496 .mode = S_IRUSR | S_IWUSR,
497 },
498 .size = SFP_DEV_SIZE * 2,
499 .read = qla2x00_sysfs_read_sfp,
500 };
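/*
 * For context: the read handler above fills the 2 * SFP_DEV_SIZE buffer in
 * SFP_BLOCK_SIZE chunks via qla2x00_read_sfp(), starting at two-wire address
 * 0xa0 and switching to 0xa2 on the fifth chunk (iter == 4).  These addresses
 * correspond to the standard SFP A0/A2 pages; the chunk and page sizes come
 * from SFP_BLOCK_SIZE and SFP_DEV_SIZE in the driver headers.
 */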
501
502 static ssize_t
503 qla2x00_sysfs_write_reset(struct kobject *kobj,
504 struct bin_attribute *bin_attr,
505 char *buf, loff_t off, size_t count)
506 {
507 struct scsi_qla_host *vha = shost_priv(dev_to_shost(container_of(kobj,
508 struct device, kobj)));
509 struct qla_hw_data *ha = vha->hw;
510 int type;
511
512 if (off != 0)
513 return 0;
514
515 type = simple_strtol(buf, NULL, 10);
516 switch (type) {
517 case 0x2025c:
518 qla_printk(KERN_INFO, ha,
519 "Issuing ISP reset on (%ld).\n", vha->host_no);
520
521 scsi_block_requests(vha->host);
522 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
523 qla2xxx_wake_dpc(vha);
524 qla2x00_wait_for_chip_reset(vha);
525 scsi_unblock_requests(vha->host);
526 break;
527 case 0x2025d:
528 if (!IS_QLA81XX(ha))
529 break;
530
531 qla_printk(KERN_INFO, ha,
532 "Issuing MPI reset on (%ld).\n", vha->host_no);
533
534 /* Make sure FC side is not in reset */
535 qla2x00_wait_for_hba_online(vha);
536
537 /* Issue MPI reset */
538 scsi_block_requests(vha->host);
539 if (qla81xx_restart_mpi_firmware(vha) != QLA_SUCCESS)
540 qla_printk(KERN_WARNING, ha,
541 "MPI reset failed on (%ld).\n", vha->host_no);
542 scsi_unblock_requests(vha->host);
543 break;
544 }
545 return count;
546 }
547
548 static struct bin_attribute sysfs_reset_attr = {
549 .attr = {
550 .name = "reset",
551 .mode = S_IWUSR,
552 },
553 .size = 0,
554 .write = qla2x00_sysfs_write_reset,
555 };
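/*
 * Note: the reset selector above is parsed with simple_strtol(buf, NULL, 10),
 * i.e. in base 10, so user space writes the decimal equivalents of the magic
 * values -- 131676 (0x2025c) for the ISP reset and 131677 (0x2025d) for the
 * ISP81xx MPI reset.  Illustrative usage (sysfs path is an assumption):
 *
 *	echo 131676 > /sys/class/scsi_host/hostN/device/reset
 */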
556
557 static ssize_t
558 qla2x00_sysfs_write_edc(struct kobject *kobj,
559 struct bin_attribute *bin_attr,
560 char *buf, loff_t off, size_t count)
561 {
562 struct scsi_qla_host *vha = shost_priv(dev_to_shost(container_of(kobj,
563 struct device, kobj)));
564 struct qla_hw_data *ha = vha->hw;
565 uint16_t dev, adr, opt, len;
566 int rval;
567
568 ha->edc_data_len = 0;
569
570 if (!capable(CAP_SYS_ADMIN) || off != 0 || count < 8)
571 return 0;
572
573 if (!ha->edc_data) {
574 ha->edc_data = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL,
575 &ha->edc_data_dma);
576 if (!ha->edc_data) {
577 DEBUG2(qla_printk(KERN_INFO, ha,
578 "Unable to allocate memory for EDC write.\n"));
579 return 0;
580 }
581 }
582
583 dev = le16_to_cpup((void *)&buf[0]);
584 adr = le16_to_cpup((void *)&buf[2]);
585 opt = le16_to_cpup((void *)&buf[4]);
586 len = le16_to_cpup((void *)&buf[6]);
587
588 if (!(opt & BIT_0))
589 if (len == 0 || len > DMA_POOL_SIZE || len > count - 8)
590 return -EINVAL;
591
592 memcpy(ha->edc_data, &buf[8], len);
593
594 rval = qla2x00_write_edc(vha, dev, adr, ha->edc_data_dma,
595 ha->edc_data, len, opt);
596 if (rval != QLA_SUCCESS) {
597 DEBUG2(qla_printk(KERN_INFO, ha,
598 "Unable to write EDC (%x) %02x:%02x:%04x:%02x:%02x.\n",
599 rval, dev, adr, opt, len, *buf));
600 return 0;
601 }
602
603 return count;
604 }
605
606 static struct bin_attribute sysfs_edc_attr = {
607 .attr = {
608 .name = "edc",
609 .mode = S_IWUSR,
610 },
611 .size = 0,
612 .write = qla2x00_sysfs_write_edc,
613 };
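/*
 * The EDC write handler above expects a small little-endian header followed
 * by the payload.  The hypothetical struct below only documents that layout
 * (the driver extracts the fields by hand with le16_to_cpup()):
 */
#if 0	/* illustrative sketch, not compiled */
struct qla2x00_edc_request {
	__le16	dev;	/* buf[0..1]: device address */
	__le16	adr;	/* buf[2..3]: register/offset within the device */
	__le16	opt;	/* buf[4..5]: options; BIT_0 bypasses the length check */
	__le16	len;	/* buf[6..7]: payload length in bytes */
	uint8_t	data[];	/* buf[8.. ]: payload copied to ha->edc_data */
};
#endif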
614
615 static ssize_t
616 qla2x00_sysfs_write_edc_status(struct kobject *kobj,
617 struct bin_attribute *bin_attr,
618 char *buf, loff_t off, size_t count)
619 {
620 struct scsi_qla_host *vha = shost_priv(dev_to_shost(container_of(kobj,
621 struct device, kobj)));
622 struct qla_hw_data *ha = vha->hw;
623 uint16_t dev, adr, opt, len;
624 int rval;
625
626 ha->edc_data_len = 0;
627
628 if (!capable(CAP_SYS_ADMIN) || off != 0 || count < 8)
629 return 0;
630
631 if (!ha->edc_data) {
632 ha->edc_data = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL,
633 &ha->edc_data_dma);
634 if (!ha->edc_data) {
635 DEBUG2(qla_printk(KERN_INFO, ha,
636 "Unable to allocate memory for EDC status.\n"));
637 return 0;
638 }
639 }
640
641 dev = le16_to_cpup((void *)&buf[0]);
642 adr = le16_to_cpup((void *)&buf[2]);
643 opt = le16_to_cpup((void *)&buf[4]);
644 len = le16_to_cpup((void *)&buf[6]);
645
646 if (!(opt & BIT_0))
647 if (len == 0 || len > DMA_POOL_SIZE)
648 return -EINVAL;
649
650 memset(ha->edc_data, 0, len);
651 rval = qla2x00_read_edc(vha, dev, adr, ha->edc_data_dma,
652 ha->edc_data, len, opt);
653 if (rval != QLA_SUCCESS) {
654 DEBUG2(qla_printk(KERN_INFO, ha,
655 "Unable to write EDC status (%x) %02x:%02x:%04x:%02x.\n",
656 rval, dev, adr, opt, len));
657 return 0;
658 }
659
660 ha->edc_data_len = len;
661
662 return count;
663 }
664
665 static ssize_t
666 qla2x00_sysfs_read_edc_status(struct kobject *kobj,
667 struct bin_attribute *bin_attr,
668 char *buf, loff_t off, size_t count)
669 {
670 struct scsi_qla_host *vha = shost_priv(dev_to_shost(container_of(kobj,
671 struct device, kobj)));
672 struct qla_hw_data *ha = vha->hw;
673
674 if (!capable(CAP_SYS_ADMIN) || off != 0 || count == 0)
675 return 0;
676
677 if (!ha->edc_data || ha->edc_data_len == 0 || ha->edc_data_len > count)
678 return -EINVAL;
679
680 memcpy(buf, ha->edc_data, ha->edc_data_len);
681
682 return ha->edc_data_len;
683 }
684
685 static struct bin_attribute sysfs_edc_status_attr = {
686 .attr = {
687 .name = "edc_status",
688 .mode = S_IRUSR | S_IWUSR,
689 },
690 .size = 0,
691 .write = qla2x00_sysfs_write_edc_status,
692 .read = qla2x00_sysfs_read_edc_status,
693 };
694
695 static struct sysfs_entry {
696 char *name;
697 struct bin_attribute *attr;
698 int is4GBp_only;
699 } bin_file_entries[] = {
700 { "fw_dump", &sysfs_fw_dump_attr, },
701 { "nvram", &sysfs_nvram_attr, },
702 { "optrom", &sysfs_optrom_attr, },
703 { "optrom_ctl", &sysfs_optrom_ctl_attr, },
704 { "vpd", &sysfs_vpd_attr, 1 },
705 { "sfp", &sysfs_sfp_attr, 1 },
706 { "reset", &sysfs_reset_attr, },
707 { "edc", &sysfs_edc_attr, 2 },
708 { "edc_status", &sysfs_edc_status_attr, 2 },
709 { NULL },
710 };
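/*
 * The is4GBp_only field acts as a creation gate in the helpers below:
 * 0 means the attribute is always created, 1 limits it to FWI2-capable
 * (4Gb and newer) adapters, and 2 additionally requires an ISP25xx part
 * (see the IS_FWI2_CAPABLE()/IS_QLA25XX() checks in
 * qla2x00_alloc_sysfs_attr() and qla2x00_free_sysfs_attr()).
 */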
711
712 void
713 qla2x00_alloc_sysfs_attr(scsi_qla_host_t *vha)
714 {
715 struct Scsi_Host *host = vha->host;
716 struct sysfs_entry *iter;
717 int ret;
718
719 for (iter = bin_file_entries; iter->name; iter++) {
720 if (iter->is4GBp_only && !IS_FWI2_CAPABLE(vha->hw))
721 continue;
722 if (iter->is4GBp_only == 2 && !IS_QLA25XX(vha->hw))
723 continue;
724
725 ret = sysfs_create_bin_file(&host->shost_gendev.kobj,
726 iter->attr);
727 if (ret)
728 qla_printk(KERN_INFO, vha->hw,
729 "Unable to create sysfs %s binary attribute "
730 "(%d).\n", iter->name, ret);
731 }
732 }
733
734 void
735 qla2x00_free_sysfs_attr(scsi_qla_host_t *vha)
736 {
737 struct Scsi_Host *host = vha->host;
738 struct sysfs_entry *iter;
739 struct qla_hw_data *ha = vha->hw;
740
741 for (iter = bin_file_entries; iter->name; iter++) {
742 if (iter->is4GBp_only && !IS_FWI2_CAPABLE(ha))
743 continue;
744 if (iter->is4GBp_only == 2 && !IS_QLA25XX(ha))
745 continue;
746
747 sysfs_remove_bin_file(&host->shost_gendev.kobj,
748 iter->attr);
749 }
750
751 if (ha->beacon_blink_led == 1)
752 ha->isp_ops->beacon_off(vha);
753 }
754
755 /* Scsi_Host attributes. */
756
757 static ssize_t
758 qla2x00_drvr_version_show(struct device *dev,
759 struct device_attribute *attr, char *buf)
760 {
761 return snprintf(buf, PAGE_SIZE, "%s\n", qla2x00_version_str);
762 }
763
764 static ssize_t
765 qla2x00_fw_version_show(struct device *dev,
766 struct device_attribute *attr, char *buf)
767 {
768 scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
769 struct qla_hw_data *ha = vha->hw;
770 char fw_str[128];
771
772 return snprintf(buf, PAGE_SIZE, "%s\n",
773 ha->isp_ops->fw_version_str(vha, fw_str));
774 }
775
776 static ssize_t
777 qla2x00_serial_num_show(struct device *dev, struct device_attribute *attr,
778 char *buf)
779 {
780 scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
781 struct qla_hw_data *ha = vha->hw;
782 uint32_t sn;
783
784 if (IS_FWI2_CAPABLE(ha)) {
785 		qla2xxx_get_vpd_field(vha, "SN", buf, PAGE_SIZE - 1);
786 		return strlen(strcat(buf, "\n"));	/* avoid snprintf() with overlapping src/dst */
787 }
788
789 sn = ((ha->serial0 & 0x1f) << 16) | (ha->serial2 << 8) | ha->serial1;
790 return snprintf(buf, PAGE_SIZE, "%c%05d\n", 'A' + sn / 100000,
791 sn % 100000);
792 }
793
794 static ssize_t
795 qla2x00_isp_name_show(struct device *dev, struct device_attribute *attr,
796 char *buf)
797 {
798 scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
799 return snprintf(buf, PAGE_SIZE, "ISP%04X\n", vha->hw->pdev->device);
800 }
801
802 static ssize_t
803 qla2x00_isp_id_show(struct device *dev, struct device_attribute *attr,
804 char *buf)
805 {
806 scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
807 struct qla_hw_data *ha = vha->hw;
808 return snprintf(buf, PAGE_SIZE, "%04x %04x %04x %04x\n",
809 ha->product_id[0], ha->product_id[1], ha->product_id[2],
810 ha->product_id[3]);
811 }
812
813 static ssize_t
814 qla2x00_model_name_show(struct device *dev, struct device_attribute *attr,
815 char *buf)
816 {
817 scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
818 return snprintf(buf, PAGE_SIZE, "%s\n", vha->hw->model_number);
819 }
820
821 static ssize_t
822 qla2x00_model_desc_show(struct device *dev, struct device_attribute *attr,
823 char *buf)
824 {
825 scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
826 return snprintf(buf, PAGE_SIZE, "%s\n",
827 vha->hw->model_desc ? vha->hw->model_desc : "");
828 }
829
830 static ssize_t
831 qla2x00_pci_info_show(struct device *dev, struct device_attribute *attr,
832 char *buf)
833 {
834 scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
835 char pci_info[30];
836
837 return snprintf(buf, PAGE_SIZE, "%s\n",
838 vha->hw->isp_ops->pci_info_str(vha, pci_info));
839 }
840
841 static ssize_t
842 qla2x00_link_state_show(struct device *dev, struct device_attribute *attr,
843 char *buf)
844 {
845 scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
846 struct qla_hw_data *ha = vha->hw;
847 int len = 0;
848
849 if (atomic_read(&vha->loop_state) == LOOP_DOWN ||
850 atomic_read(&vha->loop_state) == LOOP_DEAD)
851 len = snprintf(buf, PAGE_SIZE, "Link Down\n");
852 else if (atomic_read(&vha->loop_state) != LOOP_READY ||
853 test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags) ||
854 test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags))
855 len = snprintf(buf, PAGE_SIZE, "Unknown Link State\n");
856 else {
857 len = snprintf(buf, PAGE_SIZE, "Link Up - ");
858
859 switch (ha->current_topology) {
860 case ISP_CFG_NL:
861 len += snprintf(buf + len, PAGE_SIZE-len, "Loop\n");
862 break;
863 case ISP_CFG_FL:
864 len += snprintf(buf + len, PAGE_SIZE-len, "FL_Port\n");
865 break;
866 case ISP_CFG_N:
867 len += snprintf(buf + len, PAGE_SIZE-len,
868 "N_Port to N_Port\n");
869 break;
870 case ISP_CFG_F:
871 len += snprintf(buf + len, PAGE_SIZE-len, "F_Port\n");
872 break;
873 default:
874 len += snprintf(buf + len, PAGE_SIZE-len, "Loop\n");
875 break;
876 }
877 }
878 return len;
879 }
880
881 static ssize_t
882 qla2x00_zio_show(struct device *dev, struct device_attribute *attr,
883 char *buf)
884 {
885 scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
886 int len = 0;
887
888 switch (vha->hw->zio_mode) {
889 case QLA_ZIO_MODE_6:
890 len += snprintf(buf + len, PAGE_SIZE-len, "Mode 6\n");
891 break;
892 case QLA_ZIO_DISABLED:
893 len += snprintf(buf + len, PAGE_SIZE-len, "Disabled\n");
894 break;
895 }
896 return len;
897 }
898
899 static ssize_t
900 qla2x00_zio_store(struct device *dev, struct device_attribute *attr,
901 const char *buf, size_t count)
902 {
903 scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
904 struct qla_hw_data *ha = vha->hw;
905 int val = 0;
906 uint16_t zio_mode;
907
908 if (!IS_ZIO_SUPPORTED(ha))
909 return -ENOTSUPP;
910
911 if (sscanf(buf, "%d", &val) != 1)
912 return -EINVAL;
913
914 if (val)
915 zio_mode = QLA_ZIO_MODE_6;
916 else
917 zio_mode = QLA_ZIO_DISABLED;
918
919 /* Update per-hba values and queue a reset. */
920 if (zio_mode != QLA_ZIO_DISABLED || ha->zio_mode != QLA_ZIO_DISABLED) {
921 ha->zio_mode = zio_mode;
922 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
923 }
924 return strlen(buf);
925 }
926
927 static ssize_t
928 qla2x00_zio_timer_show(struct device *dev, struct device_attribute *attr,
929 char *buf)
930 {
931 scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
932
933 return snprintf(buf, PAGE_SIZE, "%d us\n", vha->hw->zio_timer * 100);
934 }
935
936 static ssize_t
937 qla2x00_zio_timer_store(struct device *dev, struct device_attribute *attr,
938 const char *buf, size_t count)
939 {
940 scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
941 int val = 0;
942 uint16_t zio_timer;
943
944 if (sscanf(buf, "%d", &val) != 1)
945 return -EINVAL;
946 if (val > 25500 || val < 100)
947 return -ERANGE;
948
949 zio_timer = (uint16_t)(val / 100);
950 vha->hw->zio_timer = zio_timer;
951
952 return strlen(buf);
953 }
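/*
 * The ZIO timer is stored in units of 100 microseconds: the show routine
 * above multiplies by 100 for display and the store routine divides by 100,
 * so writing 1000 results in zio_timer == 10 (a 1000us delay).  Values
 * outside the 100..25500 range are rejected with -ERANGE.
 */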
954
955 static ssize_t
956 qla2x00_beacon_show(struct device *dev, struct device_attribute *attr,
957 char *buf)
958 {
959 scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
960 int len = 0;
961
962 if (vha->hw->beacon_blink_led)
963 len += snprintf(buf + len, PAGE_SIZE-len, "Enabled\n");
964 else
965 len += snprintf(buf + len, PAGE_SIZE-len, "Disabled\n");
966 return len;
967 }
968
969 static ssize_t
970 qla2x00_beacon_store(struct device *dev, struct device_attribute *attr,
971 const char *buf, size_t count)
972 {
973 scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
974 struct qla_hw_data *ha = vha->hw;
975 int val = 0;
976 int rval;
977
978 if (IS_QLA2100(ha) || IS_QLA2200(ha))
979 return -EPERM;
980
981 if (test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags)) {
982 qla_printk(KERN_WARNING, ha,
983 "Abort ISP active -- ignoring beacon request.\n");
984 return -EBUSY;
985 }
986
987 if (sscanf(buf, "%d", &val) != 1)
988 return -EINVAL;
989
990 if (val)
991 rval = ha->isp_ops->beacon_on(vha);
992 else
993 rval = ha->isp_ops->beacon_off(vha);
994
995 if (rval != QLA_SUCCESS)
996 count = 0;
997
998 return count;
999 }
1000
1001 static ssize_t
1002 qla2x00_optrom_bios_version_show(struct device *dev,
1003 struct device_attribute *attr, char *buf)
1004 {
1005 scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
1006 struct qla_hw_data *ha = vha->hw;
1007 return snprintf(buf, PAGE_SIZE, "%d.%02d\n", ha->bios_revision[1],
1008 ha->bios_revision[0]);
1009 }
1010
1011 static ssize_t
1012 qla2x00_optrom_efi_version_show(struct device *dev,
1013 struct device_attribute *attr, char *buf)
1014 {
1015 scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
1016 struct qla_hw_data *ha = vha->hw;
1017 return snprintf(buf, PAGE_SIZE, "%d.%02d\n", ha->efi_revision[1],
1018 ha->efi_revision[0]);
1019 }
1020
1021 static ssize_t
1022 qla2x00_optrom_fcode_version_show(struct device *dev,
1023 struct device_attribute *attr, char *buf)
1024 {
1025 scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
1026 struct qla_hw_data *ha = vha->hw;
1027 return snprintf(buf, PAGE_SIZE, "%d.%02d\n", ha->fcode_revision[1],
1028 ha->fcode_revision[0]);
1029 }
1030
1031 static ssize_t
1032 qla2x00_optrom_fw_version_show(struct device *dev,
1033 struct device_attribute *attr, char *buf)
1034 {
1035 scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
1036 struct qla_hw_data *ha = vha->hw;
1037 return snprintf(buf, PAGE_SIZE, "%d.%02d.%02d %d\n",
1038 ha->fw_revision[0], ha->fw_revision[1], ha->fw_revision[2],
1039 ha->fw_revision[3]);
1040 }
1041
1042 static ssize_t
1043 qla2x00_total_isp_aborts_show(struct device *dev,
1044 struct device_attribute *attr, char *buf)
1045 {
1046 scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
1047 struct qla_hw_data *ha = vha->hw;
1048 return snprintf(buf, PAGE_SIZE, "%d\n",
1049 ha->qla_stats.total_isp_aborts);
1050 }
1051
1052 static ssize_t
1053 qla2x00_mpi_version_show(struct device *dev, struct device_attribute *attr,
1054 char *buf)
1055 {
1056 scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
1057 struct qla_hw_data *ha = vha->hw;
1058
1059 if (!IS_QLA81XX(ha))
1060 return snprintf(buf, PAGE_SIZE, "\n");
1061
1062 return snprintf(buf, PAGE_SIZE, "%d.%02d.%02d (%x)\n",
1063 ha->mpi_version[0], ha->mpi_version[1], ha->mpi_version[2],
1064 ha->mpi_capabilities);
1065 }
1066
1067 static ssize_t
1068 qla2x00_phy_version_show(struct device *dev, struct device_attribute *attr,
1069 char *buf)
1070 {
1071 scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
1072 struct qla_hw_data *ha = vha->hw;
1073
1074 if (!IS_QLA81XX(ha))
1075 return snprintf(buf, PAGE_SIZE, "\n");
1076
1077 return snprintf(buf, PAGE_SIZE, "%d.%02d.%02d\n",
1078 ha->phy_version[0], ha->phy_version[1], ha->phy_version[2]);
1079 }
1080
1081 static ssize_t
1082 qla2x00_flash_block_size_show(struct device *dev,
1083 struct device_attribute *attr, char *buf)
1084 {
1085 scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
1086 struct qla_hw_data *ha = vha->hw;
1087
1088 return snprintf(buf, PAGE_SIZE, "0x%x\n", ha->fdt_block_size);
1089 }
1090
1091 static DEVICE_ATTR(driver_version, S_IRUGO, qla2x00_drvr_version_show, NULL);
1092 static DEVICE_ATTR(fw_version, S_IRUGO, qla2x00_fw_version_show, NULL);
1093 static DEVICE_ATTR(serial_num, S_IRUGO, qla2x00_serial_num_show, NULL);
1094 static DEVICE_ATTR(isp_name, S_IRUGO, qla2x00_isp_name_show, NULL);
1095 static DEVICE_ATTR(isp_id, S_IRUGO, qla2x00_isp_id_show, NULL);
1096 static DEVICE_ATTR(model_name, S_IRUGO, qla2x00_model_name_show, NULL);
1097 static DEVICE_ATTR(model_desc, S_IRUGO, qla2x00_model_desc_show, NULL);
1098 static DEVICE_ATTR(pci_info, S_IRUGO, qla2x00_pci_info_show, NULL);
1099 static DEVICE_ATTR(link_state, S_IRUGO, qla2x00_link_state_show, NULL);
1100 static DEVICE_ATTR(zio, S_IRUGO | S_IWUSR, qla2x00_zio_show, qla2x00_zio_store);
1101 static DEVICE_ATTR(zio_timer, S_IRUGO | S_IWUSR, qla2x00_zio_timer_show,
1102 qla2x00_zio_timer_store);
1103 static DEVICE_ATTR(beacon, S_IRUGO | S_IWUSR, qla2x00_beacon_show,
1104 qla2x00_beacon_store);
1105 static DEVICE_ATTR(optrom_bios_version, S_IRUGO,
1106 qla2x00_optrom_bios_version_show, NULL);
1107 static DEVICE_ATTR(optrom_efi_version, S_IRUGO,
1108 qla2x00_optrom_efi_version_show, NULL);
1109 static DEVICE_ATTR(optrom_fcode_version, S_IRUGO,
1110 qla2x00_optrom_fcode_version_show, NULL);
1111 static DEVICE_ATTR(optrom_fw_version, S_IRUGO, qla2x00_optrom_fw_version_show,
1112 NULL);
1113 static DEVICE_ATTR(total_isp_aborts, S_IRUGO, qla2x00_total_isp_aborts_show,
1114 NULL);
1115 static DEVICE_ATTR(mpi_version, S_IRUGO, qla2x00_mpi_version_show, NULL);
1116 static DEVICE_ATTR(phy_version, S_IRUGO, qla2x00_phy_version_show, NULL);
1117 static DEVICE_ATTR(flash_block_size, S_IRUGO, qla2x00_flash_block_size_show,
1118 NULL);
1119
1120 struct device_attribute *qla2x00_host_attrs[] = {
1121 &dev_attr_driver_version,
1122 &dev_attr_fw_version,
1123 &dev_attr_serial_num,
1124 &dev_attr_isp_name,
1125 &dev_attr_isp_id,
1126 &dev_attr_model_name,
1127 &dev_attr_model_desc,
1128 &dev_attr_pci_info,
1129 &dev_attr_link_state,
1130 &dev_attr_zio,
1131 &dev_attr_zio_timer,
1132 &dev_attr_beacon,
1133 &dev_attr_optrom_bios_version,
1134 &dev_attr_optrom_efi_version,
1135 &dev_attr_optrom_fcode_version,
1136 &dev_attr_optrom_fw_version,
1137 &dev_attr_total_isp_aborts,
1138 &dev_attr_mpi_version,
1139 &dev_attr_phy_version,
1140 &dev_attr_flash_block_size,
1141 NULL,
1142 };
1143
1144 /* Host attributes. */
1145
1146 static void
1147 qla2x00_get_host_port_id(struct Scsi_Host *shost)
1148 {
1149 scsi_qla_host_t *vha = shost_priv(shost);
1150
1151 fc_host_port_id(shost) = vha->d_id.b.domain << 16 |
1152 vha->d_id.b.area << 8 | vha->d_id.b.al_pa;
1153 }
1154
1155 static void
1156 qla2x00_get_host_speed(struct Scsi_Host *shost)
1157 {
1158 struct qla_hw_data *ha = ((struct scsi_qla_host *)
1159 (shost_priv(shost)))->hw;
1160 u32 speed = FC_PORTSPEED_UNKNOWN;
1161
1162 switch (ha->link_data_rate) {
1163 case PORT_SPEED_1GB:
1164 speed = FC_PORTSPEED_1GBIT;
1165 break;
1166 case PORT_SPEED_2GB:
1167 speed = FC_PORTSPEED_2GBIT;
1168 break;
1169 case PORT_SPEED_4GB:
1170 speed = FC_PORTSPEED_4GBIT;
1171 break;
1172 case PORT_SPEED_8GB:
1173 speed = FC_PORTSPEED_8GBIT;
1174 break;
1175 case PORT_SPEED_10GB:
1176 speed = FC_PORTSPEED_10GBIT;
1177 break;
1178 }
1179 fc_host_speed(shost) = speed;
1180 }
1181
1182 static void
1183 qla2x00_get_host_port_type(struct Scsi_Host *shost)
1184 {
1185 scsi_qla_host_t *vha = shost_priv(shost);
1186 uint32_t port_type = FC_PORTTYPE_UNKNOWN;
1187
1188 if (vha->vp_idx) {
1189 fc_host_port_type(shost) = FC_PORTTYPE_NPIV;
1190 return;
1191 }
1192 switch (vha->hw->current_topology) {
1193 case ISP_CFG_NL:
1194 port_type = FC_PORTTYPE_LPORT;
1195 break;
1196 case ISP_CFG_FL:
1197 port_type = FC_PORTTYPE_NLPORT;
1198 break;
1199 case ISP_CFG_N:
1200 port_type = FC_PORTTYPE_PTP;
1201 break;
1202 case ISP_CFG_F:
1203 port_type = FC_PORTTYPE_NPORT;
1204 break;
1205 }
1206 fc_host_port_type(shost) = port_type;
1207 }
1208
1209 static void
1210 qla2x00_get_starget_node_name(struct scsi_target *starget)
1211 {
1212 struct Scsi_Host *host = dev_to_shost(starget->dev.parent);
1213 scsi_qla_host_t *vha = shost_priv(host);
1214 fc_port_t *fcport;
1215 u64 node_name = 0;
1216
1217 list_for_each_entry(fcport, &vha->vp_fcports, list) {
1218 if (fcport->rport &&
1219 starget->id == fcport->rport->scsi_target_id) {
1220 node_name = wwn_to_u64(fcport->node_name);
1221 break;
1222 }
1223 }
1224
1225 fc_starget_node_name(starget) = node_name;
1226 }
1227
1228 static void
1229 qla2x00_get_starget_port_name(struct scsi_target *starget)
1230 {
1231 struct Scsi_Host *host = dev_to_shost(starget->dev.parent);
1232 scsi_qla_host_t *vha = shost_priv(host);
1233 fc_port_t *fcport;
1234 u64 port_name = 0;
1235
1236 list_for_each_entry(fcport, &vha->vp_fcports, list) {
1237 if (fcport->rport &&
1238 starget->id == fcport->rport->scsi_target_id) {
1239 port_name = wwn_to_u64(fcport->port_name);
1240 break;
1241 }
1242 }
1243
1244 fc_starget_port_name(starget) = port_name;
1245 }
1246
1247 static void
1248 qla2x00_get_starget_port_id(struct scsi_target *starget)
1249 {
1250 struct Scsi_Host *host = dev_to_shost(starget->dev.parent);
1251 scsi_qla_host_t *vha = shost_priv(host);
1252 fc_port_t *fcport;
1253 uint32_t port_id = ~0U;
1254
1255 list_for_each_entry(fcport, &vha->vp_fcports, list) {
1256 if (fcport->rport &&
1257 starget->id == fcport->rport->scsi_target_id) {
1258 port_id = fcport->d_id.b.domain << 16 |
1259 fcport->d_id.b.area << 8 | fcport->d_id.b.al_pa;
1260 break;
1261 }
1262 }
1263
1264 fc_starget_port_id(starget) = port_id;
1265 }
1266
1267 static void
1268 qla2x00_set_rport_loss_tmo(struct fc_rport *rport, uint32_t timeout)
1269 {
1270 if (timeout)
1271 rport->dev_loss_tmo = timeout;
1272 else
1273 rport->dev_loss_tmo = 1;
1274 }
1275
1276 static void
1277 qla2x00_dev_loss_tmo_callbk(struct fc_rport *rport)
1278 {
1279 struct Scsi_Host *host = rport_to_shost(rport);
1280 fc_port_t *fcport = *(fc_port_t **)rport->dd_data;
1281
1282 if (!fcport)
1283 return;
1284
1285 if (unlikely(pci_channel_offline(fcport->vha->hw->pdev)))
1286 qla2x00_abort_all_cmds(fcport->vha, DID_NO_CONNECT << 16);
1287 else
1288 qla2x00_abort_fcport_cmds(fcport);
1289
1290 /*
1291 * Transport has effectively 'deleted' the rport, clear
1292 * all local references.
1293 */
1294 spin_lock_irq(host->host_lock);
1295 fcport->rport = NULL;
1296 *((fc_port_t **)rport->dd_data) = NULL;
1297 spin_unlock_irq(host->host_lock);
1298 }
1299
1300 static void
1301 qla2x00_terminate_rport_io(struct fc_rport *rport)
1302 {
1303 fc_port_t *fcport = *(fc_port_t **)rport->dd_data;
1304
1305 if (!fcport)
1306 return;
1307
1308 if (unlikely(pci_channel_offline(fcport->vha->hw->pdev))) {
1309 qla2x00_abort_all_cmds(fcport->vha, DID_NO_CONNECT << 16);
1310 return;
1311 }
1312 /*
1313 * At this point all fcport's software-states are cleared. Perform any
1314 * final cleanup of firmware resources (PCBs and XCBs).
1315 */
1316 if (fcport->loop_id != FC_NO_LOOP_ID)
1317 fcport->vha->hw->isp_ops->fabric_logout(fcport->vha,
1318 fcport->loop_id, fcport->d_id.b.domain,
1319 fcport->d_id.b.area, fcport->d_id.b.al_pa);
1320
1321 qla2x00_abort_fcport_cmds(fcport);
1322 }
1323
1324 static int
1325 qla2x00_issue_lip(struct Scsi_Host *shost)
1326 {
1327 scsi_qla_host_t *vha = shost_priv(shost);
1328
1329 qla2x00_loop_reset(vha);
1330 return 0;
1331 }
1332
1333 static struct fc_host_statistics *
1334 qla2x00_get_fc_host_stats(struct Scsi_Host *shost)
1335 {
1336 scsi_qla_host_t *vha = shost_priv(shost);
1337 struct qla_hw_data *ha = vha->hw;
1338 struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
1339 int rval;
1340 struct link_statistics *stats;
1341 dma_addr_t stats_dma;
1342 struct fc_host_statistics *pfc_host_stat;
1343
1344 pfc_host_stat = &ha->fc_host_stat;
1345 memset(pfc_host_stat, -1, sizeof(struct fc_host_statistics));
1346
1347 stats = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &stats_dma);
1348 if (stats == NULL) {
1349 DEBUG2_3_11(printk("%s(%ld): Failed to allocate memory.\n",
1350 __func__, base_vha->host_no));
1351 goto done;
1352 }
1353 memset(stats, 0, DMA_POOL_SIZE);
1354
1355 rval = QLA_FUNCTION_FAILED;
1356 if (IS_FWI2_CAPABLE(ha)) {
1357 rval = qla24xx_get_isp_stats(base_vha, stats, stats_dma);
1358 } else if (atomic_read(&base_vha->loop_state) == LOOP_READY &&
1359 !test_bit(ABORT_ISP_ACTIVE, &base_vha->dpc_flags) &&
1360 !test_bit(ISP_ABORT_NEEDED, &base_vha->dpc_flags) &&
1361 !ha->dpc_active) {
1362 /* Must be in a 'READY' state for statistics retrieval. */
1363 rval = qla2x00_get_link_status(base_vha, base_vha->loop_id,
1364 stats, stats_dma);
1365 }
1366
1367 if (rval != QLA_SUCCESS)
1368 goto done_free;
1369
1370 pfc_host_stat->link_failure_count = stats->link_fail_cnt;
1371 pfc_host_stat->loss_of_sync_count = stats->loss_sync_cnt;
1372 pfc_host_stat->loss_of_signal_count = stats->loss_sig_cnt;
1373 pfc_host_stat->prim_seq_protocol_err_count = stats->prim_seq_err_cnt;
1374 pfc_host_stat->invalid_tx_word_count = stats->inval_xmit_word_cnt;
1375 pfc_host_stat->invalid_crc_count = stats->inval_crc_cnt;
1376 if (IS_FWI2_CAPABLE(ha)) {
1377 pfc_host_stat->lip_count = stats->lip_cnt;
1378 pfc_host_stat->tx_frames = stats->tx_frames;
1379 pfc_host_stat->rx_frames = stats->rx_frames;
1380 pfc_host_stat->dumped_frames = stats->dumped_frames;
1381 pfc_host_stat->nos_count = stats->nos_rcvd;
1382 }
1383 pfc_host_stat->fcp_input_megabytes = ha->qla_stats.input_bytes >> 20;
1384 pfc_host_stat->fcp_output_megabytes = ha->qla_stats.output_bytes >> 20;
1385
1386 done_free:
1387 dma_pool_free(ha->s_dma_pool, stats, stats_dma);
1388 done:
1389 return pfc_host_stat;
1390 }
1391
1392 static void
1393 qla2x00_get_host_symbolic_name(struct Scsi_Host *shost)
1394 {
1395 scsi_qla_host_t *vha = shost_priv(shost);
1396
1397 qla2x00_get_sym_node_name(vha, fc_host_symbolic_name(shost));
1398 }
1399
1400 static void
1401 qla2x00_set_host_system_hostname(struct Scsi_Host *shost)
1402 {
1403 scsi_qla_host_t *vha = shost_priv(shost);
1404
1405 set_bit(REGISTER_FDMI_NEEDED, &vha->dpc_flags);
1406 }
1407
1408 static void
1409 qla2x00_get_host_fabric_name(struct Scsi_Host *shost)
1410 {
1411 scsi_qla_host_t *vha = shost_priv(shost);
1412 u64 node_name;
1413
1414 if (vha->device_flags & SWITCH_FOUND)
1415 node_name = wwn_to_u64(vha->fabric_node_name);
1416 else
1417 node_name = wwn_to_u64(vha->node_name);
1418
1419 fc_host_fabric_name(shost) = node_name;
1420 }
1421
1422 static void
1423 qla2x00_get_host_port_state(struct Scsi_Host *shost)
1424 {
1425 scsi_qla_host_t *vha = shost_priv(shost);
1426 struct scsi_qla_host *base_vha = pci_get_drvdata(vha->hw->pdev);
1427
1428 if (!base_vha->flags.online)
1429 fc_host_port_state(shost) = FC_PORTSTATE_OFFLINE;
1430 else if (atomic_read(&base_vha->loop_state) == LOOP_TIMEOUT)
1431 fc_host_port_state(shost) = FC_PORTSTATE_UNKNOWN;
1432 else
1433 fc_host_port_state(shost) = FC_PORTSTATE_ONLINE;
1434 }
1435
1436 static int
1437 qla24xx_vport_create(struct fc_vport *fc_vport, bool disable)
1438 {
1439 int ret = 0;
1440 int cnt = 0;
1441 uint8_t qos = QLA_DEFAULT_QUE_QOS;
1442 scsi_qla_host_t *base_vha = shost_priv(fc_vport->shost);
1443 scsi_qla_host_t *vha = NULL;
1444 struct qla_hw_data *ha = base_vha->hw;
1445
1446 ret = qla24xx_vport_create_req_sanity_check(fc_vport);
1447 if (ret) {
1448 DEBUG15(printk("qla24xx_vport_create_req_sanity_check failed, "
1449 "status %x\n", ret));
1450 return (ret);
1451 }
1452
1453 vha = qla24xx_create_vhost(fc_vport);
1454 if (vha == NULL) {
1455 DEBUG15(printk ("qla24xx_create_vhost failed, vha = %p\n",
1456 vha));
1457 return FC_VPORT_FAILED;
1458 }
1459 if (disable) {
1460 atomic_set(&vha->vp_state, VP_OFFLINE);
1461 fc_vport_set_state(fc_vport, FC_VPORT_DISABLED);
1462 } else
1463 atomic_set(&vha->vp_state, VP_FAILED);
1464
1465 /* ready to create vport */
1466 qla_printk(KERN_INFO, vha->hw, "VP entry id %d assigned.\n",
1467 vha->vp_idx);
1468
1469 /* initialized vport states */
1470 atomic_set(&vha->loop_state, LOOP_DOWN);
1471 	vha->vp_err_state = VP_ERR_PORTDWN;
1472 	vha->vp_prev_err_state = VP_ERR_UNKWN;
1473 /* Check if physical ha port is Up */
1474 if (atomic_read(&base_vha->loop_state) == LOOP_DOWN ||
1475 atomic_read(&base_vha->loop_state) == LOOP_DEAD) {
1476 /* Don't retry or attempt login of this virtual port */
1477 DEBUG15(printk ("scsi(%ld): pport loop_state is not UP.\n",
1478 base_vha->host_no));
1479 atomic_set(&vha->loop_state, LOOP_DEAD);
1480 if (!disable)
1481 fc_vport_set_state(fc_vport, FC_VPORT_LINKDOWN);
1482 }
1483
1484 if (scsi_add_host(vha->host, &fc_vport->dev)) {
1485 DEBUG15(printk("scsi(%ld): scsi_add_host failure for VP[%d].\n",
1486 vha->host_no, vha->vp_idx));
1487 goto vport_create_failed_2;
1488 }
1489
1490 /* initialize attributes */
1491 fc_host_node_name(vha->host) = wwn_to_u64(vha->node_name);
1492 fc_host_port_name(vha->host) = wwn_to_u64(vha->port_name);
1493 fc_host_supported_classes(vha->host) =
1494 fc_host_supported_classes(base_vha->host);
1495 fc_host_supported_speeds(vha->host) =
1496 fc_host_supported_speeds(base_vha->host);
1497
1498 qla24xx_vport_disable(fc_vport, disable);
1499
1500 /* Create a queue pair for the vport */
1501 if (ha->mqenable) {
1502 if (ha->npiv_info) {
1503 for (; cnt < ha->nvram_npiv_size; cnt++) {
1504 if (ha->npiv_info[cnt].port_name ==
1505 vha->port_name &&
1506 ha->npiv_info[cnt].node_name ==
1507 vha->node_name) {
1508 qos = ha->npiv_info[cnt].q_qos;
1509 break;
1510 }
1511 }
1512 }
1513 qla25xx_create_queues(vha, qos);
1514 }
1515
1516 return 0;
1517 vport_create_failed_2:
1518 qla24xx_disable_vp(vha);
1519 qla24xx_deallocate_vp_id(vha);
1520 scsi_host_put(vha->host);
1521 return FC_VPORT_FAILED;
1522 }
1523
1524 static int
1525 qla24xx_vport_delete(struct fc_vport *fc_vport)
1526 {
1527 scsi_qla_host_t *vha = fc_vport->dd_data;
1528 fc_port_t *fcport, *tfcport;
1529 struct qla_hw_data *ha = vha->hw;
1530 uint16_t id = vha->vp_idx;
1531
1532 while (test_bit(LOOP_RESYNC_ACTIVE, &vha->dpc_flags) ||
1533 test_bit(FCPORT_UPDATE_NEEDED, &vha->dpc_flags))
1534 msleep(1000);
1535
1536 qla24xx_disable_vp(vha);
1537
1538 fc_remove_host(vha->host);
1539
1540 scsi_remove_host(vha->host);
1541
1542 list_for_each_entry_safe(fcport, tfcport, &vha->vp_fcports, list) {
1543 list_del(&fcport->list);
1544 kfree(fcport);
1545 fcport = NULL;
1546 }
1547
1548 qla24xx_deallocate_vp_id(vha);
1549
1550 if (vha->timer_active) {
1551 qla2x00_vp_stop_timer(vha);
1552 DEBUG15(printk ("scsi(%ld): timer for the vport[%d] = %p "
1553 "has stopped\n",
1554 vha->host_no, vha->vp_idx, vha));
1555 }
1556
1557 if (ha->mqenable) {
1558 if (qla25xx_delete_queues(vha, 0) != QLA_SUCCESS)
1559 qla_printk(KERN_WARNING, ha,
1560 "Queue delete failed.\n");
1561 }
1562
1563 scsi_host_put(vha->host);
1564 qla_printk(KERN_INFO, ha, "vport %d deleted\n", id);
1565 return 0;
1566 }
1567
1568 static int
1569 qla24xx_vport_disable(struct fc_vport *fc_vport, bool disable)
1570 {
1571 scsi_qla_host_t *vha = fc_vport->dd_data;
1572
1573 if (disable)
1574 qla24xx_disable_vp(vha);
1575 else
1576 qla24xx_enable_vp(vha);
1577
1578 return 0;
1579 }
1580
1581 struct fc_function_template qla2xxx_transport_functions = {
1582
1583 .show_host_node_name = 1,
1584 .show_host_port_name = 1,
1585 .show_host_supported_classes = 1,
1586 .show_host_supported_speeds = 1,
1587
1588 .get_host_port_id = qla2x00_get_host_port_id,
1589 .show_host_port_id = 1,
1590 .get_host_speed = qla2x00_get_host_speed,
1591 .show_host_speed = 1,
1592 .get_host_port_type = qla2x00_get_host_port_type,
1593 .show_host_port_type = 1,
1594 .get_host_symbolic_name = qla2x00_get_host_symbolic_name,
1595 .show_host_symbolic_name = 1,
1596 .set_host_system_hostname = qla2x00_set_host_system_hostname,
1597 .show_host_system_hostname = 1,
1598 .get_host_fabric_name = qla2x00_get_host_fabric_name,
1599 .show_host_fabric_name = 1,
1600 .get_host_port_state = qla2x00_get_host_port_state,
1601 .show_host_port_state = 1,
1602
1603 .dd_fcrport_size = sizeof(struct fc_port *),
1604 .show_rport_supported_classes = 1,
1605
1606 .get_starget_node_name = qla2x00_get_starget_node_name,
1607 .show_starget_node_name = 1,
1608 .get_starget_port_name = qla2x00_get_starget_port_name,
1609 .show_starget_port_name = 1,
1610 .get_starget_port_id = qla2x00_get_starget_port_id,
1611 .show_starget_port_id = 1,
1612
1613 .set_rport_dev_loss_tmo = qla2x00_set_rport_loss_tmo,
1614 .show_rport_dev_loss_tmo = 1,
1615
1616 .issue_fc_host_lip = qla2x00_issue_lip,
1617 .dev_loss_tmo_callbk = qla2x00_dev_loss_tmo_callbk,
1618 .terminate_rport_io = qla2x00_terminate_rport_io,
1619 .get_fc_host_stats = qla2x00_get_fc_host_stats,
1620
1621 .vport_create = qla24xx_vport_create,
1622 .vport_disable = qla24xx_vport_disable,
1623 .vport_delete = qla24xx_vport_delete,
1624 };
1625
1626 struct fc_function_template qla2xxx_transport_vport_functions = {
1627
1628 .show_host_node_name = 1,
1629 .show_host_port_name = 1,
1630 .show_host_supported_classes = 1,
1631
1632 .get_host_port_id = qla2x00_get_host_port_id,
1633 .show_host_port_id = 1,
1634 .get_host_speed = qla2x00_get_host_speed,
1635 .show_host_speed = 1,
1636 .get_host_port_type = qla2x00_get_host_port_type,
1637 .show_host_port_type = 1,
1638 .get_host_symbolic_name = qla2x00_get_host_symbolic_name,
1639 .show_host_symbolic_name = 1,
1640 .set_host_system_hostname = qla2x00_set_host_system_hostname,
1641 .show_host_system_hostname = 1,
1642 .get_host_fabric_name = qla2x00_get_host_fabric_name,
1643 .show_host_fabric_name = 1,
1644 .get_host_port_state = qla2x00_get_host_port_state,
1645 .show_host_port_state = 1,
1646
1647 .dd_fcrport_size = sizeof(struct fc_port *),
1648 .show_rport_supported_classes = 1,
1649
1650 .get_starget_node_name = qla2x00_get_starget_node_name,
1651 .show_starget_node_name = 1,
1652 .get_starget_port_name = qla2x00_get_starget_port_name,
1653 .show_starget_port_name = 1,
1654 .get_starget_port_id = qla2x00_get_starget_port_id,
1655 .show_starget_port_id = 1,
1656
1657 .set_rport_dev_loss_tmo = qla2x00_set_rport_loss_tmo,
1658 .show_rport_dev_loss_tmo = 1,
1659
1660 .issue_fc_host_lip = qla2x00_issue_lip,
1661 .dev_loss_tmo_callbk = qla2x00_dev_loss_tmo_callbk,
1662 .terminate_rport_io = qla2x00_terminate_rport_io,
1663 .get_fc_host_stats = qla2x00_get_fc_host_stats,
1664 };
1665
1666 void
1667 qla2x00_init_host_attr(scsi_qla_host_t *vha)
1668 {
1669 struct qla_hw_data *ha = vha->hw;
1670 u32 speed = FC_PORTSPEED_UNKNOWN;
1671
1672 fc_host_node_name(vha->host) = wwn_to_u64(vha->node_name);
1673 fc_host_port_name(vha->host) = wwn_to_u64(vha->port_name);
1674 fc_host_supported_classes(vha->host) = FC_COS_CLASS3;
1675 fc_host_max_npiv_vports(vha->host) = ha->max_npiv_vports;
1676 fc_host_npiv_vports_inuse(vha->host) = ha->cur_vport_count;
1677
1678 if (IS_QLA81XX(ha))
1679 speed = FC_PORTSPEED_10GBIT;
1680 else if (IS_QLA25XX(ha))
1681 speed = FC_PORTSPEED_8GBIT | FC_PORTSPEED_4GBIT |
1682 FC_PORTSPEED_2GBIT | FC_PORTSPEED_1GBIT;
1683 else if (IS_QLA24XX_TYPE(ha))
1684 speed = FC_PORTSPEED_4GBIT | FC_PORTSPEED_2GBIT |
1685 FC_PORTSPEED_1GBIT;
1686 else if (IS_QLA23XX(ha))
1687 speed = FC_PORTSPEED_2GBIT | FC_PORTSPEED_1GBIT;
1688 else
1689 speed = FC_PORTSPEED_1GBIT;
1690 fc_host_supported_speeds(vha->host) = speed;
1691 }