[SCSI] ipr: Resource path error logging cleanup
drivers/scsi/ipr.c
/*
 * ipr.c -- driver for IBM Power Linux RAID adapters
 *
 * Written By: Brian King <brking@us.ibm.com>, IBM Corporation
 *
 * Copyright (C) 2003, 2004 IBM Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 *
 */

/*
 * Notes:
 *
 * This driver is used to control the following SCSI adapters:
 *
 * IBM iSeries: 5702, 5703, 2780, 5709, 570A, 570B
 *
 * IBM pSeries: PCI-X Dual Channel Ultra 320 SCSI RAID Adapter
 *              PCI-X Dual Channel Ultra 320 SCSI Adapter
 *              PCI-X Dual Channel Ultra 320 SCSI RAID Enablement Card
 *              Embedded SCSI adapter on p615 and p655 systems
 *
 * Supported Hardware Features:
 *	- Ultra 320 SCSI controller
 *	- PCI-X host interface
 *	- Embedded PowerPC RISC Processor and Hardware XOR DMA Engine
 *	- Non-Volatile Write Cache
 *	- Supports attachment of non-RAID disks, tape, and optical devices
 *	- RAID Levels 0, 5, 10
 *	- Hot spare
 *	- Background Parity Checking
 *	- Background Data Scrubbing
 *	- Ability to increase the capacity of an existing RAID 5 disk array
 *		by adding disks
 *
 * Driver Features:
 *	- Tagged command queuing
 *	- Adapter microcode download
 *	- PCI hot plug
 *	- SCSI device hot plug
 *
 */

#include <linux/fs.h>
#include <linux/init.h>
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/ioport.h>
#include <linux/delay.h>
#include <linux/pci.h>
#include <linux/wait.h>
#include <linux/spinlock.h>
#include <linux/sched.h>
#include <linux/interrupt.h>
#include <linux/blkdev.h>
#include <linux/firmware.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/libata.h>
#include <linux/hdreg.h>
#include <linux/reboot.h>
#include <linux/stringify.h>
#include <asm/io.h>
#include <asm/irq.h>
#include <asm/processor.h>
#include <scsi/scsi.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_tcq.h>
#include <scsi/scsi_eh.h>
#include <scsi/scsi_cmnd.h>
87#include "ipr.h"
88
89/*
90 * Global Data
91 */
b7d68ca3 92static LIST_HEAD(ipr_ioa_head);
static unsigned int ipr_log_level = IPR_DEFAULT_LOG_LEVEL;
static unsigned int ipr_max_speed = 1;
static int ipr_testmode = 0;
static unsigned int ipr_fastfail = 0;
static unsigned int ipr_transop_timeout = 0;
static unsigned int ipr_debug = 0;
static unsigned int ipr_max_devs = IPR_DEFAULT_SIS64_DEVS;
static unsigned int ipr_dual_ioa_raid = 1;
static DEFINE_SPINLOCK(ipr_driver_lock);

/* This table describes the differences between DMA controller chips */
static const struct ipr_chip_cfg_t ipr_chip_cfg[] = {
	{ /* Gemstone, Citrine, Obsidian, and Obsidian-E */
		.mailbox = 0x0042C,
		.max_cmds = 100,
		.cache_line_size = 0x20,
		.clear_isr = 1,
		{
			.set_interrupt_mask_reg = 0x0022C,
			.clr_interrupt_mask_reg = 0x00230,
			.clr_interrupt_mask_reg32 = 0x00230,
			.sense_interrupt_mask_reg = 0x0022C,
			.sense_interrupt_mask_reg32 = 0x0022C,
			.clr_interrupt_reg = 0x00228,
			.clr_interrupt_reg32 = 0x00228,
			.sense_interrupt_reg = 0x00224,
			.sense_interrupt_reg32 = 0x00224,
			.ioarrin_reg = 0x00404,
			.sense_uproc_interrupt_reg = 0x00214,
			.sense_uproc_interrupt_reg32 = 0x00214,
			.set_uproc_interrupt_reg = 0x00214,
			.set_uproc_interrupt_reg32 = 0x00214,
			.clr_uproc_interrupt_reg = 0x00218,
			.clr_uproc_interrupt_reg32 = 0x00218
		}
	},
	{ /* Snipe and Scamp */
		.mailbox = 0x0052C,
		.max_cmds = 100,
		.cache_line_size = 0x20,
		.clear_isr = 1,
		{
			.set_interrupt_mask_reg = 0x00288,
			.clr_interrupt_mask_reg = 0x0028C,
			.clr_interrupt_mask_reg32 = 0x0028C,
			.sense_interrupt_mask_reg = 0x00288,
			.sense_interrupt_mask_reg32 = 0x00288,
			.clr_interrupt_reg = 0x00284,
			.clr_interrupt_reg32 = 0x00284,
			.sense_interrupt_reg = 0x00280,
			.sense_interrupt_reg32 = 0x00280,
			.ioarrin_reg = 0x00504,
			.sense_uproc_interrupt_reg = 0x00290,
			.sense_uproc_interrupt_reg32 = 0x00290,
			.set_uproc_interrupt_reg = 0x00290,
			.set_uproc_interrupt_reg32 = 0x00290,
			.clr_uproc_interrupt_reg = 0x00294,
			.clr_uproc_interrupt_reg32 = 0x00294
		}
	},
	{ /* CRoC */
		.mailbox = 0x00044,
		.max_cmds = 1000,
		.cache_line_size = 0x20,
		.clear_isr = 0,
		{
			.set_interrupt_mask_reg = 0x00010,
			.clr_interrupt_mask_reg = 0x00018,
			.clr_interrupt_mask_reg32 = 0x0001C,
			.sense_interrupt_mask_reg = 0x00010,
			.sense_interrupt_mask_reg32 = 0x00014,
			.clr_interrupt_reg = 0x00008,
			.clr_interrupt_reg32 = 0x0000C,
			.sense_interrupt_reg = 0x00000,
			.sense_interrupt_reg32 = 0x00004,
			.ioarrin_reg = 0x00070,
			.sense_uproc_interrupt_reg = 0x00020,
			.sense_uproc_interrupt_reg32 = 0x00024,
			.set_uproc_interrupt_reg = 0x00020,
			.set_uproc_interrupt_reg32 = 0x00024,
			.clr_uproc_interrupt_reg = 0x00028,
			.clr_uproc_interrupt_reg32 = 0x0002C,
			.init_feedback_reg = 0x0005C,
			.dump_addr_reg = 0x00064,
			.dump_data_reg = 0x00068,
			.endian_swap_reg = 0x00084
		}
	},
};

static const struct ipr_chip_t ipr_chip[] = {
	{ PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE, IPR_USE_LSI, IPR_SIS32, IPR_PCI_CFG, &ipr_chip_cfg[0] },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE, IPR_USE_LSI, IPR_SIS32, IPR_PCI_CFG, &ipr_chip_cfg[0] },
	{ PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_OBSIDIAN, IPR_USE_LSI, IPR_SIS32, IPR_PCI_CFG, &ipr_chip_cfg[0] },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN, IPR_USE_LSI, IPR_SIS32, IPR_PCI_CFG, &ipr_chip_cfg[0] },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN_E, IPR_USE_MSI, IPR_SIS32, IPR_PCI_CFG, &ipr_chip_cfg[0] },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_SNIPE, IPR_USE_LSI, IPR_SIS32, IPR_PCI_CFG, &ipr_chip_cfg[1] },
	{ PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_SCAMP, IPR_USE_LSI, IPR_SIS32, IPR_PCI_CFG, &ipr_chip_cfg[1] },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_FPGA_E2, IPR_USE_MSI, IPR_SIS64, IPR_MMIO, &ipr_chip_cfg[2] },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE, IPR_USE_MSI, IPR_SIS64, IPR_MMIO, &ipr_chip_cfg[2] }
};

static int ipr_max_bus_speeds[] = {
	IPR_80MBs_SCSI_RATE, IPR_U160_SCSI_RATE, IPR_U320_SCSI_RATE
};

MODULE_AUTHOR("Brian King <brking@us.ibm.com>");
MODULE_DESCRIPTION("IBM Power RAID SCSI Adapter Driver");
module_param_named(max_speed, ipr_max_speed, uint, 0);
MODULE_PARM_DESC(max_speed, "Maximum bus speed (0-2). Default: 1=U160. Speeds: 0=80 MB/s, 1=U160, 2=U320");
module_param_named(log_level, ipr_log_level, uint, 0);
MODULE_PARM_DESC(log_level, "Set to 0 - 4 for increasing verbosity of device driver");
module_param_named(testmode, ipr_testmode, int, 0);
MODULE_PARM_DESC(testmode, "DANGEROUS!!! Allows unsupported configurations");
module_param_named(fastfail, ipr_fastfail, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(fastfail, "Reduce timeouts and retries");
module_param_named(transop_timeout, ipr_transop_timeout, int, 0);
MODULE_PARM_DESC(transop_timeout, "Time in seconds to wait for adapter to come operational (default: 300)");
module_param_named(debug, ipr_debug, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(debug, "Enable device driver debugging logging. Set to 1 to enable. (default: 0)");
module_param_named(dual_ioa_raid, ipr_dual_ioa_raid, int, 0);
MODULE_PARM_DESC(dual_ioa_raid, "Enable dual adapter RAID support. Set to 1 to enable. (default: 1)");
module_param_named(max_devs, ipr_max_devs, int, 0);
MODULE_PARM_DESC(max_devs, "Specify the maximum number of physical devices. "
		 "[Default=" __stringify(IPR_DEFAULT_SIS64_DEVS) "]");
MODULE_LICENSE("GPL");
MODULE_VERSION(IPR_DRIVER_VERSION);

/*  A constant array of IOASCs/URCs/Error Messages */
static const
struct ipr_error_table_t ipr_error_table[] = {
	{0x00000000, 1, IPR_DEFAULT_LOG_LEVEL,
225 "8155: An unknown error was received"},
226 {0x00330000, 0, 0,
227 "Soft underlength error"},
228 {0x005A0000, 0, 0,
229 "Command to be cancelled not found"},
230 {0x00808000, 0, 0,
231 "Qualified success"},
933916f3 232 {0x01080000, 1, IPR_DEFAULT_LOG_LEVEL,
1da177e4 233 "FFFE: Soft device bus error recovered by the IOA"},
933916f3 234 {0x01088100, 0, IPR_DEFAULT_LOG_LEVEL,
896bbd21 235 "4101: Soft device bus fabric error"},
5aa3a333
WB
236 {0x01100100, 0, IPR_DEFAULT_LOG_LEVEL,
237 "FFFC: Logical block guard error recovered by the device"},
238 {0x01100300, 0, IPR_DEFAULT_LOG_LEVEL,
239 "FFFC: Logical block reference tag error recovered by the device"},
240 {0x01108300, 0, IPR_DEFAULT_LOG_LEVEL,
241 "4171: Recovered scatter list tag / sequence number error"},
242 {0x01109000, 0, IPR_DEFAULT_LOG_LEVEL,
243 "FF3D: Recovered logical block CRC error on IOA to Host transfer"},
244 {0x01109200, 0, IPR_DEFAULT_LOG_LEVEL,
245 "4171: Recovered logical block sequence number error on IOA to Host transfer"},
246 {0x0110A000, 0, IPR_DEFAULT_LOG_LEVEL,
247 "FFFD: Recovered logical block reference tag error detected by the IOA"},
248 {0x0110A100, 0, IPR_DEFAULT_LOG_LEVEL,
249 "FFFD: Logical block guard error recovered by the IOA"},
933916f3 250 {0x01170600, 0, IPR_DEFAULT_LOG_LEVEL,
1da177e4 251 "FFF9: Device sector reassign successful"},
933916f3 252 {0x01170900, 0, IPR_DEFAULT_LOG_LEVEL,
1da177e4 253 "FFF7: Media error recovered by device rewrite procedures"},
933916f3 254 {0x01180200, 0, IPR_DEFAULT_LOG_LEVEL,
1da177e4 255 "7001: IOA sector reassignment successful"},
933916f3 256 {0x01180500, 0, IPR_DEFAULT_LOG_LEVEL,
1da177e4 257 "FFF9: Soft media error. Sector reassignment recommended"},
933916f3 258 {0x01180600, 0, IPR_DEFAULT_LOG_LEVEL,
1da177e4 259 "FFF7: Media error recovered by IOA rewrite procedures"},
933916f3 260 {0x01418000, 0, IPR_DEFAULT_LOG_LEVEL,
1da177e4 261 "FF3D: Soft PCI bus error recovered by the IOA"},
933916f3 262 {0x01440000, 1, IPR_DEFAULT_LOG_LEVEL,
1da177e4 263 "FFF6: Device hardware error recovered by the IOA"},
933916f3 264 {0x01448100, 0, IPR_DEFAULT_LOG_LEVEL,
1da177e4 265 "FFF6: Device hardware error recovered by the device"},
933916f3 266 {0x01448200, 1, IPR_DEFAULT_LOG_LEVEL,
1da177e4 267 "FF3D: Soft IOA error recovered by the IOA"},
933916f3 268 {0x01448300, 0, IPR_DEFAULT_LOG_LEVEL,
1da177e4 269 "FFFA: Undefined device response recovered by the IOA"},
933916f3 270 {0x014A0000, 1, IPR_DEFAULT_LOG_LEVEL,
1da177e4 271 "FFF6: Device bus error, message or command phase"},
933916f3 272 {0x014A8000, 0, IPR_DEFAULT_LOG_LEVEL,
35a39691 273 "FFFE: Task Management Function failed"},
933916f3 274 {0x015D0000, 0, IPR_DEFAULT_LOG_LEVEL,
1da177e4 275 "FFF6: Failure prediction threshold exceeded"},
933916f3 276 {0x015D9200, 0, IPR_DEFAULT_LOG_LEVEL,
1da177e4
LT
277 "8009: Impending cache battery pack failure"},
278 {0x02040400, 0, 0,
279 "34FF: Disk device format in progress"},
65f56475
BK
280 {0x02048000, 0, IPR_DEFAULT_LOG_LEVEL,
281 "9070: IOA requested reset"},
1da177e4
LT
282 {0x023F0000, 0, 0,
283 "Synchronization required"},
	{0x024E0000, 0, 0,
	"Not ready, IOA shutdown"},
	{0x025A0000, 0, 0,
	"Not ready, IOA has been shutdown"},
	{0x02670100, 0, IPR_DEFAULT_LOG_LEVEL,
	"3020: Storage subsystem configuration error"},
	{0x03110B00, 0, 0,
	"FFF5: Medium error, data unreadable, recommend reassign"},
	{0x03110C00, 0, 0,
	"7000: Medium error, data unreadable, do not reassign"},
	{0x03310000, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFF3: Disk media format bad"},
	{0x04050000, 0, IPR_DEFAULT_LOG_LEVEL,
	"3002: Addressed device failed to respond to selection"},
	{0x04080000, 1, IPR_DEFAULT_LOG_LEVEL,
	"3100: Device bus error"},
	{0x04080100, 0, IPR_DEFAULT_LOG_LEVEL,
	"3109: IOA timed out a device command"},
	{0x04088000, 0, 0,
	"3120: SCSI bus is not operational"},
	{0x04088100, 0, IPR_DEFAULT_LOG_LEVEL,
	"4100: Hard device bus fabric error"},
	{0x04100100, 0, IPR_DEFAULT_LOG_LEVEL,
	"310C: Logical block guard error detected by the device"},
	{0x04100300, 0, IPR_DEFAULT_LOG_LEVEL,
	"310C: Logical block reference tag error detected by the device"},
	{0x04108300, 1, IPR_DEFAULT_LOG_LEVEL,
	"4170: Scatter list tag / sequence number error"},
	{0x04109000, 1, IPR_DEFAULT_LOG_LEVEL,
	"8150: Logical block CRC error on IOA to Host transfer"},
	{0x04109200, 1, IPR_DEFAULT_LOG_LEVEL,
	"4170: Logical block sequence number error on IOA to Host transfer"},
	{0x0410A000, 0, IPR_DEFAULT_LOG_LEVEL,
	"310D: Logical block reference tag error detected by the IOA"},
	{0x0410A100, 0, IPR_DEFAULT_LOG_LEVEL,
	"310D: Logical block guard error detected by the IOA"},
	{0x04118000, 0, IPR_DEFAULT_LOG_LEVEL,
	"9000: IOA reserved area data check"},
	{0x04118100, 0, IPR_DEFAULT_LOG_LEVEL,
	"9001: IOA reserved area invalid data pattern"},
	{0x04118200, 0, IPR_DEFAULT_LOG_LEVEL,
	"9002: IOA reserved area LRC error"},
	{0x04118300, 1, IPR_DEFAULT_LOG_LEVEL,
	"Hardware Error, IOA metadata access error"},
	{0x04320000, 0, IPR_DEFAULT_LOG_LEVEL,
	"102E: Out of alternate sectors for disk storage"},
	{0x04330000, 1, IPR_DEFAULT_LOG_LEVEL,
	"FFF4: Data transfer underlength error"},
	{0x04338000, 1, IPR_DEFAULT_LOG_LEVEL,
	"FFF4: Data transfer overlength error"},
	{0x043E0100, 0, IPR_DEFAULT_LOG_LEVEL,
	"3400: Logical unit failure"},
	{0x04408500, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFF4: Device microcode is corrupt"},
	{0x04418000, 1, IPR_DEFAULT_LOG_LEVEL,
	"8150: PCI bus error"},
	{0x04430000, 1, 0,
	"Unsupported device bus message received"},
	{0x04440000, 1, IPR_DEFAULT_LOG_LEVEL,
	"FFF4: Disk device problem"},
	{0x04448200, 1, IPR_DEFAULT_LOG_LEVEL,
	"8150: Permanent IOA failure"},
	{0x04448300, 0, IPR_DEFAULT_LOG_LEVEL,
	"3010: Disk device returned wrong response to IOA"},
	{0x04448400, 0, IPR_DEFAULT_LOG_LEVEL,
	"8151: IOA microcode error"},
	{0x04448500, 0, 0,
	"Device bus status error"},
	{0x04448600, 0, IPR_DEFAULT_LOG_LEVEL,
	"8157: IOA error requiring IOA reset to recover"},
	{0x04448700, 0, 0,
	"ATA device status error"},
	{0x04490000, 0, 0,
	"Message reject received from the device"},
	{0x04449200, 0, IPR_DEFAULT_LOG_LEVEL,
	"8008: A permanent cache battery pack failure occurred"},
	{0x0444A000, 0, IPR_DEFAULT_LOG_LEVEL,
	"9090: Disk unit has been modified after the last known status"},
	{0x0444A200, 0, IPR_DEFAULT_LOG_LEVEL,
	"9081: IOA detected device error"},
	{0x0444A300, 0, IPR_DEFAULT_LOG_LEVEL,
	"9082: IOA detected device error"},
	{0x044A0000, 1, IPR_DEFAULT_LOG_LEVEL,
	"3110: Device bus error, message or command phase"},
	{0x044A8000, 1, IPR_DEFAULT_LOG_LEVEL,
	"3110: SAS Command / Task Management Function failed"},
	{0x04670400, 0, IPR_DEFAULT_LOG_LEVEL,
	"9091: Incorrect hardware configuration change has been detected"},
	{0x04678000, 0, IPR_DEFAULT_LOG_LEVEL,
	"9073: Invalid multi-adapter configuration"},
	{0x04678100, 0, IPR_DEFAULT_LOG_LEVEL,
	"4010: Incorrect connection between cascaded expanders"},
	{0x04678200, 0, IPR_DEFAULT_LOG_LEVEL,
	"4020: Connections exceed IOA design limits"},
	{0x04678300, 0, IPR_DEFAULT_LOG_LEVEL,
	"4030: Incorrect multipath connection"},
	{0x04679000, 0, IPR_DEFAULT_LOG_LEVEL,
	"4110: Unsupported enclosure function"},
	{0x046E0000, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFF4: Command to logical unit failed"},
	{0x05240000, 1, 0,
	"Illegal request, invalid request type or request packet"},
	{0x05250000, 0, 0,
	"Illegal request, invalid resource handle"},
	{0x05258000, 0, 0,
	"Illegal request, commands not allowed to this device"},
	{0x05258100, 0, 0,
	"Illegal request, command not allowed to a secondary adapter"},
	{0x05258200, 0, 0,
	"Illegal request, command not allowed to a non-optimized resource"},
	{0x05260000, 0, 0,
	"Illegal request, invalid field in parameter list"},
	{0x05260100, 0, 0,
	"Illegal request, parameter not supported"},
	{0x05260200, 0, 0,
	"Illegal request, parameter value invalid"},
	{0x052C0000, 0, 0,
	"Illegal request, command sequence error"},
	{0x052C8000, 1, 0,
	"Illegal request, dual adapter support not enabled"},
	{0x06040500, 0, IPR_DEFAULT_LOG_LEVEL,
	"9031: Array protection temporarily suspended, protection resuming"},
	{0x06040600, 0, IPR_DEFAULT_LOG_LEVEL,
	"9040: Array protection temporarily suspended, protection resuming"},
	{0x06288000, 0, IPR_DEFAULT_LOG_LEVEL,
	"3140: Device bus not ready to ready transition"},
	{0x06290000, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFFB: SCSI bus was reset"},
	{0x06290500, 0, 0,
	"FFFE: SCSI bus transition to single ended"},
	{0x06290600, 0, 0,
	"FFFE: SCSI bus transition to LVD"},
	{0x06298000, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFFB: SCSI bus was reset by another initiator"},
	{0x063F0300, 0, IPR_DEFAULT_LOG_LEVEL,
	"3029: A device replacement has occurred"},
	{0x064C8000, 0, IPR_DEFAULT_LOG_LEVEL,
	"9051: IOA cache data exists for a missing or failed device"},
	{0x064C8100, 0, IPR_DEFAULT_LOG_LEVEL,
	"9055: Auxiliary cache IOA contains cache data needed by the primary IOA"},
	{0x06670100, 0, IPR_DEFAULT_LOG_LEVEL,
	"9025: Disk unit is not supported at its physical location"},
	{0x06670600, 0, IPR_DEFAULT_LOG_LEVEL,
	"3020: IOA detected a SCSI bus configuration error"},
	{0x06678000, 0, IPR_DEFAULT_LOG_LEVEL,
	"3150: SCSI bus configuration error"},
	{0x06678100, 0, IPR_DEFAULT_LOG_LEVEL,
	"9074: Asymmetric advanced function disk configuration"},
	{0x06678300, 0, IPR_DEFAULT_LOG_LEVEL,
	"4040: Incomplete multipath connection between IOA and enclosure"},
	{0x06678400, 0, IPR_DEFAULT_LOG_LEVEL,
	"4041: Incomplete multipath connection between enclosure and device"},
	{0x06678500, 0, IPR_DEFAULT_LOG_LEVEL,
	"9075: Incomplete multipath connection between IOA and remote IOA"},
	{0x06678600, 0, IPR_DEFAULT_LOG_LEVEL,
	"9076: Configuration error, missing remote IOA"},
	{0x06679100, 0, IPR_DEFAULT_LOG_LEVEL,
	"4050: Enclosure does not support a required multipath function"},
	{0x06690000, 0, IPR_DEFAULT_LOG_LEVEL,
	"4070: Logically bad block written on device"},
	{0x06690200, 0, IPR_DEFAULT_LOG_LEVEL,
	"9041: Array protection temporarily suspended"},
	{0x06698200, 0, IPR_DEFAULT_LOG_LEVEL,
	"9042: Corrupt array parity detected on specified device"},
	{0x066B0200, 0, IPR_DEFAULT_LOG_LEVEL,
	"9030: Array no longer protected due to missing or failed disk unit"},
	{0x066B8000, 0, IPR_DEFAULT_LOG_LEVEL,
	"9071: Link operational transition"},
	{0x066B8100, 0, IPR_DEFAULT_LOG_LEVEL,
	"9072: Link not operational transition"},
	{0x066B8200, 0, IPR_DEFAULT_LOG_LEVEL,
	"9032: Array exposed but still protected"},
	{0x066B8300, 0, IPR_DEFAULT_LOG_LEVEL + 1,
	"70DD: Device forced failed by disrupt device command"},
	{0x066B9100, 0, IPR_DEFAULT_LOG_LEVEL,
	"4061: Multipath redundancy level got better"},
	{0x066B9200, 0, IPR_DEFAULT_LOG_LEVEL,
	"4060: Multipath redundancy level got worse"},
	{0x07270000, 0, 0,
	"Failure due to other device"},
	{0x07278000, 0, IPR_DEFAULT_LOG_LEVEL,
	"9008: IOA does not support functions expected by devices"},
	{0x07278100, 0, IPR_DEFAULT_LOG_LEVEL,
	"9010: Cache data associated with attached devices cannot be found"},
	{0x07278200, 0, IPR_DEFAULT_LOG_LEVEL,
	"9011: Cache data belongs to devices other than those attached"},
	{0x07278400, 0, IPR_DEFAULT_LOG_LEVEL,
	"9020: Array missing 2 or more devices with only 1 device present"},
	{0x07278500, 0, IPR_DEFAULT_LOG_LEVEL,
	"9021: Array missing 2 or more devices with 2 or more devices present"},
	{0x07278600, 0, IPR_DEFAULT_LOG_LEVEL,
	"9022: Exposed array is missing a required device"},
	{0x07278700, 0, IPR_DEFAULT_LOG_LEVEL,
	"9023: Array member(s) not at required physical locations"},
	{0x07278800, 0, IPR_DEFAULT_LOG_LEVEL,
	"9024: Array not functional due to present hardware configuration"},
	{0x07278900, 0, IPR_DEFAULT_LOG_LEVEL,
	"9026: Array not functional due to present hardware configuration"},
	{0x07278A00, 0, IPR_DEFAULT_LOG_LEVEL,
	"9027: Array is missing a device and parity is out of sync"},
	{0x07278B00, 0, IPR_DEFAULT_LOG_LEVEL,
	"9028: Maximum number of arrays already exist"},
	{0x07278C00, 0, IPR_DEFAULT_LOG_LEVEL,
	"9050: Required cache data cannot be located for a disk unit"},
	{0x07278D00, 0, IPR_DEFAULT_LOG_LEVEL,
	"9052: Cache data exists for a device that has been modified"},
	{0x07278F00, 0, IPR_DEFAULT_LOG_LEVEL,
	"9054: IOA resources not available due to previous problems"},
	{0x07279100, 0, IPR_DEFAULT_LOG_LEVEL,
	"9092: Disk unit requires initialization before use"},
	{0x07279200, 0, IPR_DEFAULT_LOG_LEVEL,
	"9029: Incorrect hardware configuration change has been detected"},
	{0x07279600, 0, IPR_DEFAULT_LOG_LEVEL,
	"9060: One or more disk pairs are missing from an array"},
	{0x07279700, 0, IPR_DEFAULT_LOG_LEVEL,
	"9061: One or more disks are missing from an array"},
	{0x07279800, 0, IPR_DEFAULT_LOG_LEVEL,
	"9062: One or more disks are missing from an array"},
	{0x07279900, 0, IPR_DEFAULT_LOG_LEVEL,
	"9063: Maximum number of functional arrays has been exceeded"},
	{0x0B260000, 0, 0,
	"Aborted command, invalid descriptor"},
	{0x0B5A0000, 0, 0,
	"Command terminated by host"}
};

static const struct ipr_ses_table_entry ipr_ses_table[] = {
	{ "2104-DL1        ", "XXXXXXXXXXXXXXXX", 80 },
	{ "2104-TL1        ", "XXXXXXXXXXXXXXXX", 80 },
	{ "HSBP07M P U2SCSI", "XXXXXXXXXXXXXXXX", 80 }, /* Hidive 7 slot */
	{ "HSBP05M P U2SCSI", "XXXXXXXXXXXXXXXX", 80 }, /* Hidive 5 slot */
	{ "HSBP05M S U2SCSI", "XXXXXXXXXXXXXXXX", 80 }, /* Bowtie */
	{ "HSBP06E ASU2SCSI", "XXXXXXXXXXXXXXXX", 80 }, /* MartinFenning */
	{ "2104-DU3        ", "XXXXXXXXXXXXXXXX", 160 },
	{ "2104-TU3        ", "XXXXXXXXXXXXXXXX", 160 },
	{ "HSBP04C RSU2SCSI", "XXXXXXX*XXXXXXXX", 160 },
	{ "HSBP06E RSU2SCSI", "XXXXXXX*XXXXXXXX", 160 },
	{ "St  V1S2        ", "XXXXXXXXXXXXXXXX", 160 },
	{ "HSBPD4M  PU3SCSI", "XXXXXXX*XXXXXXXX", 160 },
	{ "VSBPD1H   U3SCSI", "XXXXXXX*XXXXXXXX", 160 }
};

/*
 * Function Prototypes
 */
static int ipr_reset_alert(struct ipr_cmnd *);
static void ipr_process_ccn(struct ipr_cmnd *);
static void ipr_process_error(struct ipr_cmnd *);
static void ipr_reset_ioa_job(struct ipr_cmnd *);
static void ipr_initiate_ioa_reset(struct ipr_ioa_cfg *,
				   enum ipr_shutdown_type);

#ifdef CONFIG_SCSI_IPR_TRACE
/**
 * ipr_trc_hook - Add a trace entry to the driver trace
 * @ipr_cmd:	ipr command struct
 * @type:		trace type
 * @add_data:	additional data
 *
 * Return value:
 * 	none
 **/
static void ipr_trc_hook(struct ipr_cmnd *ipr_cmd,
			 u8 type, u32 add_data)
{
	struct ipr_trace_entry *trace_entry;
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;

	trace_entry = &ioa_cfg->trace[ioa_cfg->trace_index++];
	trace_entry->time = jiffies;
	trace_entry->op_code = ipr_cmd->ioarcb.cmd_pkt.cdb[0];
	trace_entry->type = type;
	if (ipr_cmd->ioa_cfg->sis64)
		trace_entry->ata_op_code = ipr_cmd->i.ata_ioadl.regs.command;
	else
		trace_entry->ata_op_code = ipr_cmd->ioarcb.u.add_data.u.regs.command;
	trace_entry->cmd_index = ipr_cmd->cmd_index & 0xff;
	trace_entry->res_handle = ipr_cmd->ioarcb.res_handle;
	trace_entry->u.add_data = add_data;
}
#else
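/* Tracing compiled out: the do { } while (0) form keeps this empty
 * macro safe to use in unbraced if/else bodies. */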
#define ipr_trc_hook(ipr_cmd, type, add_data) do { } while (0)
#endif

/**
 * ipr_lock_and_done - Acquire lock and complete command
 * @ipr_cmd:	ipr command struct
 *
 * Return value:
 *	none
 **/
static void ipr_lock_and_done(struct ipr_cmnd *ipr_cmd)
{
	unsigned long lock_flags;
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;

	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
	ipr_cmd->done(ipr_cmd);
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
}

/**
 * ipr_reinit_ipr_cmnd - Re-initialize an IPR Cmnd block for reuse
 * @ipr_cmd:	ipr command struct
 *
 * Return value:
 * 	none
 **/
static void ipr_reinit_ipr_cmnd(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
	struct ipr_ioasa *ioasa = &ipr_cmd->s.ioasa;
	struct ipr_ioasa64 *ioasa64 = &ipr_cmd->s.ioasa64;
	dma_addr_t dma_addr = ipr_cmd->dma_addr;

	memset(&ioarcb->cmd_pkt, 0, sizeof(struct ipr_cmd_pkt));
	ioarcb->data_transfer_length = 0;
	ioarcb->read_data_transfer_length = 0;
	ioarcb->ioadl_len = 0;
	ioarcb->read_ioadl_len = 0;

	if (ipr_cmd->ioa_cfg->sis64) {
606 ioarcb->u.sis64_addr_data.data_ioadl_addr =
607 cpu_to_be64(dma_addr + offsetof(struct ipr_cmnd, i.ioadl64));
608 ioasa64->u.gata.status = 0;
609 } else {
610 ioarcb->write_ioadl_addr =
611 cpu_to_be32(dma_addr + offsetof(struct ipr_cmnd, i.ioadl));
612 ioarcb->read_ioadl_addr = ioarcb->write_ioadl_addr;
96d21f00 613 ioasa->u.gata.status = 0;
a32c055f
WB
	}

616 ioasa->hdr.ioasc = 0;
617 ioasa->hdr.residual_data_len = 0;
1da177e4 618 ipr_cmd->scsi_cmd = NULL;
35a39691 619 ipr_cmd->qc = NULL;
620 ipr_cmd->sense_buffer[0] = 0;
621 ipr_cmd->dma_use_sg = 0;
622}
623
/**
 * ipr_init_ipr_cmnd - Initialize an IPR Cmnd block
 * @ipr_cmd:	ipr command struct
 * @fast_done:	fast done function call-back
 *
 * Return value:
 * 	none
 **/
static void ipr_init_ipr_cmnd(struct ipr_cmnd *ipr_cmd,
			      void (*fast_done) (struct ipr_cmnd *))
{
	ipr_reinit_ipr_cmnd(ipr_cmd);
	ipr_cmd->u.scratch = 0;
	ipr_cmd->sibling = NULL;
	ipr_cmd->fast_done = fast_done;
638 init_timer(&ipr_cmd->timer);
639}
640
641/**
 * __ipr_get_free_ipr_cmnd - Get a free IPR Cmnd block
 * @ioa_cfg:	ioa config struct
 *
 * Return value:
 * 	pointer to ipr command struct
 **/
static
struct ipr_cmnd *__ipr_get_free_ipr_cmnd(struct ipr_ioa_cfg *ioa_cfg)
{
	struct ipr_cmnd *ipr_cmd;

	ipr_cmd = list_entry(ioa_cfg->free_q.next, struct ipr_cmnd, queue);
	list_del(&ipr_cmd->queue);

	return ipr_cmd;
}

/**
 * ipr_get_free_ipr_cmnd - Get a free IPR Cmnd block and initialize it
 * @ioa_cfg:	ioa config struct
 *
 * Return value:
 *	pointer to ipr command struct
 **/
static
struct ipr_cmnd *ipr_get_free_ipr_cmnd(struct ipr_ioa_cfg *ioa_cfg)
{
	struct ipr_cmnd *ipr_cmd = __ipr_get_free_ipr_cmnd(ioa_cfg);
	ipr_init_ipr_cmnd(ipr_cmd, ipr_lock_and_done);
	return ipr_cmd;
}

/**
 * ipr_mask_and_clear_interrupts - Mask all and clear specified interrupts
 * @ioa_cfg:	ioa config struct
 * @clr_ints:	interrupts to clear
 *
 * This function masks all interrupts on the adapter, then clears the
 * interrupts specified in the mask
 *
 * Return value:
 * 	none
 **/
static void ipr_mask_and_clear_interrupts(struct ipr_ioa_cfg *ioa_cfg,
					  u32 clr_ints)
{
	volatile u32 int_reg;

	/* Stop new interrupts */
	ioa_cfg->allow_interrupts = 0;

	/* Set interrupt mask to stop all new interrupts */
	if (ioa_cfg->sis64)
		writeq(~0, ioa_cfg->regs.set_interrupt_mask_reg);
	else
		writel(~0, ioa_cfg->regs.set_interrupt_mask_reg);

	/* Clear any pending interrupts */
	if (ioa_cfg->sis64)
		writel(~0, ioa_cfg->regs.clr_interrupt_reg);
	writel(clr_ints, ioa_cfg->regs.clr_interrupt_reg32);
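	/* Read a sense register back so the mask/clear writes above are
	 * flushed to the adapter before returning (the usual MMIO
	 * posted-write flush idiom). */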
	int_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
}

/**
 * ipr_save_pcix_cmd_reg - Save PCI-X command register
 * @ioa_cfg:	ioa config struct
 *
 * Return value:
 * 	0 on success / -EIO on failure
 **/
static int ipr_save_pcix_cmd_reg(struct ipr_ioa_cfg *ioa_cfg)
{
	int pcix_cmd_reg = pci_find_capability(ioa_cfg->pdev, PCI_CAP_ID_PCIX);

	if (pcix_cmd_reg == 0)
		return 0;

	if (pci_read_config_word(ioa_cfg->pdev, pcix_cmd_reg + PCI_X_CMD,
				 &ioa_cfg->saved_pcix_cmd_reg) != PCIBIOS_SUCCESSFUL) {
		dev_err(&ioa_cfg->pdev->dev, "Failed to save PCI-X command register\n");
		return -EIO;
	}

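	/* Make sure data parity error recovery and relaxed ordering
	 * (PCI_X_CMD_DPERR_E / PCI_X_CMD_ERO) are enabled in the value
	 * that ipr_set_pcix_cmd_reg() later restores. */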
726 ioa_cfg->saved_pcix_cmd_reg |= PCI_X_CMD_DPERR_E | PCI_X_CMD_ERO;
727 return 0;
728}
729
/**
 * ipr_set_pcix_cmd_reg - Setup PCI-X command register
 * @ioa_cfg:	ioa config struct
 *
 * Return value:
 * 	0 on success / -EIO on failure
 **/
static int ipr_set_pcix_cmd_reg(struct ipr_ioa_cfg *ioa_cfg)
{
	int pcix_cmd_reg = pci_find_capability(ioa_cfg->pdev, PCI_CAP_ID_PCIX);

	if (pcix_cmd_reg) {
		if (pci_write_config_word(ioa_cfg->pdev, pcix_cmd_reg + PCI_X_CMD,
					  ioa_cfg->saved_pcix_cmd_reg) != PCIBIOS_SUCCESSFUL) {
			dev_err(&ioa_cfg->pdev->dev, "Failed to setup PCI-X command register\n");
			return -EIO;
		}
	}

	return 0;
}

/**
 * ipr_sata_eh_done - done function for aborted SATA commands
 * @ipr_cmd:	ipr command struct
 *
 * This function is invoked for ops generated to SATA
 * devices which are being aborted.
 *
 * Return value:
 * 	none
 **/
static void ipr_sata_eh_done(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
	struct ata_queued_cmd *qc = ipr_cmd->qc;
	struct ipr_sata_port *sata_port = qc->ap->private_data;

	qc->err_mask |= AC_ERR_OTHER;
	sata_port->ioasa.status |= ATA_BUSY;
	list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
	ata_qc_complete(qc);
}

/**
 * ipr_scsi_eh_done - mid-layer done function for aborted ops
 * @ipr_cmd:	ipr command struct
 *
 * This function is invoked by the interrupt handler for
 * ops generated by the SCSI mid-layer which are being aborted.
 *
 * Return value:
 * 	none
 **/
static void ipr_scsi_eh_done(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
	struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;

	scsi_cmd->result |= (DID_ERROR << 16);

	scsi_dma_unmap(ipr_cmd->scsi_cmd);
792 scsi_cmd->scsi_done(scsi_cmd);
793 list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
794}
795
/**
 * ipr_fail_all_ops - Fails all outstanding ops.
 * @ioa_cfg:	ioa config struct
 *
 * This function fails all outstanding ops.
 *
 * Return value:
 * 	none
 **/
static void ipr_fail_all_ops(struct ipr_ioa_cfg *ioa_cfg)
{
	struct ipr_cmnd *ipr_cmd, *temp;

	ENTER;
	list_for_each_entry_safe(ipr_cmd, temp, &ioa_cfg->pending_q, queue) {
		list_del(&ipr_cmd->queue);

813 ipr_cmd->s.ioasa.hdr.ioasc = cpu_to_be32(IPR_IOASC_IOA_WAS_RESET);
814 ipr_cmd->s.ioasa.hdr.ilid = cpu_to_be32(IPR_DRIVER_ILID);

		if (ipr_cmd->scsi_cmd)
			ipr_cmd->done = ipr_scsi_eh_done;
		else if (ipr_cmd->qc)
			ipr_cmd->done = ipr_sata_eh_done;

		ipr_trc_hook(ipr_cmd, IPR_TRACE_FINISH, IPR_IOASC_IOA_WAS_RESET);
		del_timer(&ipr_cmd->timer);
		ipr_cmd->done(ipr_cmd);
	}

	LEAVE;
}

/**
 * ipr_send_command -  Send driver initiated requests.
 * @ipr_cmd:		ipr command struct
 *
 * This function sends a command to the adapter using the correct write call.
 * In the case of sis64, calculate the ioarcb size required. Then or in the
 * appropriate bits.
 *
 * Return value:
 * 	none
 **/
static void ipr_send_command(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
	dma_addr_t send_dma_addr = ipr_cmd->dma_addr;

	if (ioa_cfg->sis64) {
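		/*
		 * SIS64 encodes the IOARCB size in the low-order bits of its
		 * bus address (available because the IOARCB is aligned):
		 * bit 0 selects the default 256 byte size and bit 2 a 512
		 * byte IOARCB, as set below.
		 */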
		/* The default size is 256 bytes */
		send_dma_addr |= 0x1;

		/* If the number of ioadls * size of ioadl > 128 bytes,
		   then use a 512 byte ioarcb */
		if (ipr_cmd->dma_use_sg * sizeof(struct ipr_ioadl64_desc) > 128)
			send_dma_addr |= 0x4;
		writeq(send_dma_addr, ioa_cfg->regs.ioarrin_reg);
	} else
		writel(send_dma_addr, ioa_cfg->regs.ioarrin_reg);
}

/**
 * ipr_do_req -  Send driver initiated requests.
 * @ipr_cmd:		ipr command struct
 * @done:		done function
 * @timeout_func:	timeout function
 * @timeout:		timeout value
 *
 * This function sends the specified command to the adapter with the
 * timeout given. The done function is invoked on command completion.
 *
 * Return value:
 * 	none
 **/
static void ipr_do_req(struct ipr_cmnd *ipr_cmd,
		       void (*done) (struct ipr_cmnd *),
		       void (*timeout_func) (struct ipr_cmnd *), u32 timeout)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;

	list_add_tail(&ipr_cmd->queue, &ioa_cfg->pending_q);

	ipr_cmd->done = done;

	ipr_cmd->timer.data = (unsigned long) ipr_cmd;
	ipr_cmd->timer.expires = jiffies + timeout;
	ipr_cmd->timer.function = (void (*)(unsigned long))timeout_func;

	add_timer(&ipr_cmd->timer);

	ipr_trc_hook(ipr_cmd, IPR_TRACE_START, 0);

	ipr_send_command(ipr_cmd);
}

/**
 * ipr_internal_cmd_done - Op done function for an internally generated op.
 * @ipr_cmd:	ipr command struct
 *
 * This function is the op done function for an internally generated,
 * blocking op. It simply wakes the sleeping thread.
 *
 * Return value:
 * 	none
 **/
static void ipr_internal_cmd_done(struct ipr_cmnd *ipr_cmd)
{
	if (ipr_cmd->sibling)
		ipr_cmd->sibling = NULL;
	else
		complete(&ipr_cmd->completion);
}

/**
 * ipr_init_ioadl - initialize the ioadl for the correct SIS type
 * @ipr_cmd:	ipr command struct
 * @dma_addr:	dma address
 * @len:	transfer length
 * @flags:	ioadl flag value
 *
 * This function initializes an ioadl in the case where there is only a single
 * descriptor.
 *
 * Return value:
 * 	nothing
 **/
static void ipr_init_ioadl(struct ipr_cmnd *ipr_cmd, dma_addr_t dma_addr,
			   u32 len, int flags)
{
	struct ipr_ioadl_desc *ioadl = ipr_cmd->i.ioadl;
	struct ipr_ioadl64_desc *ioadl64 = ipr_cmd->i.ioadl64;

	ipr_cmd->dma_use_sg = 1;

	if (ipr_cmd->ioa_cfg->sis64) {
		ioadl64->flags = cpu_to_be32(flags);
		ioadl64->data_len = cpu_to_be32(len);
		ioadl64->address = cpu_to_be64(dma_addr);

		ipr_cmd->ioarcb.ioadl_len =
			cpu_to_be32(sizeof(struct ipr_ioadl64_desc));
		ipr_cmd->ioarcb.data_transfer_length = cpu_to_be32(len);
	} else {
		ioadl->flags_and_data_len = cpu_to_be32(flags | len);
		ioadl->address = cpu_to_be32(dma_addr);

		if (flags == IPR_IOADL_FLAGS_READ_LAST) {
			ipr_cmd->ioarcb.read_ioadl_len =
				cpu_to_be32(sizeof(struct ipr_ioadl_desc));
			ipr_cmd->ioarcb.read_data_transfer_length = cpu_to_be32(len);
		} else {
			ipr_cmd->ioarcb.ioadl_len =
				cpu_to_be32(sizeof(struct ipr_ioadl_desc));
			ipr_cmd->ioarcb.data_transfer_length = cpu_to_be32(len);
		}
	}
}

/**
 * ipr_send_blocking_cmd - Send command and sleep on its completion.
 * @ipr_cmd:	ipr command struct
 * @timeout_func:	function to invoke if command times out
 * @timeout:	timeout
 *
 * Return value:
 * 	none
 **/
static void ipr_send_blocking_cmd(struct ipr_cmnd *ipr_cmd,
				  void (*timeout_func) (struct ipr_cmnd *ipr_cmd),
				  u32 timeout)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;

	init_completion(&ipr_cmd->completion);
	ipr_do_req(ipr_cmd, ipr_internal_cmd_done, timeout_func, timeout);

	spin_unlock_irq(ioa_cfg->host->host_lock);
	wait_for_completion(&ipr_cmd->completion);
	spin_lock_irq(ioa_cfg->host->host_lock);
}

/**
 * ipr_send_hcam - Send an HCAM to the adapter.
 * @ioa_cfg:	ioa config struct
 * @type:		HCAM type
 * @hostrcb:	hostrcb struct
 *
 * This function will send a Host Controlled Async command to the adapter.
 * If HCAMs are currently not allowed to be issued to the adapter, it will
 * place the hostrcb on the free queue.
 *
 * Return value:
 * 	none
 **/
static void ipr_send_hcam(struct ipr_ioa_cfg *ioa_cfg, u8 type,
			  struct ipr_hostrcb *hostrcb)
{
	struct ipr_cmnd *ipr_cmd;
	struct ipr_ioarcb *ioarcb;

	if (ioa_cfg->allow_cmds) {
		ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
		list_add_tail(&ipr_cmd->queue, &ioa_cfg->pending_q);
		list_add_tail(&hostrcb->queue, &ioa_cfg->hostrcb_pending_q);

		ipr_cmd->u.hostrcb = hostrcb;
		ioarcb = &ipr_cmd->ioarcb;

		ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
		ioarcb->cmd_pkt.request_type = IPR_RQTYPE_HCAM;
		ioarcb->cmd_pkt.cdb[0] = IPR_HOST_CONTROLLED_ASYNC;
		ioarcb->cmd_pkt.cdb[1] = type;
1009 ioarcb->cmd_pkt.cdb[7] = (sizeof(hostrcb->hcam) >> 8) & 0xff;
1010 ioarcb->cmd_pkt.cdb[8] = sizeof(hostrcb->hcam) & 0xff;
1011
		ipr_init_ioadl(ipr_cmd, hostrcb->hostrcb_dma,
			       sizeof(hostrcb->hcam), IPR_IOADL_FLAGS_READ_LAST);

		if (type == IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE)
			ipr_cmd->done = ipr_process_ccn;
		else
			ipr_cmd->done = ipr_process_error;

		ipr_trc_hook(ipr_cmd, IPR_TRACE_START, IPR_IOA_RES_ADDR);

		ipr_send_command(ipr_cmd);
	} else {
		list_add_tail(&hostrcb->queue, &ioa_cfg->hostrcb_free_q);
	}
}

/**
 * ipr_update_ata_class - Update the ata class in the resource entry
 * @res:	resource entry struct
 * @proto:	cfgte device bus protocol value
 *
 * Return value:
 * 	none
 **/
static void ipr_update_ata_class(struct ipr_resource_entry *res, unsigned int proto)
{
	switch (proto) {
	case IPR_PROTO_SATA:
	case IPR_PROTO_SAS_STP:
		res->ata_class = ATA_DEV_ATA;
		break;
	case IPR_PROTO_SATA_ATAPI:
	case IPR_PROTO_SAS_STP_ATAPI:
		res->ata_class = ATA_DEV_ATAPI;
		break;
	default:
		res->ata_class = ATA_DEV_UNKNOWN;
		break;
	}
}

/**
 * ipr_init_res_entry - Initialize a resource entry struct.
 * @res:	resource entry struct
 * @cfgtew:	config table entry wrapper struct
 *
 * Return value:
 * 	none
 **/
static void ipr_init_res_entry(struct ipr_resource_entry *res,
			       struct ipr_config_table_entry_wrapper *cfgtew)
{
	int found = 0;
	unsigned int proto;
	struct ipr_ioa_cfg *ioa_cfg = res->ioa_cfg;
	struct ipr_resource_entry *gscsi_res = NULL;

	res->needs_sync_complete = 0;
1070 res->in_erp = 0;
1071 res->add_to_ml = 0;
1072 res->del_from_ml = 0;
1073 res->resetting_device = 0;
1074 res->sdev = NULL;
35a39691 1075 res->sata_port = NULL;

	if (ioa_cfg->sis64) {
		proto = cfgtew->u.cfgte64->proto;
		res->res_flags = cfgtew->u.cfgte64->res_flags;
		res->qmodel = IPR_QUEUEING_MODEL64(res);
		res->type = cfgtew->u.cfgte64->res_type;

		memcpy(res->res_path, &cfgtew->u.cfgte64->res_path,
			sizeof(res->res_path));

		res->bus = 0;
1087 memcpy(&res->dev_lun.scsi_lun, &cfgtew->u.cfgte64->lun,
1088 sizeof(res->dev_lun.scsi_lun));
1089 res->lun = scsilun_to_int(&res->dev_lun);
1090
1091 if (res->type == IPR_RES_TYPE_GENERIC_SCSI) {
			list_for_each_entry(gscsi_res, &ioa_cfg->used_res_q, queue) {
				if (gscsi_res->dev_id == cfgtew->u.cfgte64->dev_id) {
					found = 1;
					res->target = gscsi_res->target;
					break;
				}
			}
			if (!found) {
				res->target = find_first_zero_bit(ioa_cfg->target_ids,
								  ioa_cfg->max_devs_supported);
				set_bit(res->target, ioa_cfg->target_ids);
			}
		} else if (res->type == IPR_RES_TYPE_IOAFP) {
			res->bus = IPR_IOAFP_VIRTUAL_BUS;
			res->target = 0;
		} else if (res->type == IPR_RES_TYPE_ARRAY) {
			res->bus = IPR_ARRAY_VIRTUAL_BUS;
			res->target = find_first_zero_bit(ioa_cfg->array_ids,
							  ioa_cfg->max_devs_supported);
			set_bit(res->target, ioa_cfg->array_ids);
		} else if (res->type == IPR_RES_TYPE_VOLUME_SET) {
			res->bus = IPR_VSET_VIRTUAL_BUS;
			res->target = find_first_zero_bit(ioa_cfg->vset_ids,
							  ioa_cfg->max_devs_supported);
			set_bit(res->target, ioa_cfg->vset_ids);
		} else {
			res->target = find_first_zero_bit(ioa_cfg->target_ids,
							  ioa_cfg->max_devs_supported);
			set_bit(res->target, ioa_cfg->target_ids);
		}
	} else {
		proto = cfgtew->u.cfgte->proto;
		res->qmodel = IPR_QUEUEING_MODEL(res);
		res->flags = cfgtew->u.cfgte->flags;
		if (res->flags & IPR_IS_IOA_RESOURCE)
			res->type = IPR_RES_TYPE_IOAFP;
		else
			res->type = cfgtew->u.cfgte->rsvd_subtype & 0x0f;

		res->bus = cfgtew->u.cfgte->res_addr.bus;
		res->target = cfgtew->u.cfgte->res_addr.target;
		res->lun = cfgtew->u.cfgte->res_addr.lun;
		res->lun_wwn = get_unaligned_be64(cfgtew->u.cfgte->lun_wwn);
	}

	ipr_update_ata_class(res, proto);
}

/**
 * ipr_is_same_device - Determine if two devices are the same.
 * @res:	resource entry struct
 * @cfgtew:	config table entry wrapper struct
 *
 * Return value:
 * 	1 if the devices are the same / 0 otherwise
 **/
static int ipr_is_same_device(struct ipr_resource_entry *res,
			      struct ipr_config_table_entry_wrapper *cfgtew)
{
	if (res->ioa_cfg->sis64) {
		if (!memcmp(&res->dev_id, &cfgtew->u.cfgte64->dev_id,
			    sizeof(cfgtew->u.cfgte64->dev_id)) &&
		    !memcmp(&res->dev_lun.scsi_lun, &cfgtew->u.cfgte64->lun,
			    sizeof(cfgtew->u.cfgte64->lun))) {
			return 1;
		}
	} else {
		if (res->bus == cfgtew->u.cfgte->res_addr.bus &&
		    res->target == cfgtew->u.cfgte->res_addr.target &&
		    res->lun == cfgtew->u.cfgte->res_addr.lun)
			return 1;
	}

	return 0;
}

/**
 * __ipr_format_res_path - Format the resource path for printing.
 * @res_path:	resource path
 * @buffer:	buffer
 * @len:	length of buffer provided
 *
 * Return value:
 * 	pointer to buffer
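 *	(for example, a res_path of { 0x00, 0x0A, 0xff, ... } formats as "00-0A")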
 **/
static char *__ipr_format_res_path(u8 *res_path, char *buffer, int len)
{
	int i;
	char *p = buffer;

	*p = '\0';
	p += snprintf(p, buffer + len - p, "%02X", res_path[0]);
	for (i = 1; res_path[i] != 0xff && ((i * 3) < len); i++)
		p += snprintf(p, buffer + len - p, "-%02X", res_path[i]);

	return buffer;
}

/**
 * ipr_format_res_path - Format the resource path for printing.
 * @ioa_cfg:	ioa config struct
 * @res_path:	resource path
 * @buffer:	buffer
 * @len:	length of buffer provided
 *
 * Return value:
 *	pointer to buffer
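 *	(for example, "2/00-0A" on a hypothetical SCSI host number 2)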
 **/
static char *ipr_format_res_path(struct ipr_ioa_cfg *ioa_cfg,
				 u8 *res_path, char *buffer, int len)
{
	char *p = buffer;

	*p = '\0';
	p += snprintf(p, buffer + len - p, "%d/", ioa_cfg->host->host_no);
	__ipr_format_res_path(res_path, p, len - (buffer - p));
	return buffer;
}

/**
 * ipr_update_res_entry - Update the resource entry.
 * @res:	resource entry struct
 * @cfgtew:	config table entry wrapper struct
 *
 * Return value:
 *      none
 **/
static void ipr_update_res_entry(struct ipr_resource_entry *res,
				 struct ipr_config_table_entry_wrapper *cfgtew)
{
	char buffer[IPR_MAX_RES_PATH_LENGTH];
	unsigned int proto;
	int new_path = 0;

	if (res->ioa_cfg->sis64) {
		res->flags = cfgtew->u.cfgte64->flags;
		res->res_flags = cfgtew->u.cfgte64->res_flags;
		res->type = cfgtew->u.cfgte64->res_type;

		memcpy(&res->std_inq_data, &cfgtew->u.cfgte64->std_inq_data,
			sizeof(struct ipr_std_inq_data));

		res->qmodel = IPR_QUEUEING_MODEL64(res);
		proto = cfgtew->u.cfgte64->proto;
		res->res_handle = cfgtew->u.cfgte64->res_handle;
		res->dev_id = cfgtew->u.cfgte64->dev_id;

		memcpy(&res->dev_lun.scsi_lun, &cfgtew->u.cfgte64->lun,
			sizeof(res->dev_lun.scsi_lun));

		if (memcmp(res->res_path, &cfgtew->u.cfgte64->res_path,
			sizeof(res->res_path))) {
			memcpy(res->res_path, &cfgtew->u.cfgte64->res_path,
				sizeof(res->res_path));
			new_path = 1;
		}

		if (res->sdev && new_path)
			sdev_printk(KERN_INFO, res->sdev, "Resource path: %s\n",
				    ipr_format_res_path(res->ioa_cfg,
					res->res_path, buffer, sizeof(buffer)));
	} else {
		res->flags = cfgtew->u.cfgte->flags;
		if (res->flags & IPR_IS_IOA_RESOURCE)
			res->type = IPR_RES_TYPE_IOAFP;
		else
			res->type = cfgtew->u.cfgte->rsvd_subtype & 0x0f;

		memcpy(&res->std_inq_data, &cfgtew->u.cfgte->std_inq_data,
			sizeof(struct ipr_std_inq_data));

		res->qmodel = IPR_QUEUEING_MODEL(res);
		proto = cfgtew->u.cfgte->proto;
		res->res_handle = cfgtew->u.cfgte->res_handle;
	}

	ipr_update_ata_class(res, proto);
}

/**
 * ipr_clear_res_target - Clear the bit in the bit map representing the target
 *			  for the resource.
 * @res:	resource entry struct
 *
 * Return value:
 *      none
 **/
static void ipr_clear_res_target(struct ipr_resource_entry *res)
{
	struct ipr_resource_entry *gscsi_res = NULL;
	struct ipr_ioa_cfg *ioa_cfg = res->ioa_cfg;

	if (!ioa_cfg->sis64)
		return;

	if (res->bus == IPR_ARRAY_VIRTUAL_BUS)
		clear_bit(res->target, ioa_cfg->array_ids);
	else if (res->bus == IPR_VSET_VIRTUAL_BUS)
		clear_bit(res->target, ioa_cfg->vset_ids);
	else if (res->bus == 0 && res->type == IPR_RES_TYPE_GENERIC_SCSI) {
		list_for_each_entry(gscsi_res, &ioa_cfg->used_res_q, queue)
			if (gscsi_res->dev_id == res->dev_id && gscsi_res != res)
				return;
		clear_bit(res->target, ioa_cfg->target_ids);

	} else if (res->bus == 0)
		clear_bit(res->target, ioa_cfg->target_ids);
}

/**
 * ipr_handle_config_change - Handle a config change from the adapter
 * @ioa_cfg:	ioa config struct
 * @hostrcb:	hostrcb
 *
 * Return value:
 * 	none
 **/
static void ipr_handle_config_change(struct ipr_ioa_cfg *ioa_cfg,
				     struct ipr_hostrcb *hostrcb)
{
	struct ipr_resource_entry *res = NULL;
	struct ipr_config_table_entry_wrapper cfgtew;
	__be32 cc_res_handle;

	u32 is_ndn = 1;

	if (ioa_cfg->sis64) {
		cfgtew.u.cfgte64 = &hostrcb->hcam.u.ccn.u.cfgte64;
		cc_res_handle = cfgtew.u.cfgte64->res_handle;
	} else {
		cfgtew.u.cfgte = &hostrcb->hcam.u.ccn.u.cfgte;
		cc_res_handle = cfgtew.u.cfgte->res_handle;
	}

	list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
		if (res->res_handle == cc_res_handle) {
			is_ndn = 0;
			break;
		}
	}

	if (is_ndn) {
		if (list_empty(&ioa_cfg->free_res_q)) {
			ipr_send_hcam(ioa_cfg,
				      IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE,
				      hostrcb);
			return;
		}

		res = list_entry(ioa_cfg->free_res_q.next,
				 struct ipr_resource_entry, queue);

		list_del(&res->queue);
		ipr_init_res_entry(res, &cfgtew);
		list_add_tail(&res->queue, &ioa_cfg->used_res_q);
	}

	ipr_update_res_entry(res, &cfgtew);

	if (hostrcb->hcam.notify_type == IPR_HOST_RCB_NOTIF_TYPE_REM_ENTRY) {
		if (res->sdev) {
			res->del_from_ml = 1;
			res->res_handle = IPR_INVALID_RES_HANDLE;
			if (ioa_cfg->allow_ml_add_del)
				schedule_work(&ioa_cfg->work_q);
		} else {
			ipr_clear_res_target(res);
			list_move_tail(&res->queue, &ioa_cfg->free_res_q);
		}
	} else if (!res->sdev || res->del_from_ml) {
1363 res->add_to_ml = 1;
1364 if (ioa_cfg->allow_ml_add_del)
1365 schedule_work(&ioa_cfg->work_q);
1366 }
1367
1368 ipr_send_hcam(ioa_cfg, IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE, hostrcb);
1369}
1370
/**
 * ipr_process_ccn - Op done function for a CCN.
 * @ipr_cmd:	ipr command struct
 *
 * This function is the op done function for a configuration
 * change notification host controlled async from the adapter.
 *
 * Return value:
 * 	none
 **/
static void ipr_process_ccn(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
	struct ipr_hostrcb *hostrcb = ipr_cmd->u.hostrcb;
	u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);

	list_del(&hostrcb->queue);
	list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);

	if (ioasc) {
		if (ioasc != IPR_IOASC_IOA_WAS_RESET)
			dev_err(&ioa_cfg->pdev->dev,
				"Host RCB failed with IOASC: 0x%08X\n", ioasc);

		ipr_send_hcam(ioa_cfg, IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE, hostrcb);
	} else {
		ipr_handle_config_change(ioa_cfg, hostrcb);
	}
}

/**
 * strip_and_pad_whitespace - Strip and pad trailing whitespace.
 * @i:		index into buffer
 * @buf:	string to modify
 *
 * This function will strip all trailing whitespace, pad the end
 * of the string with a single space, and NULL terminate the string.
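 * For example, "IBM     " becomes "IBM " and the new length (4) is returned.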
 *
 * Return value:
 * 	new length of string
 **/
static int strip_and_pad_whitespace(int i, char *buf)
{
	while (i && buf[i] == ' ')
		i--;
	buf[i+1] = ' ';
	buf[i+2] = '\0';
	return i + 2;
}

/**
 * ipr_log_vpd_compact - Log the passed extended VPD compactly.
 * @prefix:	string to print at start of printk
 * @hostrcb:	hostrcb pointer
 * @vpd:		vendor/product id/sn struct
 *
 * Return value:
 *	none
 **/
static void ipr_log_vpd_compact(char *prefix, struct ipr_hostrcb *hostrcb,
				struct ipr_vpd *vpd)
{
	char buffer[IPR_VENDOR_ID_LEN + IPR_PROD_ID_LEN + IPR_SERIAL_NUM_LEN + 3];
	int i = 0;

	memcpy(buffer, vpd->vpids.vendor_id, IPR_VENDOR_ID_LEN);
	i = strip_and_pad_whitespace(IPR_VENDOR_ID_LEN - 1, buffer);

	memcpy(&buffer[i], vpd->vpids.product_id, IPR_PROD_ID_LEN);
	i = strip_and_pad_whitespace(i + IPR_PROD_ID_LEN - 1, buffer);

	memcpy(&buffer[i], vpd->sn, IPR_SERIAL_NUM_LEN);
	buffer[IPR_SERIAL_NUM_LEN + i] = '\0';

	ipr_hcam_err(hostrcb, "%s VPID/SN: %s\n", prefix, buffer);
}

/**
 * ipr_log_vpd - Log the passed VPD to the error log.
 * @vpd:		vendor/product id/sn struct
 *
 * Return value:
 *	none
 **/
static void ipr_log_vpd(struct ipr_vpd *vpd)
{
	char buffer[IPR_VENDOR_ID_LEN + IPR_PROD_ID_LEN
		    + IPR_SERIAL_NUM_LEN];

	memcpy(buffer, vpd->vpids.vendor_id, IPR_VENDOR_ID_LEN);
	memcpy(buffer + IPR_VENDOR_ID_LEN, vpd->vpids.product_id,
	       IPR_PROD_ID_LEN);
	buffer[IPR_VENDOR_ID_LEN + IPR_PROD_ID_LEN] = '\0';
	ipr_err("Vendor/Product ID: %s\n", buffer);

	memcpy(buffer, vpd->sn, IPR_SERIAL_NUM_LEN);
	buffer[IPR_SERIAL_NUM_LEN] = '\0';
	ipr_err("    Serial Number: %s\n", buffer);
}

/**
 * ipr_log_ext_vpd_compact - Log the passed extended VPD compactly.
 * @prefix:	string to print at start of printk
 * @hostrcb:	hostrcb pointer
 * @vpd:	vendor/product id/sn/wwn struct
 *
 * Return value:
 *	none
 **/
static void ipr_log_ext_vpd_compact(char *prefix, struct ipr_hostrcb *hostrcb,
				    struct ipr_ext_vpd *vpd)
{
	ipr_log_vpd_compact(prefix, hostrcb, &vpd->vpd);
	ipr_hcam_err(hostrcb, "%s WWN: %08X%08X\n", prefix,
		     be32_to_cpu(vpd->wwid[0]), be32_to_cpu(vpd->wwid[1]));
}

BK
1488/**
1489 * ipr_log_ext_vpd - Log the passed extended VPD to the error log.
1490 * @vpd: vendor/product id/sn/wwn struct
1491 *
1492 * Return value:
1493 * none
1494 **/
1495static void ipr_log_ext_vpd(struct ipr_ext_vpd *vpd)
1496{
1497 ipr_log_vpd(&vpd->vpd);
1498 ipr_err(" WWN: %08X%08X\n", be32_to_cpu(vpd->wwid[0]),
1499 be32_to_cpu(vpd->wwid[1]));
1500}
1501
/**
 * ipr_log_enhanced_cache_error - Log a cache error.
 * @ioa_cfg:	ioa config struct
 * @hostrcb:	hostrcb struct
 *
 * Return value:
 * 	none
 **/
static void ipr_log_enhanced_cache_error(struct ipr_ioa_cfg *ioa_cfg,
					 struct ipr_hostrcb *hostrcb)
{
	struct ipr_hostrcb_type_12_error *error;

	if (ioa_cfg->sis64)
		error = &hostrcb->hcam.u.error64.u.type_12_error;
	else
		error = &hostrcb->hcam.u.error.u.type_12_error;

	ipr_err("-----Current Configuration-----\n");
	ipr_err("Cache Directory Card Information:\n");
	ipr_log_ext_vpd(&error->ioa_vpd);
	ipr_err("Adapter Card Information:\n");
	ipr_log_ext_vpd(&error->cfc_vpd);

	ipr_err("-----Expected Configuration-----\n");
	ipr_err("Cache Directory Card Information:\n");
	ipr_log_ext_vpd(&error->ioa_last_attached_to_cfc_vpd);
	ipr_err("Adapter Card Information:\n");
	ipr_log_ext_vpd(&error->cfc_last_attached_to_ioa_vpd);

	ipr_err("Additional IOA Data: %08X %08X %08X\n",
		     be32_to_cpu(error->ioa_data[0]),
		     be32_to_cpu(error->ioa_data[1]),
		     be32_to_cpu(error->ioa_data[2]));
}

/**
 * ipr_log_cache_error - Log a cache error.
 * @ioa_cfg: ioa config struct
 * @hostrcb: hostrcb struct
 *
 * Return value:
 *	none
 **/
static void ipr_log_cache_error(struct ipr_ioa_cfg *ioa_cfg,
				struct ipr_hostrcb *hostrcb)
{
	struct ipr_hostrcb_type_02_error *error =
		&hostrcb->hcam.u.error.u.type_02_error;

	ipr_err("-----Current Configuration-----\n");
	ipr_err("Cache Directory Card Information:\n");
	ipr_log_vpd(&error->ioa_vpd);
	ipr_err("Adapter Card Information:\n");
	ipr_log_vpd(&error->cfc_vpd);

	ipr_err("-----Expected Configuration-----\n");
	ipr_err("Cache Directory Card Information:\n");
	ipr_log_vpd(&error->ioa_last_attached_to_cfc_vpd);
	ipr_err("Adapter Card Information:\n");
	ipr_log_vpd(&error->cfc_last_attached_to_ioa_vpd);

	ipr_err("Additional IOA Data: %08X %08X %08X\n",
		be32_to_cpu(error->ioa_data[0]),
		be32_to_cpu(error->ioa_data[1]),
		be32_to_cpu(error->ioa_data[2]));
}

/**
 * ipr_log_enhanced_config_error - Log a configuration error.
 * @ioa_cfg: ioa config struct
 * @hostrcb: hostrcb struct
 *
 * Return value:
 *	none
 **/
static void ipr_log_enhanced_config_error(struct ipr_ioa_cfg *ioa_cfg,
					  struct ipr_hostrcb *hostrcb)
{
	int errors_logged, i;
	struct ipr_hostrcb_device_data_entry_enhanced *dev_entry;
	struct ipr_hostrcb_type_13_error *error;

	error = &hostrcb->hcam.u.error.u.type_13_error;
	errors_logged = be32_to_cpu(error->errors_logged);

	ipr_err("Device Errors Detected/Logged: %d/%d\n",
		be32_to_cpu(error->errors_detected), errors_logged);

	dev_entry = error->dev;

	for (i = 0; i < errors_logged; i++, dev_entry++) {
		ipr_err_separator;

		ipr_phys_res_err(ioa_cfg, dev_entry->dev_res_addr, "Device %d", i + 1);
		ipr_log_ext_vpd(&dev_entry->vpd);

		ipr_err("-----New Device Information-----\n");
		ipr_log_ext_vpd(&dev_entry->new_vpd);

		ipr_err("Cache Directory Card Information:\n");
		ipr_log_ext_vpd(&dev_entry->ioa_last_with_dev_vpd);

		ipr_err("Adapter Card Information:\n");
		ipr_log_ext_vpd(&dev_entry->cfc_last_with_dev_vpd);
	}
}

/**
 * ipr_log_sis64_config_error - Log a device error.
 * @ioa_cfg: ioa config struct
 * @hostrcb: hostrcb struct
 *
 * Return value:
 *	none
 **/
static void ipr_log_sis64_config_error(struct ipr_ioa_cfg *ioa_cfg,
				       struct ipr_hostrcb *hostrcb)
{
	int errors_logged, i;
	struct ipr_hostrcb64_device_data_entry_enhanced *dev_entry;
	struct ipr_hostrcb_type_23_error *error;
	char buffer[IPR_MAX_RES_PATH_LENGTH];

	error = &hostrcb->hcam.u.error64.u.type_23_error;
	errors_logged = be32_to_cpu(error->errors_logged);

	ipr_err("Device Errors Detected/Logged: %d/%d\n",
		be32_to_cpu(error->errors_detected), errors_logged);

	dev_entry = error->dev;

	for (i = 0; i < errors_logged; i++, dev_entry++) {
		ipr_err_separator;

		ipr_err("Device %d : %s", i + 1,
			__ipr_format_res_path(dev_entry->res_path,
					      buffer, sizeof(buffer)));
		ipr_log_ext_vpd(&dev_entry->vpd);

		ipr_err("-----New Device Information-----\n");
		ipr_log_ext_vpd(&dev_entry->new_vpd);

		ipr_err("Cache Directory Card Information:\n");
		ipr_log_ext_vpd(&dev_entry->ioa_last_with_dev_vpd);

		ipr_err("Adapter Card Information:\n");
		ipr_log_ext_vpd(&dev_entry->cfc_last_with_dev_vpd);
	}
}

/**
 * ipr_log_config_error - Log a configuration error.
 * @ioa_cfg: ioa config struct
 * @hostrcb: hostrcb struct
 *
 * Return value:
 *	none
 **/
static void ipr_log_config_error(struct ipr_ioa_cfg *ioa_cfg,
				 struct ipr_hostrcb *hostrcb)
{
	int errors_logged, i;
	struct ipr_hostrcb_device_data_entry *dev_entry;
	struct ipr_hostrcb_type_03_error *error;

	error = &hostrcb->hcam.u.error.u.type_03_error;
	errors_logged = be32_to_cpu(error->errors_logged);

	ipr_err("Device Errors Detected/Logged: %d/%d\n",
		be32_to_cpu(error->errors_detected), errors_logged);

	dev_entry = error->dev;

	for (i = 0; i < errors_logged; i++, dev_entry++) {
		ipr_err_separator;

		ipr_phys_res_err(ioa_cfg, dev_entry->dev_res_addr, "Device %d", i + 1);
		ipr_log_vpd(&dev_entry->vpd);

		ipr_err("-----New Device Information-----\n");
		ipr_log_vpd(&dev_entry->new_vpd);

		ipr_err("Cache Directory Card Information:\n");
		ipr_log_vpd(&dev_entry->ioa_last_with_dev_vpd);

		ipr_err("Adapter Card Information:\n");
		ipr_log_vpd(&dev_entry->cfc_last_with_dev_vpd);

		ipr_err("Additional IOA Data: %08X %08X %08X %08X %08X\n",
			be32_to_cpu(dev_entry->ioa_data[0]),
			be32_to_cpu(dev_entry->ioa_data[1]),
			be32_to_cpu(dev_entry->ioa_data[2]),
			be32_to_cpu(dev_entry->ioa_data[3]),
			be32_to_cpu(dev_entry->ioa_data[4]));
	}
}

/**
 * ipr_log_enhanced_array_error - Log an array configuration error.
 * @ioa_cfg: ioa config struct
 * @hostrcb: hostrcb struct
 *
 * Return value:
 *	none
 **/
static void ipr_log_enhanced_array_error(struct ipr_ioa_cfg *ioa_cfg,
					 struct ipr_hostrcb *hostrcb)
{
	int i, num_entries;
	struct ipr_hostrcb_type_14_error *error;
	struct ipr_hostrcb_array_data_entry_enhanced *array_entry;
	const u8 zero_sn[IPR_SERIAL_NUM_LEN] = { [0 ... IPR_SERIAL_NUM_LEN-1] = '0' };

	error = &hostrcb->hcam.u.error.u.type_14_error;

	ipr_err_separator;

	ipr_err("RAID %s Array Configuration: %d:%d:%d:%d\n",
		error->protection_level,
		ioa_cfg->host->host_no,
		error->last_func_vset_res_addr.bus,
		error->last_func_vset_res_addr.target,
		error->last_func_vset_res_addr.lun);

	ipr_err_separator;

	array_entry = error->array_member;
	num_entries = min_t(u32, be32_to_cpu(error->num_entries),
			    ARRAY_SIZE(error->array_member));

	for (i = 0; i < num_entries; i++, array_entry++) {
		if (!memcmp(array_entry->vpd.vpd.sn, zero_sn, IPR_SERIAL_NUM_LEN))
			continue;

		if (be32_to_cpu(error->exposed_mode_adn) == i)
			ipr_err("Exposed Array Member %d:\n", i);
		else
			ipr_err("Array Member %d:\n", i);

		ipr_log_ext_vpd(&array_entry->vpd);
		ipr_phys_res_err(ioa_cfg, array_entry->dev_res_addr, "Current Location");
		ipr_phys_res_err(ioa_cfg, array_entry->expected_dev_res_addr,
				 "Expected Location");

		ipr_err_separator;
	}
}

/**
 * ipr_log_array_error - Log an array configuration error.
 * @ioa_cfg: ioa config struct
 * @hostrcb: hostrcb struct
 *
 * Return value:
 *	none
 **/
static void ipr_log_array_error(struct ipr_ioa_cfg *ioa_cfg,
				struct ipr_hostrcb *hostrcb)
{
	int i;
	struct ipr_hostrcb_type_04_error *error;
	struct ipr_hostrcb_array_data_entry *array_entry;
	const u8 zero_sn[IPR_SERIAL_NUM_LEN] = { [0 ... IPR_SERIAL_NUM_LEN-1] = '0' };

	error = &hostrcb->hcam.u.error.u.type_04_error;

	ipr_err_separator;

	ipr_err("RAID %s Array Configuration: %d:%d:%d:%d\n",
		error->protection_level,
		ioa_cfg->host->host_no,
		error->last_func_vset_res_addr.bus,
		error->last_func_vset_res_addr.target,
		error->last_func_vset_res_addr.lun);

	ipr_err_separator;

	array_entry = error->array_member;

	for (i = 0; i < 18; i++) {
		if (!memcmp(array_entry->vpd.sn, zero_sn, IPR_SERIAL_NUM_LEN))
			continue;

		if (be32_to_cpu(error->exposed_mode_adn) == i)
			ipr_err("Exposed Array Member %d:\n", i);
		else
			ipr_err("Array Member %d:\n", i);

		ipr_log_vpd(&array_entry->vpd);

		ipr_phys_res_err(ioa_cfg, array_entry->dev_res_addr, "Current Location");
		ipr_phys_res_err(ioa_cfg, array_entry->expected_dev_res_addr,
				 "Expected Location");

		ipr_err_separator;

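		/* Members 0-9 live in array_member[]; the remaining eight
		 * entries continue in array_member2[]. */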
		if (i == 9)
			array_entry = error->array_member2;
		else
			array_entry++;
	}
}

/**
 * ipr_log_hex_data - Log additional hex IOA error data.
 * @ioa_cfg: ioa config struct
 * @data: IOA error data
 * @len: data length
 *
 * Return value:
 *	none
 **/
static void ipr_log_hex_data(struct ipr_ioa_cfg *ioa_cfg, u32 *data, int len)
{
	int i;

	if (len == 0)
		return;

	if (ioa_cfg->log_level <= IPR_DEFAULT_LOG_LEVEL)
		len = min_t(int, len, IPR_DEFAULT_MAX_ERROR_DUMP);

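	/* Each line shows the byte offset followed by four big-endian
	 * words, i.e. 16 bytes of error data per line. */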
	for (i = 0; i < len / 4; i += 4) {
		ipr_err("%08X: %08X %08X %08X %08X\n", i*4,
			be32_to_cpu(data[i]),
			be32_to_cpu(data[i+1]),
			be32_to_cpu(data[i+2]),
			be32_to_cpu(data[i+3]));
	}
}

/**
 * ipr_log_enhanced_dual_ioa_error - Log an enhanced dual adapter error.
 * @ioa_cfg: ioa config struct
 * @hostrcb: hostrcb struct
 *
 * Return value:
 *	none
 **/
static void ipr_log_enhanced_dual_ioa_error(struct ipr_ioa_cfg *ioa_cfg,
					    struct ipr_hostrcb *hostrcb)
{
	struct ipr_hostrcb_type_17_error *error;

	if (ioa_cfg->sis64)
		error = &hostrcb->hcam.u.error64.u.type_17_error;
	else
		error = &hostrcb->hcam.u.error.u.type_17_error;

	error->failure_reason[sizeof(error->failure_reason) - 1] = '\0';
	strim(error->failure_reason);

	ipr_hcam_err(hostrcb, "%s [PRC: %08X]\n", error->failure_reason,
		     be32_to_cpu(hostrcb->hcam.u.error.prc));
	ipr_log_ext_vpd_compact("Remote IOA", hostrcb, &error->vpd);
	ipr_log_hex_data(ioa_cfg, error->data,
			 be32_to_cpu(hostrcb->hcam.length) -
			 (offsetof(struct ipr_hostrcb_error, u) +
			  offsetof(struct ipr_hostrcb_type_17_error, data)));
}

/**
 * ipr_log_dual_ioa_error - Log a dual adapter error.
 * @ioa_cfg: ioa config struct
 * @hostrcb: hostrcb struct
 *
 * Return value:
 *	none
 **/
static void ipr_log_dual_ioa_error(struct ipr_ioa_cfg *ioa_cfg,
				   struct ipr_hostrcb *hostrcb)
{
	struct ipr_hostrcb_type_07_error *error;

	error = &hostrcb->hcam.u.error.u.type_07_error;
	error->failure_reason[sizeof(error->failure_reason) - 1] = '\0';
	strim(error->failure_reason);

	ipr_hcam_err(hostrcb, "%s [PRC: %08X]\n", error->failure_reason,
		     be32_to_cpu(hostrcb->hcam.u.error.prc));
	ipr_log_vpd_compact("Remote IOA", hostrcb, &error->vpd);
	ipr_log_hex_data(ioa_cfg, error->data,
			 be32_to_cpu(hostrcb->hcam.length) -
			 (offsetof(struct ipr_hostrcb_error, u) +
			  offsetof(struct ipr_hostrcb_type_07_error, data)));
}

static const struct {
	u8 active;
	char *desc;
} path_active_desc[] = {
	{ IPR_PATH_NO_INFO, "Path" },
	{ IPR_PATH_ACTIVE, "Active path" },
	{ IPR_PATH_NOT_ACTIVE, "Inactive path" }
};

static const struct {
	u8 state;
	char *desc;
} path_state_desc[] = {
	{ IPR_PATH_STATE_NO_INFO, "has no path state information available" },
	{ IPR_PATH_HEALTHY, "is healthy" },
	{ IPR_PATH_DEGRADED, "is degraded" },
	{ IPR_PATH_FAILED, "is failed" }
};

/**
 * ipr_log_fabric_path - Log a fabric path error
 * @hostrcb: hostrcb struct
 * @fabric: fabric descriptor
 *
 * Return value:
 *	none
 **/
static void ipr_log_fabric_path(struct ipr_hostrcb *hostrcb,
				struct ipr_hostrcb_fabric_desc *fabric)
{
	int i, j;
	u8 path_state = fabric->path_state;
	u8 active = path_state & IPR_PATH_ACTIVE_MASK;
	u8 state = path_state & IPR_PATH_STATE_MASK;

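	/* A cascade or phy value of 0xff appears to mean "not applicable";
	 * such fields are left out of the logged message. */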
	for (i = 0; i < ARRAY_SIZE(path_active_desc); i++) {
		if (path_active_desc[i].active != active)
			continue;

		for (j = 0; j < ARRAY_SIZE(path_state_desc); j++) {
			if (path_state_desc[j].state != state)
				continue;

			if (fabric->cascaded_expander == 0xff && fabric->phy == 0xff) {
				ipr_hcam_err(hostrcb, "%s %s: IOA Port=%d\n",
					     path_active_desc[i].desc, path_state_desc[j].desc,
					     fabric->ioa_port);
			} else if (fabric->cascaded_expander == 0xff) {
				ipr_hcam_err(hostrcb, "%s %s: IOA Port=%d, Phy=%d\n",
					     path_active_desc[i].desc, path_state_desc[j].desc,
					     fabric->ioa_port, fabric->phy);
			} else if (fabric->phy == 0xff) {
				ipr_hcam_err(hostrcb, "%s %s: IOA Port=%d, Cascade=%d\n",
					     path_active_desc[i].desc, path_state_desc[j].desc,
					     fabric->ioa_port, fabric->cascaded_expander);
			} else {
				ipr_hcam_err(hostrcb, "%s %s: IOA Port=%d, Cascade=%d, Phy=%d\n",
					     path_active_desc[i].desc, path_state_desc[j].desc,
					     fabric->ioa_port, fabric->cascaded_expander, fabric->phy);
			}
			return;
		}
	}

	ipr_err("Path state=%02X IOA Port=%d Cascade=%d Phy=%d\n", path_state,
		fabric->ioa_port, fabric->cascaded_expander, fabric->phy);
}

/**
 * ipr_log64_fabric_path - Log a fabric path error
 * @hostrcb: hostrcb struct
 * @fabric: fabric descriptor
 *
 * Return value:
 *	none
 **/
static void ipr_log64_fabric_path(struct ipr_hostrcb *hostrcb,
				  struct ipr_hostrcb64_fabric_desc *fabric)
{
	int i, j;
	u8 path_state = fabric->path_state;
	u8 active = path_state & IPR_PATH_ACTIVE_MASK;
	u8 state = path_state & IPR_PATH_STATE_MASK;
	char buffer[IPR_MAX_RES_PATH_LENGTH];

	for (i = 0; i < ARRAY_SIZE(path_active_desc); i++) {
		if (path_active_desc[i].active != active)
			continue;

		for (j = 0; j < ARRAY_SIZE(path_state_desc); j++) {
			if (path_state_desc[j].state != state)
				continue;

			ipr_hcam_err(hostrcb, "%s %s: Resource Path=%s\n",
				     path_active_desc[i].desc, path_state_desc[j].desc,
				     ipr_format_res_path(hostrcb->ioa_cfg,
							 fabric->res_path,
							 buffer, sizeof(buffer)));
			return;
		}
	}

	ipr_err("Path state=%02X Resource Path=%s\n", path_state,
		ipr_format_res_path(hostrcb->ioa_cfg, fabric->res_path,
				    buffer, sizeof(buffer)));
}

static const struct {
	u8 type;
	char *desc;
} path_type_desc[] = {
	{ IPR_PATH_CFG_IOA_PORT, "IOA port" },
	{ IPR_PATH_CFG_EXP_PORT, "Expander port" },
	{ IPR_PATH_CFG_DEVICE_PORT, "Device port" },
	{ IPR_PATH_CFG_DEVICE_LUN, "Device LUN" }
};

static const struct {
	u8 status;
	char *desc;
} path_status_desc[] = {
	{ IPR_PATH_CFG_NO_PROB, "Functional" },
	{ IPR_PATH_CFG_DEGRADED, "Degraded" },
	{ IPR_PATH_CFG_FAILED, "Failed" },
	{ IPR_PATH_CFG_SUSPECT, "Suspect" },
	{ IPR_PATH_NOT_DETECTED, "Missing" },
	{ IPR_PATH_INCORRECT_CONN, "Incorrectly connected" }
};

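/*
 * Link rate strings, indexed by the low four bits of a descriptor's
 * link_rate field (IPR_PHY_LINK_RATE_MASK); indexes 8 and 9 match the
 * standard SAS codes for 1.5Gbps and 3.0Gbps.
 */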
static const char *link_rate[] = {
	"unknown",
	"disabled",
	"phy reset problem",
	"spinup hold",
	"port selector",
	"unknown",
	"unknown",
	"unknown",
	"1.5Gbps",
	"3.0Gbps",
	"unknown",
	"unknown",
	"unknown",
	"unknown",
	"unknown",
	"unknown"
};

/**
 * ipr_log_path_elem - Log a fabric path element.
 * @hostrcb: hostrcb struct
 * @cfg: fabric path element struct
 *
 * Return value:
 *	none
 **/
static void ipr_log_path_elem(struct ipr_hostrcb *hostrcb,
			      struct ipr_hostrcb_config_element *cfg)
{
	int i, j;
	u8 type = cfg->type_status & IPR_PATH_CFG_TYPE_MASK;
	u8 status = cfg->type_status & IPR_PATH_CFG_STATUS_MASK;

	if (type == IPR_PATH_CFG_NOT_EXIST)
		return;

	for (i = 0; i < ARRAY_SIZE(path_type_desc); i++) {
		if (path_type_desc[i].type != type)
			continue;

		for (j = 0; j < ARRAY_SIZE(path_status_desc); j++) {
			if (path_status_desc[j].status != status)
				continue;

			if (type == IPR_PATH_CFG_IOA_PORT) {
				ipr_hcam_err(hostrcb, "%s %s: Phy=%d, Link rate=%s, WWN=%08X%08X\n",
					     path_status_desc[j].desc, path_type_desc[i].desc,
					     cfg->phy, link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
					     be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
			} else {
				if (cfg->cascaded_expander == 0xff && cfg->phy == 0xff) {
					ipr_hcam_err(hostrcb, "%s %s: Link rate=%s, WWN=%08X%08X\n",
						     path_status_desc[j].desc, path_type_desc[i].desc,
						     link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
						     be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
				} else if (cfg->cascaded_expander == 0xff) {
					ipr_hcam_err(hostrcb, "%s %s: Phy=%d, Link rate=%s, "
						     "WWN=%08X%08X\n", path_status_desc[j].desc,
						     path_type_desc[i].desc, cfg->phy,
						     link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
						     be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
				} else if (cfg->phy == 0xff) {
					ipr_hcam_err(hostrcb, "%s %s: Cascade=%d, Link rate=%s, "
						     "WWN=%08X%08X\n", path_status_desc[j].desc,
						     path_type_desc[i].desc, cfg->cascaded_expander,
						     link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
						     be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
				} else {
					ipr_hcam_err(hostrcb, "%s %s: Cascade=%d, Phy=%d, Link rate=%s "
						     "WWN=%08X%08X\n", path_status_desc[j].desc,
						     path_type_desc[i].desc, cfg->cascaded_expander, cfg->phy,
						     link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
						     be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
				}
			}
			return;
		}
	}

	ipr_hcam_err(hostrcb, "Path element=%02X: Cascade=%d Phy=%d Link rate=%s "
		     "WWN=%08X%08X\n", cfg->type_status, cfg->cascaded_expander, cfg->phy,
		     link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
		     be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
}

/**
 * ipr_log64_path_elem - Log a fabric path element.
 * @hostrcb: hostrcb struct
 * @cfg: fabric path element struct
 *
 * Return value:
 *	none
 **/
static void ipr_log64_path_elem(struct ipr_hostrcb *hostrcb,
				struct ipr_hostrcb64_config_element *cfg)
{
	int i, j;
	u8 desc_id = cfg->descriptor_id & IPR_DESCRIPTOR_MASK;
	u8 type = cfg->type_status & IPR_PATH_CFG_TYPE_MASK;
	u8 status = cfg->type_status & IPR_PATH_CFG_STATUS_MASK;
	char buffer[IPR_MAX_RES_PATH_LENGTH];

	if (type == IPR_PATH_CFG_NOT_EXIST || desc_id != IPR_DESCRIPTOR_SIS64)
		return;

	for (i = 0; i < ARRAY_SIZE(path_type_desc); i++) {
		if (path_type_desc[i].type != type)
			continue;

		for (j = 0; j < ARRAY_SIZE(path_status_desc); j++) {
			if (path_status_desc[j].status != status)
				continue;

			ipr_hcam_err(hostrcb, "%s %s: Resource Path=%s, Link rate=%s, WWN=%08X%08X\n",
				     path_status_desc[j].desc, path_type_desc[i].desc,
				     ipr_format_res_path(hostrcb->ioa_cfg,
							 cfg->res_path, buffer, sizeof(buffer)),
				     link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
				     be32_to_cpu(cfg->wwid[0]),
				     be32_to_cpu(cfg->wwid[1]));
			return;
		}
	}
	ipr_hcam_err(hostrcb, "Path element=%02X: Resource Path=%s, Link rate=%s "
		     "WWN=%08X%08X\n", cfg->type_status,
		     ipr_format_res_path(hostrcb->ioa_cfg,
					 cfg->res_path, buffer, sizeof(buffer)),
		     link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
		     be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
}

/**
 * ipr_log_fabric_error - Log a fabric error.
 * @ioa_cfg: ioa config struct
 * @hostrcb: hostrcb struct
 *
 * Return value:
 *	none
 **/
static void ipr_log_fabric_error(struct ipr_ioa_cfg *ioa_cfg,
				 struct ipr_hostrcb *hostrcb)
{
	struct ipr_hostrcb_type_20_error *error;
	struct ipr_hostrcb_fabric_desc *fabric;
	struct ipr_hostrcb_config_element *cfg;
	int i, add_len;

	error = &hostrcb->hcam.u.error.u.type_20_error;
	error->failure_reason[sizeof(error->failure_reason) - 1] = '\0';
	ipr_hcam_err(hostrcb, "%s\n", error->failure_reason);

	add_len = be32_to_cpu(hostrcb->hcam.length) -
		(offsetof(struct ipr_hostrcb_error, u) +
		 offsetof(struct ipr_hostrcb_type_20_error, desc));

	for (i = 0, fabric = error->desc; i < error->num_entries; i++) {
		ipr_log_fabric_path(hostrcb, fabric);
		for_each_fabric_cfg(fabric, cfg)
			ipr_log_path_elem(hostrcb, cfg);

		add_len -= be16_to_cpu(fabric->length);
		fabric = (struct ipr_hostrcb_fabric_desc *)
			((unsigned long)fabric + be16_to_cpu(fabric->length));
	}

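	/* add_len now counts only what follows the last fabric descriptor;
	 * log that trailing data as raw hex. */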
	ipr_log_hex_data(ioa_cfg, (u32 *)fabric, add_len);
}

/**
 * ipr_log_sis64_array_error - Log a sis64 array error.
 * @ioa_cfg: ioa config struct
 * @hostrcb: hostrcb struct
 *
 * Return value:
 *	none
 **/
static void ipr_log_sis64_array_error(struct ipr_ioa_cfg *ioa_cfg,
				      struct ipr_hostrcb *hostrcb)
{
	int i, num_entries;
	struct ipr_hostrcb_type_24_error *error;
	struct ipr_hostrcb64_array_data_entry *array_entry;
	char buffer[IPR_MAX_RES_PATH_LENGTH];
	const u8 zero_sn[IPR_SERIAL_NUM_LEN] = { [0 ... IPR_SERIAL_NUM_LEN-1] = '0' };

	error = &hostrcb->hcam.u.error64.u.type_24_error;

	ipr_err_separator;

	ipr_err("RAID %s Array Configuration: %s\n",
		error->protection_level,
		ipr_format_res_path(ioa_cfg, error->last_res_path,
				    buffer, sizeof(buffer)));

	ipr_err_separator;

	array_entry = error->array_member;
	num_entries = min_t(u32, error->num_entries,
			    ARRAY_SIZE(error->array_member));

	for (i = 0; i < num_entries; i++, array_entry++) {

		if (!memcmp(array_entry->vpd.vpd.sn, zero_sn, IPR_SERIAL_NUM_LEN))
			continue;

		if (error->exposed_mode_adn == i)
			ipr_err("Exposed Array Member %d:\n", i);
		else
			ipr_err("Array Member %d:\n", i);

		ipr_log_ext_vpd(&array_entry->vpd);
		ipr_err("Current Location: %s\n",
			ipr_format_res_path(ioa_cfg, array_entry->res_path,
					    buffer, sizeof(buffer)));
		ipr_err("Expected Location: %s\n",
			ipr_format_res_path(ioa_cfg,
					    array_entry->expected_res_path,
					    buffer, sizeof(buffer)));

		ipr_err_separator;
	}
}

/**
 * ipr_log_sis64_fabric_error - Log a sis64 fabric error.
 * @ioa_cfg: ioa config struct
 * @hostrcb: hostrcb struct
 *
 * Return value:
 *	none
 **/
static void ipr_log_sis64_fabric_error(struct ipr_ioa_cfg *ioa_cfg,
				       struct ipr_hostrcb *hostrcb)
{
	struct ipr_hostrcb_type_30_error *error;
	struct ipr_hostrcb64_fabric_desc *fabric;
	struct ipr_hostrcb64_config_element *cfg;
	int i, add_len;

	error = &hostrcb->hcam.u.error64.u.type_30_error;

	error->failure_reason[sizeof(error->failure_reason) - 1] = '\0';
	ipr_hcam_err(hostrcb, "%s\n", error->failure_reason);

	add_len = be32_to_cpu(hostrcb->hcam.length) -
		(offsetof(struct ipr_hostrcb64_error, u) +
		 offsetof(struct ipr_hostrcb_type_30_error, desc));

	for (i = 0, fabric = error->desc; i < error->num_entries; i++) {
		ipr_log64_fabric_path(hostrcb, fabric);
		for_each_fabric_cfg(fabric, cfg)
			ipr_log64_path_elem(hostrcb, cfg);

		add_len -= be16_to_cpu(fabric->length);
		fabric = (struct ipr_hostrcb64_fabric_desc *)
			((unsigned long)fabric + be16_to_cpu(fabric->length));
	}

	ipr_log_hex_data(ioa_cfg, (u32 *)fabric, add_len);
}

/**
 * ipr_log_generic_error - Log an adapter error.
 * @ioa_cfg: ioa config struct
 * @hostrcb: hostrcb struct
 *
 * Return value:
 *	none
 **/
static void ipr_log_generic_error(struct ipr_ioa_cfg *ioa_cfg,
				  struct ipr_hostrcb *hostrcb)
{
	ipr_log_hex_data(ioa_cfg, hostrcb->hcam.u.raw.data,
			 be32_to_cpu(hostrcb->hcam.length));
}

/**
 * ipr_get_error - Find the specified IOASC in the ipr_error_table.
 * @ioasc: IOASC
 *
 * This function will return the index into the ipr_error_table
 * for the specified IOASC. If the IOASC is not in the table,
 * 0 will be returned, which points to the entry used for unknown errors.
 *
 * Return value:
 *	index into the ipr_error_table
 **/
static u32 ipr_get_error(u32 ioasc)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(ipr_error_table); i++)
		if (ipr_error_table[i].ioasc == (ioasc & IPR_IOASC_IOASC_MASK))
			return i;

	return 0;
}

/**
 * ipr_handle_log_data - Log an adapter error.
 * @ioa_cfg: ioa config struct
 * @hostrcb: hostrcb struct
 *
 * This function logs an adapter error to the system.
 *
 * Return value:
 *	none
 **/
static void ipr_handle_log_data(struct ipr_ioa_cfg *ioa_cfg,
				struct ipr_hostrcb *hostrcb)
{
	u32 ioasc;
	int error_index;

	if (hostrcb->hcam.notify_type != IPR_HOST_RCB_NOTIF_TYPE_ERROR_LOG_ENTRY)
		return;

	if (hostrcb->hcam.notifications_lost == IPR_HOST_RCB_NOTIFICATIONS_LOST)
		dev_err(&ioa_cfg->pdev->dev, "Error notifications lost\n");

	if (ioa_cfg->sis64)
		ioasc = be32_to_cpu(hostrcb->hcam.u.error64.fd_ioasc);
	else
		ioasc = be32_to_cpu(hostrcb->hcam.u.error.fd_ioasc);

	if (!ioa_cfg->sis64 && (ioasc == IPR_IOASC_BUS_WAS_RESET ||
	    ioasc == IPR_IOASC_BUS_WAS_RESET_BY_OTHER)) {
		/* Tell the midlayer we had a bus reset so it will handle the UA properly */
		scsi_report_bus_reset(ioa_cfg->host,
				      hostrcb->hcam.u.error.fd_res_addr.bus);
	}

	error_index = ipr_get_error(ioasc);

	if (!ipr_error_table[error_index].log_hcam)
		return;

	ipr_hcam_err(hostrcb, "%s\n", ipr_error_table[error_index].error);

	/* Set indication we have logged an error */
	ioa_cfg->errors_logged++;

	if (ioa_cfg->log_level < ipr_error_table[error_index].log_hcam)
		return;
	if (be32_to_cpu(hostrcb->hcam.length) > sizeof(hostrcb->hcam.u.raw))
		hostrcb->hcam.length = cpu_to_be32(sizeof(hostrcb->hcam.u.raw));

	switch (hostrcb->hcam.overlay_id) {
	case IPR_HOST_RCB_OVERLAY_ID_2:
		ipr_log_cache_error(ioa_cfg, hostrcb);
		break;
	case IPR_HOST_RCB_OVERLAY_ID_3:
		ipr_log_config_error(ioa_cfg, hostrcb);
		break;
	case IPR_HOST_RCB_OVERLAY_ID_4:
	case IPR_HOST_RCB_OVERLAY_ID_6:
		ipr_log_array_error(ioa_cfg, hostrcb);
		break;
	case IPR_HOST_RCB_OVERLAY_ID_7:
		ipr_log_dual_ioa_error(ioa_cfg, hostrcb);
		break;
	case IPR_HOST_RCB_OVERLAY_ID_12:
		ipr_log_enhanced_cache_error(ioa_cfg, hostrcb);
		break;
	case IPR_HOST_RCB_OVERLAY_ID_13:
		ipr_log_enhanced_config_error(ioa_cfg, hostrcb);
		break;
	case IPR_HOST_RCB_OVERLAY_ID_14:
	case IPR_HOST_RCB_OVERLAY_ID_16:
		ipr_log_enhanced_array_error(ioa_cfg, hostrcb);
		break;
	case IPR_HOST_RCB_OVERLAY_ID_17:
		ipr_log_enhanced_dual_ioa_error(ioa_cfg, hostrcb);
		break;
	case IPR_HOST_RCB_OVERLAY_ID_20:
		ipr_log_fabric_error(ioa_cfg, hostrcb);
		break;
	case IPR_HOST_RCB_OVERLAY_ID_23:
		ipr_log_sis64_config_error(ioa_cfg, hostrcb);
		break;
	case IPR_HOST_RCB_OVERLAY_ID_24:
	case IPR_HOST_RCB_OVERLAY_ID_26:
		ipr_log_sis64_array_error(ioa_cfg, hostrcb);
		break;
	case IPR_HOST_RCB_OVERLAY_ID_30:
		ipr_log_sis64_fabric_error(ioa_cfg, hostrcb);
		break;
	case IPR_HOST_RCB_OVERLAY_ID_1:
	case IPR_HOST_RCB_OVERLAY_ID_DEFAULT:
	default:
		ipr_log_generic_error(ioa_cfg, hostrcb);
		break;
	}
}

/**
 * ipr_process_error - Op done function for an adapter error log.
 * @ipr_cmd: ipr command struct
 *
 * This function is the op done function for an error log host
 * controlled async from the adapter. It will log the error and
 * send the HCAM back to the adapter.
 *
 * Return value:
 *	none
 **/
static void ipr_process_error(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
	struct ipr_hostrcb *hostrcb = ipr_cmd->u.hostrcb;
	u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
	u32 fd_ioasc;

	if (ioa_cfg->sis64)
		fd_ioasc = be32_to_cpu(hostrcb->hcam.u.error64.fd_ioasc);
	else
		fd_ioasc = be32_to_cpu(hostrcb->hcam.u.error.fd_ioasc);

	list_del(&hostrcb->queue);
	list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);

	if (!ioasc) {
		ipr_handle_log_data(ioa_cfg, hostrcb);
		if (fd_ioasc == IPR_IOASC_NR_IOA_RESET_REQUIRED)
			ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_ABBREV);
	} else if (ioasc != IPR_IOASC_IOA_WAS_RESET) {
		dev_err(&ioa_cfg->pdev->dev,
			"Host RCB failed with IOASC: 0x%08X\n", ioasc);
	}

	ipr_send_hcam(ioa_cfg, IPR_HCAM_CDB_OP_CODE_LOG_DATA, hostrcb);
}

/**
 * ipr_timeout - An internally generated op has timed out.
 * @ipr_cmd: ipr command struct
 *
 * This function blocks host requests and initiates an
 * adapter reset.
 *
 * Return value:
 *	none
 **/
static void ipr_timeout(struct ipr_cmnd *ipr_cmd)
{
	unsigned long lock_flags = 0;
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;

	ENTER;
	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);

	ioa_cfg->errors_logged++;
	dev_err(&ioa_cfg->pdev->dev,
		"Adapter being reset due to command timeout.\n");

	if (WAIT_FOR_DUMP == ioa_cfg->sdt_state)
		ioa_cfg->sdt_state = GET_DUMP;

	if (!ioa_cfg->in_reset_reload || ioa_cfg->reset_cmd == ipr_cmd)
		ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);

	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
	LEAVE;
}

/**
 * ipr_oper_timeout - Adapter timed out transitioning to operational
 * @ipr_cmd: ipr command struct
 *
 * This function blocks host requests and initiates an
 * adapter reset.
 *
 * Return value:
 *	none
 **/
static void ipr_oper_timeout(struct ipr_cmnd *ipr_cmd)
{
	unsigned long lock_flags = 0;
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;

	ENTER;
	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);

	ioa_cfg->errors_logged++;
	dev_err(&ioa_cfg->pdev->dev,
		"Adapter timed out transitioning to operational.\n");

	if (WAIT_FOR_DUMP == ioa_cfg->sdt_state)
		ioa_cfg->sdt_state = GET_DUMP;

	if (!ioa_cfg->in_reset_reload || ioa_cfg->reset_cmd == ipr_cmd) {
		if (ipr_fastfail)
			ioa_cfg->reset_retries += IPR_NUM_RESET_RELOAD_RETRIES;
		ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
	}

	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
	LEAVE;
}

/**
 * ipr_reset_reload - Reset/Reload the IOA
 * @ioa_cfg: ioa config struct
 * @shutdown_type: shutdown type
 *
 * This function resets the adapter and re-initializes it.
 * This function assumes that all new host commands have been stopped.
 * Return value:
 *	SUCCESS / FAILED
 **/
static int ipr_reset_reload(struct ipr_ioa_cfg *ioa_cfg,
			    enum ipr_shutdown_type shutdown_type)
{
	if (!ioa_cfg->in_reset_reload)
		ipr_initiate_ioa_reset(ioa_cfg, shutdown_type);

	spin_unlock_irq(ioa_cfg->host->host_lock);
	wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
	spin_lock_irq(ioa_cfg->host->host_lock);

	/* If we got hit with a host reset while we were already resetting
	   the adapter for some reason, and that reset failed, fail here too. */
	if (ioa_cfg->ioa_is_dead) {
		ipr_trace;
		return FAILED;
	}

	return SUCCESS;
}

/**
 * ipr_find_ses_entry - Find matching SES in SES table
 * @res: resource entry struct of SES
 *
 * Return value:
 *	pointer to SES table entry / NULL on failure
 **/
static const struct ipr_ses_table_entry *
ipr_find_ses_entry(struct ipr_resource_entry *res)
{
	int i, j, matches;
	struct ipr_std_inq_vpids *vpids;
	const struct ipr_ses_table_entry *ste = ipr_ses_table;

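	/* An 'X' in compare_product_id_byte marks a product ID byte that
	 * must match the device; any other value makes that byte a wildcard. */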
	for (i = 0; i < ARRAY_SIZE(ipr_ses_table); i++, ste++) {
		for (j = 0, matches = 0; j < IPR_PROD_ID_LEN; j++) {
			if (ste->compare_product_id_byte[j] == 'X') {
				vpids = &res->std_inq_data.vpids;
				if (vpids->product_id[j] == ste->product_id[j])
					matches++;
				else
					break;
			} else
				matches++;
		}

		if (matches == IPR_PROD_ID_LEN)
			return ste;
	}

	return NULL;
}

/**
 * ipr_get_max_scsi_speed - Determine max SCSI speed for a given bus
 * @ioa_cfg: ioa config struct
 * @bus: SCSI bus
 * @bus_width: bus width
 *
 * Return value:
 *	SCSI bus speed in units of 100KHz, 1600 is 160 MHz
 *	For a 2-byte wide SCSI bus, the maximum transfer rate is twice the
 *	bus speed (e.g. a wide enabled bus running at a max of 160MHz moves
 *	up to 320MB/sec).
 **/
static u32 ipr_get_max_scsi_speed(struct ipr_ioa_cfg *ioa_cfg, u8 bus, u8 bus_width)
{
	struct ipr_resource_entry *res;
	const struct ipr_ses_table_entry *ste;
	u32 max_xfer_rate = IPR_MAX_SCSI_RATE(bus_width);

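	/* ste->max_bus_speed_limit is in MB/s; scaling by 10 and dividing by
	 * the bus width in bytes yields the 100KHz units described above. */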
	/* Loop through each config table entry in the config table buffer */
	list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
		if (!(IPR_IS_SES_DEVICE(res->std_inq_data)))
			continue;

		if (bus != res->bus)
			continue;

		if (!(ste = ipr_find_ses_entry(res)))
			continue;

		max_xfer_rate = (ste->max_bus_speed_limit * 10) / (bus_width / 8);
	}

	return max_xfer_rate;
}

/**
 * ipr_wait_iodbg_ack - Wait for an IODEBUG ACK from the IOA
 * @ioa_cfg: ioa config struct
 * @max_delay: max delay in micro-seconds to wait
 *
 * Waits for an IODEBUG ACK from the IOA, doing busy looping.
 *
 * Return value:
 *	0 on success / other on failure
 **/
static int ipr_wait_iodbg_ack(struct ipr_ioa_cfg *ioa_cfg, int max_delay)
{
	volatile u32 pcii_reg;
	int delay = 1;

	/* Read interrupt reg until IOA signals IO Debug Acknowledge */
	while (delay < max_delay) {
		pcii_reg = readl(ioa_cfg->regs.sense_interrupt_reg);

		if (pcii_reg & IPR_PCII_IO_DEBUG_ACKNOWLEDGE)
			return 0;

		/* udelay cannot be used if delay is more than a few milliseconds */
		if ((delay / 1000) > MAX_UDELAY_MS)
			mdelay(delay / 1000);
		else
			udelay(delay);

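		/* Exponential backoff: double the polling interval each pass */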
		delay += delay;
	}
	return -EIO;
}

/**
 * ipr_get_sis64_dump_data_section - Dump IOA memory
 * @ioa_cfg: ioa config struct
 * @start_addr: adapter address to dump
 * @dest: destination kernel buffer
 * @length_in_words: length to dump in 4 byte words
 *
 * Return value:
 *	0 on success
 **/
static int ipr_get_sis64_dump_data_section(struct ipr_ioa_cfg *ioa_cfg,
					   u32 start_addr,
					   __be32 *dest, u32 length_in_words)
{
	int i;

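	/* Each word is fetched indirectly: write the adapter address to the
	 * dump address register, then read the word back through the dump
	 * data register. */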
	for (i = 0; i < length_in_words; i++) {
		writel(start_addr+(i*4), ioa_cfg->regs.dump_addr_reg);
		*dest = cpu_to_be32(readl(ioa_cfg->regs.dump_data_reg));
		dest++;
	}

	return 0;
}

/**
 * ipr_get_ldump_data_section - Dump IOA memory
 * @ioa_cfg: ioa config struct
 * @start_addr: adapter address to dump
 * @dest: destination kernel buffer
 * @length_in_words: length to dump in 4 byte words
 *
 * Return value:
 *	0 on success / -EIO on failure
 **/
static int ipr_get_ldump_data_section(struct ipr_ioa_cfg *ioa_cfg,
				      u32 start_addr,
				      __be32 *dest, u32 length_in_words)
{
	volatile u32 temp_pcii_reg;
	int i, delay = 0;

	if (ioa_cfg->sis64)
		return ipr_get_sis64_dump_data_section(ioa_cfg, start_addr,
						       dest, length_in_words);

	/* Write IOA interrupt reg starting LDUMP state */
	writel((IPR_UPROCI_RESET_ALERT | IPR_UPROCI_IO_DEBUG_ALERT),
	       ioa_cfg->regs.set_uproc_interrupt_reg32);

	/* Wait for IO debug acknowledge */
	if (ipr_wait_iodbg_ack(ioa_cfg,
			       IPR_LDUMP_MAX_LONG_ACK_DELAY_IN_USEC)) {
		dev_err(&ioa_cfg->pdev->dev,
			"IOA dump long data transfer timeout\n");
		return -EIO;
	}

	/* Signal LDUMP interlocked - clear IO debug ack */
	writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE,
	       ioa_cfg->regs.clr_interrupt_reg);

	/* Write Mailbox with starting address */
	writel(start_addr, ioa_cfg->ioa_mailbox);

	/* Signal address valid - clear IOA Reset alert */
	writel(IPR_UPROCI_RESET_ALERT,
	       ioa_cfg->regs.clr_uproc_interrupt_reg32);

	for (i = 0; i < length_in_words; i++) {
		/* Wait for IO debug acknowledge */
		if (ipr_wait_iodbg_ack(ioa_cfg,
				       IPR_LDUMP_MAX_SHORT_ACK_DELAY_IN_USEC)) {
			dev_err(&ioa_cfg->pdev->dev,
				"IOA dump short data transfer timeout\n");
			return -EIO;
		}

		/* Read data from mailbox and increment destination pointer */
		*dest = cpu_to_be32(readl(ioa_cfg->ioa_mailbox));
		dest++;

		/* For all but the last word of data, signal data received */
		if (i < (length_in_words - 1)) {
			/* Signal dump data received - Clear IO debug Ack */
			writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE,
			       ioa_cfg->regs.clr_interrupt_reg);
		}
	}

	/* Signal end of block transfer. Set reset alert then clear IO debug ack */
	writel(IPR_UPROCI_RESET_ALERT,
	       ioa_cfg->regs.set_uproc_interrupt_reg32);

	writel(IPR_UPROCI_IO_DEBUG_ALERT,
	       ioa_cfg->regs.clr_uproc_interrupt_reg32);

	/* Signal dump data received - Clear IO debug Ack */
	writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE,
	       ioa_cfg->regs.clr_interrupt_reg);

	/* Wait for IOA to signal LDUMP exit - IOA reset alert will be cleared */
	while (delay < IPR_LDUMP_MAX_SHORT_ACK_DELAY_IN_USEC) {
		temp_pcii_reg =
		    readl(ioa_cfg->regs.sense_uproc_interrupt_reg32);

		if (!(temp_pcii_reg & IPR_UPROCI_RESET_ALERT))
			return 0;

		udelay(10);
		delay += 10;
	}

	return 0;
}

#ifdef CONFIG_SCSI_IPR_DUMP
/**
 * ipr_sdt_copy - Copy Smart Dump Table to kernel buffer
 * @ioa_cfg: ioa config struct
 * @pci_address: adapter address
 * @length: length of data to copy
 *
 * Copy data from PCI adapter to kernel buffer.
 * Note: length MUST be a 4 byte multiple
 * Return value:
 *	number of bytes copied
 **/
static int ipr_sdt_copy(struct ipr_ioa_cfg *ioa_cfg,
			unsigned long pci_address, u32 length)
{
	int bytes_copied = 0;
	int cur_len, rc, rem_len, rem_page_len, max_dump_size;
	__be32 *page;
	unsigned long lock_flags = 0;
	struct ipr_ioa_dump *ioa_dump = &ioa_cfg->dump->ioa_dump;

	if (ioa_cfg->sis64)
		max_dump_size = IPR_FMT3_MAX_IOA_DUMP_SIZE;
	else
		max_dump_size = IPR_FMT2_MAX_IOA_DUMP_SIZE;

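	/* Copy the dump one page-sized chunk at a time, growing the page
	 * list as needed; the host lock is held only while each section is
	 * actually transferred. */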
	while (bytes_copied < length &&
	       (ioa_dump->hdr.len + bytes_copied) < max_dump_size) {
		if (ioa_dump->page_offset >= PAGE_SIZE ||
		    ioa_dump->page_offset == 0) {
			page = (__be32 *)__get_free_page(GFP_ATOMIC);

			if (!page) {
				ipr_trace;
				return bytes_copied;
			}

			ioa_dump->page_offset = 0;
			ioa_dump->ioa_data[ioa_dump->next_page_index] = page;
			ioa_dump->next_page_index++;
		} else
			page = ioa_dump->ioa_data[ioa_dump->next_page_index - 1];

		rem_len = length - bytes_copied;
		rem_page_len = PAGE_SIZE - ioa_dump->page_offset;
		cur_len = min(rem_len, rem_page_len);

		spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
		if (ioa_cfg->sdt_state == ABORT_DUMP) {
			rc = -EIO;
		} else {
			rc = ipr_get_ldump_data_section(ioa_cfg,
							pci_address + bytes_copied,
							&page[ioa_dump->page_offset / 4],
							(cur_len / sizeof(u32)));
		}
		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);

		if (!rc) {
			ioa_dump->page_offset += cur_len;
			bytes_copied += cur_len;
		} else {
			ipr_trace;
			break;
		}
		schedule();
	}

	return bytes_copied;
}

2838
2839/**
2840 * ipr_init_dump_entry_hdr - Initialize a dump entry header.
2841 * @hdr: dump entry header struct
2842 *
2843 * Return value:
2844 * nothing
2845 **/
2846static void ipr_init_dump_entry_hdr(struct ipr_dump_entry_header *hdr)
2847{
2848 hdr->eye_catcher = IPR_DUMP_EYE_CATCHER;
2849 hdr->num_elems = 1;
2850 hdr->offset = sizeof(*hdr);
2851 hdr->status = IPR_DUMP_STATUS_SUCCESS;
2852}
2853
2854/**
2855 * ipr_dump_ioa_type_data - Fill in the adapter type in the dump.
2856 * @ioa_cfg: ioa config struct
2857 * @driver_dump: driver dump struct
2858 *
2859 * Return value:
2860 * nothing
2861 **/
2862static void ipr_dump_ioa_type_data(struct ipr_ioa_cfg *ioa_cfg,
2863 struct ipr_driver_dump *driver_dump)
2864{
2865 struct ipr_inquiry_page3 *ucode_vpd = &ioa_cfg->vpd_cbs->page3_data;
2866
2867 ipr_init_dump_entry_hdr(&driver_dump->ioa_type_entry.hdr);
2868 driver_dump->ioa_type_entry.hdr.len =
2869 sizeof(struct ipr_dump_ioa_type_entry) -
2870 sizeof(struct ipr_dump_entry_header);
2871 driver_dump->ioa_type_entry.hdr.data_type = IPR_DUMP_DATA_TYPE_BINARY;
2872 driver_dump->ioa_type_entry.hdr.id = IPR_DUMP_DRIVER_TYPE_ID;
2873 driver_dump->ioa_type_entry.type = ioa_cfg->type;
2874 driver_dump->ioa_type_entry.fw_version = (ucode_vpd->major_release << 24) |
2875 (ucode_vpd->card_type << 16) | (ucode_vpd->minor_release[0] << 8) |
2876 ucode_vpd->minor_release[1];
2877 driver_dump->hdr.num_entries++;
2878}
2879
2880/**
2881 * ipr_dump_version_data - Fill in the driver version in the dump.
2882 * @ioa_cfg: ioa config struct
2883 * @driver_dump: driver dump struct
2884 *
2885 * Return value:
2886 * nothing
2887 **/
2888static void ipr_dump_version_data(struct ipr_ioa_cfg *ioa_cfg,
2889 struct ipr_driver_dump *driver_dump)
2890{
2891 ipr_init_dump_entry_hdr(&driver_dump->version_entry.hdr);
2892 driver_dump->version_entry.hdr.len =
2893 sizeof(struct ipr_dump_version_entry) -
2894 sizeof(struct ipr_dump_entry_header);
2895 driver_dump->version_entry.hdr.data_type = IPR_DUMP_DATA_TYPE_ASCII;
2896 driver_dump->version_entry.hdr.id = IPR_DUMP_DRIVER_VERSION_ID;
2897 strcpy(driver_dump->version_entry.version, IPR_DRIVER_VERSION);
2898 driver_dump->hdr.num_entries++;
2899}
2900
2901/**
2902 * ipr_dump_trace_data - Fill in the IOA trace in the dump.
2903 * @ioa_cfg: ioa config struct
2904 * @driver_dump: driver dump struct
2905 *
2906 * Return value:
2907 * nothing
2908 **/
2909static void ipr_dump_trace_data(struct ipr_ioa_cfg *ioa_cfg,
2910 struct ipr_driver_dump *driver_dump)
2911{
2912 ipr_init_dump_entry_hdr(&driver_dump->trace_entry.hdr);
2913 driver_dump->trace_entry.hdr.len =
2914 sizeof(struct ipr_dump_trace_entry) -
2915 sizeof(struct ipr_dump_entry_header);
2916 driver_dump->trace_entry.hdr.data_type = IPR_DUMP_DATA_TYPE_BINARY;
2917 driver_dump->trace_entry.hdr.id = IPR_DUMP_TRACE_ID;
2918 memcpy(driver_dump->trace_entry.trace, ioa_cfg->trace, IPR_TRACE_SIZE);
2919 driver_dump->hdr.num_entries++;
2920}
2921
2922/**
2923 * ipr_dump_location_data - Fill in the IOA location in the dump.
2924 * @ioa_cfg: ioa config struct
2925 * @driver_dump: driver dump struct
2926 *
2927 * Return value:
2928 * nothing
2929 **/
2930static void ipr_dump_location_data(struct ipr_ioa_cfg *ioa_cfg,
2931 struct ipr_driver_dump *driver_dump)
2932{
2933 ipr_init_dump_entry_hdr(&driver_dump->location_entry.hdr);
2934 driver_dump->location_entry.hdr.len =
2935 sizeof(struct ipr_dump_location_entry) -
2936 sizeof(struct ipr_dump_entry_header);
2937 driver_dump->location_entry.hdr.data_type = IPR_DUMP_DATA_TYPE_ASCII;
2938 driver_dump->location_entry.hdr.id = IPR_DUMP_LOCATION_ID;
71610f55 2939 strcpy(driver_dump->location_entry.location, dev_name(&ioa_cfg->pdev->dev));
1da177e4
LT
2940 driver_dump->hdr.num_entries++;
2941}
2942
/**
 * ipr_get_ioa_dump - Perform a dump of the driver and adapter.
 * @ioa_cfg: ioa config struct
 * @dump: dump struct
 *
 * Return value:
 *	nothing
 **/
static void ipr_get_ioa_dump(struct ipr_ioa_cfg *ioa_cfg, struct ipr_dump *dump)
{
	unsigned long start_addr, sdt_word;
	unsigned long lock_flags = 0;
	struct ipr_driver_dump *driver_dump = &dump->driver_dump;
	struct ipr_ioa_dump *ioa_dump = &dump->ioa_dump;
	u32 num_entries, max_num_entries, start_off, end_off;
	u32 max_dump_size, bytes_to_copy, bytes_copied, rc;
	struct ipr_sdt *sdt;
	int valid = 1;
	int i;

	ENTER;

	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);

	if (ioa_cfg->sdt_state != READ_DUMP) {
		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
		return;
	}

	if (ioa_cfg->sis64) {
		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
		ssleep(IPR_DUMP_DELAY_SECONDS);
		spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
	}

	start_addr = readl(ioa_cfg->ioa_mailbox);

	if (!ioa_cfg->sis64 && !ipr_sdt_is_fmt2(start_addr)) {
		dev_err(&ioa_cfg->pdev->dev,
			"Invalid dump table format: %lx\n", start_addr);
		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
		return;
	}

	dev_err(&ioa_cfg->pdev->dev, "Dump of IOA initiated\n");

	driver_dump->hdr.eye_catcher = IPR_DUMP_EYE_CATCHER;

	/* Initialize the overall dump header */
	driver_dump->hdr.len = sizeof(struct ipr_driver_dump);
	driver_dump->hdr.num_entries = 1;
	driver_dump->hdr.first_entry_offset = sizeof(struct ipr_dump_header);
	driver_dump->hdr.status = IPR_DUMP_STATUS_SUCCESS;
	driver_dump->hdr.os = IPR_DUMP_OS_LINUX;
	driver_dump->hdr.driver_name = IPR_DUMP_DRIVER_NAME;

	ipr_dump_version_data(ioa_cfg, driver_dump);
	ipr_dump_location_data(ioa_cfg, driver_dump);
	ipr_dump_ioa_type_data(ioa_cfg, driver_dump);
	ipr_dump_trace_data(ioa_cfg, driver_dump);

	/* Update dump_header */
	driver_dump->hdr.len += sizeof(struct ipr_dump_entry_header);

	/* IOA Dump entry */
	ipr_init_dump_entry_hdr(&ioa_dump->hdr);
	ioa_dump->hdr.len = 0;
	ioa_dump->hdr.data_type = IPR_DUMP_DATA_TYPE_BINARY;
	ioa_dump->hdr.id = IPR_DUMP_IOA_DUMP_ID;

	/* First entries in sdt are actually a list of dump addresses and
	   lengths to gather the real dump data. sdt represents the pointer
	   to the ioa generated dump table. Dump data will be extracted based
	   on entries in this table */
	sdt = &ioa_dump->sdt;

	if (ioa_cfg->sis64) {
		max_num_entries = IPR_FMT3_NUM_SDT_ENTRIES;
		max_dump_size = IPR_FMT3_MAX_IOA_DUMP_SIZE;
	} else {
		max_num_entries = IPR_FMT2_NUM_SDT_ENTRIES;
		max_dump_size = IPR_FMT2_MAX_IOA_DUMP_SIZE;
	}

	bytes_to_copy = offsetof(struct ipr_sdt, entry) +
			(max_num_entries * sizeof(struct ipr_sdt_entry));
	rc = ipr_get_ldump_data_section(ioa_cfg, start_addr, (__be32 *)sdt,
					bytes_to_copy / sizeof(__be32));

	/* Smart Dump table is ready to use and the first entry is valid */
	if (rc || ((be32_to_cpu(sdt->hdr.state) != IPR_FMT3_SDT_READY_TO_USE) &&
	    (be32_to_cpu(sdt->hdr.state) != IPR_FMT2_SDT_READY_TO_USE))) {
		dev_err(&ioa_cfg->pdev->dev,
			"Dump of IOA failed. Dump table not valid: %d, %X.\n",
			rc, be32_to_cpu(sdt->hdr.state));
		driver_dump->hdr.status = IPR_DUMP_STATUS_FAILED;
		ioa_cfg->sdt_state = DUMP_OBTAINED;
		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
		return;
	}

	num_entries = be32_to_cpu(sdt->hdr.num_entries_used);

	if (num_entries > max_num_entries)
		num_entries = max_num_entries;

	/* Update dump length to the actual data to be copied */
	dump->driver_dump.hdr.len += sizeof(struct ipr_sdt_header);
	if (ioa_cfg->sis64)
		dump->driver_dump.hdr.len += num_entries * sizeof(struct ipr_sdt_entry);
	else
		dump->driver_dump.hdr.len += max_num_entries * sizeof(struct ipr_sdt_entry);

	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);

	for (i = 0; i < num_entries; i++) {
		if (ioa_dump->hdr.len > max_dump_size) {
			driver_dump->hdr.status = IPR_DUMP_STATUS_QUAL_SUCCESS;
			break;
		}

		if (sdt->entry[i].flags & IPR_SDT_VALID_ENTRY) {
			sdt_word = be32_to_cpu(sdt->entry[i].start_token);
			if (ioa_cfg->sis64)
				bytes_to_copy = be32_to_cpu(sdt->entry[i].end_token);
			else {
				start_off = sdt_word & IPR_FMT2_MBX_ADDR_MASK;
				end_off = be32_to_cpu(sdt->entry[i].end_token);

				if (ipr_sdt_is_fmt2(sdt_word) && sdt_word)
					bytes_to_copy = end_off - start_off;
				else
					valid = 0;
			}
			if (valid) {
				if (bytes_to_copy > max_dump_size) {
					sdt->entry[i].flags &= ~IPR_SDT_VALID_ENTRY;
					continue;
				}

				/* Copy data from adapter to driver buffers */
				bytes_copied = ipr_sdt_copy(ioa_cfg, sdt_word,
							    bytes_to_copy);

				ioa_dump->hdr.len += bytes_copied;

				if (bytes_copied != bytes_to_copy) {
					driver_dump->hdr.status = IPR_DUMP_STATUS_QUAL_SUCCESS;
					break;
				}
			}
		}
	}

	dev_err(&ioa_cfg->pdev->dev, "Dump of IOA completed.\n");

	/* Update dump_header */
	driver_dump->hdr.len += ioa_dump->hdr.len;
	wmb();
	ioa_cfg->sdt_state = DUMP_OBTAINED;
	LEAVE;
}

#else
#define ipr_get_ioa_dump(ioa_cfg, dump) do { } while (0)
#endif

/**
 * ipr_release_dump - Free adapter dump memory
 * @kref: kref struct
 *
 * Return value:
 *	nothing
 **/
static void ipr_release_dump(struct kref *kref)
{
	struct ipr_dump *dump = container_of(kref, struct ipr_dump, kref);
	struct ipr_ioa_cfg *ioa_cfg = dump->ioa_cfg;
	unsigned long lock_flags = 0;
	int i;

	ENTER;
	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
	ioa_cfg->dump = NULL;
	ioa_cfg->sdt_state = INACTIVE;
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);

	for (i = 0; i < dump->ioa_dump.next_page_index; i++)
		free_page((unsigned long) dump->ioa_dump.ioa_data[i]);

	vfree(dump->ioa_dump.ioa_data);
	kfree(dump);
	LEAVE;
}

/**
 * ipr_worker_thread - Worker thread
 * @work: work struct (embedded in the ioa config struct)
 *
 * Called at task level from a work thread. This function takes care
 * of adding and removing devices from the mid-layer as configuration
 * changes are detected by the adapter.
 *
 * Return value:
 *	nothing
 **/
static void ipr_worker_thread(struct work_struct *work)
{
	unsigned long lock_flags;
	struct ipr_resource_entry *res;
	struct scsi_device *sdev;
	struct ipr_dump *dump;
	struct ipr_ioa_cfg *ioa_cfg =
		container_of(work, struct ipr_ioa_cfg, work_q);
	u8 bus, target, lun;
	int did_work;

	ENTER;
	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);

	if (ioa_cfg->sdt_state == READ_DUMP) {
		dump = ioa_cfg->dump;
		if (!dump) {
			spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
			return;
		}
		kref_get(&dump->kref);
		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
		ipr_get_ioa_dump(ioa_cfg, dump);
		kref_put(&dump->kref, ipr_release_dump);

		spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
		if (ioa_cfg->sdt_state == DUMP_OBTAINED && !ioa_cfg->dump_timeout)
			ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
		return;
	}

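	/* Adding or removing a device drops the host lock, which can shift
	 * the resource list under us, so rescan from the top after every
	 * change until a full pass finds nothing to do. */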
restart:
	do {
		did_work = 0;
		if (!ioa_cfg->allow_cmds || !ioa_cfg->allow_ml_add_del) {
			spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
			return;
		}

		list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
			if (res->del_from_ml && res->sdev) {
				did_work = 1;
				sdev = res->sdev;
				if (!scsi_device_get(sdev)) {
					if (!res->add_to_ml)
						list_move_tail(&res->queue, &ioa_cfg->free_res_q);
					else
						res->del_from_ml = 0;
					spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
					scsi_remove_device(sdev);
					scsi_device_put(sdev);
					spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
				}
				break;
			}
		}
	} while (did_work);

	list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
		if (res->add_to_ml) {
			bus = res->bus;
			target = res->target;
			lun = res->lun;
			res->add_to_ml = 0;
			spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
			scsi_add_device(ioa_cfg->host, bus, target, lun);
			spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
			goto restart;
		}
	}

	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
	kobject_uevent(&ioa_cfg->host->shost_dev.kobj, KOBJ_CHANGE);
	LEAVE;
}
3225
#ifdef CONFIG_SCSI_IPR_TRACE
/**
 * ipr_read_trace - Dump the adapter trace
 * @filp:	open sysfs file
 * @kobj:	kobject struct
 * @bin_attr:	bin_attribute struct
 * @buf:	buffer
 * @off:	offset
 * @count:	buffer size
 *
 * Return value:
 *	number of bytes printed to buffer
 **/
static ssize_t ipr_read_trace(struct file *filp, struct kobject *kobj,
			      struct bin_attribute *bin_attr,
			      char *buf, loff_t off, size_t count)
{
	struct device *dev = container_of(kobj, struct device, kobj);
	struct Scsi_Host *shost = class_to_shost(dev);
	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
	unsigned long lock_flags = 0;
	ssize_t ret;

	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
	ret = memory_read_from_buffer(buf, count, &off, ioa_cfg->trace,
				IPR_TRACE_SIZE);
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);

	return ret;
}

static struct bin_attribute ipr_trace_attr = {
	.attr = {
		.name = "trace",
		.mode = S_IRUGO,
	},
	.size = 0,
	.read = ipr_read_trace,
};
#endif
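
/*
 * Example (illustrative, not part of the driver): the binary "trace"
 * attribute reads like any other sysfs file. Assuming it is registered
 * on the Scsi_Host class device (the exact path depends on the host
 * number and is an assumption here):
 *
 *	dd if=/sys/class/scsi_host/host0/trace of=/tmp/ipr_trace bs=4096
 *
 * memory_read_from_buffer() does the offset/count bookkeeping, so
 * partial reads and re-reads at an offset behave as expected.
 */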

/**
 * ipr_show_fw_version - Show the firmware version
 * @dev:	class device struct
 * @buf:	buffer
 *
 * Return value:
 *	number of bytes printed to buffer
 **/
static ssize_t ipr_show_fw_version(struct device *dev,
				   struct device_attribute *attr, char *buf)
{
	struct Scsi_Host *shost = class_to_shost(dev);
	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
	struct ipr_inquiry_page3 *ucode_vpd = &ioa_cfg->vpd_cbs->page3_data;
	unsigned long lock_flags = 0;
	int len;

	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
	len = snprintf(buf, PAGE_SIZE, "%02X%02X%02X%02X\n",
		       ucode_vpd->major_release, ucode_vpd->card_type,
		       ucode_vpd->minor_release[0],
		       ucode_vpd->minor_release[1]);
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
	return len;
}

static struct device_attribute ipr_fw_version_attr = {
	.attr = {
		.name = "fw_version",
		.mode = S_IRUGO,
	},
	.show = ipr_show_fw_version,
};

/**
 * ipr_show_log_level - Show the adapter's error logging level
 * @dev:	class device struct
 * @buf:	buffer
 *
 * Return value:
 *	number of bytes printed to buffer
 **/
static ssize_t ipr_show_log_level(struct device *dev,
				  struct device_attribute *attr, char *buf)
{
	struct Scsi_Host *shost = class_to_shost(dev);
	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
	unsigned long lock_flags = 0;
	int len;

	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
	len = snprintf(buf, PAGE_SIZE, "%d\n", ioa_cfg->log_level);
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
	return len;
}

/**
 * ipr_store_log_level - Change the adapter's error logging level
 * @dev:	class device struct
 * @buf:	buffer
 *
 * Return value:
 *	number of bytes consumed from buffer
 **/
static ssize_t ipr_store_log_level(struct device *dev,
				   struct device_attribute *attr,
				   const char *buf, size_t count)
{
	struct Scsi_Host *shost = class_to_shost(dev);
	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
	unsigned long lock_flags = 0;

	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
	ioa_cfg->log_level = simple_strtoul(buf, NULL, 10);
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
	return strlen(buf);
}

static struct device_attribute ipr_log_level_attr = {
	.attr = {
		.name = "log_level",
		.mode = S_IRUGO | S_IWUSR,
	},
	.show = ipr_show_log_level,
	.store = ipr_store_log_level
};
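
/*
 * Example (illustrative): the log level is a plain decimal sysfs value,
 * so it can be inspected and changed from userspace (path assumed):
 *
 *	cat /sys/class/scsi_host/host0/log_level
 *	echo 4 > /sys/class/scsi_host/host0/log_level
 *
 * The store path parses with simple_strtoul(), so anything after the
 * leading number is silently ignored.
 */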

/**
 * ipr_store_diagnostics - IOA Diagnostics interface
 * @dev:	device struct
 * @buf:	buffer
 * @count:	buffer size
 *
 * This function will reset the adapter and wait a reasonable
 * amount of time for any errors that the adapter might log.
 *
 * Return value:
 *	count on success / other on failure
 **/
static ssize_t ipr_store_diagnostics(struct device *dev,
				     struct device_attribute *attr,
				     const char *buf, size_t count)
{
	struct Scsi_Host *shost = class_to_shost(dev);
	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
	unsigned long lock_flags = 0;
	int rc = count;

	if (!capable(CAP_SYS_ADMIN))
		return -EACCES;

	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
	while (ioa_cfg->in_reset_reload) {
		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
		wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
		spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
	}

	ioa_cfg->errors_logged = 0;
	ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NORMAL);

	if (ioa_cfg->in_reset_reload) {
		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
		wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);

		/* Wait for a second for any errors to be logged */
		msleep(1000);
	} else {
		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
		return -EIO;
	}

	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
	if (ioa_cfg->in_reset_reload || ioa_cfg->errors_logged)
		rc = -EIO;
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);

	return rc;
}

static struct device_attribute ipr_diagnostics_attr = {
	.attr = {
		.name = "run_diagnostics",
		.mode = S_IWUSR,
	},
	.store = ipr_store_diagnostics
};
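
/*
 * Example (illustrative): any write by a CAP_SYS_ADMIN user triggers a
 * diagnostic reset; the value written is not interpreted (path assumed):
 *
 *	echo 1 > /sys/class/scsi_host/host0/run_diagnostics
 *
 * The write blocks until the reset completes and returns -EIO if the
 * adapter logged any errors during the reset window.
 */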

/**
 * ipr_show_adapter_state - Show the adapter's state
 * @dev:	device struct
 * @buf:	buffer
 *
 * Return value:
 *	number of bytes printed to buffer
 **/
static ssize_t ipr_show_adapter_state(struct device *dev,
				      struct device_attribute *attr, char *buf)
{
	struct Scsi_Host *shost = class_to_shost(dev);
	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
	unsigned long lock_flags = 0;
	int len;

	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
	if (ioa_cfg->ioa_is_dead)
		len = snprintf(buf, PAGE_SIZE, "offline\n");
	else
		len = snprintf(buf, PAGE_SIZE, "online\n");
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
	return len;
}

/**
 * ipr_store_adapter_state - Change adapter state
 * @dev:	device struct
 * @buf:	buffer
 * @count:	buffer size
 *
 * This function will change the adapter's state.
 *
 * Return value:
 *	count on success / other on failure
 **/
static ssize_t ipr_store_adapter_state(struct device *dev,
				       struct device_attribute *attr,
				       const char *buf, size_t count)
{
	struct Scsi_Host *shost = class_to_shost(dev);
	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
	unsigned long lock_flags;
	int result = count;

	if (!capable(CAP_SYS_ADMIN))
		return -EACCES;

	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
	if (ioa_cfg->ioa_is_dead && !strncmp(buf, "online", 6)) {
		ioa_cfg->ioa_is_dead = 0;
		ioa_cfg->reset_retries = 0;
		ioa_cfg->in_ioa_bringdown = 0;
		ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
	}
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
	wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);

	return result;
}

static struct device_attribute ipr_ioa_state_attr = {
	.attr = {
		.name = "online_state",
		.mode = S_IRUGO | S_IWUSR,
	},
	.show = ipr_show_adapter_state,
	.store = ipr_store_adapter_state
};
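
/*
 * Example (illustrative): a dead adapter can be brought back online by
 * writing the literal string "online" (path assumed):
 *
 *	echo online > /sys/class/scsi_host/host0/online_state
 *
 * Other strings, or writes while the adapter is already alive, are
 * accepted but have no effect.
 */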

/**
 * ipr_store_reset_adapter - Reset the adapter
 * @dev:	device struct
 * @buf:	buffer
 * @count:	buffer size
 *
 * This function will reset the adapter.
 *
 * Return value:
 *	count on success / other on failure
 **/
static ssize_t ipr_store_reset_adapter(struct device *dev,
				       struct device_attribute *attr,
				       const char *buf, size_t count)
{
	struct Scsi_Host *shost = class_to_shost(dev);
	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
	unsigned long lock_flags;
	int result = count;

	if (!capable(CAP_SYS_ADMIN))
		return -EACCES;

	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
	if (!ioa_cfg->in_reset_reload)
		ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NORMAL);
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
	wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);

	return result;
}

static struct device_attribute ipr_ioa_reset_attr = {
	.attr = {
		.name = "reset_host",
		.mode = S_IWUSR,
	},
	.store = ipr_store_reset_adapter
};
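
/*
 * Example (illustrative): as with run_diagnostics, the written value is
 * not interpreted; any write requests a normal-shutdown reset (path
 * assumed):
 *
 *	echo 1 > /sys/class/scsi_host/host0/reset_host
 *
 * The write only returns once reset/reload has finished.
 */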

/**
 * ipr_alloc_ucode_buffer - Allocates a microcode download buffer
 * @buf_len:	buffer length
 *
 * Allocates a DMA'able buffer in chunks and assembles a scatter/gather
 * list to use for microcode download
 *
 * Return value:
 *	pointer to sglist / NULL on failure
 **/
static struct ipr_sglist *ipr_alloc_ucode_buffer(int buf_len)
{
	int sg_size, order, bsize_elem, num_elem, i, j;
	struct ipr_sglist *sglist;
	struct scatterlist *scatterlist;
	struct page *page;

	/* Get the minimum size per scatter/gather element */
	sg_size = buf_len / (IPR_MAX_SGLIST - 1);

	/* Get the actual size per element */
	order = get_order(sg_size);

	/* Determine the actual number of bytes per element */
	bsize_elem = PAGE_SIZE * (1 << order);

	/* Determine the actual number of sg entries needed */
	if (buf_len % bsize_elem)
		num_elem = (buf_len / bsize_elem) + 1;
	else
		num_elem = buf_len / bsize_elem;

	/* Allocate a scatter/gather list for the DMA */
	sglist = kzalloc(sizeof(struct ipr_sglist) +
			 (sizeof(struct scatterlist) * (num_elem - 1)),
			 GFP_KERNEL);

	if (sglist == NULL) {
		ipr_trace;
		return NULL;
	}

	scatterlist = sglist->scatterlist;
	sg_init_table(scatterlist, num_elem);

	sglist->order = order;
	sglist->num_sg = num_elem;

	/* Allocate a bunch of sg elements */
	for (i = 0; i < num_elem; i++) {
		page = alloc_pages(GFP_KERNEL, order);
		if (!page) {
			ipr_trace;

			/* Free up what we already allocated */
			for (j = i - 1; j >= 0; j--)
				__free_pages(sg_page(&scatterlist[j]), order);
			kfree(sglist);
			return NULL;
		}

		sg_set_page(&scatterlist[i], page, 0, 0);
	}

	return sglist;
}
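
/*
 * Worked example of the sizing math above (illustrative; assumes
 * IPR_MAX_SGLIST == 64 and PAGE_SIZE == 4K): for a 1 MB microcode image,
 * sg_size = 1MB / 63 is roughly 16.6KB, so get_order() yields order 3
 * and bsize_elem = 32KB. 1MB / 32KB = 32 with no remainder, so
 * num_elem = 32 and the list is backed by 32 order-3 page allocations.
 */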

/**
 * ipr_free_ucode_buffer - Frees a microcode download buffer
 * @sglist:	scatter/gather list pointer
 *
 * Free a DMA'able ucode download buffer previously allocated with
 * ipr_alloc_ucode_buffer
 *
 * Return value:
 *	nothing
 **/
static void ipr_free_ucode_buffer(struct ipr_sglist *sglist)
{
	int i;

	for (i = 0; i < sglist->num_sg; i++)
		__free_pages(sg_page(&sglist->scatterlist[i]), sglist->order);

	kfree(sglist);
}

/**
 * ipr_copy_ucode_buffer - Copy user buffer to kernel buffer
 * @sglist:	scatter/gather list pointer
 * @buffer:	buffer pointer
 * @len:	buffer length
 *
 * Copy a microcode image from a user buffer into a buffer allocated by
 * ipr_alloc_ucode_buffer
 *
 * Return value:
 *	0 on success / other on failure
 **/
static int ipr_copy_ucode_buffer(struct ipr_sglist *sglist,
				 u8 *buffer, u32 len)
{
	int bsize_elem, i, result = 0;
	struct scatterlist *scatterlist;
	void *kaddr;

	/* Determine the actual number of bytes per element */
	bsize_elem = PAGE_SIZE * (1 << sglist->order);

	scatterlist = sglist->scatterlist;

	for (i = 0; i < (len / bsize_elem); i++, buffer += bsize_elem) {
		struct page *page = sg_page(&scatterlist[i]);

		kaddr = kmap(page);
		memcpy(kaddr, buffer, bsize_elem);
		kunmap(page);

		scatterlist[i].length = bsize_elem;

		if (result != 0) {
			ipr_trace;
			return result;
		}
	}

	if (len % bsize_elem) {
		struct page *page = sg_page(&scatterlist[i]);

		kaddr = kmap(page);
		memcpy(kaddr, buffer, len % bsize_elem);
		kunmap(page);

		scatterlist[i].length = len % bsize_elem;
	}

	sglist->buffer_len = len;
	return result;
}

/**
 * ipr_build_ucode_ioadl64 - Build a microcode download IOADL
 * @ipr_cmd:	ipr command struct
 * @sglist:	scatter/gather list
 *
 * Builds a microcode download IOA data list (IOADL).
 *
 **/
static void ipr_build_ucode_ioadl64(struct ipr_cmnd *ipr_cmd,
				    struct ipr_sglist *sglist)
{
	struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
	struct ipr_ioadl64_desc *ioadl64 = ipr_cmd->i.ioadl64;
	struct scatterlist *scatterlist = sglist->scatterlist;
	int i;

	ipr_cmd->dma_use_sg = sglist->num_dma_sg;
	ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
	ioarcb->data_transfer_length = cpu_to_be32(sglist->buffer_len);

	ioarcb->ioadl_len =
		cpu_to_be32(sizeof(struct ipr_ioadl64_desc) * ipr_cmd->dma_use_sg);
	for (i = 0; i < ipr_cmd->dma_use_sg; i++) {
		ioadl64[i].flags = cpu_to_be32(IPR_IOADL_FLAGS_WRITE);
		ioadl64[i].data_len = cpu_to_be32(sg_dma_len(&scatterlist[i]));
		ioadl64[i].address = cpu_to_be64(sg_dma_address(&scatterlist[i]));
	}

	ioadl64[i-1].flags |= cpu_to_be32(IPR_IOADL_FLAGS_LAST);
}

/**
 * ipr_build_ucode_ioadl - Build a microcode download IOADL
 * @ipr_cmd:	ipr command struct
 * @sglist:	scatter/gather list
 *
 * Builds a microcode download IOA data list (IOADL).
 *
 **/
static void ipr_build_ucode_ioadl(struct ipr_cmnd *ipr_cmd,
				  struct ipr_sglist *sglist)
{
	struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
	struct ipr_ioadl_desc *ioadl = ipr_cmd->i.ioadl;
	struct scatterlist *scatterlist = sglist->scatterlist;
	int i;

	ipr_cmd->dma_use_sg = sglist->num_dma_sg;
	ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
	ioarcb->data_transfer_length = cpu_to_be32(sglist->buffer_len);

	ioarcb->ioadl_len =
		cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg);

	for (i = 0; i < ipr_cmd->dma_use_sg; i++) {
		ioadl[i].flags_and_data_len =
			cpu_to_be32(IPR_IOADL_FLAGS_WRITE | sg_dma_len(&scatterlist[i]));
		ioadl[i].address =
			cpu_to_be32(sg_dma_address(&scatterlist[i]));
	}

	ioadl[i-1].flags_and_data_len |=
		cpu_to_be32(IPR_IOADL_FLAGS_LAST);
}
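
/*
 * Note (informal): the two builders above produce the same logical list.
 * sis64 adapters take 64-bit IOADL descriptors with a separate flags
 * word, while older adapters pack the WRITE flag and length into one
 * big-endian word. The reset job picks the variant based on
 * ioa_cfg->sis64.
 */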

/**
 * ipr_update_ioa_ucode - Update IOA's microcode
 * @ioa_cfg:	ioa config struct
 * @sglist:	scatter/gather list
 *
 * Initiate an adapter reset to update the IOA's microcode
 *
 * Return value:
 *	0 on success / -EIO on failure
 **/
static int ipr_update_ioa_ucode(struct ipr_ioa_cfg *ioa_cfg,
				struct ipr_sglist *sglist)
{
	unsigned long lock_flags;

	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
	while (ioa_cfg->in_reset_reload) {
		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
		wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
		spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
	}

	if (ioa_cfg->ucode_sglist) {
		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
		dev_err(&ioa_cfg->pdev->dev,
			"Microcode download already in progress\n");
		return -EIO;
	}

	sglist->num_dma_sg = pci_map_sg(ioa_cfg->pdev, sglist->scatterlist,
					sglist->num_sg, DMA_TO_DEVICE);

	if (!sglist->num_dma_sg) {
		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
		dev_err(&ioa_cfg->pdev->dev,
			"Failed to map microcode download buffer!\n");
		return -EIO;
	}

	ioa_cfg->ucode_sglist = sglist;
	ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NORMAL);
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
	wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);

	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
	ioa_cfg->ucode_sglist = NULL;
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
	return 0;
}

/**
 * ipr_store_update_fw - Update the firmware on the adapter
 * @dev:	device struct
 * @buf:	buffer
 * @count:	buffer size
 *
 * This function will update the firmware on the adapter.
 *
 * Return value:
 *	count on success / other on failure
 **/
static ssize_t ipr_store_update_fw(struct device *dev,
				   struct device_attribute *attr,
				   const char *buf, size_t count)
{
	struct Scsi_Host *shost = class_to_shost(dev);
	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
	struct ipr_ucode_image_header *image_hdr;
	const struct firmware *fw_entry;
	struct ipr_sglist *sglist;
	char fname[100];
	char *src;
	int len, result, dnld_size;

	if (!capable(CAP_SYS_ADMIN))
		return -EACCES;

	len = snprintf(fname, 99, "%s", buf);
	fname[len-1] = '\0';

	if (request_firmware(&fw_entry, fname, &ioa_cfg->pdev->dev)) {
		dev_err(&ioa_cfg->pdev->dev, "Firmware file %s not found\n", fname);
		return -EIO;
	}

	image_hdr = (struct ipr_ucode_image_header *)fw_entry->data;

	src = (u8 *)image_hdr + be32_to_cpu(image_hdr->header_length);
	dnld_size = fw_entry->size - be32_to_cpu(image_hdr->header_length);
	sglist = ipr_alloc_ucode_buffer(dnld_size);

	if (!sglist) {
		dev_err(&ioa_cfg->pdev->dev, "Microcode buffer allocation failed\n");
		release_firmware(fw_entry);
		return -ENOMEM;
	}

	result = ipr_copy_ucode_buffer(sglist, src, dnld_size);

	if (result) {
		dev_err(&ioa_cfg->pdev->dev,
			"Microcode buffer copy to DMA buffer failed\n");
		goto out;
	}

	ipr_info("Updating microcode, please be patient.  This may take up to 30 minutes.\n");

	result = ipr_update_ioa_ucode(ioa_cfg, sglist);

	if (!result)
		result = count;
out:
	ipr_free_ucode_buffer(sglist);
	release_firmware(fw_entry);
	return result;
}

static struct device_attribute ipr_update_fw_attr = {
	.attr = {
		.name = "update_fw",
		.mode = S_IWUSR,
	},
	.store = ipr_store_update_fw
};
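
/*
 * Example (illustrative): the written buffer is treated as a firmware
 * file name and resolved via request_firmware(), i.e. against the
 * system firmware search path such as /lib/firmware (path and file name
 * are assumptions):
 *
 *	echo ibm-adapter-ucode.bin > /sys/class/scsi_host/host0/update_fw
 *
 * The trailing newline from echo is removed by the fname[len-1] = '\0'
 * truncation above.
 */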

/**
 * ipr_show_fw_type - Show the adapter's firmware type.
 * @dev:	class device struct
 * @buf:	buffer
 *
 * Return value:
 *	number of bytes printed to buffer
 **/
static ssize_t ipr_show_fw_type(struct device *dev,
				struct device_attribute *attr, char *buf)
{
	struct Scsi_Host *shost = class_to_shost(dev);
	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
	unsigned long lock_flags = 0;
	int len;

	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
	len = snprintf(buf, PAGE_SIZE, "%d\n", ioa_cfg->sis64);
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
	return len;
}

static struct device_attribute ipr_ioa_fw_type_attr = {
	.attr = {
		.name = "fw_type",
		.mode = S_IRUGO,
	},
	.show = ipr_show_fw_type
};

static struct device_attribute *ipr_ioa_attrs[] = {
	&ipr_fw_version_attr,
	&ipr_log_level_attr,
	&ipr_diagnostics_attr,
	&ipr_ioa_state_attr,
	&ipr_ioa_reset_attr,
	&ipr_update_fw_attr,
	&ipr_ioa_fw_type_attr,
	NULL,
};

#ifdef CONFIG_SCSI_IPR_DUMP
/**
 * ipr_read_dump - Dump the adapter
 * @filp:	open sysfs file
 * @kobj:	kobject struct
 * @bin_attr:	bin_attribute struct
 * @buf:	buffer
 * @off:	offset
 * @count:	buffer size
 *
 * Return value:
 *	number of bytes printed to buffer
 **/
static ssize_t ipr_read_dump(struct file *filp, struct kobject *kobj,
			     struct bin_attribute *bin_attr,
			     char *buf, loff_t off, size_t count)
{
	struct device *cdev = container_of(kobj, struct device, kobj);
	struct Scsi_Host *shost = class_to_shost(cdev);
	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
	struct ipr_dump *dump;
	unsigned long lock_flags = 0;
	char *src;
	int len, sdt_end;
	size_t rc = count;

	if (!capable(CAP_SYS_ADMIN))
		return -EACCES;

	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
	dump = ioa_cfg->dump;

	if (ioa_cfg->sdt_state != DUMP_OBTAINED || !dump) {
		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
		return 0;
	}
	kref_get(&dump->kref);
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);

	if (off > dump->driver_dump.hdr.len) {
		kref_put(&dump->kref, ipr_release_dump);
		return 0;
	}

	if (off + count > dump->driver_dump.hdr.len) {
		count = dump->driver_dump.hdr.len - off;
		rc = count;
	}

	if (count && off < sizeof(dump->driver_dump)) {
		if (off + count > sizeof(dump->driver_dump))
			len = sizeof(dump->driver_dump) - off;
		else
			len = count;
		src = (u8 *)&dump->driver_dump + off;
		memcpy(buf, src, len);
		buf += len;
		off += len;
		count -= len;
	}

	off -= sizeof(dump->driver_dump);

	if (ioa_cfg->sis64)
		sdt_end = offsetof(struct ipr_ioa_dump, sdt.entry) +
			  (be32_to_cpu(dump->ioa_dump.sdt.hdr.num_entries_used) *
			   sizeof(struct ipr_sdt_entry));
	else
		sdt_end = offsetof(struct ipr_ioa_dump, sdt.entry) +
			  (IPR_FMT2_NUM_SDT_ENTRIES * sizeof(struct ipr_sdt_entry));

	if (count && off < sdt_end) {
		if (off + count > sdt_end)
			len = sdt_end - off;
		else
			len = count;
		src = (u8 *)&dump->ioa_dump + off;
		memcpy(buf, src, len);
		buf += len;
		off += len;
		count -= len;
	}

	off -= sdt_end;

	while (count) {
		if ((off & PAGE_MASK) != ((off + count) & PAGE_MASK))
			len = PAGE_ALIGN(off) - off;
		else
			len = count;
		src = (u8 *)dump->ioa_dump.ioa_data[(off & PAGE_MASK) >> PAGE_SHIFT];
		src += off & ~PAGE_MASK;
		memcpy(buf, src, len);
		buf += len;
		off += len;
		count -= len;
	}

	kref_put(&dump->kref, ipr_release_dump);
	return rc;
}
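
/*
 * Layout note for the copy-out above (derived informally from the code):
 * the dump is presented to userspace as three consecutive regions -- the
 * fixed driver dump header, the SDT (whose length differs between sis64
 * and format-2 adapters, hence the computed sdt_end), and finally the
 * ioa_dump.ioa_data page array, which is walked one page at a time so a
 * read may start and stop at arbitrary offsets.
 */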

/**
 * ipr_alloc_dump - Prepare for adapter dump
 * @ioa_cfg:	ioa config struct
 *
 * Return value:
 *	0 on success / other on failure
 **/
static int ipr_alloc_dump(struct ipr_ioa_cfg *ioa_cfg)
{
	struct ipr_dump *dump;
	__be32 **ioa_data;
	unsigned long lock_flags = 0;

	dump = kzalloc(sizeof(struct ipr_dump), GFP_KERNEL);

	if (!dump) {
		ipr_err("Dump memory allocation failed\n");
		return -ENOMEM;
	}

	if (ioa_cfg->sis64)
		ioa_data = vmalloc(IPR_FMT3_MAX_NUM_DUMP_PAGES * sizeof(__be32 *));
	else
		ioa_data = vmalloc(IPR_FMT2_MAX_NUM_DUMP_PAGES * sizeof(__be32 *));

	if (!ioa_data) {
		ipr_err("Dump memory allocation failed\n");
		kfree(dump);
		return -ENOMEM;
	}

	dump->ioa_dump.ioa_data = ioa_data;

	kref_init(&dump->kref);
	dump->ioa_cfg = ioa_cfg;

	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);

	if (INACTIVE != ioa_cfg->sdt_state) {
		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
		vfree(dump->ioa_dump.ioa_data);
		kfree(dump);
		return 0;
	}

	ioa_cfg->dump = dump;
	ioa_cfg->sdt_state = WAIT_FOR_DUMP;
	if (ioa_cfg->ioa_is_dead && !ioa_cfg->dump_taken) {
		ioa_cfg->dump_taken = 1;
		schedule_work(&ioa_cfg->work_q);
	}
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);

	return 0;
}

/**
 * ipr_free_dump - Free adapter dump memory
 * @ioa_cfg:	ioa config struct
 *
 * Return value:
 *	0 on success / other on failure
 **/
static int ipr_free_dump(struct ipr_ioa_cfg *ioa_cfg)
{
	struct ipr_dump *dump;
	unsigned long lock_flags = 0;

	ENTER;

	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
	dump = ioa_cfg->dump;
	if (!dump) {
		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
		return 0;
	}

	ioa_cfg->dump = NULL;
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);

	kref_put(&dump->kref, ipr_release_dump);

	LEAVE;
	return 0;
}

/**
 * ipr_write_dump - Setup dump state of adapter
 * @filp:	open sysfs file
 * @kobj:	kobject struct
 * @bin_attr:	bin_attribute struct
 * @buf:	buffer
 * @off:	offset
 * @count:	buffer size
 *
 * Return value:
 *	count on success / other on failure
 **/
static ssize_t ipr_write_dump(struct file *filp, struct kobject *kobj,
			      struct bin_attribute *bin_attr,
			      char *buf, loff_t off, size_t count)
{
	struct device *cdev = container_of(kobj, struct device, kobj);
	struct Scsi_Host *shost = class_to_shost(cdev);
	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
	int rc;

	if (!capable(CAP_SYS_ADMIN))
		return -EACCES;

	if (buf[0] == '1')
		rc = ipr_alloc_dump(ioa_cfg);
	else if (buf[0] == '0')
		rc = ipr_free_dump(ioa_cfg);
	else
		return -EINVAL;

	if (rc)
		return rc;
	else
		return count;
}

static struct bin_attribute ipr_dump_attr = {
	.attr = {
		.name = "dump",
		.mode = S_IRUSR | S_IWUSR,
	},
	.size = 0,
	.read = ipr_read_dump,
	.write = ipr_write_dump
};
#else
static int ipr_free_dump(struct ipr_ioa_cfg *ioa_cfg) { return 0; };
#endif
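
/*
 * Example (illustrative): taking an adapter dump from userspace with the
 * attributes above (path assumed):
 *
 *	echo 1 > /sys/class/scsi_host/host0/dump    # allocate dump buffers
 *	cat /sys/class/scsi_host/host0/dump > /tmp/ipr_dump
 *	echo 0 > /sys/class/scsi_host/host0/dump    # release the memory
 *
 * Reads return data only once the dump state reaches DUMP_OBTAINED.
 */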

/**
 * ipr_change_queue_depth - Change the device's queue depth
 * @sdev:	scsi device struct
 * @qdepth:	depth to set
 * @reason:	calling context
 *
 * Return value:
 *	actual depth set
 **/
static int ipr_change_queue_depth(struct scsi_device *sdev, int qdepth,
				  int reason)
{
	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
	struct ipr_resource_entry *res;
	unsigned long lock_flags = 0;

	if (reason != SCSI_QDEPTH_DEFAULT)
		return -EOPNOTSUPP;

	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
	res = (struct ipr_resource_entry *)sdev->hostdata;

	if (res && ipr_is_gata(res) && qdepth > IPR_MAX_CMD_PER_ATA_LUN)
		qdepth = IPR_MAX_CMD_PER_ATA_LUN;
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);

	scsi_adjust_queue_depth(sdev, scsi_get_tag_type(sdev), qdepth);
	return sdev->queue_depth;
}

/**
 * ipr_change_queue_type - Change the device's queue type
 * @sdev:	scsi device struct
 * @tag_type:	type of tags to use
 *
 * Return value:
 *	actual queue type set
 **/
static int ipr_change_queue_type(struct scsi_device *sdev, int tag_type)
{
	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
	struct ipr_resource_entry *res;
	unsigned long lock_flags = 0;

	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
	res = (struct ipr_resource_entry *)sdev->hostdata;

	if (res) {
		if (ipr_is_gscsi(res) && sdev->tagged_supported) {
			/*
			 * We don't bother quiescing the device here since the
			 * adapter firmware does it for us.
			 */
			scsi_set_tag_type(sdev, tag_type);

			if (tag_type)
				scsi_activate_tcq(sdev, sdev->queue_depth);
			else
				scsi_deactivate_tcq(sdev, sdev->queue_depth);
		} else
			tag_type = 0;
	} else
		tag_type = 0;

	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
	return tag_type;
}

/**
 * ipr_show_adapter_handle - Show the adapter's resource handle for this device
 * @dev:	device struct
 * @attr:	device attribute structure
 * @buf:	buffer
 *
 * Return value:
 *	number of bytes printed to buffer
 **/
static ssize_t ipr_show_adapter_handle(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct scsi_device *sdev = to_scsi_device(dev);
	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
	struct ipr_resource_entry *res;
	unsigned long lock_flags = 0;
	ssize_t len = -ENXIO;

	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
	res = (struct ipr_resource_entry *)sdev->hostdata;
	if (res)
		len = snprintf(buf, PAGE_SIZE, "%08X\n", res->res_handle);
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
	return len;
}

static struct device_attribute ipr_adapter_handle_attr = {
	.attr = {
		.name = "adapter_handle",
		.mode = S_IRUSR,
	},
	.show = ipr_show_adapter_handle
};

/**
 * ipr_show_resource_path - Show the resource path or the resource address for
 *			    this device.
 * @dev:	device struct
 * @attr:	device attribute structure
 * @buf:	buffer
 *
 * Return value:
 *	number of bytes printed to buffer
 **/
static ssize_t ipr_show_resource_path(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct scsi_device *sdev = to_scsi_device(dev);
	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
	struct ipr_resource_entry *res;
	unsigned long lock_flags = 0;
	ssize_t len = -ENXIO;
	char buffer[IPR_MAX_RES_PATH_LENGTH];

	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
	res = (struct ipr_resource_entry *)sdev->hostdata;
	if (res && ioa_cfg->sis64)
		len = snprintf(buf, PAGE_SIZE, "%s\n",
			       __ipr_format_res_path(res->res_path, buffer,
						     sizeof(buffer)));
	else if (res)
		len = snprintf(buf, PAGE_SIZE, "%d:%d:%d:%d\n", ioa_cfg->host->host_no,
			       res->bus, res->target, res->lun);

	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
	return len;
}

static struct device_attribute ipr_resource_path_attr = {
	.attr = {
		.name = "resource_path",
		.mode = S_IRUGO,
	},
	.show = ipr_show_resource_path
};

/**
 * ipr_show_device_id - Show the device_id for this device.
 * @dev:	device struct
 * @attr:	device attribute structure
 * @buf:	buffer
 *
 * Return value:
 *	number of bytes printed to buffer
 **/
static ssize_t ipr_show_device_id(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct scsi_device *sdev = to_scsi_device(dev);
	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
	struct ipr_resource_entry *res;
	unsigned long lock_flags = 0;
	ssize_t len = -ENXIO;

	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
	res = (struct ipr_resource_entry *)sdev->hostdata;
	if (res && ioa_cfg->sis64)
		len = snprintf(buf, PAGE_SIZE, "0x%llx\n", res->dev_id);
	else if (res)
		len = snprintf(buf, PAGE_SIZE, "0x%llx\n", res->lun_wwn);

	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
	return len;
}

static struct device_attribute ipr_device_id_attr = {
	.attr = {
		.name = "device_id",
		.mode = S_IRUGO,
	},
	.show = ipr_show_device_id
};

/**
 * ipr_show_resource_type - Show the resource type for this device.
 * @dev:	device struct
 * @attr:	device attribute structure
 * @buf:	buffer
 *
 * Return value:
 *	number of bytes printed to buffer
 **/
static ssize_t ipr_show_resource_type(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct scsi_device *sdev = to_scsi_device(dev);
	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
	struct ipr_resource_entry *res;
	unsigned long lock_flags = 0;
	ssize_t len = -ENXIO;

	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
	res = (struct ipr_resource_entry *)sdev->hostdata;

	if (res)
		len = snprintf(buf, PAGE_SIZE, "%x\n", res->type);

	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
	return len;
}

static struct device_attribute ipr_resource_type_attr = {
	.attr = {
		.name = "resource_type",
		.mode = S_IRUGO,
	},
	.show = ipr_show_resource_type
};

static struct device_attribute *ipr_dev_attrs[] = {
	&ipr_adapter_handle_attr,
	&ipr_resource_path_attr,
	&ipr_device_id_attr,
	&ipr_resource_type_attr,
	NULL,
};

/**
 * ipr_biosparam - Return the HSC mapping
 * @sdev:		scsi device struct
 * @block_device:	block device pointer
 * @capacity:		capacity of the device
 * @parm:		Array containing returned HSC values.
 *
 * This function generates the HSC parms that fdisk uses.
 * We want to make sure we return something that places partitions
 * on 4k boundaries for best performance with the IOA.
 *
 * Return value:
 *	0 on success
 **/
static int ipr_biosparam(struct scsi_device *sdev,
			 struct block_device *block_device,
			 sector_t capacity, int *parm)
{
	int heads, sectors;
	sector_t cylinders;

	heads = 128;
	sectors = 32;

	cylinders = capacity;
	sector_div(cylinders, (128 * 32));

	/* return result */
	parm[0] = heads;
	parm[1] = sectors;
	parm[2] = cylinders;

	return 0;
}
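
/*
 * Worked example (illustrative): with the fixed 128 heads x 32 sectors
 * geometry, one cylinder is 128 * 32 = 4096 sectors. A 71,000,000-sector
 * disk therefore reports 71000000 / 4096 = 17333 cylinders (truncated by
 * sector_div). Since fdisk aligns partitions to cylinder boundaries,
 * every partition starts on a 4096-sector boundary, which satisfies the
 * 4k alignment the IOA prefers.
 */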

/**
 * ipr_find_starget - Find target based on bus/target.
 * @starget:	scsi target struct
 *
 * Return value:
 *	resource entry pointer if found / NULL if not found
 **/
static struct ipr_resource_entry *ipr_find_starget(struct scsi_target *starget)
{
	struct Scsi_Host *shost = dev_to_shost(&starget->dev);
	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) shost->hostdata;
	struct ipr_resource_entry *res;

	list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
		if ((res->bus == starget->channel) &&
		    (res->target == starget->id)) {
			return res;
		}
	}

	return NULL;
}

static struct ata_port_info sata_port_info;

/**
 * ipr_target_alloc - Prepare for commands to a SCSI target
 * @starget:	scsi target struct
 *
 * If the device is a SATA device, this function allocates an
 * ATA port with libata, else it does nothing.
 *
 * Return value:
 *	0 on success / non-0 on failure
 **/
static int ipr_target_alloc(struct scsi_target *starget)
{
	struct Scsi_Host *shost = dev_to_shost(&starget->dev);
	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) shost->hostdata;
	struct ipr_sata_port *sata_port;
	struct ata_port *ap;
	struct ipr_resource_entry *res;
	unsigned long lock_flags;

	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
	res = ipr_find_starget(starget);
	starget->hostdata = NULL;

	if (res && ipr_is_gata(res)) {
		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
		sata_port = kzalloc(sizeof(*sata_port), GFP_KERNEL);
		if (!sata_port)
			return -ENOMEM;

		ap = ata_sas_port_alloc(&ioa_cfg->ata_host, &sata_port_info, shost);
		if (ap) {
			spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
			sata_port->ioa_cfg = ioa_cfg;
			sata_port->ap = ap;
			sata_port->res = res;

			res->sata_port = sata_port;
			ap->private_data = sata_port;
			starget->hostdata = sata_port;
		} else {
			kfree(sata_port);
			return -ENOMEM;
		}
	}
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);

	return 0;
}

/**
 * ipr_target_destroy - Destroy a SCSI target
 * @starget:	scsi target struct
 *
 * If the device was a SATA device, this function frees the libata
 * ATA port, else it does nothing.
 *
 **/
static void ipr_target_destroy(struct scsi_target *starget)
{
	struct ipr_sata_port *sata_port = starget->hostdata;
	struct Scsi_Host *shost = dev_to_shost(&starget->dev);
	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) shost->hostdata;

	if (ioa_cfg->sis64) {
		if (!ipr_find_starget(starget)) {
			if (starget->channel == IPR_ARRAY_VIRTUAL_BUS)
				clear_bit(starget->id, ioa_cfg->array_ids);
			else if (starget->channel == IPR_VSET_VIRTUAL_BUS)
				clear_bit(starget->id, ioa_cfg->vset_ids);
			else if (starget->channel == 0)
				clear_bit(starget->id, ioa_cfg->target_ids);
		}
	}

	if (sata_port) {
		starget->hostdata = NULL;
		ata_sas_port_destroy(sata_port->ap);
		kfree(sata_port);
	}
}

/**
 * ipr_find_sdev - Find device based on bus/target/lun.
 * @sdev:	scsi device struct
 *
 * Return value:
 *	resource entry pointer if found / NULL if not found
 **/
static struct ipr_resource_entry *ipr_find_sdev(struct scsi_device *sdev)
{
	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) sdev->host->hostdata;
	struct ipr_resource_entry *res;

	list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
		if ((res->bus == sdev->channel) &&
		    (res->target == sdev->id) &&
		    (res->lun == sdev->lun))
			return res;
	}

	return NULL;
}

/**
 * ipr_slave_destroy - Unconfigure a SCSI device
 * @sdev:	scsi device struct
 *
 * Return value:
 *	nothing
 **/
static void ipr_slave_destroy(struct scsi_device *sdev)
{
	struct ipr_resource_entry *res;
	struct ipr_ioa_cfg *ioa_cfg;
	unsigned long lock_flags = 0;

	ioa_cfg = (struct ipr_ioa_cfg *) sdev->host->hostdata;

	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
	res = (struct ipr_resource_entry *) sdev->hostdata;
	if (res) {
		if (res->sata_port)
			res->sata_port->ap->link.device[0].class = ATA_DEV_NONE;
		sdev->hostdata = NULL;
		res->sdev = NULL;
		res->sata_port = NULL;
	}
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
}

/**
 * ipr_slave_configure - Configure a SCSI device
 * @sdev:	scsi device struct
 *
 * This function configures the specified scsi device.
 *
 * Return value:
 *	0 on success
 **/
static int ipr_slave_configure(struct scsi_device *sdev)
{
	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) sdev->host->hostdata;
	struct ipr_resource_entry *res;
	struct ata_port *ap = NULL;
	unsigned long lock_flags = 0;
	char buffer[IPR_MAX_RES_PATH_LENGTH];

	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
	res = sdev->hostdata;
	if (res) {
		if (ipr_is_af_dasd_device(res))
			sdev->type = TYPE_RAID;
		if (ipr_is_af_dasd_device(res) || ipr_is_ioa_resource(res)) {
			sdev->scsi_level = 4;
			sdev->no_uld_attach = 1;
		}
		if (ipr_is_vset_device(res)) {
			blk_queue_rq_timeout(sdev->request_queue,
					     IPR_VSET_RW_TIMEOUT);
			blk_queue_max_hw_sectors(sdev->request_queue, IPR_VSET_MAX_SECTORS);
		}
		if (ipr_is_gata(res) && res->sata_port)
			ap = res->sata_port->ap;
		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);

		if (ap) {
			scsi_adjust_queue_depth(sdev, 0, IPR_MAX_CMD_PER_ATA_LUN);
			ata_sas_slave_configure(sdev, ap);
		} else
			scsi_adjust_queue_depth(sdev, 0, sdev->host->cmd_per_lun);
		if (ioa_cfg->sis64)
			sdev_printk(KERN_INFO, sdev, "Resource path: %s\n",
				    ipr_format_res_path(ioa_cfg,
					res->res_path, buffer, sizeof(buffer)));
		return 0;
	}
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
	return 0;
}

/**
 * ipr_ata_slave_alloc - Prepare for commands to a SATA device
 * @sdev:	scsi device struct
 *
 * This function initializes an ATA port so that future commands
 * sent through queuecommand will work.
 *
 * Return value:
 *	0 on success
 **/
static int ipr_ata_slave_alloc(struct scsi_device *sdev)
{
	struct ipr_sata_port *sata_port = NULL;
	int rc = -ENXIO;

	ENTER;
	if (sdev->sdev_target)
		sata_port = sdev->sdev_target->hostdata;
	if (sata_port) {
		rc = ata_sas_port_init(sata_port->ap);
		if (rc == 0)
			rc = ata_sas_sync_probe(sata_port->ap);
	}

	if (rc)
		ipr_slave_destroy(sdev);

	LEAVE;
	return rc;
}

/**
 * ipr_slave_alloc - Prepare for commands to a device.
 * @sdev:	scsi device struct
 *
 * This function saves a pointer to the resource entry
 * in the scsi device struct if the device exists. We
 * can then use this pointer in ipr_queuecommand when
 * handling new commands.
 *
 * Return value:
 *	0 on success / -ENXIO if device does not exist
 **/
static int ipr_slave_alloc(struct scsi_device *sdev)
{
	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) sdev->host->hostdata;
	struct ipr_resource_entry *res;
	unsigned long lock_flags;
	int rc = -ENXIO;

	sdev->hostdata = NULL;

	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);

	res = ipr_find_sdev(sdev);
	if (res) {
		res->sdev = sdev;
		res->add_to_ml = 0;
		res->in_erp = 0;
		sdev->hostdata = res;
		if (!ipr_is_naca_model(res))
			res->needs_sync_complete = 1;
		rc = 0;
		if (ipr_is_gata(res)) {
			spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
			return ipr_ata_slave_alloc(sdev);
		}
	}

	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);

	return rc;
}

/**
 * __ipr_eh_host_reset - Reset the host adapter
 * @scsi_cmd:	scsi command struct
 *
 * Return value:
 *	SUCCESS / FAILED
 **/
static int __ipr_eh_host_reset(struct scsi_cmnd *scsi_cmd)
{
	struct ipr_ioa_cfg *ioa_cfg;
	int rc;

	ENTER;
	ioa_cfg = (struct ipr_ioa_cfg *) scsi_cmd->device->host->hostdata;

	if (!ioa_cfg->in_reset_reload) {
		dev_err(&ioa_cfg->pdev->dev,
			"Adapter being reset as a result of error recovery.\n");

		if (WAIT_FOR_DUMP == ioa_cfg->sdt_state)
			ioa_cfg->sdt_state = GET_DUMP;
	}

	rc = ipr_reset_reload(ioa_cfg, IPR_SHUTDOWN_ABBREV);

	LEAVE;
	return rc;
}

static int ipr_eh_host_reset(struct scsi_cmnd *cmd)
{
	int rc;

	spin_lock_irq(cmd->device->host->host_lock);
	rc = __ipr_eh_host_reset(cmd);
	spin_unlock_irq(cmd->device->host->host_lock);

	return rc;
}

/**
 * ipr_device_reset - Reset the device
 * @ioa_cfg:	ioa config struct
 * @res:	resource entry struct
 *
 * This function issues a device reset to the affected device.
 * If the device is a SCSI device, a LUN reset will be sent
 * to the device first. If that does not work, a target reset
 * will be sent. If the device is a SATA device, a PHY reset will
 * be sent.
 *
 * Return value:
 *	0 on success / non-zero on failure
 **/
static int ipr_device_reset(struct ipr_ioa_cfg *ioa_cfg,
			    struct ipr_resource_entry *res)
{
	struct ipr_cmnd *ipr_cmd;
	struct ipr_ioarcb *ioarcb;
	struct ipr_cmd_pkt *cmd_pkt;
	struct ipr_ioarcb_ata_regs *regs;
	u32 ioasc;

	ENTER;
	ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
	ioarcb = &ipr_cmd->ioarcb;
	cmd_pkt = &ioarcb->cmd_pkt;

	if (ipr_cmd->ioa_cfg->sis64) {
		regs = &ipr_cmd->i.ata_ioadl.regs;
		ioarcb->add_cmd_parms_offset = cpu_to_be16(sizeof(*ioarcb));
	} else
		regs = &ioarcb->u.add_data.u.regs;

	ioarcb->res_handle = res->res_handle;
	cmd_pkt->request_type = IPR_RQTYPE_IOACMD;
	cmd_pkt->cdb[0] = IPR_RESET_DEVICE;
	if (ipr_is_gata(res)) {
		cmd_pkt->cdb[2] = IPR_ATA_PHY_RESET;
		ioarcb->add_cmd_parms_len = cpu_to_be16(sizeof(regs->flags));
		regs->flags |= IPR_ATA_FLAG_STATUS_ON_GOOD_COMPLETION;
	}

	ipr_send_blocking_cmd(ipr_cmd, ipr_timeout, IPR_DEVICE_RESET_TIMEOUT);
	ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
	list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
	if (ipr_is_gata(res) && res->sata_port && ioasc != IPR_IOASC_IOA_WAS_RESET) {
		if (ipr_cmd->ioa_cfg->sis64)
			memcpy(&res->sata_port->ioasa, &ipr_cmd->s.ioasa64.u.gata,
			       sizeof(struct ipr_ioasa_gata));
		else
			memcpy(&res->sata_port->ioasa, &ipr_cmd->s.ioasa.u.gata,
			       sizeof(struct ipr_ioasa_gata));
	}

	LEAVE;
	return IPR_IOASC_SENSE_KEY(ioasc) ? -EIO : 0;
}

/**
 * ipr_sata_reset - Reset the SATA port
 * @link:	SATA link to reset
 * @classes:	class of the attached device
 *
 * This function issues a SATA phy reset to the affected ATA link.
 *
 * Return value:
 *	0 on success / non-zero on failure
 **/
static int ipr_sata_reset(struct ata_link *link, unsigned int *classes,
			  unsigned long deadline)
{
	struct ipr_sata_port *sata_port = link->ap->private_data;
	struct ipr_ioa_cfg *ioa_cfg = sata_port->ioa_cfg;
	struct ipr_resource_entry *res;
	unsigned long lock_flags = 0;
	int rc = -ENXIO;

	ENTER;
	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
	while (ioa_cfg->in_reset_reload) {
		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
		wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
		spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
	}

	res = sata_port->res;
	if (res) {
		rc = ipr_device_reset(ioa_cfg, res);
		*classes = res->ata_class;
	}

	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
	LEAVE;
	return rc;
}

/**
 * __ipr_eh_dev_reset - Reset the device
 * @scsi_cmd:	scsi command struct
 *
 * This function issues a device reset to the affected device.
 * A LUN reset will be sent to the device first. If that does
 * not work, a target reset will be sent.
 *
 * Return value:
 *	SUCCESS / FAILED
 **/
static int __ipr_eh_dev_reset(struct scsi_cmnd *scsi_cmd)
{
	struct ipr_cmnd *ipr_cmd;
	struct ipr_ioa_cfg *ioa_cfg;
	struct ipr_resource_entry *res;
	struct ata_port *ap;
	int rc = 0;

	ENTER;
	ioa_cfg = (struct ipr_ioa_cfg *) scsi_cmd->device->host->hostdata;
	res = scsi_cmd->device->hostdata;

	if (!res)
		return FAILED;

	/*
	 * If we are currently going through reset/reload, return failed. This will force the
	 * mid-layer to call ipr_eh_host_reset, which will then go to sleep and wait for the
	 * reset to complete
	 */
	if (ioa_cfg->in_reset_reload)
		return FAILED;
	if (ioa_cfg->ioa_is_dead)
		return FAILED;

	list_for_each_entry(ipr_cmd, &ioa_cfg->pending_q, queue) {
		if (ipr_cmd->ioarcb.res_handle == res->res_handle) {
			if (ipr_cmd->scsi_cmd)
				ipr_cmd->done = ipr_scsi_eh_done;
			if (ipr_cmd->qc)
				ipr_cmd->done = ipr_sata_eh_done;
			if (ipr_cmd->qc && !(ipr_cmd->qc->flags & ATA_QCFLAG_FAILED)) {
				ipr_cmd->qc->err_mask |= AC_ERR_TIMEOUT;
				ipr_cmd->qc->flags |= ATA_QCFLAG_FAILED;
			}
		}
	}

	res->resetting_device = 1;
	scmd_printk(KERN_ERR, scsi_cmd, "Resetting device\n");

	if (ipr_is_gata(res) && res->sata_port) {
		ap = res->sata_port->ap;
		spin_unlock_irq(scsi_cmd->device->host->host_lock);
		ata_std_error_handler(ap);
		spin_lock_irq(scsi_cmd->device->host->host_lock);

		list_for_each_entry(ipr_cmd, &ioa_cfg->pending_q, queue) {
			if (ipr_cmd->ioarcb.res_handle == res->res_handle) {
				rc = -EIO;
				break;
			}
		}
	} else
		rc = ipr_device_reset(ioa_cfg, res);
	res->resetting_device = 0;

	LEAVE;
	return rc ? FAILED : SUCCESS;
}

static int ipr_eh_dev_reset(struct scsi_cmnd *cmd)
{
	int rc;

	spin_lock_irq(cmd->device->host->host_lock);
	rc = __ipr_eh_dev_reset(cmd);
	spin_unlock_irq(cmd->device->host->host_lock);

	return rc;
}

/**
 * ipr_bus_reset_done - Op done function for bus reset.
 * @ipr_cmd:	ipr command struct
 *
 * This function is the op done function for a bus reset
 *
 * Return value:
 *	none
 **/
static void ipr_bus_reset_done(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
	struct ipr_resource_entry *res;

	ENTER;
	if (!ioa_cfg->sis64)
		list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
			if (res->res_handle == ipr_cmd->ioarcb.res_handle) {
				scsi_report_bus_reset(ioa_cfg->host, res->bus);
				break;
			}
		}

	/*
	 * If abort has not completed, indicate the reset has, else call the
	 * abort's done function to wake the sleeping eh thread
	 */
	if (ipr_cmd->sibling->sibling)
		ipr_cmd->sibling->sibling = NULL;
	else
		ipr_cmd->sibling->done(ipr_cmd->sibling);

	list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
	LEAVE;
}

/**
 * ipr_abort_timeout - An abort task has timed out
 * @ipr_cmd:	ipr command struct
 *
 * This function handles when an abort task times out. If this
 * happens we issue a bus reset since we have resources tied
 * up that must be freed before returning to the midlayer.
 *
 * Return value:
 *	none
 **/
static void ipr_abort_timeout(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_cmnd *reset_cmd;
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
	struct ipr_cmd_pkt *cmd_pkt;
	unsigned long lock_flags = 0;

	ENTER;
	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
	if (ipr_cmd->completion.done || ioa_cfg->in_reset_reload) {
		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
		return;
	}

	sdev_printk(KERN_ERR, ipr_cmd->u.sdev, "Abort timed out. Resetting bus.\n");
	reset_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
	ipr_cmd->sibling = reset_cmd;
	reset_cmd->sibling = ipr_cmd;
	reset_cmd->ioarcb.res_handle = ipr_cmd->ioarcb.res_handle;
	cmd_pkt = &reset_cmd->ioarcb.cmd_pkt;
	cmd_pkt->request_type = IPR_RQTYPE_IOACMD;
	cmd_pkt->cdb[0] = IPR_RESET_DEVICE;
	cmd_pkt->cdb[2] = IPR_RESET_TYPE_SELECT | IPR_BUS_RESET;

	ipr_do_req(reset_cmd, ipr_bus_reset_done, ipr_timeout, IPR_DEVICE_RESET_TIMEOUT);
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
	LEAVE;
}

/**
 * ipr_cancel_op - Cancel specified op
 * @scsi_cmd:	scsi command struct
 *
 * This function cancels specified op.
 *
 * Return value:
 *	SUCCESS / FAILED
 **/
static int ipr_cancel_op(struct scsi_cmnd *scsi_cmd)
{
	struct ipr_cmnd *ipr_cmd;
	struct ipr_ioa_cfg *ioa_cfg;
	struct ipr_resource_entry *res;
	struct ipr_cmd_pkt *cmd_pkt;
	u32 ioasc, int_reg;
	int op_found = 0;

	ENTER;
	ioa_cfg = (struct ipr_ioa_cfg *)scsi_cmd->device->host->hostdata;
	res = scsi_cmd->device->hostdata;

	/* If we are currently going through reset/reload, return failed.
	 * This will force the mid-layer to call ipr_eh_host_reset,
	 * which will then go to sleep and wait for the reset to complete
	 */
	if (ioa_cfg->in_reset_reload || ioa_cfg->ioa_is_dead)
		return FAILED;
	if (!res)
		return FAILED;

	/*
	 * If we are aborting a timed out op, chances are that the timeout was caused
	 * by a still not detected EEH error. In such cases, reading a register will
	 * trigger the EEH recovery infrastructure.
	 */
	int_reg = readl(ioa_cfg->regs.sense_interrupt_reg);

	if (!ipr_is_gscsi(res))
		return FAILED;

	list_for_each_entry(ipr_cmd, &ioa_cfg->pending_q, queue) {
		if (ipr_cmd->scsi_cmd == scsi_cmd) {
			ipr_cmd->done = ipr_scsi_eh_done;
			op_found = 1;
			break;
		}
	}

	if (!op_found)
		return SUCCESS;

	ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
	ipr_cmd->ioarcb.res_handle = res->res_handle;
	cmd_pkt = &ipr_cmd->ioarcb.cmd_pkt;
	cmd_pkt->request_type = IPR_RQTYPE_IOACMD;
	cmd_pkt->cdb[0] = IPR_CANCEL_ALL_REQUESTS;
	ipr_cmd->u.sdev = scsi_cmd->device;

	scmd_printk(KERN_ERR, scsi_cmd, "Aborting command: %02X\n",
		    scsi_cmd->cmnd[0]);
	ipr_send_blocking_cmd(ipr_cmd, ipr_abort_timeout, IPR_CANCEL_ALL_TIMEOUT);
	ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);

	/*
	 * If the abort task timed out and we sent a bus reset, we will get
	 * one of the following responses to the abort
	 */
	if (ioasc == IPR_IOASC_BUS_WAS_RESET || ioasc == IPR_IOASC_SYNC_REQUIRED) {
		ioasc = 0;
		ipr_trace;
	}

	list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
	if (!ipr_is_naca_model(res))
		res->needs_sync_complete = 1;

	LEAVE;
	return IPR_IOASC_SENSE_KEY(ioasc) ? FAILED : SUCCESS;
}

/**
 * ipr_eh_abort - Abort a single op
 * @scsi_cmd:	scsi command struct
 *
 * Return value:
 *	SUCCESS / FAILED
 **/
static int ipr_eh_abort(struct scsi_cmnd *scsi_cmd)
{
	unsigned long flags;
	int rc;

	ENTER;

	spin_lock_irqsave(scsi_cmd->device->host->host_lock, flags);
	rc = ipr_cancel_op(scsi_cmd);
	spin_unlock_irqrestore(scsi_cmd->device->host->host_lock, flags);

	LEAVE;
	return rc;
}

/**
 * ipr_handle_other_interrupt - Handle "other" interrupts
 * @ioa_cfg:	ioa config struct
 * @int_reg:	interrupt register
 *
 * Return value:
 *	IRQ_NONE / IRQ_HANDLED
 **/
static irqreturn_t ipr_handle_other_interrupt(struct ipr_ioa_cfg *ioa_cfg,
					      u32 int_reg)
{
	irqreturn_t rc = IRQ_HANDLED;
	u32 int_mask_reg;

	int_mask_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg32);
	int_reg &= ~int_mask_reg;

	/* If an interrupt on the adapter did not occur, ignore it.
	 * Or in the case of SIS 64, check for a stage change interrupt.
	 */
	if ((int_reg & IPR_PCII_OPER_INTERRUPTS) == 0) {
		if (ioa_cfg->sis64) {
			int_mask_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
			int_reg = readl(ioa_cfg->regs.sense_interrupt_reg) & ~int_mask_reg;
			if (int_reg & IPR_PCII_IPL_STAGE_CHANGE) {

				/* clear stage change */
				writel(IPR_PCII_IPL_STAGE_CHANGE, ioa_cfg->regs.clr_interrupt_reg);
				int_reg = readl(ioa_cfg->regs.sense_interrupt_reg) & ~int_mask_reg;
				list_del(&ioa_cfg->reset_cmd->queue);
				del_timer(&ioa_cfg->reset_cmd->timer);
				ipr_reset_ioa_job(ioa_cfg->reset_cmd);
				return IRQ_HANDLED;
			}
		}

		return IRQ_NONE;
	}

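	/*
	 * A transition to operational means the adapter has finished coming
	 * back up, so complete the in-progress reset job. An HRRQ update
	 * with nothing pending is treated as spurious, while anything else
	 * is a fatal adapter error that forces a reset.
	 */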
	if (int_reg & IPR_PCII_IOA_TRANS_TO_OPER) {
		/* Mask the interrupt */
		writel(IPR_PCII_IOA_TRANS_TO_OPER, ioa_cfg->regs.set_interrupt_mask_reg);

		/* Clear the interrupt */
		writel(IPR_PCII_IOA_TRANS_TO_OPER, ioa_cfg->regs.clr_interrupt_reg);
		int_reg = readl(ioa_cfg->regs.sense_interrupt_reg);

		list_del(&ioa_cfg->reset_cmd->queue);
		del_timer(&ioa_cfg->reset_cmd->timer);
		ipr_reset_ioa_job(ioa_cfg->reset_cmd);
	} else if ((int_reg & IPR_PCII_HRRQ_UPDATED) == int_reg) {
		if (ioa_cfg->clear_isr) {
			if (ipr_debug && printk_ratelimit())
				dev_err(&ioa_cfg->pdev->dev,
					"Spurious interrupt detected. 0x%08X\n", int_reg);
			writel(IPR_PCII_HRRQ_UPDATED, ioa_cfg->regs.clr_interrupt_reg32);
			int_reg = readl(ioa_cfg->regs.sense_interrupt_reg32);
			return IRQ_NONE;
		}
	} else {
		if (int_reg & IPR_PCII_IOA_UNIT_CHECKED)
			ioa_cfg->ioa_unit_checked = 1;
		else
			dev_err(&ioa_cfg->pdev->dev,
				"Permanent IOA failure. 0x%08X\n", int_reg);

		if (WAIT_FOR_DUMP == ioa_cfg->sdt_state)
			ioa_cfg->sdt_state = GET_DUMP;

		ipr_mask_and_clear_interrupts(ioa_cfg, ~0);
		ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
	}

	return rc;
}

/**
 * ipr_isr_eh - Interrupt service routine error handler
 * @ioa_cfg:	ioa config struct
 * @msg:	message to log
 *
 * Return value:
 *	none
 **/
static void ipr_isr_eh(struct ipr_ioa_cfg *ioa_cfg, char *msg)
{
	ioa_cfg->errors_logged++;
	dev_err(&ioa_cfg->pdev->dev, "%s\n", msg);

	if (WAIT_FOR_DUMP == ioa_cfg->sdt_state)
		ioa_cfg->sdt_state = GET_DUMP;

	ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
}

/**
 * ipr_isr - Interrupt service routine
 * @irq:	irq number
 * @devp:	pointer to ioa config struct
 *
 * Return value:
 *	IRQ_NONE / IRQ_HANDLED
 **/
static irqreturn_t ipr_isr(int irq, void *devp)
{
	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)devp;
	unsigned long lock_flags = 0;
	u32 int_reg = 0;
	u32 ioasc;
	u16 cmd_index;
	int num_hrrq = 0;
	int irq_none = 0;
	struct ipr_cmnd *ipr_cmd, *temp;
	irqreturn_t rc = IRQ_NONE;
	LIST_HEAD(doneq);

	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);

	/* If interrupts are disabled, ignore the interrupt */
	if (!ioa_cfg->allow_interrupts) {
		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
		return IRQ_NONE;
	}

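	/*
	 * Drain the host request response queue (HRRQ). Each 32-bit entry
	 * carries a toggle bit plus a response handle, decoded as:
	 *
	 *	entry = be32_to_cpu(*ioa_cfg->hrrq_curr);
	 *	index = (entry & IPR_HRRQ_REQ_RESP_HANDLE_MASK) >>
	 *		IPR_HRRQ_REQ_RESP_HANDLE_SHIFT;
	 *
	 * The adapter flips the toggle bit on every pass through the
	 * circular queue, so an entry is valid only while its toggle bit
	 * matches ioa_cfg->toggle_bit; wrapping flips our copy to chase
	 * the adapter's next pass.
	 */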
	while (1) {
		ipr_cmd = NULL;

		while ((be32_to_cpu(*ioa_cfg->hrrq_curr) & IPR_HRRQ_TOGGLE_BIT) ==
		       ioa_cfg->toggle_bit) {

			cmd_index = (be32_to_cpu(*ioa_cfg->hrrq_curr) &
				     IPR_HRRQ_REQ_RESP_HANDLE_MASK) >> IPR_HRRQ_REQ_RESP_HANDLE_SHIFT;

			if (unlikely(cmd_index >= IPR_NUM_CMD_BLKS)) {
				ipr_isr_eh(ioa_cfg, "Invalid response handle from IOA");
				rc = IRQ_HANDLED;
				goto unlock_out;
			}

			ipr_cmd = ioa_cfg->ipr_cmnd_list[cmd_index];

			ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);

			ipr_trc_hook(ipr_cmd, IPR_TRACE_FINISH, ioasc);

			list_move_tail(&ipr_cmd->queue, &doneq);

			rc = IRQ_HANDLED;

			if (ioa_cfg->hrrq_curr < ioa_cfg->hrrq_end) {
				ioa_cfg->hrrq_curr++;
			} else {
				ioa_cfg->hrrq_curr = ioa_cfg->hrrq_start;
				ioa_cfg->toggle_bit ^= 1u;
			}
		}

		if (ipr_cmd && !ioa_cfg->clear_isr)
			break;

		if (ipr_cmd != NULL) {
			/* Clear the PCI interrupt */
			num_hrrq = 0;
			do {
				writel(IPR_PCII_HRRQ_UPDATED, ioa_cfg->regs.clr_interrupt_reg32);
				int_reg = readl(ioa_cfg->regs.sense_interrupt_reg32);
			} while (int_reg & IPR_PCII_HRRQ_UPDATED &&
				 num_hrrq++ < IPR_MAX_HRRQ_RETRIES);

		} else if (rc == IRQ_NONE && irq_none == 0) {
			int_reg = readl(ioa_cfg->regs.sense_interrupt_reg32);
			irq_none++;
		} else if (num_hrrq == IPR_MAX_HRRQ_RETRIES &&
			   int_reg & IPR_PCII_HRRQ_UPDATED) {
			ipr_isr_eh(ioa_cfg, "Error clearing HRRQ");
			rc = IRQ_HANDLED;
			goto unlock_out;
		} else
			break;
	}

	if (unlikely(rc == IRQ_NONE))
		rc = ipr_handle_other_interrupt(ioa_cfg, int_reg);

unlock_out:
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
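	/* Run the completion handlers after dropping the host lock so the
	 * time spent with interrupts blocked stays short. */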
	list_for_each_entry_safe(ipr_cmd, temp, &doneq, queue) {
		list_del(&ipr_cmd->queue);
		del_timer(&ipr_cmd->timer);
		ipr_cmd->fast_done(ipr_cmd);
	}

	return rc;
}

/**
 * ipr_build_ioadl64 - Build a scatter/gather list and map the buffer
 * @ioa_cfg:	ioa config struct
 * @ipr_cmd:	ipr command struct
 *
 * Return value:
 *	0 on success / -1 on failure
 **/
static int ipr_build_ioadl64(struct ipr_ioa_cfg *ioa_cfg,
			     struct ipr_cmnd *ipr_cmd)
{
	int i, nseg;
	struct scatterlist *sg;
	u32 length;
	u32 ioadl_flags = 0;
	struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
	struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
	struct ipr_ioadl64_desc *ioadl64 = ipr_cmd->i.ioadl64;

	length = scsi_bufflen(scsi_cmd);
	if (!length)
		return 0;

	nseg = scsi_dma_map(scsi_cmd);
	if (nseg < 0) {
		if (printk_ratelimit())
			dev_err(&ioa_cfg->pdev->dev, "pci_map_sg failed!\n");
		return -1;
	}

	ipr_cmd->dma_use_sg = nseg;

	ioarcb->data_transfer_length = cpu_to_be32(length);
	ioarcb->ioadl_len =
		cpu_to_be32(sizeof(struct ipr_ioadl64_desc) * ipr_cmd->dma_use_sg);

	if (scsi_cmd->sc_data_direction == DMA_TO_DEVICE) {
		ioadl_flags = IPR_IOADL_FLAGS_WRITE;
		ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
	} else if (scsi_cmd->sc_data_direction == DMA_FROM_DEVICE)
		ioadl_flags = IPR_IOADL_FLAGS_READ;

	scsi_for_each_sg(scsi_cmd, sg, ipr_cmd->dma_use_sg, i) {
		ioadl64[i].flags = cpu_to_be32(ioadl_flags);
		ioadl64[i].data_len = cpu_to_be32(sg_dma_len(sg));
		ioadl64[i].address = cpu_to_be64(sg_dma_address(sg));
	}

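	/* Flag the final descriptor so the adapter knows where the list ends. */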
	ioadl64[i-1].flags |= cpu_to_be32(IPR_IOADL_FLAGS_LAST);
	return 0;
}

/**
 * ipr_build_ioadl - Build a scatter/gather list and map the buffer
 * @ioa_cfg:	ioa config struct
 * @ipr_cmd:	ipr command struct
 *
 * Return value:
 *	0 on success / -1 on failure
 **/
static int ipr_build_ioadl(struct ipr_ioa_cfg *ioa_cfg,
			   struct ipr_cmnd *ipr_cmd)
{
	int i, nseg;
	struct scatterlist *sg;
	u32 length;
	u32 ioadl_flags = 0;
	struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
	struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
	struct ipr_ioadl_desc *ioadl = ipr_cmd->i.ioadl;

	length = scsi_bufflen(scsi_cmd);
	if (!length)
		return 0;

	nseg = scsi_dma_map(scsi_cmd);
	if (nseg < 0) {
		dev_err(&ioa_cfg->pdev->dev, "pci_map_sg failed!\n");
		return -1;
	}

	ipr_cmd->dma_use_sg = nseg;

	if (scsi_cmd->sc_data_direction == DMA_TO_DEVICE) {
		ioadl_flags = IPR_IOADL_FLAGS_WRITE;
		ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
		ioarcb->data_transfer_length = cpu_to_be32(length);
		ioarcb->ioadl_len =
			cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg);
	} else if (scsi_cmd->sc_data_direction == DMA_FROM_DEVICE) {
		ioadl_flags = IPR_IOADL_FLAGS_READ;
		ioarcb->read_data_transfer_length = cpu_to_be32(length);
		ioarcb->read_ioadl_len =
			cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg);
	}

	if (ipr_cmd->dma_use_sg <= ARRAY_SIZE(ioarcb->u.add_data.u.ioadl)) {
		ioadl = ioarcb->u.add_data.u.ioadl;
		ioarcb->write_ioadl_addr = cpu_to_be32((ipr_cmd->dma_addr) +
				offsetof(struct ipr_ioarcb, u.add_data));
		ioarcb->read_ioadl_addr = ioarcb->write_ioadl_addr;
	}

	scsi_for_each_sg(scsi_cmd, sg, ipr_cmd->dma_use_sg, i) {
		ioadl[i].flags_and_data_len =
			cpu_to_be32(ioadl_flags | sg_dma_len(sg));
		ioadl[i].address = cpu_to_be32(sg_dma_address(sg));
	}

	ioadl[i-1].flags_and_data_len |= cpu_to_be32(IPR_IOADL_FLAGS_LAST);
	return 0;
}

/**
 * ipr_get_task_attributes - Translate SPI Q-Tag to task attributes
 * @scsi_cmd:	scsi command struct
 *
 * Return value:
 *	task attributes
 **/
static u8 ipr_get_task_attributes(struct scsi_cmnd *scsi_cmd)
{
	u8 tag[2];
	u8 rc = IPR_FLAGS_LO_UNTAGGED_TASK;

	if (scsi_populate_tag_msg(scsi_cmd, tag)) {
		switch (tag[0]) {
		case MSG_SIMPLE_TAG:
			rc = IPR_FLAGS_LO_SIMPLE_TASK;
			break;
		case MSG_HEAD_TAG:
			rc = IPR_FLAGS_LO_HEAD_OF_Q_TASK;
			break;
		case MSG_ORDERED_TAG:
			rc = IPR_FLAGS_LO_ORDERED_TASK;
			break;
		}
	}

	return rc;
}

/**
 * ipr_erp_done - Process completion of ERP for a device
 * @ipr_cmd:	ipr command struct
 *
 * This function copies the sense buffer into the scsi_cmd
 * struct and pushes the scsi_done function.
 *
 * Return value:
 *	nothing
 **/
static void ipr_erp_done(struct ipr_cmnd *ipr_cmd)
{
	struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
	struct ipr_resource_entry *res = scsi_cmd->device->hostdata;
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
	u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);

	if (IPR_IOASC_SENSE_KEY(ioasc) > 0) {
		scsi_cmd->result |= (DID_ERROR << 16);
		scmd_printk(KERN_ERR, scsi_cmd,
			    "Request Sense failed with IOASC: 0x%08X\n", ioasc);
	} else {
		memcpy(scsi_cmd->sense_buffer, ipr_cmd->sense_buffer,
		       SCSI_SENSE_BUFFERSIZE);
	}

	if (res) {
		if (!ipr_is_naca_model(res))
			res->needs_sync_complete = 1;
		res->in_erp = 0;
	}
	scsi_dma_unmap(ipr_cmd->scsi_cmd);
	list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
	scsi_cmd->scsi_done(scsi_cmd);
}

/**
 * ipr_reinit_ipr_cmnd_for_erp - Re-initialize a cmnd block to be used for ERP
 * @ipr_cmd:	ipr command struct
 *
 * Return value:
 *	none
 **/
static void ipr_reinit_ipr_cmnd_for_erp(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
	struct ipr_ioasa *ioasa = &ipr_cmd->s.ioasa;
	dma_addr_t dma_addr = ipr_cmd->dma_addr;

	memset(&ioarcb->cmd_pkt, 0, sizeof(struct ipr_cmd_pkt));
	ioarcb->data_transfer_length = 0;
	ioarcb->read_data_transfer_length = 0;
	ioarcb->ioadl_len = 0;
	ioarcb->read_ioadl_len = 0;
	ioasa->hdr.ioasc = 0;
	ioasa->hdr.residual_data_len = 0;

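	/* Point the IOARCB back at the command block's own IOADL: SIS-64
	 * takes a single 64-bit descriptor list address, while SIS-32
	 * keeps separate 32-bit read and write list pointers. */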
	if (ipr_cmd->ioa_cfg->sis64)
		ioarcb->u.sis64_addr_data.data_ioadl_addr =
			cpu_to_be64(dma_addr + offsetof(struct ipr_cmnd, i.ioadl64));
	else {
		ioarcb->write_ioadl_addr =
			cpu_to_be32(dma_addr + offsetof(struct ipr_cmnd, i.ioadl));
		ioarcb->read_ioadl_addr = ioarcb->write_ioadl_addr;
	}
}

/**
 * ipr_erp_request_sense - Send request sense to a device
 * @ipr_cmd:	ipr command struct
 *
 * This function sends a request sense to a device as a result
 * of a check condition.
 *
 * Return value:
 *	nothing
 **/
static void ipr_erp_request_sense(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_cmd_pkt *cmd_pkt = &ipr_cmd->ioarcb.cmd_pkt;
	u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);

	if (IPR_IOASC_SENSE_KEY(ioasc) > 0) {
		ipr_erp_done(ipr_cmd);
		return;
	}

	ipr_reinit_ipr_cmnd_for_erp(ipr_cmd);

	cmd_pkt->request_type = IPR_RQTYPE_SCSICDB;
	cmd_pkt->cdb[0] = REQUEST_SENSE;
	cmd_pkt->cdb[4] = SCSI_SENSE_BUFFERSIZE;
	cmd_pkt->flags_hi |= IPR_FLAGS_HI_SYNC_OVERRIDE;
	cmd_pkt->flags_hi |= IPR_FLAGS_HI_NO_ULEN_CHK;
	cmd_pkt->timeout = cpu_to_be16(IPR_REQUEST_SENSE_TIMEOUT / HZ);

	ipr_init_ioadl(ipr_cmd, ipr_cmd->sense_buffer_dma,
		       SCSI_SENSE_BUFFERSIZE, IPR_IOADL_FLAGS_READ_LAST);

	ipr_do_req(ipr_cmd, ipr_erp_done, ipr_timeout,
		   IPR_REQUEST_SENSE_TIMEOUT * 2);
}

/**
 * ipr_erp_cancel_all - Send cancel all to a device
 * @ipr_cmd:	ipr command struct
 *
 * This function sends a cancel all to a device to clear the
 * queue. If we are running TCQ on the device, QERR is set to 1,
 * which means all outstanding ops have been dropped on the floor.
 * Cancel all will return them to us.
 *
 * Return value:
 *	nothing
 **/
static void ipr_erp_cancel_all(struct ipr_cmnd *ipr_cmd)
{
	struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
	struct ipr_resource_entry *res = scsi_cmd->device->hostdata;
	struct ipr_cmd_pkt *cmd_pkt;

	res->in_erp = 1;

	ipr_reinit_ipr_cmnd_for_erp(ipr_cmd);

	if (!scsi_get_tag_type(scsi_cmd->device)) {
		ipr_erp_request_sense(ipr_cmd);
		return;
	}

	cmd_pkt = &ipr_cmd->ioarcb.cmd_pkt;
	cmd_pkt->request_type = IPR_RQTYPE_IOACMD;
	cmd_pkt->cdb[0] = IPR_CANCEL_ALL_REQUESTS;

	ipr_do_req(ipr_cmd, ipr_erp_request_sense, ipr_timeout,
		   IPR_CANCEL_ALL_TIMEOUT);
}

/**
 * ipr_dump_ioasa - Dump contents of IOASA
 * @ioa_cfg:	ioa config struct
 * @ipr_cmd:	ipr command struct
 * @res:	resource entry struct
 *
 * This function is invoked by the interrupt handler when ops
 * fail. It will log the IOASA if appropriate. Only called
 * for GPDD ops.
 *
 * Return value:
 *	none
 **/
static void ipr_dump_ioasa(struct ipr_ioa_cfg *ioa_cfg,
			   struct ipr_cmnd *ipr_cmd, struct ipr_resource_entry *res)
{
	int i;
	u16 data_len;
	u32 ioasc, fd_ioasc;
	struct ipr_ioasa *ioasa = &ipr_cmd->s.ioasa;
	__be32 *ioasa_data = (__be32 *)ioasa;
	int error_index;

	ioasc = be32_to_cpu(ioasa->hdr.ioasc) & IPR_IOASC_IOASC_MASK;
	fd_ioasc = be32_to_cpu(ioasa->hdr.fd_ioasc) & IPR_IOASC_IOASC_MASK;

	if (0 == ioasc)
		return;

	if (ioa_cfg->log_level < IPR_DEFAULT_LOG_LEVEL)
		return;

	if (ioasc == IPR_IOASC_BUS_WAS_RESET && fd_ioasc)
		error_index = ipr_get_error(fd_ioasc);
	else
		error_index = ipr_get_error(ioasc);

	if (ioa_cfg->log_level < IPR_MAX_LOG_LEVEL) {
		/* Don't log an error if the IOA already logged one */
		if (ioasa->hdr.ilid != 0)
			return;

		if (!ipr_is_gscsi(res))
			return;

		if (ipr_error_table[error_index].log_ioasa == 0)
			return;
	}

	ipr_res_err(ioa_cfg, res, "%s\n", ipr_error_table[error_index].error);

	data_len = be16_to_cpu(ioasa->hdr.ret_stat_len);
	if (ioa_cfg->sis64 && sizeof(struct ipr_ioasa64) < data_len)
		data_len = sizeof(struct ipr_ioasa64);
	else if (!ioa_cfg->sis64 && sizeof(struct ipr_ioasa) < data_len)
		data_len = sizeof(struct ipr_ioasa);

	ipr_err("IOASA Dump:\n");

	for (i = 0; i < data_len / 4; i += 4) {
		ipr_err("%08X: %08X %08X %08X %08X\n", i*4,
			be32_to_cpu(ioasa_data[i]),
			be32_to_cpu(ioasa_data[i+1]),
			be32_to_cpu(ioasa_data[i+2]),
			be32_to_cpu(ioasa_data[i+3]));
	}
}

/**
 * ipr_gen_sense - Generate SCSI sense data from an IOASA
 * @ipr_cmd:	ipr command struct
 *
 * Return value:
 *	none
 **/
static void ipr_gen_sense(struct ipr_cmnd *ipr_cmd)
{
	u32 failing_lba;
	u8 *sense_buf = ipr_cmd->scsi_cmd->sense_buffer;
	struct ipr_resource_entry *res = ipr_cmd->scsi_cmd->device->hostdata;
	struct ipr_ioasa *ioasa = &ipr_cmd->s.ioasa;
	u32 ioasc = be32_to_cpu(ioasa->hdr.ioasc);

	memset(sense_buf, 0, SCSI_SENSE_BUFFERSIZE);

	if (ioasc >= IPR_FIRST_DRIVER_IOASC)
		return;

	ipr_cmd->scsi_cmd->result = SAM_STAT_CHECK_CONDITION;

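	/* A vset failing LBA that needs the high 32 bits only fits in
	 * descriptor-format sense data (response code 0x72); all other
	 * cases use fixed-format sense data (response code 0x70). */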
	if (ipr_is_vset_device(res) &&
	    ioasc == IPR_IOASC_MED_DO_NOT_REALLOC &&
	    ioasa->u.vset.failing_lba_hi != 0) {
		sense_buf[0] = 0x72;
		sense_buf[1] = IPR_IOASC_SENSE_KEY(ioasc);
		sense_buf[2] = IPR_IOASC_SENSE_CODE(ioasc);
		sense_buf[3] = IPR_IOASC_SENSE_QUAL(ioasc);

		sense_buf[7] = 12;
		sense_buf[8] = 0;
		sense_buf[9] = 0x0A;
		sense_buf[10] = 0x80;

		failing_lba = be32_to_cpu(ioasa->u.vset.failing_lba_hi);

		sense_buf[12] = (failing_lba & 0xff000000) >> 24;
		sense_buf[13] = (failing_lba & 0x00ff0000) >> 16;
		sense_buf[14] = (failing_lba & 0x0000ff00) >> 8;
		sense_buf[15] = failing_lba & 0x000000ff;

		failing_lba = be32_to_cpu(ioasa->u.vset.failing_lba_lo);

		sense_buf[16] = (failing_lba & 0xff000000) >> 24;
		sense_buf[17] = (failing_lba & 0x00ff0000) >> 16;
		sense_buf[18] = (failing_lba & 0x0000ff00) >> 8;
		sense_buf[19] = failing_lba & 0x000000ff;
	} else {
		sense_buf[0] = 0x70;
		sense_buf[2] = IPR_IOASC_SENSE_KEY(ioasc);
		sense_buf[12] = IPR_IOASC_SENSE_CODE(ioasc);
		sense_buf[13] = IPR_IOASC_SENSE_QUAL(ioasc);

		/* Illegal request */
		if ((IPR_IOASC_SENSE_KEY(ioasc) == 0x05) &&
		    (be32_to_cpu(ioasa->hdr.ioasc_specific) & IPR_FIELD_POINTER_VALID)) {
			sense_buf[7] = 10;	/* additional length */

			/* IOARCB was in error */
			if (IPR_IOASC_SENSE_CODE(ioasc) == 0x24)
				sense_buf[15] = 0xC0;
			else	/* Parameter data was invalid */
				sense_buf[15] = 0x80;

			sense_buf[16] =
			    ((IPR_FIELD_POINTER_MASK &
			      be32_to_cpu(ioasa->hdr.ioasc_specific)) >> 8) & 0xff;
			sense_buf[17] =
			    (IPR_FIELD_POINTER_MASK &
			     be32_to_cpu(ioasa->hdr.ioasc_specific)) & 0xff;
		} else {
			if (ioasc == IPR_IOASC_MED_DO_NOT_REALLOC) {
				if (ipr_is_vset_device(res))
					failing_lba = be32_to_cpu(ioasa->u.vset.failing_lba_lo);
				else
					failing_lba = be32_to_cpu(ioasa->u.dasd.failing_lba);

				sense_buf[0] |= 0x80;	/* Or in the Valid bit */
				sense_buf[3] = (failing_lba & 0xff000000) >> 24;
				sense_buf[4] = (failing_lba & 0x00ff0000) >> 16;
				sense_buf[5] = (failing_lba & 0x0000ff00) >> 8;
				sense_buf[6] = failing_lba & 0x000000ff;
			}

			sense_buf[7] = 6;	/* additional length */
		}
	}
}

/**
 * ipr_get_autosense - Copy autosense data to sense buffer
 * @ipr_cmd:	ipr command struct
 *
 * This function copies the autosense buffer to the buffer
 * in the scsi_cmd, if there is autosense available.
 *
 * Return value:
 *	1 if autosense was available / 0 if not
 **/
static int ipr_get_autosense(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioasa *ioasa = &ipr_cmd->s.ioasa;
	struct ipr_ioasa64 *ioasa64 = &ipr_cmd->s.ioasa64;

	if ((be32_to_cpu(ioasa->hdr.ioasc_specific) & IPR_AUTOSENSE_VALID) == 0)
		return 0;

	if (ipr_cmd->ioa_cfg->sis64)
		memcpy(ipr_cmd->scsi_cmd->sense_buffer, ioasa64->auto_sense.data,
		       min_t(u16, be16_to_cpu(ioasa64->auto_sense.auto_sense_len),
			   SCSI_SENSE_BUFFERSIZE));
	else
		memcpy(ipr_cmd->scsi_cmd->sense_buffer, ioasa->auto_sense.data,
		       min_t(u16, be16_to_cpu(ioasa->auto_sense.auto_sense_len),
			   SCSI_SENSE_BUFFERSIZE));
	return 1;
}

/**
 * ipr_erp_start - Process an error response for a SCSI op
 * @ioa_cfg:	ioa config struct
 * @ipr_cmd:	ipr command struct
 *
 * This function determines whether or not to initiate ERP
 * on the affected device.
 *
 * Return value:
 *	nothing
 **/
static void ipr_erp_start(struct ipr_ioa_cfg *ioa_cfg,
			  struct ipr_cmnd *ipr_cmd)
{
	struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
	struct ipr_resource_entry *res = scsi_cmd->device->hostdata;
	u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
	u32 masked_ioasc = ioasc & IPR_IOASC_IOASC_MASK;

	if (!res) {
		ipr_scsi_eh_done(ipr_cmd);
		return;
	}

	if (!ipr_is_gscsi(res) && masked_ioasc != IPR_IOASC_HW_DEV_BUS_STATUS)
		ipr_gen_sense(ipr_cmd);

	ipr_dump_ioasa(ioa_cfg, ipr_cmd, res);

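	/* Map the masked IOASC onto a mid-layer result: retry, abort,
	 * no-connect, or pass the device status straight through. */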
	switch (masked_ioasc) {
	case IPR_IOASC_ABORTED_CMD_TERM_BY_HOST:
		if (ipr_is_naca_model(res))
			scsi_cmd->result |= (DID_ABORT << 16);
		else
			scsi_cmd->result |= (DID_IMM_RETRY << 16);
		break;
	case IPR_IOASC_IR_RESOURCE_HANDLE:
	case IPR_IOASC_IR_NO_CMDS_TO_2ND_IOA:
		scsi_cmd->result |= (DID_NO_CONNECT << 16);
		break;
	case IPR_IOASC_HW_SEL_TIMEOUT:
		scsi_cmd->result |= (DID_NO_CONNECT << 16);
		if (!ipr_is_naca_model(res))
			res->needs_sync_complete = 1;
		break;
	case IPR_IOASC_SYNC_REQUIRED:
		if (!res->in_erp)
			res->needs_sync_complete = 1;
		scsi_cmd->result |= (DID_IMM_RETRY << 16);
		break;
	case IPR_IOASC_MED_DO_NOT_REALLOC: /* prevent retries */
	case IPR_IOASA_IR_DUAL_IOA_DISABLED:
		scsi_cmd->result |= (DID_PASSTHROUGH << 16);
		break;
	case IPR_IOASC_BUS_WAS_RESET:
	case IPR_IOASC_BUS_WAS_RESET_BY_OTHER:
		/*
		 * Report the bus reset and ask for a retry. The device
		 * will give CC/UA the next command.
		 */
		if (!res->resetting_device)
			scsi_report_bus_reset(ioa_cfg->host, scsi_cmd->device->channel);
		scsi_cmd->result |= (DID_ERROR << 16);
		if (!ipr_is_naca_model(res))
			res->needs_sync_complete = 1;
		break;
	case IPR_IOASC_HW_DEV_BUS_STATUS:
		scsi_cmd->result |= IPR_IOASC_SENSE_STATUS(ioasc);
		if (IPR_IOASC_SENSE_STATUS(ioasc) == SAM_STAT_CHECK_CONDITION) {
			if (!ipr_get_autosense(ipr_cmd)) {
				if (!ipr_is_naca_model(res)) {
					ipr_erp_cancel_all(ipr_cmd);
					return;
				}
			}
		}
		if (!ipr_is_naca_model(res))
			res->needs_sync_complete = 1;
		break;
	case IPR_IOASC_NR_INIT_CMD_REQUIRED:
		break;
	default:
		if (IPR_IOASC_SENSE_KEY(ioasc) > RECOVERED_ERROR)
			scsi_cmd->result |= (DID_ERROR << 16);
		if (!ipr_is_vset_device(res) && !ipr_is_naca_model(res))
			res->needs_sync_complete = 1;
		break;
	}

	scsi_dma_unmap(ipr_cmd->scsi_cmd);
	list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
	scsi_cmd->scsi_done(scsi_cmd);
}

/**
 * ipr_scsi_done - mid-layer done function
 * @ipr_cmd:	ipr command struct
 *
 * This function is invoked by the interrupt handler for
 * ops generated by the SCSI mid-layer
 *
 * Return value:
 *	none
 **/
static void ipr_scsi_done(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
	struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
	u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
	unsigned long lock_flags;

	scsi_set_resid(scsi_cmd, be32_to_cpu(ipr_cmd->s.ioasa.hdr.residual_data_len));

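	/* Successful ops take the fast path straight back to the mid-layer;
	 * anything with a sense key goes through error recovery instead. */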
	if (likely(IPR_IOASC_SENSE_KEY(ioasc) == 0)) {
		scsi_dma_unmap(scsi_cmd);

		spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
		list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
		scsi_cmd->scsi_done(scsi_cmd);
		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
	} else {
		spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
		ipr_erp_start(ioa_cfg, ipr_cmd);
		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
	}
}

/**
 * ipr_queuecommand - Queue a mid-layer request
 * @shost:	scsi host struct
 * @scsi_cmd:	scsi command struct
 *
 * This function queues a request generated by the mid-layer.
 *
 * Return value:
 *	0 on success
 *	SCSI_MLQUEUE_DEVICE_BUSY if device is busy
 *	SCSI_MLQUEUE_HOST_BUSY if host is busy
 **/
static int ipr_queuecommand(struct Scsi_Host *shost,
			    struct scsi_cmnd *scsi_cmd)
{
	struct ipr_ioa_cfg *ioa_cfg;
	struct ipr_resource_entry *res;
	struct ipr_ioarcb *ioarcb;
	struct ipr_cmnd *ipr_cmd;
	unsigned long lock_flags;
	int rc;

	ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;

	spin_lock_irqsave(shost->host_lock, lock_flags);
	scsi_cmd->result = (DID_OK << 16);
	res = scsi_cmd->device->hostdata;

	/*
	 * We are currently blocking all devices due to a host reset
	 * We have told the host to stop giving us new requests, but
	 * ERP ops don't count. FIXME
	 */
	if (unlikely(!ioa_cfg->allow_cmds && !ioa_cfg->ioa_is_dead)) {
		spin_unlock_irqrestore(shost->host_lock, lock_flags);
		return SCSI_MLQUEUE_HOST_BUSY;
	}

	/*
	 * FIXME - Create scsi_set_host_offline interface
	 * and the ioa_is_dead check can be removed
	 */
	if (unlikely(ioa_cfg->ioa_is_dead || !res)) {
		spin_unlock_irqrestore(shost->host_lock, lock_flags);
		goto err_nodev;
	}

	if (ipr_is_gata(res) && res->sata_port) {
		rc = ata_sas_queuecmd(scsi_cmd, res->sata_port->ap);
		spin_unlock_irqrestore(shost->host_lock, lock_flags);
		return rc;
	}

	ipr_cmd = __ipr_get_free_ipr_cmnd(ioa_cfg);
	spin_unlock_irqrestore(shost->host_lock, lock_flags);

	ipr_init_ipr_cmnd(ipr_cmd, ipr_scsi_done);
	ioarcb = &ipr_cmd->ioarcb;

	memcpy(ioarcb->cmd_pkt.cdb, scsi_cmd->cmnd, scsi_cmd->cmd_len);
	ipr_cmd->scsi_cmd = scsi_cmd;
	ipr_cmd->done = ipr_scsi_eh_done;

	if (ipr_is_gscsi(res) || ipr_is_vset_device(res)) {
		if (scsi_cmd->underflow == 0)
			ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_NO_ULEN_CHK;

		ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_NO_LINK_DESC;
		if (ipr_is_gscsi(res))
			ioarcb->cmd_pkt.flags_lo |= IPR_FLAGS_LO_DELAY_AFTER_RST;
		ioarcb->cmd_pkt.flags_lo |= IPR_FLAGS_LO_ALIGNED_BFR;
		ioarcb->cmd_pkt.flags_lo |= ipr_get_task_attributes(scsi_cmd);
	}

	if (scsi_cmd->cmnd[0] >= 0xC0 &&
	    (!ipr_is_gscsi(res) || scsi_cmd->cmnd[0] == IPR_QUERY_RSRC_STATE))
		ioarcb->cmd_pkt.request_type = IPR_RQTYPE_IOACMD;

	if (ioa_cfg->sis64)
		rc = ipr_build_ioadl64(ioa_cfg, ipr_cmd);
	else
		rc = ipr_build_ioadl(ioa_cfg, ipr_cmd);

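	/* The host lock was dropped while the buffer was being mapped, so
	 * re-take it and re-check the adapter state; a reset may have
	 * started in the meantime. */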
	spin_lock_irqsave(shost->host_lock, lock_flags);
	if (unlikely(rc || (!ioa_cfg->allow_cmds && !ioa_cfg->ioa_is_dead))) {
		list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
		spin_unlock_irqrestore(shost->host_lock, lock_flags);
		if (!rc)
			scsi_dma_unmap(scsi_cmd);
		return SCSI_MLQUEUE_HOST_BUSY;
	}

	if (unlikely(ioa_cfg->ioa_is_dead)) {
		list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
		spin_unlock_irqrestore(shost->host_lock, lock_flags);
		scsi_dma_unmap(scsi_cmd);
		goto err_nodev;
	}

	ioarcb->res_handle = res->res_handle;
	if (res->needs_sync_complete) {
		ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_SYNC_COMPLETE;
		res->needs_sync_complete = 0;
	}
	list_add_tail(&ipr_cmd->queue, &ioa_cfg->pending_q);
	ipr_trc_hook(ipr_cmd, IPR_TRACE_START, IPR_GET_RES_PHYS_LOC(res));
	ipr_send_command(ipr_cmd);
	spin_unlock_irqrestore(shost->host_lock, lock_flags);
	return 0;

err_nodev:
	spin_lock_irqsave(shost->host_lock, lock_flags);
	memset(scsi_cmd->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE);
	scsi_cmd->result = (DID_NO_CONNECT << 16);
	scsi_cmd->scsi_done(scsi_cmd);
	spin_unlock_irqrestore(shost->host_lock, lock_flags);
	return 0;
}

/**
 * ipr_ioctl - IOCTL handler
 * @sdev:	scsi device struct
 * @cmd:	IOCTL cmd
 * @arg:	IOCTL arg
 *
 * Return value:
 *	0 on success / other on failure
 **/
static int ipr_ioctl(struct scsi_device *sdev, int cmd, void __user *arg)
{
	struct ipr_resource_entry *res;

	res = (struct ipr_resource_entry *)sdev->hostdata;
	if (res && ipr_is_gata(res)) {
		if (cmd == HDIO_GET_IDENTITY)
			return -ENOTTY;
		return ata_sas_scsi_ioctl(res->sata_port->ap, sdev, cmd, arg);
	}

	return -EINVAL;
}

/**
 * ipr_ioa_info - Get information about the card/driver
 * @host:	scsi host struct
 *
 * Return value:
 *	pointer to buffer with description string
 **/
static const char *ipr_ioa_info(struct Scsi_Host *host)
{
	static char buffer[512];
	struct ipr_ioa_cfg *ioa_cfg;
	unsigned long lock_flags = 0;

	ioa_cfg = (struct ipr_ioa_cfg *) host->hostdata;

	spin_lock_irqsave(host->host_lock, lock_flags);
	sprintf(buffer, "IBM %X Storage Adapter", ioa_cfg->type);
	spin_unlock_irqrestore(host->host_lock, lock_flags);

	return buffer;
}

static struct scsi_host_template driver_template = {
	.module = THIS_MODULE,
	.name = "IPR",
	.info = ipr_ioa_info,
	.ioctl = ipr_ioctl,
	.queuecommand = ipr_queuecommand,
	.eh_abort_handler = ipr_eh_abort,
	.eh_device_reset_handler = ipr_eh_dev_reset,
	.eh_host_reset_handler = ipr_eh_host_reset,
	.slave_alloc = ipr_slave_alloc,
	.slave_configure = ipr_slave_configure,
	.slave_destroy = ipr_slave_destroy,
	.target_alloc = ipr_target_alloc,
	.target_destroy = ipr_target_destroy,
	.change_queue_depth = ipr_change_queue_depth,
	.change_queue_type = ipr_change_queue_type,
	.bios_param = ipr_biosparam,
	.can_queue = IPR_MAX_COMMANDS,
	.this_id = -1,
	.sg_tablesize = IPR_MAX_SGLIST,
	.max_sectors = IPR_IOA_MAX_SECTORS,
	.cmd_per_lun = IPR_MAX_CMD_PER_LUN,
	.use_clustering = ENABLE_CLUSTERING,
	.shost_attrs = ipr_ioa_attrs,
	.sdev_attrs = ipr_dev_attrs,
	.proc_name = IPR_NAME
};

/**
 * ipr_ata_phy_reset - libata phy_reset handler
 * @ap:	ata port to reset
 *
 **/
static void ipr_ata_phy_reset(struct ata_port *ap)
{
	unsigned long flags;
	struct ipr_sata_port *sata_port = ap->private_data;
	struct ipr_resource_entry *res = sata_port->res;
	struct ipr_ioa_cfg *ioa_cfg = sata_port->ioa_cfg;
	int rc;

	ENTER;
	spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
	while (ioa_cfg->in_reset_reload) {
		spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
		wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
		spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
	}

	if (!ioa_cfg->allow_cmds)
		goto out_unlock;

	rc = ipr_device_reset(ioa_cfg, res);

	if (rc) {
		ap->link.device[0].class = ATA_DEV_NONE;
		goto out_unlock;
	}

	ap->link.device[0].class = res->ata_class;
	if (ap->link.device[0].class == ATA_DEV_UNKNOWN)
		ap->link.device[0].class = ATA_DEV_NONE;

out_unlock:
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
	LEAVE;
}

/**
 * ipr_ata_post_internal - Cleanup after an internal command
 * @qc:	ATA queued command
 *
 * Return value:
 *	none
 **/
static void ipr_ata_post_internal(struct ata_queued_cmd *qc)
{
	struct ipr_sata_port *sata_port = qc->ap->private_data;
	struct ipr_ioa_cfg *ioa_cfg = sata_port->ioa_cfg;
	struct ipr_cmnd *ipr_cmd;
	unsigned long flags;

	spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
	while (ioa_cfg->in_reset_reload) {
		spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
		wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
		spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
	}

	list_for_each_entry(ipr_cmd, &ioa_cfg->pending_q, queue) {
		if (ipr_cmd->qc == qc) {
			ipr_device_reset(ioa_cfg, sata_port->res);
			break;
		}
	}
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
}

/**
 * ipr_copy_sata_tf - Copy a SATA taskfile to an IOA data structure
 * @regs:	destination
 * @tf:	source ATA taskfile
 *
 * Return value:
 *	none
 **/
static void ipr_copy_sata_tf(struct ipr_ioarcb_ata_regs *regs,
			     struct ata_taskfile *tf)
{
	regs->feature = tf->feature;
	regs->nsect = tf->nsect;
	regs->lbal = tf->lbal;
	regs->lbam = tf->lbam;
	regs->lbah = tf->lbah;
	regs->device = tf->device;
	regs->command = tf->command;
	regs->hob_feature = tf->hob_feature;
	regs->hob_nsect = tf->hob_nsect;
	regs->hob_lbal = tf->hob_lbal;
	regs->hob_lbam = tf->hob_lbam;
	regs->hob_lbah = tf->hob_lbah;
	regs->ctl = tf->ctl;
}

/**
 * ipr_sata_done - done function for SATA commands
 * @ipr_cmd:	ipr command struct
 *
 * This function is invoked by the interrupt handler for
 * ops generated by the SCSI mid-layer to SATA devices
 *
 * Return value:
 *	none
 **/
static void ipr_sata_done(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
	struct ata_queued_cmd *qc = ipr_cmd->qc;
	struct ipr_sata_port *sata_port = qc->ap->private_data;
	struct ipr_resource_entry *res = sata_port->res;
	u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);

	if (ipr_cmd->ioa_cfg->sis64)
		memcpy(&sata_port->ioasa, &ipr_cmd->s.ioasa64.u.gata,
		       sizeof(struct ipr_ioasa_gata));
	else
		memcpy(&sata_port->ioasa, &ipr_cmd->s.ioasa.u.gata,
		       sizeof(struct ipr_ioasa_gata));
	ipr_dump_ioasa(ioa_cfg, ipr_cmd, res);

	if (be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc_specific) & IPR_ATA_DEVICE_WAS_RESET)
		scsi_report_device_reset(ioa_cfg->host, res->bus, res->target);

	if (IPR_IOASC_SENSE_KEY(ioasc) > RECOVERED_ERROR)
		qc->err_mask |= __ac_err_mask(sata_port->ioasa.status);
	else
		qc->err_mask |= ac_err_mask(sata_port->ioasa.status);
	list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
	ata_qc_complete(qc);
}

/**
 * ipr_build_ata_ioadl64 - Build an ATA scatter/gather list
 * @ipr_cmd:	ipr command struct
 * @qc:	ATA queued command
 *
 **/
static void ipr_build_ata_ioadl64(struct ipr_cmnd *ipr_cmd,
				  struct ata_queued_cmd *qc)
{
	u32 ioadl_flags = 0;
	struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
	struct ipr_ioadl64_desc *ioadl64 = ipr_cmd->i.ioadl64;
	struct ipr_ioadl64_desc *last_ioadl64 = NULL;
	int len = qc->nbytes;
	struct scatterlist *sg;
	unsigned int si;
	dma_addr_t dma_addr = ipr_cmd->dma_addr;

	if (len == 0)
		return;

	if (qc->dma_dir == DMA_TO_DEVICE) {
		ioadl_flags = IPR_IOADL_FLAGS_WRITE;
		ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
	} else if (qc->dma_dir == DMA_FROM_DEVICE)
		ioadl_flags = IPR_IOADL_FLAGS_READ;

	ioarcb->data_transfer_length = cpu_to_be32(len);
	ioarcb->ioadl_len =
		cpu_to_be32(sizeof(struct ipr_ioadl64_desc) * ipr_cmd->dma_use_sg);
	ioarcb->u.sis64_addr_data.data_ioadl_addr =
		cpu_to_be64(dma_addr + offsetof(struct ipr_cmnd, i.ata_ioadl));

	for_each_sg(qc->sg, sg, qc->n_elem, si) {
		ioadl64->flags = cpu_to_be32(ioadl_flags);
		ioadl64->data_len = cpu_to_be32(sg_dma_len(sg));
		ioadl64->address = cpu_to_be64(sg_dma_address(sg));

		last_ioadl64 = ioadl64;
		ioadl64++;
	}

	if (likely(last_ioadl64))
		last_ioadl64->flags |= cpu_to_be32(IPR_IOADL_FLAGS_LAST);
}

/**
 * ipr_build_ata_ioadl - Build an ATA scatter/gather list
 * @ipr_cmd:	ipr command struct
 * @qc:	ATA queued command
 *
 **/
static void ipr_build_ata_ioadl(struct ipr_cmnd *ipr_cmd,
				struct ata_queued_cmd *qc)
{
	u32 ioadl_flags = 0;
	struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
	struct ipr_ioadl_desc *ioadl = ipr_cmd->i.ioadl;
	struct ipr_ioadl_desc *last_ioadl = NULL;
	int len = qc->nbytes;
	struct scatterlist *sg;
	unsigned int si;

	if (len == 0)
		return;

	if (qc->dma_dir == DMA_TO_DEVICE) {
		ioadl_flags = IPR_IOADL_FLAGS_WRITE;
		ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
		ioarcb->data_transfer_length = cpu_to_be32(len);
		ioarcb->ioadl_len =
			cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg);
	} else if (qc->dma_dir == DMA_FROM_DEVICE) {
		ioadl_flags = IPR_IOADL_FLAGS_READ;
		ioarcb->read_data_transfer_length = cpu_to_be32(len);
		ioarcb->read_ioadl_len =
			cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg);
	}

	for_each_sg(qc->sg, sg, qc->n_elem, si) {
		ioadl->flags_and_data_len = cpu_to_be32(ioadl_flags | sg_dma_len(sg));
		ioadl->address = cpu_to_be32(sg_dma_address(sg));

		last_ioadl = ioadl;
		ioadl++;
	}

	if (likely(last_ioadl))
		last_ioadl->flags_and_data_len |= cpu_to_be32(IPR_IOADL_FLAGS_LAST);
}

/**
 * ipr_qc_issue - Issue a SATA qc to a device
 * @qc:	queued command
 *
 * Return value:
 *	0 if success / AC_ERR_* on failure
 **/
static unsigned int ipr_qc_issue(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	struct ipr_sata_port *sata_port = ap->private_data;
	struct ipr_resource_entry *res = sata_port->res;
	struct ipr_ioa_cfg *ioa_cfg = sata_port->ioa_cfg;
	struct ipr_cmnd *ipr_cmd;
	struct ipr_ioarcb *ioarcb;
	struct ipr_ioarcb_ata_regs *regs;

	if (unlikely(!ioa_cfg->allow_cmds || ioa_cfg->ioa_is_dead))
		return AC_ERR_SYSTEM;

	ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
	ioarcb = &ipr_cmd->ioarcb;

	if (ioa_cfg->sis64) {
		regs = &ipr_cmd->i.ata_ioadl.regs;
		ioarcb->add_cmd_parms_offset = cpu_to_be16(sizeof(*ioarcb));
	} else
		regs = &ioarcb->u.add_data.u.regs;

	memset(regs, 0, sizeof(*regs));
	ioarcb->add_cmd_parms_len = cpu_to_be16(sizeof(*regs));

	list_add_tail(&ipr_cmd->queue, &ioa_cfg->pending_q);
	ipr_cmd->qc = qc;
	ipr_cmd->done = ipr_sata_done;
	ipr_cmd->ioarcb.res_handle = res->res_handle;
	ioarcb->cmd_pkt.request_type = IPR_RQTYPE_ATA_PASSTHRU;
	ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_NO_LINK_DESC;
	ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_NO_ULEN_CHK;
	ipr_cmd->dma_use_sg = qc->n_elem;

	if (ioa_cfg->sis64)
		ipr_build_ata_ioadl64(ipr_cmd, qc);
	else
		ipr_build_ata_ioadl(ipr_cmd, qc);

	regs->flags |= IPR_ATA_FLAG_STATUS_ON_GOOD_COMPLETION;
	ipr_copy_sata_tf(regs, &qc->tf);
	memcpy(ioarcb->cmd_pkt.cdb, qc->cdb, IPR_MAX_CDB_LEN);
	ipr_trc_hook(ipr_cmd, IPR_TRACE_START, IPR_GET_RES_PHYS_LOC(res));

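	/* Tell the adapter how to move the data: DMA protocols set the
	 * DMA transfer-type flag, and ATAPI protocols set the packet
	 * command flag so the CDB is forwarded to the device. */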
	switch (qc->tf.protocol) {
	case ATA_PROT_NODATA:
	case ATA_PROT_PIO:
		break;

	case ATA_PROT_DMA:
		regs->flags |= IPR_ATA_FLAG_XFER_TYPE_DMA;
		break;

	case ATAPI_PROT_PIO:
	case ATAPI_PROT_NODATA:
		regs->flags |= IPR_ATA_FLAG_PACKET_CMD;
		break;

	case ATAPI_PROT_DMA:
		regs->flags |= IPR_ATA_FLAG_PACKET_CMD;
		regs->flags |= IPR_ATA_FLAG_XFER_TYPE_DMA;
		break;

	default:
		WARN_ON(1);
		return AC_ERR_INVALID;
	}

	ipr_send_command(ipr_cmd);

	return 0;
}

/**
 * ipr_qc_fill_rtf - Read result TF
 * @qc:	ATA queued command
 *
 * Return value:
 *	true
 **/
static bool ipr_qc_fill_rtf(struct ata_queued_cmd *qc)
{
	struct ipr_sata_port *sata_port = qc->ap->private_data;
	struct ipr_ioasa_gata *g = &sata_port->ioasa;
	struct ata_taskfile *tf = &qc->result_tf;

	tf->feature = g->error;
	tf->nsect = g->nsect;
	tf->lbal = g->lbal;
	tf->lbam = g->lbam;
	tf->lbah = g->lbah;
	tf->device = g->device;
	tf->command = g->status;
	tf->hob_nsect = g->hob_nsect;
	tf->hob_lbal = g->hob_lbal;
	tf->hob_lbam = g->hob_lbam;
	tf->hob_lbah = g->hob_lbah;
	tf->ctl = g->alt_status;

	return true;
}

static struct ata_port_operations ipr_sata_ops = {
	.phy_reset = ipr_ata_phy_reset,
	.hardreset = ipr_sata_reset,
	.post_internal_cmd = ipr_ata_post_internal,
	.qc_prep = ata_noop_qc_prep,
	.qc_issue = ipr_qc_issue,
	.qc_fill_rtf = ipr_qc_fill_rtf,
	.port_start = ata_sas_port_start,
	.port_stop = ata_sas_port_stop
};

static struct ata_port_info sata_port_info = {
	.flags = ATA_FLAG_SATA | ATA_FLAG_PIO_DMA,
	.pio_mask = ATA_PIO4_ONLY,
	.mwdma_mask = ATA_MWDMA2,
	.udma_mask = ATA_UDMA6,
	.port_ops = &ipr_sata_ops
};

#ifdef CONFIG_PPC_PSERIES
static const u16 ipr_blocked_processors[] = {
	PVR_NORTHSTAR,
	PVR_PULSAR,
	PVR_POWER4,
	PVR_ICESTAR,
	PVR_SSTAR,
	PVR_POWER4p,
	PVR_630,
	PVR_630p
};

/**
 * ipr_invalid_adapter - Determine if this adapter is supported on this hardware
 * @ioa_cfg:	ioa cfg struct
 *
 * Adapters that use Gemstone revision < 3.1 do not work reliably on
 * certain pSeries hardware. This function determines if the given
 * adapter is in one of these configurations or not.
 *
 * Return value:
 *	1 if adapter is not supported / 0 if adapter is supported
 **/
static int ipr_invalid_adapter(struct ipr_ioa_cfg *ioa_cfg)
{
	int i;

	if ((ioa_cfg->type == 0x5702) && (ioa_cfg->pdev->revision < 4)) {
		for (i = 0; i < ARRAY_SIZE(ipr_blocked_processors); i++) {
			if (pvr_version_is(ipr_blocked_processors[i]))
				return 1;
		}
	}
	return 0;
}
#else
#define ipr_invalid_adapter(ioa_cfg) 0
#endif

/**
 * ipr_ioa_bringdown_done - IOA bring down completion.
 * @ipr_cmd:	ipr command struct
 *
 * This function processes the completion of an adapter bring down.
 * It wakes any reset sleepers.
 *
 * Return value:
 *	IPR_RC_JOB_RETURN
 **/
static int ipr_ioa_bringdown_done(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;

	ENTER;
	ioa_cfg->in_reset_reload = 0;
	ioa_cfg->reset_retries = 0;
	list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
	wake_up_all(&ioa_cfg->reset_wait_q);

	spin_unlock_irq(ioa_cfg->host->host_lock);
	scsi_unblock_requests(ioa_cfg->host);
	spin_lock_irq(ioa_cfg->host->host_lock);
	LEAVE;

	return IPR_RC_JOB_RETURN;
}

/**
 * ipr_ioa_reset_done - IOA reset completion.
 * @ipr_cmd:	ipr command struct
 *
 * This function processes the completion of an adapter reset.
 * It schedules any necessary mid-layer add/removes and
 * wakes any reset sleepers.
 *
 * Return value:
 *	IPR_RC_JOB_RETURN
 **/
static int ipr_ioa_reset_done(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
	struct ipr_resource_entry *res;
	struct ipr_hostrcb *hostrcb, *temp;
	int i = 0;

	ENTER;
	ioa_cfg->in_reset_reload = 0;
	ioa_cfg->allow_cmds = 1;
	ioa_cfg->reset_cmd = NULL;
	ioa_cfg->doorbell |= IPR_RUNTIME_RESET;

	list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
		if (ioa_cfg->allow_ml_add_del && (res->add_to_ml || res->del_from_ml)) {
			ipr_trace;
			break;
		}
	}
	schedule_work(&ioa_cfg->work_q);

	list_for_each_entry_safe(hostrcb, temp, &ioa_cfg->hostrcb_free_q, queue) {
		list_del(&hostrcb->queue);
		if (i++ < IPR_NUM_LOG_HCAMS)
			ipr_send_hcam(ioa_cfg, IPR_HCAM_CDB_OP_CODE_LOG_DATA, hostrcb);
		else
			ipr_send_hcam(ioa_cfg, IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE, hostrcb);
	}

	scsi_report_bus_reset(ioa_cfg->host, IPR_VSET_BUS);
	dev_info(&ioa_cfg->pdev->dev, "IOA initialized.\n");

	ioa_cfg->reset_retries = 0;
	list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
	wake_up_all(&ioa_cfg->reset_wait_q);

	spin_unlock(ioa_cfg->host->host_lock);
	scsi_unblock_requests(ioa_cfg->host);
	spin_lock(ioa_cfg->host->host_lock);

	if (!ioa_cfg->allow_cmds)
		scsi_block_requests(ioa_cfg->host);

	LEAVE;
	return IPR_RC_JOB_RETURN;
}

/**
 * ipr_set_sup_dev_dflt - Initialize a Set Supported Device buffer
 * @supported_dev:	supported device struct
 * @vpids:	vendor product id struct
 *
 * Return value:
 *	none
 **/
static void ipr_set_sup_dev_dflt(struct ipr_supported_device *supported_dev,
				 struct ipr_std_inq_vpids *vpids)
{
	memset(supported_dev, 0, sizeof(struct ipr_supported_device));
	memcpy(&supported_dev->vpids, vpids, sizeof(struct ipr_std_inq_vpids));
	supported_dev->num_records = 1;
	supported_dev->data_length =
		cpu_to_be16(sizeof(struct ipr_supported_device));
	supported_dev->reserved = 0;
}

/**
 * ipr_set_supported_devs - Send Set Supported Devices for a device
 * @ipr_cmd:	ipr command struct
 *
 * This function sends a Set Supported Devices to the adapter.
 *
 * Return value:
 *	IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
 **/
static int ipr_set_supported_devs(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
	struct ipr_supported_device *supp_dev = &ioa_cfg->vpd_cbs->supp_dev;
	struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
	struct ipr_resource_entry *res = ipr_cmd->u.res;

	ipr_cmd->job_step = ipr_ioa_reset_done;

	list_for_each_entry_continue(res, &ioa_cfg->used_res_q, queue) {
		if (!ipr_is_scsi_disk(res))
			continue;

		ipr_cmd->u.res = res;
		ipr_set_sup_dev_dflt(supp_dev, &res->std_inq_data.vpids);

		ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
		ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
		ioarcb->cmd_pkt.request_type = IPR_RQTYPE_IOACMD;

		ioarcb->cmd_pkt.cdb[0] = IPR_SET_SUPPORTED_DEVICES;
		ioarcb->cmd_pkt.cdb[1] = IPR_SET_ALL_SUPPORTED_DEVICES;
		ioarcb->cmd_pkt.cdb[7] = (sizeof(struct ipr_supported_device) >> 8) & 0xff;
		ioarcb->cmd_pkt.cdb[8] = sizeof(struct ipr_supported_device) & 0xff;

		ipr_init_ioadl(ipr_cmd,
			       ioa_cfg->vpd_cbs_dma +
				 offsetof(struct ipr_misc_cbs, supp_dev),
			       sizeof(struct ipr_supported_device),
			       IPR_IOADL_FLAGS_WRITE_LAST);

		ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout,
			   IPR_SET_SUP_DEVICE_TIMEOUT);

		if (!ioa_cfg->sis64)
			ipr_cmd->job_step = ipr_set_supported_devs;
		return IPR_RC_JOB_RETURN;
	}

	return IPR_RC_JOB_CONTINUE;
}

/**
 * ipr_get_mode_page - Locate specified mode page
 * @mode_pages:	mode page buffer
 * @page_code:	page code to find
 * @len:	minimum required length for mode page
 *
 * Return value:
 *	pointer to mode page / NULL on failure
 **/
static void *ipr_get_mode_page(struct ipr_mode_pages *mode_pages,
			       u32 page_code, u32 len)
{
	struct ipr_mode_page_hdr *mode_hdr;
	u32 page_length;
	u32 length;

	if (!mode_pages || (mode_pages->hdr.length == 0))
		return NULL;

	length = (mode_pages->hdr.length + 1) - 4 - mode_pages->hdr.block_desc_len;
	mode_hdr = (struct ipr_mode_page_hdr *)
		(mode_pages->data + mode_pages->hdr.block_desc_len);

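	/* Mode pages are variable length, so walk the headers one page at
	 * a time, skipping ahead by each page's reported length until the
	 * requested page code is found or the buffer is exhausted. */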
	while (length) {
		if (IPR_GET_MODE_PAGE_CODE(mode_hdr) == page_code) {
			if (mode_hdr->page_length >= (len - sizeof(struct ipr_mode_page_hdr)))
				return mode_hdr;
			break;
		} else {
			page_length = (sizeof(struct ipr_mode_page_hdr) +
				       mode_hdr->page_length);
			length -= page_length;
			mode_hdr = (struct ipr_mode_page_hdr *)
				((unsigned long)mode_hdr + page_length);
		}
	}
	return NULL;
}

/**
 * ipr_check_term_power - Check for term power errors
 * @ioa_cfg:	ioa config struct
 * @mode_pages:	IOAFP mode pages buffer
 *
 * Check the IOAFP's mode page 28 for term power errors
 *
 * Return value:
 *	nothing
 **/
static void ipr_check_term_power(struct ipr_ioa_cfg *ioa_cfg,
				 struct ipr_mode_pages *mode_pages)
{
	int i;
	int entry_length;
	struct ipr_dev_bus_entry *bus;
	struct ipr_mode_page28 *mode_page;

	mode_page = ipr_get_mode_page(mode_pages, 0x28,
				      sizeof(struct ipr_mode_page28));

	entry_length = mode_page->entry_length;

	bus = mode_page->bus;

	for (i = 0; i < mode_page->num_entries; i++) {
		if (bus->flags & IPR_SCSI_ATTR_NO_TERM_PWR) {
			dev_err(&ioa_cfg->pdev->dev,
				"Term power is absent on scsi bus %d\n",
				bus->res_addr.bus);
		}

		bus = (struct ipr_dev_bus_entry *)((char *)bus + entry_length);
	}
}

/**
 * ipr_scsi_bus_speed_limit - Limit the SCSI speed based on SES table
 * @ioa_cfg:	ioa config struct
 *
 * Looks through the config table checking for SES devices. If
 * the SES device is in the SES table indicating a maximum SCSI
 * bus speed, the speed is limited for the bus.
 *
 * Return value:
 *	none
 **/
static void ipr_scsi_bus_speed_limit(struct ipr_ioa_cfg *ioa_cfg)
{
	u32 max_xfer_rate;
	int i;

	for (i = 0; i < IPR_MAX_NUM_BUSES; i++) {
		max_xfer_rate = ipr_get_max_scsi_speed(ioa_cfg, i,
						       ioa_cfg->bus_attr[i].bus_width);

		if (max_xfer_rate < ioa_cfg->bus_attr[i].max_xfer_rate)
			ioa_cfg->bus_attr[i].max_xfer_rate = max_xfer_rate;
	}
}

/**
 * ipr_modify_ioafp_mode_page_28 - Modify IOAFP Mode Page 28
 * @ioa_cfg:	ioa config struct
 * @mode_pages:	mode page 28 buffer
 *
 * Updates mode page 28 based on driver configuration
 *
 * Return value:
 *	none
 **/
static void ipr_modify_ioafp_mode_page_28(struct ipr_ioa_cfg *ioa_cfg,
					  struct ipr_mode_pages *mode_pages)
{
	int i, entry_length;
	struct ipr_dev_bus_entry *bus;
	struct ipr_bus_attributes *bus_attr;
	struct ipr_mode_page28 *mode_page;

	mode_page = ipr_get_mode_page(mode_pages, 0x28,
				      sizeof(struct ipr_mode_page28));

	entry_length = mode_page->entry_length;

	/* Loop for each device bus entry */
	for (i = 0, bus = mode_page->bus;
	     i < mode_page->num_entries;
	     i++, bus = (struct ipr_dev_bus_entry *)((u8 *)bus + entry_length)) {
		if (bus->res_addr.bus > IPR_MAX_NUM_BUSES) {
			dev_err(&ioa_cfg->pdev->dev,
				"Invalid resource address reported: 0x%08X\n",
				IPR_GET_PHYS_LOC(bus->res_addr));
			continue;
		}

		bus_attr = &ioa_cfg->bus_attr[i];
		bus->extended_reset_delay = IPR_EXTENDED_RESET_DELAY;
		bus->bus_width = bus_attr->bus_width;
		bus->max_xfer_rate = cpu_to_be32(bus_attr->max_xfer_rate);
		bus->flags &= ~IPR_SCSI_ATTR_QAS_MASK;
		if (bus_attr->qas_enabled)
			bus->flags |= IPR_SCSI_ATTR_ENABLE_QAS;
		else
			bus->flags |= IPR_SCSI_ATTR_DISABLE_QAS;
	}
}

/**
 * ipr_build_mode_select - Build a mode select command
 * @ipr_cmd:	ipr command struct
 * @res_handle:	resource handle to send command to
 * @parm:	Byte 1 of Mode Select command
 * @dma_addr:	DMA buffer address
 * @xfer_len:	data transfer length
 *
 * Return value:
 *	none
 **/
static void ipr_build_mode_select(struct ipr_cmnd *ipr_cmd,
				  __be32 res_handle, u8 parm,
				  dma_addr_t dma_addr, u8 xfer_len)
{
	struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;

	ioarcb->res_handle = res_handle;
	ioarcb->cmd_pkt.request_type = IPR_RQTYPE_SCSICDB;
	ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
	ioarcb->cmd_pkt.cdb[0] = MODE_SELECT;
	ioarcb->cmd_pkt.cdb[1] = parm;
	ioarcb->cmd_pkt.cdb[4] = xfer_len;

	ipr_init_ioadl(ipr_cmd, dma_addr, xfer_len, IPR_IOADL_FLAGS_WRITE_LAST);
}
6769
6770/**
6771 * ipr_ioafp_mode_select_page28 - Issue Mode Select Page 28 to IOA
6772 * @ipr_cmd: ipr command struct
6773 *
6774 * This function sets up the SCSI bus attributes and sends
6775 * a Mode Select for Page 28 to activate them.
6776 *
6777 * Return value:
6778 * IPR_RC_JOB_RETURN
6779 **/
6780static int ipr_ioafp_mode_select_page28(struct ipr_cmnd *ipr_cmd)
6781{
6782 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
6783 struct ipr_mode_pages *mode_pages = &ioa_cfg->vpd_cbs->mode_pages;
6784 int length;
6785
6786 ENTER;
6787 ipr_scsi_bus_speed_limit(ioa_cfg);
6788 ipr_check_term_power(ioa_cfg, mode_pages);
6789 ipr_modify_ioafp_mode_page_28(ioa_cfg, mode_pages);
6790 length = mode_pages->hdr.length + 1;
6791 mode_pages->hdr.length = 0;
6792
6793 ipr_build_mode_select(ipr_cmd, cpu_to_be32(IPR_IOA_RES_HANDLE), 0x11,
6794 ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, mode_pages),
6795 length);
6796
6797 ipr_cmd->job_step = ipr_set_supported_devs;
6798 ipr_cmd->u.res = list_entry(ioa_cfg->used_res_q.next,
6799 struct ipr_resource_entry, queue);
6800 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
6801
6802 LEAVE;
6803 return IPR_RC_JOB_RETURN;
6804}
6805
6806/**
6807 * ipr_build_mode_sense - Builds a mode sense command
6808 * @ipr_cmd: ipr command struct
 6809 * @res_handle: resource handle to send command to
6810 * @parm: Byte 2 of mode sense command
6811 * @dma_addr: DMA address of mode sense buffer
6812 * @xfer_len: Size of DMA buffer
6813 *
6814 * Return value:
6815 * none
6816 **/
6817static void ipr_build_mode_sense(struct ipr_cmnd *ipr_cmd,
6818 __be32 res_handle,
 6819 u8 parm, dma_addr_t dma_addr, u8 xfer_len)
 6820{
6821 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
6822
6823 ioarcb->res_handle = res_handle;
6824 ioarcb->cmd_pkt.cdb[0] = MODE_SENSE;
6825 ioarcb->cmd_pkt.cdb[2] = parm;
6826 ioarcb->cmd_pkt.cdb[4] = xfer_len;
6827 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_SCSICDB;
6828
 6829 ipr_init_ioadl(ipr_cmd, dma_addr, xfer_len, IPR_IOADL_FLAGS_READ_LAST);
6830}
6831
6832/**
6833 * ipr_reset_cmd_failed - Handle failure of IOA reset command
6834 * @ipr_cmd: ipr command struct
6835 *
6836 * This function handles the failure of an IOA bringup command.
6837 *
6838 * Return value:
6839 * IPR_RC_JOB_RETURN
6840 **/
6841static int ipr_reset_cmd_failed(struct ipr_cmnd *ipr_cmd)
6842{
6843 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
 6844 u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
6845
6846 dev_err(&ioa_cfg->pdev->dev,
6847 "0x%02X failed with IOASC: 0x%08X\n",
6848 ipr_cmd->ioarcb.cmd_pkt.cdb[0], ioasc);
6849
6850 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
6851 list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
6852 return IPR_RC_JOB_RETURN;
6853}
6854
6855/**
6856 * ipr_reset_mode_sense_failed - Handle failure of IOAFP mode sense
6857 * @ipr_cmd: ipr command struct
6858 *
6859 * This function handles the failure of a Mode Sense to the IOAFP.
6860 * Some adapters do not handle all mode pages.
6861 *
6862 * Return value:
6863 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
6864 **/
6865static int ipr_reset_mode_sense_failed(struct ipr_cmnd *ipr_cmd)
6866{
 6867 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
 6868 u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
6869
6870 if (ioasc == IPR_IOASC_IR_INVALID_REQ_TYPE_OR_PKT) {
6871 ipr_cmd->job_step = ipr_set_supported_devs;
6872 ipr_cmd->u.res = list_entry(ioa_cfg->used_res_q.next,
6873 struct ipr_resource_entry, queue);
6874 return IPR_RC_JOB_CONTINUE;
6875 }
6876
6877 return ipr_reset_cmd_failed(ipr_cmd);
6878}
6879
6880/**
6881 * ipr_ioafp_mode_sense_page28 - Issue Mode Sense Page 28 to IOA
6882 * @ipr_cmd: ipr command struct
6883 *
 6884 * This function sends a Page 28 mode sense to the IOA to
6885 * retrieve SCSI bus attributes.
6886 *
6887 * Return value:
6888 * IPR_RC_JOB_RETURN
6889 **/
6890static int ipr_ioafp_mode_sense_page28(struct ipr_cmnd *ipr_cmd)
6891{
6892 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
6893
6894 ENTER;
6895 ipr_build_mode_sense(ipr_cmd, cpu_to_be32(IPR_IOA_RES_HANDLE),
6896 0x28, ioa_cfg->vpd_cbs_dma +
6897 offsetof(struct ipr_misc_cbs, mode_pages),
6898 sizeof(struct ipr_mode_pages));
6899
6900 ipr_cmd->job_step = ipr_ioafp_mode_select_page28;
 6901 ipr_cmd->job_step_failed = ipr_reset_mode_sense_failed;
6902
6903 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
6904
6905 LEAVE;
6906 return IPR_RC_JOB_RETURN;
6907}
6908
6909/**
6910 * ipr_ioafp_mode_select_page24 - Issue Mode Select to IOA
6911 * @ipr_cmd: ipr command struct
6912 *
6913 * This function enables dual IOA RAID support if possible.
6914 *
6915 * Return value:
6916 * IPR_RC_JOB_RETURN
6917 **/
6918static int ipr_ioafp_mode_select_page24(struct ipr_cmnd *ipr_cmd)
6919{
6920 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
6921 struct ipr_mode_pages *mode_pages = &ioa_cfg->vpd_cbs->mode_pages;
6922 struct ipr_mode_page24 *mode_page;
6923 int length;
6924
6925 ENTER;
6926 mode_page = ipr_get_mode_page(mode_pages, 0x24,
6927 sizeof(struct ipr_mode_page24));
6928
6929 if (mode_page)
6930 mode_page->flags |= IPR_ENABLE_DUAL_IOA_AF;
6931
6932 length = mode_pages->hdr.length + 1;
6933 mode_pages->hdr.length = 0;
6934
6935 ipr_build_mode_select(ipr_cmd, cpu_to_be32(IPR_IOA_RES_HANDLE), 0x11,
6936 ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, mode_pages),
6937 length);
6938
6939 ipr_cmd->job_step = ipr_ioafp_mode_sense_page28;
6940 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
6941
6942 LEAVE;
6943 return IPR_RC_JOB_RETURN;
6944}
6945
6946/**
6947 * ipr_reset_mode_sense_page24_failed - Handle failure of IOAFP mode sense
6948 * @ipr_cmd: ipr command struct
6949 *
6950 * This function handles the failure of a Mode Sense to the IOAFP.
6951 * Some adapters do not handle all mode pages.
6952 *
6953 * Return value:
6954 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
6955 **/
6956static int ipr_reset_mode_sense_page24_failed(struct ipr_cmnd *ipr_cmd)
6957{
 6958 u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
6959
6960 if (ioasc == IPR_IOASC_IR_INVALID_REQ_TYPE_OR_PKT) {
6961 ipr_cmd->job_step = ipr_ioafp_mode_sense_page28;
6962 return IPR_RC_JOB_CONTINUE;
6963 }
6964
6965 return ipr_reset_cmd_failed(ipr_cmd);
6966}
6967
6968/**
6969 * ipr_ioafp_mode_sense_page24 - Issue Page 24 Mode Sense to IOA
6970 * @ipr_cmd: ipr command struct
6971 *
 6972 * This function sends a mode sense to the IOA to retrieve
6973 * the IOA Advanced Function Control mode page.
6974 *
6975 * Return value:
6976 * IPR_RC_JOB_RETURN
6977 **/
6978static int ipr_ioafp_mode_sense_page24(struct ipr_cmnd *ipr_cmd)
6979{
6980 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
6981
6982 ENTER;
6983 ipr_build_mode_sense(ipr_cmd, cpu_to_be32(IPR_IOA_RES_HANDLE),
6984 0x24, ioa_cfg->vpd_cbs_dma +
6985 offsetof(struct ipr_misc_cbs, mode_pages),
6986 sizeof(struct ipr_mode_pages));
6987
6988 ipr_cmd->job_step = ipr_ioafp_mode_select_page24;
6989 ipr_cmd->job_step_failed = ipr_reset_mode_sense_page24_failed;
6990
6991 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
6992
6993 LEAVE;
6994 return IPR_RC_JOB_RETURN;
6995}
6996
6997/**
6998 * ipr_init_res_table - Initialize the resource table
6999 * @ipr_cmd: ipr command struct
7000 *
7001 * This function looks through the existing resource table, comparing
7002 * it with the config table. This function will take care of old/new
7003 * devices and schedule adding/removing them from the mid-layer
7004 * as appropriate.
7005 *
7006 * Return value:
7007 * IPR_RC_JOB_CONTINUE
7008 **/
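/*
 * The reconciliation below is a two-list diff; in rough pseudocode
 * (names match the code that follows):
 *
 *	move every entry from used_res_q onto old_res
 *	for each device in the freshly fetched config table:
 *		if it matches an entry on old_res:
 *			move it back to used_res_q	(still present)
 *		else:
 *			grab a free entry, set add_to_ml	(new device)
 *	entries left on old_res that still have an sdev:
 *		set del_from_ml, invalidate res_handle	(device gone)
 *	everything else left on old_res:
 *		clear the target and return it to free_res_q
 */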
7009static int ipr_init_res_table(struct ipr_cmnd *ipr_cmd)
7010{
7011 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7012 struct ipr_resource_entry *res, *temp;
7013 struct ipr_config_table_entry_wrapper cfgtew;
7014 int entries, found, flag, i;
7015 LIST_HEAD(old_res);
7016
7017 ENTER;
7018 if (ioa_cfg->sis64)
7019 flag = ioa_cfg->u.cfg_table64->hdr64.flags;
7020 else
7021 flag = ioa_cfg->u.cfg_table->hdr.flags;
7022
7023 if (flag & IPR_UCODE_DOWNLOAD_REQ)
7024 dev_err(&ioa_cfg->pdev->dev, "Microcode download required\n");
7025
7026 list_for_each_entry_safe(res, temp, &ioa_cfg->used_res_q, queue)
7027 list_move_tail(&res->queue, &old_res);
7028
 7029 if (ioa_cfg->sis64)
 7030 entries = be16_to_cpu(ioa_cfg->u.cfg_table64->hdr64.num_entries);
7031 else
7032 entries = ioa_cfg->u.cfg_table->hdr.num_entries;
7033
7034 for (i = 0; i < entries; i++) {
7035 if (ioa_cfg->sis64)
7036 cfgtew.u.cfgte64 = &ioa_cfg->u.cfg_table64->dev[i];
7037 else
7038 cfgtew.u.cfgte = &ioa_cfg->u.cfg_table->dev[i];
7039 found = 0;
7040
7041 list_for_each_entry_safe(res, temp, &old_res, queue) {
 7042 if (ipr_is_same_device(res, &cfgtew)) {
7043 list_move_tail(&res->queue, &ioa_cfg->used_res_q);
7044 found = 1;
7045 break;
7046 }
7047 }
7048
7049 if (!found) {
7050 if (list_empty(&ioa_cfg->free_res_q)) {
7051 dev_err(&ioa_cfg->pdev->dev, "Too many devices attached\n");
7052 break;
7053 }
7054
7055 found = 1;
7056 res = list_entry(ioa_cfg->free_res_q.next,
7057 struct ipr_resource_entry, queue);
7058 list_move_tail(&res->queue, &ioa_cfg->used_res_q);
 7059 ipr_init_res_entry(res, &cfgtew);
 7060 res->add_to_ml = 1;
7061 } else if (res->sdev && (ipr_is_vset_device(res) || ipr_is_scsi_disk(res)))
7062 res->sdev->allow_restart = 1;
7063
7064 if (found)
 7065 ipr_update_res_entry(res, &cfgtew);
7066 }
7067
7068 list_for_each_entry_safe(res, temp, &old_res, queue) {
7069 if (res->sdev) {
7070 res->del_from_ml = 1;
 7071 res->res_handle = IPR_INVALID_RES_HANDLE;
 7072 list_move_tail(&res->queue, &ioa_cfg->used_res_q);
7073 }
7074 }
7075
7076 list_for_each_entry_safe(res, temp, &old_res, queue) {
7077 ipr_clear_res_target(res);
7078 list_move_tail(&res->queue, &ioa_cfg->free_res_q);
7079 }
7080
7081 if (ioa_cfg->dual_raid && ipr_dual_ioa_raid)
7082 ipr_cmd->job_step = ipr_ioafp_mode_sense_page24;
7083 else
7084 ipr_cmd->job_step = ipr_ioafp_mode_sense_page28;
7085
7086 LEAVE;
7087 return IPR_RC_JOB_CONTINUE;
7088}
7089
7090/**
7091 * ipr_ioafp_query_ioa_cfg - Send a Query IOA Config to the adapter.
7092 * @ipr_cmd: ipr command struct
7093 *
7094 * This function sends a Query IOA Configuration command
7095 * to the adapter to retrieve the IOA configuration table.
7096 *
7097 * Return value:
7098 * IPR_RC_JOB_RETURN
7099 **/
7100static int ipr_ioafp_query_ioa_cfg(struct ipr_cmnd *ipr_cmd)
7101{
7102 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7103 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
 7104 struct ipr_inquiry_page3 *ucode_vpd = &ioa_cfg->vpd_cbs->page3_data;
 7105 struct ipr_inquiry_cap *cap = &ioa_cfg->vpd_cbs->cap;
7106
7107 ENTER;
7108 if (cap->cap & IPR_CAP_DUAL_IOA_RAID)
7109 ioa_cfg->dual_raid = 1;
7110 dev_info(&ioa_cfg->pdev->dev, "Adapter firmware version: %02X%02X%02X%02X\n",
7111 ucode_vpd->major_release, ucode_vpd->card_type,
7112 ucode_vpd->minor_release[0], ucode_vpd->minor_release[1]);
7113 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
7114 ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
7115
7116 ioarcb->cmd_pkt.cdb[0] = IPR_QUERY_IOA_CONFIG;
 7117 ioarcb->cmd_pkt.cdb[6] = (ioa_cfg->cfg_table_size >> 16) & 0xff;
7118 ioarcb->cmd_pkt.cdb[7] = (ioa_cfg->cfg_table_size >> 8) & 0xff;
7119 ioarcb->cmd_pkt.cdb[8] = ioa_cfg->cfg_table_size & 0xff;
 7120
 7121 ipr_init_ioadl(ipr_cmd, ioa_cfg->cfg_table_dma, ioa_cfg->cfg_table_size,
 7122 IPR_IOADL_FLAGS_READ_LAST);
7123
7124 ipr_cmd->job_step = ipr_init_res_table;
7125
7126 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
7127
7128 LEAVE;
7129 return IPR_RC_JOB_RETURN;
7130}
7131
7132/**
7133 * ipr_ioafp_inquiry - Send an Inquiry to the adapter.
 7134 * @ipr_cmd: ipr command struct
 * @flags: inquiry flags (CDB byte 1, e.g. EVPD)
 * @page: page code
 * @dma_addr: DMA address of the inquiry buffer
 * @xfer_len: size of the inquiry buffer
7135 *
7136 * This utility function sends an inquiry to the adapter.
7137 *
7138 * Return value:
7139 * none
7140 **/
7141static void ipr_ioafp_inquiry(struct ipr_cmnd *ipr_cmd, u8 flags, u8 page,
 7142 dma_addr_t dma_addr, u8 xfer_len)
7143{
7144 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
7145
7146 ENTER;
7147 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_SCSICDB;
7148 ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
7149
7150 ioarcb->cmd_pkt.cdb[0] = INQUIRY;
7151 ioarcb->cmd_pkt.cdb[1] = flags;
7152 ioarcb->cmd_pkt.cdb[2] = page;
7153 ioarcb->cmd_pkt.cdb[4] = xfer_len;
7154
 7155 ipr_init_ioadl(ipr_cmd, dma_addr, xfer_len, IPR_IOADL_FLAGS_READ_LAST);
7156
7157 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
7158 LEAVE;
7159}
7160
7161/**
7162 * ipr_inquiry_page_supported - Is the given inquiry page supported
7163 * @page0: inquiry page 0 buffer
7164 * @page: page code.
7165 *
7166 * This function determines if the specified inquiry page is supported.
7167 *
7168 * Return value:
7169 * 1 if page is supported / 0 if not
7170 **/
7171static int ipr_inquiry_page_supported(struct ipr_inquiry_page0 *page0, u8 page)
7172{
7173 int i;
7174
7175 for (i = 0; i < min_t(u8, page0->len, IPR_INQUIRY_PAGE0_ENTRIES); i++)
7176 if (page0->page[i] == page)
7177 return 1;
7178
7179 return 0;
7180}
7181
7182/**
7183 * ipr_ioafp_cap_inquiry - Send a Page 0xD0 Inquiry to the adapter.
7184 * @ipr_cmd: ipr command struct
7185 *
7186 * This function sends a Page 0xD0 inquiry to the adapter
7187 * to retrieve adapter capabilities.
7188 *
7189 * Return value:
7190 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
7191 **/
7192static int ipr_ioafp_cap_inquiry(struct ipr_cmnd *ipr_cmd)
7193{
7194 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7195 struct ipr_inquiry_page0 *page0 = &ioa_cfg->vpd_cbs->page0_data;
7196 struct ipr_inquiry_cap *cap = &ioa_cfg->vpd_cbs->cap;
7197
7198 ENTER;
7199 ipr_cmd->job_step = ipr_ioafp_query_ioa_cfg;
7200 memset(cap, 0, sizeof(*cap));
7201
7202 if (ipr_inquiry_page_supported(page0, 0xD0)) {
7203 ipr_ioafp_inquiry(ipr_cmd, 1, 0xD0,
7204 ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, cap),
7205 sizeof(struct ipr_inquiry_cap));
7206 return IPR_RC_JOB_RETURN;
7207 }
7208
7209 LEAVE;
7210 return IPR_RC_JOB_CONTINUE;
7211}
7212
7213/**
7214 * ipr_ioafp_page3_inquiry - Send a Page 3 Inquiry to the adapter.
7215 * @ipr_cmd: ipr command struct
7216 *
7217 * This function sends a Page 3 inquiry to the adapter
7218 * to retrieve software VPD information.
7219 *
7220 * Return value:
7221 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
7222 **/
7223static int ipr_ioafp_page3_inquiry(struct ipr_cmnd *ipr_cmd)
7224{
7225 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7226
7227 ENTER;
7228
 7229 ipr_cmd->job_step = ipr_ioafp_cap_inquiry;
7230
7231 ipr_ioafp_inquiry(ipr_cmd, 1, 3,
7232 ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, page3_data),
7233 sizeof(struct ipr_inquiry_page3));
7234
7235 LEAVE;
7236 return IPR_RC_JOB_RETURN;
7237}
7238
7239/**
7240 * ipr_ioafp_page0_inquiry - Send a Page 0 Inquiry to the adapter.
7241 * @ipr_cmd: ipr command struct
7242 *
7243 * This function sends a Page 0 inquiry to the adapter
7244 * to retrieve supported inquiry pages.
7245 *
7246 * Return value:
7247 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
7248 **/
7249static int ipr_ioafp_page0_inquiry(struct ipr_cmnd *ipr_cmd)
7250{
7251 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7252 char type[5];
7253
7254 ENTER;
7255
7256 /* Grab the type out of the VPD and store it away */
7257 memcpy(type, ioa_cfg->vpd_cbs->ioa_vpd.std_inq_data.vpids.product_id, 4);
7258 type[4] = '\0';
7259 ioa_cfg->type = simple_strtoul((char *)type, NULL, 16);
7260
 7261 ipr_cmd->job_step = ipr_ioafp_page3_inquiry;
 7262
7263 ipr_ioafp_inquiry(ipr_cmd, 1, 0,
7264 ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, page0_data),
7265 sizeof(struct ipr_inquiry_page0));
7266
7267 LEAVE;
7268 return IPR_RC_JOB_RETURN;
7269}
7270
7271/**
7272 * ipr_ioafp_std_inquiry - Send a Standard Inquiry to the adapter.
7273 * @ipr_cmd: ipr command struct
7274 *
7275 * This function sends a standard inquiry to the adapter.
7276 *
7277 * Return value:
7278 * IPR_RC_JOB_RETURN
7279 **/
7280static int ipr_ioafp_std_inquiry(struct ipr_cmnd *ipr_cmd)
7281{
7282 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7283
7284 ENTER;
 7285 ipr_cmd->job_step = ipr_ioafp_page0_inquiry;
7286
7287 ipr_ioafp_inquiry(ipr_cmd, 0, 0,
7288 ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, ioa_vpd),
7289 sizeof(struct ipr_ioa_vpd));
7290
7291 LEAVE;
7292 return IPR_RC_JOB_RETURN;
7293}
7294
7295/**
 7296 * ipr_ioafp_identify_hrrq - Send Identify Host RRQ.
7297 * @ipr_cmd: ipr command struct
7298 *
 7299 * This function sends an Identify Host Request Response Queue
7300 * command to establish the HRRQ with the adapter.
7301 *
7302 * Return value:
7303 * IPR_RC_JOB_RETURN
7304 **/
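/*
 * A sketch of how the Identify Host RRQ CDB built below carries the
 * host RRQ DMA address, most-significant byte first (cdb[1] and
 * cdb[10..13] are only used on SIS-64 adapters):
 *
 *	cdb[2..5]   = bits 31..0  of host_rrq_dma
 *	cdb[7..8]   = queue size in bytes (sizeof(u32) * IPR_NUM_CMD_BLKS)
 *	cdb[10..13] = bits 63..32 of host_rrq_dma
 */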
 7305static int ipr_ioafp_identify_hrrq(struct ipr_cmnd *ipr_cmd)
7306{
7307 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7308 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
7309
7310 ENTER;
7311 dev_info(&ioa_cfg->pdev->dev, "Starting IOA initialization sequence.\n");
7312
7313 ioarcb->cmd_pkt.cdb[0] = IPR_ID_HOST_RR_Q;
7314 ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
7315
7316 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
7317 if (ioa_cfg->sis64)
7318 ioarcb->cmd_pkt.cdb[1] = 0x1;
 7319 ioarcb->cmd_pkt.cdb[2] =
 7320 ((u64) ioa_cfg->host_rrq_dma >> 24) & 0xff;
 7321 ioarcb->cmd_pkt.cdb[3] =
 7322 ((u64) ioa_cfg->host_rrq_dma >> 16) & 0xff;
 7323 ioarcb->cmd_pkt.cdb[4] =
 7324 ((u64) ioa_cfg->host_rrq_dma >> 8) & 0xff;
 7325 ioarcb->cmd_pkt.cdb[5] =
 7326 ((u64) ioa_cfg->host_rrq_dma) & 0xff;
7327 ioarcb->cmd_pkt.cdb[7] =
7328 ((sizeof(u32) * IPR_NUM_CMD_BLKS) >> 8) & 0xff;
7329 ioarcb->cmd_pkt.cdb[8] =
7330 (sizeof(u32) * IPR_NUM_CMD_BLKS) & 0xff;
7331
7332 if (ioa_cfg->sis64) {
7333 ioarcb->cmd_pkt.cdb[10] =
7334 ((u64) ioa_cfg->host_rrq_dma >> 56) & 0xff;
7335 ioarcb->cmd_pkt.cdb[11] =
7336 ((u64) ioa_cfg->host_rrq_dma >> 48) & 0xff;
7337 ioarcb->cmd_pkt.cdb[12] =
7338 ((u64) ioa_cfg->host_rrq_dma >> 40) & 0xff;
7339 ioarcb->cmd_pkt.cdb[13] =
7340 ((u64) ioa_cfg->host_rrq_dma >> 32) & 0xff;
7341 }
7342
7343 ipr_cmd->job_step = ipr_ioafp_std_inquiry;
7344
7345 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
7346
7347 LEAVE;
7348 return IPR_RC_JOB_RETURN;
7349}
7350
7351/**
7352 * ipr_reset_timer_done - Adapter reset timer function
7353 * @ipr_cmd: ipr command struct
7354 *
7355 * Description: This function is used in adapter reset processing
7356 * for timing events. If the reset_cmd pointer in the IOA
 7357 * config struct is not this adapter's, we are doing nested
7358 * resets and fail_all_ops will take care of freeing the
7359 * command block.
7360 *
7361 * Return value:
7362 * none
7363 **/
7364static void ipr_reset_timer_done(struct ipr_cmnd *ipr_cmd)
7365{
7366 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7367 unsigned long lock_flags = 0;
7368
7369 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
7370
7371 if (ioa_cfg->reset_cmd == ipr_cmd) {
7372 list_del(&ipr_cmd->queue);
7373 ipr_cmd->done(ipr_cmd);
7374 }
7375
7376 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
7377}
7378
7379/**
7380 * ipr_reset_start_timer - Start a timer for adapter reset job
7381 * @ipr_cmd: ipr command struct
7382 * @timeout: timeout value
7383 *
7384 * Description: This function is used in adapter reset processing
7385 * for timing events. If the reset_cmd pointer in the IOA
 7386 * config struct is not this adapter's, we are doing nested
7387 * resets and fail_all_ops will take care of freeing the
7388 * command block.
7389 *
7390 * Return value:
7391 * none
7392 **/
7393static void ipr_reset_start_timer(struct ipr_cmnd *ipr_cmd,
7394 unsigned long timeout)
7395{
7396 list_add_tail(&ipr_cmd->queue, &ipr_cmd->ioa_cfg->pending_q);
7397 ipr_cmd->done = ipr_reset_ioa_job;
7398
7399 ipr_cmd->timer.data = (unsigned long) ipr_cmd;
7400 ipr_cmd->timer.expires = jiffies + timeout;
7401 ipr_cmd->timer.function = (void (*)(unsigned long))ipr_reset_timer_done;
7402 add_timer(&ipr_cmd->timer);
7403}
7404
7405/**
7406 * ipr_init_ioa_mem - Initialize ioa_cfg control block
7407 * @ioa_cfg: ioa cfg struct
7408 *
7409 * Return value:
7410 * nothing
7411 **/
7412static void ipr_init_ioa_mem(struct ipr_ioa_cfg *ioa_cfg)
7413{
7414 memset(ioa_cfg->host_rrq, 0, sizeof(u32) * IPR_NUM_CMD_BLKS);
7415
7416 /* Initialize Host RRQ pointers */
7417 ioa_cfg->hrrq_start = ioa_cfg->host_rrq;
7418 ioa_cfg->hrrq_end = &ioa_cfg->host_rrq[IPR_NUM_CMD_BLKS - 1];
7419 ioa_cfg->hrrq_curr = ioa_cfg->hrrq_start;
7420 ioa_cfg->toggle_bit = 1;
7421
7422 /* Zero out config table */
 7423 memset(ioa_cfg->u.cfg_table, 0, ioa_cfg->cfg_table_size);
7424}
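/*
 * Why toggle_bit starts out as 1: the adapter posts response handles
 * into host_rrq with the low-order toggle bit set to the value for the
 * current pass through the queue.  Each time hrrq_curr wraps from
 * hrrq_end back to hrrq_start, the host inverts its expected toggle
 * value, so stale entries from the previous pass can be told apart
 * from newly posted ones without a separate producer index (see the
 * HRRQ consumption in the interrupt path).
 */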
7425
7426/**
7427 * ipr_reset_next_stage - Process IPL stage change based on feedback register.
7428 * @ipr_cmd: ipr command struct
7429 *
7430 * Return value:
7431 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
7432 **/
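/*
 * A sketch of the SIS-64 init feedback register decode performed
 * below (masks defined in ipr.h):
 *
 *	stage      = feedback & IPR_IPL_INIT_STAGE_MASK
 *	stage_time = feedback & IPR_IPL_INIT_STAGE_TIME_MASK  (seconds)
 *
 * stage_time is sanity-clamped before it arms the operational timeout
 * timer, since a bogus value from a sick adapter could otherwise stall
 * or prematurely abort the bringup.
 */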
7433static int ipr_reset_next_stage(struct ipr_cmnd *ipr_cmd)
7434{
7435 unsigned long stage, stage_time;
7436 u32 feedback;
7437 volatile u32 int_reg;
7438 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7439 u64 maskval = 0;
7440
7441 feedback = readl(ioa_cfg->regs.init_feedback_reg);
7442 stage = feedback & IPR_IPL_INIT_STAGE_MASK;
7443 stage_time = feedback & IPR_IPL_INIT_STAGE_TIME_MASK;
7444
7445 ipr_dbg("IPL stage = 0x%lx, IPL stage time = %ld\n", stage, stage_time);
7446
7447 /* sanity check the stage_time value */
7448 if (stage_time == 0)
7449 stage_time = IPR_IPL_INIT_DEFAULT_STAGE_TIME;
7450 else if (stage_time < IPR_IPL_INIT_MIN_STAGE_TIME)
7451 stage_time = IPR_IPL_INIT_MIN_STAGE_TIME;
7452 else if (stage_time > IPR_LONG_OPERATIONAL_TIMEOUT)
7453 stage_time = IPR_LONG_OPERATIONAL_TIMEOUT;
7454
7455 if (stage == IPR_IPL_INIT_STAGE_UNKNOWN) {
7456 writel(IPR_PCII_IPL_STAGE_CHANGE, ioa_cfg->regs.set_interrupt_mask_reg);
7457 int_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
7458 stage_time = ioa_cfg->transop_timeout;
7459 ipr_cmd->job_step = ipr_ioafp_identify_hrrq;
7460 } else if (stage == IPR_IPL_INIT_STAGE_TRANSOP) {
7461 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg32);
7462 if (int_reg & IPR_PCII_IOA_TRANS_TO_OPER) {
7463 ipr_cmd->job_step = ipr_ioafp_identify_hrrq;
7464 maskval = IPR_PCII_IPL_STAGE_CHANGE;
7465 maskval = (maskval << 32) | IPR_PCII_IOA_TRANS_TO_OPER;
7466 writeq(maskval, ioa_cfg->regs.set_interrupt_mask_reg);
7467 int_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
7468 return IPR_RC_JOB_CONTINUE;
7469 }
7470 }
7471
7472 ipr_cmd->timer.data = (unsigned long) ipr_cmd;
7473 ipr_cmd->timer.expires = jiffies + stage_time * HZ;
7474 ipr_cmd->timer.function = (void (*)(unsigned long))ipr_oper_timeout;
7475 ipr_cmd->done = ipr_reset_ioa_job;
7476 add_timer(&ipr_cmd->timer);
7477 list_add_tail(&ipr_cmd->queue, &ioa_cfg->pending_q);
7478
7479 return IPR_RC_JOB_RETURN;
7480}
7481
7482/**
7483 * ipr_reset_enable_ioa - Enable the IOA following a reset.
7484 * @ipr_cmd: ipr command struct
7485 *
7486 * This function reinitializes some control blocks and
7487 * enables destructive diagnostics on the adapter.
7488 *
7489 * Return value:
7490 * IPR_RC_JOB_RETURN
7491 **/
7492static int ipr_reset_enable_ioa(struct ipr_cmnd *ipr_cmd)
7493{
7494 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7495 volatile u32 int_reg;
 7496 volatile u64 maskval;
7497
7498 ENTER;
 7499 ipr_cmd->job_step = ipr_ioafp_identify_hrrq;
7500 ipr_init_ioa_mem(ioa_cfg);
7501
7502 ioa_cfg->allow_interrupts = 1;
7503 if (ioa_cfg->sis64) {
7504 /* Set the adapter to the correct endian mode. */
7505 writel(IPR_ENDIAN_SWAP_KEY, ioa_cfg->regs.endian_swap_reg);
7506 int_reg = readl(ioa_cfg->regs.endian_swap_reg);
7507 }
7508
 7509 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg32);
7510
7511 if (int_reg & IPR_PCII_IOA_TRANS_TO_OPER) {
7512 writel((IPR_PCII_ERROR_INTERRUPTS | IPR_PCII_HRRQ_UPDATED),
 7513 ioa_cfg->regs.clr_interrupt_mask_reg32);
7514 int_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
7515 return IPR_RC_JOB_CONTINUE;
7516 }
7517
7518 /* Enable destructive diagnostics on IOA */
7519 writel(ioa_cfg->doorbell, ioa_cfg->regs.set_uproc_interrupt_reg32);
7520
7521 if (ioa_cfg->sis64) {
7522 maskval = IPR_PCII_IPL_STAGE_CHANGE;
7523 maskval = (maskval << 32) | IPR_PCII_OPER_INTERRUPTS;
7524 writeq(maskval, ioa_cfg->regs.clr_interrupt_mask_reg);
7525 } else
7526 writel(IPR_PCII_OPER_INTERRUPTS, ioa_cfg->regs.clr_interrupt_mask_reg32);
 7527
7528 int_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
7529
7530 dev_info(&ioa_cfg->pdev->dev, "Initializing IOA.\n");
7531
7532 if (ioa_cfg->sis64) {
7533 ipr_cmd->job_step = ipr_reset_next_stage;
7534 return IPR_RC_JOB_CONTINUE;
7535 }
7536
 7537 ipr_cmd->timer.data = (unsigned long) ipr_cmd;
 7538 ipr_cmd->timer.expires = jiffies + (ioa_cfg->transop_timeout * HZ);
7539 ipr_cmd->timer.function = (void (*)(unsigned long))ipr_oper_timeout;
7540 ipr_cmd->done = ipr_reset_ioa_job;
7541 add_timer(&ipr_cmd->timer);
7542 list_add_tail(&ipr_cmd->queue, &ioa_cfg->pending_q);
7543
7544 LEAVE;
7545 return IPR_RC_JOB_RETURN;
7546}
7547
7548/**
7549 * ipr_reset_wait_for_dump - Wait for a dump to timeout.
7550 * @ipr_cmd: ipr command struct
7551 *
7552 * This function is invoked when an adapter dump has run out
7553 * of processing time.
7554 *
7555 * Return value:
7556 * IPR_RC_JOB_CONTINUE
7557 **/
7558static int ipr_reset_wait_for_dump(struct ipr_cmnd *ipr_cmd)
7559{
7560 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7561
7562 if (ioa_cfg->sdt_state == GET_DUMP)
7563 ioa_cfg->sdt_state = WAIT_FOR_DUMP;
7564 else if (ioa_cfg->sdt_state == READ_DUMP)
7565 ioa_cfg->sdt_state = ABORT_DUMP;
7566
 7567 ioa_cfg->dump_timeout = 1;
7568 ipr_cmd->job_step = ipr_reset_alert;
7569
7570 return IPR_RC_JOB_CONTINUE;
7571}
7572
7573/**
7574 * ipr_unit_check_no_data - Log a unit check/no data error log
7575 * @ioa_cfg: ioa config struct
7576 *
7577 * Logs an error indicating the adapter unit checked, but for some
7578 * reason, we were unable to fetch the unit check buffer.
7579 *
7580 * Return value:
7581 * nothing
7582 **/
7583static void ipr_unit_check_no_data(struct ipr_ioa_cfg *ioa_cfg)
7584{
7585 ioa_cfg->errors_logged++;
7586 dev_err(&ioa_cfg->pdev->dev, "IOA unit check with no data\n");
7587}
7588
7589/**
7590 * ipr_get_unit_check_buffer - Get the unit check buffer from the IOA
7591 * @ioa_cfg: ioa config struct
7592 *
7593 * Fetches the unit check buffer from the adapter by clocking the data
7594 * through the mailbox register.
7595 *
7596 * Return value:
7597 * nothing
7598 **/
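/*
 * Rough shape of the fetch performed below: the mailbox register
 * points at a small dump table in adapter memory, whose first entry
 * in turn describes where the unit check buffer lives:
 *
 *	mailbox --> struct ipr_uc_sdt
 *	            entry[0].start_token/end_token
 *	                --> unit check buffer, copied into a free
 *	                    hostrcb via ipr_get_ldump_data_section()
 */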
7599static void ipr_get_unit_check_buffer(struct ipr_ioa_cfg *ioa_cfg)
7600{
7601 unsigned long mailbox;
7602 struct ipr_hostrcb *hostrcb;
7603 struct ipr_uc_sdt sdt;
7604 int rc, length;
 7605 u32 ioasc;
7606
7607 mailbox = readl(ioa_cfg->ioa_mailbox);
7608
 7609 if (!ioa_cfg->sis64 && !ipr_sdt_is_fmt2(mailbox)) {
7610 ipr_unit_check_no_data(ioa_cfg);
7611 return;
7612 }
7613
7614 memset(&sdt, 0, sizeof(struct ipr_uc_sdt));
7615 rc = ipr_get_ldump_data_section(ioa_cfg, mailbox, (__be32 *) &sdt,
7616 (sizeof(struct ipr_uc_sdt)) / sizeof(__be32));
7617
7618 if (rc || !(sdt.entry[0].flags & IPR_SDT_VALID_ENTRY) ||
7619 ((be32_to_cpu(sdt.hdr.state) != IPR_FMT3_SDT_READY_TO_USE) &&
7620 (be32_to_cpu(sdt.hdr.state) != IPR_FMT2_SDT_READY_TO_USE))) {
7621 ipr_unit_check_no_data(ioa_cfg);
7622 return;
7623 }
7624
7625 /* Find length of the first sdt entry (UC buffer) */
7626 if (be32_to_cpu(sdt.hdr.state) == IPR_FMT3_SDT_READY_TO_USE)
7627 length = be32_to_cpu(sdt.entry[0].end_token);
7628 else
7629 length = (be32_to_cpu(sdt.entry[0].end_token) -
7630 be32_to_cpu(sdt.entry[0].start_token)) &
7631 IPR_FMT2_MBX_ADDR_MASK;
7632
7633 hostrcb = list_entry(ioa_cfg->hostrcb_free_q.next,
7634 struct ipr_hostrcb, queue);
7635 list_del(&hostrcb->queue);
7636 memset(&hostrcb->hcam, 0, sizeof(hostrcb->hcam));
7637
7638 rc = ipr_get_ldump_data_section(ioa_cfg,
 7639 be32_to_cpu(sdt.entry[0].start_token),
7640 (__be32 *)&hostrcb->hcam,
7641 min(length, (int)sizeof(hostrcb->hcam)) / sizeof(__be32));
7642
 7643 if (!rc) {
 7644 ipr_handle_log_data(ioa_cfg, hostrcb);
 7645 ioasc = be32_to_cpu(hostrcb->hcam.u.error.fd_ioasc);
7646 if (ioasc == IPR_IOASC_NR_IOA_RESET_REQUIRED &&
7647 ioa_cfg->sdt_state == GET_DUMP)
7648 ioa_cfg->sdt_state = WAIT_FOR_DUMP;
7649 } else
7650 ipr_unit_check_no_data(ioa_cfg);
7651
7652 list_add_tail(&hostrcb->queue, &ioa_cfg->hostrcb_free_q);
7653}
7654
7655/**
7656 * ipr_reset_get_unit_check_job - Call to get the unit check buffer.
7657 * @ipr_cmd: ipr command struct
7658 *
 7659 * Description: This function fetches the unit check buffer from the adapter.
7660 *
7661 * Return value:
7662 * IPR_RC_JOB_RETURN
7663 **/
7664static int ipr_reset_get_unit_check_job(struct ipr_cmnd *ipr_cmd)
7665{
7666 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7667
7668 ENTER;
7669 ioa_cfg->ioa_unit_checked = 0;
7670 ipr_get_unit_check_buffer(ioa_cfg);
7671 ipr_cmd->job_step = ipr_reset_alert;
7672 ipr_reset_start_timer(ipr_cmd, 0);
7673
7674 LEAVE;
7675 return IPR_RC_JOB_RETURN;
7676}
7677
7678/**
7679 * ipr_reset_restore_cfg_space - Restore PCI config space.
7680 * @ipr_cmd: ipr command struct
7681 *
7682 * Description: This function restores the saved PCI config space of
7683 * the adapter, fails all outstanding ops back to the callers, and
7684 * fetches the dump/unit check if applicable to this reset.
7685 *
7686 * Return value:
7687 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
7688 **/
7689static int ipr_reset_restore_cfg_space(struct ipr_cmnd *ipr_cmd)
7690{
7691 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
 7692 u32 int_reg;
7693
7694 ENTER;
 7695 ioa_cfg->pdev->state_saved = true;
 7696 pci_restore_state(ioa_cfg->pdev);
7697
7698 if (ipr_set_pcix_cmd_reg(ioa_cfg)) {
 7699 ipr_cmd->s.ioasa.hdr.ioasc = cpu_to_be32(IPR_IOASC_PCI_ACCESS_ERROR);
7700 return IPR_RC_JOB_CONTINUE;
7701 }
7702
7703 ipr_fail_all_ops(ioa_cfg);
7704
7705 if (ioa_cfg->sis64) {
7706 /* Set the adapter to the correct endian mode. */
7707 writel(IPR_ENDIAN_SWAP_KEY, ioa_cfg->regs.endian_swap_reg);
7708 int_reg = readl(ioa_cfg->regs.endian_swap_reg);
7709 }
7710
 7711 if (ioa_cfg->ioa_unit_checked) {
7712 if (ioa_cfg->sis64) {
7713 ipr_cmd->job_step = ipr_reset_get_unit_check_job;
7714 ipr_reset_start_timer(ipr_cmd, IPR_DUMP_DELAY_TIMEOUT);
7715 return IPR_RC_JOB_RETURN;
7716 } else {
7717 ioa_cfg->ioa_unit_checked = 0;
7718 ipr_get_unit_check_buffer(ioa_cfg);
7719 ipr_cmd->job_step = ipr_reset_alert;
7720 ipr_reset_start_timer(ipr_cmd, 0);
7721 return IPR_RC_JOB_RETURN;
7722 }
7723 }
7724
7725 if (ioa_cfg->in_ioa_bringdown) {
7726 ipr_cmd->job_step = ipr_ioa_bringdown_done;
7727 } else {
7728 ipr_cmd->job_step = ipr_reset_enable_ioa;
7729
7730 if (GET_DUMP == ioa_cfg->sdt_state) {
 7731 ioa_cfg->sdt_state = READ_DUMP;
 7732 ioa_cfg->dump_timeout = 0;
7733 if (ioa_cfg->sis64)
7734 ipr_reset_start_timer(ipr_cmd, IPR_SIS64_DUMP_TIMEOUT);
7735 else
7736 ipr_reset_start_timer(ipr_cmd, IPR_SIS32_DUMP_TIMEOUT);
7737 ipr_cmd->job_step = ipr_reset_wait_for_dump;
7738 schedule_work(&ioa_cfg->work_q);
7739 return IPR_RC_JOB_RETURN;
7740 }
7741 }
7742
 7743 LEAVE;
7744 return IPR_RC_JOB_CONTINUE;
7745}
7746
7747/**
7748 * ipr_reset_bist_done - BIST has completed on the adapter.
7749 * @ipr_cmd: ipr command struct
7750 *
7751 * Description: Unblock config space and resume the reset process.
7752 *
7753 * Return value:
7754 * IPR_RC_JOB_CONTINUE
7755 **/
7756static int ipr_reset_bist_done(struct ipr_cmnd *ipr_cmd)
7757{
7758 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7759
 7760 ENTER;
7761 if (ioa_cfg->cfg_locked)
7762 pci_cfg_access_unlock(ioa_cfg->pdev);
7763 ioa_cfg->cfg_locked = 0;
7764 ipr_cmd->job_step = ipr_reset_restore_cfg_space;
7765 LEAVE;
7766 return IPR_RC_JOB_CONTINUE;
7767}
7768
7769/**
7770 * ipr_reset_start_bist - Run BIST on the adapter.
7771 * @ipr_cmd: ipr command struct
7772 *
7773 * Description: This function runs BIST on the adapter, then delays 2 seconds.
7774 *
7775 * Return value:
7776 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
7777 **/
7778static int ipr_reset_start_bist(struct ipr_cmnd *ipr_cmd)
7779{
7780 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
 7781 int rc = PCIBIOS_SUCCESSFUL;
7782
7783 ENTER;
7784 if (ioa_cfg->ipr_chip->bist_method == IPR_MMIO)
7785 writel(IPR_UPROCI_SIS64_START_BIST,
7786 ioa_cfg->regs.set_uproc_interrupt_reg32);
7787 else
7788 rc = pci_write_config_byte(ioa_cfg->pdev, PCI_BIST, PCI_BIST_START);
7789
7790 if (rc == PCIBIOS_SUCCESSFUL) {
 7791 ipr_cmd->job_step = ipr_reset_bist_done;
7792 ipr_reset_start_timer(ipr_cmd, IPR_WAIT_FOR_BIST_TIMEOUT);
7793 rc = IPR_RC_JOB_RETURN;
 7794 } else {
7795 if (ioa_cfg->cfg_locked)
7796 pci_cfg_access_unlock(ipr_cmd->ioa_cfg->pdev);
7797 ioa_cfg->cfg_locked = 0;
7798 ipr_cmd->s.ioasa.hdr.ioasc = cpu_to_be32(IPR_IOASC_PCI_ACCESS_ERROR);
7799 rc = IPR_RC_JOB_CONTINUE;
7800 }
7801
7802 LEAVE;
7803 return rc;
7804}
7805
7806/**
7807 * ipr_reset_slot_reset_done - Clear PCI reset to the adapter
7808 * @ipr_cmd: ipr command struct
7809 *
7810 * Description: This clears PCI reset to the adapter and delays two seconds.
7811 *
7812 * Return value:
7813 * IPR_RC_JOB_RETURN
7814 **/
7815static int ipr_reset_slot_reset_done(struct ipr_cmnd *ipr_cmd)
7816{
7817 ENTER;
7818 pci_set_pcie_reset_state(ipr_cmd->ioa_cfg->pdev, pcie_deassert_reset);
7819 ipr_cmd->job_step = ipr_reset_bist_done;
7820 ipr_reset_start_timer(ipr_cmd, IPR_WAIT_FOR_BIST_TIMEOUT);
7821 LEAVE;
7822 return IPR_RC_JOB_RETURN;
7823}
7824
7825/**
7826 * ipr_reset_slot_reset - Reset the PCI slot of the adapter.
7827 * @ipr_cmd: ipr command struct
7828 *
7829 * Description: This asserts PCI reset to the adapter.
7830 *
7831 * Return value:
7832 * IPR_RC_JOB_RETURN
7833 **/
7834static int ipr_reset_slot_reset(struct ipr_cmnd *ipr_cmd)
7835{
7836 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7837 struct pci_dev *pdev = ioa_cfg->pdev;
7838
7839 ENTER;
7840 pci_set_pcie_reset_state(pdev, pcie_warm_reset);
7841 ipr_cmd->job_step = ipr_reset_slot_reset_done;
7842 ipr_reset_start_timer(ipr_cmd, IPR_PCI_RESET_TIMEOUT);
7843 LEAVE;
7844 return IPR_RC_JOB_RETURN;
7845}
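/*
 * PCI warm reset sequencing, as chained through the steps above:
 *
 *	ipr_reset_slot_reset         assert reset, wait IPR_PCI_RESET_TIMEOUT
 *	  ipr_reset_slot_reset_done  deassert, wait IPR_WAIT_FOR_BIST_TIMEOUT
 *	    ipr_reset_bist_done      unlock config space, restore PCI config
 */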
7846
7847/**
7848 * ipr_reset_block_config_access_wait - Wait for permission to block config access
7849 * @ipr_cmd: ipr command struct
7850 *
7851 * Description: This attempts to block config access to the IOA.
7852 *
7853 * Return value:
7854 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
7855 **/
7856static int ipr_reset_block_config_access_wait(struct ipr_cmnd *ipr_cmd)
7857{
7858 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7859 int rc = IPR_RC_JOB_CONTINUE;
7860
7861 if (pci_cfg_access_trylock(ioa_cfg->pdev)) {
7862 ioa_cfg->cfg_locked = 1;
7863 ipr_cmd->job_step = ioa_cfg->reset;
7864 } else {
7865 if (ipr_cmd->u.time_left) {
7866 rc = IPR_RC_JOB_RETURN;
7867 ipr_cmd->u.time_left -= IPR_CHECK_FOR_RESET_TIMEOUT;
7868 ipr_reset_start_timer(ipr_cmd,
7869 IPR_CHECK_FOR_RESET_TIMEOUT);
7870 } else {
7871 ipr_cmd->job_step = ioa_cfg->reset;
7872 dev_err(&ioa_cfg->pdev->dev,
7873 "Timed out waiting to lock config access. Resetting anyway.\n");
7874 }
7875 }
7876
7877 return rc;
7878}
7879
7880/**
7881 * ipr_reset_block_config_access - Block config access to the IOA
7882 * @ipr_cmd: ipr command struct
7883 *
7884 * Description: This attempts to block config access to the IOA
7885 *
7886 * Return value:
7887 * IPR_RC_JOB_CONTINUE
7888 **/
7889static int ipr_reset_block_config_access(struct ipr_cmnd *ipr_cmd)
7890{
7891 ipr_cmd->ioa_cfg->cfg_locked = 0;
7892 ipr_cmd->job_step = ipr_reset_block_config_access_wait;
7893 ipr_cmd->u.time_left = IPR_WAIT_FOR_RESET_TIMEOUT;
7894 return IPR_RC_JOB_CONTINUE;
7895}
7896
7897/**
7898 * ipr_reset_allowed - Query whether or not IOA can be reset
7899 * @ioa_cfg: ioa config struct
7900 *
7901 * Return value:
7902 * 0 if reset not allowed / non-zero if reset is allowed
7903 **/
7904static int ipr_reset_allowed(struct ipr_ioa_cfg *ioa_cfg)
7905{
7906 volatile u32 temp_reg;
7907
7908 temp_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
7909 return ((temp_reg & IPR_PCII_CRITICAL_OPERATION) == 0);
7910}
7911
7912/**
7913 * ipr_reset_wait_to_start_bist - Wait for permission to reset IOA.
7914 * @ipr_cmd: ipr command struct
7915 *
7916 * Description: This function waits for adapter permission to run BIST,
7917 * then runs BIST. If the adapter does not give permission after a
7918 * reasonable time, we will reset the adapter anyway. The impact of
7919 * resetting the adapter without warning the adapter is the risk of
7920 * losing the persistent error log on the adapter. If the adapter is
7921 * reset while it is writing to the flash on the adapter, the flash
7922 * segment will have bad ECC and be zeroed.
7923 *
7924 * Return value:
7925 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
7926 **/
7927static int ipr_reset_wait_to_start_bist(struct ipr_cmnd *ipr_cmd)
7928{
7929 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7930 int rc = IPR_RC_JOB_RETURN;
7931
7932 if (!ipr_reset_allowed(ioa_cfg) && ipr_cmd->u.time_left) {
7933 ipr_cmd->u.time_left -= IPR_CHECK_FOR_RESET_TIMEOUT;
7934 ipr_reset_start_timer(ipr_cmd, IPR_CHECK_FOR_RESET_TIMEOUT);
7935 } else {
 7936 ipr_cmd->job_step = ipr_reset_block_config_access;
7937 rc = IPR_RC_JOB_CONTINUE;
7938 }
7939
7940 return rc;
7941}
7942
7943/**
 7944 * ipr_reset_alert - Alert the adapter of a pending reset
7945 * @ipr_cmd: ipr command struct
7946 *
7947 * Description: This function alerts the adapter that it will be reset.
7948 * If memory space is not currently enabled, proceed directly
7949 * to running BIST on the adapter. The timer must always be started
7950 * so we guarantee we do not run BIST from ipr_isr.
7951 *
7952 * Return value:
7953 * IPR_RC_JOB_RETURN
7954 **/
7955static int ipr_reset_alert(struct ipr_cmnd *ipr_cmd)
7956{
7957 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7958 u16 cmd_reg;
7959 int rc;
7960
7961 ENTER;
7962 rc = pci_read_config_word(ioa_cfg->pdev, PCI_COMMAND, &cmd_reg);
7963
7964 if ((rc == PCIBIOS_SUCCESSFUL) && (cmd_reg & PCI_COMMAND_MEMORY)) {
7965 ipr_mask_and_clear_interrupts(ioa_cfg, ~0);
 7966 writel(IPR_UPROCI_RESET_ALERT, ioa_cfg->regs.set_uproc_interrupt_reg32);
7967 ipr_cmd->job_step = ipr_reset_wait_to_start_bist;
7968 } else {
 7969 ipr_cmd->job_step = ipr_reset_block_config_access;
7970 }
7971
7972 ipr_cmd->u.time_left = IPR_WAIT_FOR_RESET_TIMEOUT;
7973 ipr_reset_start_timer(ipr_cmd, IPR_CHECK_FOR_RESET_TIMEOUT);
7974
7975 LEAVE;
7976 return IPR_RC_JOB_RETURN;
7977}
7978
7979/**
7980 * ipr_reset_ucode_download_done - Microcode download completion
7981 * @ipr_cmd: ipr command struct
7982 *
7983 * Description: This function unmaps the microcode download buffer.
7984 *
7985 * Return value:
7986 * IPR_RC_JOB_CONTINUE
7987 **/
7988static int ipr_reset_ucode_download_done(struct ipr_cmnd *ipr_cmd)
7989{
7990 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7991 struct ipr_sglist *sglist = ioa_cfg->ucode_sglist;
7992
7993 pci_unmap_sg(ioa_cfg->pdev, sglist->scatterlist,
7994 sglist->num_sg, DMA_TO_DEVICE);
7995
7996 ipr_cmd->job_step = ipr_reset_alert;
7997 return IPR_RC_JOB_CONTINUE;
7998}
7999
8000/**
8001 * ipr_reset_ucode_download - Download microcode to the adapter
8002 * @ipr_cmd: ipr command struct
8003 *
 8004 * Description: This function checks to see if there is microcode
8005 * to download to the adapter. If there is, a download is performed.
8006 *
8007 * Return value:
8008 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
8009 **/
8010static int ipr_reset_ucode_download(struct ipr_cmnd *ipr_cmd)
8011{
8012 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8013 struct ipr_sglist *sglist = ioa_cfg->ucode_sglist;
8014
8015 ENTER;
8016 ipr_cmd->job_step = ipr_reset_alert;
8017
8018 if (!sglist)
8019 return IPR_RC_JOB_CONTINUE;
8020
8021 ipr_cmd->ioarcb.res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
8022 ipr_cmd->ioarcb.cmd_pkt.request_type = IPR_RQTYPE_SCSICDB;
8023 ipr_cmd->ioarcb.cmd_pkt.cdb[0] = WRITE_BUFFER;
8024 ipr_cmd->ioarcb.cmd_pkt.cdb[1] = IPR_WR_BUF_DOWNLOAD_AND_SAVE;
8025 ipr_cmd->ioarcb.cmd_pkt.cdb[6] = (sglist->buffer_len & 0xff0000) >> 16;
8026 ipr_cmd->ioarcb.cmd_pkt.cdb[7] = (sglist->buffer_len & 0x00ff00) >> 8;
8027 ipr_cmd->ioarcb.cmd_pkt.cdb[8] = sglist->buffer_len & 0x0000ff;
8028
8029 if (ioa_cfg->sis64)
8030 ipr_build_ucode_ioadl64(ipr_cmd, sglist);
8031 else
8032 ipr_build_ucode_ioadl(ipr_cmd, sglist);
8033 ipr_cmd->job_step = ipr_reset_ucode_download_done;
8034
8035 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout,
8036 IPR_WRITE_BUFFER_TIMEOUT);
8037
8038 LEAVE;
8039 return IPR_RC_JOB_RETURN;
8040}
8041
8042/**
8043 * ipr_reset_shutdown_ioa - Shutdown the adapter
8044 * @ipr_cmd: ipr command struct
8045 *
8046 * Description: This function issues an adapter shutdown of the
8047 * specified type to the specified adapter as part of the
8048 * adapter reset job.
8049 *
8050 * Return value:
8051 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
8052 **/
8053static int ipr_reset_shutdown_ioa(struct ipr_cmnd *ipr_cmd)
8054{
8055 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8056 enum ipr_shutdown_type shutdown_type = ipr_cmd->u.shutdown_type;
8057 unsigned long timeout;
8058 int rc = IPR_RC_JOB_CONTINUE;
8059
8060 ENTER;
8061 if (shutdown_type != IPR_SHUTDOWN_NONE && !ioa_cfg->ioa_is_dead) {
8062 ipr_cmd->ioarcb.res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
8063 ipr_cmd->ioarcb.cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
8064 ipr_cmd->ioarcb.cmd_pkt.cdb[0] = IPR_IOA_SHUTDOWN;
8065 ipr_cmd->ioarcb.cmd_pkt.cdb[1] = shutdown_type;
8066
8067 if (shutdown_type == IPR_SHUTDOWN_NORMAL)
8068 timeout = IPR_SHUTDOWN_TIMEOUT;
8069 else if (shutdown_type == IPR_SHUTDOWN_PREPARE_FOR_NORMAL)
8070 timeout = IPR_INTERNAL_TIMEOUT;
8071 else if (ioa_cfg->dual_raid && ipr_dual_ioa_raid)
8072 timeout = IPR_DUAL_IOA_ABBR_SHUTDOWN_TO;
 8073 else
 8074 timeout = IPR_ABBREV_SHUTDOWN_TIMEOUT;
8075
8076 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, timeout);
8077
8078 rc = IPR_RC_JOB_RETURN;
8079 ipr_cmd->job_step = ipr_reset_ucode_download;
8080 } else
8081 ipr_cmd->job_step = ipr_reset_alert;
8082
8083 LEAVE;
8084 return rc;
8085}
8086
8087/**
8088 * ipr_reset_ioa_job - Adapter reset job
8089 * @ipr_cmd: ipr command struct
8090 *
8091 * Description: This function is the job router for the adapter reset job.
8092 *
8093 * Return value:
8094 * none
8095 **/
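/*
 * The reset job is a chain of job_step functions.  Each step either
 * returns IPR_RC_JOB_CONTINUE, in which case the loop below invokes
 * the next step (already stored in ipr_cmd->job_step) right away, or
 * queues asynchronous work and returns IPR_RC_JOB_RETURN, in which
 * case the completion path re-enters this router via ipr_cmd->done.
 * When a step fails with a sense key, job_step_failed decides whether
 * the chain may continue or the adapter reset is escalated.
 */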
8096static void ipr_reset_ioa_job(struct ipr_cmnd *ipr_cmd)
8097{
8098 u32 rc, ioasc;
8099 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8100
8101 do {
 8102 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
8103
8104 if (ioa_cfg->reset_cmd != ipr_cmd) {
8105 /*
8106 * We are doing nested adapter resets and this is
8107 * not the current reset job.
8108 */
8109 list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
8110 return;
8111 }
8112
8113 if (IPR_IOASC_SENSE_KEY(ioasc)) {
8114 rc = ipr_cmd->job_step_failed(ipr_cmd);
8115 if (rc == IPR_RC_JOB_RETURN)
8116 return;
8117 }
8118
8119 ipr_reinit_ipr_cmnd(ipr_cmd);
 8120 ipr_cmd->job_step_failed = ipr_reset_cmd_failed;
 8121 rc = ipr_cmd->job_step(ipr_cmd);
 8122 } while (rc == IPR_RC_JOB_CONTINUE);
8123}
8124
8125/**
8126 * _ipr_initiate_ioa_reset - Initiate an adapter reset
8127 * @ioa_cfg: ioa config struct
8128 * @job_step: first job step of reset job
8129 * @shutdown_type: shutdown type
8130 *
8131 * Description: This function will initiate the reset of the given adapter
8132 * starting at the selected job step.
8133 * If the caller needs to wait on the completion of the reset,
8134 * the caller must sleep on the reset_wait_q.
8135 *
8136 * Return value:
8137 * none
8138 **/
8139static void _ipr_initiate_ioa_reset(struct ipr_ioa_cfg *ioa_cfg,
8140 int (*job_step) (struct ipr_cmnd *),
8141 enum ipr_shutdown_type shutdown_type)
8142{
8143 struct ipr_cmnd *ipr_cmd;
8144
8145 ioa_cfg->in_reset_reload = 1;
8146 ioa_cfg->allow_cmds = 0;
8147 scsi_block_requests(ioa_cfg->host);
8148
8149 ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
8150 ioa_cfg->reset_cmd = ipr_cmd;
8151 ipr_cmd->job_step = job_step;
8152 ipr_cmd->u.shutdown_type = shutdown_type;
8153
8154 ipr_reset_ioa_job(ipr_cmd);
8155}
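/*
 * A minimal caller sketch (this mirrors ipr_probe_ioa_part2() later in
 * this file): the host lock must be held around the initiation, and a
 * caller that needs completion sleeps on reset_wait_q:
 *
 *	spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
 *	_ipr_initiate_ioa_reset(ioa_cfg, ipr_reset_enable_ioa,
 *				IPR_SHUTDOWN_NONE);
 *	spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
 *	wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
 */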
8156
8157/**
8158 * ipr_initiate_ioa_reset - Initiate an adapter reset
8159 * @ioa_cfg: ioa config struct
8160 * @shutdown_type: shutdown type
8161 *
8162 * Description: This function will initiate the reset of the given adapter.
8163 * If the caller needs to wait on the completion of the reset,
8164 * the caller must sleep on the reset_wait_q.
8165 *
8166 * Return value:
8167 * none
8168 **/
8169static void ipr_initiate_ioa_reset(struct ipr_ioa_cfg *ioa_cfg,
8170 enum ipr_shutdown_type shutdown_type)
8171{
8172 if (ioa_cfg->ioa_is_dead)
8173 return;
8174
8175 if (ioa_cfg->in_reset_reload) {
8176 if (ioa_cfg->sdt_state == GET_DUMP)
8177 ioa_cfg->sdt_state = WAIT_FOR_DUMP;
8178 else if (ioa_cfg->sdt_state == READ_DUMP)
8179 ioa_cfg->sdt_state = ABORT_DUMP;
8180 }
8181
8182 if (ioa_cfg->reset_retries++ >= IPR_NUM_RESET_RELOAD_RETRIES) {
8183 dev_err(&ioa_cfg->pdev->dev,
8184 "IOA taken offline - error recovery failed\n");
8185
8186 ioa_cfg->reset_retries = 0;
8187 ioa_cfg->ioa_is_dead = 1;
8188
8189 if (ioa_cfg->in_ioa_bringdown) {
8190 ioa_cfg->reset_cmd = NULL;
8191 ioa_cfg->in_reset_reload = 0;
8192 ipr_fail_all_ops(ioa_cfg);
8193 wake_up_all(&ioa_cfg->reset_wait_q);
8194
8195 spin_unlock_irq(ioa_cfg->host->host_lock);
8196 scsi_unblock_requests(ioa_cfg->host);
8197 spin_lock_irq(ioa_cfg->host->host_lock);
8198 return;
8199 } else {
8200 ioa_cfg->in_ioa_bringdown = 1;
8201 shutdown_type = IPR_SHUTDOWN_NONE;
8202 }
8203 }
8204
8205 _ipr_initiate_ioa_reset(ioa_cfg, ipr_reset_shutdown_ioa,
8206 shutdown_type);
8207}
8208
8209/**
8210 * ipr_reset_freeze - Hold off all I/O activity
8211 * @ipr_cmd: ipr command struct
8212 *
8213 * Description: If the PCI slot is frozen, hold off all I/O
8214 * activity; then, as soon as the slot is available again,
8215 * initiate an adapter reset.
 8216 *
 * Return value:
 * IPR_RC_JOB_RETURN
 **/
8217static int ipr_reset_freeze(struct ipr_cmnd *ipr_cmd)
8218{
8219 /* Disallow new interrupts, avoid loop */
8220 ipr_cmd->ioa_cfg->allow_interrupts = 0;
8221 list_add_tail(&ipr_cmd->queue, &ipr_cmd->ioa_cfg->pending_q);
8222 ipr_cmd->done = ipr_reset_ioa_job;
8223 return IPR_RC_JOB_RETURN;
8224}
8225
8226/**
8227 * ipr_pci_frozen - Called when slot has experienced a PCI bus error.
8228 * @pdev: PCI device struct
8229 *
8230 * Description: This routine is called to tell us that the PCI bus
8231 * is down. Can't do anything here, except put the device driver
8232 * into a holding pattern, waiting for the PCI bus to come back.
8233 */
8234static void ipr_pci_frozen(struct pci_dev *pdev)
8235{
8236 unsigned long flags = 0;
8237 struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
8238
8239 spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
8240 _ipr_initiate_ioa_reset(ioa_cfg, ipr_reset_freeze, IPR_SHUTDOWN_NONE);
8241 spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
8242}
8243
8244/**
8245 * ipr_pci_slot_reset - Called when PCI slot has been reset.
8246 * @pdev: PCI device struct
8247 *
8248 * Description: This routine is called by the pci error recovery
8249 * code after the PCI slot has been reset, just before we
8250 * should resume normal operations.
8251 */
8252static pci_ers_result_t ipr_pci_slot_reset(struct pci_dev *pdev)
8253{
8254 unsigned long flags = 0;
8255 struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
8256
8257 spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
8258 if (ioa_cfg->needs_warm_reset)
8259 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
8260 else
8261 _ipr_initiate_ioa_reset(ioa_cfg, ipr_reset_restore_cfg_space,
8262 IPR_SHUTDOWN_NONE);
8263 spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
8264 return PCI_ERS_RESULT_RECOVERED;
8265}
8266
8267/**
8268 * ipr_pci_perm_failure - Called when PCI slot is dead for good.
8269 * @pdev: PCI device struct
8270 *
8271 * Description: This routine is called when the PCI bus has
8272 * permanently failed.
8273 */
8274static void ipr_pci_perm_failure(struct pci_dev *pdev)
8275{
8276 unsigned long flags = 0;
8277 struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
8278
8279 spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
8280 if (ioa_cfg->sdt_state == WAIT_FOR_DUMP)
8281 ioa_cfg->sdt_state = ABORT_DUMP;
8282 ioa_cfg->reset_retries = IPR_NUM_RESET_RELOAD_RETRIES;
8283 ioa_cfg->in_ioa_bringdown = 1;
 8284 ioa_cfg->allow_cmds = 0;
8285 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
8286 spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
8287}
8288
8289/**
8290 * ipr_pci_error_detected - Called when a PCI error is detected.
8291 * @pdev: PCI device struct
8292 * @state: PCI channel state
8293 *
8294 * Description: Called when a PCI error is detected.
8295 *
8296 * Return value:
8297 * PCI_ERS_RESULT_NEED_RESET or PCI_ERS_RESULT_DISCONNECT
8298 */
8299static pci_ers_result_t ipr_pci_error_detected(struct pci_dev *pdev,
8300 pci_channel_state_t state)
8301{
8302 switch (state) {
8303 case pci_channel_io_frozen:
8304 ipr_pci_frozen(pdev);
8305 return PCI_ERS_RESULT_NEED_RESET;
8306 case pci_channel_io_perm_failure:
8307 ipr_pci_perm_failure(pdev);
8308 return PCI_ERS_RESULT_DISCONNECT;
8310 default:
8311 break;
8312 }
8313 return PCI_ERS_RESULT_NEED_RESET;
8314}
8315
8316/**
8317 * ipr_probe_ioa_part2 - Initializes IOAs found in ipr_probe_ioa(..)
8318 * @ioa_cfg: ioa cfg struct
8319 *
 8320 * Description: This is the second phase of adapter initialization.
 8321 * This function takes care of initializing the adapter to the point
 8322 * where it can accept new commands.
 8323 *
8324 * Return value:
 8325 * 0 on success / -EIO on failure
 8326 **/
 8327static int ipr_probe_ioa_part2(struct ipr_ioa_cfg *ioa_cfg)
8328{
8329 int rc = 0;
8330 unsigned long host_lock_flags = 0;
8331
8332 ENTER;
8333 spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags);
8334 dev_dbg(&ioa_cfg->pdev->dev, "ioa_cfg adx: 0x%p\n", ioa_cfg);
8335 if (ioa_cfg->needs_hard_reset) {
8336 ioa_cfg->needs_hard_reset = 0;
8337 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
8338 } else
8339 _ipr_initiate_ioa_reset(ioa_cfg, ipr_reset_enable_ioa,
8340 IPR_SHUTDOWN_NONE);
8341
8342 spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags);
8343 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
8344 spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags);
8345
8346 if (ioa_cfg->ioa_is_dead) {
8347 rc = -EIO;
8348 } else if (ipr_invalid_adapter(ioa_cfg)) {
8349 if (!ipr_testmode)
8350 rc = -EIO;
8351
8352 dev_err(&ioa_cfg->pdev->dev,
8353 "Adapter not supported in this hardware configuration.\n");
8354 }
8355
8356 spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags);
8357
8358 LEAVE;
8359 return rc;
8360}
8361
8362/**
8363 * ipr_free_cmd_blks - Frees command blocks allocated for an adapter
8364 * @ioa_cfg: ioa config struct
8365 *
8366 * Return value:
8367 * none
8368 **/
8369static void ipr_free_cmd_blks(struct ipr_ioa_cfg *ioa_cfg)
8370{
8371 int i;
8372
8373 for (i = 0; i < IPR_NUM_CMD_BLKS; i++) {
8374 if (ioa_cfg->ipr_cmnd_list[i])
8375 pci_pool_free(ioa_cfg->ipr_cmd_pool,
8376 ioa_cfg->ipr_cmnd_list[i],
8377 ioa_cfg->ipr_cmnd_list_dma[i]);
8378
8379 ioa_cfg->ipr_cmnd_list[i] = NULL;
8380 }
8381
8382 if (ioa_cfg->ipr_cmd_pool)
 8383 pci_pool_destroy(ioa_cfg->ipr_cmd_pool);
 8384
8385 kfree(ioa_cfg->ipr_cmnd_list);
8386 kfree(ioa_cfg->ipr_cmnd_list_dma);
8387 ioa_cfg->ipr_cmnd_list = NULL;
8388 ioa_cfg->ipr_cmnd_list_dma = NULL;
8389 ioa_cfg->ipr_cmd_pool = NULL;
8390}
8391
8392/**
8393 * ipr_free_mem - Frees memory allocated for an adapter
8394 * @ioa_cfg: ioa cfg struct
8395 *
8396 * Return value:
8397 * nothing
8398 **/
8399static void ipr_free_mem(struct ipr_ioa_cfg *ioa_cfg)
8400{
8401 int i;
8402
8403 kfree(ioa_cfg->res_entries);
8404 pci_free_consistent(ioa_cfg->pdev, sizeof(struct ipr_misc_cbs),
8405 ioa_cfg->vpd_cbs, ioa_cfg->vpd_cbs_dma);
8406 ipr_free_cmd_blks(ioa_cfg);
8407 pci_free_consistent(ioa_cfg->pdev, sizeof(u32) * IPR_NUM_CMD_BLKS,
8408 ioa_cfg->host_rrq, ioa_cfg->host_rrq_dma);
8409 pci_free_consistent(ioa_cfg->pdev, ioa_cfg->cfg_table_size,
8410 ioa_cfg->u.cfg_table,
8411 ioa_cfg->cfg_table_dma);
8412
8413 for (i = 0; i < IPR_NUM_HCAMS; i++) {
8414 pci_free_consistent(ioa_cfg->pdev,
8415 sizeof(struct ipr_hostrcb),
8416 ioa_cfg->hostrcb[i],
8417 ioa_cfg->hostrcb_dma[i]);
8418 }
8419
8420 ipr_free_dump(ioa_cfg);
8421 kfree(ioa_cfg->trace);
8422}
8423
8424/**
8425 * ipr_free_all_resources - Free all allocated resources for an adapter.
 8426 * @ioa_cfg: ioa config struct
8427 *
8428 * This function frees all allocated resources for the
8429 * specified adapter.
8430 *
8431 * Return value:
8432 * none
8433 **/
8434static void ipr_free_all_resources(struct ipr_ioa_cfg *ioa_cfg)
8435{
8436 struct pci_dev *pdev = ioa_cfg->pdev;
8437
8438 ENTER;
8439 free_irq(pdev->irq, ioa_cfg);
 8440 pci_disable_msi(pdev);
8441 iounmap(ioa_cfg->hdw_dma_regs);
8442 pci_release_regions(pdev);
8443 ipr_free_mem(ioa_cfg);
8444 scsi_host_put(ioa_cfg->host);
8445 pci_disable_device(pdev);
8446 LEAVE;
8447}
8448
8449/**
8450 * ipr_alloc_cmd_blks - Allocate command blocks for an adapter
8451 * @ioa_cfg: ioa config struct
8452 *
8453 * Return value:
8454 * 0 on success / -ENOMEM on allocation failure
8455 **/
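/*
 * Each command block is a single DMA-able allocation, 512-byte
 * aligned, and the IOARCB inside it is seeded with bus addresses
 * computed via offsetof() into that same allocation, roughly:
 *
 *	dma_addr --+-- ioarcb       (handed to the adapter)
 *	           +-- i.ioadl[64]  (scatter/gather descriptors)
 *	           +-- s.ioasa[64]  (status area written by the IOA)
 *	           +-- sense_buffer
 */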
 8456static int ipr_alloc_cmd_blks(struct ipr_ioa_cfg *ioa_cfg)
8457{
8458 struct ipr_cmnd *ipr_cmd;
8459 struct ipr_ioarcb *ioarcb;
8460 dma_addr_t dma_addr;
8461 int i;
8462
8463 ioa_cfg->ipr_cmd_pool = pci_pool_create(IPR_NAME, ioa_cfg->pdev,
8464 sizeof(struct ipr_cmnd), 512, 0);
8465
8466 if (!ioa_cfg->ipr_cmd_pool)
8467 return -ENOMEM;
8468
8469 ioa_cfg->ipr_cmnd_list = kcalloc(IPR_NUM_CMD_BLKS, sizeof(struct ipr_cmnd *), GFP_KERNEL);
8470 ioa_cfg->ipr_cmnd_list_dma = kcalloc(IPR_NUM_CMD_BLKS, sizeof(dma_addr_t), GFP_KERNEL);
8471
8472 if (!ioa_cfg->ipr_cmnd_list || !ioa_cfg->ipr_cmnd_list_dma) {
8473 ipr_free_cmd_blks(ioa_cfg);
8474 return -ENOMEM;
8475 }
8476
 8477 for (i = 0; i < IPR_NUM_CMD_BLKS; i++) {
 8478 ipr_cmd = pci_pool_alloc(ioa_cfg->ipr_cmd_pool, GFP_KERNEL, &dma_addr);
8479
8480 if (!ipr_cmd) {
8481 ipr_free_cmd_blks(ioa_cfg);
8482 return -ENOMEM;
8483 }
8484
8485 memset(ipr_cmd, 0, sizeof(*ipr_cmd));
8486 ioa_cfg->ipr_cmnd_list[i] = ipr_cmd;
8487 ioa_cfg->ipr_cmnd_list_dma[i] = dma_addr;
8488
8489 ioarcb = &ipr_cmd->ioarcb;
8490 ipr_cmd->dma_addr = dma_addr;
8491 if (ioa_cfg->sis64)
8492 ioarcb->a.ioarcb_host_pci_addr64 = cpu_to_be64(dma_addr);
8493 else
8494 ioarcb->a.ioarcb_host_pci_addr = cpu_to_be32(dma_addr);
8495
 8496 ioarcb->host_response_handle = cpu_to_be32(i << 2);
8497 if (ioa_cfg->sis64) {
8498 ioarcb->u.sis64_addr_data.data_ioadl_addr =
8499 cpu_to_be64(dma_addr + offsetof(struct ipr_cmnd, i.ioadl64));
8500 ioarcb->u.sis64_addr_data.ioasa_host_pci_addr =
96d21f00 8501 cpu_to_be64(dma_addr + offsetof(struct ipr_cmnd, s.ioasa64));
a32c055f
WB
8502 } else {
8503 ioarcb->write_ioadl_addr =
8504 cpu_to_be32(dma_addr + offsetof(struct ipr_cmnd, i.ioadl));
8505 ioarcb->read_ioadl_addr = ioarcb->write_ioadl_addr;
8506 ioarcb->ioasa_host_pci_addr =
96d21f00 8507 cpu_to_be32(dma_addr + offsetof(struct ipr_cmnd, s.ioasa));
a32c055f 8508 }
1da177e4
LT
8509 ioarcb->ioasa_len = cpu_to_be16(sizeof(struct ipr_ioasa));
8510 ipr_cmd->cmd_index = i;
8511 ipr_cmd->ioa_cfg = ioa_cfg;
8512 ipr_cmd->sense_buffer_dma = dma_addr +
8513 offsetof(struct ipr_cmnd, sense_buffer);
8514
8515 list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
8516 }
8517
8518 return 0;
8519}
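
/*
 * Rough sketch of how a single command block is wired up above (the
 * offsets come from offsetof(); the field order shown is illustrative,
 * not a statement about the actual layout of struct ipr_cmnd):
 *
 *   dma_addr -> ioarcb               (handed to the IOA via ioarcb_host_pci_addr[64])
 *               i.ioadl / i.ioadl64  (scatter/gather list, data_ioadl_addr)
 *               s.ioasa / s.ioasa64  (status area, ioasa_host_pci_addr)
 *               sense_buffer         (sense_buffer_dma)
 *
 * Everything the adapter DMAs to or from lives inside the one pool
 * allocation, so a single dma_addr plus offsets describes it all.
 */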

/**
 * ipr_alloc_mem - Allocate memory for an adapter
 * @ioa_cfg:	ioa config struct
 *
 * Return value:
 * 	0 on success / non-zero for error
 **/
static int ipr_alloc_mem(struct ipr_ioa_cfg *ioa_cfg)
{
	struct pci_dev *pdev = ioa_cfg->pdev;
	int i, rc = -ENOMEM;

	ENTER;
	ioa_cfg->res_entries = kzalloc(sizeof(struct ipr_resource_entry) *
				       ioa_cfg->max_devs_supported, GFP_KERNEL);

	if (!ioa_cfg->res_entries)
		goto out;

	if (ioa_cfg->sis64) {
		ioa_cfg->target_ids = kzalloc(sizeof(unsigned long) *
					      BITS_TO_LONGS(ioa_cfg->max_devs_supported), GFP_KERNEL);
		ioa_cfg->array_ids = kzalloc(sizeof(unsigned long) *
					     BITS_TO_LONGS(ioa_cfg->max_devs_supported), GFP_KERNEL);
		ioa_cfg->vset_ids = kzalloc(sizeof(unsigned long) *
					    BITS_TO_LONGS(ioa_cfg->max_devs_supported), GFP_KERNEL);

		if (!ioa_cfg->target_ids || !ioa_cfg->array_ids
		    || !ioa_cfg->vset_ids)
			goto out_free_res_entries;
	}

	for (i = 0; i < ioa_cfg->max_devs_supported; i++) {
		list_add_tail(&ioa_cfg->res_entries[i].queue, &ioa_cfg->free_res_q);
		ioa_cfg->res_entries[i].ioa_cfg = ioa_cfg;
	}

	ioa_cfg->vpd_cbs = pci_alloc_consistent(ioa_cfg->pdev,
						sizeof(struct ipr_misc_cbs),
						&ioa_cfg->vpd_cbs_dma);

	if (!ioa_cfg->vpd_cbs)
		goto out_free_res_entries;

	if (ipr_alloc_cmd_blks(ioa_cfg))
		goto out_free_vpd_cbs;

	ioa_cfg->host_rrq = pci_alloc_consistent(ioa_cfg->pdev,
						 sizeof(u32) * IPR_NUM_CMD_BLKS,
						 &ioa_cfg->host_rrq_dma);

	if (!ioa_cfg->host_rrq)
		goto out_ipr_free_cmd_blocks;

	ioa_cfg->u.cfg_table = pci_alloc_consistent(ioa_cfg->pdev,
						    ioa_cfg->cfg_table_size,
						    &ioa_cfg->cfg_table_dma);

	if (!ioa_cfg->u.cfg_table)
		goto out_free_host_rrq;

	for (i = 0; i < IPR_NUM_HCAMS; i++) {
		ioa_cfg->hostrcb[i] = pci_alloc_consistent(ioa_cfg->pdev,
							   sizeof(struct ipr_hostrcb),
							   &ioa_cfg->hostrcb_dma[i]);

		if (!ioa_cfg->hostrcb[i])
			goto out_free_hostrcb_dma;

		ioa_cfg->hostrcb[i]->hostrcb_dma =
			ioa_cfg->hostrcb_dma[i] + offsetof(struct ipr_hostrcb, hcam);
		ioa_cfg->hostrcb[i]->ioa_cfg = ioa_cfg;
		list_add_tail(&ioa_cfg->hostrcb[i]->queue, &ioa_cfg->hostrcb_free_q);
	}

	ioa_cfg->trace = kzalloc(sizeof(struct ipr_trace_entry) *
				 IPR_NUM_TRACE_ENTRIES, GFP_KERNEL);

	if (!ioa_cfg->trace)
		goto out_free_hostrcb_dma;

	rc = 0;
out:
	LEAVE;
	return rc;

out_free_hostrcb_dma:
	while (i-- > 0) {
		pci_free_consistent(pdev, sizeof(struct ipr_hostrcb),
				    ioa_cfg->hostrcb[i],
				    ioa_cfg->hostrcb_dma[i]);
	}
	pci_free_consistent(pdev, ioa_cfg->cfg_table_size,
			    ioa_cfg->u.cfg_table,
			    ioa_cfg->cfg_table_dma);
out_free_host_rrq:
	pci_free_consistent(pdev, sizeof(u32) * IPR_NUM_CMD_BLKS,
			    ioa_cfg->host_rrq, ioa_cfg->host_rrq_dma);
out_ipr_free_cmd_blocks:
	ipr_free_cmd_blks(ioa_cfg);
out_free_vpd_cbs:
	pci_free_consistent(pdev, sizeof(struct ipr_misc_cbs),
			    ioa_cfg->vpd_cbs, ioa_cfg->vpd_cbs_dma);
out_free_res_entries:
	kfree(ioa_cfg->res_entries);
	kfree(ioa_cfg->target_ids);
	kfree(ioa_cfg->array_ids);
	kfree(ioa_cfg->vset_ids);
	goto out;
}
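
/*
 * The error labels above unwind in the reverse order of allocation, so
 * each goto target frees exactly what was successfully set up before
 * the failing step. The sis64-only ID bitmaps are released
 * unconditionally, which is safe since kfree(NULL) is a no-op.
 */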

/**
 * ipr_initialize_bus_attr - Initialize SCSI bus attributes to default values
 * @ioa_cfg:	ioa config struct
 *
 * Return value:
 * 	none
 **/
static void ipr_initialize_bus_attr(struct ipr_ioa_cfg *ioa_cfg)
{
	int i;

	for (i = 0; i < IPR_MAX_NUM_BUSES; i++) {
		ioa_cfg->bus_attr[i].bus = i;
		ioa_cfg->bus_attr[i].qas_enabled = 0;
		ioa_cfg->bus_attr[i].bus_width = IPR_DEFAULT_BUS_WIDTH;
		if (ipr_max_speed < ARRAY_SIZE(ipr_max_bus_speeds))
			ioa_cfg->bus_attr[i].max_xfer_rate = ipr_max_bus_speeds[ipr_max_speed];
		else
			ioa_cfg->bus_attr[i].max_xfer_rate = IPR_U160_SCSI_RATE;
	}
}
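
/*
 * The ipr_max_speed module parameter is an index into the
 * ipr_max_bus_speeds[] table; any out-of-range value falls back to
 * IPR_U160_SCSI_RATE rather than indexing past the end of the table.
 */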

/**
 * ipr_init_ioa_cfg - Initialize IOA config struct
 * @ioa_cfg:	ioa config struct
 * @host:	scsi host struct
 * @pdev:	PCI dev struct
 *
 * Return value:
 * 	none
 **/
static void ipr_init_ioa_cfg(struct ipr_ioa_cfg *ioa_cfg,
			     struct Scsi_Host *host, struct pci_dev *pdev)
{
	const struct ipr_interrupt_offsets *p;
	struct ipr_interrupts *t;
	void __iomem *base;

	ioa_cfg->host = host;
	ioa_cfg->pdev = pdev;
	ioa_cfg->log_level = ipr_log_level;
	ioa_cfg->doorbell = IPR_DOORBELL;
	sprintf(ioa_cfg->eye_catcher, IPR_EYECATCHER);
	sprintf(ioa_cfg->trace_start, IPR_TRACE_START_LABEL);
	sprintf(ioa_cfg->ipr_free_label, IPR_FREEQ_LABEL);
	sprintf(ioa_cfg->ipr_pending_label, IPR_PENDQ_LABEL);
	sprintf(ioa_cfg->cfg_table_start, IPR_CFG_TBL_START);
	sprintf(ioa_cfg->resource_table_label, IPR_RES_TABLE_LABEL);
	sprintf(ioa_cfg->ipr_hcam_label, IPR_HCAM_LABEL);
	sprintf(ioa_cfg->ipr_cmd_label, IPR_CMD_LABEL);

	INIT_LIST_HEAD(&ioa_cfg->free_q);
	INIT_LIST_HEAD(&ioa_cfg->pending_q);
	INIT_LIST_HEAD(&ioa_cfg->hostrcb_free_q);
	INIT_LIST_HEAD(&ioa_cfg->hostrcb_pending_q);
	INIT_LIST_HEAD(&ioa_cfg->free_res_q);
	INIT_LIST_HEAD(&ioa_cfg->used_res_q);
	INIT_WORK(&ioa_cfg->work_q, ipr_worker_thread);
	init_waitqueue_head(&ioa_cfg->reset_wait_q);
	init_waitqueue_head(&ioa_cfg->msi_wait_q);
	ioa_cfg->sdt_state = INACTIVE;

	ipr_initialize_bus_attr(ioa_cfg);
	ioa_cfg->max_devs_supported = ipr_max_devs;

	if (ioa_cfg->sis64) {
		host->max_id = IPR_MAX_SIS64_TARGETS_PER_BUS;
		host->max_lun = IPR_MAX_SIS64_LUNS_PER_TARGET;
		if (ipr_max_devs > IPR_MAX_SIS64_DEVS)
			ioa_cfg->max_devs_supported = IPR_MAX_SIS64_DEVS;
	} else {
		host->max_id = IPR_MAX_NUM_TARGETS_PER_BUS;
		host->max_lun = IPR_MAX_NUM_LUNS_PER_TARGET;
		if (ipr_max_devs > IPR_MAX_PHYSICAL_DEVS)
			ioa_cfg->max_devs_supported = IPR_MAX_PHYSICAL_DEVS;
	}
	host->max_channel = IPR_MAX_BUS_TO_SCAN;
	host->unique_id = host->host_no;
	host->max_cmd_len = IPR_MAX_CDB_LEN;
	host->can_queue = ioa_cfg->max_cmds;
	pci_set_drvdata(pdev, ioa_cfg);

	p = &ioa_cfg->chip_cfg->regs;
	t = &ioa_cfg->regs;
	base = ioa_cfg->hdw_dma_regs;

	t->set_interrupt_mask_reg = base + p->set_interrupt_mask_reg;
	t->clr_interrupt_mask_reg = base + p->clr_interrupt_mask_reg;
	t->clr_interrupt_mask_reg32 = base + p->clr_interrupt_mask_reg32;
	t->sense_interrupt_mask_reg = base + p->sense_interrupt_mask_reg;
	t->sense_interrupt_mask_reg32 = base + p->sense_interrupt_mask_reg32;
	t->clr_interrupt_reg = base + p->clr_interrupt_reg;
	t->clr_interrupt_reg32 = base + p->clr_interrupt_reg32;
	t->sense_interrupt_reg = base + p->sense_interrupt_reg;
	t->sense_interrupt_reg32 = base + p->sense_interrupt_reg32;
	t->ioarrin_reg = base + p->ioarrin_reg;
	t->sense_uproc_interrupt_reg = base + p->sense_uproc_interrupt_reg;
	t->sense_uproc_interrupt_reg32 = base + p->sense_uproc_interrupt_reg32;
	t->set_uproc_interrupt_reg = base + p->set_uproc_interrupt_reg;
	t->set_uproc_interrupt_reg32 = base + p->set_uproc_interrupt_reg32;
	t->clr_uproc_interrupt_reg = base + p->clr_uproc_interrupt_reg;
	t->clr_uproc_interrupt_reg32 = base + p->clr_uproc_interrupt_reg32;

	if (ioa_cfg->sis64) {
		t->init_feedback_reg = base + p->init_feedback_reg;
		t->dump_addr_reg = base + p->dump_addr_reg;
		t->dump_data_reg = base + p->dump_data_reg;
		t->endian_swap_reg = base + p->endian_swap_reg;
	}
}
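
/*
 * Register mapping sketch: the per-chip ipr_interrupt_offsets table
 * supplies byte offsets from BAR 0, and each register pointer is
 * simply "base + offset", e.g.:
 *
 *   t->ioarrin_reg = base + p->ioarrin_reg;
 *
 * The init feedback, dump, and endian swap registers are only mapped
 * for SIS-64 adapters.
 */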

/**
 * ipr_get_chip_info - Find adapter chip information
 * @dev_id:	PCI device id struct
 *
 * Return value:
 * 	ptr to chip information on success / NULL on failure
 **/
static const struct ipr_chip_t *
ipr_get_chip_info(const struct pci_device_id *dev_id)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(ipr_chip); i++)
		if (ipr_chip[i].vendor == dev_id->vendor &&
		    ipr_chip[i].device == dev_id->device)
			return &ipr_chip[i];
	return NULL;
}

/**
 * ipr_test_intr - Handle the interrupt generated in ipr_test_msi().
 * @irq:	interrupt number
 * @devp:	pointer to the ioa config struct
 *
 * Description: Simply set the msi_received flag to 1 indicating that
 * Message Signaled Interrupts are supported.
 *
 * Return value:
 * 	IRQ_HANDLED
 **/
static irqreturn_t ipr_test_intr(int irq, void *devp)
{
	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)devp;
	unsigned long lock_flags = 0;
	irqreturn_t rc = IRQ_HANDLED;

	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);

	ioa_cfg->msi_received = 1;
	wake_up(&ioa_cfg->msi_wait_q);

	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
	return rc;
}

/**
 * ipr_test_msi - Test for Message Signaled Interrupt (MSI) support.
 * @ioa_cfg:	ioa config struct
 * @pdev:	PCI device struct
 *
 * Description: The return value from pci_enable_msi() can not always be
 * trusted. This routine sets up and initiates a test interrupt to determine
 * if the interrupt is received via the ipr_test_intr() service routine.
 * If the test fails, the driver will fall back to LSI.
 *
 * Return value:
 * 	0 on success / non-zero on failure
 **/
static int ipr_test_msi(struct ipr_ioa_cfg *ioa_cfg, struct pci_dev *pdev)
{
	int rc;
	volatile u32 int_reg;
	unsigned long lock_flags = 0;

	ENTER;

	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
	init_waitqueue_head(&ioa_cfg->msi_wait_q);
	ioa_cfg->msi_received = 0;
	ipr_mask_and_clear_interrupts(ioa_cfg, ~IPR_PCII_IOA_TRANS_TO_OPER);
	writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE, ioa_cfg->regs.clr_interrupt_mask_reg32);
	int_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);

	rc = request_irq(pdev->irq, ipr_test_intr, 0, IPR_NAME, ioa_cfg);
	if (rc) {
		dev_err(&pdev->dev, "Can not assign irq %d\n", pdev->irq);
		return rc;
	} else if (ipr_debug)
		dev_info(&pdev->dev, "IRQ assigned: %d\n", pdev->irq);

	writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE, ioa_cfg->regs.sense_interrupt_reg32);
	int_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
	wait_event_timeout(ioa_cfg->msi_wait_q, ioa_cfg->msi_received, HZ);
	ipr_mask_and_clear_interrupts(ioa_cfg, ~IPR_PCII_IOA_TRANS_TO_OPER);

	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
	if (!ioa_cfg->msi_received) {
		/* MSI test failed */
		dev_info(&pdev->dev, "MSI test failed. Falling back to LSI.\n");
		rc = -EOPNOTSUPP;
	} else if (ipr_debug)
		dev_info(&pdev->dev, "MSI test succeeded.\n");

	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);

	free_irq(pdev->irq, ioa_cfg);

	LEAVE;

	return rc;
}
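
/*
 * The MSI test above is a small handshake: unmask only the IO debug
 * acknowledge condition, hook ipr_test_intr() up as the handler, write
 * IPR_PCII_IO_DEBUG_ACKNOWLEDGE to make the adapter raise an
 * interrupt, then wait (up to one second) for msi_received to be set.
 * Only a vector that actually fires is trusted; a clean return from
 * pci_enable_msi() alone is not.
 */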

/**
 * ipr_probe_ioa - Allocates memory and does first stage of initialization
 * @pdev:	PCI device struct
 * @dev_id:	PCI device id struct
 *
 * Return value:
 * 	0 on success / non-zero on failure
 **/
static int ipr_probe_ioa(struct pci_dev *pdev,
			 const struct pci_device_id *dev_id)
{
	struct ipr_ioa_cfg *ioa_cfg;
	struct Scsi_Host *host;
	unsigned long ipr_regs_pci;
	void __iomem *ipr_regs;
	int rc = PCIBIOS_SUCCESSFUL;
	volatile u32 mask, uproc, interrupts;

	ENTER;

	if ((rc = pci_enable_device(pdev))) {
		dev_err(&pdev->dev, "Cannot enable adapter\n");
		goto out;
	}

	dev_info(&pdev->dev, "Found IOA with IRQ: %d\n", pdev->irq);

	host = scsi_host_alloc(&driver_template, sizeof(*ioa_cfg));

	if (!host) {
		dev_err(&pdev->dev, "call to scsi_host_alloc failed!\n");
		rc = -ENOMEM;
		goto out_disable;
	}

	ioa_cfg = (struct ipr_ioa_cfg *)host->hostdata;
	memset(ioa_cfg, 0, sizeof(struct ipr_ioa_cfg));
	ata_host_init(&ioa_cfg->ata_host, &pdev->dev, &ipr_sata_ops);

	ioa_cfg->ipr_chip = ipr_get_chip_info(dev_id);

	if (!ioa_cfg->ipr_chip) {
		dev_err(&pdev->dev, "Unknown adapter chipset 0x%04X 0x%04X\n",
			dev_id->vendor, dev_id->device);
		rc = -ENODEV;	/* don't return success for an unknown chipset */
		goto out_scsi_host_put;
	}

	/* set SIS 32 or SIS 64 */
	ioa_cfg->sis64 = ioa_cfg->ipr_chip->sis_type == IPR_SIS64 ? 1 : 0;
	ioa_cfg->chip_cfg = ioa_cfg->ipr_chip->cfg;
	ioa_cfg->clear_isr = ioa_cfg->chip_cfg->clear_isr;
	ioa_cfg->max_cmds = ioa_cfg->chip_cfg->max_cmds;

	if (ipr_transop_timeout)
		ioa_cfg->transop_timeout = ipr_transop_timeout;
	else if (dev_id->driver_data & IPR_USE_LONG_TRANSOP_TIMEOUT)
		ioa_cfg->transop_timeout = IPR_LONG_OPERATIONAL_TIMEOUT;
	else
		ioa_cfg->transop_timeout = IPR_OPERATIONAL_TIMEOUT;

	ioa_cfg->revid = pdev->revision;

	ipr_regs_pci = pci_resource_start(pdev, 0);

	rc = pci_request_regions(pdev, IPR_NAME);
	if (rc < 0) {
		dev_err(&pdev->dev,
			"Couldn't register memory range of registers\n");
		goto out_scsi_host_put;
	}

	ipr_regs = pci_ioremap_bar(pdev, 0);

	if (!ipr_regs) {
		dev_err(&pdev->dev,
			"Couldn't map memory range of registers\n");
		rc = -ENOMEM;
		goto out_release_regions;
	}

	ioa_cfg->hdw_dma_regs = ipr_regs;
	ioa_cfg->hdw_dma_regs_pci = ipr_regs_pci;
	ioa_cfg->ioa_mailbox = ioa_cfg->chip_cfg->mailbox + ipr_regs;

	ipr_init_ioa_cfg(ioa_cfg, host, pdev);

	pci_set_master(pdev);

	if (ioa_cfg->sis64) {
		rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
		if (rc < 0) {
			dev_dbg(&pdev->dev, "Failed to set 64 bit PCI DMA mask\n");
			rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
		}

	} else
		rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));

	if (rc < 0) {
		dev_err(&pdev->dev, "Failed to set PCI DMA mask\n");
		goto cleanup_nomem;
	}

	rc = pci_write_config_byte(pdev, PCI_CACHE_LINE_SIZE,
				   ioa_cfg->chip_cfg->cache_line_size);

	if (rc != PCIBIOS_SUCCESSFUL) {
		dev_err(&pdev->dev, "Write of cache line size failed\n");
		rc = -EIO;
		goto cleanup_nomem;
	}

	/* Enable MSI style interrupts if they are supported. */
	if (ioa_cfg->ipr_chip->intr_type == IPR_USE_MSI && !pci_enable_msi(pdev)) {
		rc = ipr_test_msi(ioa_cfg, pdev);
		if (rc == -EOPNOTSUPP)
			pci_disable_msi(pdev);
		else if (rc)
			goto out_msi_disable;
		else
			dev_info(&pdev->dev, "MSI enabled with IRQ: %d\n", pdev->irq);
	} else if (ipr_debug)
		dev_info(&pdev->dev, "Cannot enable MSI.\n");

	/* Save away PCI config space for use following IOA reset */
	rc = pci_save_state(pdev);

	if (rc != PCIBIOS_SUCCESSFUL) {
		dev_err(&pdev->dev, "Failed to save PCI config space\n");
		rc = -EIO;
		goto out_msi_disable;
	}

	if ((rc = ipr_save_pcix_cmd_reg(ioa_cfg)))
		goto out_msi_disable;

	if ((rc = ipr_set_pcix_cmd_reg(ioa_cfg)))
		goto out_msi_disable;

	if (ioa_cfg->sis64)
		ioa_cfg->cfg_table_size = (sizeof(struct ipr_config_table_hdr64)
				+ ((sizeof(struct ipr_config_table_entry64)
				* ioa_cfg->max_devs_supported)));
	else
		ioa_cfg->cfg_table_size = (sizeof(struct ipr_config_table_hdr)
				+ ((sizeof(struct ipr_config_table_entry)
				* ioa_cfg->max_devs_supported)));

	rc = ipr_alloc_mem(ioa_cfg);
	if (rc < 0) {
		dev_err(&pdev->dev,
			"Couldn't allocate enough memory for device driver!\n");
		goto out_msi_disable;
	}

	/*
	 * If HRRQ updated interrupt is not masked, or reset alert is set,
	 * the card is in an unknown state and needs a hard reset
	 */
	mask = readl(ioa_cfg->regs.sense_interrupt_mask_reg32);
	interrupts = readl(ioa_cfg->regs.sense_interrupt_reg32);
	uproc = readl(ioa_cfg->regs.sense_uproc_interrupt_reg32);
	if ((mask & IPR_PCII_HRRQ_UPDATED) == 0 || (uproc & IPR_UPROCI_RESET_ALERT))
		ioa_cfg->needs_hard_reset = 1;
	if ((interrupts & IPR_PCII_ERROR_INTERRUPTS) || reset_devices)
		ioa_cfg->needs_hard_reset = 1;
	if (interrupts & IPR_PCII_IOA_UNIT_CHECKED)
		ioa_cfg->ioa_unit_checked = 1;

	ipr_mask_and_clear_interrupts(ioa_cfg, ~IPR_PCII_IOA_TRANS_TO_OPER);
	rc = request_irq(pdev->irq, ipr_isr,
			 ioa_cfg->msi_received ? 0 : IRQF_SHARED,
			 IPR_NAME, ioa_cfg);

	if (rc) {
		dev_err(&pdev->dev, "Couldn't register IRQ %d! rc=%d\n",
			pdev->irq, rc);
		goto cleanup_nolog;
	}

	if ((dev_id->driver_data & IPR_USE_PCI_WARM_RESET) ||
	    (dev_id->device == PCI_DEVICE_ID_IBM_OBSIDIAN_E && !ioa_cfg->revid)) {
		ioa_cfg->needs_warm_reset = 1;
		ioa_cfg->reset = ipr_reset_slot_reset;
	} else
		ioa_cfg->reset = ipr_reset_start_bist;

	spin_lock(&ipr_driver_lock);
	list_add_tail(&ioa_cfg->queue, &ipr_ioa_head);
	spin_unlock(&ipr_driver_lock);

	LEAVE;
out:
	return rc;

cleanup_nolog:
	ipr_free_mem(ioa_cfg);
out_msi_disable:
	pci_disable_msi(pdev);
cleanup_nomem:
	iounmap(ipr_regs);
out_release_regions:
	pci_release_regions(pdev);
out_scsi_host_put:
	scsi_host_put(host);
out_disable:
	pci_disable_device(pdev);
	goto out;
}

/**
 * ipr_scan_vsets - Scans for VSET devices
 * @ioa_cfg:	ioa config struct
 *
 * Description: Since the VSET resources do not follow SAM in that we can have
 * sparse LUNs with no LUN 0, we have to scan for these ourselves.
 *
 * Return value:
 * 	none
 **/
static void ipr_scan_vsets(struct ipr_ioa_cfg *ioa_cfg)
{
	int target, lun;

	for (target = 0; target < IPR_MAX_NUM_TARGETS_PER_BUS; target++)
		for (lun = 0; lun < IPR_MAX_NUM_VSET_LUNS_PER_TARGET; lun++)
			scsi_add_device(ioa_cfg->host, IPR_VSET_BUS, target, lun);
}
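
/*
 * Example: a VSET may expose, say, LUN 5 on target 2 with no LUN 0
 * present, which the generic SCSI midlayer scan would never find; the
 * nested loop above therefore probes every (target, lun) pair on
 * IPR_VSET_BUS explicitly. scsi_add_device() simply fails for
 * addresses with no device behind them, so the over-scan is harmless.
 */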

/**
 * ipr_initiate_ioa_bringdown - Bring down an adapter
 * @ioa_cfg:		ioa config struct
 * @shutdown_type:	shutdown type
 *
 * Description: This function will initiate bringing down the adapter.
 * This consists of issuing an IOA shutdown to the adapter
 * to flush the cache, and running BIST.
 * If the caller needs to wait on the completion of the reset,
 * the caller must sleep on the reset_wait_q.
 *
 * Return value:
 * 	none
 **/
static void ipr_initiate_ioa_bringdown(struct ipr_ioa_cfg *ioa_cfg,
				       enum ipr_shutdown_type shutdown_type)
{
	ENTER;
	if (ioa_cfg->sdt_state == WAIT_FOR_DUMP)
		ioa_cfg->sdt_state = ABORT_DUMP;
	ioa_cfg->reset_retries = 0;
	ioa_cfg->in_ioa_bringdown = 1;
	ipr_initiate_ioa_reset(ioa_cfg, shutdown_type);
	LEAVE;
}
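
/*
 * Typical caller pattern (as used by __ipr_remove() and ipr_shutdown()
 * below): invoke this with the host lock held, drop the lock, then
 * sleep on reset_wait_q until in_reset_reload clears:
 *
 *   ipr_initiate_ioa_bringdown(ioa_cfg, IPR_SHUTDOWN_NORMAL);
 *   spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
 *   wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
 */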

/**
 * __ipr_remove - Remove a single adapter
 * @pdev:	pci device struct
 *
 * Adapter hot plug remove entry point.
 *
 * Return value:
 * 	none
 **/
static void __ipr_remove(struct pci_dev *pdev)
{
	unsigned long host_lock_flags = 0;
	struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
	ENTER;

	spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags);
	while (ioa_cfg->in_reset_reload) {
		spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags);
		wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
		spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags);
	}

	ipr_initiate_ioa_bringdown(ioa_cfg, IPR_SHUTDOWN_NORMAL);

	spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags);
	wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
	flush_work(&ioa_cfg->work_q);
	spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags);

	spin_lock(&ipr_driver_lock);
	list_del(&ioa_cfg->queue);
	spin_unlock(&ipr_driver_lock);

	if (ioa_cfg->sdt_state == ABORT_DUMP)
		ioa_cfg->sdt_state = WAIT_FOR_DUMP;
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags);

	ipr_free_all_resources(ioa_cfg);

	LEAVE;
}

/**
 * ipr_remove - IOA hot plug remove entry point
 * @pdev:	pci device struct
 *
 * Adapter hot plug remove entry point.
 *
 * Return value:
 * 	none
 **/
static void ipr_remove(struct pci_dev *pdev)
{
	struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);

	ENTER;

	ipr_remove_trace_file(&ioa_cfg->host->shost_dev.kobj,
			      &ipr_trace_attr);
	ipr_remove_dump_file(&ioa_cfg->host->shost_dev.kobj,
			     &ipr_dump_attr);
	scsi_remove_host(ioa_cfg->host);

	__ipr_remove(pdev);

	LEAVE;
}

/**
 * ipr_probe - Adapter hot plug add entry point
 * @pdev:	pci device struct
 * @dev_id:	pci device id struct
 *
 * Return value:
 * 	0 on success / non-zero on failure
 **/
static int ipr_probe(struct pci_dev *pdev, const struct pci_device_id *dev_id)
{
	struct ipr_ioa_cfg *ioa_cfg;
	int rc;

	rc = ipr_probe_ioa(pdev, dev_id);

	if (rc)
		return rc;

	ioa_cfg = pci_get_drvdata(pdev);
	rc = ipr_probe_ioa_part2(ioa_cfg);

	if (rc) {
		__ipr_remove(pdev);
		return rc;
	}

	rc = scsi_add_host(ioa_cfg->host, &pdev->dev);

	if (rc) {
		__ipr_remove(pdev);
		return rc;
	}

	rc = ipr_create_trace_file(&ioa_cfg->host->shost_dev.kobj,
				   &ipr_trace_attr);

	if (rc) {
		scsi_remove_host(ioa_cfg->host);
		__ipr_remove(pdev);
		return rc;
	}

	rc = ipr_create_dump_file(&ioa_cfg->host->shost_dev.kobj,
				  &ipr_dump_attr);

	if (rc) {
		ipr_remove_trace_file(&ioa_cfg->host->shost_dev.kobj,
				      &ipr_trace_attr);
		scsi_remove_host(ioa_cfg->host);
		__ipr_remove(pdev);
		return rc;
	}

	scsi_scan_host(ioa_cfg->host);
	ipr_scan_vsets(ioa_cfg);
	scsi_add_device(ioa_cfg->host, IPR_IOA_BUS, IPR_IOA_TARGET, IPR_IOA_LUN);
	ioa_cfg->allow_ml_add_del = 1;
	ioa_cfg->host->max_channel = IPR_VSET_BUS;
	schedule_work(&ioa_cfg->work_q);
	return 0;
}
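
/*
 * Note the unwind ordering on failure above: each step tears down only
 * what the earlier steps created (trace file, then the scsi host, then
 * __ipr_remove() for everything ipr_probe_ioa() allocated), mirroring
 * the setup order in reverse.
 */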

/**
 * ipr_shutdown - Shutdown handler.
 * @pdev:	pci device struct
 *
 * This function is invoked upon system shutdown/reboot. It will issue
 * an adapter shutdown to the adapter to flush the write cache.
 *
 * Return value:
 * 	none
 **/
static void ipr_shutdown(struct pci_dev *pdev)
{
	struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
	unsigned long lock_flags = 0;

	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
	while (ioa_cfg->in_reset_reload) {
		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
		wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
		spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
	}

	ipr_initiate_ioa_bringdown(ioa_cfg, IPR_SHUTDOWN_NORMAL);
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
	wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
}

static struct pci_device_id ipr_pci_table[] = {
	{ PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_5702, 0, 0, 0 },
	{ PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_5703, 0, 0, 0 },
	{ PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_573D, 0, 0, 0 },
	{ PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_573E, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_571B, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572E, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_571A, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_575B, 0, 0,
		IPR_USE_LONG_TRANSOP_TIMEOUT },
	{ PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_OBSIDIAN,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572A, 0, 0, 0 },
	{ PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_OBSIDIAN,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572B, 0, 0,
		IPR_USE_LONG_TRANSOP_TIMEOUT },
	{ PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_OBSIDIAN,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_575C, 0, 0,
		IPR_USE_LONG_TRANSOP_TIMEOUT },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572A, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572B, 0, 0,
		IPR_USE_LONG_TRANSOP_TIMEOUT },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_575C, 0, 0,
		IPR_USE_LONG_TRANSOP_TIMEOUT },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN_E,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_574E, 0, 0,
		IPR_USE_LONG_TRANSOP_TIMEOUT },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN_E,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57B3, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN_E,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57CC, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN_E,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57B7, 0, 0,
		IPR_USE_LONG_TRANSOP_TIMEOUT | IPR_USE_PCI_WARM_RESET },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_SNIPE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_2780, 0, 0, 0 },
	{ PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_SCAMP,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_571E, 0, 0, 0 },
	{ PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_SCAMP,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_571F, 0, 0,
		IPR_USE_LONG_TRANSOP_TIMEOUT },
	{ PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_SCAMP,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572F, 0, 0,
		IPR_USE_LONG_TRANSOP_TIMEOUT },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_FPGA_E2,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57B5, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_FPGA_E2,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_574D, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_FPGA_E2,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57B2, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_FPGA_E2,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57C0, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_FPGA_E2,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57C3, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_FPGA_E2,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57C4, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57B4, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57B1, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57C6, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57C8, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57CE, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57D5, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57D6, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57D7, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57D8, 0, 0, 0 },
	{ }
};
MODULE_DEVICE_TABLE(pci, ipr_pci_table);

static const struct pci_error_handlers ipr_err_handler = {
	.error_detected = ipr_pci_error_detected,
	.slot_reset = ipr_pci_slot_reset,
};

static struct pci_driver ipr_driver = {
	.name = IPR_NAME,
	.id_table = ipr_pci_table,
	.probe = ipr_probe,
	.remove = ipr_remove,
	.shutdown = ipr_shutdown,
	.err_handler = &ipr_err_handler,
};

/**
 * ipr_halt_done - Shutdown prepare completion
 * @ipr_cmd:	ipr command struct
 *
 * Return value:
 * 	none
 **/
static void ipr_halt_done(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;

	list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
}

/**
 * ipr_halt - Issue shutdown prepare to all adapters
 * @nb:		notifier block
 * @event:	system event (SYS_RESTART, SYS_HALT, or SYS_POWER_OFF)
 * @buf:	unused
 *
 * Return value:
 * 	NOTIFY_OK on success / NOTIFY_DONE on failure
 **/
static int ipr_halt(struct notifier_block *nb, ulong event, void *buf)
{
	struct ipr_cmnd *ipr_cmd;
	struct ipr_ioa_cfg *ioa_cfg;
	unsigned long flags = 0;

	if (event != SYS_RESTART && event != SYS_HALT && event != SYS_POWER_OFF)
		return NOTIFY_DONE;

	spin_lock(&ipr_driver_lock);

	list_for_each_entry(ioa_cfg, &ipr_ioa_head, queue) {
		spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
		if (!ioa_cfg->allow_cmds) {
			spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
			continue;
		}

		ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
		ipr_cmd->ioarcb.res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
		ipr_cmd->ioarcb.cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
		ipr_cmd->ioarcb.cmd_pkt.cdb[0] = IPR_IOA_SHUTDOWN;
		ipr_cmd->ioarcb.cmd_pkt.cdb[1] = IPR_SHUTDOWN_PREPARE_FOR_NORMAL;

		ipr_do_req(ipr_cmd, ipr_halt_done, ipr_timeout, IPR_DEVICE_RESET_TIMEOUT);
		spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
	}
	spin_unlock(&ipr_driver_lock);

	return NOTIFY_OK;
}
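
/*
 * ipr_halt() runs off the reboot notifier chain (registered in
 * ipr_init() below). For every adapter still accepting commands it
 * queues an IPR_IOA_SHUTDOWN request with the "prepare for normal"
 * modifier, giving each IOA a chance to flush state before the system
 * goes down.
 */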

static struct notifier_block ipr_notifier = {
	ipr_halt, NULL, 0
};

/**
 * ipr_init - Module entry point
 *
 * Return value:
 * 	0 on success / negative value on failure
 **/
static int __init ipr_init(void)
{
	ipr_info("IBM Power RAID SCSI Device Driver version: %s %s\n",
		 IPR_DRIVER_VERSION, IPR_DRIVER_DATE);

	register_reboot_notifier(&ipr_notifier);
	return pci_register_driver(&ipr_driver);
}

/**
 * ipr_exit - Module unload
 *
 * Module unload entry point.
 *
 * Return value:
 * 	none
 **/
static void __exit ipr_exit(void)
{
	unregister_reboot_notifier(&ipr_notifier);
	pci_unregister_driver(&ipr_driver);
}

module_init(ipr_init);
module_exit(ipr_exit);