1 /*
2 * ipr.c -- driver for IBM Power Linux RAID adapters
3 *
4 * Written By: Brian King <brking@us.ibm.com>, IBM Corporation
5 *
6 * Copyright (C) 2003, 2004 IBM Corporation
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License as published by
10 * the Free Software Foundation; either version 2 of the License, or
11 * (at your option) any later version.
12 *
13 * This program is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 * GNU General Public License for more details.
17 *
18 * You should have received a copy of the GNU General Public License
19 * along with this program; if not, write to the Free Software
20 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
21 *
22 */
23
24 /*
25 * Notes:
26 *
27 * This driver is used to control the following SCSI adapters:
28 *
29 * IBM iSeries: 5702, 5703, 2780, 5709, 570A, 570B
30 *
31 * IBM pSeries: PCI-X Dual Channel Ultra 320 SCSI RAID Adapter
32 * PCI-X Dual Channel Ultra 320 SCSI Adapter
33 * PCI-X Dual Channel Ultra 320 SCSI RAID Enablement Card
34 * Embedded SCSI adapter on p615 and p655 systems
35 *
36 * Supported Hardware Features:
37 * - Ultra 320 SCSI controller
38 * - PCI-X host interface
39 * - Embedded PowerPC RISC Processor and Hardware XOR DMA Engine
40 * - Non-Volatile Write Cache
41 * - Supports attachment of non-RAID disks, tape, and optical devices
42 * - RAID Levels 0, 5, 10
43 * - Hot spare
44 * - Background Parity Checking
45 * - Background Data Scrubbing
46 * - Ability to increase the capacity of an existing RAID 5 disk array
47 * by adding disks
48 *
49 * Driver Features:
50 * - Tagged command queuing
51 * - Adapter microcode download
52 * - PCI hot plug
53 * - SCSI device hot plug
54 *
55 */
56
57 #include <linux/fs.h>
58 #include <linux/init.h>
59 #include <linux/types.h>
60 #include <linux/errno.h>
61 #include <linux/kernel.h>
62 #include <linux/slab.h>
63 #include <linux/vmalloc.h>
64 #include <linux/ioport.h>
65 #include <linux/delay.h>
66 #include <linux/pci.h>
67 #include <linux/wait.h>
68 #include <linux/spinlock.h>
69 #include <linux/sched.h>
70 #include <linux/interrupt.h>
71 #include <linux/blkdev.h>
72 #include <linux/firmware.h>
73 #include <linux/module.h>
74 #include <linux/moduleparam.h>
75 #include <linux/libata.h>
76 #include <linux/hdreg.h>
77 #include <linux/reboot.h>
78 #include <linux/stringify.h>
79 #include <asm/io.h>
80 #include <asm/irq.h>
81 #include <asm/processor.h>
82 #include <scsi/scsi.h>
83 #include <scsi/scsi_host.h>
84 #include <scsi/scsi_tcq.h>
85 #include <scsi/scsi_eh.h>
86 #include <scsi/scsi_cmnd.h>
87 #include "ipr.h"
88
89 /*
90 * Global Data
91 */
92 static LIST_HEAD(ipr_ioa_head);
93 static unsigned int ipr_log_level = IPR_DEFAULT_LOG_LEVEL;
94 static unsigned int ipr_max_speed = 1;
95 static int ipr_testmode = 0;
96 static unsigned int ipr_fastfail = 0;
97 static unsigned int ipr_transop_timeout = 0;
98 static unsigned int ipr_debug = 0;
99 static unsigned int ipr_max_devs = IPR_DEFAULT_SIS64_DEVS;
100 static unsigned int ipr_dual_ioa_raid = 1;
101 static unsigned int ipr_number_of_msix = 2;
102 static DEFINE_SPINLOCK(ipr_driver_lock);
103
104 /* This table describes the differences between DMA controller chips */
105 static const struct ipr_chip_cfg_t ipr_chip_cfg[] = {
106 { /* Gemstone, Citrine, Obsidian, and Obsidian-E */
107 .mailbox = 0x0042C,
108 .max_cmds = 100,
109 .cache_line_size = 0x20,
110 .clear_isr = 1,
111 .iopoll_weight = 0,
112 {
113 .set_interrupt_mask_reg = 0x0022C,
114 .clr_interrupt_mask_reg = 0x00230,
115 .clr_interrupt_mask_reg32 = 0x00230,
116 .sense_interrupt_mask_reg = 0x0022C,
117 .sense_interrupt_mask_reg32 = 0x0022C,
118 .clr_interrupt_reg = 0x00228,
119 .clr_interrupt_reg32 = 0x00228,
120 .sense_interrupt_reg = 0x00224,
121 .sense_interrupt_reg32 = 0x00224,
122 .ioarrin_reg = 0x00404,
123 .sense_uproc_interrupt_reg = 0x00214,
124 .sense_uproc_interrupt_reg32 = 0x00214,
125 .set_uproc_interrupt_reg = 0x00214,
126 .set_uproc_interrupt_reg32 = 0x00214,
127 .clr_uproc_interrupt_reg = 0x00218,
128 .clr_uproc_interrupt_reg32 = 0x00218
129 }
130 },
131 { /* Snipe and Scamp */
132 .mailbox = 0x0052C,
133 .max_cmds = 100,
134 .cache_line_size = 0x20,
135 .clear_isr = 1,
136 .iopoll_weight = 0,
137 {
138 .set_interrupt_mask_reg = 0x00288,
139 .clr_interrupt_mask_reg = 0x0028C,
140 .clr_interrupt_mask_reg32 = 0x0028C,
141 .sense_interrupt_mask_reg = 0x00288,
142 .sense_interrupt_mask_reg32 = 0x00288,
143 .clr_interrupt_reg = 0x00284,
144 .clr_interrupt_reg32 = 0x00284,
145 .sense_interrupt_reg = 0x00280,
146 .sense_interrupt_reg32 = 0x00280,
147 .ioarrin_reg = 0x00504,
148 .sense_uproc_interrupt_reg = 0x00290,
149 .sense_uproc_interrupt_reg32 = 0x00290,
150 .set_uproc_interrupt_reg = 0x00290,
151 .set_uproc_interrupt_reg32 = 0x00290,
152 .clr_uproc_interrupt_reg = 0x00294,
153 .clr_uproc_interrupt_reg32 = 0x00294
154 }
155 },
156 { /* CRoC */
157 .mailbox = 0x00044,
158 .max_cmds = 1000,
159 .cache_line_size = 0x20,
160 .clear_isr = 0,
161 .iopoll_weight = 64,
162 {
163 .set_interrupt_mask_reg = 0x00010,
164 .clr_interrupt_mask_reg = 0x00018,
165 .clr_interrupt_mask_reg32 = 0x0001C,
166 .sense_interrupt_mask_reg = 0x00010,
167 .sense_interrupt_mask_reg32 = 0x00014,
168 .clr_interrupt_reg = 0x00008,
169 .clr_interrupt_reg32 = 0x0000C,
170 .sense_interrupt_reg = 0x00000,
171 .sense_interrupt_reg32 = 0x00004,
172 .ioarrin_reg = 0x00070,
173 .sense_uproc_interrupt_reg = 0x00020,
174 .sense_uproc_interrupt_reg32 = 0x00024,
175 .set_uproc_interrupt_reg = 0x00020,
176 .set_uproc_interrupt_reg32 = 0x00024,
177 .clr_uproc_interrupt_reg = 0x00028,
178 .clr_uproc_interrupt_reg32 = 0x0002C,
179 .init_feedback_reg = 0x0005C,
180 .dump_addr_reg = 0x00064,
181 .dump_data_reg = 0x00068,
182 .endian_swap_reg = 0x00084
183 }
184 },
185 };
186
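/* Maps each supported PCI vendor/device ID to its interrupt type (LSI or MSI),
SIS generation, register access method, and the chip register layout above */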
187 static const struct ipr_chip_t ipr_chip[] = {
188 { PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE, IPR_USE_LSI, IPR_SIS32, IPR_PCI_CFG, &ipr_chip_cfg[0] },
189 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE, IPR_USE_LSI, IPR_SIS32, IPR_PCI_CFG, &ipr_chip_cfg[0] },
190 { PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_OBSIDIAN, IPR_USE_LSI, IPR_SIS32, IPR_PCI_CFG, &ipr_chip_cfg[0] },
191 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN, IPR_USE_LSI, IPR_SIS32, IPR_PCI_CFG, &ipr_chip_cfg[0] },
192 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN_E, IPR_USE_MSI, IPR_SIS32, IPR_PCI_CFG, &ipr_chip_cfg[0] },
193 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_SNIPE, IPR_USE_LSI, IPR_SIS32, IPR_PCI_CFG, &ipr_chip_cfg[1] },
194 { PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_SCAMP, IPR_USE_LSI, IPR_SIS32, IPR_PCI_CFG, &ipr_chip_cfg[1] },
195 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_FPGA_E2, IPR_USE_MSI, IPR_SIS64, IPR_MMIO, &ipr_chip_cfg[2] },
196 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE, IPR_USE_MSI, IPR_SIS64, IPR_MMIO, &ipr_chip_cfg[2] }
197 };
198
199 static int ipr_max_bus_speeds[] = {
200 IPR_80MBs_SCSI_RATE, IPR_U160_SCSI_RATE, IPR_U320_SCSI_RATE
201 };
202
203 MODULE_AUTHOR("Brian King <brking@us.ibm.com>");
204 MODULE_DESCRIPTION("IBM Power RAID SCSI Adapter Driver");
205 module_param_named(max_speed, ipr_max_speed, uint, 0);
206 MODULE_PARM_DESC(max_speed, "Maximum bus speed (0-2). Default: 1=U160. Speeds: 0=80 MB/s, 1=U160, 2=U320");
207 module_param_named(log_level, ipr_log_level, uint, 0);
208 MODULE_PARM_DESC(log_level, "Set to 0 - 4 for increasing verbosity of device driver");
209 module_param_named(testmode, ipr_testmode, int, 0);
210 MODULE_PARM_DESC(testmode, "DANGEROUS!!! Allows unsupported configurations");
211 module_param_named(fastfail, ipr_fastfail, int, S_IRUGO | S_IWUSR);
212 MODULE_PARM_DESC(fastfail, "Reduce timeouts and retries");
213 module_param_named(transop_timeout, ipr_transop_timeout, int, 0);
214 MODULE_PARM_DESC(transop_timeout, "Time in seconds to wait for adapter to come operational (default: 300)");
215 module_param_named(debug, ipr_debug, int, S_IRUGO | S_IWUSR);
216 MODULE_PARM_DESC(debug, "Enable device driver debugging logging. Set to 1 to enable. (default: 0)");
217 module_param_named(dual_ioa_raid, ipr_dual_ioa_raid, int, 0);
218 MODULE_PARM_DESC(dual_ioa_raid, "Enable dual adapter RAID support. Set to 1 to enable. (default: 1)");
219 module_param_named(max_devs, ipr_max_devs, int, 0);
220 MODULE_PARM_DESC(max_devs, "Specify the maximum number of physical devices. "
221 "[Default=" __stringify(IPR_DEFAULT_SIS64_DEVS) "]");
222 module_param_named(number_of_msix, ipr_number_of_msix, int, 0);
223 MODULE_PARM_DESC(number_of_msix, "Specify the number of MSIX interrupts to use on capable adapters (1 - 5). (default:2)");
224 MODULE_LICENSE("GPL");
225 MODULE_VERSION(IPR_DRIVER_VERSION);
226
227 /* A constant array of IOASCs/URCs/Error Messages */
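/* Each entry: IOASC value, an error-logging control flag, the log level
associated with the message, and the message text itself */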
228 static const
229 struct ipr_error_table_t ipr_error_table[] = {
230 {0x00000000, 1, IPR_DEFAULT_LOG_LEVEL,
231 "8155: An unknown error was received"},
232 {0x00330000, 0, 0,
233 "Soft underlength error"},
234 {0x005A0000, 0, 0,
235 "Command to be cancelled not found"},
236 {0x00808000, 0, 0,
237 "Qualified success"},
238 {0x01080000, 1, IPR_DEFAULT_LOG_LEVEL,
239 "FFFE: Soft device bus error recovered by the IOA"},
240 {0x01088100, 0, IPR_DEFAULT_LOG_LEVEL,
241 "4101: Soft device bus fabric error"},
242 {0x01100100, 0, IPR_DEFAULT_LOG_LEVEL,
243 "FFFC: Logical block guard error recovered by the device"},
244 {0x01100300, 0, IPR_DEFAULT_LOG_LEVEL,
245 "FFFC: Logical block reference tag error recovered by the device"},
246 {0x01108300, 0, IPR_DEFAULT_LOG_LEVEL,
247 "4171: Recovered scatter list tag / sequence number error"},
248 {0x01109000, 0, IPR_DEFAULT_LOG_LEVEL,
249 "FF3D: Recovered logical block CRC error on IOA to Host transfer"},
250 {0x01109200, 0, IPR_DEFAULT_LOG_LEVEL,
251 "4171: Recovered logical block sequence number error on IOA to Host transfer"},
252 {0x0110A000, 0, IPR_DEFAULT_LOG_LEVEL,
253 "FFFD: Recovered logical block reference tag error detected by the IOA"},
254 {0x0110A100, 0, IPR_DEFAULT_LOG_LEVEL,
255 "FFFD: Logical block guard error recovered by the IOA"},
256 {0x01170600, 0, IPR_DEFAULT_LOG_LEVEL,
257 "FFF9: Device sector reassign successful"},
258 {0x01170900, 0, IPR_DEFAULT_LOG_LEVEL,
259 "FFF7: Media error recovered by device rewrite procedures"},
260 {0x01180200, 0, IPR_DEFAULT_LOG_LEVEL,
261 "7001: IOA sector reassignment successful"},
262 {0x01180500, 0, IPR_DEFAULT_LOG_LEVEL,
263 "FFF9: Soft media error. Sector reassignment recommended"},
264 {0x01180600, 0, IPR_DEFAULT_LOG_LEVEL,
265 "FFF7: Media error recovered by IOA rewrite procedures"},
266 {0x01418000, 0, IPR_DEFAULT_LOG_LEVEL,
267 "FF3D: Soft PCI bus error recovered by the IOA"},
268 {0x01440000, 1, IPR_DEFAULT_LOG_LEVEL,
269 "FFF6: Device hardware error recovered by the IOA"},
270 {0x01448100, 0, IPR_DEFAULT_LOG_LEVEL,
271 "FFF6: Device hardware error recovered by the device"},
272 {0x01448200, 1, IPR_DEFAULT_LOG_LEVEL,
273 "FF3D: Soft IOA error recovered by the IOA"},
274 {0x01448300, 0, IPR_DEFAULT_LOG_LEVEL,
275 "FFFA: Undefined device response recovered by the IOA"},
276 {0x014A0000, 1, IPR_DEFAULT_LOG_LEVEL,
277 "FFF6: Device bus error, message or command phase"},
278 {0x014A8000, 0, IPR_DEFAULT_LOG_LEVEL,
279 "FFFE: Task Management Function failed"},
280 {0x015D0000, 0, IPR_DEFAULT_LOG_LEVEL,
281 "FFF6: Failure prediction threshold exceeded"},
282 {0x015D9200, 0, IPR_DEFAULT_LOG_LEVEL,
283 "8009: Impending cache battery pack failure"},
284 {0x02040400, 0, 0,
285 "34FF: Disk device format in progress"},
286 {0x02048000, 0, IPR_DEFAULT_LOG_LEVEL,
287 "9070: IOA requested reset"},
288 {0x023F0000, 0, 0,
289 "Synchronization required"},
290 {0x024E0000, 0, 0,
291 "No ready, IOA shutdown"},
292 {0x025A0000, 0, 0,
293 "Not ready, IOA has been shutdown"},
294 {0x02670100, 0, IPR_DEFAULT_LOG_LEVEL,
295 "3020: Storage subsystem configuration error"},
296 {0x03110B00, 0, 0,
297 "FFF5: Medium error, data unreadable, recommend reassign"},
298 {0x03110C00, 0, 0,
299 "7000: Medium error, data unreadable, do not reassign"},
300 {0x03310000, 0, IPR_DEFAULT_LOG_LEVEL,
301 "FFF3: Disk media format bad"},
302 {0x04050000, 0, IPR_DEFAULT_LOG_LEVEL,
303 "3002: Addressed device failed to respond to selection"},
304 {0x04080000, 1, IPR_DEFAULT_LOG_LEVEL,
305 "3100: Device bus error"},
306 {0x04080100, 0, IPR_DEFAULT_LOG_LEVEL,
307 "3109: IOA timed out a device command"},
308 {0x04088000, 0, 0,
309 "3120: SCSI bus is not operational"},
310 {0x04088100, 0, IPR_DEFAULT_LOG_LEVEL,
311 "4100: Hard device bus fabric error"},
312 {0x04100100, 0, IPR_DEFAULT_LOG_LEVEL,
313 "310C: Logical block guard error detected by the device"},
314 {0x04100300, 0, IPR_DEFAULT_LOG_LEVEL,
315 "310C: Logical block reference tag error detected by the device"},
316 {0x04108300, 1, IPR_DEFAULT_LOG_LEVEL,
317 "4170: Scatter list tag / sequence number error"},
318 {0x04109000, 1, IPR_DEFAULT_LOG_LEVEL,
319 "8150: Logical block CRC error on IOA to Host transfer"},
320 {0x04109200, 1, IPR_DEFAULT_LOG_LEVEL,
321 "4170: Logical block sequence number error on IOA to Host transfer"},
322 {0x0410A000, 0, IPR_DEFAULT_LOG_LEVEL,
323 "310D: Logical block reference tag error detected by the IOA"},
324 {0x0410A100, 0, IPR_DEFAULT_LOG_LEVEL,
325 "310D: Logical block guard error detected by the IOA"},
326 {0x04118000, 0, IPR_DEFAULT_LOG_LEVEL,
327 "9000: IOA reserved area data check"},
328 {0x04118100, 0, IPR_DEFAULT_LOG_LEVEL,
329 "9001: IOA reserved area invalid data pattern"},
330 {0x04118200, 0, IPR_DEFAULT_LOG_LEVEL,
331 "9002: IOA reserved area LRC error"},
332 {0x04118300, 1, IPR_DEFAULT_LOG_LEVEL,
333 "Hardware Error, IOA metadata access error"},
334 {0x04320000, 0, IPR_DEFAULT_LOG_LEVEL,
335 "102E: Out of alternate sectors for disk storage"},
336 {0x04330000, 1, IPR_DEFAULT_LOG_LEVEL,
337 "FFF4: Data transfer underlength error"},
338 {0x04338000, 1, IPR_DEFAULT_LOG_LEVEL,
339 "FFF4: Data transfer overlength error"},
340 {0x043E0100, 0, IPR_DEFAULT_LOG_LEVEL,
341 "3400: Logical unit failure"},
342 {0x04408500, 0, IPR_DEFAULT_LOG_LEVEL,
343 "FFF4: Device microcode is corrupt"},
344 {0x04418000, 1, IPR_DEFAULT_LOG_LEVEL,
345 "8150: PCI bus error"},
346 {0x04430000, 1, 0,
347 "Unsupported device bus message received"},
348 {0x04440000, 1, IPR_DEFAULT_LOG_LEVEL,
349 "FFF4: Disk device problem"},
350 {0x04448200, 1, IPR_DEFAULT_LOG_LEVEL,
351 "8150: Permanent IOA failure"},
352 {0x04448300, 0, IPR_DEFAULT_LOG_LEVEL,
353 "3010: Disk device returned wrong response to IOA"},
354 {0x04448400, 0, IPR_DEFAULT_LOG_LEVEL,
355 "8151: IOA microcode error"},
356 {0x04448500, 0, 0,
357 "Device bus status error"},
358 {0x04448600, 0, IPR_DEFAULT_LOG_LEVEL,
359 "8157: IOA error requiring IOA reset to recover"},
360 {0x04448700, 0, 0,
361 "ATA device status error"},
362 {0x04490000, 0, 0,
363 "Message reject received from the device"},
364 {0x04449200, 0, IPR_DEFAULT_LOG_LEVEL,
365 "8008: A permanent cache battery pack failure occurred"},
366 {0x0444A000, 0, IPR_DEFAULT_LOG_LEVEL,
367 "9090: Disk unit has been modified after the last known status"},
368 {0x0444A200, 0, IPR_DEFAULT_LOG_LEVEL,
369 "9081: IOA detected device error"},
370 {0x0444A300, 0, IPR_DEFAULT_LOG_LEVEL,
371 "9082: IOA detected device error"},
372 {0x044A0000, 1, IPR_DEFAULT_LOG_LEVEL,
373 "3110: Device bus error, message or command phase"},
374 {0x044A8000, 1, IPR_DEFAULT_LOG_LEVEL,
375 "3110: SAS Command / Task Management Function failed"},
376 {0x04670400, 0, IPR_DEFAULT_LOG_LEVEL,
377 "9091: Incorrect hardware configuration change has been detected"},
378 {0x04678000, 0, IPR_DEFAULT_LOG_LEVEL,
379 "9073: Invalid multi-adapter configuration"},
380 {0x04678100, 0, IPR_DEFAULT_LOG_LEVEL,
381 "4010: Incorrect connection between cascaded expanders"},
382 {0x04678200, 0, IPR_DEFAULT_LOG_LEVEL,
383 "4020: Connections exceed IOA design limits"},
384 {0x04678300, 0, IPR_DEFAULT_LOG_LEVEL,
385 "4030: Incorrect multipath connection"},
386 {0x04679000, 0, IPR_DEFAULT_LOG_LEVEL,
387 "4110: Unsupported enclosure function"},
388 {0x046E0000, 0, IPR_DEFAULT_LOG_LEVEL,
389 "FFF4: Command to logical unit failed"},
390 {0x05240000, 1, 0,
391 "Illegal request, invalid request type or request packet"},
392 {0x05250000, 0, 0,
393 "Illegal request, invalid resource handle"},
394 {0x05258000, 0, 0,
395 "Illegal request, commands not allowed to this device"},
396 {0x05258100, 0, 0,
397 "Illegal request, command not allowed to a secondary adapter"},
398 {0x05258200, 0, 0,
399 "Illegal request, command not allowed to a non-optimized resource"},
400 {0x05260000, 0, 0,
401 "Illegal request, invalid field in parameter list"},
402 {0x05260100, 0, 0,
403 "Illegal request, parameter not supported"},
404 {0x05260200, 0, 0,
405 "Illegal request, parameter value invalid"},
406 {0x052C0000, 0, 0,
407 "Illegal request, command sequence error"},
408 {0x052C8000, 1, 0,
409 "Illegal request, dual adapter support not enabled"},
410 {0x06040500, 0, IPR_DEFAULT_LOG_LEVEL,
411 "9031: Array protection temporarily suspended, protection resuming"},
412 {0x06040600, 0, IPR_DEFAULT_LOG_LEVEL,
413 "9040: Array protection temporarily suspended, protection resuming"},
414 {0x06288000, 0, IPR_DEFAULT_LOG_LEVEL,
415 "3140: Device bus not ready to ready transition"},
416 {0x06290000, 0, IPR_DEFAULT_LOG_LEVEL,
417 "FFFB: SCSI bus was reset"},
418 {0x06290500, 0, 0,
419 "FFFE: SCSI bus transition to single ended"},
420 {0x06290600, 0, 0,
421 "FFFE: SCSI bus transition to LVD"},
422 {0x06298000, 0, IPR_DEFAULT_LOG_LEVEL,
423 "FFFB: SCSI bus was reset by another initiator"},
424 {0x063F0300, 0, IPR_DEFAULT_LOG_LEVEL,
425 "3029: A device replacement has occurred"},
426 {0x064C8000, 0, IPR_DEFAULT_LOG_LEVEL,
427 "9051: IOA cache data exists for a missing or failed device"},
428 {0x064C8100, 0, IPR_DEFAULT_LOG_LEVEL,
429 "9055: Auxiliary cache IOA contains cache data needed by the primary IOA"},
430 {0x06670100, 0, IPR_DEFAULT_LOG_LEVEL,
431 "9025: Disk unit is not supported at its physical location"},
432 {0x06670600, 0, IPR_DEFAULT_LOG_LEVEL,
433 "3020: IOA detected a SCSI bus configuration error"},
434 {0x06678000, 0, IPR_DEFAULT_LOG_LEVEL,
435 "3150: SCSI bus configuration error"},
436 {0x06678100, 0, IPR_DEFAULT_LOG_LEVEL,
437 "9074: Asymmetric advanced function disk configuration"},
438 {0x06678300, 0, IPR_DEFAULT_LOG_LEVEL,
439 "4040: Incomplete multipath connection between IOA and enclosure"},
440 {0x06678400, 0, IPR_DEFAULT_LOG_LEVEL,
441 "4041: Incomplete multipath connection between enclosure and device"},
442 {0x06678500, 0, IPR_DEFAULT_LOG_LEVEL,
443 "9075: Incomplete multipath connection between IOA and remote IOA"},
444 {0x06678600, 0, IPR_DEFAULT_LOG_LEVEL,
445 "9076: Configuration error, missing remote IOA"},
446 {0x06679100, 0, IPR_DEFAULT_LOG_LEVEL,
447 "4050: Enclosure does not support a required multipath function"},
448 {0x06690000, 0, IPR_DEFAULT_LOG_LEVEL,
449 "4070: Logically bad block written on device"},
450 {0x06690200, 0, IPR_DEFAULT_LOG_LEVEL,
451 "9041: Array protection temporarily suspended"},
452 {0x06698200, 0, IPR_DEFAULT_LOG_LEVEL,
453 "9042: Corrupt array parity detected on specified device"},
454 {0x066B0200, 0, IPR_DEFAULT_LOG_LEVEL,
455 "9030: Array no longer protected due to missing or failed disk unit"},
456 {0x066B8000, 0, IPR_DEFAULT_LOG_LEVEL,
457 "9071: Link operational transition"},
458 {0x066B8100, 0, IPR_DEFAULT_LOG_LEVEL,
459 "9072: Link not operational transition"},
460 {0x066B8200, 0, IPR_DEFAULT_LOG_LEVEL,
461 "9032: Array exposed but still protected"},
462 {0x066B8300, 0, IPR_DEFAULT_LOG_LEVEL + 1,
463 "70DD: Device forced failed by disrupt device command"},
464 {0x066B9100, 0, IPR_DEFAULT_LOG_LEVEL,
465 "4061: Multipath redundancy level got better"},
466 {0x066B9200, 0, IPR_DEFAULT_LOG_LEVEL,
467 "4060: Multipath redundancy level got worse"},
468 {0x07270000, 0, 0,
469 "Failure due to other device"},
470 {0x07278000, 0, IPR_DEFAULT_LOG_LEVEL,
471 "9008: IOA does not support functions expected by devices"},
472 {0x07278100, 0, IPR_DEFAULT_LOG_LEVEL,
473 "9010: Cache data associated with attached devices cannot be found"},
474 {0x07278200, 0, IPR_DEFAULT_LOG_LEVEL,
475 "9011: Cache data belongs to devices other than those attached"},
476 {0x07278400, 0, IPR_DEFAULT_LOG_LEVEL,
477 "9020: Array missing 2 or more devices with only 1 device present"},
478 {0x07278500, 0, IPR_DEFAULT_LOG_LEVEL,
479 "9021: Array missing 2 or more devices with 2 or more devices present"},
480 {0x07278600, 0, IPR_DEFAULT_LOG_LEVEL,
481 "9022: Exposed array is missing a required device"},
482 {0x07278700, 0, IPR_DEFAULT_LOG_LEVEL,
483 "9023: Array member(s) not at required physical locations"},
484 {0x07278800, 0, IPR_DEFAULT_LOG_LEVEL,
485 "9024: Array not functional due to present hardware configuration"},
486 {0x07278900, 0, IPR_DEFAULT_LOG_LEVEL,
487 "9026: Array not functional due to present hardware configuration"},
488 {0x07278A00, 0, IPR_DEFAULT_LOG_LEVEL,
489 "9027: Array is missing a device and parity is out of sync"},
490 {0x07278B00, 0, IPR_DEFAULT_LOG_LEVEL,
491 "9028: Maximum number of arrays already exist"},
492 {0x07278C00, 0, IPR_DEFAULT_LOG_LEVEL,
493 "9050: Required cache data cannot be located for a disk unit"},
494 {0x07278D00, 0, IPR_DEFAULT_LOG_LEVEL,
495 "9052: Cache data exists for a device that has been modified"},
496 {0x07278F00, 0, IPR_DEFAULT_LOG_LEVEL,
497 "9054: IOA resources not available due to previous problems"},
498 {0x07279100, 0, IPR_DEFAULT_LOG_LEVEL,
499 "9092: Disk unit requires initialization before use"},
500 {0x07279200, 0, IPR_DEFAULT_LOG_LEVEL,
501 "9029: Incorrect hardware configuration change has been detected"},
502 {0x07279600, 0, IPR_DEFAULT_LOG_LEVEL,
503 "9060: One or more disk pairs are missing from an array"},
504 {0x07279700, 0, IPR_DEFAULT_LOG_LEVEL,
505 "9061: One or more disks are missing from an array"},
506 {0x07279800, 0, IPR_DEFAULT_LOG_LEVEL,
507 "9062: One or more disks are missing from an array"},
508 {0x07279900, 0, IPR_DEFAULT_LOG_LEVEL,
509 "9063: Maximum number of functional arrays has been exceeded"},
510 {0x0B260000, 0, 0,
511 "Aborted command, invalid descriptor"},
512 {0x0B5A0000, 0, 0,
513 "Command terminated by host"}
514 };
515
516 static const struct ipr_ses_table_entry ipr_ses_table[] = {
517 { "2104-DL1 ", "XXXXXXXXXXXXXXXX", 80 },
518 { "2104-TL1 ", "XXXXXXXXXXXXXXXX", 80 },
519 { "HSBP07M P U2SCSI", "XXXXXXXXXXXXXXXX", 80 }, /* Hidive 7 slot */
520 { "HSBP05M P U2SCSI", "XXXXXXXXXXXXXXXX", 80 }, /* Hidive 5 slot */
521 { "HSBP05M S U2SCSI", "XXXXXXXXXXXXXXXX", 80 }, /* Bowtie */
522 { "HSBP06E ASU2SCSI", "XXXXXXXXXXXXXXXX", 80 }, /* MartinFenning */
523 { "2104-DU3 ", "XXXXXXXXXXXXXXXX", 160 },
524 { "2104-TU3 ", "XXXXXXXXXXXXXXXX", 160 },
525 { "HSBP04C RSU2SCSI", "XXXXXXX*XXXXXXXX", 160 },
526 { "HSBP06E RSU2SCSI", "XXXXXXX*XXXXXXXX", 160 },
527 { "St V1S2 ", "XXXXXXXXXXXXXXXX", 160 },
528 { "HSBPD4M PU3SCSI", "XXXXXXX*XXXXXXXX", 160 },
529 { "VSBPD1H U3SCSI", "XXXXXXX*XXXXXXXX", 160 }
530 };
531
532 /*
533 * Function Prototypes
534 */
535 static int ipr_reset_alert(struct ipr_cmnd *);
536 static void ipr_process_ccn(struct ipr_cmnd *);
537 static void ipr_process_error(struct ipr_cmnd *);
538 static void ipr_reset_ioa_job(struct ipr_cmnd *);
539 static void ipr_initiate_ioa_reset(struct ipr_ioa_cfg *,
540 enum ipr_shutdown_type);
541
542 #ifdef CONFIG_SCSI_IPR_TRACE
543 /**
544 * ipr_trc_hook - Add a trace entry to the driver trace
545 * @ipr_cmd: ipr command struct
546 * @type: trace type
547 * @add_data: additional data
548 *
549 * Return value:
550 * none
551 **/
552 static void ipr_trc_hook(struct ipr_cmnd *ipr_cmd,
553 u8 type, u32 add_data)
554 {
555 struct ipr_trace_entry *trace_entry;
556 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
557
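/* The driver trace is a fixed-size ring; the atomic index below wraps
modulo IPR_NUM_TRACE_ENTRIES */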
558 trace_entry = &ioa_cfg->trace[atomic_add_return
559 (1, &ioa_cfg->trace_index)%IPR_NUM_TRACE_ENTRIES];
560 trace_entry->time = jiffies;
561 trace_entry->op_code = ipr_cmd->ioarcb.cmd_pkt.cdb[0];
562 trace_entry->type = type;
563 if (ipr_cmd->ioa_cfg->sis64)
564 trace_entry->ata_op_code = ipr_cmd->i.ata_ioadl.regs.command;
565 else
566 trace_entry->ata_op_code = ipr_cmd->ioarcb.u.add_data.u.regs.command;
567 trace_entry->cmd_index = ipr_cmd->cmd_index & 0xff;
568 trace_entry->res_handle = ipr_cmd->ioarcb.res_handle;
569 trace_entry->u.add_data = add_data;
570 wmb();
571 }
572 #else
573 #define ipr_trc_hook(ipr_cmd, type, add_data) do { } while (0)
574 #endif
575
576 /**
577 * ipr_lock_and_done - Acquire lock and complete command
578 * @ipr_cmd: ipr command struct
579 *
580 * Return value:
581 * none
582 **/
583 static void ipr_lock_and_done(struct ipr_cmnd *ipr_cmd)
584 {
585 unsigned long lock_flags;
586 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
587
588 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
589 ipr_cmd->done(ipr_cmd);
590 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
591 }
592
593 /**
594 * ipr_reinit_ipr_cmnd - Re-initialize an IPR Cmnd block for reuse
595 * @ipr_cmd: ipr command struct
596 *
597 * Return value:
598 * none
599 **/
600 static void ipr_reinit_ipr_cmnd(struct ipr_cmnd *ipr_cmd)
601 {
602 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
603 struct ipr_ioasa *ioasa = &ipr_cmd->s.ioasa;
604 struct ipr_ioasa64 *ioasa64 = &ipr_cmd->s.ioasa64;
605 dma_addr_t dma_addr = ipr_cmd->dma_addr;
606 int hrrq_id;
607
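/* The memset below clears the entire command packet, so preserve the
HRRQ assignment across it */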
608 hrrq_id = ioarcb->cmd_pkt.hrrq_id;
609 memset(&ioarcb->cmd_pkt, 0, sizeof(struct ipr_cmd_pkt));
610 ioarcb->cmd_pkt.hrrq_id = hrrq_id;
611 ioarcb->data_transfer_length = 0;
612 ioarcb->read_data_transfer_length = 0;
613 ioarcb->ioadl_len = 0;
614 ioarcb->read_ioadl_len = 0;
615
616 if (ipr_cmd->ioa_cfg->sis64) {
617 ioarcb->u.sis64_addr_data.data_ioadl_addr =
618 cpu_to_be64(dma_addr + offsetof(struct ipr_cmnd, i.ioadl64));
619 ioasa64->u.gata.status = 0;
620 } else {
621 ioarcb->write_ioadl_addr =
622 cpu_to_be32(dma_addr + offsetof(struct ipr_cmnd, i.ioadl));
623 ioarcb->read_ioadl_addr = ioarcb->write_ioadl_addr;
624 ioasa->u.gata.status = 0;
625 }
626
627 ioasa->hdr.ioasc = 0;
628 ioasa->hdr.residual_data_len = 0;
629 ipr_cmd->scsi_cmd = NULL;
630 ipr_cmd->qc = NULL;
631 ipr_cmd->sense_buffer[0] = 0;
632 ipr_cmd->dma_use_sg = 0;
633 }
634
635 /**
636 * ipr_init_ipr_cmnd - Initialize an IPR Cmnd block
637 * @ipr_cmd: ipr command struct
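* @fast_done: fast done function call when done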
638 *
639 * Return value:
640 * none
641 **/
642 static void ipr_init_ipr_cmnd(struct ipr_cmnd *ipr_cmd,
643 void (*fast_done) (struct ipr_cmnd *))
644 {
645 ipr_reinit_ipr_cmnd(ipr_cmd);
646 ipr_cmd->u.scratch = 0;
647 ipr_cmd->sibling = NULL;
648 ipr_cmd->eh_comp = NULL;
649 ipr_cmd->fast_done = fast_done;
650 init_timer(&ipr_cmd->timer);
651 }
652
653 /**
654 * __ipr_get_free_ipr_cmnd - Get a free IPR Cmnd block
655 * @hrrq: hrr queue struct
656 *
657 * Return value:
658 * pointer to ipr command struct
659 **/
660 static
661 struct ipr_cmnd *__ipr_get_free_ipr_cmnd(struct ipr_hrr_queue *hrrq)
662 {
663 struct ipr_cmnd *ipr_cmd = NULL;
664
665 if (likely(!list_empty(&hrrq->hrrq_free_q))) {
666 ipr_cmd = list_entry(hrrq->hrrq_free_q.next,
667 struct ipr_cmnd, queue);
668 list_del(&ipr_cmd->queue);
669 }
670
671
672 return ipr_cmd;
673 }
674
675 /**
676 * ipr_get_free_ipr_cmnd - Get a free IPR Cmnd block and initialize it
677 * @ioa_cfg: ioa config struct
678 *
679 * Return value:
680 * pointer to ipr command struct
681 **/
682 static
683 struct ipr_cmnd *ipr_get_free_ipr_cmnd(struct ipr_ioa_cfg *ioa_cfg)
684 {
685 struct ipr_cmnd *ipr_cmd =
686 __ipr_get_free_ipr_cmnd(&ioa_cfg->hrrq[IPR_INIT_HRRQ]);
687 ipr_init_ipr_cmnd(ipr_cmd, ipr_lock_and_done);
688 return ipr_cmd;
689 }
690
691 /**
692 * ipr_mask_and_clear_interrupts - Mask all and clear specified interrupts
693 * @ioa_cfg: ioa config struct
694 * @clr_ints: interrupts to clear
695 *
696 * This function masks all interrupts on the adapter, then clears the
697 * interrupts specified in the mask
698 *
699 * Return value:
700 * none
701 **/
702 static void ipr_mask_and_clear_interrupts(struct ipr_ioa_cfg *ioa_cfg,
703 u32 clr_ints)
704 {
705 volatile u32 int_reg;
706 int i;
707
708 /* Stop new interrupts */
709 for (i = 0; i < ioa_cfg->hrrq_num; i++) {
710 spin_lock(&ioa_cfg->hrrq[i]._lock);
711 ioa_cfg->hrrq[i].allow_interrupts = 0;
712 spin_unlock(&ioa_cfg->hrrq[i]._lock);
713 }
714 wmb();
715
716 /* Set interrupt mask to stop all new interrupts */
717 if (ioa_cfg->sis64)
718 writeq(~0, ioa_cfg->regs.set_interrupt_mask_reg);
719 else
720 writel(~0, ioa_cfg->regs.set_interrupt_mask_reg);
721
722 /* Clear any pending interrupts */
723 if (ioa_cfg->sis64)
724 writel(~0, ioa_cfg->regs.clr_interrupt_reg);
725 writel(clr_ints, ioa_cfg->regs.clr_interrupt_reg32);
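/* The sense register value is not used; the read pushes the posted MMIO
writes out to the adapter before returning */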
726 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
727 }
728
729 /**
730 * ipr_save_pcix_cmd_reg - Save PCI-X command register
731 * @ioa_cfg: ioa config struct
732 *
733 * Return value:
734 * 0 on success / -EIO on failure
735 **/
736 static int ipr_save_pcix_cmd_reg(struct ipr_ioa_cfg *ioa_cfg)
737 {
738 int pcix_cmd_reg = pci_find_capability(ioa_cfg->pdev, PCI_CAP_ID_PCIX);
739
740 if (pcix_cmd_reg == 0)
741 return 0;
742
743 if (pci_read_config_word(ioa_cfg->pdev, pcix_cmd_reg + PCI_X_CMD,
744 &ioa_cfg->saved_pcix_cmd_reg) != PCIBIOS_SUCCESSFUL) {
745 dev_err(&ioa_cfg->pdev->dev, "Failed to save PCI-X command register\n");
746 return -EIO;
747 }
748
749 ioa_cfg->saved_pcix_cmd_reg |= PCI_X_CMD_DPERR_E | PCI_X_CMD_ERO;
750 return 0;
751 }
752
753 /**
754 * ipr_set_pcix_cmd_reg - Setup PCI-X command register
755 * @ioa_cfg: ioa config struct
756 *
757 * Return value:
758 * 0 on success / -EIO on failure
759 **/
760 static int ipr_set_pcix_cmd_reg(struct ipr_ioa_cfg *ioa_cfg)
761 {
762 int pcix_cmd_reg = pci_find_capability(ioa_cfg->pdev, PCI_CAP_ID_PCIX);
763
764 if (pcix_cmd_reg) {
765 if (pci_write_config_word(ioa_cfg->pdev, pcix_cmd_reg + PCI_X_CMD,
766 ioa_cfg->saved_pcix_cmd_reg) != PCIBIOS_SUCCESSFUL) {
767 dev_err(&ioa_cfg->pdev->dev, "Failed to setup PCI-X command register\n");
768 return -EIO;
769 }
770 }
771
772 return 0;
773 }
774
775 /**
776 * ipr_sata_eh_done - done function for aborted SATA commands
777 * @ipr_cmd: ipr command struct
778 *
779 * This function is invoked for ops generated to SATA
780 * devices which are being aborted.
781 *
782 * Return value:
783 * none
784 **/
785 static void ipr_sata_eh_done(struct ipr_cmnd *ipr_cmd)
786 {
787 struct ata_queued_cmd *qc = ipr_cmd->qc;
788 struct ipr_sata_port *sata_port = qc->ap->private_data;
789
790 qc->err_mask |= AC_ERR_OTHER;
791 sata_port->ioasa.status |= ATA_BUSY;
792 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
793 ata_qc_complete(qc);
794 }
795
796 /**
797 * ipr_scsi_eh_done - mid-layer done function for aborted ops
798 * @ipr_cmd: ipr command struct
799 *
800 * This function is invoked by the interrupt handler for
801 * ops generated by the SCSI mid-layer which are being aborted.
802 *
803 * Return value:
804 * none
805 **/
806 static void ipr_scsi_eh_done(struct ipr_cmnd *ipr_cmd)
807 {
808 struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
809
810 scsi_cmd->result |= (DID_ERROR << 16);
811
812 scsi_dma_unmap(ipr_cmd->scsi_cmd);
813 scsi_cmd->scsi_done(scsi_cmd);
814 if (ipr_cmd->eh_comp)
815 complete(ipr_cmd->eh_comp);
816 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
817 }
818
819 /**
820 * ipr_fail_all_ops - Fails all outstanding ops.
821 * @ioa_cfg: ioa config struct
822 *
823 * This function fails all outstanding ops.
824 *
825 * Return value:
826 * none
827 **/
828 static void ipr_fail_all_ops(struct ipr_ioa_cfg *ioa_cfg)
829 {
830 struct ipr_cmnd *ipr_cmd, *temp;
831 struct ipr_hrr_queue *hrrq;
832
833 ENTER;
834 for_each_hrrq(hrrq, ioa_cfg) {
835 spin_lock(&hrrq->_lock);
836 list_for_each_entry_safe(ipr_cmd,
837 temp, &hrrq->hrrq_pending_q, queue) {
838 list_del(&ipr_cmd->queue);
839
840 ipr_cmd->s.ioasa.hdr.ioasc =
841 cpu_to_be32(IPR_IOASC_IOA_WAS_RESET);
842 ipr_cmd->s.ioasa.hdr.ilid =
843 cpu_to_be32(IPR_DRIVER_ILID);
844
845 if (ipr_cmd->scsi_cmd)
846 ipr_cmd->done = ipr_scsi_eh_done;
847 else if (ipr_cmd->qc)
848 ipr_cmd->done = ipr_sata_eh_done;
849
850 ipr_trc_hook(ipr_cmd, IPR_TRACE_FINISH,
851 IPR_IOASC_IOA_WAS_RESET);
852 del_timer(&ipr_cmd->timer);
853 ipr_cmd->done(ipr_cmd);
854 }
855 spin_unlock(&hrrq->_lock);
856 }
857 LEAVE;
858 }
859
860 /**
861 * ipr_send_command - Send driver initiated requests.
862 * @ipr_cmd: ipr command struct
863 *
864 * This function sends a command to the adapter using the correct write call.
865 * In the case of sis64, calculate the ioarcb size required, then OR in the
866 * appropriate bits.
867 *
868 * Return value:
869 * none
870 **/
871 static void ipr_send_command(struct ipr_cmnd *ipr_cmd)
872 {
873 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
874 dma_addr_t send_dma_addr = ipr_cmd->dma_addr;
875
876 if (ioa_cfg->sis64) {
877 /* The default size is 256 bytes */
878 send_dma_addr |= 0x1;
879
880 /* If the number of ioadls * size of ioadl > 128 bytes,
881 then use a 512 byte ioarcb */
882 if (ipr_cmd->dma_use_sg * sizeof(struct ipr_ioadl64_desc) > 128 )
883 send_dma_addr |= 0x4;
884 writeq(send_dma_addr, ioa_cfg->regs.ioarrin_reg);
885 } else
886 writel(send_dma_addr, ioa_cfg->regs.ioarrin_reg);
887 }
888
889 /**
890 * ipr_do_req - Send driver initiated requests.
891 * @ipr_cmd: ipr command struct
892 * @done: done function
893 * @timeout_func: timeout function
894 * @timeout: timeout value
895 *
896 * This function sends the specified command to the adapter with the
897 * timeout given. The done function is invoked on command completion.
898 *
899 * Return value:
900 * none
901 **/
902 static void ipr_do_req(struct ipr_cmnd *ipr_cmd,
903 void (*done) (struct ipr_cmnd *),
904 void (*timeout_func) (struct ipr_cmnd *), u32 timeout)
905 {
906 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_pending_q);
907
908 ipr_cmd->done = done;
909
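/* Arm the per-command timer; timeout_func runs if the adapter does not
complete the op within the given timeout */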
910 ipr_cmd->timer.data = (unsigned long) ipr_cmd;
911 ipr_cmd->timer.expires = jiffies + timeout;
912 ipr_cmd->timer.function = (void (*)(unsigned long))timeout_func;
913
914 add_timer(&ipr_cmd->timer);
915
916 ipr_trc_hook(ipr_cmd, IPR_TRACE_START, 0);
917
918 ipr_send_command(ipr_cmd);
919 }
920
921 /**
922 * ipr_internal_cmd_done - Op done function for an internally generated op.
923 * @ipr_cmd: ipr command struct
924 *
925 * This function is the op done function for an internally generated,
926 * blocking op. It simply wakes the sleeping thread.
927 *
928 * Return value:
929 * none
930 **/
931 static void ipr_internal_cmd_done(struct ipr_cmnd *ipr_cmd)
932 {
933 if (ipr_cmd->sibling)
934 ipr_cmd->sibling = NULL;
935 else
936 complete(&ipr_cmd->completion);
937 }
938
939 /**
940 * ipr_init_ioadl - initialize the ioadl for the correct SIS type
941 * @ipr_cmd: ipr command struct
942 * @dma_addr: dma address
943 * @len: transfer length
944 * @flags: ioadl flag value
945 *
946 * This function initializes an ioadl in the case where there is only a single
947 * descriptor.
948 *
949 * Return value:
950 * nothing
951 **/
952 static void ipr_init_ioadl(struct ipr_cmnd *ipr_cmd, dma_addr_t dma_addr,
953 u32 len, int flags)
954 {
955 struct ipr_ioadl_desc *ioadl = ipr_cmd->i.ioadl;
956 struct ipr_ioadl64_desc *ioadl64 = ipr_cmd->i.ioadl64;
957
958 ipr_cmd->dma_use_sg = 1;
959
960 if (ipr_cmd->ioa_cfg->sis64) {
961 ioadl64->flags = cpu_to_be32(flags);
962 ioadl64->data_len = cpu_to_be32(len);
963 ioadl64->address = cpu_to_be64(dma_addr);
964
965 ipr_cmd->ioarcb.ioadl_len =
966 cpu_to_be32(sizeof(struct ipr_ioadl64_desc));
967 ipr_cmd->ioarcb.data_transfer_length = cpu_to_be32(len);
968 } else {
969 ioadl->flags_and_data_len = cpu_to_be32(flags | len);
970 ioadl->address = cpu_to_be32(dma_addr);
971
972 if (flags == IPR_IOADL_FLAGS_READ_LAST) {
973 ipr_cmd->ioarcb.read_ioadl_len =
974 cpu_to_be32(sizeof(struct ipr_ioadl_desc));
975 ipr_cmd->ioarcb.read_data_transfer_length = cpu_to_be32(len);
976 } else {
977 ipr_cmd->ioarcb.ioadl_len =
978 cpu_to_be32(sizeof(struct ipr_ioadl_desc));
979 ipr_cmd->ioarcb.data_transfer_length = cpu_to_be32(len);
980 }
981 }
982 }
983
984 /**
985 * ipr_send_blocking_cmd - Send command and sleep on its completion.
986 * @ipr_cmd: ipr command struct
987 * @timeout_func: function to invoke if command times out
988 * @timeout: timeout
989 *
990 * Return value:
991 * none
992 **/
993 static void ipr_send_blocking_cmd(struct ipr_cmnd *ipr_cmd,
994 void (*timeout_func) (struct ipr_cmnd *ipr_cmd),
995 u32 timeout)
996 {
997 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
998
999 init_completion(&ipr_cmd->completion);
1000 ipr_do_req(ipr_cmd, ipr_internal_cmd_done, timeout_func, timeout);
1001
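/* The caller holds the host lock; drop it while sleeping on the
completion and re-acquire it before returning */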
1002 spin_unlock_irq(ioa_cfg->host->host_lock);
1003 wait_for_completion(&ipr_cmd->completion);
1004 spin_lock_irq(ioa_cfg->host->host_lock);
1005 }
1006
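/**
* ipr_get_hrrq_index - Pick an HRRQ index for a new command
* @ioa_cfg: ioa config struct
*
* Round-robins across the available HRRQs, leaving queue 0 (IPR_INIT_HRRQ)
* for driver-internal use whenever more than one queue is configured.
*
* Return value:
* HRRQ index
**/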
1007 static int ipr_get_hrrq_index(struct ipr_ioa_cfg *ioa_cfg)
1008 {
1009 if (ioa_cfg->hrrq_num == 1)
1010 return 0;
1011 else
1012 return (atomic_add_return(1, &ioa_cfg->hrrq_index) % (ioa_cfg->hrrq_num - 1)) + 1;
1013 }
1014
1015 /**
1016 * ipr_send_hcam - Send an HCAM to the adapter.
1017 * @ioa_cfg: ioa config struct
1018 * @type: HCAM type
1019 * @hostrcb: hostrcb struct
1020 *
1021 * This function will send a Host Controlled Async command to the adapter.
1022 * If HCAMs are currently not allowed to be issued to the adapter, it will
1023 * place the hostrcb on the free queue.
1024 *
1025 * Return value:
1026 * none
1027 **/
1028 static void ipr_send_hcam(struct ipr_ioa_cfg *ioa_cfg, u8 type,
1029 struct ipr_hostrcb *hostrcb)
1030 {
1031 struct ipr_cmnd *ipr_cmd;
1032 struct ipr_ioarcb *ioarcb;
1033
1034 if (ioa_cfg->hrrq[IPR_INIT_HRRQ].allow_cmds) {
1035 ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
1036 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_pending_q);
1037 list_add_tail(&hostrcb->queue, &ioa_cfg->hostrcb_pending_q);
1038
1039 ipr_cmd->u.hostrcb = hostrcb;
1040 ioarcb = &ipr_cmd->ioarcb;
1041
1042 ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
1043 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_HCAM;
1044 ioarcb->cmd_pkt.cdb[0] = IPR_HOST_CONTROLLED_ASYNC;
1045 ioarcb->cmd_pkt.cdb[1] = type;
1046 ioarcb->cmd_pkt.cdb[7] = (sizeof(hostrcb->hcam) >> 8) & 0xff;
1047 ioarcb->cmd_pkt.cdb[8] = sizeof(hostrcb->hcam) & 0xff;
1048
1049 ipr_init_ioadl(ipr_cmd, hostrcb->hostrcb_dma,
1050 sizeof(hostrcb->hcam), IPR_IOADL_FLAGS_READ_LAST);
1051
1052 if (type == IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE)
1053 ipr_cmd->done = ipr_process_ccn;
1054 else
1055 ipr_cmd->done = ipr_process_error;
1056
1057 ipr_trc_hook(ipr_cmd, IPR_TRACE_START, IPR_IOA_RES_ADDR);
1058
1059 ipr_send_command(ipr_cmd);
1060 } else {
1061 list_add_tail(&hostrcb->queue, &ioa_cfg->hostrcb_free_q);
1062 }
1063 }
1064
1065 /**
1066 * ipr_update_ata_class - Update the ata class in the resource entry
1067 * @res: resource entry struct
1068 * @proto: cfgte device bus protocol value
1069 *
1070 * Return value:
1071 * none
1072 **/
1073 static void ipr_update_ata_class(struct ipr_resource_entry *res, unsigned int proto)
1074 {
1075 switch (proto) {
1076 case IPR_PROTO_SATA:
1077 case IPR_PROTO_SAS_STP:
1078 res->ata_class = ATA_DEV_ATA;
1079 break;
1080 case IPR_PROTO_SATA_ATAPI:
1081 case IPR_PROTO_SAS_STP_ATAPI:
1082 res->ata_class = ATA_DEV_ATAPI;
1083 break;
1084 default:
1085 res->ata_class = ATA_DEV_UNKNOWN;
1086 break;
1087 }
1088 }
1089
1090 /**
1091 * ipr_init_res_entry - Initialize a resource entry struct.
1092 * @res: resource entry struct
1093 * @cfgtew: config table entry wrapper struct
1094 *
1095 * Return value:
1096 * none
1097 **/
1098 static void ipr_init_res_entry(struct ipr_resource_entry *res,
1099 struct ipr_config_table_entry_wrapper *cfgtew)
1100 {
1101 int found = 0;
1102 unsigned int proto;
1103 struct ipr_ioa_cfg *ioa_cfg = res->ioa_cfg;
1104 struct ipr_resource_entry *gscsi_res = NULL;
1105
1106 res->needs_sync_complete = 0;
1107 res->in_erp = 0;
1108 res->add_to_ml = 0;
1109 res->del_from_ml = 0;
1110 res->resetting_device = 0;
1111 res->sdev = NULL;
1112 res->sata_port = NULL;
1113
1114 if (ioa_cfg->sis64) {
1115 proto = cfgtew->u.cfgte64->proto;
1116 res->res_flags = cfgtew->u.cfgte64->res_flags;
1117 res->qmodel = IPR_QUEUEING_MODEL64(res);
1118 res->type = cfgtew->u.cfgte64->res_type;
1119
1120 memcpy(res->res_path, &cfgtew->u.cfgte64->res_path,
1121 sizeof(res->res_path));
1122
1123 res->bus = 0;
1124 memcpy(&res->dev_lun.scsi_lun, &cfgtew->u.cfgte64->lun,
1125 sizeof(res->dev_lun.scsi_lun));
1126 res->lun = scsilun_to_int(&res->dev_lun);
1127
1128 if (res->type == IPR_RES_TYPE_GENERIC_SCSI) {
1129 list_for_each_entry(gscsi_res, &ioa_cfg->used_res_q, queue) {
1130 if (gscsi_res->dev_id == cfgtew->u.cfgte64->dev_id) {
1131 found = 1;
1132 res->target = gscsi_res->target;
1133 break;
1134 }
1135 }
1136 if (!found) {
1137 res->target = find_first_zero_bit(ioa_cfg->target_ids,
1138 ioa_cfg->max_devs_supported);
1139 set_bit(res->target, ioa_cfg->target_ids);
1140 }
1141 } else if (res->type == IPR_RES_TYPE_IOAFP) {
1142 res->bus = IPR_IOAFP_VIRTUAL_BUS;
1143 res->target = 0;
1144 } else if (res->type == IPR_RES_TYPE_ARRAY) {
1145 res->bus = IPR_ARRAY_VIRTUAL_BUS;
1146 res->target = find_first_zero_bit(ioa_cfg->array_ids,
1147 ioa_cfg->max_devs_supported);
1148 set_bit(res->target, ioa_cfg->array_ids);
1149 } else if (res->type == IPR_RES_TYPE_VOLUME_SET) {
1150 res->bus = IPR_VSET_VIRTUAL_BUS;
1151 res->target = find_first_zero_bit(ioa_cfg->vset_ids,
1152 ioa_cfg->max_devs_supported);
1153 set_bit(res->target, ioa_cfg->vset_ids);
1154 } else {
1155 res->target = find_first_zero_bit(ioa_cfg->target_ids,
1156 ioa_cfg->max_devs_supported);
1157 set_bit(res->target, ioa_cfg->target_ids);
1158 }
1159 } else {
1160 proto = cfgtew->u.cfgte->proto;
1161 res->qmodel = IPR_QUEUEING_MODEL(res);
1162 res->flags = cfgtew->u.cfgte->flags;
1163 if (res->flags & IPR_IS_IOA_RESOURCE)
1164 res->type = IPR_RES_TYPE_IOAFP;
1165 else
1166 res->type = cfgtew->u.cfgte->rsvd_subtype & 0x0f;
1167
1168 res->bus = cfgtew->u.cfgte->res_addr.bus;
1169 res->target = cfgtew->u.cfgte->res_addr.target;
1170 res->lun = cfgtew->u.cfgte->res_addr.lun;
1171 res->lun_wwn = get_unaligned_be64(cfgtew->u.cfgte->lun_wwn);
1172 }
1173
1174 ipr_update_ata_class(res, proto);
1175 }
1176
1177 /**
1178 * ipr_is_same_device - Determine if two devices are the same.
1179 * @res: resource entry struct
1180 * @cfgtew: config table entry wrapper struct
1181 *
1182 * Return value:
1183 * 1 if the devices are the same / 0 otherwise
1184 **/
1185 static int ipr_is_same_device(struct ipr_resource_entry *res,
1186 struct ipr_config_table_entry_wrapper *cfgtew)
1187 {
1188 if (res->ioa_cfg->sis64) {
1189 if (!memcmp(&res->dev_id, &cfgtew->u.cfgte64->dev_id,
1190 sizeof(cfgtew->u.cfgte64->dev_id)) &&
1191 !memcmp(&res->dev_lun.scsi_lun, &cfgtew->u.cfgte64->lun,
1192 sizeof(cfgtew->u.cfgte64->lun))) {
1193 return 1;
1194 }
1195 } else {
1196 if (res->bus == cfgtew->u.cfgte->res_addr.bus &&
1197 res->target == cfgtew->u.cfgte->res_addr.target &&
1198 res->lun == cfgtew->u.cfgte->res_addr.lun)
1199 return 1;
1200 }
1201
1202 return 0;
1203 }
1204
1205 /**
1206 * __ipr_format_res_path - Format the resource path for printing.
1207 * @res_path: resource path
1208 * @buf: buffer
1209 * @len: length of buffer provided
1210 *
1211 * Return value:
1212 * pointer to buffer
1213 **/
1214 static char *__ipr_format_res_path(u8 *res_path, char *buffer, int len)
1215 {
1216 int i;
1217 char *p = buffer;
1218
1219 *p = '\0';
1220 p += snprintf(p, buffer + len - p, "%02X", res_path[0]);
1221 for (i = 1; res_path[i] != 0xff && ((i * 3) < len); i++)
1222 p += snprintf(p, buffer + len - p, "-%02X", res_path[i]);
1223
1224 return buffer;
1225 }
1226
1227 /**
1228 * ipr_format_res_path - Format the resource path for printing.
1229 * @ioa_cfg: ioa config struct
1230 * @res_path: resource path
1231 * @buf: buffer
1232 * @len: length of buffer provided
1233 *
1234 * Return value:
1235 * pointer to buffer
1236 **/
1237 static char *ipr_format_res_path(struct ipr_ioa_cfg *ioa_cfg,
1238 u8 *res_path, char *buffer, int len)
1239 {
1240 char *p = buffer;
1241
1242 *p = '\0';
1243 p += snprintf(p, buffer + len - p, "%d/", ioa_cfg->host->host_no);
1244 __ipr_format_res_path(res_path, p, len - (buffer - p));
1245 return buffer;
1246 }
1247
1248 /**
1249 * ipr_update_res_entry - Update the resource entry.
1250 * @res: resource entry struct
1251 * @cfgtew: config table entry wrapper struct
1252 *
1253 * Return value:
1254 * none
1255 **/
1256 static void ipr_update_res_entry(struct ipr_resource_entry *res,
1257 struct ipr_config_table_entry_wrapper *cfgtew)
1258 {
1259 char buffer[IPR_MAX_RES_PATH_LENGTH];
1260 unsigned int proto;
1261 int new_path = 0;
1262
1263 if (res->ioa_cfg->sis64) {
1264 res->flags = cfgtew->u.cfgte64->flags;
1265 res->res_flags = cfgtew->u.cfgte64->res_flags;
1266 res->type = cfgtew->u.cfgte64->res_type;
1267
1268 memcpy(&res->std_inq_data, &cfgtew->u.cfgte64->std_inq_data,
1269 sizeof(struct ipr_std_inq_data));
1270
1271 res->qmodel = IPR_QUEUEING_MODEL64(res);
1272 proto = cfgtew->u.cfgte64->proto;
1273 res->res_handle = cfgtew->u.cfgte64->res_handle;
1274 res->dev_id = cfgtew->u.cfgte64->dev_id;
1275
1276 memcpy(&res->dev_lun.scsi_lun, &cfgtew->u.cfgte64->lun,
1277 sizeof(res->dev_lun.scsi_lun));
1278
1279 if (memcmp(res->res_path, &cfgtew->u.cfgte64->res_path,
1280 sizeof(res->res_path))) {
1281 memcpy(res->res_path, &cfgtew->u.cfgte64->res_path,
1282 sizeof(res->res_path));
1283 new_path = 1;
1284 }
1285
1286 if (res->sdev && new_path)
1287 sdev_printk(KERN_INFO, res->sdev, "Resource path: %s\n",
1288 ipr_format_res_path(res->ioa_cfg,
1289 res->res_path, buffer, sizeof(buffer)));
1290 } else {
1291 res->flags = cfgtew->u.cfgte->flags;
1292 if (res->flags & IPR_IS_IOA_RESOURCE)
1293 res->type = IPR_RES_TYPE_IOAFP;
1294 else
1295 res->type = cfgtew->u.cfgte->rsvd_subtype & 0x0f;
1296
1297 memcpy(&res->std_inq_data, &cfgtew->u.cfgte->std_inq_data,
1298 sizeof(struct ipr_std_inq_data));
1299
1300 res->qmodel = IPR_QUEUEING_MODEL(res);
1301 proto = cfgtew->u.cfgte->proto;
1302 res->res_handle = cfgtew->u.cfgte->res_handle;
1303 }
1304
1305 ipr_update_ata_class(res, proto);
1306 }
1307
1308 /**
1309 * ipr_clear_res_target - Clear the bit in the bit map representing the target
1310 * for the resource.
1311 * @res: resource entry struct
1313 *
1314 * Return value:
1315 * none
1316 **/
1317 static void ipr_clear_res_target(struct ipr_resource_entry *res)
1318 {
1319 struct ipr_resource_entry *gscsi_res = NULL;
1320 struct ipr_ioa_cfg *ioa_cfg = res->ioa_cfg;
1321
1322 if (!ioa_cfg->sis64)
1323 return;
1324
1325 if (res->bus == IPR_ARRAY_VIRTUAL_BUS)
1326 clear_bit(res->target, ioa_cfg->array_ids);
1327 else if (res->bus == IPR_VSET_VIRTUAL_BUS)
1328 clear_bit(res->target, ioa_cfg->vset_ids);
1329 else if (res->bus == 0 && res->type == IPR_RES_TYPE_GENERIC_SCSI) {
1330 list_for_each_entry(gscsi_res, &ioa_cfg->used_res_q, queue)
1331 if (gscsi_res->dev_id == res->dev_id && gscsi_res != res)
1332 return;
1333 clear_bit(res->target, ioa_cfg->target_ids);
1334
1335 } else if (res->bus == 0)
1336 clear_bit(res->target, ioa_cfg->target_ids);
1337 }
1338
1339 /**
1340 * ipr_handle_config_change - Handle a config change from the adapter
1341 * @ioa_cfg: ioa config struct
1342 * @hostrcb: hostrcb
1343 *
1344 * Return value:
1345 * none
1346 **/
1347 static void ipr_handle_config_change(struct ipr_ioa_cfg *ioa_cfg,
1348 struct ipr_hostrcb *hostrcb)
1349 {
1350 struct ipr_resource_entry *res = NULL;
1351 struct ipr_config_table_entry_wrapper cfgtew;
1352 __be32 cc_res_handle;
1353
1354 u32 is_ndn = 1;
1355
1356 if (ioa_cfg->sis64) {
1357 cfgtew.u.cfgte64 = &hostrcb->hcam.u.ccn.u.cfgte64;
1358 cc_res_handle = cfgtew.u.cfgte64->res_handle;
1359 } else {
1360 cfgtew.u.cfgte = &hostrcb->hcam.u.ccn.u.cfgte;
1361 cc_res_handle = cfgtew.u.cfgte->res_handle;
1362 }
1363
1364 list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
1365 if (res->res_handle == cc_res_handle) {
1366 is_ndn = 0;
1367 break;
1368 }
1369 }
1370
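/* No existing resource entry matched this handle, so this is a new device:
take an entry from the free queue, or re-issue the HCAM and bail out if
none are available */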
1371 if (is_ndn) {
1372 if (list_empty(&ioa_cfg->free_res_q)) {
1373 ipr_send_hcam(ioa_cfg,
1374 IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE,
1375 hostrcb);
1376 return;
1377 }
1378
1379 res = list_entry(ioa_cfg->free_res_q.next,
1380 struct ipr_resource_entry, queue);
1381
1382 list_del(&res->queue);
1383 ipr_init_res_entry(res, &cfgtew);
1384 list_add_tail(&res->queue, &ioa_cfg->used_res_q);
1385 }
1386
1387 ipr_update_res_entry(res, &cfgtew);
1388
1389 if (hostrcb->hcam.notify_type == IPR_HOST_RCB_NOTIF_TYPE_REM_ENTRY) {
1390 if (res->sdev) {
1391 res->del_from_ml = 1;
1392 res->res_handle = IPR_INVALID_RES_HANDLE;
1393 if (ioa_cfg->allow_ml_add_del)
1394 schedule_work(&ioa_cfg->work_q);
1395 } else {
1396 ipr_clear_res_target(res);
1397 list_move_tail(&res->queue, &ioa_cfg->free_res_q);
1398 }
1399 } else if (!res->sdev || res->del_from_ml) {
1400 res->add_to_ml = 1;
1401 if (ioa_cfg->allow_ml_add_del)
1402 schedule_work(&ioa_cfg->work_q);
1403 }
1404
1405 ipr_send_hcam(ioa_cfg, IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE, hostrcb);
1406 }
1407
1408 /**
1409 * ipr_process_ccn - Op done function for a CCN.
1410 * @ipr_cmd: ipr command struct
1411 *
1412 * This function is the op done function for a configuration
1413 * change notification host controlled async from the adapter.
1414 *
1415 * Return value:
1416 * none
1417 **/
1418 static void ipr_process_ccn(struct ipr_cmnd *ipr_cmd)
1419 {
1420 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
1421 struct ipr_hostrcb *hostrcb = ipr_cmd->u.hostrcb;
1422 u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
1423
1424 list_del(&hostrcb->queue);
1425 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
1426
1427 if (ioasc) {
1428 if (ioasc != IPR_IOASC_IOA_WAS_RESET)
1429 dev_err(&ioa_cfg->pdev->dev,
1430 "Host RCB failed with IOASC: 0x%08X\n", ioasc);
1431
1432 ipr_send_hcam(ioa_cfg, IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE, hostrcb);
1433 } else {
1434 ipr_handle_config_change(ioa_cfg, hostrcb);
1435 }
1436 }
1437
1438 /**
1439 * strip_and_pad_whitespace - Strip and pad trailing whitespace.
1440 * @i: index into buffer
1441 * @buf: string to modify
1442 *
1443 * This function will strip all trailing whitespace, pad the end
1444 * of the string with a single space, and NULL terminate the string.
1445 *
1446 * Return value:
1447 * new length of string
1448 **/
1449 static int strip_and_pad_whitespace(int i, char *buf)
1450 {
1451 while (i && buf[i] == ' ')
1452 i--;
1453 buf[i+1] = ' ';
1454 buf[i+2] = '\0';
1455 return i + 2;
1456 }
1457
1458 /**
1459 * ipr_log_vpd_compact - Log the passed extended VPD compactly.
1460 * @prefix: string to print at start of printk
1461 * @hostrcb: hostrcb pointer
1462 * @vpd: vendor/product id/sn struct
1463 *
1464 * Return value:
1465 * none
1466 **/
1467 static void ipr_log_vpd_compact(char *prefix, struct ipr_hostrcb *hostrcb,
1468 struct ipr_vpd *vpd)
1469 {
1470 char buffer[IPR_VENDOR_ID_LEN + IPR_PROD_ID_LEN + IPR_SERIAL_NUM_LEN + 3];
1471 int i = 0;
1472
1473 memcpy(buffer, vpd->vpids.vendor_id, IPR_VENDOR_ID_LEN);
1474 i = strip_and_pad_whitespace(IPR_VENDOR_ID_LEN - 1, buffer);
1475
1476 memcpy(&buffer[i], vpd->vpids.product_id, IPR_PROD_ID_LEN);
1477 i = strip_and_pad_whitespace(i + IPR_PROD_ID_LEN - 1, buffer);
1478
1479 memcpy(&buffer[i], vpd->sn, IPR_SERIAL_NUM_LEN);
1480 buffer[IPR_SERIAL_NUM_LEN + i] = '\0';
1481
1482 ipr_hcam_err(hostrcb, "%s VPID/SN: %s\n", prefix, buffer);
1483 }
1484
1485 /**
1486 * ipr_log_vpd - Log the passed VPD to the error log.
1487 * @vpd: vendor/product id/sn struct
1488 *
1489 * Return value:
1490 * none
1491 **/
1492 static void ipr_log_vpd(struct ipr_vpd *vpd)
1493 {
1494 char buffer[IPR_VENDOR_ID_LEN + IPR_PROD_ID_LEN
1495 + IPR_SERIAL_NUM_LEN];
1496
1497 memcpy(buffer, vpd->vpids.vendor_id, IPR_VENDOR_ID_LEN);
1498 memcpy(buffer + IPR_VENDOR_ID_LEN, vpd->vpids.product_id,
1499 IPR_PROD_ID_LEN);
1500 buffer[IPR_VENDOR_ID_LEN + IPR_PROD_ID_LEN] = '\0';
1501 ipr_err("Vendor/Product ID: %s\n", buffer);
1502
1503 memcpy(buffer, vpd->sn, IPR_SERIAL_NUM_LEN);
1504 buffer[IPR_SERIAL_NUM_LEN] = '\0';
1505 ipr_err(" Serial Number: %s\n", buffer);
1506 }
1507
1508 /**
1509 * ipr_log_ext_vpd_compact - Log the passed extended VPD compactly.
1510 * @prefix: string to print at start of printk
1511 * @hostrcb: hostrcb pointer
1512 * @vpd: vendor/product id/sn/wwn struct
1513 *
1514 * Return value:
1515 * none
1516 **/
1517 static void ipr_log_ext_vpd_compact(char *prefix, struct ipr_hostrcb *hostrcb,
1518 struct ipr_ext_vpd *vpd)
1519 {
1520 ipr_log_vpd_compact(prefix, hostrcb, &vpd->vpd);
1521 ipr_hcam_err(hostrcb, "%s WWN: %08X%08X\n", prefix,
1522 be32_to_cpu(vpd->wwid[0]), be32_to_cpu(vpd->wwid[1]));
1523 }
1524
1525 /**
1526 * ipr_log_ext_vpd - Log the passed extended VPD to the error log.
1527 * @vpd: vendor/product id/sn/wwn struct
1528 *
1529 * Return value:
1530 * none
1531 **/
1532 static void ipr_log_ext_vpd(struct ipr_ext_vpd *vpd)
1533 {
1534 ipr_log_vpd(&vpd->vpd);
1535 ipr_err(" WWN: %08X%08X\n", be32_to_cpu(vpd->wwid[0]),
1536 be32_to_cpu(vpd->wwid[1]));
1537 }
1538
1539 /**
1540 * ipr_log_enhanced_cache_error - Log a cache error.
1541 * @ioa_cfg: ioa config struct
1542 * @hostrcb: hostrcb struct
1543 *
1544 * Return value:
1545 * none
1546 **/
1547 static void ipr_log_enhanced_cache_error(struct ipr_ioa_cfg *ioa_cfg,
1548 struct ipr_hostrcb *hostrcb)
1549 {
1550 struct ipr_hostrcb_type_12_error *error;
1551
1552 if (ioa_cfg->sis64)
1553 error = &hostrcb->hcam.u.error64.u.type_12_error;
1554 else
1555 error = &hostrcb->hcam.u.error.u.type_12_error;
1556
1557 ipr_err("-----Current Configuration-----\n");
1558 ipr_err("Cache Directory Card Information:\n");
1559 ipr_log_ext_vpd(&error->ioa_vpd);
1560 ipr_err("Adapter Card Information:\n");
1561 ipr_log_ext_vpd(&error->cfc_vpd);
1562
1563 ipr_err("-----Expected Configuration-----\n");
1564 ipr_err("Cache Directory Card Information:\n");
1565 ipr_log_ext_vpd(&error->ioa_last_attached_to_cfc_vpd);
1566 ipr_err("Adapter Card Information:\n");
1567 ipr_log_ext_vpd(&error->cfc_last_attached_to_ioa_vpd);
1568
1569 ipr_err("Additional IOA Data: %08X %08X %08X\n",
1570 be32_to_cpu(error->ioa_data[0]),
1571 be32_to_cpu(error->ioa_data[1]),
1572 be32_to_cpu(error->ioa_data[2]));
1573 }
1574
1575 /**
1576 * ipr_log_cache_error - Log a cache error.
1577 * @ioa_cfg: ioa config struct
1578 * @hostrcb: hostrcb struct
1579 *
1580 * Return value:
1581 * none
1582 **/
1583 static void ipr_log_cache_error(struct ipr_ioa_cfg *ioa_cfg,
1584 struct ipr_hostrcb *hostrcb)
1585 {
1586 struct ipr_hostrcb_type_02_error *error =
1587 &hostrcb->hcam.u.error.u.type_02_error;
1588
1589 ipr_err("-----Current Configuration-----\n");
1590 ipr_err("Cache Directory Card Information:\n");
1591 ipr_log_vpd(&error->ioa_vpd);
1592 ipr_err("Adapter Card Information:\n");
1593 ipr_log_vpd(&error->cfc_vpd);
1594
1595 ipr_err("-----Expected Configuration-----\n");
1596 ipr_err("Cache Directory Card Information:\n");
1597 ipr_log_vpd(&error->ioa_last_attached_to_cfc_vpd);
1598 ipr_err("Adapter Card Information:\n");
1599 ipr_log_vpd(&error->cfc_last_attached_to_ioa_vpd);
1600
1601 ipr_err("Additional IOA Data: %08X %08X %08X\n",
1602 be32_to_cpu(error->ioa_data[0]),
1603 be32_to_cpu(error->ioa_data[1]),
1604 be32_to_cpu(error->ioa_data[2]));
1605 }
1606
1607 /**
1608 * ipr_log_enhanced_config_error - Log a configuration error.
1609 * @ioa_cfg: ioa config struct
1610 * @hostrcb: hostrcb struct
1611 *
1612 * Return value:
1613 * none
1614 **/
1615 static void ipr_log_enhanced_config_error(struct ipr_ioa_cfg *ioa_cfg,
1616 struct ipr_hostrcb *hostrcb)
1617 {
1618 int errors_logged, i;
1619 struct ipr_hostrcb_device_data_entry_enhanced *dev_entry;
1620 struct ipr_hostrcb_type_13_error *error;
1621
1622 error = &hostrcb->hcam.u.error.u.type_13_error;
1623 errors_logged = be32_to_cpu(error->errors_logged);
1624
1625 ipr_err("Device Errors Detected/Logged: %d/%d\n",
1626 be32_to_cpu(error->errors_detected), errors_logged);
1627
1628 dev_entry = error->dev;
1629
1630 for (i = 0; i < errors_logged; i++, dev_entry++) {
1631 ipr_err_separator;
1632
1633 ipr_phys_res_err(ioa_cfg, dev_entry->dev_res_addr, "Device %d", i + 1);
1634 ipr_log_ext_vpd(&dev_entry->vpd);
1635
1636 ipr_err("-----New Device Information-----\n");
1637 ipr_log_ext_vpd(&dev_entry->new_vpd);
1638
1639 ipr_err("Cache Directory Card Information:\n");
1640 ipr_log_ext_vpd(&dev_entry->ioa_last_with_dev_vpd);
1641
1642 ipr_err("Adapter Card Information:\n");
1643 ipr_log_ext_vpd(&dev_entry->cfc_last_with_dev_vpd);
1644 }
1645 }
1646
1647 /**
1648  * ipr_log_sis64_config_error - Log a sis64 configuration error.
1649 * @ioa_cfg: ioa config struct
1650 * @hostrcb: hostrcb struct
1651 *
1652 * Return value:
1653 * none
1654 **/
1655 static void ipr_log_sis64_config_error(struct ipr_ioa_cfg *ioa_cfg,
1656 struct ipr_hostrcb *hostrcb)
1657 {
1658 int errors_logged, i;
1659 struct ipr_hostrcb64_device_data_entry_enhanced *dev_entry;
1660 struct ipr_hostrcb_type_23_error *error;
1661 char buffer[IPR_MAX_RES_PATH_LENGTH];
1662
1663 error = &hostrcb->hcam.u.error64.u.type_23_error;
1664 errors_logged = be32_to_cpu(error->errors_logged);
1665
1666 ipr_err("Device Errors Detected/Logged: %d/%d\n",
1667 be32_to_cpu(error->errors_detected), errors_logged);
1668
1669 dev_entry = error->dev;
1670
1671 for (i = 0; i < errors_logged; i++, dev_entry++) {
1672 ipr_err_separator;
1673
1674 ipr_err("Device %d : %s", i + 1,
1675 __ipr_format_res_path(dev_entry->res_path,
1676 buffer, sizeof(buffer)));
1677 ipr_log_ext_vpd(&dev_entry->vpd);
1678
1679 ipr_err("-----New Device Information-----\n");
1680 ipr_log_ext_vpd(&dev_entry->new_vpd);
1681
1682 ipr_err("Cache Directory Card Information:\n");
1683 ipr_log_ext_vpd(&dev_entry->ioa_last_with_dev_vpd);
1684
1685 ipr_err("Adapter Card Information:\n");
1686 ipr_log_ext_vpd(&dev_entry->cfc_last_with_dev_vpd);
1687 }
1688 }
1689
1690 /**
1691 * ipr_log_config_error - Log a configuration error.
1692 * @ioa_cfg: ioa config struct
1693 * @hostrcb: hostrcb struct
1694 *
1695 * Return value:
1696 * none
1697 **/
1698 static void ipr_log_config_error(struct ipr_ioa_cfg *ioa_cfg,
1699 struct ipr_hostrcb *hostrcb)
1700 {
1701 int errors_logged, i;
1702 struct ipr_hostrcb_device_data_entry *dev_entry;
1703 struct ipr_hostrcb_type_03_error *error;
1704
1705 error = &hostrcb->hcam.u.error.u.type_03_error;
1706 errors_logged = be32_to_cpu(error->errors_logged);
1707
1708 ipr_err("Device Errors Detected/Logged: %d/%d\n",
1709 be32_to_cpu(error->errors_detected), errors_logged);
1710
1711 dev_entry = error->dev;
1712
1713 for (i = 0; i < errors_logged; i++, dev_entry++) {
1714 ipr_err_separator;
1715
1716 ipr_phys_res_err(ioa_cfg, dev_entry->dev_res_addr, "Device %d", i + 1);
1717 ipr_log_vpd(&dev_entry->vpd);
1718
1719 ipr_err("-----New Device Information-----\n");
1720 ipr_log_vpd(&dev_entry->new_vpd);
1721
1722 ipr_err("Cache Directory Card Information:\n");
1723 ipr_log_vpd(&dev_entry->ioa_last_with_dev_vpd);
1724
1725 ipr_err("Adapter Card Information:\n");
1726 ipr_log_vpd(&dev_entry->cfc_last_with_dev_vpd);
1727
1728 ipr_err("Additional IOA Data: %08X %08X %08X %08X %08X\n",
1729 be32_to_cpu(dev_entry->ioa_data[0]),
1730 be32_to_cpu(dev_entry->ioa_data[1]),
1731 be32_to_cpu(dev_entry->ioa_data[2]),
1732 be32_to_cpu(dev_entry->ioa_data[3]),
1733 be32_to_cpu(dev_entry->ioa_data[4]));
1734 }
1735 }
1736
1737 /**
1738 * ipr_log_enhanced_array_error - Log an array configuration error.
1739 * @ioa_cfg: ioa config struct
1740 * @hostrcb: hostrcb struct
1741 *
1742 * Return value:
1743 * none
1744 **/
1745 static void ipr_log_enhanced_array_error(struct ipr_ioa_cfg *ioa_cfg,
1746 struct ipr_hostrcb *hostrcb)
1747 {
1748 int i, num_entries;
1749 struct ipr_hostrcb_type_14_error *error;
1750 struct ipr_hostrcb_array_data_entry_enhanced *array_entry;
1751 const u8 zero_sn[IPR_SERIAL_NUM_LEN] = { [0 ... IPR_SERIAL_NUM_LEN-1] = '0' };
1752
1753 error = &hostrcb->hcam.u.error.u.type_14_error;
1754
1755 ipr_err_separator;
1756
1757 ipr_err("RAID %s Array Configuration: %d:%d:%d:%d\n",
1758 error->protection_level,
1759 ioa_cfg->host->host_no,
1760 error->last_func_vset_res_addr.bus,
1761 error->last_func_vset_res_addr.target,
1762 error->last_func_vset_res_addr.lun);
1763
1764 ipr_err_separator;
1765
1766 array_entry = error->array_member;
1767 num_entries = min_t(u32, be32_to_cpu(error->num_entries),
1768 ARRAY_SIZE(error->array_member));
1769
1770 for (i = 0; i < num_entries; i++, array_entry++) {
1771 if (!memcmp(array_entry->vpd.vpd.sn, zero_sn, IPR_SERIAL_NUM_LEN))
1772 continue;
1773
1774 if (be32_to_cpu(error->exposed_mode_adn) == i)
1775 ipr_err("Exposed Array Member %d:\n", i);
1776 else
1777 ipr_err("Array Member %d:\n", i);
1778
1779 ipr_log_ext_vpd(&array_entry->vpd);
1780 ipr_phys_res_err(ioa_cfg, array_entry->dev_res_addr, "Current Location");
1781 ipr_phys_res_err(ioa_cfg, array_entry->expected_dev_res_addr,
1782 "Expected Location");
1783
1784 ipr_err_separator;
1785 }
1786 }
1787
1788 /**
1789 * ipr_log_array_error - Log an array configuration error.
1790 * @ioa_cfg: ioa config struct
1791 * @hostrcb: hostrcb struct
1792 *
1793 * Return value:
1794 * none
1795 **/
1796 static void ipr_log_array_error(struct ipr_ioa_cfg *ioa_cfg,
1797 struct ipr_hostrcb *hostrcb)
1798 {
1799 int i;
1800 struct ipr_hostrcb_type_04_error *error;
1801 struct ipr_hostrcb_array_data_entry *array_entry;
1802 const u8 zero_sn[IPR_SERIAL_NUM_LEN] = { [0 ... IPR_SERIAL_NUM_LEN-1] = '0' };
1803
1804 error = &hostrcb->hcam.u.error.u.type_04_error;
1805
1806 ipr_err_separator;
1807
1808 ipr_err("RAID %s Array Configuration: %d:%d:%d:%d\n",
1809 error->protection_level,
1810 ioa_cfg->host->host_no,
1811 error->last_func_vset_res_addr.bus,
1812 error->last_func_vset_res_addr.target,
1813 error->last_func_vset_res_addr.lun);
1814
1815 ipr_err_separator;
1816
1817 array_entry = error->array_member;
1818
1819 for (i = 0; i < 18; i++) {
1820 if (!memcmp(array_entry->vpd.sn, zero_sn, IPR_SERIAL_NUM_LEN))
1821 continue;
1822
1823 if (be32_to_cpu(error->exposed_mode_adn) == i)
1824 ipr_err("Exposed Array Member %d:\n", i);
1825 else
1826 ipr_err("Array Member %d:\n", i);
1827
1828 ipr_log_vpd(&array_entry->vpd);
1829
1830 ipr_phys_res_err(ioa_cfg, array_entry->dev_res_addr, "Current Location");
1831 ipr_phys_res_err(ioa_cfg, array_entry->expected_dev_res_addr,
1832 "Expected Location");
1833
1834 ipr_err_separator;
1835
1836 if (i == 9)
1837 array_entry = error->array_member2;
1838 else
1839 array_entry++;
1840 }
1841 }
1842
1843 /**
1844 * ipr_log_hex_data - Log additional hex IOA error data.
1845 * @ioa_cfg: ioa config struct
1846 * @data: IOA error data
1847 * @len: data length
1848 *
1849 * Return value:
1850 * none
1851 **/
1852 static void ipr_log_hex_data(struct ipr_ioa_cfg *ioa_cfg, u32 *data, int len)
1853 {
1854 int i;
1855
1856 if (len == 0)
1857 return;
1858
1859 if (ioa_cfg->log_level <= IPR_DEFAULT_LOG_LEVEL)
1860 len = min_t(int, len, IPR_DEFAULT_MAX_ERROR_DUMP);
1861
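	/* Each output line shows the byte offset followed by four 32-bit words; i counts words, so the printed offset is i*4 */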
1862 for (i = 0; i < len / 4; i += 4) {
1863 ipr_err("%08X: %08X %08X %08X %08X\n", i*4,
1864 be32_to_cpu(data[i]),
1865 be32_to_cpu(data[i+1]),
1866 be32_to_cpu(data[i+2]),
1867 be32_to_cpu(data[i+3]));
1868 }
1869 }
1870
1871 /**
1872 * ipr_log_enhanced_dual_ioa_error - Log an enhanced dual adapter error.
1873 * @ioa_cfg: ioa config struct
1874 * @hostrcb: hostrcb struct
1875 *
1876 * Return value:
1877 * none
1878 **/
1879 static void ipr_log_enhanced_dual_ioa_error(struct ipr_ioa_cfg *ioa_cfg,
1880 struct ipr_hostrcb *hostrcb)
1881 {
1882 struct ipr_hostrcb_type_17_error *error;
1883
1884 if (ioa_cfg->sis64)
1885 error = &hostrcb->hcam.u.error64.u.type_17_error;
1886 else
1887 error = &hostrcb->hcam.u.error.u.type_17_error;
1888
1889 error->failure_reason[sizeof(error->failure_reason) - 1] = '\0';
1890 strim(error->failure_reason);
1891
1892 ipr_hcam_err(hostrcb, "%s [PRC: %08X]\n", error->failure_reason,
1893 be32_to_cpu(hostrcb->hcam.u.error.prc));
1894 ipr_log_ext_vpd_compact("Remote IOA", hostrcb, &error->vpd);
1895 ipr_log_hex_data(ioa_cfg, error->data,
1896 be32_to_cpu(hostrcb->hcam.length) -
1897 (offsetof(struct ipr_hostrcb_error, u) +
1898 offsetof(struct ipr_hostrcb_type_17_error, data)));
1899 }
1900
1901 /**
1902 * ipr_log_dual_ioa_error - Log a dual adapter error.
1903 * @ioa_cfg: ioa config struct
1904 * @hostrcb: hostrcb struct
1905 *
1906 * Return value:
1907 * none
1908 **/
1909 static void ipr_log_dual_ioa_error(struct ipr_ioa_cfg *ioa_cfg,
1910 struct ipr_hostrcb *hostrcb)
1911 {
1912 struct ipr_hostrcb_type_07_error *error;
1913
1914 error = &hostrcb->hcam.u.error.u.type_07_error;
1915 error->failure_reason[sizeof(error->failure_reason) - 1] = '\0';
1916 strim(error->failure_reason);
1917
1918 ipr_hcam_err(hostrcb, "%s [PRC: %08X]\n", error->failure_reason,
1919 be32_to_cpu(hostrcb->hcam.u.error.prc));
1920 ipr_log_vpd_compact("Remote IOA", hostrcb, &error->vpd);
1921 ipr_log_hex_data(ioa_cfg, error->data,
1922 be32_to_cpu(hostrcb->hcam.length) -
1923 (offsetof(struct ipr_hostrcb_error, u) +
1924 offsetof(struct ipr_hostrcb_type_07_error, data)));
1925 }
1926
1927 static const struct {
1928 u8 active;
1929 char *desc;
1930 } path_active_desc[] = {
1931 { IPR_PATH_NO_INFO, "Path" },
1932 { IPR_PATH_ACTIVE, "Active path" },
1933 { IPR_PATH_NOT_ACTIVE, "Inactive path" }
1934 };
1935
1936 static const struct {
1937 u8 state;
1938 char *desc;
1939 } path_state_desc[] = {
1940 { IPR_PATH_STATE_NO_INFO, "has no path state information available" },
1941 { IPR_PATH_HEALTHY, "is healthy" },
1942 { IPR_PATH_DEGRADED, "is degraded" },
1943 { IPR_PATH_FAILED, "is failed" }
1944 };
1945
1946 /**
1947 * ipr_log_fabric_path - Log a fabric path error
1948 * @hostrcb: hostrcb struct
1949 * @fabric: fabric descriptor
1950 *
1951 * Return value:
1952 * none
1953 **/
1954 static void ipr_log_fabric_path(struct ipr_hostrcb *hostrcb,
1955 struct ipr_hostrcb_fabric_desc *fabric)
1956 {
1957 int i, j;
1958 u8 path_state = fabric->path_state;
1959 u8 active = path_state & IPR_PATH_ACTIVE_MASK;
1960 u8 state = path_state & IPR_PATH_STATE_MASK;
1961
1962 for (i = 0; i < ARRAY_SIZE(path_active_desc); i++) {
1963 if (path_active_desc[i].active != active)
1964 continue;
1965
1966 for (j = 0; j < ARRAY_SIZE(path_state_desc); j++) {
1967 if (path_state_desc[j].state != state)
1968 continue;
1969
1970 if (fabric->cascaded_expander == 0xff && fabric->phy == 0xff) {
1971 ipr_hcam_err(hostrcb, "%s %s: IOA Port=%d\n",
1972 path_active_desc[i].desc, path_state_desc[j].desc,
1973 fabric->ioa_port);
1974 } else if (fabric->cascaded_expander == 0xff) {
1975 ipr_hcam_err(hostrcb, "%s %s: IOA Port=%d, Phy=%d\n",
1976 path_active_desc[i].desc, path_state_desc[j].desc,
1977 fabric->ioa_port, fabric->phy);
1978 } else if (fabric->phy == 0xff) {
1979 ipr_hcam_err(hostrcb, "%s %s: IOA Port=%d, Cascade=%d\n",
1980 path_active_desc[i].desc, path_state_desc[j].desc,
1981 fabric->ioa_port, fabric->cascaded_expander);
1982 } else {
1983 ipr_hcam_err(hostrcb, "%s %s: IOA Port=%d, Cascade=%d, Phy=%d\n",
1984 path_active_desc[i].desc, path_state_desc[j].desc,
1985 fabric->ioa_port, fabric->cascaded_expander, fabric->phy);
1986 }
1987 return;
1988 }
1989 }
1990
1991 ipr_err("Path state=%02X IOA Port=%d Cascade=%d Phy=%d\n", path_state,
1992 fabric->ioa_port, fabric->cascaded_expander, fabric->phy);
1993 }
1994
1995 /**
1996 * ipr_log64_fabric_path - Log a fabric path error
1997 * @hostrcb: hostrcb struct
1998 * @fabric: fabric descriptor
1999 *
2000 * Return value:
2001 * none
2002 **/
2003 static void ipr_log64_fabric_path(struct ipr_hostrcb *hostrcb,
2004 struct ipr_hostrcb64_fabric_desc *fabric)
2005 {
2006 int i, j;
2007 u8 path_state = fabric->path_state;
2008 u8 active = path_state & IPR_PATH_ACTIVE_MASK;
2009 u8 state = path_state & IPR_PATH_STATE_MASK;
2010 char buffer[IPR_MAX_RES_PATH_LENGTH];
2011
2012 for (i = 0; i < ARRAY_SIZE(path_active_desc); i++) {
2013 if (path_active_desc[i].active != active)
2014 continue;
2015
2016 for (j = 0; j < ARRAY_SIZE(path_state_desc); j++) {
2017 if (path_state_desc[j].state != state)
2018 continue;
2019
2020 ipr_hcam_err(hostrcb, "%s %s: Resource Path=%s\n",
2021 path_active_desc[i].desc, path_state_desc[j].desc,
2022 ipr_format_res_path(hostrcb->ioa_cfg,
2023 fabric->res_path,
2024 buffer, sizeof(buffer)));
2025 return;
2026 }
2027 }
2028
2029 ipr_err("Path state=%02X Resource Path=%s\n", path_state,
2030 ipr_format_res_path(hostrcb->ioa_cfg, fabric->res_path,
2031 buffer, sizeof(buffer)));
2032 }
2033
2034 static const struct {
2035 u8 type;
2036 char *desc;
2037 } path_type_desc[] = {
2038 { IPR_PATH_CFG_IOA_PORT, "IOA port" },
2039 { IPR_PATH_CFG_EXP_PORT, "Expander port" },
2040 { IPR_PATH_CFG_DEVICE_PORT, "Device port" },
2041 { IPR_PATH_CFG_DEVICE_LUN, "Device LUN" }
2042 };
2043
2044 static const struct {
2045 u8 status;
2046 char *desc;
2047 } path_status_desc[] = {
2048 { IPR_PATH_CFG_NO_PROB, "Functional" },
2049 { IPR_PATH_CFG_DEGRADED, "Degraded" },
2050 { IPR_PATH_CFG_FAILED, "Failed" },
2051 { IPR_PATH_CFG_SUSPECT, "Suspect" },
2052 { IPR_PATH_NOT_DETECTED, "Missing" },
2053 { IPR_PATH_INCORRECT_CONN, "Incorrectly connected" }
2054 };
2055
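/* Negotiated link rate strings, indexed by (link_rate & IPR_PHY_LINK_RATE_MASK); codes 8 and 9 map to 1.5Gbps and 3.0Gbps */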
2056 static const char *link_rate[] = {
2057 "unknown",
2058 "disabled",
2059 "phy reset problem",
2060 "spinup hold",
2061 "port selector",
2062 "unknown",
2063 "unknown",
2064 "unknown",
2065 "1.5Gbps",
2066 "3.0Gbps",
2067 "unknown",
2068 "unknown",
2069 "unknown",
2070 "unknown",
2071 "unknown",
2072 "unknown"
2073 };
2074
2075 /**
2076 * ipr_log_path_elem - Log a fabric path element.
2077 * @hostrcb: hostrcb struct
2078 * @cfg: fabric path element struct
2079 *
2080 * Return value:
2081 * none
2082 **/
2083 static void ipr_log_path_elem(struct ipr_hostrcb *hostrcb,
2084 struct ipr_hostrcb_config_element *cfg)
2085 {
2086 int i, j;
2087 u8 type = cfg->type_status & IPR_PATH_CFG_TYPE_MASK;
2088 u8 status = cfg->type_status & IPR_PATH_CFG_STATUS_MASK;
2089
2090 if (type == IPR_PATH_CFG_NOT_EXIST)
2091 return;
2092
2093 for (i = 0; i < ARRAY_SIZE(path_type_desc); i++) {
2094 if (path_type_desc[i].type != type)
2095 continue;
2096
2097 for (j = 0; j < ARRAY_SIZE(path_status_desc); j++) {
2098 if (path_status_desc[j].status != status)
2099 continue;
2100
2101 if (type == IPR_PATH_CFG_IOA_PORT) {
2102 ipr_hcam_err(hostrcb, "%s %s: Phy=%d, Link rate=%s, WWN=%08X%08X\n",
2103 path_status_desc[j].desc, path_type_desc[i].desc,
2104 cfg->phy, link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
2105 be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
2106 } else {
2107 if (cfg->cascaded_expander == 0xff && cfg->phy == 0xff) {
2108 ipr_hcam_err(hostrcb, "%s %s: Link rate=%s, WWN=%08X%08X\n",
2109 path_status_desc[j].desc, path_type_desc[i].desc,
2110 link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
2111 be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
2112 } else if (cfg->cascaded_expander == 0xff) {
2113 ipr_hcam_err(hostrcb, "%s %s: Phy=%d, Link rate=%s, "
2114 "WWN=%08X%08X\n", path_status_desc[j].desc,
2115 path_type_desc[i].desc, cfg->phy,
2116 link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
2117 be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
2118 } else if (cfg->phy == 0xff) {
2119 ipr_hcam_err(hostrcb, "%s %s: Cascade=%d, Link rate=%s, "
2120 "WWN=%08X%08X\n", path_status_desc[j].desc,
2121 path_type_desc[i].desc, cfg->cascaded_expander,
2122 link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
2123 be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
2124 } else {
2125 ipr_hcam_err(hostrcb, "%s %s: Cascade=%d, Phy=%d, Link rate=%s "
2126 "WWN=%08X%08X\n", path_status_desc[j].desc,
2127 path_type_desc[i].desc, cfg->cascaded_expander, cfg->phy,
2128 link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
2129 be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
2130 }
2131 }
2132 return;
2133 }
2134 }
2135
2136 ipr_hcam_err(hostrcb, "Path element=%02X: Cascade=%d Phy=%d Link rate=%s "
2137 "WWN=%08X%08X\n", cfg->type_status, cfg->cascaded_expander, cfg->phy,
2138 link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
2139 be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
2140 }
2141
2142 /**
2143 * ipr_log64_path_elem - Log a fabric path element.
2144 * @hostrcb: hostrcb struct
2145 * @cfg: fabric path element struct
2146 *
2147 * Return value:
2148 * none
2149 **/
2150 static void ipr_log64_path_elem(struct ipr_hostrcb *hostrcb,
2151 struct ipr_hostrcb64_config_element *cfg)
2152 {
2153 int i, j;
2154 u8 desc_id = cfg->descriptor_id & IPR_DESCRIPTOR_MASK;
2155 u8 type = cfg->type_status & IPR_PATH_CFG_TYPE_MASK;
2156 u8 status = cfg->type_status & IPR_PATH_CFG_STATUS_MASK;
2157 char buffer[IPR_MAX_RES_PATH_LENGTH];
2158
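	/* Skip elements that do not exist and anything that is not a SIS64 descriptor, since only those carry a resource path we can format below */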
2159 if (type == IPR_PATH_CFG_NOT_EXIST || desc_id != IPR_DESCRIPTOR_SIS64)
2160 return;
2161
2162 for (i = 0; i < ARRAY_SIZE(path_type_desc); i++) {
2163 if (path_type_desc[i].type != type)
2164 continue;
2165
2166 for (j = 0; j < ARRAY_SIZE(path_status_desc); j++) {
2167 if (path_status_desc[j].status != status)
2168 continue;
2169
2170 ipr_hcam_err(hostrcb, "%s %s: Resource Path=%s, Link rate=%s, WWN=%08X%08X\n",
2171 path_status_desc[j].desc, path_type_desc[i].desc,
2172 ipr_format_res_path(hostrcb->ioa_cfg,
2173 cfg->res_path, buffer, sizeof(buffer)),
2174 link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
2175 be32_to_cpu(cfg->wwid[0]),
2176 be32_to_cpu(cfg->wwid[1]));
2177 return;
2178 }
2179 }
2180 ipr_hcam_err(hostrcb, "Path element=%02X: Resource Path=%s, Link rate=%s "
2181 "WWN=%08X%08X\n", cfg->type_status,
2182 ipr_format_res_path(hostrcb->ioa_cfg,
2183 cfg->res_path, buffer, sizeof(buffer)),
2184 link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
2185 be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
2186 }
2187
2188 /**
2189 * ipr_log_fabric_error - Log a fabric error.
2190 * @ioa_cfg: ioa config struct
2191 * @hostrcb: hostrcb struct
2192 *
2193 * Return value:
2194 * none
2195 **/
2196 static void ipr_log_fabric_error(struct ipr_ioa_cfg *ioa_cfg,
2197 struct ipr_hostrcb *hostrcb)
2198 {
2199 struct ipr_hostrcb_type_20_error *error;
2200 struct ipr_hostrcb_fabric_desc *fabric;
2201 struct ipr_hostrcb_config_element *cfg;
2202 int i, add_len;
2203
2204 error = &hostrcb->hcam.u.error.u.type_20_error;
2205 error->failure_reason[sizeof(error->failure_reason) - 1] = '\0';
2206 ipr_hcam_err(hostrcb, "%s\n", error->failure_reason);
2207
2208 add_len = be32_to_cpu(hostrcb->hcam.length) -
2209 (offsetof(struct ipr_hostrcb_error, u) +
2210 offsetof(struct ipr_hostrcb_type_20_error, desc));
2211
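	/* Log each fabric descriptor and its path elements; any length left over after the descriptors is dumped as raw hex below */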
2212 for (i = 0, fabric = error->desc; i < error->num_entries; i++) {
2213 ipr_log_fabric_path(hostrcb, fabric);
2214 for_each_fabric_cfg(fabric, cfg)
2215 ipr_log_path_elem(hostrcb, cfg);
2216
2217 add_len -= be16_to_cpu(fabric->length);
2218 fabric = (struct ipr_hostrcb_fabric_desc *)
2219 ((unsigned long)fabric + be16_to_cpu(fabric->length));
2220 }
2221
2222 ipr_log_hex_data(ioa_cfg, (u32 *)fabric, add_len);
2223 }
2224
2225 /**
2226 * ipr_log_sis64_array_error - Log a sis64 array error.
2227 * @ioa_cfg: ioa config struct
2228 * @hostrcb: hostrcb struct
2229 *
2230 * Return value:
2231 * none
2232 **/
2233 static void ipr_log_sis64_array_error(struct ipr_ioa_cfg *ioa_cfg,
2234 struct ipr_hostrcb *hostrcb)
2235 {
2236 int i, num_entries;
2237 struct ipr_hostrcb_type_24_error *error;
2238 struct ipr_hostrcb64_array_data_entry *array_entry;
2239 char buffer[IPR_MAX_RES_PATH_LENGTH];
2240 const u8 zero_sn[IPR_SERIAL_NUM_LEN] = { [0 ... IPR_SERIAL_NUM_LEN-1] = '0' };
2241
2242 error = &hostrcb->hcam.u.error64.u.type_24_error;
2243
2244 ipr_err_separator;
2245
2246 ipr_err("RAID %s Array Configuration: %s\n",
2247 error->protection_level,
2248 ipr_format_res_path(ioa_cfg, error->last_res_path,
2249 buffer, sizeof(buffer)));
2250
2251 ipr_err_separator;
2252
2253 array_entry = error->array_member;
2254 num_entries = min_t(u32, error->num_entries,
2255 ARRAY_SIZE(error->array_member));
2256
2257 for (i = 0; i < num_entries; i++, array_entry++) {
2258
2259 if (!memcmp(array_entry->vpd.vpd.sn, zero_sn, IPR_SERIAL_NUM_LEN))
2260 continue;
2261
2262 if (error->exposed_mode_adn == i)
2263 ipr_err("Exposed Array Member %d:\n", i);
2264 else
2265 ipr_err("Array Member %d:\n", i);
2266
2268 ipr_log_ext_vpd(&array_entry->vpd);
2269 ipr_err("Current Location: %s\n",
2270 ipr_format_res_path(ioa_cfg, array_entry->res_path,
2271 buffer, sizeof(buffer)));
2272 ipr_err("Expected Location: %s\n",
2273 ipr_format_res_path(ioa_cfg,
2274 array_entry->expected_res_path,
2275 buffer, sizeof(buffer)));
2276
2277 ipr_err_separator;
2278 }
2279 }
2280
2281 /**
2282 * ipr_log_sis64_fabric_error - Log a sis64 fabric error.
2283 * @ioa_cfg: ioa config struct
2284 * @hostrcb: hostrcb struct
2285 *
2286 * Return value:
2287 * none
2288 **/
2289 static void ipr_log_sis64_fabric_error(struct ipr_ioa_cfg *ioa_cfg,
2290 struct ipr_hostrcb *hostrcb)
2291 {
2292 struct ipr_hostrcb_type_30_error *error;
2293 struct ipr_hostrcb64_fabric_desc *fabric;
2294 struct ipr_hostrcb64_config_element *cfg;
2295 int i, add_len;
2296
2297 error = &hostrcb->hcam.u.error64.u.type_30_error;
2298
2299 error->failure_reason[sizeof(error->failure_reason) - 1] = '\0';
2300 ipr_hcam_err(hostrcb, "%s\n", error->failure_reason);
2301
2302 add_len = be32_to_cpu(hostrcb->hcam.length) -
2303 (offsetof(struct ipr_hostrcb64_error, u) +
2304 offsetof(struct ipr_hostrcb_type_30_error, desc));
2305
2306 for (i = 0, fabric = error->desc; i < error->num_entries; i++) {
2307 ipr_log64_fabric_path(hostrcb, fabric);
2308 for_each_fabric_cfg(fabric, cfg)
2309 ipr_log64_path_elem(hostrcb, cfg);
2310
2311 add_len -= be16_to_cpu(fabric->length);
2312 fabric = (struct ipr_hostrcb64_fabric_desc *)
2313 ((unsigned long)fabric + be16_to_cpu(fabric->length));
2314 }
2315
2316 ipr_log_hex_data(ioa_cfg, (u32 *)fabric, add_len);
2317 }
2318
2319 /**
2320 * ipr_log_generic_error - Log an adapter error.
2321 * @ioa_cfg: ioa config struct
2322 * @hostrcb: hostrcb struct
2323 *
2324 * Return value:
2325 * none
2326 **/
2327 static void ipr_log_generic_error(struct ipr_ioa_cfg *ioa_cfg,
2328 struct ipr_hostrcb *hostrcb)
2329 {
2330 ipr_log_hex_data(ioa_cfg, hostrcb->hcam.u.raw.data,
2331 be32_to_cpu(hostrcb->hcam.length));
2332 }
2333
2334 /**
2335  * ipr_get_error - Find the specified IOASC in the ipr_error_table.
2336 * @ioasc: IOASC
2337 *
2338  * This function will return the index into the ipr_error_table
2339 * for the specified IOASC. If the IOASC is not in the table,
2340 * 0 will be returned, which points to the entry used for unknown errors.
2341 *
2342 * Return value:
2343 * index into the ipr_error_table
2344 **/
2345 static u32 ipr_get_error(u32 ioasc)
2346 {
2347 int i;
2348
2349 for (i = 0; i < ARRAY_SIZE(ipr_error_table); i++)
2350 if (ipr_error_table[i].ioasc == (ioasc & IPR_IOASC_IOASC_MASK))
2351 return i;
2352
2353 return 0;
2354 }
2355
2356 /**
2357 * ipr_handle_log_data - Log an adapter error.
2358 * @ioa_cfg: ioa config struct
2359 * @hostrcb: hostrcb struct
2360 *
2361 * This function logs an adapter error to the system.
2362 *
2363 * Return value:
2364 * none
2365 **/
2366 static void ipr_handle_log_data(struct ipr_ioa_cfg *ioa_cfg,
2367 struct ipr_hostrcb *hostrcb)
2368 {
2369 u32 ioasc;
2370 int error_index;
2371
2372 if (hostrcb->hcam.notify_type != IPR_HOST_RCB_NOTIF_TYPE_ERROR_LOG_ENTRY)
2373 return;
2374
2375 if (hostrcb->hcam.notifications_lost == IPR_HOST_RCB_NOTIFICATIONS_LOST)
2376 dev_err(&ioa_cfg->pdev->dev, "Error notifications lost\n");
2377
2378 if (ioa_cfg->sis64)
2379 ioasc = be32_to_cpu(hostrcb->hcam.u.error64.fd_ioasc);
2380 else
2381 ioasc = be32_to_cpu(hostrcb->hcam.u.error.fd_ioasc);
2382
2383 if (!ioa_cfg->sis64 && (ioasc == IPR_IOASC_BUS_WAS_RESET ||
2384 ioasc == IPR_IOASC_BUS_WAS_RESET_BY_OTHER)) {
2385 /* Tell the midlayer we had a bus reset so it will handle the UA properly */
2386 scsi_report_bus_reset(ioa_cfg->host,
2387 hostrcb->hcam.u.error.fd_res_addr.bus);
2388 }
2389
2390 error_index = ipr_get_error(ioasc);
2391
2392 if (!ipr_error_table[error_index].log_hcam)
2393 return;
2394
2395 ipr_hcam_err(hostrcb, "%s\n", ipr_error_table[error_index].error);
2396
2397 /* Set indication we have logged an error */
2398 ioa_cfg->errors_logged++;
2399
2400 if (ioa_cfg->log_level < ipr_error_table[error_index].log_hcam)
2401 return;
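	/* Clamp the HCAM length so the overlay-specific loggers below never read past the raw buffer */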
2402 if (be32_to_cpu(hostrcb->hcam.length) > sizeof(hostrcb->hcam.u.raw))
2403 hostrcb->hcam.length = cpu_to_be32(sizeof(hostrcb->hcam.u.raw));
2404
2405 switch (hostrcb->hcam.overlay_id) {
2406 case IPR_HOST_RCB_OVERLAY_ID_2:
2407 ipr_log_cache_error(ioa_cfg, hostrcb);
2408 break;
2409 case IPR_HOST_RCB_OVERLAY_ID_3:
2410 ipr_log_config_error(ioa_cfg, hostrcb);
2411 break;
2412 case IPR_HOST_RCB_OVERLAY_ID_4:
2413 case IPR_HOST_RCB_OVERLAY_ID_6:
2414 ipr_log_array_error(ioa_cfg, hostrcb);
2415 break;
2416 case IPR_HOST_RCB_OVERLAY_ID_7:
2417 ipr_log_dual_ioa_error(ioa_cfg, hostrcb);
2418 break;
2419 case IPR_HOST_RCB_OVERLAY_ID_12:
2420 ipr_log_enhanced_cache_error(ioa_cfg, hostrcb);
2421 break;
2422 case IPR_HOST_RCB_OVERLAY_ID_13:
2423 ipr_log_enhanced_config_error(ioa_cfg, hostrcb);
2424 break;
2425 case IPR_HOST_RCB_OVERLAY_ID_14:
2426 case IPR_HOST_RCB_OVERLAY_ID_16:
2427 ipr_log_enhanced_array_error(ioa_cfg, hostrcb);
2428 break;
2429 case IPR_HOST_RCB_OVERLAY_ID_17:
2430 ipr_log_enhanced_dual_ioa_error(ioa_cfg, hostrcb);
2431 break;
2432 case IPR_HOST_RCB_OVERLAY_ID_20:
2433 ipr_log_fabric_error(ioa_cfg, hostrcb);
2434 break;
2435 case IPR_HOST_RCB_OVERLAY_ID_23:
2436 ipr_log_sis64_config_error(ioa_cfg, hostrcb);
2437 break;
2438 case IPR_HOST_RCB_OVERLAY_ID_24:
2439 case IPR_HOST_RCB_OVERLAY_ID_26:
2440 ipr_log_sis64_array_error(ioa_cfg, hostrcb);
2441 break;
2442 case IPR_HOST_RCB_OVERLAY_ID_30:
2443 ipr_log_sis64_fabric_error(ioa_cfg, hostrcb);
2444 break;
2445 case IPR_HOST_RCB_OVERLAY_ID_1:
2446 case IPR_HOST_RCB_OVERLAY_ID_DEFAULT:
2447 default:
2448 ipr_log_generic_error(ioa_cfg, hostrcb);
2449 break;
2450 }
2451 }
2452
2453 /**
2454 * ipr_process_error - Op done function for an adapter error log.
2455 * @ipr_cmd: ipr command struct
2456 *
2457 * This function is the op done function for an error log host
2458 * controlled async from the adapter. It will log the error and
2459 * send the HCAM back to the adapter.
2460 *
2461 * Return value:
2462 * none
2463 **/
2464 static void ipr_process_error(struct ipr_cmnd *ipr_cmd)
2465 {
2466 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
2467 struct ipr_hostrcb *hostrcb = ipr_cmd->u.hostrcb;
2468 u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
2469 u32 fd_ioasc;
2470
2471 if (ioa_cfg->sis64)
2472 fd_ioasc = be32_to_cpu(hostrcb->hcam.u.error64.fd_ioasc);
2473 else
2474 fd_ioasc = be32_to_cpu(hostrcb->hcam.u.error.fd_ioasc);
2475
2476 list_del(&hostrcb->queue);
2477 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
2478
2479 if (!ioasc) {
2480 ipr_handle_log_data(ioa_cfg, hostrcb);
2481 if (fd_ioasc == IPR_IOASC_NR_IOA_RESET_REQUIRED)
2482 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_ABBREV);
2483 } else if (ioasc != IPR_IOASC_IOA_WAS_RESET) {
2484 dev_err(&ioa_cfg->pdev->dev,
2485 "Host RCB failed with IOASC: 0x%08X\n", ioasc);
2486 }
2487
2488 ipr_send_hcam(ioa_cfg, IPR_HCAM_CDB_OP_CODE_LOG_DATA, hostrcb);
2489 }
2490
2491 /**
2492 * ipr_timeout - An internally generated op has timed out.
2493 * @ipr_cmd: ipr command struct
2494 *
2495 * This function blocks host requests and initiates an
2496 * adapter reset.
2497 *
2498 * Return value:
2499 * none
2500 **/
2501 static void ipr_timeout(struct ipr_cmnd *ipr_cmd)
2502 {
2503 unsigned long lock_flags = 0;
2504 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
2505
2506 ENTER;
2507 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2508
2509 ioa_cfg->errors_logged++;
2510 dev_err(&ioa_cfg->pdev->dev,
2511 "Adapter being reset due to command timeout.\n");
2512
2513 if (WAIT_FOR_DUMP == ioa_cfg->sdt_state)
2514 ioa_cfg->sdt_state = GET_DUMP;
2515
2516 if (!ioa_cfg->in_reset_reload || ioa_cfg->reset_cmd == ipr_cmd)
2517 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
2518
2519 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2520 LEAVE;
2521 }
2522
2523 /**
2524 * ipr_oper_timeout - Adapter timed out transitioning to operational
2525 * @ipr_cmd: ipr command struct
2526 *
2527 * This function blocks host requests and initiates an
2528 * adapter reset.
2529 *
2530 * Return value:
2531 * none
2532 **/
2533 static void ipr_oper_timeout(struct ipr_cmnd *ipr_cmd)
2534 {
2535 unsigned long lock_flags = 0;
2536 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
2537
2538 ENTER;
2539 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2540
2541 ioa_cfg->errors_logged++;
2542 dev_err(&ioa_cfg->pdev->dev,
2543 "Adapter timed out transitioning to operational.\n");
2544
2545 if (WAIT_FOR_DUMP == ioa_cfg->sdt_state)
2546 ioa_cfg->sdt_state = GET_DUMP;
2547
2548 if (!ioa_cfg->in_reset_reload || ioa_cfg->reset_cmd == ipr_cmd) {
2549 if (ipr_fastfail)
2550 ioa_cfg->reset_retries += IPR_NUM_RESET_RELOAD_RETRIES;
2551 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
2552 }
2553
2554 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2555 LEAVE;
2556 }
2557
2558 /**
2559 * ipr_find_ses_entry - Find matching SES in SES table
2560 * @res: resource entry struct of SES
2561 *
2562 * Return value:
2563 * pointer to SES table entry / NULL on failure
2564 **/
2565 static const struct ipr_ses_table_entry *
2566 ipr_find_ses_entry(struct ipr_resource_entry *res)
2567 {
2568 int i, j, matches;
2569 struct ipr_std_inq_vpids *vpids;
2570 const struct ipr_ses_table_entry *ste = ipr_ses_table;
2571
2572 for (i = 0; i < ARRAY_SIZE(ipr_ses_table); i++, ste++) {
2573 for (j = 0, matches = 0; j < IPR_PROD_ID_LEN; j++) {
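			/* An 'X' in compare_product_id_byte marks a position that must match the device's product ID; all other positions are treated as wildcards */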
2574 if (ste->compare_product_id_byte[j] == 'X') {
2575 vpids = &res->std_inq_data.vpids;
2576 if (vpids->product_id[j] == ste->product_id[j])
2577 matches++;
2578 else
2579 break;
2580 } else
2581 matches++;
2582 }
2583
2584 if (matches == IPR_PROD_ID_LEN)
2585 return ste;
2586 }
2587
2588 return NULL;
2589 }
2590
2591 /**
2592 * ipr_get_max_scsi_speed - Determine max SCSI speed for a given bus
2593 * @ioa_cfg: ioa config struct
2594 * @bus: SCSI bus
2595 * @bus_width: bus width
2596 *
2597 * Return value:
2598 * SCSI bus speed in units of 100KHz, 1600 is 160 MHz
2599 * For a 2-byte wide SCSI bus, the maximum transfer speed is
2600 * twice the maximum transfer rate (e.g. for a wide enabled bus,
2601 * max 160MHz = max 320MB/sec).
2602 **/
2603 static u32 ipr_get_max_scsi_speed(struct ipr_ioa_cfg *ioa_cfg, u8 bus, u8 bus_width)
2604 {
2605 struct ipr_resource_entry *res;
2606 const struct ipr_ses_table_entry *ste;
2607 u32 max_xfer_rate = IPR_MAX_SCSI_RATE(bus_width);
2608
2609 /* Loop through each config table entry in the config table buffer */
2610 list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
2611 if (!(IPR_IS_SES_DEVICE(res->std_inq_data)))
2612 continue;
2613
2614 if (bus != res->bus)
2615 continue;
2616
2617 if (!(ste = ipr_find_ses_entry(res)))
2618 continue;
2619
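		/* max_bus_speed_limit is in MB/s; dividing by the bus width in bytes gives MHz, and multiplying by 10 yields the 100KHz units described above (e.g. 80 MB/s on a 16-bit bus -> 400, i.e. 40 MHz) */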
2620 max_xfer_rate = (ste->max_bus_speed_limit * 10) / (bus_width / 8);
2621 }
2622
2623 return max_xfer_rate;
2624 }
2625
2626 /**
2627 * ipr_wait_iodbg_ack - Wait for an IODEBUG ACK from the IOA
2628 * @ioa_cfg: ioa config struct
2629 * @max_delay: max delay in micro-seconds to wait
2630 *
2631 * Waits for an IODEBUG ACK from the IOA, doing busy looping.
2632 *
2633 * Return value:
2634 * 0 on success / other on failure
2635 **/
2636 static int ipr_wait_iodbg_ack(struct ipr_ioa_cfg *ioa_cfg, int max_delay)
2637 {
2638 volatile u32 pcii_reg;
2639 int delay = 1;
2640
2641 /* Read interrupt reg until IOA signals IO Debug Acknowledge */
2642 while (delay < max_delay) {
2643 pcii_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
2644
2645 if (pcii_reg & IPR_PCII_IO_DEBUG_ACKNOWLEDGE)
2646 return 0;
2647
2648 /* udelay cannot be used if delay is more than a few milliseconds */
2649 if ((delay / 1000) > MAX_UDELAY_MS)
2650 mdelay(delay / 1000);
2651 else
2652 udelay(delay);
2653
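		/* Exponential backoff: double the delay (starting at 1 us) on each pass until max_delay is reached */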
2654 delay += delay;
2655 }
2656 return -EIO;
2657 }
2658
2659 /**
2660 * ipr_get_sis64_dump_data_section - Dump IOA memory
2661 * @ioa_cfg: ioa config struct
2662 * @start_addr: adapter address to dump
2663 * @dest: destination kernel buffer
2664 * @length_in_words: length to dump in 4 byte words
2665 *
2666 * Return value:
2667 * 0 on success
2668 **/
2669 static int ipr_get_sis64_dump_data_section(struct ipr_ioa_cfg *ioa_cfg,
2670 u32 start_addr,
2671 __be32 *dest, u32 length_in_words)
2672 {
2673 int i;
2674
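	/* Fetch each word by writing its adapter address to the dump address register and reading the value back through the dump data register */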
2675 for (i = 0; i < length_in_words; i++) {
2676 writel(start_addr+(i*4), ioa_cfg->regs.dump_addr_reg);
2677 *dest = cpu_to_be32(readl(ioa_cfg->regs.dump_data_reg));
2678 dest++;
2679 }
2680
2681 return 0;
2682 }
2683
2684 /**
2685 * ipr_get_ldump_data_section - Dump IOA memory
2686 * @ioa_cfg: ioa config struct
2687 * @start_addr: adapter address to dump
2688 * @dest: destination kernel buffer
2689 * @length_in_words: length to dump in 4 byte words
2690 *
2691 * Return value:
2692 * 0 on success / -EIO on failure
2693 **/
2694 static int ipr_get_ldump_data_section(struct ipr_ioa_cfg *ioa_cfg,
2695 u32 start_addr,
2696 __be32 *dest, u32 length_in_words)
2697 {
2698 volatile u32 temp_pcii_reg;
2699 int i, delay = 0;
2700
2701 if (ioa_cfg->sis64)
2702 return ipr_get_sis64_dump_data_section(ioa_cfg, start_addr,
2703 dest, length_in_words);
2704
2705 /* Write IOA interrupt reg starting LDUMP state */
2706 writel((IPR_UPROCI_RESET_ALERT | IPR_UPROCI_IO_DEBUG_ALERT),
2707 ioa_cfg->regs.set_uproc_interrupt_reg32);
2708
2709 /* Wait for IO debug acknowledge */
2710 if (ipr_wait_iodbg_ack(ioa_cfg,
2711 IPR_LDUMP_MAX_LONG_ACK_DELAY_IN_USEC)) {
2712 dev_err(&ioa_cfg->pdev->dev,
2713 "IOA dump long data transfer timeout\n");
2714 return -EIO;
2715 }
2716
2717 /* Signal LDUMP interlocked - clear IO debug ack */
2718 writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE,
2719 ioa_cfg->regs.clr_interrupt_reg);
2720
2721 /* Write Mailbox with starting address */
2722 writel(start_addr, ioa_cfg->ioa_mailbox);
2723
2724 /* Signal address valid - clear IOA Reset alert */
2725 writel(IPR_UPROCI_RESET_ALERT,
2726 ioa_cfg->regs.clr_uproc_interrupt_reg32);
2727
2728 for (i = 0; i < length_in_words; i++) {
2729 /* Wait for IO debug acknowledge */
2730 if (ipr_wait_iodbg_ack(ioa_cfg,
2731 IPR_LDUMP_MAX_SHORT_ACK_DELAY_IN_USEC)) {
2732 dev_err(&ioa_cfg->pdev->dev,
2733 "IOA dump short data transfer timeout\n");
2734 return -EIO;
2735 }
2736
2737 /* Read data from mailbox and increment destination pointer */
2738 *dest = cpu_to_be32(readl(ioa_cfg->ioa_mailbox));
2739 dest++;
2740
2741 /* For all but the last word of data, signal data received */
2742 if (i < (length_in_words - 1)) {
2743 /* Signal dump data received - Clear IO debug Ack */
2744 writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE,
2745 ioa_cfg->regs.clr_interrupt_reg);
2746 }
2747 }
2748
2749 /* Signal end of block transfer. Set reset alert then clear IO debug ack */
2750 writel(IPR_UPROCI_RESET_ALERT,
2751 ioa_cfg->regs.set_uproc_interrupt_reg32);
2752
2753 writel(IPR_UPROCI_IO_DEBUG_ALERT,
2754 ioa_cfg->regs.clr_uproc_interrupt_reg32);
2755
2756 /* Signal dump data received - Clear IO debug Ack */
2757 writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE,
2758 ioa_cfg->regs.clr_interrupt_reg);
2759
2760 /* Wait for IOA to signal LDUMP exit - IOA reset alert will be cleared */
2761 while (delay < IPR_LDUMP_MAX_SHORT_ACK_DELAY_IN_USEC) {
2762 temp_pcii_reg =
2763 readl(ioa_cfg->regs.sense_uproc_interrupt_reg32);
2764
2765 if (!(temp_pcii_reg & IPR_UPROCI_RESET_ALERT))
2766 return 0;
2767
2768 udelay(10);
2769 delay += 10;
2770 }
2771
2772 return 0;
2773 }
2774
2775 #ifdef CONFIG_SCSI_IPR_DUMP
2776 /**
2777 * ipr_sdt_copy - Copy Smart Dump Table to kernel buffer
2778 * @ioa_cfg: ioa config struct
2779 * @pci_address: adapter address
2780 * @length: length of data to copy
2781 *
2782 * Copy data from PCI adapter to kernel buffer.
2783 * Note: length MUST be a 4 byte multiple
2784 * Return value:
2785 * 0 on success / other on failure
2786 **/
2787 static int ipr_sdt_copy(struct ipr_ioa_cfg *ioa_cfg,
2788 unsigned long pci_address, u32 length)
2789 {
2790 int bytes_copied = 0;
2791 int cur_len, rc, rem_len, rem_page_len, max_dump_size;
2792 __be32 *page;
2793 unsigned long lock_flags = 0;
2794 struct ipr_ioa_dump *ioa_dump = &ioa_cfg->dump->ioa_dump;
2795
2796 if (ioa_cfg->sis64)
2797 max_dump_size = IPR_FMT3_MAX_IOA_DUMP_SIZE;
2798 else
2799 max_dump_size = IPR_FMT2_MAX_IOA_DUMP_SIZE;
2800
2801 while (bytes_copied < length &&
2802 (ioa_dump->hdr.len + bytes_copied) < max_dump_size) {
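		/* Grab a fresh page whenever the current one is full (or on the first pass); pages are remembered in ioa_data[] so they can be freed when the dump is released */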
2803 if (ioa_dump->page_offset >= PAGE_SIZE ||
2804 ioa_dump->page_offset == 0) {
2805 page = (__be32 *)__get_free_page(GFP_ATOMIC);
2806
2807 if (!page) {
2808 ipr_trace;
2809 return bytes_copied;
2810 }
2811
2812 ioa_dump->page_offset = 0;
2813 ioa_dump->ioa_data[ioa_dump->next_page_index] = page;
2814 ioa_dump->next_page_index++;
2815 } else
2816 page = ioa_dump->ioa_data[ioa_dump->next_page_index - 1];
2817
2818 rem_len = length - bytes_copied;
2819 rem_page_len = PAGE_SIZE - ioa_dump->page_offset;
2820 cur_len = min(rem_len, rem_page_len);
2821
2822 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2823 if (ioa_cfg->sdt_state == ABORT_DUMP) {
2824 rc = -EIO;
2825 } else {
2826 rc = ipr_get_ldump_data_section(ioa_cfg,
2827 pci_address + bytes_copied,
2828 &page[ioa_dump->page_offset / 4],
2829 (cur_len / sizeof(u32)));
2830 }
2831 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2832
2833 if (!rc) {
2834 ioa_dump->page_offset += cur_len;
2835 bytes_copied += cur_len;
2836 } else {
2837 ipr_trace;
2838 break;
2839 }
2840 schedule();
2841 }
2842
2843 return bytes_copied;
2844 }
2845
2846 /**
2847 * ipr_init_dump_entry_hdr - Initialize a dump entry header.
2848 * @hdr: dump entry header struct
2849 *
2850 * Return value:
2851 * nothing
2852 **/
2853 static void ipr_init_dump_entry_hdr(struct ipr_dump_entry_header *hdr)
2854 {
2855 hdr->eye_catcher = IPR_DUMP_EYE_CATCHER;
2856 hdr->num_elems = 1;
2857 hdr->offset = sizeof(*hdr);
2858 hdr->status = IPR_DUMP_STATUS_SUCCESS;
2859 }
2860
2861 /**
2862 * ipr_dump_ioa_type_data - Fill in the adapter type in the dump.
2863 * @ioa_cfg: ioa config struct
2864 * @driver_dump: driver dump struct
2865 *
2866 * Return value:
2867 * nothing
2868 **/
2869 static void ipr_dump_ioa_type_data(struct ipr_ioa_cfg *ioa_cfg,
2870 struct ipr_driver_dump *driver_dump)
2871 {
2872 struct ipr_inquiry_page3 *ucode_vpd = &ioa_cfg->vpd_cbs->page3_data;
2873
2874 ipr_init_dump_entry_hdr(&driver_dump->ioa_type_entry.hdr);
2875 driver_dump->ioa_type_entry.hdr.len =
2876 sizeof(struct ipr_dump_ioa_type_entry) -
2877 sizeof(struct ipr_dump_entry_header);
2878 driver_dump->ioa_type_entry.hdr.data_type = IPR_DUMP_DATA_TYPE_BINARY;
2879 driver_dump->ioa_type_entry.hdr.id = IPR_DUMP_DRIVER_TYPE_ID;
2880 driver_dump->ioa_type_entry.type = ioa_cfg->type;
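	/* Pack the firmware version as one byte each: major release, card type, then the two minor release bytes */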
2881 driver_dump->ioa_type_entry.fw_version = (ucode_vpd->major_release << 24) |
2882 (ucode_vpd->card_type << 16) | (ucode_vpd->minor_release[0] << 8) |
2883 ucode_vpd->minor_release[1];
2884 driver_dump->hdr.num_entries++;
2885 }
2886
2887 /**
2888 * ipr_dump_version_data - Fill in the driver version in the dump.
2889 * @ioa_cfg: ioa config struct
2890 * @driver_dump: driver dump struct
2891 *
2892 * Return value:
2893 * nothing
2894 **/
2895 static void ipr_dump_version_data(struct ipr_ioa_cfg *ioa_cfg,
2896 struct ipr_driver_dump *driver_dump)
2897 {
2898 ipr_init_dump_entry_hdr(&driver_dump->version_entry.hdr);
2899 driver_dump->version_entry.hdr.len =
2900 sizeof(struct ipr_dump_version_entry) -
2901 sizeof(struct ipr_dump_entry_header);
2902 driver_dump->version_entry.hdr.data_type = IPR_DUMP_DATA_TYPE_ASCII;
2903 driver_dump->version_entry.hdr.id = IPR_DUMP_DRIVER_VERSION_ID;
2904 strcpy(driver_dump->version_entry.version, IPR_DRIVER_VERSION);
2905 driver_dump->hdr.num_entries++;
2906 }
2907
2908 /**
2909 * ipr_dump_trace_data - Fill in the IOA trace in the dump.
2910 * @ioa_cfg: ioa config struct
2911 * @driver_dump: driver dump struct
2912 *
2913 * Return value:
2914 * nothing
2915 **/
2916 static void ipr_dump_trace_data(struct ipr_ioa_cfg *ioa_cfg,
2917 struct ipr_driver_dump *driver_dump)
2918 {
2919 ipr_init_dump_entry_hdr(&driver_dump->trace_entry.hdr);
2920 driver_dump->trace_entry.hdr.len =
2921 sizeof(struct ipr_dump_trace_entry) -
2922 sizeof(struct ipr_dump_entry_header);
2923 driver_dump->trace_entry.hdr.data_type = IPR_DUMP_DATA_TYPE_BINARY;
2924 driver_dump->trace_entry.hdr.id = IPR_DUMP_TRACE_ID;
2925 memcpy(driver_dump->trace_entry.trace, ioa_cfg->trace, IPR_TRACE_SIZE);
2926 driver_dump->hdr.num_entries++;
2927 }
2928
2929 /**
2930 * ipr_dump_location_data - Fill in the IOA location in the dump.
2931 * @ioa_cfg: ioa config struct
2932 * @driver_dump: driver dump struct
2933 *
2934 * Return value:
2935 * nothing
2936 **/
2937 static void ipr_dump_location_data(struct ipr_ioa_cfg *ioa_cfg,
2938 struct ipr_driver_dump *driver_dump)
2939 {
2940 ipr_init_dump_entry_hdr(&driver_dump->location_entry.hdr);
2941 driver_dump->location_entry.hdr.len =
2942 sizeof(struct ipr_dump_location_entry) -
2943 sizeof(struct ipr_dump_entry_header);
2944 driver_dump->location_entry.hdr.data_type = IPR_DUMP_DATA_TYPE_ASCII;
2945 driver_dump->location_entry.hdr.id = IPR_DUMP_LOCATION_ID;
2946 strcpy(driver_dump->location_entry.location, dev_name(&ioa_cfg->pdev->dev));
2947 driver_dump->hdr.num_entries++;
2948 }
2949
2950 /**
2951 * ipr_get_ioa_dump - Perform a dump of the driver and adapter.
2952 * @ioa_cfg: ioa config struct
2953 * @dump: dump struct
2954 *
2955 * Return value:
2956 * nothing
2957 **/
2958 static void ipr_get_ioa_dump(struct ipr_ioa_cfg *ioa_cfg, struct ipr_dump *dump)
2959 {
2960 unsigned long start_addr, sdt_word;
2961 unsigned long lock_flags = 0;
2962 struct ipr_driver_dump *driver_dump = &dump->driver_dump;
2963 struct ipr_ioa_dump *ioa_dump = &dump->ioa_dump;
2964 u32 num_entries, max_num_entries, start_off, end_off;
2965 u32 max_dump_size, bytes_to_copy, bytes_copied, rc;
2966 struct ipr_sdt *sdt;
2967 int valid = 1;
2968 int i;
2969
2970 ENTER;
2971
2972 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2973
2974 if (ioa_cfg->sdt_state != READ_DUMP) {
2975 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2976 return;
2977 }
2978
2979 if (ioa_cfg->sis64) {
2980 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2981 ssleep(IPR_DUMP_DELAY_SECONDS);
2982 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2983 }
2984
2985 start_addr = readl(ioa_cfg->ioa_mailbox);
2986
2987 if (!ioa_cfg->sis64 && !ipr_sdt_is_fmt2(start_addr)) {
2988 dev_err(&ioa_cfg->pdev->dev,
2989 "Invalid dump table format: %lx\n", start_addr);
2990 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2991 return;
2992 }
2993
2994 dev_err(&ioa_cfg->pdev->dev, "Dump of IOA initiated\n");
2995
2996 driver_dump->hdr.eye_catcher = IPR_DUMP_EYE_CATCHER;
2997
2998 /* Initialize the overall dump header */
2999 driver_dump->hdr.len = sizeof(struct ipr_driver_dump);
3000 driver_dump->hdr.num_entries = 1;
3001 driver_dump->hdr.first_entry_offset = sizeof(struct ipr_dump_header);
3002 driver_dump->hdr.status = IPR_DUMP_STATUS_SUCCESS;
3003 driver_dump->hdr.os = IPR_DUMP_OS_LINUX;
3004 driver_dump->hdr.driver_name = IPR_DUMP_DRIVER_NAME;
3005
3006 ipr_dump_version_data(ioa_cfg, driver_dump);
3007 ipr_dump_location_data(ioa_cfg, driver_dump);
3008 ipr_dump_ioa_type_data(ioa_cfg, driver_dump);
3009 ipr_dump_trace_data(ioa_cfg, driver_dump);
3010
3011 /* Update dump_header */
3012 driver_dump->hdr.len += sizeof(struct ipr_dump_entry_header);
3013
3014 /* IOA Dump entry */
3015 ipr_init_dump_entry_hdr(&ioa_dump->hdr);
3016 ioa_dump->hdr.len = 0;
3017 ioa_dump->hdr.data_type = IPR_DUMP_DATA_TYPE_BINARY;
3018 ioa_dump->hdr.id = IPR_DUMP_IOA_DUMP_ID;
3019
3020 /* First entries in sdt are actually a list of dump addresses and
3021 lengths to gather the real dump data. sdt represents the pointer
3022 to the ioa generated dump table. Dump data will be extracted based
3023 on entries in this table */
3024 sdt = &ioa_dump->sdt;
3025
3026 if (ioa_cfg->sis64) {
3027 max_num_entries = IPR_FMT3_NUM_SDT_ENTRIES;
3028 max_dump_size = IPR_FMT3_MAX_IOA_DUMP_SIZE;
3029 } else {
3030 max_num_entries = IPR_FMT2_NUM_SDT_ENTRIES;
3031 max_dump_size = IPR_FMT2_MAX_IOA_DUMP_SIZE;
3032 }
3033
3034 bytes_to_copy = offsetof(struct ipr_sdt, entry) +
3035 (max_num_entries * sizeof(struct ipr_sdt_entry));
3036 rc = ipr_get_ldump_data_section(ioa_cfg, start_addr, (__be32 *)sdt,
3037 bytes_to_copy / sizeof(__be32));
3038
3039 /* Bail out unless the Smart Dump Table was read successfully and its header reports a ready-to-use state */
3040 if (rc || ((be32_to_cpu(sdt->hdr.state) != IPR_FMT3_SDT_READY_TO_USE) &&
3041 (be32_to_cpu(sdt->hdr.state) != IPR_FMT2_SDT_READY_TO_USE))) {
3042 dev_err(&ioa_cfg->pdev->dev,
3043 "Dump of IOA failed. Dump table not valid: %d, %X.\n",
3044 rc, be32_to_cpu(sdt->hdr.state));
3045 driver_dump->hdr.status = IPR_DUMP_STATUS_FAILED;
3046 ioa_cfg->sdt_state = DUMP_OBTAINED;
3047 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3048 return;
3049 }
3050
3051 num_entries = be32_to_cpu(sdt->hdr.num_entries_used);
3052
3053 if (num_entries > max_num_entries)
3054 num_entries = max_num_entries;
3055
3056 /* Update dump length to the actual data to be copied */
3057 dump->driver_dump.hdr.len += sizeof(struct ipr_sdt_header);
3058 if (ioa_cfg->sis64)
3059 dump->driver_dump.hdr.len += num_entries * sizeof(struct ipr_sdt_entry);
3060 else
3061 dump->driver_dump.hdr.len += max_num_entries * sizeof(struct ipr_sdt_entry);
3062
3063 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3064
3065 for (i = 0; i < num_entries; i++) {
3066 if (ioa_dump->hdr.len > max_dump_size) {
3067 driver_dump->hdr.status = IPR_DUMP_STATUS_QUAL_SUCCESS;
3068 break;
3069 }
3070
3071 if (sdt->entry[i].flags & IPR_SDT_VALID_ENTRY) {
3072 sdt_word = be32_to_cpu(sdt->entry[i].start_token);
3073 if (ioa_cfg->sis64)
3074 bytes_to_copy = be32_to_cpu(sdt->entry[i].end_token);
3075 else {
3076 start_off = sdt_word & IPR_FMT2_MBX_ADDR_MASK;
3077 end_off = be32_to_cpu(sdt->entry[i].end_token);
3078
3079 if (ipr_sdt_is_fmt2(sdt_word) && sdt_word)
3080 bytes_to_copy = end_off - start_off;
3081 else
3082 valid = 0;
3083 }
3084 if (valid) {
3085 if (bytes_to_copy > max_dump_size) {
3086 sdt->entry[i].flags &= ~IPR_SDT_VALID_ENTRY;
3087 continue;
3088 }
3089
3090 /* Copy data from adapter to driver buffers */
3091 bytes_copied = ipr_sdt_copy(ioa_cfg, sdt_word,
3092 bytes_to_copy);
3093
3094 ioa_dump->hdr.len += bytes_copied;
3095
3096 if (bytes_copied != bytes_to_copy) {
3097 driver_dump->hdr.status = IPR_DUMP_STATUS_QUAL_SUCCESS;
3098 break;
3099 }
3100 }
3101 }
3102 }
3103
3104 dev_err(&ioa_cfg->pdev->dev, "Dump of IOA completed.\n");
3105
3106 /* Update dump_header */
3107 driver_dump->hdr.len += ioa_dump->hdr.len;
3108 wmb();
3109 ioa_cfg->sdt_state = DUMP_OBTAINED;
3110 LEAVE;
3111 }
3112
3113 #else
3114 #define ipr_get_ioa_dump(ioa_cfg, dump) do { } while (0)
3115 #endif
3116
3117 /**
3118 * ipr_release_dump - Free adapter dump memory
3119 * @kref: kref struct
3120 *
3121 * Return value:
3122 * nothing
3123 **/
3124 static void ipr_release_dump(struct kref *kref)
3125 {
3126 struct ipr_dump *dump = container_of(kref, struct ipr_dump, kref);
3127 struct ipr_ioa_cfg *ioa_cfg = dump->ioa_cfg;
3128 unsigned long lock_flags = 0;
3129 int i;
3130
3131 ENTER;
3132 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3133 ioa_cfg->dump = NULL;
3134 ioa_cfg->sdt_state = INACTIVE;
3135 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3136
3137 for (i = 0; i < dump->ioa_dump.next_page_index; i++)
3138 free_page((unsigned long) dump->ioa_dump.ioa_data[i]);
3139
3140 vfree(dump->ioa_dump.ioa_data);
3141 kfree(dump);
3142 LEAVE;
3143 }
3144
3145 /**
3146 * ipr_worker_thread - Worker thread
3147 * @work: ioa config struct
3148 *
3149 * Called at task level from a work thread. This function takes care
3150  * of adding and removing devices from the mid-layer as configuration
3151 * changes are detected by the adapter.
3152 *
3153 * Return value:
3154 * nothing
3155 **/
3156 static void ipr_worker_thread(struct work_struct *work)
3157 {
3158 unsigned long lock_flags;
3159 struct ipr_resource_entry *res;
3160 struct scsi_device *sdev;
3161 struct ipr_dump *dump;
3162 struct ipr_ioa_cfg *ioa_cfg =
3163 container_of(work, struct ipr_ioa_cfg, work_q);
3164 u8 bus, target, lun;
3165 int did_work;
3166
3167 ENTER;
3168 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3169
3170 if (ioa_cfg->sdt_state == READ_DUMP) {
3171 dump = ioa_cfg->dump;
3172 if (!dump) {
3173 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3174 return;
3175 }
3176 kref_get(&dump->kref);
3177 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3178 ipr_get_ioa_dump(ioa_cfg, dump);
3179 kref_put(&dump->kref, ipr_release_dump);
3180
3181 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3182 if (ioa_cfg->sdt_state == DUMP_OBTAINED && !ioa_cfg->dump_timeout)
3183 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
3184 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3185 return;
3186 }
3187
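	/* Remove devices flagged for deletion first, dropping the host lock around midlayer calls, then add new devices; restart the scan after each add since the lock was released */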
3188 restart:
3189 do {
3190 did_work = 0;
3191 if (!ioa_cfg->hrrq[IPR_INIT_HRRQ].allow_cmds ||
3192 !ioa_cfg->allow_ml_add_del) {
3193 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3194 return;
3195 }
3196
3197 list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
3198 if (res->del_from_ml && res->sdev) {
3199 did_work = 1;
3200 sdev = res->sdev;
3201 if (!scsi_device_get(sdev)) {
3202 if (!res->add_to_ml)
3203 list_move_tail(&res->queue, &ioa_cfg->free_res_q);
3204 else
3205 res->del_from_ml = 0;
3206 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3207 scsi_remove_device(sdev);
3208 scsi_device_put(sdev);
3209 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3210 }
3211 break;
3212 }
3213 }
3214 } while (did_work);
3215
3216 list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
3217 if (res->add_to_ml) {
3218 bus = res->bus;
3219 target = res->target;
3220 lun = res->lun;
3221 res->add_to_ml = 0;
3222 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3223 scsi_add_device(ioa_cfg->host, bus, target, lun);
3224 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3225 goto restart;
3226 }
3227 }
3228
3229 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3230 kobject_uevent(&ioa_cfg->host->shost_dev.kobj, KOBJ_CHANGE);
3231 LEAVE;
3232 }
3233
3234 #ifdef CONFIG_SCSI_IPR_TRACE
3235 /**
3236 * ipr_read_trace - Dump the adapter trace
3237 * @filp: open sysfs file
3238 * @kobj: kobject struct
3239 * @bin_attr: bin_attribute struct
3240 * @buf: buffer
3241 * @off: offset
3242 * @count: buffer size
3243 *
3244 * Return value:
3245 * number of bytes printed to buffer
3246 **/
3247 static ssize_t ipr_read_trace(struct file *filp, struct kobject *kobj,
3248 struct bin_attribute *bin_attr,
3249 char *buf, loff_t off, size_t count)
3250 {
3251 struct device *dev = container_of(kobj, struct device, kobj);
3252 struct Scsi_Host *shost = class_to_shost(dev);
3253 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3254 unsigned long lock_flags = 0;
3255 ssize_t ret;
3256
3257 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3258 ret = memory_read_from_buffer(buf, count, &off, ioa_cfg->trace,
3259 IPR_TRACE_SIZE);
3260 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3261
3262 return ret;
3263 }
3264
3265 static struct bin_attribute ipr_trace_attr = {
3266 .attr = {
3267 .name = "trace",
3268 .mode = S_IRUGO,
3269 },
3270 .size = 0,
3271 .read = ipr_read_trace,
3272 };
3273 #endif
3274
3275 /**
3276 * ipr_show_fw_version - Show the firmware version
3277 * @dev: class device struct
3278 * @buf: buffer
3279 *
3280 * Return value:
3281 * number of bytes printed to buffer
3282 **/
3283 static ssize_t ipr_show_fw_version(struct device *dev,
3284 struct device_attribute *attr, char *buf)
3285 {
3286 struct Scsi_Host *shost = class_to_shost(dev);
3287 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3288 struct ipr_inquiry_page3 *ucode_vpd = &ioa_cfg->vpd_cbs->page3_data;
3289 unsigned long lock_flags = 0;
3290 int len;
3291
3292 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3293 len = snprintf(buf, PAGE_SIZE, "%02X%02X%02X%02X\n",
3294 ucode_vpd->major_release, ucode_vpd->card_type,
3295 ucode_vpd->minor_release[0],
3296 ucode_vpd->minor_release[1]);
3297 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3298 return len;
3299 }
3300
3301 static struct device_attribute ipr_fw_version_attr = {
3302 .attr = {
3303 .name = "fw_version",
3304 .mode = S_IRUGO,
3305 },
3306 .show = ipr_show_fw_version,
3307 };
3308
3309 /**
3310 * ipr_show_log_level - Show the adapter's error logging level
3311 * @dev: class device struct
3312 * @buf: buffer
3313 *
3314 * Return value:
3315 * number of bytes printed to buffer
3316 **/
3317 static ssize_t ipr_show_log_level(struct device *dev,
3318 struct device_attribute *attr, char *buf)
3319 {
3320 struct Scsi_Host *shost = class_to_shost(dev);
3321 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3322 unsigned long lock_flags = 0;
3323 int len;
3324
3325 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3326 len = snprintf(buf, PAGE_SIZE, "%d\n", ioa_cfg->log_level);
3327 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3328 return len;
3329 }
3330
3331 /**
3332 * ipr_store_log_level - Change the adapter's error logging level
3333 * @dev: class device struct
3334 * @buf: buffer
3335 *
3336 * Return value:
3337  * number of bytes consumed from the buffer
3338 **/
3339 static ssize_t ipr_store_log_level(struct device *dev,
3340 struct device_attribute *attr,
3341 const char *buf, size_t count)
3342 {
3343 struct Scsi_Host *shost = class_to_shost(dev);
3344 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3345 unsigned long lock_flags = 0;
3346
3347 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3348 ioa_cfg->log_level = simple_strtoul(buf, NULL, 10);
3349 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3350 return strlen(buf);
3351 }
3352
3353 static struct device_attribute ipr_log_level_attr = {
3354 .attr = {
3355 .name = "log_level",
3356 .mode = S_IRUGO | S_IWUSR,
3357 },
3358 .show = ipr_show_log_level,
3359 .store = ipr_store_log_level
3360 };
3361
3362 /**
3363 * ipr_store_diagnostics - IOA Diagnostics interface
3364 * @dev: device struct
3365 * @buf: buffer
3366 * @count: buffer size
3367 *
3368 * This function will reset the adapter and wait a reasonable
3369 * amount of time for any errors that the adapter might log.
3370 *
3371 * Return value:
3372 * count on success / other on failure
3373 **/
3374 static ssize_t ipr_store_diagnostics(struct device *dev,
3375 struct device_attribute *attr,
3376 const char *buf, size_t count)
3377 {
3378 struct Scsi_Host *shost = class_to_shost(dev);
3379 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3380 unsigned long lock_flags = 0;
3381 int rc = count;
3382
3383 if (!capable(CAP_SYS_ADMIN))
3384 return -EACCES;
3385
3386 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3387 while (ioa_cfg->in_reset_reload) {
3388 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3389 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
3390 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3391 }
3392
3393 ioa_cfg->errors_logged = 0;
3394 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NORMAL);
3395
3396 if (ioa_cfg->in_reset_reload) {
3397 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3398 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
3399
3400 /* Wait for a second for any errors to be logged */
3401 msleep(1000);
3402 } else {
3403 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3404 return -EIO;
3405 }
3406
3407 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3408 if (ioa_cfg->in_reset_reload || ioa_cfg->errors_logged)
3409 rc = -EIO;
3410 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3411
3412 return rc;
3413 }
3414
3415 static struct device_attribute ipr_diagnostics_attr = {
3416 .attr = {
3417 .name = "run_diagnostics",
3418 .mode = S_IWUSR,
3419 },
3420 .store = ipr_store_diagnostics
3421 };
3422
3423 /**
3424 * ipr_show_adapter_state - Show the adapter's state
3425  * @dev: device struct
3426 * @buf: buffer
3427 *
3428 * Return value:
3429 * number of bytes printed to buffer
3430 **/
3431 static ssize_t ipr_show_adapter_state(struct device *dev,
3432 struct device_attribute *attr, char *buf)
3433 {
3434 struct Scsi_Host *shost = class_to_shost(dev);
3435 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3436 unsigned long lock_flags = 0;
3437 int len;
3438
3439 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3440 if (ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead)
3441 len = snprintf(buf, PAGE_SIZE, "offline\n");
3442 else
3443 len = snprintf(buf, PAGE_SIZE, "online\n");
3444 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3445 return len;
3446 }
3447
3448 /**
3449 * ipr_store_adapter_state - Change adapter state
3450 * @dev: device struct
3451 * @buf: buffer
3452 * @count: buffer size
3453 *
3454 * This function will change the adapter's state.
3455 *
3456 * Return value:
3457 * count on success / other on failure
3458 **/
3459 static ssize_t ipr_store_adapter_state(struct device *dev,
3460 struct device_attribute *attr,
3461 const char *buf, size_t count)
3462 {
3463 struct Scsi_Host *shost = class_to_shost(dev);
3464 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3465 unsigned long lock_flags;
3466 int result = count, i;
3467
3468 if (!capable(CAP_SYS_ADMIN))
3469 return -EACCES;
3470
3471 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3472 if (ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead &&
3473 !strncmp(buf, "online", 6)) {
3474 for (i = 0; i < ioa_cfg->hrrq_num; i++) {
3475 spin_lock(&ioa_cfg->hrrq[i]._lock);
3476 ioa_cfg->hrrq[i].ioa_is_dead = 0;
3477 spin_unlock(&ioa_cfg->hrrq[i]._lock);
3478 }
3479 wmb();
3480 ioa_cfg->reset_retries = 0;
3481 ioa_cfg->in_ioa_bringdown = 0;
3482 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
3483 }
3484 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3485 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
3486
3487 return result;
3488 }
3489
3490 static struct device_attribute ipr_ioa_state_attr = {
3491 .attr = {
3492 .name = "online_state",
3493 .mode = S_IRUGO | S_IWUSR,
3494 },
3495 .show = ipr_show_adapter_state,
3496 .store = ipr_store_adapter_state
3497 };
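
/*
 * Illustrative usage (assumed sysfs path): online_state reports "online" or
 * "offline", and writing "online" revives a dead adapter by clearing
 * ioa_is_dead on every HRRQ and starting a reset:
 *
 *   cat /sys/class/scsi_host/host0/online_state
 *   echo online > /sys/class/scsi_host/host0/online_state
 *
 * Writes of any other string still return success, but the handler only acts
 * when the adapter is currently marked dead and the buffer starts with
 * "online".
 */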
3498
3499 /**
3500 * ipr_store_reset_adapter - Reset the adapter
3501 * @dev: device struct
3502 * @buf: buffer
3503 * @count: buffer size
3504 *
3505 * This function will reset the adapter.
3506 *
3507 * Return value:
3508 * count on success / other on failure
3509 **/
3510 static ssize_t ipr_store_reset_adapter(struct device *dev,
3511 struct device_attribute *attr,
3512 const char *buf, size_t count)
3513 {
3514 struct Scsi_Host *shost = class_to_shost(dev);
3515 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3516 unsigned long lock_flags;
3517 int result = count;
3518
3519 if (!capable(CAP_SYS_ADMIN))
3520 return -EACCES;
3521
3522 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3523 if (!ioa_cfg->in_reset_reload)
3524 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NORMAL);
3525 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3526 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
3527
3528 return result;
3529 }
3530
3531 static struct device_attribute ipr_ioa_reset_attr = {
3532 .attr = {
3533 .name = "reset_host",
3534 .mode = S_IWUSR,
3535 },
3536 .store = ipr_store_reset_adapter
3537 };
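
/*
 * Illustrative usage (assumed sysfs path): any write to reset_host by a
 * CAP_SYS_ADMIN caller requests a normal adapter shutdown/reset and blocks
 * until the reset/reload sequence completes:
 *
 *   echo 1 > /sys/class/scsi_host/host0/reset_host
 */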
3538
3539 static int ipr_iopoll(struct blk_iopoll *iop, int budget);
3540 /**
3541 * ipr_show_iopoll_weight - Show ipr polling mode
3542 * @dev: class device struct
3543 * @buf: buffer
3544 *
3545 * Return value:
3546 * number of bytes printed to buffer
3547 **/
3548 static ssize_t ipr_show_iopoll_weight(struct device *dev,
3549 struct device_attribute *attr, char *buf)
3550 {
3551 struct Scsi_Host *shost = class_to_shost(dev);
3552 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3553 unsigned long lock_flags = 0;
3554 int len;
3555
3556 spin_lock_irqsave(shost->host_lock, lock_flags);
3557 len = snprintf(buf, PAGE_SIZE, "%d\n", ioa_cfg->iopoll_weight);
3558 spin_unlock_irqrestore(shost->host_lock, lock_flags);
3559
3560 return len;
3561 }
3562
3563 /**
3564 * ipr_store_iopoll_weight - Change the adapter's polling mode
3565 * @dev: class device struct
3566 * @buf: buffer
3567 *
3568 * Return value:
3569  * count on success / other on failure
3570 **/
3571 static ssize_t ipr_store_iopoll_weight(struct device *dev,
3572 struct device_attribute *attr,
3573 const char *buf, size_t count)
3574 {
3575 struct Scsi_Host *shost = class_to_shost(dev);
3576 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3577 unsigned long user_iopoll_weight;
3578 unsigned long lock_flags = 0;
3579 int i;
3580
3581 if (!ioa_cfg->sis64) {
3582 dev_info(&ioa_cfg->pdev->dev, "blk-iopoll not supported on this adapter\n");
3583 return -EINVAL;
3584 }
3585 if (kstrtoul(buf, 10, &user_iopoll_weight))
3586 return -EINVAL;
3587
3588 if (user_iopoll_weight > 256) {
3589 		dev_info(&ioa_cfg->pdev->dev, "Invalid blk-iopoll weight. It must not be larger than 256\n");
3590 return -EINVAL;
3591 }
3592
3593 if (user_iopoll_weight == ioa_cfg->iopoll_weight) {
3594 		dev_info(&ioa_cfg->pdev->dev, "blk-iopoll weight unchanged; new value matches the current weight\n");
3595 return strlen(buf);
3596 }
3597
3598 if (blk_iopoll_enabled && ioa_cfg->iopoll_weight &&
3599 ioa_cfg->sis64 && ioa_cfg->nvectors > 1) {
3600 for (i = 1; i < ioa_cfg->hrrq_num; i++)
3601 blk_iopoll_disable(&ioa_cfg->hrrq[i].iopoll);
3602 }
3603
3604 spin_lock_irqsave(shost->host_lock, lock_flags);
3605 ioa_cfg->iopoll_weight = user_iopoll_weight;
3606 if (blk_iopoll_enabled && ioa_cfg->iopoll_weight &&
3607 ioa_cfg->sis64 && ioa_cfg->nvectors > 1) {
3608 for (i = 1; i < ioa_cfg->hrrq_num; i++) {
3609 blk_iopoll_init(&ioa_cfg->hrrq[i].iopoll,
3610 ioa_cfg->iopoll_weight, ipr_iopoll);
3611 blk_iopoll_enable(&ioa_cfg->hrrq[i].iopoll);
3612 }
3613 }
3614 spin_unlock_irqrestore(shost->host_lock, lock_flags);
3615
3616 return strlen(buf);
3617 }
3618
3619 static struct device_attribute ipr_iopoll_weight_attr = {
3620 .attr = {
3621 .name = "iopoll_weight",
3622 .mode = S_IRUGO | S_IWUSR,
3623 },
3624 .show = ipr_show_iopoll_weight,
3625 .store = ipr_store_iopoll_weight
3626 };
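
/*
 * Illustrative usage (assumed sysfs path): on SIS-64 adapters with more than
 * one MSI-X vector, a non-zero weight hands completion processing of the
 * secondary HRRQs to blk-iopoll with that budget, while zero switches back to
 * pure interrupt handling:
 *
 *   echo 64 > /sys/class/scsi_host/host0/iopoll_weight
 *   echo 0  > /sys/class/scsi_host/host0/iopoll_weight
 *
 * Values above 256 are rejected, and writing the current weight is a no-op.
 */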
3627
3628 /**
3629 * ipr_alloc_ucode_buffer - Allocates a microcode download buffer
3630 * @buf_len: buffer length
3631 *
3632 * Allocates a DMA'able buffer in chunks and assembles a scatter/gather
3633 * list to use for microcode download
3634 *
3635 * Return value:
3636 * pointer to sglist / NULL on failure
3637 **/
3638 static struct ipr_sglist *ipr_alloc_ucode_buffer(int buf_len)
3639 {
3640 int sg_size, order, bsize_elem, num_elem, i, j;
3641 struct ipr_sglist *sglist;
3642 struct scatterlist *scatterlist;
3643 struct page *page;
3644
3645 /* Get the minimum size per scatter/gather element */
3646 sg_size = buf_len / (IPR_MAX_SGLIST - 1);
3647
3648 /* Get the actual size per element */
3649 order = get_order(sg_size);
3650
3651 /* Determine the actual number of bytes per element */
3652 bsize_elem = PAGE_SIZE * (1 << order);
3653
3654 /* Determine the actual number of sg entries needed */
3655 if (buf_len % bsize_elem)
3656 num_elem = (buf_len / bsize_elem) + 1;
3657 else
3658 num_elem = buf_len / bsize_elem;
3659
3660 /* Allocate a scatter/gather list for the DMA */
3661 sglist = kzalloc(sizeof(struct ipr_sglist) +
3662 (sizeof(struct scatterlist) * (num_elem - 1)),
3663 GFP_KERNEL);
3664
3665 if (sglist == NULL) {
3666 ipr_trace;
3667 return NULL;
3668 }
3669
3670 scatterlist = sglist->scatterlist;
3671 sg_init_table(scatterlist, num_elem);
3672
3673 sglist->order = order;
3674 sglist->num_sg = num_elem;
3675
3676 /* Allocate a bunch of sg elements */
3677 for (i = 0; i < num_elem; i++) {
3678 page = alloc_pages(GFP_KERNEL, order);
3679 if (!page) {
3680 ipr_trace;
3681
3682 /* Free up what we already allocated */
3683 for (j = i - 1; j >= 0; j--)
3684 __free_pages(sg_page(&scatterlist[j]), order);
3685 kfree(sglist);
3686 return NULL;
3687 }
3688
3689 sg_set_page(&scatterlist[i], page, 0, 0);
3690 }
3691
3692 return sglist;
3693 }
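
/*
 * Worked sizing example (illustrative; assumes PAGE_SIZE is 4 KB and
 * IPR_MAX_SGLIST is 64): for a 1 MB microcode image, sg_size is
 * 1048576 / 63 = 16644 bytes, get_order() rounds that up to order 3
 * (eight pages, 32 KB per chunk), bsize_elem is therefore 32768, and
 * num_elem ends up as 1048576 / 32768 = 32 scatter/gather entries, each
 * backed by one order-3 page allocation.
 */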
3694
3695 /**
3696 * ipr_free_ucode_buffer - Frees a microcode download buffer
3697  * @sglist: scatter/gather list pointer
3698 *
3699 * Free a DMA'able ucode download buffer previously allocated with
3700 * ipr_alloc_ucode_buffer
3701 *
3702 * Return value:
3703 * nothing
3704 **/
3705 static void ipr_free_ucode_buffer(struct ipr_sglist *sglist)
3706 {
3707 int i;
3708
3709 for (i = 0; i < sglist->num_sg; i++)
3710 __free_pages(sg_page(&sglist->scatterlist[i]), sglist->order);
3711
3712 kfree(sglist);
3713 }
3714
3715 /**
3716 * ipr_copy_ucode_buffer - Copy user buffer to kernel buffer
3717 * @sglist: scatter/gather list pointer
3718 * @buffer: buffer pointer
3719 * @len: buffer length
3720 *
3721 * Copy a microcode image from a user buffer into a buffer allocated by
3722 * ipr_alloc_ucode_buffer
3723 *
3724 * Return value:
3725 * 0 on success / other on failure
3726 **/
3727 static int ipr_copy_ucode_buffer(struct ipr_sglist *sglist,
3728 u8 *buffer, u32 len)
3729 {
3730 int bsize_elem, i, result = 0;
3731 struct scatterlist *scatterlist;
3732 void *kaddr;
3733
3734 /* Determine the actual number of bytes per element */
3735 bsize_elem = PAGE_SIZE * (1 << sglist->order);
3736
3737 scatterlist = sglist->scatterlist;
3738
3739 for (i = 0; i < (len / bsize_elem); i++, buffer += bsize_elem) {
3740 struct page *page = sg_page(&scatterlist[i]);
3741
3742 kaddr = kmap(page);
3743 memcpy(kaddr, buffer, bsize_elem);
3744 kunmap(page);
3745
3746 scatterlist[i].length = bsize_elem;
3747
3748 if (result != 0) {
3749 ipr_trace;
3750 return result;
3751 }
3752 }
3753
3754 if (len % bsize_elem) {
3755 struct page *page = sg_page(&scatterlist[i]);
3756
3757 kaddr = kmap(page);
3758 memcpy(kaddr, buffer, len % bsize_elem);
3759 kunmap(page);
3760
3761 scatterlist[i].length = len % bsize_elem;
3762 }
3763
3764 sglist->buffer_len = len;
3765 return result;
3766 }
3767
3768 /**
3769 * ipr_build_ucode_ioadl64 - Build a microcode download IOADL
3770 * @ipr_cmd: ipr command struct
3771 * @sglist: scatter/gather list
3772 *
3773 * Builds a microcode download IOA data list (IOADL).
3774 *
3775 **/
3776 static void ipr_build_ucode_ioadl64(struct ipr_cmnd *ipr_cmd,
3777 struct ipr_sglist *sglist)
3778 {
3779 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
3780 struct ipr_ioadl64_desc *ioadl64 = ipr_cmd->i.ioadl64;
3781 struct scatterlist *scatterlist = sglist->scatterlist;
3782 int i;
3783
3784 ipr_cmd->dma_use_sg = sglist->num_dma_sg;
3785 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
3786 ioarcb->data_transfer_length = cpu_to_be32(sglist->buffer_len);
3787
3788 ioarcb->ioadl_len =
3789 cpu_to_be32(sizeof(struct ipr_ioadl64_desc) * ipr_cmd->dma_use_sg);
3790 for (i = 0; i < ipr_cmd->dma_use_sg; i++) {
3791 ioadl64[i].flags = cpu_to_be32(IPR_IOADL_FLAGS_WRITE);
3792 ioadl64[i].data_len = cpu_to_be32(sg_dma_len(&scatterlist[i]));
3793 ioadl64[i].address = cpu_to_be64(sg_dma_address(&scatterlist[i]));
3794 }
3795
3796 ioadl64[i-1].flags |= cpu_to_be32(IPR_IOADL_FLAGS_LAST);
3797 }
3798
3799 /**
3800 * ipr_build_ucode_ioadl - Build a microcode download IOADL
3801 * @ipr_cmd: ipr command struct
3802 * @sglist: scatter/gather list
3803 *
3804 * Builds a microcode download IOA data list (IOADL).
3805 *
3806 **/
3807 static void ipr_build_ucode_ioadl(struct ipr_cmnd *ipr_cmd,
3808 struct ipr_sglist *sglist)
3809 {
3810 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
3811 struct ipr_ioadl_desc *ioadl = ipr_cmd->i.ioadl;
3812 struct scatterlist *scatterlist = sglist->scatterlist;
3813 int i;
3814
3815 ipr_cmd->dma_use_sg = sglist->num_dma_sg;
3816 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
3817 ioarcb->data_transfer_length = cpu_to_be32(sglist->buffer_len);
3818
3819 ioarcb->ioadl_len =
3820 cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg);
3821
3822 for (i = 0; i < ipr_cmd->dma_use_sg; i++) {
3823 ioadl[i].flags_and_data_len =
3824 cpu_to_be32(IPR_IOADL_FLAGS_WRITE | sg_dma_len(&scatterlist[i]));
3825 ioadl[i].address =
3826 cpu_to_be32(sg_dma_address(&scatterlist[i]));
3827 }
3828
3829 ioadl[i-1].flags_and_data_len |=
3830 cpu_to_be32(IPR_IOADL_FLAGS_LAST);
3831 }
3832
3833 /**
3834 * ipr_update_ioa_ucode - Update IOA's microcode
3835 * @ioa_cfg: ioa config struct
3836 * @sglist: scatter/gather list
3837 *
3838 * Initiate an adapter reset to update the IOA's microcode
3839 *
3840 * Return value:
3841 * 0 on success / -EIO on failure
3842 **/
3843 static int ipr_update_ioa_ucode(struct ipr_ioa_cfg *ioa_cfg,
3844 struct ipr_sglist *sglist)
3845 {
3846 unsigned long lock_flags;
3847
3848 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3849 while (ioa_cfg->in_reset_reload) {
3850 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3851 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
3852 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3853 }
3854
3855 if (ioa_cfg->ucode_sglist) {
3856 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3857 dev_err(&ioa_cfg->pdev->dev,
3858 "Microcode download already in progress\n");
3859 return -EIO;
3860 }
3861
3862 sglist->num_dma_sg = pci_map_sg(ioa_cfg->pdev, sglist->scatterlist,
3863 sglist->num_sg, DMA_TO_DEVICE);
3864
3865 if (!sglist->num_dma_sg) {
3866 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3867 dev_err(&ioa_cfg->pdev->dev,
3868 "Failed to map microcode download buffer!\n");
3869 return -EIO;
3870 }
3871
3872 ioa_cfg->ucode_sglist = sglist;
3873 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NORMAL);
3874 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3875 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
3876
3877 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3878 ioa_cfg->ucode_sglist = NULL;
3879 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3880 return 0;
3881 }
3882
3883 /**
3884 * ipr_store_update_fw - Update the firmware on the adapter
3885  * @dev: device struct
3886 * @buf: buffer
3887 * @count: buffer size
3888 *
3889 * This function will update the firmware on the adapter.
3890 *
3891 * Return value:
3892 * count on success / other on failure
3893 **/
3894 static ssize_t ipr_store_update_fw(struct device *dev,
3895 struct device_attribute *attr,
3896 const char *buf, size_t count)
3897 {
3898 struct Scsi_Host *shost = class_to_shost(dev);
3899 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3900 struct ipr_ucode_image_header *image_hdr;
3901 const struct firmware *fw_entry;
3902 struct ipr_sglist *sglist;
3903 char fname[100];
3904 char *src;
3905 int len, result, dnld_size;
3906
3907 if (!capable(CAP_SYS_ADMIN))
3908 return -EACCES;
3909
3910 len = snprintf(fname, 99, "%s", buf);
3911 fname[len-1] = '\0';
3912
3913 if (request_firmware(&fw_entry, fname, &ioa_cfg->pdev->dev)) {
3914 dev_err(&ioa_cfg->pdev->dev, "Firmware file %s not found\n", fname);
3915 return -EIO;
3916 }
3917
3918 image_hdr = (struct ipr_ucode_image_header *)fw_entry->data;
3919
3920 src = (u8 *)image_hdr + be32_to_cpu(image_hdr->header_length);
3921 dnld_size = fw_entry->size - be32_to_cpu(image_hdr->header_length);
3922 sglist = ipr_alloc_ucode_buffer(dnld_size);
3923
3924 if (!sglist) {
3925 dev_err(&ioa_cfg->pdev->dev, "Microcode buffer allocation failed\n");
3926 release_firmware(fw_entry);
3927 return -ENOMEM;
3928 }
3929
3930 result = ipr_copy_ucode_buffer(sglist, src, dnld_size);
3931
3932 if (result) {
3933 dev_err(&ioa_cfg->pdev->dev,
3934 "Microcode buffer copy to DMA buffer failed\n");
3935 goto out;
3936 }
3937
3938 ipr_info("Updating microcode, please be patient. This may take up to 30 minutes.\n");
3939
3940 result = ipr_update_ioa_ucode(ioa_cfg, sglist);
3941
3942 if (!result)
3943 result = count;
3944 out:
3945 ipr_free_ucode_buffer(sglist);
3946 release_firmware(fw_entry);
3947 return result;
3948 }
3949
3950 static struct device_attribute ipr_update_fw_attr = {
3951 .attr = {
3952 .name = "update_fw",
3953 .mode = S_IWUSR,
3954 },
3955 .store = ipr_store_update_fw
3956 };
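
/*
 * Illustrative usage (assumed sysfs path and a made-up file name): the string
 * written to update_fw is handed to request_firmware(), so the image must be
 * visible to the firmware loader (typically under /lib/firmware). For example:
 *
 *   cp my-ipr-ucode.bin /lib/firmware/
 *   echo my-ipr-ucode.bin > /sys/class/scsi_host/host0/update_fw
 *
 * Note that the store handler chops the final character of the name, which
 * works with echo's trailing newline; echo -n would truncate the file name.
 */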
3957
3958 /**
3959 * ipr_show_fw_type - Show the adapter's firmware type.
3960 * @dev: class device struct
3961 * @buf: buffer
3962 *
3963 * Return value:
3964 * number of bytes printed to buffer
3965 **/
3966 static ssize_t ipr_show_fw_type(struct device *dev,
3967 struct device_attribute *attr, char *buf)
3968 {
3969 struct Scsi_Host *shost = class_to_shost(dev);
3970 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3971 unsigned long lock_flags = 0;
3972 int len;
3973
3974 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3975 len = snprintf(buf, PAGE_SIZE, "%d\n", ioa_cfg->sis64);
3976 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3977 return len;
3978 }
3979
3980 static struct device_attribute ipr_ioa_fw_type_attr = {
3981 .attr = {
3982 .name = "fw_type",
3983 .mode = S_IRUGO,
3984 },
3985 .show = ipr_show_fw_type
3986 };
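
/*
 * Illustrative note (assumed sysfs path): fw_type simply reports the sis64
 * flag, so it reads "1" on 64-bit SIS adapters and "0" on older ones:
 *
 *   cat /sys/class/scsi_host/host0/fw_type
 */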
3987
3988 static struct device_attribute *ipr_ioa_attrs[] = {
3989 &ipr_fw_version_attr,
3990 &ipr_log_level_attr,
3991 &ipr_diagnostics_attr,
3992 &ipr_ioa_state_attr,
3993 &ipr_ioa_reset_attr,
3994 &ipr_update_fw_attr,
3995 &ipr_ioa_fw_type_attr,
3996 &ipr_iopoll_weight_attr,
3997 NULL,
3998 };
3999
4000 #ifdef CONFIG_SCSI_IPR_DUMP
4001 /**
4002 * ipr_read_dump - Dump the adapter
4003 * @filp: open sysfs file
4004 * @kobj: kobject struct
4005 * @bin_attr: bin_attribute struct
4006 * @buf: buffer
4007 * @off: offset
4008 * @count: buffer size
4009 *
4010 * Return value:
4011 * number of bytes printed to buffer
4012 **/
4013 static ssize_t ipr_read_dump(struct file *filp, struct kobject *kobj,
4014 struct bin_attribute *bin_attr,
4015 char *buf, loff_t off, size_t count)
4016 {
4017 struct device *cdev = container_of(kobj, struct device, kobj);
4018 struct Scsi_Host *shost = class_to_shost(cdev);
4019 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
4020 struct ipr_dump *dump;
4021 unsigned long lock_flags = 0;
4022 char *src;
4023 int len, sdt_end;
4024 size_t rc = count;
4025
4026 if (!capable(CAP_SYS_ADMIN))
4027 return -EACCES;
4028
4029 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4030 dump = ioa_cfg->dump;
4031
4032 if (ioa_cfg->sdt_state != DUMP_OBTAINED || !dump) {
4033 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4034 return 0;
4035 }
4036 kref_get(&dump->kref);
4037 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4038
4039 if (off > dump->driver_dump.hdr.len) {
4040 kref_put(&dump->kref, ipr_release_dump);
4041 return 0;
4042 }
4043
4044 if (off + count > dump->driver_dump.hdr.len) {
4045 count = dump->driver_dump.hdr.len - off;
4046 rc = count;
4047 }
4048
4049 if (count && off < sizeof(dump->driver_dump)) {
4050 if (off + count > sizeof(dump->driver_dump))
4051 len = sizeof(dump->driver_dump) - off;
4052 else
4053 len = count;
4054 src = (u8 *)&dump->driver_dump + off;
4055 memcpy(buf, src, len);
4056 buf += len;
4057 off += len;
4058 count -= len;
4059 }
4060
4061 off -= sizeof(dump->driver_dump);
4062
4063 if (ioa_cfg->sis64)
4064 sdt_end = offsetof(struct ipr_ioa_dump, sdt.entry) +
4065 (be32_to_cpu(dump->ioa_dump.sdt.hdr.num_entries_used) *
4066 sizeof(struct ipr_sdt_entry));
4067 else
4068 sdt_end = offsetof(struct ipr_ioa_dump, sdt.entry) +
4069 (IPR_FMT2_NUM_SDT_ENTRIES * sizeof(struct ipr_sdt_entry));
4070
4071 if (count && off < sdt_end) {
4072 if (off + count > sdt_end)
4073 len = sdt_end - off;
4074 else
4075 len = count;
4076 src = (u8 *)&dump->ioa_dump + off;
4077 memcpy(buf, src, len);
4078 buf += len;
4079 off += len;
4080 count -= len;
4081 }
4082
4083 off -= sdt_end;
4084
4085 while (count) {
4086 if ((off & PAGE_MASK) != ((off + count) & PAGE_MASK))
4087 len = PAGE_ALIGN(off) - off;
4088 else
4089 len = count;
4090 src = (u8 *)dump->ioa_dump.ioa_data[(off & PAGE_MASK) >> PAGE_SHIFT];
4091 src += off & ~PAGE_MASK;
4092 memcpy(buf, src, len);
4093 buf += len;
4094 off += len;
4095 count -= len;
4096 }
4097
4098 kref_put(&dump->kref, ipr_release_dump);
4099 return rc;
4100 }
4101
4102 /**
4103 * ipr_alloc_dump - Prepare for adapter dump
4104 * @ioa_cfg: ioa config struct
4105 *
4106 * Return value:
4107 * 0 on success / other on failure
4108 **/
4109 static int ipr_alloc_dump(struct ipr_ioa_cfg *ioa_cfg)
4110 {
4111 struct ipr_dump *dump;
4112 __be32 **ioa_data;
4113 unsigned long lock_flags = 0;
4114
4115 dump = kzalloc(sizeof(struct ipr_dump), GFP_KERNEL);
4116
4117 if (!dump) {
4118 ipr_err("Dump memory allocation failed\n");
4119 return -ENOMEM;
4120 }
4121
4122 if (ioa_cfg->sis64)
4123 ioa_data = vmalloc(IPR_FMT3_MAX_NUM_DUMP_PAGES * sizeof(__be32 *));
4124 else
4125 ioa_data = vmalloc(IPR_FMT2_MAX_NUM_DUMP_PAGES * sizeof(__be32 *));
4126
4127 if (!ioa_data) {
4128 ipr_err("Dump memory allocation failed\n");
4129 kfree(dump);
4130 return -ENOMEM;
4131 }
4132
4133 dump->ioa_dump.ioa_data = ioa_data;
4134
4135 kref_init(&dump->kref);
4136 dump->ioa_cfg = ioa_cfg;
4137
4138 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4139
4140 if (INACTIVE != ioa_cfg->sdt_state) {
4141 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4142 vfree(dump->ioa_dump.ioa_data);
4143 kfree(dump);
4144 return 0;
4145 }
4146
4147 ioa_cfg->dump = dump;
4148 ioa_cfg->sdt_state = WAIT_FOR_DUMP;
4149 if (ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead && !ioa_cfg->dump_taken) {
4150 ioa_cfg->dump_taken = 1;
4151 schedule_work(&ioa_cfg->work_q);
4152 }
4153 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4154
4155 return 0;
4156 }
4157
4158 /**
4159 * ipr_free_dump - Free adapter dump memory
4160 * @ioa_cfg: ioa config struct
4161 *
4162 * Return value:
4163 * 0 on success / other on failure
4164 **/
4165 static int ipr_free_dump(struct ipr_ioa_cfg *ioa_cfg)
4166 {
4167 struct ipr_dump *dump;
4168 unsigned long lock_flags = 0;
4169
4170 ENTER;
4171
4172 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4173 dump = ioa_cfg->dump;
4174 if (!dump) {
4175 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4176 return 0;
4177 }
4178
4179 ioa_cfg->dump = NULL;
4180 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4181
4182 kref_put(&dump->kref, ipr_release_dump);
4183
4184 LEAVE;
4185 return 0;
4186 }
4187
4188 /**
4189 * ipr_write_dump - Setup dump state of adapter
4190 * @filp: open sysfs file
4191 * @kobj: kobject struct
4192 * @bin_attr: bin_attribute struct
4193 * @buf: buffer
4194 * @off: offset
4195 * @count: buffer size
4196 *
4197 * Return value:
4198  * count on success / other on failure
4199 **/
4200 static ssize_t ipr_write_dump(struct file *filp, struct kobject *kobj,
4201 struct bin_attribute *bin_attr,
4202 char *buf, loff_t off, size_t count)
4203 {
4204 struct device *cdev = container_of(kobj, struct device, kobj);
4205 struct Scsi_Host *shost = class_to_shost(cdev);
4206 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
4207 int rc;
4208
4209 if (!capable(CAP_SYS_ADMIN))
4210 return -EACCES;
4211
4212 if (buf[0] == '1')
4213 rc = ipr_alloc_dump(ioa_cfg);
4214 else if (buf[0] == '0')
4215 rc = ipr_free_dump(ioa_cfg);
4216 else
4217 return -EINVAL;
4218
4219 if (rc)
4220 return rc;
4221 else
4222 return count;
4223 }
4224
4225 static struct bin_attribute ipr_dump_attr = {
4226 .attr = {
4227 .name = "dump",
4228 .mode = S_IRUSR | S_IWUSR,
4229 },
4230 .size = 0,
4231 .read = ipr_read_dump,
4232 .write = ipr_write_dump
4233 };
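
/*
 * Illustrative usage (assumed sysfs path): writing '1' to the dump attribute
 * arms dump collection, the dump data can then be read back as raw binary
 * once it has actually been obtained, and writing '0' releases the memory:
 *
 *   echo 1 > /sys/class/scsi_host/host0/dump
 *   cat /sys/class/scsi_host/host0/dump > /tmp/ipr_dump.bin
 *   echo 0 > /sys/class/scsi_host/host0/dump
 *
 * Reads return 0 bytes until the adapter dump has been collected
 * (sdt_state == DUMP_OBTAINED).
 */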
4234 #else
4235 static int ipr_free_dump(struct ipr_ioa_cfg *ioa_cfg) { return 0; };
4236 #endif
4237
4238 /**
4239 * ipr_change_queue_depth - Change the device's queue depth
4240 * @sdev: scsi device struct
4241 * @qdepth: depth to set
4242 * @reason: calling context
4243 *
4244 * Return value:
4245 * actual depth set
4246 **/
4247 static int ipr_change_queue_depth(struct scsi_device *sdev, int qdepth,
4248 int reason)
4249 {
4250 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
4251 struct ipr_resource_entry *res;
4252 unsigned long lock_flags = 0;
4253
4254 if (reason != SCSI_QDEPTH_DEFAULT)
4255 return -EOPNOTSUPP;
4256
4257 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4258 res = (struct ipr_resource_entry *)sdev->hostdata;
4259
4260 if (res && ipr_is_gata(res) && qdepth > IPR_MAX_CMD_PER_ATA_LUN)
4261 qdepth = IPR_MAX_CMD_PER_ATA_LUN;
4262 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4263
4264 scsi_adjust_queue_depth(sdev, scsi_get_tag_type(sdev), qdepth);
4265 return sdev->queue_depth;
4266 }
4267
4268 /**
4269 * ipr_change_queue_type - Change the device's queue type
4270  * @sdev: scsi device struct
4271 * @tag_type: type of tags to use
4272 *
4273 * Return value:
4274 * actual queue type set
4275 **/
4276 static int ipr_change_queue_type(struct scsi_device *sdev, int tag_type)
4277 {
4278 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
4279 struct ipr_resource_entry *res;
4280 unsigned long lock_flags = 0;
4281
4282 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4283 res = (struct ipr_resource_entry *)sdev->hostdata;
4284
4285 if (res) {
4286 if (ipr_is_gscsi(res) && sdev->tagged_supported) {
4287 /*
4288 * We don't bother quiescing the device here since the
4289 * adapter firmware does it for us.
4290 */
4291 scsi_set_tag_type(sdev, tag_type);
4292
4293 if (tag_type)
4294 scsi_activate_tcq(sdev, sdev->queue_depth);
4295 else
4296 scsi_deactivate_tcq(sdev, sdev->queue_depth);
4297 } else
4298 tag_type = 0;
4299 } else
4300 tag_type = 0;
4301
4302 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4303 return tag_type;
4304 }
4305
4306 /**
4307 * ipr_show_adapter_handle - Show the adapter's resource handle for this device
4308 * @dev: device struct
4309 * @attr: device attribute structure
4310 * @buf: buffer
4311 *
4312 * Return value:
4313 * number of bytes printed to buffer
4314 **/
4315 static ssize_t ipr_show_adapter_handle(struct device *dev, struct device_attribute *attr, char *buf)
4316 {
4317 struct scsi_device *sdev = to_scsi_device(dev);
4318 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
4319 struct ipr_resource_entry *res;
4320 unsigned long lock_flags = 0;
4321 ssize_t len = -ENXIO;
4322
4323 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4324 res = (struct ipr_resource_entry *)sdev->hostdata;
4325 if (res)
4326 len = snprintf(buf, PAGE_SIZE, "%08X\n", res->res_handle);
4327 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4328 return len;
4329 }
4330
4331 static struct device_attribute ipr_adapter_handle_attr = {
4332 .attr = {
4333 .name = "adapter_handle",
4334 .mode = S_IRUSR,
4335 },
4336 .show = ipr_show_adapter_handle
4337 };
4338
4339 /**
4340 * ipr_show_resource_path - Show the resource path or the resource address for
4341 * this device.
4342 * @dev: device struct
4343 * @attr: device attribute structure
4344 * @buf: buffer
4345 *
4346 * Return value:
4347 * number of bytes printed to buffer
4348 **/
4349 static ssize_t ipr_show_resource_path(struct device *dev, struct device_attribute *attr, char *buf)
4350 {
4351 struct scsi_device *sdev = to_scsi_device(dev);
4352 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
4353 struct ipr_resource_entry *res;
4354 unsigned long lock_flags = 0;
4355 ssize_t len = -ENXIO;
4356 char buffer[IPR_MAX_RES_PATH_LENGTH];
4357
4358 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4359 res = (struct ipr_resource_entry *)sdev->hostdata;
4360 if (res && ioa_cfg->sis64)
4361 len = snprintf(buf, PAGE_SIZE, "%s\n",
4362 __ipr_format_res_path(res->res_path, buffer,
4363 sizeof(buffer)));
4364 else if (res)
4365 len = snprintf(buf, PAGE_SIZE, "%d:%d:%d:%d\n", ioa_cfg->host->host_no,
4366 res->bus, res->target, res->lun);
4367
4368 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4369 return len;
4370 }
4371
4372 static struct device_attribute ipr_resource_path_attr = {
4373 .attr = {
4374 .name = "resource_path",
4375 .mode = S_IRUGO,
4376 },
4377 .show = ipr_show_resource_path
4378 };
4379
4380 /**
4381 * ipr_show_device_id - Show the device_id for this device.
4382 * @dev: device struct
4383 * @attr: device attribute structure
4384 * @buf: buffer
4385 *
4386 * Return value:
4387 * number of bytes printed to buffer
4388 **/
4389 static ssize_t ipr_show_device_id(struct device *dev, struct device_attribute *attr, char *buf)
4390 {
4391 struct scsi_device *sdev = to_scsi_device(dev);
4392 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
4393 struct ipr_resource_entry *res;
4394 unsigned long lock_flags = 0;
4395 ssize_t len = -ENXIO;
4396
4397 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4398 res = (struct ipr_resource_entry *)sdev->hostdata;
4399 if (res && ioa_cfg->sis64)
4400 len = snprintf(buf, PAGE_SIZE, "0x%llx\n", res->dev_id);
4401 else if (res)
4402 len = snprintf(buf, PAGE_SIZE, "0x%llx\n", res->lun_wwn);
4403
4404 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4405 return len;
4406 }
4407
4408 static struct device_attribute ipr_device_id_attr = {
4409 .attr = {
4410 .name = "device_id",
4411 .mode = S_IRUGO,
4412 },
4413 .show = ipr_show_device_id
4414 };
4415
4416 /**
4417 * ipr_show_resource_type - Show the resource type for this device.
4418 * @dev: device struct
4419 * @attr: device attribute structure
4420 * @buf: buffer
4421 *
4422 * Return value:
4423 * number of bytes printed to buffer
4424 **/
4425 static ssize_t ipr_show_resource_type(struct device *dev, struct device_attribute *attr, char *buf)
4426 {
4427 struct scsi_device *sdev = to_scsi_device(dev);
4428 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
4429 struct ipr_resource_entry *res;
4430 unsigned long lock_flags = 0;
4431 ssize_t len = -ENXIO;
4432
4433 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4434 res = (struct ipr_resource_entry *)sdev->hostdata;
4435
4436 if (res)
4437 len = snprintf(buf, PAGE_SIZE, "%x\n", res->type);
4438
4439 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4440 return len;
4441 }
4442
4443 static struct device_attribute ipr_resource_type_attr = {
4444 .attr = {
4445 .name = "resource_type",
4446 .mode = S_IRUGO,
4447 },
4448 .show = ipr_show_resource_type
4449 };
4450
4451 static struct device_attribute *ipr_dev_attrs[] = {
4452 &ipr_adapter_handle_attr,
4453 &ipr_resource_path_attr,
4454 &ipr_device_id_attr,
4455 &ipr_resource_type_attr,
4456 NULL,
4457 };
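
/*
 * Illustrative note (assumed sysfs paths and a made-up H:C:T:L): these
 * per-device attributes are attached to every SCSI device this driver
 * exposes, so they appear next to the standard SCSI device attributes, e.g.:
 *
 *   cat /sys/bus/scsi/devices/2:0:0:0/adapter_handle
 *   cat /sys/bus/scsi/devices/2:0:0:0/device_id
 */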
4458
4459 /**
4460 * ipr_biosparam - Return the HSC mapping
4461 * @sdev: scsi device struct
4462 * @block_device: block device pointer
4463 * @capacity: capacity of the device
4464 * @parm: Array containing returned HSC values.
4465 *
4466 * This function generates the HSC parms that fdisk uses.
4467 * We want to make sure we return something that places partitions
4468 * on 4k boundaries for best performance with the IOA.
4469 *
4470 * Return value:
4471 * 0 on success
4472 **/
4473 static int ipr_biosparam(struct scsi_device *sdev,
4474 struct block_device *block_device,
4475 sector_t capacity, int *parm)
4476 {
4477 int heads, sectors;
4478 sector_t cylinders;
4479
4480 heads = 128;
4481 sectors = 32;
4482
4483 cylinders = capacity;
4484 sector_div(cylinders, (128 * 32));
4485
4486 /* return result */
4487 parm[0] = heads;
4488 parm[1] = sectors;
4489 parm[2] = cylinders;
4490
4491 return 0;
4492 }
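
/*
 * Worked example (illustrative): with the fixed 128-head, 32-sector geometry
 * one cylinder spans 128 * 32 = 4096 sectors, i.e. 2 MB of 512-byte blocks.
 * An 8 GiB device with 16777216 sectors therefore reports
 * 16777216 / 4096 = 4096 cylinders, and cylinder-aligned partitions always
 * start on 4 KB boundaries, as intended.
 */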
4493
4494 /**
4495 * ipr_find_starget - Find target based on bus/target.
4496 * @starget: scsi target struct
4497 *
4498 * Return value:
4499 * resource entry pointer if found / NULL if not found
4500 **/
4501 static struct ipr_resource_entry *ipr_find_starget(struct scsi_target *starget)
4502 {
4503 struct Scsi_Host *shost = dev_to_shost(&starget->dev);
4504 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) shost->hostdata;
4505 struct ipr_resource_entry *res;
4506
4507 list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
4508 if ((res->bus == starget->channel) &&
4509 (res->target == starget->id)) {
4510 return res;
4511 }
4512 }
4513
4514 return NULL;
4515 }
4516
4517 static struct ata_port_info sata_port_info;
4518
4519 /**
4520 * ipr_target_alloc - Prepare for commands to a SCSI target
4521 * @starget: scsi target struct
4522 *
4523 * If the device is a SATA device, this function allocates an
4524 * ATA port with libata, else it does nothing.
4525 *
4526 * Return value:
4527 * 0 on success / non-0 on failure
4528 **/
4529 static int ipr_target_alloc(struct scsi_target *starget)
4530 {
4531 struct Scsi_Host *shost = dev_to_shost(&starget->dev);
4532 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) shost->hostdata;
4533 struct ipr_sata_port *sata_port;
4534 struct ata_port *ap;
4535 struct ipr_resource_entry *res;
4536 unsigned long lock_flags;
4537
4538 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4539 res = ipr_find_starget(starget);
4540 starget->hostdata = NULL;
4541
4542 if (res && ipr_is_gata(res)) {
4543 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4544 sata_port = kzalloc(sizeof(*sata_port), GFP_KERNEL);
4545 if (!sata_port)
4546 return -ENOMEM;
4547
4548 ap = ata_sas_port_alloc(&ioa_cfg->ata_host, &sata_port_info, shost);
4549 if (ap) {
4550 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4551 sata_port->ioa_cfg = ioa_cfg;
4552 sata_port->ap = ap;
4553 sata_port->res = res;
4554
4555 res->sata_port = sata_port;
4556 ap->private_data = sata_port;
4557 starget->hostdata = sata_port;
4558 } else {
4559 kfree(sata_port);
4560 return -ENOMEM;
4561 }
4562 }
4563 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4564
4565 return 0;
4566 }
4567
4568 /**
4569 * ipr_target_destroy - Destroy a SCSI target
4570 * @starget: scsi target struct
4571 *
4572 * If the device was a SATA device, this function frees the libata
4573 * ATA port, else it does nothing.
4574 *
4575 **/
4576 static void ipr_target_destroy(struct scsi_target *starget)
4577 {
4578 struct ipr_sata_port *sata_port = starget->hostdata;
4579 struct Scsi_Host *shost = dev_to_shost(&starget->dev);
4580 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) shost->hostdata;
4581
4582 if (ioa_cfg->sis64) {
4583 if (!ipr_find_starget(starget)) {
4584 if (starget->channel == IPR_ARRAY_VIRTUAL_BUS)
4585 clear_bit(starget->id, ioa_cfg->array_ids);
4586 else if (starget->channel == IPR_VSET_VIRTUAL_BUS)
4587 clear_bit(starget->id, ioa_cfg->vset_ids);
4588 else if (starget->channel == 0)
4589 clear_bit(starget->id, ioa_cfg->target_ids);
4590 }
4591 }
4592
4593 if (sata_port) {
4594 starget->hostdata = NULL;
4595 ata_sas_port_destroy(sata_port->ap);
4596 kfree(sata_port);
4597 }
4598 }
4599
4600 /**
4601 * ipr_find_sdev - Find device based on bus/target/lun.
4602 * @sdev: scsi device struct
4603 *
4604 * Return value:
4605 * resource entry pointer if found / NULL if not found
4606 **/
4607 static struct ipr_resource_entry *ipr_find_sdev(struct scsi_device *sdev)
4608 {
4609 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) sdev->host->hostdata;
4610 struct ipr_resource_entry *res;
4611
4612 list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
4613 if ((res->bus == sdev->channel) &&
4614 (res->target == sdev->id) &&
4615 (res->lun == sdev->lun))
4616 return res;
4617 }
4618
4619 return NULL;
4620 }
4621
4622 /**
4623 * ipr_slave_destroy - Unconfigure a SCSI device
4624 * @sdev: scsi device struct
4625 *
4626 * Return value:
4627 * nothing
4628 **/
4629 static void ipr_slave_destroy(struct scsi_device *sdev)
4630 {
4631 struct ipr_resource_entry *res;
4632 struct ipr_ioa_cfg *ioa_cfg;
4633 unsigned long lock_flags = 0;
4634
4635 ioa_cfg = (struct ipr_ioa_cfg *) sdev->host->hostdata;
4636
4637 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4638 res = (struct ipr_resource_entry *) sdev->hostdata;
4639 if (res) {
4640 if (res->sata_port)
4641 res->sata_port->ap->link.device[0].class = ATA_DEV_NONE;
4642 sdev->hostdata = NULL;
4643 res->sdev = NULL;
4644 res->sata_port = NULL;
4645 }
4646 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4647 }
4648
4649 /**
4650 * ipr_slave_configure - Configure a SCSI device
4651 * @sdev: scsi device struct
4652 *
4653 * This function configures the specified scsi device.
4654 *
4655 * Return value:
4656 * 0 on success
4657 **/
4658 static int ipr_slave_configure(struct scsi_device *sdev)
4659 {
4660 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) sdev->host->hostdata;
4661 struct ipr_resource_entry *res;
4662 struct ata_port *ap = NULL;
4663 unsigned long lock_flags = 0;
4664 char buffer[IPR_MAX_RES_PATH_LENGTH];
4665
4666 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4667 res = sdev->hostdata;
4668 if (res) {
4669 if (ipr_is_af_dasd_device(res))
4670 sdev->type = TYPE_RAID;
4671 if (ipr_is_af_dasd_device(res) || ipr_is_ioa_resource(res)) {
4672 sdev->scsi_level = 4;
4673 sdev->no_uld_attach = 1;
4674 }
4675 if (ipr_is_vset_device(res)) {
4676 blk_queue_rq_timeout(sdev->request_queue,
4677 IPR_VSET_RW_TIMEOUT);
4678 blk_queue_max_hw_sectors(sdev->request_queue, IPR_VSET_MAX_SECTORS);
4679 }
4680 if (ipr_is_gata(res) && res->sata_port)
4681 ap = res->sata_port->ap;
4682 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4683
4684 if (ap) {
4685 scsi_adjust_queue_depth(sdev, 0, IPR_MAX_CMD_PER_ATA_LUN);
4686 ata_sas_slave_configure(sdev, ap);
4687 } else
4688 scsi_adjust_queue_depth(sdev, 0, sdev->host->cmd_per_lun);
4689 if (ioa_cfg->sis64)
4690 sdev_printk(KERN_INFO, sdev, "Resource path: %s\n",
4691 ipr_format_res_path(ioa_cfg,
4692 res->res_path, buffer, sizeof(buffer)));
4693 return 0;
4694 }
4695 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4696 return 0;
4697 }
4698
4699 /**
4700 * ipr_ata_slave_alloc - Prepare for commands to a SATA device
4701 * @sdev: scsi device struct
4702 *
4703 * This function initializes an ATA port so that future commands
4704 * sent through queuecommand will work.
4705 *
4706 * Return value:
4707 * 0 on success
4708 **/
4709 static int ipr_ata_slave_alloc(struct scsi_device *sdev)
4710 {
4711 struct ipr_sata_port *sata_port = NULL;
4712 int rc = -ENXIO;
4713
4714 ENTER;
4715 if (sdev->sdev_target)
4716 sata_port = sdev->sdev_target->hostdata;
4717 if (sata_port) {
4718 rc = ata_sas_port_init(sata_port->ap);
4719 if (rc == 0)
4720 rc = ata_sas_sync_probe(sata_port->ap);
4721 }
4722
4723 if (rc)
4724 ipr_slave_destroy(sdev);
4725
4726 LEAVE;
4727 return rc;
4728 }
4729
4730 /**
4731 * ipr_slave_alloc - Prepare for commands to a device.
4732 * @sdev: scsi device struct
4733 *
4734 * This function saves a pointer to the resource entry
4735 * in the scsi device struct if the device exists. We
4736 * can then use this pointer in ipr_queuecommand when
4737 * handling new commands.
4738 *
4739 * Return value:
4740 * 0 on success / -ENXIO if device does not exist
4741 **/
4742 static int ipr_slave_alloc(struct scsi_device *sdev)
4743 {
4744 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) sdev->host->hostdata;
4745 struct ipr_resource_entry *res;
4746 unsigned long lock_flags;
4747 int rc = -ENXIO;
4748
4749 sdev->hostdata = NULL;
4750
4751 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4752
4753 res = ipr_find_sdev(sdev);
4754 if (res) {
4755 res->sdev = sdev;
4756 res->add_to_ml = 0;
4757 res->in_erp = 0;
4758 sdev->hostdata = res;
4759 if (!ipr_is_naca_model(res))
4760 res->needs_sync_complete = 1;
4761 rc = 0;
4762 if (ipr_is_gata(res)) {
4763 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4764 return ipr_ata_slave_alloc(sdev);
4765 }
4766 }
4767
4768 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4769
4770 return rc;
4771 }
4772
4773 /**
4774 * ipr_match_lun - Match function for specified LUN
4775 * @ipr_cmd: ipr command struct
4776 * @device: device to match (sdev)
4777 *
4778 * Returns:
4779 * 1 if command matches sdev / 0 if command does not match sdev
4780 **/
4781 static int ipr_match_lun(struct ipr_cmnd *ipr_cmd, void *device)
4782 {
4783 if (ipr_cmd->scsi_cmd && ipr_cmd->scsi_cmd->device == device)
4784 return 1;
4785 return 0;
4786 }
4787
4788 /**
4789 * ipr_wait_for_ops - Wait for matching commands to complete
4790  * @ioa_cfg: ioa config struct
4791 * @device: device to match (sdev)
4792 * @match: match function to use
4793 *
4794 * Returns:
4795 * SUCCESS / FAILED
4796 **/
4797 static int ipr_wait_for_ops(struct ipr_ioa_cfg *ioa_cfg, void *device,
4798 int (*match)(struct ipr_cmnd *, void *))
4799 {
4800 struct ipr_cmnd *ipr_cmd;
4801 int wait;
4802 unsigned long flags;
4803 struct ipr_hrr_queue *hrrq;
4804 signed long timeout = IPR_ABORT_TASK_TIMEOUT;
4805 DECLARE_COMPLETION_ONSTACK(comp);
4806
4807 ENTER;
4808 do {
4809 wait = 0;
4810
4811 for_each_hrrq(hrrq, ioa_cfg) {
4812 spin_lock_irqsave(hrrq->lock, flags);
4813 list_for_each_entry(ipr_cmd, &hrrq->hrrq_pending_q, queue) {
4814 if (match(ipr_cmd, device)) {
4815 ipr_cmd->eh_comp = &comp;
4816 wait++;
4817 }
4818 }
4819 spin_unlock_irqrestore(hrrq->lock, flags);
4820 }
4821
4822 if (wait) {
4823 timeout = wait_for_completion_timeout(&comp, timeout);
4824
4825 if (!timeout) {
4826 wait = 0;
4827
4828 for_each_hrrq(hrrq, ioa_cfg) {
4829 spin_lock_irqsave(hrrq->lock, flags);
4830 list_for_each_entry(ipr_cmd, &hrrq->hrrq_pending_q, queue) {
4831 if (match(ipr_cmd, device)) {
4832 ipr_cmd->eh_comp = NULL;
4833 wait++;
4834 }
4835 }
4836 spin_unlock_irqrestore(hrrq->lock, flags);
4837 }
4838
4839 if (wait)
4840 dev_err(&ioa_cfg->pdev->dev, "Timed out waiting for aborted commands\n");
4841 LEAVE;
4842 return wait ? FAILED : SUCCESS;
4843 }
4844 }
4845 } while (wait);
4846
4847 LEAVE;
4848 return SUCCESS;
4849 }
4850
4851 static int ipr_eh_host_reset(struct scsi_cmnd *cmd)
4852 {
4853 struct ipr_ioa_cfg *ioa_cfg;
4854 unsigned long lock_flags = 0;
4855 int rc = SUCCESS;
4856
4857 ENTER;
4858 ioa_cfg = (struct ipr_ioa_cfg *) cmd->device->host->hostdata;
4859 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4860
4861 if (!ioa_cfg->in_reset_reload && !ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead) {
4862 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_ABBREV);
4863 dev_err(&ioa_cfg->pdev->dev,
4864 "Adapter being reset as a result of error recovery.\n");
4865
4866 if (WAIT_FOR_DUMP == ioa_cfg->sdt_state)
4867 ioa_cfg->sdt_state = GET_DUMP;
4868 }
4869
4870 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4871 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
4872 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4873
4874 	/* If we got hit with a host reset while we were already resetting
4875 	   the adapter for some reason, and that reset failed, fail this reset as well. */
4876 if (ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead) {
4877 ipr_trace;
4878 rc = FAILED;
4879 }
4880
4881 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4882 LEAVE;
4883 return rc;
4884 }
4885
4886 /**
4887 * ipr_device_reset - Reset the device
4888 * @ioa_cfg: ioa config struct
4889 * @res: resource entry struct
4890 *
4891 * This function issues a device reset to the affected device.
4892 * If the device is a SCSI device, a LUN reset will be sent
4893 * to the device first. If that does not work, a target reset
4894 * will be sent. If the device is a SATA device, a PHY reset will
4895 * be sent.
4896 *
4897 * Return value:
4898 * 0 on success / non-zero on failure
4899 **/
4900 static int ipr_device_reset(struct ipr_ioa_cfg *ioa_cfg,
4901 struct ipr_resource_entry *res)
4902 {
4903 struct ipr_cmnd *ipr_cmd;
4904 struct ipr_ioarcb *ioarcb;
4905 struct ipr_cmd_pkt *cmd_pkt;
4906 struct ipr_ioarcb_ata_regs *regs;
4907 u32 ioasc;
4908
4909 ENTER;
4910 ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
4911 ioarcb = &ipr_cmd->ioarcb;
4912 cmd_pkt = &ioarcb->cmd_pkt;
4913
4914 if (ipr_cmd->ioa_cfg->sis64) {
4915 regs = &ipr_cmd->i.ata_ioadl.regs;
4916 ioarcb->add_cmd_parms_offset = cpu_to_be16(sizeof(*ioarcb));
4917 } else
4918 regs = &ioarcb->u.add_data.u.regs;
4919
4920 ioarcb->res_handle = res->res_handle;
4921 cmd_pkt->request_type = IPR_RQTYPE_IOACMD;
4922 cmd_pkt->cdb[0] = IPR_RESET_DEVICE;
4923 if (ipr_is_gata(res)) {
4924 cmd_pkt->cdb[2] = IPR_ATA_PHY_RESET;
4925 ioarcb->add_cmd_parms_len = cpu_to_be16(sizeof(regs->flags));
4926 regs->flags |= IPR_ATA_FLAG_STATUS_ON_GOOD_COMPLETION;
4927 }
4928
4929 ipr_send_blocking_cmd(ipr_cmd, ipr_timeout, IPR_DEVICE_RESET_TIMEOUT);
4930 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
4931 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
4932 if (ipr_is_gata(res) && res->sata_port && ioasc != IPR_IOASC_IOA_WAS_RESET) {
4933 if (ipr_cmd->ioa_cfg->sis64)
4934 memcpy(&res->sata_port->ioasa, &ipr_cmd->s.ioasa64.u.gata,
4935 sizeof(struct ipr_ioasa_gata));
4936 else
4937 memcpy(&res->sata_port->ioasa, &ipr_cmd->s.ioasa.u.gata,
4938 sizeof(struct ipr_ioasa_gata));
4939 }
4940
4941 LEAVE;
4942 return IPR_IOASC_SENSE_KEY(ioasc) ? -EIO : 0;
4943 }
4944
4945 /**
4946 * ipr_sata_reset - Reset the SATA port
4947 * @link: SATA link to reset
4948 * @classes: class of the attached device
4949 *
4950 * This function issues a SATA phy reset to the affected ATA link.
4951 *
4952 * Return value:
4953 * 0 on success / non-zero on failure
4954 **/
4955 static int ipr_sata_reset(struct ata_link *link, unsigned int *classes,
4956 unsigned long deadline)
4957 {
4958 struct ipr_sata_port *sata_port = link->ap->private_data;
4959 struct ipr_ioa_cfg *ioa_cfg = sata_port->ioa_cfg;
4960 struct ipr_resource_entry *res;
4961 unsigned long lock_flags = 0;
4962 int rc = -ENXIO;
4963
4964 ENTER;
4965 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4966 while (ioa_cfg->in_reset_reload) {
4967 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4968 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
4969 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4970 }
4971
4972 res = sata_port->res;
4973 if (res) {
4974 rc = ipr_device_reset(ioa_cfg, res);
4975 *classes = res->ata_class;
4976 }
4977
4978 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4979 LEAVE;
4980 return rc;
4981 }
4982
4983 /**
4984  * __ipr_eh_dev_reset - Reset the device
4985 * @scsi_cmd: scsi command struct
4986 *
4987 * This function issues a device reset to the affected device.
4988 * A LUN reset will be sent to the device first. If that does
4989 * not work, a target reset will be sent.
4990 *
4991 * Return value:
4992 * SUCCESS / FAILED
4993 **/
4994 static int __ipr_eh_dev_reset(struct scsi_cmnd *scsi_cmd)
4995 {
4996 struct ipr_cmnd *ipr_cmd;
4997 struct ipr_ioa_cfg *ioa_cfg;
4998 struct ipr_resource_entry *res;
4999 struct ata_port *ap;
5000 int rc = 0;
5001 struct ipr_hrr_queue *hrrq;
5002
5003 ENTER;
5004 ioa_cfg = (struct ipr_ioa_cfg *) scsi_cmd->device->host->hostdata;
5005 res = scsi_cmd->device->hostdata;
5006
5007 if (!res)
5008 return FAILED;
5009
5010 /*
5011 * If we are currently going through reset/reload, return failed. This will force the
5012 * mid-layer to call ipr_eh_host_reset, which will then go to sleep and wait for the
5013 * reset to complete
5014 */
5015 if (ioa_cfg->in_reset_reload)
5016 return FAILED;
5017 if (ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead)
5018 return FAILED;
5019
5020 for_each_hrrq(hrrq, ioa_cfg) {
5021 spin_lock(&hrrq->_lock);
5022 list_for_each_entry(ipr_cmd, &hrrq->hrrq_pending_q, queue) {
5023 if (ipr_cmd->ioarcb.res_handle == res->res_handle) {
5024 if (ipr_cmd->scsi_cmd)
5025 ipr_cmd->done = ipr_scsi_eh_done;
5026 if (ipr_cmd->qc)
5027 ipr_cmd->done = ipr_sata_eh_done;
5028 if (ipr_cmd->qc &&
5029 !(ipr_cmd->qc->flags & ATA_QCFLAG_FAILED)) {
5030 ipr_cmd->qc->err_mask |= AC_ERR_TIMEOUT;
5031 ipr_cmd->qc->flags |= ATA_QCFLAG_FAILED;
5032 }
5033 }
5034 }
5035 spin_unlock(&hrrq->_lock);
5036 }
5037 res->resetting_device = 1;
5038 scmd_printk(KERN_ERR, scsi_cmd, "Resetting device\n");
5039
5040 if (ipr_is_gata(res) && res->sata_port) {
5041 ap = res->sata_port->ap;
5042 spin_unlock_irq(scsi_cmd->device->host->host_lock);
5043 ata_std_error_handler(ap);
5044 spin_lock_irq(scsi_cmd->device->host->host_lock);
5045
5046 for_each_hrrq(hrrq, ioa_cfg) {
5047 spin_lock(&hrrq->_lock);
5048 list_for_each_entry(ipr_cmd,
5049 &hrrq->hrrq_pending_q, queue) {
5050 if (ipr_cmd->ioarcb.res_handle ==
5051 res->res_handle) {
5052 rc = -EIO;
5053 break;
5054 }
5055 }
5056 spin_unlock(&hrrq->_lock);
5057 }
5058 } else
5059 rc = ipr_device_reset(ioa_cfg, res);
5060 res->resetting_device = 0;
5061
5062 LEAVE;
5063 return rc ? FAILED : SUCCESS;
5064 }
5065
5066 static int ipr_eh_dev_reset(struct scsi_cmnd *cmd)
5067 {
5068 int rc;
5069 struct ipr_ioa_cfg *ioa_cfg;
5070
5071 ioa_cfg = (struct ipr_ioa_cfg *) cmd->device->host->hostdata;
5072
5073 spin_lock_irq(cmd->device->host->host_lock);
5074 rc = __ipr_eh_dev_reset(cmd);
5075 spin_unlock_irq(cmd->device->host->host_lock);
5076
5077 if (rc == SUCCESS)
5078 rc = ipr_wait_for_ops(ioa_cfg, cmd->device, ipr_match_lun);
5079
5080 return rc;
5081 }
5082
5083 /**
5084 * ipr_bus_reset_done - Op done function for bus reset.
5085 * @ipr_cmd: ipr command struct
5086 *
5087 * This function is the op done function for a bus reset
5088 *
5089 * Return value:
5090 * none
5091 **/
5092 static void ipr_bus_reset_done(struct ipr_cmnd *ipr_cmd)
5093 {
5094 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
5095 struct ipr_resource_entry *res;
5096
5097 ENTER;
5098 if (!ioa_cfg->sis64)
5099 list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
5100 if (res->res_handle == ipr_cmd->ioarcb.res_handle) {
5101 scsi_report_bus_reset(ioa_cfg->host, res->bus);
5102 break;
5103 }
5104 }
5105
5106 /*
5107 * If abort has not completed, indicate the reset has, else call the
5108 * abort's done function to wake the sleeping eh thread
5109 */
5110 if (ipr_cmd->sibling->sibling)
5111 ipr_cmd->sibling->sibling = NULL;
5112 else
5113 ipr_cmd->sibling->done(ipr_cmd->sibling);
5114
5115 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
5116 LEAVE;
5117 }
5118
5119 /**
5120 * ipr_abort_timeout - An abort task has timed out
5121 * @ipr_cmd: ipr command struct
5122 *
5123 * This function handles when an abort task times out. If this
5124 * happens we issue a bus reset since we have resources tied
5125 * up that must be freed before returning to the midlayer.
5126 *
5127 * Return value:
5128 * none
5129 **/
5130 static void ipr_abort_timeout(struct ipr_cmnd *ipr_cmd)
5131 {
5132 struct ipr_cmnd *reset_cmd;
5133 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
5134 struct ipr_cmd_pkt *cmd_pkt;
5135 unsigned long lock_flags = 0;
5136
5137 ENTER;
5138 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
5139 if (ipr_cmd->completion.done || ioa_cfg->in_reset_reload) {
5140 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
5141 return;
5142 }
5143
5144 sdev_printk(KERN_ERR, ipr_cmd->u.sdev, "Abort timed out. Resetting bus.\n");
5145 reset_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
5146 ipr_cmd->sibling = reset_cmd;
5147 reset_cmd->sibling = ipr_cmd;
5148 reset_cmd->ioarcb.res_handle = ipr_cmd->ioarcb.res_handle;
5149 cmd_pkt = &reset_cmd->ioarcb.cmd_pkt;
5150 cmd_pkt->request_type = IPR_RQTYPE_IOACMD;
5151 cmd_pkt->cdb[0] = IPR_RESET_DEVICE;
5152 cmd_pkt->cdb[2] = IPR_RESET_TYPE_SELECT | IPR_BUS_RESET;
5153
5154 ipr_do_req(reset_cmd, ipr_bus_reset_done, ipr_timeout, IPR_DEVICE_RESET_TIMEOUT);
5155 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
5156 LEAVE;
5157 }
5158
5159 /**
5160 * ipr_cancel_op - Cancel specified op
5161 * @scsi_cmd: scsi command struct
5162 *
5163 * This function cancels specified op.
5164 *
5165 * Return value:
5166 * SUCCESS / FAILED
5167 **/
5168 static int ipr_cancel_op(struct scsi_cmnd *scsi_cmd)
5169 {
5170 struct ipr_cmnd *ipr_cmd;
5171 struct ipr_ioa_cfg *ioa_cfg;
5172 struct ipr_resource_entry *res;
5173 struct ipr_cmd_pkt *cmd_pkt;
5174 u32 ioasc, int_reg;
5175 int op_found = 0;
5176 struct ipr_hrr_queue *hrrq;
5177
5178 ENTER;
5179 ioa_cfg = (struct ipr_ioa_cfg *)scsi_cmd->device->host->hostdata;
5180 res = scsi_cmd->device->hostdata;
5181
5182 /* If we are currently going through reset/reload, return failed.
5183 * This will force the mid-layer to call ipr_eh_host_reset,
5184 * which will then go to sleep and wait for the reset to complete
5185 */
5186 if (ioa_cfg->in_reset_reload ||
5187 ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead)
5188 return FAILED;
5189 if (!res)
5190 return FAILED;
5191
5192 /*
5193 * If we are aborting a timed out op, chances are that the timeout was caused
5194  * by an EEH error that has not yet been detected. In such cases, reading a register will
5195 * trigger the EEH recovery infrastructure.
5196 */
5197 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
5198
5199 if (!ipr_is_gscsi(res))
5200 return FAILED;
5201
5202 for_each_hrrq(hrrq, ioa_cfg) {
5203 spin_lock(&hrrq->_lock);
5204 list_for_each_entry(ipr_cmd, &hrrq->hrrq_pending_q, queue) {
5205 if (ipr_cmd->scsi_cmd == scsi_cmd) {
5206 ipr_cmd->done = ipr_scsi_eh_done;
5207 op_found = 1;
5208 break;
5209 }
5210 }
5211 spin_unlock(&hrrq->_lock);
5212 }
5213
5214 if (!op_found)
5215 return SUCCESS;
5216
5217 ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
5218 ipr_cmd->ioarcb.res_handle = res->res_handle;
5219 cmd_pkt = &ipr_cmd->ioarcb.cmd_pkt;
5220 cmd_pkt->request_type = IPR_RQTYPE_IOACMD;
5221 cmd_pkt->cdb[0] = IPR_CANCEL_ALL_REQUESTS;
5222 ipr_cmd->u.sdev = scsi_cmd->device;
5223
5224 scmd_printk(KERN_ERR, scsi_cmd, "Aborting command: %02X\n",
5225 scsi_cmd->cmnd[0]);
5226 ipr_send_blocking_cmd(ipr_cmd, ipr_abort_timeout, IPR_CANCEL_ALL_TIMEOUT);
5227 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
5228
5229 /*
5230 * If the abort task timed out and we sent a bus reset, we will get
5231  * one of the following responses to the abort
5232 */
5233 if (ioasc == IPR_IOASC_BUS_WAS_RESET || ioasc == IPR_IOASC_SYNC_REQUIRED) {
5234 ioasc = 0;
5235 ipr_trace;
5236 }
5237
5238 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
5239 if (!ipr_is_naca_model(res))
5240 res->needs_sync_complete = 1;
5241
5242 LEAVE;
5243 return IPR_IOASC_SENSE_KEY(ioasc) ? FAILED : SUCCESS;
5244 }
5245
5246 /**
5247 * ipr_eh_abort - Abort a single op
5248 * @scsi_cmd: scsi command struct
5249 *
5250 * Return value:
5251 * SUCCESS / FAILED
5252 **/
5253 static int ipr_eh_abort(struct scsi_cmnd *scsi_cmd)
5254 {
5255 unsigned long flags;
5256 int rc;
5257 struct ipr_ioa_cfg *ioa_cfg;
5258
5259 ENTER;
5260
5261 ioa_cfg = (struct ipr_ioa_cfg *) scsi_cmd->device->host->hostdata;
5262
5263 spin_lock_irqsave(scsi_cmd->device->host->host_lock, flags);
5264 rc = ipr_cancel_op(scsi_cmd);
5265 spin_unlock_irqrestore(scsi_cmd->device->host->host_lock, flags);
5266
5267 if (rc == SUCCESS)
5268 rc = ipr_wait_for_ops(ioa_cfg, scsi_cmd->device, ipr_match_lun);
5269 LEAVE;
5270 return rc;
5271 }
5272
5273 /**
5274 * ipr_handle_other_interrupt - Handle "other" interrupts
5275 * @ioa_cfg: ioa config struct
5276 * @int_reg: interrupt register
5277 *
5278 * Return value:
5279 * IRQ_NONE / IRQ_HANDLED
5280 **/
5281 static irqreturn_t ipr_handle_other_interrupt(struct ipr_ioa_cfg *ioa_cfg,
5282 u32 int_reg)
5283 {
5284 irqreturn_t rc = IRQ_HANDLED;
5285 u32 int_mask_reg;
5286
5287 int_mask_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg32);
5288 int_reg &= ~int_mask_reg;
5289
5290 /* If an interrupt on the adapter did not occur, ignore it.
5291 * Or in the case of SIS 64, check for a stage change interrupt.
5292 */
5293 if ((int_reg & IPR_PCII_OPER_INTERRUPTS) == 0) {
5294 if (ioa_cfg->sis64) {
5295 int_mask_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
5296 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg) & ~int_mask_reg;
5297 if (int_reg & IPR_PCII_IPL_STAGE_CHANGE) {
5298
5299 /* clear stage change */
5300 writel(IPR_PCII_IPL_STAGE_CHANGE, ioa_cfg->regs.clr_interrupt_reg);
5301 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg) & ~int_mask_reg;
5302 list_del(&ioa_cfg->reset_cmd->queue);
5303 del_timer(&ioa_cfg->reset_cmd->timer);
5304 ipr_reset_ioa_job(ioa_cfg->reset_cmd);
5305 return IRQ_HANDLED;
5306 }
5307 }
5308
5309 return IRQ_NONE;
5310 }
5311
5312 if (int_reg & IPR_PCII_IOA_TRANS_TO_OPER) {
5313 /* Mask the interrupt */
5314 writel(IPR_PCII_IOA_TRANS_TO_OPER, ioa_cfg->regs.set_interrupt_mask_reg);
5315
5316 /* Clear the interrupt */
5317 writel(IPR_PCII_IOA_TRANS_TO_OPER, ioa_cfg->regs.clr_interrupt_reg);
5318 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
5319
5320 list_del(&ioa_cfg->reset_cmd->queue);
5321 del_timer(&ioa_cfg->reset_cmd->timer);
5322 ipr_reset_ioa_job(ioa_cfg->reset_cmd);
5323 } else if ((int_reg & IPR_PCII_HRRQ_UPDATED) == int_reg) {
5324 if (ioa_cfg->clear_isr) {
5325 if (ipr_debug && printk_ratelimit())
5326 dev_err(&ioa_cfg->pdev->dev,
5327 "Spurious interrupt detected. 0x%08X\n", int_reg);
5328 writel(IPR_PCII_HRRQ_UPDATED, ioa_cfg->regs.clr_interrupt_reg32);
5329 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg32);
5330 return IRQ_NONE;
5331 }
5332 } else {
5333 if (int_reg & IPR_PCII_IOA_UNIT_CHECKED)
5334 ioa_cfg->ioa_unit_checked = 1;
5335 else if (int_reg & IPR_PCII_NO_HOST_RRQ)
5336 dev_err(&ioa_cfg->pdev->dev,
5337 "No Host RRQ. 0x%08X\n", int_reg);
5338 else
5339 dev_err(&ioa_cfg->pdev->dev,
5340 "Permanent IOA failure. 0x%08X\n", int_reg);
5341
5342 if (WAIT_FOR_DUMP == ioa_cfg->sdt_state)
5343 ioa_cfg->sdt_state = GET_DUMP;
5344
5345 ipr_mask_and_clear_interrupts(ioa_cfg, ~0);
5346 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
5347 }
5348
5349 return rc;
5350 }
5351
5352 /**
5353 * ipr_isr_eh - Interrupt service routine error handler
5354 * @ioa_cfg: ioa config struct
5355 * @msg: message to log
5356 *
5357 * Return value:
5358 * none
5359 **/
5360 static void ipr_isr_eh(struct ipr_ioa_cfg *ioa_cfg, char *msg, u16 number)
5361 {
5362 ioa_cfg->errors_logged++;
5363 dev_err(&ioa_cfg->pdev->dev, "%s %d\n", msg, number);
5364
5365 if (WAIT_FOR_DUMP == ioa_cfg->sdt_state)
5366 ioa_cfg->sdt_state = GET_DUMP;
5367
5368 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
5369 }
5370
5371 static int ipr_process_hrrq(struct ipr_hrr_queue *hrr_queue, int budget,
5372 struct list_head *doneq)
5373 {
5374 u32 ioasc;
5375 u16 cmd_index;
5376 struct ipr_cmnd *ipr_cmd;
5377 struct ipr_ioa_cfg *ioa_cfg = hrr_queue->ioa_cfg;
5378 int num_hrrq = 0;
5379
5380 /* If interrupts are disabled, ignore the interrupt */
5381 if (!hrr_queue->allow_interrupts)
5382 return 0;
5383
5384 while ((be32_to_cpu(*hrr_queue->hrrq_curr) & IPR_HRRQ_TOGGLE_BIT) ==
5385 hrr_queue->toggle_bit) {
5386
5387 cmd_index = (be32_to_cpu(*hrr_queue->hrrq_curr) &
5388 IPR_HRRQ_REQ_RESP_HANDLE_MASK) >>
5389 IPR_HRRQ_REQ_RESP_HANDLE_SHIFT;
5390
5391 if (unlikely(cmd_index > hrr_queue->max_cmd_id ||
5392 cmd_index < hrr_queue->min_cmd_id)) {
5393 ipr_isr_eh(ioa_cfg,
5394 "Invalid response handle from IOA: ",
5395 cmd_index);
5396 break;
5397 }
5398
5399 ipr_cmd = ioa_cfg->ipr_cmnd_list[cmd_index];
5400 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
5401
5402 ipr_trc_hook(ipr_cmd, IPR_TRACE_FINISH, ioasc);
5403
5404 list_move_tail(&ipr_cmd->queue, doneq);
5405
5406 if (hrr_queue->hrrq_curr < hrr_queue->hrrq_end) {
5407 hrr_queue->hrrq_curr++;
5408 } else {
5409 hrr_queue->hrrq_curr = hrr_queue->hrrq_start;
5410 hrr_queue->toggle_bit ^= 1u;
5411 }
5412 num_hrrq++;
5413 if (budget > 0 && num_hrrq >= budget)
5414 break;
5415 }
5416
5417 return num_hrrq;
5418 }
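
/*
 * Illustrative sketch (not driver code): the host RRQ consumed above is a
 * circular buffer of response handles written by the adapter.  The adapter
 * inverts the toggle bit each time it wraps the ring, so an entry is valid
 * only while its toggle bit matches the value the host expects.  Assuming a
 * hypothetical ring of plain u32 entries, the consumption pattern reduces to:
 *
 *	while ((*curr & TOGGLE_BIT) == expected_toggle) {
 *		consume(*curr);
 *		if (curr < end)
 *			curr++;
 *		else {
 *			curr = start;
 *			expected_toggle ^= 1;
 *		}
 *	}
 */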
5419
5420 static int ipr_iopoll(struct blk_iopoll *iop, int budget)
5421 {
5422 struct ipr_ioa_cfg *ioa_cfg;
5423 struct ipr_hrr_queue *hrrq;
5424 struct ipr_cmnd *ipr_cmd, *temp;
5425 unsigned long hrrq_flags;
5426 int completed_ops;
5427 LIST_HEAD(doneq);
5428
5429 hrrq = container_of(iop, struct ipr_hrr_queue, iopoll);
5430 ioa_cfg = hrrq->ioa_cfg;
5431
5432 spin_lock_irqsave(hrrq->lock, hrrq_flags);
5433 completed_ops = ipr_process_hrrq(hrrq, budget, &doneq);
5434
5435 if (completed_ops < budget)
5436 blk_iopoll_complete(iop);
5437 spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
5438
5439 list_for_each_entry_safe(ipr_cmd, temp, &doneq, queue) {
5440 list_del(&ipr_cmd->queue);
5441 del_timer(&ipr_cmd->timer);
5442 ipr_cmd->fast_done(ipr_cmd);
5443 }
5444
5445 return completed_ops;
5446 }
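
/*
 * Note on the budget handling above: blk_iopoll invokes ipr_iopoll() with a
 * budget of entries it may consume per invocation.  Completing fewer entries
 * than the budget means the queue is drained, so blk_iopoll_complete() is
 * called to leave polled mode and return to interrupt-driven completion;
 * completing a full budget leaves the poller scheduled.  A minimal sketch of
 * that contract, with a hypothetical drain_up_to() helper:
 *
 *	static int poll(struct blk_iopoll *iop, int budget)
 *	{
 *		int done = drain_up_to(budget);
 *
 *		if (done < budget)
 *			blk_iopoll_complete(iop);
 *		return done;
 *	}
 */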
5447
5448 /**
5449 * ipr_isr - Interrupt service routine
5450 * @irq: irq number
5451  * @devp: pointer to hrr queue struct
5452 *
5453 * Return value:
5454 * IRQ_NONE / IRQ_HANDLED
5455 **/
5456 static irqreturn_t ipr_isr(int irq, void *devp)
5457 {
5458 struct ipr_hrr_queue *hrrq = (struct ipr_hrr_queue *)devp;
5459 struct ipr_ioa_cfg *ioa_cfg = hrrq->ioa_cfg;
5460 unsigned long hrrq_flags = 0;
5461 u32 int_reg = 0;
5462 int num_hrrq = 0;
5463 int irq_none = 0;
5464 struct ipr_cmnd *ipr_cmd, *temp;
5465 irqreturn_t rc = IRQ_NONE;
5466 LIST_HEAD(doneq);
5467
5468 spin_lock_irqsave(hrrq->lock, hrrq_flags);
5469 /* If interrupts are disabled, ignore the interrupt */
5470 if (!hrrq->allow_interrupts) {
5471 spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
5472 return IRQ_NONE;
5473 }
5474
5475 while (1) {
5476 if (ipr_process_hrrq(hrrq, -1, &doneq)) {
5477 rc = IRQ_HANDLED;
5478
5479 if (!ioa_cfg->clear_isr)
5480 break;
5481
5482 /* Clear the PCI interrupt */
5483 num_hrrq = 0;
5484 do {
5485 writel(IPR_PCII_HRRQ_UPDATED,
5486 ioa_cfg->regs.clr_interrupt_reg32);
5487 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg32);
5488 } while (int_reg & IPR_PCII_HRRQ_UPDATED &&
5489 num_hrrq++ < IPR_MAX_HRRQ_RETRIES);
5490
5491 } else if (rc == IRQ_NONE && irq_none == 0) {
5492 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg32);
5493 irq_none++;
5494 } else if (num_hrrq == IPR_MAX_HRRQ_RETRIES &&
5495 int_reg & IPR_PCII_HRRQ_UPDATED) {
5496 ipr_isr_eh(ioa_cfg,
5497 "Error clearing HRRQ: ", num_hrrq);
5498 rc = IRQ_HANDLED;
5499 break;
5500 } else
5501 break;
5502 }
5503
5504 if (unlikely(rc == IRQ_NONE))
5505 rc = ipr_handle_other_interrupt(ioa_cfg, int_reg);
5506
5507 spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
5508 list_for_each_entry_safe(ipr_cmd, temp, &doneq, queue) {
5509 list_del(&ipr_cmd->queue);
5510 del_timer(&ipr_cmd->timer);
5511 ipr_cmd->fast_done(ipr_cmd);
5512 }
5513 return rc;
5514 }
5515
5516 /**
5517  * ipr_isr_mhrrq - Interrupt service routine for other HRRQ handlers
5518  * @irq: irq number
5519  * @devp: pointer to hrr queue struct
5520 *
5521 * Return value:
5522 * IRQ_NONE / IRQ_HANDLED
5523 **/
5524 static irqreturn_t ipr_isr_mhrrq(int irq, void *devp)
5525 {
5526 struct ipr_hrr_queue *hrrq = (struct ipr_hrr_queue *)devp;
5527 struct ipr_ioa_cfg *ioa_cfg = hrrq->ioa_cfg;
5528 unsigned long hrrq_flags = 0;
5529 struct ipr_cmnd *ipr_cmd, *temp;
5530 irqreturn_t rc = IRQ_NONE;
5531 LIST_HEAD(doneq);
5532
5533 spin_lock_irqsave(hrrq->lock, hrrq_flags);
5534
5535 /* If interrupts are disabled, ignore the interrupt */
5536 if (!hrrq->allow_interrupts) {
5537 spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
5538 return IRQ_NONE;
5539 }
5540
5541 if (blk_iopoll_enabled && ioa_cfg->iopoll_weight &&
5542 ioa_cfg->sis64 && ioa_cfg->nvectors > 1) {
5543 if ((be32_to_cpu(*hrrq->hrrq_curr) & IPR_HRRQ_TOGGLE_BIT) ==
5544 hrrq->toggle_bit) {
5545 if (!blk_iopoll_sched_prep(&hrrq->iopoll))
5546 blk_iopoll_sched(&hrrq->iopoll);
5547 spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
5548 return IRQ_HANDLED;
5549 }
5550 } else {
5551 if ((be32_to_cpu(*hrrq->hrrq_curr) & IPR_HRRQ_TOGGLE_BIT) ==
5552 hrrq->toggle_bit)
5553
5554 if (ipr_process_hrrq(hrrq, -1, &doneq))
5555 rc = IRQ_HANDLED;
5556 }
5557
5558 spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
5559
5560 list_for_each_entry_safe(ipr_cmd, temp, &doneq, queue) {
5561 list_del(&ipr_cmd->queue);
5562 del_timer(&ipr_cmd->timer);
5563 ipr_cmd->fast_done(ipr_cmd);
5564 }
5565 return rc;
5566 }
5567
5568 /**
5569 * ipr_build_ioadl64 - Build a scatter/gather list and map the buffer
5570 * @ioa_cfg: ioa config struct
5571 * @ipr_cmd: ipr command struct
5572 *
5573 * Return value:
5574 * 0 on success / -1 on failure
5575 **/
5576 static int ipr_build_ioadl64(struct ipr_ioa_cfg *ioa_cfg,
5577 struct ipr_cmnd *ipr_cmd)
5578 {
5579 int i, nseg;
5580 struct scatterlist *sg;
5581 u32 length;
5582 u32 ioadl_flags = 0;
5583 struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
5584 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
5585 struct ipr_ioadl64_desc *ioadl64 = ipr_cmd->i.ioadl64;
5586
5587 length = scsi_bufflen(scsi_cmd);
5588 if (!length)
5589 return 0;
5590
5591 nseg = scsi_dma_map(scsi_cmd);
5592 if (nseg < 0) {
5593 if (printk_ratelimit())
5594 dev_err(&ioa_cfg->pdev->dev, "pci_map_sg failed!\n");
5595 return -1;
5596 }
5597
5598 ipr_cmd->dma_use_sg = nseg;
5599
5600 ioarcb->data_transfer_length = cpu_to_be32(length);
5601 ioarcb->ioadl_len =
5602 cpu_to_be32(sizeof(struct ipr_ioadl64_desc) * ipr_cmd->dma_use_sg);
5603
5604 if (scsi_cmd->sc_data_direction == DMA_TO_DEVICE) {
5605 ioadl_flags = IPR_IOADL_FLAGS_WRITE;
5606 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
5607 } else if (scsi_cmd->sc_data_direction == DMA_FROM_DEVICE)
5608 ioadl_flags = IPR_IOADL_FLAGS_READ;
5609
5610 scsi_for_each_sg(scsi_cmd, sg, ipr_cmd->dma_use_sg, i) {
5611 ioadl64[i].flags = cpu_to_be32(ioadl_flags);
5612 ioadl64[i].data_len = cpu_to_be32(sg_dma_len(sg));
5613 ioadl64[i].address = cpu_to_be64(sg_dma_address(sg));
5614 }
5615
5616 ioadl64[i-1].flags |= cpu_to_be32(IPR_IOADL_FLAGS_LAST);
5617 return 0;
5618 }
5619
5620 /**
5621 * ipr_build_ioadl - Build a scatter/gather list and map the buffer
5622 * @ioa_cfg: ioa config struct
5623 * @ipr_cmd: ipr command struct
5624 *
5625 * Return value:
5626 * 0 on success / -1 on failure
5627 **/
5628 static int ipr_build_ioadl(struct ipr_ioa_cfg *ioa_cfg,
5629 struct ipr_cmnd *ipr_cmd)
5630 {
5631 int i, nseg;
5632 struct scatterlist *sg;
5633 u32 length;
5634 u32 ioadl_flags = 0;
5635 struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
5636 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
5637 struct ipr_ioadl_desc *ioadl = ipr_cmd->i.ioadl;
5638
5639 length = scsi_bufflen(scsi_cmd);
5640 if (!length)
5641 return 0;
5642
5643 nseg = scsi_dma_map(scsi_cmd);
5644 if (nseg < 0) {
5645 dev_err(&ioa_cfg->pdev->dev, "pci_map_sg failed!\n");
5646 return -1;
5647 }
5648
5649 ipr_cmd->dma_use_sg = nseg;
5650
5651 if (scsi_cmd->sc_data_direction == DMA_TO_DEVICE) {
5652 ioadl_flags = IPR_IOADL_FLAGS_WRITE;
5653 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
5654 ioarcb->data_transfer_length = cpu_to_be32(length);
5655 ioarcb->ioadl_len =
5656 cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg);
5657 } else if (scsi_cmd->sc_data_direction == DMA_FROM_DEVICE) {
5658 ioadl_flags = IPR_IOADL_FLAGS_READ;
5659 ioarcb->read_data_transfer_length = cpu_to_be32(length);
5660 ioarcb->read_ioadl_len =
5661 cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg);
5662 }
5663
5664 if (ipr_cmd->dma_use_sg <= ARRAY_SIZE(ioarcb->u.add_data.u.ioadl)) {
5665 ioadl = ioarcb->u.add_data.u.ioadl;
5666 ioarcb->write_ioadl_addr = cpu_to_be32((ipr_cmd->dma_addr) +
5667 offsetof(struct ipr_ioarcb, u.add_data));
5668 ioarcb->read_ioadl_addr = ioarcb->write_ioadl_addr;
5669 }
5670
5671 scsi_for_each_sg(scsi_cmd, sg, ipr_cmd->dma_use_sg, i) {
5672 ioadl[i].flags_and_data_len =
5673 cpu_to_be32(ioadl_flags | sg_dma_len(sg));
5674 ioadl[i].address = cpu_to_be32(sg_dma_address(sg));
5675 }
5676
5677 ioadl[i-1].flags_and_data_len |= cpu_to_be32(IPR_IOADL_FLAGS_LAST);
5678 return 0;
5679 }
5680
5681 /**
5682 * ipr_get_task_attributes - Translate SPI Q-Tag to task attributes
5683 * @scsi_cmd: scsi command struct
5684 *
5685 * Return value:
5686 * task attributes
5687 **/
5688 static u8 ipr_get_task_attributes(struct scsi_cmnd *scsi_cmd)
5689 {
5690 u8 tag[2];
5691 u8 rc = IPR_FLAGS_LO_UNTAGGED_TASK;
5692
5693 if (scsi_populate_tag_msg(scsi_cmd, tag)) {
5694 switch (tag[0]) {
5695 case MSG_SIMPLE_TAG:
5696 rc = IPR_FLAGS_LO_SIMPLE_TASK;
5697 break;
5698 case MSG_HEAD_TAG:
5699 rc = IPR_FLAGS_LO_HEAD_OF_Q_TASK;
5700 break;
5701 case MSG_ORDERED_TAG:
5702 rc = IPR_FLAGS_LO_ORDERED_TASK;
5703 break;
5704 }
5705 }
5706
5707 return rc;
5708 }
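
/*
 * Usage note: ipr_queuecommand() below ORs the value returned here into
 * ioarcb->cmd_pkt.flags_lo, so a command queued with MSG_SIMPLE_TAG is
 * issued to the IOA as IPR_FLAGS_LO_SIMPLE_TASK, an untagged command as
 * IPR_FLAGS_LO_UNTAGGED_TASK, and so on.
 */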
5709
5710 /**
5711 * ipr_erp_done - Process completion of ERP for a device
5712 * @ipr_cmd: ipr command struct
5713 *
5714 * This function copies the sense buffer into the scsi_cmd
5715 * struct and pushes the scsi_done function.
5716 *
5717 * Return value:
5718 * nothing
5719 **/
5720 static void ipr_erp_done(struct ipr_cmnd *ipr_cmd)
5721 {
5722 struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
5723 struct ipr_resource_entry *res = scsi_cmd->device->hostdata;
5724 u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
5725
5726 if (IPR_IOASC_SENSE_KEY(ioasc) > 0) {
5727 scsi_cmd->result |= (DID_ERROR << 16);
5728 scmd_printk(KERN_ERR, scsi_cmd,
5729 "Request Sense failed with IOASC: 0x%08X\n", ioasc);
5730 } else {
5731 memcpy(scsi_cmd->sense_buffer, ipr_cmd->sense_buffer,
5732 SCSI_SENSE_BUFFERSIZE);
5733 }
5734
5735 if (res) {
5736 if (!ipr_is_naca_model(res))
5737 res->needs_sync_complete = 1;
5738 res->in_erp = 0;
5739 }
5740 scsi_dma_unmap(ipr_cmd->scsi_cmd);
5741 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
5742 scsi_cmd->scsi_done(scsi_cmd);
5743 }
5744
5745 /**
5746 * ipr_reinit_ipr_cmnd_for_erp - Re-initialize a cmnd block to be used for ERP
5747 * @ipr_cmd: ipr command struct
5748 *
5749 * Return value:
5750 * none
5751 **/
5752 static void ipr_reinit_ipr_cmnd_for_erp(struct ipr_cmnd *ipr_cmd)
5753 {
5754 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
5755 struct ipr_ioasa *ioasa = &ipr_cmd->s.ioasa;
5756 dma_addr_t dma_addr = ipr_cmd->dma_addr;
5757
5758 memset(&ioarcb->cmd_pkt, 0, sizeof(struct ipr_cmd_pkt));
5759 ioarcb->data_transfer_length = 0;
5760 ioarcb->read_data_transfer_length = 0;
5761 ioarcb->ioadl_len = 0;
5762 ioarcb->read_ioadl_len = 0;
5763 ioasa->hdr.ioasc = 0;
5764 ioasa->hdr.residual_data_len = 0;
5765
5766 if (ipr_cmd->ioa_cfg->sis64)
5767 ioarcb->u.sis64_addr_data.data_ioadl_addr =
5768 cpu_to_be64(dma_addr + offsetof(struct ipr_cmnd, i.ioadl64));
5769 else {
5770 ioarcb->write_ioadl_addr =
5771 cpu_to_be32(dma_addr + offsetof(struct ipr_cmnd, i.ioadl));
5772 ioarcb->read_ioadl_addr = ioarcb->write_ioadl_addr;
5773 }
5774 }
5775
5776 /**
5777 * ipr_erp_request_sense - Send request sense to a device
5778 * @ipr_cmd: ipr command struct
5779 *
5780 * This function sends a request sense to a device as a result
5781 * of a check condition.
5782 *
5783 * Return value:
5784 * nothing
5785 **/
5786 static void ipr_erp_request_sense(struct ipr_cmnd *ipr_cmd)
5787 {
5788 struct ipr_cmd_pkt *cmd_pkt = &ipr_cmd->ioarcb.cmd_pkt;
5789 u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
5790
5791 if (IPR_IOASC_SENSE_KEY(ioasc) > 0) {
5792 ipr_erp_done(ipr_cmd);
5793 return;
5794 }
5795
5796 ipr_reinit_ipr_cmnd_for_erp(ipr_cmd);
5797
5798 cmd_pkt->request_type = IPR_RQTYPE_SCSICDB;
5799 cmd_pkt->cdb[0] = REQUEST_SENSE;
5800 cmd_pkt->cdb[4] = SCSI_SENSE_BUFFERSIZE;
5801 cmd_pkt->flags_hi |= IPR_FLAGS_HI_SYNC_OVERRIDE;
5802 cmd_pkt->flags_hi |= IPR_FLAGS_HI_NO_ULEN_CHK;
5803 cmd_pkt->timeout = cpu_to_be16(IPR_REQUEST_SENSE_TIMEOUT / HZ);
5804
5805 ipr_init_ioadl(ipr_cmd, ipr_cmd->sense_buffer_dma,
5806 SCSI_SENSE_BUFFERSIZE, IPR_IOADL_FLAGS_READ_LAST);
5807
5808 ipr_do_req(ipr_cmd, ipr_erp_done, ipr_timeout,
5809 IPR_REQUEST_SENSE_TIMEOUT * 2);
5810 }
5811
5812 /**
5813 * ipr_erp_cancel_all - Send cancel all to a device
5814 * @ipr_cmd: ipr command struct
5815 *
5816 * This function sends a cancel all to a device to clear the
5817 * queue. If we are running TCQ on the device, QERR is set to 1,
5818 * which means all outstanding ops have been dropped on the floor.
5819 * Cancel all will return them to us.
5820 *
5821 * Return value:
5822 * nothing
5823 **/
5824 static void ipr_erp_cancel_all(struct ipr_cmnd *ipr_cmd)
5825 {
5826 struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
5827 struct ipr_resource_entry *res = scsi_cmd->device->hostdata;
5828 struct ipr_cmd_pkt *cmd_pkt;
5829
5830 res->in_erp = 1;
5831
5832 ipr_reinit_ipr_cmnd_for_erp(ipr_cmd);
5833
5834 if (!scsi_get_tag_type(scsi_cmd->device)) {
5835 ipr_erp_request_sense(ipr_cmd);
5836 return;
5837 }
5838
5839 cmd_pkt = &ipr_cmd->ioarcb.cmd_pkt;
5840 cmd_pkt->request_type = IPR_RQTYPE_IOACMD;
5841 cmd_pkt->cdb[0] = IPR_CANCEL_ALL_REQUESTS;
5842
5843 ipr_do_req(ipr_cmd, ipr_erp_request_sense, ipr_timeout,
5844 IPR_CANCEL_ALL_TIMEOUT);
5845 }
5846
5847 /**
5848 * ipr_dump_ioasa - Dump contents of IOASA
5849 * @ioa_cfg: ioa config struct
5850 * @ipr_cmd: ipr command struct
5851 * @res: resource entry struct
5852 *
5853 * This function is invoked by the interrupt handler when ops
5854 * fail. It will log the IOASA if appropriate. Only called
5855 * for GPDD ops.
5856 *
5857 * Return value:
5858 * none
5859 **/
5860 static void ipr_dump_ioasa(struct ipr_ioa_cfg *ioa_cfg,
5861 struct ipr_cmnd *ipr_cmd, struct ipr_resource_entry *res)
5862 {
5863 int i;
5864 u16 data_len;
5865 u32 ioasc, fd_ioasc;
5866 struct ipr_ioasa *ioasa = &ipr_cmd->s.ioasa;
5867 __be32 *ioasa_data = (__be32 *)ioasa;
5868 int error_index;
5869
5870 ioasc = be32_to_cpu(ioasa->hdr.ioasc) & IPR_IOASC_IOASC_MASK;
5871 fd_ioasc = be32_to_cpu(ioasa->hdr.fd_ioasc) & IPR_IOASC_IOASC_MASK;
5872
5873 if (0 == ioasc)
5874 return;
5875
5876 if (ioa_cfg->log_level < IPR_DEFAULT_LOG_LEVEL)
5877 return;
5878
5879 if (ioasc == IPR_IOASC_BUS_WAS_RESET && fd_ioasc)
5880 error_index = ipr_get_error(fd_ioasc);
5881 else
5882 error_index = ipr_get_error(ioasc);
5883
5884 if (ioa_cfg->log_level < IPR_MAX_LOG_LEVEL) {
5885 /* Don't log an error if the IOA already logged one */
5886 if (ioasa->hdr.ilid != 0)
5887 return;
5888
5889 if (!ipr_is_gscsi(res))
5890 return;
5891
5892 if (ipr_error_table[error_index].log_ioasa == 0)
5893 return;
5894 }
5895
5896 ipr_res_err(ioa_cfg, res, "%s\n", ipr_error_table[error_index].error);
5897
5898 data_len = be16_to_cpu(ioasa->hdr.ret_stat_len);
5899 if (ioa_cfg->sis64 && sizeof(struct ipr_ioasa64) < data_len)
5900 data_len = sizeof(struct ipr_ioasa64);
5901 else if (!ioa_cfg->sis64 && sizeof(struct ipr_ioasa) < data_len)
5902 data_len = sizeof(struct ipr_ioasa);
5903
5904 ipr_err("IOASA Dump:\n");
5905
5906 for (i = 0; i < data_len / 4; i += 4) {
5907 ipr_err("%08X: %08X %08X %08X %08X\n", i*4,
5908 be32_to_cpu(ioasa_data[i]),
5909 be32_to_cpu(ioasa_data[i+1]),
5910 be32_to_cpu(ioasa_data[i+2]),
5911 be32_to_cpu(ioasa_data[i+3]));
5912 }
5913 }
5914
5915 /**
5916 * ipr_gen_sense - Generate SCSI sense data from an IOASA
5917  * @ipr_cmd: ipr command struct
5919 *
5920 * Return value:
5921 * none
5922 **/
5923 static void ipr_gen_sense(struct ipr_cmnd *ipr_cmd)
5924 {
5925 u32 failing_lba;
5926 u8 *sense_buf = ipr_cmd->scsi_cmd->sense_buffer;
5927 struct ipr_resource_entry *res = ipr_cmd->scsi_cmd->device->hostdata;
5928 struct ipr_ioasa *ioasa = &ipr_cmd->s.ioasa;
5929 u32 ioasc = be32_to_cpu(ioasa->hdr.ioasc);
5930
5931 memset(sense_buf, 0, SCSI_SENSE_BUFFERSIZE);
5932
5933 if (ioasc >= IPR_FIRST_DRIVER_IOASC)
5934 return;
5935
5936 ipr_cmd->scsi_cmd->result = SAM_STAT_CHECK_CONDITION;
5937
5938 if (ipr_is_vset_device(res) &&
5939 ioasc == IPR_IOASC_MED_DO_NOT_REALLOC &&
5940 ioasa->u.vset.failing_lba_hi != 0) {
5941 sense_buf[0] = 0x72;
5942 sense_buf[1] = IPR_IOASC_SENSE_KEY(ioasc);
5943 sense_buf[2] = IPR_IOASC_SENSE_CODE(ioasc);
5944 sense_buf[3] = IPR_IOASC_SENSE_QUAL(ioasc);
5945
5946 sense_buf[7] = 12;
5947 sense_buf[8] = 0;
5948 sense_buf[9] = 0x0A;
5949 sense_buf[10] = 0x80;
5950
5951 failing_lba = be32_to_cpu(ioasa->u.vset.failing_lba_hi);
5952
5953 sense_buf[12] = (failing_lba & 0xff000000) >> 24;
5954 sense_buf[13] = (failing_lba & 0x00ff0000) >> 16;
5955 sense_buf[14] = (failing_lba & 0x0000ff00) >> 8;
5956 sense_buf[15] = failing_lba & 0x000000ff;
5957
5958 failing_lba = be32_to_cpu(ioasa->u.vset.failing_lba_lo);
5959
5960 sense_buf[16] = (failing_lba & 0xff000000) >> 24;
5961 sense_buf[17] = (failing_lba & 0x00ff0000) >> 16;
5962 sense_buf[18] = (failing_lba & 0x0000ff00) >> 8;
5963 sense_buf[19] = failing_lba & 0x000000ff;
5964 } else {
5965 sense_buf[0] = 0x70;
5966 sense_buf[2] = IPR_IOASC_SENSE_KEY(ioasc);
5967 sense_buf[12] = IPR_IOASC_SENSE_CODE(ioasc);
5968 sense_buf[13] = IPR_IOASC_SENSE_QUAL(ioasc);
5969
5970 /* Illegal request */
5971 if ((IPR_IOASC_SENSE_KEY(ioasc) == 0x05) &&
5972 (be32_to_cpu(ioasa->hdr.ioasc_specific) & IPR_FIELD_POINTER_VALID)) {
5973 sense_buf[7] = 10; /* additional length */
5974
5975 /* IOARCB was in error */
5976 if (IPR_IOASC_SENSE_CODE(ioasc) == 0x24)
5977 sense_buf[15] = 0xC0;
5978 else /* Parameter data was invalid */
5979 sense_buf[15] = 0x80;
5980
5981 sense_buf[16] =
5982 ((IPR_FIELD_POINTER_MASK &
5983 be32_to_cpu(ioasa->hdr.ioasc_specific)) >> 8) & 0xff;
5984 sense_buf[17] =
5985 (IPR_FIELD_POINTER_MASK &
5986 be32_to_cpu(ioasa->hdr.ioasc_specific)) & 0xff;
5987 } else {
5988 if (ioasc == IPR_IOASC_MED_DO_NOT_REALLOC) {
5989 if (ipr_is_vset_device(res))
5990 failing_lba = be32_to_cpu(ioasa->u.vset.failing_lba_lo);
5991 else
5992 failing_lba = be32_to_cpu(ioasa->u.dasd.failing_lba);
5993
5994 sense_buf[0] |= 0x80; /* Or in the Valid bit */
5995 sense_buf[3] = (failing_lba & 0xff000000) >> 24;
5996 sense_buf[4] = (failing_lba & 0x00ff0000) >> 16;
5997 sense_buf[5] = (failing_lba & 0x0000ff00) >> 8;
5998 sense_buf[6] = failing_lba & 0x000000ff;
5999 }
6000
6001 sense_buf[7] = 6; /* additional length */
6002 }
6003 }
6004 }
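
/*
 * Sense format summary for the function above (informational only): vset
 * devices whose failing LBA does not fit in 32 bits get descriptor-format
 * sense (response code 0x72) with the sense key, ASC and ASCQ in bytes 1-3
 * and the 64-bit LBA carried in an information descriptor starting at byte
 * 8; all other cases get fixed-format sense (response code 0x70) with the
 * sense key in byte 2, ASC/ASCQ in bytes 12-13 and, when the valid bit
 * (0x80 in byte 0) is set, the failing LBA in the information field at
 * bytes 3-6.
 */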
6005
6006 /**
6007 * ipr_get_autosense - Copy autosense data to sense buffer
6008 * @ipr_cmd: ipr command struct
6009 *
6010 * This function copies the autosense buffer to the buffer
6011 * in the scsi_cmd, if there is autosense available.
6012 *
6013 * Return value:
6014 * 1 if autosense was available / 0 if not
6015 **/
6016 static int ipr_get_autosense(struct ipr_cmnd *ipr_cmd)
6017 {
6018 struct ipr_ioasa *ioasa = &ipr_cmd->s.ioasa;
6019 struct ipr_ioasa64 *ioasa64 = &ipr_cmd->s.ioasa64;
6020
6021 if ((be32_to_cpu(ioasa->hdr.ioasc_specific) & IPR_AUTOSENSE_VALID) == 0)
6022 return 0;
6023
6024 if (ipr_cmd->ioa_cfg->sis64)
6025 memcpy(ipr_cmd->scsi_cmd->sense_buffer, ioasa64->auto_sense.data,
6026 min_t(u16, be16_to_cpu(ioasa64->auto_sense.auto_sense_len),
6027 SCSI_SENSE_BUFFERSIZE));
6028 else
6029 memcpy(ipr_cmd->scsi_cmd->sense_buffer, ioasa->auto_sense.data,
6030 min_t(u16, be16_to_cpu(ioasa->auto_sense.auto_sense_len),
6031 SCSI_SENSE_BUFFERSIZE));
6032 return 1;
6033 }
6034
6035 /**
6036 * ipr_erp_start - Process an error response for a SCSI op
6037 * @ioa_cfg: ioa config struct
6038 * @ipr_cmd: ipr command struct
6039 *
6040 * This function determines whether or not to initiate ERP
6041 * on the affected device.
6042 *
6043 * Return value:
6044 * nothing
6045 **/
6046 static void ipr_erp_start(struct ipr_ioa_cfg *ioa_cfg,
6047 struct ipr_cmnd *ipr_cmd)
6048 {
6049 struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
6050 struct ipr_resource_entry *res = scsi_cmd->device->hostdata;
6051 u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
6052 u32 masked_ioasc = ioasc & IPR_IOASC_IOASC_MASK;
6053
6054 if (!res) {
6055 ipr_scsi_eh_done(ipr_cmd);
6056 return;
6057 }
6058
6059 if (!ipr_is_gscsi(res) && masked_ioasc != IPR_IOASC_HW_DEV_BUS_STATUS)
6060 ipr_gen_sense(ipr_cmd);
6061
6062 ipr_dump_ioasa(ioa_cfg, ipr_cmd, res);
6063
6064 switch (masked_ioasc) {
6065 case IPR_IOASC_ABORTED_CMD_TERM_BY_HOST:
6066 if (ipr_is_naca_model(res))
6067 scsi_cmd->result |= (DID_ABORT << 16);
6068 else
6069 scsi_cmd->result |= (DID_IMM_RETRY << 16);
6070 break;
6071 case IPR_IOASC_IR_RESOURCE_HANDLE:
6072 case IPR_IOASC_IR_NO_CMDS_TO_2ND_IOA:
6073 scsi_cmd->result |= (DID_NO_CONNECT << 16);
6074 break;
6075 case IPR_IOASC_HW_SEL_TIMEOUT:
6076 scsi_cmd->result |= (DID_NO_CONNECT << 16);
6077 if (!ipr_is_naca_model(res))
6078 res->needs_sync_complete = 1;
6079 break;
6080 case IPR_IOASC_SYNC_REQUIRED:
6081 if (!res->in_erp)
6082 res->needs_sync_complete = 1;
6083 scsi_cmd->result |= (DID_IMM_RETRY << 16);
6084 break;
6085 case IPR_IOASC_MED_DO_NOT_REALLOC: /* prevent retries */
6086 case IPR_IOASA_IR_DUAL_IOA_DISABLED:
6087 scsi_cmd->result |= (DID_PASSTHROUGH << 16);
6088 break;
6089 case IPR_IOASC_BUS_WAS_RESET:
6090 case IPR_IOASC_BUS_WAS_RESET_BY_OTHER:
6091 /*
6092 * Report the bus reset and ask for a retry. The device
6093 * will return CC/UA on the next command.
6094 */
6095 if (!res->resetting_device)
6096 scsi_report_bus_reset(ioa_cfg->host, scsi_cmd->device->channel);
6097 scsi_cmd->result |= (DID_ERROR << 16);
6098 if (!ipr_is_naca_model(res))
6099 res->needs_sync_complete = 1;
6100 break;
6101 case IPR_IOASC_HW_DEV_BUS_STATUS:
6102 scsi_cmd->result |= IPR_IOASC_SENSE_STATUS(ioasc);
6103 if (IPR_IOASC_SENSE_STATUS(ioasc) == SAM_STAT_CHECK_CONDITION) {
6104 if (!ipr_get_autosense(ipr_cmd)) {
6105 if (!ipr_is_naca_model(res)) {
6106 ipr_erp_cancel_all(ipr_cmd);
6107 return;
6108 }
6109 }
6110 }
6111 if (!ipr_is_naca_model(res))
6112 res->needs_sync_complete = 1;
6113 break;
6114 case IPR_IOASC_NR_INIT_CMD_REQUIRED:
6115 break;
6116 default:
6117 if (IPR_IOASC_SENSE_KEY(ioasc) > RECOVERED_ERROR)
6118 scsi_cmd->result |= (DID_ERROR << 16);
6119 if (!ipr_is_vset_device(res) && !ipr_is_naca_model(res))
6120 res->needs_sync_complete = 1;
6121 break;
6122 }
6123
6124 scsi_dma_unmap(ipr_cmd->scsi_cmd);
6125 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
6126 scsi_cmd->scsi_done(scsi_cmd);
6127 }
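
/*
 * ERP flow summary (informational): for a check condition without usable
 * autosense on a non-NACA device, ipr_erp_start() above starts the chain
 * ipr_erp_cancel_all() -> ipr_erp_request_sense() -> ipr_erp_done(): the
 * device queue is cleared first (when tagged queuing is active), a REQUEST
 * SENSE is then issued, and finally the sense data is copied back into the
 * scsi_cmnd and scsi_done() is called.
 */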
6128
6129 /**
6130 * ipr_scsi_done - mid-layer done function
6131 * @ipr_cmd: ipr command struct
6132 *
6133 * This function is invoked by the interrupt handler for
6134 * ops generated by the SCSI mid-layer
6135 *
6136 * Return value:
6137 * none
6138 **/
6139 static void ipr_scsi_done(struct ipr_cmnd *ipr_cmd)
6140 {
6141 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
6142 struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
6143 u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
6144 unsigned long hrrq_flags;
6145
6146 scsi_set_resid(scsi_cmd, be32_to_cpu(ipr_cmd->s.ioasa.hdr.residual_data_len));
6147
6148 if (likely(IPR_IOASC_SENSE_KEY(ioasc) == 0)) {
6149 scsi_dma_unmap(scsi_cmd);
6150
6151 spin_lock_irqsave(ipr_cmd->hrrq->lock, hrrq_flags);
6152 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
6153 scsi_cmd->scsi_done(scsi_cmd);
6154 spin_unlock_irqrestore(ipr_cmd->hrrq->lock, hrrq_flags);
6155 } else {
6156 spin_lock_irqsave(ipr_cmd->hrrq->lock, hrrq_flags);
6157 ipr_erp_start(ioa_cfg, ipr_cmd);
6158 spin_unlock_irqrestore(ipr_cmd->hrrq->lock, hrrq_flags);
6159 }
6160 }
6161
6162 /**
6163 * ipr_queuecommand - Queue a mid-layer request
6164 * @shost: scsi host struct
6165 * @scsi_cmd: scsi command struct
6166 *
6167 * This function queues a request generated by the mid-layer.
6168 *
6169 * Return value:
6170 * 0 on success
6171 * SCSI_MLQUEUE_DEVICE_BUSY if device is busy
6172 * SCSI_MLQUEUE_HOST_BUSY if host is busy
6173 **/
6174 static int ipr_queuecommand(struct Scsi_Host *shost,
6175 struct scsi_cmnd *scsi_cmd)
6176 {
6177 struct ipr_ioa_cfg *ioa_cfg;
6178 struct ipr_resource_entry *res;
6179 struct ipr_ioarcb *ioarcb;
6180 struct ipr_cmnd *ipr_cmd;
6181 unsigned long hrrq_flags, lock_flags;
6182 int rc;
6183 struct ipr_hrr_queue *hrrq;
6184 int hrrq_id;
6185
6186 ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
6187
6188 scsi_cmd->result = (DID_OK << 16);
6189 res = scsi_cmd->device->hostdata;
6190
6191 if (ipr_is_gata(res) && res->sata_port) {
6192 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
6193 rc = ata_sas_queuecmd(scsi_cmd, res->sata_port->ap);
6194 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
6195 return rc;
6196 }
6197
6198 hrrq_id = ipr_get_hrrq_index(ioa_cfg);
6199 hrrq = &ioa_cfg->hrrq[hrrq_id];
6200
6201 spin_lock_irqsave(hrrq->lock, hrrq_flags);
6202 /*
6203 * We are currently blocking all devices due to a host reset.
6204 * We have told the host to stop giving us new requests, but
6205 * ERP ops don't count. FIXME
6206 */
6207 if (unlikely(!hrrq->allow_cmds && !hrrq->ioa_is_dead && !hrrq->removing_ioa)) {
6208 spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
6209 return SCSI_MLQUEUE_HOST_BUSY;
6210 }
6211
6212 /*
6213 * FIXME - Create scsi_set_host_offline interface
6214 * and the ioa_is_dead check can be removed
6215 */
6216 if (unlikely(hrrq->ioa_is_dead || hrrq->removing_ioa || !res)) {
6217 spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
6218 goto err_nodev;
6219 }
6220
6221 ipr_cmd = __ipr_get_free_ipr_cmnd(hrrq);
6222 if (ipr_cmd == NULL) {
6223 spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
6224 return SCSI_MLQUEUE_HOST_BUSY;
6225 }
6226 spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
6227
6228 ipr_init_ipr_cmnd(ipr_cmd, ipr_scsi_done);
6229 ioarcb = &ipr_cmd->ioarcb;
6230
6231 memcpy(ioarcb->cmd_pkt.cdb, scsi_cmd->cmnd, scsi_cmd->cmd_len);
6232 ipr_cmd->scsi_cmd = scsi_cmd;
6233 ipr_cmd->done = ipr_scsi_eh_done;
6234
6235 if (ipr_is_gscsi(res) || ipr_is_vset_device(res)) {
6236 if (scsi_cmd->underflow == 0)
6237 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_NO_ULEN_CHK;
6238
6239 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_NO_LINK_DESC;
6240 if (ipr_is_gscsi(res))
6241 ioarcb->cmd_pkt.flags_lo |= IPR_FLAGS_LO_DELAY_AFTER_RST;
6242 ioarcb->cmd_pkt.flags_lo |= IPR_FLAGS_LO_ALIGNED_BFR;
6243 ioarcb->cmd_pkt.flags_lo |= ipr_get_task_attributes(scsi_cmd);
6244 }
6245
6246 if (scsi_cmd->cmnd[0] >= 0xC0 &&
6247 (!ipr_is_gscsi(res) || scsi_cmd->cmnd[0] == IPR_QUERY_RSRC_STATE)) {
6248 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
6249 }
6250
6251 if (ioa_cfg->sis64)
6252 rc = ipr_build_ioadl64(ioa_cfg, ipr_cmd);
6253 else
6254 rc = ipr_build_ioadl(ioa_cfg, ipr_cmd);
6255
6256 spin_lock_irqsave(hrrq->lock, hrrq_flags);
6257 if (unlikely(rc || (!hrrq->allow_cmds && !hrrq->ioa_is_dead))) {
6258 list_add_tail(&ipr_cmd->queue, &hrrq->hrrq_free_q);
6259 spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
6260 if (!rc)
6261 scsi_dma_unmap(scsi_cmd);
6262 return SCSI_MLQUEUE_HOST_BUSY;
6263 }
6264
6265 if (unlikely(hrrq->ioa_is_dead)) {
6266 list_add_tail(&ipr_cmd->queue, &hrrq->hrrq_free_q);
6267 spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
6268 scsi_dma_unmap(scsi_cmd);
6269 goto err_nodev;
6270 }
6271
6272 ioarcb->res_handle = res->res_handle;
6273 if (res->needs_sync_complete) {
6274 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_SYNC_COMPLETE;
6275 res->needs_sync_complete = 0;
6276 }
6277 list_add_tail(&ipr_cmd->queue, &hrrq->hrrq_pending_q);
6278 ipr_trc_hook(ipr_cmd, IPR_TRACE_START, IPR_GET_RES_PHYS_LOC(res));
6279 ipr_send_command(ipr_cmd);
6280 spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
6281 return 0;
6282
6283 err_nodev:
6284 spin_lock_irqsave(hrrq->lock, hrrq_flags);
6285 memset(scsi_cmd->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE);
6286 scsi_cmd->result = (DID_NO_CONNECT << 16);
6287 scsi_cmd->scsi_done(scsi_cmd);
6288 spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
6289 return 0;
6290 }
6291
6292 /**
6293 * ipr_ioctl - IOCTL handler
6294 * @sdev: scsi device struct
6295 * @cmd: IOCTL cmd
6296 * @arg: IOCTL arg
6297 *
6298 * Return value:
6299 * 0 on success / other on failure
6300 **/
6301 static int ipr_ioctl(struct scsi_device *sdev, int cmd, void __user *arg)
6302 {
6303 struct ipr_resource_entry *res;
6304
6305 res = (struct ipr_resource_entry *)sdev->hostdata;
6306 if (res && ipr_is_gata(res)) {
6307 if (cmd == HDIO_GET_IDENTITY)
6308 return -ENOTTY;
6309 return ata_sas_scsi_ioctl(res->sata_port->ap, sdev, cmd, arg);
6310 }
6311
6312 return -EINVAL;
6313 }
6314
6315 /**
6316  * ipr_ioa_info - Get information about the card/driver
6317  * @host: scsi host struct
6318 *
6319 * Return value:
6320 * pointer to buffer with description string
6321 **/
6322 static const char *ipr_ioa_info(struct Scsi_Host *host)
6323 {
6324 static char buffer[512];
6325 struct ipr_ioa_cfg *ioa_cfg;
6326 unsigned long lock_flags = 0;
6327
6328 ioa_cfg = (struct ipr_ioa_cfg *) host->hostdata;
6329
6330 spin_lock_irqsave(host->host_lock, lock_flags);
6331 sprintf(buffer, "IBM %X Storage Adapter", ioa_cfg->type);
6332 spin_unlock_irqrestore(host->host_lock, lock_flags);
6333
6334 return buffer;
6335 }
6336
6337 static struct scsi_host_template driver_template = {
6338 .module = THIS_MODULE,
6339 .name = "IPR",
6340 .info = ipr_ioa_info,
6341 .ioctl = ipr_ioctl,
6342 .queuecommand = ipr_queuecommand,
6343 .eh_abort_handler = ipr_eh_abort,
6344 .eh_device_reset_handler = ipr_eh_dev_reset,
6345 .eh_host_reset_handler = ipr_eh_host_reset,
6346 .slave_alloc = ipr_slave_alloc,
6347 .slave_configure = ipr_slave_configure,
6348 .slave_destroy = ipr_slave_destroy,
6349 .target_alloc = ipr_target_alloc,
6350 .target_destroy = ipr_target_destroy,
6351 .change_queue_depth = ipr_change_queue_depth,
6352 .change_queue_type = ipr_change_queue_type,
6353 .bios_param = ipr_biosparam,
6354 .can_queue = IPR_MAX_COMMANDS,
6355 .this_id = -1,
6356 .sg_tablesize = IPR_MAX_SGLIST,
6357 .max_sectors = IPR_IOA_MAX_SECTORS,
6358 .cmd_per_lun = IPR_MAX_CMD_PER_LUN,
6359 .use_clustering = ENABLE_CLUSTERING,
6360 .shost_attrs = ipr_ioa_attrs,
6361 .sdev_attrs = ipr_dev_attrs,
6362 .proc_name = IPR_NAME,
6363 .no_write_same = 1,
6364 };
6365
6366 /**
6367 * ipr_ata_phy_reset - libata phy_reset handler
6368 * @ap: ata port to reset
6369 *
6370 **/
6371 static void ipr_ata_phy_reset(struct ata_port *ap)
6372 {
6373 unsigned long flags;
6374 struct ipr_sata_port *sata_port = ap->private_data;
6375 struct ipr_resource_entry *res = sata_port->res;
6376 struct ipr_ioa_cfg *ioa_cfg = sata_port->ioa_cfg;
6377 int rc;
6378
6379 ENTER;
6380 spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
6381 while (ioa_cfg->in_reset_reload) {
6382 spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
6383 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
6384 spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
6385 }
6386
6387 if (!ioa_cfg->hrrq[IPR_INIT_HRRQ].allow_cmds)
6388 goto out_unlock;
6389
6390 rc = ipr_device_reset(ioa_cfg, res);
6391
6392 if (rc) {
6393 ap->link.device[0].class = ATA_DEV_NONE;
6394 goto out_unlock;
6395 }
6396
6397 ap->link.device[0].class = res->ata_class;
6398 if (ap->link.device[0].class == ATA_DEV_UNKNOWN)
6399 ap->link.device[0].class = ATA_DEV_NONE;
6400
6401 out_unlock:
6402 spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
6403 LEAVE;
6404 }
6405
6406 /**
6407 * ipr_ata_post_internal - Cleanup after an internal command
6408 * @qc: ATA queued command
6409 *
6410 * Return value:
6411 * none
6412 **/
6413 static void ipr_ata_post_internal(struct ata_queued_cmd *qc)
6414 {
6415 struct ipr_sata_port *sata_port = qc->ap->private_data;
6416 struct ipr_ioa_cfg *ioa_cfg = sata_port->ioa_cfg;
6417 struct ipr_cmnd *ipr_cmd;
6418 struct ipr_hrr_queue *hrrq;
6419 unsigned long flags;
6420
6421 spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
6422 while (ioa_cfg->in_reset_reload) {
6423 spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
6424 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
6425 spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
6426 }
6427
6428 for_each_hrrq(hrrq, ioa_cfg) {
6429 spin_lock(&hrrq->_lock);
6430 list_for_each_entry(ipr_cmd, &hrrq->hrrq_pending_q, queue) {
6431 if (ipr_cmd->qc == qc) {
6432 ipr_device_reset(ioa_cfg, sata_port->res);
6433 break;
6434 }
6435 }
6436 spin_unlock(&hrrq->_lock);
6437 }
6438 spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
6439 }
6440
6441 /**
6442 * ipr_copy_sata_tf - Copy a SATA taskfile to an IOA data structure
6443 * @regs: destination
6444 * @tf: source ATA taskfile
6445 *
6446 * Return value:
6447 * none
6448 **/
6449 static void ipr_copy_sata_tf(struct ipr_ioarcb_ata_regs *regs,
6450 struct ata_taskfile *tf)
6451 {
6452 regs->feature = tf->feature;
6453 regs->nsect = tf->nsect;
6454 regs->lbal = tf->lbal;
6455 regs->lbam = tf->lbam;
6456 regs->lbah = tf->lbah;
6457 regs->device = tf->device;
6458 regs->command = tf->command;
6459 regs->hob_feature = tf->hob_feature;
6460 regs->hob_nsect = tf->hob_nsect;
6461 regs->hob_lbal = tf->hob_lbal;
6462 regs->hob_lbam = tf->hob_lbam;
6463 regs->hob_lbah = tf->hob_lbah;
6464 regs->ctl = tf->ctl;
6465 }
6466
6467 /**
6468 * ipr_sata_done - done function for SATA commands
6469 * @ipr_cmd: ipr command struct
6470 *
6471 * This function is invoked by the interrupt handler for
6472 * ops generated by the SCSI mid-layer to SATA devices
6473 *
6474 * Return value:
6475 * none
6476 **/
6477 static void ipr_sata_done(struct ipr_cmnd *ipr_cmd)
6478 {
6479 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
6480 struct ata_queued_cmd *qc = ipr_cmd->qc;
6481 struct ipr_sata_port *sata_port = qc->ap->private_data;
6482 struct ipr_resource_entry *res = sata_port->res;
6483 u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
6484
6485 spin_lock(&ipr_cmd->hrrq->_lock);
6486 if (ipr_cmd->ioa_cfg->sis64)
6487 memcpy(&sata_port->ioasa, &ipr_cmd->s.ioasa64.u.gata,
6488 sizeof(struct ipr_ioasa_gata));
6489 else
6490 memcpy(&sata_port->ioasa, &ipr_cmd->s.ioasa.u.gata,
6491 sizeof(struct ipr_ioasa_gata));
6492 ipr_dump_ioasa(ioa_cfg, ipr_cmd, res);
6493
6494 if (be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc_specific) & IPR_ATA_DEVICE_WAS_RESET)
6495 scsi_report_device_reset(ioa_cfg->host, res->bus, res->target);
6496
6497 if (IPR_IOASC_SENSE_KEY(ioasc) > RECOVERED_ERROR)
6498 qc->err_mask |= __ac_err_mask(sata_port->ioasa.status);
6499 else
6500 qc->err_mask |= ac_err_mask(sata_port->ioasa.status);
6501 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
6502 spin_unlock(&ipr_cmd->hrrq->_lock);
6503 ata_qc_complete(qc);
6504 }
6505
6506 /**
6507 * ipr_build_ata_ioadl64 - Build an ATA scatter/gather list
6508 * @ipr_cmd: ipr command struct
6509 * @qc: ATA queued command
6510 *
6511 **/
6512 static void ipr_build_ata_ioadl64(struct ipr_cmnd *ipr_cmd,
6513 struct ata_queued_cmd *qc)
6514 {
6515 u32 ioadl_flags = 0;
6516 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
6517 struct ipr_ioadl64_desc *ioadl64 = ipr_cmd->i.ata_ioadl.ioadl64;
6518 struct ipr_ioadl64_desc *last_ioadl64 = NULL;
6519 int len = qc->nbytes;
6520 struct scatterlist *sg;
6521 unsigned int si;
6522 dma_addr_t dma_addr = ipr_cmd->dma_addr;
6523
6524 if (len == 0)
6525 return;
6526
6527 if (qc->dma_dir == DMA_TO_DEVICE) {
6528 ioadl_flags = IPR_IOADL_FLAGS_WRITE;
6529 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
6530 } else if (qc->dma_dir == DMA_FROM_DEVICE)
6531 ioadl_flags = IPR_IOADL_FLAGS_READ;
6532
6533 ioarcb->data_transfer_length = cpu_to_be32(len);
6534 ioarcb->ioadl_len =
6535 cpu_to_be32(sizeof(struct ipr_ioadl64_desc) * ipr_cmd->dma_use_sg);
6536 ioarcb->u.sis64_addr_data.data_ioadl_addr =
6537 cpu_to_be64(dma_addr + offsetof(struct ipr_cmnd, i.ata_ioadl.ioadl64));
6538
6539 for_each_sg(qc->sg, sg, qc->n_elem, si) {
6540 ioadl64->flags = cpu_to_be32(ioadl_flags);
6541 ioadl64->data_len = cpu_to_be32(sg_dma_len(sg));
6542 ioadl64->address = cpu_to_be64(sg_dma_address(sg));
6543
6544 last_ioadl64 = ioadl64;
6545 ioadl64++;
6546 }
6547
6548 if (likely(last_ioadl64))
6549 last_ioadl64->flags |= cpu_to_be32(IPR_IOADL_FLAGS_LAST);
6550 }
6551
6552 /**
6553 * ipr_build_ata_ioadl - Build an ATA scatter/gather list
6554 * @ipr_cmd: ipr command struct
6555 * @qc: ATA queued command
6556 *
6557 **/
6558 static void ipr_build_ata_ioadl(struct ipr_cmnd *ipr_cmd,
6559 struct ata_queued_cmd *qc)
6560 {
6561 u32 ioadl_flags = 0;
6562 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
6563 struct ipr_ioadl_desc *ioadl = ipr_cmd->i.ioadl;
6564 struct ipr_ioadl_desc *last_ioadl = NULL;
6565 int len = qc->nbytes;
6566 struct scatterlist *sg;
6567 unsigned int si;
6568
6569 if (len == 0)
6570 return;
6571
6572 if (qc->dma_dir == DMA_TO_DEVICE) {
6573 ioadl_flags = IPR_IOADL_FLAGS_WRITE;
6574 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
6575 ioarcb->data_transfer_length = cpu_to_be32(len);
6576 ioarcb->ioadl_len =
6577 cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg);
6578 } else if (qc->dma_dir == DMA_FROM_DEVICE) {
6579 ioadl_flags = IPR_IOADL_FLAGS_READ;
6580 ioarcb->read_data_transfer_length = cpu_to_be32(len);
6581 ioarcb->read_ioadl_len =
6582 cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg);
6583 }
6584
6585 for_each_sg(qc->sg, sg, qc->n_elem, si) {
6586 ioadl->flags_and_data_len = cpu_to_be32(ioadl_flags | sg_dma_len(sg));
6587 ioadl->address = cpu_to_be32(sg_dma_address(sg));
6588
6589 last_ioadl = ioadl;
6590 ioadl++;
6591 }
6592
6593 if (likely(last_ioadl))
6594 last_ioadl->flags_and_data_len |= cpu_to_be32(IPR_IOADL_FLAGS_LAST);
6595 }
6596
6597 /**
6598 * ipr_qc_defer - Get a free ipr_cmd
6599 * @qc: queued command
6600 *
6601 * Return value:
6602 * 0 if success / ATA_DEFER_LINK if the command must be deferred
6603 **/
6604 static int ipr_qc_defer(struct ata_queued_cmd *qc)
6605 {
6606 struct ata_port *ap = qc->ap;
6607 struct ipr_sata_port *sata_port = ap->private_data;
6608 struct ipr_ioa_cfg *ioa_cfg = sata_port->ioa_cfg;
6609 struct ipr_cmnd *ipr_cmd;
6610 struct ipr_hrr_queue *hrrq;
6611 int hrrq_id;
6612
6613 hrrq_id = ipr_get_hrrq_index(ioa_cfg);
6614 hrrq = &ioa_cfg->hrrq[hrrq_id];
6615
6616 qc->lldd_task = NULL;
6617 spin_lock(&hrrq->_lock);
6618 if (unlikely(hrrq->ioa_is_dead)) {
6619 spin_unlock(&hrrq->_lock);
6620 return 0;
6621 }
6622
6623 if (unlikely(!hrrq->allow_cmds)) {
6624 spin_unlock(&hrrq->_lock);
6625 return ATA_DEFER_LINK;
6626 }
6627
6628 ipr_cmd = __ipr_get_free_ipr_cmnd(hrrq);
6629 if (ipr_cmd == NULL) {
6630 spin_unlock(&hrrq->_lock);
6631 return ATA_DEFER_LINK;
6632 }
6633
6634 qc->lldd_task = ipr_cmd;
6635 spin_unlock(&hrrq->_lock);
6636 return 0;
6637 }
6638
6639 /**
6640 * ipr_qc_issue - Issue a SATA qc to a device
6641 * @qc: queued command
6642 *
6643 * Return value:
6644 * 0 if success / AC_ERR_SYSTEM or AC_ERR_INVALID on failure
6645 **/
6646 static unsigned int ipr_qc_issue(struct ata_queued_cmd *qc)
6647 {
6648 struct ata_port *ap = qc->ap;
6649 struct ipr_sata_port *sata_port = ap->private_data;
6650 struct ipr_resource_entry *res = sata_port->res;
6651 struct ipr_ioa_cfg *ioa_cfg = sata_port->ioa_cfg;
6652 struct ipr_cmnd *ipr_cmd;
6653 struct ipr_ioarcb *ioarcb;
6654 struct ipr_ioarcb_ata_regs *regs;
6655
6656 if (qc->lldd_task == NULL)
6657 ipr_qc_defer(qc);
6658
6659 ipr_cmd = qc->lldd_task;
6660 if (ipr_cmd == NULL)
6661 return AC_ERR_SYSTEM;
6662
6663 qc->lldd_task = NULL;
6664 spin_lock(&ipr_cmd->hrrq->_lock);
6665 if (unlikely(!ipr_cmd->hrrq->allow_cmds ||
6666 ipr_cmd->hrrq->ioa_is_dead)) {
6667 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
6668 spin_unlock(&ipr_cmd->hrrq->_lock);
6669 return AC_ERR_SYSTEM;
6670 }
6671
6672 ipr_init_ipr_cmnd(ipr_cmd, ipr_lock_and_done);
6673 ioarcb = &ipr_cmd->ioarcb;
6674
6675 if (ioa_cfg->sis64) {
6676 regs = &ipr_cmd->i.ata_ioadl.regs;
6677 ioarcb->add_cmd_parms_offset = cpu_to_be16(sizeof(*ioarcb));
6678 } else
6679 regs = &ioarcb->u.add_data.u.regs;
6680
6681 memset(regs, 0, sizeof(*regs));
6682 ioarcb->add_cmd_parms_len = cpu_to_be16(sizeof(*regs));
6683
6684 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_pending_q);
6685 ipr_cmd->qc = qc;
6686 ipr_cmd->done = ipr_sata_done;
6687 ipr_cmd->ioarcb.res_handle = res->res_handle;
6688 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_ATA_PASSTHRU;
6689 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_NO_LINK_DESC;
6690 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_NO_ULEN_CHK;
6691 ipr_cmd->dma_use_sg = qc->n_elem;
6692
6693 if (ioa_cfg->sis64)
6694 ipr_build_ata_ioadl64(ipr_cmd, qc);
6695 else
6696 ipr_build_ata_ioadl(ipr_cmd, qc);
6697
6698 regs->flags |= IPR_ATA_FLAG_STATUS_ON_GOOD_COMPLETION;
6699 ipr_copy_sata_tf(regs, &qc->tf);
6700 memcpy(ioarcb->cmd_pkt.cdb, qc->cdb, IPR_MAX_CDB_LEN);
6701 ipr_trc_hook(ipr_cmd, IPR_TRACE_START, IPR_GET_RES_PHYS_LOC(res));
6702
6703 switch (qc->tf.protocol) {
6704 case ATA_PROT_NODATA:
6705 case ATA_PROT_PIO:
6706 break;
6707
6708 case ATA_PROT_DMA:
6709 regs->flags |= IPR_ATA_FLAG_XFER_TYPE_DMA;
6710 break;
6711
6712 case ATAPI_PROT_PIO:
6713 case ATAPI_PROT_NODATA:
6714 regs->flags |= IPR_ATA_FLAG_PACKET_CMD;
6715 break;
6716
6717 case ATAPI_PROT_DMA:
6718 regs->flags |= IPR_ATA_FLAG_PACKET_CMD;
6719 regs->flags |= IPR_ATA_FLAG_XFER_TYPE_DMA;
6720 break;
6721
6722 default:
6723 WARN_ON(1);
6724 spin_unlock(&ipr_cmd->hrrq->_lock);
6725 return AC_ERR_INVALID;
6726 }
6727
6728 ipr_send_command(ipr_cmd);
6729 spin_unlock(&ipr_cmd->hrrq->_lock);
6730
6731 return 0;
6732 }
6733
6734 /**
6735 * ipr_qc_fill_rtf - Read result TF
6736 * @qc: ATA queued command
6737 *
6738 * Return value:
6739 * true
6740 **/
6741 static bool ipr_qc_fill_rtf(struct ata_queued_cmd *qc)
6742 {
6743 struct ipr_sata_port *sata_port = qc->ap->private_data;
6744 struct ipr_ioasa_gata *g = &sata_port->ioasa;
6745 struct ata_taskfile *tf = &qc->result_tf;
6746
6747 tf->feature = g->error;
6748 tf->nsect = g->nsect;
6749 tf->lbal = g->lbal;
6750 tf->lbam = g->lbam;
6751 tf->lbah = g->lbah;
6752 tf->device = g->device;
6753 tf->command = g->status;
6754 tf->hob_nsect = g->hob_nsect;
6755 tf->hob_lbal = g->hob_lbal;
6756 tf->hob_lbam = g->hob_lbam;
6757 tf->hob_lbah = g->hob_lbah;
6758 tf->ctl = g->alt_status;
6759
6760 return true;
6761 }
6762
6763 static struct ata_port_operations ipr_sata_ops = {
6764 .phy_reset = ipr_ata_phy_reset,
6765 .hardreset = ipr_sata_reset,
6766 .post_internal_cmd = ipr_ata_post_internal,
6767 .qc_prep = ata_noop_qc_prep,
6768 .qc_defer = ipr_qc_defer,
6769 .qc_issue = ipr_qc_issue,
6770 .qc_fill_rtf = ipr_qc_fill_rtf,
6771 .port_start = ata_sas_port_start,
6772 .port_stop = ata_sas_port_stop
6773 };
6774
6775 static struct ata_port_info sata_port_info = {
6776 .flags = ATA_FLAG_SATA | ATA_FLAG_PIO_DMA,
6777 .pio_mask = ATA_PIO4_ONLY,
6778 .mwdma_mask = ATA_MWDMA2,
6779 .udma_mask = ATA_UDMA6,
6780 .port_ops = &ipr_sata_ops
6781 };
6782
6783 #ifdef CONFIG_PPC_PSERIES
6784 static const u16 ipr_blocked_processors[] = {
6785 PVR_NORTHSTAR,
6786 PVR_PULSAR,
6787 PVR_POWER4,
6788 PVR_ICESTAR,
6789 PVR_SSTAR,
6790 PVR_POWER4p,
6791 PVR_630,
6792 PVR_630p
6793 };
6794
6795 /**
6796 * ipr_invalid_adapter - Determine if this adapter is supported on this hardware
6797 * @ioa_cfg: ioa cfg struct
6798 *
6799 * Adapters that use Gemstone revision < 3.1 do not work reliably on
6800 * certain pSeries hardware. This function determines if the given
6801 * adapter is in one of these configurations or not.
6802 *
6803 * Return value:
6804 * 1 if adapter is not supported / 0 if adapter is supported
6805 **/
6806 static int ipr_invalid_adapter(struct ipr_ioa_cfg *ioa_cfg)
6807 {
6808 int i;
6809
6810 if ((ioa_cfg->type == 0x5702) && (ioa_cfg->pdev->revision < 4)) {
6811 for (i = 0; i < ARRAY_SIZE(ipr_blocked_processors); i++) {
6812 if (pvr_version_is(ipr_blocked_processors[i]))
6813 return 1;
6814 }
6815 }
6816 return 0;
6817 }
6818 #else
6819 #define ipr_invalid_adapter(ioa_cfg) 0
6820 #endif
6821
6822 /**
6823 * ipr_ioa_bringdown_done - IOA bring down completion.
6824 * @ipr_cmd: ipr command struct
6825 *
6826 * This function processes the completion of an adapter bring down.
6827 * It wakes any reset sleepers.
6828 *
6829 * Return value:
6830 * IPR_RC_JOB_RETURN
6831 **/
6832 static int ipr_ioa_bringdown_done(struct ipr_cmnd *ipr_cmd)
6833 {
6834 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
6835 int i;
6836
6837 ENTER;
6838 if (!ioa_cfg->hrrq[IPR_INIT_HRRQ].removing_ioa) {
6839 ipr_trace;
6840 spin_unlock_irq(ioa_cfg->host->host_lock);
6841 scsi_unblock_requests(ioa_cfg->host);
6842 spin_lock_irq(ioa_cfg->host->host_lock);
6843 }
6844
6845 ioa_cfg->in_reset_reload = 0;
6846 ioa_cfg->reset_retries = 0;
6847 for (i = 0; i < ioa_cfg->hrrq_num; i++) {
6848 spin_lock(&ioa_cfg->hrrq[i]._lock);
6849 ioa_cfg->hrrq[i].ioa_is_dead = 1;
6850 spin_unlock(&ioa_cfg->hrrq[i]._lock);
6851 }
6852 wmb();
6853
6854 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
6855 wake_up_all(&ioa_cfg->reset_wait_q);
6856 LEAVE;
6857
6858 return IPR_RC_JOB_RETURN;
6859 }
6860
6861 /**
6862 * ipr_ioa_reset_done - IOA reset completion.
6863 * @ipr_cmd: ipr command struct
6864 *
6865 * This function processes the completion of an adapter reset.
6866 * It schedules any necessary mid-layer add/removes and
6867 * wakes any reset sleepers.
6868 *
6869 * Return value:
6870 * IPR_RC_JOB_RETURN
6871 **/
6872 static int ipr_ioa_reset_done(struct ipr_cmnd *ipr_cmd)
6873 {
6874 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
6875 struct ipr_resource_entry *res;
6876 struct ipr_hostrcb *hostrcb, *temp;
6877 int i = 0, j;
6878
6879 ENTER;
6880 ioa_cfg->in_reset_reload = 0;
6881 for (j = 0; j < ioa_cfg->hrrq_num; j++) {
6882 spin_lock(&ioa_cfg->hrrq[j]._lock);
6883 ioa_cfg->hrrq[j].allow_cmds = 1;
6884 spin_unlock(&ioa_cfg->hrrq[j]._lock);
6885 }
6886 wmb();
6887 ioa_cfg->reset_cmd = NULL;
6888 ioa_cfg->doorbell |= IPR_RUNTIME_RESET;
6889
6890 list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
6891 if (ioa_cfg->allow_ml_add_del && (res->add_to_ml || res->del_from_ml)) {
6892 ipr_trace;
6893 break;
6894 }
6895 }
6896 schedule_work(&ioa_cfg->work_q);
6897
6898 list_for_each_entry_safe(hostrcb, temp, &ioa_cfg->hostrcb_free_q, queue) {
6899 list_del(&hostrcb->queue);
6900 if (i++ < IPR_NUM_LOG_HCAMS)
6901 ipr_send_hcam(ioa_cfg, IPR_HCAM_CDB_OP_CODE_LOG_DATA, hostrcb);
6902 else
6903 ipr_send_hcam(ioa_cfg, IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE, hostrcb);
6904 }
6905
6906 scsi_report_bus_reset(ioa_cfg->host, IPR_VSET_BUS);
6907 dev_info(&ioa_cfg->pdev->dev, "IOA initialized.\n");
6908
6909 ioa_cfg->reset_retries = 0;
6910 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
6911 wake_up_all(&ioa_cfg->reset_wait_q);
6912
6913 spin_unlock(ioa_cfg->host->host_lock);
6914 scsi_unblock_requests(ioa_cfg->host);
6915 spin_lock(ioa_cfg->host->host_lock);
6916
6917 if (!ioa_cfg->hrrq[IPR_INIT_HRRQ].allow_cmds)
6918 scsi_block_requests(ioa_cfg->host);
6919
6920 LEAVE;
6921 return IPR_RC_JOB_RETURN;
6922 }
6923
6924 /**
6925 * ipr_set_sup_dev_dflt - Initialize a Set Supported Device buffer
6926 * @supported_dev: supported device struct
6927 * @vpids: vendor product id struct
6928 *
6929 * Return value:
6930 * none
6931 **/
6932 static void ipr_set_sup_dev_dflt(struct ipr_supported_device *supported_dev,
6933 struct ipr_std_inq_vpids *vpids)
6934 {
6935 memset(supported_dev, 0, sizeof(struct ipr_supported_device));
6936 memcpy(&supported_dev->vpids, vpids, sizeof(struct ipr_std_inq_vpids));
6937 supported_dev->num_records = 1;
6938 supported_dev->data_length =
6939 cpu_to_be16(sizeof(struct ipr_supported_device));
6940 supported_dev->reserved = 0;
6941 }
6942
6943 /**
6944 * ipr_set_supported_devs - Send Set Supported Devices for a device
6945 * @ipr_cmd: ipr command struct
6946 *
6947 * This function sends a Set Supported Devices to the adapter
6948 *
6949 * Return value:
6950 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
6951 **/
6952 static int ipr_set_supported_devs(struct ipr_cmnd *ipr_cmd)
6953 {
6954 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
6955 struct ipr_supported_device *supp_dev = &ioa_cfg->vpd_cbs->supp_dev;
6956 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
6957 struct ipr_resource_entry *res = ipr_cmd->u.res;
6958
6959 ipr_cmd->job_step = ipr_ioa_reset_done;
6960
6961 list_for_each_entry_continue(res, &ioa_cfg->used_res_q, queue) {
6962 if (!ipr_is_scsi_disk(res))
6963 continue;
6964
6965 ipr_cmd->u.res = res;
6966 ipr_set_sup_dev_dflt(supp_dev, &res->std_inq_data.vpids);
6967
6968 ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
6969 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
6970 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
6971
6972 ioarcb->cmd_pkt.cdb[0] = IPR_SET_SUPPORTED_DEVICES;
6973 ioarcb->cmd_pkt.cdb[1] = IPR_SET_ALL_SUPPORTED_DEVICES;
6974 ioarcb->cmd_pkt.cdb[7] = (sizeof(struct ipr_supported_device) >> 8) & 0xff;
6975 ioarcb->cmd_pkt.cdb[8] = sizeof(struct ipr_supported_device) & 0xff;
6976
6977 ipr_init_ioadl(ipr_cmd,
6978 ioa_cfg->vpd_cbs_dma +
6979 offsetof(struct ipr_misc_cbs, supp_dev),
6980 sizeof(struct ipr_supported_device),
6981 IPR_IOADL_FLAGS_WRITE_LAST);
6982
6983 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout,
6984 IPR_SET_SUP_DEVICE_TIMEOUT);
6985
6986 if (!ioa_cfg->sis64)
6987 ipr_cmd->job_step = ipr_set_supported_devs;
6988 LEAVE;
6989 return IPR_RC_JOB_RETURN;
6990 }
6991
6992 LEAVE;
6993 return IPR_RC_JOB_CONTINUE;
6994 }
6995
6996 /**
6997 * ipr_get_mode_page - Locate specified mode page
6998 * @mode_pages: mode page buffer
6999 * @page_code: page code to find
7000 * @len: minimum required length for mode page
7001 *
7002 * Return value:
7003 * pointer to mode page / NULL on failure
7004 **/
7005 static void *ipr_get_mode_page(struct ipr_mode_pages *mode_pages,
7006 u32 page_code, u32 len)
7007 {
7008 struct ipr_mode_page_hdr *mode_hdr;
7009 u32 page_length;
7010 u32 length;
7011
7012 if (!mode_pages || (mode_pages->hdr.length == 0))
7013 return NULL;
7014
7015 length = (mode_pages->hdr.length + 1) - 4 - mode_pages->hdr.block_desc_len;
7016 mode_hdr = (struct ipr_mode_page_hdr *)
7017 (mode_pages->data + mode_pages->hdr.block_desc_len);
7018
7019 while (length) {
7020 if (IPR_GET_MODE_PAGE_CODE(mode_hdr) == page_code) {
7021 if (mode_hdr->page_length >= (len - sizeof(struct ipr_mode_page_hdr)))
7022 return mode_hdr;
7023 break;
7024 } else {
7025 page_length = (sizeof(struct ipr_mode_page_hdr) +
7026 mode_hdr->page_length);
7027 length -= page_length;
7028 mode_hdr = (struct ipr_mode_page_hdr *)
7029 ((unsigned long)mode_hdr + page_length);
7030 }
7031 }
7032 return NULL;
7033 }
7034
7035 /**
7036 * ipr_check_term_power - Check for term power errors
7037 * @ioa_cfg: ioa config struct
7038 * @mode_pages: IOAFP mode pages buffer
7039 *
7040 * Check the IOAFP's mode page 28 for term power errors
7041 *
7042 * Return value:
7043 * nothing
7044 **/
7045 static void ipr_check_term_power(struct ipr_ioa_cfg *ioa_cfg,
7046 struct ipr_mode_pages *mode_pages)
7047 {
7048 int i;
7049 int entry_length;
7050 struct ipr_dev_bus_entry *bus;
7051 struct ipr_mode_page28 *mode_page;
7052
7053 mode_page = ipr_get_mode_page(mode_pages, 0x28,
7054 sizeof(struct ipr_mode_page28));
7055
7056 entry_length = mode_page->entry_length;
7057
7058 bus = mode_page->bus;
7059
7060 for (i = 0; i < mode_page->num_entries; i++) {
7061 if (bus->flags & IPR_SCSI_ATTR_NO_TERM_PWR) {
7062 dev_err(&ioa_cfg->pdev->dev,
7063 "Term power is absent on scsi bus %d\n",
7064 bus->res_addr.bus);
7065 }
7066
7067 bus = (struct ipr_dev_bus_entry *)((char *)bus + entry_length);
7068 }
7069 }
7070
7071 /**
7072 * ipr_scsi_bus_speed_limit - Limit the SCSI speed based on SES table
7073 * @ioa_cfg: ioa config struct
7074 *
7075 * Looks through the config table checking for SES devices. If
7076 * an SES device is found in the SES table and it indicates a maximum
7077 * SCSI bus speed, the speed of that bus is limited accordingly.
7078 *
7079 * Return value:
7080 * none
7081 **/
7082 static void ipr_scsi_bus_speed_limit(struct ipr_ioa_cfg *ioa_cfg)
7083 {
7084 u32 max_xfer_rate;
7085 int i;
7086
7087 for (i = 0; i < IPR_MAX_NUM_BUSES; i++) {
7088 max_xfer_rate = ipr_get_max_scsi_speed(ioa_cfg, i,
7089 ioa_cfg->bus_attr[i].bus_width);
7090
7091 if (max_xfer_rate < ioa_cfg->bus_attr[i].max_xfer_rate)
7092 ioa_cfg->bus_attr[i].max_xfer_rate = max_xfer_rate;
7093 }
7094 }
7095
7096 /**
7097 * ipr_modify_ioafp_mode_page_28 - Modify IOAFP Mode Page 28
7098 * @ioa_cfg: ioa config struct
7099 * @mode_pages: mode page 28 buffer
7100 *
7101 * Updates mode page 28 based on driver configuration
7102 *
7103 * Return value:
7104 * none
7105 **/
7106 static void ipr_modify_ioafp_mode_page_28(struct ipr_ioa_cfg *ioa_cfg,
7107 struct ipr_mode_pages *mode_pages)
7108 {
7109 int i, entry_length;
7110 struct ipr_dev_bus_entry *bus;
7111 struct ipr_bus_attributes *bus_attr;
7112 struct ipr_mode_page28 *mode_page;
7113
7114 mode_page = ipr_get_mode_page(mode_pages, 0x28,
7115 sizeof(struct ipr_mode_page28));
7116
7117 entry_length = mode_page->entry_length;
7118
7119 /* Loop for each device bus entry */
7120 for (i = 0, bus = mode_page->bus;
7121 i < mode_page->num_entries;
7122 i++, bus = (struct ipr_dev_bus_entry *)((u8 *)bus + entry_length)) {
7123 if (bus->res_addr.bus > IPR_MAX_NUM_BUSES) {
7124 dev_err(&ioa_cfg->pdev->dev,
7125 "Invalid resource address reported: 0x%08X\n",
7126 IPR_GET_PHYS_LOC(bus->res_addr));
7127 continue;
7128 }
7129
7130 bus_attr = &ioa_cfg->bus_attr[i];
7131 bus->extended_reset_delay = IPR_EXTENDED_RESET_DELAY;
7132 bus->bus_width = bus_attr->bus_width;
7133 bus->max_xfer_rate = cpu_to_be32(bus_attr->max_xfer_rate);
7134 bus->flags &= ~IPR_SCSI_ATTR_QAS_MASK;
7135 if (bus_attr->qas_enabled)
7136 bus->flags |= IPR_SCSI_ATTR_ENABLE_QAS;
7137 else
7138 bus->flags |= IPR_SCSI_ATTR_DISABLE_QAS;
7139 }
7140 }
7141
7142 /**
7143 * ipr_build_mode_select - Build a mode select command
7144 * @ipr_cmd: ipr command struct
7145 * @res_handle: resource handle to send command to
7146 * @parm: Byte 1 of Mode Select command
7147 * @dma_addr: DMA buffer address
7148 * @xfer_len: data transfer length
7149 *
7150 * Return value:
7151 * none
7152 **/
7153 static void ipr_build_mode_select(struct ipr_cmnd *ipr_cmd,
7154 __be32 res_handle, u8 parm,
7155 dma_addr_t dma_addr, u8 xfer_len)
7156 {
7157 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
7158
7159 ioarcb->res_handle = res_handle;
7160 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_SCSICDB;
7161 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
7162 ioarcb->cmd_pkt.cdb[0] = MODE_SELECT;
7163 ioarcb->cmd_pkt.cdb[1] = parm;
7164 ioarcb->cmd_pkt.cdb[4] = xfer_len;
7165
7166 ipr_init_ioadl(ipr_cmd, dma_addr, xfer_len, IPR_IOADL_FLAGS_WRITE_LAST);
7167 }
7168
7169 /**
7170 * ipr_ioafp_mode_select_page28 - Issue Mode Select Page 28 to IOA
7171 * @ipr_cmd: ipr command struct
7172 *
7173 * This function sets up the SCSI bus attributes and sends
7174 * a Mode Select for Page 28 to activate them.
7175 *
7176 * Return value:
7177 * IPR_RC_JOB_RETURN
7178 **/
7179 static int ipr_ioafp_mode_select_page28(struct ipr_cmnd *ipr_cmd)
7180 {
7181 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7182 struct ipr_mode_pages *mode_pages = &ioa_cfg->vpd_cbs->mode_pages;
7183 int length;
7184
7185 ENTER;
7186 ipr_scsi_bus_speed_limit(ioa_cfg);
7187 ipr_check_term_power(ioa_cfg, mode_pages);
7188 ipr_modify_ioafp_mode_page_28(ioa_cfg, mode_pages);
7189 length = mode_pages->hdr.length + 1;
7190 mode_pages->hdr.length = 0;
7191
7192 ipr_build_mode_select(ipr_cmd, cpu_to_be32(IPR_IOA_RES_HANDLE), 0x11,
7193 ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, mode_pages),
7194 length);
7195
7196 ipr_cmd->job_step = ipr_set_supported_devs;
7197 ipr_cmd->u.res = list_entry(ioa_cfg->used_res_q.next,
7198 struct ipr_resource_entry, queue);
7199 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
7200
7201 LEAVE;
7202 return IPR_RC_JOB_RETURN;
7203 }
7204
7205 /**
7206 * ipr_build_mode_sense - Builds a mode sense command
7207 * @ipr_cmd: ipr command struct
7208 * @res_handle: resource handle to send command to
7209 * @parm: Byte 2 of mode sense command
7210 * @dma_addr: DMA address of mode sense buffer
7211 * @xfer_len: Size of DMA buffer
7212 *
7213 * Return value:
7214 * none
7215 **/
7216 static void ipr_build_mode_sense(struct ipr_cmnd *ipr_cmd,
7217 __be32 res_handle,
7218 u8 parm, dma_addr_t dma_addr, u8 xfer_len)
7219 {
7220 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
7221
7222 ioarcb->res_handle = res_handle;
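	/*
	 * MODE SENSE(6) CDB: byte 2 selects the page code, byte 4 the
	 * allocation length for the returned mode data.
	 */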
7223 ioarcb->cmd_pkt.cdb[0] = MODE_SENSE;
7224 ioarcb->cmd_pkt.cdb[2] = parm;
7225 ioarcb->cmd_pkt.cdb[4] = xfer_len;
7226 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_SCSICDB;
7227
7228 ipr_init_ioadl(ipr_cmd, dma_addr, xfer_len, IPR_IOADL_FLAGS_READ_LAST);
7229 }
7230
7231 /**
7232 * ipr_reset_cmd_failed - Handle failure of IOA reset command
7233 * @ipr_cmd: ipr command struct
7234 *
7235 * This function handles the failure of an IOA bringup command.
7236 *
7237 * Return value:
7238 * IPR_RC_JOB_RETURN
7239 **/
7240 static int ipr_reset_cmd_failed(struct ipr_cmnd *ipr_cmd)
7241 {
7242 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7243 u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
7244
7245 dev_err(&ioa_cfg->pdev->dev,
7246 "0x%02X failed with IOASC: 0x%08X\n",
7247 ipr_cmd->ioarcb.cmd_pkt.cdb[0], ioasc);
7248
7249 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
7250 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
7251 return IPR_RC_JOB_RETURN;
7252 }
7253
7254 /**
7255 * ipr_reset_mode_sense_failed - Handle failure of IOAFP mode sense
7256 * @ipr_cmd: ipr command struct
7257 *
7258 * This function handles the failure of a Mode Sense to the IOAFP.
7259 * Some adapters do not handle all mode pages.
7260 *
7261 * Return value:
7262 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
7263 **/
7264 static int ipr_reset_mode_sense_failed(struct ipr_cmnd *ipr_cmd)
7265 {
7266 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7267 u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
7268
7269 if (ioasc == IPR_IOASC_IR_INVALID_REQ_TYPE_OR_PKT) {
7270 ipr_cmd->job_step = ipr_set_supported_devs;
7271 ipr_cmd->u.res = list_entry(ioa_cfg->used_res_q.next,
7272 struct ipr_resource_entry, queue);
7273 return IPR_RC_JOB_CONTINUE;
7274 }
7275
7276 return ipr_reset_cmd_failed(ipr_cmd);
7277 }
7278
7279 /**
7280 * ipr_ioafp_mode_sense_page28 - Issue Mode Sense Page 28 to IOA
7281 * @ipr_cmd: ipr command struct
7282 *
7283 * This function sends a Page 28 mode sense to the IOA to
7284 * retrieve SCSI bus attributes.
7285 *
7286 * Return value:
7287 * IPR_RC_JOB_RETURN
7288 **/
7289 static int ipr_ioafp_mode_sense_page28(struct ipr_cmnd *ipr_cmd)
7290 {
7291 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7292
7293 ENTER;
7294 ipr_build_mode_sense(ipr_cmd, cpu_to_be32(IPR_IOA_RES_HANDLE),
7295 0x28, ioa_cfg->vpd_cbs_dma +
7296 offsetof(struct ipr_misc_cbs, mode_pages),
7297 sizeof(struct ipr_mode_pages));
7298
7299 ipr_cmd->job_step = ipr_ioafp_mode_select_page28;
7300 ipr_cmd->job_step_failed = ipr_reset_mode_sense_failed;
7301
7302 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
7303
7304 LEAVE;
7305 return IPR_RC_JOB_RETURN;
7306 }
7307
7308 /**
7309 * ipr_ioafp_mode_select_page24 - Issue Mode Select to IOA
7310 * @ipr_cmd: ipr command struct
7311 *
7312 * This function enables dual IOA RAID support if possible.
7313 *
7314 * Return value:
7315 * IPR_RC_JOB_RETURN
7316 **/
7317 static int ipr_ioafp_mode_select_page24(struct ipr_cmnd *ipr_cmd)
7318 {
7319 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7320 struct ipr_mode_pages *mode_pages = &ioa_cfg->vpd_cbs->mode_pages;
7321 struct ipr_mode_page24 *mode_page;
7322 int length;
7323
7324 ENTER;
7325 mode_page = ipr_get_mode_page(mode_pages, 0x24,
7326 sizeof(struct ipr_mode_page24));
7327
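	/*
	 * Page 24 may not be reported by adapters that lack dual IOA
	 * support, so the advanced function bit is only set when the
	 * page was actually returned in the mode sense data.
	 */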
7328 if (mode_page)
7329 mode_page->flags |= IPR_ENABLE_DUAL_IOA_AF;
7330
7331 length = mode_pages->hdr.length + 1;
7332 mode_pages->hdr.length = 0;
7333
7334 ipr_build_mode_select(ipr_cmd, cpu_to_be32(IPR_IOA_RES_HANDLE), 0x11,
7335 ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, mode_pages),
7336 length);
7337
7338 ipr_cmd->job_step = ipr_ioafp_mode_sense_page28;
7339 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
7340
7341 LEAVE;
7342 return IPR_RC_JOB_RETURN;
7343 }
7344
7345 /**
7346 * ipr_reset_mode_sense_page24_failed - Handle failure of IOAFP mode sense
7347 * @ipr_cmd: ipr command struct
7348 *
7349 * This function handles the failure of a Mode Sense to the IOAFP.
7350 * Some adapters do not handle all mode pages.
7351 *
7352 * Return value:
7353 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
7354 **/
7355 static int ipr_reset_mode_sense_page24_failed(struct ipr_cmnd *ipr_cmd)
7356 {
7357 u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
7358
7359 if (ioasc == IPR_IOASC_IR_INVALID_REQ_TYPE_OR_PKT) {
7360 ipr_cmd->job_step = ipr_ioafp_mode_sense_page28;
7361 return IPR_RC_JOB_CONTINUE;
7362 }
7363
7364 return ipr_reset_cmd_failed(ipr_cmd);
7365 }
7366
7367 /**
7368 * ipr_ioafp_mode_sense_page24 - Issue Page 24 Mode Sense to IOA
7369 * @ipr_cmd: ipr command struct
7370 *
7371 * This function sends a mode sense to the IOA to retrieve
7372 * the IOA Advanced Function Control mode page.
7373 *
7374 * Return value:
7375 * IPR_RC_JOB_RETURN
7376 **/
7377 static int ipr_ioafp_mode_sense_page24(struct ipr_cmnd *ipr_cmd)
7378 {
7379 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7380
7381 ENTER;
7382 ipr_build_mode_sense(ipr_cmd, cpu_to_be32(IPR_IOA_RES_HANDLE),
7383 0x24, ioa_cfg->vpd_cbs_dma +
7384 offsetof(struct ipr_misc_cbs, mode_pages),
7385 sizeof(struct ipr_mode_pages));
7386
7387 ipr_cmd->job_step = ipr_ioafp_mode_select_page24;
7388 ipr_cmd->job_step_failed = ipr_reset_mode_sense_page24_failed;
7389
7390 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
7391
7392 LEAVE;
7393 return IPR_RC_JOB_RETURN;
7394 }
7395
7396 /**
7397 * ipr_init_res_table - Initialize the resource table
7398 * @ipr_cmd: ipr command struct
7399 *
7400 * This function looks through the existing resource table, comparing
7401 * it with the config table. This function will take care of old/new
7402 * devices and schedule adding/removing them from the mid-layer
7403 * as appropriate.
7404 *
7405 * Return value:
7406 * IPR_RC_JOB_CONTINUE
7407 **/
7408 static int ipr_init_res_table(struct ipr_cmnd *ipr_cmd)
7409 {
7410 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7411 struct ipr_resource_entry *res, *temp;
7412 struct ipr_config_table_entry_wrapper cfgtew;
7413 int entries, found, flag, i;
7414 LIST_HEAD(old_res);
7415
7416 ENTER;
7417 if (ioa_cfg->sis64)
7418 flag = ioa_cfg->u.cfg_table64->hdr64.flags;
7419 else
7420 flag = ioa_cfg->u.cfg_table->hdr.flags;
7421
7422 if (flag & IPR_UCODE_DOWNLOAD_REQ)
7423 dev_err(&ioa_cfg->pdev->dev, "Microcode download required\n");
7424
7425 list_for_each_entry_safe(res, temp, &ioa_cfg->used_res_q, queue)
7426 list_move_tail(&res->queue, &old_res);
7427
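	/*
	 * Walk the config table just fetched from the adapter: resources
	 * that are still present are moved back to used_res_q, while new
	 * ones are pulled from the free list and flagged for addition to
	 * the SCSI midlayer.
	 */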
7428 if (ioa_cfg->sis64)
7429 entries = be16_to_cpu(ioa_cfg->u.cfg_table64->hdr64.num_entries);
7430 else
7431 entries = ioa_cfg->u.cfg_table->hdr.num_entries;
7432
7433 for (i = 0; i < entries; i++) {
7434 if (ioa_cfg->sis64)
7435 cfgtew.u.cfgte64 = &ioa_cfg->u.cfg_table64->dev[i];
7436 else
7437 cfgtew.u.cfgte = &ioa_cfg->u.cfg_table->dev[i];
7438 found = 0;
7439
7440 list_for_each_entry_safe(res, temp, &old_res, queue) {
7441 if (ipr_is_same_device(res, &cfgtew)) {
7442 list_move_tail(&res->queue, &ioa_cfg->used_res_q);
7443 found = 1;
7444 break;
7445 }
7446 }
7447
7448 if (!found) {
7449 if (list_empty(&ioa_cfg->free_res_q)) {
7450 dev_err(&ioa_cfg->pdev->dev, "Too many devices attached\n");
7451 break;
7452 }
7453
7454 found = 1;
7455 res = list_entry(ioa_cfg->free_res_q.next,
7456 struct ipr_resource_entry, queue);
7457 list_move_tail(&res->queue, &ioa_cfg->used_res_q);
7458 ipr_init_res_entry(res, &cfgtew);
7459 res->add_to_ml = 1;
7460 } else if (res->sdev && (ipr_is_vset_device(res) || ipr_is_scsi_disk(res)))
7461 res->sdev->allow_restart = 1;
7462
7463 if (found)
7464 ipr_update_res_entry(res, &cfgtew);
7465 }
7466
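	/*
	 * Anything still on old_res no longer appears in the adapter's
	 * config table: entries with a registered sdev are queued for
	 * removal from the midlayer, the rest go straight back to the
	 * free list.
	 */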
7467 list_for_each_entry_safe(res, temp, &old_res, queue) {
7468 if (res->sdev) {
7469 res->del_from_ml = 1;
7470 res->res_handle = IPR_INVALID_RES_HANDLE;
7471 list_move_tail(&res->queue, &ioa_cfg->used_res_q);
7472 }
7473 }
7474
7475 list_for_each_entry_safe(res, temp, &old_res, queue) {
7476 ipr_clear_res_target(res);
7477 list_move_tail(&res->queue, &ioa_cfg->free_res_q);
7478 }
7479
7480 if (ioa_cfg->dual_raid && ipr_dual_ioa_raid)
7481 ipr_cmd->job_step = ipr_ioafp_mode_sense_page24;
7482 else
7483 ipr_cmd->job_step = ipr_ioafp_mode_sense_page28;
7484
7485 LEAVE;
7486 return IPR_RC_JOB_CONTINUE;
7487 }
7488
7489 /**
7490 * ipr_ioafp_query_ioa_cfg - Send a Query IOA Config to the adapter.
7491 * @ipr_cmd: ipr command struct
7492 *
7493 * This function sends a Query IOA Configuration command
7494 * to the adapter to retrieve the IOA configuration table.
7495 *
7496 * Return value:
7497 * IPR_RC_JOB_RETURN
7498 **/
7499 static int ipr_ioafp_query_ioa_cfg(struct ipr_cmnd *ipr_cmd)
7500 {
7501 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7502 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
7503 struct ipr_inquiry_page3 *ucode_vpd = &ioa_cfg->vpd_cbs->page3_data;
7504 struct ipr_inquiry_cap *cap = &ioa_cfg->vpd_cbs->cap;
7505
7506 ENTER;
7507 if (cap->cap & IPR_CAP_DUAL_IOA_RAID)
7508 ioa_cfg->dual_raid = 1;
7509 dev_info(&ioa_cfg->pdev->dev, "Adapter firmware version: %02X%02X%02X%02X\n",
7510 ucode_vpd->major_release, ucode_vpd->card_type,
7511 ucode_vpd->minor_release[0], ucode_vpd->minor_release[1]);
7512 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
7513 ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
7514
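	/* CDB bytes 6-8 carry the 24-bit config table allocation length, MSB first. */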
7515 ioarcb->cmd_pkt.cdb[0] = IPR_QUERY_IOA_CONFIG;
7516 ioarcb->cmd_pkt.cdb[6] = (ioa_cfg->cfg_table_size >> 16) & 0xff;
7517 ioarcb->cmd_pkt.cdb[7] = (ioa_cfg->cfg_table_size >> 8) & 0xff;
7518 ioarcb->cmd_pkt.cdb[8] = ioa_cfg->cfg_table_size & 0xff;
7519
7520 ipr_init_ioadl(ipr_cmd, ioa_cfg->cfg_table_dma, ioa_cfg->cfg_table_size,
7521 IPR_IOADL_FLAGS_READ_LAST);
7522
7523 ipr_cmd->job_step = ipr_init_res_table;
7524
7525 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
7526
7527 LEAVE;
7528 return IPR_RC_JOB_RETURN;
7529 }
7530
7531 /**
7532 * ipr_ioafp_inquiry - Send an Inquiry to the adapter.
7533 * @ipr_cmd: ipr command struct
 * @flags: inquiry flags (CDB byte 1, e.g. the EVPD bit)
 * @page: page code to request (CDB byte 2)
 * @dma_addr: DMA address of the inquiry response buffer
 * @xfer_len: size of the inquiry response buffer
7534 *
7535 * This utility function sends an inquiry to the adapter.
7536 *
7537 * Return value:
7538 * none
7539 **/
7540 static void ipr_ioafp_inquiry(struct ipr_cmnd *ipr_cmd, u8 flags, u8 page,
7541 dma_addr_t dma_addr, u8 xfer_len)
7542 {
7543 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
7544
7545 ENTER;
7546 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_SCSICDB;
7547 ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
7548
7549 ioarcb->cmd_pkt.cdb[0] = INQUIRY;
7550 ioarcb->cmd_pkt.cdb[1] = flags;
7551 ioarcb->cmd_pkt.cdb[2] = page;
7552 ioarcb->cmd_pkt.cdb[4] = xfer_len;
7553
7554 ipr_init_ioadl(ipr_cmd, dma_addr, xfer_len, IPR_IOADL_FLAGS_READ_LAST);
7555
7556 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
7557 LEAVE;
7558 }
7559
7560 /**
7561 * ipr_inquiry_page_supported - Is the given inquiry page supported
7562 * @page0: inquiry page 0 buffer
7563 * @page: page code.
7564 *
7565 * This function determines if the specified inquiry page is supported.
7566 *
7567 * Return value:
7568 * 1 if page is supported / 0 if not
7569 **/
7570 static int ipr_inquiry_page_supported(struct ipr_inquiry_page0 *page0, u8 page)
7571 {
7572 int i;
7573
7574 for (i = 0; i < min_t(u8, page0->len, IPR_INQUIRY_PAGE0_ENTRIES); i++)
7575 if (page0->page[i] == page)
7576 return 1;
7577
7578 return 0;
7579 }
7580
7581 /**
7582 * ipr_ioafp_cap_inquiry - Send a Page 0xD0 Inquiry to the adapter.
7583 * @ipr_cmd: ipr command struct
7584 *
7585 * This function sends a Page 0xD0 inquiry to the adapter
7586 * to retrieve adapter capabilities.
7587 *
7588 * Return value:
7589 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
7590 **/
7591 static int ipr_ioafp_cap_inquiry(struct ipr_cmnd *ipr_cmd)
7592 {
7593 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7594 struct ipr_inquiry_page0 *page0 = &ioa_cfg->vpd_cbs->page0_data;
7595 struct ipr_inquiry_cap *cap = &ioa_cfg->vpd_cbs->cap;
7596
7597 ENTER;
7598 ipr_cmd->job_step = ipr_ioafp_query_ioa_cfg;
7599 memset(cap, 0, sizeof(*cap));
7600
7601 if (ipr_inquiry_page_supported(page0, 0xD0)) {
7602 ipr_ioafp_inquiry(ipr_cmd, 1, 0xD0,
7603 ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, cap),
7604 sizeof(struct ipr_inquiry_cap));
7605 return IPR_RC_JOB_RETURN;
7606 }
7607
7608 LEAVE;
7609 return IPR_RC_JOB_CONTINUE;
7610 }
7611
7612 /**
7613 * ipr_ioafp_page3_inquiry - Send a Page 3 Inquiry to the adapter.
7614 * @ipr_cmd: ipr command struct
7615 *
7616 * This function sends a Page 3 inquiry to the adapter
7617 * to retrieve software VPD information.
7618 *
7619 * Return value:
7620 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
7621 **/
7622 static int ipr_ioafp_page3_inquiry(struct ipr_cmnd *ipr_cmd)
7623 {
7624 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7625
7626 ENTER;
7627
7628 ipr_cmd->job_step = ipr_ioafp_cap_inquiry;
7629
7630 ipr_ioafp_inquiry(ipr_cmd, 1, 3,
7631 ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, page3_data),
7632 sizeof(struct ipr_inquiry_page3));
7633
7634 LEAVE;
7635 return IPR_RC_JOB_RETURN;
7636 }
7637
7638 /**
7639 * ipr_ioafp_page0_inquiry - Send a Page 0 Inquiry to the adapter.
7640 * @ipr_cmd: ipr command struct
7641 *
7642 * This function sends a Page 0 inquiry to the adapter
7643 * to retrieve supported inquiry pages.
7644 *
7645 * Return value:
7646 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
7647 **/
7648 static int ipr_ioafp_page0_inquiry(struct ipr_cmnd *ipr_cmd)
7649 {
7650 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7651 char type[5];
7652
7653 ENTER;
7654
7655 /* Grab the type out of the VPD and store it away */
7656 memcpy(type, ioa_cfg->vpd_cbs->ioa_vpd.std_inq_data.vpids.product_id, 4);
7657 type[4] = '\0';
7658 ioa_cfg->type = simple_strtoul((char *)type, NULL, 16);
7659
7660 ipr_cmd->job_step = ipr_ioafp_page3_inquiry;
7661
7662 ipr_ioafp_inquiry(ipr_cmd, 1, 0,
7663 ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, page0_data),
7664 sizeof(struct ipr_inquiry_page0));
7665
7666 LEAVE;
7667 return IPR_RC_JOB_RETURN;
7668 }
7669
7670 /**
7671 * ipr_ioafp_std_inquiry - Send a Standard Inquiry to the adapter.
7672 * @ipr_cmd: ipr command struct
7673 *
7674 * This function sends a standard inquiry to the adapter.
7675 *
7676 * Return value:
7677 * IPR_RC_JOB_RETURN
7678 **/
7679 static int ipr_ioafp_std_inquiry(struct ipr_cmnd *ipr_cmd)
7680 {
7681 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7682
7683 ENTER;
7684 ipr_cmd->job_step = ipr_ioafp_page0_inquiry;
7685
7686 ipr_ioafp_inquiry(ipr_cmd, 0, 0,
7687 ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, ioa_vpd),
7688 sizeof(struct ipr_ioa_vpd));
7689
7690 LEAVE;
7691 return IPR_RC_JOB_RETURN;
7692 }
7693
7694 /**
7695 * ipr_ioafp_identify_hrrq - Send Identify Host RRQ.
7696 * @ipr_cmd: ipr command struct
7697 *
7698 * This function sends an Identify Host Request Response Queue
7699 * command to establish the HRRQ with the adapter.
7700 *
7701 * Return value:
7702 * IPR_RC_JOB_RETURN
7703 **/
7704 static int ipr_ioafp_identify_hrrq(struct ipr_cmnd *ipr_cmd)
7705 {
7706 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7707 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
7708 struct ipr_hrr_queue *hrrq;
7709
7710 ENTER;
7711 ipr_cmd->job_step = ipr_ioafp_std_inquiry;
7712 dev_info(&ioa_cfg->pdev->dev, "Starting IOA initialization sequence.\n");
7713
7714 if (ioa_cfg->identify_hrrq_index < ioa_cfg->hrrq_num) {
7715 hrrq = &ioa_cfg->hrrq[ioa_cfg->identify_hrrq_index];
7716
7717 ioarcb->cmd_pkt.cdb[0] = IPR_ID_HOST_RR_Q;
7718 ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
7719
7720 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
7721 if (ioa_cfg->sis64)
7722 ioarcb->cmd_pkt.cdb[1] = 0x1;
7723
7724 if (ioa_cfg->nvectors == 1)
7725 ioarcb->cmd_pkt.cdb[1] &= ~IPR_ID_HRRQ_SELE_ENABLE;
7726 else
7727 ioarcb->cmd_pkt.cdb[1] |= IPR_ID_HRRQ_SELE_ENABLE;
7728
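		/*
		 * CDB bytes 2-5 carry the low 32 bits of the HRRQ DMA address
		 * and bytes 7-8 the queue size in bytes; on SIS64 adapters the
		 * upper 32 address bits follow in bytes 10-13.
		 */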
7729 ioarcb->cmd_pkt.cdb[2] =
7730 ((u64) hrrq->host_rrq_dma >> 24) & 0xff;
7731 ioarcb->cmd_pkt.cdb[3] =
7732 ((u64) hrrq->host_rrq_dma >> 16) & 0xff;
7733 ioarcb->cmd_pkt.cdb[4] =
7734 ((u64) hrrq->host_rrq_dma >> 8) & 0xff;
7735 ioarcb->cmd_pkt.cdb[5] =
7736 ((u64) hrrq->host_rrq_dma) & 0xff;
7737 ioarcb->cmd_pkt.cdb[7] =
7738 ((sizeof(u32) * hrrq->size) >> 8) & 0xff;
7739 ioarcb->cmd_pkt.cdb[8] =
7740 (sizeof(u32) * hrrq->size) & 0xff;
7741
7742 if (ioarcb->cmd_pkt.cdb[1] & IPR_ID_HRRQ_SELE_ENABLE)
7743 ioarcb->cmd_pkt.cdb[9] =
7744 ioa_cfg->identify_hrrq_index;
7745
7746 if (ioa_cfg->sis64) {
7747 ioarcb->cmd_pkt.cdb[10] =
7748 ((u64) hrrq->host_rrq_dma >> 56) & 0xff;
7749 ioarcb->cmd_pkt.cdb[11] =
7750 ((u64) hrrq->host_rrq_dma >> 48) & 0xff;
7751 ioarcb->cmd_pkt.cdb[12] =
7752 ((u64) hrrq->host_rrq_dma >> 40) & 0xff;
7753 ioarcb->cmd_pkt.cdb[13] =
7754 ((u64) hrrq->host_rrq_dma >> 32) & 0xff;
7755 }
7756
7757 if (ioarcb->cmd_pkt.cdb[1] & IPR_ID_HRRQ_SELE_ENABLE)
7758 ioarcb->cmd_pkt.cdb[14] =
7759 ioa_cfg->identify_hrrq_index;
7760
7761 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout,
7762 IPR_INTERNAL_TIMEOUT);
7763
7764 if (++ioa_cfg->identify_hrrq_index < ioa_cfg->hrrq_num)
7765 ipr_cmd->job_step = ipr_ioafp_identify_hrrq;
7766
7767 LEAVE;
7768 return IPR_RC_JOB_RETURN;
7769 }
7770
7771 LEAVE;
7772 return IPR_RC_JOB_CONTINUE;
7773 }
7774
7775 /**
7776 * ipr_reset_timer_done - Adapter reset timer function
7777 * @ipr_cmd: ipr command struct
7778 *
7779 * Description: This function is used in adapter reset processing
7780 * for timing events. If the reset_cmd pointer in the IOA
7781 * config struct does not point to this command, we are doing nested
7782 * resets and fail_all_ops will take care of freeing the
7783 * command block.
7784 *
7785 * Return value:
7786 * none
7787 **/
7788 static void ipr_reset_timer_done(struct ipr_cmnd *ipr_cmd)
7789 {
7790 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7791 unsigned long lock_flags = 0;
7792
7793 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
7794
7795 if (ioa_cfg->reset_cmd == ipr_cmd) {
7796 list_del(&ipr_cmd->queue);
7797 ipr_cmd->done(ipr_cmd);
7798 }
7799
7800 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
7801 }
7802
7803 /**
7804 * ipr_reset_start_timer - Start a timer for adapter reset job
7805 * @ipr_cmd: ipr command struct
7806 * @timeout: timeout value
7807 *
7808 * Description: This function is used in adapter reset processing
7809 * for timing events. If the reset_cmd pointer in the IOA
7810 * config struct does not point to this command, we are doing nested
7811 * resets and fail_all_ops will take care of freeing the
7812 * command block.
7813 *
7814 * Return value:
7815 * none
7816 **/
7817 static void ipr_reset_start_timer(struct ipr_cmnd *ipr_cmd,
7818 unsigned long timeout)
7819 {
7820
7821 ENTER;
7822 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_pending_q);
7823 ipr_cmd->done = ipr_reset_ioa_job;
7824
7825 ipr_cmd->timer.data = (unsigned long) ipr_cmd;
7826 ipr_cmd->timer.expires = jiffies + timeout;
7827 ipr_cmd->timer.function = (void (*)(unsigned long))ipr_reset_timer_done;
7828 add_timer(&ipr_cmd->timer);
7829 }
7830
7831 /**
7832 * ipr_init_ioa_mem - Initialize ioa_cfg control block
7833 * @ioa_cfg: ioa cfg struct
7834 *
7835 * Return value:
7836 * nothing
7837 **/
7838 static void ipr_init_ioa_mem(struct ipr_ioa_cfg *ioa_cfg)
7839 {
7840 struct ipr_hrr_queue *hrrq;
7841
7842 for_each_hrrq(hrrq, ioa_cfg) {
7843 spin_lock(&hrrq->_lock);
7844 memset(hrrq->host_rrq, 0, sizeof(u32) * hrrq->size);
7845
7846 /* Initialize Host RRQ pointers */
7847 hrrq->hrrq_start = hrrq->host_rrq;
7848 hrrq->hrrq_end = &hrrq->host_rrq[hrrq->size - 1];
7849 hrrq->hrrq_curr = hrrq->hrrq_start;
7850 hrrq->toggle_bit = 1;
7851 spin_unlock(&hrrq->_lock);
7852 }
7853 wmb();
7854
7855 ioa_cfg->identify_hrrq_index = 0;
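	/*
	 * With multiple HRRQs, queue 0 is reserved for internal adapter
	 * commands, so round-robin dispatch of normal I/O starts at index 1.
	 */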
7856 if (ioa_cfg->hrrq_num == 1)
7857 atomic_set(&ioa_cfg->hrrq_index, 0);
7858 else
7859 atomic_set(&ioa_cfg->hrrq_index, 1);
7860
7861 /* Zero out config table */
7862 memset(ioa_cfg->u.cfg_table, 0, ioa_cfg->cfg_table_size);
7863 }
7864
7865 /**
7866 * ipr_reset_next_stage - Process IPL stage change based on feedback register.
7867 * @ipr_cmd: ipr command struct
7868 *
7869 * Return value:
7870 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
7871 **/
7872 static int ipr_reset_next_stage(struct ipr_cmnd *ipr_cmd)
7873 {
7874 unsigned long stage, stage_time;
7875 u32 feedback;
7876 volatile u32 int_reg;
7877 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7878 u64 maskval = 0;
7879
7880 feedback = readl(ioa_cfg->regs.init_feedback_reg);
7881 stage = feedback & IPR_IPL_INIT_STAGE_MASK;
7882 stage_time = feedback & IPR_IPL_INIT_STAGE_TIME_MASK;
7883
7884 ipr_dbg("IPL stage = 0x%lx, IPL stage time = %ld\n", stage, stage_time);
7885
7886 /* sanity check the stage_time value */
7887 if (stage_time == 0)
7888 stage_time = IPR_IPL_INIT_DEFAULT_STAGE_TIME;
7889 else if (stage_time < IPR_IPL_INIT_MIN_STAGE_TIME)
7890 stage_time = IPR_IPL_INIT_MIN_STAGE_TIME;
7891 else if (stage_time > IPR_LONG_OPERATIONAL_TIMEOUT)
7892 stage_time = IPR_LONG_OPERATIONAL_TIMEOUT;
7893
7894 if (stage == IPR_IPL_INIT_STAGE_UNKNOWN) {
7895 writel(IPR_PCII_IPL_STAGE_CHANGE, ioa_cfg->regs.set_interrupt_mask_reg);
7896 int_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
7897 stage_time = ioa_cfg->transop_timeout;
7898 ipr_cmd->job_step = ipr_ioafp_identify_hrrq;
7899 } else if (stage == IPR_IPL_INIT_STAGE_TRANSOP) {
7900 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg32);
7901 if (int_reg & IPR_PCII_IOA_TRANS_TO_OPER) {
7902 ipr_cmd->job_step = ipr_ioafp_identify_hrrq;
7903 maskval = IPR_PCII_IPL_STAGE_CHANGE;
7904 maskval = (maskval << 32) | IPR_PCII_IOA_TRANS_TO_OPER;
7905 writeq(maskval, ioa_cfg->regs.set_interrupt_mask_reg);
7906 int_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
7907 return IPR_RC_JOB_CONTINUE;
7908 }
7909 }
7910
7911 ipr_cmd->timer.data = (unsigned long) ipr_cmd;
7912 ipr_cmd->timer.expires = jiffies + stage_time * HZ;
7913 ipr_cmd->timer.function = (void (*)(unsigned long))ipr_oper_timeout;
7914 ipr_cmd->done = ipr_reset_ioa_job;
7915 add_timer(&ipr_cmd->timer);
7916
7917 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_pending_q);
7918
7919 return IPR_RC_JOB_RETURN;
7920 }
7921
7922 /**
7923 * ipr_reset_enable_ioa - Enable the IOA following a reset.
7924 * @ipr_cmd: ipr command struct
7925 *
7926 * This function reinitializes some control blocks and
7927 * enables destructive diagnostics on the adapter.
7928 *
7929 * Return value:
7930 * IPR_RC_JOB_RETURN
7931 **/
7932 static int ipr_reset_enable_ioa(struct ipr_cmnd *ipr_cmd)
7933 {
7934 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7935 volatile u32 int_reg;
7936 volatile u64 maskval;
7937 int i;
7938
7939 ENTER;
7940 ipr_cmd->job_step = ipr_ioafp_identify_hrrq;
7941 ipr_init_ioa_mem(ioa_cfg);
7942
7943 for (i = 0; i < ioa_cfg->hrrq_num; i++) {
7944 spin_lock(&ioa_cfg->hrrq[i]._lock);
7945 ioa_cfg->hrrq[i].allow_interrupts = 1;
7946 spin_unlock(&ioa_cfg->hrrq[i]._lock);
7947 }
7948 wmb();
7949 if (ioa_cfg->sis64) {
7950 /* Set the adapter to the correct endian mode. */
7951 writel(IPR_ENDIAN_SWAP_KEY, ioa_cfg->regs.endian_swap_reg);
7952 int_reg = readl(ioa_cfg->regs.endian_swap_reg);
7953 }
7954
7955 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg32);
7956
7957 if (int_reg & IPR_PCII_IOA_TRANS_TO_OPER) {
7958 writel((IPR_PCII_ERROR_INTERRUPTS | IPR_PCII_HRRQ_UPDATED),
7959 ioa_cfg->regs.clr_interrupt_mask_reg32);
7960 int_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
7961 return IPR_RC_JOB_CONTINUE;
7962 }
7963
7964 /* Enable destructive diagnostics on IOA */
7965 writel(ioa_cfg->doorbell, ioa_cfg->regs.set_uproc_interrupt_reg32);
7966
7967 if (ioa_cfg->sis64) {
7968 maskval = IPR_PCII_IPL_STAGE_CHANGE;
7969 maskval = (maskval << 32) | IPR_PCII_OPER_INTERRUPTS;
7970 writeq(maskval, ioa_cfg->regs.clr_interrupt_mask_reg);
7971 } else
7972 writel(IPR_PCII_OPER_INTERRUPTS, ioa_cfg->regs.clr_interrupt_mask_reg32);
7973
7974 int_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
7975
7976 dev_info(&ioa_cfg->pdev->dev, "Initializing IOA.\n");
7977
7978 if (ioa_cfg->sis64) {
7979 ipr_cmd->job_step = ipr_reset_next_stage;
7980 return IPR_RC_JOB_CONTINUE;
7981 }
7982
7983 ipr_cmd->timer.data = (unsigned long) ipr_cmd;
7984 ipr_cmd->timer.expires = jiffies + (ioa_cfg->transop_timeout * HZ);
7985 ipr_cmd->timer.function = (void (*)(unsigned long))ipr_oper_timeout;
7986 ipr_cmd->done = ipr_reset_ioa_job;
7987 add_timer(&ipr_cmd->timer);
7988 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_pending_q);
7989
7990 LEAVE;
7991 return IPR_RC_JOB_RETURN;
7992 }
7993
7994 /**
7995 * ipr_reset_wait_for_dump - Wait for a dump to timeout.
7996 * @ipr_cmd: ipr command struct
7997 *
7998 * This function is invoked when an adapter dump has run out
7999 * of processing time.
8000 *
8001 * Return value:
8002 * IPR_RC_JOB_CONTINUE
8003 **/
8004 static int ipr_reset_wait_for_dump(struct ipr_cmnd *ipr_cmd)
8005 {
8006 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8007
8008 if (ioa_cfg->sdt_state == GET_DUMP)
8009 ioa_cfg->sdt_state = WAIT_FOR_DUMP;
8010 else if (ioa_cfg->sdt_state == READ_DUMP)
8011 ioa_cfg->sdt_state = ABORT_DUMP;
8012
8013 ioa_cfg->dump_timeout = 1;
8014 ipr_cmd->job_step = ipr_reset_alert;
8015
8016 return IPR_RC_JOB_CONTINUE;
8017 }
8018
8019 /**
8020 * ipr_unit_check_no_data - Log a unit check/no data error log
8021 * @ioa_cfg: ioa config struct
8022 *
8023 * Logs an error indicating the adapter unit checked, but for some
8024 * reason, we were unable to fetch the unit check buffer.
8025 *
8026 * Return value:
8027 * nothing
8028 **/
8029 static void ipr_unit_check_no_data(struct ipr_ioa_cfg *ioa_cfg)
8030 {
8031 ioa_cfg->errors_logged++;
8032 dev_err(&ioa_cfg->pdev->dev, "IOA unit check with no data\n");
8033 }
8034
8035 /**
8036 * ipr_get_unit_check_buffer - Get the unit check buffer from the IOA
8037 * @ioa_cfg: ioa config struct
8038 *
8039 * Fetches the unit check buffer from the adapter by clocking the data
8040 * through the mailbox register.
8041 *
8042 * Return value:
8043 * nothing
8044 **/
8045 static void ipr_get_unit_check_buffer(struct ipr_ioa_cfg *ioa_cfg)
8046 {
8047 unsigned long mailbox;
8048 struct ipr_hostrcb *hostrcb;
8049 struct ipr_uc_sdt sdt;
8050 int rc, length;
8051 u32 ioasc;
8052
8053 mailbox = readl(ioa_cfg->ioa_mailbox);
8054
8055 if (!ioa_cfg->sis64 && !ipr_sdt_is_fmt2(mailbox)) {
8056 ipr_unit_check_no_data(ioa_cfg);
8057 return;
8058 }
8059
8060 memset(&sdt, 0, sizeof(struct ipr_uc_sdt));
8061 rc = ipr_get_ldump_data_section(ioa_cfg, mailbox, (__be32 *) &sdt,
8062 (sizeof(struct ipr_uc_sdt)) / sizeof(__be32));
8063
8064 if (rc || !(sdt.entry[0].flags & IPR_SDT_VALID_ENTRY) ||
8065 ((be32_to_cpu(sdt.hdr.state) != IPR_FMT3_SDT_READY_TO_USE) &&
8066 (be32_to_cpu(sdt.hdr.state) != IPR_FMT2_SDT_READY_TO_USE))) {
8067 ipr_unit_check_no_data(ioa_cfg);
8068 return;
8069 }
8070
8071 /* Find length of the first sdt entry (UC buffer) */
8072 if (be32_to_cpu(sdt.hdr.state) == IPR_FMT3_SDT_READY_TO_USE)
8073 length = be32_to_cpu(sdt.entry[0].end_token);
8074 else
8075 length = (be32_to_cpu(sdt.entry[0].end_token) -
8076 be32_to_cpu(sdt.entry[0].start_token)) &
8077 IPR_FMT2_MBX_ADDR_MASK;
8078
8079 hostrcb = list_entry(ioa_cfg->hostrcb_free_q.next,
8080 struct ipr_hostrcb, queue);
8081 list_del(&hostrcb->queue);
8082 memset(&hostrcb->hcam, 0, sizeof(hostrcb->hcam));
8083
8084 rc = ipr_get_ldump_data_section(ioa_cfg,
8085 be32_to_cpu(sdt.entry[0].start_token),
8086 (__be32 *)&hostrcb->hcam,
8087 min(length, (int)sizeof(hostrcb->hcam)) / sizeof(__be32));
8088
8089 if (!rc) {
8090 ipr_handle_log_data(ioa_cfg, hostrcb);
8091 ioasc = be32_to_cpu(hostrcb->hcam.u.error.fd_ioasc);
8092 if (ioasc == IPR_IOASC_NR_IOA_RESET_REQUIRED &&
8093 ioa_cfg->sdt_state == GET_DUMP)
8094 ioa_cfg->sdt_state = WAIT_FOR_DUMP;
8095 } else
8096 ipr_unit_check_no_data(ioa_cfg);
8097
8098 list_add_tail(&hostrcb->queue, &ioa_cfg->hostrcb_free_q);
8099 }
8100
8101 /**
8102 * ipr_reset_get_unit_check_job - Call to get the unit check buffer.
8103 * @ipr_cmd: ipr command struct
8104 *
8105 * Description: This function fetches the unit check buffer from the adapter.
8106 *
8107 * Return value:
8108 * IPR_RC_JOB_RETURN
8109 **/
8110 static int ipr_reset_get_unit_check_job(struct ipr_cmnd *ipr_cmd)
8111 {
8112 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8113
8114 ENTER;
8115 ioa_cfg->ioa_unit_checked = 0;
8116 ipr_get_unit_check_buffer(ioa_cfg);
8117 ipr_cmd->job_step = ipr_reset_alert;
8118 ipr_reset_start_timer(ipr_cmd, 0);
8119
8120 LEAVE;
8121 return IPR_RC_JOB_RETURN;
8122 }
8123
8124 /**
8125 * ipr_reset_restore_cfg_space - Restore PCI config space.
8126 * @ipr_cmd: ipr command struct
8127 *
8128 * Description: This function restores the saved PCI config space of
8129 * the adapter, fails all outstanding ops back to the callers, and
8130 * fetches the dump/unit check if applicable to this reset.
8131 *
8132 * Return value:
8133 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
8134 **/
8135 static int ipr_reset_restore_cfg_space(struct ipr_cmnd *ipr_cmd)
8136 {
8137 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8138 u32 int_reg;
8139
8140 ENTER;
8141 ioa_cfg->pdev->state_saved = true;
8142 pci_restore_state(ioa_cfg->pdev);
8143
8144 if (ipr_set_pcix_cmd_reg(ioa_cfg)) {
8145 ipr_cmd->s.ioasa.hdr.ioasc = cpu_to_be32(IPR_IOASC_PCI_ACCESS_ERROR);
8146 return IPR_RC_JOB_CONTINUE;
8147 }
8148
8149 ipr_fail_all_ops(ioa_cfg);
8150
8151 if (ioa_cfg->sis64) {
8152 /* Set the adapter to the correct endian mode. */
8153 writel(IPR_ENDIAN_SWAP_KEY, ioa_cfg->regs.endian_swap_reg);
8154 int_reg = readl(ioa_cfg->regs.endian_swap_reg);
8155 }
8156
8157 if (ioa_cfg->ioa_unit_checked) {
8158 if (ioa_cfg->sis64) {
8159 ipr_cmd->job_step = ipr_reset_get_unit_check_job;
8160 ipr_reset_start_timer(ipr_cmd, IPR_DUMP_DELAY_TIMEOUT);
8161 return IPR_RC_JOB_RETURN;
8162 } else {
8163 ioa_cfg->ioa_unit_checked = 0;
8164 ipr_get_unit_check_buffer(ioa_cfg);
8165 ipr_cmd->job_step = ipr_reset_alert;
8166 ipr_reset_start_timer(ipr_cmd, 0);
8167 return IPR_RC_JOB_RETURN;
8168 }
8169 }
8170
8171 if (ioa_cfg->in_ioa_bringdown) {
8172 ipr_cmd->job_step = ipr_ioa_bringdown_done;
8173 } else {
8174 ipr_cmd->job_step = ipr_reset_enable_ioa;
8175
8176 if (GET_DUMP == ioa_cfg->sdt_state) {
8177 ioa_cfg->sdt_state = READ_DUMP;
8178 ioa_cfg->dump_timeout = 0;
8179 if (ioa_cfg->sis64)
8180 ipr_reset_start_timer(ipr_cmd, IPR_SIS64_DUMP_TIMEOUT);
8181 else
8182 ipr_reset_start_timer(ipr_cmd, IPR_SIS32_DUMP_TIMEOUT);
8183 ipr_cmd->job_step = ipr_reset_wait_for_dump;
8184 schedule_work(&ioa_cfg->work_q);
8185 return IPR_RC_JOB_RETURN;
8186 }
8187 }
8188
8189 LEAVE;
8190 return IPR_RC_JOB_CONTINUE;
8191 }
8192
8193 /**
8194 * ipr_reset_bist_done - BIST has completed on the adapter.
8195 * @ipr_cmd: ipr command struct
8196 *
8197 * Description: Unblock config space and resume the reset process.
8198 *
8199 * Return value:
8200 * IPR_RC_JOB_CONTINUE
8201 **/
8202 static int ipr_reset_bist_done(struct ipr_cmnd *ipr_cmd)
8203 {
8204 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8205
8206 ENTER;
8207 if (ioa_cfg->cfg_locked)
8208 pci_cfg_access_unlock(ioa_cfg->pdev);
8209 ioa_cfg->cfg_locked = 0;
8210 ipr_cmd->job_step = ipr_reset_restore_cfg_space;
8211 LEAVE;
8212 return IPR_RC_JOB_CONTINUE;
8213 }
8214
8215 /**
8216 * ipr_reset_start_bist - Run BIST on the adapter.
8217 * @ipr_cmd: ipr command struct
8218 *
8219 * Description: This function runs BIST on the adapter, then delays 2 seconds.
8220 *
8221 * Return value:
8222 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
8223 **/
8224 static int ipr_reset_start_bist(struct ipr_cmnd *ipr_cmd)
8225 {
8226 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8227 int rc = PCIBIOS_SUCCESSFUL;
8228
8229 ENTER;
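	/*
	 * Chips with an MMIO BIST method (SIS64) start BIST with a doorbell
	 * write; the others use the standard PCI config space BIST register.
	 */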
8230 if (ioa_cfg->ipr_chip->bist_method == IPR_MMIO)
8231 writel(IPR_UPROCI_SIS64_START_BIST,
8232 ioa_cfg->regs.set_uproc_interrupt_reg32);
8233 else
8234 rc = pci_write_config_byte(ioa_cfg->pdev, PCI_BIST, PCI_BIST_START);
8235
8236 if (rc == PCIBIOS_SUCCESSFUL) {
8237 ipr_cmd->job_step = ipr_reset_bist_done;
8238 ipr_reset_start_timer(ipr_cmd, IPR_WAIT_FOR_BIST_TIMEOUT);
8239 rc = IPR_RC_JOB_RETURN;
8240 } else {
8241 if (ioa_cfg->cfg_locked)
8242 pci_cfg_access_unlock(ipr_cmd->ioa_cfg->pdev);
8243 ioa_cfg->cfg_locked = 0;
8244 ipr_cmd->s.ioasa.hdr.ioasc = cpu_to_be32(IPR_IOASC_PCI_ACCESS_ERROR);
8245 rc = IPR_RC_JOB_CONTINUE;
8246 }
8247
8248 LEAVE;
8249 return rc;
8250 }
8251
8252 /**
8253 * ipr_reset_slot_reset_done - Clear PCI reset to the adapter
8254 * @ipr_cmd: ipr command struct
8255 *
8256 * Description: This clears PCI reset to the adapter and delays two seconds.
8257 *
8258 * Return value:
8259 * IPR_RC_JOB_RETURN
8260 **/
8261 static int ipr_reset_slot_reset_done(struct ipr_cmnd *ipr_cmd)
8262 {
8263 ENTER;
8264 pci_set_pcie_reset_state(ipr_cmd->ioa_cfg->pdev, pcie_deassert_reset);
8265 ipr_cmd->job_step = ipr_reset_bist_done;
8266 ipr_reset_start_timer(ipr_cmd, IPR_WAIT_FOR_BIST_TIMEOUT);
8267 LEAVE;
8268 return IPR_RC_JOB_RETURN;
8269 }
8270
8271 /**
8272 * ipr_reset_slot_reset - Reset the PCI slot of the adapter.
8273 * @ipr_cmd: ipr command struct
8274 *
8275 * Description: This asserts PCI reset to the adapter.
8276 *
8277 * Return value:
8278 * IPR_RC_JOB_RETURN
8279 **/
8280 static int ipr_reset_slot_reset(struct ipr_cmnd *ipr_cmd)
8281 {
8282 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8283 struct pci_dev *pdev = ioa_cfg->pdev;
8284
8285 ENTER;
8286 pci_set_pcie_reset_state(pdev, pcie_warm_reset);
8287 ipr_cmd->job_step = ipr_reset_slot_reset_done;
8288 ipr_reset_start_timer(ipr_cmd, IPR_PCI_RESET_TIMEOUT);
8289 LEAVE;
8290 return IPR_RC_JOB_RETURN;
8291 }
8292
8293 /**
8294 * ipr_reset_block_config_access_wait - Wait for permission to block config access
8295 * @ipr_cmd: ipr command struct
8296 *
8297 * Description: This attempts to block config access to the IOA.
8298 *
8299 * Return value:
8300 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
8301 **/
8302 static int ipr_reset_block_config_access_wait(struct ipr_cmnd *ipr_cmd)
8303 {
8304 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8305 int rc = IPR_RC_JOB_CONTINUE;
8306
8307 if (pci_cfg_access_trylock(ioa_cfg->pdev)) {
8308 ioa_cfg->cfg_locked = 1;
8309 ipr_cmd->job_step = ioa_cfg->reset;
8310 } else {
8311 if (ipr_cmd->u.time_left) {
8312 rc = IPR_RC_JOB_RETURN;
8313 ipr_cmd->u.time_left -= IPR_CHECK_FOR_RESET_TIMEOUT;
8314 ipr_reset_start_timer(ipr_cmd,
8315 IPR_CHECK_FOR_RESET_TIMEOUT);
8316 } else {
8317 ipr_cmd->job_step = ioa_cfg->reset;
8318 dev_err(&ioa_cfg->pdev->dev,
8319 "Timed out waiting to lock config access. Resetting anyway.\n");
8320 }
8321 }
8322
8323 return rc;
8324 }
8325
8326 /**
8327 * ipr_reset_block_config_access - Block config access to the IOA
8328 * @ipr_cmd: ipr command struct
8329 *
8330 * Description: This attempts to block config access to the IOA
8331 *
8332 * Return value:
8333 * IPR_RC_JOB_CONTINUE
8334 **/
8335 static int ipr_reset_block_config_access(struct ipr_cmnd *ipr_cmd)
8336 {
8337 ipr_cmd->ioa_cfg->cfg_locked = 0;
8338 ipr_cmd->job_step = ipr_reset_block_config_access_wait;
8339 ipr_cmd->u.time_left = IPR_WAIT_FOR_RESET_TIMEOUT;
8340 return IPR_RC_JOB_CONTINUE;
8341 }
8342
8343 /**
8344 * ipr_reset_allowed - Query whether or not IOA can be reset
8345 * @ioa_cfg: ioa config struct
8346 *
8347 * Return value:
8348 * 0 if reset not allowed / non-zero if reset is allowed
8349 **/
8350 static int ipr_reset_allowed(struct ipr_ioa_cfg *ioa_cfg)
8351 {
8352 volatile u32 temp_reg;
8353
8354 temp_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
8355 return ((temp_reg & IPR_PCII_CRITICAL_OPERATION) == 0);
8356 }
8357
8358 /**
8359 * ipr_reset_wait_to_start_bist - Wait for permission to reset IOA.
8360 * @ipr_cmd: ipr command struct
8361 *
8362 * Description: This function waits for adapter permission to run BIST,
8363 * then runs BIST. If the adapter does not give permission after a
8364 * reasonable time, we will reset the adapter anyway. Resetting the
8365 * adapter without warning it risks losing its persistent error log;
8366 * if the adapter is reset while it is writing to its flash, that
8367 * flash segment will have bad ECC and be zeroed.
8369 *
8370 * Return value:
8371 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
8372 **/
8373 static int ipr_reset_wait_to_start_bist(struct ipr_cmnd *ipr_cmd)
8374 {
8375 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8376 int rc = IPR_RC_JOB_RETURN;
8377
8378 if (!ipr_reset_allowed(ioa_cfg) && ipr_cmd->u.time_left) {
8379 ipr_cmd->u.time_left -= IPR_CHECK_FOR_RESET_TIMEOUT;
8380 ipr_reset_start_timer(ipr_cmd, IPR_CHECK_FOR_RESET_TIMEOUT);
8381 } else {
8382 ipr_cmd->job_step = ipr_reset_block_config_access;
8383 rc = IPR_RC_JOB_CONTINUE;
8384 }
8385
8386 return rc;
8387 }
8388
8389 /**
8390 * ipr_reset_alert - Alert the adapter of a pending reset
8391 * @ipr_cmd: ipr command struct
8392 *
8393 * Description: This function alerts the adapter that it will be reset.
8394 * If memory space is not currently enabled, proceed directly
8395 * to running BIST on the adapter. The timer must always be started
8396 * so we guarantee we do not run BIST from ipr_isr.
8397 *
8398 * Return value:
8399 * IPR_RC_JOB_RETURN
8400 **/
8401 static int ipr_reset_alert(struct ipr_cmnd *ipr_cmd)
8402 {
8403 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8404 u16 cmd_reg;
8405 int rc;
8406
8407 ENTER;
8408 rc = pci_read_config_word(ioa_cfg->pdev, PCI_COMMAND, &cmd_reg);
8409
8410 if ((rc == PCIBIOS_SUCCESSFUL) && (cmd_reg & PCI_COMMAND_MEMORY)) {
8411 ipr_mask_and_clear_interrupts(ioa_cfg, ~0);
8412 writel(IPR_UPROCI_RESET_ALERT, ioa_cfg->regs.set_uproc_interrupt_reg32);
8413 ipr_cmd->job_step = ipr_reset_wait_to_start_bist;
8414 } else {
8415 ipr_cmd->job_step = ipr_reset_block_config_access;
8416 }
8417
8418 ipr_cmd->u.time_left = IPR_WAIT_FOR_RESET_TIMEOUT;
8419 ipr_reset_start_timer(ipr_cmd, IPR_CHECK_FOR_RESET_TIMEOUT);
8420
8421 LEAVE;
8422 return IPR_RC_JOB_RETURN;
8423 }
8424
8425 /**
8426 * ipr_reset_ucode_download_done - Microcode download completion
8427 * @ipr_cmd: ipr command struct
8428 *
8429 * Description: This function unmaps the microcode download buffer.
8430 *
8431 * Return value:
8432 * IPR_RC_JOB_CONTINUE
8433 **/
8434 static int ipr_reset_ucode_download_done(struct ipr_cmnd *ipr_cmd)
8435 {
8436 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8437 struct ipr_sglist *sglist = ioa_cfg->ucode_sglist;
8438
8439 pci_unmap_sg(ioa_cfg->pdev, sglist->scatterlist,
8440 sglist->num_sg, DMA_TO_DEVICE);
8441
8442 ipr_cmd->job_step = ipr_reset_alert;
8443 return IPR_RC_JOB_CONTINUE;
8444 }
8445
8446 /**
8447 * ipr_reset_ucode_download - Download microcode to the adapter
8448 * @ipr_cmd: ipr command struct
8449 *
8450 * Description: This function checks to see if there is microcode
8451 * to download to the adapter. If there is, a download is performed.
8452 *
8453 * Return value:
8454 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
8455 **/
8456 static int ipr_reset_ucode_download(struct ipr_cmnd *ipr_cmd)
8457 {
8458 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8459 struct ipr_sglist *sglist = ioa_cfg->ucode_sglist;
8460
8461 ENTER;
8462 ipr_cmd->job_step = ipr_reset_alert;
8463
8464 if (!sglist)
8465 return IPR_RC_JOB_CONTINUE;
8466
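	/*
	 * WRITE BUFFER with the download-and-save mode: CDB bytes 6-8 hold
	 * the 24-bit parameter list length, i.e. the size of the microcode
	 * image being transferred.
	 */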
8467 ipr_cmd->ioarcb.res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
8468 ipr_cmd->ioarcb.cmd_pkt.request_type = IPR_RQTYPE_SCSICDB;
8469 ipr_cmd->ioarcb.cmd_pkt.cdb[0] = WRITE_BUFFER;
8470 ipr_cmd->ioarcb.cmd_pkt.cdb[1] = IPR_WR_BUF_DOWNLOAD_AND_SAVE;
8471 ipr_cmd->ioarcb.cmd_pkt.cdb[6] = (sglist->buffer_len & 0xff0000) >> 16;
8472 ipr_cmd->ioarcb.cmd_pkt.cdb[7] = (sglist->buffer_len & 0x00ff00) >> 8;
8473 ipr_cmd->ioarcb.cmd_pkt.cdb[8] = sglist->buffer_len & 0x0000ff;
8474
8475 if (ioa_cfg->sis64)
8476 ipr_build_ucode_ioadl64(ipr_cmd, sglist);
8477 else
8478 ipr_build_ucode_ioadl(ipr_cmd, sglist);
8479 ipr_cmd->job_step = ipr_reset_ucode_download_done;
8480
8481 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout,
8482 IPR_WRITE_BUFFER_TIMEOUT);
8483
8484 LEAVE;
8485 return IPR_RC_JOB_RETURN;
8486 }
8487
8488 /**
8489 * ipr_reset_shutdown_ioa - Shutdown the adapter
8490 * @ipr_cmd: ipr command struct
8491 *
8492 * Description: This function issues an adapter shutdown of the
8493 * specified type to the specified adapter as part of the
8494 * adapter reset job.
8495 *
8496 * Return value:
8497 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
8498 **/
8499 static int ipr_reset_shutdown_ioa(struct ipr_cmnd *ipr_cmd)
8500 {
8501 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8502 enum ipr_shutdown_type shutdown_type = ipr_cmd->u.shutdown_type;
8503 unsigned long timeout;
8504 int rc = IPR_RC_JOB_CONTINUE;
8505
8506 ENTER;
8507 if (shutdown_type != IPR_SHUTDOWN_NONE &&
8508 !ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead) {
8509 ipr_cmd->ioarcb.res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
8510 ipr_cmd->ioarcb.cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
8511 ipr_cmd->ioarcb.cmd_pkt.cdb[0] = IPR_IOA_SHUTDOWN;
8512 ipr_cmd->ioarcb.cmd_pkt.cdb[1] = shutdown_type;
8513
8514 if (shutdown_type == IPR_SHUTDOWN_NORMAL)
8515 timeout = IPR_SHUTDOWN_TIMEOUT;
8516 else if (shutdown_type == IPR_SHUTDOWN_PREPARE_FOR_NORMAL)
8517 timeout = IPR_INTERNAL_TIMEOUT;
8518 else if (ioa_cfg->dual_raid && ipr_dual_ioa_raid)
8519 timeout = IPR_DUAL_IOA_ABBR_SHUTDOWN_TO;
8520 else
8521 timeout = IPR_ABBREV_SHUTDOWN_TIMEOUT;
8522
8523 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, timeout);
8524
8525 rc = IPR_RC_JOB_RETURN;
8526 ipr_cmd->job_step = ipr_reset_ucode_download;
8527 } else
8528 ipr_cmd->job_step = ipr_reset_alert;
8529
8530 LEAVE;
8531 return rc;
8532 }
8533
8534 /**
8535 * ipr_reset_ioa_job - Adapter reset job
8536 * @ipr_cmd: ipr command struct
8537 *
8538 * Description: This function is the job router for the adapter reset job.
8539 *
8540 * Return value:
8541 * none
8542 **/
8543 static void ipr_reset_ioa_job(struct ipr_cmnd *ipr_cmd)
8544 {
8545 u32 rc, ioasc;
8546 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8547
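	/*
	 * Job steps run back to back for as long as they return
	 * IPR_RC_JOB_CONTINUE; a step that issues an adapter command
	 * returns IPR_RC_JOB_RETURN instead, and this router is re-entered
	 * from that command's completion handler.
	 */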
8548 do {
8549 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
8550
8551 if (ioa_cfg->reset_cmd != ipr_cmd) {
8552 /*
8553 * We are doing nested adapter resets and this is
8554 * not the current reset job.
8555 */
8556 list_add_tail(&ipr_cmd->queue,
8557 &ipr_cmd->hrrq->hrrq_free_q);
8558 return;
8559 }
8560
8561 if (IPR_IOASC_SENSE_KEY(ioasc)) {
8562 rc = ipr_cmd->job_step_failed(ipr_cmd);
8563 if (rc == IPR_RC_JOB_RETURN)
8564 return;
8565 }
8566
8567 ipr_reinit_ipr_cmnd(ipr_cmd);
8568 ipr_cmd->job_step_failed = ipr_reset_cmd_failed;
8569 rc = ipr_cmd->job_step(ipr_cmd);
8570 } while (rc == IPR_RC_JOB_CONTINUE);
8571 }
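/*
 * For reference, a typical job step in this file has the shape below
 * (an illustrative sketch only, not a function in the driver;
 * ipr_example_job_step and ipr_next_job_step are placeholder names):
 *
 *	static int ipr_example_job_step(struct ipr_cmnd *ipr_cmd)
 *	{
 *		ipr_cmd->job_step = ipr_next_job_step;
 *		ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout,
 *			   IPR_INTERNAL_TIMEOUT);
 *		return IPR_RC_JOB_RETURN;
 *	}
 */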
8572
8573 /**
8574 * _ipr_initiate_ioa_reset - Initiate an adapter reset
8575 * @ioa_cfg: ioa config struct
8576 * @job_step: first job step of reset job
8577 * @shutdown_type: shutdown type
8578 *
8579 * Description: This function will initiate the reset of the given adapter
8580 * starting at the selected job step.
8581 * If the caller needs to wait on the completion of the reset,
8582 * the caller must sleep on the reset_wait_q.
8583 *
8584 * Return value:
8585 * none
8586 **/
8587 static void _ipr_initiate_ioa_reset(struct ipr_ioa_cfg *ioa_cfg,
8588 int (*job_step) (struct ipr_cmnd *),
8589 enum ipr_shutdown_type shutdown_type)
8590 {
8591 struct ipr_cmnd *ipr_cmd;
8592 int i;
8593
8594 ioa_cfg->in_reset_reload = 1;
8595 for (i = 0; i < ioa_cfg->hrrq_num; i++) {
8596 spin_lock(&ioa_cfg->hrrq[i]._lock);
8597 ioa_cfg->hrrq[i].allow_cmds = 0;
8598 spin_unlock(&ioa_cfg->hrrq[i]._lock);
8599 }
8600 wmb();
8601 if (!ioa_cfg->hrrq[IPR_INIT_HRRQ].removing_ioa)
8602 scsi_block_requests(ioa_cfg->host);
8603
8604 ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
8605 ioa_cfg->reset_cmd = ipr_cmd;
8606 ipr_cmd->job_step = job_step;
8607 ipr_cmd->u.shutdown_type = shutdown_type;
8608
8609 ipr_reset_ioa_job(ipr_cmd);
8610 }
8611
8612 /**
8613 * ipr_initiate_ioa_reset - Initiate an adapter reset
8614 * @ioa_cfg: ioa config struct
8615 * @shutdown_type: shutdown type
8616 *
8617 * Description: This function will initiate the reset of the given adapter.
8618 * If the caller needs to wait on the completion of the reset,
8619 * the caller must sleep on the reset_wait_q.
8620 *
8621 * Return value:
8622 * none
8623 **/
8624 static void ipr_initiate_ioa_reset(struct ipr_ioa_cfg *ioa_cfg,
8625 enum ipr_shutdown_type shutdown_type)
8626 {
8627 int i;
8628
8629 if (ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead)
8630 return;
8631
8632 if (ioa_cfg->in_reset_reload) {
8633 if (ioa_cfg->sdt_state == GET_DUMP)
8634 ioa_cfg->sdt_state = WAIT_FOR_DUMP;
8635 else if (ioa_cfg->sdt_state == READ_DUMP)
8636 ioa_cfg->sdt_state = ABORT_DUMP;
8637 }
8638
8639 if (ioa_cfg->reset_retries++ >= IPR_NUM_RESET_RELOAD_RETRIES) {
8640 dev_err(&ioa_cfg->pdev->dev,
8641 "IOA taken offline - error recovery failed\n");
8642
8643 ioa_cfg->reset_retries = 0;
8644 for (i = 0; i < ioa_cfg->hrrq_num; i++) {
8645 spin_lock(&ioa_cfg->hrrq[i]._lock);
8646 ioa_cfg->hrrq[i].ioa_is_dead = 1;
8647 spin_unlock(&ioa_cfg->hrrq[i]._lock);
8648 }
8649 wmb();
8650
8651 if (ioa_cfg->in_ioa_bringdown) {
8652 ioa_cfg->reset_cmd = NULL;
8653 ioa_cfg->in_reset_reload = 0;
8654 ipr_fail_all_ops(ioa_cfg);
8655 wake_up_all(&ioa_cfg->reset_wait_q);
8656
8657 if (!ioa_cfg->hrrq[IPR_INIT_HRRQ].removing_ioa) {
8658 spin_unlock_irq(ioa_cfg->host->host_lock);
8659 scsi_unblock_requests(ioa_cfg->host);
8660 spin_lock_irq(ioa_cfg->host->host_lock);
8661 }
8662 return;
8663 } else {
8664 ioa_cfg->in_ioa_bringdown = 1;
8665 shutdown_type = IPR_SHUTDOWN_NONE;
8666 }
8667 }
8668
8669 _ipr_initiate_ioa_reset(ioa_cfg, ipr_reset_shutdown_ioa,
8670 shutdown_type);
8671 }
8672
8673 /**
8674 * ipr_reset_freeze - Hold off all I/O activity
8675 * @ipr_cmd: ipr command struct
8676 *
8677 * Description: If the PCI slot is frozen, hold off all I/O
8678 * activity; then, as soon as the slot is available again,
8679 * initiate an adapter reset.
8680 *
 * Return value:
 * 	IPR_RC_JOB_RETURN
 **/
8681 static int ipr_reset_freeze(struct ipr_cmnd *ipr_cmd)
8682 {
8683 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8684 int i;
8685
8686 /* Disallow new interrupts so we do not loop in the ISR while the slot is frozen */
8687 for (i = 0; i < ioa_cfg->hrrq_num; i++) {
8688 spin_lock(&ioa_cfg->hrrq[i]._lock);
8689 ioa_cfg->hrrq[i].allow_interrupts = 0;
8690 spin_unlock(&ioa_cfg->hrrq[i]._lock);
8691 }
8692 wmb();
8693 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_pending_q);
8694 ipr_cmd->done = ipr_reset_ioa_job;
8695 return IPR_RC_JOB_RETURN;
8696 }
8697
8698 /**
8699 * ipr_pci_frozen - Called when slot has experienced a PCI bus error.
8700 * @pdev: PCI device struct
8701 *
8702 * Description: This routine is called to tell us that the PCI bus
8703 * is down. Can't do anything here, except put the device driver
8704 * into a holding pattern, waiting for the PCI bus to come back.
8705 */
8706 static void ipr_pci_frozen(struct pci_dev *pdev)
8707 {
8708 unsigned long flags = 0;
8709 struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
8710
8711 spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
8712 _ipr_initiate_ioa_reset(ioa_cfg, ipr_reset_freeze, IPR_SHUTDOWN_NONE);
8713 spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
8714 }
8715
8716 /**
8717 * ipr_pci_slot_reset - Called when PCI slot has been reset.
8718 * @pdev: PCI device struct
8719 *
8720 * Description: This routine is called by the pci error recovery
8721 * code after the PCI slot has been reset, just before we
8722 * should resume normal operations.
8723 */
8724 static pci_ers_result_t ipr_pci_slot_reset(struct pci_dev *pdev)
8725 {
8726 unsigned long flags = 0;
8727 struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
8728
8729 spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
8730 if (ioa_cfg->needs_warm_reset)
8731 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
8732 else
8733 _ipr_initiate_ioa_reset(ioa_cfg, ipr_reset_restore_cfg_space,
8734 IPR_SHUTDOWN_NONE);
8735 spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
8736 return PCI_ERS_RESULT_RECOVERED;
8737 }
8738
8739 /**
8740 * ipr_pci_perm_failure - Called when PCI slot is dead for good.
8741 * @pdev: PCI device struct
8742 *
8743 * Description: This routine is called when the PCI bus has
8744 * permanently failed.
8745 */
8746 static void ipr_pci_perm_failure(struct pci_dev *pdev)
8747 {
8748 unsigned long flags = 0;
8749 struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
8750 int i;
8751
8752 spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
8753 if (ioa_cfg->sdt_state == WAIT_FOR_DUMP)
8754 ioa_cfg->sdt_state = ABORT_DUMP;
8755 ioa_cfg->reset_retries = IPR_NUM_RESET_RELOAD_RETRIES - 1;
8756 ioa_cfg->in_ioa_bringdown = 1;
8757 for (i = 0; i < ioa_cfg->hrrq_num; i++) {
8758 spin_lock(&ioa_cfg->hrrq[i]._lock);
8759 ioa_cfg->hrrq[i].allow_cmds = 0;
8760 spin_unlock(&ioa_cfg->hrrq[i]._lock);
8761 }
8762 wmb();
8763 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
8764 spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
8765 }
8766
8767 /**
8768 * ipr_pci_error_detected - Called when a PCI error is detected.
8769 * @pdev: PCI device struct
8770 * @state: PCI channel state
8771 *
8772 * Description: Called when a PCI error is detected.
8773 *
8774 * Return value:
8775 * PCI_ERS_RESULT_NEED_RESET or PCI_ERS_RESULT_DISCONNECT
8776 */
8777 static pci_ers_result_t ipr_pci_error_detected(struct pci_dev *pdev,
8778 pci_channel_state_t state)
8779 {
8780 switch (state) {
8781 case pci_channel_io_frozen:
8782 ipr_pci_frozen(pdev);
8783 return PCI_ERS_RESULT_NEED_RESET;
8784 case pci_channel_io_perm_failure:
8785 ipr_pci_perm_failure(pdev);
8786 return PCI_ERS_RESULT_DISCONNECT;
8788 default:
8789 break;
8790 }
8791 return PCI_ERS_RESULT_NEED_RESET;
8792 }
8793
8794 /**
8795 * ipr_probe_ioa_part2 - Initializes IOAs found in ipr_probe_ioa(..)
8796 * @ioa_cfg: ioa cfg struct
8797 *
8798 * Description: This is the second phase of adapter initialization.
8799 * This function takes care of initializing the adapter to the point
8800 * where it can accept new commands.
8801 *
8802 * Return value:
8803 * 0 on success / -EIO on failure
8804 **/
8805 static int ipr_probe_ioa_part2(struct ipr_ioa_cfg *ioa_cfg)
8806 {
8807 int rc = 0;
8808 unsigned long host_lock_flags = 0;
8809
8810 ENTER;
8811 spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags);
8812 dev_dbg(&ioa_cfg->pdev->dev, "ioa_cfg adx: 0x%p\n", ioa_cfg);
8813 if (ioa_cfg->needs_hard_reset) {
8814 ioa_cfg->needs_hard_reset = 0;
8815 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
8816 } else
8817 _ipr_initiate_ioa_reset(ioa_cfg, ipr_reset_enable_ioa,
8818 IPR_SHUTDOWN_NONE);
8819 spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags);
8820 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
8821 spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags);
8822
8823 if (ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead) {
8824 rc = -EIO;
8825 } else if (ipr_invalid_adapter(ioa_cfg)) {
8826 if (!ipr_testmode)
8827 rc = -EIO;
8828
8829 dev_err(&ioa_cfg->pdev->dev,
8830 "Adapter not supported in this hardware configuration.\n");
8831 }
8832
8833 spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags);
8834
8835 LEAVE;
8836 return rc;
8837 }
8838
8839 /**
8840 * ipr_free_cmd_blks - Frees command blocks allocated for an adapter
8841 * @ioa_cfg: ioa config struct
8842 *
8843 * Return value:
8844 * none
8845 **/
8846 static void ipr_free_cmd_blks(struct ipr_ioa_cfg *ioa_cfg)
8847 {
8848 int i;
8849
8850 for (i = 0; i < IPR_NUM_CMD_BLKS; i++) {
8851 if (ioa_cfg->ipr_cmnd_list[i])
8852 pci_pool_free(ioa_cfg->ipr_cmd_pool,
8853 ioa_cfg->ipr_cmnd_list[i],
8854 ioa_cfg->ipr_cmnd_list_dma[i]);
8855
8856 ioa_cfg->ipr_cmnd_list[i] = NULL;
8857 }
8858
8859 if (ioa_cfg->ipr_cmd_pool)
8860 pci_pool_destroy(ioa_cfg->ipr_cmd_pool);
8861
8862 kfree(ioa_cfg->ipr_cmnd_list);
8863 kfree(ioa_cfg->ipr_cmnd_list_dma);
8864 ioa_cfg->ipr_cmnd_list = NULL;
8865 ioa_cfg->ipr_cmnd_list_dma = NULL;
8866 ioa_cfg->ipr_cmd_pool = NULL;
8867 }
8868
8869 /**
8870 * ipr_free_mem - Frees memory allocated for an adapter
8871 * @ioa_cfg: ioa cfg struct
8872 *
8873 * Return value:
8874 * nothing
8875 **/
8876 static void ipr_free_mem(struct ipr_ioa_cfg *ioa_cfg)
8877 {
8878 int i;
8879
8880 kfree(ioa_cfg->res_entries);
8881 pci_free_consistent(ioa_cfg->pdev, sizeof(struct ipr_misc_cbs),
8882 ioa_cfg->vpd_cbs, ioa_cfg->vpd_cbs_dma);
8883 ipr_free_cmd_blks(ioa_cfg);
8884
8885 for (i = 0; i < ioa_cfg->hrrq_num; i++)
8886 pci_free_consistent(ioa_cfg->pdev,
8887 sizeof(u32) * ioa_cfg->hrrq[i].size,
8888 ioa_cfg->hrrq[i].host_rrq,
8889 ioa_cfg->hrrq[i].host_rrq_dma);
8890
8891 pci_free_consistent(ioa_cfg->pdev, ioa_cfg->cfg_table_size,
8892 ioa_cfg->u.cfg_table,
8893 ioa_cfg->cfg_table_dma);
8894
8895 for (i = 0; i < IPR_NUM_HCAMS; i++) {
8896 pci_free_consistent(ioa_cfg->pdev,
8897 sizeof(struct ipr_hostrcb),
8898 ioa_cfg->hostrcb[i],
8899 ioa_cfg->hostrcb_dma[i]);
8900 }
8901
8902 ipr_free_dump(ioa_cfg);
8903 kfree(ioa_cfg->trace);
8904 }
8905
8906 /**
8907 * ipr_free_all_resources - Free all allocated resources for an adapter.
8908 * @ioa_cfg: ioa config struct
8909 *
8910 * This function frees all allocated resources for the
8911 * specified adapter.
8912 *
8913 * Return value:
8914 * none
8915 **/
8916 static void ipr_free_all_resources(struct ipr_ioa_cfg *ioa_cfg)
8917 {
8918 struct pci_dev *pdev = ioa_cfg->pdev;
8919
8920 ENTER;
8921 if (ioa_cfg->intr_flag == IPR_USE_MSI ||
8922 ioa_cfg->intr_flag == IPR_USE_MSIX) {
8923 int i;
8924 for (i = 0; i < ioa_cfg->nvectors; i++)
8925 free_irq(ioa_cfg->vectors_info[i].vec,
8926 &ioa_cfg->hrrq[i]);
8927 } else
8928 free_irq(pdev->irq, &ioa_cfg->hrrq[0]);
8929
8930 if (ioa_cfg->intr_flag == IPR_USE_MSI) {
8931 pci_disable_msi(pdev);
8932 ioa_cfg->intr_flag &= ~IPR_USE_MSI;
8933 } else if (ioa_cfg->intr_flag == IPR_USE_MSIX) {
8934 pci_disable_msix(pdev);
8935 ioa_cfg->intr_flag &= ~IPR_USE_MSIX;
8936 }
8937
8938 iounmap(ioa_cfg->hdw_dma_regs);
8939 pci_release_regions(pdev);
8940 ipr_free_mem(ioa_cfg);
8941 scsi_host_put(ioa_cfg->host);
8942 pci_disable_device(pdev);
8943 LEAVE;
8944 }
8945
8946 /**
8947 * ipr_alloc_cmd_blks - Allocate command blocks for an adapter
8948 * @ioa_cfg: ioa config struct
8949 *
8950 * Return value:
8951 * 0 on success / -ENOMEM on allocation failure
8952 **/
8953 static int ipr_alloc_cmd_blks(struct ipr_ioa_cfg *ioa_cfg)
8954 {
8955 struct ipr_cmnd *ipr_cmd;
8956 struct ipr_ioarcb *ioarcb;
8957 dma_addr_t dma_addr;
8958 int i, entries_each_hrrq, hrrq_id = 0;
8959
8960 ioa_cfg->ipr_cmd_pool = pci_pool_create(IPR_NAME, ioa_cfg->pdev,
8961 sizeof(struct ipr_cmnd), 512, 0);
8962
8963 if (!ioa_cfg->ipr_cmd_pool)
8964 return -ENOMEM;
8965
8966 ioa_cfg->ipr_cmnd_list = kcalloc(IPR_NUM_CMD_BLKS, sizeof(struct ipr_cmnd *), GFP_KERNEL);
8967 ioa_cfg->ipr_cmnd_list_dma = kcalloc(IPR_NUM_CMD_BLKS, sizeof(dma_addr_t), GFP_KERNEL);
8968
8969 if (!ioa_cfg->ipr_cmnd_list || !ioa_cfg->ipr_cmnd_list_dma) {
8970 ipr_free_cmd_blks(ioa_cfg);
8971 return -ENOMEM;
8972 }
8973
8974 for (i = 0; i < ioa_cfg->hrrq_num; i++) {
8975 if (ioa_cfg->hrrq_num > 1) {
8976 if (i == 0) {
8977 entries_each_hrrq = IPR_NUM_INTERNAL_CMD_BLKS;
8978 ioa_cfg->hrrq[i].min_cmd_id = 0;
8979 ioa_cfg->hrrq[i].max_cmd_id =
8980 (entries_each_hrrq - 1);
8981 } else {
8982 entries_each_hrrq =
8983 IPR_NUM_BASE_CMD_BLKS/
8984 (ioa_cfg->hrrq_num - 1);
8985 ioa_cfg->hrrq[i].min_cmd_id =
8986 IPR_NUM_INTERNAL_CMD_BLKS +
8987 (i - 1) * entries_each_hrrq;
8988 ioa_cfg->hrrq[i].max_cmd_id =
8989 (IPR_NUM_INTERNAL_CMD_BLKS +
8990 i * entries_each_hrrq - 1);
8991 }
8992 } else {
8993 entries_each_hrrq = IPR_NUM_CMD_BLKS;
8994 ioa_cfg->hrrq[i].min_cmd_id = 0;
8995 ioa_cfg->hrrq[i].max_cmd_id = (entries_each_hrrq - 1);
8996 }
8997 ioa_cfg->hrrq[i].size = entries_each_hrrq;
8998 }
8999
9000 BUG_ON(ioa_cfg->hrrq_num == 0);
9001
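	/*
	 * Any command blocks left over by the per-queue division below are
	 * handed to the last HRRQ so that all IPR_NUM_CMD_BLKS are usable.
	 */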
9002 i = IPR_NUM_CMD_BLKS -
9003 ioa_cfg->hrrq[ioa_cfg->hrrq_num - 1].max_cmd_id - 1;
9004 if (i > 0) {
9005 ioa_cfg->hrrq[ioa_cfg->hrrq_num - 1].size += i;
9006 ioa_cfg->hrrq[ioa_cfg->hrrq_num - 1].max_cmd_id += i;
9007 }
9008
9009 for (i = 0; i < IPR_NUM_CMD_BLKS; i++) {
9010 ipr_cmd = pci_pool_alloc(ioa_cfg->ipr_cmd_pool, GFP_KERNEL, &dma_addr);
9011
9012 if (!ipr_cmd) {
9013 ipr_free_cmd_blks(ioa_cfg);
9014 return -ENOMEM;
9015 }
9016
9017 memset(ipr_cmd, 0, sizeof(*ipr_cmd));
9018 ioa_cfg->ipr_cmnd_list[i] = ipr_cmd;
9019 ioa_cfg->ipr_cmnd_list_dma[i] = dma_addr;
9020
9021 ioarcb = &ipr_cmd->ioarcb;
9022 ipr_cmd->dma_addr = dma_addr;
9023 if (ioa_cfg->sis64)
9024 ioarcb->a.ioarcb_host_pci_addr64 = cpu_to_be64(dma_addr);
9025 else
9026 ioarcb->a.ioarcb_host_pci_addr = cpu_to_be32(dma_addr);
9027
9028 ioarcb->host_response_handle = cpu_to_be32(i << 2);
9029 if (ioa_cfg->sis64) {
9030 ioarcb->u.sis64_addr_data.data_ioadl_addr =
9031 cpu_to_be64(dma_addr + offsetof(struct ipr_cmnd, i.ioadl64));
9032 ioarcb->u.sis64_addr_data.ioasa_host_pci_addr =
9033 cpu_to_be64(dma_addr + offsetof(struct ipr_cmnd, s.ioasa64));
9034 } else {
9035 ioarcb->write_ioadl_addr =
9036 cpu_to_be32(dma_addr + offsetof(struct ipr_cmnd, i.ioadl));
9037 ioarcb->read_ioadl_addr = ioarcb->write_ioadl_addr;
9038 ioarcb->ioasa_host_pci_addr =
9039 cpu_to_be32(dma_addr + offsetof(struct ipr_cmnd, s.ioasa));
9040 }
9041 ioarcb->ioasa_len = cpu_to_be16(sizeof(struct ipr_ioasa));
9042 ipr_cmd->cmd_index = i;
9043 ipr_cmd->ioa_cfg = ioa_cfg;
9044 ipr_cmd->sense_buffer_dma = dma_addr +
9045 offsetof(struct ipr_cmnd, sense_buffer);
9046
9047 ipr_cmd->ioarcb.cmd_pkt.hrrq_id = hrrq_id;
9048 ipr_cmd->hrrq = &ioa_cfg->hrrq[hrrq_id];
9049 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
9050 if (i >= ioa_cfg->hrrq[hrrq_id].max_cmd_id)
9051 hrrq_id++;
9052 }
9053
9054 return 0;
9055 }
9056
9057 /**
9058 * ipr_alloc_mem - Allocate memory for an adapter
9059 * @ioa_cfg: ioa config struct
9060 *
9061 * Return value:
9062 * 0 on success / non-zero for error
9063 **/
9064 static int ipr_alloc_mem(struct ipr_ioa_cfg *ioa_cfg)
9065 {
9066 struct pci_dev *pdev = ioa_cfg->pdev;
9067 int i, rc = -ENOMEM;
9068
9069 ENTER;
9070 ioa_cfg->res_entries = kzalloc(sizeof(struct ipr_resource_entry) *
9071 ioa_cfg->max_devs_supported, GFP_KERNEL);
9072
9073 if (!ioa_cfg->res_entries)
9074 goto out;
9075
9076 for (i = 0; i < ioa_cfg->max_devs_supported; i++) {
9077 list_add_tail(&ioa_cfg->res_entries[i].queue, &ioa_cfg->free_res_q);
9078 ioa_cfg->res_entries[i].ioa_cfg = ioa_cfg;
9079 }
9080
9081 ioa_cfg->vpd_cbs = pci_alloc_consistent(ioa_cfg->pdev,
9082 sizeof(struct ipr_misc_cbs),
9083 &ioa_cfg->vpd_cbs_dma);
9084
9085 if (!ioa_cfg->vpd_cbs)
9086 goto out_free_res_entries;
9087
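/*
 * HRRQ 0 shares the SCSI host lock; every additional HRRQ gets its
 * own lock so the queues can be serviced independently.
 */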
9088 for (i = 0; i < ioa_cfg->hrrq_num; i++) {
9089 INIT_LIST_HEAD(&ioa_cfg->hrrq[i].hrrq_free_q);
9090 INIT_LIST_HEAD(&ioa_cfg->hrrq[i].hrrq_pending_q);
9091 spin_lock_init(&ioa_cfg->hrrq[i]._lock);
9092 if (i == 0)
9093 ioa_cfg->hrrq[i].lock = ioa_cfg->host->host_lock;
9094 else
9095 ioa_cfg->hrrq[i].lock = &ioa_cfg->hrrq[i]._lock;
9096 }
9097
9098 if (ipr_alloc_cmd_blks(ioa_cfg))
9099 goto out_free_vpd_cbs;
9100
9101 for (i = 0; i < ioa_cfg->hrrq_num; i++) {
9102 ioa_cfg->hrrq[i].host_rrq = pci_alloc_consistent(ioa_cfg->pdev,
9103 sizeof(u32) * ioa_cfg->hrrq[i].size,
9104 &ioa_cfg->hrrq[i].host_rrq_dma);
9105
9106 if (!ioa_cfg->hrrq[i].host_rrq) {
9107 while (--i >= 0)
9108 pci_free_consistent(pdev,
9109 sizeof(u32) * ioa_cfg->hrrq[i].size,
9110 ioa_cfg->hrrq[i].host_rrq,
9111 ioa_cfg->hrrq[i].host_rrq_dma);
9112 goto out_ipr_free_cmd_blocks;
9113 }
9114 ioa_cfg->hrrq[i].ioa_cfg = ioa_cfg;
9115 }
9116
9117 ioa_cfg->u.cfg_table = pci_alloc_consistent(ioa_cfg->pdev,
9118 ioa_cfg->cfg_table_size,
9119 &ioa_cfg->cfg_table_dma);
9120
9121 if (!ioa_cfg->u.cfg_table)
9122 goto out_free_host_rrq;
9123
9124 for (i = 0; i < IPR_NUM_HCAMS; i++) {
9125 ioa_cfg->hostrcb[i] = pci_alloc_consistent(ioa_cfg->pdev,
9126 sizeof(struct ipr_hostrcb),
9127 &ioa_cfg->hostrcb_dma[i]);
9128
9129 if (!ioa_cfg->hostrcb[i])
9130 goto out_free_hostrcb_dma;
9131
9132 ioa_cfg->hostrcb[i]->hostrcb_dma =
9133 ioa_cfg->hostrcb_dma[i] + offsetof(struct ipr_hostrcb, hcam);
9134 ioa_cfg->hostrcb[i]->ioa_cfg = ioa_cfg;
9135 list_add_tail(&ioa_cfg->hostrcb[i]->queue, &ioa_cfg->hostrcb_free_q);
9136 }
9137
9138 ioa_cfg->trace = kzalloc(sizeof(struct ipr_trace_entry) *
9139 IPR_NUM_TRACE_ENTRIES, GFP_KERNEL);
9140
9141 if (!ioa_cfg->trace)
9142 goto out_free_hostrcb_dma;
9143
9144 rc = 0;
9145 out:
9146 LEAVE;
9147 return rc;
9148
9149 out_free_hostrcb_dma:
9150 while (i-- > 0) {
9151 pci_free_consistent(pdev, sizeof(struct ipr_hostrcb),
9152 ioa_cfg->hostrcb[i],
9153 ioa_cfg->hostrcb_dma[i]);
9154 }
9155 pci_free_consistent(pdev, ioa_cfg->cfg_table_size,
9156 ioa_cfg->u.cfg_table,
9157 ioa_cfg->cfg_table_dma);
9158 out_free_host_rrq:
9159 for (i = 0; i < ioa_cfg->hrrq_num; i++) {
9160 pci_free_consistent(pdev,
9161 sizeof(u32) * ioa_cfg->hrrq[i].size,
9162 ioa_cfg->hrrq[i].host_rrq,
9163 ioa_cfg->hrrq[i].host_rrq_dma);
9164 }
9165 out_ipr_free_cmd_blocks:
9166 ipr_free_cmd_blks(ioa_cfg);
9167 out_free_vpd_cbs:
9168 pci_free_consistent(pdev, sizeof(struct ipr_misc_cbs),
9169 ioa_cfg->vpd_cbs, ioa_cfg->vpd_cbs_dma);
9170 out_free_res_entries:
9171 kfree(ioa_cfg->res_entries);
9172 goto out;
9173 }
9174
9175 /**
9176 * ipr_initialize_bus_attr - Initialize SCSI bus attributes to default values
9177 * @ioa_cfg: ioa config struct
9178 *
9179 * Return value:
9180 * none
9181 **/
9182 static void ipr_initialize_bus_attr(struct ipr_ioa_cfg *ioa_cfg)
9183 {
9184 int i;
9185
9186 for (i = 0; i < IPR_MAX_NUM_BUSES; i++) {
9187 ioa_cfg->bus_attr[i].bus = i;
9188 ioa_cfg->bus_attr[i].qas_enabled = 0;
9189 ioa_cfg->bus_attr[i].bus_width = IPR_DEFAULT_BUS_WIDTH;
9190 if (ipr_max_speed < ARRAY_SIZE(ipr_max_bus_speeds))
9191 ioa_cfg->bus_attr[i].max_xfer_rate = ipr_max_bus_speeds[ipr_max_speed];
9192 else
9193 ioa_cfg->bus_attr[i].max_xfer_rate = IPR_U160_SCSI_RATE;
9194 }
9195 }
9196
9197 /**
9198 * ipr_init_ioa_cfg - Initialize IOA config struct
9199 * @ioa_cfg: ioa config struct
9200 * @host: scsi host struct
9201 * @pdev: PCI dev struct
9202 *
9203 * Return value:
9204 * none
9205 **/
9206 static void ipr_init_ioa_cfg(struct ipr_ioa_cfg *ioa_cfg,
9207 struct Scsi_Host *host, struct pci_dev *pdev)
9208 {
9209 const struct ipr_interrupt_offsets *p;
9210 struct ipr_interrupts *t;
9211 void __iomem *base;
9212
9213 ioa_cfg->host = host;
9214 ioa_cfg->pdev = pdev;
9215 ioa_cfg->log_level = ipr_log_level;
9216 ioa_cfg->doorbell = IPR_DOORBELL;
9217 sprintf(ioa_cfg->eye_catcher, IPR_EYECATCHER);
9218 sprintf(ioa_cfg->trace_start, IPR_TRACE_START_LABEL);
9219 sprintf(ioa_cfg->cfg_table_start, IPR_CFG_TBL_START);
9220 sprintf(ioa_cfg->resource_table_label, IPR_RES_TABLE_LABEL);
9221 sprintf(ioa_cfg->ipr_hcam_label, IPR_HCAM_LABEL);
9222 sprintf(ioa_cfg->ipr_cmd_label, IPR_CMD_LABEL);
9223
9224 INIT_LIST_HEAD(&ioa_cfg->hostrcb_free_q);
9225 INIT_LIST_HEAD(&ioa_cfg->hostrcb_pending_q);
9226 INIT_LIST_HEAD(&ioa_cfg->free_res_q);
9227 INIT_LIST_HEAD(&ioa_cfg->used_res_q);
9228 INIT_WORK(&ioa_cfg->work_q, ipr_worker_thread);
9229 init_waitqueue_head(&ioa_cfg->reset_wait_q);
9230 init_waitqueue_head(&ioa_cfg->msi_wait_q);
9231 ioa_cfg->sdt_state = INACTIVE;
9232
9233 ipr_initialize_bus_attr(ioa_cfg);
9234 ioa_cfg->max_devs_supported = ipr_max_devs;
9235
9236 if (ioa_cfg->sis64) {
9237 host->max_id = IPR_MAX_SIS64_TARGETS_PER_BUS;
9238 host->max_lun = IPR_MAX_SIS64_LUNS_PER_TARGET;
9239 if (ipr_max_devs > IPR_MAX_SIS64_DEVS)
9240 ioa_cfg->max_devs_supported = IPR_MAX_SIS64_DEVS;
9241 } else {
9242 host->max_id = IPR_MAX_NUM_TARGETS_PER_BUS;
9243 host->max_lun = IPR_MAX_NUM_LUNS_PER_TARGET;
9244 if (ipr_max_devs > IPR_MAX_PHYSICAL_DEVS)
9245 ioa_cfg->max_devs_supported = IPR_MAX_PHYSICAL_DEVS;
9246 }
9247 host->max_channel = IPR_MAX_BUS_TO_SCAN;
9248 host->unique_id = host->host_no;
9249 host->max_cmd_len = IPR_MAX_CDB_LEN;
9250 host->can_queue = ioa_cfg->max_cmds;
9251 pci_set_drvdata(pdev, ioa_cfg);
9252
9253 p = &ioa_cfg->chip_cfg->regs;
9254 t = &ioa_cfg->regs;
9255 base = ioa_cfg->hdw_dma_regs;
9256
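/*
 * Translate the chip-specific register offsets into absolute
 * addresses within the mapped register space.
 */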
9257 t->set_interrupt_mask_reg = base + p->set_interrupt_mask_reg;
9258 t->clr_interrupt_mask_reg = base + p->clr_interrupt_mask_reg;
9259 t->clr_interrupt_mask_reg32 = base + p->clr_interrupt_mask_reg32;
9260 t->sense_interrupt_mask_reg = base + p->sense_interrupt_mask_reg;
9261 t->sense_interrupt_mask_reg32 = base + p->sense_interrupt_mask_reg32;
9262 t->clr_interrupt_reg = base + p->clr_interrupt_reg;
9263 t->clr_interrupt_reg32 = base + p->clr_interrupt_reg32;
9264 t->sense_interrupt_reg = base + p->sense_interrupt_reg;
9265 t->sense_interrupt_reg32 = base + p->sense_interrupt_reg32;
9266 t->ioarrin_reg = base + p->ioarrin_reg;
9267 t->sense_uproc_interrupt_reg = base + p->sense_uproc_interrupt_reg;
9268 t->sense_uproc_interrupt_reg32 = base + p->sense_uproc_interrupt_reg32;
9269 t->set_uproc_interrupt_reg = base + p->set_uproc_interrupt_reg;
9270 t->set_uproc_interrupt_reg32 = base + p->set_uproc_interrupt_reg32;
9271 t->clr_uproc_interrupt_reg = base + p->clr_uproc_interrupt_reg;
9272 t->clr_uproc_interrupt_reg32 = base + p->clr_uproc_interrupt_reg32;
9273
9274 if (ioa_cfg->sis64) {
9275 t->init_feedback_reg = base + p->init_feedback_reg;
9276 t->dump_addr_reg = base + p->dump_addr_reg;
9277 t->dump_data_reg = base + p->dump_data_reg;
9278 t->endian_swap_reg = base + p->endian_swap_reg;
9279 }
9280 }
9281
9282 /**
9283 * ipr_get_chip_info - Find adapter chip information
9284 * @dev_id: PCI device id struct
9285 *
9286 * Return value:
9287 * ptr to chip information on success / NULL on failure
9288 **/
9289 static const struct ipr_chip_t *
9290 ipr_get_chip_info(const struct pci_device_id *dev_id)
9291 {
9292 int i;
9293
9294 for (i = 0; i < ARRAY_SIZE(ipr_chip); i++)
9295 if (ipr_chip[i].vendor == dev_id->vendor &&
9296 ipr_chip[i].device == dev_id->device)
9297 return &ipr_chip[i];
9298 return NULL;
9299 }
9300
9301 static int ipr_enable_msix(struct ipr_ioa_cfg *ioa_cfg)
9302 {
9303 struct msix_entry entries[IPR_MAX_MSIX_VECTORS];
9304 int i, err, vectors;
9305
9306 for (i = 0; i < ARRAY_SIZE(entries); ++i)
9307 entries[i].entry = i;
9308
9309 vectors = ipr_number_of_msix;
9310
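/*
 * pci_enable_msix() returns a positive count when fewer vectors are
 * available than requested; retry with that smaller count until it
 * succeeds (0) or fails outright (< 0).
 */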
9311 while ((err = pci_enable_msix(ioa_cfg->pdev, entries, vectors)) > 0)
9312 vectors = err;
9313
9314 if (err < 0) {
9315 pci_disable_msix(ioa_cfg->pdev);
9316 return err;
9317 }
9318
9319 if (!err) {
9320 for (i = 0; i < vectors; i++)
9321 ioa_cfg->vectors_info[i].vec = entries[i].vector;
9322 ioa_cfg->nvectors = vectors;
9323 }
9324
9325 return err;
9326 }
9327
9328 static int ipr_enable_msi(struct ipr_ioa_cfg *ioa_cfg)
9329 {
9330 int i, err, vectors;
9331
9332 vectors = ipr_number_of_msix;
9333
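/*
 * As with MSI-X above: a positive return means fewer MSIs are
 * available; retry with the smaller count.
 */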
9334 while ((err = pci_enable_msi_block(ioa_cfg->pdev, vectors)) > 0)
9335 vectors = err;
9336
9337 if (err < 0) {
9338 pci_disable_msi(ioa_cfg->pdev);
9339 return err;
9340 }
9341
9342 if (!err) {
9343 for (i = 0; i < vectors; i++)
9344 ioa_cfg->vectors_info[i].vec = ioa_cfg->pdev->irq + i;
9345 ioa_cfg->nvectors = vectors;
9346 }
9347
9348 return err;
9349 }
9350
9351 static void name_msi_vectors(struct ipr_ioa_cfg *ioa_cfg)
9352 {
9353 int vec_idx, n = sizeof(ioa_cfg->vectors_info[0].desc) - 1;
9354
9355 for (vec_idx = 0; vec_idx < ioa_cfg->nvectors; vec_idx++) {
9356 snprintf(ioa_cfg->vectors_info[vec_idx].desc, n,
9357 "host%d-%d", ioa_cfg->host->host_no, vec_idx);
9358 ioa_cfg->vectors_info[vec_idx].
9359 desc[strlen(ioa_cfg->vectors_info[vec_idx].desc)] = 0;
9360 }
9361 }
9362
9363 static int ipr_request_other_msi_irqs(struct ipr_ioa_cfg *ioa_cfg)
9364 {
9365 int i, rc;
9366
9367 for (i = 1; i < ioa_cfg->nvectors; i++) {
9368 rc = request_irq(ioa_cfg->vectors_info[i].vec,
9369 ipr_isr_mhrrq,
9370 0,
9371 ioa_cfg->vectors_info[i].desc,
9372 &ioa_cfg->hrrq[i]);
9373 if (rc) {
9374 while (--i >= 0)
9375 free_irq(ioa_cfg->vectors_info[i].vec,
9376 &ioa_cfg->hrrq[i]);
9377 return rc;
9378 }
9379 }
9380 return 0;
9381 }
9382
9383 /**
9384 * ipr_test_intr - Handle the interrupt generated in ipr_test_msi().
9385 * @irq: interrupt number
9386 * @devp: pointer to the ioa config struct
9387 * Description: Simply set the msi_received flag to 1 indicating that
9388 * Message Signaled Interrupts are supported.
9389 *
9390 * Return value:
9391 * IRQ_HANDLED
9392 **/
9393 static irqreturn_t ipr_test_intr(int irq, void *devp)
9394 {
9395 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)devp;
9396 unsigned long lock_flags = 0;
9397 irqreturn_t rc = IRQ_HANDLED;
9398
9399 dev_info(&ioa_cfg->pdev->dev, "Received IRQ: %d\n", irq);
9400 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
9401
9402 ioa_cfg->msi_received = 1;
9403 wake_up(&ioa_cfg->msi_wait_q);
9404
9405 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
9406 return rc;
9407 }
9408
9409 /**
9410 * ipr_test_msi - Test for Message Signaled Interrupt (MSI) support.
9411 * @ioa_cfg: ioa config struct
9412 * @pdev: PCI device struct
9413 * Description: The return value from pci_enable_msi() cannot always be
9414 * trusted. This routine sets up and initiates a test interrupt to determine
9415 * if the interrupt is received via the ipr_test_intr() service routine.
9416 * If the test fails, the driver will fall back to LSI.
9417 *
9418 * Return value:
9419 * 0 on success / non-zero on failure
9420 **/
9421 static int ipr_test_msi(struct ipr_ioa_cfg *ioa_cfg, struct pci_dev *pdev)
9422 {
9423 int rc;
9424 volatile u32 int_reg;
9425 unsigned long lock_flags = 0;
9426
9427 ENTER;
9428
9429 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
9430 init_waitqueue_head(&ioa_cfg->msi_wait_q);
9431 ioa_cfg->msi_received = 0;
9432 ipr_mask_and_clear_interrupts(ioa_cfg, ~IPR_PCII_IOA_TRANS_TO_OPER);
9433 writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE, ioa_cfg->regs.clr_interrupt_mask_reg32);
9434 int_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
9435 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
9436
9437 if (ioa_cfg->intr_flag == IPR_USE_MSIX)
9438 rc = request_irq(ioa_cfg->vectors_info[0].vec, ipr_test_intr, 0, IPR_NAME, ioa_cfg);
9439 else
9440 rc = request_irq(pdev->irq, ipr_test_intr, 0, IPR_NAME, ioa_cfg);
9441 if (rc) {
9442 dev_err(&pdev->dev, "Cannot assign irq %d\n", pdev->irq);
9443 return rc;
9444 } else if (ipr_debug)
9445 dev_info(&pdev->dev, "IRQ assigned: %d\n", pdev->irq);
9446
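/*
 * Generate the test interrupt and wait up to one second for
 * ipr_test_intr() to set msi_received.
 */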
9447 writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE, ioa_cfg->regs.sense_interrupt_reg32);
9448 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
9449 wait_event_timeout(ioa_cfg->msi_wait_q, ioa_cfg->msi_received, HZ);
9450 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
9451 ipr_mask_and_clear_interrupts(ioa_cfg, ~IPR_PCII_IOA_TRANS_TO_OPER);
9452
9453 if (!ioa_cfg->msi_received) {
9454 /* MSI test failed */
9455 dev_info(&pdev->dev, "MSI test failed. Falling back to LSI.\n");
9456 rc = -EOPNOTSUPP;
9457 } else if (ipr_debug)
9458 dev_info(&pdev->dev, "MSI test succeeded.\n");
9459
9460 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
9461
9462 if (ioa_cfg->intr_flag == IPR_USE_MSIX)
9463 free_irq(ioa_cfg->vectors_info[0].vec, ioa_cfg);
9464 else
9465 free_irq(pdev->irq, ioa_cfg);
9466
9467 LEAVE;
9468
9469 return rc;
9470 }
9471
9472 /** ipr_probe_ioa - Allocates memory and does first stage of initialization
9473 * @pdev: PCI device struct
9474 * @dev_id: PCI device id struct
9475 *
9476 * Return value:
9477 * 0 on success / non-zero on failure
9478 **/
9479 static int ipr_probe_ioa(struct pci_dev *pdev,
9480 const struct pci_device_id *dev_id)
9481 {
9482 struct ipr_ioa_cfg *ioa_cfg;
9483 struct Scsi_Host *host;
9484 unsigned long ipr_regs_pci;
9485 void __iomem *ipr_regs;
9486 int rc = PCIBIOS_SUCCESSFUL;
9487 volatile u32 mask, uproc, interrupts;
9488 unsigned long lock_flags;
9489
9490 ENTER;
9491
9492 if ((rc = pci_enable_device(pdev))) {
9493 dev_err(&pdev->dev, "Cannot enable adapter\n");
9494 goto out;
9495 }
9496
9497 dev_info(&pdev->dev, "Found IOA with IRQ: %d\n", pdev->irq);
9498
9499 host = scsi_host_alloc(&driver_template, sizeof(*ioa_cfg));
9500
9501 if (!host) {
9502 dev_err(&pdev->dev, "call to scsi_host_alloc failed!\n");
9503 rc = -ENOMEM;
9504 goto out_disable;
9505 }
9506
9507 ioa_cfg = (struct ipr_ioa_cfg *)host->hostdata;
9508 memset(ioa_cfg, 0, sizeof(struct ipr_ioa_cfg));
9509 ata_host_init(&ioa_cfg->ata_host, &pdev->dev, &ipr_sata_ops);
9510
9511 ioa_cfg->ipr_chip = ipr_get_chip_info(dev_id);
9512
9513 if (!ioa_cfg->ipr_chip) {
9514 dev_err(&pdev->dev, "Unknown adapter chipset 0x%04X 0x%04X\n",
9515 dev_id->vendor, dev_id->device);
rc = -ENODEV;
9516 goto out_scsi_host_put;
9517 }
9518
9519 /* set SIS 32 or SIS 64 */
9520 ioa_cfg->sis64 = ioa_cfg->ipr_chip->sis_type == IPR_SIS64 ? 1 : 0;
9521 ioa_cfg->chip_cfg = ioa_cfg->ipr_chip->cfg;
9522 ioa_cfg->clear_isr = ioa_cfg->chip_cfg->clear_isr;
9523 ioa_cfg->max_cmds = ioa_cfg->chip_cfg->max_cmds;
9524
9525 if (ipr_transop_timeout)
9526 ioa_cfg->transop_timeout = ipr_transop_timeout;
9527 else if (dev_id->driver_data & IPR_USE_LONG_TRANSOP_TIMEOUT)
9528 ioa_cfg->transop_timeout = IPR_LONG_OPERATIONAL_TIMEOUT;
9529 else
9530 ioa_cfg->transop_timeout = IPR_OPERATIONAL_TIMEOUT;
9531
9532 ioa_cfg->revid = pdev->revision;
9533
9534 ipr_regs_pci = pci_resource_start(pdev, 0);
9535
9536 rc = pci_request_regions(pdev, IPR_NAME);
9537 if (rc < 0) {
9538 dev_err(&pdev->dev,
9539 "Couldn't register memory range of registers\n");
9540 goto out_scsi_host_put;
9541 }
9542
9543 ipr_regs = pci_ioremap_bar(pdev, 0);
9544
9545 if (!ipr_regs) {
9546 dev_err(&pdev->dev,
9547 "Couldn't map memory range of registers\n");
9548 rc = -ENOMEM;
9549 goto out_release_regions;
9550 }
9551
9552 ioa_cfg->hdw_dma_regs = ipr_regs;
9553 ioa_cfg->hdw_dma_regs_pci = ipr_regs_pci;
9554 ioa_cfg->ioa_mailbox = ioa_cfg->chip_cfg->mailbox + ipr_regs;
9555
9556 ipr_init_ioa_cfg(ioa_cfg, host, pdev);
9557
9558 pci_set_master(pdev);
9559
9560 if (ioa_cfg->sis64) {
9561 rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
9562 if (rc < 0) {
9563 dev_dbg(&pdev->dev, "Failed to set 64 bit PCI DMA mask\n");
9564 rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
9565 }
9566
9567 } else
9568 rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
9569
9570 if (rc < 0) {
9571 dev_err(&pdev->dev, "Failed to set PCI DMA mask\n");
9572 goto cleanup_nomem;
9573 }
9574
9575 rc = pci_write_config_byte(pdev, PCI_CACHE_LINE_SIZE,
9576 ioa_cfg->chip_cfg->cache_line_size);
9577
9578 if (rc != PCIBIOS_SUCCESSFUL) {
9579 dev_err(&pdev->dev, "Write of cache line size failed\n");
9580 rc = -EIO;
9581 goto cleanup_nomem;
9582 }
9583
9584 if (ipr_number_of_msix > IPR_MAX_MSIX_VECTORS) {
9585 dev_err(&pdev->dev, "The max number of MSIX is %d\n",
9586 IPR_MAX_MSIX_VECTORS);
9587 ipr_number_of_msix = IPR_MAX_MSIX_VECTORS;
9588 }
9589
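/* Prefer MSI-X, then MSI, and fall back to legacy (LSI) interrupts. */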
9590 if (ioa_cfg->ipr_chip->intr_type == IPR_USE_MSI &&
9591 ipr_enable_msix(ioa_cfg) == 0)
9592 ioa_cfg->intr_flag = IPR_USE_MSIX;
9593 else if (ioa_cfg->ipr_chip->intr_type == IPR_USE_MSI &&
9594 ipr_enable_msi(ioa_cfg) == 0)
9595 ioa_cfg->intr_flag = IPR_USE_MSI;
9596 else {
9597 ioa_cfg->intr_flag = IPR_USE_LSI;
9598 ioa_cfg->nvectors = 1;
9599 dev_info(&pdev->dev, "Cannot enable MSI.\n");
9600 }
9601
9602 if (ioa_cfg->intr_flag == IPR_USE_MSI ||
9603 ioa_cfg->intr_flag == IPR_USE_MSIX) {
9604 rc = ipr_test_msi(ioa_cfg, pdev);
9605 if (rc == -EOPNOTSUPP) {
9606 if (ioa_cfg->intr_flag == IPR_USE_MSI) {
9607 ioa_cfg->intr_flag &= ~IPR_USE_MSI;
9608 pci_disable_msi(pdev);
9609 } else if (ioa_cfg->intr_flag == IPR_USE_MSIX) {
9610 ioa_cfg->intr_flag &= ~IPR_USE_MSIX;
9611 pci_disable_msix(pdev);
9612 }
9613
9614 ioa_cfg->intr_flag = IPR_USE_LSI;
9615 ioa_cfg->nvectors = 1;
9616 }
9617 else if (rc)
9618 goto out_msi_disable;
9619 else {
9620 if (ioa_cfg->intr_flag == IPR_USE_MSI)
9621 dev_info(&pdev->dev,
9622 "Request for %d MSIs succeeded with starting IRQ: %d\n",
9623 ioa_cfg->nvectors, pdev->irq);
9624 else if (ioa_cfg->intr_flag == IPR_USE_MSIX)
9625 dev_info(&pdev->dev,
9626 "Request for %d MSIXs succeeded.",
9627 ioa_cfg->nvectors);
9628 }
9629 }
9630
9631 ioa_cfg->hrrq_num = min3(ioa_cfg->nvectors,
9632 (unsigned int)num_online_cpus(),
9633 (unsigned int)IPR_MAX_HRRQ_NUM);
9634
9635 /* Save away PCI config space for use following IOA reset */
9636 rc = pci_save_state(pdev);
9637
9638 if (rc != PCIBIOS_SUCCESSFUL) {
9639 dev_err(&pdev->dev, "Failed to save PCI config space\n");
9640 rc = -EIO;
9641 goto out_msi_disable;
9642 }
9643
9644 if ((rc = ipr_save_pcix_cmd_reg(ioa_cfg)))
9645 goto out_msi_disable;
9646
9647 if ((rc = ipr_set_pcix_cmd_reg(ioa_cfg)))
9648 goto out_msi_disable;
9649
9650 if (ioa_cfg->sis64)
9651 ioa_cfg->cfg_table_size = (sizeof(struct ipr_config_table_hdr64)
9652 + ((sizeof(struct ipr_config_table_entry64)
9653 * ioa_cfg->max_devs_supported)));
9654 else
9655 ioa_cfg->cfg_table_size = (sizeof(struct ipr_config_table_hdr)
9656 + ((sizeof(struct ipr_config_table_entry)
9657 * ioa_cfg->max_devs_supported)));
9658
9659 rc = ipr_alloc_mem(ioa_cfg);
9660 if (rc < 0) {
9661 dev_err(&pdev->dev,
9662 "Couldn't allocate enough memory for device driver!\n");
9663 goto out_msi_disable;
9664 }
9665
9666 /*
9667 * If HRRQ updated interrupt is not masked, or reset alert is set,
9668 * the card is in an unknown state and needs a hard reset
9669 */
9670 mask = readl(ioa_cfg->regs.sense_interrupt_mask_reg32);
9671 interrupts = readl(ioa_cfg->regs.sense_interrupt_reg32);
9672 uproc = readl(ioa_cfg->regs.sense_uproc_interrupt_reg32);
9673 if ((mask & IPR_PCII_HRRQ_UPDATED) == 0 || (uproc & IPR_UPROCI_RESET_ALERT))
9674 ioa_cfg->needs_hard_reset = 1;
9675 if ((interrupts & IPR_PCII_ERROR_INTERRUPTS) || reset_devices)
9676 ioa_cfg->needs_hard_reset = 1;
9677 if (interrupts & IPR_PCII_IOA_UNIT_CHECKED)
9678 ioa_cfg->ioa_unit_checked = 1;
9679
9680 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
9681 ipr_mask_and_clear_interrupts(ioa_cfg, ~IPR_PCII_IOA_TRANS_TO_OPER);
9682 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
9683
9684 if (ioa_cfg->intr_flag == IPR_USE_MSI
9685 || ioa_cfg->intr_flag == IPR_USE_MSIX) {
9686 name_msi_vectors(ioa_cfg);
9687 rc = request_irq(ioa_cfg->vectors_info[0].vec, ipr_isr,
9688 0,
9689 ioa_cfg->vectors_info[0].desc,
9690 &ioa_cfg->hrrq[0]);
9691 if (!rc)
9692 rc = ipr_request_other_msi_irqs(ioa_cfg);
9693 } else {
9694 rc = request_irq(pdev->irq, ipr_isr,
9695 IRQF_SHARED,
9696 IPR_NAME, &ioa_cfg->hrrq[0]);
9697 }
9698 if (rc) {
9699 dev_err(&pdev->dev, "Couldn't register IRQ %d! rc=%d\n",
9700 pdev->irq, rc);
9701 goto cleanup_nolog;
9702 }
9703
9704 if ((dev_id->driver_data & IPR_USE_PCI_WARM_RESET) ||
9705 (dev_id->device == PCI_DEVICE_ID_IBM_OBSIDIAN_E && !ioa_cfg->revid)) {
9706 ioa_cfg->needs_warm_reset = 1;
9707 ioa_cfg->reset = ipr_reset_slot_reset;
9708 } else
9709 ioa_cfg->reset = ipr_reset_start_bist;
9710
9711 spin_lock(&ipr_driver_lock);
9712 list_add_tail(&ioa_cfg->queue, &ipr_ioa_head);
9713 spin_unlock(&ipr_driver_lock);
9714
9715 LEAVE;
9716 out:
9717 return rc;
9718
9719 cleanup_nolog:
9720 ipr_free_mem(ioa_cfg);
9721 out_msi_disable:
9722 if (ioa_cfg->intr_flag == IPR_USE_MSI)
9723 pci_disable_msi(pdev);
9724 else if (ioa_cfg->intr_flag == IPR_USE_MSIX)
9725 pci_disable_msix(pdev);
9726 cleanup_nomem:
9727 iounmap(ipr_regs);
9728 out_release_regions:
9729 pci_release_regions(pdev);
9730 out_scsi_host_put:
9731 scsi_host_put(host);
9732 out_disable:
9733 pci_disable_device(pdev);
9734 goto out;
9735 }
9736
9737 /**
9738 * ipr_scan_vsets - Scans for VSET devices
9739 * @ioa_cfg: ioa config struct
9740 *
9741 * Description: Since the VSET resources do not follow SAM (sparse LUNs with
9742 * no LUN 0 are possible), we have to scan for these devices ourselves.
9743 *
9744 * Return value:
9745 * none
9746 **/
9747 static void ipr_scan_vsets(struct ipr_ioa_cfg *ioa_cfg)
9748 {
9749 int target, lun;
9750
9751 for (target = 0; target < IPR_MAX_NUM_TARGETS_PER_BUS; target++)
9752 for (lun = 0; lun < IPR_MAX_NUM_VSET_LUNS_PER_TARGET; lun++)
9753 scsi_add_device(ioa_cfg->host, IPR_VSET_BUS, target, lun);
9754 }
9755
9756 /**
9757 * ipr_initiate_ioa_bringdown - Bring down an adapter
9758 * @ioa_cfg: ioa config struct
9759 * @shutdown_type: shutdown type
9760 *
9761 * Description: This function will initiate bringing down the adapter.
9762 * This consists of issuing an IOA shutdown to the adapter
9763 * to flush the cache, and running BIST.
9764 * If the caller needs to wait on the completion of the reset,
9765 * the caller must sleep on the reset_wait_q.
9766 *
9767 * Return value:
9768 * none
9769 **/
9770 static void ipr_initiate_ioa_bringdown(struct ipr_ioa_cfg *ioa_cfg,
9771 enum ipr_shutdown_type shutdown_type)
9772 {
9773 ENTER;
9774 if (ioa_cfg->sdt_state == WAIT_FOR_DUMP)
9775 ioa_cfg->sdt_state = ABORT_DUMP;
9776 ioa_cfg->reset_retries = 0;
9777 ioa_cfg->in_ioa_bringdown = 1;
9778 ipr_initiate_ioa_reset(ioa_cfg, shutdown_type);
9779 LEAVE;
9780 }
9781
9782 /**
9783 * __ipr_remove - Remove a single adapter
9784 * @pdev: pci device struct
9785 *
9786 * Adapter hot plug remove entry point.
9787 *
9788 * Return value:
9789 * none
9790 **/
9791 static void __ipr_remove(struct pci_dev *pdev)
9792 {
9793 unsigned long host_lock_flags = 0;
9794 struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
9795 int i;
9796 ENTER;
9797
9798 spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags);
9799 while (ioa_cfg->in_reset_reload) {
9800 spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags);
9801 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
9802 spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags);
9803 }
9804
9805 for (i = 0; i < ioa_cfg->hrrq_num; i++) {
9806 spin_lock(&ioa_cfg->hrrq[i]._lock);
9807 ioa_cfg->hrrq[i].removing_ioa = 1;
9808 spin_unlock(&ioa_cfg->hrrq[i]._lock);
9809 }
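/* Ensure removing_ioa is visible before initiating the bringdown. */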
9810 wmb();
9811 ipr_initiate_ioa_bringdown(ioa_cfg, IPR_SHUTDOWN_NORMAL);
9812
9813 spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags);
9814 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
9815 flush_work(&ioa_cfg->work_q);
9816 INIT_LIST_HEAD(&ioa_cfg->used_res_q);
9817 spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags);
9818
9819 spin_lock(&ipr_driver_lock);
9820 list_del(&ioa_cfg->queue);
9821 spin_unlock(&ipr_driver_lock);
9822
9823 if (ioa_cfg->sdt_state == ABORT_DUMP)
9824 ioa_cfg->sdt_state = WAIT_FOR_DUMP;
9825 spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags);
9826
9827 ipr_free_all_resources(ioa_cfg);
9828
9829 LEAVE;
9830 }
9831
9832 /**
9833 * ipr_remove - IOA hot plug remove entry point
9834 * @pdev: pci device struct
9835 *
9836 * Adapter hot plug remove entry point.
9837 *
9838 * Return value:
9839 * none
9840 **/
9841 static void ipr_remove(struct pci_dev *pdev)
9842 {
9843 struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
9844
9845 ENTER;
9846
9847 ipr_remove_trace_file(&ioa_cfg->host->shost_dev.kobj,
9848 &ipr_trace_attr);
9849 ipr_remove_dump_file(&ioa_cfg->host->shost_dev.kobj,
9850 &ipr_dump_attr);
9851 scsi_remove_host(ioa_cfg->host);
9852
9853 __ipr_remove(pdev);
9854
9855 LEAVE;
9856 }
9857
9858 /**
9859 * ipr_probe - Adapter hot plug add entry point
9860 * @pdev: PCI device struct
* @dev_id: PCI device id struct
*
9861 * Return value:
9862 * 0 on success / non-zero on failure
9863 **/
9864 static int ipr_probe(struct pci_dev *pdev, const struct pci_device_id *dev_id)
9865 {
9866 struct ipr_ioa_cfg *ioa_cfg;
9867 int rc, i;
9868
9869 rc = ipr_probe_ioa(pdev, dev_id);
9870
9871 if (rc)
9872 return rc;
9873
9874 ioa_cfg = pci_get_drvdata(pdev);
9875 rc = ipr_probe_ioa_part2(ioa_cfg);
9876
9877 if (rc) {
9878 __ipr_remove(pdev);
9879 return rc;
9880 }
9881
9882 rc = scsi_add_host(ioa_cfg->host, &pdev->dev);
9883
9884 if (rc) {
9885 __ipr_remove(pdev);
9886 return rc;
9887 }
9888
9889 rc = ipr_create_trace_file(&ioa_cfg->host->shost_dev.kobj,
9890 &ipr_trace_attr);
9891
9892 if (rc) {
9893 scsi_remove_host(ioa_cfg->host);
9894 __ipr_remove(pdev);
9895 return rc;
9896 }
9897
9898 rc = ipr_create_dump_file(&ioa_cfg->host->shost_dev.kobj,
9899 &ipr_dump_attr);
9900
9901 if (rc) {
9902 ipr_remove_trace_file(&ioa_cfg->host->shost_dev.kobj,
9903 &ipr_trace_attr);
9904 scsi_remove_host(ioa_cfg->host);
9905 __ipr_remove(pdev);
9906 return rc;
9907 }
9908
9909 scsi_scan_host(ioa_cfg->host);
9910 ipr_scan_vsets(ioa_cfg);
9911 scsi_add_device(ioa_cfg->host, IPR_IOA_BUS, IPR_IOA_TARGET, IPR_IOA_LUN);
9912 ioa_cfg->allow_ml_add_del = 1;
9913 ioa_cfg->host->max_channel = IPR_VSET_BUS;
9914 ioa_cfg->iopoll_weight = ioa_cfg->chip_cfg->iopoll_weight;
9915
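/*
 * On SIS-64 adapters with multiple vectors and iopoll enabled,
 * service the secondary HRRQs through blk-iopoll so completions
 * are polled from softirq context.
 */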
9916 if (blk_iopoll_enabled && ioa_cfg->iopoll_weight &&
9917 ioa_cfg->sis64 && ioa_cfg->nvectors > 1) {
9918 for (i = 1; i < ioa_cfg->hrrq_num; i++) {
9919 blk_iopoll_init(&ioa_cfg->hrrq[i].iopoll,
9920 ioa_cfg->iopoll_weight, ipr_iopoll);
9921 blk_iopoll_enable(&ioa_cfg->hrrq[i].iopoll);
9922 }
9923 }
9924
9925 schedule_work(&ioa_cfg->work_q);
9926 return 0;
9927 }
9928
9929 /**
9930 * ipr_shutdown - Shutdown handler.
9931 * @pdev: pci device struct
9932 *
9933 * This function is invoked upon system shutdown/reboot. It issues
9934 * a normal shutdown to the adapter to flush its write cache.
9935 *
9936 * Return value:
9937 * none
9938 **/
9939 static void ipr_shutdown(struct pci_dev *pdev)
9940 {
9941 struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
9942 unsigned long lock_flags = 0;
9943 int i;
9944
9945 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
9946 if (blk_iopoll_enabled && ioa_cfg->iopoll_weight &&
9947 ioa_cfg->sis64 && ioa_cfg->nvectors > 1) {
9948 ioa_cfg->iopoll_weight = 0;
9949 for (i = 1; i < ioa_cfg->hrrq_num; i++)
9950 blk_iopoll_disable(&ioa_cfg->hrrq[i].iopoll);
9951 }
9952
9953 while (ioa_cfg->in_reset_reload) {
9954 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
9955 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
9956 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
9957 }
9958
9959 ipr_initiate_ioa_bringdown(ioa_cfg, IPR_SHUTDOWN_NORMAL);
9960 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
9961 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
9962 }
9963
9964 static struct pci_device_id ipr_pci_table[] = {
9965 { PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE,
9966 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_5702, 0, 0, 0 },
9967 { PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE,
9968 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_5703, 0, 0, 0 },
9969 { PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE,
9970 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_573D, 0, 0, 0 },
9971 { PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE,
9972 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_573E, 0, 0, 0 },
9973 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE,
9974 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_571B, 0, 0, 0 },
9975 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE,
9976 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572E, 0, 0, 0 },
9977 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE,
9978 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_571A, 0, 0, 0 },
9979 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE,
9980 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_575B, 0, 0,
9981 IPR_USE_LONG_TRANSOP_TIMEOUT },
9982 { PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_OBSIDIAN,
9983 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572A, 0, 0, 0 },
9984 { PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_OBSIDIAN,
9985 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572B, 0, 0,
9986 IPR_USE_LONG_TRANSOP_TIMEOUT },
9987 { PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_OBSIDIAN,
9988 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_575C, 0, 0,
9989 IPR_USE_LONG_TRANSOP_TIMEOUT },
9990 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN,
9991 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572A, 0, 0, 0 },
9992 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN,
9993 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572B, 0, 0,
9994 IPR_USE_LONG_TRANSOP_TIMEOUT},
9995 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN,
9996 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_575C, 0, 0,
9997 IPR_USE_LONG_TRANSOP_TIMEOUT },
9998 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN_E,
9999 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_574E, 0, 0,
10000 IPR_USE_LONG_TRANSOP_TIMEOUT },
10001 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN_E,
10002 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57B3, 0, 0, 0 },
10003 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN_E,
10004 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57CC, 0, 0, 0 },
10005 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN_E,
10006 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57B7, 0, 0,
10007 IPR_USE_LONG_TRANSOP_TIMEOUT | IPR_USE_PCI_WARM_RESET },
10008 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_SNIPE,
10009 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_2780, 0, 0, 0 },
10010 { PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_SCAMP,
10011 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_571E, 0, 0, 0 },
10012 { PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_SCAMP,
10013 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_571F, 0, 0,
10014 IPR_USE_LONG_TRANSOP_TIMEOUT },
10015 { PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_SCAMP,
10016 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572F, 0, 0,
10017 IPR_USE_LONG_TRANSOP_TIMEOUT },
10018 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_FPGA_E2,
10019 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57B5, 0, 0, 0 },
10020 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_FPGA_E2,
10021 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_574D, 0, 0, 0 },
10022 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_FPGA_E2,
10023 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57B2, 0, 0, 0 },
10024 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_FPGA_E2,
10025 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57C0, 0, 0, 0 },
10026 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_FPGA_E2,
10027 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57C3, 0, 0, 0 },
10028 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_FPGA_E2,
10029 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57C4, 0, 0, 0 },
10030 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
10031 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57B4, 0, 0, 0 },
10032 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
10033 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57B1, 0, 0, 0 },
10034 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
10035 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57C6, 0, 0, 0 },
10036 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
10037 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57C8, 0, 0, 0 },
10038 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
10039 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57CE, 0, 0, 0 },
10040 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
10041 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57D5, 0, 0, 0 },
10042 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
10043 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57D6, 0, 0, 0 },
10044 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
10045 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57D7, 0, 0, 0 },
10046 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
10047 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57D8, 0, 0, 0 },
10048 { }
10049 };
10050 MODULE_DEVICE_TABLE(pci, ipr_pci_table);
10051
10052 static const struct pci_error_handlers ipr_err_handler = {
10053 .error_detected = ipr_pci_error_detected,
10054 .slot_reset = ipr_pci_slot_reset,
10055 };
10056
10057 static struct pci_driver ipr_driver = {
10058 .name = IPR_NAME,
10059 .id_table = ipr_pci_table,
10060 .probe = ipr_probe,
10061 .remove = ipr_remove,
10062 .shutdown = ipr_shutdown,
10063 .err_handler = &ipr_err_handler,
10064 };
10065
10066 /**
10067 * ipr_halt_done - Shutdown prepare completion
10068 *
10069 * Return value:
10070 * none
10071 **/
10072 static void ipr_halt_done(struct ipr_cmnd *ipr_cmd)
10073 {
10074 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
10075 }
10076
10077 /**
10078 * ipr_halt - Issue shutdown prepare to all adapters
10079 *
10080 * Return value:
10081 * NOTIFY_OK on success / NOTIFY_DONE on failure
10082 **/
10083 static int ipr_halt(struct notifier_block *nb, ulong event, void *buf)
10084 {
10085 struct ipr_cmnd *ipr_cmd;
10086 struct ipr_ioa_cfg *ioa_cfg;
10087 unsigned long flags = 0;
10088
10089 if (event != SYS_RESTART && event != SYS_HALT && event != SYS_POWER_OFF)
10090 return NOTIFY_DONE;
10091
10092 spin_lock(&ipr_driver_lock);
10093
10094 list_for_each_entry(ioa_cfg, &ipr_ioa_head, queue) {
10095 spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
10096 if (!ioa_cfg->hrrq[IPR_INIT_HRRQ].allow_cmds) {
10097 spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
10098 continue;
10099 }
10100
10101 ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
10102 ipr_cmd->ioarcb.res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
10103 ipr_cmd->ioarcb.cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
10104 ipr_cmd->ioarcb.cmd_pkt.cdb[0] = IPR_IOA_SHUTDOWN;
10105 ipr_cmd->ioarcb.cmd_pkt.cdb[1] = IPR_SHUTDOWN_PREPARE_FOR_NORMAL;
10106
10107 ipr_do_req(ipr_cmd, ipr_halt_done, ipr_timeout, IPR_DEVICE_RESET_TIMEOUT);
10108 spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
10109 }
10110 spin_unlock(&ipr_driver_lock);
10111
10112 return NOTIFY_OK;
10113 }
10114
10115 static struct notifier_block ipr_notifier = {
10116 ipr_halt, NULL, 0
10117 };
10118
10119 /**
10120 * ipr_init - Module entry point
10121 *
10122 * Return value:
10123 * 0 on success / negative value on failure
10124 **/
10125 static int __init ipr_init(void)
10126 {
10127 ipr_info("IBM Power RAID SCSI Device Driver version: %s %s\n",
10128 IPR_DRIVER_VERSION, IPR_DRIVER_DATE);
10129
10130 register_reboot_notifier(&ipr_notifier);
10131 return pci_register_driver(&ipr_driver);
10132 }
10133
10134 /**
10135 * ipr_exit - Module unload
10136 *
10137 * Module unload entry point.
10138 *
10139 * Return value:
10140 * none
10141 **/
10142 static void __exit ipr_exit(void)
10143 {
10144 unregister_reboot_notifier(&ipr_notifier);
10145 pci_unregister_driver(&ipr_driver);
10146 }
10147
10148 module_init(ipr_init);
10149 module_exit(ipr_exit);