/*
 *    Disk Array driver for HP Smart Array SAS controllers
 *    Copyright 2000, 2009 Hewlett-Packard Development Company, L.P.
 *
 *    This program is free software; you can redistribute it and/or modify
 *    it under the terms of the GNU General Public License as published by
 *    the Free Software Foundation; version 2 of the License.
 *
 *    This program is distributed in the hope that it will be useful,
 *    but WITHOUT ANY WARRANTY; without even the implied warranty of
 *    MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 *    NON INFRINGEMENT.  See the GNU General Public License for more details.
 *
 *    You should have received a copy of the GNU General Public License
 *    along with this program; if not, write to the Free Software
 *    Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 *
 *    Questions/Comments/Bugfixes to iss_storagedev@hp.com
 *
 */
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/types.h>
#include <linux/pci.h>
#include <linux/pci-aspm.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/timer.h>
#include <linux/seq_file.h>
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/compat.h>
#include <linux/blktrace_api.h>
#include <linux/uaccess.h>
#include <linux/dma-mapping.h>
#include <linux/completion.h>
#include <linux/moduleparam.h>
#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_tcq.h>
#include <linux/cciss_ioctl.h>
#include <linux/string.h>
#include <linux/bitmap.h>
#include <linux/atomic.h>
#include <linux/kthread.h>
#include <linux/jiffies.h>
/* HPSA_DRIVER_VERSION must be 3 byte values (0-255) separated by '.' */
#define HPSA_DRIVER_VERSION "2.0.2-1"
#define DRIVER_NAME "HP HPSA Driver (v " HPSA_DRIVER_VERSION ")"
#define HPSA "hpsa"	/* restored; the HPSA message prefix is used throughout below */

/* How long to wait (in milliseconds) for board to go into simple mode */
#define MAX_CONFIG_WAIT 30000
#define MAX_IOCTL_CONFIG_WAIT 1000

/* define how many times we will try a command because of bus resets */
#define MAX_CMD_RETRIES 3
/* Embedded module documentation macros - see modules.h */
MODULE_AUTHOR("Hewlett-Packard Company");
MODULE_DESCRIPTION("Driver for HP Smart Array Controller version " \
	HPSA_DRIVER_VERSION);
MODULE_SUPPORTED_DEVICE("HP Smart Array Controllers");
MODULE_VERSION(HPSA_DRIVER_VERSION);
MODULE_LICENSE("GPL");
static int hpsa_allow_any;
module_param(hpsa_allow_any, int, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(hpsa_allow_any,
		"Allow hpsa driver to access unknown HP Smart Array hardware");
static int hpsa_simple_mode;
module_param(hpsa_simple_mode, int, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(hpsa_simple_mode,
	"Use 'simple mode' rather than 'performant mode'");
/* define the PCI info for the cards we can control */
static const struct pci_device_id hpsa_pci_device_id[] = {
	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSE,     0x103C, 0x3241},
	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSE,     0x103C, 0x3243},
	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSE,     0x103C, 0x3245},
	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSE,     0x103C, 0x3247},
	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSE,     0x103C, 0x3249},
	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSE,     0x103C, 0x324a},
	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSE,     0x103C, 0x324b},
	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSE,     0x103C, 0x3233},
	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSF,     0x103C, 0x3350},
	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSF,     0x103C, 0x3351},
	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSF,     0x103C, 0x3352},
	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSF,     0x103C, 0x3353},
	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSF,     0x103C, 0x3354},
	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSF,     0x103C, 0x3355},
	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSF,     0x103C, 0x3356},
	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSH,     0x103C, 0x1920},
	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSH,     0x103C, 0x1921},
	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSH,     0x103C, 0x1922},
	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSH,     0x103C, 0x1923},
	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSH,     0x103C, 0x1924},
	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSH,     0x103C, 0x1925},
	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSH,     0x103C, 0x1926},
	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSH,     0x103C, 0x1928},
	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSF,     0x103C, 0x334d},
	{PCI_VENDOR_ID_HP,     PCI_ANY_ID,	PCI_ANY_ID, PCI_ANY_ID,
		PCI_CLASS_STORAGE_RAID << 8, 0xffff << 8, 0},
	{0,}
};

MODULE_DEVICE_TABLE(pci, hpsa_pci_device_id);
/*  board_id = Subsystem Device ID & Vendor ID
 *  product = Marketing Name for the board
 *  access = Address of the struct of function pointers
 */
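/*
 * Worked example (illustrative, derived from the table below): board_id
 * 0x3241103C is subsystem device ID 0x3241 in the high 16 bits combined
 * with subsystem vendor ID 0x103C (Hewlett-Packard) in the low 16 bits,
 * matching the 0x103C/0x3241 pair in hpsa_pci_device_id above.
 */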
static struct board_type products[] = {
	{0x3241103C, "Smart Array P212", &SA5_access},
	{0x3243103C, "Smart Array P410", &SA5_access},
	{0x3245103C, "Smart Array P410i", &SA5_access},
	{0x3247103C, "Smart Array P411", &SA5_access},
	{0x3249103C, "Smart Array P812", &SA5_access},
	{0x324a103C, "Smart Array P712m", &SA5_access},
	{0x324b103C, "Smart Array P711m", &SA5_access},
	{0x3350103C, "Smart Array P222", &SA5_access},
	{0x3351103C, "Smart Array P420", &SA5_access},
	{0x3352103C, "Smart Array P421", &SA5_access},
	{0x3353103C, "Smart Array P822", &SA5_access},
	{0x3354103C, "Smart Array P420i", &SA5_access},
	{0x3355103C, "Smart Array P220i", &SA5_access},
	{0x3356103C, "Smart Array P721m", &SA5_access},
	{0x1920103C, "Smart Array", &SA5_access},
	{0x1921103C, "Smart Array", &SA5_access},
	{0x1922103C, "Smart Array", &SA5_access},
	{0x1923103C, "Smart Array", &SA5_access},
	{0x1924103C, "Smart Array", &SA5_access},
	{0x1925103C, "Smart Array", &SA5_access},
	{0x1926103C, "Smart Array", &SA5_access},
	{0x1928103C, "Smart Array", &SA5_access},
	{0x334d103C, "Smart Array P822se", &SA5_access},
	{0xFFFF103C, "Unknown Smart Array", &SA5_access},
};
static int number_of_controllers;

static struct list_head hpsa_ctlr_list = LIST_HEAD_INIT(hpsa_ctlr_list);
static spinlock_t lockup_detector_lock;
static struct task_struct *hpsa_lockup_detector;
static irqreturn_t do_hpsa_intr_intx(int irq, void *dev_id);
static irqreturn_t do_hpsa_intr_msi(int irq, void *dev_id);
static int hpsa_ioctl(struct scsi_device *dev, int cmd, void *arg);
static void start_io(struct ctlr_info *h);

#ifdef CONFIG_COMPAT
static int hpsa_compat_ioctl(struct scsi_device *dev, int cmd, void *arg);
#endif

static void cmd_free(struct ctlr_info *h, struct CommandList *c);
static void cmd_special_free(struct ctlr_info *h, struct CommandList *c);
static struct CommandList *cmd_alloc(struct ctlr_info *h);
static struct CommandList *cmd_special_alloc(struct ctlr_info *h);
static int fill_cmd(struct CommandList *c, u8 cmd, struct ctlr_info *h,
	void *buff, size_t size, u8 page_code, unsigned char *scsi3addr,
	int cmd_type);

static int hpsa_scsi_queue_command(struct Scsi_Host *h, struct scsi_cmnd *cmd);
static void hpsa_scan_start(struct Scsi_Host *);
static int hpsa_scan_finished(struct Scsi_Host *sh,
	unsigned long elapsed_time);
static int hpsa_change_queue_depth(struct scsi_device *sdev,
	int qdepth, int reason);

static int hpsa_eh_device_reset_handler(struct scsi_cmnd *scsicmd);
static int hpsa_eh_abort_handler(struct scsi_cmnd *scsicmd);
static int hpsa_slave_alloc(struct scsi_device *sdev);
static void hpsa_slave_destroy(struct scsi_device *sdev);

static void hpsa_update_scsi_devices(struct ctlr_info *h, int hostno);
static int check_for_unit_attention(struct ctlr_info *h,
	struct CommandList *c);
static void check_ioctl_unit_attention(struct ctlr_info *h,
	struct CommandList *c);
/* performant mode helper functions */
static void calc_bucket_map(int *bucket, int num_buckets,
	int nsgs, int *bucket_map);
static void hpsa_put_ctlr_into_performant_mode(struct ctlr_info *h);
static inline u32 next_command(struct ctlr_info *h, u8 q);
static int hpsa_find_cfg_addrs(struct pci_dev *pdev, void __iomem *vaddr,
	u32 *cfg_base_addr, u64 *cfg_base_addr_index,
	u64 *cfg_offset);
static int hpsa_pci_find_memory_BAR(struct pci_dev *pdev,
	unsigned long *memory_bar);
static int hpsa_lookup_board_id(struct pci_dev *pdev, u32 *board_id);
static int hpsa_wait_for_board_state(struct pci_dev *pdev, void __iomem *vaddr,
	int wait_for_ready);
static inline void finish_cmd(struct CommandList *c);
#define BOARD_NOT_READY 0
#define BOARD_READY 1
static inline struct ctlr_info *sdev_to_hba(struct scsi_device *sdev)
{
	unsigned long *priv = shost_priv(sdev->host);
	return (struct ctlr_info *) *priv;
}

static inline struct ctlr_info *shost_to_hba(struct Scsi_Host *sh)
{
	unsigned long *priv = shost_priv(sh);
	return (struct ctlr_info *) *priv;
}
static int check_for_unit_attention(struct ctlr_info *h,
	struct CommandList *c)
{
	if (c->err_info->SenseInfo[2] != UNIT_ATTENTION)
		return 0;

	switch (c->err_info->SenseInfo[12]) {
	case STATE_CHANGED:
		dev_warn(&h->pdev->dev, HPSA "%d: a state change "
			"detected, command retried\n", h->ctlr);
		break;
	case LUN_FAILED:
		dev_warn(&h->pdev->dev, HPSA "%d: LUN failure "
			"detected, action required\n", h->ctlr);
		break;
	case REPORT_LUNS_CHANGED:
		dev_warn(&h->pdev->dev, HPSA "%d: report LUN data "
			"changed, action required\n", h->ctlr);
	/*
	 * Note: this REPORT_LUNS_CHANGED condition only occurs on the external
	 * target (array) devices.
	 */
		break;
	case POWER_OR_RESET:
		dev_warn(&h->pdev->dev, HPSA "%d: a power on "
			"or device reset detected\n", h->ctlr);
		break;
	case UNIT_ATTENTION_CLEARED:
		dev_warn(&h->pdev->dev, HPSA "%d: unit attention "
			"cleared by another initiator\n", h->ctlr);
		break;
	default:
		dev_warn(&h->pdev->dev, HPSA "%d: unknown "
			"unit attention detected\n", h->ctlr);
		break;
	}
	return 1;
}
static int check_for_busy(struct ctlr_info *h, struct CommandList *c)
{
	if (c->err_info->CommandStatus != CMD_TARGET_STATUS ||
		(c->err_info->ScsiStatus != SAM_STAT_BUSY &&
		 c->err_info->ScsiStatus != SAM_STAT_TASK_SET_FULL))
		return 0;
	dev_warn(&h->pdev->dev, HPSA "device busy");
	return 1;
}
static ssize_t host_store_rescan(struct device *dev,
				 struct device_attribute *attr,
				 const char *buf, size_t count)
{
	struct ctlr_info *h;
	struct Scsi_Host *shost = class_to_shost(dev);

	h = shost_to_hba(shost);
	hpsa_scan_start(h->scsi_host);
	return count;
}
static ssize_t host_show_firmware_revision(struct device *dev,
	struct device_attribute *attr, char *buf)
{
	struct ctlr_info *h;
	struct Scsi_Host *shost = class_to_shost(dev);
	unsigned char *fwrev;

	h = shost_to_hba(shost);
	if (!h->hba_inquiry_data)
		return 0;
	fwrev = &h->hba_inquiry_data[32];
	return snprintf(buf, 20, "%c%c%c%c\n",
		fwrev[0], fwrev[1], fwrev[2], fwrev[3]);
}
static ssize_t host_show_commands_outstanding(struct device *dev,
	struct device_attribute *attr, char *buf)
{
	struct Scsi_Host *shost = class_to_shost(dev);
	struct ctlr_info *h = shost_to_hba(shost);

	return snprintf(buf, 20, "%d\n", h->commands_outstanding);
}
static ssize_t host_show_transport_mode(struct device *dev,
	struct device_attribute *attr, char *buf)
{
	struct ctlr_info *h;
	struct Scsi_Host *shost = class_to_shost(dev);

	h = shost_to_hba(shost);
	return snprintf(buf, 20, "%s\n",
		h->transMethod & CFGTBL_Trans_Performant ?
			"performant" : "simple");
}
/* List of controllers which cannot be hard reset on kexec with reset_devices */
static u32 unresettable_controller[] = {
	0x324a103C, /* Smart Array P712m */
	0x324b103C, /* Smart Array P711m */
	0x3223103C, /* Smart Array P800 */
	0x3234103C, /* Smart Array P400 */
	0x3235103C, /* Smart Array P400i */
	0x3211103C, /* Smart Array E200i */
	0x3212103C, /* Smart Array E200 */
	0x3213103C, /* Smart Array E200i */
	0x3214103C, /* Smart Array E200i */
	0x3215103C, /* Smart Array E200i */
	0x3237103C, /* Smart Array E500 */
	0x323D103C, /* Smart Array P700m */
	0x40800E11, /* Smart Array 5i */
	0x409C0E11, /* Smart Array 6400 */
	0x409D0E11, /* Smart Array 6400 EM */
	0x40700E11, /* Smart Array 5300 */
	0x40820E11, /* Smart Array 532 */
	0x40830E11, /* Smart Array 5312 */
	0x409A0E11, /* Smart Array 641 */
	0x409B0E11, /* Smart Array 642 */
	0x40910E11, /* Smart Array 6i */
};
/* List of controllers which cannot even be soft reset */
static u32 soft_unresettable_controller[] = {
	0x40800E11, /* Smart Array 5i */
	0x40700E11, /* Smart Array 5300 */
	0x40820E11, /* Smart Array 532 */
	0x40830E11, /* Smart Array 5312 */
	0x409A0E11, /* Smart Array 641 */
	0x409B0E11, /* Smart Array 642 */
	0x40910E11, /* Smart Array 6i */
	/* Exclude 640x boards.  These are two pci devices in one slot
	 * which share a battery backed cache module.  One controls the
	 * cache, the other accesses the cache through the one that controls
	 * it.  If we reset the one controlling the cache, the other will
	 * likely not be happy.  Just forbid resetting this conjoined mess.
	 * The 640x isn't really supported by hpsa anyway.
	 */
	0x409C0E11, /* Smart Array 6400 */
	0x409D0E11, /* Smart Array 6400 EM */
};
static int ctlr_is_hard_resettable(u32 board_id)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(unresettable_controller); i++)
		if (unresettable_controller[i] == board_id)
			return 0;
	return 1;
}

static int ctlr_is_soft_resettable(u32 board_id)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(soft_unresettable_controller); i++)
		if (soft_unresettable_controller[i] == board_id)
			return 0;
	return 1;
}

static int ctlr_is_resettable(u32 board_id)
{
	return ctlr_is_hard_resettable(board_id) ||
		ctlr_is_soft_resettable(board_id);
}
static ssize_t host_show_resettable(struct device *dev,
	struct device_attribute *attr, char *buf)
{
	struct ctlr_info *h;
	struct Scsi_Host *shost = class_to_shost(dev);

	h = shost_to_hba(shost);
	return snprintf(buf, 20, "%d\n", ctlr_is_resettable(h->board_id));
}
static inline int is_logical_dev_addr_mode(unsigned char scsi3addr[])
{
	return (scsi3addr[3] & 0xC0) == 0x40;
}
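/*
 * Illustrative note (an assumption, not from the original source): the top
 * two bits of byte 3 of the 8-byte CISS LUN address carry the address mode,
 * and mode 0x40 (binary 01) marks a logical volume.  So an address with
 * scsi3addr[3] == 0x40 or 0x41 tests true here, while a masked physical
 * address with scsi3addr[3] == 0xC0 tests false.
 */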
static const char *raid_label[] = { "0", "4", "1(1+0)", "5", "5+1", "ADG",
	"UNKNOWN"
};
#define RAID_UNKNOWN (ARRAY_SIZE(raid_label) - 1)
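/*
 * Worked example (illustrative): with the seven labels above, RAID_UNKNOWN
 * evaluates to 6, the index of "UNKNOWN", so any out-of-range rlevel read
 * from the hardware is clamped to it in raid_level_show() below.
 */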
static ssize_t raid_level_show(struct device *dev,
	struct device_attribute *attr, char *buf)
{
	ssize_t l = 0;
	unsigned char rlevel;
	struct ctlr_info *h;
	struct scsi_device *sdev;
	struct hpsa_scsi_dev_t *hdev;
	unsigned long flags;

	sdev = to_scsi_device(dev);
	h = sdev_to_hba(sdev);
	spin_lock_irqsave(&h->lock, flags);
	hdev = sdev->hostdata;
	if (!hdev) {
		spin_unlock_irqrestore(&h->lock, flags);
		return -ENODEV;
	}

	/* Is this even a logical drive? */
	if (!is_logical_dev_addr_mode(hdev->scsi3addr)) {
		spin_unlock_irqrestore(&h->lock, flags);
		l = snprintf(buf, PAGE_SIZE, "N/A\n");
		return l;
	}

	rlevel = hdev->raid_level;
	spin_unlock_irqrestore(&h->lock, flags);
	if (rlevel > RAID_UNKNOWN)
		rlevel = RAID_UNKNOWN;
	l = snprintf(buf, PAGE_SIZE, "RAID %s\n", raid_label[rlevel]);
	return l;
}
static ssize_t lunid_show(struct device *dev,
	struct device_attribute *attr, char *buf)
{
	struct ctlr_info *h;
	struct scsi_device *sdev;
	struct hpsa_scsi_dev_t *hdev;
	unsigned long flags;
	unsigned char lunid[8];

	sdev = to_scsi_device(dev);
	h = sdev_to_hba(sdev);
	spin_lock_irqsave(&h->lock, flags);
	hdev = sdev->hostdata;
	if (!hdev) {
		spin_unlock_irqrestore(&h->lock, flags);
		return -ENODEV;
	}
	memcpy(lunid, hdev->scsi3addr, sizeof(lunid));
	spin_unlock_irqrestore(&h->lock, flags);
	return snprintf(buf, 20, "0x%02x%02x%02x%02x%02x%02x%02x%02x\n",
		lunid[0], lunid[1], lunid[2], lunid[3],
		lunid[4], lunid[5], lunid[6], lunid[7]);
}
static ssize_t unique_id_show(struct device *dev,
	struct device_attribute *attr, char *buf)
{
	struct ctlr_info *h;
	struct scsi_device *sdev;
	struct hpsa_scsi_dev_t *hdev;
	unsigned long flags;
	unsigned char sn[16];

	sdev = to_scsi_device(dev);
	h = sdev_to_hba(sdev);
	spin_lock_irqsave(&h->lock, flags);
	hdev = sdev->hostdata;
	if (!hdev) {
		spin_unlock_irqrestore(&h->lock, flags);
		return -ENODEV;
	}
	memcpy(sn, hdev->device_id, sizeof(sn));
	spin_unlock_irqrestore(&h->lock, flags);
	return snprintf(buf, 16 * 2 + 2,
			"%02X%02X%02X%02X%02X%02X%02X%02X"
			"%02X%02X%02X%02X%02X%02X%02X%02X\n",
			sn[0], sn[1], sn[2], sn[3],
			sn[4], sn[5], sn[6], sn[7],
			sn[8], sn[9], sn[10], sn[11],
			sn[12], sn[13], sn[14], sn[15]);
}
static DEVICE_ATTR(raid_level, S_IRUGO, raid_level_show, NULL);
static DEVICE_ATTR(lunid, S_IRUGO, lunid_show, NULL);
static DEVICE_ATTR(unique_id, S_IRUGO, unique_id_show, NULL);
static DEVICE_ATTR(rescan, S_IWUSR, NULL, host_store_rescan);
static DEVICE_ATTR(firmware_revision, S_IRUGO,
	host_show_firmware_revision, NULL);
static DEVICE_ATTR(commands_outstanding, S_IRUGO,
	host_show_commands_outstanding, NULL);
static DEVICE_ATTR(transport_mode, S_IRUGO,
	host_show_transport_mode, NULL);
static DEVICE_ATTR(resettable, S_IRUGO,
	host_show_resettable, NULL);
static struct device_attribute *hpsa_sdev_attrs[] = {
	&dev_attr_raid_level,
	&dev_attr_lunid,
	&dev_attr_unique_id,
	NULL,
};

static struct device_attribute *hpsa_shost_attrs[] = {
	&dev_attr_rescan,
	&dev_attr_firmware_revision,
	&dev_attr_commands_outstanding,
	&dev_attr_transport_mode,
	&dev_attr_resettable,
	NULL,
};
static struct scsi_host_template hpsa_driver_template = {
	.module			= THIS_MODULE,
	.name			= HPSA,
	.proc_name		= HPSA,
	.queuecommand		= hpsa_scsi_queue_command,
	.scan_start		= hpsa_scan_start,
	.scan_finished		= hpsa_scan_finished,
	.change_queue_depth	= hpsa_change_queue_depth,
	.this_id		= -1,
	.use_clustering		= ENABLE_CLUSTERING,
	.eh_abort_handler	= hpsa_eh_abort_handler,
	.eh_device_reset_handler = hpsa_eh_device_reset_handler,
	.ioctl			= hpsa_ioctl,
	.slave_alloc		= hpsa_slave_alloc,
	.slave_destroy		= hpsa_slave_destroy,
#ifdef CONFIG_COMPAT
	.compat_ioctl		= hpsa_compat_ioctl,
#endif
	.sdev_attrs		= hpsa_sdev_attrs,
	.shost_attrs		= hpsa_shost_attrs,
};
/* Enqueuing and dequeuing functions for cmdlists. */
static inline void addQ(struct list_head *list, struct CommandList *c)
{
	list_add_tail(&c->list, list);
}
static inline u32 next_command(struct ctlr_info *h, u8 q)
{
	u32 a;
	struct reply_pool *rq = &h->reply_queue[q];
	unsigned long flags;

	if (unlikely(!(h->transMethod & CFGTBL_Trans_Performant)))
		return h->access.command_completed(h, q);

	if ((rq->head[rq->current_entry] & 1) == rq->wraparound) {
		a = rq->head[rq->current_entry];
		rq->current_entry++;
		spin_lock_irqsave(&h->lock, flags);
		h->commands_outstanding--;
		spin_unlock_irqrestore(&h->lock, flags);
	} else {
		a = FIFO_EMPTY;
	}
	/* Check for wraparound */
	if (rq->current_entry == h->max_commands) {
		rq->current_entry = 0;
		rq->wraparound ^= 1;
	}
	return a;
}
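/*
 * Illustrative sketch (an assumption, not stated in the original source):
 * the low bit of each reply-queue entry acts as a producer toggle.  It
 * equals rq->wraparound only for entries the controller has written since
 * the consumer last wrapped, which lets an empty queue be told apart from
 * a full one without separate head/tail indices.
 */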
/* set_performant_mode: Modify the tag for cciss performant
 * set bit 0 for pull model, bits 3-1 for block fetch
 * register number
 */
static void set_performant_mode(struct ctlr_info *h, struct CommandList *c)
{
	if (likely(h->transMethod & CFGTBL_Trans_Performant)) {
		c->busaddr |= 1 | (h->blockFetchTable[c->Header.SGList] << 1);
		if (likely(h->msix_vector))
			c->Header.ReplyQueue =
				smp_processor_id() % h->nreply_queues;
	}
}
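/*
 * Worked example (illustrative assumption): if blockFetchTable[] maps a
 * command's SG count to 3, the value OR'ed into c->busaddr is
 * (3 << 1) | 1 = 0x7 -- bit 0 selects the performant "pull" model and
 * bits 3-1 tell the controller how many units of the command to fetch.
 */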
static int is_firmware_flash_cmd(u8 *cdb)
{
	return cdb[0] == BMIC_WRITE && cdb[6] == BMIC_FLASH_FIRMWARE;
}
/*
 * During firmware flash, the heartbeat register may not update as frequently
 * as it should.  So we dial down lockup detection during firmware flash, and
 * dial it back up when firmware flash completes.
 */
#define HEARTBEAT_SAMPLE_INTERVAL_DURING_FLASH (240 * HZ)
#define HEARTBEAT_SAMPLE_INTERVAL (30 * HZ)
static void dial_down_lockup_detection_during_fw_flash(struct ctlr_info *h,
		struct CommandList *c)
{
	if (!is_firmware_flash_cmd(c->Request.CDB))
		return;
	atomic_inc(&h->firmware_flash_in_progress);
	h->heartbeat_sample_interval = HEARTBEAT_SAMPLE_INTERVAL_DURING_FLASH;
}

static void dial_up_lockup_detection_on_fw_flash_complete(struct ctlr_info *h,
		struct CommandList *c)
{
	if (is_firmware_flash_cmd(c->Request.CDB) &&
		atomic_dec_and_test(&h->firmware_flash_in_progress))
		h->heartbeat_sample_interval = HEARTBEAT_SAMPLE_INTERVAL;
}
static void enqueue_cmd_and_start_io(struct ctlr_info *h,
	struct CommandList *c)
{
	unsigned long flags;

	set_performant_mode(h, c);
	dial_down_lockup_detection_during_fw_flash(h, c);
	spin_lock_irqsave(&h->lock, flags);
	addQ(&h->reqQ, c);
	h->Qdepth++;
	spin_unlock_irqrestore(&h->lock, flags);
	start_io(h);
}
static inline void removeQ(struct CommandList *c)
{
	if (WARN_ON(list_empty(&c->list)))
		return;
	list_del_init(&c->list);
}
static inline int is_hba_lunid(unsigned char scsi3addr[])
{
	return memcmp(scsi3addr, RAID_CTLR_LUNID, 8) == 0;
}
static inline int is_scsi_rev_5(struct ctlr_info *h)
{
	if (!h->hba_inquiry_data)
		return 0;
	if ((h->hba_inquiry_data[2] & 0x07) == 5)
		return 1;
	return 0;
}
static int hpsa_find_target_lun(struct ctlr_info *h,
	unsigned char scsi3addr[], int bus, int *target, int *lun)
{
	/* finds an unused bus, target, lun for a new physical device
	 * assumes h->devlock is held
	 */
	int i, found = 0;
	DECLARE_BITMAP(lun_taken, HPSA_MAX_DEVICES);

	bitmap_zero(lun_taken, HPSA_MAX_DEVICES);

	for (i = 0; i < h->ndevices; i++) {
		if (h->dev[i]->bus == bus && h->dev[i]->target != -1)
			__set_bit(h->dev[i]->target, lun_taken);
	}

	i = find_first_zero_bit(lun_taken, HPSA_MAX_DEVICES);
	if (i < HPSA_MAX_DEVICES) {
		*target = i;
		*lun = 0;
		found = 1;
	}
	return !found;
}
/* Add an entry into h->dev[] array. */
static int hpsa_scsi_add_entry(struct ctlr_info *h, int hostno,
		struct hpsa_scsi_dev_t *device,
		struct hpsa_scsi_dev_t *added[], int *nadded)
{
	/* assumes h->devlock is held */
	int n = h->ndevices;
	int i;
	unsigned char addr1[8], addr2[8];
	struct hpsa_scsi_dev_t *sd;

	if (n >= HPSA_MAX_DEVICES) {
		dev_err(&h->pdev->dev, "too many devices, some will be "
			"inaccessible.\n");
		return -1;
	}

	/* physical devices do not have lun or target assigned until now. */
	if (device->lun != -1)
		/* Logical device, lun is already assigned. */
		goto lun_assigned;

	/* If this device is a non-zero lun of a multi-lun device,
	 * byte 4 of the 8-byte LUN addr will contain the logical
	 * unit no, zero otherwise.
	 */
	if (device->scsi3addr[4] == 0) {
		/* This is not a non-zero lun of a multi-lun device */
		if (hpsa_find_target_lun(h, device->scsi3addr,
			device->bus, &device->target, &device->lun) != 0)
			return -1;
		goto lun_assigned;
	}

	/* This is a non-zero lun of a multi-lun device.
	 * Search through our list and find the device which
	 * has the same 8 byte LUN address, excepting byte 4.
	 * Assign the same bus and target for this new LUN.
	 * Use the logical unit number from the firmware.
	 */
	memcpy(addr1, device->scsi3addr, 8);
	addr1[4] = 0;
	for (i = 0; i < n; i++) {
		sd = h->dev[i];
		memcpy(addr2, sd->scsi3addr, 8);
		addr2[4] = 0;
		/* differ only in byte 4? */
		if (memcmp(addr1, addr2, 8) == 0) {
			device->bus = sd->bus;
			device->target = sd->target;
			device->lun = device->scsi3addr[4];
			break;
		}
	}
	if (device->lun == -1) {
		dev_warn(&h->pdev->dev, "physical device with no LUN=0,"
			" suspect firmware bug or unsupported hardware "
			"configuration.\n");
		return -1;
	}

lun_assigned:

	h->dev[n] = device;
	h->ndevices++;
	added[*nadded] = device;
	(*nadded)++;

	/* initially, (before registering with scsi layer) we don't
	 * know our hostno and we don't want to print anything first
	 * time anyway (the scsi layer's inquiries will show that info)
	 */
	/* if (hostno != -1) */
	dev_info(&h->pdev->dev, "%s device c%db%dt%dl%d added.\n",
		scsi_device_type(device->devtype), hostno,
		device->bus, device->target, device->lun);
	return 0;
}
/* Update an entry in h->dev[] array. */
static void hpsa_scsi_update_entry(struct ctlr_info *h, int hostno,
	int entry, struct hpsa_scsi_dev_t *new_entry)
{
	/* assumes h->devlock is held */
	BUG_ON(entry < 0 || entry >= HPSA_MAX_DEVICES);

	/* Raid level changed. */
	h->dev[entry]->raid_level = new_entry->raid_level;
	dev_info(&h->pdev->dev, "%s device c%db%dt%dl%d updated.\n",
		scsi_device_type(new_entry->devtype), hostno, new_entry->bus,
		new_entry->target, new_entry->lun);
}
/* Replace an entry from h->dev[] array. */
static void hpsa_scsi_replace_entry(struct ctlr_info *h, int hostno,
	int entry, struct hpsa_scsi_dev_t *new_entry,
	struct hpsa_scsi_dev_t *added[], int *nadded,
	struct hpsa_scsi_dev_t *removed[], int *nremoved)
{
	/* assumes h->devlock is held */
	BUG_ON(entry < 0 || entry >= HPSA_MAX_DEVICES);
	removed[*nremoved] = h->dev[entry];
	(*nremoved)++;

	/*
	 * New physical devices won't have target/lun assigned yet
	 * so we need to preserve the values in the slot we are replacing.
	 */
	if (new_entry->target == -1) {
		new_entry->target = h->dev[entry]->target;
		new_entry->lun = h->dev[entry]->lun;
	}

	h->dev[entry] = new_entry;
	added[*nadded] = new_entry;
	(*nadded)++;
	dev_info(&h->pdev->dev, "%s device c%db%dt%dl%d changed.\n",
		scsi_device_type(new_entry->devtype), hostno, new_entry->bus,
		new_entry->target, new_entry->lun);
}
/* Remove an entry from h->dev[] array. */
static void hpsa_scsi_remove_entry(struct ctlr_info *h, int hostno, int entry,
	struct hpsa_scsi_dev_t *removed[], int *nremoved)
{
	/* assumes h->devlock is held */
	int i;
	struct hpsa_scsi_dev_t *sd;

	BUG_ON(entry < 0 || entry >= HPSA_MAX_DEVICES);

	sd = h->dev[entry];
	removed[*nremoved] = h->dev[entry];
	(*nremoved)++;

	for (i = entry; i < h->ndevices-1; i++)
		h->dev[i] = h->dev[i+1];
	h->ndevices--;
	dev_info(&h->pdev->dev, "%s device c%db%dt%dl%d removed.\n",
		scsi_device_type(sd->devtype), hostno, sd->bus, sd->target,
		sd->lun);
}
#define SCSI3ADDR_EQ(a, b) ( \
	(a)[7] == (b)[7] && \
	(a)[6] == (b)[6] && \
	(a)[5] == (b)[5] && \
	(a)[4] == (b)[4] && \
	(a)[3] == (b)[3] && \
	(a)[2] == (b)[2] && \
	(a)[1] == (b)[1] && \
	(a)[0] == (b)[0])
static void fixup_botched_add(struct ctlr_info *h,
	struct hpsa_scsi_dev_t *added)
{
	/* called when scsi_add_device fails in order to re-adjust
	 * h->dev[] to match the mid layer's view.
	 */
	unsigned long flags;
	int i, j;

	spin_lock_irqsave(&h->lock, flags);
	for (i = 0; i < h->ndevices; i++) {
		if (h->dev[i] == added) {
			for (j = i; j < h->ndevices-1; j++)
				h->dev[j] = h->dev[j+1];
			h->ndevices--;
			break;
		}
	}
	spin_unlock_irqrestore(&h->lock, flags);
	kfree(added);
}
static inline int device_is_the_same(struct hpsa_scsi_dev_t *dev1,
	struct hpsa_scsi_dev_t *dev2)
{
	/* we compare everything except lun and target as these
	 * are not yet assigned.  Compare parts likely
	 * to differ first.
	 */
	if (memcmp(dev1->scsi3addr, dev2->scsi3addr,
		sizeof(dev1->scsi3addr)) != 0)
		return 0;
	if (memcmp(dev1->device_id, dev2->device_id,
		sizeof(dev1->device_id)) != 0)
		return 0;
	if (memcmp(dev1->model, dev2->model, sizeof(dev1->model)) != 0)
		return 0;
	if (memcmp(dev1->vendor, dev2->vendor, sizeof(dev1->vendor)) != 0)
		return 0;
	if (dev1->devtype != dev2->devtype)
		return 0;
	if (dev1->bus != dev2->bus)
		return 0;
	return 1;
}
static inline int device_updated(struct hpsa_scsi_dev_t *dev1,
	struct hpsa_scsi_dev_t *dev2)
{
	/* Device attributes that can change, but don't mean
	 * that the device is a different device, nor that the OS
	 * needs to be told anything about the change.
	 */
	if (dev1->raid_level != dev2->raid_level)
		return 1;
	return 0;
}
/* Find needle in haystack.  If exact match found, return DEVICE_SAME,
 * and return needle location in *index.  If scsi3addr matches, but not
 * vendor, model, serial num, etc. return DEVICE_CHANGED, and return needle
 * location in *index.
 * In the case of a minor device attribute change, such as RAID level, just
 * return DEVICE_UPDATED, along with the updated device's location in index.
 * If needle not found, return DEVICE_NOT_FOUND.
 */
static int hpsa_scsi_find_entry(struct hpsa_scsi_dev_t *needle,
	struct hpsa_scsi_dev_t *haystack[], int haystack_size,
	int *index)
{
	int i;
#define DEVICE_NOT_FOUND 0
#define DEVICE_CHANGED 1
#define DEVICE_SAME 2
#define DEVICE_UPDATED 3
	for (i = 0; i < haystack_size; i++) {
		if (haystack[i] == NULL) /* previously removed. */
			continue;
		if (SCSI3ADDR_EQ(needle->scsi3addr, haystack[i]->scsi3addr)) {
			*index = i;
			if (device_is_the_same(needle, haystack[i])) {
				if (device_updated(needle, haystack[i]))
					return DEVICE_UPDATED;
				return DEVICE_SAME;
			} else {
				return DEVICE_CHANGED;
			}
		}
	}
	*index = -1;
	return DEVICE_NOT_FOUND;
}
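/*
 * Usage sketch (illustrative, mirroring adjust_hpsa_scsi_table() below):
 * a DEVICE_SAME result leaves h->dev[] untouched, DEVICE_UPDATED refreshes
 * fields in place, DEVICE_CHANGED swaps the old entry for the new one, and
 * DEVICE_NOT_FOUND removes (or, scanning in the other direction, adds) the
 * device.
 */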
static void adjust_hpsa_scsi_table(struct ctlr_info *h, int hostno,
	struct hpsa_scsi_dev_t *sd[], int nsds)
{
	/* sd contains scsi3 addresses and devtypes, and inquiry
	 * data.  This function takes what's in sd to be the current
	 * reality and updates h->dev[] to reflect that reality.
	 */
	int i, entry, device_change, changes = 0;
	struct hpsa_scsi_dev_t *csd;
	unsigned long flags;
	struct hpsa_scsi_dev_t **added, **removed;
	int nadded, nremoved;
	struct Scsi_Host *sh = NULL;

	added = kzalloc(sizeof(*added) * HPSA_MAX_DEVICES, GFP_KERNEL);
	removed = kzalloc(sizeof(*removed) * HPSA_MAX_DEVICES, GFP_KERNEL);

	if (!added || !removed) {
		dev_warn(&h->pdev->dev, "out of memory in "
			"adjust_hpsa_scsi_table\n");
		goto free_and_out;
	}

	spin_lock_irqsave(&h->devlock, flags);

	/* find any devices in h->dev[] that are not in
	 * sd[] and remove them from h->dev[], and for any
	 * devices which have changed, remove the old device
	 * info and add the new device info.
	 * If minor device attributes change, just update
	 * the existing device structure.
	 */
	i = 0;
	nremoved = 0;
	nadded = 0;
	while (i < h->ndevices) {
		csd = h->dev[i];
		device_change = hpsa_scsi_find_entry(csd, sd, nsds, &entry);
		if (device_change == DEVICE_NOT_FOUND) {
			changes++;
			hpsa_scsi_remove_entry(h, hostno, i,
				removed, &nremoved);
			continue; /* remove ^^^, hence i not incremented */
		} else if (device_change == DEVICE_CHANGED) {
			changes++;
			hpsa_scsi_replace_entry(h, hostno, i, sd[entry],
				added, &nadded, removed, &nremoved);
			/* Set it to NULL to prevent it from being freed
			 * at the bottom of hpsa_update_scsi_devices()
			 */
			sd[entry] = NULL;
		} else if (device_change == DEVICE_UPDATED) {
			hpsa_scsi_update_entry(h, hostno, i, sd[entry]);
		}
		i++;
	}

	/* Now, make sure every device listed in sd[] is also
	 * listed in h->dev[], adding them if they aren't found
	 */

	for (i = 0; i < nsds; i++) {
		if (!sd[i]) /* if already added above. */
			continue;
		device_change = hpsa_scsi_find_entry(sd[i], h->dev,
					h->ndevices, &entry);
		if (device_change == DEVICE_NOT_FOUND) {
			changes++;
			if (hpsa_scsi_add_entry(h, hostno, sd[i],
				added, &nadded) != 0)
				break;
			sd[i] = NULL; /* prevent from being freed later. */
		} else if (device_change == DEVICE_CHANGED) {
			/* should never happen... */
			changes++;
			dev_warn(&h->pdev->dev,
				"device unexpectedly changed.\n");
			/* but if it does happen, we just ignore that device */
		}
	}
	spin_unlock_irqrestore(&h->devlock, flags);

	/* Don't notify scsi mid layer of any changes the first time through
	 * (or if there are no changes) scsi_scan_host will do it later the
	 * first time through.
	 */
	if (hostno == -1 || !changes)
		goto free_and_out;

	sh = h->scsi_host;
	/* Notify scsi mid layer of any removed devices */
	for (i = 0; i < nremoved; i++) {
		struct scsi_device *sdev =
			scsi_device_lookup(sh, removed[i]->bus,
				removed[i]->target, removed[i]->lun);
		if (sdev != NULL) {
			scsi_remove_device(sdev);
			scsi_device_put(sdev);
		} else {
			/* We don't expect to get here.
			 * future cmds to this device will get selection
			 * timeout as if the device was gone.
			 */
			dev_warn(&h->pdev->dev, "didn't find c%db%dt%dl%d "
				" for removal.", hostno, removed[i]->bus,
				removed[i]->target, removed[i]->lun);
		}
		kfree(removed[i]);
		removed[i] = NULL;
	}

	/* Notify scsi mid layer of any added devices */
	for (i = 0; i < nadded; i++) {
		if (scsi_add_device(sh, added[i]->bus,
			added[i]->target, added[i]->lun) == 0)
			continue;
		dev_warn(&h->pdev->dev, "scsi_add_device c%db%dt%dl%d failed, "
			"device not added.\n", hostno, added[i]->bus,
			added[i]->target, added[i]->lun);
		/* now we have to remove it from h->dev,
		 * since it didn't get added to scsi mid layer
		 */
		fixup_botched_add(h, added[i]);
	}

free_and_out:
	kfree(added);
	kfree(removed);
}
/*
 * Lookup bus/target/lun and return corresponding struct hpsa_scsi_dev_t *
 * Assumes h->devlock is held.
 */
static struct hpsa_scsi_dev_t *lookup_hpsa_scsi_dev(struct ctlr_info *h,
	int bus, int target, int lun)
{
	int i;
	struct hpsa_scsi_dev_t *sd;

	for (i = 0; i < h->ndevices; i++) {
		sd = h->dev[i];
		if (sd->bus == bus && sd->target == target && sd->lun == lun)
			return sd;
	}
	return NULL;
}
/* link sdev->hostdata to our per-device structure. */
static int hpsa_slave_alloc(struct scsi_device *sdev)
{
	struct hpsa_scsi_dev_t *sd;
	unsigned long flags;
	struct ctlr_info *h;

	h = sdev_to_hba(sdev);
	spin_lock_irqsave(&h->devlock, flags);
	sd = lookup_hpsa_scsi_dev(h, sdev_channel(sdev),
		sdev_id(sdev), sdev->lun);
	if (sd != NULL)
		sdev->hostdata = sd;
	spin_unlock_irqrestore(&h->devlock, flags);
	return 0;
}
static void hpsa_slave_destroy(struct scsi_device *sdev)
{
	/* nothing to do. */
}
static void hpsa_free_sg_chain_blocks(struct ctlr_info *h)
{
	int i;

	if (!h->cmd_sg_list)
		return;
	for (i = 0; i < h->nr_cmds; i++) {
		kfree(h->cmd_sg_list[i]);
		h->cmd_sg_list[i] = NULL;
	}
	kfree(h->cmd_sg_list);
	h->cmd_sg_list = NULL;
}
static int hpsa_allocate_sg_chain_blocks(struct ctlr_info *h)
{
	int i;

	if (h->chainsize <= 0)
		return 0;

	h->cmd_sg_list = kzalloc(sizeof(*h->cmd_sg_list) * h->nr_cmds,
				GFP_KERNEL);
	if (!h->cmd_sg_list)
		return -ENOMEM;
	for (i = 0; i < h->nr_cmds; i++) {
		h->cmd_sg_list[i] = kmalloc(sizeof(*h->cmd_sg_list[i]) *
						h->chainsize, GFP_KERNEL);
		if (!h->cmd_sg_list[i])
			goto clean;
	}
	return 0;

clean:
	hpsa_free_sg_chain_blocks(h);
	return -ENOMEM;
}
static int hpsa_map_sg_chain_block(struct ctlr_info *h,
	struct CommandList *c)
{
	struct SGDescriptor *chain_sg, *chain_block;
	u64 temp64;

	chain_sg = &c->SG[h->max_cmd_sg_entries - 1];
	chain_block = h->cmd_sg_list[c->cmdindex];
	chain_sg->Ext = HPSA_SG_CHAIN;
	chain_sg->Len = sizeof(*chain_sg) *
		(c->Header.SGTotal - h->max_cmd_sg_entries);
	temp64 = pci_map_single(h->pdev, chain_block, chain_sg->Len,
				PCI_DMA_TODEVICE);
	if (dma_mapping_error(&h->pdev->dev, temp64)) {
		/* prevent subsequent unmapping */
		chain_sg->Addr.lower = 0;
		chain_sg->Addr.upper = 0;
		return -1;
	}
	chain_sg->Addr.lower = (u32) (temp64 & 0x0FFFFFFFFULL);
	chain_sg->Addr.upper = (u32) ((temp64 >> 32) & 0x0FFFFFFFFULL);
	return 0;
}
static void hpsa_unmap_sg_chain_block(struct ctlr_info *h,
	struct CommandList *c)
{
	struct SGDescriptor *chain_sg;
	union u64bit temp64;

	if (c->Header.SGTotal <= h->max_cmd_sg_entries)
		return;

	chain_sg = &c->SG[h->max_cmd_sg_entries - 1];
	temp64.val32.lower = chain_sg->Addr.lower;
	temp64.val32.upper = chain_sg->Addr.upper;
	pci_unmap_single(h->pdev, temp64.val, chain_sg->Len, PCI_DMA_TODEVICE);
}
static void complete_scsi_command(struct CommandList *cp)
{
	struct scsi_cmnd *cmd;
	struct ctlr_info *h;
	struct ErrorInfo *ei;

	unsigned char sense_key;
	unsigned char asc;      /* additional sense code */
	unsigned char ascq;     /* additional sense code qualifier */
	unsigned long sense_data_size;

	ei = cp->err_info;
	cmd = (struct scsi_cmnd *) cp->scsi_cmd;
	h = cp->h;

	scsi_dma_unmap(cmd); /* undo the DMA mappings */
	if (cp->Header.SGTotal > h->max_cmd_sg_entries)
		hpsa_unmap_sg_chain_block(h, cp);

	cmd->result = (DID_OK << 16); 		/* host byte */
	cmd->result |= (COMMAND_COMPLETE << 8);	/* msg byte */
	cmd->result |= ei->ScsiStatus;

	/* copy the sense data whether we need to or not. */
	if (SCSI_SENSE_BUFFERSIZE < sizeof(ei->SenseInfo))
		sense_data_size = SCSI_SENSE_BUFFERSIZE;
	else
		sense_data_size = sizeof(ei->SenseInfo);
	if (ei->SenseLen < sense_data_size)
		sense_data_size = ei->SenseLen;

	memcpy(cmd->sense_buffer, ei->SenseInfo, sense_data_size);
	scsi_set_resid(cmd, ei->ResidualCnt);

	if (ei->CommandStatus == 0) {
		cmd_free(h, cp);
		cmd->scsi_done(cmd);
		return;
	}

	/* an error has occurred */
	switch (ei->CommandStatus) {

	case CMD_TARGET_STATUS:
		if (ei->ScsiStatus) {
			/* Get sense key */
			sense_key = 0xf & ei->SenseInfo[2];
			/* Get additional sense code */
			asc = ei->SenseInfo[12];
			/* Get additional sense code qualifier */
			ascq = ei->SenseInfo[13];
		}

		if (ei->ScsiStatus == SAM_STAT_CHECK_CONDITION) {
			if (check_for_unit_attention(h, cp)) {
				cmd->result = DID_SOFT_ERROR << 16;
				break;
			}
			if (sense_key == ILLEGAL_REQUEST) {
				/*
				 * SCSI REPORT_LUNS is commonly unsupported on
				 * Smart Array.  Suppress noisy complaint.
				 */
				if (cp->Request.CDB[0] == REPORT_LUNS)
					break;

				/* If ASC/ASCQ indicate Logical Unit
				 * Not Supported condition,
				 */
				if ((asc == 0x25) && (ascq == 0x0)) {
					dev_warn(&h->pdev->dev, "cp %p "
						"has check condition\n", cp);
					break;
				}
			}

			if (sense_key == NOT_READY) {
				/* If Sense is Not Ready, Logical Unit
				 * Not ready, Manual Intervention
				 * required
				 */
				if ((asc == 0x04) && (ascq == 0x03)) {
					dev_warn(&h->pdev->dev, "cp %p "
						"has check condition: unit "
						"not ready, manual "
						"intervention required\n", cp);
					break;
				}
			}
			if (sense_key == ABORTED_COMMAND) {
				/* Aborted command is retryable */
				dev_warn(&h->pdev->dev, "cp %p "
					"has check condition: aborted command: "
					"ASC: 0x%x, ASCQ: 0x%x\n",
					cp, asc, ascq);
				cmd->result |= DID_SOFT_ERROR << 16;
				break;
			}
			/* Must be some other type of check condition */
			dev_dbg(&h->pdev->dev, "cp %p has check condition: "
					"unknown type: "
					"Sense: 0x%x, ASC: 0x%x, ASCQ: 0x%x, "
					"Returning result: 0x%x, "
					"cmd=[%02x %02x %02x %02x %02x "
					"%02x %02x %02x %02x %02x %02x "
					"%02x %02x %02x %02x %02x]\n",
					cp, sense_key, asc, ascq,
					cmd->result,
					cmd->cmnd[0], cmd->cmnd[1],
					cmd->cmnd[2], cmd->cmnd[3],
					cmd->cmnd[4], cmd->cmnd[5],
					cmd->cmnd[6], cmd->cmnd[7],
					cmd->cmnd[8], cmd->cmnd[9],
					cmd->cmnd[10], cmd->cmnd[11],
					cmd->cmnd[12], cmd->cmnd[13],
					cmd->cmnd[14], cmd->cmnd[15]);
			break;
		}

		/* Problem was not a check condition
		 * Pass it up to the upper layers...
		 */
		if (ei->ScsiStatus) {
			dev_warn(&h->pdev->dev, "cp %p has status 0x%x "
				"Sense: 0x%x, ASC: 0x%x, ASCQ: 0x%x, "
				"Returning result: 0x%x\n",
				cp, ei->ScsiStatus,
				sense_key, asc, ascq,
				cmd->result);
		} else {  /* scsi status is zero??? How??? */
			dev_warn(&h->pdev->dev, "cp %p SCSI status was 0. "
				"Returning no connection.\n", cp);

			/* Ordinarily, this case should never happen,
			 * but there is a bug in some released firmware
			 * revisions that allows it to happen if, for
			 * example, a 4100 backplane loses power and
			 * the tape drive is in it.  We assume that
			 * it's a fatal error of some kind because we
			 * can't show that it wasn't.  We will make it
			 * look like selection timeout since that is
			 * the most common reason for this to occur,
			 * and it's severe enough.
			 */

			cmd->result = DID_NO_CONNECT << 16;
		}
		break;

	case CMD_DATA_UNDERRUN: /* let mid layer handle it. */
		break;
	case CMD_DATA_OVERRUN:
		dev_warn(&h->pdev->dev, "cp %p has"
			" completed with data overrun "
			"reported\n", cp);
		break;
	case CMD_INVALID: {
		/* print_bytes(cp, sizeof(*cp), 1, 0);
		print_cmd(cp); */
		/* We get CMD_INVALID if you address a non-existent device
		 * instead of a selection timeout (no response).  You will
		 * see this if you yank out a drive, then try to access it.
		 * This is kind of a shame because it means that any other
		 * CMD_INVALID (e.g. driver bug) will get interpreted as a
		 * missing target. */
		cmd->result = DID_NO_CONNECT << 16;
	}
		break;
	case CMD_PROTOCOL_ERR:
		cmd->result = DID_ERROR << 16;
		dev_warn(&h->pdev->dev, "cp %p has "
			"protocol error\n", cp);
		break;
	case CMD_HARDWARE_ERR:
		cmd->result = DID_ERROR << 16;
		dev_warn(&h->pdev->dev, "cp %p had hardware error\n", cp);
		break;
	case CMD_CONNECTION_LOST:
		cmd->result = DID_ERROR << 16;
		dev_warn(&h->pdev->dev, "cp %p had connection lost\n", cp);
		break;
	case CMD_ABORTED:
		cmd->result = DID_ABORT << 16;
		dev_warn(&h->pdev->dev, "cp %p was aborted with status 0x%x\n",
			cp, ei->ScsiStatus);
		break;
	case CMD_ABORT_FAILED:
		cmd->result = DID_ERROR << 16;
		dev_warn(&h->pdev->dev, "cp %p reports abort failed\n", cp);
		break;
	case CMD_UNSOLICITED_ABORT:
		cmd->result = DID_SOFT_ERROR << 16; /* retry the command */
		dev_warn(&h->pdev->dev, "cp %p aborted due to an unsolicited "
			"abort\n", cp);
		break;
	case CMD_TIMEOUT:
		cmd->result = DID_TIME_OUT << 16;
		dev_warn(&h->pdev->dev, "cp %p timed out\n", cp);
		break;
	case CMD_UNABORTABLE:
		cmd->result = DID_ERROR << 16;
		dev_warn(&h->pdev->dev, "Command unabortable\n");
		break;
	default:
		cmd->result = DID_ERROR << 16;
		dev_warn(&h->pdev->dev, "cp %p returned unknown status %x\n",
				cp, ei->CommandStatus);
	}
	cmd_free(h, cp);
	cmd->scsi_done(cmd);
}
static void hpsa_pci_unmap(struct pci_dev *pdev,
	struct CommandList *c, int sg_used, int data_direction)
{
	int i;
	union u64bit addr64;

	for (i = 0; i < sg_used; i++) {
		addr64.val32.lower = c->SG[i].Addr.lower;
		addr64.val32.upper = c->SG[i].Addr.upper;
		pci_unmap_single(pdev, (dma_addr_t) addr64.val, c->SG[i].Len,
			data_direction);
	}
}
static int hpsa_map_one(struct pci_dev *pdev,
		struct CommandList *cp,
		unsigned char *buf,
		size_t buflen,
		int data_direction)
{
	u64 addr64;

	if (buflen == 0 || data_direction == PCI_DMA_NONE) {
		cp->Header.SGList = 0;
		cp->Header.SGTotal = 0;
		return 0;
	}

	addr64 = (u64) pci_map_single(pdev, buf, buflen, data_direction);
	if (dma_mapping_error(&pdev->dev, addr64)) {
		/* Prevent subsequent unmap of something never mapped */
		cp->Header.SGList = 0;
		cp->Header.SGTotal = 0;
		return -1;
	}
	cp->SG[0].Addr.lower =
		(u32) (addr64 & (u64) 0x00000000FFFFFFFF);
	cp->SG[0].Addr.upper =
		(u32) ((addr64 >> 32) & (u64) 0x00000000FFFFFFFF);
	cp->SG[0].Len = buflen;
	cp->Header.SGList = (u8) 1;   /* no. SGs contig in this cmd */
	cp->Header.SGTotal = (u16) 1; /* total sgs in this cmd list */
	return 0;
}
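/*
 * Worked example (illustrative): a mapped DMA address of 0x123456789ULL is
 * stored as Addr.lower = 0x23456789 and Addr.upper = 0x00000001, matching
 * the controller's split 32+32 bit SG descriptor layout used above.
 */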
static inline void hpsa_scsi_do_simple_cmd_core(struct ctlr_info *h,
	struct CommandList *c)
{
	DECLARE_COMPLETION_ONSTACK(wait);

	c->waiting = &wait;
	enqueue_cmd_and_start_io(h, c);
	wait_for_completion(&wait);
}
static void hpsa_scsi_do_simple_cmd_core_if_no_lockup(struct ctlr_info *h,
	struct CommandList *c)
{
	unsigned long flags;

	/* If controller lockup detected, fake a hardware error. */
	spin_lock_irqsave(&h->lock, flags);
	if (unlikely(h->lockup_detected)) {
		spin_unlock_irqrestore(&h->lock, flags);
		c->err_info->CommandStatus = CMD_HARDWARE_ERR;
		return;
	}
	spin_unlock_irqrestore(&h->lock, flags);
	hpsa_scsi_do_simple_cmd_core(h, c);
}
#define MAX_DRIVER_CMD_RETRIES 25
static void hpsa_scsi_do_simple_cmd_with_retry(struct ctlr_info *h,
	struct CommandList *c, int data_direction)
{
	int backoff_time = 10, retry_count = 0;

	do {
		memset(c->err_info, 0, sizeof(*c->err_info));
		hpsa_scsi_do_simple_cmd_core(h, c);
		retry_count++;
		if (retry_count > 3) {
			msleep(backoff_time);
			if (backoff_time < 1000)
				backoff_time *= 2;
		}
	} while ((check_for_unit_attention(h, c) ||
			check_for_busy(h, c)) &&
			retry_count <= MAX_DRIVER_CMD_RETRIES);
	hpsa_pci_unmap(h->pdev, c, 1, data_direction);
}
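/*
 * Illustrative timing (assumption): starting with the fourth attempt the
 * loop above sleeps 10, 20, 40, ... ms between retries, with the doubling
 * stopping once backoff_time reaches 1000 ms or more, for at most
 * MAX_DRIVER_CMD_RETRIES (25) attempts in total.
 */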
static void hpsa_scsi_interpret_error(struct CommandList *cp)
{
	struct ErrorInfo *ei;
	struct device *d = &cp->h->pdev->dev;

	ei = cp->err_info;
	switch (ei->CommandStatus) {
	case CMD_TARGET_STATUS:
		dev_warn(d, "cmd %p has completed with errors\n", cp);
		dev_warn(d, "cmd %p has SCSI Status = %x\n", cp,
				ei->ScsiStatus);
		if (ei->ScsiStatus == 0)
			dev_warn(d, "SCSI status is abnormally zero.  "
			"(probably indicates selection timeout "
			"reported incorrectly due to a known "
			"firmware bug, circa July, 2001.)\n");
		break;
	case CMD_DATA_UNDERRUN: /* let mid layer handle it. */
		dev_info(d, "UNDERRUN\n");
		break;
	case CMD_DATA_OVERRUN:
		dev_warn(d, "cp %p has completed with data overrun\n", cp);
		break;
	case CMD_INVALID: {
		/* controller unfortunately reports SCSI passthru's
		 * to non-existent targets as invalid commands.
		 */
		dev_warn(d, "cp %p is reported invalid (probably means "
			"target device no longer present)\n", cp);
		/* print_bytes((unsigned char *) cp, sizeof(*cp), 1, 0);
		print_cmd(cp);  */
		}
		break;
	case CMD_PROTOCOL_ERR:
		dev_warn(d, "cp %p has protocol error\n", cp);
		break;
	case CMD_HARDWARE_ERR:
		/* cmd->result = DID_ERROR << 16; */
		dev_warn(d, "cp %p had hardware error\n", cp);
		break;
	case CMD_CONNECTION_LOST:
		dev_warn(d, "cp %p had connection lost\n", cp);
		break;
	case CMD_ABORTED:
		dev_warn(d, "cp %p was aborted\n", cp);
		break;
	case CMD_ABORT_FAILED:
		dev_warn(d, "cp %p reports abort failed\n", cp);
		break;
	case CMD_UNSOLICITED_ABORT:
		dev_warn(d, "cp %p aborted due to an unsolicited abort\n", cp);
		break;
	case CMD_TIMEOUT:
		dev_warn(d, "cp %p timed out\n", cp);
		break;
	case CMD_UNABORTABLE:
		dev_warn(d, "Command unabortable\n");
		break;
	default:
		dev_warn(d, "cp %p returned unknown status %x\n", cp,
				ei->CommandStatus);
	}
}
static int hpsa_scsi_do_inquiry(struct ctlr_info *h, unsigned char *scsi3addr,
	unsigned char page, unsigned char *buf,
	unsigned char bufsize)
{
	int rc = IO_OK;
	struct CommandList *c;
	struct ErrorInfo *ei;

	c = cmd_special_alloc(h);

	if (c == NULL) {			/* trouble... */
		dev_warn(&h->pdev->dev, "cmd_special_alloc returned NULL!\n");
		return -ENOMEM;
	}

	if (fill_cmd(c, HPSA_INQUIRY, h, buf, bufsize,
			page, scsi3addr, TYPE_CMD)) {
		rc = -1;
		goto out;
	}
	hpsa_scsi_do_simple_cmd_with_retry(h, c, PCI_DMA_FROMDEVICE);
	ei = c->err_info;
	if (ei->CommandStatus != 0 && ei->CommandStatus != CMD_DATA_UNDERRUN) {
		hpsa_scsi_interpret_error(c);
		rc = -1;
	}
out:
	cmd_special_free(h, c);
	return rc;
}
static int hpsa_send_reset(struct ctlr_info *h, unsigned char *scsi3addr)
{
	int rc = IO_OK;
	struct CommandList *c;
	struct ErrorInfo *ei;

	c = cmd_special_alloc(h);

	if (c == NULL) {			/* trouble... */
		dev_warn(&h->pdev->dev, "cmd_special_alloc returned NULL!\n");
		return -ENOMEM;
	}

	/* fill_cmd can't fail here, no data buffer to map. */
	(void) fill_cmd(c, HPSA_DEVICE_RESET_MSG, h,
			NULL, 0, 0, scsi3addr, TYPE_MSG);
	hpsa_scsi_do_simple_cmd_core(h, c);
	/* no unmap needed here because no data xfer. */

	ei = c->err_info;
	if (ei->CommandStatus != 0) {
		hpsa_scsi_interpret_error(c);
		rc = -1;
	}
	cmd_special_free(h, c);
	return rc;
}
static void hpsa_get_raid_level(struct ctlr_info *h,
	unsigned char *scsi3addr, unsigned char *raid_level)
{
	int rc;
	unsigned char *buf;

	*raid_level = RAID_UNKNOWN;
	buf = kzalloc(64, GFP_KERNEL);
	if (!buf)
		return;
	rc = hpsa_scsi_do_inquiry(h, scsi3addr, 0xC1, buf, 64);
	if (rc == 0)
		*raid_level = buf[8];
	if (*raid_level > RAID_UNKNOWN)
		*raid_level = RAID_UNKNOWN;
	kfree(buf);
	return;
}
/* Get the device id from inquiry page 0x83 */
static int hpsa_get_device_id(struct ctlr_info *h, unsigned char *scsi3addr,
	unsigned char *device_id, int buflen)
{
	int rc;
	unsigned char *buf;

	buf = kzalloc(64, GFP_KERNEL);
	if (!buf)
		return -1;
	rc = hpsa_scsi_do_inquiry(h, scsi3addr, 0x83, buf, 64);
	if (rc == 0)
		memcpy(device_id, &buf[8], buflen);
	kfree(buf);
	return rc != 0;
}
static int hpsa_scsi_do_report_luns(struct ctlr_info *h, int logical,
		struct ReportLUNdata *buf, int bufsize,
		int extended_response)
{
	int rc = IO_OK;
	struct CommandList *c;
	unsigned char scsi3addr[8];
	struct ErrorInfo *ei;

	c = cmd_special_alloc(h);
	if (c == NULL) {			/* trouble... */
		dev_err(&h->pdev->dev, "cmd_special_alloc returned NULL!\n");
		return -1;
	}
	/* address the controller */
	memset(scsi3addr, 0, sizeof(scsi3addr));
	if (fill_cmd(c, logical ? HPSA_REPORT_LOG : HPSA_REPORT_PHYS, h,
		buf, bufsize, 0, scsi3addr, TYPE_CMD)) {
		rc = -1;
		goto out;
	}
	if (extended_response)
		c->Request.CDB[1] = extended_response;
	hpsa_scsi_do_simple_cmd_with_retry(h, c, PCI_DMA_FROMDEVICE);
	ei = c->err_info;
	if (ei->CommandStatus != 0 &&
	    ei->CommandStatus != CMD_DATA_UNDERRUN) {
		hpsa_scsi_interpret_error(c);
		rc = -1;
	}
out:
	cmd_special_free(h, c);
	return rc;
}

static inline int hpsa_scsi_do_report_phys_luns(struct ctlr_info *h,
		struct ReportLUNdata *buf,
		int bufsize, int extended_response)
{
	return hpsa_scsi_do_report_luns(h, 0, buf, bufsize, extended_response);
}

static inline int hpsa_scsi_do_report_log_luns(struct ctlr_info *h,
		struct ReportLUNdata *buf, int bufsize)
{
	return hpsa_scsi_do_report_luns(h, 1, buf, bufsize, 0);
}
static inline void hpsa_set_bus_target_lun(struct hpsa_scsi_dev_t *device,
	int bus, int target, int lun)
{
	device->bus = bus;
	device->target = target;
	device->lun = lun;
}
static int hpsa_update_device_info(struct ctlr_info *h,
	unsigned char scsi3addr[], struct hpsa_scsi_dev_t *this_device,
	unsigned char *is_OBDR_device)
{

#define OBDR_SIG_OFFSET 43
#define OBDR_TAPE_SIG "$DR-10"
#define OBDR_SIG_LEN (sizeof(OBDR_TAPE_SIG) - 1)
#define OBDR_TAPE_INQ_SIZE (OBDR_SIG_OFFSET + OBDR_SIG_LEN)
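/* Arithmetic note (illustrative): OBDR_SIG_LEN is 6 ("$DR-10" without its
 * NUL terminator), so OBDR_TAPE_INQ_SIZE is 43 + 6 = 49 bytes -- just
 * enough inquiry data to cover the signature check below.
 */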
	unsigned char *inq_buff;
	unsigned char *obdr_sig;

	inq_buff = kzalloc(OBDR_TAPE_INQ_SIZE, GFP_KERNEL);
	if (!inq_buff)
		goto bail_out;

	/* Do an inquiry to the device to see what it is. */
	if (hpsa_scsi_do_inquiry(h, scsi3addr, 0, inq_buff,
		(unsigned char) OBDR_TAPE_INQ_SIZE) != 0) {
		/* Inquiry failed (msg printed already) */
		dev_err(&h->pdev->dev,
			"hpsa_update_device_info: inquiry failed\n");
		goto bail_out;
	}

	this_device->devtype = (inq_buff[0] & 0x1f);
	memcpy(this_device->scsi3addr, scsi3addr, 8);
	memcpy(this_device->vendor, &inq_buff[8],
		sizeof(this_device->vendor));
	memcpy(this_device->model, &inq_buff[16],
		sizeof(this_device->model));
	memset(this_device->device_id, 0,
		sizeof(this_device->device_id));
	hpsa_get_device_id(h, scsi3addr, this_device->device_id,
		sizeof(this_device->device_id));

	if (this_device->devtype == TYPE_DISK &&
		is_logical_dev_addr_mode(scsi3addr))
		hpsa_get_raid_level(h, scsi3addr, &this_device->raid_level);
	else
		this_device->raid_level = RAID_UNKNOWN;

	if (is_OBDR_device) {
		/* See if this is a One-Button-Disaster-Recovery device
		 * by looking for "$DR-10" at offset 43 in inquiry data.
		 */
		obdr_sig = &inq_buff[OBDR_SIG_OFFSET];
		*is_OBDR_device = (this_device->devtype == TYPE_ROM &&
					strncmp(obdr_sig, OBDR_TAPE_SIG,
						OBDR_SIG_LEN) == 0);
	}

	kfree(inq_buff);
	return 0;

bail_out:
	kfree(inq_buff);
	return 1;
}
static unsigned char *ext_target_model[] = {
	"MSA2012",
	"MSA2024",
	"MSA2312",
	"MSA2324",
	"P2000 G3 SAS",
	NULL,
};

static int is_ext_target(struct ctlr_info *h, struct hpsa_scsi_dev_t *device)
{
	int i;

	for (i = 0; ext_target_model[i]; i++)
		if (strncmp(device->model, ext_target_model[i],
			strlen(ext_target_model[i])) == 0)
			return 1;
	return 0;
}
/* Helper function to assign bus, target, lun mapping of devices.
 * Puts non-external target logical volumes on bus 0, external target logical
 * volumes on bus 1, physical devices on bus 2, and the hba on bus 3.
 * Logical drive target and lun are assigned at this time, but
 * physical device lun and target assignment are deferred (assigned
 * in hpsa_find_target_lun, called by hpsa_scsi_add_entry.)
 */
static void figure_bus_target_lun(struct ctlr_info *h,
	u8 *lunaddrbytes, struct hpsa_scsi_dev_t *device)
{
	u32 lunid = le32_to_cpu(*((__le32 *) lunaddrbytes));

	if (!is_logical_dev_addr_mode(lunaddrbytes)) {
		/* physical device, target and lun filled in later */
		if (is_hba_lunid(lunaddrbytes))
			hpsa_set_bus_target_lun(device, 3, 0, lunid & 0x3fff);
		else
			/* defer target, lun assignment for physical devices */
			hpsa_set_bus_target_lun(device, 2, -1, -1);
		return;
	}
	/* It's a logical device */
	if (is_ext_target(h, device)) {
		/* external target way, put logicals on bus 1
		 * and match target/lun numbers box
		 * reports, other smart array, bus 0, target 0, match lunid
		 * reports.
		 */
		hpsa_set_bus_target_lun(device,
			1, (lunid >> 16) & 0x3fff, lunid & 0x00ff);
		return;
	}
	hpsa_set_bus_target_lun(device, 0, 0, lunid & 0x3fff);
}
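/*
 * Worked example (illustrative assumption): an external target LUN ID of
 * 0x00020005 lands on bus 1 with target (0x00020005 >> 16) & 0x3fff = 2
 * and lun 0x00020005 & 0x00ff = 5; the same ID on a non-external logical
 * volume would become bus 0, target 0, lun 0x00020005 & 0x3fff = 5.
 */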
/*
 * If there is no lun 0 on a target, linux won't find any devices.
 * For the external targets (arrays), we have to manually detect the enclosure
 * which is at lun zero, as CCISS_REPORT_PHYSICAL_LUNS doesn't report
 * it for some reason.  *tmpdevice is the target we're adding,
 * this_device is a pointer into the current element of currentsd[]
 * that we're building up in update_scsi_devices(), below.
 * lunzerobits is a bitmap that tracks which targets already have a
 * lun 0 assigned.
 * Returns 1 if an enclosure was added, 0 if not.
 */
static int add_ext_target_dev(struct ctlr_info *h,
	struct hpsa_scsi_dev_t *tmpdevice,
	struct hpsa_scsi_dev_t *this_device, u8 *lunaddrbytes,
	unsigned long lunzerobits[], int *n_ext_target_devs)
{
	unsigned char scsi3addr[8];

	if (test_bit(tmpdevice->target, lunzerobits))
		return 0; /* There is already a lun 0 on this target. */

	if (!is_logical_dev_addr_mode(lunaddrbytes))
		return 0; /* It's the logical targets that may lack lun 0. */

	if (!is_ext_target(h, tmpdevice))
		return 0; /* Only external target devices have this problem. */

	if (tmpdevice->lun == 0) /* if lun is 0, then we have a lun 0. */
		return 0;

	memset(scsi3addr, 0, 8);
	scsi3addr[3] = tmpdevice->target;
	if (is_hba_lunid(scsi3addr))
		return 0; /* Don't add the RAID controller here. */

	if (is_scsi_rev_5(h))
		return 0; /* p1210m doesn't need to do this. */

	if (*n_ext_target_devs >= MAX_EXT_TARGETS) {
		dev_warn(&h->pdev->dev, "Maximum number of external "
			"target devices exceeded.  Check your hardware "
			"configuration.");
		return 0;
	}

	if (hpsa_update_device_info(h, scsi3addr, this_device, NULL))
		return 0;
	(*n_ext_target_devs)++;
	hpsa_set_bus_target_lun(this_device,
				tmpdevice->bus, tmpdevice->target, 0);
	set_bit(tmpdevice->target, lunzerobits);
	return 1;
}
/*
 * Do CISS_REPORT_PHYS and CISS_REPORT_LOG.  Data is returned in physdev,
 * logdev.  The number of luns in physdev and logdev are returned in
 * *nphysicals and *nlogicals, respectively.
 * Returns 0 on success, -1 otherwise.
 */
static int hpsa_gather_lun_info(struct ctlr_info *h,
	int reportlunsize,
	struct ReportLUNdata *physdev, u32 *nphysicals,
	struct ReportLUNdata *logdev, u32 *nlogicals)
{
	if (hpsa_scsi_do_report_phys_luns(h, physdev, reportlunsize, 0)) {
		dev_err(&h->pdev->dev, "report physical LUNs failed.\n");
		return -1;
	}
	*nphysicals = be32_to_cpu(*((__be32 *)physdev->LUNListLength)) / 8;
	if (*nphysicals > HPSA_MAX_PHYS_LUN) {
		dev_warn(&h->pdev->dev, "maximum physical LUNs (%d) exceeded."
			"  %d LUNs ignored.\n", HPSA_MAX_PHYS_LUN,
			*nphysicals - HPSA_MAX_PHYS_LUN);
		*nphysicals = HPSA_MAX_PHYS_LUN;
	}
	if (hpsa_scsi_do_report_log_luns(h, logdev, reportlunsize)) {
		dev_err(&h->pdev->dev, "report logical LUNs failed.\n");
		return -1;
	}
	*nlogicals = be32_to_cpu(*((__be32 *) logdev->LUNListLength)) / 8;
	/* Reject Logicals in excess of our max capability. */
	if (*nlogicals > HPSA_MAX_LUN) {
		dev_warn(&h->pdev->dev,
			"maximum logical LUNs (%d) exceeded.  "
			"%d LUNs ignored.\n", HPSA_MAX_LUN,
			*nlogicals - HPSA_MAX_LUN);
		*nlogicals = HPSA_MAX_LUN;
	}
	if (*nlogicals + *nphysicals > HPSA_MAX_PHYS_LUN) {
		dev_warn(&h->pdev->dev,
			"maximum logical + physical LUNs (%d) exceeded. "
			"%d LUNs ignored.\n", HPSA_MAX_PHYS_LUN,
			*nphysicals + *nlogicals - HPSA_MAX_PHYS_LUN);
		*nlogicals = HPSA_MAX_PHYS_LUN - *nphysicals;
	}
	return 0;
}
static u8 *figure_lunaddrbytes(struct ctlr_info *h, int raid_ctlr_position,
	int i, int nphysicals, int nlogicals, struct ReportLUNdata *physdev_list,
	struct ReportLUNdata *logdev_list)
{
	/* Helper function, figure out where the LUN ID info is coming from
	 * given index i, lists of physical and logical devices, where in
	 * the list the raid controller is supposed to appear (first or last)
	 */

	int logicals_start = nphysicals + (raid_ctlr_position == 0);
	int last_device = nphysicals + nlogicals + (raid_ctlr_position == 0);

	if (i == raid_ctlr_position)
		return RAID_CTLR_LUNID;

	if (i < logicals_start)
		return &physdev_list->LUN[i - (raid_ctlr_position == 0)][0];

	if (i < last_device)
		return &logdev_list->LUN[i - nphysicals -
			(raid_ctlr_position == 0)][0];
	BUG();
	return NULL;
}
static void hpsa_update_scsi_devices(struct ctlr_info *h, int hostno)
{
	/* the idea here is we could get notified
	 * that some devices have changed, so we do a report
	 * physical luns and report logical luns cmd, and adjust
	 * our list of devices accordingly.
	 *
	 * The scsi3addr's of devices won't change so long as the
	 * adapter is not reset.  That means we can rescan and
	 * tell which devices we already know about, vs. new
	 * devices, vs. disappearing devices.
	 */
	struct ReportLUNdata *physdev_list = NULL;
	struct ReportLUNdata *logdev_list = NULL;
	u32 nphysicals = 0;
	u32 nlogicals = 0;
	u32 ndev_allocated = 0;
	struct hpsa_scsi_dev_t **currentsd, *this_device, *tmpdevice;
	int ncurrent = 0;
	int reportlunsize = sizeof(*physdev_list) + HPSA_MAX_PHYS_LUN * 8;
	int i, n_ext_target_devs, ndevs_to_allocate;
	int raid_ctlr_position;
	DECLARE_BITMAP(lunzerobits, MAX_EXT_TARGETS);

	currentsd = kzalloc(sizeof(*currentsd) * HPSA_MAX_DEVICES, GFP_KERNEL);
	physdev_list = kzalloc(reportlunsize, GFP_KERNEL);
	logdev_list = kzalloc(reportlunsize, GFP_KERNEL);
	tmpdevice = kzalloc(sizeof(*tmpdevice), GFP_KERNEL);

	if (!currentsd || !physdev_list || !logdev_list || !tmpdevice) {
		dev_err(&h->pdev->dev, "out of memory\n");
		goto out;
	}
	memset(lunzerobits, 0, sizeof(lunzerobits));

	if (hpsa_gather_lun_info(h, reportlunsize, physdev_list, &nphysicals,
			logdev_list, &nlogicals))
		goto out;

	/* We might see up to the maximum number of logical and physical disks
	 * plus external target devices, and a device for the local RAID
	 * controller.
	 */
	ndevs_to_allocate = nphysicals + nlogicals + MAX_EXT_TARGETS + 1;

	/* Allocate the per device structures */
	for (i = 0; i < ndevs_to_allocate; i++) {
		if (i >= HPSA_MAX_DEVICES) {
			dev_warn(&h->pdev->dev, "maximum devices (%d) exceeded."
				"  %d devices ignored.\n", HPSA_MAX_DEVICES,
				ndevs_to_allocate - HPSA_MAX_DEVICES);
			break;
		}

		currentsd[i] = kzalloc(sizeof(*currentsd[i]), GFP_KERNEL);
		if (!currentsd[i]) {
			dev_warn(&h->pdev->dev, "out of memory at %s:%d\n",
				__FILE__, __LINE__);
			goto out;
		}
		ndev_allocated++;
	}

	if (unlikely(is_scsi_rev_5(h)))
		raid_ctlr_position = 0;
	else
		raid_ctlr_position = nphysicals + nlogicals;

	/* adjust our table of devices */
	n_ext_target_devs = 0;
	for (i = 0; i < nphysicals + nlogicals + 1; i++) {
		u8 *lunaddrbytes, is_OBDR = 0;

		/* Figure out where the LUN ID info is coming from */
		lunaddrbytes = figure_lunaddrbytes(h, raid_ctlr_position,
			i, nphysicals, nlogicals, physdev_list, logdev_list);
		/* skip masked physical devices. */
		if (lunaddrbytes[3] & 0xC0 &&
			i < nphysicals + (raid_ctlr_position == 0))
			continue;

		/* Get device type, vendor, model, device id */
		if (hpsa_update_device_info(h, lunaddrbytes, tmpdevice,
							&is_OBDR))
			continue; /* skip it if we can't talk to it. */
		figure_bus_target_lun(h, lunaddrbytes, tmpdevice);
		this_device = currentsd[ncurrent];

		/*
		 * For external target devices, we have to insert a LUN 0 which
		 * doesn't show up in CCISS_REPORT_PHYSICAL data, but there
		 * is nonetheless an enclosure device there.  We have to
		 * present that otherwise linux won't find anything if
		 * there is no lun 0.
		 */
		if (add_ext_target_dev(h, tmpdevice, this_device,
				lunaddrbytes, lunzerobits,
				&n_ext_target_devs)) {
			ncurrent++;
			this_device = currentsd[ncurrent];
		}

		*this_device = *tmpdevice;

		switch (this_device->devtype) {
		case TYPE_ROM:
			/* We don't *really* support actual CD-ROM devices,
			 * just "One Button Disaster Recovery" tape drive
			 * which temporarily pretends to be a CD-ROM drive.
			 * So we check that the device is really an OBDR tape
			 * device by checking for "$DR-10" in bytes 43-48 of
			 * the inquiry data.
			 */
			if (is_OBDR)
				ncurrent++;
			break;
		case TYPE_DISK:
			if (i < nphysicals)
				break;
			ncurrent++;
			break;
		case TYPE_TAPE:
		case TYPE_MEDIUM_CHANGER:
			ncurrent++;
			break;
		case TYPE_RAID:
			/* Only present the Smartarray HBA as a RAID controller.
			 * If it's a RAID controller other than the HBA itself
			 * (an external RAID controller, MSA500 or similar)
			 * don't present it.
			 */
			if (!is_hba_lunid(lunaddrbytes))
				break;
			ncurrent++;
			break;
		default:
			break;
		}
		if (ncurrent >= HPSA_MAX_DEVICES)
			break;
	}
	adjust_hpsa_scsi_table(h, hostno, currentsd, ncurrent);
out:
	kfree(tmpdevice);
	for (i = 0; i < ndev_allocated; i++)
		kfree(currentsd[i]);
	kfree(currentsd);
	kfree(physdev_list);
	kfree(logdev_list);
}
/* hpsa_scatter_gather takes a struct scsi_cmnd, (cmd), and does the pci
 * dma mapping and fills in the scatter gather entries of the
 * hpsa command, cp.
 */
static int hpsa_scatter_gather(struct ctlr_info *h,
		struct CommandList *cp,
		struct scsi_cmnd *cmd)
{
	unsigned int len;
	struct scatterlist *sg;
	u64 addr64;
	int use_sg, i, sg_index, chained;
	struct SGDescriptor *curr_sg;

	BUG_ON(scsi_sg_count(cmd) > h->maxsgentries);

	use_sg = scsi_dma_map(cmd);
	if (use_sg < 0)
		return use_sg;

	if (!use_sg)
		goto sglist_finished;

	curr_sg = cp->SG;
	chained = 0;
	sg_index = 0;
	scsi_for_each_sg(cmd, sg, use_sg, i) {
		if (i == h->max_cmd_sg_entries - 1 &&
			use_sg > h->max_cmd_sg_entries) {
			chained = 1;
			curr_sg = h->cmd_sg_list[cp->cmdindex];
			sg_index = 0;
		}
		addr64 = (u64) sg_dma_address(sg);
		len = sg_dma_len(sg);
		curr_sg->Addr.lower = (u32) (addr64 & 0x0FFFFFFFFULL);
		curr_sg->Addr.upper = (u32) ((addr64 >> 32) & 0x0FFFFFFFFULL);
		curr_sg->Len = len;
		curr_sg->Ext = 0;  /* we are not chaining */
		curr_sg++;
	}

	if (use_sg + chained > h->maxSG)
		h->maxSG = use_sg + chained;

	if (chained) {
		cp->Header.SGList = h->max_cmd_sg_entries;
		cp->Header.SGTotal = (u16) (use_sg + 1);
		if (hpsa_map_sg_chain_block(h, cp)) {
			scsi_dma_unmap(cmd);
			return -1;
		}
		return 0;
	}

sglist_finished:

	cp->Header.SGList = (u8) use_sg;   /* no. SGs contig in this cmd */
	cp->Header.SGTotal = (u16) use_sg; /* total sgs in this cmd list */
	return 0;
}
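/*
 * Illustrative sketch (not driver code): the controller takes 64-bit DMA
 * addresses as two 32-bit halves, split as above:
 *
 *	u64 addr64 = 0x0000001234567890ULL;
 *	u32 lower = (u32) (addr64 & 0x0FFFFFFFFULL);	       (0x34567890)
 *	u32 upper = (u32) ((addr64 >> 32) & 0x0FFFFFFFFULL);   (0x00000012)
 */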
static int hpsa_scsi_queue_command_lck(struct scsi_cmnd *cmd,
	void (*done)(struct scsi_cmnd *))
{
	struct ctlr_info *h;
	struct hpsa_scsi_dev_t *dev;
	unsigned char scsi3addr[8];
	struct CommandList *c;
	unsigned long flags;

	/* Get the ptr to our adapter structure out of cmd->host. */
	h = sdev_to_hba(cmd->device);
	dev = cmd->device->hostdata;
	if (!dev) {
		cmd->result = DID_NO_CONNECT << 16;
		done(cmd);
		return 0;
	}
	memcpy(scsi3addr, dev->scsi3addr, sizeof(scsi3addr));

	spin_lock_irqsave(&h->lock, flags);
	if (unlikely(h->lockup_detected)) {
		spin_unlock_irqrestore(&h->lock, flags);
		cmd->result = DID_ERROR << 16;
		done(cmd);
		return 0;
	}
	spin_unlock_irqrestore(&h->lock, flags);
	c = cmd_alloc(h);
	if (c == NULL) {		/* trouble... */
		dev_err(&h->pdev->dev, "cmd_alloc returned NULL!\n");
		return SCSI_MLQUEUE_HOST_BUSY;
	}

	/* Fill in the command list header */

	cmd->scsi_done = done;    /* save this for use by completion code */

	/* save c in case we have to abort it */
	cmd->host_scribble = (unsigned char *) c;

	c->cmd_type = CMD_SCSI;
	c->scsi_cmd = cmd;
	c->Header.ReplyQueue = 0;  /* unused in simple mode */
	memcpy(&c->Header.LUN.LunAddrBytes[0], &scsi3addr[0], 8);
	c->Header.Tag.lower = (c->cmdindex << DIRECT_LOOKUP_SHIFT);
	c->Header.Tag.lower |= DIRECT_LOOKUP_BIT;

	/* Fill in the request block... */

	c->Request.Timeout = 0;
	memset(c->Request.CDB, 0, sizeof(c->Request.CDB));
	BUG_ON(cmd->cmd_len > sizeof(c->Request.CDB));
	c->Request.CDBLen = cmd->cmd_len;
	memcpy(c->Request.CDB, cmd->cmnd, cmd->cmd_len);
	c->Request.Type.Type = TYPE_CMD;
	c->Request.Type.Attribute = ATTR_SIMPLE;
	switch (cmd->sc_data_direction) {
	case DMA_TO_DEVICE:
		c->Request.Type.Direction = XFER_WRITE;
		break;
	case DMA_FROM_DEVICE:
		c->Request.Type.Direction = XFER_READ;
		break;
	case DMA_NONE:
		c->Request.Type.Direction = XFER_NONE;
		break;
	case DMA_BIDIRECTIONAL:
		/* This can happen if a buggy application does a scsi passthru
		 * and sets both inlen and outlen to non-zero. ( see
		 * ../scsi/scsi_ioctl.c:scsi_ioctl_send_command() )
		 */

		c->Request.Type.Direction = XFER_RSVD;
		/* This is technically wrong, and hpsa controllers should
		 * reject it with CMD_INVALID, which is the most correct
		 * response, but non-fibre backends appear to let it
		 * slide by, and give the same results as if this field
		 * were set correctly.  Either way is acceptable for
		 * our purposes here.
		 */

		break;

	default:
		dev_err(&h->pdev->dev, "unknown data direction: %d\n",
			cmd->sc_data_direction);
		BUG();
		break;
	}

	if (hpsa_scatter_gather(h, c, cmd) < 0) { /* Fill SG list */
		cmd_free(h, c);
		return SCSI_MLQUEUE_HOST_BUSY;
	}
	enqueue_cmd_and_start_io(h, c);
	/* the cmd'll come back via intr handler in complete_scsi_command() */
	return 0;
}

static DEF_SCSI_QCMD(hpsa_scsi_queue_command)
static void hpsa_scan_start(struct Scsi_Host *sh)
{
	struct ctlr_info *h = shost_to_hba(sh);
	unsigned long flags;

	/* wait until any scan already in progress is finished. */
	while (1) {
		spin_lock_irqsave(&h->scan_lock, flags);
		if (h->scan_finished)
			break;
		spin_unlock_irqrestore(&h->scan_lock, flags);
		wait_event(h->scan_wait_queue, h->scan_finished);
		/* Note: We don't need to worry about a race between this
		 * thread and driver unload because the midlayer will
		 * have incremented the reference count, so unload won't
		 * happen if we're in here.
		 */
	}
	h->scan_finished = 0; /* mark scan as in progress */
	spin_unlock_irqrestore(&h->scan_lock, flags);

	hpsa_update_scsi_devices(h, h->scsi_host->host_no);

	spin_lock_irqsave(&h->scan_lock, flags);
	h->scan_finished = 1; /* mark scan as finished. */
	wake_up_all(&h->scan_wait_queue);
	spin_unlock_irqrestore(&h->scan_lock, flags);
}
static int hpsa_scan_finished(struct Scsi_Host *sh,
	unsigned long elapsed_time)
{
	struct ctlr_info *h = shost_to_hba(sh);
	unsigned long flags;
	int finished;

	spin_lock_irqsave(&h->scan_lock, flags);
	finished = h->scan_finished;
	spin_unlock_irqrestore(&h->scan_lock, flags);
	return finished;
}
static int hpsa_change_queue_depth(struct scsi_device *sdev,
	int qdepth, int reason)
{
	struct ctlr_info *h = sdev_to_hba(sdev);

	if (reason != SCSI_QDEPTH_DEFAULT)
		return -ENOTSUPP;

	if (qdepth < 1)
		qdepth = 1;
	else
		if (qdepth > h->nr_cmds)
			qdepth = h->nr_cmds;
	scsi_adjust_queue_depth(sdev, scsi_get_tag_type(sdev), qdepth);
	return sdev->queue_depth;
}
static void hpsa_unregister_scsi(struct ctlr_info *h)
{
	/* we are being forcibly unloaded, and may not refuse. */
	scsi_remove_host(h->scsi_host);
	scsi_host_put(h->scsi_host);
	h->scsi_host = NULL;
}
static int hpsa_register_scsi(struct ctlr_info *h)
{
	struct Scsi_Host *sh;
	int error;

	sh = scsi_host_alloc(&hpsa_driver_template, sizeof(h));
	if (sh == NULL)
		goto fail;

	sh->io_port = 0;
	sh->n_io_port = 0;
	sh->this_id = -1;
	sh->max_channel = 3;
	sh->max_cmd_len = MAX_COMMAND_SIZE;
	sh->max_lun = HPSA_MAX_LUN;
	sh->max_id = HPSA_MAX_LUN;
	sh->can_queue = h->nr_cmds;
	sh->cmd_per_lun = h->nr_cmds;
	sh->sg_tablesize = h->maxsgentries;
	h->scsi_host = sh;
	sh->hostdata[0] = (unsigned long) h;
	sh->irq = h->intr[h->intr_mode];
	sh->unique_id = sh->irq;
	error = scsi_add_host(sh, &h->pdev->dev);
	if (error)
		goto fail_host_put;
	scsi_scan_host(sh);
	return 0;

 fail_host_put:
	dev_err(&h->pdev->dev, "%s: scsi_add_host"
		" failed for controller %d\n", __func__, h->ctlr);
	scsi_host_put(sh);
	return error;
 fail:
	dev_err(&h->pdev->dev, "%s: scsi_host_alloc"
		" failed for controller %d\n", __func__, h->ctlr);
	return -ENOMEM;
}
static int wait_for_device_to_become_ready(struct ctlr_info *h,
	unsigned char lunaddr[])
{
	int rc = 0;
	int count = 0;
	int waittime = 1; /* seconds */
	struct CommandList *c;

	c = cmd_special_alloc(h);
	if (!c) {
		dev_warn(&h->pdev->dev, "out of memory in "
			"wait_for_device_to_become_ready.\n");
		return IO_ERROR;
	}

	/* Send test unit ready until device ready, or give up. */
	while (count < HPSA_TUR_RETRY_LIMIT) {

		/* Wait for a bit.  do this first, because if we send
		 * the TUR right away, the reset will just abort it.
		 */
		msleep(1000 * waittime);
		count++;

		/* Increase wait time with each try, up to a point. */
		if (waittime < HPSA_MAX_WAIT_INTERVAL_SECS)
			waittime = waittime * 2;

		/* Send the Test Unit Ready, fill_cmd can't fail, no mapping */
		(void) fill_cmd(c, TEST_UNIT_READY, h,
				NULL, 0, 0, lunaddr, TYPE_CMD);
		hpsa_scsi_do_simple_cmd_core(h, c);
		/* no unmap needed here because no data xfer. */

		if (c->err_info->CommandStatus == CMD_SUCCESS)
			break;

		if (c->err_info->CommandStatus == CMD_TARGET_STATUS &&
			c->err_info->ScsiStatus == SAM_STAT_CHECK_CONDITION &&
			(c->err_info->SenseInfo[2] == NO_SENSE ||
			c->err_info->SenseInfo[2] == UNIT_ATTENTION))
			break;

		dev_warn(&h->pdev->dev, "waiting %d secs "
			"for device to become ready.\n", waittime);
		rc = 1; /* device not ready. */
	}

	if (rc)
		dev_warn(&h->pdev->dev, "giving up on device.\n");
	else
		dev_warn(&h->pdev->dev, "device is ready.\n");

	cmd_special_free(h, c);
	return rc;
}
/* Need at least one of these error handlers to keep ../scsi/hosts.c from
 * complaining.  Doing a host- or bus-reset can't do anything good here.
 */
static int hpsa_eh_device_reset_handler(struct scsi_cmnd *scsicmd)
{
	int rc;
	struct ctlr_info *h;
	struct hpsa_scsi_dev_t *dev;

	/* find the controller to which the command to be aborted was sent */
	h = sdev_to_hba(scsicmd->device);
	if (h == NULL) /* paranoia */
		return FAILED;
	dev = scsicmd->device->hostdata;
	if (!dev) {
		dev_err(&h->pdev->dev, "hpsa_eh_device_reset_handler: "
			"device lookup failed.\n");
		return FAILED;
	}
	dev_warn(&h->pdev->dev, "resetting device %d:%d:%d:%d\n",
		h->scsi_host->host_no, dev->bus, dev->target, dev->lun);
	/* send a reset to the SCSI LUN which the command was sent to */
	rc = hpsa_send_reset(h, dev->scsi3addr);
	if (rc == 0 && wait_for_device_to_become_ready(h, dev->scsi3addr) == 0)
		return SUCCESS;

	dev_warn(&h->pdev->dev, "resetting device failed.\n");
	return FAILED;
}
static void swizzle_abort_tag(u8 *tag)
{
	u8 original_tag[8];

	memcpy(original_tag, tag, 8);
	tag[0] = original_tag[3];
	tag[1] = original_tag[2];
	tag[2] = original_tag[1];
	tag[3] = original_tag[0];
	tag[4] = original_tag[7];
	tag[5] = original_tag[6];
	tag[6] = original_tag[5];
	tag[7] = original_tag[4];
}
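/*
 * Illustrative sketch (not driver code): the swizzle byte-swaps each 32-bit
 * half of the 8-byte tag.  For example:
 *
 *	u8 tag[8] = { 0x00, 0x11, 0x22, 0x33, 0x44, 0x55, 0x66, 0x77 };
 *	swizzle_abort_tag(tag);
 *	tag is now  { 0x33, 0x22, 0x11, 0x00, 0x77, 0x66, 0x55, 0x44 }
 */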
static int hpsa_send_abort(struct ctlr_info *h, unsigned char *scsi3addr,
	struct CommandList *abort, int swizzle)
{
	int rc = IO_OK;
	struct CommandList *c;
	struct ErrorInfo *ei;

	c = cmd_special_alloc(h);
	if (c == NULL) {	/* trouble... */
		dev_warn(&h->pdev->dev, "cmd_special_alloc returned NULL!\n");
		return -ENOMEM;
	}

	/* fill_cmd can't fail here, no buffer to map */
	(void) fill_cmd(c, HPSA_ABORT_MSG, h, abort,
		0, 0, scsi3addr, TYPE_MSG);
	if (swizzle)
		swizzle_abort_tag(&c->Request.CDB[4]);
	hpsa_scsi_do_simple_cmd_core(h, c);
	dev_dbg(&h->pdev->dev, "%s: Tag:0x%08x:%08x: do_simple_cmd_core completed.\n",
		__func__, abort->Header.Tag.upper, abort->Header.Tag.lower);
	/* no unmap needed here because no data xfer. */

	ei = c->err_info;
	switch (ei->CommandStatus) {
	case CMD_SUCCESS:
		break;
	case CMD_UNABORTABLE: /* Very common, don't make noise. */
		rc = -1;
		break;
	default:
		dev_dbg(&h->pdev->dev, "%s: Tag:0x%08x:%08x: interpreting error.\n",
			__func__, abort->Header.Tag.upper,
			abort->Header.Tag.lower);
		hpsa_scsi_interpret_error(c);
		rc = -1;
		break;
	}
	cmd_special_free(h, c);
	dev_dbg(&h->pdev->dev, "%s: Tag:0x%08x:%08x: Finished.\n", __func__,
		abort->Header.Tag.upper, abort->Header.Tag.lower);
	return rc;
}
/*
 * hpsa_find_cmd_in_queue
 *
 * Used to determine whether a command (find) is still present
 * in queue_head.  Optionally excludes the last element of queue_head.
 *
 * This is used to avoid unnecessary aborts.  Commands in h->reqQ have
 * not yet been submitted, and so can be aborted by the driver without
 * sending an abort to the hardware.
 *
 * Returns pointer to command if found in queue, NULL otherwise.
 */
static struct CommandList *hpsa_find_cmd_in_queue(struct ctlr_info *h,
			struct scsi_cmnd *find, struct list_head *queue_head)
{
	unsigned long flags;
	struct CommandList *c = NULL;	/* ptr into cmpQ */

	if (!find)
		return NULL;
	spin_lock_irqsave(&h->lock, flags);
	list_for_each_entry(c, queue_head, list) {
		if (c->scsi_cmd == NULL) /* e.g.: passthru ioctl */
			continue;
		if (c->scsi_cmd == find) {
			spin_unlock_irqrestore(&h->lock, flags);
			return c;
		}
	}
	spin_unlock_irqrestore(&h->lock, flags);
	return NULL;
}

static struct CommandList *hpsa_find_cmd_in_queue_by_tag(struct ctlr_info *h,
					u8 *tag, struct list_head *queue_head)
{
	unsigned long flags;
	struct CommandList *c;

	spin_lock_irqsave(&h->lock, flags);
	list_for_each_entry(c, queue_head, list) {
		if (memcmp(&c->Header.Tag, tag, 8) != 0)
			continue;
		spin_unlock_irqrestore(&h->lock, flags);
		return c;
	}
	spin_unlock_irqrestore(&h->lock, flags);
	return NULL;
}
/* Some Smart Arrays need the abort tag swizzled, and some don't.  It's hard to
 * tell which kind we're dealing with, so we send the abort both ways.  There
 * shouldn't be any collisions between swizzled and unswizzled tags due to the
 * way we construct our tags but we check anyway in case the assumptions which
 * make this true someday become false.
 */
static int hpsa_send_abort_both_ways(struct ctlr_info *h,
	unsigned char *scsi3addr, struct CommandList *abort)
{
	u8 swizzled_tag[8];
	struct CommandList *c;
	int rc = 0, rc2 = 0;

	/* we do not expect to find the swizzled tag in our queue, but
	 * check anyway just to be sure the assumptions which make this
	 * the case haven't become wrong.
	 */
	memcpy(swizzled_tag, &abort->Request.CDB[4], 8);
	swizzle_abort_tag(swizzled_tag);
	c = hpsa_find_cmd_in_queue_by_tag(h, swizzled_tag, &h->cmpQ);
	if (c != NULL) {
		dev_warn(&h->pdev->dev, "Unexpectedly found byte-swapped tag in completion queue.\n");
		return hpsa_send_abort(h, scsi3addr, abort, 0);
	}
	rc = hpsa_send_abort(h, scsi3addr, abort, 0);

	/* if the command is still in our queue, we can't conclude that it was
	 * aborted (it might have just completed normally) but in any case
	 * we don't need to try to abort it another way.
	 */
	c = hpsa_find_cmd_in_queue(h, abort->scsi_cmd, &h->cmpQ);
	if (c)
		rc2 = hpsa_send_abort(h, scsi3addr, abort, 1);
	return rc && rc2;
}
/* Send an abort for the specified command.
 *	If the device and controller support it,
 *		send a task abort request.
 */
static int hpsa_eh_abort_handler(struct scsi_cmnd *sc)
{

	int i, rc;
	struct ctlr_info *h;
	struct hpsa_scsi_dev_t *dev;
	struct CommandList *abort; /* pointer to command to be aborted */
	struct CommandList *found;
	struct scsi_cmnd *as;	/* ptr to scsi cmd inside aborted command. */
	char msg[256];		/* For debug messaging. */
	int ml = 0;

	/* Find the controller of the command to be aborted */
	h = sdev_to_hba(sc->device);
	if (WARN(h == NULL,
			"ABORT REQUEST FAILED, Controller lookup failed.\n"))
		return FAILED;

	/* Check that controller supports some kind of task abort */
	if (!(HPSATMF_PHYS_TASK_ABORT & h->TMFSupportFlags) &&
		!(HPSATMF_LOG_TASK_ABORT & h->TMFSupportFlags))
		return FAILED;

	memset(msg, 0, sizeof(msg));
	ml += sprintf(msg+ml, "ABORT REQUEST on C%d:B%d:T%d:L%d ",
		h->scsi_host->host_no, sc->device->channel,
		sc->device->id, sc->device->lun);

	/* Find the device of the command to be aborted */
	dev = sc->device->hostdata;
	if (!dev) {
		dev_err(&h->pdev->dev, "%s FAILED, Device lookup failed.\n",
				msg);
		return FAILED;
	}

	/* Get SCSI command to be aborted */
	abort = (struct CommandList *) sc->host_scribble;
	if (abort == NULL) {
		dev_err(&h->pdev->dev, "%s FAILED, Command to abort is NULL.\n",
				msg);
		return FAILED;
	}

	ml += sprintf(msg+ml, "Tag:0x%08x:%08x ",
		abort->Header.Tag.upper, abort->Header.Tag.lower);
	as = (struct scsi_cmnd *) abort->scsi_cmd;
	if (as != NULL)
		ml += sprintf(msg+ml, "Command:0x%x SN:0x%lx ",
			as->cmnd[0], as->serial_number);
	dev_dbg(&h->pdev->dev, "%s\n", msg);
	dev_warn(&h->pdev->dev, "Abort request on C%d:B%d:T%d:L%d\n",
		h->scsi_host->host_no, dev->bus, dev->target, dev->lun);

	/* Search reqQ to see if command is queued but not submitted,
	 * if so, complete the command with aborted status and remove
	 * it from the reqQ.
	 */
	found = hpsa_find_cmd_in_queue(h, sc, &h->reqQ);
	if (found) {
		found->err_info->CommandStatus = CMD_ABORTED;
		finish_cmd(found);
		dev_info(&h->pdev->dev, "%s Request SUCCEEDED (driver queue).\n",
				msg);
		return SUCCESS;
	}

	/* not in reqQ, if also not in cmpQ, must have already completed */
	found = hpsa_find_cmd_in_queue(h, sc, &h->cmpQ);
	if (!found) {
		dev_dbg(&h->pdev->dev, "%s Request SUCCEEDED (not known to driver).\n",
				msg);
		return SUCCESS;
	}

	/*
	 * Command is in flight, or possibly already completed
	 * by the firmware (but not to the scsi mid layer) but we can't
	 * distinguish which.  Send the abort down.
	 */
	rc = hpsa_send_abort_both_ways(h, dev->scsi3addr, abort);
	if (rc != 0) {
		dev_dbg(&h->pdev->dev, "%s Request FAILED.\n", msg);
		dev_warn(&h->pdev->dev, "FAILED abort on device C%d:B%d:T%d:L%d\n",
			h->scsi_host->host_no,
			dev->bus, dev->target, dev->lun);
		return FAILED;
	}
	dev_info(&h->pdev->dev, "%s REQUEST SUCCEEDED.\n", msg);

	/* If the abort(s) above completed and actually aborted the
	 * command, then the command to be aborted should already be
	 * completed.  If not, wait around a bit more to see if they
	 * manage to complete normally.
	 */
#define ABORT_COMPLETE_WAIT_SECS 30
	for (i = 0; i < ABORT_COMPLETE_WAIT_SECS * 10; i++) {
		found = hpsa_find_cmd_in_queue(h, sc, &h->cmpQ);
		if (!found)
			return SUCCESS;
		msleep(100);
	}
	dev_warn(&h->pdev->dev, "%s FAILED. Aborted command has not completed after %d seconds.\n",
		msg, ABORT_COMPLETE_WAIT_SECS);
	return FAILED;
}
/*
 * For operations that cannot sleep, a command block is allocated at init,
 * and managed by cmd_alloc() and cmd_free() using a simple bitmap to track
 * which ones are free or in use.  Lock must be held when calling this.
 * cmd_free() is the complement.
 */
static struct CommandList *cmd_alloc(struct ctlr_info *h)
{
	struct CommandList *c;
	int i;
	union u64bit temp64;
	dma_addr_t cmd_dma_handle, err_dma_handle;
	unsigned long flags;

	spin_lock_irqsave(&h->lock, flags);
	do {
		i = find_first_zero_bit(h->cmd_pool_bits, h->nr_cmds);
		if (i == h->nr_cmds) {
			spin_unlock_irqrestore(&h->lock, flags);
			return NULL;
		}
	} while (test_and_set_bit
		 (i & (BITS_PER_LONG - 1),
		  h->cmd_pool_bits + (i / BITS_PER_LONG)) != 0);
	spin_unlock_irqrestore(&h->lock, flags);

	c = h->cmd_pool + i;
	memset(c, 0, sizeof(*c));
	cmd_dma_handle = h->cmd_pool_dhandle
	    + i * sizeof(*c);
	c->err_info = h->errinfo_pool + i;
	memset(c->err_info, 0, sizeof(*c->err_info));
	err_dma_handle = h->errinfo_pool_dhandle
	    + i * sizeof(*c->err_info);

	c->cmdindex = i;

	INIT_LIST_HEAD(&c->list);
	c->busaddr = (u32) cmd_dma_handle;
	temp64.val = (u64) err_dma_handle;
	c->ErrDesc.Addr.lower = temp64.val32.lower;
	c->ErrDesc.Addr.upper = temp64.val32.upper;
	c->ErrDesc.Len = sizeof(*c->err_info);

	c->h = h;
	return c;
}
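/*
 * Illustrative sketch (not driver code): the pool bitmap is an array of
 * longs, so slot i maps to word i / BITS_PER_LONG and bit i % BITS_PER_LONG.
 * On a 64-bit kernel, slot 70 lives at bit 6 of word 1:
 *
 *	int i = 70;
 *	unsigned long *word = h->cmd_pool_bits + (i / BITS_PER_LONG);  (word 1)
 *	int bit = i & (BITS_PER_LONG - 1);			       (bit 6)
 */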
/* For operations that can wait for kmalloc to possibly sleep,
 * this routine can be called. Lock need not be held to call
 * cmd_special_alloc. cmd_special_free() is the complement.
 */
static struct CommandList *cmd_special_alloc(struct ctlr_info *h)
{
	struct CommandList *c;
	union u64bit temp64;
	dma_addr_t cmd_dma_handle, err_dma_handle;

	c = pci_alloc_consistent(h->pdev, sizeof(*c), &cmd_dma_handle);
	if (c == NULL)
		return NULL;
	memset(c, 0, sizeof(*c));

	c->cmdindex = -1;

	c->err_info = pci_alloc_consistent(h->pdev, sizeof(*c->err_info),
		    &err_dma_handle);

	if (c->err_info == NULL) {
		pci_free_consistent(h->pdev,
			sizeof(*c), c, cmd_dma_handle);
		return NULL;
	}
	memset(c->err_info, 0, sizeof(*c->err_info));

	INIT_LIST_HEAD(&c->list);
	c->busaddr = (u32) cmd_dma_handle;
	temp64.val = (u64) err_dma_handle;
	c->ErrDesc.Addr.lower = temp64.val32.lower;
	c->ErrDesc.Addr.upper = temp64.val32.upper;
	c->ErrDesc.Len = sizeof(*c->err_info);

	c->h = h;
	return c;
}
static void cmd_free(struct ctlr_info *h, struct CommandList *c)
{
	int i;
	unsigned long flags;

	i = c - h->cmd_pool;
	spin_lock_irqsave(&h->lock, flags);
	clear_bit(i & (BITS_PER_LONG - 1),
		  h->cmd_pool_bits + (i / BITS_PER_LONG));
	spin_unlock_irqrestore(&h->lock, flags);
}
static void cmd_special_free(struct ctlr_info *h, struct CommandList *c)
{
	union u64bit temp64;

	temp64.val32.lower = c->ErrDesc.Addr.lower;
	temp64.val32.upper = c->ErrDesc.Addr.upper;
	pci_free_consistent(h->pdev, sizeof(*c->err_info),
			    c->err_info, (dma_addr_t) temp64.val);
	pci_free_consistent(h->pdev, sizeof(*c),
			    c, (dma_addr_t) (c->busaddr & DIRECT_LOOKUP_MASK));
}
#ifdef CONFIG_COMPAT

static int hpsa_ioctl32_passthru(struct scsi_device *dev, int cmd, void *arg)
{
	IOCTL32_Command_struct __user *arg32 =
	    (IOCTL32_Command_struct __user *) arg;
	IOCTL_Command_struct arg64;
	IOCTL_Command_struct __user *p = compat_alloc_user_space(sizeof(arg64));
	int err;
	u32 cp;

	memset(&arg64, 0, sizeof(arg64));
	err = 0;
	err |= copy_from_user(&arg64.LUN_info, &arg32->LUN_info,
			   sizeof(arg64.LUN_info));
	err |= copy_from_user(&arg64.Request, &arg32->Request,
			   sizeof(arg64.Request));
	err |= copy_from_user(&arg64.error_info, &arg32->error_info,
			   sizeof(arg64.error_info));
	err |= get_user(arg64.buf_size, &arg32->buf_size);
	err |= get_user(cp, &arg32->buf);
	arg64.buf = compat_ptr(cp);
	err |= copy_to_user(p, &arg64, sizeof(arg64));

	if (err)
		return -EFAULT;

	err = hpsa_ioctl(dev, CCISS_PASSTHRU, (void *)p);
	if (err)
		return err;
	err |= copy_in_user(&arg32->error_info, &p->error_info,
			 sizeof(arg32->error_info));
	if (err)
		return -EFAULT;
	return err;
}
static int hpsa_ioctl32_big_passthru(struct scsi_device *dev,
	int cmd, void *arg)
{
	BIG_IOCTL32_Command_struct __user *arg32 =
	    (BIG_IOCTL32_Command_struct __user *) arg;
	BIG_IOCTL_Command_struct arg64;
	BIG_IOCTL_Command_struct __user *p =
	    compat_alloc_user_space(sizeof(arg64));
	int err;
	u32 cp;

	memset(&arg64, 0, sizeof(arg64));
	err = 0;
	err |= copy_from_user(&arg64.LUN_info, &arg32->LUN_info,
			   sizeof(arg64.LUN_info));
	err |= copy_from_user(&arg64.Request, &arg32->Request,
			   sizeof(arg64.Request));
	err |= copy_from_user(&arg64.error_info, &arg32->error_info,
			   sizeof(arg64.error_info));
	err |= get_user(arg64.buf_size, &arg32->buf_size);
	err |= get_user(arg64.malloc_size, &arg32->malloc_size);
	err |= get_user(cp, &arg32->buf);
	arg64.buf = compat_ptr(cp);
	err |= copy_to_user(p, &arg64, sizeof(arg64));

	if (err)
		return -EFAULT;

	err = hpsa_ioctl(dev, CCISS_BIG_PASSTHRU, (void *)p);
	if (err)
		return err;
	err |= copy_in_user(&arg32->error_info, &p->error_info,
			 sizeof(arg32->error_info));
	if (err)
		return -EFAULT;
	return err;
}
static int hpsa_compat_ioctl(struct scsi_device *dev, int cmd, void *arg)
{
	switch (cmd) {
	case CCISS_GETPCIINFO:
	case CCISS_GETINTINFO:
	case CCISS_SETINTINFO:
	case CCISS_GETNODENAME:
	case CCISS_SETNODENAME:
	case CCISS_GETHEARTBEAT:
	case CCISS_GETBUSTYPES:
	case CCISS_GETFIRMVER:
	case CCISS_GETDRIVVER:
	case CCISS_REVALIDVOLS:
	case CCISS_DEREGDISK:
	case CCISS_REGNEWDISK:
	case CCISS_REGNEWD:
	case CCISS_RESCANDISK:
	case CCISS_GETLUNINFO:
		return hpsa_ioctl(dev, cmd, arg);

	case CCISS_PASSTHRU32:
		return hpsa_ioctl32_passthru(dev, cmd, arg);
	case CCISS_BIG_PASSTHRU32:
		return hpsa_ioctl32_big_passthru(dev, cmd, arg);

	default:
		return -ENOIOCTLCMD;
	}
}
#endif
static int hpsa_getpciinfo_ioctl(struct ctlr_info *h, void __user *argp)
{
	struct hpsa_pci_info pciinfo;

	if (!argp)
		return -EINVAL;
	pciinfo.domain = pci_domain_nr(h->pdev->bus);
	pciinfo.bus = h->pdev->bus->number;
	pciinfo.dev_fn = h->pdev->devfn;
	pciinfo.board_id = h->board_id;
	if (copy_to_user(argp, &pciinfo, sizeof(pciinfo)))
		return -EFAULT;
	return 0;
}
static int hpsa_getdrivver_ioctl(struct ctlr_info *h, void __user *argp)
{
	DriverVer_type DriverVer;
	unsigned char vmaj, vmin, vsubmin;
	int rc;

	rc = sscanf(HPSA_DRIVER_VERSION, "%hhu.%hhu.%hhu",
		&vmaj, &vmin, &vsubmin);
	if (rc != 3) {
		dev_info(&h->pdev->dev, "driver version string '%s' "
			"unrecognized.", HPSA_DRIVER_VERSION);
		vmaj = 0;
		vmin = 0;
		vsubmin = 0;
	}
	DriverVer = (vmaj << 16) | (vmin << 8) | vsubmin;
	if (!argp)
		return -EINVAL;
	if (copy_to_user(argp, &DriverVer, sizeof(DriverVer_type)))
		return -EFAULT;
	return 0;
}
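/*
 * Illustrative sketch (not driver code): for a driver version string of
 * "2.0.2", the packed value handed back to userspace is:
 *
 *	DriverVer = (2 << 16) | (0 << 8) | 2;	( = 0x020002 )
 */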
static int hpsa_passthru_ioctl(struct ctlr_info *h, void __user *argp)
{
	IOCTL_Command_struct iocommand;
	struct CommandList *c;
	char *buff = NULL;
	union u64bit temp64;
	int rc = 0;

	if (!argp)
		return -EINVAL;
	if (!capable(CAP_SYS_RAWIO))
		return -EPERM;
	if (copy_from_user(&iocommand, argp, sizeof(iocommand)))
		return -EFAULT;
	if ((iocommand.buf_size < 1) &&
	    (iocommand.Request.Type.Direction != XFER_NONE)) {
		return -EINVAL;
	}
	if (iocommand.buf_size > 0) {
		buff = kmalloc(iocommand.buf_size, GFP_KERNEL);
		if (buff == NULL)
			return -EFAULT;
		if (iocommand.Request.Type.Direction == XFER_WRITE) {
			/* Copy the data into the buffer we created */
			if (copy_from_user(buff, iocommand.buf,
					iocommand.buf_size)) {
				rc = -EFAULT;
				goto out_kfree;
			}
		} else {
			memset(buff, 0, iocommand.buf_size);
		}
	}
	c = cmd_special_alloc(h);
	if (c == NULL) {
		rc = -ENOMEM;
		goto out_kfree;
	}
	/* Fill in the command type */
	c->cmd_type = CMD_IOCTL_PEND;
	/* Fill in Command Header */
	c->Header.ReplyQueue = 0; /* unused in simple mode */
	if (iocommand.buf_size > 0) {	/* buffer to fill */
		c->Header.SGList = 1;
		c->Header.SGTotal = 1;
	} else { /* no buffers to fill */
		c->Header.SGList = 0;
		c->Header.SGTotal = 0;
	}
	memcpy(&c->Header.LUN, &iocommand.LUN_info, sizeof(c->Header.LUN));
	/* use the kernel address of the cmd block for tag */
	c->Header.Tag.lower = c->busaddr;

	/* Fill in Request block */
	memcpy(&c->Request, &iocommand.Request,
		sizeof(c->Request));

	/* Fill in the scatter gather information */
	if (iocommand.buf_size > 0) {
		temp64.val = pci_map_single(h->pdev, buff,
			iocommand.buf_size, PCI_DMA_BIDIRECTIONAL);
		if (dma_mapping_error(&h->pdev->dev, temp64.val)) {
			c->SG[0].Addr.lower = 0;
			c->SG[0].Addr.upper = 0;
			c->SG[0].Len = 0;
			rc = -ENOMEM;
			goto out;
		}
		c->SG[0].Addr.lower = temp64.val32.lower;
		c->SG[0].Addr.upper = temp64.val32.upper;
		c->SG[0].Len = iocommand.buf_size;
		c->SG[0].Ext = 0; /* we are not chaining */
	}
	hpsa_scsi_do_simple_cmd_core_if_no_lockup(h, c);
	if (iocommand.buf_size > 0)
		hpsa_pci_unmap(h->pdev, c, 1, PCI_DMA_BIDIRECTIONAL);
	check_ioctl_unit_attention(h, c);

	/* Copy the error information out */
	memcpy(&iocommand.error_info, c->err_info,
		sizeof(iocommand.error_info));
	if (copy_to_user(argp, &iocommand, sizeof(iocommand))) {
		rc = -EFAULT;
		goto out;
	}
	if (iocommand.Request.Type.Direction == XFER_READ &&
		iocommand.buf_size > 0) {
		/* Copy the data out of the buffer we created */
		if (copy_to_user(iocommand.buf, buff, iocommand.buf_size)) {
			rc = -EFAULT;
			goto out;
		}
	}
out:
	cmd_special_free(h, c);
out_kfree:
	kfree(buff);
	return rc;
}
static int hpsa_big_passthru_ioctl(struct ctlr_info *h, void __user *argp)
{
	BIG_IOCTL_Command_struct *ioc;
	struct CommandList *c;
	unsigned char **buff = NULL;
	int *buff_size = NULL;
	union u64bit temp64;
	BYTE sg_used = 0;
	int status = 0;
	int i;
	u32 left;
	u32 sz;
	BYTE __user *data_ptr;

	if (!argp)
		return -EINVAL;
	if (!capable(CAP_SYS_RAWIO))
		return -EPERM;
	ioc = (BIG_IOCTL_Command_struct *)
	    kmalloc(sizeof(*ioc), GFP_KERNEL);
	if (!ioc) {
		status = -ENOMEM;
		goto cleanup1;
	}
	if (copy_from_user(ioc, argp, sizeof(*ioc))) {
		status = -EFAULT;
		goto cleanup1;
	}
	if ((ioc->buf_size < 1) &&
	    (ioc->Request.Type.Direction != XFER_NONE)) {
		status = -EINVAL;
		goto cleanup1;
	}
	/* Check kmalloc limits using all SGs */
	if (ioc->malloc_size > MAX_KMALLOC_SIZE) {
		status = -EINVAL;
		goto cleanup1;
	}
	if (ioc->buf_size > ioc->malloc_size * SG_ENTRIES_IN_CMD) {
		status = -EINVAL;
		goto cleanup1;
	}
	buff = kzalloc(SG_ENTRIES_IN_CMD * sizeof(char *), GFP_KERNEL);
	if (!buff) {
		status = -ENOMEM;
		goto cleanup1;
	}
	buff_size = kmalloc(SG_ENTRIES_IN_CMD * sizeof(int), GFP_KERNEL);
	if (!buff_size) {
		status = -ENOMEM;
		goto cleanup1;
	}
	left = ioc->buf_size;
	data_ptr = ioc->buf;
	while (left) {
		sz = (left > ioc->malloc_size) ? ioc->malloc_size : left;
		buff_size[sg_used] = sz;
		buff[sg_used] = kmalloc(sz, GFP_KERNEL);
		if (buff[sg_used] == NULL) {
			status = -ENOMEM;
			goto cleanup1;
		}
		if (ioc->Request.Type.Direction == XFER_WRITE) {
			if (copy_from_user(buff[sg_used], data_ptr, sz)) {
				status = -EFAULT;
				goto cleanup1;
			}
		} else
			memset(buff[sg_used], 0, sz);
		left -= sz;
		data_ptr += sz;
		sg_used++;
	}
	c = cmd_special_alloc(h);
	if (c == NULL) {
		status = -ENOMEM;
		goto cleanup1;
	}
	c->cmd_type = CMD_IOCTL_PEND;
	c->Header.ReplyQueue = 0;
	c->Header.SGList = c->Header.SGTotal = sg_used;
	memcpy(&c->Header.LUN, &ioc->LUN_info, sizeof(c->Header.LUN));
	c->Header.Tag.lower = c->busaddr;
	memcpy(&c->Request, &ioc->Request, sizeof(c->Request));
	if (ioc->buf_size > 0) {
		for (i = 0; i < sg_used; i++) {
			temp64.val = pci_map_single(h->pdev, buff[i],
				    buff_size[i], PCI_DMA_BIDIRECTIONAL);
			if (dma_mapping_error(&h->pdev->dev, temp64.val)) {
				c->SG[i].Addr.lower = 0;
				c->SG[i].Addr.upper = 0;
				c->SG[i].Len = 0;
				hpsa_pci_unmap(h->pdev, c, i,
					PCI_DMA_BIDIRECTIONAL);
				status = -ENOMEM;
				goto cleanup1;
			}
			c->SG[i].Addr.lower = temp64.val32.lower;
			c->SG[i].Addr.upper = temp64.val32.upper;
			c->SG[i].Len = buff_size[i];
			/* we are not chaining */
			c->SG[i].Ext = 0;
		}
	}
	hpsa_scsi_do_simple_cmd_core_if_no_lockup(h, c);
	if (sg_used)
		hpsa_pci_unmap(h->pdev, c, sg_used, PCI_DMA_BIDIRECTIONAL);
	check_ioctl_unit_attention(h, c);
	/* Copy the error information out */
	memcpy(&ioc->error_info, c->err_info, sizeof(ioc->error_info));
	if (copy_to_user(argp, ioc, sizeof(*ioc))) {
		cmd_special_free(h, c);
		status = -EFAULT;
		goto cleanup1;
	}
	if (ioc->Request.Type.Direction == XFER_READ && ioc->buf_size > 0) {
		/* Copy the data out of the buffer we created */
		BYTE __user *ptr = ioc->buf;
		for (i = 0; i < sg_used; i++) {
			if (copy_to_user(ptr, buff[i], buff_size[i])) {
				cmd_special_free(h, c);
				status = -EFAULT;
				goto cleanup1;
			}
			ptr += buff_size[i];
		}
	}
	cmd_special_free(h, c);
	status = 0;
cleanup1:
	if (buff) {
		for (i = 0; i < sg_used; i++)
			kfree(buff[i]);
		kfree(buff);
	}
	kfree(buff_size);
	kfree(ioc);
	return status;
}
static void check_ioctl_unit_attention(struct ctlr_info *h,
	struct CommandList *c)
{
	if (c->err_info->CommandStatus == CMD_TARGET_STATUS &&
			c->err_info->ScsiStatus != SAM_STAT_CHECK_CONDITION)
		(void) check_for_unit_attention(h, c);
}
static int hpsa_ioctl(struct scsi_device *dev, int cmd, void *arg)
{
	struct ctlr_info *h;
	void __user *argp = (void __user *)arg;

	h = sdev_to_hba(dev);

	switch (cmd) {
	case CCISS_DEREGDISK:
	case CCISS_REGNEWDISK:
	case CCISS_REGNEWD:
		hpsa_scan_start(h->scsi_host);
		return 0;
	case CCISS_GETPCIINFO:
		return hpsa_getpciinfo_ioctl(h, argp);
	case CCISS_GETDRIVVER:
		return hpsa_getdrivver_ioctl(h, argp);
	case CCISS_PASSTHRU:
		return hpsa_passthru_ioctl(h, argp);
	case CCISS_BIG_PASSTHRU:
		return hpsa_big_passthru_ioctl(h, argp);
	default:
		return -ENOTTY;
	}
}
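/*
 * Illustrative userspace sketch (not driver code): these CCISS_* ioctls are
 * issued against a SCSI device node belonging to this host.  The device path
 * below is only an example, and the struct type is the one declared for
 * CCISS_GETPCIINFO in linux/cciss_ioctl.h:
 *
 *	#include <fcntl.h>
 *	#include <stdio.h>
 *	#include <sys/ioctl.h>
 *	#include <linux/cciss_ioctl.h>
 *
 *	int fd = open("/dev/sg0", O_RDWR);
 *	cciss_pci_info_struct info;
 *	if (fd >= 0 && ioctl(fd, CCISS_GETPCIINFO, &info) == 0)
 *		printf("board id 0x%08x\n", info.board_id);
 */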
static int hpsa_send_host_reset(struct ctlr_info *h, unsigned char *scsi3addr,
	u8 reset_type)
{
	struct CommandList *c;

	c = cmd_alloc(h);
	if (!c)
		return -ENOMEM;
	/* fill_cmd can't fail here, no data buffer to map */
	(void) fill_cmd(c, HPSA_DEVICE_RESET_MSG, h, NULL, 0, 0,
		RAID_CTLR_LUNID, TYPE_MSG);
	c->Request.CDB[1] = reset_type; /* fill_cmd defaults to target reset */
	c->waiting = NULL;
	enqueue_cmd_and_start_io(h, c);
	/* Don't wait for completion, the reset won't complete.  Don't free
	 * the command either.  This is the last command we will send before
	 * re-initializing everything, so it doesn't matter and won't leak.
	 */
	return 0;
}
static int fill_cmd(struct CommandList *c, u8 cmd, struct ctlr_info *h,
	void *buff, size_t size, u8 page_code, unsigned char *scsi3addr,
	int cmd_type)
{
	int pci_dir = XFER_NONE;
	struct CommandList *a; /* for commands to be aborted */

	c->cmd_type = CMD_IOCTL_PEND;
	c->Header.ReplyQueue = 0;
	if (buff != NULL && size > 0) {
		c->Header.SGList = 1;
		c->Header.SGTotal = 1;
	} else {
		c->Header.SGList = 0;
		c->Header.SGTotal = 0;
	}
	c->Header.Tag.lower = c->busaddr;
	memcpy(c->Header.LUN.LunAddrBytes, scsi3addr, 8);

	c->Request.Type.Type = cmd_type;
	if (cmd_type == TYPE_CMD) {
		switch (cmd) {
		case HPSA_INQUIRY:
			/* are we trying to read a vital product page */
			if (page_code != 0) {
				c->Request.CDB[1] = 0x01;
				c->Request.CDB[2] = page_code;
			}
			c->Request.CDBLen = 6;
			c->Request.Type.Attribute = ATTR_SIMPLE;
			c->Request.Type.Direction = XFER_READ;
			c->Request.Timeout = 0;
			c->Request.CDB[0] = HPSA_INQUIRY;
			c->Request.CDB[4] = size & 0xFF;
			break;
		case HPSA_REPORT_LOG:
		case HPSA_REPORT_PHYS:
			/* Talking to controller so it's a physical command
			   mode = 00 target = 0.  Nothing to write.
			 */
			c->Request.CDBLen = 12;
			c->Request.Type.Attribute = ATTR_SIMPLE;
			c->Request.Type.Direction = XFER_READ;
			c->Request.Timeout = 0;
			c->Request.CDB[0] = cmd;
			c->Request.CDB[6] = (size >> 24) & 0xFF; /* MSB */
			c->Request.CDB[7] = (size >> 16) & 0xFF;
			c->Request.CDB[8] = (size >> 8) & 0xFF;
			c->Request.CDB[9] = size & 0xFF;
			break;
		case HPSA_CACHE_FLUSH:
			c->Request.CDBLen = 12;
			c->Request.Type.Attribute = ATTR_SIMPLE;
			c->Request.Type.Direction = XFER_WRITE;
			c->Request.Timeout = 0;
			c->Request.CDB[0] = BMIC_WRITE;
			c->Request.CDB[6] = BMIC_CACHE_FLUSH;
			c->Request.CDB[7] = (size >> 8) & 0xFF;
			c->Request.CDB[8] = size & 0xFF;
			break;
		case TEST_UNIT_READY:
			c->Request.CDBLen = 6;
			c->Request.Type.Attribute = ATTR_SIMPLE;
			c->Request.Type.Direction = XFER_NONE;
			c->Request.Timeout = 0;
			break;
		default:
			dev_warn(&h->pdev->dev, "unknown command 0x%c\n", cmd);
			BUG();
			return -1;
		}
	} else if (cmd_type == TYPE_MSG) {
		switch (cmd) {
		case HPSA_DEVICE_RESET_MSG:
			c->Request.CDBLen = 16;
			c->Request.Type.Type = 1; /* It is a MSG not a CMD */
			c->Request.Type.Attribute = ATTR_SIMPLE;
			c->Request.Type.Direction = XFER_NONE;
			c->Request.Timeout = 0; /* Don't time out */
			memset(&c->Request.CDB[0], 0, sizeof(c->Request.CDB));
			c->Request.CDB[0] = cmd;
			c->Request.CDB[1] = HPSA_RESET_TYPE_LUN;
			/* If bytes 4-7 are zero, it means reset the */
			/* LunID device */
			c->Request.CDB[4] = 0x00;
			c->Request.CDB[5] = 0x00;
			c->Request.CDB[6] = 0x00;
			c->Request.CDB[7] = 0x00;
			break;
		case HPSA_ABORT_MSG:
			a = buff;       /* point to command to be aborted */
			dev_dbg(&h->pdev->dev, "Abort Tag:0x%08x:%08x using request Tag:0x%08x:%08x\n",
				a->Header.Tag.upper, a->Header.Tag.lower,
				c->Header.Tag.upper, c->Header.Tag.lower);
			c->Request.CDBLen = 16;
			c->Request.Type.Type = TYPE_MSG;
			c->Request.Type.Attribute = ATTR_SIMPLE;
			c->Request.Type.Direction = XFER_WRITE;
			c->Request.Timeout = 0; /* Don't time out */
			c->Request.CDB[0] = HPSA_TASK_MANAGEMENT;
			c->Request.CDB[1] = HPSA_TMF_ABORT_TASK;
			c->Request.CDB[2] = 0x00; /* reserved */
			c->Request.CDB[3] = 0x00; /* reserved */
			/* Tag to abort goes in CDB[4]-CDB[11] */
			c->Request.CDB[4] = a->Header.Tag.lower & 0xFF;
			c->Request.CDB[5] = (a->Header.Tag.lower >> 8) & 0xFF;
			c->Request.CDB[6] = (a->Header.Tag.lower >> 16) & 0xFF;
			c->Request.CDB[7] = (a->Header.Tag.lower >> 24) & 0xFF;
			c->Request.CDB[8] = a->Header.Tag.upper & 0xFF;
			c->Request.CDB[9] = (a->Header.Tag.upper >> 8) & 0xFF;
			c->Request.CDB[10] = (a->Header.Tag.upper >> 16) & 0xFF;
			c->Request.CDB[11] = (a->Header.Tag.upper >> 24) & 0xFF;
			c->Request.CDB[12] = 0x00; /* reserved */
			c->Request.CDB[13] = 0x00; /* reserved */
			c->Request.CDB[14] = 0x00; /* reserved */
			c->Request.CDB[15] = 0x00; /* reserved */
			break;
		default:
			dev_warn(&h->pdev->dev, "unknown message type %d\n",
				cmd);
			BUG();
		}
	} else {
		dev_warn(&h->pdev->dev, "unknown command type %d\n", cmd_type);
		BUG();
	}

	switch (c->Request.Type.Direction) {
	case XFER_READ:
		pci_dir = PCI_DMA_FROMDEVICE;
		break;
	case XFER_WRITE:
		pci_dir = PCI_DMA_TODEVICE;
		break;
	case XFER_NONE:
		pci_dir = PCI_DMA_NONE;
		break;
	default:
		pci_dir = PCI_DMA_BIDIRECTIONAL;
	}
	if (hpsa_map_one(h->pdev, c, buff, size, pci_dir))
		return -1;
	return 0;
}
/*
 * Map (physical) PCI mem into (virtual) kernel space
 */
static void __iomem *remap_pci_mem(ulong base, ulong size)
{
	ulong page_base = ((ulong) base) & PAGE_MASK;
	ulong page_offs = ((ulong) base) - page_base;
	void __iomem *page_remapped = ioremap_nocache(page_base,
		page_offs + size);

	return page_remapped ? (page_remapped + page_offs) : NULL;
}
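/*
 * Illustrative sketch (not driver code): with 4K pages, a BAR address of
 * 0xfe001250 maps like this:
 *
 *	page_base = 0xfe001250 & PAGE_MASK;	( 0xfe001000 )
 *	page_offs = 0xfe001250 - 0xfe001000;	( 0x250 )
 *
 * The whole page range is remapped, then page_offs is added back in.
 */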
/* Takes cmds off the submission queue and sends them to the hardware,
 * then puts them on the queue of cmds waiting for completion.
 */
static void start_io(struct ctlr_info *h)
{
	struct CommandList *c;
	unsigned long flags;

	spin_lock_irqsave(&h->lock, flags);
	while (!list_empty(&h->reqQ)) {
		c = list_entry(h->reqQ.next, struct CommandList, list);
		/* can't do anything if fifo is full */
		if ((h->access.fifo_full(h))) {
			dev_warn(&h->pdev->dev, "fifo full\n");
			break;
		}

		/* Get the first entry from the Request Q */
		removeQ(c);
		h->Qdepth--;

		/* Put job onto the completed Q */
		addQ(&h->cmpQ, c);

		/* Must increment commands_outstanding before unlocking
		 * and submitting to avoid race checking for fifo full
		 * condition.
		 */
		h->commands_outstanding++;
		if (h->commands_outstanding > h->max_outstanding)
			h->max_outstanding = h->commands_outstanding;

		/* Tell the controller execute command */
		spin_unlock_irqrestore(&h->lock, flags);
		h->access.submit_command(h, c);
		spin_lock_irqsave(&h->lock, flags);
	}
	spin_unlock_irqrestore(&h->lock, flags);
}
static inline unsigned long get_next_completion(struct ctlr_info *h, u8 q)
{
	return h->access.command_completed(h, q);
}

static inline bool interrupt_pending(struct ctlr_info *h)
{
	return h->access.intr_pending(h);
}

static inline long interrupt_not_for_us(struct ctlr_info *h)
{
	return (h->access.intr_pending(h) == 0) ||
		(h->interrupts_enabled == 0);
}

static inline int bad_tag(struct ctlr_info *h, u32 tag_index,
	u32 raw_tag)
{
	if (unlikely(tag_index >= h->nr_cmds)) {
		dev_warn(&h->pdev->dev, "bad tag 0x%08x ignored.\n", raw_tag);
		return 1;
	}
	return 0;
}

static inline void finish_cmd(struct CommandList *c)
{
	unsigned long flags;

	spin_lock_irqsave(&c->h->lock, flags);
	removeQ(c);
	spin_unlock_irqrestore(&c->h->lock, flags);
	dial_up_lockup_detection_on_fw_flash_complete(c->h, c);
	if (likely(c->cmd_type == CMD_SCSI))
		complete_scsi_command(c);
	else if (c->cmd_type == CMD_IOCTL_PEND)
		complete(c->waiting);
}
static inline u32 hpsa_tag_contains_index(u32 tag)
{
	return tag & DIRECT_LOOKUP_BIT;
}

static inline u32 hpsa_tag_to_index(u32 tag)
{
	return tag >> DIRECT_LOOKUP_SHIFT;
}

static inline u32 hpsa_tag_discard_error_bits(struct ctlr_info *h, u32 tag)
{
#define HPSA_PERF_ERROR_BITS ((1 << DIRECT_LOOKUP_SHIFT) - 1)
#define HPSA_SIMPLE_ERROR_BITS 0x03
	if (unlikely(!(h->transMethod & CFGTBL_Trans_Performant)))
		return tag & ~HPSA_SIMPLE_ERROR_BITS;
	return tag & ~HPSA_PERF_ERROR_BITS;
}
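/*
 * Illustrative sketch (not driver code), assuming DIRECT_LOOKUP_SHIFT == 5
 * and DIRECT_LOOKUP_BIT == 0x10 as defined in the driver headers: a command
 * at pool index 7 is tagged and decoded like this:
 *
 *	u32 tag = (7 << DIRECT_LOOKUP_SHIFT) | DIRECT_LOOKUP_BIT;  ( 0xf0 )
 *	if (hpsa_tag_contains_index(tag))
 *		index = hpsa_tag_to_index(tag);			   ( 7 again )
 */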
/* process completion of an indexed ("direct lookup") command */
static inline void process_indexed_cmd(struct ctlr_info *h,
	u32 raw_tag)
{
	u32 tag_index;
	struct CommandList *c;

	tag_index = hpsa_tag_to_index(raw_tag);
	if (!bad_tag(h, tag_index, raw_tag)) {
		c = h->cmd_pool + tag_index;
		finish_cmd(c);
	}
}

/* process completion of a non-indexed command */
static inline void process_nonindexed_cmd(struct ctlr_info *h,
	u32 raw_tag)
{
	u32 tag;
	struct CommandList *c = NULL;
	unsigned long flags;

	tag = hpsa_tag_discard_error_bits(h, raw_tag);
	spin_lock_irqsave(&h->lock, flags);
	list_for_each_entry(c, &h->cmpQ, list) {
		if ((c->busaddr & 0xFFFFFFE0) == (tag & 0xFFFFFFE0)) {
			spin_unlock_irqrestore(&h->lock, flags);
			finish_cmd(c);
			return;
		}
	}
	spin_unlock_irqrestore(&h->lock, flags);
	bad_tag(h, h->nr_cmds + 1, raw_tag);
}
/* Some controllers, like p400, will give us one interrupt
 * after a soft reset, even if we turned interrupts off.
 * Only need to check for this in the hpsa_xxx_discard_completions
 * functions.
 */
static int ignore_bogus_interrupt(struct ctlr_info *h)
{
	if (likely(!reset_devices))
		return 0;

	if (likely(h->interrupts_enabled))
		return 0;

	dev_info(&h->pdev->dev, "Received interrupt while interrupts disabled "
		"(known firmware bug.)  Ignoring.\n");

	return 1;
}
/*
 * Convert &h->q[x] (passed to interrupt handlers) back to h.
 * Relies on (h-q[x] == x) being true for x such that
 * 0 <= x < MAX_REPLY_QUEUES.
 */
static struct ctlr_info *queue_to_hba(u8 *queue)
{
	return container_of((queue - *queue), struct ctlr_info, q[0]);
}
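/*
 * Illustrative sketch (not driver code): h->q[x] is initialized to x, so
 * subtracting the stored value from the element's own address always yields
 * &h->q[0], which container_of() converts back to the enclosing ctlr_info:
 *
 *	u8 *queue = &h->q[3];	( *queue == 3 )
 *	queue - *queue == &h->q[0], so queue_to_hba(queue) == h
 */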
static irqreturn_t hpsa_intx_discard_completions(int irq, void *queue)
{
	struct ctlr_info *h = queue_to_hba(queue);
	u8 q = *(u8 *) queue;
	u32 raw_tag;

	if (ignore_bogus_interrupt(h))
		return IRQ_NONE;

	if (interrupt_not_for_us(h))
		return IRQ_NONE;
	h->last_intr_timestamp = get_jiffies_64();
	while (interrupt_pending(h)) {
		raw_tag = get_next_completion(h, q);
		while (raw_tag != FIFO_EMPTY)
			raw_tag = next_command(h, q);
	}
	return IRQ_HANDLED;
}

static irqreturn_t hpsa_msix_discard_completions(int irq, void *queue)
{
	struct ctlr_info *h = queue_to_hba(queue);
	u32 raw_tag;
	u8 q = *(u8 *) queue;

	if (ignore_bogus_interrupt(h))
		return IRQ_NONE;

	h->last_intr_timestamp = get_jiffies_64();
	raw_tag = get_next_completion(h, q);
	while (raw_tag != FIFO_EMPTY)
		raw_tag = next_command(h, q);
	return IRQ_HANDLED;
}

static irqreturn_t do_hpsa_intr_intx(int irq, void *queue)
{
	struct ctlr_info *h = queue_to_hba((u8 *) queue);
	u32 raw_tag;
	u8 q = *(u8 *) queue;

	if (interrupt_not_for_us(h))
		return IRQ_NONE;
	h->last_intr_timestamp = get_jiffies_64();
	while (interrupt_pending(h)) {
		raw_tag = get_next_completion(h, q);
		while (raw_tag != FIFO_EMPTY) {
			if (likely(hpsa_tag_contains_index(raw_tag)))
				process_indexed_cmd(h, raw_tag);
			else
				process_nonindexed_cmd(h, raw_tag);
			raw_tag = next_command(h, q);
		}
	}
	return IRQ_HANDLED;
}

static irqreturn_t do_hpsa_intr_msi(int irq, void *queue)
{
	struct ctlr_info *h = queue_to_hba(queue);
	u32 raw_tag;
	u8 q = *(u8 *) queue;

	h->last_intr_timestamp = get_jiffies_64();
	raw_tag = get_next_completion(h, q);
	while (raw_tag != FIFO_EMPTY) {
		if (likely(hpsa_tag_contains_index(raw_tag)))
			process_indexed_cmd(h, raw_tag);
		else
			process_nonindexed_cmd(h, raw_tag);
		raw_tag = next_command(h, q);
	}
	return IRQ_HANDLED;
}
/* Send a message CDB to the firmware. Careful, this only works
 * in simple mode, not performant mode due to the tag lookup.
 * We only ever use this immediately after a controller reset.
 */
static int hpsa_message(struct pci_dev *pdev, unsigned char opcode,
	unsigned char type)
{
	struct Command {
		struct CommandListHeader CommandHeader;
		struct RequestBlock Request;
		struct ErrDescriptor ErrorDescriptor;
	};
	struct Command *cmd;
	static const size_t cmd_sz = sizeof(*cmd) +
		sizeof(cmd->ErrorDescriptor);
	dma_addr_t paddr64;
	uint32_t paddr32, tag;
	void __iomem *vaddr;
	int i, err;

	vaddr = pci_ioremap_bar(pdev, 0);
	if (vaddr == NULL)
		return -ENOMEM;

	/* The Inbound Post Queue only accepts 32-bit physical addresses for the
	 * CCISS commands, so they must be allocated from the lower 4GiB of
	 * memory.
	 */
	err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
	if (err) {
		iounmap(vaddr);
		return -ENOMEM;
	}

	cmd = pci_alloc_consistent(pdev, cmd_sz, &paddr64);
	if (cmd == NULL) {
		iounmap(vaddr);
		return -ENOMEM;
	}

	/* This must fit, because of the 32-bit consistent DMA mask.  Also,
	 * although there's no guarantee, we assume that the address is at
	 * least 4-byte aligned (most likely, it's page-aligned).
	 */
	paddr32 = paddr64;

	cmd->CommandHeader.ReplyQueue = 0;
	cmd->CommandHeader.SGList = 0;
	cmd->CommandHeader.SGTotal = 0;
	cmd->CommandHeader.Tag.lower = paddr32;
	cmd->CommandHeader.Tag.upper = 0;
	memset(&cmd->CommandHeader.LUN.LunAddrBytes, 0, 8);

	cmd->Request.CDBLen = 16;
	cmd->Request.Type.Type = TYPE_MSG;
	cmd->Request.Type.Attribute = ATTR_HEADOFQUEUE;
	cmd->Request.Type.Direction = XFER_NONE;
	cmd->Request.Timeout = 0; /* Don't time out */
	cmd->Request.CDB[0] = opcode;
	cmd->Request.CDB[1] = type;
	memset(&cmd->Request.CDB[2], 0, 14); /* rest of the CDB is reserved */
	cmd->ErrorDescriptor.Addr.lower = paddr32 + sizeof(*cmd);
	cmd->ErrorDescriptor.Addr.upper = 0;
	cmd->ErrorDescriptor.Len = sizeof(struct ErrorInfo);

	writel(paddr32, vaddr + SA5_REQUEST_PORT_OFFSET);

	for (i = 0; i < HPSA_MSG_SEND_RETRY_LIMIT; i++) {
		tag = readl(vaddr + SA5_REPLY_PORT_OFFSET);
		if ((tag & ~HPSA_SIMPLE_ERROR_BITS) == paddr32)
			break;
		msleep(HPSA_MSG_SEND_RETRY_INTERVAL_MSECS);
	}

	iounmap(vaddr);

	/* we leak the DMA buffer here ... no choice since the controller could
	 * still complete the command.
	 */
	if (i == HPSA_MSG_SEND_RETRY_LIMIT) {
		dev_err(&pdev->dev, "controller message %02x:%02x timed out\n",
			opcode, type);
		return -ETIMEDOUT;
	}

	pci_free_consistent(pdev, cmd_sz, cmd, paddr64);

	if (tag & HPSA_ERROR_BIT) {
		dev_err(&pdev->dev, "controller message %02x:%02x failed\n",
			opcode, type);
		return -EIO;
	}

	dev_info(&pdev->dev, "controller message %02x:%02x succeeded\n",
		opcode, type);
	return 0;
}

#define hpsa_noop(p) hpsa_message(p, 3, 0)
static int hpsa_controller_hard_reset(struct pci_dev *pdev,
	void * __iomem vaddr, u32 use_doorbell)
{
	u16 pmcsr;
	int pos;

	if (use_doorbell) {
		/* For everything after the P600, the PCI power state method
		 * of resetting the controller doesn't work, so we have this
		 * other way using the doorbell register.
		 */
		dev_info(&pdev->dev, "using doorbell to reset controller\n");
		writel(use_doorbell, vaddr + SA5_DOORBELL);
	} else { /* Try to do it the PCI power state way */

		/* Quoting from the Open CISS Specification: "The Power
		 * Management Control/Status Register (CSR) controls the power
		 * state of the device.  The normal operating state is D0,
		 * CSR=00h.  The software off state is D3, CSR=03h.  To reset
		 * the controller, place the interface device in D3 then to D0,
		 * this causes a secondary PCI reset which will reset the
		 * controller."
		 */
		pos = pci_find_capability(pdev, PCI_CAP_ID_PM);
		if (pos == 0) {
			dev_err(&pdev->dev,
				"hpsa_reset_controller: "
				"PCI PM not supported\n");
			return -ENODEV;
		}
		dev_info(&pdev->dev, "using PCI PM to reset controller\n");
		/* enter the D3hot power management state */
		pci_read_config_word(pdev, pos + PCI_PM_CTRL, &pmcsr);
		pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
		pmcsr |= PCI_D3hot;
		pci_write_config_word(pdev, pos + PCI_PM_CTRL, pmcsr);

		msleep(500);

		/* enter the D0 power management state */
		pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
		pmcsr |= PCI_D0;
		pci_write_config_word(pdev, pos + PCI_PM_CTRL, pmcsr);

		/*
		 * The P600 requires a small delay when changing states.
		 * Otherwise we may think the board did not reset and we bail.
		 * This for kdump only and is particular to the P600.
		 */
		msleep(500);
	}
	return 0;
}
static void init_driver_version(char *driver_version, int len)
{
	memset(driver_version, 0, len);
	strncpy(driver_version, HPSA " " HPSA_DRIVER_VERSION, len - 1);
}

static int write_driver_ver_to_cfgtable(struct CfgTable __iomem *cfgtable)
{
	char *driver_version;
	int i, size = sizeof(cfgtable->driver_version);

	driver_version = kmalloc(size, GFP_KERNEL);
	if (!driver_version)
		return -ENOMEM;

	init_driver_version(driver_version, size);
	for (i = 0; i < size; i++)
		writeb(driver_version[i], &cfgtable->driver_version[i]);
	kfree(driver_version);
	return 0;
}

static void read_driver_ver_from_cfgtable(struct CfgTable __iomem *cfgtable,
	unsigned char *driver_ver)
{
	int i;

	for (i = 0; i < sizeof(cfgtable->driver_version); i++)
		driver_ver[i] = readb(&cfgtable->driver_version[i]);
}

static int controller_reset_failed(struct CfgTable __iomem *cfgtable)
{
	char *driver_ver, *old_driver_ver;
	int rc, size = sizeof(cfgtable->driver_version);

	old_driver_ver = kmalloc(2 * size, GFP_KERNEL);
	if (!old_driver_ver)
		return -ENOMEM;
	driver_ver = old_driver_ver + size;

	/* After a reset, the 32 bytes of "driver version" in the cfgtable
	 * should have been changed, otherwise we know the reset failed.
	 */
	init_driver_version(old_driver_ver, size);
	read_driver_ver_from_cfgtable(cfgtable, driver_ver);
	rc = !memcmp(driver_ver, old_driver_ver, size);
	kfree(old_driver_ver);
	return rc;
}
/* This does a hard reset of the controller using PCI power management
 * states or the doorbell register.
 */
static int hpsa_kdump_hard_reset_controller(struct pci_dev *pdev)
{
	u64 cfg_offset;
	u32 cfg_base_addr;
	u64 cfg_base_addr_index;
	void __iomem *vaddr;
	unsigned long paddr;
	u32 misc_fw_support;
	int rc;
	struct CfgTable __iomem *cfgtable;
	u32 use_doorbell;
	u32 board_id;
	u16 command_register;

	/* For controllers as old as the P600, this is very nearly
	 * the same thing as
	 *
	 * pci_save_state(pci_dev);
	 * pci_set_power_state(pci_dev, PCI_D3hot);
	 * pci_set_power_state(pci_dev, PCI_D0);
	 * pci_restore_state(pci_dev);
	 *
	 * For controllers newer than the P600, the pci power state
	 * method of resetting doesn't work so we have another way
	 * using the doorbell register.
	 */

	rc = hpsa_lookup_board_id(pdev, &board_id);
	if (rc < 0 || !ctlr_is_resettable(board_id)) {
		dev_warn(&pdev->dev, "Not resetting device.\n");
		return -ENODEV;
	}

	/* if controller is soft- but not hard resettable... */
	if (!ctlr_is_hard_resettable(board_id))
		return -ENOTSUPP; /* try soft reset later. */

	/* Save the PCI command register */
	pci_read_config_word(pdev, 4, &command_register);
	pci_save_state(pdev);

	/* find the first memory BAR, so we can find the cfg table */
	rc = hpsa_pci_find_memory_BAR(pdev, &paddr);
	if (rc)
		return rc;
	vaddr = remap_pci_mem(paddr, 0x250);
	if (!vaddr)
		return -ENOMEM;

	/* find cfgtable in order to check if reset via doorbell is supported */
	rc = hpsa_find_cfg_addrs(pdev, vaddr, &cfg_base_addr,
					&cfg_base_addr_index, &cfg_offset);
	if (rc)
		goto unmap_vaddr;
	cfgtable = remap_pci_mem(pci_resource_start(pdev,
		       cfg_base_addr_index) + cfg_offset, sizeof(*cfgtable));
	if (!cfgtable) {
		rc = -ENOMEM;
		goto unmap_vaddr;
	}
	rc = write_driver_ver_to_cfgtable(cfgtable);
	if (rc)
		goto unmap_vaddr;

	/* If reset via doorbell register is supported, use that.
	 * There are two such methods.  Favor the newest method.
	 */
	misc_fw_support = readl(&cfgtable->misc_fw_support);
	use_doorbell = misc_fw_support & MISC_FW_DOORBELL_RESET2;
	if (use_doorbell) {
		use_doorbell = DOORBELL_CTLR_RESET2;
	} else {
		use_doorbell = misc_fw_support & MISC_FW_DOORBELL_RESET;
		if (use_doorbell) {
			dev_warn(&pdev->dev, "Soft reset not supported. "
				"Firmware update is required.\n");
			rc = -ENOTSUPP; /* try soft reset */
			goto unmap_cfgtable;
		}
	}

	rc = hpsa_controller_hard_reset(pdev, vaddr, use_doorbell);
	if (rc)
		goto unmap_cfgtable;

	pci_restore_state(pdev);
	pci_write_config_word(pdev, 4, command_register);

	/* Some devices (notably the HP Smart Array 5i Controller)
	   need a little pause here */
	msleep(HPSA_POST_RESET_PAUSE_MSECS);

	/* Wait for board to become not ready, then ready. */
	dev_info(&pdev->dev, "Waiting for board to reset.\n");
	rc = hpsa_wait_for_board_state(pdev, vaddr, BOARD_NOT_READY);
	if (rc) {
		dev_warn(&pdev->dev,
			"failed waiting for board to reset."
			" Will try soft reset.\n");
		rc = -ENOTSUPP; /* Not expected, but try soft reset later */
		goto unmap_cfgtable;
	}
	rc = hpsa_wait_for_board_state(pdev, vaddr, BOARD_READY);
	if (rc) {
		dev_warn(&pdev->dev,
			"failed waiting for board to become ready "
			"after hard reset\n");
		goto unmap_cfgtable;
	}

	rc = controller_reset_failed(vaddr);
	if (rc < 0)
		goto unmap_cfgtable;
	if (rc) {
		dev_warn(&pdev->dev, "Unable to successfully reset "
			"controller. Will try soft reset.\n");
		rc = -ENOTSUPP;
	} else {
		dev_info(&pdev->dev, "board ready after hard reset.\n");
	}

unmap_cfgtable:
	iounmap(cfgtable);

unmap_vaddr:
	iounmap(vaddr);
	return rc;
}
/*
 * We cannot read the structure directly, for portability we must use
 * the io functions.
 * This is for debug only.
 */
static void print_cfg_table(struct device *dev, struct CfgTable *tb)
{
#ifdef HPSA_DEBUG
	int i;
	char temp_name[17];

	dev_info(dev, "Controller Configuration information\n");
	dev_info(dev, "------------------------------------\n");
	for (i = 0; i < 4; i++)
		temp_name[i] = readb(&(tb->Signature[i]));
	temp_name[4] = '\0';
	dev_info(dev, "   Signature = %s\n", temp_name);
	dev_info(dev, "   Spec Number = %d\n", readl(&(tb->SpecValence)));
	dev_info(dev, "   Transport methods supported = 0x%x\n",
	       readl(&(tb->TransportSupport)));
	dev_info(dev, "   Transport methods active = 0x%x\n",
	       readl(&(tb->TransportActive)));
	dev_info(dev, "   Requested transport Method = 0x%x\n",
	       readl(&(tb->HostWrite.TransportRequest)));
	dev_info(dev, "   Coalesce Interrupt Delay = 0x%x\n",
	       readl(&(tb->HostWrite.CoalIntDelay)));
	dev_info(dev, "   Coalesce Interrupt Count = 0x%x\n",
	       readl(&(tb->HostWrite.CoalIntCount)));
	dev_info(dev, "   Max outstanding commands = 0x%x\n",
	       readl(&(tb->CmdsOutMax)));
	dev_info(dev, "   Bus Types = 0x%x\n", readl(&(tb->BusTypes)));
	for (i = 0; i < 16; i++)
		temp_name[i] = readb(&(tb->ServerName[i]));
	temp_name[16] = '\0';
	dev_info(dev, "   Server Name = %s\n", temp_name);
	dev_info(dev, "   Heartbeat Counter = 0x%x\n\n\n",
		readl(&(tb->HeartBeat)));
#endif				/* HPSA_DEBUG */
}
static int find_PCI_BAR_index(struct pci_dev *pdev, unsigned long pci_bar_addr)
{
	int i, offset, mem_type, bar_type;

	if (pci_bar_addr == PCI_BASE_ADDRESS_0)	/* looking for BAR zero? */
		return 0;
	offset = 0;
	for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) {
		bar_type = pci_resource_flags(pdev, i) & PCI_BASE_ADDRESS_SPACE;
		if (bar_type == PCI_BASE_ADDRESS_SPACE_IO)
			offset += 4;
		else {
			mem_type = pci_resource_flags(pdev, i) &
			    PCI_BASE_ADDRESS_MEM_TYPE_MASK;
			switch (mem_type) {
			case PCI_BASE_ADDRESS_MEM_TYPE_32:
			case PCI_BASE_ADDRESS_MEM_TYPE_1M:
				offset += 4;	/* 32 bit */
				break;
			case PCI_BASE_ADDRESS_MEM_TYPE_64:
				offset += 8;
				break;
			default:	/* reserved in PCI 2.2 */
				dev_warn(&pdev->dev,
				       "base address is invalid\n");
				return -1;
			}
		}
		if (offset == pci_bar_addr - PCI_BASE_ADDRESS_0)
			return i + 1;
	}
	return -1;
}
/* If MSI/MSI-X is supported by the kernel we will try to enable it on
 * controllers that are capable. If not, we use IO-APIC mode.
 */
static void hpsa_interrupt_mode(struct ctlr_info *h)
{
#ifdef CONFIG_PCI_MSI
	int err, i;
	struct msix_entry hpsa_msix_entries[MAX_REPLY_QUEUES];

	for (i = 0; i < MAX_REPLY_QUEUES; i++) {
		hpsa_msix_entries[i].vector = 0;
		hpsa_msix_entries[i].entry = i;
	}

	/* Some boards advertise MSI but don't really support it */
	if ((h->board_id == 0x40700E11) || (h->board_id == 0x40800E11) ||
	    (h->board_id == 0x40820E11) || (h->board_id == 0x40830E11))
		goto default_int_mode;
	if (pci_find_capability(h->pdev, PCI_CAP_ID_MSIX)) {
		dev_info(&h->pdev->dev, "MSIX\n");
		err = pci_enable_msix(h->pdev, hpsa_msix_entries,
						MAX_REPLY_QUEUES);
		if (!err) {
			for (i = 0; i < MAX_REPLY_QUEUES; i++)
				h->intr[i] = hpsa_msix_entries[i].vector;
			h->msix_vector = 1;
			return;
		}
		if (err > 0) {
			dev_warn(&h->pdev->dev, "only %d MSI-X vectors "
			       "available\n", err);
			goto default_int_mode;
		} else {
			dev_warn(&h->pdev->dev, "MSI-X init failed %d\n",
			       err);
			goto default_int_mode;
		}
	}
	if (pci_find_capability(h->pdev, PCI_CAP_ID_MSI)) {
		dev_info(&h->pdev->dev, "MSI\n");
		if (!pci_enable_msi(h->pdev))
			h->msi_vector = 1;
		else
			dev_warn(&h->pdev->dev, "MSI init failed\n");
	}
default_int_mode:
#endif				/* CONFIG_PCI_MSI */
	/* if we get here we're going to use the default interrupt mode */
	h->intr[h->intr_mode] = h->pdev->irq;
}

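/* Build the 32-bit board ID (subsystem device ID in the upper half,
 * subsystem vendor ID in the lower half) and look it up in products[].
 * Unrecognized boards are either rejected or, with hpsa_allow_any set,
 * mapped to the generic entry at the end of the table.
 */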
static int hpsa_lookup_board_id(struct pci_dev *pdev, u32 *board_id)
{
	int i;
	u32 subsystem_vendor_id, subsystem_device_id;

	subsystem_vendor_id = pdev->subsystem_vendor;
	subsystem_device_id = pdev->subsystem_device;
	*board_id = ((subsystem_device_id << 16) & 0xffff0000) |
		    subsystem_vendor_id;

	for (i = 0; i < ARRAY_SIZE(products); i++)
		if (*board_id == products[i].board_id)
			return i;

	if ((subsystem_vendor_id != PCI_VENDOR_ID_HP &&
		subsystem_vendor_id != PCI_VENDOR_ID_COMPAQ) ||
		!hpsa_allow_any) {
		dev_warn(&pdev->dev, "unrecognized board ID: "
			"0x%08x, ignoring.\n", *board_id);
		return -ENODEV;
	}
	return ARRAY_SIZE(products) - 1; /* generic unknown smart array */
}

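/* Find the first memory BAR; this is where the controller registers live. */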
static int hpsa_pci_find_memory_BAR(struct pci_dev *pdev,
	unsigned long *memory_bar)
{
	int i;

	for (i = 0; i < DEVICE_COUNT_RESOURCE; i++)
		if (pci_resource_flags(pdev, i) & IORESOURCE_MEM) {
			/* addressing mode bits already removed */
			*memory_bar = pci_resource_start(pdev, i);
			dev_dbg(&pdev->dev, "memory BAR = %lx\n",
				*memory_bar);
			return 0;
		}
	dev_warn(&pdev->dev, "no memory BAR found\n");
	return -ENODEV;
}

static int hpsa_wait_for_board_state(struct pci_dev *pdev, void __iomem *vaddr,
	int wait_for_ready)
{
	int i, iterations;
	u32 scratchpad;

	if (wait_for_ready)
		iterations = HPSA_BOARD_READY_ITERATIONS;
	else
		iterations = HPSA_BOARD_NOT_READY_ITERATIONS;

	for (i = 0; i < iterations; i++) {
		scratchpad = readl(vaddr + SA5_SCRATCHPAD_OFFSET);
		if (wait_for_ready) {
			if (scratchpad == HPSA_FIRMWARE_READY)
				return 0;
		} else {
			if (scratchpad != HPSA_FIRMWARE_READY)
				return 0;
		}
		msleep(HPSA_BOARD_READY_POLL_INTERVAL_MSECS);
	}
	dev_warn(&pdev->dev, "board not ready, timed out.\n");
	return -ENODEV;
}

static int hpsa_find_cfg_addrs(struct pci_dev *pdev, void __iomem *vaddr,
	u32 *cfg_base_addr, u64 *cfg_base_addr_index,
	u64 *cfg_offset)
{
	*cfg_base_addr = readl(vaddr + SA5_CTCFG_OFFSET);
	*cfg_offset = readl(vaddr + SA5_CTMEM_OFFSET);
	*cfg_base_addr &= (u32) 0x0000ffff;
	*cfg_base_addr_index = find_PCI_BAR_index(pdev, *cfg_base_addr);
	if (*cfg_base_addr_index == -1) {
		dev_warn(&pdev->dev, "cannot find cfg_base_addr_index\n");
		return -ENODEV;
	}
	return 0;
}

static int hpsa_find_cfgtables(struct ctlr_info *h)
{
	u64 cfg_offset;
	u32 cfg_base_addr;
	u64 cfg_base_addr_index;
	u32 trans_offset;
	int rc;

	rc = hpsa_find_cfg_addrs(h->pdev, h->vaddr, &cfg_base_addr,
		&cfg_base_addr_index, &cfg_offset);
	if (rc)
		return rc;
	h->cfgtable = remap_pci_mem(pci_resource_start(h->pdev,
		       cfg_base_addr_index) + cfg_offset, sizeof(*h->cfgtable));
	if (!h->cfgtable)
		return -ENOMEM;
	rc = write_driver_ver_to_cfgtable(h->cfgtable);
	if (rc)
		return rc;
	/* Find performant mode table. */
	trans_offset = readl(&h->cfgtable->TransMethodOffset);
	h->transtable = remap_pci_mem(pci_resource_start(h->pdev,
				cfg_base_addr_index)+cfg_offset+trans_offset,
				sizeof(*h->transtable));
	if (!h->transtable)
		return -ENOMEM;
	return 0;
}

static void hpsa_get_max_perf_mode_cmds(struct ctlr_info *h)
{
	h->max_commands = readl(&(h->cfgtable->MaxPerformantModeCommands));

	/* Limit commands in memory limited kdump scenario. */
	if (reset_devices && h->max_commands > 32)
		h->max_commands = 32;

	if (h->max_commands < 16) {
		dev_warn(&h->pdev->dev, "Controller reports "
			"max supported commands of %d, an obvious lie. "
			"Using 16. Ensure that firmware is up to date.\n",
			h->max_commands);
		h->max_commands = 16;
	}
}

/* Interrogate the hardware for some limits:
 * max commands, max SG elements without chaining, and with chaining,
 * SG chain block size, etc.
 */
static void hpsa_find_board_params(struct ctlr_info *h)
{
	hpsa_get_max_perf_mode_cmds(h);
	h->nr_cmds = h->max_commands - 4; /* Allow room for some ioctls */
	h->maxsgentries = readl(&(h->cfgtable->MaxScatterGatherElements));
	/*
	 * Limit in-command s/g elements to 32 to save dma'able memory.
	 * However, the spec says if 0, use 31.
	 */
	h->max_cmd_sg_entries = 31;
	if (h->maxsgentries > 512) {
		h->max_cmd_sg_entries = 32;
		h->chainsize = h->maxsgentries - h->max_cmd_sg_entries + 1;
		h->maxsgentries--; /* save one for chain pointer */
	} else {
		h->maxsgentries = 31; /* default to traditional values */
		h->chainsize = 0;
	}

	/* Find out what task management functions are supported and cache */
	h->TMFSupportFlags = readl(&(h->cfgtable->TMFSupportFlags));
}

static inline bool hpsa_CISS_signature_present(struct ctlr_info *h)
{
	if (!check_signature(h->cfgtable->Signature, "CISS", 4)) {
		dev_warn(&h->pdev->dev, "not a valid CISS config table\n");
		return false;
	}
	return true;
}

/* Need to enable prefetch in the SCSI core for 6400 in x86 */
static inline void hpsa_enable_scsi_prefetch(struct ctlr_info *h)
{
#ifdef CONFIG_X86
	u32 prefetch;

	prefetch = readl(&(h->cfgtable->SCSI_Prefetch));
	prefetch |= 0x100;
	writel(prefetch, &(h->cfgtable->SCSI_Prefetch));
#endif
}

/* Disable DMA prefetch for the P600.  Otherwise an ASIC bug may result
 * in a prefetch beyond physical memory.
 */
static inline void hpsa_p600_dma_prefetch_quirk(struct ctlr_info *h)
{
	u32 dma_prefetch;

	if (h->board_id != 0x3225103C)
		return;
	dma_prefetch = readl(h->vaddr + I2O_DMA1_CFG);
	dma_prefetch |= 0x8000;
	writel(dma_prefetch, h->vaddr + I2O_DMA1_CFG);
}

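/* After ringing the doorbell to request a transport mode change, poll
 * until the controller clears CFGTBL_ChangeReq to acknowledge it.
 */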
static void hpsa_wait_for_mode_change_ack(struct ctlr_info *h)
{
	int i;
	u32 doorbell_value;
	unsigned long flags;

	/* under certain very rare conditions, this can take awhile.
	 * (e.g.: hot replace a failed 144GB drive in a RAID 5 set right
	 * as we enter this code.)
	 */
	for (i = 0; i < MAX_CONFIG_WAIT; i++) {
		spin_lock_irqsave(&h->lock, flags);
		doorbell_value = readl(h->vaddr + SA5_DOORBELL);
		spin_unlock_irqrestore(&h->lock, flags);
		if (!(doorbell_value & CFGTBL_ChangeReq))
			break;
		/* delay and try again */
		usleep_range(10000, 20000);
	}
}

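/* Request the simple (non-performant) transport method and confirm that
 * the controller actually activated it.
 */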
static int hpsa_enter_simple_mode(struct ctlr_info *h)
{
	u32 trans_support;

	trans_support = readl(&(h->cfgtable->TransportSupport));
	if (!(trans_support & SIMPLE_MODE))
		return -ENOTSUPP;

	h->max_commands = readl(&(h->cfgtable->CmdsOutMax));
	/* Update the field, and then ring the doorbell */
	writel(CFGTBL_Trans_Simple, &(h->cfgtable->HostWrite.TransportRequest));
	writel(CFGTBL_ChangeReq, h->vaddr + SA5_DOORBELL);
	hpsa_wait_for_mode_change_ack(h);
	print_cfg_table(&h->pdev->dev, h->cfgtable);
	if (!(readl(&(h->cfgtable->TransportActive)) & CFGTBL_Trans_Simple)) {
		dev_warn(&h->pdev->dev,
			"unable to get board into simple mode\n");
		return -ENODEV;
	}
	h->transMethod = CFGTBL_Trans_Simple;
	return 0;
}

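/* One-stop PCI setup: identify the board, enable the device and claim
 * its regions, pick an interrupt mode, map the registers and config
 * tables, and drop the controller into simple mode.
 */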
static int hpsa_pci_init(struct ctlr_info *h)
{
	int prod_index, err;

	prod_index = hpsa_lookup_board_id(h->pdev, &h->board_id);
	if (prod_index < 0)
		return -ENODEV;
	h->product_name = products[prod_index].product_name;
	h->access = *(products[prod_index].access);

	pci_disable_link_state(h->pdev, PCIE_LINK_STATE_L0S |
			       PCIE_LINK_STATE_L1 | PCIE_LINK_STATE_CLKPM);

	err = pci_enable_device(h->pdev);
	if (err) {
		dev_warn(&h->pdev->dev, "unable to enable PCI device\n");
		return err;
	}

	/* Enable bus mastering (pci_disable_device may disable this) */
	pci_set_master(h->pdev);

	err = pci_request_regions(h->pdev, HPSA);
	if (err) {
		dev_err(&h->pdev->dev,
			"cannot obtain PCI resources, aborting\n");
		return err;
	}
	hpsa_interrupt_mode(h);
	err = hpsa_pci_find_memory_BAR(h->pdev, &h->paddr);
	if (err)
		goto err_out_free_res;
	h->vaddr = remap_pci_mem(h->paddr, 0x250);
	if (!h->vaddr) {
		err = -ENOMEM;
		goto err_out_free_res;
	}
	err = hpsa_wait_for_board_state(h->pdev, h->vaddr, BOARD_READY);
	if (err)
		goto err_out_free_res;
	err = hpsa_find_cfgtables(h);
	if (err)
		goto err_out_free_res;
	hpsa_find_board_params(h);

	if (!hpsa_CISS_signature_present(h)) {
		err = -ENODEV;
		goto err_out_free_res;
	}
	hpsa_enable_scsi_prefetch(h);
	hpsa_p600_dma_prefetch_quirk(h);
	err = hpsa_enter_simple_mode(h);
	if (err)
		goto err_out_free_res;
	return 0;

err_out_free_res:
	if (h->transtable)
		iounmap(h->transtable);
	if (h->cfgtable)
		iounmap(h->cfgtable);
	if (h->vaddr)
		iounmap(h->vaddr);
	pci_disable_device(h->pdev);
	pci_release_regions(h->pdev);
	return err;
}

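/* Cache the controller's standard INQUIRY data; failure is non-fatal,
 * we just leave h->hba_inquiry_data NULL.
 */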
static void hpsa_hba_inquiry(struct ctlr_info *h)
{
	int rc;

#define HBA_INQUIRY_BYTE_COUNT 64
	h->hba_inquiry_data = kmalloc(HBA_INQUIRY_BYTE_COUNT, GFP_KERNEL);
	if (!h->hba_inquiry_data)
		return;
	rc = hpsa_scsi_do_inquiry(h, RAID_CTLR_LUNID, 0,
		h->hba_inquiry_data, HBA_INQUIRY_BYTE_COUNT);
	if (rc != 0) {
		kfree(h->hba_inquiry_data);
		h->hba_inquiry_data = NULL;
	}
}

static int hpsa_init_reset_devices(struct pci_dev *pdev)
{
	int rc, i;

	if (!reset_devices)
		return 0;

	/* kdump kernel is loading, we don't know in which state the
	 * pci interface is. The dev->enable_cnt is equal zero
	 * so we call enable+disable, wait a while and switch it on.
	 */
	rc = pci_enable_device(pdev);
	if (rc) {
		dev_warn(&pdev->dev, "Failed to enable PCI device\n");
		return -ENODEV;
	}
	pci_disable_device(pdev);
	msleep(260);			/* a randomly chosen number */
	rc = pci_enable_device(pdev);
	if (rc) {
		dev_warn(&pdev->dev, "failed to enable device.\n");
		return -ENODEV;
	}
	pci_set_master(pdev);
	/* Reset the controller with a PCI power-cycle or via doorbell */
	rc = hpsa_kdump_hard_reset_controller(pdev);

	/* -ENOTSUPP here means we cannot reset the controller
	 * but it's already (and still) up and running in
	 * "performant mode".  Or, it might be 640x, which can't reset
	 * due to concerns about shared bbwc between 6402/6404 pair.
	 */
	if (rc) {
		if (rc != -ENOTSUPP) /* just try to do the kdump anyhow. */
			rc = -ENODEV;
		goto out_disable;
	}

	/* Now try to get the controller to respond to a no-op */
	dev_warn(&pdev->dev, "Waiting for controller to respond to no-op\n");
	for (i = 0; i < HPSA_POST_RESET_NOOP_RETRIES; i++) {
		if (hpsa_noop(pdev) == 0)
			break;
		else
			dev_warn(&pdev->dev, "no-op failed%s\n",
					(i < 11 ? "; re-trying" : ""));
	}

out_disable:
	pci_disable_device(pdev);
	return rc;
}

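/* Allocate the command-slot bitmap and the DMA-coherent pools holding
 * the command blocks and their matching error-info buffers.
 */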
static int hpsa_allocate_cmd_pool(struct ctlr_info *h)
{
	h->cmd_pool_bits = kzalloc(
		DIV_ROUND_UP(h->nr_cmds, BITS_PER_LONG) *
		sizeof(unsigned long), GFP_KERNEL);
	h->cmd_pool = pci_alloc_consistent(h->pdev,
		    h->nr_cmds * sizeof(*h->cmd_pool),
		    &(h->cmd_pool_dhandle));
	h->errinfo_pool = pci_alloc_consistent(h->pdev,
		    h->nr_cmds * sizeof(*h->errinfo_pool),
		    &(h->errinfo_pool_dhandle));
	if ((h->cmd_pool_bits == NULL)
	    || (h->cmd_pool == NULL)
	    || (h->errinfo_pool == NULL)) {
		dev_err(&h->pdev->dev, "out of memory in %s", __func__);
		return -ENOMEM;
	}
	return 0;
}

static void hpsa_free_cmd_pool(struct ctlr_info *h)
{
	kfree(h->cmd_pool_bits);
	if (h->cmd_pool)
		pci_free_consistent(h->pdev,
			    h->nr_cmds * sizeof(struct CommandList),
			    h->cmd_pool, h->cmd_pool_dhandle);
	if (h->errinfo_pool)
		pci_free_consistent(h->pdev,
			    h->nr_cmds * sizeof(struct ErrorInfo),
			    h->errinfo_pool,
			    h->errinfo_pool_dhandle);
}

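/* Register interrupt handlers: one per reply queue when MSI-X is active
 * in performant mode, otherwise a single (possibly shared) interrupt.
 */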
static int hpsa_request_irq(struct ctlr_info *h,
	irqreturn_t (*msixhandler)(int, void *),
	irqreturn_t (*intxhandler)(int, void *))
{
	int rc, i;

	/*
	 * initialize h->q[x] = x so that interrupt handlers know which
	 * queue to process.
	 */
	for (i = 0; i < MAX_REPLY_QUEUES; i++)
		h->q[i] = (u8) i;

	if (h->intr_mode == PERF_MODE_INT && h->msix_vector) {
		/* If performant mode and MSI-X, use multiple reply queues */
		for (i = 0; i < MAX_REPLY_QUEUES; i++)
			rc = request_irq(h->intr[i], msixhandler,
					0, h->devname,
					&h->q[i]);
	} else {
		/* Use single reply pool */
		if (h->msix_vector || h->msi_vector) {
			rc = request_irq(h->intr[h->intr_mode],
				msixhandler, 0, h->devname,
				&h->q[h->intr_mode]);
		} else {
			rc = request_irq(h->intr[h->intr_mode],
				intxhandler, IRQF_SHARED, h->devname,
				&h->q[h->intr_mode]);
		}
	}
	if (rc) {
		dev_err(&h->pdev->dev, "unable to get irq %d for %s\n",
		       h->intr[h->intr_mode], h->devname);
		return -ENODEV;
	}
	return 0;
}

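/* Soft reset via a host reset command, then wait for the board to go
 * not-ready and come back ready.
 */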
static int hpsa_kdump_soft_reset(struct ctlr_info *h)
{
	if (hpsa_send_host_reset(h, RAID_CTLR_LUNID,
		HPSA_RESET_TYPE_CONTROLLER)) {
		dev_warn(&h->pdev->dev, "Resetting array controller failed.\n");
		return -EIO;
	}

	dev_info(&h->pdev->dev, "Waiting for board to soft reset.\n");
	if (hpsa_wait_for_board_state(h->pdev, h->vaddr, BOARD_NOT_READY)) {
		dev_warn(&h->pdev->dev, "Soft reset had no effect.\n");
		return -1;
	}

	dev_info(&h->pdev->dev, "Board reset, awaiting READY status.\n");
	if (hpsa_wait_for_board_state(h->pdev, h->vaddr, BOARD_READY)) {
		dev_warn(&h->pdev->dev, "Board failed to become ready "
			"after soft reset.\n");
		return -1;
	}

	return 0;
}

static void free_irqs(struct ctlr_info *h)
{
	int i;

	if (!h->msix_vector || h->intr_mode != PERF_MODE_INT) {
		/* Single reply queue, only one irq to free */
		i = h->intr_mode;
		free_irq(h->intr[i], &h->q[i]);
		return;
	}

	for (i = 0; i < MAX_REPLY_QUEUES; i++)
		free_irq(h->intr[i], &h->q[i]);
}

static void hpsa_free_irqs_and_disable_msix(struct ctlr_info *h)
{
	free_irqs(h);
#ifdef CONFIG_PCI_MSI
	if (h->msix_vector) {
		if (h->pdev->msix_enabled)
			pci_disable_msix(h->pdev);
	} else if (h->msi_vector) {
		if (h->pdev->msi_enabled)
			pci_disable_msi(h->pdev);
	}
#endif /* CONFIG_PCI_MSI */
}

static void hpsa_undo_allocations_after_kdump_soft_reset(struct ctlr_info *h)
{
	hpsa_free_irqs_and_disable_msix(h);
	hpsa_free_sg_chain_blocks(h);
	hpsa_free_cmd_pool(h);
	kfree(h->blockFetchTable);
	pci_free_consistent(h->pdev, h->reply_pool_size,
		h->reply_pool, h->reply_pool_dhandle);
	if (h->vaddr)
		iounmap(h->vaddr);
	if (h->transtable)
		iounmap(h->transtable);
	if (h->cfgtable)
		iounmap(h->cfgtable);
	pci_disable_device(h->pdev);
	pci_release_regions(h->pdev);
	kfree(h);
}

static void remove_ctlr_from_lockup_detector_list(struct ctlr_info *h)
{
	assert_spin_locked(&lockup_detector_lock);
	if (!hpsa_lockup_detector)
		return;
	if (h->lockup_detected)
		return; /* already stopped the lockup detector */
	list_del(&h->lockup_list);
}

/* Called when controller lockup detected. */
static void fail_all_cmds_on_list(struct ctlr_info *h, struct list_head *list)
{
	struct CommandList *c = NULL;

	assert_spin_locked(&h->lock);
	/* Mark all outstanding commands as failed and complete them. */
	while (!list_empty(list)) {
		c = list_entry(list->next, struct CommandList, list);
		c->err_info->CommandStatus = CMD_HARDWARE_ERR;
		finish_cmd(c);
	}
}

static void controller_lockup_detected(struct ctlr_info *h)
{
	unsigned long flags;

	assert_spin_locked(&lockup_detector_lock);
	remove_ctlr_from_lockup_detector_list(h);
	h->access.set_intr_mask(h, HPSA_INTR_OFF);
	spin_lock_irqsave(&h->lock, flags);
	h->lockup_detected = readl(h->vaddr + SA5_SCRATCHPAD_OFFSET);
	spin_unlock_irqrestore(&h->lock, flags);
	dev_warn(&h->pdev->dev, "Controller lockup detected: 0x%08x\n",
			h->lockup_detected);
	pci_disable_device(h->pdev);
	spin_lock_irqsave(&h->lock, flags);
	fail_all_cmds_on_list(h, &h->cmpQ);
	fail_all_cmds_on_list(h, &h->reqQ);
	spin_unlock_irqrestore(&h->lock, flags);
}

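/* A controller is considered locked up if neither an interrupt nor a
 * heartbeat counter change has been seen within one sample interval.
 */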
static void detect_controller_lockup(struct ctlr_info *h)
{
	u64 now;
	u32 heartbeat;
	unsigned long flags;

	assert_spin_locked(&lockup_detector_lock);
	now = get_jiffies_64();
	/* If we've received an interrupt recently, we're ok. */
	if (time_after64(h->last_intr_timestamp +
				(h->heartbeat_sample_interval), now))
		return;

	/*
	 * If we've already checked the heartbeat recently, we're ok.
	 * This could happen if someone sends us a signal. We
	 * otherwise don't care about signals in this thread.
	 */
	if (time_after64(h->last_heartbeat_timestamp +
				(h->heartbeat_sample_interval), now))
		return;

	/* If heartbeat has not changed since we last looked, we're not ok. */
	spin_lock_irqsave(&h->lock, flags);
	heartbeat = readl(&h->cfgtable->HeartBeat);
	spin_unlock_irqrestore(&h->lock, flags);
	if (h->last_heartbeat == heartbeat) {
		controller_lockup_detected(h);
		return;
	}

	/* We're ok. */
	h->last_heartbeat = heartbeat;
	h->last_heartbeat_timestamp = now;
}

static int detect_controller_lockup_thread(void *notused)
{
	struct ctlr_info *h;
	unsigned long flags;

	while (1) {
		struct list_head *this, *tmp;

		schedule_timeout_interruptible(HEARTBEAT_SAMPLE_INTERVAL);
		if (kthread_should_stop())
			break;
		spin_lock_irqsave(&lockup_detector_lock, flags);
		list_for_each_safe(this, tmp, &hpsa_ctlr_list) {
			h = list_entry(this, struct ctlr_info, lockup_list);
			detect_controller_lockup(h);
		}
		spin_unlock_irqrestore(&lockup_detector_lock, flags);
	}
	return 0;
}

static void add_ctlr_to_lockup_detector_list(struct ctlr_info *h)
{
	unsigned long flags;

	h->heartbeat_sample_interval = HEARTBEAT_SAMPLE_INTERVAL;
	spin_lock_irqsave(&lockup_detector_lock, flags);
	list_add_tail(&h->lockup_list, &hpsa_ctlr_list);
	spin_unlock_irqrestore(&lockup_detector_lock, flags);
}

static void start_controller_lockup_detector(struct ctlr_info *h)
{
	/* Start the lockup detector thread if not already started */
	if (!hpsa_lockup_detector) {
		spin_lock_init(&lockup_detector_lock);
		hpsa_lockup_detector =
			kthread_run(detect_controller_lockup_thread,
						NULL, HPSA);
	}
	if (!hpsa_lockup_detector) {
		dev_warn(&h->pdev->dev,
			"Could not start lockup detector thread\n");
		return;
	}
	add_ctlr_to_lockup_detector_list(h);
}

static void stop_controller_lockup_detector(struct ctlr_info *h)
{
	unsigned long flags;

	spin_lock_irqsave(&lockup_detector_lock, flags);
	remove_ctlr_from_lockup_detector_list(h);
	/* If the list of ctlr's to monitor is empty, stop the thread */
	if (list_empty(&hpsa_ctlr_list)) {
		spin_unlock_irqrestore(&lockup_detector_lock, flags);
		kthread_stop(hpsa_lockup_detector);
		spin_lock_irqsave(&lockup_detector_lock, flags);
		hpsa_lockup_detector = NULL;
	}
	spin_unlock_irqrestore(&lockup_detector_lock, flags);
}

static int hpsa_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	int dac, rc;
	struct ctlr_info *h;
	int try_soft_reset = 0;
	unsigned long flags;

	if (number_of_controllers == 0)
		printk(KERN_INFO DRIVER_NAME "\n");

	rc = hpsa_init_reset_devices(pdev);
	if (rc) {
		if (rc != -ENOTSUPP)
			return rc;
		/* If the reset fails in a particular way (it has no way to do
		 * a proper hard reset, so returns -ENOTSUPP) we can try to do
		 * a soft reset once we get the controller configured up to the
		 * point that it can accept a command.
		 */
		try_soft_reset = 1;
		rc = 0;
	}

reinit_after_soft_reset:

	/* Command structures must be aligned on a 32-byte boundary because
	 * the 5 lower bits of the address are used by the hardware, and by
	 * the driver.  See comments in hpsa.h for more info.
	 */
#define COMMANDLIST_ALIGNMENT 32
	BUILD_BUG_ON(sizeof(struct CommandList) % COMMANDLIST_ALIGNMENT);
	h = kzalloc(sizeof(*h), GFP_KERNEL);
	if (!h)
		return -ENOMEM;

	h->pdev = pdev;
	h->intr_mode = hpsa_simple_mode ? SIMPLE_MODE_INT : PERF_MODE_INT;
	INIT_LIST_HEAD(&h->cmpQ);
	INIT_LIST_HEAD(&h->reqQ);
	spin_lock_init(&h->lock);
	spin_lock_init(&h->scan_lock);
	rc = hpsa_pci_init(h);
	if (rc != 0)
		goto clean1;

	sprintf(h->devname, HPSA "%d", number_of_controllers);
	h->ctlr = number_of_controllers;
	number_of_controllers++;

	/* configure PCI DMA stuff */
	rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
	if (rc == 0) {
		dac = 1;
	} else {
		rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
		if (rc == 0) {
			dac = 0;
		} else {
			dev_err(&pdev->dev, "no suitable DMA available\n");
			goto clean1;
		}
	}

	/* make sure the board interrupts are off */
	h->access.set_intr_mask(h, HPSA_INTR_OFF);

	if (hpsa_request_irq(h, do_hpsa_intr_msi, do_hpsa_intr_intx))
		goto clean2;
	dev_info(&pdev->dev, "%s: <0x%x> at IRQ %d%s using DAC\n",
	       h->devname, pdev->device,
	       h->intr[h->intr_mode], dac ? "" : " not");
	if (hpsa_allocate_cmd_pool(h))
		goto clean4;
	if (hpsa_allocate_sg_chain_blocks(h))
		goto clean4;
	init_waitqueue_head(&h->scan_wait_queue);
	h->scan_finished = 1; /* no scan currently in progress */

	pci_set_drvdata(pdev, h);
	h->ndevices = 0;
	h->scsi_host = NULL;
	spin_lock_init(&h->devlock);
	hpsa_put_ctlr_into_performant_mode(h);

	/* At this point, the controller is ready to take commands.
	 * Now, if reset_devices and the hard reset didn't work, try
	 * the soft reset and see if that works.
	 */
	if (try_soft_reset) {

		/* This is kind of gross.  We may or may not get a completion
		 * from the soft reset command, and if we do, then the value
		 * from the fifo may or may not be valid.  So, we wait 10 secs
		 * after the reset throwing away any completions we get during
		 * that time.  Unregister the interrupt handler and register
		 * fake ones to scoop up any residual completions.
		 */
		spin_lock_irqsave(&h->lock, flags);
		h->access.set_intr_mask(h, HPSA_INTR_OFF);
		spin_unlock_irqrestore(&h->lock, flags);
		free_irqs(h);
		rc = hpsa_request_irq(h, hpsa_msix_discard_completions,
					hpsa_intx_discard_completions);
		if (rc) {
			dev_warn(&h->pdev->dev, "Failed to request_irq after "
				"soft reset.\n");
			goto clean4;
		}

		rc = hpsa_kdump_soft_reset(h);
		if (rc)
			/* Neither hard nor soft reset worked, we're hosed. */
			goto clean4;

		dev_info(&h->pdev->dev, "Board READY.\n");
		dev_info(&h->pdev->dev,
			"Waiting for stale completions to drain.\n");
		h->access.set_intr_mask(h, HPSA_INTR_ON);
		msleep(10000);
		h->access.set_intr_mask(h, HPSA_INTR_OFF);

		rc = controller_reset_failed(h->cfgtable);
		if (rc)
			dev_info(&h->pdev->dev,
				"Soft reset appears to have failed.\n");

		/* since the controller's reset, we have to go back and re-init
		 * everything.  Easiest to just forget what we've done and do
		 * it all over again.
		 */
		hpsa_undo_allocations_after_kdump_soft_reset(h);
		try_soft_reset = 0;
		if (rc)
			/* don't go to clean4, we already unallocated */
			return -ENODEV;

		goto reinit_after_soft_reset;
	}

	/* Turn the interrupts on so we can service requests */
	h->access.set_intr_mask(h, HPSA_INTR_ON);

	hpsa_hba_inquiry(h);
	hpsa_register_scsi(h);	/* hook ourselves into SCSI subsystem */
	start_controller_lockup_detector(h);
	return 0;

clean4:
	hpsa_free_sg_chain_blocks(h);
	hpsa_free_cmd_pool(h);
	free_irqs(h);
clean2:
clean1:
	kfree(h);
	return rc;
}

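/* Ask the controller to write everything in its battery-backed cache
 * out to disk; called at shutdown/remove so no dirty data is lost.
 */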
static void hpsa_flush_cache(struct ctlr_info *h)
{
	char *flush_buf;
	struct CommandList *c;

	flush_buf = kzalloc(4, GFP_KERNEL);
	if (!flush_buf)
		return;

	c = cmd_special_alloc(h);
	if (!c) {
		dev_warn(&h->pdev->dev, "cmd_special_alloc returned NULL!\n");
		goto out_of_memory;
	}
	if (fill_cmd(c, HPSA_CACHE_FLUSH, h, flush_buf, 4, 0,
		RAID_CTLR_LUNID, TYPE_CMD)) {
		goto out;
	}
	hpsa_scsi_do_simple_cmd_with_retry(h, c, PCI_DMA_TODEVICE);
	if (c->err_info->CommandStatus != 0)
out:
		dev_warn(&h->pdev->dev,
			"error flushing cache on controller\n");
	cmd_special_free(h, c);
out_of_memory:
	kfree(flush_buf);
}

static void hpsa_shutdown(struct pci_dev *pdev)
{
	struct ctlr_info *h;

	h = pci_get_drvdata(pdev);
	/* Turn board interrupts off and send the flush cache command.
	 * sendcmd will turn off interrupt, and send the flush...
	 * To write all data in the battery backed cache to disks
	 */
	hpsa_flush_cache(h);
	h->access.set_intr_mask(h, HPSA_INTR_OFF);
	hpsa_free_irqs_and_disable_msix(h);
}

static void hpsa_free_device_info(struct ctlr_info *h)
{
	int i;

	for (i = 0; i < h->ndevices; i++)
		kfree(h->dev[i]);
}

static void hpsa_remove_one(struct pci_dev *pdev)
{
	struct ctlr_info *h;

	if (pci_get_drvdata(pdev) == NULL) {
		dev_err(&pdev->dev, "unable to remove device\n");
		return;
	}
	h = pci_get_drvdata(pdev);
	stop_controller_lockup_detector(h);
	hpsa_unregister_scsi(h);	/* unhook from SCSI subsystem */
	hpsa_shutdown(pdev);
	iounmap(h->vaddr);
	iounmap(h->transtable);
	iounmap(h->cfgtable);
	hpsa_free_device_info(h);
	hpsa_free_sg_chain_blocks(h);
	pci_free_consistent(h->pdev,
		h->nr_cmds * sizeof(struct CommandList),
		h->cmd_pool, h->cmd_pool_dhandle);
	pci_free_consistent(h->pdev,
		h->nr_cmds * sizeof(struct ErrorInfo),
		h->errinfo_pool, h->errinfo_pool_dhandle);
	pci_free_consistent(h->pdev, h->reply_pool_size,
		h->reply_pool, h->reply_pool_dhandle);
	kfree(h->cmd_pool_bits);
	kfree(h->blockFetchTable);
	kfree(h->hba_inquiry_data);
	pci_disable_device(pdev);
	pci_release_regions(pdev);
	pci_set_drvdata(pdev, NULL);
	kfree(h);
}

static int hpsa_suspend(__attribute__((unused)) struct pci_dev *pdev,
	__attribute__((unused)) pm_message_t state)
{
	return -ENOSYS;
}

static int hpsa_resume(__attribute__((unused)) struct pci_dev *pdev)
{
	return -ENOSYS;
}

static struct pci_driver hpsa_pci_driver = {
	.name = HPSA,
	.probe = hpsa_init_one,
	.remove = hpsa_remove_one,
	.id_table = hpsa_pci_device_id,	/* id_table */
	.shutdown = hpsa_shutdown,
	.suspend = hpsa_suspend,
	.resume = hpsa_resume,
};

/* Fill in bucket_map[], given nsgs (the max number of
 * scatter gather elements supported) and bucket[],
 * which is an array of 8 integers.  The bucket[] array
 * contains 8 different DMA transfer sizes (in 16
 * byte increments) which the controller uses to fetch
 * commands.  This function fills in bucket_map[], which
 * maps a given number of scatter gather elements to one of
 * the 8 DMA transfer sizes.  The point of it is to allow the
 * controller to only do as much DMA as needed to fetch the
 * command, with the DMA transfer size encoded in the lower
 * bits of the command address.
 */
static void calc_bucket_map(int bucket[], int num_buckets,
	int nsgs, int *bucket_map)
{
	int i, j, b, size;

	/* even a command with 0 SGs requires 4 blocks */
#define MINIMUM_TRANSFER_BLOCKS 4
#define NUM_BUCKETS 8
	/* Note, bucket_map must have nsgs+1 entries. */
	for (i = 0; i <= nsgs; i++) {
		/* Compute size of a command with i SG entries */
		size = i + MINIMUM_TRANSFER_BLOCKS;
		b = num_buckets; /* Assume the biggest bucket */
		/* Find the bucket that is just big enough */
		for (j = 0; j < 8; j++) {
			if (bucket[j] >= size) {
				b = j;
				break;
			}
		}
		/* for a command with i SG entries, use bucket b. */
		bucket_map[i] = b;
	}
}

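/* Worked example: with bucket[] = {5, 6, 8, 10, 12, 20, 28, ...}, a
 * command with 3 SG entries needs 3 + MINIMUM_TRANSFER_BLOCKS = 7 blocks,
 * so it maps to the first bucket >= 7, i.e. bucket 2 (8 blocks, 128 bytes).
 */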
static void hpsa_enter_performant_mode(struct ctlr_info *h, u32 use_short_tags)
{
	int i;
	unsigned long register_value;

	/* This is a bit complicated.  There are 8 registers on
	 * the controller which we write to to tell it 8 different
	 * sizes of commands which there may be.  It's a way of
	 * reducing the DMA done to fetch each command.  Encoded into
	 * each command's tag are 3 bits which communicate to the controller
	 * which of the eight sizes that command fits within.  The size of
	 * each command depends on how many scatter gather entries there are.
	 * Each SG entry requires 16 bytes.  The eight registers are programmed
	 * with the number of 16-byte blocks a command of that size requires.
	 * The smallest command possible requires 5 such 16 byte blocks.
	 * The largest command possible requires SG_ENTRIES_IN_CMD + 4 16-byte
	 * blocks.  Note, this only extends to the SG entries contained
	 * within the command block, and does not extend to chained blocks
	 * of SG elements.  bft[] contains the eight values we write to
	 * the registers.  They are not evenly distributed, but have more
	 * sizes for small commands, and fewer sizes for larger commands.
	 */
	int bft[8] = {5, 6, 8, 10, 12, 20, 28, SG_ENTRIES_IN_CMD + 4};
	BUILD_BUG_ON(28 > SG_ENTRIES_IN_CMD + 4);
	/*  5 = 1 s/g entry or 4k
	 *  6 = 2 s/g entry or 8k
	 *  8 = 4 s/g entry or 16k
	 * 10 = 6 s/g entry or 24k
	 */

	/* Controller spec: zero out this buffer. */
	memset(h->reply_pool, 0, h->reply_pool_size);

	bft[7] = SG_ENTRIES_IN_CMD + 4;
	calc_bucket_map(bft, ARRAY_SIZE(bft),
			SG_ENTRIES_IN_CMD, h->blockFetchTable);
	for (i = 0; i < 8; i++)
		writel(bft[i], &h->transtable->BlockFetch[i]);

	/* size of controller ring buffer */
	writel(h->max_commands, &h->transtable->RepQSize);
	writel(h->nreply_queues, &h->transtable->RepQCount);
	writel(0, &h->transtable->RepQCtrAddrLow32);
	writel(0, &h->transtable->RepQCtrAddrHigh32);

	for (i = 0; i < h->nreply_queues; i++) {
		writel(0, &h->transtable->RepQAddr[i].upper);
		writel(h->reply_pool_dhandle +
			(h->max_commands * sizeof(u64) * i),
			&h->transtable->RepQAddr[i].lower);
	}

	writel(CFGTBL_Trans_Performant | use_short_tags |
		CFGTBL_Trans_enable_directed_msix,
		&(h->cfgtable->HostWrite.TransportRequest));
	writel(CFGTBL_ChangeReq, h->vaddr + SA5_DOORBELL);
	hpsa_wait_for_mode_change_ack(h);
	register_value = readl(&(h->cfgtable->TransportActive));
	if (!(register_value & CFGTBL_Trans_Performant)) {
		dev_warn(&h->pdev->dev,
			"unable to get board into performant mode\n");
		return;
	}
	/* Change the access methods to the performant access methods */
	h->access = SA5_performant_access;
	h->transMethod = CFGTBL_Trans_Performant;
}

static void hpsa_put_ctlr_into_performant_mode(struct ctlr_info *h)
{
	u32 trans_support;
	int i;

	if (hpsa_simple_mode)
		return;

	trans_support = readl(&(h->cfgtable->TransportSupport));
	if (!(trans_support & PERFORMANT_MODE))
		return;

	h->nreply_queues = h->msix_vector ? MAX_REPLY_QUEUES : 1;
	hpsa_get_max_perf_mode_cmds(h);
	/* Performant mode ring buffer and supporting data structures */
	h->reply_pool_size = h->max_commands * sizeof(u64) * h->nreply_queues;
	h->reply_pool = pci_alloc_consistent(h->pdev, h->reply_pool_size,
				&(h->reply_pool_dhandle));

	for (i = 0; i < h->nreply_queues; i++) {
		h->reply_queue[i].head = &h->reply_pool[h->max_commands * i];
		h->reply_queue[i].size = h->max_commands;
		h->reply_queue[i].wraparound = 1;  /* spec: init to 1 */
		h->reply_queue[i].current_entry = 0;
	}

	/* Need a block fetch table for performant mode */
	h->blockFetchTable = kmalloc(((SG_ENTRIES_IN_CMD + 1) *
				sizeof(u32)), GFP_KERNEL);

	if ((h->reply_pool == NULL)
		|| (h->blockFetchTable == NULL))
		goto clean_up;

	hpsa_enter_performant_mode(h,
		trans_support & CFGTBL_Trans_use_short_tags);
	return;

clean_up:
	if (h->reply_pool)
		pci_free_consistent(h->pdev, h->reply_pool_size,
			h->reply_pool, h->reply_pool_dhandle);
	kfree(h->blockFetchTable);
}

/*
 * This is it.  Register the PCI driver information for the cards we
 * control; the OS will call our registered routines when it finds one
 * of our cards.
 */
static int __init hpsa_init(void)
{
	return pci_register_driver(&hpsa_pci_driver);
}

static void __exit hpsa_cleanup(void)
{
	pci_unregister_driver(&hpsa_pci_driver);
}

module_init(hpsa_init);
module_exit(hpsa_cleanup);