		h->dev[entry]->raid_map = new_entry->raid_map;
		h->dev[entry]->ioaccel_handle = new_entry->ioaccel_handle;
	}
+	if (new_entry->hba_ioaccel_enabled) {
+		h->dev[entry]->ioaccel_handle = new_entry->ioaccel_handle;
+		wmb(); /* set ioaccel_handle *before* hba_ioaccel_enabled */
+	}
+	h->dev[entry]->hba_ioaccel_enabled = new_entry->hba_ioaccel_enabled;
	h->dev[entry]->offload_config = new_entry->offload_config;
	h->dev[entry]->offload_to_mirror = new_entry->offload_to_mirror;
	h->dev[entry]->queue_depth = new_entry->queue_depth;
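The wmb() above keeps the two stores ordered: a reader that observes hba_ioaccel_enabled as set is guaranteed to also observe the ioaccel_handle published just before it. A minimal sketch of the write/read barrier pairing, assuming a consumer that issues the matching read barrier; the reader side and use_ioaccel_handle() are illustrative only, not code from the driver:

	/* writer: publish the handle, then the enable flag */
	dev->ioaccel_handle = handle;
	wmb();				/* handle must be visible before the flag */
	dev->hba_ioaccel_enabled = 1;

	/* reader (illustrative): test the flag, then consume the handle */
	if (dev->hba_ioaccel_enabled) {
		rmb();			/* pairs with the writer's wmb() */
		use_ioaccel_handle(dev->ioaccel_handle);
	}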
		this_device->offload_config = 0;
		this_device->offload_enabled = 0;
		this_device->offload_to_be_enabled = 0;
+		this_device->hba_ioaccel_enabled = 0;
		this_device->volume_offline = 0;
		this_device->queue_depth = h->nr_cmds;
	}
		(struct ext_report_lun_entry *) lunaddrbytes;
	dev->ioaccel_handle = rle->ioaccel_handle;
+	if (PHYS_IOACCEL(lunaddrbytes) && dev->ioaccel_handle)
+		dev->hba_ioaccel_enabled = 1;
	memset(id_phys, 0, sizeof(*id_phys));
	rc = hpsa_bmic_id_physical_device(h, lunaddrbytes,
			GET_BMIC_DRIVE_NUMBER(lunaddrbytes), id_phys,
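The flag is derived at discovery time: a physical drive becomes eligible for the HBA ioaccel path only when the extended report-LUNs entry marks it ioaccel-capable (PHYS_IOACCEL()) and the controller also returned a non-zero ioaccel_handle for it. Condensed into a predicate, the gate is roughly the following sketch; hba_ioaccel_eligible() is a hypothetical helper, not in the driver:

	static inline bool hba_ioaccel_eligible(u8 *lunaddrbytes,
						struct hpsa_scsi_dev_t *dev)
	{
		return PHYS_IOACCEL(lunaddrbytes) && dev->ioaccel_handle != 0;
	}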
		rc = hpsa_scsi_ioaccel_raid_map(h, c);
		if (rc < 0)	/* scsi_dma_map failed. */
			rc = SCSI_MLQUEUE_HOST_BUSY;
-	} else if (dev->ioaccel_handle) {
+	} else if (dev->hba_ioaccel_enabled) {
		hpsa_cmd_init(h, c->cmdindex, c);
		c->cmd_type = CMD_SCSI;
		c->scsi_cmd = cmd;
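Switching this test from ioaccel_handle to hba_ioaccel_enabled means a bare non-zero handle is no longer enough to take the accelerated path; the device must have been explicitly qualified during discovery. The overall shape of the decision is sketched below, assuming the first branch is gated on dev->offload_enabled and the direct-map routine is hpsa_scsi_ioaccel_direct_map(), with setup and error handling trimmed:

	if (dev->offload_enabled)		/* logical volume: RAID-mapped ioaccel */
		rc = hpsa_scsi_ioaccel_raid_map(h, c);
	else if (dev->hba_ioaccel_enabled)	/* physical drive in HBA mode */
		rc = hpsa_scsi_ioaccel_direct_map(h, c);
	else
		rc = IO_ACCEL_INELIGIBLE;	/* caller falls back to the regular path */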
	int offload_config;		/* I/O accel RAID offload configured */
	int offload_enabled;		/* I/O accel RAID offload enabled */
	int offload_to_be_enabled;
+	int hba_ioaccel_enabled;	/* I/O accel direct path (HBA mode) enabled */
	int offload_to_mirror;		/* Send next I/O accelerator RAID
					 * offload request to mirror drive
					 */
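For context, the new flag sits beside the existing RAID-offload controls in this structure. A commented excerpt of how the fields divide the two accelerated paths; the comments here are paraphrases for illustration, not the ones in hpsa.h:

	int offload_enabled;		/* logical volume: ioaccel via the RAID map */
	int hba_ioaccel_enabled;	/* physical drive: ioaccel issued directly (HBA mode) */
	u32 ioaccel_handle;		/* controller's I/O accelerator handle for this device */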