{
struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
struct ipr_ioasa *ioasa = &ipr_cmd->ioasa;
+ dma_addr_t dma_addr = be32_to_cpu(ioarcb->ioarcb_host_pci_addr);
memset(&ioarcb->cmd_pkt, 0, sizeof(struct ipr_cmd_pkt));
ioarcb->write_data_transfer_length = 0;
ioarcb->read_data_transfer_length = 0;
ioarcb->write_ioadl_len = 0;
ioarcb->read_ioadl_len = 0;
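+ /* Point both IOADL pointers back at the command block's external ioadl
+  * array; dma_addr is the bus address of the IOARCB, which sits at the
+  * start of struct ipr_cmnd, so struct offsets can be added to it directly. */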
+ ioarcb->write_ioadl_addr =
+ cpu_to_be32(dma_addr + offsetof(struct ipr_cmnd, ioadl));
+ ioarcb->read_ioadl_addr = ioarcb->write_ioadl_addr;
ioasa->ioasc = 0;
ioasa->residual_data_len = 0;
ioasa->u.gata.status = 0;
sglist = scsi_cmd->request_buffer;
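+ /* When the S/G list is short enough, build the IOADL inline in the
+  * IOARCB's add_data area so the adapter can fetch it along with the
+  * IOARCB rather than issuing a separate DMA read for the list. */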
+ if (ipr_cmd->dma_use_sg <= ARRAY_SIZE(ioarcb->add_data.u.ioadl)) {
+ ioadl = ioarcb->add_data.u.ioadl;
+ ioarcb->write_ioadl_addr =
+ cpu_to_be32(be32_to_cpu(ioarcb->ioarcb_host_pci_addr) +
+ offsetof(struct ipr_ioarcb, add_data));
+ ioarcb->read_ioadl_addr = ioarcb->write_ioadl_addr;
+ }
+
for (i = 0; i < ipr_cmd->dma_use_sg; i++) {
ioadl[i].flags_and_data_len =
cpu_to_be32(ioadl_flags | sg_dma_len(&sglist[i]));
ioadl[i].address = cpu_to_be32(sg_dma_address(&sglist[i]));
}
ipr_cmd->dma_handle = pci_map_single(ioa_cfg->pdev,
scsi_cmd->request_buffer, length,
scsi_cmd->sc_data_direction);
if (likely(!pci_dma_mapping_error(ipr_cmd->dma_handle))) {
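+ /* Single flat buffer: one IOADL entry suffices, so it is likewise
+  * placed inline in the IOARCB's add_data area. */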
+ ioadl = ioarcb->add_data.u.ioadl;
+ ioarcb->write_ioadl_addr =
+ cpu_to_be32(be32_to_cpu(ioarcb->ioarcb_host_pci_addr) +
+ offsetof(struct ipr_ioarcb, add_data));
+ ioarcb->read_ioadl_addr = ioarcb->write_ioadl_addr;
ipr_cmd->dma_use_sg = 1;
ioadl[0].flags_and_data_len =
cpu_to_be32(ioadl_flags | length | IPR_IOADL_FLAGS_LAST);
/**
 * ipr_reinit_ipr_cmnd_for_erp - Re-initialize a cmnd block to be used for ERP
 * @ipr_cmd:	ipr command struct
 **/
static void ipr_reinit_ipr_cmnd_for_erp(struct ipr_cmnd *ipr_cmd)
{
- struct ipr_ioarcb *ioarcb;
- struct ipr_ioasa *ioasa;
-
- ioarcb = &ipr_cmd->ioarcb;
- ioasa = &ipr_cmd->ioasa;
+ struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
+ struct ipr_ioasa *ioasa = &ipr_cmd->ioasa;
+ dma_addr_t dma_addr = be32_to_cpu(ioarcb->ioarcb_host_pci_addr);
memset(&ioarcb->cmd_pkt, 0, sizeof(struct ipr_cmd_pkt));
ioarcb->write_data_transfer_length = 0;
ioarcb->read_ioadl_len = 0;
ioasa->ioasc = 0;
ioasa->residual_data_len = 0;
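+ /* The original command may have used the inline add_data IOADL, so
+  * restore the pointers to the external ioadl array before the block is
+  * reused for ERP. */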
+ ioarcb->write_ioadl_addr =
+ cpu_to_be32(dma_addr + offsetof(struct ipr_cmnd, ioadl));
+ ioarcb->read_ioadl_addr = ioarcb->write_ioadl_addr;
}
/**
u8 ctl;
}__attribute__ ((packed, aligned(4)));
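+/* IOADL descriptor: the top byte of flags_and_data_len holds the transfer
+ * flags and the low 24 bits the byte count (see the masks below). Defined
+ * here, ahead of ipr_ioarcb_add_data, so the union below can embed it. */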
+struct ipr_ioadl_desc {
+ __be32 flags_and_data_len;
+#define IPR_IOADL_FLAGS_MASK 0xff000000
+#define IPR_IOADL_GET_FLAGS(x) (be32_to_cpu(x) & IPR_IOADL_FLAGS_MASK)
+#define IPR_IOADL_DATA_LEN_MASK 0x00ffffff
+#define IPR_IOADL_GET_DATA_LEN(x) (be32_to_cpu(x) & IPR_IOADL_DATA_LEN_MASK)
+#define IPR_IOADL_FLAGS_READ 0x48000000
+#define IPR_IOADL_FLAGS_READ_LAST 0x49000000
+#define IPR_IOADL_FLAGS_WRITE 0x68000000
+#define IPR_IOADL_FLAGS_WRITE_LAST 0x69000000
+#define IPR_IOADL_FLAGS_LAST 0x01000000
+
+ __be32 address;
+}__attribute__((packed, aligned (8)));
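+/* Example decode: a descriptor built as
+ * cpu_to_be32(IPR_IOADL_FLAGS_WRITE_LAST | 4096) yields 0x69000000 from
+ * IPR_IOADL_GET_FLAGS() and 4096 from IPR_IOADL_GET_DATA_LEN(). */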
+
struct ipr_ioarcb_add_data {
union {
struct ipr_ioarcb_ata_regs regs;
+ struct ipr_ioadl_desc ioadl[5];
__be32 add_cmd_parms[10];
}u;
}__attribute__ ((packed, aligned(4)));
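+/* add_cmd_parms (10 x __be32) sizes the union above at 40 bytes, which is
+ * room for exactly five 8-byte IOADL descriptors -- hence ioadl[5]. */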
struct ipr_ioarcb_add_data add_data;
}__attribute__((packed, aligned (4)));
-struct ipr_ioadl_desc {
- __be32 flags_and_data_len;
-#define IPR_IOADL_FLAGS_MASK 0xff000000
-#define IPR_IOADL_GET_FLAGS(x) (be32_to_cpu(x) & IPR_IOADL_FLAGS_MASK)
-#define IPR_IOADL_DATA_LEN_MASK 0x00ffffff
-#define IPR_IOADL_GET_DATA_LEN(x) (be32_to_cpu(x) & IPR_IOADL_DATA_LEN_MASK)
-#define IPR_IOADL_FLAGS_READ 0x48000000
-#define IPR_IOADL_FLAGS_READ_LAST 0x49000000
-#define IPR_IOADL_FLAGS_WRITE 0x68000000
-#define IPR_IOADL_FLAGS_WRITE_LAST 0x69000000
-#define IPR_IOADL_FLAGS_LAST 0x01000000
-
- __be32 address;
-}__attribute__((packed, aligned (8)));
-
struct ipr_ioasa_vset {
__be32 failing_lba_hi;
__be32 failing_lba_lo;