typedef union _MFI_CAPABILITIES {
struct {
#if defined(__BIG_ENDIAN_BITFIELD)
- u32 reserved:23;
+ u32 reserved:21;
+ u32 support_fp_rlbypass:1;
+ u32 support_vfid_in_ioframe:1;
u32 support_ext_io_size:1;
u32 support_ext_queue_depth:1;
u32 security_protocol_cmds_fw:1;
#else
u32 security_protocol_cmds_fw:1;
u32 support_ext_queue_depth:1;
u32 support_ext_io_size:1;
- u32 reserved:23;
+ u32 support_vfid_in_ioframe:1;
+ u32 support_fp_rlbypass:1;
+ u32 reserved:21;
#endif
} mfi_capabilities;
__le32 reg;
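
/*
 * Hypothetical sanity check, not part of the patch: shrinking 'reserved'
 * from 23 to 21 bits exactly offsets the two new capability bits, so the
 * bitfield must still pack into the 32-bit 'reg' overlay.  BUILD_BUG_ON()
 * comes from <linux/bug.h>.
 */
static inline void mfi_capabilities_layout_check(void)
{
	BUILD_BUG_ON(sizeof(union _MFI_CAPABILITIES) != sizeof(__le32));
}

/* MR_BuildRaidContext(): latch the LD's bypass capability into the per-I/O info */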
/* assume this IO needs the full row - we'll adjust if not true */
regSize = stripSize;
+ io_info->do_fp_rlbypass = raid->capability.fpBypassRegionLock;
+
/* Check if we can send this I/O via FastPath */
if (raid->capability.fpCapable) {
if (isRead)
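
/* megasas_init_adapter_fusion(): advertise the new region-lock bypass capability to firmware */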
if (instance->max_chain_frame_sz > MEGASAS_CHAIN_FRAME_SZ_MIN)
drv_ops->mfi_capabilities.support_ext_io_size = 1;
+ drv_ops->mfi_capabilities.support_fp_rlbypass = 1;
+
/* Convert capability to LE32 */
cpu_to_le32s((u32 *)&init_frame->driver_operations.mfi_capabilities);
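
/*
 * Hypothetical sketch, not driver code: the cpu_to_le32s() call above works
 * because the capability bits and the __le32 'reg' member alias the same
 * 32-bit word, so the flags can be set in host byte order and byte-swapped
 * in place before the init frame is handed to firmware.
 */
static __le32 build_driver_capabilities(void)
{
	union _MFI_CAPABILITIES caps = { };

	caps.mfi_capabilities.support_fp_rlbypass = 1;
	caps.mfi_capabilities.support_ext_queue_depth = 1;

	cpu_to_le32s((u32 *)&caps.reg);	/* in-place swap through the union */

	return caps.reg;
}
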
/* megasas_build_ldio_fusion(): choose the request-descriptor type for this LD I/O */
cmd->request_desc->SCSIIO.RequestFlags =
(MEGASAS_REQ_DESCRIPT_FLAGS_LD_IO
<< MEGASAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);
if (fusion->adapter_type == INVADER_SERIES) {
- if (io_request->RaidContext.regLockFlags ==
- REGION_TYPE_UNUSED)
+ if (io_info.do_fp_rlbypass ||
+ (io_request->RaidContext.regLockFlags == REGION_TYPE_UNUSED))
cmd->request_desc->SCSIIO.RequestFlags =
(MEGASAS_REQ_DESCRIPT_FLAGS_NO_LOCK <<
MEGASAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);
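
/*
 * Hypothetical helper, not in the driver, restating the decision above:
 * either the per-I/O bypass flag or an unused region lock allows the
 * cheaper NO_LOCK request descriptor in place of the default LD_IO type;
 * the caller still shifts the result by MEGASAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT.
 */
static inline u8 megasas_ldio_descript_flags(u8 do_fp_rlbypass, u8 reg_lock_flags)
{
	if (do_fp_rlbypass || reg_lock_flags == REGION_TYPE_UNUSED)
		return MEGASAS_REQ_DESCRIPT_FLAGS_NO_LOCK;

	return MEGASAS_REQ_DESCRIPT_FLAGS_LD_IO;
}
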
struct MR_LD_RAID {
struct {
#if defined(__BIG_ENDIAN_BITFIELD)
- u32 reserved4:6;
+ u32 reserved4:5;
+ u32 fpBypassRegionLock:1;
u32 tmCapable:1;
u32 fpNonRWCapable:1;
u32 fpReadAcrossStripe:1;
#else
u32 fpReadAcrossStripe:1;
u32 fpNonRWCapable:1;
u32 tmCapable:1;
- u32 reserved4:6;
+ u32 fpBypassRegionLock:1;
+ u32 reserved4:5;
#endif
} capability;
__le32 reserved6;
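
/* struct IO_REQUEST_INFO: the per-I/O reserved byte becomes the bypass flag */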
u8 fpOkForIo;
u8 IoforUnevenSpan;
u8 start_span;
- u8 reserved;
+ u8 do_fp_rlbypass;
u64 start_row;
u8 span_arm; /* span[7:5], arm[4:0] */
u8 pd_after_lb;
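
/*
 * Hypothetical end-to-end restatement, not driver code: the driver first
 * advertises support_fp_rlbypass in the MFI init frame; firmware then
 * reports fpBypassRegionLock per logical drive in the RAID map; and for
 * each command that per-LD bit is latched into io_info->do_fp_rlbypass,
 * which steers the NO_LOCK request-descriptor choice shown above.
 */
static void megasas_seed_fp_rlbypass(struct IO_REQUEST_INFO *io_info,
				     const struct MR_LD_RAID *raid)
{
	io_info->do_fp_rlbypass = raid->capability.fpBypassRegionLock;
}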