target/qla2xxx: Honor max_data_sg_nents I/O transfer limit
author	Nicholas Bellinger <nab@linux-iscsi.org>
	Fri, 31 Jul 2015 01:28:13 +0000 (18:28 -0700)
committer	Nicholas Bellinger <nab@linux-iscsi.org>
	Fri, 11 Sep 2015 07:31:39 +0000 (00:31 -0700)
This patch adds an optional, fabric-driver-provided SGL limit
that target-core will honor as its own internal I/O maximum
transfer length limit, exposed via the EVPD=0xb0 block limits
parameters.

This is required to handle cases where the host I/O transfer
length exceeds the reported EVPD=0xb0 maximum transfer length.
The initial user of this logic is qla2xxx, so that we can avoid
having to reject I/Os from some legacy FC hosts that do not
honor the EVPD=0xb0 parameters.

When the se_cmd payload length exceeds the provided limit, the
new target_check_max_data_sg_nents() code resets
se_cmd->data_length and se_cmd->prot_length, and sets
se_cmd->residual_count plus the underflow bit so that the
truncation is reported via outgoing TFO response callbacks.
It also checks for an existing CDB-level underflow or overflow
and recalculates the final residual_count as necessary.

Note this patch currently assumes 1:1 mapping of PAGE_SIZE
per struct scatterlist entry.
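
For illustration only (not part of the patch), a minimal userspace
sketch of the truncation math in target_check_max_data_sg_nents(),
assuming the qla2xxx value of 1200 single-page SGL entries, a
4096-byte PAGE_SIZE, and a hypothetical 8 MiB host transfer:

	/*
	 * Hedged sketch: mirrors the underflow/residual calculation for a
	 * command with no pre-existing CDB underflow or overflow.  The
	 * 8 MiB host I/O size and PAGE_SIZE=4096 are assumptions.
	 */
	#include <stdio.h>

	int main(void)
	{
		unsigned int page_size = 4096;            /* assumed PAGE_SIZE */
		unsigned int max_data_sg_nents = 1200;    /* qla2xxx value from this patch */
		unsigned int mtl = max_data_sg_nents * page_size;  /* 4915200 bytes */
		unsigned int data_length = 8 * 1024 * 1024;        /* hypothetical host I/O */

		if (data_length > mtl)
			printf("data_length truncated to %u, residual_count %u, UNDERFLOW set\n",
			       mtl, data_length - mtl);
		return 0;
	}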

Reported-by: Craig Watson <craig.watson@vanguard-rugged.com>
Cc: Craig Watson <craig.watson@vanguard-rugged.com>
Tested-by: Himanshu Madhani <himanshu.madhani@qlogic.com>
Cc: Roland Dreier <roland@purestorage.com>
Cc: Arun Easi <arun.easi@qlogic.com>
Cc: Giridhar Malavali <giridhar.malavali@qlogic.com>
Cc: Andrew Vasquez <andrew.vasquez@qlogic.com>
Cc: Christoph Hellwig <hch@lst.de>
Cc: Hannes Reinecke <hare@suse.de>
Cc: Martin K. Petersen <martin.petersen@oracle.com>
Signed-off-by: Nicholas Bellinger <nab@linux-iscsi.org>
drivers/scsi/qla2xxx/tcm_qla2xxx.c
drivers/target/target_core_spc.c
drivers/target/target_core_transport.c
include/target/target_core_fabric.h

index c621623abeedf0086ff4de77517e739065582341..edeb3aefa6fee38d8b18bbe60d3927c8770613a1 100644 (file)
@@ -1808,6 +1808,11 @@ static const struct target_core_fabric_ops tcm_qla2xxx_ops = {
        .module                         = THIS_MODULE,
        .name                           = "qla2xxx",
        .node_acl_size                  = sizeof(struct tcm_qla2xxx_nacl),
+       /*
+        * XXX: Limit assumes single page per scatter-gather-list entry.
+        * Current maximum is ~4.9 MB per se_cmd->t_data_sg with PAGE_SIZE=4096
+        */
+       .max_data_sg_nents              = 1200,
        .get_fabric_name                = tcm_qla2xxx_get_fabric_name,
        .tpg_get_wwn                    = tcm_qla2xxx_get_fabric_wwn,
        .tpg_get_tag                    = tcm_qla2xxx_get_tag,
index a07d455e0dd56a21f5e7897004dc3b6535cb12ea..0e0456f6a28294036cdc835dbff1991f904fee96 100644 (file)
@@ -477,8 +477,8 @@ static sense_reason_t
 spc_emulate_evpd_b0(struct se_cmd *cmd, unsigned char *buf)
 {
        struct se_device *dev = cmd->se_dev;
-       int have_tp = 0;
-       int opt, min;
+       u32 mtl = 0;
+       int have_tp = 0, opt, min;
 
        /*
         * Following spc3r22 section 6.5.3 Block Limits VPD page, when
@@ -509,8 +509,15 @@ spc_emulate_evpd_b0(struct se_cmd *cmd, unsigned char *buf)
 
        /*
         * Set MAXIMUM TRANSFER LENGTH
+        *
+        * XXX: Currently assumes single PAGE_SIZE per scatterlist for fabrics
+        * enforcing maximum HW scatter-gather-list entry limit
         */
-       put_unaligned_be32(dev->dev_attrib.hw_max_sectors, &buf[8]);
+       if (cmd->se_tfo->max_data_sg_nents) {
+               mtl = (cmd->se_tfo->max_data_sg_nents * PAGE_SIZE) /
+                      dev->dev_attrib.block_size;
+       }
+       put_unaligned_be32(min_not_zero(mtl, dev->dev_attrib.hw_max_sectors), &buf[8]);
 
        /*
         * Set OPTIMAL TRANSFER LENGTH
index 3f0b50082de4e52fdf076a396fe76b2a1c2c1003..62bafaa670f4645f5d5450dcb04126ad3edc9c40 100644 (file)
@@ -1075,6 +1075,55 @@ transport_set_vpd_ident(struct t10_vpd *vpd, unsigned char *page_83)
 }
 EXPORT_SYMBOL(transport_set_vpd_ident);
 
+static sense_reason_t
+target_check_max_data_sg_nents(struct se_cmd *cmd, struct se_device *dev,
+                              unsigned int size)
+{
+       u32 mtl;
+
+       if (!cmd->se_tfo->max_data_sg_nents)
+               return TCM_NO_SENSE;
+       /*
+        * Check if fabric enforced maximum SGL entries per I/O descriptor
+        * exceeds se_cmd->data_length.  If true, set SCF_UNDERFLOW_BIT +
+        * residual_count and reduce original cmd->data_length to maximum
+        * length based on single PAGE_SIZE entry scatter-lists.
+        */
+       mtl = (cmd->se_tfo->max_data_sg_nents * PAGE_SIZE);
+       if (cmd->data_length > mtl) {
+               /*
+                * If an existing CDB overflow is present, calculate new residual
+                * based on CDB size minus fabric maximum transfer length.
+                *
+                * If an existing CDB underflow is present, calculate new residual
+                * based on original cmd->data_length minus fabric maximum transfer
+                * length.
+                *
+                * Otherwise, set the underflow residual based on cmd->data_length
+                * minus fabric maximum transfer length.
+                */
+               if (cmd->se_cmd_flags & SCF_OVERFLOW_BIT) {
+                       cmd->residual_count = (size - mtl);
+               } else if (cmd->se_cmd_flags & SCF_UNDERFLOW_BIT) {
+                       u32 orig_dl = size + cmd->residual_count;
+                       cmd->residual_count = (orig_dl - mtl);
+               } else {
+                       cmd->se_cmd_flags |= SCF_UNDERFLOW_BIT;
+                       cmd->residual_count = (cmd->data_length - mtl);
+               }
+               cmd->data_length = mtl;
+               /*
+                * Reset sbc_check_prot() calculated protection payload
+                * length based upon the new smaller MTL.
+                */
+               if (cmd->prot_length) {
+                       u32 sectors = (mtl / dev->dev_attrib.block_size);
+                       cmd->prot_length = dev->prot_length * sectors;
+               }
+       }
+       return TCM_NO_SENSE;
+}
+
 sense_reason_t
 target_cmd_size_check(struct se_cmd *cmd, unsigned int size)
 {
@@ -1120,7 +1169,7 @@ target_cmd_size_check(struct se_cmd *cmd, unsigned int size)
                }
        }
 
-       return 0;
+       return target_check_max_data_sg_nents(cmd, dev, size);
 
 }
 
index 69355feabd1d5684f0bf71edbbf82c6be3095711..7fb2557a760e432ffa054f2550acd8e8a9e7085a 100644 (file)
@@ -5,6 +5,19 @@ struct target_core_fabric_ops {
        struct module *module;
        const char *name;
        size_t node_acl_size;
+       /*
+        * Limits number of scatterlist entries per SCF_SCSI_DATA_CDB payload.
+        * Setting this value tells target-core to enforce this limit, and
+        * report as INQUIRY EVPD=b0 MAXIMUM TRANSFER LENGTH.
+        *
+        * target-core will currently reset se_cmd->data_length to this
+        * maximum size, and set UNDERFLOW residual count if length exceeds
+        * this limit.
+        *
+        * XXX: Not all initiator hosts honor this block-limit EVPD
+        * XXX: Currently assumes single PAGE_SIZE per scatterlist entry
+        */
+       u32 max_data_sg_nents;
        char *(*get_fabric_name)(void);
        char *(*tpg_get_wwn)(struct se_portal_group *);
        u16 (*tpg_get_tag)(struct se_portal_group *);
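
Below is a hedged userspace sketch (not part of the patch) of how the
optional max_data_sg_nents limit interacts with hw_max_sectors when the
EVPD 0xb0 MAXIMUM TRANSFER LENGTH is built in spc_emulate_evpd_b0().
The min_not_zero() helper is a simplified stand-in for the kernel macro
of the same name; block size, page size and the hardware sector limit
are assumed values:

	#include <stdio.h>

	static unsigned int min_not_zero(unsigned int a, unsigned int b)
	{
		if (!a)
			return b;
		if (!b)
			return a;
		return a < b ? a : b;
	}

	static unsigned int evpd_b0_mtl(unsigned int max_data_sg_nents,
					unsigned int page_size,
					unsigned int block_size,
					unsigned int hw_max_sectors)
	{
		unsigned int mtl = 0;

		/* Fabric limit is optional; zero means "no SGL limit enforced" */
		if (max_data_sg_nents)
			mtl = (max_data_sg_nents * page_size) / block_size;
		return min_not_zero(mtl, hw_max_sectors);
	}

	int main(void)
	{
		/* assumed: 512-byte blocks, 4 KiB pages, 16384-sector HW limit */
		printf("no fabric limit:    %u blocks\n",
		       evpd_b0_mtl(0, 4096, 512, 16384));
		printf("qla2xxx (1200 SGL): %u blocks\n",
		       evpd_b0_mtl(1200, 4096, 512, 16384));
		return 0;
	}

With no fabric limit the hardware maximum (16384 blocks) is still
reported; with the qla2xxx value the smaller fabric-derived limit
(9600 blocks) wins.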