| Field | Value | Date |
|---|---|---|
| author | Nicholas Bellinger <nab@linux-iscsi.org> | 2015-07-30 18:28:13 -0700 |
| committer | Nicholas Bellinger <nab@linux-iscsi.org> | 2015-09-11 00:31:39 -0700 |
| commit | 8f9b565482c537821588444e09ff732c7d65ed6e | |
| tree | b411dd3479eb5f3f86e2eb390c1c973e4b31ee12 /drivers/target | |
| parent | 13a3cf08fa1e4b3a252f24202d47a556242aea03 | |
target/qla2xxx: Honor max_data_sg_nents I/O transfer limit
This patch adds an optional fabric driver provided SGL limit
that target-core will honor as its own internal I/O maximum
transfer length limit, as exposed by EVPD=0xb0 block limits
parameters.
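As a rough illustration (not part of the patch), the stand-alone sketch below models how a fabric-advertised SG entry count is turned into the EVPD=0xb0 MAXIMUM TRANSFER LENGTH in logical blocks, mirroring the spc_emulate_evpd_b0() change in the diff further down. The max_data_sg_nents, block size, and hw_max_sectors values are made-up stand-ins, and min_not_zero_u32() is a local reimplementation of the kernel's min_not_zero() helper.

```c
/*
 * Stand-alone model of how a fabric-advertised SGL limit becomes the
 * EVPD=0xb0 MAXIMUM TRANSFER LENGTH (in logical blocks).  All numeric
 * values below are illustrative only, not taken from any real fabric.
 */
#include <stdint.h>
#include <stdio.h>

#define MODEL_PAGE_SIZE	4096u	/* assume 4 KiB pages */

static uint32_t min_not_zero_u32(uint32_t a, uint32_t b)
{
	/* mirrors the kernel's min_not_zero(): a zero argument is ignored */
	if (!a)
		return b;
	if (!b)
		return a;
	return a < b ? a : b;
}

int main(void)
{
	uint32_t max_data_sg_nents = 1200;	/* illustrative fabric SG limit */
	uint32_t block_size = 512;		/* logical block size */
	uint32_t hw_max_sectors = 0xffff;	/* illustrative backend limit */
	uint32_t mtl = 0;

	/* one PAGE_SIZE per scatterlist entry, expressed in logical blocks */
	if (max_data_sg_nents)
		mtl = (max_data_sg_nents * MODEL_PAGE_SIZE) / block_size;

	/* EVPD=0xb0 reports the smaller non-zero of the two limits */
	printf("MAXIMUM TRANSFER LENGTH = %u blocks\n",
	       (unsigned)min_not_zero_u32(mtl, hw_max_sectors));
	return 0;
}
```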
This is required for handling cases where the host I/O transfer
length exceeds the maximum transfer length reported in the
EVPD=0xb0 block limits parameters. The initial user of this logic is qla2xxx,
so that we can avoid having to reject I/Os from some legacy
FC hosts where EVPD=0xb0 parameters are not honored.
When the se_cmd payload length exceeds the provided limit,
target_check_max_data_sg_nents() resets se_cmd->data_length and
se_cmd->prot_length, and sets se_cmd->residual_count plus the
underflow bit for outgoing TFO response callbacks.
It also checks for an existing CDB-level underflow or overflow
and recalculates the final residual_count as necessary.
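The following stand-alone sketch (again, not part of the patch) models that residual bookkeeping for the three cases just described. struct model_cmd, the MODEL_* flags, and clamp_to_mtl() are hypothetical stand-ins for se_cmd, SCF_UNDERFLOW_BIT/SCF_OVERFLOW_BIT, and the new target_check_max_data_sg_nents() shown in the diff; the transfer sizes are illustrative.

```c
/*
 * Stand-alone model of the residual_count bookkeeping described above.
 * The types, flag names, and sizes are local stand-ins, not kernel code.
 */
#include <stdint.h>
#include <stdio.h>

#define MODEL_UNDERFLOW	0x1u
#define MODEL_OVERFLOW	0x2u

struct model_cmd {
	uint32_t flags;		/* existing CDB underflow/overflow state */
	uint32_t data_length;	/* current payload length */
	uint32_t residual_count;
};

/*
 * Clamp cmd->data_length to mtl and recompute the residual accordingly.
 * "size" plays the role of the expected transfer length passed to
 * target_cmd_size_check().
 */
static void clamp_to_mtl(struct model_cmd *cmd, uint32_t size, uint32_t mtl)
{
	if (cmd->data_length <= mtl)
		return;

	if (cmd->flags & MODEL_OVERFLOW) {
		/* existing overflow: residual against the expected size */
		cmd->residual_count = size - mtl;
	} else if (cmd->flags & MODEL_UNDERFLOW) {
		/* re-derive the original length, then re-residualize */
		uint32_t orig_dl = size + cmd->residual_count;
		cmd->residual_count = orig_dl - mtl;
	} else {
		/* no prior residual: new underflow against data_length */
		cmd->flags |= MODEL_UNDERFLOW;
		cmd->residual_count = cmd->data_length - mtl;
	}
	cmd->data_length = mtl;
}

int main(void)
{
	/* illustrative: 8 MiB write against a 1200-page (4,915,200-byte) limit */
	struct model_cmd cmd = { .flags = 0, .data_length = 8u << 20 };

	clamp_to_mtl(&cmd, 8u << 20, 1200u * 4096u);
	printf("data_length=%u residual=%u underflow=%d\n",
	       (unsigned)cmd.data_length, (unsigned)cmd.residual_count,
	       !!(cmd.flags & MODEL_UNDERFLOW));
	return 0;
}
```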
Note that this patch currently assumes a 1:1 mapping of PAGE_SIZE
to each struct scatterlist entry, i.e. at most one page of payload
per SG entry.
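For example, with an illustrative max_data_sg_nents of 1200 and 4 KiB pages, the internal limit works out to 1200 * 4096 = 4,915,200 bytes (a MAXIMUM TRANSFER LENGTH of 9600 blocks at a 512-byte block size); on a 64 KiB PAGE_SIZE configuration the same SG entry count would advertise a limit sixteen times larger, regardless of how many pages a single hardware SG entry can actually address.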
Reported-by: Craig Watson <craig.watson@vanguard-rugged.com>
Cc: Craig Watson <craig.watson@vanguard-rugged.com>
Tested-by: Himanshu Madhani <himanshu.madhani@qlogic.com>
Cc: Roland Dreier <roland@purestorage.com>
Cc: Arun Easi <arun.easi@qlogic.com>
Cc: Giridhar Malavali <giridhar.malavali@qlogic.com>
Cc: Andrew Vasquez <andrew.vasquez@qlogic.com>
Cc: Christoph Hellwig <hch@lst.de>
Cc: Hannes Reinecke <hare@suse.de>
Cc: Martin K. Petersen <martin.petersen@oracle.com>
Signed-off-by: Nicholas Bellinger <nab@linux-iscsi.org>
Diffstat (limited to 'drivers/target')
| Mode | Path | Lines |
|---|---|---|
| -rw-r--r-- | drivers/target/target_core_spc.c | 13 |
| -rw-r--r-- | drivers/target/target_core_transport.c | 51 |

2 files changed, 60 insertions, 4 deletions
diff --git a/drivers/target/target_core_spc.c b/drivers/target/target_core_spc.c
index a07d455e0dd5..0e0456f6a282 100644
--- a/drivers/target/target_core_spc.c
+++ b/drivers/target/target_core_spc.c
@@ -477,8 +477,8 @@ static sense_reason_t
 spc_emulate_evpd_b0(struct se_cmd *cmd, unsigned char *buf)
 {
 	struct se_device *dev = cmd->se_dev;
-	int have_tp = 0;
-	int opt, min;
+	u32 mtl = 0;
+	int have_tp = 0, opt, min;
 
 	/*
 	 * Following spc3r22 section 6.5.3 Block Limits VPD page, when
@@ -509,8 +509,15 @@ spc_emulate_evpd_b0(struct se_cmd *cmd, unsigned char *buf)
 
 	/*
 	 * Set MAXIMUM TRANSFER LENGTH
+	 *
+	 * XXX: Currently assumes single PAGE_SIZE per scatterlist for fabrics
+	 * enforcing maximum HW scatter-gather-list entry limit
 	 */
-	put_unaligned_be32(dev->dev_attrib.hw_max_sectors, &buf[8]);
+	if (cmd->se_tfo->max_data_sg_nents) {
+		mtl = (cmd->se_tfo->max_data_sg_nents * PAGE_SIZE) /
+		       dev->dev_attrib.block_size;
+	}
+	put_unaligned_be32(min_not_zero(mtl, dev->dev_attrib.hw_max_sectors), &buf[8]);
 
 	/*
 	 * Set OPTIMAL TRANSFER LENGTH
diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c
index 3f0b50082de4..62bafaa670f4 100644
--- a/drivers/target/target_core_transport.c
+++ b/drivers/target/target_core_transport.c
@@ -1075,6 +1075,55 @@ transport_set_vpd_ident(struct t10_vpd *vpd, unsigned char *page_83)
 }
 EXPORT_SYMBOL(transport_set_vpd_ident);
 
+static sense_reason_t
+target_check_max_data_sg_nents(struct se_cmd *cmd, struct se_device *dev,
+			       unsigned int size)
+{
+	u32 mtl;
+
+	if (!cmd->se_tfo->max_data_sg_nents)
+		return TCM_NO_SENSE;
+	/*
+	 * Check if fabric enforced maximum SGL entries per I/O descriptor
+	 * exceeds se_cmd->data_length. If true, set SCF_UNDERFLOW_BIT +
+	 * residual_count and reduce original cmd->data_length to maximum
+	 * length based on single PAGE_SIZE entry scatter-lists.
+	 */
+	mtl = (cmd->se_tfo->max_data_sg_nents * PAGE_SIZE);
+	if (cmd->data_length > mtl) {
+		/*
+		 * If an existing CDB overflow is present, calculate new residual
+		 * based on CDB size minus fabric maximum transfer length.
+		 *
+		 * If an existing CDB underflow is present, calculate new residual
+		 * based on original cmd->data_length minus fabric maximum transfer
+		 * length.
+		 *
+		 * Otherwise, set the underflow residual based on cmd->data_length
+		 * minus fabric maximum transfer length.
+		 */
+		if (cmd->se_cmd_flags & SCF_OVERFLOW_BIT) {
+			cmd->residual_count = (size - mtl);
+		} else if (cmd->se_cmd_flags & SCF_UNDERFLOW_BIT) {
+			u32 orig_dl = size + cmd->residual_count;
+			cmd->residual_count = (orig_dl - mtl);
+		} else {
+			cmd->se_cmd_flags |= SCF_UNDERFLOW_BIT;
+			cmd->residual_count = (cmd->data_length - mtl);
+		}
+		cmd->data_length = mtl;
+		/*
+		 * Reset sbc_check_prot() calculated protection payload
+		 * length based upon the new smaller MTL.
+		 */
+		if (cmd->prot_length) {
+			u32 sectors = (mtl / dev->dev_attrib.block_size);
+			cmd->prot_length = dev->prot_length * sectors;
+		}
+	}
+	return TCM_NO_SENSE;
+}
+
 sense_reason_t
 target_cmd_size_check(struct se_cmd *cmd, unsigned int size)
 {
@@ -1120,7 +1169,7 @@ target_cmd_size_check(struct se_cmd *cmd, unsigned int size)
 		}
 	}
 
-	return 0;
+	return target_check_max_data_sg_nents(cmd, dev, size);
 }