Diffstat (limited to 'drivers/infiniband/ulp')
-rw-r--r--  drivers/infiniband/ulp/ipoib/Kconfig           |   1
-rw-r--r--  drivers/infiniband/ulp/ipoib/ipoib.h           |  12
-rw-r--r--  drivers/infiniband/ulp/ipoib/ipoib_cm.c        |   1
-rw-r--r--  drivers/infiniband/ulp/ipoib/ipoib_ethtool.c   |  51
-rw-r--r--  drivers/infiniband/ulp/ipoib/ipoib_ib.c        |   8
-rw-r--r--  drivers/infiniband/ulp/ipoib/ipoib_main.c      |  62
-rw-r--r--  drivers/infiniband/ulp/srp/ib_srp.c            | 392
-rw-r--r--  drivers/infiniband/ulp/srp/ib_srp.h            |  46
8 files changed, 210 insertions, 363 deletions
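The IPoIB hunks below drop the driver-private LRO engine (the net_lro_mgr state, its module parameters, and its ethtool statistics) and instead hand completed receive skbs to the stack's generic receive offload. The following sketch is illustrative rather than part of the patch: the priv->napi field and the NETIF_F_GRO/napi_gro_receive() calls are taken from the diff, while the example_* names and the stub handler around them are assumed stand-ins for the driver's real receive path.

```c
#include <linux/netdevice.h>
#include <linux/skbuff.h>

/* Hypothetical per-device state; ipoib_dev_priv plays this role in the driver. */
struct example_priv {
	struct napi_struct napi;
};

/* Hand a received skb to GRO instead of a driver-private LRO manager. */
static void example_handle_rx(struct example_priv *priv, struct sk_buff *skb,
			      bool hw_csum_ok)
{
	if (hw_csum_ok)
		skb->ip_summed = CHECKSUM_UNNECESSARY;

	/*
	 * GRO aggregates inside the core NAPI path, so no per-driver
	 * descriptors, flush calls or ethtool counters are needed.
	 */
	napi_gro_receive(&priv->napi, skb);
}

static void example_setup(struct net_device *dev)
{
	/* Advertise GRO instead of the old NETIF_F_LRO flag and "lro" module knob. */
	dev->features |= NETIF_F_GRO;
}
```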
diff --git a/drivers/infiniband/ulp/ipoib/Kconfig b/drivers/infiniband/ulp/ipoib/Kconfig index 9d9a9dc51f18..55855eeabae7 100644 --- a/drivers/infiniband/ulp/ipoib/Kconfig +++ b/drivers/infiniband/ulp/ipoib/Kconfig @@ -1,7 +1,6 @@ config INFINIBAND_IPOIB tristate "IP-over-InfiniBand" depends on NETDEVICES && INET && (IPV6 || IPV6=n) - select INET_LRO ---help--- Support for the IP-over-InfiniBand protocol (IPoIB). This transports IP packets over InfiniBand so you can use your IB diff --git a/drivers/infiniband/ulp/ipoib/ipoib.h b/drivers/infiniband/ulp/ipoib/ipoib.h index 753a983a5fdc..ab97f92fc257 100644 --- a/drivers/infiniband/ulp/ipoib/ipoib.h +++ b/drivers/infiniband/ulp/ipoib/ipoib.h @@ -50,7 +50,7 @@ #include <rdma/ib_verbs.h> #include <rdma/ib_pack.h> #include <rdma/ib_sa.h> -#include <linux/inet_lro.h> +#include <linux/sched.h> /* constants */ @@ -100,9 +100,6 @@ enum { IPOIB_MCAST_FLAG_BUSY = 2, /* joining or already joined */ IPOIB_MCAST_FLAG_ATTACHED = 3, - IPOIB_MAX_LRO_DESCRIPTORS = 8, - IPOIB_LRO_MAX_AGGR = 64, - MAX_SEND_CQE = 16, IPOIB_CM_COPYBREAK = 256, }; @@ -262,11 +259,6 @@ struct ipoib_ethtool_st { u16 max_coalesced_frames; }; -struct ipoib_lro { - struct net_lro_mgr lro_mgr; - struct net_lro_desc lro_desc[IPOIB_MAX_LRO_DESCRIPTORS]; -}; - /* * Device private locking: network stack tx_lock protects members used * in TX fast path, lock protects everything else. lock nests inside @@ -352,8 +344,6 @@ struct ipoib_dev_priv { int hca_caps; struct ipoib_ethtool_st ethtool; struct timer_list poll_timer; - - struct ipoib_lro lro; }; struct ipoib_ah { diff --git a/drivers/infiniband/ulp/ipoib/ipoib_cm.c b/drivers/infiniband/ulp/ipoib/ipoib_cm.c index bb1004114dec..c1c49f2d35b5 100644 --- a/drivers/infiniband/ulp/ipoib/ipoib_cm.c +++ b/drivers/infiniband/ulp/ipoib/ipoib_cm.c @@ -1480,6 +1480,7 @@ static ssize_t set_mode(struct device *d, struct device_attribute *attr, if (test_bit(IPOIB_FLAG_CSUM, &priv->flags)) { dev->features |= NETIF_F_IP_CSUM | NETIF_F_SG; + priv->dev->features |= NETIF_F_GRO; if (priv->hca_caps & IB_DEVICE_UD_TSO) dev->features |= NETIF_F_TSO; } diff --git a/drivers/infiniband/ulp/ipoib/ipoib_ethtool.c b/drivers/infiniband/ulp/ipoib/ipoib_ethtool.c index 1a1657c82edd..19f7f5206f78 100644 --- a/drivers/infiniband/ulp/ipoib/ipoib_ethtool.c +++ b/drivers/infiniband/ulp/ipoib/ipoib_ethtool.c @@ -106,63 +106,12 @@ static int ipoib_set_coalesce(struct net_device *dev, return 0; } -static const char ipoib_stats_keys[][ETH_GSTRING_LEN] = { - "LRO aggregated", "LRO flushed", - "LRO avg aggr", "LRO no desc" -}; - -static void ipoib_get_strings(struct net_device *netdev, u32 stringset, u8 *data) -{ - switch (stringset) { - case ETH_SS_STATS: - memcpy(data, *ipoib_stats_keys, sizeof(ipoib_stats_keys)); - break; - } -} - -static int ipoib_get_sset_count(struct net_device *dev, int sset) -{ - switch (sset) { - case ETH_SS_STATS: - return ARRAY_SIZE(ipoib_stats_keys); - default: - return -EOPNOTSUPP; - } -} - -static void ipoib_get_ethtool_stats(struct net_device *dev, - struct ethtool_stats *stats, uint64_t *data) -{ - struct ipoib_dev_priv *priv = netdev_priv(dev); - int index = 0; - - /* Get LRO statistics */ - data[index++] = priv->lro.lro_mgr.stats.aggregated; - data[index++] = priv->lro.lro_mgr.stats.flushed; - if (priv->lro.lro_mgr.stats.flushed) - data[index++] = priv->lro.lro_mgr.stats.aggregated / - priv->lro.lro_mgr.stats.flushed; - else - data[index++] = 0; - data[index++] = priv->lro.lro_mgr.stats.no_desc; -} - -static int ipoib_set_flags(struct net_device 
*dev, u32 flags) -{ - return ethtool_op_set_flags(dev, flags, ETH_FLAG_LRO); -} - static const struct ethtool_ops ipoib_ethtool_ops = { .get_drvinfo = ipoib_get_drvinfo, .get_rx_csum = ipoib_get_rx_csum, .set_tso = ipoib_set_tso, .get_coalesce = ipoib_get_coalesce, .set_coalesce = ipoib_set_coalesce, - .get_flags = ethtool_op_get_flags, - .set_flags = ipoib_set_flags, - .get_strings = ipoib_get_strings, - .get_sset_count = ipoib_get_sset_count, - .get_ethtool_stats = ipoib_get_ethtool_stats, }; void ipoib_set_ethtool_ops(struct net_device *dev) diff --git a/drivers/infiniband/ulp/ipoib/ipoib_ib.c b/drivers/infiniband/ulp/ipoib/ipoib_ib.c index dfa71903d6e4..806d0292dc39 100644 --- a/drivers/infiniband/ulp/ipoib/ipoib_ib.c +++ b/drivers/infiniband/ulp/ipoib/ipoib_ib.c @@ -295,10 +295,7 @@ static void ipoib_ib_handle_rx_wc(struct net_device *dev, struct ib_wc *wc) if (test_bit(IPOIB_FLAG_CSUM, &priv->flags) && likely(wc->csum_ok)) skb->ip_summed = CHECKSUM_UNNECESSARY; - if (dev->features & NETIF_F_LRO) - lro_receive_skb(&priv->lro.lro_mgr, skb, NULL); - else - netif_receive_skb(skb); + napi_gro_receive(&priv->napi, skb); repost: if (unlikely(ipoib_ib_post_receive(dev, wr_id))) @@ -450,9 +447,6 @@ poll_more: } if (done < budget) { - if (dev->features & NETIF_F_LRO) - lro_flush_all(&priv->lro.lro_mgr); - napi_complete(napi); if (unlikely(ib_req_notify_cq(priv->recv_cq, IB_CQ_NEXT_COMP | diff --git a/drivers/infiniband/ulp/ipoib/ipoib_main.c b/drivers/infiniband/ulp/ipoib/ipoib_main.c index 9ff7bc73ed95..7a07a728fe0d 100644 --- a/drivers/infiniband/ulp/ipoib/ipoib_main.c +++ b/drivers/infiniband/ulp/ipoib/ipoib_main.c @@ -60,15 +60,6 @@ MODULE_PARM_DESC(send_queue_size, "Number of descriptors in send queue"); module_param_named(recv_queue_size, ipoib_recvq_size, int, 0444); MODULE_PARM_DESC(recv_queue_size, "Number of descriptors in receive queue"); -static int lro; -module_param(lro, bool, 0444); -MODULE_PARM_DESC(lro, "Enable LRO (Large Receive Offload)"); - -static int lro_max_aggr = IPOIB_LRO_MAX_AGGR; -module_param(lro_max_aggr, int, 0644); -MODULE_PARM_DESC(lro_max_aggr, "LRO: Max packets to be aggregated " - "(default = 64)"); - #ifdef CONFIG_INFINIBAND_IPOIB_DEBUG int ipoib_debug_level; @@ -976,54 +967,6 @@ static const struct header_ops ipoib_header_ops = { .create = ipoib_hard_header, }; -static int get_skb_hdr(struct sk_buff *skb, void **iphdr, - void **tcph, u64 *hdr_flags, void *priv) -{ - unsigned int ip_len; - struct iphdr *iph; - - if (unlikely(skb->protocol != htons(ETH_P_IP))) - return -1; - - /* - * In the future we may add an else clause that verifies the - * checksum and allows devices which do not calculate checksum - * to use LRO. 
- */ - if (unlikely(skb->ip_summed != CHECKSUM_UNNECESSARY)) - return -1; - - /* Check for non-TCP packet */ - skb_reset_network_header(skb); - iph = ip_hdr(skb); - if (iph->protocol != IPPROTO_TCP) - return -1; - - ip_len = ip_hdrlen(skb); - skb_set_transport_header(skb, ip_len); - *tcph = tcp_hdr(skb); - - /* check if IP header and TCP header are complete */ - if (ntohs(iph->tot_len) < ip_len + tcp_hdrlen(skb)) - return -1; - - *hdr_flags = LRO_IPV4 | LRO_TCP; - *iphdr = iph; - - return 0; -} - -static void ipoib_lro_setup(struct ipoib_dev_priv *priv) -{ - priv->lro.lro_mgr.max_aggr = lro_max_aggr; - priv->lro.lro_mgr.max_desc = IPOIB_MAX_LRO_DESCRIPTORS; - priv->lro.lro_mgr.lro_arr = priv->lro.lro_desc; - priv->lro.lro_mgr.get_skb_header = get_skb_hdr; - priv->lro.lro_mgr.features = LRO_F_NAPI; - priv->lro.lro_mgr.dev = priv->dev; - priv->lro.lro_mgr.ip_summed_aggr = CHECKSUM_UNNECESSARY; -} - static const struct net_device_ops ipoib_netdev_ops = { .ndo_open = ipoib_open, .ndo_stop = ipoib_stop, @@ -1067,8 +1010,6 @@ static void ipoib_setup(struct net_device *dev) priv->dev = dev; - ipoib_lro_setup(priv); - spin_lock_init(&priv->lock); mutex_init(&priv->vlan_mutex); @@ -1218,8 +1159,7 @@ int ipoib_set_dev_features(struct ipoib_dev_priv *priv, struct ib_device *hca) priv->dev->features |= NETIF_F_SG | NETIF_F_IP_CSUM; } - if (lro) - priv->dev->features |= NETIF_F_LRO; + priv->dev->features |= NETIF_F_GRO; if (priv->dev->features & NETIF_F_SG && priv->hca_caps & IB_DEVICE_UD_TSO) priv->dev->features |= NETIF_F_TSO; diff --git a/drivers/infiniband/ulp/srp/ib_srp.c b/drivers/infiniband/ulp/srp/ib_srp.c index 1e1e347a7715..4b62105ed1e8 100644 --- a/drivers/infiniband/ulp/srp/ib_srp.c +++ b/drivers/infiniband/ulp/srp/ib_srp.c @@ -441,18 +441,28 @@ static void srp_disconnect_target(struct srp_target_port *target) wait_for_completion(&target->done); } +static bool srp_change_state(struct srp_target_port *target, + enum srp_target_state old, + enum srp_target_state new) +{ + bool changed = false; + + spin_lock_irq(&target->lock); + if (target->state == old) { + target->state = new; + changed = true; + } + spin_unlock_irq(&target->lock); + return changed; +} + static void srp_remove_work(struct work_struct *work) { struct srp_target_port *target = container_of(work, struct srp_target_port, work); - spin_lock_irq(target->scsi_host->host_lock); - if (target->state != SRP_TARGET_DEAD) { - spin_unlock_irq(target->scsi_host->host_lock); + if (!srp_change_state(target, SRP_TARGET_DEAD, SRP_TARGET_REMOVED)) return; - } - target->state = SRP_TARGET_REMOVED; - spin_unlock_irq(target->scsi_host->host_lock); spin_lock(&target->srp_host->target_lock); list_del(&target->list); @@ -539,33 +549,34 @@ static void srp_unmap_data(struct scsi_cmnd *scmnd, scsi_sg_count(scmnd), scmnd->sc_data_direction); } -static void srp_remove_req(struct srp_target_port *target, struct srp_request *req) +static void srp_remove_req(struct srp_target_port *target, + struct srp_request *req, s32 req_lim_delta) { + unsigned long flags; + srp_unmap_data(req->scmnd, target, req); - list_move_tail(&req->list, &target->free_reqs); + spin_lock_irqsave(&target->lock, flags); + target->req_lim += req_lim_delta; + req->scmnd = NULL; + list_add_tail(&req->list, &target->free_reqs); + spin_unlock_irqrestore(&target->lock, flags); } static void srp_reset_req(struct srp_target_port *target, struct srp_request *req) { req->scmnd->result = DID_RESET << 16; req->scmnd->scsi_done(req->scmnd); - srp_remove_req(target, req); + srp_remove_req(target, 
req, 0); } static int srp_reconnect_target(struct srp_target_port *target) { struct ib_qp_attr qp_attr; - struct srp_request *req, *tmp; struct ib_wc wc; - int ret; + int i, ret; - spin_lock_irq(target->scsi_host->host_lock); - if (target->state != SRP_TARGET_LIVE) { - spin_unlock_irq(target->scsi_host->host_lock); + if (!srp_change_state(target, SRP_TARGET_LIVE, SRP_TARGET_CONNECTING)) return -EAGAIN; - } - target->state = SRP_TARGET_CONNECTING; - spin_unlock_irq(target->scsi_host->host_lock); srp_disconnect_target(target); /* @@ -590,27 +601,23 @@ static int srp_reconnect_target(struct srp_target_port *target) while (ib_poll_cq(target->send_cq, 1, &wc) > 0) ; /* nothing */ - spin_lock_irq(target->scsi_host->host_lock); - list_for_each_entry_safe(req, tmp, &target->req_queue, list) - srp_reset_req(target, req); - spin_unlock_irq(target->scsi_host->host_lock); + for (i = 0; i < SRP_CMD_SQ_SIZE; ++i) { + struct srp_request *req = &target->req_ring[i]; + if (req->scmnd) + srp_reset_req(target, req); + } - target->rx_head = 0; - target->tx_head = 0; - target->tx_tail = 0; + INIT_LIST_HEAD(&target->free_tx); + for (i = 0; i < SRP_SQ_SIZE; ++i) + list_add(&target->tx_ring[i]->list, &target->free_tx); target->qp_in_error = 0; ret = srp_connect_target(target); if (ret) goto err; - spin_lock_irq(target->scsi_host->host_lock); - if (target->state == SRP_TARGET_CONNECTING) { - ret = 0; - target->state = SRP_TARGET_LIVE; - } else + if (!srp_change_state(target, SRP_TARGET_CONNECTING, SRP_TARGET_LIVE)) ret = -EAGAIN; - spin_unlock_irq(target->scsi_host->host_lock); return ret; @@ -620,17 +627,20 @@ err: /* * We couldn't reconnect, so kill our target port off. - * However, we have to defer the real removal because we might - * be in the context of the SCSI error handler now, which - * would deadlock if we call scsi_remove_host(). + * However, we have to defer the real removal because we + * are in the context of the SCSI error handler now, which + * will deadlock if we call scsi_remove_host(). + * + * Schedule our work inside the lock to avoid a race with + * the flush_scheduled_work() in srp_remove_one(). 
*/ - spin_lock_irq(target->scsi_host->host_lock); + spin_lock_irq(&target->lock); if (target->state == SRP_TARGET_CONNECTING) { target->state = SRP_TARGET_DEAD; INIT_WORK(&target->work, srp_remove_work); schedule_work(&target->work); } - spin_unlock_irq(target->scsi_host->host_lock); + spin_unlock_irq(&target->lock); return ret; } @@ -758,7 +768,7 @@ static int srp_map_data(struct scsi_cmnd *scmnd, struct srp_target_port *target, struct srp_direct_buf *buf = (void *) cmd->add_data; buf->va = cpu_to_be64(ib_sg_dma_address(ibdev, scat)); - buf->key = cpu_to_be32(dev->mr->rkey); + buf->key = cpu_to_be32(target->rkey); buf->len = cpu_to_be32(ib_sg_dma_len(ibdev, scat)); } else if (srp_map_fmr(target, scat, count, req, (void *) cmd->add_data)) { @@ -783,7 +793,7 @@ static int srp_map_data(struct scsi_cmnd *scmnd, struct srp_target_port *target, buf->desc_list[i].va = cpu_to_be64(ib_sg_dma_address(ibdev, sg)); buf->desc_list[i].key = - cpu_to_be32(dev->mr->rkey); + cpu_to_be32(target->rkey); buf->desc_list[i].len = cpu_to_be32(dma_len); datalen += dma_len; } @@ -796,7 +806,7 @@ static int srp_map_data(struct scsi_cmnd *scmnd, struct srp_target_port *target, buf->table_desc.va = cpu_to_be64(req->cmd->dma + sizeof *cmd + sizeof *buf); buf->table_desc.key = - cpu_to_be32(target->srp_host->srp_dev->mr->rkey); + cpu_to_be32(target->rkey); buf->table_desc.len = cpu_to_be32(count * sizeof (struct srp_direct_buf)); @@ -812,9 +822,23 @@ static int srp_map_data(struct scsi_cmnd *scmnd, struct srp_target_port *target, } /* - * Must be called with target->scsi_host->host_lock held to protect - * req_lim and tx_head. Lock cannot be dropped between call here and - * call to __srp_post_send(). + * Return an IU and possible credit to the free pool + */ +static void srp_put_tx_iu(struct srp_target_port *target, struct srp_iu *iu, + enum srp_iu_type iu_type) +{ + unsigned long flags; + + spin_lock_irqsave(&target->lock, flags); + list_add(&iu->list, &target->free_tx); + if (iu_type != SRP_IU_RSP) + ++target->req_lim; + spin_unlock_irqrestore(&target->lock, flags); +} + +/* + * Must be called with target->lock held to protect req_lim and free_tx. + * If IU is not sent, it must be returned using srp_put_tx_iu(). * * Note: * An upper limit for the number of allocated information units for each @@ -833,83 +857,59 @@ static struct srp_iu *__srp_get_tx_iu(struct srp_target_port *target, srp_send_completion(target->send_cq, target); - if (target->tx_head - target->tx_tail >= SRP_SQ_SIZE) + if (list_empty(&target->free_tx)) return NULL; /* Initiator responses to target requests do not consume credits */ - if (target->req_lim <= rsv && iu_type != SRP_IU_RSP) { - ++target->zero_req_lim; - return NULL; + if (iu_type != SRP_IU_RSP) { + if (target->req_lim <= rsv) { + ++target->zero_req_lim; + return NULL; + } + + --target->req_lim; } - iu = target->tx_ring[target->tx_head & SRP_SQ_MASK]; - iu->type = iu_type; + iu = list_first_entry(&target->free_tx, struct srp_iu, list); + list_del(&iu->list); return iu; } -/* - * Must be called with target->scsi_host->host_lock held to protect - * req_lim and tx_head. 
- */ -static int __srp_post_send(struct srp_target_port *target, - struct srp_iu *iu, int len) +static int srp_post_send(struct srp_target_port *target, + struct srp_iu *iu, int len) { struct ib_sge list; struct ib_send_wr wr, *bad_wr; - int ret = 0; list.addr = iu->dma; list.length = len; - list.lkey = target->srp_host->srp_dev->mr->lkey; + list.lkey = target->lkey; wr.next = NULL; - wr.wr_id = target->tx_head & SRP_SQ_MASK; + wr.wr_id = (uintptr_t) iu; wr.sg_list = &list; wr.num_sge = 1; wr.opcode = IB_WR_SEND; wr.send_flags = IB_SEND_SIGNALED; - ret = ib_post_send(target->qp, &wr, &bad_wr); - - if (!ret) { - ++target->tx_head; - if (iu->type != SRP_IU_RSP) - --target->req_lim; - } - - return ret; + return ib_post_send(target->qp, &wr, &bad_wr); } -static int srp_post_recv(struct srp_target_port *target) +static int srp_post_recv(struct srp_target_port *target, struct srp_iu *iu) { - unsigned long flags; - struct srp_iu *iu; - struct ib_sge list; struct ib_recv_wr wr, *bad_wr; - unsigned int next; - int ret; - - spin_lock_irqsave(target->scsi_host->host_lock, flags); - - next = target->rx_head & SRP_RQ_MASK; - wr.wr_id = next; - iu = target->rx_ring[next]; + struct ib_sge list; list.addr = iu->dma; list.length = iu->size; - list.lkey = target->srp_host->srp_dev->mr->lkey; + list.lkey = target->lkey; wr.next = NULL; + wr.wr_id = (uintptr_t) iu; wr.sg_list = &list; wr.num_sge = 1; - ret = ib_post_recv(target->qp, &wr, &bad_wr); - if (!ret) - ++target->rx_head; - - spin_unlock_irqrestore(target->scsi_host->host_lock, flags); - - return ret; + return ib_post_recv(target->qp, &wr, &bad_wr); } static void srp_process_rsp(struct srp_target_port *target, struct srp_rsp *rsp) @@ -917,23 +917,18 @@ static void srp_process_rsp(struct srp_target_port *target, struct srp_rsp *rsp) struct srp_request *req; struct scsi_cmnd *scmnd; unsigned long flags; - s32 delta; - - delta = (s32) be32_to_cpu(rsp->req_lim_delta); - - spin_lock_irqsave(target->scsi_host->host_lock, flags); - - target->req_lim += delta; - - req = &target->req_ring[rsp->tag & ~SRP_TAG_TSK_MGMT]; if (unlikely(rsp->tag & SRP_TAG_TSK_MGMT)) { - if (be32_to_cpu(rsp->resp_data_len) < 4) - req->tsk_status = -1; - else - req->tsk_status = rsp->data[3]; - complete(&req->done); + spin_lock_irqsave(&target->lock, flags); + target->req_lim += be32_to_cpu(rsp->req_lim_delta); + spin_unlock_irqrestore(&target->lock, flags); + + target->tsk_mgmt_status = -1; + if (be32_to_cpu(rsp->resp_data_len) >= 4) + target->tsk_mgmt_status = rsp->data[3]; + complete(&target->tsk_mgmt_done); } else { + req = &target->req_ring[rsp->tag]; scmnd = req->scmnd; if (!scmnd) shost_printk(KERN_ERR, target->scsi_host, @@ -953,49 +948,42 @@ static void srp_process_rsp(struct srp_target_port *target, struct srp_rsp *rsp) else if (rsp->flags & (SRP_RSP_FLAG_DIOVER | SRP_RSP_FLAG_DIUNDER)) scsi_set_resid(scmnd, be32_to_cpu(rsp->data_in_res_cnt)); - if (!req->tsk_mgmt) { - scmnd->host_scribble = (void *) -1L; - scmnd->scsi_done(scmnd); - - srp_remove_req(target, req); - } else - req->cmd_done = 1; + srp_remove_req(target, req, be32_to_cpu(rsp->req_lim_delta)); + scmnd->host_scribble = NULL; + scmnd->scsi_done(scmnd); } - - spin_unlock_irqrestore(target->scsi_host->host_lock, flags); } static int srp_response_common(struct srp_target_port *target, s32 req_delta, void *rsp, int len) { - struct ib_device *dev; + struct ib_device *dev = target->srp_host->srp_dev->dev; unsigned long flags; struct srp_iu *iu; - int err = 1; + int err; - dev = target->srp_host->srp_dev->dev; - - 
spin_lock_irqsave(target->scsi_host->host_lock, flags); + spin_lock_irqsave(&target->lock, flags); target->req_lim += req_delta; - iu = __srp_get_tx_iu(target, SRP_IU_RSP); + spin_unlock_irqrestore(&target->lock, flags); + if (!iu) { shost_printk(KERN_ERR, target->scsi_host, PFX "no IU available to send response\n"); - goto out; + return 1; } ib_dma_sync_single_for_cpu(dev, iu->dma, len, DMA_TO_DEVICE); memcpy(iu->buf, rsp, len); ib_dma_sync_single_for_device(dev, iu->dma, len, DMA_TO_DEVICE); - err = __srp_post_send(target, iu, len); - if (err) + err = srp_post_send(target, iu, len); + if (err) { shost_printk(KERN_ERR, target->scsi_host, PFX "unable to post response: %d\n", err); + srp_put_tx_iu(target, iu, SRP_IU_RSP); + } -out: - spin_unlock_irqrestore(target->scsi_host->host_lock, flags); return err; } @@ -1032,14 +1020,11 @@ static void srp_process_aer_req(struct srp_target_port *target, static void srp_handle_recv(struct srp_target_port *target, struct ib_wc *wc) { - struct ib_device *dev; - struct srp_iu *iu; + struct ib_device *dev = target->srp_host->srp_dev->dev; + struct srp_iu *iu = (struct srp_iu *) wc->wr_id; int res; u8 opcode; - iu = target->rx_ring[wc->wr_id]; - - dev = target->srp_host->srp_dev->dev; ib_dma_sync_single_for_cpu(dev, iu->dma, target->max_ti_iu_len, DMA_FROM_DEVICE); @@ -1080,7 +1065,7 @@ static void srp_handle_recv(struct srp_target_port *target, struct ib_wc *wc) ib_dma_sync_single_for_device(dev, iu->dma, target->max_ti_iu_len, DMA_FROM_DEVICE); - res = srp_post_recv(target); + res = srp_post_recv(target, iu); if (res != 0) shost_printk(KERN_ERR, target->scsi_host, PFX "Recv failed with error code %d\n", res); @@ -1109,6 +1094,7 @@ static void srp_send_completion(struct ib_cq *cq, void *target_ptr) { struct srp_target_port *target = target_ptr; struct ib_wc wc; + struct srp_iu *iu; while (ib_poll_cq(cq, 1, &wc) > 0) { if (wc.status) { @@ -1119,18 +1105,19 @@ static void srp_send_completion(struct ib_cq *cq, void *target_ptr) break; } - ++target->tx_tail; + iu = (struct srp_iu *) wc.wr_id; + list_add(&iu->list, &target->free_tx); } } -static int srp_queuecommand_lck(struct scsi_cmnd *scmnd, - void (*done)(struct scsi_cmnd *)) +static int srp_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *scmnd) { - struct srp_target_port *target = host_to_target(scmnd->device->host); + struct srp_target_port *target = host_to_target(shost); struct srp_request *req; struct srp_iu *iu; struct srp_cmd *cmd; struct ib_device *dev; + unsigned long flags; int len; if (target->state == SRP_TARGET_CONNECTING) @@ -1139,11 +1126,19 @@ static int srp_queuecommand_lck(struct scsi_cmnd *scmnd, if (target->state == SRP_TARGET_DEAD || target->state == SRP_TARGET_REMOVED) { scmnd->result = DID_BAD_TARGET << 16; - done(scmnd); + scmnd->scsi_done(scmnd); return 0; } + spin_lock_irqsave(&target->lock, flags); iu = __srp_get_tx_iu(target, SRP_IU_CMD); + if (iu) { + req = list_first_entry(&target->free_reqs, struct srp_request, + list); + list_del(&req->list); + } + spin_unlock_irqrestore(&target->lock, flags); + if (!iu) goto err; @@ -1151,11 +1146,8 @@ static int srp_queuecommand_lck(struct scsi_cmnd *scmnd, ib_dma_sync_single_for_cpu(dev, iu->dma, srp_max_iu_len, DMA_TO_DEVICE); - req = list_first_entry(&target->free_reqs, struct srp_request, list); - - scmnd->scsi_done = done; scmnd->result = 0; - scmnd->host_scribble = (void *) (long) req->index; + scmnd->host_scribble = (void *) req; cmd = iu->buf; memset(cmd, 0, sizeof *cmd); @@ -1167,37 +1159,38 @@ static int 
srp_queuecommand_lck(struct scsi_cmnd *scmnd, req->scmnd = scmnd; req->cmd = iu; - req->cmd_done = 0; - req->tsk_mgmt = NULL; len = srp_map_data(scmnd, target, req); if (len < 0) { shost_printk(KERN_ERR, target->scsi_host, PFX "Failed to map data\n"); - goto err; + goto err_iu; } ib_dma_sync_single_for_device(dev, iu->dma, srp_max_iu_len, DMA_TO_DEVICE); - if (__srp_post_send(target, iu, len)) { + if (srp_post_send(target, iu, len)) { shost_printk(KERN_ERR, target->scsi_host, PFX "Send failed\n"); goto err_unmap; } - list_move_tail(&req->list, &target->req_queue); - return 0; err_unmap: srp_unmap_data(scmnd, target, req); +err_iu: + srp_put_tx_iu(target, iu, SRP_IU_CMD); + + spin_lock_irqsave(&target->lock, flags); + list_add(&req->list, &target->free_reqs); + spin_unlock_irqrestore(&target->lock, flags); + err: return SCSI_MLQUEUE_HOST_BUSY; } -static DEF_SCSI_QCMD(srp_queuecommand) - static int srp_alloc_iu_bufs(struct srp_target_port *target) { int i; @@ -1216,6 +1209,8 @@ static int srp_alloc_iu_bufs(struct srp_target_port *target) GFP_KERNEL, DMA_TO_DEVICE); if (!target->tx_ring[i]) goto err; + + list_add(&target->tx_ring[i]->list, &target->free_tx); } return 0; @@ -1377,7 +1372,8 @@ static int srp_cm_handler(struct ib_cm_id *cm_id, struct ib_cm_event *event) break; for (i = 0; i < SRP_RQ_SIZE; i++) { - target->status = srp_post_recv(target); + struct srp_iu *iu = target->rx_ring[i]; + target->status = srp_post_recv(target, iu); if (target->status) break; } @@ -1442,25 +1438,24 @@ static int srp_cm_handler(struct ib_cm_id *cm_id, struct ib_cm_event *event) } static int srp_send_tsk_mgmt(struct srp_target_port *target, - struct srp_request *req, u8 func) + u64 req_tag, unsigned int lun, u8 func) { struct ib_device *dev = target->srp_host->srp_dev->dev; struct srp_iu *iu; struct srp_tsk_mgmt *tsk_mgmt; - spin_lock_irq(target->scsi_host->host_lock); - if (target->state == SRP_TARGET_DEAD || - target->state == SRP_TARGET_REMOVED) { - req->scmnd->result = DID_BAD_TARGET << 16; - goto out; - } + target->state == SRP_TARGET_REMOVED) + return -1; - init_completion(&req->done); + init_completion(&target->tsk_mgmt_done); + spin_lock_irq(&target->lock); iu = __srp_get_tx_iu(target, SRP_IU_TSK_MGMT); + spin_unlock_irq(&target->lock); + if (!iu) - goto out; + return -1; ib_dma_sync_single_for_cpu(dev, iu->dma, sizeof *tsk_mgmt, DMA_TO_DEVICE); @@ -1468,70 +1463,46 @@ static int srp_send_tsk_mgmt(struct srp_target_port *target, memset(tsk_mgmt, 0, sizeof *tsk_mgmt); tsk_mgmt->opcode = SRP_TSK_MGMT; - tsk_mgmt->lun = cpu_to_be64((u64) req->scmnd->device->lun << 48); - tsk_mgmt->tag = req->index | SRP_TAG_TSK_MGMT; + tsk_mgmt->lun = cpu_to_be64((u64) lun << 48); + tsk_mgmt->tag = req_tag | SRP_TAG_TSK_MGMT; tsk_mgmt->tsk_mgmt_func = func; - tsk_mgmt->task_tag = req->index; + tsk_mgmt->task_tag = req_tag; ib_dma_sync_single_for_device(dev, iu->dma, sizeof *tsk_mgmt, DMA_TO_DEVICE); - if (__srp_post_send(target, iu, sizeof *tsk_mgmt)) - goto out; - - req->tsk_mgmt = iu; - - spin_unlock_irq(target->scsi_host->host_lock); - - if (!wait_for_completion_timeout(&req->done, - msecs_to_jiffies(SRP_ABORT_TIMEOUT_MS))) + if (srp_post_send(target, iu, sizeof *tsk_mgmt)) { + srp_put_tx_iu(target, iu, SRP_IU_TSK_MGMT); return -1; + } - return 0; - -out: - spin_unlock_irq(target->scsi_host->host_lock); - return -1; -} - -static int srp_find_req(struct srp_target_port *target, - struct scsi_cmnd *scmnd, - struct srp_request **req) -{ - if (scmnd->host_scribble == (void *) -1L) + if 
(!wait_for_completion_timeout(&target->tsk_mgmt_done, + msecs_to_jiffies(SRP_ABORT_TIMEOUT_MS))) return -1; - *req = &target->req_ring[(long) scmnd->host_scribble]; - return 0; } static int srp_abort(struct scsi_cmnd *scmnd) { struct srp_target_port *target = host_to_target(scmnd->device->host); - struct srp_request *req; + struct srp_request *req = (struct srp_request *) scmnd->host_scribble; int ret = SUCCESS; shost_printk(KERN_ERR, target->scsi_host, "SRP abort called\n"); - if (target->qp_in_error) + if (!req || target->qp_in_error) return FAILED; - if (srp_find_req(target, scmnd, &req)) + if (srp_send_tsk_mgmt(target, req->index, scmnd->device->lun, + SRP_TSK_ABORT_TASK)) return FAILED; - if (srp_send_tsk_mgmt(target, req, SRP_TSK_ABORT_TASK)) - return FAILED; - - spin_lock_irq(target->scsi_host->host_lock); - if (req->cmd_done) { - srp_remove_req(target, req); - scmnd->scsi_done(scmnd); - } else if (!req->tsk_status) { - srp_remove_req(target, req); - scmnd->result = DID_ABORT << 16; - } else - ret = FAILED; - - spin_unlock_irq(target->scsi_host->host_lock); + if (req->scmnd) { + if (!target->tsk_mgmt_status) { + srp_remove_req(target, req, 0); + scmnd->result = DID_ABORT << 16; + } else + ret = FAILED; + } return ret; } @@ -1539,26 +1510,23 @@ static int srp_abort(struct scsi_cmnd *scmnd) static int srp_reset_device(struct scsi_cmnd *scmnd) { struct srp_target_port *target = host_to_target(scmnd->device->host); - struct srp_request *req, *tmp; + int i; shost_printk(KERN_ERR, target->scsi_host, "SRP reset_device called\n"); if (target->qp_in_error) return FAILED; - if (srp_find_req(target, scmnd, &req)) + if (srp_send_tsk_mgmt(target, SRP_TAG_NO_REQ, scmnd->device->lun, + SRP_TSK_LUN_RESET)) return FAILED; - if (srp_send_tsk_mgmt(target, req, SRP_TSK_LUN_RESET)) + if (target->tsk_mgmt_status) return FAILED; - if (req->tsk_status) - return FAILED; - - spin_lock_irq(target->scsi_host->host_lock); - list_for_each_entry_safe(req, tmp, &target->req_queue, list) - if (req->scmnd->device == scmnd->device) + for (i = 0; i < SRP_CMD_SQ_SIZE; ++i) { + struct srp_request *req = &target->req_ring[i]; + if (req->scmnd && req->scmnd->device == scmnd->device) srp_reset_req(target, req); - - spin_unlock_irq(target->scsi_host->host_lock); + } return SUCCESS; } @@ -1987,9 +1955,12 @@ static ssize_t srp_create_target(struct device *dev, target->io_class = SRP_REV16A_IB_IO_CLASS; target->scsi_host = target_host; target->srp_host = host; + target->lkey = host->srp_dev->mr->lkey; + target->rkey = host->srp_dev->mr->rkey; + spin_lock_init(&target->lock); + INIT_LIST_HEAD(&target->free_tx); INIT_LIST_HEAD(&target->free_reqs); - INIT_LIST_HEAD(&target->req_queue); for (i = 0; i < SRP_CMD_SQ_SIZE; ++i) { target->req_ring[i].index = i; list_add_tail(&target->req_ring[i].list, &target->free_reqs); @@ -2217,9 +2188,9 @@ static void srp_remove_one(struct ib_device *device) */ spin_lock(&host->target_lock); list_for_each_entry(target, &host->target_list, list) { - spin_lock_irq(target->scsi_host->host_lock); + spin_lock_irq(&target->lock); target->state = SRP_TARGET_REMOVED; - spin_unlock_irq(target->scsi_host->host_lock); + spin_unlock_irq(&target->lock); } spin_unlock(&host->target_lock); @@ -2258,8 +2229,7 @@ static int __init srp_init_module(void) { int ret; - BUILD_BUG_ON_NOT_POWER_OF_2(SRP_SQ_SIZE); - BUILD_BUG_ON_NOT_POWER_OF_2(SRP_RQ_SIZE); + BUILD_BUG_ON(FIELD_SIZEOF(struct ib_wc, wr_id) < sizeof(void *)); if (srp_sg_tablesize > 255) { printk(KERN_WARNING PFX "Clamping srp_sg_tablesize to 255\n"); diff 
--git a/drivers/infiniband/ulp/srp/ib_srp.h b/drivers/infiniband/ulp/srp/ib_srp.h index ed0dce9e479f..9dc6fc3fd894 100644 --- a/drivers/infiniband/ulp/srp/ib_srp.h +++ b/drivers/infiniband/ulp/srp/ib_srp.h @@ -59,16 +59,15 @@ enum { SRP_RQ_SHIFT = 6, SRP_RQ_SIZE = 1 << SRP_RQ_SHIFT, - SRP_RQ_MASK = SRP_RQ_SIZE - 1, SRP_SQ_SIZE = SRP_RQ_SIZE, - SRP_SQ_MASK = SRP_SQ_SIZE - 1, SRP_RSP_SQ_SIZE = 1, SRP_REQ_SQ_SIZE = SRP_SQ_SIZE - SRP_RSP_SQ_SIZE, SRP_TSK_MGMT_SQ_SIZE = 1, SRP_CMD_SQ_SIZE = SRP_REQ_SQ_SIZE - SRP_TSK_MGMT_SQ_SIZE, - SRP_TAG_TSK_MGMT = 1 << (SRP_RQ_SHIFT + 1), + SRP_TAG_NO_REQ = ~0U, + SRP_TAG_TSK_MGMT = 1U << 31, SRP_FMR_SIZE = 256, SRP_FMR_POOL_SIZE = 1024, @@ -113,15 +112,29 @@ struct srp_request { struct list_head list; struct scsi_cmnd *scmnd; struct srp_iu *cmd; - struct srp_iu *tsk_mgmt; struct ib_pool_fmr *fmr; - struct completion done; short index; - u8 cmd_done; - u8 tsk_status; }; struct srp_target_port { + /* These are RW in the hot path, and commonly used together */ + struct list_head free_tx; + struct list_head free_reqs; + spinlock_t lock; + s32 req_lim; + + /* These are read-only in the hot path */ + struct ib_cq *send_cq ____cacheline_aligned_in_smp; + struct ib_cq *recv_cq; + struct ib_qp *qp; + u32 lkey; + u32 rkey; + enum srp_target_state state; + + /* Everything above this point is used in the hot path of + * command processing. Try to keep them packed into cachelines. + */ + __be64 id_ext; __be64 ioc_guid; __be64 service_id; @@ -138,24 +151,13 @@ struct srp_target_port { int path_query_id; struct ib_cm_id *cm_id; - struct ib_cq *recv_cq; - struct ib_cq *send_cq; - struct ib_qp *qp; int max_ti_iu_len; - s32 req_lim; int zero_req_lim; - unsigned rx_head; - struct srp_iu *rx_ring[SRP_RQ_SIZE]; - - unsigned tx_head; - unsigned tx_tail; struct srp_iu *tx_ring[SRP_SQ_SIZE]; - - struct list_head free_reqs; - struct list_head req_queue; + struct srp_iu *rx_ring[SRP_RQ_SIZE]; struct srp_request req_ring[SRP_CMD_SQ_SIZE]; struct work_struct work; @@ -163,16 +165,18 @@ struct srp_target_port { struct list_head list; struct completion done; int status; - enum srp_target_state state; int qp_in_error; + + struct completion tsk_mgmt_done; + u8 tsk_mgmt_status; }; struct srp_iu { + struct list_head list; u64 dma; void *buf; size_t size; enum dma_data_direction direction; - enum srp_iu_type type; }; #endif /* IB_SRP_H */ |
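The ib_srp changes above replace the Scsi_Host host_lock with a slim per-target spinlock, turn the TX ring's head/tail counters into a free list, return SRP flow-control credits when an IU is recycled, and carry the IU pointer in wr_id instead of a ring index. A minimal sketch of that allocate/post/recycle pattern follows; the ex_* names are illustrative stand-ins (assumed, not the driver's), and the credit check is simplified — the real __srp_get_tx_iu() additionally reserves credits for task-management responses.

```c
#include <linux/list.h>
#include <linux/spinlock.h>
#include <rdma/ib_verbs.h>

/* Hypothetical IU descriptor and per-connection state, modelled on the
 * free_tx/req_lim handling the patch introduces.
 */
struct ex_iu {
	struct list_head list;
	u64		 dma;
	size_t		 size;
};

struct ex_conn {
	spinlock_t	 lock;		/* protects free_tx and req_lim */
	struct list_head free_tx;
	s32		 req_lim;	/* SRP flow-control credits */
	struct ib_qp	*qp;
	u32		 lkey;
};

/* Take an IU and, for credited request types, one credit.  The caller
 * holds conn->lock, mirroring __srp_get_tx_iu() above.
 */
static struct ex_iu *ex_get_tx_iu(struct ex_conn *conn, bool consumes_credit)
{
	struct ex_iu *iu;

	if (list_empty(&conn->free_tx))
		return NULL;
	if (consumes_credit) {
		if (conn->req_lim <= 0)
			return NULL;
		--conn->req_lim;
	}
	iu = list_first_entry(&conn->free_tx, struct ex_iu, list);
	list_del(&iu->list);
	return iu;
}

/* Undo ex_get_tx_iu() when posting fails, as srp_put_tx_iu() does. */
static void ex_put_tx_iu(struct ex_conn *conn, struct ex_iu *iu,
			 bool consumed_credit)
{
	unsigned long flags;

	spin_lock_irqsave(&conn->lock, flags);
	list_add(&iu->list, &conn->free_tx);
	if (consumed_credit)
		++conn->req_lim;
	spin_unlock_irqrestore(&conn->lock, flags);
}

/* Post a send with the IU pointer as wr_id; the completion handler can
 * then recover the IU with a cast instead of a ring index.
 */
static int ex_post_send(struct ex_conn *conn, struct ex_iu *iu, int len)
{
	struct ib_sge sge = {
		.addr	= iu->dma,
		.length	= len,
		.lkey	= conn->lkey,
	};
	struct ib_send_wr wr = {
		.wr_id		= (uintptr_t) iu,
		.sg_list	= &sge,
		.num_sge	= 1,
		.opcode		= IB_WR_SEND,
		.send_flags	= IB_SEND_SIGNALED,
	}, *bad_wr;

	return ib_post_send(conn->qp, &wr, &bad_wr);
}
```

On the completion side the handler simply casts wc->wr_id back to the IU and re-adds it to the free list, which is what the reworked srp_send_completion() above does for TX and what srp_handle_recv()/srp_post_recv() do for RX buffers.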