| author | Michael S. Tsirkin <mst@mellanox.co.il> | 2006-01-09 14:04:40 -0800 |
|---|---|---|
| committer | Roland Dreier <rolandd@cisco.com> | 2006-01-09 14:04:40 -0800 |
| commit | 92898522e3ee1a0ba54140aad1974d9e868f74ae | |
| tree | 25f398798849c6806830aa3a32a98c72640a12e3 /drivers | |
| parent | 6627fa662e86c400284b64c13661fdf6bff05983 | |
IB/mthca: prevent event queue overrun
I am seeing EQ overruns in SDP stress tests: if the CQ completion
handler arms a CQ, that can generate more EQEs, so the EQ never
becomes empty and the consumer index never gets updated.
This is similar to what we already do for the command interface:
    /*
     * cmd_event() may add more commands.
     * The card will think the queue has overflowed if
     * we don't tell it we've been processing events.
     */
However, for completion events we *don't* want to update the consumer
index on each event. Instead, perform EQ doorbell coalescing: allocate
each EQ with some spare EQEs, and update the consumer index only once
we run out of them.
The value 0x80 was selected to avoid any performance impact.
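
To make the coalescing scheme concrete, here is a minimal, self-contained sketch of the idea (illustrative only, not the mthca driver code; the fake_eq structure, the poll_eq()/set_eq_ci() helpers, and the small NUM_EQE/NUM_SPARE_EQE values are invented for this example): events are consumed in a loop, but the consumer index is reported to the device only after NUM_SPARE_EQE events, so the spare entries absorb any EQEs generated while the doorbell write is deferred.

```c
#include <stdio.h>

#define NUM_EQE        16   /* entries the consumer actually asked for          */
#define NUM_SPARE_EQE   4   /* extra headroom, analogous to MTHCA_NUM_SPARE_EQE */
#define EQ_SIZE        (NUM_EQE + NUM_SPARE_EQE)

struct fake_eq {
	int cons_index;     /* software consumer index                */
	int hw_cons_index;  /* last index the "device" was told about */
	int pending;        /* events currently sitting in the queue  */
};

/* Stand-in for the doorbell write that reports the consumer index. */
static void set_eq_ci(struct fake_eq *eq)
{
	eq->hw_cons_index = eq->cons_index;
	printf("doorbell: consumer index -> %d\n", eq->hw_cons_index);
}

/*
 * Drain the queue, but ring the doorbell only every NUM_SPARE_EQE
 * events.  The spare entries guarantee the device cannot overrun the
 * queue between updates, even if handling an event enqueues more.
 */
static void poll_eq(struct fake_eq *eq)
{
	int since_ci = 0;

	while (eq->pending > 0) {
		--eq->pending;      /* "consume" one event */
		++eq->cons_index;
		++since_ci;

		if (since_ci >= NUM_SPARE_EQE) {
			set_eq_ci(eq);
			since_ci = 0;
		}
	}

	/* Final update so the device is fully caught up. */
	set_eq_ci(eq);
}

int main(void)
{
	struct fake_eq eq = { .pending = 10 };

	printf("queue size: %d entries (%d requested + %d spare)\n",
	       EQ_SIZE, NUM_EQE, NUM_SPARE_EQE);
	poll_eq(&eq);
	return 0;
}
```

The same reasoning is why each EQ in the patch below is created with MTHCA_NUM_SPARE_EQE extra entries: the headroom has to cover the events that may arrive between two consumer-index updates.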
Signed-off-by: Michael S. Tsirkin <mst@mellanox.co.il>
Signed-off-by: Roland Dreier <rolandd@cisco.com>
Diffstat (limited to 'drivers')
-rw-r--r-- | drivers/infiniband/hw/mthca/mthca_eq.c | 28 |
1 file changed, 15 insertions, 13 deletions
diff --git a/drivers/infiniband/hw/mthca/mthca_eq.c b/drivers/infiniband/hw/mthca/mthca_eq.c
index e8a948f087c0..2eabb27804cd 100644
--- a/drivers/infiniband/hw/mthca/mthca_eq.c
+++ b/drivers/infiniband/hw/mthca/mthca_eq.c
@@ -45,6 +45,7 @@ enum {
 	MTHCA_NUM_ASYNC_EQE = 0x80,
 	MTHCA_NUM_CMD_EQE   = 0x80,
+	MTHCA_NUM_SPARE_EQE = 0x80,
 	MTHCA_EQ_ENTRY_SIZE = 0x20
 };
@@ -277,11 +278,10 @@ static int mthca_eq_int(struct mthca_dev *dev, struct mthca_eq *eq)
 {
 	struct mthca_eqe *eqe;
 	int disarm_cqn;
-	int  eqes_found = 0;
+	int eqes_found = 0;
+	int set_ci = 0;
 
 	while ((eqe = next_eqe_sw(eq))) {
-		int set_ci = 0;
-
 		/*
 		 * Make sure we read EQ entry contents after we've
 		 * checked the ownership bit.
@@ -345,12 +345,6 @@ static int mthca_eq_int(struct mthca_dev *dev, struct mthca_eq *eq)
 					be16_to_cpu(eqe->event.cmd.token),
 					eqe->event.cmd.status,
 					be64_to_cpu(eqe->event.cmd.out_param));
-			/*
-			 * cmd_event() may add more commands.
-			 * The card will think the queue has overflowed if
-			 * we don't tell it we've been processing events.
-			 */
-			set_ci = 1;
 			break;
 
 		case MTHCA_EVENT_TYPE_PORT_CHANGE:
@@ -385,8 +379,16 @@ static int mthca_eq_int(struct mthca_dev *dev, struct mthca_eq *eq)
 		set_eqe_hw(eqe);
 		++eq->cons_index;
 		eqes_found = 1;
+		++set_ci;
 
-		if (unlikely(set_ci)) {
+		/*
+		 * The HCA will think the queue has overflowed if we
+		 * don't tell it we've been processing events. We
+		 * create our EQs with MTHCA_NUM_SPARE_EQE extra
+		 * entries, so we must update our consumer index at
+		 * least that often.
+		 */
+		if (unlikely(set_ci >= MTHCA_NUM_SPARE_EQE)) {
 			/*
 			 * Conditional on hca_type is OK here because
 			 * this is a rare case, not the fast path.
@@ -862,19 +864,19 @@ int __devinit mthca_init_eq_table(struct mthca_dev *dev)
 	intr = (dev->mthca_flags & MTHCA_FLAG_MSI) ?
 		128 : dev->eq_table.inta_pin;
 
-	err = mthca_create_eq(dev, dev->limits.num_cqs,
+	err = mthca_create_eq(dev, dev->limits.num_cqs + MTHCA_NUM_SPARE_EQE,
 			      (dev->mthca_flags & MTHCA_FLAG_MSI_X) ? 128 : intr,
 			      &dev->eq_table.eq[MTHCA_EQ_COMP]);
 	if (err)
 		goto err_out_unmap;
 
-	err = mthca_create_eq(dev, MTHCA_NUM_ASYNC_EQE,
+	err = mthca_create_eq(dev, MTHCA_NUM_ASYNC_EQE + MTHCA_NUM_SPARE_EQE,
 			      (dev->mthca_flags & MTHCA_FLAG_MSI_X) ? 129 : intr,
 			      &dev->eq_table.eq[MTHCA_EQ_ASYNC]);
 	if (err)
 		goto err_out_comp;
 
-	err = mthca_create_eq(dev, MTHCA_NUM_CMD_EQE,
+	err = mthca_create_eq(dev, MTHCA_NUM_CMD_EQE + MTHCA_NUM_SPARE_EQE,
 			      (dev->mthca_flags & MTHCA_FLAG_MSI_X) ? 130 : intr,
 			      &dev->eq_table.eq[MTHCA_EQ_CMD]);
 	if (err)