author	Chris Mason <chris.mason@oracle.com>	2010-07-19 17:06:46 -0700
committer	Andy Grover <andy.grover@oracle.com>	2010-09-08 18:16:42 -0700
commit	8576f374ac9537674e3cccb0a9d43fa2b7ebbf5b (patch)
tree	ed7279333989d204a6627b703ae568ba0136a4b1
parent	b4e1da3c9a0ac9b01f45a8578b7347e3a31f9fb8 (diff)
RDS: flush fmrs before allocating new ones
Flushing FMRs is somewhat expensive, and is currently kicked off when the
interrupt handler notices that we are getting low. The result of this is
that FMR flushing only happens from the interrupt cpus.

This spreads the load more effectively by triggering flushes just before
we allocate a new FMR.

Signed-off-by: Chris Mason <chris.mason@oracle.com>
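To make the mechanism in the diff below concrete, here is a minimal sketch of the pool/worker wiring the new check relies on. The field names (dirty_count, max_items, flush_worker) mirror the diff; the struct layout and the worker body are simplified assumptions for illustration, not the actual RDS code:

#include <linux/kernel.h>
#include <linux/atomic.h>
#include <linux/workqueue.h>

/* Simplified stand-in for the RDS FMR pool; only the fields the
 * patch touches are shown. */
struct fmr_pool_sketch {
	atomic_t		dirty_count;	/* FMRs unmapped but not yet flushed */
	unsigned long		max_items;	/* pool capacity */
	struct delayed_work	flush_worker;	/* queued on rds_ib_fmr_wq */
};

static void fmr_flush_worker(struct work_struct *work)
{
	struct fmr_pool_sketch *pool =
		container_of(work, struct fmr_pool_sketch, flush_worker.work);

	/* The real worker unmaps the dirty FMRs and returns them to the
	 * clean list; stubbed out here. */
	atomic_set(&pool->dirty_count, 0);
}

static void fmr_pool_init_sketch(struct fmr_pool_sketch *pool, unsigned long max)
{
	atomic_set(&pool->dirty_count, 0);
	pool->max_items = max;
	INIT_DELAYED_WORK(&pool->flush_worker, fmr_flush_worker);
}

With this wiring, the one-line check added in rds_ib_alloc_fmr() below is cheap (an atomic read and a compare), and queue_delayed_work() does nothing if the flush is already pending, so allocation-path callers can trigger flushes without serializing on them.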
-rw-r--r--	net/rds/ib_rdma.c	7
1 file changed, 5 insertions(+), 2 deletions(-)
diff --git a/net/rds/ib_rdma.c b/net/rds/ib_rdma.c
index 3efdddc39d49..0017964f2fcf 100644
--- a/net/rds/ib_rdma.c
+++ b/net/rds/ib_rdma.c
@@ -38,6 +38,8 @@
 #include "ib.h"
 #include "xlist.h"
 
+struct workqueue_struct *rds_ib_fmr_wq;
+
 static DEFINE_PER_CPU(unsigned long, clean_list_grace);
 #define CLEAN_LIST_BUSY_BIT 0
 
@@ -304,6 +306,9 @@ static struct rds_ib_mr *rds_ib_alloc_fmr(struct rds_ib_device *rds_ibdev)
 	struct rds_ib_mr *ibmr = NULL;
 	int err = 0, iter = 0;
 
+	if (atomic_read(&pool->dirty_count) >= pool->max_items / 10)
+		queue_delayed_work(rds_ib_fmr_wq, &pool->flush_worker, 10);
+
 	while (1) {
 		ibmr = rds_ib_reuse_fmr(pool);
 		if (ibmr)
@@ -691,8 +696,6 @@ out_nolock:
 	return ret;
 }
 
-struct workqueue_struct *rds_ib_fmr_wq;
-
 int rds_ib_fmr_init(void)
 {
 	rds_ib_fmr_wq = create_workqueue("rds_fmr_flushd");
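The hunk above is cut off at the create_workqueue() call. For reference, a hedged sketch of how an init function like this typically completes, with a matching teardown; the error path is an assumption based on the standard workqueue idiom, since create_workqueue() returns NULL on failure:

/* Sketch, not part of the visible hunk: assumed completion of the init
 * function plus a matching exit helper. */
int rds_ib_fmr_init_sketch(void)
{
	rds_ib_fmr_wq = create_workqueue("rds_fmr_flushd");
	if (!rds_ib_fmr_wq)
		return -ENOMEM;	/* create_workqueue() returns NULL on failure */
	return 0;
}

void rds_ib_fmr_exit_sketch(void)
{
	destroy_workqueue(rds_ib_fmr_wq);	/* drains and frees the workqueue */
}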