Commit 87407673 authored by Tom Tucker

svcrdma: Use standard Linux lists for context cache

Replace the one-off linked-list implementation of the context cache
with standard Linux list_head lists. Add a context counter to catch
resource leaks; a WARN_ON will be added later to verify that all
contexts have been freed.
Signed-off-by: Tom Tucker <tom@opengridcomputing.com>
parent 02e7452d
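
The conversion swaps a hand-rolled singly linked pointer chain for the kernel's intrusive, doubly linked list_head, so contexts can be popped from and returned to the free list in O(1) with the well-tested list macros, while sc_ctxt_used counts how many contexts are outstanding. The sketch below shows the same free-list pattern in isolation. It is a minimal userspace illustration, not svcrdma code: the list primitives are re-implemented locally so the example compiles on its own (in the kernel they come from <linux/list.h>), the names ctxt, ctxt_cache, cache_get, and cache_put are invented for this example, and the spinlock plus atomic counter are simplified to a plain int since nothing here is concurrent.

/*
 * Minimal userspace sketch of the free-list pattern this patch adopts.
 * Names (ctxt, ctxt_cache, cache_get, cache_put) are hypothetical; the
 * list primitives are local re-implementations of <linux/list.h>.
 */
#include <stdio.h>
#include <stddef.h>

struct list_head {
	struct list_head *next, *prev;
};

static void INIT_LIST_HEAD(struct list_head *h)
{
	h->next = h;
	h->prev = h;
}

static int list_empty(const struct list_head *h)
{
	return h->next == h;
}

/* Insert 'n' right after 'h' (LIFO order, like the kernel's list_add). */
static void list_add(struct list_head *n, struct list_head *h)
{
	n->next = h->next;
	n->prev = h;
	h->next->prev = n;
	h->next = n;
}

static void list_del_init(struct list_head *n)
{
	n->prev->next = n->next;
	n->next->prev = n->prev;
	INIT_LIST_HEAD(n);
}

/* Map an embedded list_head back to its containing structure. */
#define list_entry(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

/* Stand-in for struct svc_rdma_op_ctxt: embeds its free-list linkage. */
struct ctxt {
	int id;
	struct list_head free_list;
};

/* Stand-in for the svcxprt_rdma fields sc_ctxt_free and sc_ctxt_used. */
struct ctxt_cache {
	struct list_head free;
	int used;
};

/* Like svc_rdma_get_context(): pop a context, count it as in use. */
static struct ctxt *cache_get(struct ctxt_cache *c)
{
	struct ctxt *ctxt;

	if (list_empty(&c->free))
		return NULL;	/* the real code grows the cache instead */
	ctxt = list_entry(c->free.next, struct ctxt, free_list);
	list_del_init(&ctxt->free_list);
	c->used++;
	return ctxt;
}

/* Like svc_rdma_put_context(): return a context to the free list. */
static void cache_put(struct ctxt_cache *c, struct ctxt *ctxt)
{
	list_add(&ctxt->free_list, &c->free);
	c->used--;
}

int main(void)
{
	struct ctxt pool[4];
	struct ctxt_cache cache = { .used = 0 };
	struct ctxt *got;
	int i;

	INIT_LIST_HEAD(&cache.free);
	for (i = 0; i < 4; i++) {
		pool[i].id = i;
		INIT_LIST_HEAD(&pool[i].free_list);
		list_add(&pool[i].free_list, &cache.free);
	}

	got = cache_get(&cache);
	printf("got ctxt %d, in use: %d\n", got->id, cache.used);
	cache_put(&cache, got);
	printf("returned it, in use: %d\n", cache.used);

	/* A nonzero 'used' count at teardown would indicate a leaked
	 * context; that is what the planned WARN_ON would catch. */
	return 0;
}

Because the linkage is embedded in the context itself, queuing a context needs no allocation, and list_del_init() removes it from anywhere in the list in constant time; a check that the usage count is zero at teardown is where the WARN_ON mentioned above would go.
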
@@ -72,7 +72,7 @@ extern atomic_t rdma_stat_sq_prod;
  */
 struct svc_rdma_op_ctxt {
 	struct svc_rdma_op_ctxt *read_hdr;
-	struct svc_rdma_op_ctxt *next;
+	struct list_head free_list;
 	struct xdr_buf arg;
 	struct list_head dto_q;
 	enum ib_wr_opcode wr_op;
@@ -104,7 +104,8 @@ struct svcxprt_rdma {
 	struct ib_pd         *sc_pd;

-	struct svc_rdma_op_ctxt *sc_ctxt_head;
+	atomic_t             sc_ctxt_used;
+	struct list_head     sc_ctxt_free;
 	int                  sc_ctxt_cnt;
 	int                  sc_ctxt_bump;
 	int                  sc_ctxt_max;
...
@@ -103,8 +103,8 @@ static int rdma_bump_context_cache(struct svcxprt_rdma *xprt)
 		spin_lock_bh(&xprt->sc_ctxt_lock);
 		if (ctxt) {
 			at_least_one = 1;
-			ctxt->next = xprt->sc_ctxt_head;
-			xprt->sc_ctxt_head = ctxt;
+			INIT_LIST_HEAD(&ctxt->free_list);
+			list_add(&ctxt->free_list, &xprt->sc_ctxt_free);
 		} else {
 			/* kmalloc failed...give up for now */
 			xprt->sc_ctxt_cnt--;
@@ -123,7 +123,7 @@ struct svc_rdma_op_ctxt *svc_rdma_get_context(struct svcxprt_rdma *xprt)
 	while (1) {
 		spin_lock_bh(&xprt->sc_ctxt_lock);
-		if (unlikely(xprt->sc_ctxt_head == NULL)) {
+		if (unlikely(list_empty(&xprt->sc_ctxt_free))) {
 			/* Try to bump my cache. */
 			spin_unlock_bh(&xprt->sc_ctxt_lock);
@@ -136,12 +136,15 @@ struct svc_rdma_op_ctxt *svc_rdma_get_context(struct svcxprt_rdma *xprt)
 			schedule_timeout_uninterruptible(msecs_to_jiffies(500));
 			continue;
 		}
-		ctxt = xprt->sc_ctxt_head;
-		xprt->sc_ctxt_head = ctxt->next;
+		ctxt = list_entry(xprt->sc_ctxt_free.next,
+				  struct svc_rdma_op_ctxt,
+				  free_list);
+		list_del_init(&ctxt->free_list);
 		spin_unlock_bh(&xprt->sc_ctxt_lock);
 		ctxt->xprt = xprt;
 		INIT_LIST_HEAD(&ctxt->dto_q);
 		ctxt->count = 0;
+		atomic_inc(&xprt->sc_ctxt_used);
 		break;
 	}
 	return ctxt;
@@ -163,10 +166,11 @@ void svc_rdma_put_context(struct svc_rdma_op_ctxt *ctxt, int free_pages)
 				ctxt->sge[i].addr,
 				ctxt->sge[i].length,
 				ctxt->direction);
+
 	spin_lock_bh(&xprt->sc_ctxt_lock);
-	ctxt->next = xprt->sc_ctxt_head;
-	xprt->sc_ctxt_head = ctxt;
+	list_add(&ctxt->free_list, &xprt->sc_ctxt_free);
 	spin_unlock_bh(&xprt->sc_ctxt_lock);
+	atomic_dec(&xprt->sc_ctxt_used);
 }

 /* ib_cq event handler */
@@ -412,28 +416,29 @@ static void create_context_cache(struct svcxprt_rdma *xprt,
 	xprt->sc_ctxt_max = ctxt_max;
 	xprt->sc_ctxt_bump = ctxt_bump;
 	xprt->sc_ctxt_cnt = 0;
-	xprt->sc_ctxt_head = NULL;
+	atomic_set(&xprt->sc_ctxt_used, 0);
+	INIT_LIST_HEAD(&xprt->sc_ctxt_free);
 	for (i = 0; i < ctxt_count; i++) {
 		ctxt = kmalloc(sizeof(*ctxt), GFP_KERNEL);
 		if (ctxt) {
-			ctxt->next = xprt->sc_ctxt_head;
-			xprt->sc_ctxt_head = ctxt;
+			INIT_LIST_HEAD(&ctxt->free_list);
+			list_add(&ctxt->free_list, &xprt->sc_ctxt_free);
 			xprt->sc_ctxt_cnt++;
 		}
 	}
 }

-static void destroy_context_cache(struct svc_rdma_op_ctxt *ctxt)
+static void destroy_context_cache(struct svcxprt_rdma *xprt)
 {
-	struct svc_rdma_op_ctxt *next;
-	if (!ctxt)
-		return;
-
-	do {
-		next = ctxt->next;
-		kfree(ctxt);
-		ctxt = next;
-	} while (next);
+	while (!list_empty(&xprt->sc_ctxt_free)) {
+		struct svc_rdma_op_ctxt *ctxt;
+		ctxt = list_entry(xprt->sc_ctxt_free.next,
+				  struct svc_rdma_op_ctxt,
+				  free_list);
+		list_del_init(&ctxt->free_list);
+		kfree(ctxt);
+	}
 }

 static struct svcxprt_rdma *rdma_create_xprt(struct svc_serv *serv,
@@ -470,7 +475,7 @@ static struct svcxprt_rdma *rdma_create_xprt(struct svc_serv *serv,
 				     reqs +
 				     cma_xprt->sc_sq_depth +
 				     RPCRDMA_MAX_THREADS + 1); /* max */
-	if (!cma_xprt->sc_ctxt_head) {
+	if (list_empty(&cma_xprt->sc_ctxt_free)) {
 		kfree(cma_xprt);
 		return NULL;
 	}
@@ -976,7 +981,7 @@ static void svc_rdma_free(struct svc_xprt *xprt)
 	if (rdma->sc_pd && !IS_ERR(rdma->sc_pd))
 		ib_dealloc_pd(rdma->sc_pd);

-	destroy_context_cache(rdma->sc_ctxt_head);
+	destroy_context_cache(rdma);
 	kfree(rdma);
 }
...