Commit 98779be8 authored by Steve Wise, committed by J. Bruce Fields

svcrdma: dma unmap the correct length for the RPCRDMA header page.

The svcrdma module was incorrectly unmapping the RPCRDMA header page.
On IBM pSeries systems this causes a resource leak that results in
running out of bus address space (10 cthon iterations will reproduce it).
The code was mapping the full page but unmapping only the actual header
length. The fix is to map only the header length.

I also cleaned up the ib_dma_map_page() calls: since the unmap logic
always uses ib_dma_unmap_single(), the map calls now use
ib_dma_map_single() as well, making the two symmetrical.
Signed-off-by: Steve Wise <swise@opengridcomputing.com>
Signed-off-by: Tom Tucker <tom@opengridcomputing.com>
Signed-off-by: J. Bruce Fields <bfields@citi.umich.edu>
parent 7f421835
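The asymmetry is easier to see in isolation. The following sketch is not
the literal svcrdma code: the helper names and the dev/page/hdr_len/sge
parameters are hypothetical stand-ins, but the call pattern is the one
the diff below changes.

#include <linux/mm.h>		/* page_address() */
#include <rdma/ib_verbs.h>	/* ib_dma_map_page(), ib_dma_map_single() */

/* Hypothetical illustration of the bug: map the whole page, but record
 * only the header length.  The common unmap path later releases
 * sge->length bytes, so the rest of the PAGE_SIZE mapping leaks. */
static void map_reply_hdr_buggy(struct ib_device *dev, struct page *page,
				u32 hdr_len, struct ib_sge *sge)
{
	sge->addr = ib_dma_map_page(dev, page, 0, PAGE_SIZE, DMA_TO_DEVICE);
	sge->length = hdr_len;
}

/* Hypothetical illustration of the fix: set the length first, then map
 * exactly that many bytes with the _single variant, so map and unmap
 * agree on both the API and the size. */
static void map_reply_hdr_fixed(struct ib_device *dev, struct page *page,
				u32 hdr_len, struct ib_sge *sge)
{
	sge->length = hdr_len;
	sge->addr = ib_dma_map_single(dev, page_address(page),
				      sge->length, DMA_TO_DEVICE);
}

Note how the fixed variant must assign sge->length before the map call;
that is why the diff in send_reply() below moves the length and lkey
assignments above the mapping.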
--- a/net/sunrpc/xprtrdma/svc_rdma_sendto.c
+++ b/net/sunrpc/xprtrdma/svc_rdma_sendto.c
@@ -128,7 +128,8 @@ static int fast_reg_xdr(struct svcxprt_rdma *xprt,
 		page_bytes -= sge_bytes;
 
 		frmr->page_list->page_list[page_no] =
-			ib_dma_map_page(xprt->sc_cm_id->device, page, 0,
+			ib_dma_map_single(xprt->sc_cm_id->device,
+					  page_address(page),
 					PAGE_SIZE, DMA_TO_DEVICE);
 		if (ib_dma_mapping_error(xprt->sc_cm_id->device,
 					 frmr->page_list->page_list[page_no]))
@@ -532,18 +533,17 @@ static int send_reply(struct svcxprt_rdma *rdma,
 		clear_bit(RDMACTXT_F_FAST_UNREG, &ctxt->flags);
 
 	/* Prepare the SGE for the RPCRDMA Header */
+	ctxt->sge[0].lkey = rdma->sc_dma_lkey;
+	ctxt->sge[0].length = svc_rdma_xdr_get_reply_hdr_len(rdma_resp);
 	ctxt->sge[0].addr =
-		ib_dma_map_page(rdma->sc_cm_id->device,
-				page, 0, PAGE_SIZE, DMA_TO_DEVICE);
+		ib_dma_map_single(rdma->sc_cm_id->device, page_address(page),
+				  ctxt->sge[0].length, DMA_TO_DEVICE);
 	if (ib_dma_mapping_error(rdma->sc_cm_id->device, ctxt->sge[0].addr))
 		goto err;
 	atomic_inc(&rdma->sc_dma_used);
 
 	ctxt->direction = DMA_TO_DEVICE;
-	ctxt->sge[0].length = svc_rdma_xdr_get_reply_hdr_len(rdma_resp);
-	ctxt->sge[0].lkey = rdma->sc_dma_lkey;
 
 	/* Determine how many of our SGE are to be transmitted */
 	for (sge_no = 1; byte_count && sge_no < vec->count; sge_no++) {
 		sge_bytes = min_t(size_t, vec->sge[sge_no].iov_len, byte_count);
--- a/net/sunrpc/xprtrdma/svc_rdma_transport.c
+++ b/net/sunrpc/xprtrdma/svc_rdma_transport.c
@@ -500,8 +500,8 @@ int svc_rdma_post_recv(struct svcxprt_rdma *xprt)
 		BUG_ON(sge_no >= xprt->sc_max_sge);
 		page = svc_rdma_get_page();
 		ctxt->pages[sge_no] = page;
-		pa = ib_dma_map_page(xprt->sc_cm_id->device,
-				     page, 0, PAGE_SIZE,
+		pa = ib_dma_map_single(xprt->sc_cm_id->device,
+				     page_address(page), PAGE_SIZE,
 				     DMA_FROM_DEVICE);
 		if (ib_dma_mapping_error(xprt->sc_cm_id->device, pa))
 			goto err_put_ctxt;
@@ -1315,8 +1315,8 @@ void svc_rdma_send_error(struct svcxprt_rdma *xprt, struct rpcrdma_msg *rmsgp,
 	length = svc_rdma_xdr_encode_error(xprt, rmsgp, err, va);
 
 	/* Prepare SGE for local address */
-	sge.addr = ib_dma_map_page(xprt->sc_cm_id->device,
-				   p, 0, PAGE_SIZE, DMA_FROM_DEVICE);
+	sge.addr = ib_dma_map_single(xprt->sc_cm_id->device,
+				     page_address(p), PAGE_SIZE, DMA_FROM_DEVICE);
 	if (ib_dma_mapping_error(xprt->sc_cm_id->device, sge.addr)) {
 		put_page(p);
 		return;
@@ -1343,7 +1343,7 @@ void svc_rdma_send_error(struct svcxprt_rdma *xprt, struct rpcrdma_msg *rmsgp,
 	if (ret) {
 		dprintk("svcrdma: Error %d posting send for protocol error\n",
 			ret);
-		ib_dma_unmap_page(xprt->sc_cm_id->device,
+		ib_dma_unmap_single(xprt->sc_cm_id->device,
 				  sge.addr, PAGE_SIZE,
 				  DMA_FROM_DEVICE);
 		svc_rdma_put_context(ctxt, 1);
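For reference, the symmetry matters because svcrdma tears down every
mapped SGE through one common path that calls ib_dma_unmap_single() with
the length and direction recorded at map time. Below is a minimal sketch
of that pattern, using a trimmed-down stand-in struct rather than the
kernel's actual svc_rdma_op_ctxt:

#include <rdma/ib_verbs.h>

/* Illustrative stand-in for the few op-context fields that matter here;
 * this is not the kernel's svc_rdma_op_ctxt. */
struct demo_ctxt {
	struct ib_sge sge[4];
	int count;
	enum dma_data_direction direction;
};

/* Every SGE is unmapped with the length stored in sge[i].length; if a
 * map call used PAGE_SIZE but the SGE recorded only the header length,
 * the difference is never released and bus address space leaks. */
static void demo_unmap_sges(struct ib_device *dev, struct demo_ctxt *ctxt)
{
	int i;

	for (i = 0; i < ctxt->count; i++)
		ib_dma_unmap_single(dev, ctxt->sge[i].addr,
				    ctxt->sge[i].length, ctxt->direction);
}

With both sides driven by the same sge.length and the _single API, map
and unmap can no longer disagree about the mapped size.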