Commit a870d627 authored by Andy Grover, committed by David S. Miller

RDS/IB: Always use PAGE_SIZE for FMR page size

While FMRs allow significant flexibility in what size of pages they can use,
we really just want FMR pages to match CPU page size. Roland says we can
count on this always being supported, so this simplifies things.
Signed-off-by: Andy Grover <andy.grover@oracle.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent edacaeae
...@@ -85,9 +85,6 @@ void rds_ib_add_one(struct ib_device *device) ...@@ -85,9 +85,6 @@ void rds_ib_add_one(struct ib_device *device)
rds_ibdev->max_wrs = dev_attr->max_qp_wr; rds_ibdev->max_wrs = dev_attr->max_qp_wr;
rds_ibdev->max_sge = min(dev_attr->max_sge, RDS_IB_MAX_SGE); rds_ibdev->max_sge = min(dev_attr->max_sge, RDS_IB_MAX_SGE);
rds_ibdev->fmr_page_shift = max(9, ffs(dev_attr->page_size_cap) - 1);
rds_ibdev->fmr_page_size = 1 << rds_ibdev->fmr_page_shift;
rds_ibdev->fmr_page_mask = ~((u64) rds_ibdev->fmr_page_size - 1);
rds_ibdev->fmr_max_remaps = dev_attr->max_map_per_fmr?: 32; rds_ibdev->fmr_max_remaps = dev_attr->max_map_per_fmr?: 32;
rds_ibdev->max_fmrs = dev_attr->max_fmr ? rds_ibdev->max_fmrs = dev_attr->max_fmr ?
min_t(unsigned int, dev_attr->max_fmr, fmr_pool_size) : min_t(unsigned int, dev_attr->max_fmr, fmr_pool_size) :
......
...@@ -159,9 +159,6 @@ struct rds_ib_device { ...@@ -159,9 +159,6 @@ struct rds_ib_device {
struct ib_pd *pd; struct ib_pd *pd;
struct ib_mr *mr; struct ib_mr *mr;
struct rds_ib_mr_pool *mr_pool; struct rds_ib_mr_pool *mr_pool;
int fmr_page_shift;
int fmr_page_size;
u64 fmr_page_mask;
unsigned int fmr_max_remaps; unsigned int fmr_max_remaps;
unsigned int max_fmrs; unsigned int max_fmrs;
int max_sge; int max_sge;
......
...@@ -211,7 +211,7 @@ struct rds_ib_mr_pool *rds_ib_create_mr_pool(struct rds_ib_device *rds_ibdev) ...@@ -211,7 +211,7 @@ struct rds_ib_mr_pool *rds_ib_create_mr_pool(struct rds_ib_device *rds_ibdev)
pool->fmr_attr.max_pages = fmr_message_size; pool->fmr_attr.max_pages = fmr_message_size;
pool->fmr_attr.max_maps = rds_ibdev->fmr_max_remaps; pool->fmr_attr.max_maps = rds_ibdev->fmr_max_remaps;
pool->fmr_attr.page_shift = rds_ibdev->fmr_page_shift; pool->fmr_attr.page_shift = PAGE_SHIFT;
pool->max_free_pinned = rds_ibdev->max_fmrs * fmr_message_size / 4; pool->max_free_pinned = rds_ibdev->max_fmrs * fmr_message_size / 4;
/* We never allow more than max_items MRs to be allocated. /* We never allow more than max_items MRs to be allocated.
...@@ -349,13 +349,13 @@ static int rds_ib_map_fmr(struct rds_ib_device *rds_ibdev, struct rds_ib_mr *ibm ...@@ -349,13 +349,13 @@ static int rds_ib_map_fmr(struct rds_ib_device *rds_ibdev, struct rds_ib_mr *ibm
unsigned int dma_len = ib_sg_dma_len(dev, &scat[i]); unsigned int dma_len = ib_sg_dma_len(dev, &scat[i]);
u64 dma_addr = ib_sg_dma_address(dev, &scat[i]); u64 dma_addr = ib_sg_dma_address(dev, &scat[i]);
if (dma_addr & ~rds_ibdev->fmr_page_mask) { if (dma_addr & ~PAGE_MASK) {
if (i > 0) if (i > 0)
return -EINVAL; return -EINVAL;
else else
++page_cnt; ++page_cnt;
} }
if ((dma_addr + dma_len) & ~rds_ibdev->fmr_page_mask) { if ((dma_addr + dma_len) & ~PAGE_MASK) {
if (i < sg_dma_len - 1) if (i < sg_dma_len - 1)
return -EINVAL; return -EINVAL;
else else
...@@ -365,7 +365,7 @@ static int rds_ib_map_fmr(struct rds_ib_device *rds_ibdev, struct rds_ib_mr *ibm ...@@ -365,7 +365,7 @@ static int rds_ib_map_fmr(struct rds_ib_device *rds_ibdev, struct rds_ib_mr *ibm
len += dma_len; len += dma_len;
} }
page_cnt += len >> rds_ibdev->fmr_page_shift; page_cnt += len >> PAGE_SHIFT;
if (page_cnt > fmr_message_size) if (page_cnt > fmr_message_size)
return -EINVAL; return -EINVAL;
...@@ -378,9 +378,9 @@ static int rds_ib_map_fmr(struct rds_ib_device *rds_ibdev, struct rds_ib_mr *ibm ...@@ -378,9 +378,9 @@ static int rds_ib_map_fmr(struct rds_ib_device *rds_ibdev, struct rds_ib_mr *ibm
unsigned int dma_len = ib_sg_dma_len(dev, &scat[i]); unsigned int dma_len = ib_sg_dma_len(dev, &scat[i]);
u64 dma_addr = ib_sg_dma_address(dev, &scat[i]); u64 dma_addr = ib_sg_dma_address(dev, &scat[i]);
for (j = 0; j < dma_len; j += rds_ibdev->fmr_page_size) for (j = 0; j < dma_len; j += PAGE_SIZE)
dma_pages[page_cnt++] = dma_pages[page_cnt++] =
(dma_addr & rds_ibdev->fmr_page_mask) + j; (dma_addr & PAGE_MASK) + j;
} }
ret = ib_map_phys_fmr(ibmr->fmr, ret = ib_map_phys_fmr(ibmr->fmr,
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment