Commit 1eade380 authored by Michael Holzheu, committed by Martin Schwidefsky

[S390] vmur: allocate single record buffers instead of one big data buffer

vmur allocates one contiguous kernel buffer to copy user data into when creating
ccw programs for the punch or printer. If large block sizes are used, it can
happen under memory pressure that we do not get the memory in one contiguous
chunk. Now we allocate memory for each single record to avoid high-order
allocations.
Signed-off-by: Michael Holzheu <holzheu@de.ibm.com>
Signed-off-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
parent 6d740a43
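
The problem, in concrete terms: the old do_write() staged the whole user buffer in a single kmalloc(count) call, so a write of, say, 64000 bytes needed 16 physically contiguous 4 KiB pages (an order-4 allocation), which the page allocator may be unable to satisfy under memory pressure even when plenty of fragmented memory is free. After this patch each record gets its own small buffer and each CCW's data address (cda) points at that record's buffer. A condensed, illustrative sketch of the new scheme, with error handling and tracing stripped out (not the actual driver code; see alloc_chan_prog() and free_chan_prog() in the diff below):

	/* one CCW per record plus a terminating NOP; kzalloc() leaves the NOP's cda == 0 */
	cpa = kzalloc((rec_count + 1) * sizeof(struct ccw1), GFP_KERNEL | GFP_DMA);
	for (i = 0; i < rec_count; i++) {
		kbuf = kmalloc(reclen, GFP_KERNEL | GFP_DMA);	/* one small buffer per record */
		copy_from_user(kbuf, ubuf, reclen);
		cpa[i].cmd_code = WRITE_CCW_CMD;
		cpa[i].flags = CCW_FLAG_CC | CCW_FLAG_SLI;
		cpa[i].count = reclen;
		cpa[i].cda = (u32)(addr_t) kbuf;	/* CCW points at the per-record buffer */
		ubuf += reclen;
	}
	cpa[i].cmd_code = CCW_CMD_NOOP;

free_chan_prog() then walks the CCW array, kfree()s every cda it finds until it reaches the NOP (whose cda is zero), and finally frees the array itself, so the per-record buffers and the channel program are always released together.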
@@ -119,10 +119,12 @@ static void urdev_put(struct urdev *urd)
 /*
  * Low-level functions to do I/O to a ur device.
  *	alloc_chan_prog
+ *	free_chan_prog
  *	do_ur_io
  *	ur_int_handler
  *
  * alloc_chan_prog allocates and builds the channel program
+ * free_chan_prog frees memory of the channel program
  *
  * do_ur_io issues the channel program to the device and blocks waiting
  * on a completion event it publishes at urd->io_done. The function
@@ -137,6 +139,16 @@ static void urdev_put(struct urdev *urd)
  * address pointer that alloc_chan_prog returned.
  */
+static void free_chan_prog(struct ccw1 *cpa)
+{
+	struct ccw1 *ptr = cpa;
+
+	while (ptr->cda) {
+		kfree((void *)(addr_t) ptr->cda);
+		ptr++;
+	}
+	kfree(cpa);
+}
+
 /*
  * alloc_chan_prog
@@ -144,44 +156,45 @@ static void urdev_put(struct urdev *urd)
  * with a final NOP CCW command-chained on (which ensures that CE and DE
  * are presented together in a single interrupt instead of as separate
  * interrupts unless an incorrect length indication kicks in first). The
- * data length in each CCW is reclen. The caller must ensure that count
- * is an integral multiple of reclen.
- * The channel program pointer returned by this function must be freed
- * with kfree. The caller is responsible for checking that
- * count/reclen is not ridiculously large.
+ * data length in each CCW is reclen.
  */
-static struct ccw1 *alloc_chan_prog(char *buf, size_t count, size_t reclen)
+static struct ccw1 *alloc_chan_prog(const char __user *ubuf, int rec_count,
+				    int reclen)
 {
-	size_t num_ccws;
 	struct ccw1 *cpa;
+	void *kbuf;
 	int i;
 
-	TRACE("alloc_chan_prog(%p, %zu, %zu)\n", buf, count, reclen);
+	TRACE("alloc_chan_prog(%p, %i, %i)\n", ubuf, rec_count, reclen);
 
 	/*
 	 * We chain a NOP onto the writes to force CE+DE together.
 	 * That means we allocate room for CCWs to cover count/reclen
 	 * records plus a NOP.
 	 */
-	num_ccws = count / reclen + 1;
-	cpa = kmalloc(num_ccws * sizeof(struct ccw1), GFP_KERNEL | GFP_DMA);
+	cpa = kzalloc((rec_count + 1) * sizeof(struct ccw1),
+		      GFP_KERNEL | GFP_DMA);
 	if (!cpa)
-		return NULL;
+		return ERR_PTR(-ENOMEM);
 
-	for (i = 0; count; i++) {
+	for (i = 0; i < rec_count; i++) {
 		cpa[i].cmd_code = WRITE_CCW_CMD;
 		cpa[i].flags = CCW_FLAG_CC | CCW_FLAG_SLI;
 		cpa[i].count = reclen;
-		cpa[i].cda = __pa(buf);
-		buf += reclen;
-		count -= reclen;
+		kbuf = kmalloc(reclen, GFP_KERNEL | GFP_DMA);
+		if (!kbuf) {
+			free_chan_prog(cpa);
+			return ERR_PTR(-ENOMEM);
+		}
+		cpa[i].cda = (u32)(addr_t) kbuf;
+		if (copy_from_user(kbuf, ubuf, reclen)) {
+			free_chan_prog(cpa);
+			return ERR_PTR(-EFAULT);
+		}
+		ubuf += reclen;
 	}
 	/* The following NOP CCW forces CE+DE to be presented together */
 	cpa[i].cmd_code = CCW_CMD_NOOP;
-	cpa[i].flags = 0;
-	cpa[i].count = 0;
-	cpa[i].cda = 0;
 	return cpa;
 }
@@ -325,24 +338,11 @@ static ssize_t do_write(struct urdev *urd, const char __user *udata,
 			size_t count, size_t reclen, loff_t *ppos)
 {
 	struct ccw1 *cpa;
-	char *buf;
 	int rc;
 
-	/* Data buffer must be under 2GB line for fmt1 CCWs: hence GFP_DMA */
-	buf = kmalloc(count, GFP_KERNEL | GFP_DMA);
-	if (!buf)
-		return -ENOMEM;
-
-	if (copy_from_user(buf, udata, count)) {
-		rc = -EFAULT;
-		goto fail_kfree_buf;
-	}
-
-	cpa = alloc_chan_prog(buf, count, reclen);
-	if (!cpa) {
-		rc = -ENOMEM;
-		goto fail_kfree_buf;
-	}
+	cpa = alloc_chan_prog(udata, count / reclen, reclen);
+	if (IS_ERR(cpa))
+		return PTR_ERR(cpa);
+
 	rc = do_ur_io(urd, cpa);
 	if (rc)
@@ -354,10 +354,9 @@ static ssize_t do_write(struct urdev *urd, const char __user *udata,
 	}
 	*ppos += count;
 	rc = count;
 fail_kfree_cpa:
-	kfree(cpa);
-fail_kfree_buf:
-	kfree(buf);
+	free_chan_prog(cpa);
 	return rc;
 }