Commit f9aed0e2 authored by Nick Piggin, committed by Linus Torvalds

[PATCH] sg: use compound pages

sg increments the refcount of the constituent pages in its higher-order memory
allocations when they are about to be mapped by userspace.  This is done so that
the subsequent get_page/put_page calls made while the mapping is set up and torn
down do not free the page.
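
As a rough illustration of that scheme (a minimal sketch with made-up names; the
real code is sg_rb_correct4mmap() in the diff below): each constituent page after
the first in an order-N alloc_pages() block has its count raised before the buffer
is exposed to userspace, and lowered again on teardown, so the VM's own
get_page()/put_page() pair can never drop a count to the point where a sub-page
would be freed on its own.

#include <linux/gfp.h>
#include <linux/mm.h>

/* Sketch of the old manual per-page "correction"; names are illustrative only. */
static void bump_constituent_pages(struct page *base, unsigned int order, int up)
{
	unsigned int i;

	for (i = 1; i < (1U << order); i++) {	/* skip the base page */
		if (up)
			get_page(base + i);
		else if (page_count(base + i) > 0)
			__put_page(base + i);	/* drop without freeing piecemeal */
	}
}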

Move over to the preferred way, that is, using compound pages instead.  This
fixes a whole class of possible obscure bugs where a get_user_pages on a
constituent page may outlast the user mappings or even the driver.
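
With compound pages the per-page bookkeeping disappears: passing __GFP_COMP ties
the constituent pages to the head page, so a get_page()/put_page() on any sub-page
(including one taken by get_user_pages()) pins or releases the whole allocation.
A minimal sketch of an allocation done this way (illustrative names, not the
driver's code):

#include <linux/gfp.h>
#include <linux/mm.h>

/* Allocate an order-N buffer as a compound page (sketch only). */
static struct page *alloc_compound_buffer(unsigned int order, int low_dma)
{
	gfp_t mask = GFP_ATOMIC | __GFP_COMP | __GFP_NOWARN;

	if (low_dma)
		mask |= GFP_DMA;

	/*
	 * Because of __GFP_COMP, a later get_page() on any constituent
	 * page references the entire block; the memory is only returned
	 * once every such reference is gone and __free_pages() runs.
	 */
	return alloc_pages(mask, order);
}
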
Signed-off-by: Nick Piggin <npiggin@suse.de>
Cc: Hugh Dickins <hugh@veritas.com>
Cc: Douglas Gilbert <dougg@torque.net>
Cc: James Bottomley <James.Bottomley@steeleye.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
parent a6f563db
@@ -1140,32 +1140,6 @@ sg_fasync(int fd, struct file *filp, int mode)
 	return (retval < 0) ? retval : 0;
 }
 
-/* When startFinish==1 increments page counts for pages other than the
-   first of scatter gather elements obtained from alloc_pages().
-   When startFinish==0 decrements ... */
-static void
-sg_rb_correct4mmap(Sg_scatter_hold * rsv_schp, int startFinish)
-{
-	struct scatterlist *sg = rsv_schp->buffer;
-	struct page *page;
-	int k, m;
-
-	SCSI_LOG_TIMEOUT(3, printk("sg_rb_correct4mmap: startFinish=%d, scatg=%d\n",
-				   startFinish, rsv_schp->k_use_sg));
-	/* N.B. correction _not_ applied to base page of each allocation */
-	for (k = 0; k < rsv_schp->k_use_sg; ++k, ++sg) {
-		for (m = PAGE_SIZE; m < sg->length; m += PAGE_SIZE) {
-			page = sg->page;
-			if (startFinish)
-				get_page(page);
-			else {
-				if (page_count(page) > 0)
-					__put_page(page);
-			}
-		}
-	}
-}
-
 static struct page *
 sg_vma_nopage(struct vm_area_struct *vma, unsigned long addr, int *type)
 {
@@ -1237,10 +1211,7 @@ sg_mmap(struct file *filp, struct vm_area_struct *vma)
 		sa += len;
 	}
 
-	if (0 == sfp->mmap_called) {
-		sg_rb_correct4mmap(rsv_schp, 1);	/* do only once per fd lifetime */
-		sfp->mmap_called = 1;
-	}
+	sfp->mmap_called = 1;
 	vma->vm_flags |= VM_RESERVED;
 	vma->vm_private_data = sfp;
 	vma->vm_ops = &sg_mmap_vm_ops;
@@ -2395,8 +2366,6 @@ __sg_remove_sfp(Sg_device * sdp, Sg_fd * sfp)
 		SCSI_LOG_TIMEOUT(6,
 			printk("__sg_remove_sfp: bufflen=%d, k_use_sg=%d\n",
 			       (int) sfp->reserve.bufflen, (int) sfp->reserve.k_use_sg));
-		if (sfp->mmap_called)
-			sg_rb_correct4mmap(&sfp->reserve, 0);	/* undo correction */
 		sg_remove_scat(&sfp->reserve);
 	}
 	sfp->parentdp = NULL;
@@ -2478,9 +2447,9 @@ sg_page_malloc(int rqSz, int lowDma, int *retSzp)
 		return resp;
 
 	if (lowDma)
-		page_mask = GFP_ATOMIC | GFP_DMA | __GFP_NOWARN;
+		page_mask = GFP_ATOMIC | GFP_DMA | __GFP_COMP | __GFP_NOWARN;
 	else
-		page_mask = GFP_ATOMIC | __GFP_NOWARN;
+		page_mask = GFP_ATOMIC | __GFP_COMP | __GFP_NOWARN;
 
 	for (order = 0, a_size = PAGE_SIZE; a_size < rqSz;
 	     order++, a_size <<= 1) ;