Commit 58fa879e authored by Hugh Dickins, committed by Linus Torvalds

mm: FOLL flags for GUP flags

__get_user_pages() has been taking its own GUP flags, then processing
them into FOLL flags for follow_page().  Though oddly named, the FOLL
flags are more widely used, so pass them to __get_user_pages() now.
Sorry, VM flags, VM_FAULT flags and FAULT_FLAGs are still distinct.

(The patch to __get_user_pages() looks peculiar, with both gup_flags
and foll_flags: the gup_flags remain constant; but as before there's
an exceptional case, out of scope of the patch, in which foll_flags
per page have FOLL_WRITE masked off.)
Signed-off-by: Hugh Dickins <hugh.dickins@tiscali.co.uk>
Cc: Rik van Riel <riel@redhat.com>
Cc: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Cc: KOSAKI Motohiro <kosaki.motohiro@jp.fujitsu.com>
Cc: Nick Piggin <npiggin@suse.de>
Cc: Mel Gorman <mel@csn.ul.ie>
Cc: Minchan Kim <minchan.kim@gmail.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent a13ea5b7
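
For reference, a minimal user-space sketch (not kernel code) of the flag translation this patch introduces: the old write/force/pages arguments of get_user_pages() are folded into a single FOLL_* bitmask before calling __get_user_pages(), as in the mm/memory.c hunk below. The FOLL_* values mirror include/linux/mm.h; FOLL_WRITE (0x01) is assumed from the surrounding header, and gup_flags_from_args() is a hypothetical helper used only for illustration.

#include <stdio.h>

/* FOLL_* bits as defined in include/linux/mm.h around this commit */
#define FOLL_WRITE	0x01	/* check pte is writable */
#define FOLL_TOUCH	0x02	/* mark page accessed */
#define FOLL_GET	0x04	/* do get_page on page */
#define FOLL_DUMP	0x08	/* give error on hole if it would be zero */
#define FOLL_FORCE	0x10	/* get_user_pages read/write w/o permission */

/* Hypothetical helper: mirrors what get_user_pages() now does inline */
static int gup_flags_from_args(int write, int force, int want_pages)
{
	int flags = FOLL_TOUCH;		/* pages are always marked accessed */

	if (want_pages)
		flags |= FOLL_GET;	/* caller wants references on the pages */
	if (write)
		flags |= FOLL_WRITE;
	if (force)
		flags |= FOLL_FORCE;
	return flags;
}

int main(void)
{
	/* a forced, writable lookup that returns pages: prints 0x17 */
	printf("gup_flags = %#x\n", gup_flags_from_args(1, 1, 1));
	return 0;
}

The same composition shows up in the mm/mlock.c and get_dump_page() hunks: each caller now states its FOLL_TOUCH/FOLL_GET needs explicitly instead of relying on __get_user_pages() to infer them.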
include/linux/mm.h
@@ -1232,6 +1232,7 @@ struct page *follow_page(struct vm_area_struct *, unsigned long address,
 #define FOLL_TOUCH	0x02	/* mark page accessed */
 #define FOLL_GET	0x04	/* do get_page on page */
 #define FOLL_DUMP	0x08	/* give error on hole if it would be zero */
+#define FOLL_FORCE	0x10	/* get_user_pages read/write w/o permission */
 
 typedef int (*pte_fn_t)(pte_t *pte, pgtable_t token, unsigned long addr,
 			void *data);
mm/internal.h
@@ -250,12 +250,8 @@ static inline void mminit_validate_memmodel_limits(unsigned long *start_pfn,
 }
 #endif /* CONFIG_SPARSEMEM */
 
-#define GUP_FLAGS_WRITE		0x01
-#define GUP_FLAGS_FORCE		0x02
-#define GUP_FLAGS_DUMP		0x04
-
 int __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
-		     unsigned long start, int len, int flags,
+		     unsigned long start, int len, unsigned int foll_flags,
 		     struct page **pages, struct vm_area_struct **vmas);
 
 #define ZONE_RECLAIM_NOSCAN	-2
mm/memory.c
@@ -1209,27 +1209,29 @@ no_page_table:
 }
 
 int __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
-		     unsigned long start, int nr_pages, int flags,
+		     unsigned long start, int nr_pages, unsigned int gup_flags,
 		     struct page **pages, struct vm_area_struct **vmas)
 {
 	int i;
-	unsigned int vm_flags = 0;
-	int write = !!(flags & GUP_FLAGS_WRITE);
-	int force = !!(flags & GUP_FLAGS_FORCE);
+	unsigned long vm_flags;
 
 	if (nr_pages <= 0)
 		return 0;
 
+	VM_BUG_ON(!!pages != !!(gup_flags & FOLL_GET));
+
 	/*
 	 * Require read or write permissions.
-	 * If 'force' is set, we only require the "MAY" flags.
+	 * If FOLL_FORCE is set, we only require the "MAY" flags.
 	 */
-	vm_flags  = write ? (VM_WRITE | VM_MAYWRITE) : (VM_READ | VM_MAYREAD);
-	vm_flags &= force ? (VM_MAYREAD | VM_MAYWRITE) : (VM_READ | VM_WRITE);
+	vm_flags  = (gup_flags & FOLL_WRITE) ?
+			(VM_WRITE | VM_MAYWRITE) : (VM_READ | VM_MAYREAD);
+	vm_flags &= (gup_flags & FOLL_FORCE) ?
+			(VM_MAYREAD | VM_MAYWRITE) : (VM_READ | VM_WRITE);
 	i = 0;
 
 	do {
 		struct vm_area_struct *vma;
-		unsigned int foll_flags;
 
 		vma = find_extend_vma(mm, start);
 		if (!vma && in_gate_area(tsk, start)) {
@@ -1241,7 +1243,7 @@ int __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
 			pte_t *pte;
 
 			/* user gate pages are read-only */
-			if (write)
+			if (gup_flags & FOLL_WRITE)
 				return i ? : -EFAULT;
 			if (pg > TASK_SIZE)
 				pgd = pgd_offset_k(pg);
@@ -1278,22 +1280,15 @@ int __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
 		    !(vm_flags & vma->vm_flags))
 			return i ? : -EFAULT;
 
-		foll_flags = FOLL_TOUCH;
-		if (pages)
-			foll_flags |= FOLL_GET;
-		if (flags & GUP_FLAGS_DUMP)
-			foll_flags |= FOLL_DUMP;
-		if (write)
-			foll_flags |= FOLL_WRITE;
-
 		if (is_vm_hugetlb_page(vma)) {
 			i = follow_hugetlb_page(mm, vma, pages, vmas,
-					&start, &nr_pages, i, foll_flags);
+					&start, &nr_pages, i, gup_flags);
 			continue;
 		}
 
 		do {
 			struct page *page;
+			unsigned int foll_flags = gup_flags;
 
 			/*
 			 * If we have a pending SIGKILL, don't keep faulting
@@ -1302,9 +1297,6 @@ int __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
 			if (unlikely(fatal_signal_pending(current)))
 				return i ? i : -ERESTARTSYS;
 
-			if (write)
-				foll_flags |= FOLL_WRITE;
-
 			cond_resched();
 			while (!(page = follow_page(vma, start, foll_flags))) {
 				int ret;
@@ -1415,12 +1407,14 @@ int get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
 		unsigned long start, int nr_pages, int write, int force,
 		struct page **pages, struct vm_area_struct **vmas)
 {
-	int flags = 0;
+	int flags = FOLL_TOUCH;
 
+	if (pages)
+		flags |= FOLL_GET;
 	if (write)
-		flags |= GUP_FLAGS_WRITE;
+		flags |= FOLL_WRITE;
 	if (force)
-		flags |= GUP_FLAGS_FORCE;
+		flags |= FOLL_FORCE;
 
 	return __get_user_pages(tsk, mm, start, nr_pages, flags, pages, vmas);
 }
@@ -1447,7 +1441,7 @@ struct page *get_dump_page(unsigned long addr)
 	struct page *page;
 
 	if (__get_user_pages(current, current->mm, addr, 1,
-			GUP_FLAGS_FORCE | GUP_FLAGS_DUMP, &page, &vma) < 1)
+			FOLL_FORCE | FOLL_DUMP | FOLL_GET, &page, &vma) < 1)
 		return NULL;
 	if (page == ZERO_PAGE(0)) {
 		page_cache_release(page);
mm/mlock.c
@@ -166,9 +166,9 @@ static long __mlock_vma_pages_range(struct vm_area_struct *vma,
 	VM_BUG_ON(end   > vma->vm_end);
 	VM_BUG_ON(!rwsem_is_locked(&mm->mmap_sem));
 
-	gup_flags = 0;
+	gup_flags = FOLL_TOUCH | FOLL_GET;
 	if (vma->vm_flags & VM_WRITE)
-		gup_flags = GUP_FLAGS_WRITE;
+		gup_flags |= FOLL_WRITE;
 
 	while (nr_pages > 0) {
 		int i;
mm/nommu.c
@@ -168,20 +168,20 @@ unsigned int kobjsize(const void *objp)
 }
 
 int __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
-		     unsigned long start, int nr_pages, int flags,
+		     unsigned long start, int nr_pages, int foll_flags,
 		     struct page **pages, struct vm_area_struct **vmas)
 {
 	struct vm_area_struct *vma;
 	unsigned long vm_flags;
 	int i;
-	int write = !!(flags & GUP_FLAGS_WRITE);
-	int force = !!(flags & GUP_FLAGS_FORCE);
 
 	/* calculate required read or write permissions.
-	 * - if 'force' is set, we only require the "MAY" flags.
+	 * If FOLL_FORCE is set, we only require the "MAY" flags.
 	 */
-	vm_flags = write ? (VM_WRITE | VM_MAYWRITE) : (VM_READ | VM_MAYREAD);
-	vm_flags &= force ? (VM_MAYREAD | VM_MAYWRITE) : (VM_READ | VM_WRITE);
+	vm_flags  = (foll_flags & FOLL_WRITE) ?
+			(VM_WRITE | VM_MAYWRITE) : (VM_READ | VM_MAYREAD);
+	vm_flags &= (foll_flags & FOLL_FORCE) ?
+			(VM_MAYREAD | VM_MAYWRITE) : (VM_READ | VM_WRITE);
 
 	for (i = 0; i < nr_pages; i++) {
 		vma = find_vma(mm, start);
@@ -223,9 +223,9 @@ int get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
 	int flags = 0;
 
 	if (write)
-		flags |= GUP_FLAGS_WRITE;
+		flags |= FOLL_WRITE;
 	if (force)
-		flags |= GUP_FLAGS_FORCE;
+		flags |= FOLL_FORCE;
 
 	return __get_user_pages(tsk, mm, start, nr_pages, flags, pages, vmas);
 }