Commit 810a56b9 authored by Mike Kravetz's avatar Mike Kravetz Committed by Linus Torvalds
Browse files

userfaultfd: hugetlbfs: fix __mcopy_atomic_hugetlb retry/error processing

The new routine copy_huge_page_from_user() uses kmap_atomic() to map
PAGE_SIZE pages.  However, this prevents page faults in the subsequent
call to copy_from_user().  This is OK in the case where the routine is
called with mmap_sem held.  However, in another case we want to allow
page faults.  So, add a new argument allow_pagefault to indicate if the
routine should allow page faults.

[ unmap the correct pointer]
[ kunmap() takes a page*, per Hugh]

Signed-off-by: Mike Kravetz <>
Signed-off-by: Andrea Arcangeli <>
Signed-off-by: Dan Carpenter <>
Cc: "Dr. David Alan Gilbert" <>
Cc: Hillf Danton <>
Cc: Michael Rapoport <>
Cc: Mike Rapoport <>
Cc: Pavel Emelyanov <>
Cc: Hugh Dickins <>
Cc: Hugh Dickins <>
Signed-off-by: Andrew Morton <>
Signed-off-by: Linus Torvalds <>
parent 60d4d2d2
@@ -2426,7 +2426,8 @@ extern void copy_user_huge_page(struct page *dst, struct page *src,
unsigned int pages_per_huge_page);
extern long copy_huge_page_from_user(struct page *dst_page,
const void __user *usr_src,
unsigned int pages_per_huge_page);
unsigned int pages_per_huge_page,
bool allow_pagefault);
extern struct page_ext_operations debug_guardpage_ops;
@@ -3973,7 +3973,7 @@ int hugetlb_mcopy_atomic_pte(struct mm_struct *dst_mm,
ret = copy_huge_page_from_user(page,
(const void __user *) src_addr,
pages_per_huge_page(h), false);
/* fallback to copy_from_user outside mmap_sem */
if (unlikely(ret)) {
@@ -4155,7 +4155,8 @@ void copy_user_huge_page(struct page *dst, struct page *src,
long copy_huge_page_from_user(struct page *dst_page,
const void __user *usr_src,
unsigned int pages_per_huge_page)
unsigned int pages_per_huge_page,
bool allow_pagefault)
void *src = (void *)usr_src;
void *page_kaddr;
@@ -4163,10 +4164,16 @@ long copy_huge_page_from_user(struct page *dst_page,
unsigned long ret_val = pages_per_huge_page * PAGE_SIZE;
for (i = 0; i < pages_per_huge_page; i++) {
if (allow_pagefault)
page_kaddr = kmap(dst_page + i);
page_kaddr = kmap_atomic(dst_page + i);
rc = copy_from_user(page_kaddr,
(const void __user *)(src + i * PAGE_SIZE),
if (allow_pagefault)
kunmap(dst_page + i);
ret_val -= (PAGE_SIZE - rc);
@@ -274,7 +274,7 @@ static __always_inline ssize_t __mcopy_atomic_hugetlb(struct mm_struct *dst_mm,
err = copy_huge_page_from_user(page,
(const void __user *)src_addr,
pages_per_huge_page(h), true);
if (unlikely(err)) {
err = -EFAULT;
goto out;
Markdown is supported
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment