Commit 10d5726c authored by Philippe Gerum, committed by Greg Gallagher

mm: ipipe: disable ondemand memory

Co-kernels cannot bear the extra latency caused by the memory access
faults involved in COW or overcommit. __ipipe_disable_ondemand_mappings()
force-commits all common memory mappings with physical RAM.
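
For reference, here is a minimal sketch of what that force-commit pass
can look like. It assumes the __ipipe_pin_vma() helper touched in the
mm/mlock.c hunk below, plus a VM_PINNED def_flags bit marking an
already-committed mm; the exact signature and the flag handling are
assumptions, not lifted from this commit.

/*
 * Sketch only: eagerly fault in every mapping of the task's mm so
 * that no COW or demand-paging fault can hit the co-kernel later.
 */
int __ipipe_disable_ondemand_mappings(struct task_struct *tsk)
{
	struct vm_area_struct *vma;
	struct mm_struct *mm;
	int ret = 0;

	mm = get_task_mm(tsk);
	if (mm == NULL)
		return 0;	/* Kernel thread, nothing to pin. */

	down_write(&mm->mmap_sem);	/* renamed mmap_lock in v5.8+ */
	if (mm->def_flags & VM_PINNED)
		goto done;	/* Already force-committed. */

	for (vma = mm->mmap; vma; vma = vma->vm_next) {
		ret = __ipipe_pin_vma(mm, vma);
		if (ret < 0)
			goto done;
	}

	/* Assumed semantics: mappings created later get pinned too. */
	mm->def_flags |= VM_PINNED;
done:
	up_write(&mm->mmap_sem);
	mmput(mm);

	return ret;
}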

In addition, the architecture code is given a chance to pre-load page
table entries for ioremap and vmalloc memory, preventing further
minor faults on access to such memory due to PTE misses (if that ever
makes sense for the architecture).
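
The arch-side hook itself is outside this excerpt. Its likely shape,
with the hook name and the no-op default both being assumptions here,
is a per-range callout invoked when a vmalloc or ioremap area is
created:

/*
 * Assumed hook (name and signature not taken from this commit): let
 * the architecture pre-instantiate kernel PTEs covering [start, end]
 * up front, instead of lazily syncing them into each mm from a minor
 * fault, the way 32-bit x86 did in vmalloc_fault(). Architectures
 * whose kernel page tables are shared by all mms can leave this a
 * no-op.
 */
#ifndef __ipipe_pin_mapping_globally
#define __ipipe_pin_mapping_globally(start, end)	do { } while (0)
#endif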
parent af0abe1e
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -129,10 +129,8 @@ EXPORT_SYMBOL(zero_pfn);
 unsigned long highest_memmap_pfn __read_mostly;
 
-static inline void cow_user_page(struct page *dst,
-				 struct page *src,
-				 unsigned long va,
-				 struct vm_area_struct *vma);
+static bool cow_user_page(struct page *dst, struct page *src,
+			  struct vm_fault *vmf);
 
 /*
  * CONFIG_MMU architectures set up ZERO_PAGE in their paging_init()
@@ -951,7 +949,8 @@ struct page *vm_normal_page_pmd(struct vm_area_struct *vma, unsigned long addr,
 static inline unsigned long
 copy_one_pte(struct mm_struct *dst_mm, struct mm_struct *src_mm,
 	     pte_t *dst_pte, pte_t *src_pte, struct vm_area_struct *vma,
-	     unsigned long addr, int *rss, struct page *uncow_page)
+	     unsigned long addr, int *rss, pmd_t *src_pmd,
+	     struct page *uncow_page)
 {
 	unsigned long vm_flags = vma->vm_flags;
 	pte_t pte = *src_pte;
@@ -1032,16 +1031,28 @@ copy_one_pte(struct mm_struct *dst_mm, struct mm_struct *src_mm,
 #ifdef CONFIG_IPIPE
 	if (uncow_page) {
 		struct page *old_page = vm_normal_page(vma, addr, pte);
-		cow_user_page(uncow_page, old_page, addr, vma);
-		pte = mk_pte(uncow_page, vma->vm_page_prot);
+		struct vm_fault vmf;
 
-		if (vm_flags & VM_SHARED)
-			pte = pte_mkclean(pte);
-		pte = pte_mkold(pte);
+		vmf.vma = vma;
+		vmf.address = addr;
+		vmf.orig_pte = pte;
+		vmf.pmd = src_pmd;
 
-		page_add_new_anon_rmap(uncow_page, vma, addr, false);
-		rss[!!PageAnon(uncow_page)]++;
-		goto out_set_pte;
+		if (cow_user_page(uncow_page, old_page, &vmf)) {
+			pte = mk_pte(uncow_page, vma->vm_page_prot);
+			if (vm_flags & VM_SHARED)
+				pte = pte_mkclean(pte);
+			pte = pte_mkold(pte);
+			page_add_new_anon_rmap(uncow_page, vma, addr,
+					       false);
+			rss[!!PageAnon(uncow_page)]++;
+			goto out_set_pte;
+		} else {
+			/* unexpected: source page no longer present */
+			WARN_ON_ONCE(1);
+		}
 	}
 #endif /* CONFIG_IPIPE */
 	ptep_set_wrprotect(src_mm, addr, src_pte);
@@ -1151,7 +1162,7 @@ static int copy_pte_range(struct mm_struct *dst_mm, struct mm_struct *src_mm,
 		}
 #endif
 		entry.val = copy_one_pte(dst_mm, src_mm, dst_pte, src_pte,
-					 vma, addr, rss, uncow_page);
+					 vma, addr, rss, src_pmd, uncow_page);
 		uncow_page = NULL;
 		if (entry.val)
 			break;
@@ -2387,7 +2398,8 @@ static inline int pte_unmap_same(struct mm_struct *mm, pmd_t *pmd,
 	return same;
 }
 
-static inline void cow_user_page(struct page *dst, struct page *src, unsigned long va, struct vm_area_struct *vma)
+static bool cow_user_page(struct page *dst, struct page *src,
+			  struct vm_fault *vmf)
 {
 	debug_dma_assert_idle(src);
--- a/mm/mlock.c
+++ b/mm/mlock.c
@@ -871,7 +871,7 @@ void user_shm_unlock(size_t size, struct user_struct *user)
 int __ipipe_pin_vma(struct mm_struct *mm, struct vm_area_struct *vma)
 {
-	unsigned int gup_flags = 0;
-	int ret, len;
+	int ret, write, len;
 
 	if (vma->vm_flags & (VM_IO | VM_PFNMAP))
 		return 0;
@@ -885,8 +885,10 @@ int __ipipe_pin_vma(struct mm_struct *mm, struct vm_area_struct *vma)
-	if ((vma->vm_flags & (VM_WRITE | VM_SHARED)) == VM_WRITE)
-		gup_flags |= FOLL_WRITE;
+	write = (vma->vm_flags & (VM_WRITE | VM_SHARED)) == VM_WRITE;
 	len = DIV_ROUND_UP(vma->vm_end, PAGE_SIZE) - vma->vm_start/PAGE_SIZE;
-	ret = get_user_pages_locked(vma->vm_start, len, gup_flags, NULL, NULL);
+	ret = get_user_pages(vma->vm_start, len, write, 0, NULL);
 	if (ret < 0)
 		return ret;
 	return ret == len ? 0 : -EFAULT;
--- a/mm/mprotect.c
+++ b/mm/mprotect.c
@@ -112,8 +112,8 @@ static unsigned long change_pte_range(struct vm_area_struct *vma, pmd_t *pmd,
 			}
 
 			flags = hard_local_irq_save();
-			ptent = ptep_modify_prot_start(mm, addr, pte);
-			ptent = pte_modify(ptent, newprot);
+			oldpte = ptep_modify_prot_start(vma, addr, pte);
+			ptent = pte_modify(oldpte, newprot);
 			if (preserve_write)
 				ptent = pte_mk_savedwrite(ptent);
@@ -123,7 +123,7 @@ static unsigned long change_pte_range(struct vm_area_struct *vma, pmd_t *pmd,
 					 !(vma->vm_flags & VM_SOFTDIRTY))) {
 				ptent = pte_mkwrite(ptent);
 			}
-			ptep_modify_prot_commit(mm, addr, pte, ptent);
+			ptep_modify_prot_commit(vma, addr, pte, oldpte, ptent);
 			hard_local_irq_restore(flags);
 			pages++;
 		} else if (IS_ENABLED(CONFIG_MIGRATION)) {