2b834b083d
Patches and rationale by Seth Forshee [1]:

  My testing shows that the "POWER9: Additional power9 patches" patches
  are responsible, two of them in particular:

   - mm: introduce page_vma_mapped_walk()
   - mm, ksm: convert write_protect_page() to use page_vma_mapped_walk()

  These patches don't appear to be included for any functionality they
  provide, but rather to make "mm/ksm: handle protnone saved writes when
  making page write protect" a clean cherry pick instead of a backport.
  But the backport isn't that difficult, so as far as I can tell we can
  do away with the other two patches.

1: https://bugs.launchpad.net/ubuntu/+source/linux/+bug/1674838/comments/108
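For context, the two calling conventions the revert exchanges look roughly
like this (a condensed sketch drawn from the diff below, not a buildable
kernel snippet; the surrounding locals page, vma, mm, addr, ptep and ptl
are abbreviated from write_protect_page()):

	/*
	 * Walker API being reverted away from: the caller fills in a
	 * struct page_vma_mapped_walk and page_vma_mapped_walk() locates
	 * the PTE and takes the page table lock on success.
	 */
	struct page_vma_mapped_walk pvmw = {
		.page = page,
		.vma = vma,
	};
	pvmw.address = page_address_in_vma(page, vma);
	if (page_vma_mapped_walk(&pvmw)) {
		/* ... use pvmw.pte under the lock ... */
		page_vma_mapped_walk_done(&pvmw);	/* unmap + unlock */
	}

	/*
	 * Older helper being restored: page_check_address() returns the
	 * mapped PTE with the page table lock held via *ptl, or NULL if
	 * the page is not mapped at addr.
	 */
	ptep = page_check_address(page, mm, addr, &ptl, 0);
	if (ptep) {
		/* ... use *ptep under the lock ... */
		pte_unmap_unlock(ptep, ptl);
	}

As the sketch suggests, only the PTE lookup and unlock calls differ, which
is why backporting the protnone fix without the walker remains manageable.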
From 40e1fa51bc8aa3df1d3a23711b5de62d8251bff5 Mon Sep 17 00:00:00 2001
From: Seth Forshee <seth.forshee@canonical.com>
Date: Wed, 3 May 2017 08:34:52 -0500
Subject: [PATCH 2/4] Revert "mm, ksm: convert write_protect_page() to use
 page_vma_mapped_walk()"

This reverts commit 3000e033152a70fa139765b4dbb5baec46b1cc1b.

Signed-off-by: Seth Forshee <seth.forshee@canonical.com>
---
 mm/ksm.c | 34 ++++++++++++++++------------------
 1 file changed, 16 insertions(+), 18 deletions(-)

diff --git a/mm/ksm.c b/mm/ksm.c
index 9dd2e58fb6dc..fed4afd8293b 100644
--- a/mm/ksm.c
+++ b/mm/ksm.c
@@ -856,35 +856,33 @@ static int write_protect_page(struct vm_area_struct *vma, struct page *page,
 			      pte_t *orig_pte)
 {
 	struct mm_struct *mm = vma->vm_mm;
-	struct page_vma_mapped_walk pvmw = {
-		.page = page,
-		.vma = vma,
-	};
+	unsigned long addr;
+	pte_t *ptep;
+	spinlock_t *ptl;
 	int swapped;
 	int err = -EFAULT;
 	unsigned long mmun_start;	/* For mmu_notifiers */
 	unsigned long mmun_end;	/* For mmu_notifiers */
 
-	pvmw.address = page_address_in_vma(page, vma);
-	if (pvmw.address == -EFAULT)
+	addr = page_address_in_vma(page, vma);
+	if (addr == -EFAULT)
 		goto out;
 
 	BUG_ON(PageTransCompound(page));
 
-	mmun_start = pvmw.address;
-	mmun_end   = pvmw.address + PAGE_SIZE;
+	mmun_start = addr;
+	mmun_end   = addr + PAGE_SIZE;
 	mmu_notifier_invalidate_range_start(mm, mmun_start, mmun_end);
 
-	if (!page_vma_mapped_walk(&pvmw))
+	ptep = page_check_address(page, mm, addr, &ptl, 0);
+	if (!ptep)
 		goto out_mn;
-	if (WARN_ONCE(!pvmw.pte, "Unexpected PMD mapping?"))
-		goto out_unlock;
 
-	if (pte_write(*pvmw.pte) || pte_dirty(*pvmw.pte)) {
+	if (pte_write(*ptep) || pte_dirty(*ptep)) {
 		pte_t entry;
 
 		swapped = PageSwapCache(page);
-		flush_cache_page(vma, pvmw.address, page_to_pfn(page));
+		flush_cache_page(vma, addr, page_to_pfn(page));
 		/*
 		 * Ok this is tricky, when get_user_pages_fast() run it doesn't
 		 * take any lock, therefore the check that we are going to make
@@ -894,25 +892,25 @@ static int write_protect_page(struct vm_area_struct *vma, struct page *page,
 		 * this assure us that no O_DIRECT can happen after the check
 		 * or in the middle of the check.
 		 */
-		entry = ptep_clear_flush_notify(vma, pvmw.address, pvmw.pte);
+		entry = ptep_clear_flush_notify(vma, addr, ptep);
 		/*
 		 * Check that no O_DIRECT or similar I/O is in progress on the
 		 * page
 		 */
 		if (page_mapcount(page) + 1 + swapped != page_count(page)) {
-			set_pte_at(mm, pvmw.address, pvmw.pte, entry);
+			set_pte_at(mm, addr, ptep, entry);
 			goto out_unlock;
 		}
 		if (pte_dirty(entry))
 			set_page_dirty(page);
 		entry = pte_mkclean(pte_wrprotect(entry));
-		set_pte_at_notify(mm, pvmw.address, pvmw.pte, entry);
+		set_pte_at_notify(mm, addr, ptep, entry);
 	}
-	*orig_pte = *pvmw.pte;
+	*orig_pte = *ptep;
 	err = 0;
 
 out_unlock:
-	page_vma_mapped_walk_done(&pvmw);
+	pte_unmap_unlock(ptep, ptl);
 out_mn:
 	mmu_notifier_invalidate_range_end(mm, mmun_start, mmun_end);
 out:
-- 
2.7.4