From 0000000000000000000000000000000000000000 Mon Sep 17 00:00:00 2001
From: Andy Lutomirski <luto@kernel.org>
Date: Tue, 12 Dec 2017 07:56:42 -0800
Subject: [PATCH] x86/pti: Map the vsyscall page if needed
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

CVE-2017-5754

Make VSYSCALLs work fully in PTI mode by mapping them properly to the user
space visible page tables.

[ tglx: Hide unused functions (Patch by Arnd Bergmann) ]

Signed-off-by: Andy Lutomirski <luto@kernel.org>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Cc: Borislav Petkov <bp@alien8.de>
Cc: Brian Gerst <brgerst@gmail.com>
Cc: Dave Hansen <dave.hansen@linux.intel.com>
Cc: David Laight <David.Laight@aculab.com>
Cc: H. Peter Anvin <hpa@zytor.com>
Cc: Josh Poimboeuf <jpoimboe@redhat.com>
Cc: Juergen Gross <jgross@suse.com>
Cc: Kees Cook <keescook@chromium.org>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Signed-off-by: Ingo Molnar <mingo@kernel.org>
(cherry picked from commit 85900ea51577e31b186e523c8f4e068c79ecc7d3)
Signed-off-by: Andy Whitcroft <apw@canonical.com>
Signed-off-by: Kleber Sacilotto de Souza <kleber.souza@canonical.com>
(cherry picked from commit 7a2ba0ea0a18cfc1f18c3f1389ef85f2a0d3227d)
Signed-off-by: Fabian Grünbichler <f.gruenbichler@proxmox.com>
---
 arch/x86/include/asm/vsyscall.h       |  1 +
 arch/x86/entry/vsyscall/vsyscall_64.c |  6 ++--
 arch/x86/mm/pti.c                     | 65 +++++++++++++++++++++++++++++++++++
 3 files changed, 69 insertions(+), 3 deletions(-)

diff --git a/arch/x86/include/asm/vsyscall.h b/arch/x86/include/asm/vsyscall.h
index 6ba66ee79710..0eaeb223d692 100644
--- a/arch/x86/include/asm/vsyscall.h
+++ b/arch/x86/include/asm/vsyscall.h
@@ -6,6 +6,7 @@
 
 #ifdef CONFIG_X86_VSYSCALL_EMULATION
 extern void map_vsyscall(void);
+extern void set_vsyscall_pgtable_user_bits(pgd_t *root);
 
 /*
  * Called on instruction fetch fault in vsyscall page.
diff --git a/arch/x86/entry/vsyscall/vsyscall_64.c b/arch/x86/entry/vsyscall/vsyscall_64.c
index 5e56a4ced848..238b4bcd3c47 100644
--- a/arch/x86/entry/vsyscall/vsyscall_64.c
+++ b/arch/x86/entry/vsyscall/vsyscall_64.c
@@ -343,14 +343,14 @@ int in_gate_area_no_mm(unsigned long addr)
  * vsyscalls but leave the page not present. If so, we skip calling
  * this.
  */
-static void __init set_vsyscall_pgtable_user_bits(void)
+void __init set_vsyscall_pgtable_user_bits(pgd_t *root)
 {
 	pgd_t *pgd;
 	p4d_t *p4d;
 	pud_t *pud;
 	pmd_t *pmd;
 
-	pgd = pgd_offset_k(VSYSCALL_ADDR);
+	pgd = pgd_offset_pgd(root, VSYSCALL_ADDR);
 	set_pgd(pgd, __pgd(pgd_val(*pgd) | _PAGE_USER));
 	p4d = p4d_offset(pgd, VSYSCALL_ADDR);
 #if CONFIG_PGTABLE_LEVELS >= 5
@@ -372,7 +372,7 @@ void __init map_vsyscall(void)
 			     vsyscall_mode == NATIVE
 			     ? PAGE_KERNEL_VSYSCALL
 			     : PAGE_KERNEL_VVAR);
-		set_vsyscall_pgtable_user_bits();
+		set_vsyscall_pgtable_user_bits(swapper_pg_dir);
 	}
 
 	BUILD_BUG_ON((unsigned long)__fix_to_virt(VSYSCALL_PAGE) !=
diff --git a/arch/x86/mm/pti.c b/arch/x86/mm/pti.c
index b1c38ef9fbbb..bce8aea65606 100644
--- a/arch/x86/mm/pti.c
+++ b/arch/x86/mm/pti.c
@@ -38,6 +38,7 @@
 
 #include <asm/cpufeature.h>
 #include <asm/hypervisor.h>
+#include <asm/vsyscall.h>
 #include <asm/cmdline.h>
 #include <asm/pti.h>
 #include <asm/pgtable.h>
@@ -223,6 +224,69 @@ static pmd_t *pti_user_pagetable_walk_pmd(unsigned long address)
 	return pmd_offset(pud, address);
 }
 
+#ifdef CONFIG_X86_VSYSCALL_EMULATION
+/*
+ * Walk the shadow copy of the page tables (optionally) trying to allocate
+ * page table pages on the way down. Does not support large pages.
+ *
+ * Note: this is only used when mapping *new* kernel data into the
+ * user/shadow page tables. It is never used for userspace data.
+ *
+ * Returns a pointer to a PTE on success, or NULL on failure.
+ */
+static __init pte_t *pti_user_pagetable_walk_pte(unsigned long address)
+{
+	gfp_t gfp = (GFP_KERNEL | __GFP_NOTRACK | __GFP_ZERO);
+	pmd_t *pmd = pti_user_pagetable_walk_pmd(address);
+	pte_t *pte;
+
+	/* We can't do anything sensible if we hit a large mapping. */
+	if (pmd_large(*pmd)) {
+		WARN_ON(1);
+		return NULL;
+	}
+
+	if (pmd_none(*pmd)) {
+		unsigned long new_pte_page = __get_free_page(gfp);
+		if (!new_pte_page)
+			return NULL;
+
+		if (pmd_none(*pmd)) {
+			set_pmd(pmd, __pmd(_KERNPG_TABLE | __pa(new_pte_page)));
+			new_pte_page = 0;
+		}
+		if (new_pte_page)
+			free_page(new_pte_page);
+	}
+
+	pte = pte_offset_kernel(pmd, address);
+	if (pte_flags(*pte) & _PAGE_USER) {
+		WARN_ONCE(1, "attempt to walk to user pte\n");
+		return NULL;
+	}
+	return pte;
+}
+
+static void __init pti_setup_vsyscall(void)
+{
+	pte_t *pte, *target_pte;
+	unsigned int level;
+
+	pte = lookup_address(VSYSCALL_ADDR, &level);
+	if (!pte || WARN_ON(level != PG_LEVEL_4K) || pte_none(*pte))
+		return;
+
+	target_pte = pti_user_pagetable_walk_pte(VSYSCALL_ADDR);
+	if (WARN_ON(!target_pte))
+		return;
+
+	*target_pte = *pte;
+	set_vsyscall_pgtable_user_bits(kernel_to_user_pgdp(swapper_pg_dir));
+}
+#else
+static void __init pti_setup_vsyscall(void) { }
+#endif
+
 static void __init
 pti_clone_pmds(unsigned long start, unsigned long end, pmdval_t clear)
 {
@@ -319,4 +383,5 @@ void __init pti_init(void)
 	pti_clone_user_shared();
 	pti_clone_entry_text();
 	pti_setup_espfix64();
+	pti_setup_vsyscall();
 }
-- 
2.14.2
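
As a quick sanity check (not part of the patch itself), the change can be exercised from userspace: the legacy vsyscall page lives at a fixed ABI address, with time() at 0xffffffffff600400, so calling through that address on a PTI-enabled kernel verifies the page is reachable from the user page tables. This is a minimal sketch, assuming x86-64 with vsyscall=emulate or vsyscall=native on the kernel command line; with vsyscall=none the call faults by design.

/* vsyscall_check.c - call the legacy vsyscall time() entry directly.
 * Build: cc -o vsyscall_check vsyscall_check.c
 */
#include <stdio.h>
#include <time.h>

/* Fixed ABI address of the legacy vsyscall time() entry point. */
#define VSYSCALL_TIME 0xffffffffff600400UL

int main(void)
{
	time_t (*vtime)(time_t *) = (time_t (*)(time_t *))VSYSCALL_TIME;

	/* Succeeds (returns seconds since the epoch) if the vsyscall
	 * page is mapped and executable/emulated; SIGSEGVs otherwise. */
	time_t t = vtime(NULL);

	printf("vsyscall time() returned %ld\n", (long)t);
	return 0;
}

The result should match time(NULL) from libc; /proc/self/maps should also still list a [vsyscall] region.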