a0f7ab8a6a
cherry-pick from upstream 4.14
From 0000000000000000000000000000000000000000 Mon Sep 17 00:00:00 2001
From: Andy Lutomirski <luto@kernel.org>
Date: Mon, 4 Dec 2017 15:07:22 +0100
Subject: [PATCH] x86/espfix/64: Stop assuming that pt_regs is on the entry
 stack
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

CVE-2017-5754

When we start using an entry trampoline, a #GP from userspace will
be delivered on the entry stack, not on the task stack. Fix the
espfix64 #DF fixup to set up #GP according to TSS.SP0, rather than
assuming that pt_regs + 1 == SP0. This won't change anything
without an entry stack, but it will make the code continue to work
when an entry stack is added.

While we're at it, improve the comments to explain what's actually
going on.

Signed-off-by: Andy Lutomirski <luto@kernel.org>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Reviewed-by: Thomas Gleixner <tglx@linutronix.de>
Reviewed-by: Borislav Petkov <bp@suse.de>
Cc: Boris Ostrovsky <boris.ostrovsky@oracle.com>
Cc: Borislav Petkov <bp@alien8.de>
Cc: Borislav Petkov <bpetkov@suse.de>
Cc: Brian Gerst <brgerst@gmail.com>
Cc: Dave Hansen <dave.hansen@intel.com>
Cc: Dave Hansen <dave.hansen@linux.intel.com>
Cc: David Laight <David.Laight@aculab.com>
Cc: Denys Vlasenko <dvlasenk@redhat.com>
Cc: Eduardo Valentin <eduval@amazon.com>
Cc: Greg KH <gregkh@linuxfoundation.org>
Cc: H. Peter Anvin <hpa@zytor.com>
Cc: Josh Poimboeuf <jpoimboe@redhat.com>
Cc: Juergen Gross <jgross@suse.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Rik van Riel <riel@redhat.com>
Cc: Will Deacon <will.deacon@arm.com>
Cc: aliguori@amazon.com
Cc: daniel.gruss@iaik.tugraz.at
Cc: hughd@google.com
Cc: keescook@google.com
Link: https://lkml.kernel.org/r/20171204150606.130778051@linutronix.de
Signed-off-by: Ingo Molnar <mingo@kernel.org>
(cherry picked from commit 6d9256f0a89eaff97fca6006100bcaea8d1d8bdb)
Signed-off-by: Andy Whitcroft <apw@canonical.com>
Signed-off-by: Kleber Sacilotto de Souza <kleber.souza@canonical.com>
(cherry picked from commit f5d8df279d00c22e4c338a5891a874a59947e5f5)
Signed-off-by: Fabian Grünbichler <f.gruenbichler@proxmox.com>
---
 arch/x86/kernel/traps.c | 37 ++++++++++++++++++++++++++++---------
 1 file changed, 28 insertions(+), 9 deletions(-)

diff --git a/arch/x86/kernel/traps.c b/arch/x86/kernel/traps.c
index 7b1d0df624cf..b69db1ee8733 100644
--- a/arch/x86/kernel/traps.c
+++ b/arch/x86/kernel/traps.c
@@ -360,9 +360,15 @@ dotraplinkage void do_double_fault(struct pt_regs *regs, long error_code)
 
 	/*
 	 * If IRET takes a non-IST fault on the espfix64 stack, then we
-	 * end up promoting it to a doublefault.  In that case, modify
-	 * the stack to make it look like we just entered the #GP
-	 * handler from user space, similar to bad_iret.
+	 * end up promoting it to a doublefault.  In that case, take
+	 * advantage of the fact that we're not using the normal (TSS.sp0)
+	 * stack right now.  We can write a fake #GP(0) frame at TSS.sp0
+	 * and then modify our own IRET frame so that, when we return,
+	 * we land directly at the #GP(0) vector with the stack already
+	 * set up according to its expectations.
+	 *
+	 * The net result is that our #GP handler will think that we
+	 * entered from usermode with the bad user context.
 	 *
 	 * No need for ist_enter here because we don't use RCU.
 	 */
@@ -370,13 +376,26 @@ dotraplinkage void do_double_fault(struct pt_regs *regs, long error_code)
 	    regs->cs == __KERNEL_CS &&
 	    regs->ip == (unsigned long)native_irq_return_iret)
 	{
-		struct pt_regs *normal_regs = task_pt_regs(current);
+		struct pt_regs *gpregs = (struct pt_regs *)this_cpu_read(cpu_tss.x86_tss.sp0) - 1;
+
+		/*
+		 * regs->sp points to the failing IRET frame on the
+		 * ESPFIX64 stack.  Copy it to the entry stack.  This fills
+		 * in gpregs->ss through gpregs->ip.
+		 *
+		 */
+		memmove(&gpregs->ip, (void *)regs->sp, 5*8);
+		gpregs->orig_ax = 0;  /* Missing (lost) #GP error code */
 
-		/* Fake a #GP(0) from userspace. */
-		memmove(&normal_regs->ip, (void *)regs->sp, 5*8);
-		normal_regs->orig_ax = 0;  /* Missing (lost) #GP error code */
+		/*
+		 * Adjust our frame so that we return straight to the #GP
+		 * vector with the expected RSP value.  This is safe because
+		 * we won't enable interrupts or schedule before we invoke
+		 * general_protection, so nothing will clobber the stack
+		 * frame we just set up.
+		 */
 		regs->ip = (unsigned long)general_protection;
-		regs->sp = (unsigned long)&normal_regs->orig_ax;
+		regs->sp = (unsigned long)&gpregs->orig_ax;
 
 		return;
 	}
@@ -401,7 +420,7 @@ dotraplinkage void do_double_fault(struct pt_regs *regs, long error_code)
 	 *
 	 * Processors update CR2 whenever a page fault is detected.  If a
 	 * second page fault occurs while an earlier page fault is being
-	 * deliv- ered, the faulting linear address of the second fault will
+	 * delivered, the faulting linear address of the second fault will
 	 * overwrite the contents of CR2 (replacing the previous
 	 * address).  These updates to CR2 occur even if the page fault
 	 * results in a double fault or occurs during the delivery of a
-- 
2.14.2
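
The layout invariant the patch relies on can be seen in isolation with a
minimal userspace sketch (not kernel code; struct mock_pt_regs models only
the tail of x86-64 pt_regs, and entry_stack, sp0 and espfix_frame are
invented stand-ins): pt_regs ends exactly at TSS.sp0, so subtracting one
struct from sp0 lands on the frame the #GP handler expects, with no
assumption that task_pt_regs(current) + 1 == sp0.

#include <assert.h>
#include <stdio.h>
#include <string.h>

/* Tail of x86-64 pt_regs: orig_ax plus the 5-word hardware IRET frame. */
struct mock_pt_regs {
	/* ... general-purpose registers would precede these ... */
	unsigned long orig_ax;
	unsigned long ip;
	unsigned long cs;
	unsigned long flags;
	unsigned long sp;
	unsigned long ss;
};

int main(void)
{
	/* Stand-in for the per-CPU entry stack; TSS.sp0 points at its top. */
	static unsigned long entry_stack[64];
	unsigned long sp0 = (unsigned long)&entry_stack[64];

	/* The failing IRET frame left on the espfix64 stack: ip, cs,
	 * flags, sp, ss -- the 5*8 bytes the patch copies. */
	unsigned long espfix_frame[5] = { 0xdead, 0x33, 0x202, 0xbeef, 0x2b };

	/* Key step of the fix: place the fake frame by subtracting one
	 * pt_regs-sized struct from sp0, mirroring
	 * (struct pt_regs *)this_cpu_read(cpu_tss.x86_tss.sp0) - 1. */
	struct mock_pt_regs *gpregs = (struct mock_pt_regs *)sp0 - 1;

	/* Fill in gpregs->ip through gpregs->ss from the failing frame. */
	memmove(&gpregs->ip, espfix_frame, 5 * 8);
	gpregs->orig_ax = 0;	/* the lost #GP error code */

	/* The frame's last word really does end exactly at sp0. */
	assert((unsigned long)(gpregs + 1) == sp0);
	printf("fake #GP frame at %p, ends at sp0=%#lx\n",
	       (void *)gpregs, sp0);
	return 0;
}

Compiled with any C compiler, the assert holds by construction; it is the
same invariant that lets do_double_fault() point regs->sp at
&gpregs->orig_ax and regs->ip at general_protection, so the IRET from #DF
lands in the #GP handler with a frame that looks like a normal user-mode
entry.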