arm64 updates for 3.19
Changes include:

  - Support for alternative instruction patching from Andre
  - seccomp from Akashi
  - Some AArch32 instruction emulation, required by the Android folks
  - Optimisations for exception entry/exit code, cmpxchg, pcpu atomics
  - mmu_gather range calculations moved into core code
  - EFI updates from Ard, including long-awaited SMBIOS support
  - /proc/cpuinfo fixes to align with the format used by arch/arm/
  - A few non-critical fixes across the architecture

-----BEGIN PGP SIGNATURE-----
Version: GnuPG v1

iQEcBAABCgAGBQJUhbSAAAoJELescNyEwWM07PQH/AolxqOJTTg8TKe2wvRC+DwY
R98bcECMwhXvwep1KhTBew7z7NRzXJvVVs+EePSpXWX2+KK2aWN4L50rAb9ow4ty
PZ5EFw564g3rUpc7cbqIrM/lasiYWuIWw/BL+wccOm3mWbZfokBB2t0tn/2rVv0K
5tf2VCLLxgiFJPLuYk61uH7Nshvv5uJ6ODwdXjbrH+Mfl6xsaiKv17ZrfP4D/M4o
hrLoXxVTuuWj3sy/lBJv8vbTbKbQ6BGl9JQhBZGZHeKOdvX7UnbKH4N5vWLUFZya
QYO92AK1xGolu8a9bEfzrmxn0zXeAHgFTnRwtDCekOvy0kTR9MRIqXASXKO3ZEU=
=rnFX
-----END PGP SIGNATURE-----

Merge tag 'arm64-upstream' of git://git.kernel.org/pub/scm/linux/kernel/git/arm64/linux

Pull arm64 updates from Will Deacon:
 "Here's the usual mixed bag of arm64 updates, also including some
  related EFI changes (Acked by Matt) and the MMU gather range cleanup
  (Acked by you).

  Changes include:

   - support for alternative instruction patching from Andre
   - seccomp from Akashi
   - some AArch32 instruction emulation, required by the Android folks
   - optimisations for exception entry/exit code, cmpxchg, pcpu atomics
   - mmu_gather range calculations moved into core code
   - EFI updates from Ard, including long-awaited SMBIOS support
   - /proc/cpuinfo fixes to align with the format used by arch/arm/
   - a few non-critical fixes across the architecture"

* tag 'arm64-upstream' of git://git.kernel.org/pub/scm/linux/kernel/git/arm64/linux: (70 commits)
  arm64: remove the unnecessary arm64_swiotlb_init()
  arm64: add module support for alternatives fixups
  arm64: perf: Prevent wraparound during overflow
  arm64/include/asm: Fixed a warning about 'struct pt_regs'
  arm64: Provide a namespace to NCAPS
  arm64: bpf: lift restriction on last instruction
  arm64: Implement support for read-mostly sections
  arm64: compat: align cacheflush syscall with arch/arm
  arm64: add seccomp support
  arm64: add SIGSYS siginfo for compat task
  arm64: add seccomp syscall for compat task
  asm-generic: add generic seccomp.h for secure computing mode 1
  arm64: ptrace: allow tracer to skip a system call
  arm64: ptrace: add NT_ARM_SYSTEM_CALL regset
  arm64: Move some head.text functions to executable section
  arm64: jump labels: NOP out NOP -> NOP replacement
  arm64: add support to dump the kernel page tables
  arm64: Add FIX_HOLE to permanent fixed addresses
  arm64: alternatives: fix pr_fmt string for consistency
  arm64: vmlinux.lds.S: don't discard .exit.* sections at link-time
  ...
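Since seccomp is the headline item, here is a quick reminder of what it enables from userspace once arm64 gains support: a task can install a classic-BPF filter over its own system calls via prctl(). The snippet below is a generic, illustrative example of that long-standing API, not code from this pull; a production filter would also validate seccomp_data->arch (e.g. against AUDIT_ARCH_AARCH64) before trusting the syscall number.

/* Illustrative seccomp-BPF sketch: allow read/write/exit_group/rt_sigreturn,
 * kill the task on anything else. */
#include <stdio.h>
#include <stddef.h>
#include <unistd.h>
#include <sys/prctl.h>
#include <sys/syscall.h>
#include <linux/filter.h>
#include <linux/seccomp.h>

int main(void)
{
        struct sock_filter filter[] = {
                /* Load the syscall number from struct seccomp_data. */
                BPF_STMT(BPF_LD | BPF_W | BPF_ABS, offsetof(struct seccomp_data, nr)),
                /* Small whitelist: jump to the final ALLOW on a match. */
                BPF_JUMP(BPF_JMP | BPF_JEQ | BPF_K, __NR_read,         4, 0),
                BPF_JUMP(BPF_JMP | BPF_JEQ | BPF_K, __NR_write,        3, 0),
                BPF_JUMP(BPF_JMP | BPF_JEQ | BPF_K, __NR_exit_group,   2, 0),
                BPF_JUMP(BPF_JMP | BPF_JEQ | BPF_K, __NR_rt_sigreturn, 1, 0),
                BPF_STMT(BPF_RET | BPF_K, SECCOMP_RET_KILL),
                BPF_STMT(BPF_RET | BPF_K, SECCOMP_RET_ALLOW),
        };
        struct sock_fprog prog = {
                .len = sizeof(filter) / sizeof(filter[0]),
                .filter = filter,
        };

        /* Required so an unprivileged task may install a filter. */
        if (prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0))
                return 1;
        if (prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &prog))
                return 1;

        write(1, "filter installed\n", 17);     /* still allowed */
        _exit(0);                               /* exit_group: allowed */
}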
commit b64bb1d758
84 changed files with 3151 additions and 718 deletions

include/asm-generic/seccomp.h (new file, 30 lines)
@@ -0,0 +1,30 @@
+/*
+ * include/asm-generic/seccomp.h
+ *
+ * Copyright (C) 2014 Linaro Limited
+ * Author: AKASHI Takahiro <takahiro.akashi@linaro.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#ifndef _ASM_GENERIC_SECCOMP_H
+#define _ASM_GENERIC_SECCOMP_H
+
+#include <linux/unistd.h>
+
+#if defined(CONFIG_COMPAT) && !defined(__NR_seccomp_read_32)
+#define __NR_seccomp_read_32		__NR_read
+#define __NR_seccomp_write_32		__NR_write
+#define __NR_seccomp_exit_32		__NR_exit
+#define __NR_seccomp_sigreturn_32	__NR_rt_sigreturn
+#endif /* CONFIG_COMPAT && ! already defined */
+
+#define __NR_seccomp_read		__NR_read
+#define __NR_seccomp_write		__NR_write
+#define __NR_seccomp_exit		__NR_exit
+#ifndef __NR_seccomp_sigreturn
+#define __NR_seccomp_sigreturn		__NR_rt_sigreturn
+#endif
+
+#endif /* _ASM_GENERIC_SECCOMP_H */
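For context, these __NR_seccomp_* aliases are what secure computing mode 1 (strict mode) compares incoming syscall numbers against; the generic header lets an architecture such as arm64 pick up the stock read/write/exit/sigreturn mapping (plus the *_32 compat variants) without supplying its own asm/seccomp.h. A minimal sketch of how such a whitelist check can be consumed follows; it is illustrative only, not the actual kernel/seccomp.c code, and the helper name and -1 sentinel are invented for the example.

#include <linux/types.h>
#include <asm-generic/seccomp.h>

/* Illustrative strict-mode whitelist built from the aliases above. */
static const int mode1_allowed_syscalls[] = {
	__NR_seccomp_read,
	__NR_seccomp_write,
	__NR_seccomp_exit,
	__NR_seccomp_sigreturn,
	-1,				/* sentinel for this example */
};

static bool mode1_syscall_allowed(int nr)
{
	const int *s;

	for (s = mode1_allowed_syscalls; *s != -1; s++)
		if (*s == nr)
			return true;	/* permitted in strict mode */

	return false;			/* anything else is fatal */
}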
include/asm-generic/tlb.h

@@ -96,10 +96,9 @@ struct mmu_gather {
 #endif
 	unsigned long		start;
 	unsigned long		end;
-	unsigned int		need_flush : 1,	/* Did free PTEs */
 	/* we are in the middle of an operation to clear
 	 * a full mm and can make some optimizations */
-				fullmm : 1,
+	unsigned int		fullmm : 1,
 	/* we have performed an operation which
 	 * requires a complete flush of the tlb */
 				need_flush_all : 1;
@@ -128,16 +127,54 @@ static inline void tlb_remove_page(struct mmu_gather *tlb, struct page *page)
 		tlb_flush_mmu(tlb);
 }
 
+static inline void __tlb_adjust_range(struct mmu_gather *tlb,
+				      unsigned long address)
+{
+	tlb->start = min(tlb->start, address);
+	tlb->end = max(tlb->end, address + PAGE_SIZE);
+}
+
+static inline void __tlb_reset_range(struct mmu_gather *tlb)
+{
+	tlb->start = TASK_SIZE;
+	tlb->end = 0;
+}
+
 /*
  * In the case of tlb vma handling, we can optimise these away in the
  * case where we're doing a full MM flush. When we're doing a munmap,
  * the vmas are adjusted to only cover the region to be torn down.
  */
 #ifndef tlb_start_vma
 #define tlb_start_vma(tlb, vma) do { } while (0)
 #endif
 
+#define __tlb_end_vma(tlb, vma)					\
+	do {							\
+		if (!tlb->fullmm && tlb->end) {			\
+			tlb_flush(tlb);				\
+			__tlb_reset_range(tlb);			\
+		}						\
+	} while (0)
+
 #ifndef tlb_end_vma
+#define tlb_end_vma	__tlb_end_vma
 #endif
 
 #ifndef __tlb_remove_tlb_entry
 #define __tlb_remove_tlb_entry(tlb, ptep, address) do { } while (0)
 #endif
 
 /**
  * tlb_remove_tlb_entry - remember a pte unmapping for later tlb invalidation.
  *
- * Record the fact that pte's were really umapped in ->need_flush, so we can
- * later optimise away the tlb invalidate. This helps when userspace is
- * unmapping already-unmapped pages, which happens quite a lot.
+ * Record the fact that pte's were really unmapped by updating the range,
+ * so we can later optimise away the tlb invalidate. This helps when
+ * userspace is unmapping already-unmapped pages, which happens quite a lot.
  */
 #define tlb_remove_tlb_entry(tlb, ptep, address)		\
 	do {							\
-		tlb->need_flush = 1;				\
+		__tlb_adjust_range(tlb, address);		\
 		__tlb_remove_tlb_entry(tlb, ptep, address);	\
 	} while (0)
 
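The point of tracking tlb->start/tlb->end in the generic code is that an architecture's tlb_flush() can then invalidate only the touched window instead of the whole address space. A rough sketch of that consumer side, loosely modelled on the arm64 asm/tlb.h added in this series (treat it as illustrative rather than the exact upstream code):

/* Illustrative consumer of the generic range tracking: flush everything for
 * a full-mm teardown, otherwise only the [tlb->start, tlb->end) range that
 * __tlb_adjust_range() accumulated. */
static inline void tlb_flush(struct mmu_gather *tlb)
{
	if (tlb->fullmm) {
		flush_tlb_mm(tlb->mm);		/* whole address space */
	} else if (tlb->end) {
		struct vm_area_struct vma = { .vm_mm = tlb->mm, };

		flush_tlb_range(&vma, tlb->start, tlb->end);
	}
}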
@@ -151,27 +188,27 @@ static inline void tlb_remove_page(struct mmu_gather *tlb, struct page *page)
 
 #define tlb_remove_pmd_tlb_entry(tlb, pmdp, address)		\
 	do {							\
-		tlb->need_flush = 1;				\
+		__tlb_adjust_range(tlb, address);		\
 		__tlb_remove_pmd_tlb_entry(tlb, pmdp, address);	\
 	} while (0)
 
 #define pte_free_tlb(tlb, ptep, address)			\
 	do {							\
-		tlb->need_flush = 1;				\
+		__tlb_adjust_range(tlb, address);		\
 		__pte_free_tlb(tlb, ptep, address);		\
 	} while (0)
 
 #ifndef __ARCH_HAS_4LEVEL_HACK
 #define pud_free_tlb(tlb, pudp, address)			\
 	do {							\
-		tlb->need_flush = 1;				\
+		__tlb_adjust_range(tlb, address);		\
 		__pud_free_tlb(tlb, pudp, address);		\
 	} while (0)
 #endif
 
 #define pmd_free_tlb(tlb, pmdp, address)			\
 	do {							\
-		tlb->need_flush = 1;				\
+		__tlb_adjust_range(tlb, address);		\
 		__pmd_free_tlb(tlb, pmdp, address);		\
 	} while (0)
 
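Taken together, every PTE/PMD/PUD teardown now feeds __tlb_adjust_range(), so by the time the flush happens the gather structure holds the smallest page-aligned window covering everything that was unmapped. A tiny self-contained demo of that min/max accumulation (plain userspace C written for illustration; it mirrors the helper's logic rather than reusing kernel code):

/* Unmapping pages at 0x400000 and 0x402000 yields one flush range of
 * three pages, [0x400000, 0x403000), instead of a full-mm invalidate. */
#include <stdio.h>

#define PAGE_SIZE 4096UL
#define TASK_SIZE (~0UL)		/* stand-in for the kernel constant */

struct range { unsigned long start, end; };

static void adjust(struct range *r, unsigned long addr)
{
	if (addr < r->start)
		r->start = addr;
	if (addr + PAGE_SIZE > r->end)
		r->end = addr + PAGE_SIZE;
}

int main(void)
{
	struct range r = { .start = TASK_SIZE, .end = 0 };	/* reset state */

	adjust(&r, 0x400000);
	adjust(&r, 0x402000);
	printf("flush [%#lx, %#lx)\n", r.start, r.end);
	return 0;
}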