Merge branch 'linux-linaro-lsk-v4.4-android' of git://git.linaro.org/kernel/linux-linaro-stable.git
* linux-linaro-lsk-v4.4-android: (733 commits)
LSK-ANDROID: memcg: Remove wrong ->attach callback
LSK-ANDROID: arm64: mm: Fix __create_pgd_mapping() call
ANDROID: sdcardfs: Move default_normal to superblock
blkdev: Refactoring block io latency histogram codes
FROMLIST: arm64: kpti: Fix the interaction between ASID switching and software PAN
FROMLIST: arm64: Move post_ttbr_update_workaround to C code
FROMLIST: arm64: mm: Rename post_ttbr0_update_workaround
sched: EAS: Initialize push_task as NULL to avoid direct reference on out_unlock path
fscrypt: updates on 4.15-rc4
ANDROID: uid_sys_stats: fix the comment
BACKPORT: tee: indicate privileged dev in gen_caps
BACKPORT: tee: optee: sync with new naming of interrupts
BACKPORT: tee: tee_shm: Constify dma_buf_ops structures.
BACKPORT: tee: optee: interruptible RPC sleep
BACKPORT: tee: optee: add const to tee_driver_ops and tee_desc structures
BACKPORT: tee.txt: standardize document format
BACKPORT: tee: add forward declaration for struct device
BACKPORT: tee: optee: fix uninitialized symbol 'parg'
BACKPORT: tee: add ARM_SMCCC dependency
BACKPORT: selinux: nlmsgtab: add SOCK_DESTROY to the netlink mapping tables
...
Conflicts:
arch/arm64/kernel/vdso.c
drivers/usb/host/xhci-plat.c
include/drm/drmP.h
include/linux/kasan.h
kernel/time/timekeeping.c
mm/kasan/kasan.c
security/selinux/nlmsgtab.c
Also add this commit:
0bcdc0987c ("time: Fix ktime_get_raw() incorrect base accumulation")
commit 640193f76b
730 changed files with 11966 additions and 4053 deletions

Documentation/ABI/testing/sysfs-devices-system-cpu

@@ -271,3 +271,19 @@ Description:	Parameters for the CPU cache attributes
 			- WriteBack: data is written only to the cache line and
 				     the modified cache line is written to main
 				     memory only when it is replaced
+
+What:		/sys/devices/system/cpu/vulnerabilities
+		/sys/devices/system/cpu/vulnerabilities/meltdown
+		/sys/devices/system/cpu/vulnerabilities/spectre_v1
+		/sys/devices/system/cpu/vulnerabilities/spectre_v2
+Date:		January 2018
+Contact:	Linux kernel mailing list <linux-kernel@vger.kernel.org>
+Description:	Information about CPU vulnerabilities
+
+		The files are named after the code names of CPU
+		vulnerabilities. The output of those files reflects the
+		state of the CPUs in the system. Possible output values:
+
+		"Not affected"	  CPU is not affected by the vulnerability
+		"Vulnerable"	  CPU is affected and no mitigation in effect
+		"Mitigation: $M"  CPU is affected and mitigation $M is in effect
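
These attributes are plain text files, so the mitigation state of a running
system can be inspected with ordinary tools; the output below is illustrative
only and depends on the CPU and kernel configuration:

	$ grep . /sys/devices/system/cpu/vulnerabilities/*
	/sys/devices/system/cpu/vulnerabilities/meltdown:Mitigation: PTI
	/sys/devices/system/cpu/vulnerabilities/spectre_v1:Vulnerable
	/sys/devices/system/cpu/vulnerabilities/spectre_v2:Vulnerable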

Documentation/ABI/testing/sysfs-fs-f2fs

@@ -51,6 +51,18 @@ Description:
 		Controls the dirty page count condition for the in-place-update
 		policies.
 
+What:		/sys/fs/f2fs/<disk>/min_hot_blocks
+Date:		March 2017
+Contact:	"Jaegeuk Kim" <jaegeuk@kernel.org>
+Description:
+		Controls the dirty page count condition for redefining hot data.
+
+What:		/sys/fs/f2fs/<disk>/min_ssr_sections
+Date:		October 2017
+Contact:	"Chao Yu" <yuchao0@huawei.com>
+Description:
+		Controls the free section threshold to trigger SSR allocation.
+
 What:		/sys/fs/f2fs/<disk>/max_small_discards
 Date:		November 2013
 Contact:	"Jaegeuk Kim" <jaegeuk.kim@samsung.com>
@@ -96,6 +108,18 @@ Contact:	"Jaegeuk Kim" <jaegeuk@kernel.org>
 Description:
 		Controls the checkpoint timing.
 
+What:		/sys/fs/f2fs/<disk>/idle_interval
+Date:		January 2016
+Contact:	"Jaegeuk Kim" <jaegeuk@kernel.org>
+Description:
+		Controls the idle timing.
+
+What:		/sys/fs/f2fs/<disk>/iostat_enable
+Date:		August 2017
+Contact:	"Chao Yu" <yuchao0@huawei.com>
+Description:
+		Controls to enable/disable IO stat.
+
 What:		/sys/fs/f2fs/<disk>/ra_nid_pages
 Date:		October 2015
 Contact:	"Chao Yu" <chao2.yu@samsung.com>
@@ -116,6 +140,12 @@ Contact:	"Shuoran Liu" <liushuoran@huawei.com>
 Description:
 		Shows total written kbytes issued to disk.
 
+What:		/sys/fs/f2fs/<disk>/feature
+Date:		July 2017
+Contact:	"Jaegeuk Kim" <jaegeuk@kernel.org>
+Description:
+		Shows all enabled features in current device.
+
 What:		/sys/fs/f2fs/<disk>/inject_rate
 Date:		May 2016
 Contact:	"Sheng Yong" <shengyong1@huawei.com>
@@ -132,7 +162,18 @@ What:		/sys/fs/f2fs/<disk>/reserved_blocks
 Date:		June 2017
 Contact:	"Chao Yu" <yuchao0@huawei.com>
 Description:
-		Controls current reserved blocks in system.
+		Controls target reserved blocks in system, the threshold
+		is soft, it could exceed current available user space.
+
+What:		/sys/fs/f2fs/<disk>/current_reserved_blocks
+Date:		October 2017
+Contact:	"Yunlong Song" <yunlong.song@huawei.com>
+Contact:	"Chao Yu" <yuchao0@huawei.com>
+Description:
+		Shows current reserved blocks in system, it may be temporarily
+		smaller than target_reserved_blocks, but will gradually
+		increase to target_reserved_blocks when more free blocks are
+		freed by user later.
 
 What:		/sys/fs/f2fs/<disk>/gc_urgent
 Date:		August 2017
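
As a usage sketch (the device name sdb1 below is only an example; the sysfs
directory is named after the block device backing the f2fs volume):

	# echo 1 > /sys/fs/f2fs/sdb1/iostat_enable
	# cat /sys/fs/f2fs/sdb1/feature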

Documentation/kasan.txt

@@ -12,8 +12,7 @@ KASAN uses compile-time instrumentation for checking every memory access,
 therefore you will need a GCC version 4.9.2 or later. GCC 5.0 or later is
 required for detection of out-of-bounds accesses to stack or global variables.
 
-Currently KASAN is supported only for x86_64 architecture and requires the
-kernel to be built with the SLUB allocator.
+Currently KASAN is supported only for x86_64 architecture.
 
 1. Usage
 ========
@@ -27,7 +26,7 @@ inline are compiler instrumentation types. The former produces smaller binary
 the latter is 1.1 - 2 times faster. Inline instrumentation requires a GCC
 version 5.0 or later.
 
-Currently KASAN works only with the SLUB memory allocator.
+KASAN works with both SLUB and SLAB memory allocators.
 For better bug detection and nicer reporting, enable CONFIG_STACKTRACE.
 
 To disable instrumentation for specific files or directories, add a line
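
For context, the class of bug KASAN reports is easy to reproduce; a minimal
sketch in kernel-module style (the function name is made up for illustration,
modeled loosely on lib/test_kasan.c):

	#include <linux/slab.h>

	static noinline void kasan_oob_demo(void)
	{
		char *p = kmalloc(123, GFP_KERNEL);

		if (!p)
			return;
		p[123] = 'x';	/* out-of-bounds write, one byte past the object */
		kfree(p);
	}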

Documentation/kcov.txt (new file, 111 lines)

kcov: code coverage for fuzzing
===============================

kcov exposes kernel code coverage information in a form suitable for coverage-
guided fuzzing (randomized testing). Coverage data of a running kernel is
exported via the "kcov" debugfs file. Coverage collection is enabled on a task
basis, and thus it can capture precise coverage of a single system call.

Note that kcov does not aim to collect as much coverage as possible. It aims
to collect more or less stable coverage that is a function of syscall inputs.
To achieve this goal it does not collect coverage in soft/hard interrupts,
and instrumentation of some inherently non-deterministic parts of the kernel
is disabled (e.g. scheduler, locking).

Usage:
======

Configure the kernel with:

        CONFIG_KCOV=y

CONFIG_KCOV requires gcc built from revision 231296 or later.
Profiling data will only become accessible once debugfs has been mounted:

        mount -t debugfs none /sys/kernel/debug

The following program demonstrates kcov usage from within a test program:

    #include <stdio.h>
    #include <stddef.h>
    #include <stdint.h>
    #include <stdlib.h>
    #include <sys/types.h>
    #include <sys/stat.h>
    #include <sys/ioctl.h>
    #include <sys/mman.h>
    #include <unistd.h>
    #include <fcntl.h>

    #define KCOV_INIT_TRACE		_IOR('c', 1, unsigned long)
    #define KCOV_ENABLE			_IO('c', 100)
    #define KCOV_DISABLE		_IO('c', 101)
    #define COVER_SIZE			(64<<10)

    int main(int argc, char **argv)
    {
	int fd;
	unsigned long *cover, n, i;

	/* A single file descriptor allows coverage collection on a single
	 * thread.
	 */
	fd = open("/sys/kernel/debug/kcov", O_RDWR);
	if (fd == -1)
		perror("open"), exit(1);
	/* Setup trace mode and trace size. */
	if (ioctl(fd, KCOV_INIT_TRACE, COVER_SIZE))
		perror("ioctl"), exit(1);
	/* Mmap buffer shared between kernel- and user-space. */
	cover = (unsigned long*)mmap(NULL, COVER_SIZE * sizeof(unsigned long),
				     PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
	if ((void*)cover == MAP_FAILED)
		perror("mmap"), exit(1);
	/* Enable coverage collection on the current thread. */
	if (ioctl(fd, KCOV_ENABLE, 0))
		perror("ioctl"), exit(1);
	/* Reset coverage from the tail of the ioctl() call. */
	__atomic_store_n(&cover[0], 0, __ATOMIC_RELAXED);
	/* That's the target syscall. */
	read(-1, NULL, 0);
	/* Read number of PCs collected. */
	n = __atomic_load_n(&cover[0], __ATOMIC_RELAXED);
	for (i = 0; i < n; i++)
		printf("0x%lx\n", cover[i + 1]);
	/* Disable coverage collection for the current thread. After this call
	 * coverage can be enabled for a different thread.
	 */
	if (ioctl(fd, KCOV_DISABLE, 0))
		perror("ioctl"), exit(1);
	/* Free resources. */
	if (munmap(cover, COVER_SIZE * sizeof(unsigned long)))
		perror("munmap"), exit(1);
	if (close(fd))
		perror("close"), exit(1);
	return 0;
    }

After piping through addr2line, the output of the program looks as follows:

    SyS_read
    fs/read_write.c:562
    __fdget_pos
    fs/file.c:774
    __fget_light
    fs/file.c:746
    __fget_light
    fs/file.c:750
    __fget_light
    fs/file.c:760
    __fdget_pos
    fs/file.c:784
    SyS_read
    fs/read_write.c:562

If a program needs to collect coverage from several threads (independently),
it needs to open /sys/kernel/debug/kcov in each thread separately.

The interface is fine-grained to allow efficient forking of test processes.
That is, a parent process opens /sys/kernel/debug/kcov, enables trace mode,
mmaps coverage buffer and then forks child processes in a loop. Child processes
only need to enable coverage (disable happens automatically on thread end).
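
A minimal sketch of that forking pattern (not from the patch; it reuses the
headers and KCOV_* definitions from the program above, with error handling
elided):

	/* parent: one-time setup, done before forking */
	int fd = open("/sys/kernel/debug/kcov", O_RDWR);
	ioctl(fd, KCOV_INIT_TRACE, COVER_SIZE);
	unsigned long *cover = mmap(NULL, COVER_SIZE * sizeof(unsigned long),
				    PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);

	for (;;) {
		if (fork() == 0) {
			/* child: only needs to enable collection; the fd and
			 * the MAP_SHARED buffer are inherited across fork() */
			ioctl(fd, KCOV_ENABLE, 0);
			__atomic_store_n(&cover[0], 0, __ATOMIC_RELAXED);
			/* ... run one test case, then exit; kcov is disabled
			 * automatically when the task exits */
			_exit(0);
		}
		wait(NULL);	/* needs <sys/wait.h> */
		/* parent: read cover[0] PCs from the shared buffer */
	}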

Documentation/kernel-parameters.txt

@@ -2525,6 +2525,8 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
 
 	nointroute	[IA-64]
 
+	noinvpcid	[X86] Disable the INVPCID cpu feature.
+
 	nojitter	[IA-64] Disables jitter checking for ITC timers.
 
 	no-kvmclock	[X86,KVM] Disable paravirtualized KVM clock driver
@@ -2559,6 +2561,8 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
 	nopat		[X86] Disable PAT (page attribute table extension of
 			pagetables) support.
 
+	nopcid		[X86-64] Disable the PCID cpu feature.
+
 	norandmaps	Don't use address space randomization. Equivalent to
 			echo 0 > /proc/sys/kernel/randomize_va_space
 
@@ -3056,6 +3060,21 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
 	pt.		[PARIDE]
 			See Documentation/blockdev/paride.txt.
 
+	pti=		[X86_64] Control Page Table Isolation of user and
+			kernel address spaces. Disabling this feature
+			removes hardening, but improves performance of
+			system calls and interrupts.
+
+			on   - unconditionally enable
+			off  - unconditionally disable
+			auto - kernel detects whether your CPU model is
+			       vulnerable to issues that PTI mitigates
+
+			Not specifying this option is equivalent to pti=auto.
+
+	nopti		[X86_64]
+			Equivalent to pti=off
+
 	pty.legacy_count=
 			[KNL] Number of legacy pty's. Overwrites compiled-in
 			default number.
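
For example, to boot with the mitigation forced off (say, on a machine known
not to be affected), the parameter is appended to the kernel command line; the
boot-loader entry below is purely illustrative:

	linux /boot/vmlinuz-4.4.112 root=/dev/sda1 ro pti=off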

Documentation/tee.txt

@@ -1,4 +1,7 @@
+=============
 TEE subsystem
+=============
+
 This document describes the TEE subsystem in Linux.
 
 A TEE (Trusted Execution Environment) is a trusted OS running in some
@@ -80,27 +83,27 @@ The GlobalPlatform TEE Client API [5] is implemented on top of the generic
 TEE API.
 
 Picture of the relationship between the different components in the
-OP-TEE architecture.
+OP-TEE architecture::
 
-User space                  Kernel                   Secure world
-~~~~~~~~~~                  ~~~~~~                   ~~~~~~~~~~~~
-+--------+                                          +-------------+
-| Client |                                          | Trusted     |
-+--------+                                          | Application |
-   /\                                               +-------------+
-   ||                                                     /\
-   ||            +----------+                             ||
-   ||            |tee-      |                             \/
-   ||            |supplicant|                       +-------------+
-   \/            +----------+                       | TEE Internal|
-+-------+            /\                             | API         |
-+ TEE   |            ||           +--------+--------+ +-------------+
-| Client|            ||           | TEE    | OP-TEE | | OP-TEE      |
-|  API  |            \/           | subsys | driver | | Trusted OS  |
-+-------+----------------+----+-------+----+-----------+-------------+
-|      Generic TEE API        |     |     OP-TEE MSG                 |
-|  IOCTL (TEE_IOC_*)          |     |  SMCCC (OPTEE_SMC_CALL_*)      |
-+-----------------------------+     +------------------------------+
+   User space                  Kernel                   Secure world
+   ~~~~~~~~~~                  ~~~~~~                   ~~~~~~~~~~~~
+   +--------+                                          +-------------+
+   | Client |                                          | Trusted     |
+   +--------+                                          | Application |
+      /\                                               +-------------+
+      ||                                                     /\
+      ||            +----------+                             ||
+      ||            |tee-      |                             \/
+      ||            |supplicant|                       +-------------+
+      \/            +----------+                       | TEE Internal|
+   +-------+            /\                             | API         |
+   + TEE   |            ||           +--------+--------+ +-------------+
+   | Client|            ||           | TEE    | OP-TEE | | OP-TEE      |
+   |  API  |            \/           | subsys | driver | | Trusted OS  |
+   +-------+----------------+----+-------+----+-----------+-------------+
+   |      Generic TEE API        |     |     OP-TEE MSG                 |
+   |  IOCTL (TEE_IOC_*)          |     |  SMCCC (OPTEE_SMC_CALL_*)      |
+   +-----------------------------+     +------------------------------+
 
 RPC (Remote Procedure Call) are requests from secure world to kernel driver
 or tee-supplicant. An RPC is identified by a special range of SMCCC return
@@ -109,10 +112,16 @@ kernel are handled by the kernel driver. Other RPC messages will be forwarded to
 tee-supplicant without further involvement of the driver, except switching
 shared memory buffer representation.
 
-References:
+References
+==========
+
 [1] https://github.com/OP-TEE/optee_os
+
 [2] http://infocenter.arm.com/help/topic/com.arm.doc.den0028a/index.html
+
 [3] drivers/tee/optee/optee_smc.h
+
 [4] drivers/tee/optee/optee_msg.h
+
 [5] http://www.globalplatform.org/specificationsdevice.asp look for
     "TEE Client API Specification v1.0" and click download.

Documentation/x86/pti.txt (new file, 186 lines)

Overview
========

Page Table Isolation (pti, previously known as KAISER[1]) is a
countermeasure against attacks on the shared user/kernel address
space such as the "Meltdown" approach[2].

To mitigate this class of attacks, we create an independent set of
page tables for use only when running userspace applications.  When
the kernel is entered via syscalls, interrupts or exceptions, the
page tables are switched to the full "kernel" copy.  When the system
switches back to user mode, the user copy is used again.

The userspace page tables contain only a minimal amount of kernel
data: only what is needed to enter/exit the kernel such as the
entry/exit functions themselves and the interrupt descriptor table
(IDT).  There are a few strictly unnecessary things that get mapped
such as the first C function when entering an interrupt (see
comments in pti.c).

This approach helps to ensure that side-channel attacks leveraging
the paging structures do not function when PTI is enabled.  It can be
enabled by setting CONFIG_PAGE_TABLE_ISOLATION=y at compile time.
Once enabled at compile-time, it can be disabled at boot with the
'nopti' or 'pti=' kernel parameters (see kernel-parameters.txt).

Page Table Management
=====================

When PTI is enabled, the kernel manages two sets of page tables.
The first set is very similar to the single set which is present in
kernels without PTI.  This includes a complete mapping of userspace
that the kernel can use for things like copy_to_user().

Although _complete_, the user portion of the kernel page tables is
crippled by setting the NX bit in the top level.  This ensures
that any missed kernel->user CR3 switch will immediately crash
userspace upon executing its first instruction.

The userspace page tables map only the kernel data needed to enter
and exit the kernel.  This data is entirely contained in the 'struct
cpu_entry_area' structure which is placed in the fixmap which gives
each CPU's copy of the area a compile-time-fixed virtual address.

For new userspace mappings, the kernel makes the entries in its
page tables like normal.  The only difference is when the kernel
makes entries in the top (PGD) level.  In addition to setting the
entry in the main kernel PGD, a copy of the entry is made in the
userspace page tables' PGD.

This sharing at the PGD level also inherently shares all the lower
layers of the page tables.  This leaves a single, shared set of
userspace page tables to manage.  One PTE to lock, one set of
accessed bits, dirty bits, etc...

Overhead
========

Protection against side-channel attacks is important.  But,
this protection comes at a cost:

1. Increased Memory Use
  a. Each process now needs an order-1 PGD instead of order-0.
     (Consumes an additional 4k per process).
  b. The 'cpu_entry_area' structure must be 2MB in size and 2MB
     aligned so that it can be mapped by setting a single PMD
     entry.  This consumes nearly 2MB of RAM once the kernel
     is decompressed, but no space in the kernel image itself.

2. Runtime Cost
  a. CR3 manipulation to switch between the page table copies
     must be done at interrupt, syscall, and exception entry
     and exit (it can be skipped when the kernel is interrupted,
     though.)  Moves to CR3 are on the order of a hundred
     cycles, and are required at every entry and exit.
  b. A "trampoline" must be used for SYSCALL entry.  This
     trampoline depends on a smaller set of resources than the
     non-PTI SYSCALL entry code, so requires mapping fewer
     things into the userspace page tables.  The downside is
     that stacks must be switched at entry time.
  c. Global pages are disabled for all kernel structures not
     mapped into both kernel and userspace page tables.  This
     feature of the MMU allows different processes to share TLB
     entries mapping the kernel.  Losing the feature means more
     TLB misses after a context switch.  The actual loss of
     performance is very small, however, never exceeding 1%.
  d. Process Context IDentifiers (PCID) is a CPU feature that
     allows us to skip flushing the entire TLB when switching page
     tables by setting a special bit in CR3 when the page tables
     are changed.  This makes switching the page tables (at context
     switch, or kernel entry/exit) cheaper.  But, on systems with
     PCID support, the context switch code must flush both the user
     and kernel entries out of the TLB.  The user PCID TLB flush is
     deferred until the exit to userspace, minimizing the cost.
     See intel.com/sdm for the gory PCID/INVPCID details.
  e. The userspace page tables must be populated for each new
     process.  Even without PTI, the shared kernel mappings
     are created by copying top-level (PGD) entries into each
     new process.  But, with PTI, there are now *two* kernel
     mappings: one in the kernel page tables that maps everything
     and one for the entry/exit structures.  At fork(), we need to
     copy both.
  f. In addition to the fork()-time copying, there must also
     be an update to the userspace PGD any time a set_pgd() is done
     on a PGD used to map userspace.  This ensures that the kernel
     and userspace copies always map the same userspace
     memory.
  g. On systems without PCID support, each CR3 write flushes
     the entire TLB.  That means that each syscall, interrupt
     or exception flushes the TLB.
  h. INVPCID is a TLB-flushing instruction which allows flushing
     of TLB entries for non-current PCIDs.  Some systems support
     PCIDs, but do not support INVPCID.  On these systems, addresses
     can only be flushed from the TLB for the current PCID.  When
     flushing a kernel address, we need to flush all PCIDs, so a
     single kernel address flush will require a TLB-flushing CR3
     write upon the next use of every PCID.

Possible Future Work
====================
1. We can be more careful about not actually writing to CR3
   unless its value is actually changed.
2. Allow PTI to be enabled/disabled at runtime in addition to the
   boot-time switching.

Testing
========

To test stability of PTI, the following test procedure is recommended,
ideally doing all of these in parallel:

1. Set CONFIG_DEBUG_ENTRY=y
2. Run several copies of all of the tools/testing/selftests/x86/ tests
   (excluding MPX and protection_keys) in a loop on multiple CPUs for
   several minutes.  These tests frequently uncover corner cases in the
   kernel entry code.  In general, old kernels might cause these tests
   themselves to crash, but they should never crash the kernel.
3. Run the 'perf' tool in a mode (top or record) that generates many
   frequent performance monitoring non-maskable interrupts (see "NMI"
   in /proc/interrupts).  This exercises the NMI entry/exit code which
   is known to trigger bugs in code paths that did not expect to be
   interrupted, including nested NMIs.  Using "-c" boosts the rate of
   NMIs, and using two -c with separate counters encourages nested NMIs
   and less deterministic behavior.

	while true; do perf record -c 10000 -e instructions,cycles -a sleep 10; done

4. Launch a KVM virtual machine.
5. Run 32-bit binaries on systems supporting the SYSCALL instruction.
   This has been a lightly-tested code path and needs extra scrutiny.

Debugging
=========

Bugs in PTI cause a few different signatures of crashes
that are worth noting here.

 * Failures of the selftests/x86 code.  Usually a bug in one of the
   more obscure corners of entry_64.S
 * Crashes in early boot, especially around CPU bringup.  Bugs
   in the trampoline code or mappings cause these.
 * Crashes at the first interrupt.  Caused by bugs in entry_64.S,
   like screwing up a page table switch.  Also caused by
   incorrectly mapping the IRQ handler entry code.
 * Crashes at the first NMI.  The NMI code is separate from main
   interrupt handlers and can have bugs that do not affect
   normal interrupts.  Also caused by incorrectly mapping NMI
   code.  NMIs that interrupt the entry code must be very
   careful and can be the cause of crashes that show up when
   running perf.
 * Kernel crashes at the first exit to userspace.  entry_64.S
   bugs, or failing to map some of the exit code.
 * Crashes at first interrupt that interrupts userspace.  The paths
   in entry_64.S that return to userspace are sometimes separate
   from the ones that return to the kernel.
 * Double faults: overflowing the kernel stack because of page
   faults upon page faults.  Caused by touching non-pti-mapped
   data in the entry code, or forgetting to switch to kernel
   CR3 before calling into C functions which are not pti-mapped.
 * Userspace segfaults early in boot, sometimes manifesting
   as mount(8) failing to mount the rootfs.  These have
   tended to be TLB invalidation issues.  Usually invalidating
   the wrong PCID, or otherwise missing an invalidation.

1. https://gruss.cc/files/kaiser.pdf
2. https://meltdownattack.com/meltdown.pdf

MAINTAINERS

@@ -5986,6 +5986,20 @@ S:	Maintained
 F:	Documentation/hwmon/k8temp
 F:	drivers/hwmon/k8temp.c
 
+KASAN
+M:	Andrey Ryabinin <aryabinin@virtuozzo.com>
+R:	Alexander Potapenko <glider@google.com>
+R:	Dmitry Vyukov <dvyukov@google.com>
+L:	kasan-dev@googlegroups.com
+S:	Maintained
+F:	arch/*/include/asm/kasan.h
+F:	arch/*/mm/kasan_init*
+F:	Documentation/kasan.txt
+F:	include/linux/kasan*.h
+F:	lib/test_kasan.c
+F:	mm/kasan/
+F:	scripts/Makefile.kasan
+
 KCONFIG
 M:	"Yann E. MORIN" <yann.morin.1998@free.fr>
 L:	linux-kbuild@vger.kernel.org

Makefile

@@ -1,6 +1,6 @@
 VERSION = 4
 PATCHLEVEL = 4
-SUBLEVEL = 103
+SUBLEVEL = 112
 EXTRAVERSION =
 NAME = Blurry Fish Butt
 
@@ -386,6 +386,7 @@ LDFLAGS_MODULE  =
 CFLAGS_KERNEL	=
 AFLAGS_KERNEL	=
 CFLAGS_GCOV	= -fprofile-arcs -ftest-coverage -fno-tree-loop-im
+CFLAGS_KCOV	= -fsanitize-coverage=trace-pc
 
 
 # Use USERINCLUDE when you must reference the UAPI directories only.
@@ -433,7 +434,7 @@ export MAKE AWK GENKSYMS INSTALLKERNEL PERL PYTHON UTS_MACHINE
 export HOSTCXX HOSTCXXFLAGS LDFLAGS_MODULE CHECK CHECKFLAGS
 
 export KBUILD_CPPFLAGS NOSTDINC_FLAGS LINUXINCLUDE OBJCOPYFLAGS LDFLAGS
-export KBUILD_CFLAGS CFLAGS_KERNEL CFLAGS_MODULE CFLAGS_GCOV CFLAGS_KASAN
+export KBUILD_CFLAGS CFLAGS_KERNEL CFLAGS_MODULE CFLAGS_GCOV CFLAGS_KCOV CFLAGS_KASAN
 export KBUILD_AFLAGS AFLAGS_KERNEL AFLAGS_MODULE
 export KBUILD_AFLAGS_MODULE KBUILD_CFLAGS_MODULE KBUILD_LDFLAGS_MODULE
 export KBUILD_AFLAGS_KERNEL KBUILD_CFLAGS_KERNEL
@@ -715,6 +716,14 @@ endif
 endif
 KBUILD_CFLAGS += $(stackp-flag)
 
+ifdef CONFIG_KCOV
+  ifeq ($(call cc-option, $(CFLAGS_KCOV)),)
+    $(warning Cannot use CONFIG_KCOV: \
+             -fsanitize-coverage=trace-pc is not supported by compiler)
+    CFLAGS_KCOV =
+  endif
+endif
+
 ifeq ($(cc-name),clang)
 ifneq ($(CROSS_COMPILE),)
 CLANG_TRIPLE	?= $(CROSS_COMPILE)
@@ -817,6 +826,9 @@ KBUILD_CFLAGS += $(call cc-disable-warning, pointer-sign)
 # disable invalid "can't wrap" optimizations for signed / pointers
 KBUILD_CFLAGS	+= $(call cc-option,-fno-strict-overflow)
 
+# Make sure -fstack-check isn't enabled (like gentoo apparently did)
+KBUILD_CFLAGS  += $(call cc-option,-fno-stack-check,)
+
 # conserve stack if available
 KBUILD_CFLAGS   += $(call cc-option,-fconserve-stack)
 
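
cc-option simply test-compiles with the candidate flag and yields the flag on
success or the empty string on failure, which is why CFLAGS_KCOV can be
blanked when the compiler is too old. The same probe can be run by hand (a
sketch, assuming the host gcc):

	$ gcc -fsanitize-coverage=trace-pc -c -x c /dev/null -o /dev/null \
	      && echo supported || echo unsupported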

arch/alpha/include/asm/mmu_context.h

@@ -7,6 +7,7 @@
  * Copyright (C) 1996, Linus Torvalds
  */
 
+#include <linux/sched.h>
 #include <asm/machvec.h>
 #include <asm/compiler.h>
 #include <asm-generic/mm_hooks.h>

arch/arc/include/asm/uaccess.h

@@ -673,6 +673,7 @@ __arc_strncpy_from_user(char *dst, const char __user *src, long count)
 		return 0;
 
 	__asm__ __volatile__(
+	"	mov	lp_count, %5		\n"
 	"	lp	3f			\n"
 	"1:	ldb.ab  %3, [%2, 1]		\n"
 	"	breq.d	%3, 0, 3f		\n"
@@ -689,8 +690,8 @@ __arc_strncpy_from_user(char *dst, const char __user *src, long count)
 	"	.word   1b, 4b			\n"
 	"	.previous			\n"
 	: "+r"(res), "+r"(dst), "+r"(src), "=r"(val)
-	: "g"(-EFAULT), "l"(count)
-	: "memory");
+	: "g"(-EFAULT), "r"(count)
+	: "lp_count", "lp_start", "lp_end", "memory");
 
 	return res;
 }

@@ -668,6 +668,7 @@
 	ti,non-removable;
 	bus-width = <4>;
 	cap-power-off-card;
+	keep-power-in-suspend;
 	pinctrl-names = "default";
 	pinctrl-0 = <&mmc2_pins>;
 

arch/arm/boot/dts/dra7.dtsi

@@ -227,6 +227,7 @@
 			device_type = "pci";
 			ranges = <0x81000000 0 0          0x03000 0 0x00010000
 				  0x82000000 0 0x20013000 0x13000 0 0xffed000>;
+			bus-range = <0x00 0xff>;
 			#interrupt-cells = <1>;
 			num-lanes = <1>;
 			ti,hwmods = "pcie1";
@@ -262,6 +263,7 @@
 			device_type = "pci";
 			ranges = <0x81000000 0 0          0x03000 0 0x00010000
 				  0x82000000 0 0x30013000 0x13000 0 0xffed000>;
+			bus-range = <0x00 0xff>;
 			#interrupt-cells = <1>;
 			num-lanes = <1>;
 			ti,hwmods = "pcie2";

@@ -88,7 +88,7 @@
 	interrupts-extended = <&intc 83 &omap3_pmx_core 0x11a>;
 	pinctrl-names = "default";
 	pinctrl-0 = <&mmc1_pins &mmc1_cd>;
-	cd-gpios = <&gpio4 31 IRQ_TYPE_LEVEL_LOW>;	/* gpio127 */
+	cd-gpios = <&gpio4 31 GPIO_ACTIVE_LOW>;		/* gpio127 */
 	vmmc-supply = <&vmmc1>;
 	bus-width = <4>;
 	cap-power-off-card;

arch/arm/include/asm/assembler.h

@@ -512,4 +512,22 @@ THUMB(	orr	\reg , \reg , #PSR_T_BIT	)
 #endif
 	.endm
 
+	.macro	bug, msg, line
+#ifdef CONFIG_THUMB2_KERNEL
+1:	.inst	0xde02
+#else
+1:	.inst	0xe7f001f2
+#endif
+#ifdef CONFIG_DEBUG_BUGVERBOSE
+	.pushsection .rodata.str, "aMS", %progbits, 1
+2:	.asciz	"\msg"
+	.popsection
+	.pushsection __bug_table, "aw"
+	.align	2
+	.word	1b, 2b
+	.hword	\line
+	.popsection
+#endif
+	.endm
+
 #endif /* __ASM_ASSEMBLER_H__ */

arch/arm/include/asm/exception.h

@@ -7,7 +7,7 @@
 #ifndef __ASM_ARM_EXCEPTION_H
 #define __ASM_ARM_EXCEPTION_H
 
-#include <linux/ftrace.h>
+#include <linux/interrupt.h>
 
 #define __exception	__attribute__((section(".exception.text")))
 #ifdef CONFIG_FUNCTION_GRAPH_TRACER

arch/arm/include/asm/kvm_arm.h

@@ -161,8 +161,7 @@
 #else
 #define VTTBR_X		(5 - KVM_T0SZ)
 #endif
-#define VTTBR_BADDR_SHIFT (VTTBR_X - 1)
-#define VTTBR_BADDR_MASK  (((_AC(1, ULL) << (40 - VTTBR_X)) - 1) << VTTBR_BADDR_SHIFT)
+#define VTTBR_BADDR_MASK  (((1LLU << (40 - VTTBR_X)) - 1) << VTTBR_X)
 #define VTTBR_VMID_SHIFT  _AC(48, ULL)
 #define VTTBR_VMID_MASK(size)	(_AT(u64, (1 << size) - 1) << VTTBR_VMID_SHIFT)
 
@@ -209,6 +208,7 @@
 #define HSR_EC_IABT_HYP	(0x21)
 #define HSR_EC_DABT	(0x24)
 #define HSR_EC_DABT_HYP	(0x25)
+#define HSR_EC_MAX	(0x3f)
 
 #define HSR_WFI_IS_WFE		(_AC(1, UL) << 0)
 

arch/arm/include/asm/mmu_context.h

@@ -61,6 +61,7 @@ static inline void check_and_switch_context(struct mm_struct *mm,
 		cpu_switch_mm(mm->pgd, mm);
 }
 
+#ifndef MODULE
 #define finish_arch_post_lock_switch \
 	finish_arch_post_lock_switch
 static inline void finish_arch_post_lock_switch(void)
@@ -82,6 +83,7 @@ static inline void finish_arch_post_lock_switch(void)
 		preempt_enable_no_resched();
 	}
 }
+#endif /* !MODULE */
 
 #endif	/* CONFIG_MMU */
 

arch/arm/include/asm/traps.h

@@ -18,7 +18,6 @@ struct undef_hook {
 void register_undef_hook(struct undef_hook *hook);
 void unregister_undef_hook(struct undef_hook *hook);
 
-#ifdef CONFIG_FUNCTION_GRAPH_TRACER
 static inline int __in_irqentry_text(unsigned long ptr)
 {
 	extern char __irqentry_text_start[];
@@ -27,12 +26,6 @@ static inline int __in_irqentry_text(unsigned long ptr)
 	return ptr >= (unsigned long)&__irqentry_text_start &&
 	       ptr < (unsigned long)&__irqentry_text_end;
 }
-#else
-static inline int __in_irqentry_text(unsigned long ptr)
-{
-	return 0;
-}
-#endif
 
 static inline int in_exception_text(unsigned long ptr)
 {

arch/arm/kernel/entry-header.S

@@ -295,6 +295,8 @@
 	mov	r2, sp
 	ldr	r1, [r2, #\offset + S_PSR]	@ get calling cpsr
 	ldr	lr, [r2, #\offset + S_PC]!	@ get pc
+	tst	r1, #PSR_I_BIT | 0x0f
+	bne	1f
 	msr	spsr_cxsf, r1			@ save in spsr_svc
 #if defined(CONFIG_CPU_V6) || defined(CONFIG_CPU_32v6K)
 	@ We must avoid clrex due to Cortex-A15 erratum #830321
@@ -309,6 +311,7 @@
 	@ after ldm {}^
 	add	sp, sp, #\offset + S_FRAME_SIZE
 	movs	pc, lr				@ return & move spsr_svc into cpsr
+1:	bug	"Returning to usermode but unexpected PSR bits set?", \@
 #elif defined(CONFIG_CPU_V7M)
 	@ V7M restore.
 	@ Note that we don't need to do clrex here as clearing the local
@@ -324,6 +327,8 @@
 	ldr	r1, [sp, #\offset + S_PSR]	@ get calling cpsr
 	ldr	lr, [sp, #\offset + S_PC]	@ get pc
 	add	sp, sp, #\offset + S_SP
+	tst	r1, #PSR_I_BIT | 0x0f
+	bne	1f
 	msr	spsr_cxsf, r1			@ save in spsr_svc
 
 	@ We must avoid clrex due to Cortex-A15 erratum #830321
@@ -336,6 +341,7 @@
 	.endif
 	add	sp, sp, #S_FRAME_SIZE - S_SP
 	movs	pc, lr				@ return & move spsr_svc into cpsr
+1:	bug	"Returning to usermode but unexpected PSR bits set?", \@
 #endif	/* !CONFIG_THUMB2_KERNEL */
 	.endm
 

arch/arm/kernel/vmlinux.lds.S

@@ -105,6 +105,7 @@ SECTIONS
 			*(.exception.text)
 			__exception_text_end = .;
 			IRQENTRY_TEXT
+			SOFTIRQENTRY_TEXT
 			TEXT_TEXT
 			SCHED_TEXT
 			LOCK_TEXT

arch/arm/kvm/handle_exit.c

@@ -100,7 +100,19 @@ static int kvm_handle_wfx(struct kvm_vcpu *vcpu, struct kvm_run *run)
 	return 1;
 }
 
+static int kvm_handle_unknown_ec(struct kvm_vcpu *vcpu, struct kvm_run *run)
+{
+	u32 hsr = kvm_vcpu_get_hsr(vcpu);
+
+	kvm_pr_unimpl("Unknown exception class: hsr: %#08x\n",
+		      hsr);
+
+	kvm_inject_undefined(vcpu);
+	return 1;
+}
+
 static exit_handle_fn arm_exit_handlers[] = {
+	[0 ... HSR_EC_MAX]	= kvm_handle_unknown_ec,
 	[HSR_EC_WFI]		= kvm_handle_wfx,
 	[HSR_EC_CP15_32]	= kvm_handle_cp15_32,
 	[HSR_EC_CP15_64]	= kvm_handle_cp15_64,
@@ -122,13 +134,6 @@ static exit_handle_fn kvm_get_exit_handler(struct kvm_vcpu *vcpu)
 {
 	u8 hsr_ec = kvm_vcpu_trap_get_class(vcpu);
 
-	if (hsr_ec >= ARRAY_SIZE(arm_exit_handlers) ||
-	    !arm_exit_handlers[hsr_ec]) {
-		kvm_err("Unknown exception class: hsr: %#08x\n",
-			(unsigned int)kvm_vcpu_get_hsr(vcpu));
-		BUG();
-	}
-
 	return arm_exit_handlers[hsr_ec];
 }
 
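
The `[0 ... HSR_EC_MAX]` entry relies on GCC's range-designated-initializer
extension: the range fills every slot with the fallback handler, and any later
designator overrides its slot, which is what lets the lookup above drop the
bounds/NULL check. A standalone illustration of the idiom (hypothetical names):

	#include <stdio.h>

	static const char *names[8] = {
		[0 ... 7] = "unknown",	/* default for every slot (GCC extension) */
		[2]       = "two",	/* later designators override the range */
		[5]       = "five",
	};

	int main(void)
	{
		for (int i = 0; i < 8; i++)
			printf("%d: %s\n", i, names[i]);
		return 0;
	}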

arch/arm/kvm/mmio.c

@@ -113,7 +113,7 @@ int kvm_handle_mmio_return(struct kvm_vcpu *vcpu, struct kvm_run *run)
 		}
 
 		trace_kvm_mmio(KVM_TRACE_MMIO_READ, len, run->mmio.phys_addr,
-			       data);
+			       &data);
 		data = vcpu_data_host_to_guest(vcpu, data, len);
 		vcpu_set_reg(vcpu, vcpu->arch.mmio_decode.rt, data);
 	}
@@ -189,14 +189,14 @@ int io_mem_abort(struct kvm_vcpu *vcpu, struct kvm_run *run,
 		data = vcpu_data_guest_to_host(vcpu, vcpu_get_reg(vcpu, rt),
 					       len);
 
-		trace_kvm_mmio(KVM_TRACE_MMIO_WRITE, len, fault_ipa, data);
+		trace_kvm_mmio(KVM_TRACE_MMIO_WRITE, len, fault_ipa, &data);
 		mmio_write_buf(data_buf, len, data);
 
 		ret = kvm_io_bus_write(vcpu, KVM_MMIO_BUS, fault_ipa, len,
 				       data_buf);
 	} else {
 		trace_kvm_mmio(KVM_TRACE_MMIO_READ_UNSATISFIED, len,
-			       fault_ipa, 0);
+			       fault_ipa, NULL);
 
 		ret = kvm_io_bus_read(vcpu, KVM_MMIO_BUS, fault_ipa, len,
 				      data_buf);

arch/arm/mach-omap1/dma.c

@@ -31,7 +31,6 @@
 #include "soc.h"
 
 #define OMAP1_DMA_BASE			(0xfffed800)
-#define OMAP1_LOGICAL_DMA_CH_COUNT	17
 
 static u32 enable_1510_mode;
 
@@ -311,8 +310,6 @@ static int __init omap1_system_dma_init(void)
 		goto exit_iounmap;
 	}
 
-	d->lch_count		= OMAP1_LOGICAL_DMA_CH_COUNT;
-
 	/* Valid attributes for omap1 plus processors */
 	if (cpu_is_omap15xx())
 		d->dev_caps = ENABLE_1510_MODE;
@@ -329,13 +326,14 @@ static int __init omap1_system_dma_init(void)
 		d->dev_caps |= CLEAR_CSR_ON_READ;
 		d->dev_caps |= IS_WORD_16;
 
-	if (cpu_is_omap15xx())
-		d->chan_count = 9;
-	else if (cpu_is_omap16xx() || cpu_is_omap7xx()) {
-		if (!(d->dev_caps & ENABLE_1510_MODE))
-			d->chan_count = 16;
+	/* available logical channels */
+	if (cpu_is_omap15xx()) {
+		d->lch_count = 9;
+	} else {
+		if (d->dev_caps & ENABLE_1510_MODE)
+			d->lch_count = 9;
 		else
-			d->chan_count = 9;
+			d->lch_count = 16;
 	}
 
 	p = dma_plat_info;

arch/arm/mach-omap2/gpmc-onenand.c

@@ -367,7 +367,7 @@ static int gpmc_onenand_setup(void __iomem *onenand_base, int *freq_ptr)
 	return ret;
 }
 
-void gpmc_onenand_init(struct omap_onenand_platform_data *_onenand_data)
+int gpmc_onenand_init(struct omap_onenand_platform_data *_onenand_data)
 {
 	int err;
 	struct device *dev = &gpmc_onenand_device.dev;
@@ -393,15 +393,17 @@ void gpmc_onenand_init(struct omap_onenand_platform_data *_onenand_data)
 	if (err < 0) {
 		dev_err(dev, "Cannot request GPMC CS %d, error %d\n",
 			gpmc_onenand_data->cs, err);
-		return;
+		return err;
 	}
 
 	gpmc_onenand_resource.end = gpmc_onenand_resource.start +
 							ONENAND_IO_SIZE - 1;
 
-	if (platform_device_register(&gpmc_onenand_device) < 0) {
+	err = platform_device_register(&gpmc_onenand_device);
+	if (err) {
 		dev_err(dev, "Unable to register OneNAND device\n");
 		gpmc_cs_free(gpmc_onenand_data->cs);
-		return;
 	}
+
+	return err;
 }

arch/arm/mach-omap2/omap_hwmod_3xxx_data.c

@@ -3885,16 +3885,20 @@ static struct omap_hwmod_ocp_if *omap3xxx_dss_hwmod_ocp_ifs[] __initdata = {
  * Return: 0 if device named @dev_name is not likely to be accessible,
  * or 1 if it is likely to be accessible.
  */
-static int __init omap3xxx_hwmod_is_hs_ip_block_usable(struct device_node *bus,
-						       const char *dev_name)
+static bool __init omap3xxx_hwmod_is_hs_ip_block_usable(struct device_node *bus,
+							const char *dev_name)
 {
+	struct device_node *node;
+	bool available;
+
 	if (!bus)
-		return (omap_type() == OMAP2_DEVICE_TYPE_GP) ? 1 : 0;
+		return omap_type() == OMAP2_DEVICE_TYPE_GP;
 
-	if (of_device_is_available(of_find_node_by_name(bus, dev_name)))
-		return 1;
+	node = of_get_child_by_name(bus, dev_name);
+	available = of_device_is_available(node);
+	of_node_put(node);
 
-	return 0;
+	return available;
 }
 
 int __init omap3xxx_hwmod_init(void)
@@ -3963,15 +3967,20 @@ int __init omap3xxx_hwmod_init(void)
 
 	if (h_sham && omap3xxx_hwmod_is_hs_ip_block_usable(bus, "sham")) {
 		r = omap_hwmod_register_links(h_sham);
-		if (r < 0)
+		if (r < 0) {
+			of_node_put(bus);
 			return r;
+		}
 	}
 
 	if (h_aes && omap3xxx_hwmod_is_hs_ip_block_usable(bus, "aes")) {
 		r = omap_hwmod_register_links(h_aes);
-		if (r < 0)
+		if (r < 0) {
+			of_node_put(bus);
 			return r;
+		}
 	}
+	of_node_put(bus);
 
 	/*
 	 * Register hwmod links specific to certain ES levels of a

arch/arm/mm/dma-mapping.c

@@ -753,13 +753,31 @@ static void arm_coherent_dma_free(struct device *dev, size_t size, void *cpu_add
 	__arm_dma_free(dev, size, cpu_addr, handle, attrs, true);
 }
 
+/*
+ * The whole dma_get_sgtable() idea is fundamentally unsafe - it seems
+ * that the intention is to allow exporting memory allocated via the
+ * coherent DMA APIs through the dma_buf API, which only accepts a
+ * scattertable.  This presents a couple of problems:
+ * 1. Not all memory allocated via the coherent DMA APIs is backed by
+ *    a struct page
+ * 2. Passing coherent DMA memory into the streaming APIs is not allowed
+ *    as we will try to flush the memory through a different alias to that
+ *    actually being used (and the flushes are redundant.)
+ */
 int arm_dma_get_sgtable(struct device *dev, struct sg_table *sgt,
 		 void *cpu_addr, dma_addr_t handle, size_t size,
 		 struct dma_attrs *attrs)
 {
-	struct page *page = pfn_to_page(dma_to_pfn(dev, handle));
+	unsigned long pfn = dma_to_pfn(dev, handle);
+	struct page *page;
 	int ret;
 
+	/* If the PFN is not valid, we do not have a struct page */
+	if (!pfn_valid(pfn))
+		return -ENXIO;
+
+	page = pfn_to_page(pfn);
+
 	ret = sg_alloc_table(sgt, 1, GFP_KERNEL);
 	if (unlikely(ret))
 		return ret;

arch/arm/probes/kprobes/core.c

@@ -433,6 +433,7 @@ static __used __kprobes void *trampoline_handler(struct pt_regs *regs)
 	struct hlist_node *tmp;
 	unsigned long flags, orig_ret_address = 0;
 	unsigned long trampoline_address = (unsigned long)&kretprobe_trampoline;
+	kprobe_opcode_t *correct_ret_addr = NULL;
 
 	INIT_HLIST_HEAD(&empty_rp);
 	kretprobe_hash_lock(current, &head, &flags);
@@ -455,15 +456,7 @@ static __used __kprobes void *trampoline_handler(struct pt_regs *regs)
 			/* another task is sharing our hash bucket */
 			continue;
 
-		if (ri->rp && ri->rp->handler) {
-			__this_cpu_write(current_kprobe, &ri->rp->kp);
-			get_kprobe_ctlblk()->kprobe_status = KPROBE_HIT_ACTIVE;
-			ri->rp->handler(ri, regs);
-			__this_cpu_write(current_kprobe, NULL);
-		}
-
 		orig_ret_address = (unsigned long)ri->ret_addr;
-		recycle_rp_inst(ri, &empty_rp);
 
 		if (orig_ret_address != trampoline_address)
 			/*
@@ -475,6 +468,33 @@ static __used __kprobes void *trampoline_handler(struct pt_regs *regs)
 	}
 
 	kretprobe_assert(ri, orig_ret_address, trampoline_address);
+
+	correct_ret_addr = ri->ret_addr;
+	hlist_for_each_entry_safe(ri, tmp, head, hlist) {
+		if (ri->task != current)
+			/* another task is sharing our hash bucket */
+			continue;
+
+		orig_ret_address = (unsigned long)ri->ret_addr;
+		if (ri->rp && ri->rp->handler) {
+			__this_cpu_write(current_kprobe, &ri->rp->kp);
+			get_kprobe_ctlblk()->kprobe_status = KPROBE_HIT_ACTIVE;
+			ri->ret_addr = correct_ret_addr;
+			ri->rp->handler(ri, regs);
+			__this_cpu_write(current_kprobe, NULL);
+		}
+
+		recycle_rp_inst(ri, &empty_rp);
+
+		if (orig_ret_address != trampoline_address)
+			/*
+			 * This is the real return address. Any other
+			 * instances associated with this task are for
+			 * other calls deeper on the call stack
+			 */
+			break;
+	}
+
 	kretprobe_hash_unlock(current, &flags);
 
 	hlist_for_each_entry_safe(ri, tmp, &empty_rp, hlist) {

arch/arm/probes/kprobes/test-core.c

@@ -976,7 +976,10 @@ static void coverage_end(void)
 void __naked __kprobes_test_case_start(void)
 {
 	__asm__ __volatile__ (
-		"stmdb	sp!, {r4-r11}	\n\t"
+		"mov	r2, sp		\n\t"
+		"bic	r3, r2, #7	\n\t"
+		"mov	sp, r3		\n\t"
+		"stmdb	sp!, {r2-r11}	\n\t"
 		"sub	sp, sp, #"__stringify(TEST_MEMORY_SIZE)"\n\t"
 		"bic	r0, lr, #1  @ r0 = inline data	\n\t"
 		"mov	r1, sp		\n\t"
@@ -996,7 +999,8 @@ void __naked __kprobes_test_case_end_32(void)
 		"movne	pc, r0	\n\t"
 		"mov	r0, r4	\n\t"
 		"add	sp, sp, #"__stringify(TEST_MEMORY_SIZE)"\n\t"
-		"ldmia	sp!, {r4-r11}	\n\t"
+		"ldmia	sp!, {r2-r11}	\n\t"
+		"mov	sp, r2	\n\t"
 		"mov	pc, r0	\n\t"
 	);
 }
@@ -1012,7 +1016,8 @@ void __naked __kprobes_test_case_end_16(void)
 		"bxne	r0	\n\t"
 		"mov	r0, r4	\n\t"
 		"add	sp, sp, #"__stringify(TEST_MEMORY_SIZE)"\n\t"
-		"ldmia	sp!, {r4-r11}	\n\t"
+		"ldmia	sp!, {r2-r11}	\n\t"
+		"mov	sp, r2	\n\t"
 		"bx	r0	\n\t"
 	);
 }

arch/arm64/Kconfig

@@ -687,6 +687,18 @@ config FORCE_MAX_ZONEORDER
 	  However for 4K, we choose a higher default value, 11 as opposed to 10, giving us
 	  4M allocations matching the default size used by generic code.
 
+config UNMAP_KERNEL_AT_EL0
+	bool "Unmap kernel when running in userspace (aka \"KAISER\")" if EXPERT
+	default y
+	help
+	  Speculation attacks against some high-performance processors can
+	  be used to bypass MMU permission checks and leak kernel data to
+	  userspace. This can be defended against by unmapping the kernel
+	  when running in userspace, mapping it back in on exception entry
+	  via a trampoline page in the vector table.
+
+	  If unsure, say Y.
+
 menuconfig ARMV8_DEPRECATED
 	bool "Emulate deprecated/obsolete ARMv8 instructions"
 	depends on COMPAT

arch/arm64/include/asm/assembler.h

@@ -398,17 +398,4 @@ alternative_endif
 	mrs	\rd, sp_el0
 	.endm
 
-/*
- * Errata workaround post TTBR0_EL1 update.
- */
-	.macro	post_ttbr0_update_workaround
-#ifdef CONFIG_CAVIUM_ERRATUM_27456
-alternative_if ARM64_WORKAROUND_CAVIUM_27456
-	ic	iallu
-	dsb	nsh
-	isb
-alternative_else_nop_endif
-#endif
-	.endm
-
 #endif	/* __ASM_ASSEMBLER_H */

arch/arm64/include/asm/cpufeature.h

@@ -36,7 +36,8 @@
 
 #define ARM64_WORKAROUND_CAVIUM_27456		11
 #define ARM64_HAS_VIRT_HOST_EXTN		12
-#define ARM64_NCAPS				13
+#define ARM64_UNMAP_KERNEL_AT_EL0		23
+#define ARM64_NCAPS				24
 
 #ifndef __ASSEMBLY__
 

arch/arm64/include/asm/efi.h

@@ -76,12 +76,14 @@ static inline void efi_set_pgd(struct mm_struct *mm)
 	if (mm != current->active_mm) {
 		/*
 		 * Update the current thread's saved ttbr0 since it is
-		 * restored as part of a return from exception. Set
-		 * the hardware TTBR0_EL1 using cpu_switch_mm()
-		 * directly to enable potential errata workarounds.
+		 * restored as part of a return from exception. Enable
+		 * access to the valid TTBR0_EL1 and invoke the errata
+		 * workaround directly since there is no return from
+		 * exception when invoking the EFI run-time services.
 		 */
 		update_saved_ttbr0(current, mm);
-		cpu_switch_mm(mm->pgd, mm);
+		uaccess_ttbr0_enable();
+		post_ttbr_update_workaround();
 	} else {
 		/*
 		 * Defer the switch to the current thread's TTBR0_EL1
@@ -89,7 +91,7 @@ static inline void efi_set_pgd(struct mm_struct *mm)
 		 * thread's saved ttbr0 corresponding to its active_mm
 		 * (if different from init_mm).
 		 */
-		cpu_set_reserved_ttbr0();
+		uaccess_ttbr0_disable();
 		if (current->active_mm != &init_mm)
 			update_saved_ttbr0(current, current->active_mm);
 	}

arch/arm64/include/asm/exception.h

@@ -18,7 +18,7 @@
 #ifndef __ASM_EXCEPTION_H
 #define __ASM_EXCEPTION_H
 
-#include <linux/ftrace.h>
+#include <linux/interrupt.h>
 
 #define __exception	__attribute__((section(".exception.text")))
 #ifdef CONFIG_FUNCTION_GRAPH_TRACER

arch/arm64/include/asm/fixmap.h

@@ -50,6 +50,11 @@ enum fixed_addresses {
 
 	FIX_EARLYCON_MEM_BASE,
 	FIX_TEXT_POKE0,
+#ifdef CONFIG_UNMAP_KERNEL_AT_EL0
+	FIX_ENTRY_TRAMP_DATA,
+	FIX_ENTRY_TRAMP_TEXT,
+#define TRAMP_VALIAS		(__fix_to_virt(FIX_ENTRY_TRAMP_TEXT))
+#endif /* CONFIG_UNMAP_KERNEL_AT_EL0 */
 	__end_of_permanent_fixed_addresses,
 
 	/*

arch/arm64/include/asm/kernel-pgtable.h

@@ -78,8 +78,16 @@
 /*
  * Initial memory map attributes.
  */
-#define SWAPPER_PTE_FLAGS	(PTE_TYPE_PAGE | PTE_AF | PTE_SHARED)
-#define SWAPPER_PMD_FLAGS	(PMD_TYPE_SECT | PMD_SECT_AF | PMD_SECT_S)
+#define _SWAPPER_PTE_FLAGS	(PTE_TYPE_PAGE | PTE_AF | PTE_SHARED)
+#define _SWAPPER_PMD_FLAGS	(PMD_TYPE_SECT | PMD_SECT_AF | PMD_SECT_S)
+
+#ifdef CONFIG_UNMAP_KERNEL_AT_EL0
+#define SWAPPER_PTE_FLAGS	(_SWAPPER_PTE_FLAGS | PTE_NG)
+#define SWAPPER_PMD_FLAGS	(_SWAPPER_PMD_FLAGS | PMD_SECT_NG)
+#else
+#define SWAPPER_PTE_FLAGS	_SWAPPER_PTE_FLAGS
+#define SWAPPER_PMD_FLAGS	_SWAPPER_PMD_FLAGS
+#endif
 
 #if ARM64_SWAPPER_USES_SECTION_MAPS
 #define SWAPPER_MM_MMUFLAGS	(PMD_ATTRINDX(MT_NORMAL) | SWAPPER_PMD_FLAGS)

arch/arm64/include/asm/kvm_arm.h

@@ -154,8 +154,7 @@
 #define VTTBR_X		(37 - VTCR_EL2_T0SZ_40B)
 #endif
 
-#define VTTBR_BADDR_SHIFT (VTTBR_X - 1)
-#define VTTBR_BADDR_MASK  (((UL(1) << (PHYS_MASK_SHIFT - VTTBR_X)) - 1) << VTTBR_BADDR_SHIFT)
+#define VTTBR_BADDR_MASK  (((UL(1) << (PHYS_MASK_SHIFT - VTTBR_X)) - 1) << VTTBR_X)
 #define VTTBR_VMID_SHIFT  (UL(48))
 #define VTTBR_VMID_MASK(size)	(_AT(u64, (1 << size) - 1) << VTTBR_VMID_SHIFT)
 

arch/arm64/include/asm/kvm_mmu.h

@@ -266,7 +266,7 @@ static inline void __kvm_flush_dcache_pud(pud_t pud)
 	kvm_flush_dcache_to_poc(page_address(page), PUD_SIZE);
 }
 
-#define kvm_virt_to_phys(x)	__virt_to_phys((unsigned long)(x))
+#define kvm_virt_to_phys(x)	__pa_symbol(x)
 
 void kvm_set_way_flush(struct kvm_vcpu *vcpu);
 void kvm_toggle_cache(struct kvm_vcpu *vcpu, bool was_enabled);

arch/arm64/include/asm/memory.h

@@ -148,6 +148,11 @@ extern u64			kimage_vaddr;
 /* the offset between the kernel virtual and physical mappings */
 extern u64			kimage_voffset;
 
+static inline unsigned long kaslr_offset(void)
+{
+	return kimage_vaddr - KIMAGE_VADDR;
+}
+
 /*
  * Allow all memory at the discovery stage. We will clip it later.
  */
@@ -188,6 +193,7 @@ static inline void *phys_to_virt(phys_addr_t x)
 #define __va(x)			((void *)__phys_to_virt((phys_addr_t)(x)))
 #define pfn_to_kaddr(pfn)	__va((pfn) << PAGE_SHIFT)
 #define virt_to_pfn(x)		__phys_to_pfn(__virt_to_phys(x))
+#define sym_to_pfn(x)		__phys_to_pfn(__pa_symbol(x))
 
 /*
  * virt_to_page(k)	convert a _valid_ virtual address to struct page *

arch/arm64/include/asm/mmu.h

@@ -16,6 +16,11 @@
 #ifndef __ASM_MMU_H
 #define __ASM_MMU_H
 
+#define USER_ASID_FLAG	(UL(1) << 48)
+#define TTBR_ASID_MASK	(UL(0xffff) << 48)
+
+#ifndef __ASSEMBLY__
+
 typedef struct {
 	atomic64_t	id;
 	void		*vdso;
@@ -28,6 +33,12 @@ typedef struct {
  */
 #define ASID(mm)	((mm)->context.id.counter & 0xffff)
 
+static inline bool arm64_kernel_unmapped_at_el0(void)
+{
+	return IS_ENABLED(CONFIG_UNMAP_KERNEL_AT_EL0) &&
+	       cpus_have_cap(ARM64_UNMAP_KERNEL_AT_EL0);
+}
+
 extern void paging_init(void);
 extern void __iomem *early_io_map(phys_addr_t phys, unsigned long virt);
 extern void init_mem_pgprot(void);
@@ -36,4 +47,5 @@ extern void create_pgd_mapping(struct mm_struct *mm, phys_addr_t phys,
 			       pgprot_t prot, bool allow_block_mappings);
 extern void *fixmap_remap_fdt(phys_addr_t dt_phys);
 
+#endif /* !__ASSEMBLY__ */
 #endif

arch/arm64/include/asm/mmu_context.h

@@ -50,7 +50,7 @@ static inline void contextidr_thread_switch(struct task_struct *next)
  */
 static inline void cpu_set_reserved_ttbr0(void)
 {
-	unsigned long ttbr = virt_to_phys(empty_zero_page);
+	unsigned long ttbr = __pa_symbol(empty_zero_page);
 
 	asm(
 	"	msr	ttbr0_el1, %0			// set TTBR0\n"
@@ -59,6 +59,13 @@ static inline void cpu_set_reserved_ttbr0(void)
 	: "r" (ttbr));
 }
 
+static inline void cpu_switch_mm(pgd_t *pgd, struct mm_struct *mm)
+{
+	BUG_ON(pgd == swapper_pg_dir);
+	cpu_set_reserved_ttbr0();
+	cpu_do_switch_mm(virt_to_phys(pgd),mm);
+}
+
 /*
  * TCR.T0SZ value to use when the ID map is active. Usually equals
  * TCR_T0SZ(VA_BITS), unless system RAM is positioned very high in
@@ -124,7 +131,7 @@ static inline void cpu_install_idmap(void)
 	local_flush_tlb_all();
 	cpu_set_idmap_tcr_t0sz();
 
-	cpu_switch_mm(idmap_pg_dir, &init_mm);
+	cpu_switch_mm(lm_alias(idmap_pg_dir), &init_mm);
 }
 
 /*
@@ -139,7 +146,7 @@ static inline void cpu_replace_ttbr1(pgd_t *pgd)
 
 	phys_addr_t pgd_phys = virt_to_phys(pgd);
 
-	replace_phys = (void *)virt_to_phys(idmap_cpu_replace_ttbr1);
+	replace_phys = (void *)__pa_symbol(idmap_cpu_replace_ttbr1);
 
 	cpu_install_idmap();
 	replace_phys(pgd_phys);
@@ -179,9 +186,10 @@ static inline void update_saved_ttbr0(struct task_struct *tsk,
 				      struct mm_struct *mm)
 {
 	if (system_uses_ttbr0_pan()) {
+		u64 ttbr;
 		BUG_ON(mm->pgd == swapper_pg_dir);
-		task_thread_info(tsk)->ttbr0 =
-			virt_to_phys(mm->pgd) | ASID(mm) << 48;
+		ttbr = virt_to_phys(mm->pgd) | ASID(mm) << 48;
+		WRITE_ONCE(task_thread_info(tsk)->ttbr0, ttbr);
 	}
 }
 #else
@@ -228,4 +236,6 @@ switch_mm(struct mm_struct *prev, struct mm_struct *next,
 #define deactivate_mm(tsk,mm)	do { } while (0)
 #define activate_mm(prev,next)	switch_mm(prev, next, current)
 
+void post_ttbr_update_workaround(void);
+
 #endif

arch/arm64/include/asm/pgtable-hwdef.h

@@ -224,6 +224,8 @@
 #define TCR_TG1_16K		(UL(1) << 30)
 #define TCR_TG1_4K		(UL(2) << 30)
 #define TCR_TG1_64K		(UL(3) << 30)
+
+#define TCR_A1			(UL(1) << 22)
 #define TCR_ASID16		(UL(1) << 36)
 #define TCR_TBI0		(UL(1) << 37)
 #define TCR_HA			(UL(1) << 39)

arch/arm64/include/asm/pgtable.h

@@ -61,8 +61,16 @@ extern void __pmd_error(const char *file, int line, unsigned long val);
 extern void __pud_error(const char *file, int line, unsigned long val);
 extern void __pgd_error(const char *file, int line, unsigned long val);
 
-#define PROT_DEFAULT		(PTE_TYPE_PAGE | PTE_AF | PTE_SHARED)
-#define PROT_SECT_DEFAULT	(PMD_TYPE_SECT | PMD_SECT_AF | PMD_SECT_S)
+#define _PROT_DEFAULT		(PTE_TYPE_PAGE | PTE_AF | PTE_SHARED)
+#define _PROT_SECT_DEFAULT	(PMD_TYPE_SECT | PMD_SECT_AF | PMD_SECT_S)
+
+#ifdef CONFIG_UNMAP_KERNEL_AT_EL0
+#define PROT_DEFAULT		(_PROT_DEFAULT | PTE_NG)
+#define PROT_SECT_DEFAULT	(_PROT_SECT_DEFAULT | PMD_SECT_NG)
+#else
+#define PROT_DEFAULT		_PROT_DEFAULT
+#define PROT_SECT_DEFAULT	_PROT_SECT_DEFAULT
+#endif /* CONFIG_UNMAP_KERNEL_AT_EL0 */
 
 #define PROT_DEVICE_nGnRnE	(PROT_DEFAULT | PTE_PXN | PTE_UXN | PTE_DIRTY | PTE_WRITE | PTE_ATTRINDX(MT_DEVICE_nGnRnE))
 #define PROT_DEVICE_nGnRE	(PROT_DEFAULT | PTE_PXN | PTE_UXN | PTE_DIRTY | PTE_WRITE | PTE_ATTRINDX(MT_DEVICE_nGnRE))
@@ -75,6 +83,7 @@ extern void __pgd_error(const char *file, int line, unsigned long val);
 #define PROT_SECT_NORMAL_EXEC	(PROT_SECT_DEFAULT | PMD_SECT_UXN | PMD_ATTRINDX(MT_NORMAL))
 
 #define _PAGE_DEFAULT		(PROT_DEFAULT | PTE_ATTRINDX(MT_NORMAL))
+#define _HYP_PAGE_DEFAULT	(_PAGE_DEFAULT & ~PTE_NG)
 
 #define PAGE_KERNEL		__pgprot(_PAGE_DEFAULT | PTE_PXN | PTE_UXN | PTE_DIRTY | PTE_WRITE)
 #define PAGE_KERNEL_RO		__pgprot(_PAGE_DEFAULT | PTE_PXN | PTE_UXN | PTE_DIRTY | PTE_RDONLY)
@@ -82,13 +91,13 @@ extern void __pgd_error(const char *file, int line, unsigned long val);
 #define PAGE_KERNEL_EXEC	__pgprot(_PAGE_DEFAULT | PTE_UXN | PTE_DIRTY | PTE_WRITE)
 #define PAGE_KERNEL_EXEC_CONT	__pgprot(_PAGE_DEFAULT | PTE_UXN | PTE_DIRTY | PTE_WRITE | PTE_CONT)
 
-#define PAGE_HYP		__pgprot(_PAGE_DEFAULT | PTE_HYP)
+#define PAGE_HYP		__pgprot(_HYP_PAGE_DEFAULT | PTE_HYP)
 #define PAGE_HYP_DEVICE		__pgprot(PROT_DEVICE_nGnRE | PTE_HYP)
 
 #define PAGE_S2			__pgprot(PROT_DEFAULT | PTE_S2_MEMATTR(MT_S2_NORMAL) | PTE_S2_RDONLY)
 #define PAGE_S2_DEVICE		__pgprot(PROT_DEFAULT | PTE_S2_MEMATTR(MT_S2_DEVICE_nGnRE) | PTE_S2_RDONLY | PTE_UXN)
 
-#define PAGE_NONE		__pgprot(((_PAGE_DEFAULT) & ~PTE_VALID) | PTE_PROT_NONE | PTE_PXN | PTE_UXN)
+#define PAGE_NONE		__pgprot(((_PAGE_DEFAULT) & ~PTE_VALID) | PTE_PROT_NONE | PTE_NG | PTE_PXN | PTE_UXN)
 #define PAGE_SHARED		__pgprot(_PAGE_DEFAULT | PTE_USER | PTE_NG | PTE_PXN | PTE_UXN | PTE_WRITE)
 #define PAGE_SHARED_EXEC	__pgprot(_PAGE_DEFAULT | PTE_USER | PTE_NG | PTE_PXN | PTE_WRITE)
 #define PAGE_COPY		__pgprot(_PAGE_DEFAULT | PTE_USER | PTE_NG | PTE_PXN | PTE_UXN)
@@ -119,7 +128,7 @@ extern void __pgd_error(const char *file, int line, unsigned long val);
 * for zero-mapped memory areas etc..
 */
 extern unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)];
-#define ZERO_PAGE(vaddr)	virt_to_page(empty_zero_page)
+#define ZERO_PAGE(vaddr)	phys_to_page(__pa_symbol(empty_zero_page))
 
 #define pte_ERROR(pte)		__pte_error(__FILE__, __LINE__, pte_val(pte))
 
@@ -716,6 +725,7 @@ static inline void pmdp_set_wrprotect(struct mm_struct *mm,
 
 extern pgd_t swapper_pg_dir[PTRS_PER_PGD];
 extern pgd_t idmap_pg_dir[PTRS_PER_PGD];
+extern pgd_t tramp_pg_dir[PTRS_PER_PGD];
 
 /*
  * Encode and decode a swap entry:
@@ -35,12 +35,6 @@ extern u64 cpu_do_resume(phys_addr_t ptr, u64 idmap_ttbr);

#include <asm/memory.h>

#define cpu_switch_mm(pgd,mm)				\
do {							\
	BUG_ON(pgd == swapper_pg_dir);			\
	cpu_do_switch_mm(virt_to_phys(pgd),mm);		\
} while (0)

#endif /* __ASSEMBLY__ */
#endif /* __KERNEL__ */
#endif /* __ASM_PROCFNS_H */
@@ -23,6 +23,30 @@

#include <linux/sched.h>
#include <asm/cputype.h>
#include <asm/mmu.h>

/*
 * Raw TLBI operations.
 *
 * Where necessary, use the __tlbi() macro to avoid asm()
 * boilerplate. Drivers and most kernel code should use the TLB
 * management routines in preference to the macro below.
 *
 * The macro can be used as __tlbi(op) or __tlbi(op, arg), depending
 * on whether a particular TLBI operation takes an argument or
 * not. The macro handles invoking the asm with or without the
 * register argument as appropriate.
 */
#define __TLBI_0(op, arg)	asm ("tlbi " #op)
#define __TLBI_1(op, arg)	asm ("tlbi " #op ", %0" : : "r" (arg))
#define __TLBI_N(op, arg, n, ...) __TLBI_##n(op, arg)

#define __tlbi(op, ...)		__TLBI_N(op, ##__VA_ARGS__, 1, 0)

#define __tlbi_user(op, arg) do {				\
	if (arm64_kernel_unmapped_at_el0())			\
		__tlbi(op, (arg) | USER_ASID_FLAG);		\
} while (0)

/*
 * TLB Management

@@ -66,7 +90,7 @@
static inline void local_flush_tlb_all(void)
{
	dsb(nshst);
	asm("tlbi vmalle1");
	__tlbi(vmalle1);
	dsb(nsh);
	isb();
}

@@ -74,7 +98,7 @@ static inline void local_flush_tlb_all(void)
static inline void flush_tlb_all(void)
{
	dsb(ishst);
	asm("tlbi vmalle1is");
	__tlbi(vmalle1is);
	dsb(ish);
	isb();
}

@@ -84,7 +108,8 @@ static inline void flush_tlb_mm(struct mm_struct *mm)
	unsigned long asid = ASID(mm) << 48;

	dsb(ishst);
	asm("tlbi aside1is, %0" : : "r" (asid));
	__tlbi(aside1is, asid);
	__tlbi_user(aside1is, asid);
	dsb(ish);
}

@@ -94,7 +119,8 @@ static inline void flush_tlb_page(struct vm_area_struct *vma,
	unsigned long addr = uaddr >> 12 | (ASID(vma->vm_mm) << 48);

	dsb(ishst);
	asm("tlbi vale1is, %0" : : "r" (addr));
	__tlbi(vale1is, addr);
	__tlbi_user(vale1is, addr);
	dsb(ish);
}

@@ -121,10 +147,13 @@ static inline void __flush_tlb_range(struct vm_area_struct *vma,

	dsb(ishst);
	for (addr = start; addr < end; addr += 1 << (PAGE_SHIFT - 12)) {
		if (last_level)
			asm("tlbi vale1is, %0" : : "r"(addr));
		else
			asm("tlbi vae1is, %0" : : "r"(addr));
		if (last_level) {
			__tlbi(vale1is, addr);
			__tlbi_user(vale1is, addr);
		} else {
			__tlbi(vae1is, addr);
			__tlbi_user(vae1is, addr);
		}
	}
	dsb(ish);
}

@@ -149,7 +178,7 @@ static inline void flush_tlb_kernel_range(unsigned long start, unsigned long end

	dsb(ishst);
	for (addr = start; addr < end; addr += 1 << (PAGE_SHIFT - 12))
		asm("tlbi vaae1is, %0" : : "r"(addr));
		__tlbi(vaae1is, addr);
	dsb(ish);
	isb();
}

@@ -163,7 +192,8 @@ static inline void __flush_tlb_pgtable(struct mm_struct *mm,
{
	unsigned long addr = uaddr >> 12 | (ASID(mm) << 48);

	asm("tlbi vae1is, %0" : : "r" (addr));
	__tlbi(vae1is, addr);
	__tlbi_user(vae1is, addr);
	dsb(ish);
}
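The __tlbi() wrapper above picks between a zero-argument and a one-argument asm form by counting macro arguments: the trailing `1, 0` defaults shift left as real arguments are supplied, so __TLBI_N ends up expanding to __TLBI_0 or __TLBI_1. A standalone sketch of the same trick, with printf standing in for the asm so it runs anywhere gcc or clang's `##__VA_ARGS__` extension is available:

	#include <stdio.h>

	#define DEMO_0(op, arg)		printf("tlbi %s\n", #op)
	#define DEMO_1(op, arg)		printf("tlbi %s, %#lx\n", #op, \
					       (unsigned long)(arg))
	#define DEMO_N(op, arg, n, ...)	DEMO_##n(op, arg)

	/* With no extra argument n binds to 0; with one argument n binds to 1. */
	#define demo_tlbi(op, ...)	DEMO_N(op, ##__VA_ARGS__, 1, 0)

	int main(void)
	{
		demo_tlbi(vmalle1is);		/* expands to DEMO_0 */
		demo_tlbi(vale1is, 0x1234);	/* expands to DEMO_1 */
		return 0;
	}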
@@ -35,18 +35,11 @@ struct undef_hook {
void register_undef_hook(struct undef_hook *hook);
void unregister_undef_hook(struct undef_hook *hook);

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
static inline int __in_irqentry_text(unsigned long ptr)
{
	return ptr >= (unsigned long)&__irqentry_text_start &&
	       ptr < (unsigned long)&__irqentry_text_end;
}
#else
static inline int __in_irqentry_text(unsigned long ptr)
{
	return 0;
}
#endif

static inline int in_exception_text(unsigned long ptr)
{
@@ -20,6 +20,7 @@

#include <asm/alternative.h>
#include <asm/kernel-pgtable.h>
#include <asm/mmu.h>
#include <asm/sysreg.h>

#ifndef __ASSEMBLY__

@@ -142,17 +143,23 @@ static inline void set_fs(mm_segment_t fs)
#ifdef CONFIG_ARM64_SW_TTBR0_PAN
static inline void __uaccess_ttbr0_disable(void)
{
	unsigned long ttbr;
	unsigned long flags, ttbr;

	local_irq_save(flags);
	ttbr = read_sysreg(ttbr1_el1);
	ttbr &= ~TTBR_ASID_MASK;
	/* reserved_ttbr0 placed at the end of swapper_pg_dir */
	ttbr = read_sysreg(ttbr1_el1) + SWAPPER_DIR_SIZE;
	write_sysreg(ttbr, ttbr0_el1);
	write_sysreg(ttbr + SWAPPER_DIR_SIZE, ttbr0_el1);
	isb();
	/* Set reserved ASID */
	write_sysreg(ttbr, ttbr1_el1);
	isb();
	local_irq_restore(flags);
}

static inline void __uaccess_ttbr0_enable(void)
{
	unsigned long flags;
	unsigned long flags, ttbr0, ttbr1;

	/*
	 * Disable interrupts to avoid preemption between reading the 'ttbr0'

@@ -160,7 +167,17 @@ static inline void __uaccess_ttbr0_enable(void)
	 * roll-over and an update of 'ttbr0'.
	 */
	local_irq_save(flags);
	write_sysreg(current_thread_info()->ttbr0, ttbr0_el1);
	ttbr0 = READ_ONCE(current_thread_info()->ttbr0);

	/* Restore active ASID */
	ttbr1 = read_sysreg(ttbr1_el1);
	ttbr1 &= ~TTBR_ASID_MASK;	/* safety measure */
	ttbr1 |= ttbr0 & TTBR_ASID_MASK;
	write_sysreg(ttbr1, ttbr1_el1);
	isb();

	/* Restore user page table */
	write_sysreg(ttbr0, ttbr0_el1);
	isb();
	local_irq_restore(flags);
}

@@ -439,51 +456,62 @@ extern __must_check long strnlen_user(const char __user *str, long n);
#ifdef CONFIG_ARM64_SW_TTBR0_PAN
	.macro	__uaccess_ttbr0_disable, tmp1
	mrs	\tmp1, ttbr1_el1		// swapper_pg_dir
	bic	\tmp1, \tmp1, #TTBR_ASID_MASK
	add	\tmp1, \tmp1, #SWAPPER_DIR_SIZE	// reserved_ttbr0 at the end of swapper_pg_dir
	msr	ttbr0_el1, \tmp1		// set reserved TTBR0_EL1
	isb
	sub	\tmp1, \tmp1, #SWAPPER_DIR_SIZE
	msr	ttbr1_el1, \tmp1		// set reserved ASID
	isb
	.endm

	.macro	__uaccess_ttbr0_enable, tmp1
	.macro	__uaccess_ttbr0_enable, tmp1, tmp2
	get_thread_info \tmp1
	ldr	\tmp1, [\tmp1, #TSK_TI_TTBR0]	// load saved TTBR0_EL1
	mrs	\tmp2, ttbr1_el1
	extr	\tmp2, \tmp2, \tmp1, #48
	ror	\tmp2, \tmp2, #16
	msr	ttbr1_el1, \tmp2		// set the active ASID
	isb
	msr	ttbr0_el1, \tmp1		// set the non-PAN TTBR0_EL1
	isb
	.endm

	.macro	uaccess_ttbr0_disable, tmp1
	alternative_if_not ARM64_HAS_PAN
	__uaccess_ttbr0_disable \tmp1
	alternative_else_nop_endif
	.endm

	.macro	uaccess_ttbr0_enable, tmp1, tmp2
	.macro	uaccess_ttbr0_disable, tmp1, tmp2
	alternative_if_not ARM64_HAS_PAN
	save_and_disable_irq \tmp2		// avoid preemption
	__uaccess_ttbr0_enable \tmp1
	__uaccess_ttbr0_disable \tmp1
	restore_irq \tmp2
	alternative_else_nop_endif
	.endm

	.macro	uaccess_ttbr0_enable, tmp1, tmp2, tmp3
	alternative_if_not ARM64_HAS_PAN
	save_and_disable_irq \tmp3		// avoid preemption
	__uaccess_ttbr0_enable \tmp1, \tmp2
	restore_irq \tmp3
	alternative_else_nop_endif
	.endm
#else
	.macro	uaccess_ttbr0_disable, tmp1
	.macro	uaccess_ttbr0_disable, tmp1, tmp2
	.endm

	.macro	uaccess_ttbr0_enable, tmp1, tmp2
	.macro	uaccess_ttbr0_enable, tmp1, tmp2, tmp3
	.endm
#endif

/*
 * These macros are no-ops when UAO is present.
 */
	.macro	uaccess_disable_not_uao, tmp1
	uaccess_ttbr0_disable \tmp1
	.macro	uaccess_disable_not_uao, tmp1, tmp2
	uaccess_ttbr0_disable \tmp1, \tmp2
	alternative_if ARM64_ALT_PAN_NOT_UAO
	SET_PSTATE_PAN(1)
	alternative_else_nop_endif
	.endm

	.macro	uaccess_enable_not_uao, tmp1, tmp2
	uaccess_ttbr0_enable \tmp1, \tmp2
	.macro	uaccess_enable_not_uao, tmp1, tmp2, tmp3
	uaccess_ttbr0_enable \tmp1, \tmp2, \tmp3
	alternative_if ARM64_ALT_PAN_NOT_UAO
	SET_PSTATE_PAN(0)
	alternative_else_nop_endif
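__uaccess_ttbr0_enable() above restores the active ASID into TTBR1_EL1 before pointing TTBR0_EL1 back at the user page table. The field manipulation reduces to masking and OR-ing the 63:48 ASID bits; a small sketch, assuming the 16-bit TTBR_ASID_MASK layout used in this series:

	#include <assert.h>
	#include <stdint.h>

	#define TTBR_ASID_MASK	((uint64_t)0xffff << 48)

	/* Splice the ASID carried in 'ttbr0' into a TTBR1 value. */
	static uint64_t splice_asid(uint64_t ttbr1, uint64_t ttbr0)
	{
		ttbr1 &= ~TTBR_ASID_MASK;		/* drop the reserved ASID */
		ttbr1 |= ttbr0 & TTBR_ASID_MASK;	/* restore the task's ASID */
		return ttbr1;
	}

	int main(void)
	{
		uint64_t ttbr0 = 0x40001000ULL | (0x2aULL << 48);
		uint64_t ttbr1 = 0x40200000ULL;		/* reserved ASID 0 */

		assert(splice_asid(ttbr1, ttbr0) ==
		       (0x40200000ULL | (0x2aULL << 48)));
		return 0;
	}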
@@ -17,6 +17,7 @@
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */
#include <linux/acpi.h>
#include <linux/mm.h>
#include <linux/types.h>

#include <asm/cpu_ops.h>

@@ -102,7 +103,7 @@ static int acpi_parking_protocol_cpu_boot(unsigned int cpu)
	 * that read this address need to convert this address to the
	 * Boot-Loader's endianness before jumping.
	 */
	writeq_relaxed(__pa(secondary_entry), &mailbox->entry_point);
	writeq_relaxed(__pa_symbol(secondary_entry), &mailbox->entry_point);
	writel_relaxed(cpu_entry->gic_cpu_id, &mailbox->cpu_id);

	arch_send_wakeup_ipi_mask(cpumask_of(cpu));
@@ -23,6 +23,7 @@
#include <linux/dma-mapping.h>
#include <linux/kvm_host.h>
#include <linux/suspend.h>
#include <asm/fixmap.h>
#include <asm/thread_info.h>
#include <asm/memory.h>
#include <asm/smp_plat.h>

@@ -161,5 +162,9 @@ int main(void)
  DEFINE(HIBERN_PBE_ORIG,	offsetof(struct pbe, orig_address));
  DEFINE(HIBERN_PBE_ADDR,	offsetof(struct pbe, address));
  DEFINE(HIBERN_PBE_NEXT,	offsetof(struct pbe, next));
  BLANK();
#ifdef CONFIG_UNMAP_KERNEL_AT_EL0
  DEFINE(TRAMP_VALIAS,		TRAMP_VALIAS);
#endif
  return 0;
}
@@ -23,6 +23,7 @@
#include <linux/sort.h>
#include <linux/stop_machine.h>
#include <linux/types.h>
#include <linux/mm.h>
#include <asm/cpu.h>
#include <asm/cpufeature.h>
#include <asm/cpu_ops.h>

@@ -655,6 +656,39 @@ static bool runs_at_el2(const struct arm64_cpu_capabilities *entry)
	return is_kernel_in_hyp_mode();
}

#ifdef CONFIG_UNMAP_KERNEL_AT_EL0
static int __kpti_forced; /* 0: not forced, >0: forced on, <0: forced off */

static bool unmap_kernel_at_el0(const struct arm64_cpu_capabilities *entry)
{
	/* Forced on command line? */
	if (__kpti_forced) {
		pr_info_once("kernel page table isolation forced %s by command line option\n",
			     __kpti_forced > 0 ? "ON" : "OFF");
		return __kpti_forced > 0;
	}

	/* Useful for KASLR robustness */
	if (IS_ENABLED(CONFIG_RANDOMIZE_BASE))
		return true;

	return false;
}

static int __init parse_kpti(char *str)
{
	bool enabled;
	int ret = strtobool(str, &enabled);

	if (ret)
		return ret;

	__kpti_forced = enabled ? 1 : -1;
	return 0;
}
__setup("kpti=", parse_kpti);
#endif /* CONFIG_UNMAP_KERNEL_AT_EL0 */

static const struct arm64_cpu_capabilities arm64_features[] = {
	{
		.desc = "GIC system register CPU interface",

@@ -712,6 +746,12 @@ static const struct arm64_cpu_capabilities arm64_features[] = {
		.capability = ARM64_HAS_VIRT_HOST_EXTN,
		.matches = runs_at_el2,
	},
#ifdef CONFIG_UNMAP_KERNEL_AT_EL0
	{
		.capability = ARM64_UNMAP_KERNEL_AT_EL0,
		.matches = unmap_kernel_at_el0,
	},
#endif
	{},
};
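unmap_kernel_at_el0() above encodes the kpti= override as a tri-state integer: zero means no override, positive forces the mitigation on, negative forces it off, and otherwise KASLR decides. A compact userspace sketch of that decision logic (kaslr_enabled is a stand-in for IS_ENABLED(CONFIG_RANDOMIZE_BASE)):

	#include <stdbool.h>
	#include <stdio.h>

	static int kpti_forced;	/* 0: not forced, >0: forced on, <0: forced off */

	static bool want_kpti(bool kaslr_enabled)
	{
		if (kpti_forced)
			return kpti_forced > 0;
		/* Useful for KASLR robustness. */
		return kaslr_enabled;
	}

	int main(void)
	{
		kpti_forced = -1;	/* as if booted with kpti=0 */
		printf("kpti: %s\n", want_kpti(true) ? "on" : "off");
		return 0;
	}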
@@ -29,6 +29,7 @@
#include <asm/esr.h>
#include <asm/irq.h>
#include <asm/memory.h>
#include <asm/mmu.h>
#include <asm/ptrace.h>
#include <asm/thread_info.h>
#include <asm/uaccess.h>

@@ -70,8 +71,31 @@
#define BAD_FIQ		2
#define BAD_ERROR	3

	.macro kernel_entry, el, regsize = 64
	.macro kernel_ventry, el, label, regsize = 64
	.align 7
#ifdef CONFIG_UNMAP_KERNEL_AT_EL0
alternative_if ARM64_UNMAP_KERNEL_AT_EL0
	.if	\el == 0
	.if	\regsize == 64
	mrs	x30, tpidrro_el0
	msr	tpidrro_el0, xzr
	.else
	mov	x30, xzr
	.endif
	.endif
alternative_else_nop_endif
#endif

	sub	sp, sp, #S_FRAME_SIZE
	b	el\()\el\()_\label
	.endm

	.macro tramp_alias, dst, sym
	mov_q	\dst, TRAMP_VALIAS
	add	\dst, \dst, #(\sym - .entry.tramp.text)
	.endm

	.macro	kernel_entry, el, regsize = 64
	.if	\regsize == 32
	mov	w0, w0				// zero upper 32 bits of x0
	.endif

@@ -140,7 +164,7 @@ alternative_else_nop_endif

	.if	\el != 0
	mrs	x21, ttbr0_el1
	tst	x21, #0xffff << 48		// Check for the reserved ASID
	tst	x21, #TTBR_ASID_MASK		// Check for the reserved ASID
	orr	x23, x23, #PSR_PAN_BIT		// Set the emulated PAN in the saved SPSR
	b.eq	1f				// TTBR0 access already disabled
	and	x23, x23, #~PSR_PAN_BIT		// Clear the emulated PAN in the saved SPSR

@@ -205,7 +229,7 @@ alternative_else_nop_endif
	tbnz	x22, #22, 1f			// Skip re-enabling TTBR0 access if the PSR_PAN_BIT is set
	.endif

	__uaccess_ttbr0_enable x0
	__uaccess_ttbr0_enable x0, x1

	.if	\el == 0
	/*

@@ -214,7 +238,7 @@ alternative_else_nop_endif
	 * Cavium erratum 27456 (broadcast TLBI instructions may cause I-cache
	 * corruption).
	 */
	post_ttbr0_update_workaround
	bl	post_ttbr_update_workaround
	.endif
1:
	.if	\el != 0

@@ -226,24 +250,20 @@ alternative_else_nop_endif
	.if	\el == 0
	ldr	x23, [sp, #S_SP]		// load return stack pointer
	msr	sp_el0, x23
	tst	x22, #PSR_MODE32_BIT		// native task?
	b.eq	3f

#ifdef CONFIG_ARM64_ERRATUM_845719
alternative_if_not ARM64_WORKAROUND_845719
	nop
	nop
#ifdef CONFIG_PID_IN_CONTEXTIDR
	nop
#endif
alternative_else
	tbz	x22, #4, 1f
alternative_if ARM64_WORKAROUND_845719
#ifdef CONFIG_PID_IN_CONTEXTIDR
	mrs	x29, contextidr_el1
	msr	contextidr_el1, x29
#else
	msr	contextidr_el1, xzr
#endif
1:
alternative_endif
alternative_else_nop_endif
#endif
3:
	.endif

	msr	elr_el1, x21			// set up the return data

@@ -265,7 +285,21 @@ alternative_endif
	ldp	x28, x29, [sp, #16 * 14]
	ldr	lr, [sp, #S_LR]
	add	sp, sp, #S_FRAME_SIZE		// restore sp
	eret					// return to kernel

	.if	\el == 0
alternative_insn eret, nop, ARM64_UNMAP_KERNEL_AT_EL0
#ifdef CONFIG_UNMAP_KERNEL_AT_EL0
	bne	4f
	msr	far_el1, x30
	tramp_alias	x30, tramp_exit_native
	br	x30
4:
	tramp_alias	x30, tramp_exit_compat
	br	x30
#endif
	.else
	eret
	.endif
	.endm

	.macro	irq_stack_entry

@@ -343,31 +377,31 @@ tsk	.req	x28		// current thread_info

	.align	11
ENTRY(vectors)
	ventry	el1_sync_invalid		// Synchronous EL1t
	ventry	el1_irq_invalid			// IRQ EL1t
	ventry	el1_fiq_invalid			// FIQ EL1t
	ventry	el1_error_invalid		// Error EL1t
	kernel_ventry	1, sync_invalid		// Synchronous EL1t
	kernel_ventry	1, irq_invalid		// IRQ EL1t
	kernel_ventry	1, fiq_invalid		// FIQ EL1t
	kernel_ventry	1, error_invalid	// Error EL1t

	ventry	el1_sync			// Synchronous EL1h
	ventry	el1_irq				// IRQ EL1h
	ventry	el1_fiq_invalid			// FIQ EL1h
	ventry	el1_error_invalid		// Error EL1h
	kernel_ventry	1, sync			// Synchronous EL1h
	kernel_ventry	1, irq			// IRQ EL1h
	kernel_ventry	1, fiq_invalid		// FIQ EL1h
	kernel_ventry	1, error_invalid	// Error EL1h

	ventry	el0_sync			// Synchronous 64-bit EL0
	ventry	el0_irq				// IRQ 64-bit EL0
	ventry	el0_fiq_invalid			// FIQ 64-bit EL0
	ventry	el0_error_invalid		// Error 64-bit EL0
	kernel_ventry	0, sync			// Synchronous 64-bit EL0
	kernel_ventry	0, irq			// IRQ 64-bit EL0
	kernel_ventry	0, fiq_invalid		// FIQ 64-bit EL0
	kernel_ventry	0, error_invalid	// Error 64-bit EL0

#ifdef CONFIG_COMPAT
	ventry	el0_sync_compat			// Synchronous 32-bit EL0
	ventry	el0_irq_compat			// IRQ 32-bit EL0
	ventry	el0_fiq_invalid_compat		// FIQ 32-bit EL0
	ventry	el0_error_invalid_compat	// Error 32-bit EL0
	kernel_ventry	0, sync_compat, 32	// Synchronous 32-bit EL0
	kernel_ventry	0, irq_compat, 32	// IRQ 32-bit EL0
	kernel_ventry	0, fiq_invalid_compat, 32	// FIQ 32-bit EL0
	kernel_ventry	0, error_invalid_compat, 32	// Error 32-bit EL0
#else
	ventry	el0_sync_invalid		// Synchronous 32-bit EL0
	ventry	el0_irq_invalid			// IRQ 32-bit EL0
	ventry	el0_fiq_invalid			// FIQ 32-bit EL0
	ventry	el0_error_invalid		// Error 32-bit EL0
	kernel_ventry	0, sync_invalid, 32	// Synchronous 32-bit EL0
	kernel_ventry	0, irq_invalid, 32	// IRQ 32-bit EL0
	kernel_ventry	0, fiq_invalid, 32	// FIQ 32-bit EL0
	kernel_ventry	0, error_invalid, 32	// Error 32-bit EL0
#endif
END(vectors)

@@ -920,6 +954,119 @@ __ni_sys_trace:

	.popsection				// .entry.text

#ifdef CONFIG_UNMAP_KERNEL_AT_EL0
/*
 * Exception vectors trampoline.
 */
	.pushsection ".entry.tramp.text", "ax"

	.macro tramp_map_kernel, tmp
	mrs	\tmp, ttbr1_el1
	sub	\tmp, \tmp, #(SWAPPER_DIR_SIZE + RESERVED_TTBR0_SIZE)
	bic	\tmp, \tmp, #USER_ASID_FLAG
	msr	ttbr1_el1, \tmp
#ifdef CONFIG_ARCH_MSM8996
	/* ASID already in \tmp[63:48] */
	movk	\tmp, #:abs_g2_nc:(TRAMP_VALIAS >> 12)
	movk	\tmp, #:abs_g1_nc:(TRAMP_VALIAS >> 12)
	/* 2MB boundary containing the vectors, so we nobble the walk cache */
	movk	\tmp, #:abs_g0_nc:((TRAMP_VALIAS & ~(SZ_2M - 1)) >> 12)
	isb
	tlbi	vae1, \tmp
	dsb	nsh
#endif /* CONFIG_ARCH_MSM8996 */
	.endm

	.macro tramp_unmap_kernel, tmp
	mrs	\tmp, ttbr1_el1
	add	\tmp, \tmp, #(SWAPPER_DIR_SIZE + RESERVED_TTBR0_SIZE)
	orr	\tmp, \tmp, #USER_ASID_FLAG
	msr	ttbr1_el1, \tmp
	/*
	 * We avoid running the post_ttbr_update_workaround here because the
	 * user and kernel ASIDs don't have conflicting mappings, so any
	 * "blessing" as described in:
	 *
	 *   http://lkml.kernel.org/r/56BB848A.6060603@caviumnetworks.com
	 *
	 * will not hurt correctness. Whilst this may partially defeat the
	 * point of using split ASIDs in the first place, it avoids
	 * the hit of invalidating the entire I-cache on every return to
	 * userspace.
	 */
	.endm

	.macro tramp_ventry, regsize = 64
	.align	7
1:
	.if	\regsize == 64
	msr	tpidrro_el0, x30	// Restored in kernel_ventry
	.endif
	bl	2f
	b	.
2:
	tramp_map_kernel	x30
#ifdef CONFIG_RANDOMIZE_BASE
	adr	x30, tramp_vectors + PAGE_SIZE
#ifndef CONFIG_ARCH_MSM8996
	isb
#endif
	ldr	x30, [x30]
#else
	ldr	x30, =vectors
#endif
	prfm	plil1strm, [x30, #(1b - tramp_vectors)]
	msr	vbar_el1, x30
	add	x30, x30, #(1b - tramp_vectors)
	isb
	ret
	.endm

	.macro tramp_exit, regsize = 64
	adr	x30, tramp_vectors
	msr	vbar_el1, x30
	tramp_unmap_kernel	x30
	.if	\regsize == 64
	mrs	x30, far_el1
	.endif
	eret
	.endm

	.align	11
ENTRY(tramp_vectors)
	.space	0x400

	tramp_ventry
	tramp_ventry
	tramp_ventry
	tramp_ventry

	tramp_ventry	32
	tramp_ventry	32
	tramp_ventry	32
	tramp_ventry	32
END(tramp_vectors)

ENTRY(tramp_exit_native)
	tramp_exit
END(tramp_exit_native)

ENTRY(tramp_exit_compat)
	tramp_exit	32
END(tramp_exit_compat)

	.ltorg
	.popsection				// .entry.tramp.text
#ifdef CONFIG_RANDOMIZE_BASE
	.pushsection ".rodata", "a"
	.align PAGE_SHIFT
	.globl	__entry_tramp_data_start
__entry_tramp_data_start:
	.quad	vectors
	.popsection				// .rodata
#endif /* CONFIG_RANDOMIZE_BASE */
#endif /* CONFIG_UNMAP_KERNEL_AT_EL0 */

/*
 * Special system call wrappers.
 */
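tramp_map_kernel/tramp_unmap_kernel above switch TTBR1_EL1 between the trampoline and kernel page tables purely with arithmetic: the two tables are laid out back to back, so one fixed offset plus toggling the odd user ASID bit does the job. A sketch of that address/ASID arithmetic, with purely illustrative layout constants:

	#include <assert.h>
	#include <stdint.h>

	#define SWAPPER_DIR_SIZE	0x3000ULL	/* illustrative sizes */
	#define RESERVED_TTBR0_SIZE	0x1000ULL
	#define USER_ASID_FLAG		(1ULL << 48)	/* odd ASIDs are userspace */

	static uint64_t tramp_map_kernel(uint64_t ttbr1)
	{
		ttbr1 -= SWAPPER_DIR_SIZE + RESERVED_TTBR0_SIZE; /* back to swapper_pg_dir */
		ttbr1 &= ~USER_ASID_FLAG;			 /* select the kernel ASID */
		return ttbr1;
	}

	static uint64_t tramp_unmap_kernel(uint64_t ttbr1)
	{
		ttbr1 += SWAPPER_DIR_SIZE + RESERVED_TTBR0_SIZE; /* on to tramp_pg_dir */
		ttbr1 |= USER_ASID_FLAG;			 /* select the user ASID */
		return ttbr1;
	}

	int main(void)
	{
		uint64_t kernel_view = 0x40000000ULL;	/* swapper_pg_dir, even ASID */
		uint64_t user_view = tramp_unmap_kernel(kernel_view);

		assert(tramp_map_kernel(user_view) == kernel_view);
		return 0;
	}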
@@ -97,7 +97,7 @@ static void __kprobes *patch_map(void *addr, int fixmap)
	if (module && IS_ENABLED(CONFIG_DEBUG_SET_MODULE_RONX))
		page = vmalloc_to_page(addr);
	else if (!module && IS_ENABLED(CONFIG_DEBUG_RODATA))
		page = virt_to_page(addr);
		page = phys_to_page(__pa_symbol(addr));
	else
		return addr;
@@ -324,6 +324,15 @@ int copy_thread(unsigned long clone_flags, unsigned long stack_start,

	memset(&p->thread.cpu_context, 0, sizeof(struct cpu_context));

	/*
	 * In case p was allocated the same task_struct pointer as some
	 * other recently-exited task, make sure p is disassociated from
	 * any cpu that may have run that now-exited task recently.
	 * Otherwise we could erroneously skip reloading the FPSIMD
	 * registers for p.
	 */
	fpsimd_flush_task_state(p);

	if (likely(!(p->flags & PF_KTHREAD))) {
		*childregs = *current_pt_regs();
		childregs->regs[0] = 0;

@@ -369,19 +378,17 @@ int copy_thread(unsigned long clone_flags, unsigned long stack_start,

static void tls_thread_switch(struct task_struct *next)
{
	unsigned long tpidr, tpidrro;
	unsigned long tpidr;

	asm("mrs %0, tpidr_el0" : "=r" (tpidr));
	*task_user_tls(current) = tpidr;

	tpidr = *task_user_tls(next);
	tpidrro = is_compat_thread(task_thread_info(next)) ?
		  next->thread.tp_value : 0;
	if (is_compat_thread(task_thread_info(next)))
		write_sysreg(next->thread.tp_value, tpidrro_el0);
	else if (!arm64_kernel_unmapped_at_el0())
		write_sysreg(0, tpidrro_el0);

	asm(
	"	msr	tpidr_el0, %0\n"
	"	msr	tpidrro_el0, %1"
	: : "r" (tpidr), "r" (tpidrro));
	write_sysreg(*task_user_tls(next), tpidr_el0);
}

/* Restore the UAO state depending on next's addr_limit */
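tls_thread_switch() now writes TPIDRRO_EL0 only when it must: compat tasks get their TLS value, native tasks get zero unless KPTI is enabled, in which case the register doubles as the trampoline's scratch register and is left alone. A sketch of that decision table (choose_tpidrro is a hypothetical helper):

	#include <stdbool.h>
	#include <stdio.h>

	/* Decide what, if anything, to place in TPIDRRO_EL0 on a switch. */
	static bool choose_tpidrro(bool compat, bool kpti,
				   unsigned long tp_value, unsigned long *val)
	{
		if (compat) {
			*val = tp_value;	/* compat TLS register */
			return true;
		}
		if (!kpti) {
			*val = 0;		/* scrub it for native tasks */
			return true;
		}
		return false;	/* KPTI trampoline owns TPIDRRO_EL0 */
	}

	int main(void)
	{
		unsigned long v;

		if (choose_tpidrro(false, true, 0x1234, &v))
			printf("write %#lx\n", v);
		else
			printf("skip write\n");
		return 0;
	}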
@@ -19,6 +19,7 @@
#include <linux/of.h>
#include <linux/smp.h>
#include <linux/delay.h>
#include <linux/mm.h>
#include <linux/psci.h>

#include <uapi/linux/psci.h>

@@ -45,7 +46,8 @@ static int __init cpu_psci_cpu_prepare(unsigned int cpu)

static int cpu_psci_cpu_boot(unsigned int cpu)
{
	int err = psci_ops.cpu_on(cpu_logical_map(cpu), __pa(secondary_entry));
	int err = psci_ops.cpu_on(cpu_logical_map(cpu),
				  __pa_symbol(secondary_entry));
	if (err)
		pr_err("failed to boot CPU%d (%d)\n", cpu, err);
@@ -43,6 +43,7 @@
#include <linux/of_platform.h>
#include <linux/efi.h>
#include <linux/psci.h>
#include <linux/mm.h>

#include <asm/acpi.h>
#include <asm/fixmap.h>

@@ -199,10 +200,10 @@ static void __init request_standard_resources(void)
	struct memblock_region *region;
	struct resource *res;

	kernel_code.start   = virt_to_phys(_text);
	kernel_code.end     = virt_to_phys(__init_begin - 1);
	kernel_data.start   = virt_to_phys(_sdata);
	kernel_data.end     = virt_to_phys(_end - 1);
	kernel_code.start   = __pa_symbol(_text);
	kernel_code.end     = __pa_symbol(__init_begin - 1);
	kernel_data.start   = __pa_symbol(_sdata);
	kernel_data.end     = __pa_symbol(_end - 1);

	for_each_memblock(memory, region) {
		res = alloc_bootmem_low(sizeof(*res));

@@ -358,9 +359,9 @@ void __init setup_arch(char **cmdline_p)
	 * thread.
	 */
#ifdef CONFIG_THREAD_INFO_IN_TASK
	init_task.thread_info.ttbr0 = virt_to_phys(empty_zero_page);
	init_task.thread_info.ttbr0 = __pa_symbol(empty_zero_page);
#else
	init_thread_info.ttbr0 = virt_to_phys(empty_zero_page);
	init_thread_info.ttbr0 = __pa_symbol(empty_zero_page);
#endif
#endif

@@ -412,11 +413,11 @@ subsys_initcall(topology_init);
static int dump_kernel_offset(struct notifier_block *self, unsigned long v,
			      void *p)
{
	u64 const kaslr_offset = kimage_vaddr - KIMAGE_VADDR;
	const unsigned long offset = kaslr_offset();

	if (IS_ENABLED(CONFIG_RANDOMIZE_BASE) && kaslr_offset > 0) {
		pr_emerg("Kernel Offset: 0x%llx from 0x%lx\n",
			 kaslr_offset, KIMAGE_VADDR);
	if (IS_ENABLED(CONFIG_RANDOMIZE_BASE) && offset > 0) {
		pr_emerg("Kernel Offset: 0x%lx from 0x%lx\n",
			 offset, KIMAGE_VADDR);
	} else {
		pr_emerg("Kernel Offset: disabled\n");
	}
@@ -132,7 +132,7 @@ ENTRY(_cpu_resume)

#ifdef CONFIG_KASAN
	mov	x0, sp
	bl	kasan_unpoison_remaining_stack
	bl	kasan_unpoison_task_stack_below
#endif

	ldp	x19, x20, [x29, #16]
@@ -21,6 +21,7 @@
#include <linux/of.h>
#include <linux/smp.h>
#include <linux/types.h>
#include <linux/mm.h>

#include <asm/cacheflush.h>
#include <asm/cpu_ops.h>

@@ -97,7 +98,7 @@ static int smp_spin_table_cpu_prepare(unsigned int cpu)
	 * boot-loader's endianness before jumping. This is mandated by
	 * the boot protocol.
	 */
	writeq_relaxed(__pa(secondary_holding_pen), release_addr);
	writeq_relaxed(__pa_symbol(secondary_holding_pen), release_addr);
	__flush_dcache_area((__force void *)release_addr,
			    sizeof(*release_addr));
@@ -114,6 +114,7 @@ static struct vm_special_mapping vdso_spec[2];
static int __init vdso_init(void)
{
	int i;
	unsigned long pfn;

	if (memcmp(&vdso_start, "\177ELF", 4)) {
		pr_err("vDSO is not a valid ELF object!\n");

@@ -131,11 +132,14 @@ static int __init vdso_init(void)
		return -ENOMEM;

	/* Grab the vDSO data page. */
	vdso_pagelist[0] = virt_to_page(vdso_data);
	vdso_pagelist[0] = phys_to_page(__pa_symbol(vdso_data));

	/* Grab the vDSO code pages. */
	pfn = sym_to_pfn(&vdso_start);

	for (i = 0; i < vdso_pages; i++)
		vdso_pagelist[i + 1] = virt_to_page(&vdso_start + i * PAGE_SIZE);
		vdso_pagelist[i + 1] = pfn_to_page(pfn + i);

	/* Populate the special mapping structures */
	vdso_spec[0] = (struct vm_special_mapping) {

@@ -214,10 +218,8 @@ void update_vsyscall(struct timekeeper *tk)
	if (!use_syscall) {
		/* tkr_mono.cycle_last == tkr_raw.cycle_last */
		vdso_data->cs_cycle_last	= tk->tkr_mono.cycle_last;
		vdso_data->raw_time_sec		= tk->raw_time.tv_sec;
		vdso_data->raw_time_nsec	= (tk->raw_time.tv_nsec <<
						   tk->tkr_raw.shift) +
						  tk->tkr_raw.xtime_nsec;
		vdso_data->raw_time_sec		= tk->raw_sec;
		vdso_data->raw_time_nsec	= tk->tkr_raw.xtime_nsec;
		vdso_data->xtime_clock_sec	= tk->xtime_sec;
		vdso_data->xtime_clock_nsec	= tk->tkr_mono.xtime_nsec;
		vdso_data->cs_mono_mult		= tk->tkr_mono.mult;

@@ -309,7 +309,7 @@ ENTRY(__kernel_clock_getres)
	b.ne	4f
	ldr	x2, 6f
2:
	cbz	w1, 3f
	cbz	x1, 3f
	stp	xzr, x2, [x1]

3:	/* res == NULL. */
|
@ -56,6 +56,17 @@ jiffies = jiffies_64;
|
|||
#define HIBERNATE_TEXT
|
||||
#endif
|
||||
|
||||
#ifdef CONFIG_UNMAP_KERNEL_AT_EL0
|
||||
#define TRAMP_TEXT \
|
||||
. = ALIGN(PAGE_SIZE); \
|
||||
VMLINUX_SYMBOL(__entry_tramp_text_start) = .; \
|
||||
*(.entry.tramp.text) \
|
||||
. = ALIGN(PAGE_SIZE); \
|
||||
VMLINUX_SYMBOL(__entry_tramp_text_end) = .;
|
||||
#else
|
||||
#define TRAMP_TEXT
|
||||
#endif
|
||||
|
||||
/*
|
||||
* The size of the PE/COFF section that covers the kernel image, which
|
||||
* runs from stext to _edata, must be a round multiple of the PE/COFF
|
||||
|
|
@ -118,6 +129,7 @@ SECTIONS
|
|||
*(.exception.text)
|
||||
__exception_text_end = .;
|
||||
IRQENTRY_TEXT
|
||||
SOFTIRQENTRY_TEXT
|
||||
ENTRY_TEXT
|
||||
TEXT_TEXT
|
||||
SCHED_TEXT
|
||||
|
|
@ -126,6 +138,7 @@ SECTIONS
|
|||
HYPERVISOR_TEXT
|
||||
IDMAP_TEXT
|
||||
HIBERNATE_TEXT
|
||||
TRAMP_TEXT
|
||||
*(.fixup)
|
||||
*(.gnu.warning)
|
||||
. = ALIGN(16);
|
||||
|
|
@ -218,6 +231,11 @@ SECTIONS
|
|||
. += RESERVED_TTBR0_SIZE;
|
||||
#endif
|
||||
|
||||
#ifdef CONFIG_UNMAP_KERNEL_AT_EL0
|
||||
tramp_pg_dir = .;
|
||||
. += PAGE_SIZE;
|
||||
#endif
|
||||
|
||||
_end = .;
|
||||
|
||||
STABS_DEBUG
|
||||
|
|
@ -225,6 +243,10 @@ SECTIONS
|
|||
HEAD_SYMBOLS
|
||||
}
|
||||
|
||||
#ifdef CONFIG_UNMAP_KERNEL_AT_EL0
|
||||
ASSERT((__entry_tramp_text_end - __entry_tramp_text_start) == PAGE_SIZE,
|
||||
"Entry trampoline text too big")
|
||||
#endif
|
||||
/*
|
||||
* The HYP init code and ID map text can't be longer than a page each,
|
||||
* and should not cross a page boundary.
|
||||
|
|
|
|||
|
|
@@ -122,7 +122,19 @@ static int kvm_handle_guest_debug(struct kvm_vcpu *vcpu, struct kvm_run *run)
	return ret;
}

static int kvm_handle_unknown_ec(struct kvm_vcpu *vcpu, struct kvm_run *run)
{
	u32 hsr = kvm_vcpu_get_hsr(vcpu);

	kvm_pr_unimpl("Unknown exception class: hsr: %#08x -- %s\n",
		      hsr, esr_get_class_string(hsr));

	kvm_inject_undefined(vcpu);
	return 1;
}

static exit_handle_fn arm_exit_handlers[] = {
	[0 ... ESR_ELx_EC_MAX]	= kvm_handle_unknown_ec,
	[ESR_ELx_EC_WFx]	= kvm_handle_wfx,
	[ESR_ELx_EC_CP15_32]	= kvm_handle_cp15_32,
	[ESR_ELx_EC_CP15_64]	= kvm_handle_cp15_64,

@@ -148,13 +160,6 @@ static exit_handle_fn kvm_get_exit_handler(struct kvm_vcpu *vcpu)
	u32 hsr = kvm_vcpu_get_hsr(vcpu);
	u8 hsr_ec = hsr >> ESR_ELx_EC_SHIFT;

	if (hsr_ec >= ARRAY_SIZE(arm_exit_handlers) ||
	    !arm_exit_handlers[hsr_ec]) {
		kvm_err("Unknown exception class: hsr: %#08x -- %s\n",
			hsr, esr_get_class_string(hsr));
		BUG();
	}

	return arm_exit_handlers[hsr_ec];
}
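The handle_exit change above drops the BUG() on unknown exception classes by letting the table itself supply a fallback: the GNU `[first ... last]` range designator fills every slot, and specific entries then override it. A self-contained sketch of that initializer pattern (names are illustrative):

	#include <stdio.h>

	typedef int (*handler_fn)(int);

	static int handle_unknown(int ec) { (void)ec; return -1; }
	static int handle_wfx(int ec)     { (void)ec; return 0; }

	#define EC_MAX	0x3f
	#define EC_WFX	0x01

	/* GNU range designator: every slot defaults to the fallback handler. */
	static handler_fn handlers[EC_MAX + 1] = {
		[0 ... EC_MAX]	= handle_unknown,
		[EC_WFX]	= handle_wfx,
	};

	int main(void)
	{
		printf("%d %d\n", handlers[EC_WFX](EC_WFX),
		       handlers[0x30](0x30));	/* 0 -1 */
		return 0;
	}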
@@ -30,7 +30,7 @@
 * Alignment fixed up by hardware.
 */
ENTRY(__clear_user)
	uaccess_enable_not_uao x2, x3
	uaccess_enable_not_uao x2, x3, x4
	mov	x2, x1			// save the size for fixup return
	subs	x1, x1, #8
	b.mi	2f

@@ -50,7 +50,7 @@ uao_user_alternative 9f, strh, sttrh, wzr, x0, 2
	b.mi	5f
uao_user_alternative 9f, strb, sttrb, wzr, x0, 0
5:	mov	x0, #0
	uaccess_disable_not_uao x2
	uaccess_disable_not_uao x2, x3
	ret
ENDPROC(__clear_user)

@@ -64,10 +64,10 @@

end	.req	x5
ENTRY(__arch_copy_from_user)
	uaccess_enable_not_uao x3, x4
	uaccess_enable_not_uao x3, x4, x5
	add	end, x0, x2
#include "copy_template.S"
	uaccess_disable_not_uao x3
	uaccess_disable_not_uao x3, x4
	mov	x0, #0				// Nothing to copy
	ret
ENDPROC(__arch_copy_from_user)

@@ -65,10 +65,10 @@

end	.req	x5
ENTRY(__copy_in_user)
	uaccess_enable_not_uao x3, x4
	uaccess_enable_not_uao x3, x4, x5
	add	end, x0, x2
#include "copy_template.S"
	uaccess_disable_not_uao x3
	uaccess_disable_not_uao x3, x4
	mov	x0, #0
	ret
ENDPROC(__copy_in_user)

@@ -63,10 +63,10 @@

end	.req	x5
ENTRY(__arch_copy_to_user)
	uaccess_enable_not_uao x3, x4
	uaccess_enable_not_uao x3, x4, x5
	add	end, x0, x2
#include "copy_template.S"
	uaccess_disable_not_uao x3
	uaccess_disable_not_uao x3, x4
	mov	x0, #0
	ret
ENDPROC(__arch_copy_to_user)

@@ -49,7 +49,7 @@ ENTRY(flush_icache_range)
 * - end     - virtual end address of region
 */
ENTRY(__flush_cache_user_range)
	uaccess_ttbr0_enable x2, x3
	uaccess_ttbr0_enable x2, x3, x4
	dcache_line_size x2, x3
	sub	x3, x2, #1
	bic	x4, x0, x3

@@ -72,7 +72,7 @@ USER(9f, ic	ivau, x4 )		// invalidate I line PoU
	isb
	mov	x0, #0
1:
	uaccess_ttbr0_disable x1
	uaccess_ttbr0_disable x1, x2
	ret
9:
	mov	x0, #-EFAULT
@@ -38,7 +38,16 @@ static cpumask_t tlb_flush_pending;

#define ASID_MASK		(~GENMASK(asid_bits - 1, 0))
#define ASID_FIRST_VERSION	(1UL << asid_bits)
#define NUM_USER_ASIDS		ASID_FIRST_VERSION

#ifdef CONFIG_UNMAP_KERNEL_AT_EL0
#define NUM_USER_ASIDS		(ASID_FIRST_VERSION >> 1)
#define asid2idx(asid)		(((asid) & ~ASID_MASK) >> 1)
#define idx2asid(idx)		(((idx) << 1) & ~ASID_MASK)
#else
#define NUM_USER_ASIDS		(ASID_FIRST_VERSION)
#define asid2idx(asid)		((asid) & ~ASID_MASK)
#define idx2asid(idx)		asid2idx(idx)
#endif

static void flush_context(unsigned int cpu)
{

@@ -65,7 +74,7 @@ static void flush_context(unsigned int cpu)
	 */
	if (asid == 0)
		asid = per_cpu(reserved_asids, i);
	__set_bit(asid & ~ASID_MASK, asid_map);
	__set_bit(asid2idx(asid), asid_map);
	per_cpu(reserved_asids, i) = asid;
}

@@ -120,16 +129,16 @@ static u64 new_context(struct mm_struct *mm, unsigned int cpu)
		 * We had a valid ASID in a previous life, so try to re-use
		 * it if possible.
		 */
		asid &= ~ASID_MASK;
		if (!__test_and_set_bit(asid, asid_map))
		if (!__test_and_set_bit(asid2idx(asid), asid_map))
			return newasid;
	}

	/*
	 * Allocate a free ASID. If we can't find one, take a note of the
	 * currently active ASIDs and mark the TLBs as requiring flushes.
	 * We always count from ASID #1, as we use ASID #0 when setting a
	 * reserved TTBR0 for the init_mm.
	 * currently active ASIDs and mark the TLBs as requiring flushes. We
	 * always count from ASID #2 (index 1), as we use ASID #0 when setting
	 * a reserved TTBR0 for the init_mm and we allocate ASIDs in even/odd
	 * pairs.
	 */
	asid = find_next_zero_bit(asid_map, NUM_USER_ASIDS, cur_idx);
	if (asid != NUM_USER_ASIDS)

@@ -146,7 +155,7 @@ static u64 new_context(struct mm_struct *mm, unsigned int cpu)
set_asid:
	__set_bit(asid, asid_map);
	cur_idx = asid;
	return asid | generation;
	return idx2asid(asid) | generation;
}

void check_and_switch_context(struct mm_struct *mm, unsigned int cpu)

@@ -190,6 +199,15 @@ switch_mm_fastpath:
	cpu_switch_mm(mm->pgd, mm);
}

/* Errata workaround post TTBRx_EL1 update. */
asmlinkage void post_ttbr_update_workaround(void)
{
	asm(ALTERNATIVE("nop; nop; nop",
			"ic iallu; dsb nsh; isb",
			ARM64_WORKAROUND_CAVIUM_27456,
			CONFIG_CAVIUM_ERRATUM_27456));
}

static int asids_init(void)
{
	int fld = cpuid_feature_extract_field(read_cpuid(SYS_ID_AA64MMFR0_EL1), 4);
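With KPTI, each context consumes a pair of ASIDs: the even value for the kernel's view and the odd value for userspace, which is why the allocator above halves NUM_USER_ASIDS and converts between bitmap index and ASID with a one-bit shift. A sketch of the asid2idx/idx2asid round-trip for a 16-bit ASID space:

	#include <assert.h>

	#define ASID_BITS		16
	#define ASID_FIRST_VERSION	(1UL << ASID_BITS)
	#define ASID_MASK		(~(ASID_FIRST_VERSION - 1))
	#define NUM_USER_ASIDS		(ASID_FIRST_VERSION >> 1)

	#define asid2idx(asid)		(((asid) & ~ASID_MASK) >> 1)
	#define idx2asid(idx)		(((idx) << 1) & ~ASID_MASK)

	int main(void)
	{
		unsigned long idx;

		/* Every allocated ASID is even; ASID | 1 is the user twin. */
		for (idx = 1; idx < NUM_USER_ASIDS; idx++) {
			assert((idx2asid(idx) & 1) == 0);
			assert(asid2idx(idx2asid(idx)) == idx);
		}
		return 0;
	}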
@@ -37,6 +37,7 @@
#include <linux/swiotlb.h>
#include <linux/kexec.h>
#include <linux/crash_dump.h>
#include <linux/mm.h>

#include <asm/boot.h>
#include <asm/fixmap.h>

@@ -364,8 +365,8 @@ void __init arm64_memblock_init(void)
	 * linear mapping. Take care not to clip the kernel which may be
	 * high in memory.
	 */
	memblock_remove(max_t(u64, memstart_addr + linear_region_size, __pa(_end)),
			ULLONG_MAX);
	memblock_remove(max_t(u64, memstart_addr + linear_region_size,
			__pa_symbol(_end)), ULLONG_MAX);
	if (memstart_addr + linear_region_size < memblock_end_of_DRAM()) {
		/* ensure that memstart_addr remains sufficiently aligned */
		memstart_addr = round_up(memblock_end_of_DRAM() - linear_region_size,

@@ -380,7 +381,7 @@ void __init arm64_memblock_init(void)
	 */
	if (memory_limit != (phys_addr_t)ULLONG_MAX) {
		memblock_enforce_memory_limit(memory_limit);
		memblock_add(__pa(_text), (u64)(_end - _text));
		memblock_add(__pa_symbol(_text), (u64)(_end - _text));
	}

	if (IS_ENABLED(CONFIG_RANDOMIZE_BASE)) {

@@ -404,7 +405,7 @@ void __init arm64_memblock_init(void)
	 * Register the kernel text, kernel data, initrd, and initial
	 * pagetables with memblock.
	 */
	memblock_reserve(__pa(_text), _end - _text);
	memblock_reserve(__pa_symbol(_text), _end - _text);
#ifdef CONFIG_BLK_DEV_INITRD
	if (initrd_start) {
		memblock_reserve(initrd_start, initrd_end - initrd_start);

@@ -427,6 +428,7 @@ void __init arm64_memblock_init(void)

	reserve_elfcorehdr();

	high_memory = __va(memblock_end_of_DRAM() - 1) + 1;
	dma_contiguous_reserve(arm64_dma_phys_limit);

	memblock_allow_resize();

@@ -451,7 +453,6 @@ void __init bootmem_init(void)
	sparse_init();
	zone_sizes_init(min, max);

	high_memory = __va((max << PAGE_SHIFT) - 1) + 1;
	max_pfn = max_low_pfn = max;
}
@@ -15,6 +15,7 @@
#include <linux/kernel.h>
#include <linux/memblock.h>
#include <linux/start_kernel.h>
#include <linux/mm.h>

#include <asm/mmu_context.h>
#include <asm/kernel-pgtable.h>

@@ -26,6 +27,13 @@

static pgd_t tmp_pg_dir[PTRS_PER_PGD] __initdata __aligned(PGD_SIZE);

/*
 * The p*d_populate functions call virt_to_phys implicitly so they can't be used
 * directly on kernel symbols (bm_p*d). All the early functions are called too
 * early to use lm_alias so __p*d_populate functions must be used to populate
 * with the physical address from __pa_symbol.
 */

static void __init kasan_early_pte_populate(pmd_t *pmd, unsigned long addr,
					    unsigned long end)
{

@@ -33,12 +41,13 @@ static void __init kasan_early_pte_populate(pmd_t *pmd, unsigned long addr,
	unsigned long next;

	if (pmd_none(*pmd))
		pmd_populate_kernel(&init_mm, pmd, kasan_zero_pte);
		__pmd_populate(pmd, __pa_symbol(kasan_zero_pte),
			       PMD_TYPE_TABLE);

	pte = pte_offset_kimg(pmd, addr);
	do {
		next = addr + PAGE_SIZE;
		set_pte(pte, pfn_pte(virt_to_pfn(kasan_zero_page),
		set_pte(pte, pfn_pte(sym_to_pfn(kasan_zero_page),
				     PAGE_KERNEL));
	} while (pte++, addr = next, addr != end && pte_none(*pte));
}

@@ -51,7 +60,8 @@ static void __init kasan_early_pmd_populate(pud_t *pud,
	unsigned long next;

	if (pud_none(*pud))
		pud_populate(&init_mm, pud, kasan_zero_pmd);
		__pud_populate(pud, __pa_symbol(kasan_zero_pmd),
			       PMD_TYPE_TABLE);

	pmd = pmd_offset_kimg(pud, addr);
	do {

@@ -68,7 +78,8 @@ static void __init kasan_early_pud_populate(pgd_t *pgd,
	unsigned long next;

	if (pgd_none(*pgd))
		pgd_populate(&init_mm, pgd, kasan_zero_pud);
		__pgd_populate(pgd, __pa_symbol(kasan_zero_pud),
			       PUD_TYPE_TABLE);

	pud = pud_offset_kimg(pgd, addr);
	do {

@@ -148,7 +159,7 @@ void __init kasan_init(void)
	 */
	memcpy(tmp_pg_dir, swapper_pg_dir, sizeof(tmp_pg_dir));
	dsb(ishst);
	cpu_replace_ttbr1(tmp_pg_dir);
	cpu_replace_ttbr1(lm_alias(tmp_pg_dir));

	clear_pgds(KASAN_SHADOW_START, KASAN_SHADOW_END);

@@ -199,10 +210,10 @@ void __init kasan_init(void)
	 */
	for (i = 0; i < PTRS_PER_PTE; i++)
		set_pte(&kasan_zero_pte[i],
			pfn_pte(virt_to_pfn(kasan_zero_page), PAGE_KERNEL_RO));
			pfn_pte(sym_to_pfn(kasan_zero_page), PAGE_KERNEL_RO));

	memset(kasan_zero_page, 0, PAGE_SIZE);
	cpu_replace_ttbr1(swapper_pg_dir);
	cpu_replace_ttbr1(lm_alias(swapper_pg_dir));

	/* At this point kasan is fully initialized. Enable error messages */
	init_task.kasan_depth = 0;
@@ -31,6 +31,7 @@
#include <linux/io.h>
#include <linux/slab.h>
#include <linux/stop_machine.h>
#include <linux/mm.h>

#include <asm/barrier.h>
#include <asm/cputype.h>

@@ -380,8 +381,8 @@ static void __init __map_memblock(pgd_t *pgd, phys_addr_t start,

static void __init map_mem(pgd_t *pgd)
{
	unsigned long kernel_start = __pa(_text);
	unsigned long kernel_end = __pa(__init_begin);
	unsigned long kernel_start = __pa_symbol(_text);
	unsigned long kernel_end = __pa_symbol(__init_begin);
	struct memblock_region *reg;

	/*

@@ -441,14 +442,15 @@ void mark_rodata_ro(void)
	unsigned long section_size;

	section_size = (unsigned long)_etext - (unsigned long)_text;
	create_mapping_late(__pa(_text), (unsigned long)_text,
	create_mapping_late(__pa_symbol(_text), (unsigned long)_text,
			    section_size, PAGE_KERNEL_ROX);
	/*
	 * mark .rodata as read only. Use __init_begin rather than __end_rodata
	 * to cover NOTES and EXCEPTION_TABLE.
	 */
	section_size = (unsigned long)__init_begin - (unsigned long)__start_rodata;
	create_mapping_late(__pa(__start_rodata), (unsigned long)__start_rodata,
	create_mapping_late(__pa_symbol(__start_rodata),
			    (unsigned long)__start_rodata,
			    section_size, PAGE_KERNEL_RO);
}

@@ -465,7 +467,7 @@ void fixup_init(void)
static void __init map_kernel_segment(pgd_t *pgd, void *va_start, void *va_end,
				      pgprot_t prot, struct vm_struct *vma)
{
	phys_addr_t pa_start = __pa(va_start);
	phys_addr_t pa_start = __pa_symbol(va_start);
	unsigned long size = va_end - va_start;

	BUG_ON(!PAGE_ALIGNED(pa_start));

@@ -483,6 +485,37 @@ static void __init map_kernel_segment(pgd_t *pgd, void *va_start, void *va_end,
	vm_area_add_early(vma);
}

#ifdef CONFIG_UNMAP_KERNEL_AT_EL0
static int __init map_entry_trampoline(void)
{
	extern char __entry_tramp_text_start[];

	pgprot_t prot = PAGE_KERNEL_EXEC;
	phys_addr_t pa_start = __pa_symbol(__entry_tramp_text_start);

	/* The trampoline is always mapped and can therefore be global */
	pgprot_val(prot) &= ~PTE_NG;

	/* Map only the text into the trampoline page table */
	memset(tramp_pg_dir, 0, PGD_SIZE);
	__create_pgd_mapping(tramp_pg_dir, pa_start, TRAMP_VALIAS, PAGE_SIZE,
			     prot, late_pgtable_alloc, 0);

	/* Map both the text and data into the kernel page table */
	__set_fixmap(FIX_ENTRY_TRAMP_TEXT, pa_start, prot);
	if (IS_ENABLED(CONFIG_RANDOMIZE_BASE)) {
		extern char __entry_tramp_data_start[];

		__set_fixmap(FIX_ENTRY_TRAMP_DATA,
			     __pa_symbol(__entry_tramp_data_start),
			     PAGE_KERNEL_RO);
	}

	return 0;
}
core_initcall(map_entry_trampoline);
#endif

/*
 * Create fine-grained mappings for the kernel.
 */

@@ -513,7 +546,7 @@ static void __init map_kernel(pgd_t *pgd)
		 */
		BUG_ON(!IS_ENABLED(CONFIG_ARM64_16K_PAGES));
		set_pud(pud_set_fixmap_offset(pgd, FIXADDR_START),
			__pud(__pa(bm_pmd) | PUD_TYPE_TABLE));
			__pud(__pa_symbol(bm_pmd) | PUD_TYPE_TABLE));
		pud_clear_fixmap();
	} else {
		BUG();

@@ -544,7 +577,7 @@ void __init paging_init(void)
	 */
	cpu_replace_ttbr1(__va(pgd_phys));
	memcpy(swapper_pg_dir, pgd, PAGE_SIZE);
	cpu_replace_ttbr1(swapper_pg_dir);
	cpu_replace_ttbr1(lm_alias(swapper_pg_dir));

	pgd_clear_fixmap();
	memblock_free(pgd_phys, PAGE_SIZE);

@@ -553,7 +586,7 @@ void __init paging_init(void)
	 * We only reuse the PGD from the swapper_pg_dir, not the pud + pmd
	 * allocated with it.
	 */
	memblock_free(__pa(swapper_pg_dir) + PAGE_SIZE,
	memblock_free(__pa_symbol(swapper_pg_dir) + PAGE_SIZE,
		      SWAPPER_DIR_SIZE - PAGE_SIZE);

	bootmem_init();

@@ -666,6 +699,12 @@ static inline pte_t * fixmap_pte(unsigned long addr)
	return &bm_pte[pte_index(addr)];
}

/*
 * The p*d_populate functions call virt_to_phys implicitly so they can't be used
 * directly on kernel symbols (bm_p*d). This function is called too early to use
 * lm_alias so __p*d_populate functions must be used to populate with the
 * physical address from __pa_symbol.
 */
void __init early_fixmap_init(void)
{
	pgd_t *pgd;

@@ -675,7 +714,7 @@ void __init early_fixmap_init(void)

	pgd = pgd_offset_k(addr);
	if (CONFIG_PGTABLE_LEVELS > 3 &&
	    !(pgd_none(*pgd) || pgd_page_paddr(*pgd) == __pa(bm_pud))) {
	    !(pgd_none(*pgd) || pgd_page_paddr(*pgd) == __pa_symbol(bm_pud))) {
		/*
		 * We only end up here if the kernel mapping and the fixmap
		 * share the top level pgd entry, which should only happen on

@@ -684,12 +723,15 @@ void __init early_fixmap_init(void)
		BUG_ON(!IS_ENABLED(CONFIG_ARM64_16K_PAGES));
		pud = pud_offset_kimg(pgd, addr);
	} else {
		pgd_populate(&init_mm, pgd, bm_pud);
		if (pgd_none(*pgd))
			__pgd_populate(pgd, __pa_symbol(bm_pud),
				       PUD_TYPE_TABLE);
		pud = fixmap_pud(addr);
	}
	pud_populate(&init_mm, pud, bm_pmd);
	if (pud_none(*pud))
		__pud_populate(pud, __pa_symbol(bm_pmd), PMD_TYPE_TABLE);
	pmd = fixmap_pmd(addr);
	pmd_populate_kernel(&init_mm, pmd, bm_pte);
	__pmd_populate(pmd, __pa_symbol(bm_pte), PMD_TYPE_TABLE);

	/*
	 * The boot-ioremap range spans multiple pmds, for which
@@ -138,12 +138,17 @@ ENDPROC(cpu_do_resume)
 *	- pgd_phys - physical address of new TTB
 */
ENTRY(cpu_do_switch_mm)
	mrs	x2, ttbr1_el1
	mmid	x1, x1				// get mm->context.id
	bfi	x0, x1, #48, #16		// set the ASID
	msr	ttbr0_el1, x0			// set TTBR0
#ifdef CONFIG_ARM64_SW_TTBR0_PAN
	bfi	x0, x1, #48, #16		// set the ASID field in TTBR0
#endif
	bfi	x2, x1, #48, #16		// set the ASID
	msr	ttbr1_el1, x2			// in TTBR1 (since TCR.A1 is set)
	isb
	post_ttbr0_update_workaround
	ret
	msr	ttbr0_el1, x0			// now update TTBR0
	isb
	b	post_ttbr_update_workaround	// Back to C code...
ENDPROC(cpu_do_switch_mm)

	.pushsection ".idmap.text", "ax"

@@ -224,7 +229,7 @@ ENTRY(__cpu_setup)
	 * both user and kernel.
	 */
	ldr	x10, =TCR_TxSZ(VA_BITS) | TCR_CACHE_FLAGS | TCR_SMP_FLAGS | \
			TCR_TG_FLAGS | TCR_ASID16 | TCR_TBI0
			TCR_TG_FLAGS | TCR_ASID16 | TCR_TBI0 | TCR_A1
	tcr_set_idmap_t0sz	x10, x9

	/*
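cpu_do_switch_mm() now keeps the ASID in TTBR1_EL1 (the TCR_A1 bit set above tells the MMU to take the current ASID from TTBR1 rather than TTBR0), so the switch becomes: insert the new ASID into TTBR1, synchronize, then install the new table base in TTBR0. The bfi instruction's field insert is plain shift-and-mask; a sketch:

	#include <assert.h>
	#include <stdint.h>

	/* Equivalent of "bfi x2, x1, #48, #16": insert a 16-bit ASID at bit 48. */
	static uint64_t set_asid_field(uint64_t ttbr, uint64_t asid)
	{
		ttbr &= ~((uint64_t)0xffff << 48);
		ttbr |= (asid & 0xffff) << 48;
		return ttbr;
	}

	int main(void)
	{
		uint64_t ttbr1 = set_asid_field(0x40200000ULL, 0x2a);

		assert(ttbr1 >> 48 == 0x2a);
		return 0;
	}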
@@ -98,12 +98,12 @@ ENTRY(privcmd_call)
	 * need the explicit uaccess_enable/disable if the TTBR0 PAN emulation
	 * is enabled (it implies that hardware UAO and PAN disabled).
	 */
	uaccess_ttbr0_enable x6, x7
	uaccess_ttbr0_enable x6, x7, x8
	hvc XEN_IMM

	/*
	 * Disable userspace access from kernel once the hyp call completed.
	 */
	uaccess_ttbr0_disable x6
	uaccess_ttbr0_disable x6, x7
	ret
ENDPROC(privcmd_call);
@@ -318,11 +318,14 @@ config BF53x

config GPIO_ADI
	def_bool y
	depends on !PINCTRL
	depends on (BF51x || BF52x || BF53x || BF538 || BF539 || BF561)

config PINCTRL
config PINCTRL_BLACKFIN_ADI2
	def_bool y
	depends on BF54x || BF60x
	depends on (BF54x || BF60x)
	select PINCTRL
	select PINCTRL_ADI2

config MEM_MT48LC64M4A2FB_7E
	bool

@@ -17,6 +17,7 @@ config DEBUG_VERBOSE

config DEBUG_MMRS
	tristate "Generate Blackfin MMR tree"
	depends on !PINCTRL
	select DEBUG_FS
	help
	  Create a tree of Blackfin MMRs via the debugfs tree. If

@@ -35,6 +35,7 @@ SECTIONS
#endif
		LOCK_TEXT
		IRQENTRY_TEXT
		SOFTIRQENTRY_TEXT
		KPROBES_TEXT
#ifdef CONFIG_ROMKERNEL
		__sinittext = .;

@@ -72,6 +72,7 @@ SECTIONS
	SCHED_TEXT
	LOCK_TEXT
	IRQENTRY_TEXT
	SOFTIRQENTRY_TEXT
	KPROBES_TEXT
	*(.fixup)
	*(.gnu.warning)

@@ -24,6 +24,7 @@ SECTIONS
		LOCK_TEXT
		KPROBES_TEXT
		IRQENTRY_TEXT
		SOFTIRQENTRY_TEXT
		*(.text.*)
		*(.gnu.warning)
	}

@@ -36,6 +36,7 @@ SECTIONS {
		LOCK_TEXT
		KPROBES_TEXT
		IRQENTRY_TEXT
		SOFTIRQENTRY_TEXT
		. = ALIGN (4) ;
		_etext = . ;
	}
@@ -664,6 +664,18 @@ int mips_set_process_fp_mode(struct task_struct *task, unsigned int value)
	unsigned long switch_count;
	struct task_struct *t;

	/* If nothing to change, return right away, successfully.  */
	if (value == mips_get_process_fp_mode(task))
		return 0;

	/* Only accept a mode change if 64-bit FP enabled for o32.  */
	if (!IS_ENABLED(CONFIG_MIPS_O32_FP64_SUPPORT))
		return -EOPNOTSUPP;

	/* And only for o32 tasks.  */
	if (IS_ENABLED(CONFIG_64BIT) && !test_thread_flag(TIF_32BIT_REGS))
		return -EOPNOTSUPP;

	/* Check the value is valid */
	if (value & ~known_bits)
		return -EOPNOTSUPP;
@ -439,25 +439,38 @@ static int gpr64_set(struct task_struct *target,
|
|||
|
||||
#endif /* CONFIG_64BIT */
|
||||
|
||||
static int fpr_get(struct task_struct *target,
|
||||
const struct user_regset *regset,
|
||||
unsigned int pos, unsigned int count,
|
||||
void *kbuf, void __user *ubuf)
|
||||
/*
|
||||
* Copy the floating-point context to the supplied NT_PRFPREG buffer,
|
||||
* !CONFIG_CPU_HAS_MSA variant. FP context's general register slots
|
||||
* correspond 1:1 to buffer slots. Only general registers are copied.
|
||||
*/
|
||||
static int fpr_get_fpa(struct task_struct *target,
|
||||
unsigned int *pos, unsigned int *count,
|
||||
void **kbuf, void __user **ubuf)
|
||||
{
|
||||
unsigned i;
|
||||
int err;
|
||||
return user_regset_copyout(pos, count, kbuf, ubuf,
|
||||
&target->thread.fpu,
|
||||
0, NUM_FPU_REGS * sizeof(elf_fpreg_t));
|
||||
}
|
||||
|
||||
/*
|
||||
* Copy the floating-point context to the supplied NT_PRFPREG buffer,
|
||||
* CONFIG_CPU_HAS_MSA variant. Only lower 64 bits of FP context's
|
||||
* general register slots are copied to buffer slots. Only general
|
||||
* registers are copied.
|
||||
*/
|
||||
static int fpr_get_msa(struct task_struct *target,
|
||||
unsigned int *pos, unsigned int *count,
|
||||
void **kbuf, void __user **ubuf)
|
||||
{
|
||||
unsigned int i;
|
||||
u64 fpr_val;
|
||||
int err;
|
||||
|
||||
/* XXX fcr31 */
|
||||
|
||||
if (sizeof(target->thread.fpu.fpr[i]) == sizeof(elf_fpreg_t))
|
||||
return user_regset_copyout(&pos, &count, &kbuf, &ubuf,
|
||||
&target->thread.fpu,
|
||||
0, sizeof(elf_fpregset_t));
|
||||
|
||||
BUILD_BUG_ON(sizeof(fpr_val) != sizeof(elf_fpreg_t));
|
||||
for (i = 0; i < NUM_FPU_REGS; i++) {
|
||||
fpr_val = get_fpr64(&target->thread.fpu.fpr[i], 0);
|
||||
err = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
|
||||
err = user_regset_copyout(pos, count, kbuf, ubuf,
|
||||
&fpr_val, i * sizeof(elf_fpreg_t),
|
||||
(i + 1) * sizeof(elf_fpreg_t));
|
||||
if (err)
|
||||
|
|
@ -467,27 +480,64 @@ static int fpr_get(struct task_struct *target,
|
|||
return 0;
|
||||
}
|
||||
|
||||
static int fpr_set(struct task_struct *target,
|
||||
/*
|
||||
* Copy the floating-point context to the supplied NT_PRFPREG buffer.
|
||||
* Choose the appropriate helper for general registers, and then copy
|
||||
* the FCSR register separately.
|
||||
*/
|
||||
static int fpr_get(struct task_struct *target,
|
||||
const struct user_regset *regset,
|
||||
unsigned int pos, unsigned int count,
|
||||
const void *kbuf, const void __user *ubuf)
|
||||
void *kbuf, void __user *ubuf)
|
||||
{
|
||||
unsigned i;
|
||||
const int fcr31_pos = NUM_FPU_REGS * sizeof(elf_fpreg_t);
|
||||
int err;
|
||||
|
||||
if (sizeof(target->thread.fpu.fpr[0]) == sizeof(elf_fpreg_t))
|
||||
err = fpr_get_fpa(target, &pos, &count, &kbuf, &ubuf);
|
||||
else
|
||||
err = fpr_get_msa(target, &pos, &count, &kbuf, &ubuf);
|
||||
if (err)
|
||||
return err;
|
||||
|
||||
err = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
|
||||
&target->thread.fpu.fcr31,
|
||||
fcr31_pos, fcr31_pos + sizeof(u32));
|
||||
|
||||
return err;
|
||||
}
 
+/*
+ * Copy the supplied NT_PRFPREG buffer to the floating-point context,
+ * !CONFIG_CPU_HAS_MSA variant.  Buffer slots correspond 1:1 to FP
+ * context's general register slots.  Only general registers are copied.
+ */
+static int fpr_set_fpa(struct task_struct *target,
+                       unsigned int *pos, unsigned int *count,
+                       const void **kbuf, const void __user **ubuf)
+{
+        return user_regset_copyin(pos, count, kbuf, ubuf,
+                                  &target->thread.fpu,
+                                  0, NUM_FPU_REGS * sizeof(elf_fpreg_t));
+}
+
+/*
+ * Copy the supplied NT_PRFPREG buffer to the floating-point context,
+ * CONFIG_CPU_HAS_MSA variant.  Buffer slots are copied to lower 64
+ * bits only of FP context's general register slots.  Only general
+ * registers are copied.
+ */
+static int fpr_set_msa(struct task_struct *target,
+                       unsigned int *pos, unsigned int *count,
+                       const void **kbuf, const void __user **ubuf)
+{
+        unsigned int i;
         u64 fpr_val;
 
-        /* XXX fcr31  */
-
-        init_fp_ctx(target);
-
-        if (sizeof(target->thread.fpu.fpr[i]) == sizeof(elf_fpreg_t))
-                return user_regset_copyin(&pos, &count, &kbuf, &ubuf,
-                                          &target->thread.fpu,
-                                          0, sizeof(elf_fpregset_t));
         int err;
 
         BUILD_BUG_ON(sizeof(fpr_val) != sizeof(elf_fpreg_t));
-        for (i = 0; i < NUM_FPU_REGS && count >= sizeof(elf_fpreg_t); i++) {
-                err = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
+        for (i = 0; i < NUM_FPU_REGS && *count > 0; i++) {
+                err = user_regset_copyin(pos, count, kbuf, ubuf,
                                          &fpr_val, i * sizeof(elf_fpreg_t),
                                          (i + 1) * sizeof(elf_fpreg_t));
                 if (err)
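
Note: the helpers above and the fpr_set() rewrite below all lean on one layout fact: an NT_PRFPREG view is NUM_FPU_REGS 64-bit register slots followed by the 32-bit FCSR at byte offset NUM_FPU_REGS * sizeof(elf_fpreg_t). A minimal standalone sketch of that offset arithmetic; NUM_FPU_REGS = 32 and the 64-bit elf_fpreg_t are assumptions mirroring the MIPS ELF headers, not shown in this diff:

    #include <stdio.h>
    #include <stdint.h>

    #define NUM_FPU_REGS 32                 /* assumed, per MIPS ELF ABI */
    typedef uint64_t elf_fpreg_t;           /* assumed 64-bit slot size */

    int main(void)
    {
            /* Same computation as fcr31_pos in fpr_get()/fpr_set(). */
            size_t fcr31_pos = NUM_FPU_REGS * sizeof(elf_fpreg_t);

            printf("FP register slots: bytes [0, %zu)\n", fcr31_pos);
            printf("FCSR (fcr31):      bytes [%zu, %zu)\n",
                   fcr31_pos, fcr31_pos + sizeof(uint32_t));
            return 0;
    }
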
@@ -498,6 +548,53 @@ static int fpr_set(struct task_struct *target,
         return 0;
 }
 
+/*
+ * Copy the supplied NT_PRFPREG buffer to the floating-point context.
+ * Choose the appropriate helper for general registers, and then copy
+ * the FCSR register separately.
+ *
+ * We optimize for the case where `count % sizeof(elf_fpreg_t) == 0',
+ * which is supposed to have been guaranteed by the kernel before
+ * calling us, e.g. in `ptrace_regset'.  We enforce that requirement,
+ * so that we can safely avoid preinitializing temporaries for
+ * partial register writes.
+ */
+static int fpr_set(struct task_struct *target,
+                   const struct user_regset *regset,
+                   unsigned int pos, unsigned int count,
+                   const void *kbuf, const void __user *ubuf)
+{
+        const int fcr31_pos = NUM_FPU_REGS * sizeof(elf_fpreg_t);
+        u32 fcr31;
+        int err;
+
+        BUG_ON(count % sizeof(elf_fpreg_t));
+
+        if (pos + count > sizeof(elf_fpregset_t))
+                return -EIO;
+
+        init_fp_ctx(target);
+
+        if (sizeof(target->thread.fpu.fpr[0]) == sizeof(elf_fpreg_t))
+                err = fpr_set_fpa(target, &pos, &count, &kbuf, &ubuf);
+        else
+                err = fpr_set_msa(target, &pos, &count, &kbuf, &ubuf);
+        if (err)
+                return err;
+
+        if (count > 0) {
+                err = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
+                                         &fcr31,
+                                         fcr31_pos, fcr31_pos + sizeof(u32));
+                if (err)
+                        return err;
+
+                ptrace_setfcr31(target, fcr31);
+        }
+
+        return err;
+}
 
 enum mips_regset {
         REGSET_GPR,
         REGSET_FPR,
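
Note: fpr_set() above adds two guards before it touches any state: the write length must be a whole number of register slots, and pos + count must stay inside the regset. A minimal userspace model of those checks (the 33-slot regset size is an assumption matching ELF_NFPREG = NUM_FPU_REGS + 1, which is not visible in this hunk):

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    #define NUM_FPU_REGS 32                 /* assumed, per MIPS ELF ABI */
    typedef uint64_t elf_fpreg_t;
    /* 32 register slots plus one slot holding the 32-bit FCSR (assumed,
     * matching ELF_NFPREG = NUM_FPU_REGS + 1). */
    #define FPREGSET_SIZE ((NUM_FPU_REGS + 1) * sizeof(elf_fpreg_t))

    /* Mirrors the two guards fpr_set() adds: a whole number of register
     * slots, and no write past the end of the regset. */
    static bool setregset_args_ok(size_t pos, size_t count)
    {
            if (count % sizeof(elf_fpreg_t) != 0)
                    return false;   /* kernel BUG_ON()s: caller must pad */
            if (pos + count > FPREGSET_SIZE)
                    return false;   /* kernel returns -EIO */
            return true;
    }

    int main(void)
    {
            printf("%d\n", setregset_args_ok(0, FPREGSET_SIZE)); /* 1 */
            printf("%d\n", setregset_args_ok(8, 4));             /* 0: partial slot */
            printf("%d\n", setregset_args_ok(FPREGSET_SIZE, 8)); /* 0: past the end */
            return 0;
    }
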
@@ -58,6 +58,7 @@ SECTIONS
                 LOCK_TEXT
                 KPROBES_TEXT
                 IRQENTRY_TEXT
+                SOFTIRQENTRY_TEXT
                 *(.text.*)
                 *(.fixup)
                 *(.gnu.warning)
@@ -1777,7 +1777,7 @@ static int fpu_emu(struct pt_regs *xcp, struct mips_fpu_struct *ctx,
                         SPFROMREG(fs, MIPSInst_FS(ir));
                         SPFROMREG(fd, MIPSInst_FD(ir));
                         rv.s = ieee754sp_maddf(fd, fs, ft);
-                        break;
+                        goto copcsr;
                 }
 
                 case fmsubf_op: {
@@ -1790,7 +1790,7 @@ static int fpu_emu(struct pt_regs *xcp, struct mips_fpu_struct *ctx,
                         SPFROMREG(fs, MIPSInst_FS(ir));
                         SPFROMREG(fd, MIPSInst_FD(ir));
                         rv.s = ieee754sp_msubf(fd, fs, ft);
-                        break;
+                        goto copcsr;
                 }
 
                 case frint_op: {
@@ -1814,7 +1814,7 @@ static int fpu_emu(struct pt_regs *xcp, struct mips_fpu_struct *ctx,
                         SPFROMREG(fs, MIPSInst_FS(ir));
                         rv.w = ieee754sp_2008class(fs);
                         rfmt = w_fmt;
-                        break;
+                        goto copcsr;
                 }
 
                 case fmin_op: {
@@ -1826,7 +1826,7 @@ static int fpu_emu(struct pt_regs *xcp, struct mips_fpu_struct *ctx,
                         SPFROMREG(ft, MIPSInst_FT(ir));
                         SPFROMREG(fs, MIPSInst_FS(ir));
                         rv.s = ieee754sp_fmin(fs, ft);
-                        break;
+                        goto copcsr;
                 }
 
                 case fmina_op: {
@@ -1838,7 +1838,7 @@ static int fpu_emu(struct pt_regs *xcp, struct mips_fpu_struct *ctx,
                         SPFROMREG(ft, MIPSInst_FT(ir));
                         SPFROMREG(fs, MIPSInst_FS(ir));
                         rv.s = ieee754sp_fmina(fs, ft);
-                        break;
+                        goto copcsr;
                 }
 
                 case fmax_op: {
@@ -1850,7 +1850,7 @@ static int fpu_emu(struct pt_regs *xcp, struct mips_fpu_struct *ctx,
                         SPFROMREG(ft, MIPSInst_FT(ir));
                         SPFROMREG(fs, MIPSInst_FS(ir));
                         rv.s = ieee754sp_fmax(fs, ft);
-                        break;
+                        goto copcsr;
                 }
 
                 case fmaxa_op: {
@@ -1862,7 +1862,7 @@ static int fpu_emu(struct pt_regs *xcp, struct mips_fpu_struct *ctx,
                         SPFROMREG(ft, MIPSInst_FT(ir));
                         SPFROMREG(fs, MIPSInst_FS(ir));
                         rv.s = ieee754sp_fmaxa(fs, ft);
-                        break;
+                        goto copcsr;
                 }
 
                 case fabs_op:
@@ -2095,7 +2095,7 @@ copcsr:
                         DPFROMREG(fs, MIPSInst_FS(ir));
                         DPFROMREG(fd, MIPSInst_FD(ir));
                         rv.d = ieee754dp_maddf(fd, fs, ft);
-                        break;
+                        goto copcsr;
                 }
 
                 case fmsubf_op: {
@@ -2108,7 +2108,7 @@ copcsr:
                         DPFROMREG(fs, MIPSInst_FS(ir));
                         DPFROMREG(fd, MIPSInst_FD(ir));
                         rv.d = ieee754dp_msubf(fd, fs, ft);
-                        break;
+                        goto copcsr;
                 }
 
                 case frint_op: {
@@ -2132,7 +2132,7 @@ copcsr:
                         DPFROMREG(fs, MIPSInst_FS(ir));
                         rv.w = ieee754dp_2008class(fs);
                         rfmt = w_fmt;
-                        break;
+                        goto copcsr;
                 }
 
                 case fmin_op: {
@@ -2144,7 +2144,7 @@ copcsr:
                         DPFROMREG(ft, MIPSInst_FT(ir));
                         DPFROMREG(fs, MIPSInst_FS(ir));
                         rv.d = ieee754dp_fmin(fs, ft);
-                        break;
+                        goto copcsr;
                 }
 
                 case fmina_op: {
@@ -2156,7 +2156,7 @@ copcsr:
                         DPFROMREG(ft, MIPSInst_FT(ir));
                         DPFROMREG(fs, MIPSInst_FS(ir));
                         rv.d = ieee754dp_fmina(fs, ft);
-                        break;
+                        goto copcsr;
                 }
 
                 case fmax_op: {
@@ -2168,7 +2168,7 @@ copcsr:
                         DPFROMREG(ft, MIPSInst_FT(ir));
                         DPFROMREG(fs, MIPSInst_FS(ir));
                         rv.d = ieee754dp_fmax(fs, ft);
-                        break;
+                        goto copcsr;
                 }
 
                 case fmaxa_op: {
@@ -2180,7 +2180,7 @@ copcsr:
                         DPFROMREG(ft, MIPSInst_FT(ir));
                         DPFROMREG(fs, MIPSInst_FS(ir));
                         rv.d = ieee754dp_fmaxa(fs, ft);
-                        break;
+                        goto copcsr;
                 }
 
                 case fabs_op:
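
Note: every hunk in the group above makes the same one-line change, break -> goto copcsr, so the results of maddf/msubf/class/fmin/fmina/fmax/fmaxa flow through the copcsr epilogue that folds the IEEE754 exception flags into the emulated FCSR. A toy model of why break was wrong (names and flag values here are illustrative, not the kernel's):

    #include <stdio.h>

    static unsigned int ieee754_flags;      /* set by the emulated operation */
    static unsigned int fcsr;               /* emulated FP control/status */

    static double emu_op(int op, double a, double b)
    {
            double rv = 0.0;

            switch (op) {
            case 0:                         /* stands in for fmaddf_op etc. */
                    rv = a + b;
                    ieee754_flags = 0x1;    /* pretend INEXACT was raised */
                    goto copcsr;            /* 'break' here would lose the flags */
            case 1:
                    rv = a - b;
                    ieee754_flags = 0x4;
                    goto copcsr;
            default:
                    break;
            }
            /* A plain 'break' lands here: rv is returned, flags dropped. */
            return rv;

    copcsr:
            fcsr |= ieee754_flags;          /* flags survive only via this path */
            return rv;
    }

    int main(void)
    {
            emu_op(0, 1.0, 2.0);
            printf("fcsr = %#x\n", fcsr);   /* 0x1; stays 0 with 'break' */
            return 0;
    }
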
@@ -39,6 +39,7 @@ SECTIONS
                 SCHED_TEXT
                 LOCK_TEXT
                 IRQENTRY_TEXT
+                SOFTIRQENTRY_TEXT
                 KPROBES_TEXT
         } =0
         _etext = .;
@@ -215,7 +215,7 @@ do { \
         case 1: __get_user_asm(x, ptr, retval, "l.lbz"); break;        \
         case 2: __get_user_asm(x, ptr, retval, "l.lhz"); break;        \
         case 4: __get_user_asm(x, ptr, retval, "l.lwz"); break;        \
-        case 8: __get_user_asm2(x, ptr, retval);                       \
+        case 8: __get_user_asm2(x, ptr, retval); break;                \
         default: (x) = __get_user_bad();                               \
         }                                                              \
 } while (0)
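
Note: the one-word fix above is a classic missing break: the 8-byte case fell through into default and clobbered the value it had just loaded. A toy reproduction (sizes and values are made up):

    #include <stdio.h>

    static long get_user_size(int size)
    {
            long x = -1;

            switch (size) {
            case 4:
                    x = 42;
                    break;
            case 8:
                    x = 1234;
                    /* 'break' was missing here, so execution fell through... */
                    break;
            default:
                    x = 0;  /* ...and the loaded value was overwritten */
            }
            return x;
    }

    int main(void)
    {
            printf("size 8 loads: %ld\n", get_user_size(8)); /* 1234 with the fix */
            return 0;
    }
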
@@ -52,6 +52,7 @@ SECTIONS
         LOCK_TEXT
         KPROBES_TEXT
         IRQENTRY_TEXT
+        SOFTIRQENTRY_TEXT
         *(.fixup)
         *(.text.__*)
         _etext = .;
@@ -11,6 +11,7 @@
    for the semaphore.  */
 
 #define __PA_LDCW_ALIGNMENT     16
+#define __PA_LDCW_ALIGN_ORDER   4
 #define __ldcw_align(a) ({                                      \
         unsigned long __ret = (unsigned long) &(a)->lock[0];    \
         __ret = (__ret + __PA_LDCW_ALIGNMENT - 1)               \
@@ -28,6 +29,7 @@
    ldcd).  */
 
 #define __PA_LDCW_ALIGNMENT     4
+#define __PA_LDCW_ALIGN_ORDER   2
 #define __ldcw_align(a) (&(a)->slock)
 #define __LDCW  "ldcw,co"
 
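
Note: __PA_LDCW_ALIGN_ORDER is log2 of __PA_LDCW_ALIGNMENT; the assembly in the following hunks rounds the lock address up by adding ALIGNMENT-1 and then clearing the low ALIGN_ORDER bits with depi. The same computation in portable C:

    #include <stdio.h>
    #include <stdint.h>

    #define __PA_LDCW_ALIGNMENT   16
    #define __PA_LDCW_ALIGN_ORDER 4        /* log2(16) */

    /* Round up, then clear the low ALIGN_ORDER bits -- what the paired
     * load32/depi instructions do in the assembly below. */
    static uintptr_t ldcw_align(uintptr_t addr)
    {
            addr += __PA_LDCW_ALIGNMENT - 1;
            addr &= ~(((uintptr_t)1 << __PA_LDCW_ALIGN_ORDER) - 1);
            return addr;
    }

    int main(void)
    {
            printf("%#lx -> %#lx\n", 0x1004ul, (unsigned long)ldcw_align(0x1004));
            printf("%#lx -> %#lx\n", 0x1010ul, (unsigned long)ldcw_align(0x1010));
            return 0;       /* prints 0x1010 for both inputs */
    }
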
@@ -35,6 +35,7 @@
 #include <asm/pgtable.h>
 #include <asm/signal.h>
 #include <asm/unistd.h>
+#include <asm/ldcw.h>
 #include <asm/thread_info.h>
 
 #include <linux/linkage.h>
@@ -46,6 +47,14 @@
 #endif
 
         .import         pa_tlb_lock,data
+        .macro          load_pa_tlb_lock reg
+#if __PA_LDCW_ALIGNMENT > 4
+        load32          PA(pa_tlb_lock) + __PA_LDCW_ALIGNMENT-1, \reg
+        depi            0,31,__PA_LDCW_ALIGN_ORDER, \reg
+#else
+        load32          PA(pa_tlb_lock), \reg
+#endif
+        .endm
 
         /* space_to_prot macro creates a prot id from a space id */
 
@@ -457,7 +466,7 @@
         .macro          tlb_lock        spc,ptp,pte,tmp,tmp1,fault
 #ifdef CONFIG_SMP
         cmpib,COND(=),n 0,\spc,2f
-        load32          PA(pa_tlb_lock),\tmp
+        load_pa_tlb_lock \tmp
 1:      LDCW            0(\tmp),\tmp1
         cmpib,COND(=)   0,\tmp1,1b
         nop
@@ -480,7 +489,7 @@
         /* Release pa_tlb_lock lock. */
         .macro          tlb_unlock1     spc,tmp
 #ifdef CONFIG_SMP
-        load32          PA(pa_tlb_lock),\tmp
+        load_pa_tlb_lock \tmp
         tlb_unlock0     \spc,\tmp
 #endif
         .endm
@@ -36,6 +36,7 @@
 #include <asm/assembly.h>
 #include <asm/pgtable.h>
 #include <asm/cache.h>
+#include <asm/ldcw.h>
 #include <linux/linkage.h>
 
         .text
@@ -333,8 +334,12 @@ ENDPROC(flush_data_cache_local)
 
         .macro  tlb_lock        la,flags,tmp
 #ifdef CONFIG_SMP
-        ldil            L%pa_tlb_lock,%r1
-        ldo             R%pa_tlb_lock(%r1),\la
+#if __PA_LDCW_ALIGNMENT > 4
+        load32          pa_tlb_lock + __PA_LDCW_ALIGNMENT-1, \la
+        depi            0,31,__PA_LDCW_ALIGN_ORDER, \la
+#else
+        load32          pa_tlb_lock, \la
+#endif
         rsm             PSW_SM_I,\flags
 1:      LDCW            0(\la),\tmp
         cmpib,<>,n      0,\tmp,3f
@@ -72,6 +72,7 @@ SECTIONS
         LOCK_TEXT
         KPROBES_TEXT
         IRQENTRY_TEXT
+        SOFTIRQENTRY_TEXT
         *(.text.do_softirq)
         *(.text.sys_exit)
         *(.text.do_sigaltstack)
@@ -55,6 +55,7 @@ SECTIONS
         LOCK_TEXT
         KPROBES_TEXT
         IRQENTRY_TEXT
+        SOFTIRQENTRY_TEXT
 
 #ifdef CONFIG_PPC32
         *(.got1)
@@ -401,8 +401,12 @@ static __u64 power_pmu_bhrb_to(u64 addr)
         int ret;
         __u64 target;
 
-        if (is_kernel_addr(addr))
-                return branch_target((unsigned int *)addr);
+        if (is_kernel_addr(addr)) {
+                if (probe_kernel_read(&instr, (void *)addr, sizeof(instr)))
+                        return 0;
+
+                return branch_target(&instr);
+        }
 
         /* Userspace: need copy instruction here then translate it */
         pagefault_disable();
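
Note: the fix stops dereferencing a kernel address directly and instead copies the instruction out with probe_kernel_read(), turning an unreadable address into a harmless 0 return instead of an oops. A userspace stand-in that fakes the fallible copy with a bounds check (the kernel helper relies on fault fixups instead; the region and values here are invented):

    #include <stdio.h>
    #include <stdint.h>
    #include <string.h>

    static uint32_t valid_text[4] = { 0x48000010u, 0, 0, 0 }; /* fake instructions */

    /* Copy only if the source range lies inside a known-good region;
     * the calling pattern mirrors the fixed power_pmu_bhrb_to():
     * copy first, decode the copy. */
    static int probe_read(void *dst, uintptr_t src, size_t size)
    {
            uintptr_t lo = (uintptr_t)valid_text;
            uintptr_t hi = lo + sizeof(valid_text);

            if (src < lo || src + size > hi)
                    return -1;              /* would have faulted */
            memcpy(dst, (const void *)src, size);
            return 0;
    }

    int main(void)
    {
            uint32_t instr;

            if (probe_read(&instr, (uintptr_t)&valid_text[0], sizeof(instr)) == 0)
                    printf("decoded %#x\n", instr);
            if (probe_read(&instr, (uintptr_t)16, sizeof(instr)) != 0)
                    printf("unreadable address -> return 0, no oops\n");
            return 0;
    }
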
@@ -514,7 +514,7 @@ static int memord(const void *d1, size_t s1, const void *d2, size_t s2)
 {
         if (s1 < s2)
                 return 1;
-        if (s2 > s1)
+        if (s1 > s2)
                 return -1;
 
         return memcmp(d1, d2, s1);
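
Note: before this change the second test repeated the first (s2 > s1 is the same condition as s1 < s2), so it could never fire and unequal-length inputs with s1 > s2 fell through to memcmp(). The fixed comparator, lifted into a standalone program:

    #include <stdio.h>
    #include <string.h>

    static int memord(const void *d1, size_t s1, const void *d2, size_t s2)
    {
            if (s1 < s2)
                    return 1;
            if (s1 > s2)
                    return -1;
            return memcmp(d1, d2, s1);
    }

    int main(void)
    {
            printf("%d\n", memord("abc", 3, "ab", 2));   /* -1: first is longer */
            printf("%d\n", memord("ab", 2, "abc", 3));   /*  1 */
            printf("%d\n", memord("ab", 2, "ab", 2));    /*  0 */
            return 0;
    }
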
@@ -39,18 +39,18 @@ int __opal_async_get_token(void)
         int token;
 
         spin_lock_irqsave(&opal_async_comp_lock, flags);
-        token = find_first_bit(opal_async_complete_map, opal_max_async_tokens);
+        token = find_first_zero_bit(opal_async_token_map, opal_max_async_tokens);
         if (token >= opal_max_async_tokens) {
                 token = -EBUSY;
                 goto out;
         }
 
-        if (__test_and_set_bit(token, opal_async_token_map)) {
+        if (!__test_and_clear_bit(token, opal_async_complete_map)) {
                 token = -EBUSY;
                 goto out;
         }
 
-        __clear_bit(token, opal_async_complete_map);
+        __set_bit(token, opal_async_token_map);
 
 out:
         spin_unlock_irqrestore(&opal_async_comp_lock, flags);
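
Note: the reworked token allocator looks for a token that is free in the in-use map, then insists its completion bit is still set before handing it out. A toy model with a 64-bit bitmap (the kernel code holds opal_async_comp_lock around this and uses the bitmap helpers; the names here just echo them):

    #include <stdio.h>
    #include <stdint.h>

    static uint64_t token_map;              /* bit set = token in use */
    static uint64_t complete_map = ~0ULL;   /* bit set = previous op done */

    static int get_token(void)
    {
            for (int t = 0; t < 64; t++) {
                    if (token_map & (1ULL << t))
                            continue;               /* find_first_zero_bit() */
                    if (!(complete_map & (1ULL << t)))
                            return -1;              /* free but not complete: -EBUSY */
                    complete_map &= ~(1ULL << t);   /* __test_and_clear_bit() */
                    token_map |= 1ULL << t;         /* __set_bit() */
                    return t;
            }
            return -1;                              /* all in use: -EBUSY */
    }

    int main(void)
    {
            printf("first token:  %d\n", get_token());      /* 0 */
            printf("second token: %d\n", get_token());      /* 1 */
            return 0;
    }
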
@@ -2270,6 +2270,9 @@ static long pnv_pci_ioda2_table_alloc_pages(int nid, __u64 bus_offset,
         level_shift = entries_shift + 3;
         level_shift = max_t(unsigned, level_shift, PAGE_SHIFT);
 
+        if ((level_shift - 3) * levels + page_shift >= 60)
+                return -EINVAL;
+
         /* Allocate TCE table */
         addr = pnv_pci_ioda2_table_do_alloc_pages(nid, level_shift,
                         levels, tce_table_size, &offset, &total_allocated);
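
Note: with 8-byte entries each table level resolves level_shift - 3 address bits, so a multi-level table translates levels * (level_shift - 3) + page_shift bits in total; the new guard rejects geometries that would need 60 or more. A standalone version of the check (the example numbers are invented):

    #include <stdio.h>
    #include <stdbool.h>

    static bool tce_geometry_ok(unsigned int level_shift, unsigned int levels,
                                unsigned int page_shift)
    {
            /* Same expression as the -EINVAL guard above. */
            return (level_shift - 3) * levels + page_shift < 60;
    }

    int main(void)
    {
            printf("%d\n", tce_geometry_ok(16, 2, 16));     /* 1: 42 bits */
            printf("%d\n", tce_geometry_ok(21, 3, 16));     /* 0: 70 bits */
            return 0;
    }
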
@@ -295,7 +295,7 @@ static unsigned long pnv_get_proc_freq(unsigned int cpu)
 {
         unsigned long ret_freq;
 
-        ret_freq = cpufreq_quick_get(cpu) * 1000ul;
+        ret_freq = cpufreq_get(cpu) * 1000ul;
 
         /*
          * If the backend cpufreq driver does not exist,
@@ -276,7 +276,9 @@ failed:
                         if (bank->disk->major > 0)
                                 unregister_blkdev(bank->disk->major,
                                                 bank->disk->disk_name);
-                        del_gendisk(bank->disk);
+                        if (bank->disk->flags & GENHD_FL_UP)
+                                del_gendisk(bank->disk);
+                        put_disk(bank->disk);
                 }
                 device->dev.platform_data = NULL;
                 if (bank->io_addr != 0)
@@ -301,6 +303,7 @@ axon_ram_remove(struct platform_device *device)
         device_remove_file(&device->dev, &dev_attr_ecc);
         free_irq(bank->irq_id, device);
         del_gendisk(bank->disk);
+        put_disk(bank->disk);
         iounmap((void __iomem *) bank->io_addr);
         kfree(bank);
 
@@ -845,12 +845,12 @@ void ipic_disable_mcp(enum ipic_mcp_irq mcp_irq)
 
 u32 ipic_get_mcp_status(void)
 {
-        return ipic_read(primary_ipic->regs, IPIC_SERMR);
+        return ipic_read(primary_ipic->regs, IPIC_SERSR);
 }
 
 void ipic_clear_mcp_status(u32 mask)
 {
-        ipic_write(primary_ipic->regs, IPIC_SERMR, mask);
+        ipic_write(primary_ipic->regs, IPIC_SERSR, mask);
 }
 
 /* Return an interrupt vector or NO_IRQ if no interrupt is pending. */
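
Note: the fix reads and clears machine-check status from the status register (SERSR) instead of the mask register (SERMR). A toy model, assuming the usual write-one-to-clear behaviour of such status registers (the register layout here is invented):

    #include <stdio.h>
    #include <stdint.h>

    struct ipic_regs {
            uint32_t sermr;         /* system error mask */
            uint32_t sersr;         /* system error status */
    };

    static uint32_t get_mcp_status(struct ipic_regs *r)
    {
            return r->sersr;        /* was: r->sermr */
    }

    static void clear_mcp_status(struct ipic_regs *r, uint32_t mask)
    {
            r->sersr &= ~mask;      /* models a W1C write to SERSR */
    }

    int main(void)
    {
            struct ipic_regs r = { .sermr = 0xff, .sersr = 0x5 };

            printf("status %#x\n", get_mcp_status(&r));
            clear_mcp_status(&r, 0x4);
            printf("status %#x\n", get_mcp_status(&r));     /* 0x1 */
            return 0;
    }
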
@@ -1,8 +0,0 @@
-#ifndef _ASM_S390_PROTOTYPES_H
-
-#include <linux/kvm_host.h>
-#include <linux/ftrace.h>
-#include <asm/fpu/api.h>
-#include <asm-generic/asm-prototypes.h>
-
-#endif /* _ASM_S390_PROTOTYPES_H */
@@ -81,6 +81,6 @@ int zpci_refresh_trans(u64 fn, u64 addr, u64 range);
 int zpci_load(u64 *data, u64 req, u64 offset);
 int zpci_store(u64 data, u64 req, u64 offset);
 int zpci_store_block(const u64 *data, u64 req, u64 offset);
-void zpci_set_irq_ctrl(u16 ctl, char *unused, u8 isc);
+int zpci_set_irq_ctrl(u16 ctl, char *unused, u8 isc);
 
 #endif
@@ -85,6 +85,8 @@ static inline void restore_ri_cb(struct runtime_instr_cb *cb_next,
                 load_runtime_instr_cb(&runtime_instr_empty_cb);
 }
 
-void exit_thread_runtime_instr(void);
+struct task_struct;
+
+void runtime_instr_release(struct task_struct *tsk);
 
 #endif /* _RUNTIME_INSTR_H */
Some files were not shown because too many files have changed in this diff.