Merge branch 'linux-linaro-lsk-v4.4' into linux-linaro-lsk-v4.4-android
commit 59e35359ec
149 changed files with 1875 additions and 484 deletions
@@ -2458,6 +2458,11 @@ bytes respectively. Such letter suffixes can also be entirely omitted.

	nohugeiomap	[KNL,x86] Disable kernel huge I/O mappings.

+	nospectre_v2	[X86] Disable all mitigations for the Spectre variant 2
+			(indirect branch prediction) vulnerability. System may
+			allow data leaks with this option, which is equivalent
+			to spectre_v2=off.
+
	noxsave		[BUGS=X86] Disables x86 extended register state save
			and restore using xsave. The kernel will fallback to
			enabling legacy floating-point and sse state.
@@ -3604,6 +3609,29 @@ bytes respectively. Such letter suffixes can also be entirely omitted.

	sonypi.*=	[HW] Sony Programmable I/O Control Device driver
			See Documentation/laptops/sonypi.txt

+	spectre_v2=	[X86] Control mitigation of Spectre variant 2
+			(indirect branch speculation) vulnerability.
+
+			on   - unconditionally enable
+			off  - unconditionally disable
+			auto - kernel detects whether your CPU model is
+			       vulnerable
+
+			Selecting 'on' will, and 'auto' may, choose a
+			mitigation method at run time according to the
+			CPU, the available microcode, the setting of the
+			CONFIG_RETPOLINE configuration option, and the
+			compiler with which the kernel was built.
+
+			Specific mitigations can also be selected manually:
+
+			retpoline	  - replace indirect branches
+			retpoline,generic - google's original retpoline
+			retpoline,amd	  - AMD-specific minimal thunk
+
+			Not specifying this option is equivalent to
+			spectre_v2=auto.
+
	spia_io_base=	[HW,MTD]
	spia_fio_base=
	spia_pedr=
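The mitigation that spectre_v2= ends up selecting is reported back through sysfs by the cpu_show_spectre_v2() hunk further down in this commit. A minimal userspace sketch for reading it (assuming the standard sysfs vulnerabilities path; this program is illustrative and not part of the commit):

#include <stdio.h>

/* Prints the string produced by cpu_show_spectre_v2(), e.g.
 * "Mitigation: Full generic retpoline" or "Vulnerable". */
int main(void)
{
	char buf[128];
	FILE *f = fopen("/sys/devices/system/cpu/vulnerabilities/spectre_v2", "r");

	if (!f) {
		perror("spectre_v2");	/* kernels without this series lack the file */
		return 1;
	}
	if (fgets(buf, sizeof(buf), f))
		fputs(buf, stdout);
	fclose(f);
	return 0;
}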
@@ -78,7 +78,7 @@ this protection comes at a cost:
      non-PTI SYSCALL entry code, so requires mapping fewer
      things into the userspace page tables. The downside is
      that stacks must be switched at entry time.
-  d. Global pages are disabled for all kernel structures not
+  c. Global pages are disabled for all kernel structures not
      mapped into both kernel and userspace page tables. This
      feature of the MMU allows different processes to share TLB
      entries mapping the kernel. Losing the feature means more
Makefile

@@ -1,6 +1,6 @@
VERSION = 4
PATCHLEVEL = 4
-SUBLEVEL = 112
+SUBLEVEL = 114
EXTRAVERSION =
NAME = Blurry Fish Butt
@@ -53,7 +53,8 @@
	};

	pinctrl: pin-controller@10000 {
-		pinctrl-0 = <&pmx_dip_switches &pmx_gpio_header>;
+		pinctrl-0 = <&pmx_dip_switches &pmx_gpio_header
+			     &pmx_gpio_header_gpo>;
		pinctrl-names = "default";

		pmx_uart0: pmx-uart0 {

@@ -85,11 +86,16 @@
		 * ground.
		 */
		pmx_gpio_header: pmx-gpio-header {
-			marvell,pins = "mpp17", "mpp7", "mpp29", "mpp28",
+			marvell,pins = "mpp17", "mpp29", "mpp28",
				       "mpp35", "mpp34", "mpp40";
			marvell,function = "gpio";
		};

+		pmx_gpio_header_gpo: pxm-gpio-header-gpo {
+			marvell,pins = "mpp7";
+			marvell,function = "gpo";
+		};
+
		pmx_gpio_init: pmx-init {
			marvell,pins = "mpp38";
			marvell,function = "gpio";
@@ -43,7 +43,7 @@ static int handle_hvc(struct kvm_vcpu *vcpu, struct kvm_run *run)

	ret = kvm_psci_call(vcpu);
	if (ret < 0) {
-		kvm_inject_undefined(vcpu);
+		vcpu_set_reg(vcpu, 0, ~0UL);
		return 1;
	}

@@ -52,7 +52,7 @@ static int handle_hvc(struct kvm_vcpu *vcpu, struct kvm_run *run)

static int handle_smc(struct kvm_vcpu *vcpu, struct kvm_run *run)
{
-	kvm_inject_undefined(vcpu);
+	vcpu_set_reg(vcpu, 0, ~0UL);
	return 1;
}
@@ -576,7 +576,7 @@ static int __init ar7_register_uarts(void)
	uart_port.type		= PORT_AR7;
	uart_port.uartclk	= clk_get_rate(bus_clk) / 2;
	uart_port.iotype	= UPIO_MEM32;
-	uart_port.flags		= UPF_FIXED_TYPE;
+	uart_port.flags		= UPF_FIXED_TYPE | UPF_BOOT_AUTOCONF;
	uart_port.regshift	= 2;

	uart_port.line		= 0;
@@ -117,7 +117,7 @@ archheaders:
archprepare: include/generated/user_constants.h

LINK-$(CONFIG_LD_SCRIPT_STATIC) += -static
-LINK-$(CONFIG_LD_SCRIPT_DYN) += -Wl,-rpath,/lib
+LINK-$(CONFIG_LD_SCRIPT_DYN) += -Wl,-rpath,/lib $(call cc-option, -no-pie)

CFLAGS_NO_HARDENING := $(call cc-option, -fno-PIC,) $(call cc-option, -fno-pic,) \
	$(call cc-option, -fno-stack-protector,) \
@@ -395,6 +395,19 @@ config GOLDFISH
	def_bool y
	depends on X86_GOLDFISH

+config RETPOLINE
+	bool "Avoid speculative indirect branches in kernel"
+	default y
+	---help---
+	  Compile kernel with the retpoline compiler options to guard against
+	  kernel-to-user data leaks by avoiding speculative indirect
+	  branches. Requires a compiler with -mindirect-branch=thunk-extern
+	  support for full protection. The kernel may run slower.
+
+	  Without compiler support, at least indirect branches in assembler
+	  code are eliminated. Since this includes the syscall entry path,
+	  it is not entirely pointless.
+
if X86_32
config X86_EXTENDED_PLATFORM
	bool "Support for extended (non-PC) x86 platforms"
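The help text above hints at a two-level scheme the rest of this commit relies on: CONFIG_RETPOLINE means the option is enabled, while the bare RETPOLINE define (added by the arch/x86/Makefile hunk just below, and tested by retp_compiler() in the bugs.c hunk later) means the compiler actually accepted -mindirect-branch=thunk-extern. A sketch of how code can tell the two apart (illustrative, not from the commit):

/* CONFIG_RETPOLINE: option enabled; RETPOLINE: compiler support present. */
#ifdef CONFIG_RETPOLINE
# ifdef RETPOLINE
	/* full retpoline: the compiler also rewrote C indirect branches */
# else
	/* minimal retpoline: only hand-written assembly call sites covered */
# endif
#endif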
@@ -210,6 +210,14 @@ KBUILD_CFLAGS += -fno-asynchronous-unwind-tables
KBUILD_CFLAGS += $(mflags-y)
KBUILD_AFLAGS += $(mflags-y)

+# Avoid indirect branches in kernel to deal with Spectre
+ifdef CONFIG_RETPOLINE
+    RETPOLINE_CFLAGS += $(call cc-option,-mindirect-branch=thunk-extern -mindirect-branch-register)
+    ifneq ($(RETPOLINE_CFLAGS),)
+        KBUILD_CFLAGS += $(RETPOLINE_CFLAGS) -DRETPOLINE
+    endif
+endif
+
archscripts: scripts_basic
	$(Q)$(MAKE) $(build)=arch/x86/tools relocs
@@ -31,6 +31,7 @@

#include <linux/linkage.h>
#include <asm/inst.h>
+#include <asm/nospec-branch.h>

/*
 * The following macros are used to move an (un)aligned 16 byte value to/from

@@ -2714,7 +2715,7 @@ ENTRY(aesni_xts_crypt8)
	pxor INC, STATE4
	movdqu IV, 0x30(OUTP)

-	call *%r11
+	CALL_NOSPEC %r11

	movdqu 0x00(OUTP), INC
	pxor INC, STATE1

@@ -2759,7 +2760,7 @@ ENTRY(aesni_xts_crypt8)
	_aesni_gf128mul_x_ble()
	movups IV, (IVP)

-	call *%r11
+	CALL_NOSPEC %r11

	movdqu 0x40(OUTP), INC
	pxor INC, STATE1
@@ -16,6 +16,7 @@
 */

#include <linux/linkage.h>
+#include <asm/nospec-branch.h>

#define CAMELLIA_TABLE_BYTE_LEN 272

@@ -1210,7 +1211,7 @@ camellia_xts_crypt_16way:
	vpxor 14 * 16(%rax), %xmm15, %xmm14;
	vpxor 15 * 16(%rax), %xmm15, %xmm15;

-	call *%r9;
+	CALL_NOSPEC %r9;

	addq $(16 * 16), %rsp;
@@ -11,6 +11,7 @@
 */

#include <linux/linkage.h>
+#include <asm/nospec-branch.h>

#define CAMELLIA_TABLE_BYTE_LEN 272

@@ -1323,7 +1324,7 @@ camellia_xts_crypt_32way:
	vpxor 14 * 32(%rax), %ymm15, %ymm14;
	vpxor 15 * 32(%rax), %ymm15, %ymm15;

-	call *%r9;
+	CALL_NOSPEC %r9;

	addq $(16 * 32), %rsp;
@@ -45,6 +45,7 @@

#include <asm/inst.h>
#include <linux/linkage.h>
+#include <asm/nospec-branch.h>

## ISCSI CRC 32 Implementation with crc32 and pclmulqdq Instruction

@@ -172,7 +173,7 @@ continue_block:
	movzxw	(bufp, %rax, 2), len
	offset=crc_array-jump_table
	lea	offset(bufp, len, 1), bufp
-	jmp	*bufp
+	JMP_NOSPEC bufp

################################################################
## 2a) PROCESS FULL BLOCKS:
@@ -44,6 +44,7 @@
#include <asm/alternative-asm.h>
#include <asm/asm.h>
#include <asm/smap.h>
+#include <asm/nospec-branch.h>

.section .entry.text, "ax"

@@ -226,7 +227,8 @@ ENTRY(ret_from_kernel_thread)
	pushl	$0x0202		# Reset kernel eflags
	popfl
	movl	PT_EBP(%esp), %eax
-	call	*PT_EBX(%esp)
+	movl	PT_EBX(%esp), %edx
+	CALL_NOSPEC %edx
	movl	$0, PT_EAX(%esp)

	/*

@@ -861,7 +863,8 @@ trace:
	movl	0x4(%ebp), %edx
	subl	$MCOUNT_INSN_SIZE, %eax

-	call	*ftrace_trace_function
+	movl	ftrace_trace_function, %ecx
+	CALL_NOSPEC %ecx

	popl	%edx
	popl	%ecx

@@ -896,7 +899,7 @@ return_to_handler:
	movl	%eax, %ecx
	popl	%edx
	popl	%eax
-	jmp	*%ecx
+	JMP_NOSPEC %ecx
#endif

#ifdef CONFIG_TRACING

@@ -938,7 +941,7 @@ error_code:
	movl	%ecx, %es
	TRACE_IRQS_OFF
	movl	%esp, %eax			# pt_regs pointer
-	call	*%edi
+	CALL_NOSPEC %edi
	jmp	ret_from_exception
END(page_fault)
@@ -36,6 +36,7 @@
#include <asm/smap.h>
#include <asm/pgtable_types.h>
#include <asm/kaiser.h>
+#include <asm/nospec-branch.h>
#include <linux/err.h>

/* Avoid __ASSEMBLER__'ifying <linux/audit.h> just for this. */

@@ -184,7 +185,13 @@ entry_SYSCALL_64_fastpath:
#endif
	ja	1f				/* return -ENOSYS (already in pt_regs->ax) */
	movq	%r10, %rcx
+#ifdef CONFIG_RETPOLINE
+	movq	sys_call_table(, %rax, 8), %rax
+	call	__x86_indirect_thunk_rax
+#else
	call	*sys_call_table(, %rax, 8)
+#endif
+
	movq	%rax, RAX(%rsp)
1:
	/*

@@ -276,7 +283,12 @@ tracesys_phase2:
#endif
	ja	1f				/* return -ENOSYS (already in pt_regs->ax) */
	movq	%r10, %rcx			/* fixup for C */
+#ifdef CONFIG_RETPOLINE
+	movq	sys_call_table(, %rax, 8), %rax
+	call	__x86_indirect_thunk_rax
+#else
	call	*sys_call_table(, %rax, 8)
+#endif
	movq	%rax, RAX(%rsp)
1:
	/* Use IRET because user could have changed pt_regs->foo */

@@ -491,7 +503,7 @@ ENTRY(ret_from_fork)
	 * nb: we depend on RESTORE_EXTRA_REGS above
	 */
	movq	%rbp, %rdi
-	call	*%rbx
+	CALL_NOSPEC %rbx
	movl	$0, RAX(%rsp)
	RESTORE_EXTRA_REGS
	jmp	int_ret_from_sys_call

@@ -1025,7 +1037,7 @@ idtentry async_page_fault do_async_page_fault has_error_code=1
#endif

#ifdef CONFIG_X86_MCE
-idtentry machine_check has_error_code=0 paranoid=1 do_sym=*machine_check_vector(%rip)
+idtentry machine_check do_mce has_error_code=0 paranoid=1
#endif

/*
@@ -46,6 +46,7 @@ static enum { EMULATE, NATIVE, NONE } vsyscall_mode =
#else
	EMULATE;
#endif
+unsigned long vsyscall_pgprot = __PAGE_KERNEL_VSYSCALL;

static int __init vsyscall_setup(char *str)
{

@@ -336,11 +337,11 @@ void __init map_vsyscall(void)
	extern char __vsyscall_page;
	unsigned long physaddr_vsyscall = __pa_symbol(&__vsyscall_page);

+	if (vsyscall_mode != NATIVE)
+		vsyscall_pgprot = __PAGE_KERNEL_VVAR;
	if (vsyscall_mode != NONE)
		__set_fixmap(VSYSCALL_PAGE, physaddr_vsyscall,
-			     vsyscall_mode == NATIVE
-			     ? PAGE_KERNEL_VSYSCALL
-			     : PAGE_KERNEL_VVAR);
+			     __pgprot(vsyscall_pgprot));

	BUILD_BUG_ON((unsigned long)__fix_to_virt(VSYSCALL_PAGE) !=
		     (unsigned long)VSYSCALL_ADDR);
@@ -1,6 +1,8 @@
#ifndef _ASM_X86_ALTERNATIVE_H
#define _ASM_X86_ALTERNATIVE_H

+#ifndef __ASSEMBLY__
+
#include <linux/types.h>
#include <linux/stddef.h>
#include <linux/stringify.h>

@@ -271,4 +273,6 @@ extern void *text_poke(void *addr, const void *opcode, size_t len);
extern int poke_int3_handler(struct pt_regs *regs);
extern void *text_poke_bp(void *addr, const void *opcode, size_t len, void *handler);

+#endif /* __ASSEMBLY__ */
+
#endif /* _ASM_X86_ALTERNATIVE_H */
arch/x86/include/asm/asm-prototypes.h (new file)

@@ -0,0 +1,41 @@
+#include <asm/ftrace.h>
+#include <asm/uaccess.h>
+#include <asm/string.h>
+#include <asm/page.h>
+#include <asm/checksum.h>
+
+#include <asm-generic/asm-prototypes.h>
+
+#include <asm/page.h>
+#include <asm/pgtable.h>
+#include <asm/special_insns.h>
+#include <asm/preempt.h>
+#include <asm/asm.h>
+
+#ifndef CONFIG_X86_CMPXCHG64
+extern void cmpxchg8b_emu(void);
+#endif
+
+#ifdef CONFIG_RETPOLINE
+#ifdef CONFIG_X86_32
+#define INDIRECT_THUNK(reg) extern asmlinkage void __x86_indirect_thunk_e ## reg(void);
+#else
+#define INDIRECT_THUNK(reg) extern asmlinkage void __x86_indirect_thunk_r ## reg(void);
+INDIRECT_THUNK(8)
+INDIRECT_THUNK(9)
+INDIRECT_THUNK(10)
+INDIRECT_THUNK(11)
+INDIRECT_THUNK(12)
+INDIRECT_THUNK(13)
+INDIRECT_THUNK(14)
+INDIRECT_THUNK(15)
+#endif
+INDIRECT_THUNK(ax)
+INDIRECT_THUNK(bx)
+INDIRECT_THUNK(cx)
+INDIRECT_THUNK(dx)
+INDIRECT_THUNK(si)
+INDIRECT_THUNK(di)
+INDIRECT_THUNK(bp)
+INDIRECT_THUNK(sp)
+#endif /* CONFIG_RETPOLINE */
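On 64-bit builds each INDIRECT_THUNK() above pastes an 'r' prefix onto the register name, declaring one thunk per register; the symbols themselves are emitted by the new arch/x86/lib/retpoline.S later in this diff. What two of the expansions come out to:

extern asmlinkage void __x86_indirect_thunk_rax(void);	/* INDIRECT_THUNK(ax) */
extern asmlinkage void __x86_indirect_thunk_r8(void);	/* INDIRECT_THUNK(8) */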
@@ -106,4 +106,15 @@
/* For C file, we already have NOKPROBE_SYMBOL macro */
#endif

+#ifndef __ASSEMBLY__
+/*
+ * This output constraint should be used for any inline asm which has a "call"
+ * instruction. Otherwise the asm may be inserted before the frame pointer
+ * gets set up by the containing function. If you forget to do this, objtool
+ * may print a "call without frame pointer save/setup" warning.
+ */
+register unsigned long current_stack_pointer asm(_ASM_SP);
+#define ASM_CALL_CONSTRAINT "+r" (current_stack_pointer)
+#endif
+
#endif /* _ASM_X86_ASM_H */
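vmexit_fill_RSB() in the new nospec-branch.h header below is one user of this constraint. A minimal sketch of the pattern (the helper function is hypothetical, and a real call site must also list every register the callee may clobber):

extern void helper(void);

static inline void call_helper(void)
{
	/* ASM_CALL_CONSTRAINT forces the frame pointer to be set up
	 * before this asm, which contains a call. */
	asm volatile("call helper"
		     : ASM_CALL_CONSTRAINT
		     : : "memory");
}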
@@ -199,7 +199,10 @@
#define X86_FEATURE_HWP_EPP	( 7*32+13) /* Intel HWP_EPP */
#define X86_FEATURE_HWP_PKG_REQ ( 7*32+14) /* Intel HWP_PKG_REQ */
#define X86_FEATURE_INTEL_PT	( 7*32+15) /* Intel Processor Trace */
+#define X86_FEATURE_RSB_CTXSW	( 7*32+19) /* Fill RSB on context switches */

+#define X86_FEATURE_RETPOLINE	( 7*32+29) /* Generic Retpoline mitigation for Spectre variant 2 */
+#define X86_FEATURE_RETPOLINE_AMD ( 7*32+30) /* AMD Retpoline mitigation for Spectre variant 2 */
/* Because the ALTERNATIVE scheme is for members of the X86_FEATURE club... */
#define X86_FEATURE_KAISER	( 7*32+31) /* CONFIG_PAGE_TABLE_ISOLATION w/o nokaiser */
arch/x86/include/asm/intel-family.h (new file)

@@ -0,0 +1,68 @@
+#ifndef _ASM_X86_INTEL_FAMILY_H
+#define _ASM_X86_INTEL_FAMILY_H
+
+/*
+ * "Big Core" Processors (Branded as Core, Xeon, etc...)
+ *
+ * The "_X" parts are generally the EP and EX Xeons, or the
+ * "Extreme" ones, like Broadwell-E.
+ *
+ * Things ending in "2" are usually because we have no better
+ * name for them. There's no processor called "WESTMERE2".
+ */
+
+#define INTEL_FAM6_CORE_YONAH		0x0E
+#define INTEL_FAM6_CORE2_MEROM		0x0F
+#define INTEL_FAM6_CORE2_MEROM_L	0x16
+#define INTEL_FAM6_CORE2_PENRYN		0x17
+#define INTEL_FAM6_CORE2_DUNNINGTON	0x1D
+
+#define INTEL_FAM6_NEHALEM		0x1E
+#define INTEL_FAM6_NEHALEM_EP		0x1A
+#define INTEL_FAM6_NEHALEM_EX		0x2E
+#define INTEL_FAM6_WESTMERE		0x25
+#define INTEL_FAM6_WESTMERE2		0x1F
+#define INTEL_FAM6_WESTMERE_EP		0x2C
+#define INTEL_FAM6_WESTMERE_EX		0x2F
+
+#define INTEL_FAM6_SANDYBRIDGE		0x2A
+#define INTEL_FAM6_SANDYBRIDGE_X	0x2D
+#define INTEL_FAM6_IVYBRIDGE		0x3A
+#define INTEL_FAM6_IVYBRIDGE_X		0x3E
+
+#define INTEL_FAM6_HASWELL_CORE		0x3C
+#define INTEL_FAM6_HASWELL_X		0x3F
+#define INTEL_FAM6_HASWELL_ULT		0x45
+#define INTEL_FAM6_HASWELL_GT3E		0x46
+
+#define INTEL_FAM6_BROADWELL_CORE	0x3D
+#define INTEL_FAM6_BROADWELL_XEON_D	0x56
+#define INTEL_FAM6_BROADWELL_GT3E	0x47
+#define INTEL_FAM6_BROADWELL_X		0x4F
+
+#define INTEL_FAM6_SKYLAKE_MOBILE	0x4E
+#define INTEL_FAM6_SKYLAKE_DESKTOP	0x5E
+#define INTEL_FAM6_SKYLAKE_X		0x55
+#define INTEL_FAM6_KABYLAKE_MOBILE	0x8E
+#define INTEL_FAM6_KABYLAKE_DESKTOP	0x9E
+
+/* "Small Core" Processors (Atom) */
+
+#define INTEL_FAM6_ATOM_PINEVIEW	0x1C
+#define INTEL_FAM6_ATOM_LINCROFT	0x26
+#define INTEL_FAM6_ATOM_PENWELL		0x27
+#define INTEL_FAM6_ATOM_CLOVERVIEW	0x35
+#define INTEL_FAM6_ATOM_CEDARVIEW	0x36
+#define INTEL_FAM6_ATOM_SILVERMONT1	0x37 /* BayTrail/BYT / Valleyview */
+#define INTEL_FAM6_ATOM_SILVERMONT2	0x4D /* Avaton/Rangely */
+#define INTEL_FAM6_ATOM_AIRMONT		0x4C /* CherryTrail / Braswell */
+#define INTEL_FAM6_ATOM_MERRIFIELD1	0x4A /* Tangier */
+#define INTEL_FAM6_ATOM_MERRIFIELD2	0x5A /* Annidale */
+#define INTEL_FAM6_ATOM_GOLDMONT	0x5C
+#define INTEL_FAM6_ATOM_DENVERTON	0x5F /* Goldmont Microserver */
+
+/* Xeon Phi */
+
+#define INTEL_FAM6_XEON_PHI_KNL		0x57 /* Knights Landing */
+
+#endif /* _ASM_X86_INTEL_FAMILY_H */
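These model defines exist so vendor/family/model checks read as names instead of raw hex; is_skylake_era() in the bugs.c hunk below is the consumer this commit adds. A reduced sketch of that check pattern (illustrative only):

#include <asm/intel-family.h>
#include <asm/processor.h>

/* Sketch of the pattern used by is_skylake_era() later in this diff. */
static bool cpu_is_skylake_desktop(void)
{
	return boot_cpu_data.x86_vendor == X86_VENDOR_INTEL &&
	       boot_cpu_data.x86 == 6 &&
	       boot_cpu_data.x86_model == INTEL_FAM6_SKYLAKE_DESKTOP;
}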
@@ -330,6 +330,9 @@
#define FAM10H_MMIO_CONF_BASE_MASK	0xfffffffULL
#define FAM10H_MMIO_CONF_BASE_SHIFT	20
#define MSR_FAM10H_NODE_ID		0xc001100c
+#define MSR_F10H_DECFG			0xc0011029
+#define MSR_F10H_DECFG_LFENCE_SERIALIZE_BIT	1
+#define MSR_F10H_DECFG_LFENCE_SERIALIZE	BIT_ULL(MSR_F10H_DECFG_LFENCE_SERIALIZE_BIT)

/* K8 MSRs */
#define MSR_K8_TOP_MEM1			0xc001001a
arch/x86/include/asm/nospec-branch.h (new file)

@@ -0,0 +1,198 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#ifndef __NOSPEC_BRANCH_H__
+#define __NOSPEC_BRANCH_H__
+
+#include <asm/alternative.h>
+#include <asm/alternative-asm.h>
+#include <asm/cpufeature.h>
+
+/*
+ * Fill the CPU return stack buffer.
+ *
+ * Each entry in the RSB, if used for a speculative 'ret', contains an
+ * infinite 'pause; lfence; jmp' loop to capture speculative execution.
+ *
+ * This is required in various cases for retpoline and IBRS-based
+ * mitigations for the Spectre variant 2 vulnerability. Sometimes to
+ * eliminate potentially bogus entries from the RSB, and sometimes
+ * purely to ensure that it doesn't get empty, which on some CPUs would
+ * allow predictions from other (unwanted!) sources to be used.
+ *
+ * We define a CPP macro such that it can be used from both .S files and
+ * inline assembly. It's possible to do a .macro and then include that
+ * from C via asm(".include <asm/nospec-branch.h>") but let's not go there.
+ */
+
+#define RSB_CLEAR_LOOPS		32	/* To forcibly overwrite all entries */
+#define RSB_FILL_LOOPS		16	/* To avoid underflow */
+
+/*
+ * Google experimented with loop-unrolling and this turned out to be
+ * the optimal version — two calls, each with their own speculation
+ * trap should their return address end up getting used, in a loop.
+ */
+#define __FILL_RETURN_BUFFER(reg, nr, sp)	\
+	mov	$(nr/2), reg;			\
+771:						\
+	call	772f;				\
+773:	/* speculation trap */			\
+	pause;					\
+	lfence;					\
+	jmp	773b;				\
+772:						\
+	call	774f;				\
+775:	/* speculation trap */			\
+	pause;					\
+	lfence;					\
+	jmp	775b;				\
+774:						\
+	dec	reg;				\
+	jnz	771b;				\
+	add	$(BITS_PER_LONG/8) * nr, sp;
+
+#ifdef __ASSEMBLY__
+
+/*
+ * These are the bare retpoline primitives for indirect jmp and call.
+ * Do not use these directly; they only exist to make the ALTERNATIVE
+ * invocation below less ugly.
+ */
+.macro RETPOLINE_JMP reg:req
+	call	.Ldo_rop_\@
+.Lspec_trap_\@:
+	pause
+	lfence
+	jmp	.Lspec_trap_\@
+.Ldo_rop_\@:
+	mov	\reg, (%_ASM_SP)
+	ret
+.endm
+
+/*
+ * This is a wrapper around RETPOLINE_JMP so the called function in reg
+ * returns to the instruction after the macro.
+ */
+.macro RETPOLINE_CALL reg:req
+	jmp	.Ldo_call_\@
+.Ldo_retpoline_jmp_\@:
+	RETPOLINE_JMP	\reg
+.Ldo_call_\@:
+	call	.Ldo_retpoline_jmp_\@
+.endm
+
+/*
+ * JMP_NOSPEC and CALL_NOSPEC macros can be used instead of a simple
+ * indirect jmp/call which may be susceptible to the Spectre variant 2
+ * attack.
+ */
+.macro JMP_NOSPEC reg:req
+#ifdef CONFIG_RETPOLINE
+	ALTERNATIVE_2 __stringify(jmp *\reg),				\
+		__stringify(RETPOLINE_JMP \reg), X86_FEATURE_RETPOLINE,	\
+		__stringify(lfence; jmp *\reg), X86_FEATURE_RETPOLINE_AMD
+#else
+	jmp	*\reg
+#endif
+.endm
+
+.macro CALL_NOSPEC reg:req
+#ifdef CONFIG_RETPOLINE
+	ALTERNATIVE_2 __stringify(call *\reg),				\
+		__stringify(RETPOLINE_CALL \reg), X86_FEATURE_RETPOLINE,\
+		__stringify(lfence; call *\reg), X86_FEATURE_RETPOLINE_AMD
+#else
+	call	*\reg
+#endif
+.endm
+
+/*
+ * A simpler FILL_RETURN_BUFFER macro. Don't make people use the CPP
+ * monstrosity above, manually.
+ */
+.macro FILL_RETURN_BUFFER reg:req nr:req ftr:req
+#ifdef CONFIG_RETPOLINE
+	ALTERNATIVE "jmp .Lskip_rsb_\@",				\
+		__stringify(__FILL_RETURN_BUFFER(\reg,\nr,%_ASM_SP))	\
+		\ftr
+.Lskip_rsb_\@:
+#endif
+.endm
+
+#else /* __ASSEMBLY__ */
+
+#if defined(CONFIG_X86_64) && defined(RETPOLINE)
+
+/*
+ * Since the inline asm uses the %V modifier which is only in newer GCC,
+ * the 64-bit one is dependent on RETPOLINE not CONFIG_RETPOLINE.
+ */
+# define CALL_NOSPEC						\
+	ALTERNATIVE(						\
+	"call *%[thunk_target]\n",				\
+	"call __x86_indirect_thunk_%V[thunk_target]\n",		\
+	X86_FEATURE_RETPOLINE)
+# define THUNK_TARGET(addr) [thunk_target] "r" (addr)
+
+#elif defined(CONFIG_X86_32) && defined(CONFIG_RETPOLINE)
+/*
+ * For i386 we use the original ret-equivalent retpoline, because
+ * otherwise we'll run out of registers. We don't care about CET
+ * here, anyway.
+ */
+# define CALL_NOSPEC ALTERNATIVE("call *%[thunk_target]\n",	\
+	"       jmp    904f;\n"					\
+	"       .align 16\n"					\
+	"901:	call   903f;\n"					\
+	"902:	pause;\n"					\
+	"	lfence;\n"					\
+	"       jmp    902b;\n"					\
+	"       .align 16\n"					\
+	"903:	addl   $4, %%esp;\n"				\
+	"       pushl  %[thunk_target];\n"			\
+	"       ret;\n"						\
+	"       .align 16\n"					\
+	"904:	call   901b;\n",				\
+	X86_FEATURE_RETPOLINE)
+
+# define THUNK_TARGET(addr) [thunk_target] "rm" (addr)
+#else /* No retpoline for C / inline asm */
+# define CALL_NOSPEC "call *%[thunk_target]\n"
+# define THUNK_TARGET(addr) [thunk_target] "rm" (addr)
+#endif
+
+/* The Spectre V2 mitigation variants */
+enum spectre_v2_mitigation {
+	SPECTRE_V2_NONE,
+	SPECTRE_V2_RETPOLINE_MINIMAL,
+	SPECTRE_V2_RETPOLINE_MINIMAL_AMD,
+	SPECTRE_V2_RETPOLINE_GENERIC,
+	SPECTRE_V2_RETPOLINE_AMD,
+	SPECTRE_V2_IBRS,
+};
+
+extern char __indirect_thunk_start[];
+extern char __indirect_thunk_end[];
+
+/*
+ * On VMEXIT we must ensure that no RSB predictions learned in the guest
+ * can be followed in the host, by overwriting the RSB completely. Both
+ * retpoline and IBRS mitigations for Spectre v2 need this; only on future
+ * CPUs with IBRS_ATT *might* it be avoided.
+ */
+static inline void vmexit_fill_RSB(void)
+{
+#ifdef CONFIG_RETPOLINE
+	unsigned long loops;
+
+	asm volatile (ALTERNATIVE("jmp 910f",
+				  __stringify(__FILL_RETURN_BUFFER(%0, RSB_CLEAR_LOOPS, %1)),
+				  X86_FEATURE_RETPOLINE)
+		      "910:"
+		      : "=r" (loops), ASM_CALL_CONSTRAINT
+		      : : "memory" );
+#endif
+}
+
+#endif /* __ASSEMBLY__ */
+#endif /* __NOSPEC_BRANCH_H__ */
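On the C side, a call site supplies the target through THUNK_TARGET() and collects results via ordinary constraints; the hyperv and irq_32.c hunks later in this diff follow exactly this shape. A minimal x86-64 sketch with a hypothetical function pointer (not from the commit):

#include <asm/nospec-branch.h>

/* Sketch: invoke 'fn' indirectly through CALL_NOSPEC. The clobber list
 * must cover everything a called function may trash; real call sites
 * (see do_hypercall() below) tailor it to their ABI. */
static unsigned long demo_indirect_call(unsigned long (*fn)(void))
{
	unsigned long ret;

	asm volatile(CALL_NOSPEC
		     : "=a" (ret), ASM_CALL_CONSTRAINT
		     : THUNK_TARGET(fn)
		     : "rcx", "rdx", "rsi", "rdi",
		       "r8", "r9", "r10", "r11", "memory", "cc");
	return ret;
}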
@@ -574,7 +574,7 @@ static inline void sync_core(void)
{
	int tmp;

-#ifdef CONFIG_M486
+#ifdef CONFIG_X86_32
	/*
	 * Do a CPUID if available, otherwise do a jump. The jump
	 * can conveniently enough be the jump around CPUID.
@@ -1,6 +1,8 @@
#ifndef _ASM_X86_SWITCH_TO_H
#define _ASM_X86_SWITCH_TO_H

+#include <asm/nospec-branch.h>
+
struct task_struct; /* one of the stranger aspects of C forward declarations */
__visible struct task_struct *__switch_to(struct task_struct *prev,
					  struct task_struct *next);

@@ -24,6 +26,23 @@ void __switch_to_xtra(struct task_struct *prev_p, struct task_struct *next_p,
#define __switch_canary_iparam
#endif	/* CC_STACKPROTECTOR */

+#ifdef CONFIG_RETPOLINE
+	/*
+	 * When switching from a shallower to a deeper call stack
+	 * the RSB may either underflow or use entries populated
+	 * with userspace addresses. On CPUs where those concerns
+	 * exist, overwrite the RSB with entries which capture
+	 * speculative execution to prevent attack.
+	 */
+#define __retpoline_fill_return_buffer					\
+	ALTERNATIVE("jmp 910f",						\
+		__stringify(__FILL_RETURN_BUFFER(%%ebx, RSB_CLEAR_LOOPS, %%esp)),\
+		X86_FEATURE_RSB_CTXSW)					\
+	"910:\n\t"
+#else
+#define __retpoline_fill_return_buffer
+#endif
+
/*
 * Saving eflags is important. It switches not only IOPL between tasks,
 * it also protects other tasks from NT leaking through sysenter etc.

@@ -46,6 +65,7 @@ do {									\
	     "movl $1f,%[prev_ip]\n\t"	/* save    EIP   */		\
	     "pushl %[next_ip]\n\t"	/* restore EIP   */		\
	     __switch_canary						\
+	     __retpoline_fill_return_buffer				\
	     "jmp __switch_to\n"	/* regparm call  */		\
	     "1:\t"							\
	     "popl %%ebp\n\t"		/* restore EBP   */		\

@@ -100,6 +120,23 @@ do {									\
#define __switch_canary_iparam
#endif	/* CC_STACKPROTECTOR */

+#ifdef CONFIG_RETPOLINE
+	/*
+	 * When switching from a shallower to a deeper call stack
+	 * the RSB may either underflow or use entries populated
+	 * with userspace addresses. On CPUs where those concerns
+	 * exist, overwrite the RSB with entries which capture
+	 * speculative execution to prevent attack.
+	 */
+#define __retpoline_fill_return_buffer					\
+	ALTERNATIVE("jmp 910f",						\
+		__stringify(__FILL_RETURN_BUFFER(%%r12, RSB_CLEAR_LOOPS, %%rsp)),\
+		X86_FEATURE_RSB_CTXSW)					\
+	"910:\n\t"
+#else
+#define __retpoline_fill_return_buffer
+#endif
+
/*
 * There is no need to save or restore flags, because flags are always
 * clean in kernel mode, with the possible exception of IOPL. Kernel IOPL

@@ -112,6 +149,7 @@ do {									\
	"call __switch_to\n\t"						  \
	"movq "__percpu_arg([current_task])",%%rsi\n\t"			  \
	__switch_canary							  \
+	__retpoline_fill_return_buffer					  \
	"movq %P[thread_info](%%rsi),%%r8\n\t"				  \
	"movq %%rax,%%rdi\n\t"						  \
	"testl %[_tif_fork],%P[ti_flags](%%r8)\n\t"			  \
@@ -92,6 +92,7 @@ dotraplinkage void do_simd_coprocessor_error(struct pt_regs *, long);
#ifdef CONFIG_X86_32
dotraplinkage void do_iret_error(struct pt_regs *, long);
#endif
+dotraplinkage void do_mce(struct pt_regs *, long);

static inline int get_si_code(unsigned long condition)
{
@@ -13,6 +13,7 @@ extern void map_vsyscall(void);
 */
extern bool emulate_vsyscall(struct pt_regs *regs, unsigned long address);
extern bool vsyscall_enabled(void);
+extern unsigned long vsyscall_pgprot;
#else
static inline void map_vsyscall(void) {}
static inline bool emulate_vsyscall(struct pt_regs *regs, unsigned long address)
@@ -44,6 +44,7 @@
#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/smap.h>
+#include <asm/nospec-branch.h>

#include <xen/interface/xen.h>
#include <xen/interface/sched.h>

@@ -215,9 +216,9 @@ privcmd_call(unsigned call,
	__HYPERCALL_5ARG(a1, a2, a3, a4, a5);

	stac();
-	asm volatile("call *%[call]"
+	asm volatile(CALL_NOSPEC
		     : __HYPERCALL_5PARAM
-		     : [call] "a" (&hypercall_page[call])
+		     : [thunk_target] "a" (&hypercall_page[call])
		     : __HYPERCALL_CLOBBER5);
	clac();
@@ -2592,8 +2592,8 @@ static struct resource * __init ioapic_setup_resources(void)
		res[num].flags = IORESOURCE_MEM | IORESOURCE_BUSY;
		snprintf(mem, IOAPIC_RESOURCE_NAME_SIZE, "IOAPIC %u", i);
		mem += IOAPIC_RESOURCE_NAME_SIZE;
+		ioapics[i].iomem_res = &res[num];
		num++;
-		ioapics[i].iomem_res = res;
	}

	ioapic_resources = res;
@@ -359,14 +359,17 @@ static int x86_vector_alloc_irqs(struct irq_domain *domain, unsigned int virq,
		irq_data->chip_data = data;
		irq_data->hwirq = virq + i;
		err = assign_irq_vector_policy(virq + i, node, data, info);
-		if (err)
+		if (err) {
+			irq_data->chip_data = NULL;
+			free_apic_chip_data(data);
			goto error;
+		}
	}

	return 0;

error:
-	x86_vector_free_irqs(domain, virq, i + 1);
+	x86_vector_free_irqs(domain, virq, i);
	return err;
}
@@ -746,8 +746,32 @@ static void init_amd(struct cpuinfo_x86 *c)
		set_cpu_cap(c, X86_FEATURE_K8);

	if (cpu_has_xmm2) {
-		/* MFENCE stops RDTSC speculation */
-		set_cpu_cap(c, X86_FEATURE_MFENCE_RDTSC);
+		unsigned long long val;
+		int ret;
+
+		/*
+		 * A serializing LFENCE has less overhead than MFENCE, so
+		 * use it for execution serialization. On families which
+		 * don't have that MSR, LFENCE is already serializing.
+		 * msr_set_bit() uses the safe accessors, too, even if the MSR
+		 * is not present.
+		 */
+		msr_set_bit(MSR_F10H_DECFG,
+			    MSR_F10H_DECFG_LFENCE_SERIALIZE_BIT);
+
+		/*
+		 * Verify that the MSR write was successful (could be running
+		 * under a hypervisor) and only then assume that LFENCE is
+		 * serializing.
+		 */
+		ret = rdmsrl_safe(MSR_F10H_DECFG, &val);
+		if (!ret && (val & MSR_F10H_DECFG_LFENCE_SERIALIZE)) {
+			/* A serializing LFENCE stops RDTSC speculation */
+			set_cpu_cap(c, X86_FEATURE_LFENCE_RDTSC);
+		} else {
+			/* MFENCE stops RDTSC speculation */
+			set_cpu_cap(c, X86_FEATURE_MFENCE_RDTSC);
+		}
	}

	/*
@@ -10,6 +10,9 @@
#include <linux/init.h>
#include <linux/utsname.h>
#include <linux/cpu.h>
+
+#include <asm/nospec-branch.h>
+#include <asm/cmdline.h>
#include <asm/bugs.h>
#include <asm/processor.h>
#include <asm/processor-flags.h>

@@ -19,17 +22,12 @@
#include <asm/alternative.h>
#include <asm/pgtable.h>
#include <asm/cacheflush.h>
+#include <asm/intel-family.h>
+
+static void __init spectre_v2_select_mitigation(void);

void __init check_bugs(void)
{
-#ifdef CONFIG_X86_32
-	/*
-	 * Regardless of whether PCID is enumerated, the SDM says
-	 * that it can't be enabled in 32-bit mode.
-	 */
-	setup_clear_cpu_cap(X86_FEATURE_PCID);
-#endif
-
	identify_boot_cpu();

	if (!IS_ENABLED(CONFIG_SMP)) {

@@ -37,6 +35,9 @@ void __init check_bugs(void)
		print_cpu_info(&boot_cpu_data);
	}

+	/* Select the proper spectre mitigation before patching alternatives */
+	spectre_v2_select_mitigation();
+
#ifdef CONFIG_X86_32
	/*
	 * Check whether we are able to run this kernel safely on SMP.

@@ -69,6 +70,188 @@ void __init check_bugs(void)
#endif
}

+/* The kernel command line selection */
+enum spectre_v2_mitigation_cmd {
+	SPECTRE_V2_CMD_NONE,
+	SPECTRE_V2_CMD_AUTO,
+	SPECTRE_V2_CMD_FORCE,
+	SPECTRE_V2_CMD_RETPOLINE,
+	SPECTRE_V2_CMD_RETPOLINE_GENERIC,
+	SPECTRE_V2_CMD_RETPOLINE_AMD,
+};
+
+static const char *spectre_v2_strings[] = {
+	[SPECTRE_V2_NONE]			= "Vulnerable",
+	[SPECTRE_V2_RETPOLINE_MINIMAL]		= "Vulnerable: Minimal generic ASM retpoline",
+	[SPECTRE_V2_RETPOLINE_MINIMAL_AMD]	= "Vulnerable: Minimal AMD ASM retpoline",
+	[SPECTRE_V2_RETPOLINE_GENERIC]		= "Mitigation: Full generic retpoline",
+	[SPECTRE_V2_RETPOLINE_AMD]		= "Mitigation: Full AMD retpoline",
+};
+
+#undef pr_fmt
+#define pr_fmt(fmt)	"Spectre V2 mitigation: " fmt
+
+static enum spectre_v2_mitigation spectre_v2_enabled = SPECTRE_V2_NONE;
+
+static void __init spec2_print_if_insecure(const char *reason)
+{
+	if (boot_cpu_has_bug(X86_BUG_SPECTRE_V2))
+		pr_info("%s\n", reason);
+}
+
+static void __init spec2_print_if_secure(const char *reason)
+{
+	if (!boot_cpu_has_bug(X86_BUG_SPECTRE_V2))
+		pr_info("%s\n", reason);
+}
+
+static inline bool retp_compiler(void)
+{
+	return __is_defined(RETPOLINE);
+}
+
+static inline bool match_option(const char *arg, int arglen, const char *opt)
+{
+	int len = strlen(opt);
+
+	return len == arglen && !strncmp(arg, opt, len);
+}
+
+static enum spectre_v2_mitigation_cmd __init spectre_v2_parse_cmdline(void)
+{
+	char arg[20];
+	int ret;
+
+	ret = cmdline_find_option(boot_command_line, "spectre_v2", arg,
+				  sizeof(arg));
+	if (ret > 0) {
+		if (match_option(arg, ret, "off")) {
+			goto disable;
+		} else if (match_option(arg, ret, "on")) {
+			spec2_print_if_secure("force enabled on command line.");
+			return SPECTRE_V2_CMD_FORCE;
+		} else if (match_option(arg, ret, "retpoline")) {
+			spec2_print_if_insecure("retpoline selected on command line.");
+			return SPECTRE_V2_CMD_RETPOLINE;
+		} else if (match_option(arg, ret, "retpoline,amd")) {
+			if (boot_cpu_data.x86_vendor != X86_VENDOR_AMD) {
+				pr_err("retpoline,amd selected but CPU is not AMD. Switching to AUTO select\n");
+				return SPECTRE_V2_CMD_AUTO;
+			}
+			spec2_print_if_insecure("AMD retpoline selected on command line.");
+			return SPECTRE_V2_CMD_RETPOLINE_AMD;
+		} else if (match_option(arg, ret, "retpoline,generic")) {
+			spec2_print_if_insecure("generic retpoline selected on command line.");
+			return SPECTRE_V2_CMD_RETPOLINE_GENERIC;
+		} else if (match_option(arg, ret, "auto")) {
+			return SPECTRE_V2_CMD_AUTO;
+		}
+	}
+
+	if (!cmdline_find_option_bool(boot_command_line, "nospectre_v2"))
+		return SPECTRE_V2_CMD_AUTO;
+disable:
+	spec2_print_if_insecure("disabled on command line.");
+	return SPECTRE_V2_CMD_NONE;
+}
+
+/* Check for Skylake-like CPUs (for RSB handling) */
+static bool __init is_skylake_era(void)
+{
+	if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL &&
+	    boot_cpu_data.x86 == 6) {
+		switch (boot_cpu_data.x86_model) {
+		case INTEL_FAM6_SKYLAKE_MOBILE:
+		case INTEL_FAM6_SKYLAKE_DESKTOP:
+		case INTEL_FAM6_SKYLAKE_X:
+		case INTEL_FAM6_KABYLAKE_MOBILE:
+		case INTEL_FAM6_KABYLAKE_DESKTOP:
+			return true;
+		}
+	}
+	return false;
+}
+
+static void __init spectre_v2_select_mitigation(void)
+{
+	enum spectre_v2_mitigation_cmd cmd = spectre_v2_parse_cmdline();
+	enum spectre_v2_mitigation mode = SPECTRE_V2_NONE;
+
+	/*
+	 * If the CPU is not affected and the command line mode is NONE or AUTO
+	 * then nothing to do.
+	 */
+	if (!boot_cpu_has_bug(X86_BUG_SPECTRE_V2) &&
+	    (cmd == SPECTRE_V2_CMD_NONE || cmd == SPECTRE_V2_CMD_AUTO))
+		return;
+
+	switch (cmd) {
+	case SPECTRE_V2_CMD_NONE:
+		return;
+
+	case SPECTRE_V2_CMD_FORCE:
+		/* FALLTHRU */
+	case SPECTRE_V2_CMD_AUTO:
+		goto retpoline_auto;
+
+	case SPECTRE_V2_CMD_RETPOLINE_AMD:
+		if (IS_ENABLED(CONFIG_RETPOLINE))
+			goto retpoline_amd;
+		break;
+	case SPECTRE_V2_CMD_RETPOLINE_GENERIC:
+		if (IS_ENABLED(CONFIG_RETPOLINE))
+			goto retpoline_generic;
+		break;
+	case SPECTRE_V2_CMD_RETPOLINE:
+		if (IS_ENABLED(CONFIG_RETPOLINE))
+			goto retpoline_auto;
+		break;
+	}
+	pr_err("kernel not compiled with retpoline; no mitigation available!");
+	return;
+
+retpoline_auto:
+	if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD) {
+	retpoline_amd:
+		if (!boot_cpu_has(X86_FEATURE_LFENCE_RDTSC)) {
+			pr_err("LFENCE not serializing. Switching to generic retpoline\n");
+			goto retpoline_generic;
+		}
+		mode = retp_compiler() ? SPECTRE_V2_RETPOLINE_AMD :
+					 SPECTRE_V2_RETPOLINE_MINIMAL_AMD;
+		setup_force_cpu_cap(X86_FEATURE_RETPOLINE_AMD);
+		setup_force_cpu_cap(X86_FEATURE_RETPOLINE);
+	} else {
+	retpoline_generic:
+		mode = retp_compiler() ? SPECTRE_V2_RETPOLINE_GENERIC :
+					 SPECTRE_V2_RETPOLINE_MINIMAL;
+		setup_force_cpu_cap(X86_FEATURE_RETPOLINE);
+	}
+
+	spectre_v2_enabled = mode;
+	pr_info("%s\n", spectre_v2_strings[mode]);
+
+	/*
+	 * If neither SMEP or KPTI are available, there is a risk of
+	 * hitting userspace addresses in the RSB after a context switch
+	 * from a shallow call stack to a deeper one. To prevent this fill
+	 * the entire RSB, even when using IBRS.
+	 *
+	 * Skylake era CPUs have a separate issue with *underflow* of the
+	 * RSB, when they will predict 'ret' targets from the generic BTB.
+	 * The proper mitigation for this is IBRS. If IBRS is not supported
+	 * or deactivated in favour of retpolines the RSB fill on context
+	 * switch is required.
+	 */
+	if ((!boot_cpu_has(X86_FEATURE_KAISER) &&
+	     !boot_cpu_has(X86_FEATURE_SMEP)) || is_skylake_era()) {
+		setup_force_cpu_cap(X86_FEATURE_RSB_CTXSW);
+		pr_info("Filling RSB on context switch\n");
+	}
+}
+
+#undef pr_fmt
+
#ifdef CONFIG_SYSFS
ssize_t cpu_show_meltdown(struct device *dev,
			  struct device_attribute *attr, char *buf)

@@ -93,6 +276,7 @@ ssize_t cpu_show_spectre_v2(struct device *dev,
{
	if (!boot_cpu_has_bug(X86_BUG_SPECTRE_V2))
		return sprintf(buf, "Not affected\n");
-	return sprintf(buf, "Vulnerable\n");
+
+	return sprintf(buf, "%s\n", spectre_v2_strings[spectre_v2_enabled]);
}
#endif
@@ -831,13 +831,21 @@ static void __init early_identify_cpu(struct cpuinfo_x86 *c)

	setup_force_cpu_cap(X86_FEATURE_ALWAYS);

-	/* Assume for now that ALL x86 CPUs are insecure */
-	setup_force_cpu_bug(X86_BUG_CPU_MELTDOWN);
+	if (c->x86_vendor != X86_VENDOR_AMD)
+		setup_force_cpu_bug(X86_BUG_CPU_MELTDOWN);
+
+	setup_force_cpu_bug(X86_BUG_SPECTRE_V1);
+	setup_force_cpu_bug(X86_BUG_SPECTRE_V2);

	fpu__init_system(c);
+
+#ifdef CONFIG_X86_32
+	/*
+	 * Regardless of whether PCID is enumerated, the SDM says
+	 * that it can't be enabled in 32-bit mode.
+	 */
+	setup_clear_cpu_cap(X86_FEATURE_PCID);
+#endif
}

void __init early_cpu_init(void)
@@ -934,6 +934,8 @@ static int __populate_cache_leaves(unsigned int cpu)
		ci_leaf_init(this_leaf++, &id4_regs);
		__cache_cpumap_setup(cpu, idx, &id4_regs);
	}
+	this_cpu_ci->cpu_map_populated = true;
+
	return 0;
}
@@ -1672,6 +1672,11 @@ static void unexpected_machine_check(struct pt_regs *regs, long error_code)
void (*machine_check_vector)(struct pt_regs *, long error_code) =
						unexpected_machine_check;

+dotraplinkage void do_mce(struct pt_regs *regs, long error_code)
+{
+	machine_check_vector(regs, error_code);
+}
+
/*
 * Called for each booted CPU to set up machine checks.
 * Must be called with preempt off:
@@ -39,6 +39,9 @@
#include <asm/setup.h>
#include <asm/msr.h>

+/* last level cache size per core */
+static int llc_size_per_core;
+
static unsigned long mc_saved_in_initrd[MAX_UCODE_COUNT];
static struct mc_saved_data {
	unsigned int mc_saved_count;

@@ -996,15 +999,18 @@ static bool is_blacklisted(unsigned int cpu)

	/*
	 * Late loading on model 79 with microcode revision less than 0x0b000021
-	 * may result in a system hang. This behavior is documented in item
-	 * BDF90, #334165 (Intel Xeon Processor E7-8800/4800 v4 Product Family).
+	 * and LLC size per core bigger than 2.5MB may result in a system hang.
+	 * This behavior is documented in item BDF90, #334165 (Intel Xeon
+	 * Processor E7-8800/4800 v4 Product Family).
	 */
	if (c->x86 == 6 &&
	    c->x86_model == 79 &&
	    c->x86_mask == 0x01 &&
+	    llc_size_per_core > 2621440 &&
	    c->microcode < 0x0b000021) {
		pr_err_once("Erratum BDF90: late loading with revision < 0x0b000021 (0x%x) disabled.\n", c->microcode);
		pr_err_once("Please consider either early loading through initrd/built-in or a potential BIOS update.\n");
		return true;
	}

	return false;

@@ -1067,6 +1073,15 @@ static struct microcode_ops microcode_intel_ops = {
	.microcode_fini_cpu  = microcode_fini_cpu,
};

+static int __init calc_llc_size_per_core(struct cpuinfo_x86 *c)
+{
+	u64 llc_size = c->x86_cache_size * 1024;
+
+	do_div(llc_size, c->x86_max_cores);
+
+	return (int)llc_size;
+}
+
struct microcode_ops * __init init_intel_microcode(void)
{
	struct cpuinfo_x86 *c = &boot_cpu_data;

@@ -1077,6 +1092,8 @@ struct microcode_ops * __init init_intel_microcode(void)
		return NULL;
	}

+	llc_size_per_core = calc_llc_size_per_core(c);
+
	return &microcode_intel_ops;
}
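calc_llc_size_per_core() leans on do_div(), which divides its 64-bit dividend in place and evaluates to the remainder. A hypothetical standalone illustration of why the 2621440 threshold above corresponds to 2.5MB per core:

#include <linux/kernel.h>
#include <asm/div64.h>

/* Sketch: a 10MB last-level cache shared by 4 cores yields 2621440
 * bytes (2.5MB) per core — exactly the is_blacklisted() threshold. */
static int llc_per_core_demo(void)
{
	u64 llc_bytes = 10ULL * 1024 * 1024;

	do_div(llc_bytes, 4);	/* divides in place, returns the remainder */
	return (int)llc_bytes;	/* 2621440 */
}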
@@ -20,6 +20,7 @@
#include <linux/mm.h>

#include <asm/apic.h>
+#include <asm/nospec-branch.h>

#ifdef CONFIG_DEBUG_STACKOVERFLOW

@@ -55,17 +56,17 @@ DEFINE_PER_CPU(struct irq_stack *, softirq_stack);
static void call_on_stack(void *func, void *stack)
{
	asm volatile("xchgl	%%ebx,%%esp	\n"
-		     "call	*%%edi		\n"
+		     CALL_NOSPEC
		     "movl	%%ebx,%%esp	\n"
		     : "=b" (stack)
		     : "0" (stack),
-		       "D"(func)
+		       [thunk_target] "D"(func)
		     : "memory", "cc", "edx", "ecx", "eax");
}

static inline void *current_stack(void)
{
-	return (void *)(current_stack_pointer() & ~(THREAD_SIZE - 1));
+	return (void *)(current_stack_pointer & ~(THREAD_SIZE - 1));
}

static inline int execute_on_irq_stack(int overflow, struct irq_desc *desc)

@@ -89,17 +90,17 @@ static inline int execute_on_irq_stack(int overflow, struct irq_desc *desc)

	/* Save the next esp at the bottom of the stack */
	prev_esp = (u32 *)irqstk;
-	*prev_esp = current_stack_pointer();
+	*prev_esp = current_stack_pointer;

	if (unlikely(overflow))
		call_on_stack(print_stack_overflow, isp);

	asm volatile("xchgl	%%ebx,%%esp	\n"
-		     "call	*%%edi		\n"
+		     CALL_NOSPEC
		     "movl	%%ebx,%%esp	\n"
		     : "=a" (arg1), "=b" (isp)
		     : "0" (desc), "1" (isp),
-		       "D" (desc->handle_irq)
+		       [thunk_target] "D" (desc->handle_irq)
		     : "memory", "cc", "ecx");
	return 1;
}

@@ -142,7 +143,7 @@ void do_softirq_own_stack(void)

	/* Push the previous esp onto the stack */
	prev_esp = (u32 *)irqstk;
-	*prev_esp = current_stack_pointer();
+	*prev_esp = current_stack_pointer;

	call_on_stack(__do_softirq, isp);
}
@@ -36,6 +36,7 @@
#include <asm/alternative.h>
#include <asm/insn.h>
#include <asm/debugreg.h>
+#include <asm/nospec-branch.h>

#include "common.h"

@@ -191,7 +192,7 @@ static int copy_optimized_instructions(u8 *dest, u8 *src)
}

/* Check whether insn is indirect jump */
-static int insn_is_indirect_jump(struct insn *insn)
+static int __insn_is_indirect_jump(struct insn *insn)
{
	return ((insn->opcode.bytes[0] == 0xff &&
		(X86_MODRM_REG(insn->modrm.value) & 6) == 4) || /* Jump */

@@ -225,6 +226,26 @@ static int insn_jump_into_range(struct insn *insn, unsigned long start, int len)
	return (start <= target && target <= start + len);
}

+static int insn_is_indirect_jump(struct insn *insn)
+{
+	int ret = __insn_is_indirect_jump(insn);
+
+#ifdef CONFIG_RETPOLINE
+	/*
+	 * Jump to x86_indirect_thunk_* is treated as an indirect jump.
+	 * Note that even with CONFIG_RETPOLINE=y, the kernel compiled with
+	 * older gcc may use indirect jump. So we add this check instead of
+	 * replace indirect-jump check.
+	 */
+	if (!ret)
+		ret = insn_jump_into_range(insn,
+				(unsigned long)__indirect_thunk_start,
+				(unsigned long)__indirect_thunk_end -
+				(unsigned long)__indirect_thunk_start);
+#endif
+	return ret;
+}
+
/* Decode whole function to ensure any instructions don't jump into target */
static int can_optimize(unsigned long paddr)
{
|
|||
#include <linux/linkage.h>
|
||||
#include <asm/ptrace.h>
|
||||
#include <asm/ftrace.h>
|
||||
|
||||
#include <asm/nospec-branch.h>
|
||||
|
||||
.code64
|
||||
.section .entry.text, "ax"
|
||||
|
|
@ -285,8 +285,9 @@ trace:
|
|||
* ip and parent ip are used and the list function is called when
|
||||
* function tracing is enabled.
|
||||
*/
|
||||
call *ftrace_trace_function
|
||||
|
||||
movq ftrace_trace_function, %r8
|
||||
CALL_NOSPEC %r8
|
||||
restore_mcount_regs
|
||||
|
||||
jmp fgraph_trace
|
||||
|
|
@ -329,5 +330,5 @@ GLOBAL(return_to_handler)
|
|||
movq 8(%rsp), %rdx
|
||||
movq (%rsp), %rax
|
||||
addq $24, %rsp
|
||||
jmp *%rdi
|
||||
JMP_NOSPEC %rdi
|
||||
#endif
|
||||
|
|
|
|||
|
|
@@ -166,7 +166,7 @@ void ist_begin_non_atomic(struct pt_regs *regs)
	 * from double_fault.
	 */
	BUG_ON((unsigned long)(current_top_of_stack() -
-			       current_stack_pointer()) >= THREAD_SIZE);
+			       current_stack_pointer) >= THREAD_SIZE);

	preempt_enable_no_resched();
}
@@ -104,6 +104,13 @@ SECTIONS
		SOFTIRQENTRY_TEXT
		*(.fixup)
		*(.gnu.warning)
+
+#ifdef CONFIG_RETPOLINE
+		__indirect_thunk_start = .;
+		*(.text.__x86.indirect_thunk)
+		__indirect_thunk_end = .;
+#endif
+
		/* End of text section */
		_etext = .;
	} :text = 0x9090
@@ -37,6 +37,7 @@
#include <asm/desc.h>
#include <asm/debugreg.h>
#include <asm/kvm_para.h>
+#include <asm/nospec-branch.h>

#include <asm/virtext.h>
#include "trace.h"

@@ -3904,6 +3905,9 @@ static void svm_vcpu_run(struct kvm_vcpu *vcpu)
#endif
		);

+	/* Eliminate branch target predictions from guest mode */
+	vmexit_fill_RSB();
+
#ifdef CONFIG_X86_64
	wrmsrl(MSR_GS_BASE, svm->host.gs_base);
#else
@@ -47,6 +47,7 @@
#include <asm/kexec.h>
#include <asm/apic.h>
#include <asm/irq_remapping.h>
+#include <asm/nospec-branch.h>

#include "trace.h"
#include "pmu.h"

@@ -8701,6 +8702,9 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
#endif
	      );

+	/* Eliminate branch target predictions from guest mode */
+	vmexit_fill_RSB();
+
	/* MSR_IA32_DEBUGCTLMSR is zeroed on vmexit. Restore it if needed */
	if (debugctlmsr)
		update_debugctlmsr(debugctlmsr);
@@ -24,6 +24,7 @@ lib-y += usercopy_$(BITS).o usercopy.o getuser.o putuser.o
lib-y += memcpy_$(BITS).o
lib-$(CONFIG_RWSEM_XCHGADD_ALGORITHM) += rwsem.o
lib-$(CONFIG_INSTRUCTION_DECODER) += insn.o inat.o
+lib-$(CONFIG_RETPOLINE) += retpoline.o

obj-y += msr.o msr-reg.o msr-reg-export.o hweight.o
@@ -28,7 +28,8 @@
#include <linux/linkage.h>
#include <asm/errno.h>
#include <asm/asm.h>
+#include <asm/nospec-branch.h>

/*
 * computes a partial checksum, e.g. for TCP/UDP fragments
 */

@@ -155,7 +156,7 @@ ENTRY(csum_partial)
	negl %ebx
	lea 45f(%ebx,%ebx,2), %ebx
	testl %esi, %esi
-	jmp *%ebx
+	JMP_NOSPEC %ebx

	# Handle 2-byte-aligned regions
20:	addw (%esi), %ax

@@ -437,7 +438,7 @@ ENTRY(csum_partial_copy_generic)
	andl $-32,%edx
	lea 3f(%ebx,%ebx), %ebx
	testl %esi, %esi
-	jmp *%ebx
+	JMP_NOSPEC %ebx
1:	addl $64,%esi
	addl $64,%edi
SRC(movb -32(%edx),%bl)	; SRC(movb (%edx),%bl)
@@ -93,6 +93,13 @@ static void delay_mwaitx(unsigned long __loops)
{
	u64 start, end, delay, loops = __loops;

+	/*
+	 * Timer value of 0 causes MWAITX to wait indefinitely, unless there
+	 * is a store on the memory monitored by MONITORX.
+	 */
+	if (loops == 0)
+		return;
+
	start = rdtsc_ordered();

	for (;;) {
arch/x86/lib/retpoline.S (new file)

@@ -0,0 +1,49 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#include <linux/stringify.h>
+#include <linux/linkage.h>
+#include <asm/dwarf2.h>
+#include <asm/cpufeature.h>
+#include <asm/alternative-asm.h>
+#include <asm-generic/export.h>
+#include <asm/nospec-branch.h>
+
+.macro THUNK reg
+	.section .text.__x86.indirect_thunk
+
+ENTRY(__x86_indirect_thunk_\reg)
+	CFI_STARTPROC
+	JMP_NOSPEC %\reg
+	CFI_ENDPROC
+ENDPROC(__x86_indirect_thunk_\reg)
+.endm
+
+/*
+ * Despite being an assembler file we can't just use .irp here
+ * because __KSYM_DEPS__ only uses the C preprocessor and would
+ * only see one instance of "__x86_indirect_thunk_\reg" rather
+ * than one per register with the correct names. So we do it
+ * the simple and nasty way...
+ */
+#define __EXPORT_THUNK(sym) _ASM_NOKPROBE(sym); EXPORT_SYMBOL(sym)
+#define EXPORT_THUNK(reg) __EXPORT_THUNK(__x86_indirect_thunk_ ## reg)
+#define GENERATE_THUNK(reg) THUNK reg ; EXPORT_THUNK(reg)
+
+GENERATE_THUNK(_ASM_AX)
+GENERATE_THUNK(_ASM_BX)
+GENERATE_THUNK(_ASM_CX)
+GENERATE_THUNK(_ASM_DX)
+GENERATE_THUNK(_ASM_SI)
+GENERATE_THUNK(_ASM_DI)
+GENERATE_THUNK(_ASM_BP)
+GENERATE_THUNK(_ASM_SP)
+#ifdef CONFIG_64BIT
+GENERATE_THUNK(r8)
+GENERATE_THUNK(r9)
+GENERATE_THUNK(r10)
+GENERATE_THUNK(r11)
+GENERATE_THUNK(r12)
+GENERATE_THUNK(r13)
+GENERATE_THUNK(r14)
+GENERATE_THUNK(r15)
+#endif
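Each GENERATE_THUNK() above stamps out one tiny function; on a retpoline-enabled CPU the JMP_NOSPEC inside it becomes the call/trap/ret sequence from nospec-branch.h. A sketch of roughly what the patched __x86_indirect_thunk_rax body does (illustrative only; the hypothetical _demo suffix avoids clashing with the real symbol):

/* Illustrative clone of the patched thunk, expressed as C-level
 * toplevel asm. Assumption: x86-64, target address already in %rax. */
asm(".pushsection .text\n"
    "__x86_indirect_thunk_rax_demo:\n"
    "	call	1f\n"		/* push a safe return address */
    "2:	pause\n"		/* speculation trap */
    "	lfence\n"
    "	jmp	2b\n"
    "1:	mov	%rax, (%rsp)\n"	/* overwrite it with the real target */
    "	ret\n"			/* 'return' == unpredicted indirect jump */
    ".popsection\n");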
@@ -345,7 +345,7 @@ void __init kaiser_init(void)
	if (vsyscall_enabled())
		kaiser_add_user_map_early((void *)VSYSCALL_ADDR,
					  PAGE_SIZE,
-					  __PAGE_KERNEL_VSYSCALL);
+					  vsyscall_pgprot);

	for_each_possible_cpu(cpu) {
		void *percpu_vaddr = __per_cpu_user_mapped_start +
@@ -331,15 +331,6 @@ static int acpi_processor_get_info(struct acpi_device *device)
		pr->throttling.duty_width = acpi_gbl_FADT.duty_width;

		pr->pblk = object.processor.pblk_address;
-
-		/*
-		 * We don't care about error returns - we just try to mark
-		 * these reserved so that nobody else is confused into thinking
-		 * that this region might be unused..
-		 *
-		 * (In particular, allocating the IO range for Cardbus)
-		 */
-		request_region(pr->throttling.address, 6, "ACPI CPU throttle");
	}

	/*
@@ -593,25 +593,20 @@ struct acpi_namespace_node *acpi_ns_validate_handle(acpi_handle handle)
void acpi_ns_terminate(void)
{
	acpi_status status;
+	union acpi_operand_object *prev;
+	union acpi_operand_object *next;

	ACPI_FUNCTION_TRACE(ns_terminate);

-#ifdef ACPI_EXEC_APP
-	{
-		union acpi_operand_object *prev;
-		union acpi_operand_object *next;
+	/* Delete any module-level code blocks */

-		/* Delete any module-level code blocks */
-
-		next = acpi_gbl_module_code_list;
-		while (next) {
-			prev = next;
-			next = next->method.mutex;
-			prev->method.mutex = NULL;	/* Clear the Mutex (cheated) field */
-			acpi_ut_remove_reference(prev);
-		}
-	}
-#endif
+	next = acpi_gbl_module_code_list;
+	while (next) {
+		prev = next;
+		next = next->method.mutex;
+		prev->method.mutex = NULL;	/* Clear the Mutex (cheated) field */
+		acpi_ut_remove_reference(prev);
+	}

	/*
	 * Free the entire namespace -- all nodes and all objects
@@ -99,13 +99,13 @@ static int find_child_checks(struct acpi_device *adev, bool check_children)
		return -ENODEV;

	/*
-	 * If the device has a _HID (or _CID) returning a valid ACPI/PNP
-	 * device ID, it is better to make it look less attractive here, so that
-	 * the other device with the same _ADR value (that may not have a valid
-	 * device ID) can be matched going forward. [This means a second spec
-	 * violation in a row, so whatever we do here is best effort anyway.]
+	 * If the device has a _HID returning a valid ACPI/PNP device ID, it is
+	 * better to make it look less attractive here, so that the other device
+	 * with the same _ADR value (that may not have a valid device ID) can be
+	 * matched going forward. [This means a second spec violation in a row,
+	 * so whatever we do here is best effort anyway.]
	 */
-	return sta_present && list_empty(&adev->pnp.ids) ?
+	return sta_present && !adev->pnp.type.platform_id ?
		FIND_CHILD_MAX_SCORE : FIND_CHILD_MIN_SCORE;
}
@@ -676,6 +676,15 @@ static int acpi_processor_get_throttling_fadt(struct acpi_processor *pr)
	if (!pr->flags.throttling)
		return -ENODEV;

+	/*
+	 * We don't care about error returns - we just try to mark
+	 * these reserved so that nobody else is confused into thinking
+	 * that this region might be unused..
+	 *
+	 * (In particular, allocating the IO range for Cardbus)
+	 */
+	request_region(pr->throttling.address, 6, "ACPI CPU throttle");
+
	pr->throttling.state = 0;

	duty_mask = pr->throttling.state_count - 1;
@@ -4143,6 +4143,7 @@ static const struct ata_blacklist_entry ata_device_blacklist [] = {
 	 * https://bugzilla.kernel.org/show_bug.cgi?id=121671
 	 */
 	{ "LITEON CX1-JB*-HP",	NULL,		ATA_HORKAGE_MAX_SEC_1024 },
+	{ "LITEON EP1-*",	NULL,		ATA_HORKAGE_MAX_SEC_1024 },
 
 	/* Devices we expect to fail diagnostics */
@@ -16,6 +16,7 @@
  * You should have received a copy of the GNU General Public License
  * along with this program. If not, see <http://www.gnu.org/licenses/>.
  */
+#include <linux/acpi.h>
 #include <linux/bitops.h>
 #include <linux/cacheinfo.h>
 #include <linux/compiler.h>
@@ -104,9 +105,16 @@ static int cache_shared_cpu_map_setup(unsigned int cpu)
 	struct cpu_cacheinfo *this_cpu_ci = get_cpu_cacheinfo(cpu);
 	struct cacheinfo *this_leaf, *sib_leaf;
 	unsigned int index;
-	int ret;
+	int ret = 0;
 
-	ret = cache_setup_of_node(cpu);
+	if (this_cpu_ci->cpu_map_populated)
+		return 0;
+
+	if (of_have_populated_dt())
+		ret = cache_setup_of_node(cpu);
+	else if (!acpi_disabled)
+		/* No cache property/hierarchy support yet in ACPI */
+		ret = -ENOTSUPP;
 	if (ret)
 		return ret;
 
@@ -203,8 +211,7 @@ static int detect_cache_attributes(unsigned int cpu)
 	 */
 	ret = cache_shared_cpu_map_setup(cpu);
 	if (ret) {
-		pr_warn("Unable to detect cache hierarchy from DT for CPU %d\n",
-			cpu);
+		pr_warn("Unable to detect cache hierarchy for CPU %d\n", cpu);
 		goto free_ci;
 	}
 	return 0;
@@ -166,14 +166,14 @@ void generate_pm_trace(const void *tracedata, unsigned int user)
 }
 EXPORT_SYMBOL(generate_pm_trace);
 
-extern char __tracedata_start, __tracedata_end;
+extern char __tracedata_start[], __tracedata_end[];
 static int show_file_hash(unsigned int value)
 {
 	int match;
 	char *tracedata;
 
 	match = 0;
-	for (tracedata = &__tracedata_start ; tracedata < &__tracedata_end ;
+	for (tracedata = __tracedata_start ; tracedata < __tracedata_end ;
 			tracedata += 2 + sizeof(unsigned long)) {
 		unsigned short lineno = *(unsigned short *)tracedata;
 		const char *file = *(const char **)(tracedata + 2);
@@ -31,6 +31,7 @@
 #include <linux/clockchips.h>
 #include <asm/hyperv.h>
 #include <asm/mshyperv.h>
+#include <asm/nospec-branch.h>
 #include "hyperv_vmbus.h"
 
 /* The one and only */
@@ -103,9 +104,10 @@ static u64 do_hypercall(u64 control, void *input, void *output)
 		return (u64)ULLONG_MAX;
 
 	__asm__ __volatile__("mov %0, %%r8" : : "r" (output_address) : "r8");
-	__asm__ __volatile__("call *%3" : "=a" (hv_status) :
+	__asm__ __volatile__(CALL_NOSPEC :
+			     "=a" (hv_status) :
 			     "c" (control), "d" (input_address),
-			     "m" (hypercall_page));
+			     THUNK_TARGET(hypercall_page));
 
 	return hv_status;
@@ -123,11 +125,12 @@ static u64 do_hypercall(u64 control, void *input, void *output)
 	if (!hypercall_page)
 		return (u64)ULLONG_MAX;
 
-	__asm__ __volatile__ ("call *%8" : "=d"(hv_status_hi),
+	__asm__ __volatile__ (CALL_NOSPEC : "=d"(hv_status_hi),
 			      "=a"(hv_status_lo) : "d" (control_hi),
 			      "a" (control_lo), "b" (input_address_hi),
 			      "c" (input_address_lo), "D"(output_address_hi),
-			      "S"(output_address_lo), "m" (hypercall_page));
+			      "S"(output_address_lo),
+			      THUNK_TARGET(hypercall_page));
 
 	return hv_status_lo | ((u64)hv_status_hi << 32);
 #endif /* !x86_64 */
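Background on the two hypercall hunks above: CALL_NOSPEC compiles to a plain indirect call on unprotected builds and to a retpoline thunk when CONFIG_RETPOLINE is enabled, so the speculative execution of the hypercall dispatch can no longer be steered by a poisoned indirect branch predictor. A minimal sketch of the retpoline idea follows; it is illustrative only (the kernel's real thunks live in arch/x86, and the register choice here is arbitrary):

/*
 * Retpoline sketch: an indirect jump to the address in %rax.
 * Architecturally, the "call" pushes the address of 1: and jumps to 2:,
 * where the return address on the stack is replaced with the real
 * target, so "ret" transfers control there. Speculatively, the CPU
 * predicts the "ret" back to 1: and spins harmlessly in the
 * pause/lfence loop until the misprediction resolves.
 */
static inline void jmp_nospec_rax(void)
{
	asm volatile("call 2f\n\t"
		     "1:\n\t"
		     "pause\n\t"
		     "lfence\n\t"
		     "jmp 1b\n\t"
		     "2:\n\t"
		     "mov %%rax, (%%rsp)\n\t"
		     "ret"
		     : : : "memory");
}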
@@ -178,12 +178,14 @@ static SIMPLE_DEV_PM_OPS(twl4030_vibra_pm_ops,
 			 twl4030_vibra_suspend, twl4030_vibra_resume);
 
 static bool twl4030_vibra_check_coexist(struct twl4030_vibra_data *pdata,
-			      struct device_node *node)
+			      struct device_node *parent)
 {
+	struct device_node *node;
+
 	if (pdata && pdata->coexist)
 		return true;
 
-	node = of_find_node_by_name(node, "codec");
+	node = of_get_child_by_name(parent, "codec");
 	if (node) {
 		of_node_put(node);
 		return true;
@@ -262,7 +262,7 @@ static int twl6040_vibra_probe(struct platform_device *pdev)
 	int vddvibr_uV = 0;
 	int error;
 
-	twl6040_core_node = of_find_node_by_name(twl6040_core_dev->of_node,
+	twl6040_core_node = of_get_child_by_name(twl6040_core_dev->of_node,
 						 "vibra");
 	if (!twl6040_core_node) {
 		dev_err(&pdev->dev, "parent of node is missing?\n");
@@ -383,6 +383,9 @@ int trackpoint_detect(struct psmouse *psmouse, bool set_properties)
 	if (trackpoint_read(&psmouse->ps2dev, TP_EXT_BTN, &button_info)) {
 		psmouse_warn(psmouse, "failed to get extended button data, assuming 3 buttons\n");
 		button_info = 0x33;
+	} else if (!button_info) {
+		psmouse_warn(psmouse, "got 0 in extended button data, assuming 3 buttons\n");
+		button_info = 0x33;
 	}
 
 	psmouse->private = kzalloc(sizeof(struct trackpoint_data), GFP_KERNEL);
@@ -126,7 +126,7 @@ static int pm860x_touch_dt_init(struct platform_device *pdev,
 	int data, n, ret;
 	if (!np)
 		return -ENODEV;
-	np = of_find_node_by_name(np, "touch");
+	np = of_get_child_by_name(np, "touch");
 	if (!np) {
 		dev_err(&pdev->dev, "Can't find touch node\n");
 		return -EINVAL;
@@ -144,13 +144,13 @@ static int pm860x_touch_dt_init(struct platform_device *pdev,
 	if (data) {
 		ret = pm860x_reg_write(i2c, PM8607_GPADC_MISC1, data);
 		if (ret < 0)
-			return -EINVAL;
+			goto err_put_node;
 	}
 	/* set tsi prebias time */
 	if (!of_property_read_u32(np, "marvell,88pm860x-tsi-prebias", &data)) {
 		ret = pm860x_reg_write(i2c, PM8607_TSI_PREBIAS, data);
 		if (ret < 0)
-			return -EINVAL;
+			goto err_put_node;
 	}
 	/* set prebias & prechg time of pen detect */
 	data = 0;
@@ -161,10 +161,18 @@ static int pm860x_touch_dt_init(struct platform_device *pdev,
 	if (data) {
 		ret = pm860x_reg_write(i2c, PM8607_PD_PREBIAS, data);
 		if (ret < 0)
-			return -EINVAL;
+			goto err_put_node;
 	}
 	of_property_read_u32(np, "marvell,88pm860x-resistor-X", res_x);
 
+	of_node_put(np);
+
 	return 0;
+
+err_put_node:
+	of_node_put(np);
+
+	return -EINVAL;
 }
 #else
 #define pm860x_touch_dt_init(x, y, z)	(-1)
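The of_find_node_by_name() conversions in this driver and in the two vibra drivers above share one rationale: that helper searches the whole device tree starting after the given node (and drops a reference on it), so it can wander outside the device's own subtree and unbalance refcounts, while of_get_child_by_name() only inspects direct children and hands back the child with its refcount elevated. A kernel-style usage sketch, assuming parent is a valid, held device_node:

	struct device_node *child;

	child = of_get_child_by_name(parent, "codec");	/* direct child only */
	if (child) {
		/* ... read properties of the codec node ... */
		of_node_put(child);	/* balance the reference we were given */
	}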
@@ -81,10 +81,14 @@
 #define SECTOR_TO_BLOCK_SHIFT 3
 
 /*
+ * For btree insert:
  *  3 for btree insert +
  *  2 for btree lookup used within space map
+ * For btree remove:
+ *  2 for shadow spine +
+ *  4 for rebalance 3 child node
  */
-#define THIN_MAX_CONCURRENT_LOCKS 5
+#define THIN_MAX_CONCURRENT_LOCKS 6
 
 /* This should be plenty */
 #define SPACE_MAP_ROOT_SIZE 128
@@ -671,23 +671,8 @@ static int btree_split_beneath(struct shadow_spine *s, uint64_t key)
 	pn->keys[1] = rn->keys[0];
 	memcpy_disk(value_ptr(pn, 1), &val, sizeof(__le64));
 
-	/*
-	 * rejig the spine.  This is ugly, since it knows too
-	 * much about the spine
-	 */
-	if (s->nodes[0] != new_parent) {
-		unlock_block(s->info, s->nodes[0]);
-		s->nodes[0] = new_parent;
-	}
-	if (key < le64_to_cpu(rn->keys[0])) {
-		unlock_block(s->info, right);
-		s->nodes[1] = left;
-	} else {
-		unlock_block(s->info, left);
-		s->nodes[1] = right;
-	}
-	s->count = 2;
+	unlock_block(s->info, left);
+	unlock_block(s->info, right);
 
 	return 0;
 }
@@ -584,6 +584,8 @@ static int sdhci_esdhc_probe(struct platform_device *pdev)
 {
 	struct sdhci_host *host;
 	struct device_node *np;
+	struct sdhci_pltfm_host *pltfm_host;
+	struct sdhci_esdhc *esdhc;
 	int ret;
 
 	np = pdev->dev.of_node;
@@ -600,6 +602,14 @@ static int sdhci_esdhc_probe(struct platform_device *pdev)
 
 	sdhci_get_of_property(pdev);
 
+	pltfm_host = sdhci_priv(host);
+	esdhc = pltfm_host->priv;
+	if (esdhc->vendor_ver == VENDOR_V_22)
+		host->quirks2 |= SDHCI_QUIRK2_HOST_NO_CMD23;
+
+	if (esdhc->vendor_ver > VENDOR_V_22)
+		host->quirks &= ~SDHCI_QUIRK_NO_BUSY_IRQ;
+
 	if (of_device_is_compatible(np, "fsl,p5040-esdhc") ||
 	    of_device_is_compatible(np, "fsl,p5020-esdhc") ||
 	    of_device_is_compatible(np, "fsl,p4080-esdhc") ||
@@ -184,7 +184,7 @@ static int pcan_usb_fd_send_cmd(struct peak_usb_device *dev, void *cmd_tail)
 	void *cmd_head = pcan_usb_fd_cmd_buffer(dev);
 	int err = 0;
 	u8 *packet_ptr;
-	int i, n = 1, packet_len;
+	int packet_len;
 	ptrdiff_t cmd_len;
 
 	/* usb device unregistered? */
@@ -201,17 +201,13 @@ static int pcan_usb_fd_send_cmd(struct peak_usb_device *dev, void *cmd_tail)
 	}
 
 	packet_ptr = cmd_head;
+	packet_len = cmd_len;
 
 	/* firmware is not able to re-assemble 512 bytes buffer in full-speed */
-	if ((dev->udev->speed != USB_SPEED_HIGH) &&
-	    (cmd_len > PCAN_UFD_LOSPD_PKT_SIZE)) {
-		packet_len = PCAN_UFD_LOSPD_PKT_SIZE;
-		n += cmd_len / packet_len;
-	} else {
-		packet_len = cmd_len;
-	}
+	if (unlikely(dev->udev->speed != USB_SPEED_HIGH))
+		packet_len = min(packet_len, PCAN_UFD_LOSPD_PKT_SIZE);
 
-	for (i = 0; i < n; i++) {
+	do {
 		err = usb_bulk_msg(dev->udev,
 				   usb_sndbulkpipe(dev->udev,
 						   PCAN_USBPRO_EP_CMDOUT),
@@ -224,7 +220,12 @@ static int pcan_usb_fd_send_cmd(struct peak_usb_device *dev, void *cmd_tail)
 		}
 
 		packet_ptr += packet_len;
-	}
+		cmd_len -= packet_len;
+
+		if (cmd_len < PCAN_UFD_LOSPD_PKT_SIZE)
+			packet_len = cmd_len;
+
+	} while (packet_len > 0);
 
 	return err;
 }
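A standalone C sketch of the rewritten send loop above, for reference; PKT_SIZE and send_chunk() are stand-ins invented for the example, not driver API. The loop always issues at least one transfer, shrinks the final chunk to the remaining byte count, and terminates once the remainder reaches zero:

#include <stdio.h>

#define PKT_SIZE 64			/* stand-in for PCAN_UFD_LOSPD_PKT_SIZE */

static void send_chunk(const unsigned char *p, int len)
{
	(void)p;
	printf("sent %d bytes\n", len);	/* stand-in for usb_bulk_msg() */
}

int main(void)
{
	unsigned char cmd[200];
	const unsigned char *ptr = cmd;
	int cmd_len = (int)sizeof(cmd);
	int packet_len = cmd_len > PKT_SIZE ? PKT_SIZE : cmd_len;

	do {
		send_chunk(ptr, packet_len);
		ptr += packet_len;
		cmd_len -= packet_len;
		if (cmd_len < PKT_SIZE)	/* last chunk may be short */
			packet_len = cmd_len;
	} while (packet_len > 0);	/* 64 + 64 + 64 + 8 = 200 bytes */

	return 0;
}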
@@ -2205,19 +2205,14 @@ static bool rtl8169_do_counters(struct net_device *dev, u32 counter_cmd)
 	void __iomem *ioaddr = tp->mmio_addr;
 	dma_addr_t paddr = tp->counters_phys_addr;
 	u32 cmd;
-	bool ret;
 
 	RTL_W32(CounterAddrHigh, (u64)paddr >> 32);
 	RTL_R32(CounterAddrHigh);
 	cmd = (u64)paddr & DMA_BIT_MASK(32);
-	RTL_W32(CounterAddrLow, cmd);
+	RTL_W32(CounterAddrLow, cmd | counter_cmd);
 
-	ret = rtl_udelay_loop_wait_low(tp, &rtl_counters_cond, 10, 1000);
-
-	RTL_W32(CounterAddrLow, 0);
-	RTL_W32(CounterAddrHigh, 0);
-
-	return ret;
+	return rtl_udelay_loop_wait_low(tp, &rtl_counters_cond, 10, 1000);
 }
 
 static bool rtl8169_reset_counters(struct net_device *dev)
@@ -860,6 +860,7 @@ static int pppoe_sendmsg(struct socket *sock, struct msghdr *m,
 	struct pppoe_hdr *ph;
 	struct net_device *dev;
 	char *start;
+	int hlen;
 
 	lock_sock(sk);
 	if (sock_flag(sk, SOCK_DEAD) || !(sk->sk_state & PPPOX_CONNECTED)) {
@@ -878,16 +879,16 @@ static int pppoe_sendmsg(struct socket *sock, struct msghdr *m,
 	if (total_len > (dev->mtu + dev->hard_header_len))
 		goto end;
 
-
-	skb = sock_wmalloc(sk, total_len + dev->hard_header_len + 32,
-			   0, GFP_KERNEL);
+	hlen = LL_RESERVED_SPACE(dev);
+	skb = sock_wmalloc(sk, hlen + sizeof(*ph) + total_len +
+			   dev->needed_tailroom, 0, GFP_KERNEL);
 	if (!skb) {
 		error = -ENOMEM;
 		goto end;
 	}
 
 	/* Reserve space for headers. */
-	skb_reserve(skb, dev->hard_header_len);
+	skb_reserve(skb, hlen);
 	skb_reset_network_header(skb);
 
 	skb->dev = dev;
@@ -948,7 +949,7 @@ static int __pppoe_xmit(struct sock *sk, struct sk_buff *skb)
 	/* Copy the data if there is no space for the header or if it's
 	 * read-only.
 	 */
-	if (skb_cow_head(skb, sizeof(*ph) + dev->hard_header_len))
+	if (skb_cow_head(skb, LL_RESERVED_SPACE(dev) + sizeof(*ph)))
 		goto abort;
 
 	__skb_push(skb, sizeof(*ph));
@@ -1859,6 +1859,7 @@ static int lan78xx_reset(struct lan78xx_net *dev)
 		buf = DEFAULT_BURST_CAP_SIZE / FS_USB_PKT_SIZE;
 		dev->rx_urb_size = DEFAULT_BURST_CAP_SIZE;
 		dev->rx_qlen = 4;
+		dev->tx_qlen = 4;
 	}
 
 	ret = lan78xx_write_reg(dev, BURST_CAP, buf);
@@ -1563,7 +1563,6 @@ static void vmxnet3_rq_destroy(struct vmxnet3_rx_queue *rq,
 					  rq->rx_ring[i].basePA);
 			rq->rx_ring[i].base = NULL;
 		}
-		rq->buf_info[i] = NULL;
 	}
 
 	if (rq->comp_ring.base) {
@@ -1578,6 +1577,7 @@ static void vmxnet3_rq_destroy(struct vmxnet3_rx_queue *rq,
 			(rq->rx_ring[0].size + rq->rx_ring[1].size);
 		dma_free_coherent(&adapter->pdev->dev, sz, rq->buf_info[0],
 				  rq->buf_info_pa);
+		rq->buf_info[0] = rq->buf_info[1] = NULL;
 	}
 }
@@ -77,6 +77,16 @@ static void ls_pcie_fix_class(struct ls_pcie *pcie)
 	iowrite16(PCI_CLASS_BRIDGE_PCI, pcie->dbi + PCI_CLASS_DEVICE);
 }
 
+/* Drop MSG TLP except for Vendor MSG */
+static void ls_pcie_drop_msg_tlp(struct ls_pcie *pcie)
+{
+	u32 val;
+
+	val = ioread32(pcie->dbi + PCIE_STRFMR1);
+	val &= 0xDFFFFFFF;
+	iowrite32(val, pcie->dbi + PCIE_STRFMR1);
+}
+
 static int ls1021_pcie_link_up(struct pcie_port *pp)
 {
 	u32 state;
@@ -97,7 +107,7 @@ static int ls1021_pcie_link_up(struct pcie_port *pp)
 static void ls1021_pcie_host_init(struct pcie_port *pp)
 {
 	struct ls_pcie *pcie = to_ls_pcie(pp);
-	u32 val, index[2];
+	u32 index[2];
 
 	pcie->scfg = syscon_regmap_lookup_by_phandle(pp->dev->of_node,
 						     "fsl,pcie-scfg");
@@ -116,13 +126,7 @@ static void ls1021_pcie_host_init(struct pcie_port *pp)
 
 	dw_pcie_setup_rc(pp);
 
-	/*
-	 * LS1021A Workaround for internal TKT228622
-	 * to fix the INTx hang issue
-	 */
-	val = ioread32(pcie->dbi + PCIE_STRFMR1);
-	val &= 0xffff;
-	iowrite32(val, pcie->dbi + PCIE_STRFMR1);
+	ls_pcie_drop_msg_tlp(pcie);
 }
 
 static int ls_pcie_link_up(struct pcie_port *pp)
@@ -147,6 +151,7 @@ static void ls_pcie_host_init(struct pcie_port *pp)
 	iowrite32(1, pcie->dbi + PCIE_DBI_RO_WR_EN);
 	ls_pcie_fix_class(pcie);
 	ls_pcie_clear_multifunction(pcie);
+	ls_pcie_drop_msg_tlp(pcie);
 	iowrite32(0, pcie->dbi + PCIE_DBI_RO_WR_EN);
 }
 
@@ -203,6 +208,7 @@ static const struct of_device_id ls_pcie_of_match[] = {
 	{ .compatible = "fsl,ls1021a-pcie", .data = &ls1021_drvdata },
 	{ .compatible = "fsl,ls1043a-pcie", .data = &ls1043_drvdata },
 	{ .compatible = "fsl,ls2080a-pcie", .data = &ls2080_drvdata },
+	{ .compatible = "fsl,ls2085a-pcie", .data = &ls2080_drvdata },
 	{ },
 };
 MODULE_DEVICE_TABLE(of, ls_pcie_of_match);
@@ -365,6 +365,10 @@ static struct phy *_of_phy_get(struct device_node *np, int index)
 	if (ret)
 		return ERR_PTR(-ENODEV);
 
+	/* This phy type handled by the usb-phy subsystem for now */
+	if (of_device_is_compatible(args.np, "usb-nop-xceiv"))
+		return ERR_PTR(-ENODEV);
+
 	mutex_lock(&phy_provider_mutex);
 	phy_provider = of_phy_provider_lookup(args.np);
 	if (IS_ERR(phy_provider) || !try_module_get(phy_provider->owner)) {
@@ -3638,6 +3638,7 @@ static int hpsa_update_device_info(struct ctlr_info *h,
 	if (h->fw_support & MISC_FW_RAID_OFFLOAD_BASIC)
 		hpsa_get_ioaccel_status(h, scsi3addr, this_device);
 	volume_offline = hpsa_volume_offline(h, scsi3addr);
+	this_device->volume_offline = volume_offline;
 	if (volume_offline == HPSA_LV_FAILED) {
 		rc = HPSA_LV_FAILED;
 		dev_err(&h->pdev->dev,
@@ -1727,7 +1727,7 @@ int iscsi_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *sc)
 
 	if (test_bit(ISCSI_SUSPEND_BIT, &conn->suspend_tx)) {
 		reason = FAILURE_SESSION_IN_RECOVERY;
-		sc->result = DID_REQUEUE;
+		sc->result = DID_REQUEUE << 16;
 		goto fault;
 	}
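For context on the one-liner above: scsi_cmnd->result is a packed word in which the driver ("host") status byte occupies bits 16-23, while the low byte carries the SCSI status. Storing DID_REQUEUE unshifted therefore corrupts the status byte instead of requesting a requeue. A standalone illustration; the 0x0c value mirrors the kernel's scsi.h definition, but treat the constants as illustrative:

#include <stdio.h>

#define DID_REQUEUE 0x0c	/* host code meaning "retry the command" */
#define host_byte(result) (((result) >> 16) & 0xff)

int main(void)
{
	int wrong = DID_REQUEUE;	/* lands in the SCSI status byte */
	int right = DID_REQUEUE << 16;	/* lands in the host byte */

	printf("host_byte(wrong) = 0x%02x\n", host_byte(wrong)); /* 0x00 */
	printf("host_byte(right) = 0x%02x\n", host_byte(right)); /* 0x0c */
	return 0;
}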
@@ -160,7 +160,6 @@ typedef struct sg_fd {		/* holds the state of a file descriptor */
 	struct list_head rq_list; /* head of request list */
 	struct fasync_struct *async_qp;	/* used by asynchronous notification */
 	Sg_request req_arr[SG_MAX_QUEUE];	/* used as singly-linked list */
-	char low_dma;		/* as in parent but possibly overridden to 1 */
 	char force_packid;	/* 1 -> pack_id input to read(), 0 -> ignored */
 	char cmd_q;	/* 1 -> allow command queuing, 0 -> don't */
 	unsigned char next_cmd_len;	/* 0: automatic, >0: use on next write() */
@@ -932,24 +931,14 @@ sg_ioctl(struct file *filp, unsigned int cmd_in, unsigned long arg)
 		/* strange ..., for backward compatibility */
 		return sfp->timeout_user;
 	case SG_SET_FORCE_LOW_DMA:
-		result = get_user(val, ip);
-		if (result)
-			return result;
-		if (val) {
-			sfp->low_dma = 1;
-			if ((0 == sfp->low_dma) && !sfp->res_in_use) {
-				val = (int) sfp->reserve.bufflen;
-				sg_remove_scat(sfp, &sfp->reserve);
-				sg_build_reserve(sfp, val);
-			}
-		} else {
-			if (atomic_read(&sdp->detaching))
-				return -ENODEV;
-			sfp->low_dma = sdp->device->host->unchecked_isa_dma;
-		}
+		/*
+		 * N.B. This ioctl never worked properly, but failed to
+		 * return an error value. So returning '0' to keep compability
+		 * with legacy applications.
+		 */
 		return 0;
 	case SG_GET_LOW_DMA:
-		return put_user((int) sfp->low_dma, ip);
+		return put_user((int) sdp->device->host->unchecked_isa_dma, ip);
 	case SG_GET_SCSI_ID:
 		if (!access_ok(VERIFY_WRITE, p, sizeof (sg_scsi_id_t)))
 			return -EFAULT;
@@ -1870,6 +1859,7 @@ sg_build_indirect(Sg_scatter_hold * schp, Sg_fd * sfp, int buff_size)
 	int sg_tablesize = sfp->parentdp->sg_tablesize;
 	int blk_size = buff_size, order;
 	gfp_t gfp_mask = GFP_ATOMIC | __GFP_COMP | __GFP_NOWARN;
+	struct sg_device *sdp = sfp->parentdp;
 
 	if (blk_size < 0)
 		return -EFAULT;
@@ -1895,7 +1885,7 @@ sg_build_indirect(Sg_scatter_hold * schp, Sg_fd * sfp, int buff_size)
 		scatter_elem_sz_prev = num;
 	}
 
-	if (sfp->low_dma)
+	if (sdp->device->host->unchecked_isa_dma)
 		gfp_mask |= GFP_DMA;
 
 	if (!capable(CAP_SYS_ADMIN) || !capable(CAP_SYS_RAWIO))
@@ -2158,8 +2148,6 @@ sg_add_sfp(Sg_device * sdp)
 	sfp->timeout = SG_DEFAULT_TIMEOUT;
 	sfp->timeout_user = SG_DEFAULT_TIMEOUT_USER;
 	sfp->force_packid = SG_DEF_FORCE_PACK_ID;
-	sfp->low_dma = (SG_DEF_FORCE_LOW_DMA == 0) ?
-	    sdp->device->host->unchecked_isa_dma : 1;
 	sfp->cmd_q = SG_DEF_COMMAND_Q;
 	sfp->keep_orphan = SG_DEF_KEEP_ORPHAN;
 	sfp->parentdp = sdp;
@@ -2618,7 +2606,7 @@ static void sg_proc_debug_helper(struct seq_file *s, Sg_device * sdp)
 			   jiffies_to_msecs(fp->timeout),
 			   fp->reserve.bufflen,
 			   (int) fp->reserve.k_use_sg,
-			   (int) fp->low_dma);
+			   (int) sdp->device->host->unchecked_isa_dma);
 		seq_printf(s, "   cmd_q=%d f_packid=%d k_orphan=%d closed=0\n",
 			   (int) fp->cmd_q, (int) fp->force_packid,
 			   (int) fp->keep_orphan);
@@ -163,8 +163,7 @@ static void stub_shutdown_connection(struct usbip_device *ud)
 	 * step 1?
 	 */
 	if (ud->tcp_socket) {
-		dev_dbg(&sdev->udev->dev, "shutdown tcp_socket %p\n",
-			ud->tcp_socket);
+		dev_dbg(&sdev->udev->dev, "shutdown sockfd %d\n", ud->sockfd);
 		kernel_sock_shutdown(ud->tcp_socket, SHUT_RDWR);
 	}
@@ -338,23 +338,26 @@ static struct stub_priv *stub_priv_alloc(struct stub_device *sdev,
 	return priv;
 }
 
-static int get_pipe(struct stub_device *sdev, int epnum, int dir)
+static int get_pipe(struct stub_device *sdev, struct usbip_header *pdu)
 {
 	struct usb_device *udev = sdev->udev;
 	struct usb_host_endpoint *ep;
 	struct usb_endpoint_descriptor *epd = NULL;
+	int epnum = pdu->base.ep;
+	int dir = pdu->base.direction;
+
+	if (epnum < 0 || epnum > 15)
+		goto err_ret;
 
 	if (dir == USBIP_DIR_IN)
 		ep = udev->ep_in[epnum & 0x7f];
 	else
 		ep = udev->ep_out[epnum & 0x7f];
-	if (!ep) {
-		dev_err(&sdev->interface->dev, "no such endpoint?, %d\n",
-			epnum);
-		BUG();
-	}
+	if (!ep)
+		goto err_ret;
 
 	epd = &ep->desc;
 
 	if (usb_endpoint_xfer_control(epd)) {
 		if (dir == USBIP_DIR_OUT)
 			return usb_sndctrlpipe(udev, epnum);
@@ -377,15 +380,37 @@ static int get_pipe(struct stub_device *sdev, struct usbip_header *pdu)
 	}
 
 	if (usb_endpoint_xfer_isoc(epd)) {
+		/* validate packet size and number of packets */
+		unsigned int maxp, packets, bytes;
+
+#define USB_EP_MAXP_MULT_SHIFT	11
+#define USB_EP_MAXP_MULT_MASK	(3 << USB_EP_MAXP_MULT_SHIFT)
+#define USB_EP_MAXP_MULT(m) \
+	(((m) & USB_EP_MAXP_MULT_MASK) >> USB_EP_MAXP_MULT_SHIFT)
+
+		maxp = usb_endpoint_maxp(epd);
+		maxp *= (USB_EP_MAXP_MULT(
+				__le16_to_cpu(epd->wMaxPacketSize)) + 1);
+		bytes = pdu->u.cmd_submit.transfer_buffer_length;
+		packets = DIV_ROUND_UP(bytes, maxp);
+
+		if (pdu->u.cmd_submit.number_of_packets < 0 ||
+		    pdu->u.cmd_submit.number_of_packets > packets) {
+			dev_err(&sdev->udev->dev,
+				"CMD_SUBMIT: isoc invalid num packets %d\n",
+				pdu->u.cmd_submit.number_of_packets);
+			return -1;
+		}
 		if (dir == USBIP_DIR_OUT)
 			return usb_sndisocpipe(udev, epnum);
 		else
 			return usb_rcvisocpipe(udev, epnum);
 	}
 
+err_ret:
 	/* NOT REACHED */
-	dev_err(&sdev->interface->dev, "get pipe, epnum %d\n", epnum);
-	return 0;
+	dev_err(&sdev->udev->dev, "CMD_SUBMIT: invalid epnum %d\n", epnum);
+	return -1;
 }
 
 static void masking_bogus_flags(struct urb *urb)
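To see why the validation above bounds number_of_packets: wMaxPacketSize for a high-bandwidth isochronous endpoint encodes both the packet size (bits 10:0) and up to two additional transactions per microframe (bits 12:11), so the true per-interval capacity is maxp * (mult + 1). A standalone sketch of the arithmetic, with made-up example values:

#include <stdio.h>

#define USB_EP_MAXP_MULT_SHIFT	11
#define USB_EP_MAXP_MULT_MASK	(3 << USB_EP_MAXP_MULT_SHIFT)
#define USB_EP_MAXP_MULT(m) \
	(((m) & USB_EP_MAXP_MULT_MASK) >> USB_EP_MAXP_MULT_SHIFT)
#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))

int main(void)
{
	unsigned int wmaxp = 0x1400;	/* 1024-byte packets, mult field = 2 */
	unsigned int maxp = (wmaxp & 0x7ff) * (USB_EP_MAXP_MULT(wmaxp) + 1);
	unsigned int bytes = 24576;	/* transfer_buffer_length */

	printf("bytes per service interval: %u\n", maxp);	   /* 3072 */
	printf("packets needed: %u\n", DIV_ROUND_UP(bytes, maxp)); /* 8 */
	return 0;
}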
@@ -449,7 +474,10 @@ static void stub_recv_cmd_submit(struct stub_device *sdev,
 	struct stub_priv *priv;
 	struct usbip_device *ud = &sdev->ud;
 	struct usb_device *udev = sdev->udev;
-	int pipe = get_pipe(sdev, pdu->base.ep, pdu->base.direction);
+	int pipe = get_pipe(sdev, pdu);
+
+	if (pipe == -1)
+		return;
 
 	priv = stub_priv_alloc(sdev, pdu);
 	if (!priv)
@@ -317,18 +317,14 @@ int usbip_recv(struct socket *sock, void *buf, int size)
 	struct msghdr msg;
 	struct kvec iov;
 	int total = 0;
 
 	/* for blocks of if (usbip_dbg_flag_xmit) */
 	char *bp = buf;
 	int osize = size;
 
-	usbip_dbg_xmit("enter\n");
-
-	if (!sock || !buf || !size) {
-		pr_err("invalid arg, sock %p buff %p size %d\n", sock, buf,
-		       size);
+	if (!sock || !buf || !size)
 		return -EINVAL;
-	}
+
+	usbip_dbg_xmit("enter\n");
 
 	do {
 		sock->sk->sk_allocation = GFP_NOIO;
@@ -341,11 +337,8 @@ int usbip_recv(struct socket *sock, void *buf, int size)
 		msg.msg_flags = MSG_NOSIGNAL;
 
 		result = kernel_recvmsg(sock, &msg, &iov, 1, size, MSG_WAITALL);
-		if (result <= 0) {
-			pr_debug("receive sock %p buf %p size %u ret %d total %d\n",
-				 sock, buf, size, result, total);
+		if (result <= 0)
 			goto err;
-		}
 
 		size -= result;
 		buf += result;
@@ -261,6 +261,7 @@ struct usbip_device {
 	/* lock for status */
 	spinlock_t lock;
 
+	int sockfd;
 	struct socket *tcp_socket;
 
 	struct task_struct *tcp_rx;
@@ -117,11 +117,12 @@ EXPORT_SYMBOL_GPL(usbip_event_add);
 int usbip_event_happened(struct usbip_device *ud)
 {
 	int happened = 0;
+	unsigned long flags;
 
-	spin_lock(&ud->lock);
+	spin_lock_irqsave(&ud->lock, flags);
 	if (ud->event != 0)
 		happened = 1;
-	spin_unlock(&ud->lock);
+	spin_unlock_irqrestore(&ud->lock, flags);
 
 	return happened;
 }
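A note on the recurring pattern introduced here and throughout the vhci hunks below: these locks are now also taken from contexts where interrupts may already be disabled, and a plain spin_lock() on a lock that can be contended from interrupt context risks deadlock. spin_lock_irqsave() disables local interrupts and remembers the previous state so nested sections restore it correctly. A kernel-style sketch of the pattern, not a standalone program:

	/* flags must be a local in every function that takes the lock */
	unsigned long flags;

	spin_lock_irqsave(&ud->lock, flags);	/* IRQs off, old state saved */
	/* ... touch state shared with interrupt context ... */
	spin_unlock_irqrestore(&ud->lock, flags); /* old IRQ state restored */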
@@ -121,9 +121,11 @@ static void dump_port_status_diff(u32 prev_status, u32 new_status)
 
 void rh_port_connect(int rhport, enum usb_device_speed speed)
 {
+	unsigned long flags;
+
 	usbip_dbg_vhci_rh("rh_port_connect %d\n", rhport);
 
-	spin_lock(&the_controller->lock);
+	spin_lock_irqsave(&the_controller->lock, flags);
 
 	the_controller->port_status[rhport] |= USB_PORT_STAT_CONNECTION
 		| (1 << USB_PORT_FEAT_C_CONNECTION);
@@ -139,22 +141,24 @@ void rh_port_connect(int rhport, enum usb_device_speed speed)
 		break;
 	}
 
-	spin_unlock(&the_controller->lock);
+	spin_unlock_irqrestore(&the_controller->lock, flags);
 
 	usb_hcd_poll_rh_status(vhci_to_hcd(the_controller));
 }
 
 static void rh_port_disconnect(int rhport)
 {
+	unsigned long flags;
+
 	usbip_dbg_vhci_rh("rh_port_disconnect %d\n", rhport);
 
-	spin_lock(&the_controller->lock);
+	spin_lock_irqsave(&the_controller->lock, flags);
 
 	the_controller->port_status[rhport] &= ~USB_PORT_STAT_CONNECTION;
 	the_controller->port_status[rhport] |=
 					(1 << USB_PORT_FEAT_C_CONNECTION);
 
-	spin_unlock(&the_controller->lock);
+	spin_unlock_irqrestore(&the_controller->lock, flags);
 	usb_hcd_poll_rh_status(vhci_to_hcd(the_controller));
 }
@@ -182,13 +186,14 @@ static int vhci_hub_status(struct usb_hcd *hcd, char *buf)
 	int		retval;
 	int		rhport;
 	int		changed = 0;
+	unsigned long	flags;
 
 	retval = DIV_ROUND_UP(VHCI_NPORTS + 1, 8);
 	memset(buf, 0, retval);
 
 	vhci = hcd_to_vhci(hcd);
 
-	spin_lock(&vhci->lock);
+	spin_lock_irqsave(&vhci->lock, flags);
 	if (!HCD_HW_ACCESSIBLE(hcd)) {
 		usbip_dbg_vhci_rh("hw accessible flag not on?\n");
 		goto done;
@@ -209,7 +214,7 @@ static int vhci_hub_status(struct usb_hcd *hcd, char *buf)
 		usb_hcd_resume_root_hub(hcd);
 
 done:
-	spin_unlock(&vhci->lock);
+	spin_unlock_irqrestore(&vhci->lock, flags);
 	return changed ? retval : 0;
 }
@@ -236,6 +241,7 @@ static int vhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
 	struct vhci_hcd	*dum;
 	int		retval = 0;
 	int		rhport;
+	unsigned long	flags;
 
 	u32 prev_port_status[VHCI_NPORTS];
 
@@ -254,7 +260,7 @@ static int vhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
 
 	dum = hcd_to_vhci(hcd);
 
-	spin_lock(&dum->lock);
+	spin_lock_irqsave(&dum->lock, flags);
 
 	/* store old status and compare now and old later */
 	if (usbip_dbg_flag_vhci_rh) {
@@ -408,7 +414,7 @@ static int vhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
 	}
 	usbip_dbg_vhci_rh(" bye\n");
 
-	spin_unlock(&dum->lock);
+	spin_unlock_irqrestore(&dum->lock, flags);
 
 	return retval;
 }
@@ -431,6 +437,7 @@ static void vhci_tx_urb(struct urb *urb)
 {
 	struct vhci_device *vdev = get_vdev(urb->dev);
 	struct vhci_priv *priv;
+	unsigned long flags;
 
 	if (!vdev) {
 		pr_err("could not get virtual device");
@@ -443,7 +450,7 @@ static void vhci_tx_urb(struct urb *urb)
 		return;
 	}
 
-	spin_lock(&vdev->priv_lock);
+	spin_lock_irqsave(&vdev->priv_lock, flags);
 
 	priv->seqnum = atomic_inc_return(&the_controller->seqnum);
 	if (priv->seqnum == 0xffff)
@@ -457,7 +464,7 @@ static void vhci_tx_urb(struct urb *urb)
 	list_add_tail(&priv->list, &vdev->priv_tx);
 
 	wake_up(&vdev->waitq_tx);
-	spin_unlock(&vdev->priv_lock);
+	spin_unlock_irqrestore(&vdev->priv_lock, flags);
 }
 
 static int vhci_urb_enqueue(struct usb_hcd *hcd, struct urb *urb,
@@ -466,15 +473,16 @@ static int vhci_urb_enqueue(struct usb_hcd *hcd, struct urb *urb,
 	struct device *dev = &urb->dev->dev;
 	int ret = 0;
 	struct vhci_device *vdev;
+	unsigned long flags;
 
 	/* patch to usb_sg_init() is in 2.5.60 */
 	BUG_ON(!urb->transfer_buffer && urb->transfer_buffer_length);
 
-	spin_lock(&the_controller->lock);
+	spin_lock_irqsave(&the_controller->lock, flags);
 
 	if (urb->status != -EINPROGRESS) {
 		dev_err(dev, "URB already unlinked!, status %d\n", urb->status);
-		spin_unlock(&the_controller->lock);
+		spin_unlock_irqrestore(&the_controller->lock, flags);
 		return urb->status;
 	}
 
@@ -486,7 +494,7 @@ static int vhci_urb_enqueue(struct usb_hcd *hcd, struct urb *urb,
 	    vdev->ud.status == VDEV_ST_ERROR) {
 		dev_err(dev, "enqueue for inactive port %d\n", vdev->rhport);
 		spin_unlock(&vdev->ud.lock);
-		spin_unlock(&the_controller->lock);
+		spin_unlock_irqrestore(&the_controller->lock, flags);
 		return -ENODEV;
 	}
 	spin_unlock(&vdev->ud.lock);
@@ -559,14 +567,14 @@ static int vhci_urb_enqueue(struct usb_hcd *hcd, struct urb *urb,
 
 out:
 	vhci_tx_urb(urb);
-	spin_unlock(&the_controller->lock);
+	spin_unlock_irqrestore(&the_controller->lock, flags);
 
 	return 0;
 
no_need_xmit:
 	usb_hcd_unlink_urb_from_ep(hcd, urb);
no_need_unlink:
-	spin_unlock(&the_controller->lock);
+	spin_unlock_irqrestore(&the_controller->lock, flags);
 	if (!ret)
 		usb_hcd_giveback_urb(vhci_to_hcd(the_controller),
 				     urb, urb->status);
@@ -623,14 +631,15 @@ static int vhci_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
 {
 	struct vhci_priv *priv;
 	struct vhci_device *vdev;
+	unsigned long flags;
 
-	spin_lock(&the_controller->lock);
+	spin_lock_irqsave(&the_controller->lock, flags);
 
 	priv = urb->hcpriv;
 	if (!priv) {
 		/* URB was never linked! or will be soon given back by
 		 * vhci_rx. */
-		spin_unlock(&the_controller->lock);
+		spin_unlock_irqrestore(&the_controller->lock, flags);
 		return -EIDRM;
 	}
@@ -639,7 +648,7 @@ static int vhci_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
 
 		ret = usb_hcd_check_unlink_urb(hcd, urb, status);
 		if (ret) {
-			spin_unlock(&the_controller->lock);
+			spin_unlock_irqrestore(&the_controller->lock, flags);
 			return ret;
 		}
 	}
@@ -664,10 +673,10 @@ static int vhci_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
 		 */
 		usb_hcd_unlink_urb_from_ep(hcd, urb);
 
-		spin_unlock(&the_controller->lock);
+		spin_unlock_irqrestore(&the_controller->lock, flags);
 		usb_hcd_giveback_urb(vhci_to_hcd(the_controller), urb,
 				     urb->status);
-		spin_lock(&the_controller->lock);
+		spin_lock_irqsave(&the_controller->lock, flags);
 
 	} else {
 		/* tcp connection is alive */
@@ -679,7 +688,7 @@ static int vhci_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
 		unlink = kzalloc(sizeof(struct vhci_unlink), GFP_ATOMIC);
 		if (!unlink) {
 			spin_unlock(&vdev->priv_lock);
-			spin_unlock(&the_controller->lock);
+			spin_unlock_irqrestore(&the_controller->lock, flags);
 			usbip_event_add(&vdev->ud, VDEV_EVENT_ERROR_MALLOC);
 			return -ENOMEM;
 		}
@@ -698,7 +707,7 @@ static int vhci_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
 		spin_unlock(&vdev->priv_lock);
 	}
 
-	spin_unlock(&the_controller->lock);
+	spin_unlock_irqrestore(&the_controller->lock, flags);
 
 	usbip_dbg_vhci_hc("leave\n");
 	return 0;
@@ -707,8 +716,9 @@ static int vhci_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
 static void vhci_device_unlink_cleanup(struct vhci_device *vdev)
 {
 	struct vhci_unlink *unlink, *tmp;
+	unsigned long flags;
 
-	spin_lock(&the_controller->lock);
+	spin_lock_irqsave(&the_controller->lock, flags);
 	spin_lock(&vdev->priv_lock);
 
 	list_for_each_entry_safe(unlink, tmp, &vdev->unlink_tx, list) {
@@ -742,19 +752,19 @@ static void vhci_device_unlink_cleanup(struct vhci_device *vdev)
 		list_del(&unlink->list);
 
 		spin_unlock(&vdev->priv_lock);
-		spin_unlock(&the_controller->lock);
+		spin_unlock_irqrestore(&the_controller->lock, flags);
 
 		usb_hcd_giveback_urb(vhci_to_hcd(the_controller), urb,
 				     urb->status);
 
-		spin_lock(&the_controller->lock);
+		spin_lock_irqsave(&the_controller->lock, flags);
 		spin_lock(&vdev->priv_lock);
 
 		kfree(unlink);
 	}
 
 	spin_unlock(&vdev->priv_lock);
-	spin_unlock(&the_controller->lock);
+	spin_unlock_irqrestore(&the_controller->lock, flags);
 }
 
 /*
@@ -768,7 +778,7 @@ static void vhci_shutdown_connection(struct usbip_device *ud)
 
 	/* need this? see stub_dev.c */
 	if (ud->tcp_socket) {
-		pr_debug("shutdown tcp_socket %p\n", ud->tcp_socket);
+		pr_debug("shutdown sockfd %d\n", ud->sockfd);
 		kernel_sock_shutdown(ud->tcp_socket, SHUT_RDWR);
 	}
@@ -821,8 +831,9 @@ static void vhci_shutdown_connection(struct usbip_device *ud)
 static void vhci_device_reset(struct usbip_device *ud)
 {
 	struct vhci_device *vdev = container_of(ud, struct vhci_device, ud);
+	unsigned long flags;
 
-	spin_lock(&ud->lock);
+	spin_lock_irqsave(&ud->lock, flags);
 
 	vdev->speed  = 0;
 	vdev->devid  = 0;
@@ -836,14 +847,16 @@ static void vhci_device_reset(struct usbip_device *ud)
 	}
 	ud->status = VDEV_ST_NULL;
 
-	spin_unlock(&ud->lock);
+	spin_unlock_irqrestore(&ud->lock, flags);
 }
 
 static void vhci_device_unusable(struct usbip_device *ud)
 {
-	spin_lock(&ud->lock);
+	unsigned long flags;
+
+	spin_lock_irqsave(&ud->lock, flags);
 	ud->status = VDEV_ST_ERROR;
-	spin_unlock(&ud->lock);
+	spin_unlock_irqrestore(&ud->lock, flags);
 }
 
 static void vhci_device_init(struct vhci_device *vdev)
@@ -933,12 +946,13 @@ static int vhci_get_frame_number(struct usb_hcd *hcd)
 static int vhci_bus_suspend(struct usb_hcd *hcd)
 {
 	struct vhci_hcd *vhci = hcd_to_vhci(hcd);
+	unsigned long flags;
 
 	dev_dbg(&hcd->self.root_hub->dev, "%s\n", __func__);
 
-	spin_lock(&vhci->lock);
+	spin_lock_irqsave(&vhci->lock, flags);
 	hcd->state = HC_STATE_SUSPENDED;
-	spin_unlock(&vhci->lock);
+	spin_unlock_irqrestore(&vhci->lock, flags);
 
 	return 0;
 }
@@ -947,15 +961,16 @@ static int vhci_bus_resume(struct usb_hcd *hcd)
 {
 	struct vhci_hcd *vhci = hcd_to_vhci(hcd);
 	int rc = 0;
+	unsigned long flags;
 
 	dev_dbg(&hcd->self.root_hub->dev, "%s\n", __func__);
 
-	spin_lock(&vhci->lock);
+	spin_lock_irqsave(&vhci->lock, flags);
 	if (!HCD_HW_ACCESSIBLE(hcd))
 		rc = -ESHUTDOWN;
 	else
 		hcd->state = HC_STATE_RUNNING;
-	spin_unlock(&vhci->lock);
+	spin_unlock_irqrestore(&vhci->lock, flags);
 
 	return rc;
 }
@@ -1053,17 +1068,18 @@ static int vhci_hcd_suspend(struct platform_device *pdev, pm_message_t state)
 	int rhport = 0;
 	int connected = 0;
 	int ret = 0;
+	unsigned long flags;
 
 	hcd = platform_get_drvdata(pdev);
 
-	spin_lock(&the_controller->lock);
+	spin_lock_irqsave(&the_controller->lock, flags);
 
 	for (rhport = 0; rhport < VHCI_NPORTS; rhport++)
 		if (the_controller->port_status[rhport] &
 		    USB_PORT_STAT_CONNECTION)
 			connected += 1;
 
-	spin_unlock(&the_controller->lock);
+	spin_unlock_irqrestore(&the_controller->lock, flags);
 
 	if (connected > 0) {
 		dev_info(&pdev->dev,
@@ -71,10 +71,11 @@ static void vhci_recv_ret_submit(struct vhci_device *vdev,
 {
 	struct usbip_device *ud = &vdev->ud;
 	struct urb *urb;
+	unsigned long flags;
 
-	spin_lock(&vdev->priv_lock);
+	spin_lock_irqsave(&vdev->priv_lock, flags);
 	urb = pickup_urb_and_free_priv(vdev, pdu->base.seqnum);
-	spin_unlock(&vdev->priv_lock);
+	spin_unlock_irqrestore(&vdev->priv_lock, flags);
 
 	if (!urb) {
 		pr_err("cannot find a urb of seqnum %u max seqnum %d\n",
@@ -103,9 +104,9 @@ static void vhci_recv_ret_submit(struct vhci_device *vdev,
 
 	usbip_dbg_vhci_rx("now giveback urb %u\n", pdu->base.seqnum);
 
-	spin_lock(&the_controller->lock);
+	spin_lock_irqsave(&the_controller->lock, flags);
 	usb_hcd_unlink_urb_from_ep(vhci_to_hcd(the_controller), urb);
-	spin_unlock(&the_controller->lock);
+	spin_unlock_irqrestore(&the_controller->lock, flags);
 
 	usb_hcd_giveback_urb(vhci_to_hcd(the_controller), urb, urb->status);
@@ -116,8 +117,9 @@ static struct vhci_unlink *dequeue_pending_unlink(struct vhci_device *vdev,
 					   struct usbip_header *pdu)
 {
 	struct vhci_unlink *unlink, *tmp;
+	unsigned long flags;
 
-	spin_lock(&vdev->priv_lock);
+	spin_lock_irqsave(&vdev->priv_lock, flags);
 
 	list_for_each_entry_safe(unlink, tmp, &vdev->unlink_rx, list) {
 		pr_info("unlink->seqnum %lu\n", unlink->seqnum);
@@ -126,12 +128,12 @@ static struct vhci_unlink *dequeue_pending_unlink(struct vhci_device *vdev,
 				  unlink->seqnum);
 			list_del(&unlink->list);
 
-			spin_unlock(&vdev->priv_lock);
+			spin_unlock_irqrestore(&vdev->priv_lock, flags);
 			return unlink;
 		}
 	}
 
-	spin_unlock(&vdev->priv_lock);
+	spin_unlock_irqrestore(&vdev->priv_lock, flags);
 
 	return NULL;
 }
@@ -141,6 +143,7 @@ static void vhci_recv_ret_unlink(struct vhci_device *vdev,
 {
 	struct vhci_unlink *unlink;
 	struct urb *urb;
+	unsigned long flags;
 
 	usbip_dump_header(pdu);
@@ -151,9 +154,9 @@ static void vhci_recv_ret_unlink(struct vhci_device *vdev,
 		return;
 	}
 
-	spin_lock(&vdev->priv_lock);
+	spin_lock_irqsave(&vdev->priv_lock, flags);
 	urb = pickup_urb_and_free_priv(vdev, unlink->unlink_seqnum);
-	spin_unlock(&vdev->priv_lock);
+	spin_unlock_irqrestore(&vdev->priv_lock, flags);
 
 	if (!urb) {
 		/*
@@ -170,9 +173,9 @@ static void vhci_recv_ret_unlink(struct vhci_device *vdev,
 		urb->status = pdu->u.ret_unlink.status;
 		pr_info("urb->status %d\n", urb->status);
 
-		spin_lock(&the_controller->lock);
+		spin_lock_irqsave(&the_controller->lock, flags);
 		usb_hcd_unlink_urb_from_ep(vhci_to_hcd(the_controller), urb);
-		spin_unlock(&the_controller->lock);
+		spin_unlock_irqrestore(&the_controller->lock, flags);
 
 		usb_hcd_giveback_urb(vhci_to_hcd(the_controller), urb,
 				     urb->status);
@@ -184,10 +187,11 @@ static void vhci_recv_ret_unlink(struct vhci_device *vdev,
 static int vhci_priv_tx_empty(struct vhci_device *vdev)
 {
 	int empty = 0;
+	unsigned long flags;
 
-	spin_lock(&vdev->priv_lock);
+	spin_lock_irqsave(&vdev->priv_lock, flags);
 	empty = list_empty(&vdev->priv_rx);
-	spin_unlock(&vdev->priv_lock);
+	spin_unlock_irqrestore(&vdev->priv_lock, flags);
 
 	return empty;
 }
@@ -32,23 +32,28 @@ static ssize_t status_show(struct device *dev, struct device_attribute *attr,
 {
 	char *s = out;
 	int i = 0;
+	unsigned long flags;
 
 	BUG_ON(!the_controller || !out);
 
-	spin_lock(&the_controller->lock);
+	spin_lock_irqsave(&the_controller->lock, flags);
 
 	/*
 	 * output example:
-	 * prt sta spd dev socket local_busid
-	 * 000 004 000 000 c5a7bb80 1-2.3
-	 * 001 004 000 000 d8cee980 2-3.4
+	 * port sta spd dev sockfd local_busid
+	 * 0000 004 000 00000000 000003 1-2.3
+	 * 0001 004 000 00000000 000004 2-3.4
 	 *
-	 * IP address can be retrieved from a socket pointer address by looking
-	 * up /proc/net/{tcp,tcp6}. Also, a userland program may remember a
-	 * port number and its peer IP address.
+	 * Output includes socket fd instead of socket pointer address to
+	 * avoid leaking kernel memory address in:
+	 *	/sys/devices/platform/vhci_hcd.0/status and in debug output.
+	 * The socket pointer address is not used at the moment and it was
+	 * made visible as a convenient way to find IP address from socket
+	 * pointer address by looking up /proc/net/{tcp,tcp6}. As this opens
+	 * a security hole, the change is made to use sockfd instead.
 	 */
 	out += sprintf(out,
-		       "prt sta spd bus dev socket local_busid\n");
+		       "prt sta spd bus dev sockfd local_busid\n");
 
 	for (i = 0; i < VHCI_NPORTS; i++) {
 		struct vhci_device *vdev = port_to_vdev(i);
@@ -60,17 +65,17 @@ static ssize_t status_show(struct device *dev, struct device_attribute *attr,
 			out += sprintf(out, "%03u %08x ",
 				       vdev->speed, vdev->devid);
-			out += sprintf(out, "%16p ", vdev->ud.tcp_socket);
+			out += sprintf(out, "%06u", vdev->ud.sockfd);
 			out += sprintf(out, "%s", dev_name(&vdev->udev->dev));
 
-		} else {
-			out += sprintf(out, "000 000 000 0000000000000000 0-0");
-		}
+		} else
+			out += sprintf(out, "000 000 000 000000 0-0");
 
 		out += sprintf(out, "\n");
 		spin_unlock(&vdev->ud.lock);
 	}
 
-	spin_unlock(&the_controller->lock);
+	spin_unlock_irqrestore(&the_controller->lock, flags);
 
 	return out - s;
 }
@@ -80,11 +85,12 @@ static DEVICE_ATTR_RO(status);
 static int vhci_port_disconnect(__u32 rhport)
 {
 	struct vhci_device *vdev;
+	unsigned long flags;
 
 	usbip_dbg_vhci_sysfs("enter\n");
 
 	/* lock */
-	spin_lock(&the_controller->lock);
+	spin_lock_irqsave(&the_controller->lock, flags);
 
 	vdev = port_to_vdev(rhport);
 
@@ -94,14 +100,14 @@ static int vhci_port_disconnect(__u32 rhport)
 
 		/* unlock */
 		spin_unlock(&vdev->ud.lock);
-		spin_unlock(&the_controller->lock);
+		spin_unlock_irqrestore(&the_controller->lock, flags);
 
 		return -EINVAL;
 	}
 
 	/* unlock */
 	spin_unlock(&vdev->ud.lock);
-	spin_unlock(&the_controller->lock);
+	spin_unlock_irqrestore(&the_controller->lock, flags);
 
 	usbip_event_add(&vdev->ud, VDEV_EVENT_DOWN);
@@ -177,6 +183,7 @@ static ssize_t store_attach(struct device *dev, struct device_attribute *attr,
 	int sockfd = 0;
 	__u32 rhport = 0, devid = 0, speed = 0;
 	int err;
+	unsigned long flags;
 
 	/*
 	 * @rhport: port number of vhci_hcd
@@ -202,14 +209,14 @@ static ssize_t store_attach(struct device *dev, struct device_attribute *attr,
 	/* now need lock until setting vdev status as used */
 
 	/* begin a lock */
-	spin_lock(&the_controller->lock);
+	spin_lock_irqsave(&the_controller->lock, flags);
 	vdev = port_to_vdev(rhport);
 	spin_lock(&vdev->ud.lock);
 
 	if (vdev->ud.status != VDEV_ST_NULL) {
 		/* end of the lock */
 		spin_unlock(&vdev->ud.lock);
-		spin_unlock(&the_controller->lock);
+		spin_unlock_irqrestore(&the_controller->lock, flags);
 
 		sockfd_put(socket);
@@ -223,11 +230,12 @@ static ssize_t store_attach(struct device *dev, struct device_attribute *attr,
 
 	vdev->devid         = devid;
 	vdev->speed         = speed;
+	vdev->ud.sockfd     = sockfd;
 	vdev->ud.tcp_socket = socket;
 	vdev->ud.status     = VDEV_ST_NOTASSIGNED;
 
 	spin_unlock(&vdev->ud.lock);
-	spin_unlock(&the_controller->lock);
+	spin_unlock_irqrestore(&the_controller->lock, flags);
 	/* end the lock */
 
 	vdev->ud.tcp_rx = kthread_get_run(vhci_rx_loop, &vdev->ud, "vhci_rx");
@@ -47,16 +47,17 @@ static void setup_cmd_submit_pdu(struct usbip_header *pdup, struct urb *urb)
 static struct vhci_priv *dequeue_from_priv_tx(struct vhci_device *vdev)
 {
 	struct vhci_priv *priv, *tmp;
+	unsigned long flags;
 
-	spin_lock(&vdev->priv_lock);
+	spin_lock_irqsave(&vdev->priv_lock, flags);
 
 	list_for_each_entry_safe(priv, tmp, &vdev->priv_tx, list) {
 		list_move_tail(&priv->list, &vdev->priv_rx);
-		spin_unlock(&vdev->priv_lock);
+		spin_unlock_irqrestore(&vdev->priv_lock, flags);
 		return priv;
 	}
 
-	spin_unlock(&vdev->priv_lock);
+	spin_unlock_irqrestore(&vdev->priv_lock, flags);
 
 	return NULL;
 }
@@ -137,16 +138,17 @@ static int vhci_send_cmd_submit(struct vhci_device *vdev)
 static struct vhci_unlink *dequeue_from_unlink_tx(struct vhci_device *vdev)
 {
 	struct vhci_unlink *unlink, *tmp;
+	unsigned long flags;
 
-	spin_lock(&vdev->priv_lock);
+	spin_lock_irqsave(&vdev->priv_lock, flags);
 
 	list_for_each_entry_safe(unlink, tmp, &vdev->unlink_tx, list) {
 		list_move_tail(&unlink->list, &vdev->unlink_rx);
-		spin_unlock(&vdev->priv_lock);
+		spin_unlock_irqrestore(&vdev->priv_lock, flags);
 		return unlink;
 	}
 
-	spin_unlock(&vdev->priv_lock);
+	spin_unlock_irqrestore(&vdev->priv_lock, flags);
 
 	return NULL;
 }
@@ -178,11 +178,8 @@ ext2_get_acl(struct inode *inode, int type)
 	return acl;
 }
 
-/*
- * inode->i_mutex: down
- */
-int
-ext2_set_acl(struct inode *inode, struct posix_acl *acl, int type)
+static int
+__ext2_set_acl(struct inode *inode, struct posix_acl *acl, int type)
 {
 	int name_index;
 	void *value = NULL;
@@ -192,13 +189,6 @@ __ext2_set_acl(struct inode *inode, struct posix_acl *acl, int type)
 	switch(type) {
 		case ACL_TYPE_ACCESS:
 			name_index = EXT2_XATTR_INDEX_POSIX_ACL_ACCESS;
-			if (acl) {
-				error = posix_acl_update_mode(inode, &inode->i_mode, &acl);
-				if (error)
-					return error;
-				inode->i_ctime = CURRENT_TIME_SEC;
-				mark_inode_dirty(inode);
-			}
 			break;
 
 		case ACL_TYPE_DEFAULT:
@@ -224,6 +214,24 @@ __ext2_set_acl(struct inode *inode, struct posix_acl *acl, int type)
 	return error;
 }
 
+/*
+ * inode->i_mutex: down
+ */
+int
+ext2_set_acl(struct inode *inode, struct posix_acl *acl, int type)
+{
+	int error;
+
+	if (type == ACL_TYPE_ACCESS && acl) {
+		error = posix_acl_update_mode(inode, &inode->i_mode, &acl);
+		if (error)
+			return error;
+		inode->i_ctime = CURRENT_TIME_SEC;
+		mark_inode_dirty(inode);
+	}
+	return __ext2_set_acl(inode, acl, type);
+}
+
 /*
  * Initialize the ACLs of a new inode. Called from ext2_new_inode.
  *
@@ -241,12 +249,12 @@ ext2_init_acl(struct inode *inode, struct inode *dir)
 		return error;
 
 	if (default_acl) {
-		error = ext2_set_acl(inode, default_acl, ACL_TYPE_DEFAULT);
+		error = __ext2_set_acl(inode, default_acl, ACL_TYPE_DEFAULT);
 		posix_acl_release(default_acl);
 	}
 	if (acl) {
 		if (!error)
-			error = ext2_set_acl(inode, acl, ACL_TYPE_ACCESS);
+			error = __ext2_set_acl(inode, acl, ACL_TYPE_ACCESS);
 		posix_acl_release(acl);
 	}
 	return error;
@@ -113,6 +113,10 @@ void f_setown(struct file *filp, unsigned long arg, int force)
 	int who = arg;
 	type = PIDTYPE_PID;
 	if (who < 0) {
+		/* avoid overflow below */
+		if (who == INT_MIN)
+			return;
+
 		type = PIDTYPE_PGID;
 		who = -who;
 	}
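A standalone demonstration of the overflow the check above guards against: the two's-complement range is asymmetric, INT_MIN has no positive counterpart, so "who = -who" would be undefined behaviour for arg == INT_MIN:

#include <limits.h>
#include <stdio.h>

int main(void)
{
	int who = INT_MIN;

	if (who < 0) {
		if (who == INT_MIN) {
			puts("rejecting INT_MIN: negation would overflow");
			return 1;
		}
		who = -who;	/* safe for every other negative value */
	}

	printf("process group id: %d\n", who);
	return 0;
}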
@@ -60,9 +60,10 @@ int nfsd_setuser(struct svc_rqst *rqstp, struct svc_export *exp)
 			else
 				GROUP_AT(gi, i) = GROUP_AT(rqgi, i);
 
-			/* Each thread allocates its own gi, no race */
-			groups_sort(gi);
 		}
+
+		/* Each thread allocates its own gi, no race */
+		groups_sort(gi);
 	} else {
 		gi = get_group_info(rqgi);
 	}
fs/pipe.c

@@ -1001,6 +1001,9 @@ static long pipe_set_size(struct pipe_inode_info *pipe, unsigned long nr_pages)
 {
 	struct pipe_buffer *bufs;
 
+	if (!nr_pages)
+		return -EINVAL;
+
 	/*
 	 * We can shrink the pipe, if arg >= pipe->nrbufs. Since we don't
 	 * expect a lot of shrink+grow operations, just free and allocate
@@ -1045,13 +1048,19 @@ static long pipe_set_size(struct pipe_inode_info *pipe, unsigned long nr_pages)
 
 /*
  * Currently we rely on the pipe array holding a power-of-2 number
- * of pages.
+ * of pages. Returns 0 on error.
  */
 static inline unsigned int round_pipe_size(unsigned int size)
 {
 	unsigned long nr_pages;
 
+	if (size < pipe_min_size)
+		size = pipe_min_size;
+
 	nr_pages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
+	if (nr_pages == 0)
+		return 0;
+
 	return roundup_pow_of_two(nr_pages) << PAGE_SHIFT;
 }
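A standalone sketch of the logic above: round a byte count up to a power-of-2 number of pages and report failure when the page-count arithmetic wraps to zero. PAGE_SIZE and roundup_pow_of_two() are local stand-ins, and 32-bit unsigned arithmetic is used on purpose to model the overflow the patch detects:

#include <stdio.h>

#define PAGE_SHIFT 12
#define PAGE_SIZE (1u << PAGE_SHIFT)	/* 4 KiB, as on x86 */

static unsigned int roundup_pow_of_two(unsigned int n)
{
	unsigned int r = 1;

	while (r < n)
		r <<= 1;
	return r;
}

static unsigned int round_pipe_size(unsigned int size, unsigned int min)
{
	unsigned int nr_pages;

	if (size < min)
		size = min;

	nr_pages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
	if (nr_pages == 0)	/* size + PAGE_SIZE - 1 wrapped around */
		return 0;

	return roundup_pow_of_two(nr_pages) << PAGE_SHIFT;
}

int main(void)
{
	printf("%u\n", round_pipe_size(5000, PAGE_SIZE));	 /* 8192 */
	printf("%u\n", round_pipe_size(0xfffff001u, PAGE_SIZE)); /* 0 */
	return 0;
}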
@@ -1062,13 +1071,18 @@ static inline unsigned int round_pipe_size(unsigned int size)
 int pipe_proc_fn(struct ctl_table *table, int write, void __user *buf,
 		 size_t *lenp, loff_t *ppos)
 {
+	unsigned int rounded_pipe_max_size;
 	int ret;
 
 	ret = proc_dointvec_minmax(table, write, buf, lenp, ppos);
 	if (ret < 0 || !write)
 		return ret;
 
-	pipe_max_size = round_pipe_size(pipe_max_size);
+	rounded_pipe_max_size = round_pipe_size(pipe_max_size);
+	if (rounded_pipe_max_size == 0)
+		return -EINVAL;
+
+	pipe_max_size = rounded_pipe_max_size;
 	return ret;
 }
@@ -513,9 +513,17 @@ static void __discard_prealloc(struct reiserfs_transaction_handle *th,
 		       "inode has negative prealloc blocks count.");
 #endif
 	while (ei->i_prealloc_count > 0) {
-		reiserfs_free_prealloc_block(th, inode, ei->i_prealloc_block);
-		ei->i_prealloc_block++;
+		b_blocknr_t block_to_free;
+
+		/*
+		 * reiserfs_free_prealloc_block can drop the write lock,
+		 * which could allow another caller to free the same block.
+		 * We can protect against it by modifying the prealloc
+		 * state before calling it.
+		 */
+		block_to_free = ei->i_prealloc_block++;
 		ei->i_prealloc_count--;
+		reiserfs_free_prealloc_block(th, inode, block_to_free);
 		dirty = 1;
 	}
 	if (dirty)
@@ -1128,7 +1136,7 @@ static int determine_prealloc_size(reiserfs_blocknr_hint_t * hint)
 	hint->prealloc_size = 0;
 
 	if (!hint->formatted_node && hint->preallocate) {
-		if (S_ISREG(hint->inode->i_mode)
+		if (S_ISREG(hint->inode->i_mode) && !IS_PRIVATE(hint->inode)
 		    && hint->inode->i_size >=
 		    REISERFS_SB(hint->th->t_super)->s_alloc_options.
 		    preallocmin * hint->inode->i_sb->s_blocksize)
@@ -37,7 +37,14 @@ reiserfs_set_acl(struct inode *inode, struct posix_acl *acl, int type)
 	error = journal_begin(&th, inode->i_sb, jcreate_blocks);
 	reiserfs_write_unlock(inode->i_sb);
 	if (error == 0) {
+		if (type == ACL_TYPE_ACCESS && acl) {
+			error = posix_acl_update_mode(inode, &inode->i_mode,
+						      &acl);
+			if (error)
+				goto unlock;
+		}
 		error = __reiserfs_set_acl(&th, inode, type, acl);
+unlock:
 		reiserfs_write_lock(inode->i_sb);
 		error2 = journal_end(&th);
 		reiserfs_write_unlock(inode->i_sb);
@@ -245,11 +252,6 @@ __reiserfs_set_acl(struct reiserfs_transaction_handle *th, struct inode *inode,
 	switch (type) {
 	case ACL_TYPE_ACCESS:
 		name = POSIX_ACL_XATTR_ACCESS;
-		if (acl) {
-			error = posix_acl_update_mode(inode, &inode->i_mode, &acl);
-			if (error)
-				return error;
-		}
 		break;
 	case ACL_TYPE_DEFAULT:
 		name = POSIX_ACL_XATTR_DEFAULT;
fs/select.c

@@ -29,6 +29,7 @@
 #include <linux/sched/rt.h>
 #include <linux/freezer.h>
 #include <net/busy_poll.h>
+#include <linux/vmalloc.h>
 
 #include <asm/uaccess.h>
@@ -550,7 +551,7 @@ int core_sys_select(int n, fd_set __user *inp, fd_set __user *outp,
 	fd_set_bits fds;
 	void *bits;
 	int ret, max_fds;
-	unsigned int size;
+	size_t size, alloc_size;
 	struct fdtable *fdt;
 	/* Allocate small arguments on the stack to save memory and be faster */
 	long stack_fds[SELECT_STACK_ALLOC/sizeof(long)];
@@ -577,7 +578,14 @@ int core_sys_select(int n, fd_set __user *inp, fd_set __user *outp,
 	if (size > sizeof(stack_fds) / 6) {
 		/* Not enough space in on-stack array; must use kmalloc */
 		ret = -ENOMEM;
-		bits = kmalloc(6 * size, GFP_KERNEL);
+		if (size > (SIZE_MAX / 6))
+			goto out_nofds;
+
+		alloc_size = 6 * size;
+		bits = kmalloc(alloc_size, GFP_KERNEL|__GFP_NOWARN);
+		if (!bits && alloc_size > PAGE_SIZE)
+			bits = vmalloc(alloc_size);
+
 		if (!bits)
 			goto out_nofds;
 	}
@@ -614,7 +622,7 @@ int core_sys_select(int n, fd_set __user *inp, fd_set __user *outp,
 
 out:
 	if (bits != stack_fds)
-		kfree(bits);
+		kvfree(bits);
 out_nofds:
 	return ret;
 }
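The select() hunks combine two independent fixes: reject a multiplication that would overflow before allocating, and fall back to a second allocator for large requests (paired with kvfree(), which frees either kind). A standalone sketch of the overflow guard, with plain malloc standing in for kmalloc/vmalloc:

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

/* Returns a buffer holding 6 bitmaps of `size` bytes each, or NULL. */
static void *alloc_six(size_t size)
{
	if (size > SIZE_MAX / 6)	/* 6 * size would wrap around */
		return NULL;

	/* The kernel tries kmalloc first and falls back to vmalloc for
	 * large buffers; malloc stands in for both here. */
	return malloc(6 * size);
}

int main(void)
{
	void *p = alloc_six(1024);

	printf("small request: %s\n", p ? "ok" : "failed");
	free(p);

	printf("huge request: %s\n",
	       alloc_six(SIZE_MAX / 2) ? "ok" : "rejected");
	return 0;
}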
include/asm-generic/asm-prototypes.h (new file)

@@ -0,0 +1,7 @@
+#include <linux/bitops.h>
+extern void *__memset(void *, int, __kernel_size_t);
+extern void *__memcpy(void *, const void *, __kernel_size_t);
+extern void *__memmove(void *, const void *, __kernel_size_t);
+extern void *memset(void *, int, __kernel_size_t);
+extern void *memcpy(void *, const void *, __kernel_size_t);
+extern void *memmove(void *, const void *, __kernel_size_t);
94
include/asm-generic/export.h
Normal file
94
include/asm-generic/export.h
Normal file
|
|
@@ -0,0 +1,94 @@
#ifndef __ASM_GENERIC_EXPORT_H
#define __ASM_GENERIC_EXPORT_H

#ifndef KSYM_FUNC
#define KSYM_FUNC(x) x
#endif
#ifdef CONFIG_64BIT
#define __put .quad
#ifndef KSYM_ALIGN
#define KSYM_ALIGN 8
#endif
#ifndef KCRC_ALIGN
#define KCRC_ALIGN 8
#endif
#else
#define __put .long
#ifndef KSYM_ALIGN
#define KSYM_ALIGN 4
#endif
#ifndef KCRC_ALIGN
#define KCRC_ALIGN 4
#endif
#endif

#ifdef CONFIG_HAVE_UNDERSCORE_SYMBOL_PREFIX
#define KSYM(name) _##name
#else
#define KSYM(name) name
#endif

/*
 * note on .section use: @progbits vs %progbits nastiness doesn't matter,
 * since we immediately emit into those sections anyway.
 */
.macro ___EXPORT_SYMBOL name,val,sec
#ifdef CONFIG_MODULES
	.globl KSYM(__ksymtab_\name)
	.section ___ksymtab\sec+\name,"a"
	.balign KSYM_ALIGN
KSYM(__ksymtab_\name):
	__put \val, KSYM(__kstrtab_\name)
	.previous
	.section __ksymtab_strings,"a"
KSYM(__kstrtab_\name):
#ifdef CONFIG_HAVE_UNDERSCORE_SYMBOL_PREFIX
	.asciz "_\name"
#else
	.asciz "\name"
#endif
	.previous
#ifdef CONFIG_MODVERSIONS
	.section ___kcrctab\sec+\name,"a"
	.balign KCRC_ALIGN
KSYM(__kcrctab_\name):
	__put KSYM(__crc_\name)
	.weak KSYM(__crc_\name)
	.previous
#endif
#endif
.endm
#undef __put

#if defined(__KSYM_DEPS__)

#define __EXPORT_SYMBOL(sym, val, sec) === __KSYM_##sym ===

#elif defined(CONFIG_TRIM_UNUSED_KSYMS)

#include <linux/kconfig.h>
#include <generated/autoksyms.h>

#define __EXPORT_SYMBOL(sym, val, sec)				\
	__cond_export_sym(sym, val, sec, config_enabled(__KSYM_##sym))
#define __cond_export_sym(sym, val, sec, conf)			\
	___cond_export_sym(sym, val, sec, conf)
#define ___cond_export_sym(sym, val, sec, enabled)		\
	__cond_export_sym_##enabled(sym, val, sec)
#define __cond_export_sym_1(sym, val, sec) ___EXPORT_SYMBOL sym, val, sec
#define __cond_export_sym_0(sym, val, sec) /* nothing */

#else
#define __EXPORT_SYMBOL(sym, val, sec) ___EXPORT_SYMBOL sym, val, sec
#endif

#define EXPORT_SYMBOL(name)					\
	__EXPORT_SYMBOL(name, KSYM_FUNC(KSYM(name)),)
#define EXPORT_SYMBOL_GPL(name)					\
	__EXPORT_SYMBOL(name, KSYM_FUNC(KSYM(name)), _gpl)
#define EXPORT_DATA_SYMBOL(name)				\
	__EXPORT_SYMBOL(name, KSYM(name),)
#define EXPORT_DATA_SYMBOL_GPL(name)				\
	__EXPORT_SYMBOL(name, KSYM(name),_gpl)

#endif
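For orientation, a hedged C-side sketch of what each ___EXPORT_SYMBOL invocation lays down: one __ksymtab entry in the layout of the kernel's struct kernel_symbol (a value word, then a pointer into __ksymtab_strings), plus a weak CRC reference when CONFIG_MODVERSIONS is set.

/* Hedged sketch; mirrors include/linux/export.h in this era. */
struct kernel_symbol_sketch {
	unsigned long value;	/* __put \val: address of the symbol    */
	const char *name;	/* points at __kstrtab_\name, its name  */
};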
include/linux/cacheinfo.h

@@ -71,6 +71,7 @@ struct cpu_cacheinfo {
	struct cacheinfo *info_list;
	unsigned int num_levels;
	unsigned int num_leaves;
	bool cpu_map_populated;
};

/*
include/linux/kconfig.h

@@ -17,10 +17,11 @@
 * the last step cherry picks the 2nd arg, we get a zero.
 */
#define __ARG_PLACEHOLDER_1 0,
#define config_enabled(cfg) _config_enabled(cfg)
#define _config_enabled(value) __config_enabled(__ARG_PLACEHOLDER_##value)
#define __config_enabled(arg1_or_junk) ___config_enabled(arg1_or_junk 1, 0)
#define ___config_enabled(__ignored, val, ...) val
#define config_enabled(cfg) ___is_defined(cfg)
#define __is_defined(x) ___is_defined(x)
#define ___is_defined(val) ____is_defined(__ARG_PLACEHOLDER_##val)
#define ____is_defined(arg1_or_junk) __take_second_arg(arg1_or_junk 1, 0)
#define __take_second_arg(__ignored, val, ...) val

/*
 * IS_BUILTIN(CONFIG_FOO) evaluates to 1 if CONFIG_FOO is set to 'y', 0

@@ -42,7 +43,7 @@
 * built-in code when CONFIG_FOO is set to 'm'.
 */
#define IS_REACHABLE(option) (config_enabled(option) || \
		(config_enabled(option##_MODULE) && config_enabled(MODULE)))
		(config_enabled(option##_MODULE) && __is_defined(MODULE)))

/*
 * IS_ENABLED(CONFIG_FOO) evaluates to 1 if CONFIG_FOO is set to 'y' or 'm',
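The replacement macros implement the same argument-counting trick under clearer names. A minimal standalone sketch of the mechanism (userspace C with the macro names copied from the hunk; CONFIG_FOO and CONFIG_BAR are made up for the demo, and the empty variadic call relies on the GNU C behaviour the kernel itself assumes):

#include <stdio.h>

/* If the argument expands to 1, the placeholder smuggles in an extra
 * argument and __take_second_arg() picks the 1; otherwise it picks
 * the trailing 0. */
#define __ARG_PLACEHOLDER_1 0,
#define __take_second_arg(__ignored, val, ...) val
#define ____is_defined(arg1_or_junk) __take_second_arg(arg1_or_junk 1, 0)
#define ___is_defined(val) ____is_defined(__ARG_PLACEHOLDER_##val)
#define __is_defined(x) ___is_defined(x)

#define CONFIG_FOO 1	/* stands in for a generated autoconf entry */

int main(void)
{
	printf("%d %d\n", __is_defined(CONFIG_FOO), __is_defined(CONFIG_BAR));
	return 0;	/* prints "1 0" */
}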
include/linux/ktime.h

@@ -63,6 +63,13 @@ static inline ktime_t ktime_set(const s64 secs, const unsigned long nsecs)
#define ktime_add(lhs, rhs) \
		({ (ktime_t){ .tv64 = (lhs).tv64 + (rhs).tv64 }; })

/*
 * Same as ktime_add(), but avoids undefined behaviour on overflow; however,
 * this means that you must check the result for overflow yourself.
 */
#define ktime_add_unsafe(lhs, rhs) \
		({ (ktime_t){ .tv64 = (u64) (lhs).tv64 + (rhs).tv64 }; })

/*
 * Add a ktime_t variable and a scalar nanosecond value.
 * res = kt + nsval:
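Since ktime_add_unsafe() pushes overflow checking onto the caller, here is a hedged sketch of the usual caller-side pattern; it mirrors the clamping done by the hrtimer code's ktime_add_safe(), but the function name below is illustrative:

#include <linux/ktime.h>

/* Hedged sketch: two non-negative times that wrap past KTIME_MAX show
 * up as a smaller or negative tv64, so clamp rather than return a
 * wrapped value. */
static ktime_t ktime_add_clamped_sketch(const ktime_t lhs, const ktime_t rhs)
{
	ktime_t res = ktime_add_unsafe(lhs, rhs);

	if (res.tv64 < 0 || res.tv64 < lhs.tv64 || res.tv64 < rhs.tv64)
		res.tv64 = KTIME_MAX;
	return res;
}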
include/linux/netfilter/x_tables.h

@@ -243,6 +243,10 @@ int xt_check_entry_offsets(const void *base, const char *elems,
			   unsigned int target_offset,
			   unsigned int next_offset);

unsigned int *xt_alloc_entry_offsets(unsigned int size);
bool xt_find_jump_offset(const unsigned int *offsets,
			 unsigned int target, unsigned int size);

int xt_check_match(struct xt_mtchk_param *, unsigned int size, u_int8_t proto,
		   bool inv_proto);
int xt_check_target(struct xt_tgchk_param *, unsigned int size, u_int8_t proto,
@@ -377,16 +381,16 @@ static inline unsigned long ifname_compare_aligned(const char *_a,
 * allows us to return 0 for single core systems without forcing
 * callers to deal with SMP vs. NONSMP issues.
 */
static inline u64 xt_percpu_counter_alloc(void)
static inline unsigned long xt_percpu_counter_alloc(void)
{
	if (nr_cpu_ids > 1) {
		void __percpu *res = __alloc_percpu(sizeof(struct xt_counters),
						    sizeof(struct xt_counters));

		if (res == NULL)
			return (u64) -ENOMEM;
			return -ENOMEM;

		return (u64) (__force unsigned long) res;
		return (__force unsigned long) res;
	}

	return 0;
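The switch from u64 to unsigned long matters on 32-bit, where (u64)-ENOMEM and (unsigned long)-ENOMEM are different bit patterns, so a caller's error test could miss. A hedged caller-side sketch (the wrapper is illustrative, not a real x_tables caller):

#include <linux/err.h>
#include <linux/errno.h>
#include <linux/netfilter/x_tables.h>

/* Hedged sketch: with an unsigned long return value, the standard
 * IS_ERR_VALUE() test recognizes -ENOMEM on 32-bit and 64-bit alike. */
static int alloc_rule_counter_sketch(unsigned long *pcnt)
{
	*pcnt = xt_percpu_counter_alloc();
	if (IS_ERR_VALUE(*pcnt))
		return -ENOMEM;
	return 0;
}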
include/linux/sched.h

@@ -1456,6 +1456,7 @@ struct sched_dl_entity {
	u64 dl_deadline;	/* relative deadline of each instance	*/
	u64 dl_period;		/* separation of two instances (period) */
	u64 dl_bw;		/* dl_runtime / dl_deadline		*/
	u64 dl_density;		/* dl_runtime / dl_deadline		*/

	/*
	 * Actual scheduling parameters. Initialized with the values above,
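dl_bw and dl_density hold fixed-point ratios. A hedged sketch of how such a ratio is computed, following the shape of the scheduler's to_ratio() helper from memory (treat the shift width as an assumption):

#include <linux/math64.h>

/* Hedged sketch: runtime/deadline as a Q20 fixed-point fraction, the
 * representation the deadline scheduler uses for bandwidth ratios. */
static u64 dl_ratio_sketch(u64 runtime, u64 deadline)
{
	return div64_u64(runtime << 20, deadline);	/* 20 is assumed */
}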
include/linux/tcp.h

@@ -29,9 +29,14 @@ static inline struct tcphdr *tcp_hdr(const struct sk_buff *skb)
	return (struct tcphdr *)skb_transport_header(skb);
}

static inline unsigned int __tcp_hdrlen(const struct tcphdr *th)
{
	return th->doff * 4;
}

static inline unsigned int tcp_hdrlen(const struct sk_buff *skb)
{
	return tcp_hdr(skb)->doff * 4;
	return __tcp_hdrlen(tcp_hdr(skb));
}

static inline struct tcphdr *inner_tcp_hdr(const struct sk_buff *skb)
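__tcp_hdrlen() exists so that code holding a raw struct tcphdr pointer, with no skb transport header set up, can compute the header length. A hedged validation sketch (function name is illustrative):

#include <linux/tcp.h>

/* Hedged sketch: sanity-check a TCP header parsed out of raw packet
 * bytes; doff counts 32-bit words, so the minimum legal length is
 * sizeof(struct tcphdr). */
static bool tcp_header_sane_sketch(const struct tcphdr *th, unsigned int len)
{
	unsigned int hdrlen = __tcp_hdrlen(th);

	return hdrlen >= sizeof(struct tcphdr) && hdrlen <= len;
}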
include/net/arp.h

@@ -19,6 +19,9 @@ static inline u32 arp_hashfn(const void *pkey, const struct net_device *dev, u32

static inline struct neighbour *__ipv4_neigh_lookup_noref(struct net_device *dev, u32 key)
{
	if (dev->flags & (IFF_LOOPBACK | IFF_POINTOPOINT))
		key = INADDR_ANY;

	return ___neigh_lookup_noref(&arp_tbl, neigh_key_eq32, arp_hashfn, &key, dev);
}

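The two added lines collapse all lookups on loopback and point-to-point devices onto the single INADDR_ANY neighbour entry, since the peer address carries no information on such links. A hedged caller sketch (names are illustrative; noref lookups are only valid inside an RCU read-side section):

#include <linux/rcupdate.h>
#include <net/arp.h>

/* Hedged sketch: peek at a neighbour's state without taking a
 * reference; the pointer must not be used after the unlock. */
static bool neigh_is_connected_sketch(struct net_device *dev, u32 nexthop)
{
	struct neighbour *n;
	bool connected = false;

	rcu_read_lock_bh();
	n = __ipv4_neigh_lookup_noref(dev, nexthop);
	if (n)
		connected = (n->nud_state & NUD_CONNECTED);
	rcu_read_unlock_bh();
	return connected;
}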
include/net/ipv6.h

@@ -281,6 +281,7 @@ int ipv6_flowlabel_opt_get(struct sock *sk, struct in6_flowlabel_req *freq,
			   int flags);
int ip6_flowlabel_init(void);
void ip6_flowlabel_cleanup(void);
bool ip6_autoflowlabel(struct net *net, const struct ipv6_pinfo *np);

static inline void fl6_sock_release(struct ip6_flowlabel *fl)
{
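The new helper decides whether a flow label should be auto-generated. A hedged sketch of the decision it encapsulates, reconstructed from memory (treat the field and helper names as assumptions): prefer an explicit per-socket setting, else fall back to the per-namespace default.

/* Hedged sketch, not the verbatim net/ipv6 implementation. */
static bool ip6_autoflowlabel_sketch(struct net *net,
				     const struct ipv6_pinfo *np)
{
	if (!np->autoflowlabel_set)			/* assumed field  */
		return ip6_default_np_autolabel(net);	/* assumed helper */
	return np->autoflowlabel;
}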
include/net/net_namespace.h

@@ -209,6 +209,11 @@ int net_eq(const struct net *net1, const struct net *net2)
	return net1 == net2;
}

static inline int check_net(const struct net *net)
{
	return atomic_read(&net->count) != 0;
}

void net_drop_ns(void *);

#else

@@ -233,6 +238,11 @@ int net_eq(const struct net *net1, const struct net *net2)
	return 1;
}

static inline int check_net(const struct net *net)
{
	return 1;
}

#define net_drop_ns NULL
#endif

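check_net() lets code that holds a netns pointer without a reference detect teardown in progress (the non-namespace build trivially returns 1). A hedged usage sketch (names are illustrative):

#include <net/net_namespace.h>

/* Hedged sketch: a periodic callback bails out once the namespace
 * refcount has hit zero, instead of touching state being dismantled. */
static void netns_worker_sketch(struct net *net)
{
	if (!check_net(net))
		return;	/* netns is going away; drop the work */
	/* ... safe to inspect per-namespace state here ... */
}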
Some files were not shown because too many files have changed in this diff.