This is the 5.10.32 stable release

-----BEGIN PGP SIGNATURE-----
 
 iQIzBAABCAAdFiEEZH8oZUiU471FcZm+ONu9yGCSaT4FAmCACCYACgkQONu9yGCS
 aT7BkA//ZVfIoUb4EThpqWz3qjHCxQ5jqOH3XT//a2hhAFBp8wZAe4JRf76oqKyz
 G+2ZPQFU+susfhvYQVcxp2C6A9XomRSlX00MqFsKVrcOPG/IL/iBdNgBma/ajG8R
 0ARmgTBZBt+3i3nED5i26SzBR3vFx0lrwU3zaZoivAQpDkzzHFte++s4lLqSEY8i
 rxEe0sUm6HpsXiuRPkNU2yH1/2immgypbIYNaaOjPGxIO4aKouPGIMZtEm5/w3Oi
 h+eDSQH8fCN5jJjX9sGIqiK1XJEeS+hyUPsSjKypytVkRLqHEXE3Dcau24giHfEm
 iWM8Xh16Nudvh0vnWNfsMGZeghKwgLr71OnW4AcPe6dCG7fh4cKvyAuXFneL2PeZ
 NgGEFKIsFNXtaPLNn8peot3TeSemMdkuc2ORdiq2OKmU1umkczF6Y5L38JxMWEnZ
 /FoPIhigqobF0AujaFK/wKu5eznZDGiZqRpluJLF2yZYbHAEiDjwemSb+Xa6tmpe
 wXFIhu6ipQlKhqFfIGo1sIKachXkk/CvKwbFPUYh+CjZGgls9nZH0sGSNVa+p2BY
 7k+I7CfRZgAjWd1W0UM5J2oZ2uVU66zBH3SwF2RBbsWhi+btvfzMhVQIK1FJwJFQ
 bsuVegUAwR5mSVlBk2QpLIDJ55BqPXOorba7i8CuAoUjETr1MEY=
 =YPgE
 -----END PGP SIGNATURE-----

Merge 5.10.32 into android12-5.10

Changes in 5.10.32
	net/sctp: fix race condition in sctp_destroy_sock
	mtd: rawnand: mtk: Fix WAITRDY break condition and timeout
	Input: nspire-keypad - enable interrupts only when opened
	gpio: sysfs: Obey valid_mask
	dmaengine: idxd: Fix clobbering of SWERR overflow bit on writeback
	dmaengine: idxd: fix delta_rec and crc size field for completion record
	dmaengine: idxd: fix opcap sysfs attribute output
	dmaengine: idxd: fix wq size store permission state
	dmaengine: dw: Make it dependent to HAS_IOMEM
	dmaengine: Fix a double free in dma_async_device_register
	dmaengine: plx_dma: add a missing put_device() on error path
	dmaengine: idxd: fix wq cleanup of WQCFG registers
	ACPI: x86: Call acpi_boot_table_init() after acpi_table_upgrade()
	ARM: dts: Drop duplicate sha2md5_fck to fix clk_disable race
	ARM: dts: Fix moving mmc devices with aliases for omap4 & 5
	lockdep: Add a missing initialization hint to the "INFO: Trying to register non-static key" message
	arc: kernel: Return -EFAULT if copy_to_user() fails
	iwlwifi: Fix softirq/hardirq disabling in iwl_pcie_enqueue_hcmd()
	xfrm: BEET mode doesn't support fragments for inner packets
	ASoC: max98373: Changed amp shutdown register as volatile
	ASoC: max98373: Added 30ms turn on/off time delay
	gpu/xen: Fix a use after free in xen_drm_drv_init
	neighbour: Disregard DEAD dst in neigh_update
	ARM: keystone: fix integer overflow warning
	ARM: omap1: fix building with clang IAS
	drm/msm: Fix a5xx/a6xx timestamps
	ASoC: fsl_esai: Fix TDM slot setup for I2S mode
	scsi: scsi_transport_srp: Don't block target in SRP_PORT_LOST state
	iwlwifi: add support for Qu with AX201 device
	net: ieee802154: stop dump llsec keys for monitors
	net: ieee802154: forbid monitor for add llsec key
	net: ieee802154: forbid monitor for del llsec key
	net: ieee802154: stop dump llsec devs for monitors
	net: ieee802154: forbid monitor for add llsec dev
	net: ieee802154: forbid monitor for del llsec dev
	net: ieee802154: stop dump llsec devkeys for monitors
	net: ieee802154: forbid monitor for add llsec devkey
	net: ieee802154: forbid monitor for del llsec devkey
	net: ieee802154: stop dump llsec seclevels for monitors
	net: ieee802154: forbid monitor for add llsec seclevel
	pcnet32: Use pci_resource_len to validate PCI resource
	mac80211: clear sta->fast_rx when STA removed from 4-addr VLAN
	virt_wifi: Return micros for BSS TSF values
	lib: fix kconfig dependency on ARCH_WANT_FRAME_POINTERS
	Input: s6sy761 - fix coordinate read bit shift
	Input: i8042 - fix Pegatron C15B ID entry
	HID: wacom: set EV_KEY and EV_ABS only for non-HID_GENERIC type of devices
	dm verity fec: fix misaligned RS roots IO
	readdir: make sure to verify directory entry for legacy interfaces too
	arm64: fix inline asm in load_unaligned_zeropad()
	arm64: alternatives: Move length validation in alternative_{insn, endif}
	vfio/pci: Add missing range check in vfio_pci_mmap
	riscv: Fix spelling mistake "SPARSEMEM" to "SPARSMEM"
	scsi: libsas: Reset num_scatter if libata marks qc as NODATA
	ixgbe: fix unbalanced device enable/disable in suspend/resume
	netfilter: flowtable: fix NAT IPv6 offload mangling
	netfilter: conntrack: do not print icmpv6 as unknown via /proc
	ice: Fix potential infinite loop when using u8 loop counter
	libnvdimm/region: Fix nvdimm_has_flush() to handle ND_REGION_ASYNC
	netfilter: bridge: add pre_exit hooks for ebtable unregistration
	netfilter: arp_tables: add pre_exit hook for table unregister
	libbpf: Fix potential NULL pointer dereference
	net: macb: fix the restore of cmp registers
	net/mlx5e: fix ingress_ifindex check in mlx5e_flower_parse_meta
	netfilter: nft_limit: avoid possible divide error in nft_limit_init
	net/mlx5e: Fix setting of RS FEC mode
	net: davicom: Fix regulator not turned off on failed probe
	net: sit: Unregister catch-all devices
	net: ip6_tunnel: Unregister catch-all devices
	mm: ptdump: fix build failure
	net: Make tcp_allowed_congestion_control readonly in non-init netns
	i40e: fix the panic when running bpf in xdpdrv mode
	ethtool: pause: make sure we init driver stats
	ia64: remove duplicate entries in generic_defconfig
	ia64: tools: remove inclusion of ia64-specific version of errno.h header
	ibmvnic: avoid calling napi_disable() twice
	ibmvnic: remove duplicate napi_schedule call in do_reset function
	ibmvnic: remove duplicate napi_schedule call in open function
	ch_ktls: Fix kernel panic
	ch_ktls: fix device connection close
	ch_ktls: tcb close causes tls connection failure
	ch_ktls: do not send snd_una update to TCB in middle
	gro: ensure frag0 meets IP header alignment
	ARM: OMAP2+: Fix warning for omap_init_time_of()
	ARM: 9069/1: NOMMU: Fix conversion for_each_membock() to for_each_mem_range()
	ARM: footbridge: fix PCI interrupt mapping
	ARM: OMAP2+: Fix uninitialized sr_inst
	arm64: dts: allwinner: Fix SD card CD GPIO for SOPine systems
	arm64: dts: allwinner: h6: beelink-gs1: Remove ext. 32 kHz osc reference
	bpf: Use correct permission flag for mixed signed bounds arithmetic
	KVM: VMX: Convert vcpu_vmx.exit_reason to a union
	KVM: VMX: Don't use vcpu->run->internal.ndata as an array index
	r8169: tweak max read request size for newer chips also in jumbo mtu mode
	r8169: don't advertise pause in jumbo mode
	bpf: Ensure off_reg has no mixed signed bounds for all types
	bpf: Move off_reg into sanitize_ptr_alu
	ARM: 9071/1: uprobes: Don't hook on thumb instructions
	arm64: mte: Ensure TIF_MTE_ASYNC_FAULT is set atomically
	bpf: Rework ptr_limit into alu_limit and add common error path
	bpf: Improve verifier error messages for users
	bpf: Move sanitize_val_alu out of op switch
	net: phy: marvell: fix detection of PHY on Topaz switches
	Linux 5.10.32

Signed-off-by: Greg Kroah-Hartman <gregkh@google.com>
Change-Id: If247bf8401509195e4f55f03dcc514f80d467966

@@ -1,7 +1,7 @@
# SPDX-License-Identifier: GPL-2.0
VERSION = 5
PATCHLEVEL = 10
SUBLEVEL = 31
SUBLEVEL = 32
EXTRAVERSION =
NAME = Dare mighty things

@@ -96,7 +96,7 @@ stash_usr_regs(struct rt_sigframe __user *sf, struct pt_regs *regs,
sizeof(sf->uc.uc_mcontext.regs.scratch));
err |= __copy_to_user(&sf->uc.uc_sigmask, set, sizeof(sigset_t));
return err;
return err ? -EFAULT : 0;
}
static int restore_usr_regs(struct pt_regs *regs, struct rt_sigframe __user *sf)
@@ -110,7 +110,7 @@ static int restore_usr_regs(struct pt_regs *regs, struct rt_sigframe __user *sf)
&(sf->uc.uc_mcontext.regs.scratch),
sizeof(sf->uc.uc_mcontext.regs.scratch));
if (err)
return err;
return -EFAULT;
set_current_blocked(&set);
regs->bta = uregs.scratch.bta;

@@ -22,6 +22,11 @@
i2c1 = &i2c2;
i2c2 = &i2c3;
i2c3 = &i2c4;
mmc0 = &mmc1;
mmc1 = &mmc2;
mmc2 = &mmc3;
mmc3 = &mmc4;
mmc4 = &mmc5;
serial0 = &uart1;
serial1 = &uart2;
serial2 = &uart3;

@@ -770,14 +770,6 @@
ti,max-div = <2>;
};
sha2md5_fck: sha2md5_fck@15c8 {
#clock-cells = <0>;
compatible = "ti,gate-clock";
clocks = <&l3_div_ck>;
ti,bit-shift = <1>;
reg = <0x15c8>;
};
usb_phy_cm_clk32k: usb_phy_cm_clk32k@640 {
#clock-cells = <0>;
compatible = "ti,gate-clock";

@@ -25,6 +25,11 @@
i2c2 = &i2c3;
i2c3 = &i2c4;
i2c4 = &i2c5;
mmc0 = &mmc1;
mmc1 = &mmc2;
mmc2 = &mmc3;
mmc3 = &mmc4;
mmc4 = &mmc5;
serial0 = &uart1;
serial1 = &uart2;
serial2 = &uart3;

@@ -15,14 +15,14 @@
#include <asm/mach-types.h>
/* cats host-specific stuff */
static int irqmap_cats[] __initdata = { IRQ_PCI, IRQ_IN0, IRQ_IN1, IRQ_IN3 };
static int irqmap_cats[] = { IRQ_PCI, IRQ_IN0, IRQ_IN1, IRQ_IN3 };
static u8 cats_no_swizzle(struct pci_dev *dev, u8 *pin)
{
return 0;
}
static int __init cats_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
static int cats_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
{
if (dev->irq >= 255)
return -1; /* not a valid interrupt. */

@@ -14,9 +14,9 @@
#include <asm/mach/pci.h>
#include <asm/mach-types.h>
static int irqmap_ebsa285[] __initdata = { IRQ_IN3, IRQ_IN1, IRQ_IN0, IRQ_PCI };
static int irqmap_ebsa285[] = { IRQ_IN3, IRQ_IN1, IRQ_IN0, IRQ_PCI };
static int __init ebsa285_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
static int ebsa285_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
{
if (dev->vendor == PCI_VENDOR_ID_CONTAQ &&
dev->device == PCI_DEVICE_ID_CONTAQ_82C693)

@@ -18,7 +18,7 @@
* We now use the slot ID instead of the device identifiers to select
* which interrupt is routed where.
*/
static int __init netwinder_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
static int netwinder_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
{
switch (slot) {
case 0: /* host bridge */

@@ -14,13 +14,12 @@
#include <asm/mach/pci.h>
#include <asm/mach-types.h>
static int irqmap_personal_server[] __initdata = {
static int irqmap_personal_server[] = {
IRQ_IN0, IRQ_IN1, IRQ_IN2, IRQ_IN3, 0, 0, 0,
IRQ_DOORBELLHOST, IRQ_DMA1, IRQ_DMA2, IRQ_PCI
};
static int __init personal_server_map_irq(const struct pci_dev *dev, u8 slot,
u8 pin)
static int personal_server_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
{
unsigned char line;

@@ -65,7 +65,7 @@ static void __init keystone_init(void)
static long long __init keystone_pv_fixup(void)
{
long long offset;
phys_addr_t mem_start, mem_end;
u64 mem_start, mem_end;
mem_start = memblock_start_of_DRAM();
mem_end = memblock_end_of_DRAM();
@@ -78,7 +78,7 @@ static long long __init keystone_pv_fixup(void)
if (mem_start < KEYSTONE_HIGH_PHYS_START ||
mem_end > KEYSTONE_HIGH_PHYS_END) {
pr_crit("Invalid address space for memory (%08llx-%08llx)\n",
(u64)mem_start, (u64)mem_end);
mem_start, mem_end);
return 0;
}

@@ -15,6 +15,7 @@
#include <linux/platform_data/gpio-omap.h>
#include <asm/assembler.h>
#include <asm/irq.h>
#include "ams-delta-fiq.h"
#include "board-ams-delta.h"

@@ -33,7 +33,7 @@ static void __init __maybe_unused omap_generic_init(void)
}
/* Clocks are needed early, see drivers/clocksource for the rest */
void __init __maybe_unused omap_init_time_of(void)
static void __init __maybe_unused omap_init_time_of(void)
{
omap_clk_init();
timer_probe();

@@ -188,7 +188,7 @@ static const char * const dra7_sr_instances[] = {
int __init omap_devinit_smartreflex(void)
{
const char * const *sr_inst;
const char * const *sr_inst = NULL;
int i, nr_sr = 0;
if (soc_is_omap44xx()) {

@@ -235,6 +235,7 @@ void __init pmsav7_adjust_lowmem_bounds(void)
phys_addr_t mem_end;
phys_addr_t reg_start, reg_end;
unsigned int mem_max_regions;
bool first = true;
int num;
u64 i;
@@ -263,7 +264,7 @@ void __init pmsav7_adjust_lowmem_bounds(void)
#endif
for_each_mem_range(i, &reg_start, &reg_end) {
if (i == 0) {
if (first) {
phys_addr_t phys_offset = PHYS_OFFSET;
/*
@@ -275,6 +276,7 @@ void __init pmsav7_adjust_lowmem_bounds(void)
mem_start = reg_start;
mem_end = reg_end;
specified_mem_size = mem_end - mem_start;
first = false;
} else {
/*
* memblock auto merges contiguous blocks, remove

@@ -95,10 +95,11 @@ void __init pmsav8_adjust_lowmem_bounds(void)
{
phys_addr_t mem_end;
phys_addr_t reg_start, reg_end;
bool first = true;
u64 i;
for_each_mem_range(i, &reg_start, &reg_end) {
if (i == 0) {
if (first) {
phys_addr_t phys_offset = PHYS_OFFSET;
/*
@@ -107,6 +108,7 @@ void __init pmsav8_adjust_lowmem_bounds(void)
if (reg_start != phys_offset)
panic("First memory bank must be contiguous from PHYS_OFFSET");
mem_end = reg_end;
first = false;
} else {
/*
* memblock auto merges contiguous blocks, remove

@@ -204,7 +204,7 @@ unsigned long uprobe_get_swbp_addr(struct pt_regs *regs)
static struct undef_hook uprobes_arm_break_hook = {
.instr_mask = 0x0fffffff,
.instr_val = (UPROBE_SWBP_ARM_INSN & 0x0fffffff),
.cpsr_mask = MODE_MASK,
.cpsr_mask = (PSR_T_BIT | MODE_MASK),
.cpsr_val = USR_MODE,
.fn = uprobe_trap_handler,
};
@@ -212,7 +212,7 @@ static struct undef_hook uprobes_arm_break_hook = {
static struct undef_hook uprobes_arm_ss_hook = {
.instr_mask = 0x0fffffff,
.instr_val = (UPROBE_SS_ARM_INSN & 0x0fffffff),
.cpsr_mask = MODE_MASK,
.cpsr_mask = (PSR_T_BIT | MODE_MASK),
.cpsr_val = USR_MODE,
.fn = uprobe_trap_handler,
};

@@ -1402,10 +1402,13 @@ config ARM64_PAN
config AS_HAS_LDAPR
def_bool $(as-instr,.arch_extension rcpc)
config AS_HAS_LSE_ATOMICS
def_bool $(as-instr,.arch_extension lse)
config ARM64_LSE_ATOMICS
bool
default ARM64_USE_LSE_ATOMICS
depends on $(as-instr,.arch_extension lse)
depends on AS_HAS_LSE_ATOMICS
config ARM64_USE_LSE_ATOMICS
bool "Atomic instructions"
@@ -1687,6 +1690,7 @@ config ARM64_MTE
depends on AS_HAS_ARMV8_5
# Required for tag checking in the uaccess routines
depends on ARM64_PAN
depends on AS_HAS_LSE_ATOMICS
select ARCH_USES_HIGH_VMA_FLAGS
help
Memory Tagging (part of the ARMv8.5 Extensions) provides

@@ -8,3 +8,7 @@
compatible = "pine64,pine64-lts", "allwinner,sun50i-r18",
"allwinner,sun50i-a64";
};
&mmc0 {
cd-gpios = <&pio 5 6 GPIO_ACTIVE_LOW>; /* PF6 push-push switch */
};

@@ -34,7 +34,7 @@
vmmc-supply = <&reg_dcdc1>;
disable-wp;
bus-width = <4>;
cd-gpios = <&pio 5 6 GPIO_ACTIVE_LOW>; /* PF6 */
cd-gpios = <&pio 5 6 GPIO_ACTIVE_HIGH>; /* PF6 push-pull switch */
status = "okay";
};

@@ -289,10 +289,6 @@
vcc-pm-supply = <&reg_aldo1>;
};
&rtc {
clocks = <&ext_osc32k>;
};
&spdif {
status = "okay";
};

@@ -97,9 +97,9 @@
.popsection
.subsection 1
663: \insn2
664: .previous
.org . - (664b-663b) + (662b-661b)
664: .org . - (664b-663b) + (662b-661b)
.org . - (662b-661b) + (664b-663b)
.previous
.endif
.endm
@@ -169,11 +169,11 @@
*/
.macro alternative_endif
664:
.org . - (664b-663b) + (662b-661b)
.org . - (662b-661b) + (664b-663b)
.if .Lasm_alt_mode==0
.previous
.endif
.org . - (664b-663b) + (662b-661b)
.org . - (662b-661b) + (664b-663b)
.endm
/*

@@ -148,16 +148,18 @@ alternative_cb_end
.endm
/* Check for MTE asynchronous tag check faults */
.macro check_mte_async_tcf, flgs, tmp
.macro check_mte_async_tcf, tmp, ti_flags
#ifdef CONFIG_ARM64_MTE
.arch_extension lse
alternative_if_not ARM64_MTE
b 1f
alternative_else_nop_endif
mrs_s \tmp, SYS_TFSRE0_EL1
tbz \tmp, #SYS_TFSR_EL1_TF0_SHIFT, 1f
/* Asynchronous TCF occurred for TTBR0 access, set the TI flag */
orr \flgs, \flgs, #_TIF_MTE_ASYNC_FAULT
str \flgs, [tsk, #TSK_TI_FLAGS]
mov \tmp, #_TIF_MTE_ASYNC_FAULT
add \ti_flags, tsk, #TSK_TI_FLAGS
stset \tmp, [\ti_flags]
msr_s SYS_TFSRE0_EL1, xzr
1:
#endif
@@ -244,7 +246,7 @@ alternative_else_nop_endif
disable_step_tsk x19, x20
/* Check for asynchronous tag check faults in user space */
check_mte_async_tcf x19, x22
check_mte_async_tcf x22, x23
apply_ssbd 1, x22, x23
ptrauth_keys_install_kernel tsk, x20, x22, x23

@@ -55,8 +55,6 @@ CONFIG_CHR_DEV_SG=m
CONFIG_SCSI_FC_ATTRS=y
CONFIG_SCSI_SYM53C8XX_2=y
CONFIG_SCSI_QLOGIC_1280=y
CONFIG_ATA=y
CONFIG_ATA_PIIX=y
CONFIG_SATA_VITESSE=y
CONFIG_MD=y
CONFIG_BLK_DEV_MD=m

@@ -144,7 +144,7 @@ config ARCH_FLATMEM_ENABLE
config ARCH_SPARSEMEM_ENABLE
def_bool y
depends on MMU
select SPARSEMEM_STATIC if 32BIT && SPARSMEM
select SPARSEMEM_STATIC if 32BIT && SPARSEMEM
select SPARSEMEM_VMEMMAP_ENABLE if 64BIT
config ARCH_SELECT_MEMORY_MODEL

@@ -1051,9 +1051,6 @@ void __init setup_arch(char **cmdline_p)
cleanup_highmap();
/* Look for ACPI tables and reserve memory occupied by them. */
acpi_boot_table_init();
memblock_set_current_limit(ISA_END_ADDRESS);
e820__memblock_setup();
@@ -1132,6 +1129,8 @@ void __init setup_arch(char **cmdline_p)
reserve_initrd();
acpi_table_upgrade();
/* Look for ACPI tables and reserve memory occupied by them. */
acpi_boot_table_init();
vsmp_init();

@@ -3329,7 +3329,11 @@ enum nvmx_vmentry_status nested_vmx_enter_non_root_mode(struct kvm_vcpu *vcpu,
struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
enum vm_entry_failure_code entry_failure_code;
bool evaluate_pending_interrupts;
u32 exit_reason, failed_index;
union vmx_exit_reason exit_reason = {
.basic = EXIT_REASON_INVALID_STATE,
.failed_vmentry = 1,
};
u32 failed_index;
if (kvm_check_request(KVM_REQ_TLB_FLUSH_CURRENT, vcpu))
kvm_vcpu_flush_tlb_current(vcpu);
@@ -3381,7 +3385,7 @@ enum nvmx_vmentry_status nested_vmx_enter_non_root_mode(struct kvm_vcpu *vcpu,
if (nested_vmx_check_guest_state(vcpu, vmcs12,
&entry_failure_code)) {
exit_reason = EXIT_REASON_INVALID_STATE;
exit_reason.basic = EXIT_REASON_INVALID_STATE;
vmcs12->exit_qualification = entry_failure_code;
goto vmentry_fail_vmexit;
}
@@ -3392,7 +3396,7 @@ enum nvmx_vmentry_status nested_vmx_enter_non_root_mode(struct kvm_vcpu *vcpu,
vcpu->arch.tsc_offset += vmcs12->tsc_offset;
if (prepare_vmcs02(vcpu, vmcs12, &entry_failure_code)) {
exit_reason = EXIT_REASON_INVALID_STATE;
exit_reason.basic = EXIT_REASON_INVALID_STATE;
vmcs12->exit_qualification = entry_failure_code;
goto vmentry_fail_vmexit_guest_mode;
}
@@ -3402,7 +3406,7 @@ enum nvmx_vmentry_status nested_vmx_enter_non_root_mode(struct kvm_vcpu *vcpu,
vmcs12->vm_entry_msr_load_addr,
vmcs12->vm_entry_msr_load_count);
if (failed_index) {
exit_reason = EXIT_REASON_MSR_LOAD_FAIL;
exit_reason.basic = EXIT_REASON_MSR_LOAD_FAIL;
vmcs12->exit_qualification = failed_index;
goto vmentry_fail_vmexit_guest_mode;
}
@@ -3470,7 +3474,7 @@ vmentry_fail_vmexit:
return NVMX_VMENTRY_VMEXIT;
load_vmcs12_host_state(vcpu, vmcs12);
vmcs12->vm_exit_reason = exit_reason | VMX_EXIT_REASONS_FAILED_VMENTRY;
vmcs12->vm_exit_reason = exit_reason.full;
if (enable_shadow_vmcs || vmx->nested.hv_evmcs)
vmx->nested.need_vmcs12_to_shadow_sync = true;
return NVMX_VMENTRY_VMEXIT;
@@ -5533,7 +5537,12 @@ static int handle_vmfunc(struct kvm_vcpu *vcpu)
return kvm_skip_emulated_instruction(vcpu);
fail:
nested_vmx_vmexit(vcpu, vmx->exit_reason,
/*
* This is effectively a reflected VM-Exit, as opposed to a synthesized
* nested VM-Exit. Pass the original exit reason, i.e. don't hardcode
* EXIT_REASON_VMFUNC as the exit reason.
*/
nested_vmx_vmexit(vcpu, vmx->exit_reason.full,
vmx_get_intr_info(vcpu),
vmx_get_exit_qual(vcpu));
return 1;
@@ -5601,7 +5610,8 @@ static bool nested_vmx_exit_handled_io(struct kvm_vcpu *vcpu,
* MSR bitmap. This may be the case even when L0 doesn't use MSR bitmaps.
*/
static bool nested_vmx_exit_handled_msr(struct kvm_vcpu *vcpu,
struct vmcs12 *vmcs12, u32 exit_reason)
struct vmcs12 *vmcs12,
union vmx_exit_reason exit_reason)
{
u32 msr_index = kvm_rcx_read(vcpu);
gpa_t bitmap;
@@ -5615,7 +5625,7 @@ static bool nested_vmx_exit_handled_msr(struct kvm_vcpu *vcpu,
* First we need to figure out which of the four to use:
*/
bitmap = vmcs12->msr_bitmap;
if (exit_reason == EXIT_REASON_MSR_WRITE)
if (exit_reason.basic == EXIT_REASON_MSR_WRITE)
bitmap += 2048;
if (msr_index >= 0xc0000000) {
msr_index -= 0xc0000000;
@@ -5752,11 +5762,12 @@ static bool nested_vmx_exit_handled_mtf(struct vmcs12 *vmcs12)
* Return true if L0 wants to handle an exit from L2 regardless of whether or not
* L1 wants the exit. Only call this when in is_guest_mode (L2).
*/
static bool nested_vmx_l0_wants_exit(struct kvm_vcpu *vcpu, u32 exit_reason)
static bool nested_vmx_l0_wants_exit(struct kvm_vcpu *vcpu,
union vmx_exit_reason exit_reason)
{
u32 intr_info;
switch ((u16)exit_reason) {
switch ((u16)exit_reason.basic) {
case EXIT_REASON_EXCEPTION_NMI:
intr_info = vmx_get_intr_info(vcpu);
if (is_nmi(intr_info))
@@ -5812,12 +5823,13 @@ static bool nested_vmx_l0_wants_exit(struct kvm_vcpu *vcpu, u32 exit_reason)
* Return 1 if L1 wants to intercept an exit from L2. Only call this when in
* is_guest_mode (L2).
*/
static bool nested_vmx_l1_wants_exit(struct kvm_vcpu *vcpu, u32 exit_reason)
static bool nested_vmx_l1_wants_exit(struct kvm_vcpu *vcpu,
union vmx_exit_reason exit_reason)
{
struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
u32 intr_info;
switch ((u16)exit_reason) {
switch ((u16)exit_reason.basic) {
case EXIT_REASON_EXCEPTION_NMI:
intr_info = vmx_get_intr_info(vcpu);
if (is_nmi(intr_info))
@@ -5936,7 +5948,7 @@ static bool nested_vmx_l1_wants_exit(struct kvm_vcpu *vcpu, u32 exit_reason)
bool nested_vmx_reflect_vmexit(struct kvm_vcpu *vcpu)
{
struct vcpu_vmx *vmx = to_vmx(vcpu);
u32 exit_reason = vmx->exit_reason;
union vmx_exit_reason exit_reason = vmx->exit_reason;
unsigned long exit_qual;
u32 exit_intr_info;
@@ -5955,7 +5967,7 @@ bool nested_vmx_reflect_vmexit(struct kvm_vcpu *vcpu)
goto reflect_vmexit;
}
trace_kvm_nested_vmexit(exit_reason, vcpu, KVM_ISA_VMX);
trace_kvm_nested_vmexit(exit_reason.full, vcpu, KVM_ISA_VMX);
/* If L0 (KVM) wants the exit, it trumps L1's desires. */
if (nested_vmx_l0_wants_exit(vcpu, exit_reason))
@@ -5981,7 +5993,7 @@ bool nested_vmx_reflect_vmexit(struct kvm_vcpu *vcpu)
exit_qual = vmx_get_exit_qual(vcpu);
reflect_vmexit:
nested_vmx_vmexit(vcpu, exit_reason, exit_intr_info, exit_qual);
nested_vmx_vmexit(vcpu, exit_reason.full, exit_intr_info, exit_qual);
return true;
}

@@ -1578,7 +1578,7 @@ static int skip_emulated_instruction(struct kvm_vcpu *vcpu)
* i.e. we end up advancing IP with some random value.
*/
if (!static_cpu_has(X86_FEATURE_HYPERVISOR) ||
to_vmx(vcpu)->exit_reason != EXIT_REASON_EPT_MISCONFIG) {
to_vmx(vcpu)->exit_reason.basic != EXIT_REASON_EPT_MISCONFIG) {
orig_rip = kvm_rip_read(vcpu);
rip = orig_rip + vmcs_read32(VM_EXIT_INSTRUCTION_LEN);
#ifdef CONFIG_X86_64
@@ -5687,7 +5687,7 @@ static void vmx_get_exit_info(struct kvm_vcpu *vcpu, u64 *info1, u64 *info2,
struct vcpu_vmx *vmx = to_vmx(vcpu);
*info1 = vmx_get_exit_qual(vcpu);
if (!(vmx->exit_reason & VMX_EXIT_REASONS_FAILED_VMENTRY)) {
if (!(vmx->exit_reason.failed_vmentry)) {
*info2 = vmx->idt_vectoring_info;
*intr_info = vmx_get_intr_info(vcpu);
if (is_exception_with_error_code(*intr_info))
@@ -5931,8 +5931,9 @@ void dump_vmcs(void)
static int vmx_handle_exit(struct kvm_vcpu *vcpu, fastpath_t exit_fastpath)
{
struct vcpu_vmx *vmx = to_vmx(vcpu);
u32 exit_reason = vmx->exit_reason;
union vmx_exit_reason exit_reason = vmx->exit_reason;
u32 vectoring_info = vmx->idt_vectoring_info;
u16 exit_handler_index;
/*
* Flush logged GPAs PML buffer, this will make dirty_bitmap more
@@ -5974,11 +5975,11 @@ static int vmx_handle_exit(struct kvm_vcpu *vcpu, fastpath_t exit_fastpath)
return 1;
}
if (exit_reason & VMX_EXIT_REASONS_FAILED_VMENTRY) {
if (exit_reason.failed_vmentry) {
dump_vmcs();
vcpu->run->exit_reason = KVM_EXIT_FAIL_ENTRY;
vcpu->run->fail_entry.hardware_entry_failure_reason
= exit_reason;
= exit_reason.full;
vcpu->run->fail_entry.cpu = vcpu->arch.last_vmentry_cpu;
return 0;
}
@@ -6000,24 +6001,24 @@ static int vmx_handle_exit(struct kvm_vcpu *vcpu, fastpath_t exit_fastpath)
* will cause infinite loop.
*/
if ((vectoring_info & VECTORING_INFO_VALID_MASK) &&
(exit_reason != EXIT_REASON_EXCEPTION_NMI &&
exit_reason != EXIT_REASON_EPT_VIOLATION &&
exit_reason != EXIT_REASON_PML_FULL &&
exit_reason != EXIT_REASON_APIC_ACCESS &&
exit_reason != EXIT_REASON_TASK_SWITCH)) {
(exit_reason.basic != EXIT_REASON_EXCEPTION_NMI &&
exit_reason.basic != EXIT_REASON_EPT_VIOLATION &&
exit_reason.basic != EXIT_REASON_PML_FULL &&
exit_reason.basic != EXIT_REASON_APIC_ACCESS &&
exit_reason.basic != EXIT_REASON_TASK_SWITCH)) {
int ndata = 3;
vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
vcpu->run->internal.suberror = KVM_INTERNAL_ERROR_DELIVERY_EV;
vcpu->run->internal.ndata = 3;
vcpu->run->internal.data[0] = vectoring_info;
vcpu->run->internal.data[1] = exit_reason;
vcpu->run->internal.data[1] = exit_reason.full;
vcpu->run->internal.data[2] = vcpu->arch.exit_qualification;
if (exit_reason == EXIT_REASON_EPT_MISCONFIG) {
vcpu->run->internal.ndata++;
vcpu->run->internal.data[3] =
if (exit_reason.basic == EXIT_REASON_EPT_MISCONFIG) {
vcpu->run->internal.data[ndata++] =
vmcs_read64(GUEST_PHYSICAL_ADDRESS);
}
vcpu->run->internal.data[vcpu->run->internal.ndata++] =
vcpu->arch.last_vmentry_cpu;
vcpu->run->internal.data[ndata++] = vcpu->arch.last_vmentry_cpu;
vcpu->run->internal.ndata = ndata;
return 0;
}
@@ -6043,38 +6044,39 @@ static int vmx_handle_exit(struct kvm_vcpu *vcpu, fastpath_t exit_fastpath)
if (exit_fastpath != EXIT_FASTPATH_NONE)
return 1;
if (exit_reason >= kvm_vmx_max_exit_handlers)
if (exit_reason.basic >= kvm_vmx_max_exit_handlers)
goto unexpected_vmexit;
#ifdef CONFIG_RETPOLINE
if (exit_reason == EXIT_REASON_MSR_WRITE)
if (exit_reason.basic == EXIT_REASON_MSR_WRITE)
return kvm_emulate_wrmsr(vcpu);
else if (exit_reason == EXIT_REASON_PREEMPTION_TIMER)
else if (exit_reason.basic == EXIT_REASON_PREEMPTION_TIMER)
return handle_preemption_timer(vcpu);
else if (exit_reason == EXIT_REASON_INTERRUPT_WINDOW)
else if (exit_reason.basic == EXIT_REASON_INTERRUPT_WINDOW)
return handle_interrupt_window(vcpu);
else if (exit_reason == EXIT_REASON_EXTERNAL_INTERRUPT)
else if (exit_reason.basic == EXIT_REASON_EXTERNAL_INTERRUPT)
return handle_external_interrupt(vcpu);
else if (exit_reason == EXIT_REASON_HLT)
else if (exit_reason.basic == EXIT_REASON_HLT)
return kvm_emulate_halt(vcpu);
else if (exit_reason == EXIT_REASON_EPT_MISCONFIG)
else if (exit_reason.basic == EXIT_REASON_EPT_MISCONFIG)
return handle_ept_misconfig(vcpu);
#endif
exit_reason = array_index_nospec(exit_reason,
kvm_vmx_max_exit_handlers);
if (!kvm_vmx_exit_handlers[exit_reason])
exit_handler_index = array_index_nospec((u16)exit_reason.basic,
kvm_vmx_max_exit_handlers);
if (!kvm_vmx_exit_handlers[exit_handler_index])
goto unexpected_vmexit;
return kvm_vmx_exit_handlers[exit_reason](vcpu);
return kvm_vmx_exit_handlers[exit_handler_index](vcpu);
unexpected_vmexit:
vcpu_unimpl(vcpu, "vmx: unexpected exit reason 0x%x\n", exit_reason);
vcpu_unimpl(vcpu, "vmx: unexpected exit reason 0x%x\n",
exit_reason.full);
dump_vmcs();
vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
vcpu->run->internal.suberror =
KVM_INTERNAL_ERROR_UNEXPECTED_EXIT_REASON;
vcpu->run->internal.ndata = 2;
vcpu->run->internal.data[0] = exit_reason;
vcpu->run->internal.data[0] = exit_reason.full;
vcpu->run->internal.data[1] = vcpu->arch.last_vmentry_cpu;
return 0;
}
@@ -6393,9 +6395,9 @@ static void vmx_handle_exit_irqoff(struct kvm_vcpu *vcpu)
{
struct vcpu_vmx *vmx = to_vmx(vcpu);
if (vmx->exit_reason == EXIT_REASON_EXTERNAL_INTERRUPT)
if (vmx->exit_reason.basic == EXIT_REASON_EXTERNAL_INTERRUPT)
handle_external_interrupt_irqoff(vcpu);
else if (vmx->exit_reason == EXIT_REASON_EXCEPTION_NMI)
else if (vmx->exit_reason.basic == EXIT_REASON_EXCEPTION_NMI)
handle_exception_nmi_irqoff(vmx);
}
@@ -6583,7 +6585,7 @@ void noinstr vmx_update_host_rsp(struct vcpu_vmx *vmx, unsigned long host_rsp)
static fastpath_t vmx_exit_handlers_fastpath(struct kvm_vcpu *vcpu)
{
switch (to_vmx(vcpu)->exit_reason) {
switch (to_vmx(vcpu)->exit_reason.basic) {
case EXIT_REASON_MSR_WRITE:
return handle_fastpath_set_msr_irqoff(vcpu);
case EXIT_REASON_PREEMPTION_TIMER:
@@ -6782,17 +6784,17 @@ reenter_guest:
vmx->idt_vectoring_info = 0;
if (unlikely(vmx->fail)) {
vmx->exit_reason = 0xdead;
vmx->exit_reason.full = 0xdead;
return EXIT_FASTPATH_NONE;
}
vmx->exit_reason = vmcs_read32(VM_EXIT_REASON);
if (unlikely((u16)vmx->exit_reason == EXIT_REASON_MCE_DURING_VMENTRY))
vmx->exit_reason.full = vmcs_read32(VM_EXIT_REASON);
if (unlikely((u16)vmx->exit_reason.basic == EXIT_REASON_MCE_DURING_VMENTRY))
kvm_machine_check();
trace_kvm_exit(vmx->exit_reason, vcpu, KVM_ISA_VMX);
trace_kvm_exit(vmx->exit_reason.full, vcpu, KVM_ISA_VMX);
if (unlikely(vmx->exit_reason & VMX_EXIT_REASONS_FAILED_VMENTRY))
if (unlikely(vmx->exit_reason.failed_vmentry))
return EXIT_FASTPATH_NONE;
vmx->loaded_vmcs->launched = 1;

@@ -70,6 +70,29 @@ struct pt_desc {
struct pt_ctx guest;
};
union vmx_exit_reason {
struct {
u32 basic : 16;
u32 reserved16 : 1;
u32 reserved17 : 1;
u32 reserved18 : 1;
u32 reserved19 : 1;
u32 reserved20 : 1;
u32 reserved21 : 1;
u32 reserved22 : 1;
u32 reserved23 : 1;
u32 reserved24 : 1;
u32 reserved25 : 1;
u32 reserved26 : 1;
u32 enclave_mode : 1;
u32 smi_pending_mtf : 1;
u32 smi_from_vmx_root : 1;
u32 reserved30 : 1;
u32 failed_vmentry : 1;
};
u32 full;
};
/*
* The nested_vmx structure is part of vcpu_vmx, and holds information we need
* for correct emulation of VMX (i.e., nested VMX) on this vcpu.
@@ -244,7 +267,7 @@ struct vcpu_vmx {
int vpid;
bool emulation_required;
u32 exit_reason;
union vmx_exit_reason exit_reason;
/* Posted interrupt descriptor */
struct pi_desc pi_desc;

@@ -1086,6 +1086,7 @@ static int __dma_async_device_channel_register(struct dma_device *device,
kfree(chan->dev);
err_free_local:
free_percpu(chan->local);
chan->local = NULL;
return rc;
}

@@ -10,6 +10,7 @@ config DW_DMAC_CORE
config DW_DMAC
tristate "Synopsys DesignWare AHB DMA platform driver"
depends on HAS_IOMEM
select DW_DMAC_CORE
help
Support the Synopsys DesignWare AHB DMA controller. This
@@ -18,6 +19,7 @@ config DW_DMAC
config DW_DMAC_PCI
tristate "Synopsys DesignWare AHB DMA PCI driver"
depends on PCI
depends on HAS_IOMEM
select DW_DMAC_CORE
help
Support the Synopsys DesignWare AHB DMA controller on the

@@ -263,6 +263,22 @@ void idxd_wq_drain(struct idxd_wq *wq)
idxd_cmd_exec(idxd, IDXD_CMD_DRAIN_WQ, operand, NULL);
}
void idxd_wq_reset(struct idxd_wq *wq)
{
struct idxd_device *idxd = wq->idxd;
struct device *dev = &idxd->pdev->dev;
u32 operand;
if (wq->state != IDXD_WQ_ENABLED) {
dev_dbg(dev, "WQ %d in wrong state: %d\n", wq->id, wq->state);
return;
}
operand = BIT(wq->id % 16) | ((wq->id / 16) << 16);
idxd_cmd_exec(idxd, IDXD_CMD_RESET_WQ, operand, NULL);
wq->state = IDXD_WQ_DISABLED;
}
int idxd_wq_map_portal(struct idxd_wq *wq)
{
struct idxd_device *idxd = wq->idxd;
@@ -291,8 +307,6 @@ void idxd_wq_unmap_portal(struct idxd_wq *wq)
void idxd_wq_disable_cleanup(struct idxd_wq *wq)
{
struct idxd_device *idxd = wq->idxd;
struct device *dev = &idxd->pdev->dev;
int i, wq_offset;
lockdep_assert_held(&idxd->dev_lock);
memset(wq->wqcfg, 0, idxd->wqcfg_size);
@@ -303,14 +317,6 @@ void idxd_wq_disable_cleanup(struct idxd_wq *wq)
wq->priority = 0;
clear_bit(WQ_FLAG_DEDICATED, &wq->flags);
memset(wq->name, 0, WQ_NAME_SIZE);
for (i = 0; i < WQCFG_STRIDES(idxd); i++) {
wq_offset = WQCFG_OFFSET(idxd, wq->id, i);
iowrite32(0, idxd->reg_base + wq_offset);
dev_dbg(dev, "WQ[%d][%d][%#x]: %#x\n",
wq->id, i, wq_offset,
ioread32(idxd->reg_base + wq_offset));
}
}
/* Device control bits */
@@ -560,7 +566,14 @@ static int idxd_wq_config_write(struct idxd_wq *wq)
if (!wq->group)
return 0;
memset(wq->wqcfg, 0, idxd->wqcfg_size);
/*
* Instead of memset the entire shadow copy of WQCFG, copy from the hardware after
* wq reset. This will copy back the sticky values that are present on some devices.
*/
for (i = 0; i < WQCFG_STRIDES(idxd); i++) {
wq_offset = WQCFG_OFFSET(idxd, wq->id, i);
wq->wqcfg->bits[i] = ioread32(idxd->reg_base + wq_offset);
}
/* byte 0-3 */
wq->wqcfg->wq_size = wq->size;

@@ -295,6 +295,7 @@ void idxd_wq_free_resources(struct idxd_wq *wq);
int idxd_wq_enable(struct idxd_wq *wq);
int idxd_wq_disable(struct idxd_wq *wq);
void idxd_wq_drain(struct idxd_wq *wq);
void idxd_wq_reset(struct idxd_wq *wq);
int idxd_wq_map_portal(struct idxd_wq *wq);
void idxd_wq_unmap_portal(struct idxd_wq *wq);
void idxd_wq_disable_cleanup(struct idxd_wq *wq);

@@ -66,7 +66,9 @@ static int process_misc_interrupts(struct idxd_device *idxd, u32 cause)
for (i = 0; i < 4; i++)
idxd->sw_err.bits[i] = ioread64(idxd->reg_base +
IDXD_SWERR_OFFSET + i * sizeof(u64));
iowrite64(IDXD_SWERR_ACK, idxd->reg_base + IDXD_SWERR_OFFSET);
iowrite64(idxd->sw_err.bits[0] & IDXD_SWERR_ACK,
idxd->reg_base + IDXD_SWERR_OFFSET);
if (idxd->sw_err.valid && idxd->sw_err.wq_idx_valid) {
int id = idxd->sw_err.wq_idx;

@@ -241,7 +241,6 @@ static void disable_wq(struct idxd_wq *wq)
{
struct idxd_device *idxd = wq->idxd;
struct device *dev = &idxd->pdev->dev;
int rc;
mutex_lock(&wq->wq_lock);
dev_dbg(dev, "%s removing WQ %s\n", __func__, dev_name(&wq->conf_dev));
@@ -262,17 +261,13 @@ static void disable_wq(struct idxd_wq *wq)
idxd_wq_unmap_portal(wq);
idxd_wq_drain(wq);
rc = idxd_wq_disable(wq);
idxd_wq_reset(wq);
idxd_wq_free_resources(wq);
wq->client_count = 0;
mutex_unlock(&wq->wq_lock);
if (rc < 0)
dev_warn(dev, "Failed to disable %s: %d\n",
dev_name(&wq->conf_dev), rc);
else
dev_info(dev, "wq %s disabled\n", dev_name(&wq->conf_dev));
dev_info(dev, "wq %s disabled\n", dev_name(&wq->conf_dev));
}
static int idxd_config_bus_remove(struct device *dev)
@@ -923,7 +918,7 @@ static ssize_t wq_size_store(struct device *dev,
if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))
return -EPERM;
if (wq->state != IDXD_WQ_DISABLED)
if (idxd->state == IDXD_DEV_ENABLED)
return -EPERM;
if (size + total_claimed_wq_size(idxd) - wq->size > idxd->max_wq_size)
@@ -1259,8 +1254,14 @@ static ssize_t op_cap_show(struct device *dev,
{
struct idxd_device *idxd =
container_of(dev, struct idxd_device, conf_dev);
int i, rc = 0;
return sprintf(buf, "%#llx\n", idxd->hw.opcap.bits[0]);
for (i = 0; i < 4; i++)
rc += sysfs_emit_at(buf, rc, "%#llx ", idxd->hw.opcap.bits[i]);
rc--;
rc += sysfs_emit_at(buf, rc, "\n");
return rc;
}
static DEVICE_ATTR_RO(op_cap);

@@ -507,10 +507,8 @@ static int plx_dma_create(struct pci_dev *pdev)
rc = request_irq(pci_irq_vector(pdev, 0), plx_dma_isr, 0,
KBUILD_MODNAME, plxdev);
if (rc) {
kfree(plxdev);
return rc;
}
if (rc)
goto free_plx;
spin_lock_init(&plxdev->ring_lock);
tasklet_setup(&plxdev->desc_task, plx_dma_desc_task);
@@ -540,14 +538,20 @@ static int plx_dma_create(struct pci_dev *pdev)
rc = dma_async_device_register(dma);
if (rc) {
pci_err(pdev, "Failed to register dma device: %d\n", rc);
free_irq(pci_irq_vector(pdev, 0), plxdev);
kfree(plxdev);
return rc;
goto put_device;
}
pci_set_drvdata(pdev, plxdev);
return 0;
put_device:
put_device(&pdev->dev);
free_irq(pci_irq_vector(pdev, 0), plxdev);
free_plx:
kfree(plxdev);
return rc;
}
static int plx_dma_probe(struct pci_dev *pdev,

@@ -458,6 +458,8 @@ static ssize_t export_store(struct class *class,
long gpio;
struct gpio_desc *desc;
int status;
struct gpio_chip *gc;
int offset;
status = kstrtol(buf, 0, &gpio);
if (status < 0)
@@ -469,6 +471,12 @@
pr_warn("%s: invalid GPIO %ld\n", __func__, gpio);
return -EINVAL;
}
gc = desc->gdev->chip;
offset = gpio_chip_hwgpio(desc);
if (!gpiochip_line_is_valid(gc, offset)) {
pr_warn("%s: GPIO %ld masked\n", __func__, gpio);
return -EINVAL;
}
/* No extra locking here; FLAG_SYSFS just signifies that the
* request and export were done by on behalf of userspace, so

@@ -1240,8 +1240,8 @@ static int a5xx_pm_suspend(struct msm_gpu *gpu)
static int a5xx_get_timestamp(struct msm_gpu *gpu, uint64_t *value)
{
*value = gpu_read64(gpu, REG_A5XX_RBBM_PERFCTR_CP_0_LO,
REG_A5XX_RBBM_PERFCTR_CP_0_HI);
*value = gpu_read64(gpu, REG_A5XX_RBBM_ALWAYSON_COUNTER_LO,
REG_A5XX_RBBM_ALWAYSON_COUNTER_HI);
return 0;
}

@@ -1073,8 +1073,8 @@ static int a6xx_get_timestamp(struct msm_gpu *gpu, uint64_t *value)
/* Force the GPU power on so we can read this register */
a6xx_gmu_set_oob(&a6xx_gpu->gmu, GMU_OOB_PERFCOUNTER_SET);
*value = gpu_read64(gpu, REG_A6XX_RBBM_PERFCTR_CP_0_LO,
REG_A6XX_RBBM_PERFCTR_CP_0_HI);
*value = gpu_read64(gpu, REG_A6XX_CP_ALWAYS_ON_COUNTER_LO,
REG_A6XX_CP_ALWAYS_ON_COUNTER_HI);
a6xx_gmu_clear_oob(&a6xx_gpu->gmu, GMU_OOB_PERFCOUNTER_SET);
mutex_unlock(&perfcounter_oob);

@@ -531,7 +531,7 @@ static int xen_drm_drv_init(struct xen_drm_front_info *front_info)
drm_dev = drm_dev_alloc(&xen_drm_driver, dev);
if (IS_ERR(drm_dev)) {
ret = PTR_ERR(drm_dev);
goto fail;
goto fail_dev;
}
drm_info->drm_dev = drm_dev;
@@ -561,8 +561,10 @@ fail_modeset:
drm_kms_helper_poll_fini(drm_dev);
drm_mode_config_cleanup(drm_dev);
drm_dev_put(drm_dev);
fail:
fail_dev:
kfree(drm_info);
front_info->drm_info = NULL;
fail:
return ret;
}

@@ -3574,8 +3574,6 @@ int wacom_setup_pen_input_capabilities(struct input_dev *input_dev,
{
struct wacom_features *features = &wacom_wac->features;
input_dev->evbit[0] |= BIT_MASK(EV_KEY) | BIT_MASK(EV_ABS);
if (!(features->device_type & WACOM_DEVICETYPE_PEN))
return -ENODEV;
@@ -3590,6 +3588,7 @@ int wacom_setup_pen_input_capabilities(struct input_dev *input_dev,
return 0;
}
input_dev->evbit[0] |= BIT_MASK(EV_KEY) | BIT_MASK(EV_ABS);
__set_bit(BTN_TOUCH, input_dev->keybit);
__set_bit(ABS_MISC, input_dev->absbit);
@@ -3742,8 +3741,6 @@ int wacom_setup_touch_input_capabilities(struct input_dev *input_dev,
{
struct wacom_features *features = &wacom_wac->features;
input_dev->evbit[0] |= BIT_MASK(EV_KEY) | BIT_MASK(EV_ABS);
if (!(features->device_type & WACOM_DEVICETYPE_TOUCH))
return -ENODEV;
@@ -3756,6 +3753,7 @@ int wacom_setup_touch_input_capabilities(struct input_dev *input_dev,
/* setup has already been done */
return 0;
input_dev->evbit[0] |= BIT_MASK(EV_KEY) | BIT_MASK(EV_ABS);
__set_bit(BTN_TOUCH, input_dev->keybit);
if (features->touch_max == 1) {

@@ -93,9 +93,15 @@ static irqreturn_t nspire_keypad_irq(int irq, void *dev_id)
return IRQ_HANDLED;
}
static int nspire_keypad_chip_init(struct nspire_keypad *keypad)
static int nspire_keypad_open(struct input_dev *input)
{
struct nspire_keypad *keypad = input_get_drvdata(input);
unsigned long val = 0, cycles_per_us, delay_cycles, row_delay_cycles;
int error;
error = clk_prepare_enable(keypad->clk);
if (error)
return error;
cycles_per_us = (clk_get_rate(keypad->clk) / 1000000);
if (cycles_per_us == 0)
@@ -121,30 +127,6 @@ static int nspire_keypad_chip_init(struct nspire_keypad *keypad)
keypad->int_mask = 1 << 1;
writel(keypad->int_mask, keypad->reg_base + KEYPAD_INTMSK);
/* Disable GPIO interrupts to prevent hanging on touchpad */
/* Possibly used to detect touchpad events */
writel(0, keypad->reg_base + KEYPAD_UNKNOWN_INT);
/* Acknowledge existing interrupts */
writel(~0, keypad->reg_base + KEYPAD_UNKNOWN_INT_STS);
return 0;
}
static int nspire_keypad_open(struct input_dev *input)
{
struct nspire_keypad *keypad = input_get_drvdata(input);
int error;
error = clk_prepare_enable(keypad->clk);
if (error)
return error;
error = nspire_keypad_chip_init(keypad);
if (error) {
clk_disable_unprepare(keypad->clk);
return error;
}
return 0;
}
@@ -152,6 +134,11 @@
{
struct nspire_keypad *keypad = input_get_drvdata(input);
/* Disable interrupts */
writel(0, keypad->reg_base + KEYPAD_INTMSK);
/* Acknowledge existing interrupts */
writel(~0, keypad->reg_base + KEYPAD_INT);
clk_disable_unprepare(keypad->clk);
}
@@ -210,6 +197,25 @@ static int nspire_keypad_probe(struct platform_device *pdev)
return -ENOMEM;
}
error = clk_prepare_enable(keypad->clk);
if (error) {
dev_err(&pdev->dev, "failed to enable clock\n");
return error;
}
/* Disable interrupts */
writel(0, keypad->reg_base + KEYPAD_INTMSK);
/* Acknowledge existing interrupts */
writel(~0, keypad->reg_base + KEYPAD_INT);
/* Disable GPIO interrupts to prevent hanging on touchpad */
/* Possibly used to detect touchpad events */
writel(0, keypad->reg_base + KEYPAD_UNKNOWN_INT);
/* Acknowledge existing GPIO interrupts */
writel(~0, keypad->reg_base + KEYPAD_UNKNOWN_INT_STS);
clk_disable_unprepare(keypad->clk);
input_set_drvdata(input, keypad);
input->id.bustype = BUS_HOST;

@@ -588,6 +588,7 @@ static const struct dmi_system_id i8042_dmi_noselftest_table[] = {
DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
DMI_MATCH(DMI_CHASSIS_TYPE, "10"), /* Notebook */
},
}, {
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
DMI_MATCH(DMI_CHASSIS_TYPE, "31"), /* Convertible Notebook */

@@ -145,8 +145,8 @@ static void s6sy761_report_coordinates(struct s6sy761_data *sdata,
u8 major = event[4];
u8 minor = event[5];
u8 z = event[6] & S6SY761_MASK_Z;
u16 x = (event[1] << 3) | ((event[3] & S6SY761_MASK_X) >> 4);
u16 y = (event[2] << 3) | (event[3] & S6SY761_MASK_Y);
u16 x = (event[1] << 4) | ((event[3] & S6SY761_MASK_X) >> 4);
u16 y = (event[2] << 4) | (event[3] & S6SY761_MASK_Y);
input_mt_slot(sdata->input, tid);

@@ -488,8 +488,8 @@ static int mtk_nfc_exec_instr(struct nand_chip *chip,
return 0;
case NAND_OP_WAITRDY_INSTR:
return readl_poll_timeout(nfc->regs + NFI_STA, status,
status & STA_BUSY, 20,
instr->ctx.waitrdy.timeout_ms);
!(status & STA_BUSY), 20,
instr->ctx.waitrdy.timeout_ms * 1000);
default:
break;
}

@@ -2994,10 +2994,17 @@ out_resources:
return err;
}
/* prod_id for switch families which do not have a PHY model number */
static const u16 family_prod_id_table[] = {
[MV88E6XXX_FAMILY_6341] = MV88E6XXX_PORT_SWITCH_ID_PROD_6341,
[MV88E6XXX_FAMILY_6390] = MV88E6XXX_PORT_SWITCH_ID_PROD_6390,
};
static int mv88e6xxx_mdio_read(struct mii_bus *bus, int phy, int reg)
{
struct mv88e6xxx_mdio_bus *mdio_bus = bus->priv;
struct mv88e6xxx_chip *chip = mdio_bus->chip;
u16 prod_id;
u16 val;
int err;
@@ -3008,23 +3015,12 @@ static int mv88e6xxx_mdio_read(struct mii_bus *bus, int phy, int reg)
err = chip->info->ops->phy_read(chip, bus, phy, reg, &val);
mv88e6xxx_reg_unlock(chip);
if (reg == MII_PHYSID2) {
/* Some internal PHYs don't have a model number. */
if (chip->info->family != MV88E6XXX_FAMILY_6165)
/* Then there is the 6165 family. It gets is
* PHYs correct. But it can also have two
* SERDES interfaces in the PHY address
* space. And these don't have a model
* number. But they are not PHYs, so we don't
* want to give them something a PHY driver
* will recognise.
*
* Use the mv88e6390 family model number
* instead, for anything which really could be
* a PHY,
*/
if (!(val & 0x3f0))
val |= MV88E6XXX_PORT_SWITCH_ID_PROD_6390 >> 4;
/* Some internal PHYs don't have a model number. */
if (reg == MII_PHYSID2 && !(val & 0x3f0) &&
chip->info->family < ARRAY_SIZE(family_prod_id_table)) {
prod_id = family_prod_id_table[chip->info->family];
if (prod_id)
val |= prod_id >> 4;
}
return err ? err : val;

@@ -1534,8 +1534,7 @@ pcnet32_probe_pci(struct pci_dev *pdev, const struct pci_device_id *ent)
}
pci_set_master(pdev);
ioaddr = pci_resource_start(pdev, 0);
if (!ioaddr) {
if (!pci_resource_len(pdev, 0)) {
if (pcnet32_debug & NETIF_MSG_PROBE)
pr_err("card has no PCI IO resources, aborting\n");
err = -ENODEV;
@@ -1548,6 +1547,8 @@ pcnet32_probe_pci(struct pci_dev *pdev, const struct pci_device_id *ent)
pr_err("architecture does not support 32bit PCI busmaster DMA\n");
goto err_disable_dev;
}
ioaddr = pci_resource_start(pdev, 0);
if (!request_region(ioaddr, PCNET32_TOTAL_SIZE, "pcnet32_probe_pci")) {
if (pcnet32_debug & NETIF_MSG_PROBE)
pr_err("io address range already allocated\n");

@@ -3777,6 +3777,7 @@ static int macb_init(struct platform_device *pdev)
reg = gem_readl(bp, DCFG8);
bp->max_tuples = min((GEM_BFEXT(SCR2CMP, reg) / 3),
GEM_BFEXT(T2SCR, reg));
INIT_LIST_HEAD(&bp->rx_fs_list.list);
if (bp->max_tuples > 0) {
/* also needs one ethtype match to check IPv4 */
if (GEM_BFEXT(SCR2ETH, reg) > 0) {
@@ -3787,7 +3788,6 @@ static int macb_init(struct platform_device *pdev)
/* Filtering is supported in hw but don't enable it in kernel now */
dev->hw_features |= NETIF_F_NTUPLE;
/* init Rx flow definitions */
INIT_LIST_HEAD(&bp->rx_fs_list.list);
bp->rx_fs_list.count = 0;
spin_lock_init(&bp->rx_fs_lock);
} else

@@ -354,18 +354,6 @@ static int chcr_set_tcb_field(struct chcr_ktls_info *tx_info, u16 word,
return cxgb4_ofld_send(tx_info->netdev, skb);
}
/*
* chcr_ktls_mark_tcb_close: mark tcb state to CLOSE
* @tx_info - driver specific tls info.
* return: NET_TX_OK/NET_XMIT_DROP.
*/
static int chcr_ktls_mark_tcb_close(struct chcr_ktls_info *tx_info)
{
return chcr_set_tcb_field(tx_info, TCB_T_STATE_W,
TCB_T_STATE_V(TCB_T_STATE_M),
CHCR_TCB_STATE_CLOSED, 1);
}
/*
* chcr_ktls_dev_del: call back for tls_dev_del.
* Remove the tid and l2t entry and close the connection.
@@ -400,8 +388,6 @@ static void chcr_ktls_dev_del(struct net_device *netdev,
/* clear tid */
if (tx_info->tid != -1) {
/* clear tcb state and then release tid */
chcr_ktls_mark_tcb_close(tx_info);
cxgb4_remove_tid(&tx_info->adap->tids, tx_info->tx_chan,
tx_info->tid, tx_info->ip_family);
}
@@ -579,7 +565,6 @@ static int chcr_ktls_dev_add(struct net_device *netdev, struct sock *sk,
return 0;
free_tid:
chcr_ktls_mark_tcb_close(tx_info);
#if IS_ENABLED(CONFIG_IPV6)
/* clear clip entry */
if (tx_info->ip_family == AF_INET6)
@@ -677,10 +662,6 @@ static int chcr_ktls_cpl_act_open_rpl(struct adapter *adap,
if (tx_info->pending_close) {
spin_unlock(&tx_info->lock);
if (!status) {
/* it's a late success, tcb status is establised,
* mark it close.
*/
chcr_ktls_mark_tcb_close(tx_info);
cxgb4_remove_tid(&tx_info->adap->tids, tx_info->tx_chan,
tid, tx_info->ip_family);
}
@@ -1668,54 +1649,6 @@ static void chcr_ktls_copy_record_in_skb(struct sk_buff *nskb,
refcount_add(nskb->truesize, &nskb->sk->sk_wmem_alloc);
}
/*
* chcr_ktls_update_snd_una: Reset the SEND_UNA. It will be done to avoid
* sending the same segment again. It will discard the segment which is before
* the current tx max.
* @tx_info - driver specific tls info.
* @q - TX queue.
* return: NET_TX_OK/NET_XMIT_DROP.
*/
static int chcr_ktls_update_snd_una(struct chcr_ktls_info *tx_info,
struct sge_eth_txq *q)
{
struct fw_ulptx_wr *wr;
unsigned int ndesc;
int credits;
void *pos;
u32 len;
len = sizeof(*wr) + roundup(CHCR_SET_TCB_FIELD_LEN, 16);
ndesc = DIV_ROUND_UP(len, 64);
credits = chcr_txq_avail(&q->q) - ndesc;
if (unlikely(credits < 0)) {
chcr_eth_txq_stop(q);
return NETDEV_TX_BUSY;
}
pos = &q->q.desc[q->q.pidx];
wr = pos;
/* ULPTX wr */
wr->op_to_compl = htonl(FW_WR_OP_V(FW_ULPTX_WR));
wr->cookie = 0;
/* fill len in wr field */
wr->flowid_len16 = htonl(FW_WR_LEN16_V(DIV_ROUND_UP(len, 16)));
pos += sizeof(*wr);
pos = chcr_write_cpl_set_tcb_ulp(tx_info, q, tx_info->tid, pos,
TCB_SND_UNA_RAW_W,
TCB_SND_UNA_RAW_V(TCB_SND_UNA_RAW_M),
TCB_SND_UNA_RAW_V(0), 0);
chcr_txq_advance(&q->q, ndesc);
cxgb4_ring_tx_db(tx_info->adap, &q->q, ndesc);
return 0;
}
/*
* chcr_end_part_handler: This handler will handle the record which
* is complete or if record's end part is received. T6 adapter has a issue that
@@ -1740,7 +1673,9 @@ static int chcr_end_part_handler(struct chcr_ktls_info *tx_info,
struct sge_eth_txq *q, u32 skb_offset,
u32 tls_end_offset, bool last_wr)
{
bool free_skb_if_tx_fails = false;
struct sk_buff *nskb = NULL;
/* check if it is a complete record */
if (tls_end_offset == record->len) {
nskb = skb;
@@ -1763,6 +1698,8 @@ static int chcr_end_part_handler(struct chcr_ktls_info *tx_info,
if (last_wr)
dev_kfree_skb_any(skb);
else
free_skb_if_tx_fails = true;
last_wr = true;
@@ -1774,6 +1711,8 @@
record->num_frags,
(last_wr && tcp_push_no_fin),
mss)) {
if (free_skb_if_tx_fails)
dev_kfree_skb_any(skb);
goto out;
}
tx_info->prev_seq = record->end_seq;
@@ -1910,11 +1849,6 @@ static int chcr_short_record_handler(struct chcr_ktls_info *tx_info,
/* reset tcp_seq as per the prior_data_required len */
tcp_seq -= prior_data_len;
}
/* reset snd una, so the middle record won't send the already
* sent part.
*/
if (chcr_ktls_update_snd_una(tx_info, q))
goto out;
atomic64_inc(&tx_info->adap->ch_ktls_stats.ktls_tx_middle_pkts);
} else {
atomic64_inc(&tx_info->adap->ch_ktls_stats.ktls_tx_start_pkts);
@@ -2015,12 +1949,11 @@ static int chcr_ktls_xmit(struct sk_buff *skb, struct net_device *dev)
* we will send the complete record again.
*/
spin_lock_irqsave(&tx_ctx->base.lock, flags);
do {
int i;
cxgb4_reclaim_completed_tx(adap, &q->q, true);
/* lock taken */
spin_lock_irqsave(&tx_ctx->base.lock, flags);
/* fetch the tls record */
record = tls_get_record(&tx_ctx->base, tcp_seq,
&tx_info->record_no);
@@ -2079,11 +2012,11 @@ static int chcr_ktls_xmit(struct sk_buff *skb, struct net_device *dev)
tls_end_offset, skb_offset,
0);
spin_unlock_irqrestore(&tx_ctx->base.lock, flags);
if (ret) {
/* free the refcount taken earlier */
if (tls_end_offset < data_len)
dev_kfree_skb_any(skb);
spin_unlock_irqrestore(&tx_ctx->base.lock, flags);
goto out;
}
@@ -2093,16 +2026,6 @@ static int chcr_ktls_xmit(struct sk_buff *skb, struct net_device *dev)
continue;
}
/* increase page reference count of the record, so that there
* won't be any chance of page free in middle if in case stack
* receives ACK and try to delete the record.
*/
for (i = 0; i < record->num_frags; i++)
__skb_frag_ref(&record->frags[i]);
/* lock cleared */
spin_unlock_irqrestore(&tx_ctx->base.lock, flags);
/* if a tls record is finishing in this SKB */
if (tls_end_offset <= data_len) {
ret = chcr_end_part_handler(tx_info, skb, record,
@@ -2127,13 +2050,9 @@ static int chcr_ktls_xmit(struct sk_buff *skb, struct net_device *dev)
data_len = 0;
}
/* clear the frag ref count which increased locally before */
for (i = 0; i < record->num_frags; i++) {
/* clear the frag ref count */
__skb_frag_unref(&record->frags[i]);
}
/* if any failure, come out from the loop. */
if (ret) {
spin_unlock_irqrestore(&tx_ctx->base.lock, flags);
if (th->fin)
dev_kfree_skb_any(skb);
@@ -2148,6 +2067,7 @@ static int chcr_ktls_xmit(struct sk_buff *skb, struct net_device *dev)
} while (data_len > 0);
spin_unlock_irqrestore(&tx_ctx->base.lock, flags);
atomic64_inc(&port_stats->ktls_tx_encrypted_packets);
atomic64_add(skb_data_len, &port_stats->ktls_tx_encrypted_bytes);

@@ -1474,8 +1474,10 @@ dm9000_probe(struct platform_device *pdev)
/* Init network device */
ndev = alloc_etherdev(sizeof(struct board_info));
if (!ndev)
return -ENOMEM;
if (!ndev) {
ret = -ENOMEM;
goto out_regulator_disable;
}
SET_NETDEV_DEV(ndev, &pdev->dev);

@@ -1159,19 +1159,13 @@ static int __ibmvnic_open(struct net_device *netdev)
rc = set_link_state(adapter, IBMVNIC_LOGICAL_LNK_UP);
if (rc) {
for (i = 0; i < adapter->req_rx_queues; i++)
napi_disable(&adapter->napi[i]);
ibmvnic_napi_disable(adapter);
release_resources(adapter);
return rc;
}
netif_tx_start_all_queues(netdev);
if (prev_state == VNIC_CLOSED) {
for (i = 0; i < adapter->req_rx_queues; i++)
napi_schedule(&adapter->napi[i]);
}
adapter->state = VNIC_OPEN;
return rc;
}
@@ -1942,7 +1936,7 @@ static int do_reset(struct ibmvnic_adapter *adapter,
u64 old_num_rx_queues, old_num_tx_queues;
u64 old_num_rx_slots, old_num_tx_slots;
struct net_device *netdev = adapter->netdev;
int i, rc;
int rc;
netdev_dbg(adapter->netdev,
"[S:%d FOP:%d] Reset reason %d, reset_state %d\n",
@@ -2088,10 +2082,6 @@ static int do_reset(struct ibmvnic_adapter *adapter,
/* refresh device's multicast list */
ibmvnic_set_multi(netdev);
/* kick napi */
for (i = 0; i < adapter->req_rx_queues; i++)
napi_schedule(&adapter->napi[i]);
if (adapter->reset_reason == VNIC_RESET_FAILOVER ||
adapter->reset_reason == VNIC_RESET_MOBILITY) {
call_netdevice_notifiers(NETDEV_NOTIFY_PEERS, netdev);

@@ -11863,6 +11863,7 @@ static int i40e_sw_init(struct i40e_pf *pf)
{
int err = 0;
int size;
u16 pow;
/* Set default capability flags */
pf->flags = I40E_FLAG_RX_CSUM_ENABLED |
@@ -11881,6 +11882,11 @@ static int i40e_sw_init(struct i40e_pf *pf)
pf->rss_table_size = pf->hw.func_caps.rss_table_size;
pf->rss_size_max = min_t(int, pf->rss_size_max,
pf->hw.func_caps.num_tx_qp);
/* find the next higher power-of-2 of num cpus */
pow = roundup_pow_of_two(num_online_cpus());
pf->rss_size_max = min_t(int, pf->rss_size_max, pow);
if (pf->hw.func_caps.rss) {
pf->flags |= I40E_FLAG_RSS_ENABLED;
pf->alloc_rss_size = min_t(int, pf->rss_size_max,

@@ -747,8 +747,8 @@ ice_cee_to_dcb_cfg(struct ice_aqc_get_cee_dcb_cfg_resp *cee_cfg,
struct ice_port_info *pi)
{
u32 status, tlv_status = le32_to_cpu(cee_cfg->tlv_status);
u32 ice_aqc_cee_status_mask, ice_aqc_cee_status_shift;
u8 i, j, err, sync, oper, app_index, ice_app_sel_type;
u32 ice_aqc_cee_status_mask, ice_aqc_cee_status_shift, j;
u8 i, err, sync, oper, app_index, ice_app_sel_type;
u16 app_prio = le16_to_cpu(cee_cfg->oper_app_prio);
u16 ice_aqc_cee_app_mask, ice_aqc_cee_app_shift;
struct ice_dcbx_cfg *cmp_dcbcfg, *dcbcfg;

@@ -6896,6 +6896,11 @@ static int __maybe_unused ixgbe_resume(struct device *dev_d)
adapter->hw.hw_addr = adapter->io_addr;
err = pci_enable_device_mem(pdev);
if (err) {
e_dev_err("Cannot enable PCI device from suspend\n");
return err;
}
smp_mb__before_atomic();
clear_bit(__IXGBE_DISABLED, &adapter->state);
pci_set_master(pdev);

@@ -387,21 +387,6 @@ enum mlx5e_fec_supported_link_mode {
*_policy = MLX5_GET(pplm_reg, _buf, fec_override_admin_##link); \
} while (0)
#define MLX5E_FEC_OVERRIDE_ADMIN_50G_POLICY(buf, policy, write, link) \
do { \
unsigned long policy_long; \
u16 *__policy = &(policy); \
bool _write = (write); \
\
policy_long = *__policy; \
if (_write && *__policy) \
*__policy = find_first_bit(&policy_long, \
sizeof(policy_long) * BITS_PER_BYTE);\
MLX5E_FEC_OVERRIDE_ADMIN_POLICY(buf, *__policy, _write, link); \
if (!_write && *__policy) \
*__policy = 1 << *__policy; \
} while (0)
/* get/set FEC admin field for a given speed */
static int mlx5e_fec_admin_field(u32 *pplm, u16 *fec_policy, bool write,
enum mlx5e_fec_supported_link_mode link_mode)
@@ -423,16 +408,16 @@ static int mlx5e_fec_admin_field(u32 *pplm, u16 *fec_policy, bool write,
MLX5E_FEC_OVERRIDE_ADMIN_POLICY(pplm, *fec_policy, write, 100g);
break;
case MLX5E_FEC_SUPPORTED_LINK_MODE_50G_1X:
MLX5E_FEC_OVERRIDE_ADMIN_50G_POLICY(pplm, *fec_policy, write, 50g_1x);
MLX5E_FEC_OVERRIDE_ADMIN_POLICY(pplm, *fec_policy, write, 50g_1x);
break;
case MLX5E_FEC_SUPPORTED_LINK_MODE_100G_2X:
MLX5E_FEC_OVERRIDE_ADMIN_50G_POLICY(pplm, *fec_policy, write, 100g_2x);
MLX5E_FEC_OVERRIDE_ADMIN_POLICY(pplm, *fec_policy, write, 100g_2x);
break;
case MLX5E_FEC_SUPPORTED_LINK_MODE_200G_4X:
MLX5E_FEC_OVERRIDE_ADMIN_50G_POLICY(pplm, *fec_policy, write, 200g_4x);
MLX5E_FEC_OVERRIDE_ADMIN_POLICY(pplm, *fec_policy, write, 200g_4x);
break;
case MLX5E_FEC_SUPPORTED_LINK_MODE_400G_8X:
MLX5E_FEC_OVERRIDE_ADMIN_50G_POLICY(pplm, *fec_policy, write, 400g_8x);
MLX5E_FEC_OVERRIDE_ADMIN_POLICY(pplm, *fec_policy, write, 400g_8x);
break;
default:
return -EINVAL;

@@ -2196,6 +2196,9 @@ static int mlx5e_flower_parse_meta(struct net_device *filter_dev,
return 0;
flow_rule_match_meta(rule, &match);
if (!match.mask->ingress_ifindex)
return 0;
if (match.mask->ingress_ifindex != 0xFFFFFFFF) {
NL_SET_ERR_MSG_MOD(extack, "Unsupported ingress ifindex mask");
return -EOPNOTSUPP;

@@ -2378,13 +2378,14 @@ static void r8168b_1_hw_jumbo_disable(struct rtl8169_private *tp)
static void rtl_jumbo_config(struct rtl8169_private *tp)
{
bool jumbo = tp->dev->mtu > ETH_DATA_LEN;
int readrq = 4096;
rtl_unlock_config_regs(tp);
switch (tp->mac_version) {
case RTL_GIGA_MAC_VER_12:
case RTL_GIGA_MAC_VER_17:
if (jumbo) {
pcie_set_readrq(tp->pci_dev, 512);
readrq = 512;
r8168b_1_hw_jumbo_enable(tp);
} else {
r8168b_1_hw_jumbo_disable(tp);
@@ -2392,7 +2393,7 @@ static void rtl_jumbo_config(struct rtl8169_private *tp)
break;
case RTL_GIGA_MAC_VER_18 ... RTL_GIGA_MAC_VER_26:
if (jumbo) {
pcie_set_readrq(tp->pci_dev, 512);
readrq = 512;
r8168c_hw_jumbo_enable(tp);
} else {
r8168c_hw_jumbo_disable(tp);
@@ -2417,8 +2418,15 @@ static void rtl_jumbo_config(struct rtl8169_private *tp)
}
rtl_lock_config_regs(tp);
if (!jumbo && pci_is_pcie(tp->pci_dev) && tp->supports_gmii)
pcie_set_readrq(tp->pci_dev, 4096);
if (pci_is_pcie(tp->pci_dev) && tp->supports_gmii)
pcie_set_readrq(tp->pci_dev, readrq);
/* Chip doesn't support pause in jumbo mode */
linkmode_mod_bit(ETHTOOL_LINK_MODE_Pause_BIT,
tp->phydev->advertising, !jumbo);
linkmode_mod_bit(ETHTOOL_LINK_MODE_Asym_Pause_BIT,
tp->phydev->advertising, !jumbo);
phy_start_aneg(tp->phydev);
}
DECLARE_RTL_COND(rtl_chipcmd_cond)
@@ -4710,8 +4718,6 @@ static int r8169_phy_connect(struct rtl8169_private *tp)
if (!tp->supports_gmii)
phy_set_max_speed(phydev, SPEED_100);
phy_support_asym_pause(phydev);
phy_attached_info(phydev);
return 0;

@@ -2913,9 +2913,35 @@ static struct phy_driver marvell_drivers[] = {
.get_stats = marvell_get_stats,
},
{
-.phy_id = MARVELL_PHY_ID_88E6390,
+.phy_id = MARVELL_PHY_ID_88E6341_FAMILY,
.phy_id_mask = MARVELL_PHY_ID_MASK,
-.name = "Marvell 88E6390",
+.name = "Marvell 88E6341 Family",
+/* PHY_GBIT_FEATURES */
+.flags = PHY_POLL_CABLE_TEST,
+.probe = m88e1510_probe,
+.config_init = marvell_config_init,
+.config_aneg = m88e6390_config_aneg,
+.read_status = marvell_read_status,
+.ack_interrupt = marvell_ack_interrupt,
+.config_intr = marvell_config_intr,
+.did_interrupt = m88e1121_did_interrupt,
+.resume = genphy_resume,
+.suspend = genphy_suspend,
+.read_page = marvell_read_page,
+.write_page = marvell_write_page,
+.get_sset_count = marvell_get_sset_count,
+.get_strings = marvell_get_strings,
+.get_stats = marvell_get_stats,
+.get_tunable = m88e1540_get_tunable,
+.set_tunable = m88e1540_set_tunable,
+.cable_test_start = marvell_vct7_cable_test_start,
+.cable_test_tdr_start = marvell_vct5_cable_test_tdr_start,
+.cable_test_get_status = marvell_vct7_cable_test_get_status,
+},
+{
+.phy_id = MARVELL_PHY_ID_88E6390_FAMILY,
+.phy_id_mask = MARVELL_PHY_ID_MASK,
+.name = "Marvell 88E6390 Family",
/* PHY_GBIT_FEATURES */
.flags = PHY_POLL_CABLE_TEST,
.probe = m88e6390_probe,
@@ -3001,7 +3027,8 @@ static struct mdio_device_id __maybe_unused marvell_tbl[] = {
{ MARVELL_PHY_ID_88E1540, MARVELL_PHY_ID_MASK },
{ MARVELL_PHY_ID_88E1545, MARVELL_PHY_ID_MASK },
{ MARVELL_PHY_ID_88E3016, MARVELL_PHY_ID_MASK },
-{ MARVELL_PHY_ID_88E6390, MARVELL_PHY_ID_MASK },
+{ MARVELL_PHY_ID_88E6341_FAMILY, MARVELL_PHY_ID_MASK },
+{ MARVELL_PHY_ID_88E6390_FAMILY, MARVELL_PHY_ID_MASK },
{ MARVELL_PHY_ID_88E1340S, MARVELL_PHY_ID_MASK },
{ MARVELL_PHY_ID_88E1548P, MARVELL_PHY_ID_MASK },
{ }


@@ -684,6 +684,7 @@ static const struct iwl_dev_info iwl_dev_info_table[] = {
IWL_DEV_INFO(0x4DF0, 0x1652, killer1650i_2ax_cfg_qu_b0_hr_b0, NULL),
IWL_DEV_INFO(0x4DF0, 0x2074, iwl_ax201_cfg_qu_hr, NULL),
IWL_DEV_INFO(0x4DF0, 0x4070, iwl_ax201_cfg_qu_hr, NULL),
+IWL_DEV_INFO(0x4DF0, 0x6074, iwl_ax201_cfg_qu_hr, NULL),
_IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
IWL_CFG_MAC_TYPE_PU, IWL_CFG_ANY,


@@ -1181,6 +1181,7 @@ static int iwl_pcie_enqueue_hcmd(struct iwl_trans *trans,
u32 cmd_pos;
const u8 *cmddata[IWL_MAX_CMD_TBS_PER_TFD];
u16 cmdlen[IWL_MAX_CMD_TBS_PER_TFD];
+unsigned long flags;
if (WARN(!trans->wide_cmd_header &&
group_id > IWL_ALWAYS_LONG_GROUP,
@@ -1264,10 +1265,10 @@ static int iwl_pcie_enqueue_hcmd(struct iwl_trans *trans,
goto free_dup_buf;
}
-spin_lock_bh(&txq->lock);
+spin_lock_irqsave(&txq->lock, flags);
if (iwl_txq_space(trans, txq) < ((cmd->flags & CMD_ASYNC) ? 2 : 1)) {
-spin_unlock_bh(&txq->lock);
+spin_unlock_irqrestore(&txq->lock, flags);
IWL_ERR(trans, "No space in command queue\n");
iwl_op_mode_cmd_queue_full(trans->op_mode);
@@ -1427,7 +1428,7 @@ static int iwl_pcie_enqueue_hcmd(struct iwl_trans *trans,
unlock_reg:
spin_unlock(&trans_pcie->reg_lock);
out:
-spin_unlock_bh(&txq->lock);
+spin_unlock_irqrestore(&txq->lock, flags);
free_dup_buf:
if (idx < 0)
kfree(dup_buf);
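
The switch from the _bh to the irqsave/irqrestore lock variants matters because this path can now be reached with hard interrupts already disabled, and spin_unlock_bh() would unconditionally re-enable bottom halves there. A minimal sketch of the safe pattern, using a hypothetical lock and function rather than the driver's own names:

	#include <linux/spinlock.h>

	static DEFINE_SPINLOCK(demo_lock);

	/* Safe from any context: saves the current IRQ state and restores
	 * it on unlock, instead of assuming what the state was (as the
	 * *_bh variants do).
	 */
	static void demo_enqueue(void)
	{
		unsigned long flags;

		spin_lock_irqsave(&demo_lock, flags);
		/* ... touch the shared command queue ... */
		spin_unlock_irqrestore(&demo_lock, flags);
	}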


@@ -1239,6 +1239,11 @@ int nvdimm_has_flush(struct nd_region *nd_region)
|| !IS_ENABLED(CONFIG_ARCH_HAS_PMEM_API))
return -ENXIO;
+/* Test if an explicit flush function is defined */
+if (test_bit(ND_REGION_ASYNC, &nd_region->flags) && nd_region->flush)
+return 1;
+/* Test if any flush hints for the region are available */
for (i = 0; i < nd_region->ndr_mappings; i++) {
struct nd_mapping *nd_mapping = &nd_region->mapping[i];
struct nvdimm *nvdimm = nd_mapping->nvdimm;
@@ -1249,8 +1254,8 @@ int nvdimm_has_flush(struct nd_region *nd_region)
}
/*
-* The platform defines dimm devices without hints, assume
-* platform persistence mechanism like ADR
+* The platform defines dimm devices without hints nor explicit flush,
+* assume platform persistence mechanism like ADR
*/
return 0;
}


@@ -201,18 +201,17 @@ static unsigned int sas_ata_qc_issue(struct ata_queued_cmd *qc)
memcpy(task->ata_task.atapi_packet, qc->cdb, qc->dev->cdb_len);
task->total_xfer_len = qc->nbytes;
task->num_scatter = qc->n_elem;
+task->data_dir = qc->dma_dir;
+} else if (qc->tf.protocol == ATA_PROT_NODATA) {
+task->data_dir = DMA_NONE;
} else {
for_each_sg(qc->sg, sg, qc->n_elem, si)
xfer += sg_dma_len(sg);
task->total_xfer_len = xfer;
task->num_scatter = si;
-}
-if (qc->tf.protocol == ATA_PROT_NODATA)
-task->data_dir = DMA_NONE;
-else
task->data_dir = qc->dma_dir;
+}
task->scatter = qc->sg;
task->ata_task.retry_count = 1;
task->task_state_flags = SAS_TASK_STATE_PENDING;


@@ -541,7 +541,7 @@ int srp_reconnect_rport(struct srp_rport *rport)
res = mutex_lock_interruptible(&rport->mutex);
if (res)
goto out;
-if (rport->state != SRP_RPORT_FAIL_FAST)
+if (rport->state != SRP_RPORT_FAIL_FAST && rport->state != SRP_RPORT_LOST)
/*
* sdev state must be SDEV_TRANSPORT_OFFLINE, transition
* to SDEV_BLOCK is illegal. Calling scsi_target_unblock()


@@ -1658,6 +1658,8 @@ static int vfio_pci_mmap(void *device_data, struct vm_area_struct *vma)
index = vma->vm_pgoff >> (VFIO_PCI_OFFSET_SHIFT - PAGE_SHIFT);
if (index >= VFIO_PCI_NUM_REGIONS + vdev->num_regions)
return -EINVAL;
+if (vma->vm_end < vma->vm_start)
+return -EINVAL;
if ((vma->vm_flags & VM_SHARED) == 0)
@@ -1666,7 +1668,7 @@ static int vfio_pci_mmap(void *device_data, struct vm_area_struct *vma)
int regnum = index - VFIO_PCI_NUM_REGIONS;
struct vfio_pci_region *region = vdev->region + regnum;
-if (region && region->ops && region->ops->mmap &&
+if (region->ops && region->ops->mmap &&
(region->flags & VFIO_REGION_INFO_FLAG_MMAP))
return region->ops->mmap(vdev, region, vma);
return -EINVAL;
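
The new vm_end < vm_start test rejects a degenerate VMA before any size arithmetic runs; without it, a later vm_end - vm_start subtraction would wrap to a huge length. A hedged sketch of the guard in isolation (a demo helper, not the driver's code):

	#include <linux/mm.h>

	/* Returns the mapping size, or 0 for a corrupt/degenerate range */
	static unsigned long demo_vma_len(const struct vm_area_struct *vma)
	{
		if (vma->vm_end < vma->vm_start)
			return 0;	/* subtraction below would underflow */
		return vma->vm_end - vma->vm_start;
	}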


@@ -150,6 +150,9 @@ static int fillonedir(struct dir_context *ctx, const char *name, int namlen,
if (buf->result)
return -EINVAL;
+buf->result = verify_dirent_name(name, namlen);
+if (buf->result < 0)
+return buf->result;
d_ino = ino;
if (sizeof(d_ino) < sizeof(ino) && d_ino != ino) {
buf->result = -EOVERFLOW;
@@ -405,6 +408,9 @@ static int compat_fillonedir(struct dir_context *ctx, const char *name,
if (buf->result)
return -EINVAL;
+buf->result = verify_dirent_name(name, namlen);
+if (buf->result < 0)
+return buf->result;
d_ino = ino;
if (sizeof(d_ino) < sizeof(ino) && d_ino != ino) {
buf->result = -EOVERFLOW;
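
Both hunks apply the same rule: store the validation result and return it before the entry is used. A small self-contained sketch of that verify-then-use shape, with hypothetical demo_* names standing in for the readdir callbacks:

	#include <linux/errno.h>

	struct demo_buf { int result; };

	/* Reject names the caller must never see (embedded '/' or NUL). */
	static int demo_verify_name(const char *name, int namlen)
	{
		int i;

		for (i = 0; i < namlen; i++)
			if (name[i] == '/' || name[i] == '\0')
				return -EINVAL;
		return 0;
	}

	static int demo_fill(struct demo_buf *buf, const char *name, int namlen)
	{
		buf->result = demo_verify_name(name, namlen);
		if (buf->result < 0)
			return buf->result;	/* stop before using the entry */
		/* ... safe to copy the dirent from here on ... */
		return 0;
	}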


@@ -25,11 +25,12 @@
#define MARVELL_PHY_ID_88X3310 0x002b09a0
#define MARVELL_PHY_ID_88E2110 0x002b09b0
-/* The MV88e6390 Ethernet switch contains embedded PHYs. These PHYs do
+/* These Ethernet switch families contain embedded PHYs, but they do
* not have a model ID. So the switch driver traps reads to the ID2
* register and returns the switch family ID
*/
-#define MARVELL_PHY_ID_88E6390 0x01410f90
+#define MARVELL_PHY_ID_88E6341_FAMILY 0x01410f41
+#define MARVELL_PHY_ID_88E6390_FAMILY 0x01410f90
#define MARVELL_PHY_FAMILY_ID(id) ((id) >> 4)


@@ -52,8 +52,9 @@ extern void *arpt_alloc_initial_table(const struct xt_table *);
int arpt_register_table(struct net *net, const struct xt_table *table,
const struct arpt_replace *repl,
const struct nf_hook_ops *ops, struct xt_table **res);
-void arpt_unregister_table(struct net *net, struct xt_table *table,
-const struct nf_hook_ops *ops);
+void arpt_unregister_table(struct net *net, struct xt_table *table);
+void arpt_unregister_table_pre_exit(struct net *net, struct xt_table *table,
+const struct nf_hook_ops *ops);
extern unsigned int arpt_do_table(struct sk_buff *skb,
const struct nf_hook_state *state,
struct xt_table *table);


@@ -110,8 +110,9 @@ extern int ebt_register_table(struct net *net,
const struct ebt_table *table,
const struct nf_hook_ops *ops,
struct ebt_table **res);
-extern void ebt_unregister_table(struct net *net, struct ebt_table *table,
-const struct nf_hook_ops *);
+extern void ebt_unregister_table(struct net *net, struct ebt_table *table);
+void ebt_unregister_table_pre_exit(struct net *net, const char *tablename,
+const struct nf_hook_ops *ops);
extern unsigned int ebt_do_table(struct sk_buff *skb,
const struct nf_hook_state *state,
struct ebt_table *table);


@@ -187,8 +187,8 @@ struct dsa_completion_record {
uint32_t rsvd2:8;
};
-uint16_t delta_rec_size;
-uint16_t crc_val;
+uint32_t delta_rec_size;
+uint32_t crc_val;
/* DIF check & strip */
struct {
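
Because this struct describes a hardware completion record, field widths are ABI: the device spec defines both fields as 32-bit, so the earlier uint16_t pair shifted every subsequent field. A compile-time guard in the same spirit, with illustrative offsets rather than the real DSA layout:

	#include <stdint.h>
	#include <stddef.h>

	struct demo_rec {
		uint32_t delta_rec_size;
		uint32_t crc_val;
	};

	/* Fail the build if the layout ever drifts from the spec. */
	_Static_assert(offsetof(struct demo_rec, crc_val) == 4,
		       "crc_val must sit at byte 4");
	_Static_assert(sizeof(struct demo_rec) == 8, "record is 8 bytes");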


@@ -5328,12 +5328,26 @@ static struct bpf_insn_aux_data *cur_aux(struct bpf_verifier_env *env)
return &env->insn_aux_data[env->insn_idx];
}
+enum {
+REASON_BOUNDS = -1,
+REASON_TYPE = -2,
+REASON_PATHS = -3,
+REASON_LIMIT = -4,
+REASON_STACK = -5,
+};
static int retrieve_ptr_limit(const struct bpf_reg_state *ptr_reg,
-u32 *ptr_limit, u8 opcode, bool off_is_neg)
+const struct bpf_reg_state *off_reg,
+u32 *alu_limit, u8 opcode)
{
+bool off_is_neg = off_reg->smin_value < 0;
bool mask_to_left = (opcode == BPF_ADD && off_is_neg) ||
(opcode == BPF_SUB && !off_is_neg);
-u32 off, max;
+u32 off, max = 0, ptr_limit = 0;
+if (!tnum_is_const(off_reg->var_off) &&
+(off_reg->smin_value < 0) != (off_reg->smax_value < 0))
+return REASON_BOUNDS;
switch (ptr_reg->type) {
case PTR_TO_STACK:
@@ -5346,22 +5360,27 @@ static int retrieve_ptr_limit(const struct bpf_reg_state *ptr_reg,
*/
off = ptr_reg->off + ptr_reg->var_off.value;
if (mask_to_left)
-*ptr_limit = MAX_BPF_STACK + off;
+ptr_limit = MAX_BPF_STACK + off;
else
-*ptr_limit = -off - 1;
-return *ptr_limit >= max ? -ERANGE : 0;
+ptr_limit = -off - 1;
+break;
case PTR_TO_MAP_VALUE:
max = ptr_reg->map_ptr->value_size;
if (mask_to_left) {
-*ptr_limit = ptr_reg->umax_value + ptr_reg->off;
+ptr_limit = ptr_reg->umax_value + ptr_reg->off;
} else {
off = ptr_reg->smin_value + ptr_reg->off;
-*ptr_limit = ptr_reg->map_ptr->value_size - off - 1;
+ptr_limit = ptr_reg->map_ptr->value_size - off - 1;
}
-return *ptr_limit >= max ? -ERANGE : 0;
+break;
default:
-return -EINVAL;
+return REASON_TYPE;
}
+if (ptr_limit >= max)
+return REASON_LIMIT;
+*alu_limit = ptr_limit;
+return 0;
}
static bool can_skip_alu_sanitation(const struct bpf_verifier_env *env,
@@ -5379,7 +5398,7 @@ static int update_alu_sanitation_state(struct bpf_insn_aux_data *aux,
if (aux->alu_state &&
(aux->alu_state != alu_state ||
aux->alu_limit != alu_limit))
-return -EACCES;
+return REASON_PATHS;
/* Corresponding fixup done in fixup_bpf_calls(). */
aux->alu_state = alu_state;
@@ -5398,14 +5417,20 @@ static int sanitize_val_alu(struct bpf_verifier_env *env,
return update_alu_sanitation_state(aux, BPF_ALU_NON_POINTER, 0);
}
+static bool sanitize_needed(u8 opcode)
+{
+return opcode == BPF_ADD || opcode == BPF_SUB;
+}
static int sanitize_ptr_alu(struct bpf_verifier_env *env,
struct bpf_insn *insn,
const struct bpf_reg_state *ptr_reg,
-struct bpf_reg_state *dst_reg,
-bool off_is_neg)
+const struct bpf_reg_state *off_reg,
+struct bpf_reg_state *dst_reg)
{
struct bpf_verifier_state *vstate = env->cur_state;
struct bpf_insn_aux_data *aux = cur_aux(env);
+bool off_is_neg = off_reg->smin_value < 0;
bool ptr_is_dst_reg = ptr_reg == dst_reg;
u8 opcode = BPF_OP(insn->code);
u32 alu_state, alu_limit;
@@ -5427,7 +5452,7 @@ static int sanitize_ptr_alu(struct bpf_verifier_env *env,
alu_state |= ptr_is_dst_reg ?
BPF_ALU_SANITIZE_SRC : BPF_ALU_SANITIZE_DST;
-err = retrieve_ptr_limit(ptr_reg, &alu_limit, opcode, off_is_neg);
+err = retrieve_ptr_limit(ptr_reg, off_reg, &alu_limit, opcode);
if (err < 0)
return err;
@@ -5451,7 +5476,46 @@ do_sim:
ret = push_stack(env, env->insn_idx + 1, env->insn_idx, true);
if (!ptr_is_dst_reg && ret)
*dst_reg = tmp;
-return !ret ? -EFAULT : 0;
+return !ret ? REASON_STACK : 0;
}
+static int sanitize_err(struct bpf_verifier_env *env,
+const struct bpf_insn *insn, int reason,
+const struct bpf_reg_state *off_reg,
+const struct bpf_reg_state *dst_reg)
+{
+static const char *err = "pointer arithmetic with it prohibited for !root";
+const char *op = BPF_OP(insn->code) == BPF_ADD ? "add" : "sub";
+u32 dst = insn->dst_reg, src = insn->src_reg;
+switch (reason) {
+case REASON_BOUNDS:
+verbose(env, "R%d has unknown scalar with mixed signed bounds, %s\n",
+off_reg == dst_reg ? dst : src, err);
+break;
+case REASON_TYPE:
+verbose(env, "R%d has pointer with unsupported alu operation, %s\n",
+off_reg == dst_reg ? src : dst, err);
+break;
+case REASON_PATHS:
+verbose(env, "R%d tried to %s from different maps, paths or scalars, %s\n",
+dst, op, err);
+break;
+case REASON_LIMIT:
+verbose(env, "R%d tried to %s beyond pointer bounds, %s\n",
+dst, op, err);
+break;
+case REASON_STACK:
+verbose(env, "R%d could not be pushed for speculative verification, %s\n",
+dst, err);
+break;
+default:
+verbose(env, "verifier internal error: unknown reason (%d)\n",
+reason);
+break;
+}
+return -EACCES;
+}
/* Handles arithmetic on a pointer and a scalar: computes new min/max and var_off.
@@ -5472,8 +5536,8 @@ static int adjust_ptr_min_max_vals(struct bpf_verifier_env *env,
smin_ptr = ptr_reg->smin_value, smax_ptr = ptr_reg->smax_value;
u64 umin_val = off_reg->umin_value, umax_val = off_reg->umax_value,
umin_ptr = ptr_reg->umin_value, umax_ptr = ptr_reg->umax_value;
-u32 dst = insn->dst_reg, src = insn->src_reg;
u8 opcode = BPF_OP(insn->code);
+u32 dst = insn->dst_reg;
int ret;
dst_reg = &regs[dst];
@@ -5521,13 +5585,6 @@ static int adjust_ptr_min_max_vals(struct bpf_verifier_env *env,
verbose(env, "R%d pointer arithmetic on %s prohibited\n",
dst, reg_type_str[ptr_reg->type]);
return -EACCES;
-case PTR_TO_MAP_VALUE:
-if (!env->allow_ptr_leaks && !known && (smin_val < 0) != (smax_val < 0)) {
-verbose(env, "R%d has unknown scalar with mixed signed bounds, pointer arithmetic with it prohibited for !root\n",
-off_reg == dst_reg ? dst : src);
-return -EACCES;
-}
-fallthrough;
default:
break;
}
@@ -5547,11 +5604,10 @@ static int adjust_ptr_min_max_vals(struct bpf_verifier_env *env,
switch (opcode) {
case BPF_ADD:
-ret = sanitize_ptr_alu(env, insn, ptr_reg, dst_reg, smin_val < 0);
-if (ret < 0) {
-verbose(env, "R%d tried to add from different maps, paths, or prohibited types\n", dst);
-return ret;
-}
+ret = sanitize_ptr_alu(env, insn, ptr_reg, off_reg, dst_reg);
+if (ret < 0)
+return sanitize_err(env, insn, ret, off_reg, dst_reg);
/* We can take a fixed offset as long as it doesn't overflow
* the s32 'off' field
*/
@@ -5602,11 +5658,10 @@ static int adjust_ptr_min_max_vals(struct bpf_verifier_env *env,
}
break;
case BPF_SUB:
-ret = sanitize_ptr_alu(env, insn, ptr_reg, dst_reg, smin_val < 0);
-if (ret < 0) {
-verbose(env, "R%d tried to sub from different maps, paths, or prohibited types\n", dst);
-return ret;
-}
+ret = sanitize_ptr_alu(env, insn, ptr_reg, off_reg, dst_reg);
+if (ret < 0)
+return sanitize_err(env, insn, ret, off_reg, dst_reg);
if (dst_reg == off_reg) {
/* scalar -= pointer. Creates an unknown scalar */
verbose(env, "R%d tried to subtract pointer from scalar\n",
@@ -6296,9 +6351,8 @@ static int adjust_scalar_min_max_vals(struct bpf_verifier_env *env,
s32 s32_min_val, s32_max_val;
u32 u32_min_val, u32_max_val;
u64 insn_bitness = (BPF_CLASS(insn->code) == BPF_ALU64) ? 64 : 32;
-u32 dst = insn->dst_reg;
-int ret;
bool alu32 = (BPF_CLASS(insn->code) != BPF_ALU64);
+int ret;
smin_val = src_reg.smin_value;
smax_val = src_reg.smax_value;
@@ -6340,6 +6394,12 @@ static int adjust_scalar_min_max_vals(struct bpf_verifier_env *env,
return 0;
}
+if (sanitize_needed(opcode)) {
+ret = sanitize_val_alu(env, insn);
+if (ret < 0)
+return sanitize_err(env, insn, ret, NULL, NULL);
+}
/* Calculate sign/unsigned bounds and tnum for alu32 and alu64 bit ops.
* There are two classes of instructions: The first class we track both
* alu32 and alu64 sign/unsigned bounds independently this provides the
@@ -6356,21 +6416,11 @@ static int adjust_scalar_min_max_vals(struct bpf_verifier_env *env,
*/
switch (opcode) {
case BPF_ADD:
-ret = sanitize_val_alu(env, insn);
-if (ret < 0) {
-verbose(env, "R%d tried to add from different pointers or scalars\n", dst);
-return ret;
-}
scalar32_min_max_add(dst_reg, &src_reg);
scalar_min_max_add(dst_reg, &src_reg);
dst_reg->var_off = tnum_add(dst_reg->var_off, src_reg.var_off);
break;
case BPF_SUB:
-ret = sanitize_val_alu(env, insn);
-if (ret < 0) {
-verbose(env, "R%d tried to sub from different pointers or scalars\n", dst);
-return ret;
-}
scalar32_min_max_sub(dst_reg, &src_reg);
scalar_min_max_sub(dst_reg, &src_reg);
dst_reg->var_off = tnum_sub(dst_reg->var_off, src_reg.var_off);
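
The refactor routes every sanitation failure through one translator, sanitize_err(), so individual checks return symbolic reason codes instead of formatting their own messages. The shape of that pattern, reduced to a standalone sketch with invented messages:

	#include <stdio.h>

	enum { REASON_BOUNDS = -1, REASON_TYPE = -2 };

	/* One place maps reason codes to user-facing text. */
	static int demo_report(int reason)
	{
		switch (reason) {
		case REASON_BOUNDS:
			fprintf(stderr, "value has mixed signed bounds\n");
			break;
		case REASON_TYPE:
			fprintf(stderr, "unsupported pointer type\n");
			break;
		default:
			fprintf(stderr, "unknown reason (%d)\n", reason);
			break;
		}
		return -13;	/* one error code to the caller, like -EACCES */
	}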


@@ -930,7 +930,8 @@ static bool assign_lock_key(struct lockdep_map *lock)
/* Debug-check: all keys must be persistent! */
debug_locks_off();
pr_err("INFO: trying to register non-static key.\n");
-pr_err("the code is fine but needs lockdep annotation.\n");
+pr_err("The code is fine but needs lockdep annotation, or maybe\n");
+pr_err("you didn't initialize this object before use?\n");
pr_err("turning off the locking correctness validator.\n");
dump_stack();
return false;


@@ -1303,7 +1303,7 @@ config LOCKDEP
bool
depends on DEBUG_KERNEL && LOCK_DEBUGGING_SUPPORT
select STACKTRACE
-select FRAME_POINTER if !MIPS && !PPC && !ARM && !S390 && !MICROBLAZE && !ARC && !X86
+depends on FRAME_POINTER || MIPS || PPC || S390 || MICROBLAZE || ARM || ARC || X86
select KALLSYMS
select KALLSYMS_ALL
@@ -1597,7 +1597,7 @@ config LATENCYTOP
depends on DEBUG_KERNEL
depends on STACKTRACE_SUPPORT
depends on PROC_FS
-select FRAME_POINTER if !MIPS && !PPC && !S390 && !MICROBLAZE && !ARM && !ARC && !X86
+depends on FRAME_POINTER || MIPS || PPC || S390 || MICROBLAZE || ARM || ARC || X86
select KALLSYMS
select KALLSYMS_ALL
select STACKTRACE
@@ -1850,7 +1850,7 @@ config FAULT_INJECTION_STACKTRACE_FILTER
depends on FAULT_INJECTION_DEBUG_FS && STACKTRACE_SUPPORT
depends on !X86_64
select STACKTRACE
-select FRAME_POINTER if !MIPS && !PPC && !S390 && !MICROBLAZE && !ARM && !ARC && !X86
+depends on FRAME_POINTER || MIPS || PPC || S390 || MICROBLAZE || ARM || ARC || X86
help
Provide stacktrace filter for fault-injection capabilities


@@ -111,7 +111,7 @@ static int ptdump_pte_entry(pte_t *pte, unsigned long addr,
unsigned long next, struct mm_walk *walk)
{
struct ptdump_state *st = walk->private;
-pte_t val = READ_ONCE(*pte);
+pte_t val = ptep_get(pte);
if (st->effective_prot)
st->effective_prot(st, 4, pte_val(val));


@@ -105,14 +105,20 @@ static int __net_init broute_net_init(struct net *net)
&net->xt.broute_table);
}
+static void __net_exit broute_net_pre_exit(struct net *net)
+{
+ebt_unregister_table_pre_exit(net, "broute", &ebt_ops_broute);
+}
static void __net_exit broute_net_exit(struct net *net)
{
-ebt_unregister_table(net, net->xt.broute_table, &ebt_ops_broute);
+ebt_unregister_table(net, net->xt.broute_table);
}
static struct pernet_operations broute_net_ops = {
.init = broute_net_init,
.exit = broute_net_exit,
+.pre_exit = broute_net_pre_exit,
};
static int __init ebtable_broute_init(void)


@@ -99,14 +99,20 @@ static int __net_init frame_filter_net_init(struct net *net)
&net->xt.frame_filter);
}
+static void __net_exit frame_filter_net_pre_exit(struct net *net)
+{
+ebt_unregister_table_pre_exit(net, "filter", ebt_ops_filter);
+}
static void __net_exit frame_filter_net_exit(struct net *net)
{
-ebt_unregister_table(net, net->xt.frame_filter, ebt_ops_filter);
+ebt_unregister_table(net, net->xt.frame_filter);
}
static struct pernet_operations frame_filter_net_ops = {
.init = frame_filter_net_init,
.exit = frame_filter_net_exit,
+.pre_exit = frame_filter_net_pre_exit,
};
static int __init ebtable_filter_init(void)


@@ -99,14 +99,20 @@ static int __net_init frame_nat_net_init(struct net *net)
&net->xt.frame_nat);
}
+static void __net_exit frame_nat_net_pre_exit(struct net *net)
+{
+ebt_unregister_table_pre_exit(net, "nat", ebt_ops_nat);
+}
static void __net_exit frame_nat_net_exit(struct net *net)
{
-ebt_unregister_table(net, net->xt.frame_nat, ebt_ops_nat);
+ebt_unregister_table(net, net->xt.frame_nat);
}
static struct pernet_operations frame_nat_net_ops = {
.init = frame_nat_net_init,
.exit = frame_nat_net_exit,
+.pre_exit = frame_nat_net_pre_exit,
};
static int __init ebtable_nat_init(void)


@@ -1232,10 +1232,34 @@ out:
return ret;
}
-void ebt_unregister_table(struct net *net, struct ebt_table *table,
-const struct nf_hook_ops *ops)
+static struct ebt_table *__ebt_find_table(struct net *net, const char *name)
+{
+struct ebt_table *t;
+mutex_lock(&ebt_mutex);
+list_for_each_entry(t, &net->xt.tables[NFPROTO_BRIDGE], list) {
+if (strcmp(t->name, name) == 0) {
+mutex_unlock(&ebt_mutex);
+return t;
+}
+}
+mutex_unlock(&ebt_mutex);
+return NULL;
+}
+void ebt_unregister_table_pre_exit(struct net *net, const char *name, const struct nf_hook_ops *ops)
+{
+struct ebt_table *table = __ebt_find_table(net, name);
+if (table)
+nf_unregister_net_hooks(net, ops, hweight32(table->valid_hooks));
+}
+EXPORT_SYMBOL(ebt_unregister_table_pre_exit);
+void ebt_unregister_table(struct net *net, struct ebt_table *table)
{
-nf_unregister_net_hooks(net, ops, hweight32(table->valid_hooks));
__ebt_unregister_table(net, table);
}
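
The netfilter changes above split netns teardown into two phases: .pre_exit unregisters the hooks while packets may still be in flight, and .exit frees the table once the namespace is quiescent. A rough sketch of the split (the demo_* names are placeholders, not the ebtables API):

	#include <net/net_namespace.h>

	static void demo_unregister_hooks(struct net *net);
	static void demo_free_table(struct net *net);

	/* Phase 1: stop new packets from entering the hooks. */
	static void demo_net_pre_exit(struct net *net)
	{
		demo_unregister_hooks(net);
	}

	/* Phase 2: nothing traverses the table anymore; free it. */
	static void demo_net_exit(struct net *net)
	{
		demo_free_table(net);
	}

	static struct pernet_operations demo_net_ops = {
		.pre_exit = demo_net_pre_exit,
		.exit     = demo_net_exit,
	};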


@@ -5867,7 +5867,8 @@ static void skb_gro_reset_offset(struct sk_buff *skb)
NAPI_GRO_CB(skb)->frag0_len = 0;
if (!skb_headlen(skb) && pinfo->nr_frags &&
-!PageHighMem(skb_frag_page(frag0))) {
+!PageHighMem(skb_frag_page(frag0)) &&
+(!NET_IP_ALIGN || !(skb_frag_off(frag0) & 3))) {
NAPI_GRO_CB(skb)->frag0 = skb_frag_address(frag0);
NAPI_GRO_CB(skb)->frag0_len = min_t(unsigned int,
skb_frag_size(frag0),
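
On strict-alignment architectures NET_IP_ALIGN is 2, so a fragment that does not start on a 4-byte boundary would leave the IP header misaligned in GRO's frag0 fast path. The added test, shown in isolation (assumes a NET_IP_ALIGN != 0 build):

	#include <stdbool.h>
	#include <stdint.h>

	/* True if a fragment offset is safe for the frag0 fast path. */
	static bool demo_frag0_ok(uint32_t frag_off)
	{
		return (frag_off & 3) == 0;
	}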


@@ -1380,7 +1380,7 @@ static int __neigh_update(struct neighbour *neigh, const u8 *lladdr,
* we can reinject the packet there.
*/
n2 = NULL;
-if (dst) {
+if (dst && dst->obsolete != DST_OBSOLETE_DEAD) {
n2 = dst_neigh_lookup_skb(dst, skb);
if (n2)
n1 = n2;


@@ -38,16 +38,16 @@ static int pause_prepare_data(const struct ethnl_req_info *req_base,
if (!dev->ethtool_ops->get_pauseparam)
return -EOPNOTSUPP;
+ethtool_stats_init((u64 *)&data->pausestat,
+sizeof(data->pausestat) / 8);
ret = ethnl_ops_begin(dev);
if (ret < 0)
return ret;
dev->ethtool_ops->get_pauseparam(dev, &data->pauseparam);
if (req_base->flags & ETHTOOL_FLAG_STATS &&
-dev->ethtool_ops->get_pause_stats) {
-ethtool_stats_init((u64 *)&data->pausestat,
-sizeof(data->pausestat) / 8);
+dev->ethtool_ops->get_pause_stats)
dev->ethtool_ops->get_pause_stats(dev, &data->pausestat);
-}
ethnl_ops_complete(dev);
return 0;


@@ -1498,6 +1498,11 @@ nl802154_dump_llsec_key(struct sk_buff *skb, struct netlink_callback *cb)
if (err)
return err;
+if (wpan_dev->iftype == NL802154_IFTYPE_MONITOR) {
+err = skb->len;
+goto out_err;
+}
if (!wpan_dev->netdev) {
err = -EINVAL;
goto out_err;
@@ -1552,6 +1557,9 @@ static int nl802154_add_llsec_key(struct sk_buff *skb, struct genl_info *info)
struct ieee802154_llsec_key_id id = { };
u32 commands[NL802154_CMD_FRAME_NR_IDS / 32] = { };
+if (wpan_dev->iftype == NL802154_IFTYPE_MONITOR)
+return -EOPNOTSUPP;
if (!info->attrs[NL802154_ATTR_SEC_KEY] ||
nla_parse_nested_deprecated(attrs, NL802154_KEY_ATTR_MAX, info->attrs[NL802154_ATTR_SEC_KEY], nl802154_key_policy, info->extack))
return -EINVAL;
@@ -1601,6 +1609,9 @@ static int nl802154_del_llsec_key(struct sk_buff *skb, struct genl_info *info)
struct nlattr *attrs[NL802154_KEY_ATTR_MAX + 1];
struct ieee802154_llsec_key_id id;
+if (wpan_dev->iftype == NL802154_IFTYPE_MONITOR)
+return -EOPNOTSUPP;
if (!info->attrs[NL802154_ATTR_SEC_KEY] ||
nla_parse_nested_deprecated(attrs, NL802154_KEY_ATTR_MAX, info->attrs[NL802154_ATTR_SEC_KEY], nl802154_key_policy, info->extack))
return -EINVAL;
@@ -1666,6 +1677,11 @@ nl802154_dump_llsec_dev(struct sk_buff *skb, struct netlink_callback *cb)
if (err)
return err;
+if (wpan_dev->iftype == NL802154_IFTYPE_MONITOR) {
+err = skb->len;
+goto out_err;
+}
if (!wpan_dev->netdev) {
err = -EINVAL;
goto out_err;
@@ -1752,6 +1768,9 @@ static int nl802154_add_llsec_dev(struct sk_buff *skb, struct genl_info *info)
struct wpan_dev *wpan_dev = dev->ieee802154_ptr;
struct ieee802154_llsec_device dev_desc;
+if (wpan_dev->iftype == NL802154_IFTYPE_MONITOR)
+return -EOPNOTSUPP;
if (ieee802154_llsec_parse_device(info->attrs[NL802154_ATTR_SEC_DEVICE],
&dev_desc) < 0)
return -EINVAL;
@@ -1767,6 +1786,9 @@ static int nl802154_del_llsec_dev(struct sk_buff *skb, struct genl_info *info)
struct nlattr *attrs[NL802154_DEV_ATTR_MAX + 1];
__le64 extended_addr;
+if (wpan_dev->iftype == NL802154_IFTYPE_MONITOR)
+return -EOPNOTSUPP;
if (!info->attrs[NL802154_ATTR_SEC_DEVICE] ||
nla_parse_nested_deprecated(attrs, NL802154_DEV_ATTR_MAX, info->attrs[NL802154_ATTR_SEC_DEVICE], nl802154_dev_policy, info->extack))
return -EINVAL;
@@ -1836,6 +1858,11 @@ nl802154_dump_llsec_devkey(struct sk_buff *skb, struct netlink_callback *cb)
if (err)
return err;
+if (wpan_dev->iftype == NL802154_IFTYPE_MONITOR) {
+err = skb->len;
+goto out_err;
+}
if (!wpan_dev->netdev) {
err = -EINVAL;
goto out_err;
@@ -1893,6 +1920,9 @@ static int nl802154_add_llsec_devkey(struct sk_buff *skb, struct genl_info *info
struct ieee802154_llsec_device_key key;
__le64 extended_addr;
+if (wpan_dev->iftype == NL802154_IFTYPE_MONITOR)
+return -EOPNOTSUPP;
if (!info->attrs[NL802154_ATTR_SEC_DEVKEY] ||
nla_parse_nested_deprecated(attrs, NL802154_DEVKEY_ATTR_MAX, info->attrs[NL802154_ATTR_SEC_DEVKEY], nl802154_devkey_policy, info->extack) < 0)
return -EINVAL;
@@ -1924,6 +1954,9 @@ static int nl802154_del_llsec_devkey(struct sk_buff *skb, struct genl_info *info
struct ieee802154_llsec_device_key key;
__le64 extended_addr;
+if (wpan_dev->iftype == NL802154_IFTYPE_MONITOR)
+return -EOPNOTSUPP;
if (!info->attrs[NL802154_ATTR_SEC_DEVKEY] ||
nla_parse_nested_deprecated(attrs, NL802154_DEVKEY_ATTR_MAX, info->attrs[NL802154_ATTR_SEC_DEVKEY], nl802154_devkey_policy, info->extack))
return -EINVAL;
@@ -1998,6 +2031,11 @@ nl802154_dump_llsec_seclevel(struct sk_buff *skb, struct netlink_callback *cb)
if (err)
return err;
+if (wpan_dev->iftype == NL802154_IFTYPE_MONITOR) {
+err = skb->len;
+goto out_err;
+}
if (!wpan_dev->netdev) {
err = -EINVAL;
goto out_err;
@@ -2082,6 +2120,9 @@ static int nl802154_add_llsec_seclevel(struct sk_buff *skb,
struct wpan_dev *wpan_dev = dev->ieee802154_ptr;
struct ieee802154_llsec_seclevel sl;
+if (wpan_dev->iftype == NL802154_IFTYPE_MONITOR)
+return -EOPNOTSUPP;
if (llsec_parse_seclevel(info->attrs[NL802154_ATTR_SEC_LEVEL],
&sl) < 0)
return -EINVAL;


@@ -1541,10 +1541,15 @@ out_free:
return ret;
}
-void arpt_unregister_table(struct net *net, struct xt_table *table,
-const struct nf_hook_ops *ops)
+void arpt_unregister_table_pre_exit(struct net *net, struct xt_table *table,
+const struct nf_hook_ops *ops)
{
nf_unregister_net_hooks(net, ops, hweight32(table->valid_hooks));
+}
+EXPORT_SYMBOL(arpt_unregister_table_pre_exit);
+void arpt_unregister_table(struct net *net, struct xt_table *table)
+{
__arpt_unregister_table(net, table);
}


@@ -56,16 +56,24 @@ static int __net_init arptable_filter_table_init(struct net *net)
return err;
}
+static void __net_exit arptable_filter_net_pre_exit(struct net *net)
+{
+if (net->ipv4.arptable_filter)
+arpt_unregister_table_pre_exit(net, net->ipv4.arptable_filter,
+arpfilter_ops);
+}
static void __net_exit arptable_filter_net_exit(struct net *net)
{
if (!net->ipv4.arptable_filter)
return;
-arpt_unregister_table(net, net->ipv4.arptable_filter, arpfilter_ops);
+arpt_unregister_table(net, net->ipv4.arptable_filter);
net->ipv4.arptable_filter = NULL;
}
static struct pernet_operations arptable_filter_net_ops = {
.exit = arptable_filter_net_exit,
+.pre_exit = arptable_filter_net_pre_exit,
};
static int __init arptable_filter_init(void)


@@ -1376,9 +1376,19 @@ static __net_init int ipv4_sysctl_init_net(struct net *net)
if (!table)
goto err_alloc;
-/* Update the variables to point into the current struct net */
-for (i = 0; i < ARRAY_SIZE(ipv4_net_table) - 1; i++)
-table[i].data += (void *)net - (void *)&init_net;
+for (i = 0; i < ARRAY_SIZE(ipv4_net_table) - 1; i++) {
+if (table[i].data) {
+/* Update the variables to point into
+ * the current struct net
+ */
+table[i].data += (void *)net - (void *)&init_net;
+} else {
+/* Entries without data pointer are global;
+ * Make them read-only in non-init_net ns
+ */
+table[i].mode &= ~0222;
+}
+}
}
net->ipv4.ipv4_hdr = register_net_sysctl(net, "net/ipv4", table);
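
ctl_table.mode holds octal permission bits, so clearing 0222 strips every write bit and leaves a global sysctl visible but read-only inside child namespaces. The same masking in plain C, outside the kernel:

	#include <stdio.h>

	int main(void)
	{
		unsigned int mode = 0644;	/* rw-r--r-- */

		mode &= ~0222;			/* drop all write bits */
		printf("%o\n", mode);		/* prints 444: r--r--r-- */
		return 0;
	}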


@@ -2275,6 +2275,16 @@ static void __net_exit ip6_tnl_destroy_tunnels(struct net *net, struct list_head
t = rtnl_dereference(t->next);
}
}
+t = rtnl_dereference(ip6n->tnls_wc[0]);
+while (t) {
+/* If dev is in the same netns, it has already
+ * been added to the list by the previous loop.
+ */
+if (!net_eq(dev_net(t->dev), net))
+unregister_netdevice_queue(t->dev, list);
+t = rtnl_dereference(t->next);
+}
}
static int __net_init ip6_tnl_init_net(struct net *net)


@@ -1867,9 +1867,9 @@ static void __net_exit sit_destroy_tunnels(struct net *net,
if (dev->rtnl_link_ops == &sit_link_ops)
unregister_netdevice_queue(dev, head);
-for (prio = 1; prio < 4; prio++) {
+for (prio = 0; prio < 4; prio++) {
int h;
-for (h = 0; h < IP6_SIT_HASH_SIZE; h++) {
+for (h = 0; h < (prio ? IP6_SIT_HASH_SIZE : 1); h++) {
struct ip_tunnel *t;
t = rtnl_dereference(sitn->tunnels[prio][h]);


@@ -1789,8 +1789,10 @@ static int ieee80211_change_station(struct wiphy *wiphy,
}
if (sta->sdata->vif.type == NL80211_IFTYPE_AP_VLAN &&
-sta->sdata->u.vlan.sta)
+sta->sdata->u.vlan.sta) {
+ieee80211_clear_fast_rx(sta);
RCU_INIT_POINTER(sta->sdata->u.vlan.sta, NULL);
+}
if (test_sta_flag(sta, WLAN_STA_AUTHORIZED))
ieee80211_vif_dec_num_mcast(sta->sdata);


@@ -266,6 +266,7 @@ static const char* l4proto_name(u16 proto)
case IPPROTO_GRE: return "gre";
case IPPROTO_SCTP: return "sctp";
case IPPROTO_UDPLITE: return "udplite";
+case IPPROTO_ICMPV6: return "icmpv6";
}
return "unknown";


@@ -305,12 +305,12 @@ static void flow_offload_ipv6_mangle(struct nf_flow_rule *flow_rule,
const __be32 *addr, const __be32 *mask)
{
struct flow_action_entry *entry;
-int i;
+int i, j;
-for (i = 0; i < sizeof(struct in6_addr) / sizeof(u32); i += sizeof(u32)) {
+for (i = 0, j = 0; i < sizeof(struct in6_addr) / sizeof(u32); i += sizeof(u32), j++) {
entry = flow_action_entry_next(flow_rule);
flow_offload_mangle(entry, FLOW_ACT_MANGLE_HDR_TYPE_IP6,
-offset + i, &addr[i], mask);
+offset + i, &addr[j], mask);
}
}
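
The bug here was reusing the byte-stride counter as an array index: i advances by sizeof(u32) while addr[] expects element indices, so three of the four address words were skipped. The corrected two-counter idiom as a standalone sketch, where demo_write_word() stands in for flow_offload_mangle():

	#include <stdint.h>
	#include <stddef.h>

	static void demo_write_word(uint32_t off, uint32_t word)
	{
		(void)off; (void)word;	/* placeholder for the mangle call */
	}

	/* i walks byte offsets, j walks the 4 words of an IPv6 address */
	static void demo_mangle(uint32_t offset, const uint32_t addr[4])
	{
		uint32_t i, j;

		for (i = 0, j = 0; j < 4; i += sizeof(uint32_t), j++)
			demo_write_word(offset + i, addr[j]);
	}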


@@ -76,13 +76,13 @@ static int nft_limit_init(struct nft_limit *limit,
return -EOVERFLOW;
if (pkts) {
-tokens = div_u64(limit->nsecs, limit->rate) * limit->burst;
+tokens = div64_u64(limit->nsecs, limit->rate) * limit->burst;
} else {
/* The token bucket size limits the number of tokens can be
 * accumulated. tokens_max specifies the bucket size.
 * tokens_max = unit * (rate + burst) / rate.
 */
-tokens = div_u64(limit->nsecs * (limit->rate + limit->burst),
+tokens = div64_u64(limit->nsecs * (limit->rate + limit->burst),
limit->rate);
}
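
div_u64() takes a 32-bit divisor, so a rate above U32_MAX was silently truncated before the division; div64_u64() keeps all 64 divisor bits. The difference, sketched with plain C arithmetic rather than the kernel helpers:

	#include <stdint.h>
	#include <stdio.h>

	int main(void)
	{
		uint64_t nsecs = 1000000000ULL * 3600;	/* one hour in ns */
		uint64_t rate  = 5000000000ULL;		/* > UINT32_MAX */

		/* div_u64-style: divisor squeezed through u32, wraps to 705032704 */
		uint64_t wrong = nsecs / (uint32_t)rate;
		/* div64_u64-style: full 64-bit divisor */
		uint64_t right = nsecs / rate;

		printf("%llu vs %llu\n",
		       (unsigned long long)wrong, (unsigned long long)right);
		return 0;
	}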


@@ -1525,11 +1525,9 @@ static void sctp_close(struct sock *sk, long timeout)
/* Supposedly, no process has access to the socket, but
 * the net layers still may.
-* Also, sctp_destroy_sock() needs to be called with addr_wq_lock
-* held and that should be grabbed before socket lock.
 */
-spin_lock_bh(&net->sctp.addr_wq_lock);
-bh_lock_sock_nested(sk);
+local_bh_disable();
+bh_lock_sock(sk);
/* Hold the sock, since sk_common_release() will put sock_put()
* and we have just a little more cleanup.
@@ -1538,7 +1536,7 @@ static void sctp_close(struct sock *sk, long timeout)
sk_common_release(sk);
bh_unlock_sock(sk);
-spin_unlock_bh(&net->sctp.addr_wq_lock);
+local_bh_enable();
sock_put(sk);
@@ -4944,9 +4942,6 @@ static int sctp_init_sock(struct sock *sk)
sk_sockets_allocated_inc(sk);
sock_prot_inuse_add(net, sk->sk_prot, 1);
-/* Nothing can fail after this block, otherwise
- * sctp_destroy_sock() will be called without addr_wq_lock held
- */
if (net->sctp.default_auto_asconf) {
spin_lock(&sock_net(sk)->sctp.addr_wq_lock);
list_add_tail(&sp->auto_asconf_list,
@@ -4981,7 +4976,9 @@ static void sctp_destroy_sock(struct sock *sk)
if (sp->do_auto_asconf) {
sp->do_auto_asconf = 0;
+spin_lock_bh(&sock_net(sk)->sctp.addr_wq_lock);
list_del(&sp->auto_asconf_list);
+spin_unlock_bh(&sock_net(sk)->sctp.addr_wq_lock);
}
sctp_endpoint_free(sp->ep);
local_bh_disable();


@@ -658,6 +658,12 @@ static int xfrm4_extract_output(struct xfrm_state *x, struct sk_buff *skb)
{
int err;
+if (x->outer_mode.encap == XFRM_MODE_BEET &&
+ip_is_fragment(ip_hdr(skb))) {
+net_warn_ratelimited("BEET mode doesn't support inner IPv4 fragments\n");
+return -EAFNOSUPPORT;
+}
err = xfrm4_tunnel_check_size(skb);
if (err)
return err;
@@ -703,8 +709,15 @@ out:
static int xfrm6_extract_output(struct xfrm_state *x, struct sk_buff *skb)
{
#if IS_ENABLED(CONFIG_IPV6)
+unsigned int ptr = 0;
int err;
+if (x->outer_mode.encap == XFRM_MODE_BEET &&
+ipv6_find_hdr(skb, &ptr, NEXTHDR_FRAGMENT, NULL, NULL) >= 0) {
+net_warn_ratelimited("BEET mode doesn't support inner IPv6 fragments\n");
+return -EAFNOSUPPORT;
+}
err = xfrm6_tunnel_check_size(skb);
if (err)
return err;


@@ -440,6 +440,7 @@ static bool max98373_volatile_reg(struct device *dev, unsigned int reg)
case MAX98373_R2054_MEAS_ADC_PVDD_CH_READBACK:
case MAX98373_R2055_MEAS_ADC_THERM_CH_READBACK:
case MAX98373_R20B6_BDE_CUR_STATE_READBACK:
+case MAX98373_R20FF_GLOBAL_SHDN:
case MAX98373_R21FF_REV_ID:
return true;
default:


@@ -214,6 +214,7 @@ static bool max98373_volatile_reg(struct device *dev, unsigned int reg)
case MAX98373_R2054_MEAS_ADC_PVDD_CH_READBACK:
case MAX98373_R2055_MEAS_ADC_THERM_CH_READBACK:
case MAX98373_R20B6_BDE_CUR_STATE_READBACK:
+case MAX98373_R20FF_GLOBAL_SHDN:
case MAX98373_R21FF_REV_ID:
/* SoundWire Control Port Registers */
case MAX98373_R0040_SCP_INIT_STAT_1 ... MAX98373_R0070_SCP_FRAME_CTLR:


@@ -28,11 +28,13 @@ static int max98373_dac_event(struct snd_soc_dapm_widget *w,
regmap_update_bits(max98373->regmap,
MAX98373_R20FF_GLOBAL_SHDN,
MAX98373_GLOBAL_EN_MASK, 1);
+usleep_range(30000, 31000);
break;
case SND_SOC_DAPM_POST_PMD:
regmap_update_bits(max98373->regmap,
MAX98373_R20FF_GLOBAL_SHDN,
MAX98373_GLOBAL_EN_MASK, 0);
+usleep_range(30000, 31000);
max98373->tdm_mode = false;
break;
default:
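
The 30 ms sleeps give the amplifier its documented turn-on/off ramp time, and usleep_range() lets the scheduler place the wakeup anywhere in the [30 ms, 31 ms] window instead of busy-waiting. The pattern in isolation, with an illustrative register address rather than the codec's real map:

	#include <linux/delay.h>
	#include <linux/regmap.h>

	static void demo_power(struct regmap *map, bool on)
	{
		regmap_update_bits(map, 0x20FF /* global-shutdown reg */, 1, on);
		/* hardware needs ~30 ms before the audio path is stable */
		usleep_range(30000, 31000);
	}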


@@ -524,11 +524,13 @@ static int fsl_esai_startup(struct snd_pcm_substream *substream,
ESAI_SAICR_SYNC, esai_priv->synchronous ?
ESAI_SAICR_SYNC : 0);
-/* Set a default slot number -- 2 */
+/* Set slots count */
regmap_update_bits(esai_priv->regmap, REG_ESAI_TCCR,
-ESAI_xCCR_xDC_MASK, ESAI_xCCR_xDC(2));
+ESAI_xCCR_xDC_MASK,
+ESAI_xCCR_xDC(esai_priv->slots));
regmap_update_bits(esai_priv->regmap, REG_ESAI_RCCR,
-ESAI_xCCR_xDC_MASK, ESAI_xCCR_xDC(2));
+ESAI_xCCR_xDC_MASK,
+ESAI_xCCR_xDC(esai_priv->slots));
}
return 0;


@@ -9,8 +9,6 @@
#include "../../../arch/alpha/include/uapi/asm/errno.h"
#elif defined(__mips__)
#include "../../../arch/mips/include/uapi/asm/errno.h"
-#elif defined(__ia64__)
-#include "../../../arch/ia64/include/uapi/asm/errno.h"
#elif defined(__xtensa__)
#include "../../../arch/xtensa/include/uapi/asm/errno.h"
#else


@@ -703,18 +703,19 @@ int xsk_socket__create_shared(struct xsk_socket **xsk_ptr,
struct xsk_ring_cons *comp,
const struct xsk_socket_config *usr_config)
{
+bool unmap, rx_setup_done = false, tx_setup_done = false;
void *rx_map = NULL, *tx_map = NULL;
struct sockaddr_xdp sxdp = {};
struct xdp_mmap_offsets off;
struct xsk_socket *xsk;
struct xsk_ctx *ctx;
int err, ifindex;
-bool unmap = umem->fill_save != fill;
-bool rx_setup_done = false, tx_setup_done = false;
if (!umem || !xsk_ptr || !(rx || tx))
return -EFAULT;
+unmap = umem->fill_save != fill;
xsk = calloc(1, sizeof(*xsk));
if (!xsk)
return -ENOMEM;
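
The libbpf fix is purely about ordering: the old initializer dereferenced umem before the !umem guard could run. The general shape of the bug and the fix, as a hedged sketch with demo_* types:

	#include <stdbool.h>
	#include <stddef.h>

	struct demo_umem { int fill_save; };

	/* Guard first, then dereference: the buggy version read
	 * umem->fill_save in an initializer before the NULL check.
	 */
	static int demo_create(const struct demo_umem *umem, int fill)
	{
		bool unmap;

		if (!umem)
			return -1;	/* fail before any dereference */

		unmap = umem->fill_save != fill;	/* now safe */
		return unmap ? 1 : 0;
	}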