This is the 5.10.134 stable release

-----BEGIN PGP SIGNATURE-----
 
 iQIzBAABCAAdFiEEZH8oZUiU471FcZm+ONu9yGCSaT4FAmLj+okACgkQONu9yGCS
 aT7ULhAA2D1qxAvJsuhLK3HAG3ii4jKb+lPZO4Gx7MGbt6H0ktsHKcAppVCMOiQ/
 zr8z695+GjO9RcFqiVVEYVkXGuBSwEI34MWYkHk6+567Y47d9HX09tehvGmwSYB/
 2eFkhL7Am6XXY8fK1p5L3iFQ4pn2O1LT90oC6IX2PbgPBh9SqA/cL2RoFjrtLKYI
 s+ok/P6qiDz/7jn1V3AzvESs9n0h7fviGYwpe+jEcXRr+7Glu8A23n7goOpCn5k1
 NydT0S69fiVb14NhzDGhgSMp/Ft4u8pb12n2UWrR6pueE/Ea7VbC/AOhh2CYCOpJ
 VpjZlFQDSJhTNmlAEiFADmejzyfjRyFaaQkq52odOV9YljbX9u4XCI9w42E3kgfi
 ClEJNGNSRWc35LR69sAV2TzKmAQX8DcYCyvkk8uFpOkoEr9ANbqOn5rXgGk3jllT
 RoFcOmXvN4t+mYebvxjtOvC56OOopUte6a/hGzLoOvf1Uy36CaRQ4izURZpOAKAT
 lMN8P/s/NQxE9g3Aq4ABydCxPaLnJkIobfFqoc8wFVnopmUd4+wspklwWeo+MGps
 oZ2nt5BLlweQ7Yr1wif+Sff5q3jkR9ppUxMYiwRHUW9fTy3QL7uMJqs3qa5s6wLH
 AQJXuKjuA7mpbmE8csBPUGP+LL2d/RalLKjzqpwNcSJ0IPk6lW8=
 =9KOJ
 -----END PGP SIGNATURE-----

Merge 5.10.134 into android12-5.10-lts

Changes in 5.10.134
	pinctrl: stm32: fix optional IRQ support to gpios
	riscv: add as-options for modules with assembly components
	mlxsw: spectrum_router: Fix IPv4 nexthop gateway indication
	lockdown: Fix kexec lockdown bypass with ima policy
	io_uring: Use original task for req identity in io_identity_cow()
	xen/gntdev: Ignore failure to unmap INVALID_GRANT_HANDLE
	docs: net: explain struct net_device lifetime
	net: make free_netdev() more lenient with unregistering devices
	net: make sure devices go through netdev_wait_all_refs
	net: move net_set_todo inside rollback_registered()
	net: inline rollback_registered()
	net: move rollback_registered_many()
	net: inline rollback_registered_many()
	Revert "m68knommu: only set CONFIG_ISA_DMA_API for ColdFire sub-arch"
	PCI: hv: Fix multi-MSI to allow more than one MSI vector
	PCI: hv: Fix hv_arch_irq_unmask() for multi-MSI
	PCI: hv: Reuse existing IRTE allocation in compose_msi_msg()
	PCI: hv: Fix interrupt mapping for multi-MSI
	serial: mvebu-uart: correctly report configured baudrate value
	xfrm: xfrm_policy: fix a possible double xfrm_pols_put() in xfrm_bundle_lookup()
	power/reset: arm-versatile: Fix refcount leak in versatile_reboot_probe
	pinctrl: ralink: Check for null return of devm_kcalloc
	perf/core: Fix data race between perf_event_set_output() and perf_mmap_close()
	drm/amdgpu/display: add quirk handling for stutter mode
	igc: Reinstate IGC_REMOVED logic and implement it properly
	ip: Fix data-races around sysctl_ip_no_pmtu_disc.
	ip: Fix data-races around sysctl_ip_fwd_use_pmtu.
	ip: Fix data-races around sysctl_ip_fwd_update_priority.
	ip: Fix data-races around sysctl_ip_nonlocal_bind.
	ip: Fix a data-race around sysctl_ip_autobind_reuse.
	ip: Fix a data-race around sysctl_fwmark_reflect.
	tcp/dccp: Fix a data-race around sysctl_tcp_fwmark_accept.
	tcp: Fix data-races around sysctl_tcp_mtu_probing.
	tcp: Fix data-races around sysctl_tcp_base_mss.
	tcp: Fix data-races around sysctl_tcp_min_snd_mss.
	tcp: Fix a data-race around sysctl_tcp_mtu_probe_floor.
	tcp: Fix a data-race around sysctl_tcp_probe_threshold.
	tcp: Fix a data-race around sysctl_tcp_probe_interval.
	net: stmmac: fix unbalanced ptp clock issue in suspend/resume flow
	i2c: cadence: Change large transfer count reset logic to be unconditional
	net: stmmac: fix dma queue left shift overflow issue
	net/tls: Fix race in TLS device down flow
	igmp: Fix data-races around sysctl_igmp_llm_reports.
	igmp: Fix a data-race around sysctl_igmp_max_memberships.
	igmp: Fix data-races around sysctl_igmp_max_msf.
	tcp: Fix data-races around keepalive sysctl knobs.
	tcp: Fix data-races around sysctl_tcp_syncookies.
	tcp: Fix data-races around sysctl_tcp_reordering.
	tcp: Fix data-races around some timeout sysctl knobs.
	tcp: Fix a data-race around sysctl_tcp_notsent_lowat.
	tcp: Fix a data-race around sysctl_tcp_tw_reuse.
	tcp: Fix data-races around sysctl_max_syn_backlog.
	tcp: Fix data-races around sysctl_tcp_fastopen.
	tcp: Fix data-races around sysctl_tcp_fastopen_blackhole_timeout.
	iavf: Fix handling of dummy receive descriptors
	i40e: Fix erroneous adapter reinitialization during recovery process
	ixgbe: Add locking to prevent panic when setting sriov_numvfs to zero
	gpio: pca953x: only use single read/write for No AI mode
	gpio: pca953x: use the correct range when do regmap sync
	gpio: pca953x: use the correct register address when regcache sync during init
	be2net: Fix buffer overflow in be_get_module_eeprom
	drm/imx/dcss: Add missing of_node_put() in fail path
	ipv4: Fix a data-race around sysctl_fib_multipath_use_neigh.
	ip: Fix data-races around sysctl_ip_prot_sock.
	udp: Fix a data-race around sysctl_udp_l3mdev_accept.
	tcp: Fix data-races around sysctl knobs related to SYN option.
	tcp: Fix a data-race around sysctl_tcp_early_retrans.
	tcp: Fix data-races around sysctl_tcp_recovery.
	tcp: Fix a data-race around sysctl_tcp_thin_linear_timeouts.
	tcp: Fix data-races around sysctl_tcp_slow_start_after_idle.
	tcp: Fix a data-race around sysctl_tcp_retrans_collapse.
	tcp: Fix a data-race around sysctl_tcp_stdurg.
	tcp: Fix a data-race around sysctl_tcp_rfc1337.
	tcp: Fix data-races around sysctl_tcp_max_reordering.
	spi: bcm2835: bcm2835_spi_handle_err(): fix NULL pointer deref for non DMA transfers
	KVM: Don't null dereference ops->destroy
	mm/mempolicy: fix uninit-value in mpol_rebind_policy()
	bpf: Make sure mac_header was set before using it
	sched/deadline: Fix BUG_ON condition for deboosted tasks
	x86/bugs: Warn when "ibrs" mitigation is selected on Enhanced IBRS parts
	dlm: fix pending remove if msg allocation fails
	drm/imx/dcss: fix unused but set variable warnings
	bitfield.h: Fix "type of reg too small for mask" test
	ALSA: memalloc: Align buffer allocations in page size
	Bluetooth: Add bt_skb_sendmsg helper
	Bluetooth: Add bt_skb_sendmmsg helper
	Bluetooth: SCO: Replace use of memcpy_from_msg with bt_skb_sendmsg
	Bluetooth: RFCOMM: Replace use of memcpy_from_msg with bt_skb_sendmmsg
	Bluetooth: Fix passing NULL to PTR_ERR
	Bluetooth: SCO: Fix sco_send_frame returning skb->len
	Bluetooth: Fix bt_skb_sendmmsg not allocating partial chunks
	x86/amd: Use IBPB for firmware calls
	x86/alternative: Report missing return thunk details
	watchqueue: make sure to serialize 'wqueue->defunct' properly
	tty: drivers/tty/, stop using tty_schedule_flip()
	tty: the rest, stop using tty_schedule_flip()
	tty: drop tty_schedule_flip()
	tty: extract tty_flip_buffer_commit() from tty_flip_buffer_push()
	tty: use new tty_insert_flip_string_and_push_buffer() in pty_write()
	net: usb: ax88179_178a needs FLAG_SEND_ZLP
	watch-queue: remove spurious double semicolon
	Linux 5.10.134

Signed-off-by: Greg Kroah-Hartman <gregkh@google.com>
Change-Id: I55defdcdd6658e3ec9a3684b7e8cdfe114772a19
Author: Greg Kroah-Hartman
Date:   2022-08-03 12:42:13 +02:00
Commit: f6ce9a9115
98 changed files with 1011 additions and 555 deletions


@ -10,18 +10,177 @@ Introduction
The following is a random collection of documentation regarding
network devices.
-struct net_device allocation rules
-==================================
+struct net_device lifetime rules
+================================
Network device structures need to persist even after module is unloaded and
must be allocated with alloc_netdev_mqs() and friends.
If device has registered successfully, it will be freed on last use
-by free_netdev(). This is required to handle the pathologic case cleanly
-(example: rmmod mydriver </sys/class/net/myeth/mtu )
+by free_netdev(). This is required to handle the pathological case cleanly
+(example: ``rmmod mydriver </sys/class/net/myeth/mtu``)
-alloc_netdev_mqs()/alloc_netdev() reserve extra space for driver
+alloc_netdev_mqs() / alloc_netdev() reserve extra space for driver
private data which gets freed when the network device is freed. If
separately allocated data is attached to the network device
-(netdev_priv(dev)) then it is up to the module exit handler to free that.
+(netdev_priv()) then it is up to the module exit handler to free that.
There are two groups of APIs for registering struct net_device.
The first group can be used in normal contexts, where ``rtnl_lock`` is not
already held: register_netdev(), unregister_netdev().
The second group can be used when ``rtnl_lock`` is already held:
register_netdevice(), unregister_netdevice(), free_netdev().
Simple drivers
--------------
Most drivers (especially device drivers) handle the lifetime of struct
net_device in contexts where ``rtnl_lock`` is not held (e.g. driver probe and
remove paths). In that case the struct net_device registration is done using
the register_netdev() and unregister_netdev() functions:
.. code-block:: c

    int probe()
    {
        struct net_device *dev;
        struct my_device_priv *priv;
        int err;

        dev = alloc_netdev_mqs(...);
        if (!dev)
            return -ENOMEM;
        priv = netdev_priv(dev);

        /* ... do all device setup before calling register_netdev() ... */

        err = register_netdev(dev);
        if (err)
            goto err_undo;

        /* net_device is visible to the user! */
        return 0;

    err_undo:
        /* ... undo the device setup ... */
        free_netdev(dev);
        return err;
    }

    void remove()
    {
        unregister_netdev(dev);
        free_netdev(dev);
    }
Note that after calling register_netdev() the device is visible in the system.
Users can open it and start sending / receiving traffic immediately, or run
any other callback, so all initialization must be done prior to registration.

unregister_netdev() closes the device and waits for all users to be done with
it. The memory of struct net_device itself may still be referenced by sysfs
but all operations on that device will fail.

free_netdev() can be called after unregister_netdev() returns or when
register_netdev() failed.
Device management under RTNL
----------------------------
Registering struct net_device while in a context which already holds
the ``rtnl_lock`` requires extra care. In those scenarios most drivers
will want to make use of struct net_device's ``needs_free_netdev``
and ``priv_destructor`` members for freeing of state.

Example flow of netdev handling under ``rtnl_lock``:
.. code-block:: c

    static void my_setup(struct net_device *dev)
    {
        dev->needs_free_netdev = true;
    }

    static void my_destructor(struct net_device *dev)
    {
        struct my_device_priv *priv = netdev_priv(dev);

        some_obj_destroy(priv->obj);
        some_uninit(priv);
    }

    int create_link()
    {
        struct net_device *dev;
        struct my_device_priv *priv;
        int err;

        ASSERT_RTNL();

        dev = alloc_netdev(sizeof(*priv), "net%d", NET_NAME_UNKNOWN, my_setup);
        if (!dev)
            return -ENOMEM;
        priv = netdev_priv(dev);

        /* Implicit constructor */
        err = some_init(priv);
        if (err)
            goto err_free_dev;

        priv->obj = some_obj_create();
        if (!priv->obj) {
            err = -ENOMEM;
            goto err_some_uninit;
        }
        /* End of constructor, set the destructor: */
        dev->priv_destructor = my_destructor;

        err = register_netdevice(dev);
        if (err)
            /* register_netdevice() calls destructor on failure */
            goto err_free_dev;

        /* If anything fails now unregister_netdevice() (or unregister_netdev())
         * will take care of calling my_destructor and free_netdev().
         */

        return 0;

    err_some_uninit:
        some_uninit(priv);
    err_free_dev:
        free_netdev(dev);
        return err;
    }
If struct net_device.priv_destructor is set it will be called by the core
some time after unregister_netdevice(); it will also be called if
register_netdevice() fails. The callback may be invoked with or without
``rtnl_lock`` held.

There is no explicit constructor callback; the driver "constructs" the private
netdev state after allocating it and before registration.
Setting struct net_device.needs_free_netdev makes the core call free_netdev()
automatically after unregister_netdevice() when all references to the device
are gone. It only takes effect after a successful call to register_netdevice(),
so if register_netdevice() fails the driver is responsible for calling
free_netdev().
free_netdev() is safe to call on error paths right after unregister_netdevice()
or when register_netdevice() fails. Parts of the netdev (de)registration
process happen after ``rtnl_lock`` is released; in those cases free_netdev()
defers some of the processing until ``rtnl_lock`` is released.
Devices spawned from struct rtnl_link_ops should never free the
struct net_device directly.
.ndo_init and .ndo_uninit
~~~~~~~~~~~~~~~~~~~~~~~~~
``.ndo_init`` and ``.ndo_uninit`` callbacks are called during net_device
registration and de-registration, under ``rtnl_lock``. Drivers can use
those e.g. when parts of their init process need to run under ``rtnl_lock``.

``.ndo_init`` runs before the device is visible in the system; ``.ndo_uninit``
runs during de-registration, after the device is closed but while other
subsystems may still hold outstanding references to the netdevice.
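For illustration only (editor's sketch, not part of the original document;
the ``my_*`` names are hypothetical), wiring these callbacks up looks like:

.. code-block:: c

    static int my_ndo_init(struct net_device *dev)
    {
        /* Runs under rtnl_lock, before the device becomes visible;
         * returning a negative errno aborts the registration.
         */
        return 0;
    }

    static void my_ndo_uninit(struct net_device *dev)
    {
        /* Runs under rtnl_lock during unregistration, after the
         * device has been closed.
         */
    }

    static const struct net_device_ops my_netdev_ops = {
        .ndo_init   = my_ndo_init,
        .ndo_uninit = my_ndo_uninit,
    };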
MTU
===


@ -1,7 +1,7 @@
# SPDX-License-Identifier: GPL-2.0
VERSION = 5
PATCHLEVEL = 10
-SUBLEVEL = 133
+SUBLEVEL = 134
EXTRAVERSION =
NAME = Dare mighty things


@ -59,7 +59,7 @@ srmcons_do_receive_chars(struct tty_port *port)
} while((result.bits.status & 1) && (++loops < 10));
if (count)
-		tty_schedule_flip(port);
+		tty_flip_buffer_push(port);
return count;
}


@ -63,7 +63,7 @@ source "drivers/zorro/Kconfig"
endif
if COLDFIRE
if !MMU
config ISA_DMA_API
def_bool !M5272


@ -73,6 +73,7 @@ ifeq ($(CONFIG_PERF_EVENTS),y)
endif
KBUILD_CFLAGS_MODULE += $(call cc-option,-mno-relax)
KBUILD_AFLAGS_MODULE += $(call as-option,-Wa$(comma)-mno-relax)
# GCC versions that support the "-mstrict-align" option default to allowing
# unaligned accesses. While unaligned accesses are explicitly allowed in the


@ -298,6 +298,7 @@
#define X86_FEATURE_RETPOLINE_LFENCE (11*32+13) /* "" Use LFENCE for Spectre variant 2 */
#define X86_FEATURE_RETHUNK (11*32+14) /* "" Use REturn THUNK */
#define X86_FEATURE_UNRET (11*32+15) /* "" AMD BTB untrain return */
#define X86_FEATURE_USE_IBPB_FW (11*32+16) /* "" Use IBPB during runtime firmware calls */
/* Intel-defined CPU features, CPUID level 0x00000007:1 (EAX), word 12 */
#define X86_FEATURE_AVX512_BF16 (12*32+ 5) /* AVX512 BFLOAT16 instructions */


@ -247,13 +247,6 @@ bool hv_vcpu_is_preempted(int vcpu);
static inline void hv_apic_init(void) {}
#endif
-static inline void hv_set_msi_entry_from_desc(union hv_msi_entry *msi_entry,
-					      struct msi_desc *msi_desc)
-{
-	msi_entry->address = msi_desc->msg.address_lo;
-	msi_entry->data = msi_desc->msg.data;
-}
#else /* CONFIG_HYPERV */
static inline void hyperv_init(void) {}
static inline void hyperv_setup_mmu_ops(void) {}


@ -298,6 +298,8 @@ do { \
alternative_msr_write(MSR_IA32_SPEC_CTRL, \
spec_ctrl_current() | SPEC_CTRL_IBRS, \
X86_FEATURE_USE_IBRS_FW); \
alternative_msr_write(MSR_IA32_PRED_CMD, PRED_CMD_IBPB, \
X86_FEATURE_USE_IBPB_FW); \
} while (0)
#define firmware_restrict_branch_speculation_end() \


@ -709,7 +709,9 @@ void __init_or_module noinline apply_returns(s32 *start, s32 *end)
dest = addr + insn.length + insn.immediate.value;
if (__static_call_fixup(addr, op, dest) ||
-	    WARN_ON_ONCE(dest != &__x86_return_thunk))
+	    WARN_ONCE(dest != &__x86_return_thunk,
+		      "missing return thunk: %pS-%pS: %*ph",
+		      addr, dest, 5, addr))
continue;
DPRINTK("return thunk at: %pS (%px) len: %d to: %pS",


@ -931,6 +931,7 @@ static inline const char *spectre_v2_module_string(void) { return ""; }
#define SPECTRE_V2_LFENCE_MSG "WARNING: LFENCE mitigation is not recommended for this CPU, data leaks possible!\n"
#define SPECTRE_V2_EIBRS_EBPF_MSG "WARNING: Unprivileged eBPF is enabled with eIBRS on, data leaks possible via Spectre v2 BHB attacks!\n"
#define SPECTRE_V2_EIBRS_LFENCE_EBPF_SMT_MSG "WARNING: Unprivileged eBPF is enabled with eIBRS+LFENCE mitigation and SMT, data leaks possible via Spectre v2 BHB attacks!\n"
#define SPECTRE_V2_IBRS_PERF_MSG "WARNING: IBRS mitigation selected on Enhanced IBRS CPU, this may cause unnecessary performance loss\n"
#ifdef CONFIG_BPF_SYSCALL
void unpriv_ebpf_notify(int new_state)
@ -1371,6 +1372,8 @@ static void __init spectre_v2_select_mitigation(void)
case SPECTRE_V2_IBRS:
setup_force_cpu_cap(X86_FEATURE_KERNEL_IBRS);
if (boot_cpu_has(X86_FEATURE_IBRS_ENHANCED))
pr_warn(SPECTRE_V2_IBRS_PERF_MSG);
break;
case SPECTRE_V2_LFENCE:
@ -1472,7 +1475,16 @@ static void __init spectre_v2_select_mitigation(void)
* the CPU supports Enhanced IBRS, kernel might un-intentionally not
* enable IBRS around firmware calls.
*/
-	if (boot_cpu_has(X86_FEATURE_IBRS) && !spectre_v2_in_ibrs_mode(mode)) {
+	if (boot_cpu_has_bug(X86_BUG_RETBLEED) &&
+	    (boot_cpu_data.x86_vendor == X86_VENDOR_AMD ||
+	     boot_cpu_data.x86_vendor == X86_VENDOR_HYGON)) {
+		if (retbleed_cmd != RETBLEED_CMD_IBPB) {
+			setup_force_cpu_cap(X86_FEATURE_USE_IBPB_FW);
+			pr_info("Enabling Speculation Barrier for firmware calls\n");
+		}
+	} else if (boot_cpu_has(X86_FEATURE_IBRS) && !spectre_v2_in_ibrs_mode(mode)) {
setup_force_cpu_cap(X86_FEATURE_USE_IBRS_FW);
pr_info("Enabling Restricted Speculation for firmware calls\n");
}


@ -88,7 +88,7 @@ static int spk_ttyio_receive_buf2(struct tty_struct *tty,
}
if (!ldisc_data->buf_free)
-		/* ttyio_in will tty_schedule_flip */
+		/* ttyio_in will tty_flip_buffer_push */
return 0;
/* Make sure the consumer has read buf before we have seen
@ -334,7 +334,7 @@ static unsigned char ttyio_in(int timeout)
mb();
ldisc_data->buf_free = true;
/* Let TTY push more characters */
-	tty_schedule_flip(speakup_tty->port);
+	tty_flip_buffer_push(speakup_tty->port);
return rv;
}


@ -350,6 +350,9 @@ static const struct regmap_config pca953x_i2c_regmap = {
.reg_bits = 8,
.val_bits = 8,
.use_single_read = true,
.use_single_write = true,
.readable_reg = pca953x_readable_register,
.writeable_reg = pca953x_writeable_register,
.volatile_reg = pca953x_volatile_register,
@ -893,15 +896,18 @@ static int pca953x_irq_setup(struct pca953x_chip *chip,
static int device_pca95xx_init(struct pca953x_chip *chip, u32 invert)
{
DECLARE_BITMAP(val, MAX_LINE);
u8 regaddr;
int ret;
-	ret = regcache_sync_region(chip->regmap, chip->regs->output,
-				   chip->regs->output + NBANK(chip));
+	regaddr = pca953x_recalc_addr(chip, chip->regs->output, 0);
+	ret = regcache_sync_region(chip->regmap, regaddr,
+				   regaddr + NBANK(chip) - 1);
if (ret)
goto out;
-	ret = regcache_sync_region(chip->regmap, chip->regs->direction,
-				   chip->regs->direction + NBANK(chip));
+	regaddr = pca953x_recalc_addr(chip, chip->regs->direction, 0);
+	ret = regcache_sync_region(chip->regmap, regaddr,
+				   regaddr + NBANK(chip) - 1);
if (ret)
goto out;
@ -1114,14 +1120,14 @@ static int pca953x_regcache_sync(struct device *dev)
* sync these registers first and only then sync the rest.
*/
regaddr = pca953x_recalc_addr(chip, chip->regs->direction, 0);
-	ret = regcache_sync_region(chip->regmap, regaddr, regaddr + NBANK(chip));
+	ret = regcache_sync_region(chip->regmap, regaddr, regaddr + NBANK(chip) - 1);
if (ret) {
dev_err(dev, "Failed to sync GPIO dir registers: %d\n", ret);
return ret;
}
regaddr = pca953x_recalc_addr(chip, chip->regs->output, 0);
-	ret = regcache_sync_region(chip->regmap, regaddr, regaddr + NBANK(chip));
+	ret = regcache_sync_region(chip->regmap, regaddr, regaddr + NBANK(chip) - 1);
if (ret) {
dev_err(dev, "Failed to sync GPIO out registers: %d\n", ret);
return ret;
@ -1131,7 +1137,7 @@ static int pca953x_regcache_sync(struct device *dev)
if (chip->driver_data & PCA_PCAL) {
regaddr = pca953x_recalc_addr(chip, PCAL953X_IN_LATCH, 0);
ret = regcache_sync_region(chip->regmap, regaddr,
-					   regaddr + NBANK(chip));
+					   regaddr + NBANK(chip) - 1);
if (ret) {
dev_err(dev, "Failed to sync INT latch registers: %d\n",
ret);
@ -1140,7 +1146,7 @@ static int pca953x_regcache_sync(struct device *dev)
regaddr = pca953x_recalc_addr(chip, PCAL953X_INT_MASK, 0);
ret = regcache_sync_region(chip->regmap, regaddr,
-					   regaddr + NBANK(chip));
+					   regaddr + NBANK(chip) - 1);
if (ret) {
dev_err(dev, "Failed to sync INT mask registers: %d\n",
ret);


@ -922,6 +922,37 @@ static void amdgpu_check_debugfs_connector_property_change(struct amdgpu_device
}
}
struct amdgpu_stutter_quirk {
u16 chip_vendor;
u16 chip_device;
u16 subsys_vendor;
u16 subsys_device;
u8 revision;
};
static const struct amdgpu_stutter_quirk amdgpu_stutter_quirk_list[] = {
/* https://bugzilla.kernel.org/show_bug.cgi?id=214417 */
{ 0x1002, 0x15dd, 0x1002, 0x15dd, 0xc8 },
{ 0, 0, 0, 0, 0 },
};
static bool dm_should_disable_stutter(struct pci_dev *pdev)
{
const struct amdgpu_stutter_quirk *p = amdgpu_stutter_quirk_list;
while (p && p->chip_device != 0) {
if (pdev->vendor == p->chip_vendor &&
pdev->device == p->chip_device &&
pdev->subsystem_vendor == p->subsys_vendor &&
pdev->subsystem_device == p->subsys_device &&
pdev->revision == p->revision) {
return true;
}
++p;
}
return false;
}
static int amdgpu_dm_init(struct amdgpu_device *adev)
{
struct dc_init_data init_data;
@ -1014,6 +1045,8 @@ static int amdgpu_dm_init(struct amdgpu_device *adev)
if (adev->asic_type != CHIP_CARRIZO && adev->asic_type != CHIP_STONEY)
adev->dm.dc->debug.disable_stutter = amdgpu_pp_feature_mask & PP_STUTTER_MODE ? false : true;
if (dm_should_disable_stutter(adev->pdev))
adev->dm.dc->debug.disable_stutter = true;
if (amdgpu_dc_debug_mask & DC_DISABLE_STUTTER)
adev->dm.dc->debug.disable_stutter = true;


@ -207,6 +207,7 @@ struct dcss_dev *dcss_dev_create(struct device *dev, bool hdmi_output)
ret = dcss_submodules_init(dcss);
if (ret) {
of_node_put(dcss->of_port);
dev_err(dev, "submodules initialization failed\n");
goto clks_err;
}
@ -237,6 +238,8 @@ void dcss_dev_destroy(struct dcss_dev *dcss)
dcss_clocks_disable(dcss);
}
of_node_put(dcss->of_port);
pm_runtime_disable(dcss->dev);
dcss_submodules_stop(dcss);


@ -268,7 +268,6 @@ static void dcss_plane_atomic_update(struct drm_plane *plane,
struct dcss_plane *dcss_plane = to_dcss_plane(plane);
struct dcss_dev *dcss = plane->dev->dev_private;
struct drm_framebuffer *fb = state->fb;
-	u32 pixel_format;
struct drm_crtc_state *crtc_state;
bool modifiers_present;
u32 src_w, src_h, dst_w, dst_h;
@ -279,7 +278,6 @@ static void dcss_plane_atomic_update(struct drm_plane *plane,
if (!fb || !state->crtc || !state->visible)
return;
-	pixel_format = state->fb->format->format;
crtc_state = state->crtc->state;
modifiers_present = !!(fb->flags & DRM_MODE_FB_MODIFIERS);


@ -386,9 +386,9 @@ static irqreturn_t cdns_i2c_slave_isr(void *ptr)
*/
static irqreturn_t cdns_i2c_master_isr(void *ptr)
{
-	unsigned int isr_status, avail_bytes, updatetx;
+	unsigned int isr_status, avail_bytes;
	unsigned int bytes_to_send;
-	bool hold_quirk;
+	bool updatetx;
struct cdns_i2c *id = ptr;
/* Signal completion only after everything is updated */
int done_flag = 0;
@ -408,11 +408,7 @@ static irqreturn_t cdns_i2c_master_isr(void *ptr)
* Check if transfer size register needs to be updated again for a
* large data receive operation.
*/
-	updatetx = 0;
-	if (id->recv_count > id->curr_recv_count)
-		updatetx = 1;
-	hold_quirk = (id->quirks & CDNS_I2C_BROKEN_HOLD_BIT) && updatetx;
+	updatetx = id->recv_count > id->curr_recv_count;
/* When receiving, handle data interrupt and completion interrupt */
if (id->p_recv_buf &&
@ -443,7 +439,7 @@ static irqreturn_t cdns_i2c_master_isr(void *ptr)
break;
}
-			if (cdns_is_holdquirk(id, hold_quirk))
+			if (cdns_is_holdquirk(id, updatetx))
break;
}
@ -454,7 +450,7 @@ static irqreturn_t cdns_i2c_master_isr(void *ptr)
* maintain transfer size non-zero while performing a large
* receive operation.
*/
-		if (cdns_is_holdquirk(id, hold_quirk)) {
+		if (cdns_is_holdquirk(id, updatetx)) {
/* wait while fifo is full */
while (cdns_i2c_readreg(CDNS_I2C_XFER_SIZE_OFFSET) !=
(id->curr_recv_count - CDNS_I2C_FIFO_DEPTH))
@ -476,22 +472,6 @@ static irqreturn_t cdns_i2c_master_isr(void *ptr)
CDNS_I2C_XFER_SIZE_OFFSET);
id->curr_recv_count = id->recv_count;
}
-		} else if (id->recv_count && !hold_quirk &&
-			   !id->curr_recv_count) {
-			/* Set the slave address in address register*/
-			cdns_i2c_writereg(id->p_msg->addr & CDNS_I2C_ADDR_MASK,
-					  CDNS_I2C_ADDR_OFFSET);
-			if (id->recv_count > CDNS_I2C_TRANSFER_SIZE) {
-				cdns_i2c_writereg(CDNS_I2C_TRANSFER_SIZE,
-						  CDNS_I2C_XFER_SIZE_OFFSET);
-				id->curr_recv_count = CDNS_I2C_TRANSFER_SIZE;
-			} else {
-				cdns_i2c_writereg(id->recv_count,
-						  CDNS_I2C_XFER_SIZE_OFFSET);
-				id->curr_recv_count = id->recv_count;
-			}
-		}
/* Clear hold (if not repeated start) and signal completion */


@ -1235,8 +1235,8 @@ static struct sock *chtls_recv_sock(struct sock *lsk,
csk->sndbuf = newsk->sk_sndbuf;
csk->smac_idx = ((struct port_info *)netdev_priv(ndev))->smt_idx;
RCV_WSCALE(tp) = select_rcv_wscale(tcp_full_space(newsk),
-					   sock_net(newsk)->
-						ipv4.sysctl_tcp_window_scaling,
+					   READ_ONCE(sock_net(newsk)->
+						     ipv4.sysctl_tcp_window_scaling),
tp->window_clamp);
neigh_release(n);
inet_inherit_port(&tcp_hashinfo, lsk, newsk);
@ -1383,7 +1383,7 @@ static void chtls_pass_accept_request(struct sock *sk,
#endif
}
if (req->tcpopt.wsf <= 14 &&
-	    sock_net(sk)->ipv4.sysctl_tcp_window_scaling) {
+	    READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_window_scaling)) {
inet_rsk(oreq)->wscale_ok = 1;
inet_rsk(oreq)->snd_wscale = req->tcpopt.wsf;
}
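The sysctl conversions in this release all follow the same pattern: these
fields can be rewritten at any moment through /proc/sys, so lockless readers
are switched to READ_ONCE() (paired with WRITE_ONCE() on the writer side) to
avoid load/store tearing. A minimal sketch of the idea (editor's illustration,
not taken from any one hunk; the function name is made up):

	/* Lockless read of a sysctl knob; pairs with WRITE_ONCE() in the
	 * sysctl handler.
	 */
	static int example_fwmark_accept(const struct sock *sk)
	{
		return READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_fwmark_accept);
	}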


@ -2287,7 +2287,7 @@ err:
/* Uses sync mcc */
int be_cmd_read_port_transceiver_data(struct be_adapter *adapter,
-				      u8 page_num, u8 *data)
+				      u8 page_num, u32 off, u32 len, u8 *data)
{
struct be_dma_mem cmd;
struct be_mcc_wrb *wrb;
@ -2321,10 +2321,10 @@ int be_cmd_read_port_transceiver_data(struct be_adapter *adapter,
req->port = cpu_to_le32(adapter->hba_port_num);
req->page_num = cpu_to_le32(page_num);
status = be_mcc_notify_wait(adapter);
-	if (!status) {
+	if (!status && len > 0) {
		struct be_cmd_resp_port_type *resp = cmd.va;
-		memcpy(data, resp->page_data, PAGE_DATA_LEN);
+		memcpy(data, resp->page_data + off, len);
}
err:
mutex_unlock(&adapter->mcc_lock);
@ -2415,7 +2415,7 @@ int be_cmd_query_cable_type(struct be_adapter *adapter)
int status;
status = be_cmd_read_port_transceiver_data(adapter, TR_PAGE_A0,
-						   page_data);
+						   0, PAGE_DATA_LEN, page_data);
if (!status) {
switch (adapter->phy.interface_type) {
case PHY_TYPE_QSFP:
@ -2440,7 +2440,7 @@ int be_cmd_query_sfp_info(struct be_adapter *adapter)
int status;
status = be_cmd_read_port_transceiver_data(adapter, TR_PAGE_A0,
-						   page_data);
+						   0, PAGE_DATA_LEN, page_data);
if (!status) {
strlcpy(adapter->phy.vendor_name, page_data +
SFP_VENDOR_NAME_OFFSET, SFP_VENDOR_NAME_LEN - 1);


@ -2427,7 +2427,7 @@ int be_cmd_set_beacon_state(struct be_adapter *adapter, u8 port_num, u8 beacon,
int be_cmd_get_beacon_state(struct be_adapter *adapter, u8 port_num,
u32 *state);
int be_cmd_read_port_transceiver_data(struct be_adapter *adapter,
-				      u8 page_num, u8 *data);
+				      u8 page_num, u32 off, u32 len, u8 *data);
int be_cmd_query_cable_type(struct be_adapter *adapter);
int be_cmd_query_sfp_info(struct be_adapter *adapter);
int lancer_cmd_read_object(struct be_adapter *adapter, struct be_dma_mem *cmd,


@ -1338,7 +1338,7 @@ static int be_get_module_info(struct net_device *netdev,
return -EOPNOTSUPP;
status = be_cmd_read_port_transceiver_data(adapter, TR_PAGE_A0,
-						   page_data);
+						   0, PAGE_DATA_LEN, page_data);
if (!status) {
if (!page_data[SFP_PLUS_SFF_8472_COMP]) {
modinfo->type = ETH_MODULE_SFF_8079;
@ -1356,25 +1356,32 @@ static int be_get_module_eeprom(struct net_device *netdev,
{
struct be_adapter *adapter = netdev_priv(netdev);
int status;
u32 begin, end;
if (!check_privilege(adapter, MAX_PRIVILEGES))
return -EOPNOTSUPP;
-	status = be_cmd_read_port_transceiver_data(adapter, TR_PAGE_A0,
-						   data);
-	if (status)
-		goto err;
+	begin = eeprom->offset;
+	end = eeprom->offset + eeprom->len;
-	if (eeprom->offset + eeprom->len > PAGE_DATA_LEN) {
-		status = be_cmd_read_port_transceiver_data(adapter,
-							   TR_PAGE_A2,
-							   data +
-							   PAGE_DATA_LEN);
+	if (begin < PAGE_DATA_LEN) {
+		status = be_cmd_read_port_transceiver_data(adapter, TR_PAGE_A0, begin,
+							   min_t(u32, end, PAGE_DATA_LEN) - begin,
+							   data);
+		if (status)
+			goto err;
+
+		data += PAGE_DATA_LEN - begin;
+		begin = PAGE_DATA_LEN;
+	}
+
+	if (end > PAGE_DATA_LEN) {
+		status = be_cmd_read_port_transceiver_data(adapter, TR_PAGE_A2,
+							   begin - PAGE_DATA_LEN,
+							   end - begin, data);
		if (status)
			goto err;
	}
-	if (eeprom->offset)
-		memcpy(data, data + eeprom->offset, eeprom->len);
err:
return be_cmd_status(status);
}


@ -10187,7 +10187,7 @@ static int i40e_reset(struct i40e_pf *pf)
**/
static void i40e_rebuild(struct i40e_pf *pf, bool reinit, bool lock_acquired)
{
-	int old_recovery_mode_bit = test_bit(__I40E_RECOVERY_MODE, pf->state);
+	const bool is_recovery_mode_reported = i40e_check_recovery_mode(pf);
struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi];
struct i40e_hw *hw = &pf->hw;
i40e_status ret;
@ -10195,13 +10195,11 @@ static void i40e_rebuild(struct i40e_pf *pf, bool reinit, bool lock_acquired)
int v;
if (test_bit(__I40E_EMP_RESET_INTR_RECEIVED, pf->state) &&
-	    i40e_check_recovery_mode(pf)) {
+	    is_recovery_mode_reported)
		i40e_set_ethtool_ops(pf->vsi[pf->lan_vsi]->netdev);
-	}
if (test_bit(__I40E_DOWN, pf->state) &&
-	    !test_bit(__I40E_RECOVERY_MODE, pf->state) &&
-	    !old_recovery_mode_bit)
+	    !test_bit(__I40E_RECOVERY_MODE, pf->state))
goto clear_recovery;
dev_dbg(&pf->pdev->dev, "Rebuilding internal switch\n");
@ -10228,13 +10226,12 @@ static void i40e_rebuild(struct i40e_pf *pf, bool reinit, bool lock_acquired)
* accordingly with regard to resources initialization
* and deinitialization
*/
-	if (test_bit(__I40E_RECOVERY_MODE, pf->state) ||
-	    old_recovery_mode_bit) {
+	if (test_bit(__I40E_RECOVERY_MODE, pf->state)) {
if (i40e_get_capabilities(pf,
i40e_aqc_opc_list_func_capabilities))
goto end_unlock;
-		if (test_bit(__I40E_RECOVERY_MODE, pf->state)) {
+		if (is_recovery_mode_reported) {
/* we're staying in recovery mode so we'll reinitialize
* misc vector here
*/


@ -1263,11 +1263,10 @@ static struct iavf_rx_buffer *iavf_get_rx_buffer(struct iavf_ring *rx_ring,
{
struct iavf_rx_buffer *rx_buffer;
-	if (!size)
-		return NULL;
	rx_buffer = &rx_ring->rx_bi[rx_ring->next_to_clean];
	prefetchw(rx_buffer->page);
+	if (!size)
+		return rx_buffer;
/* we are reusing so sync this buffer for CPU use */
dma_sync_single_range_for_cpu(rx_ring->dev,


@ -4933,6 +4933,9 @@ u32 igc_rd32(struct igc_hw *hw, u32 reg)
u8 __iomem *hw_addr = READ_ONCE(hw->hw_addr);
u32 value = 0;
if (IGC_REMOVED(hw_addr))
return ~value;
value = readl(&hw_addr[reg]);
/* reads should not return all F's */


@ -252,7 +252,8 @@ u32 igc_rd32(struct igc_hw *hw, u32 reg);
#define wr32(reg, val) \
do { \
u8 __iomem *hw_addr = READ_ONCE((hw)->hw_addr); \
-	writel((val), &hw_addr[(reg)]); \
+	if (!IGC_REMOVED(hw_addr)) \
+		writel((val), &hw_addr[(reg)]); \
} while (0)
#define rd32(reg) (igc_rd32(hw, reg))
@ -264,4 +265,6 @@ do { \
#define array_rd32(reg, offset) (igc_rd32(hw, (reg) + ((offset) << 2)))
#define IGC_REMOVED(h) unlikely(!(h))
#endif


@ -769,6 +769,7 @@ struct ixgbe_adapter {
#ifdef CONFIG_IXGBE_IPSEC
struct ixgbe_ipsec *ipsec;
#endif /* CONFIG_IXGBE_IPSEC */
spinlock_t vfs_lock;
};
static inline u8 ixgbe_max_rss_indices(struct ixgbe_adapter *adapter)


@ -6403,6 +6403,9 @@ static int ixgbe_sw_init(struct ixgbe_adapter *adapter,
/* n-tuple support exists, always init our spinlock */
spin_lock_init(&adapter->fdir_perfect_lock);
/* init spinlock to avoid concurrency of VF resources */
spin_lock_init(&adapter->vfs_lock);
#ifdef CONFIG_IXGBE_DCB
ixgbe_init_dcb(adapter);
#endif


@ -204,10 +204,13 @@ void ixgbe_enable_sriov(struct ixgbe_adapter *adapter, unsigned int max_vfs)
int ixgbe_disable_sriov(struct ixgbe_adapter *adapter)
{
unsigned int num_vfs = adapter->num_vfs, vf;
unsigned long flags;
int rss;
spin_lock_irqsave(&adapter->vfs_lock, flags);
/* set num VFs to 0 to prevent access to vfinfo */
adapter->num_vfs = 0;
spin_unlock_irqrestore(&adapter->vfs_lock, flags);
/* put the reference to all of the vf devices */
for (vf = 0; vf < num_vfs; ++vf) {
@ -1305,8 +1308,10 @@ static void ixgbe_rcv_ack_from_vf(struct ixgbe_adapter *adapter, u32 vf)
void ixgbe_msg_task(struct ixgbe_adapter *adapter)
{
struct ixgbe_hw *hw = &adapter->hw;
unsigned long flags;
u32 vf;
spin_lock_irqsave(&adapter->vfs_lock, flags);
for (vf = 0; vf < adapter->num_vfs; vf++) {
/* process any reset requests */
if (!ixgbe_check_for_rst(hw, vf))
@ -1320,6 +1325,7 @@ void ixgbe_msg_task(struct ixgbe_adapter *adapter)
if (!ixgbe_check_for_ack(hw, vf))
ixgbe_rcv_ack_from_vf(adapter, vf);
}
spin_unlock_irqrestore(&adapter->vfs_lock, flags);
}
void ixgbe_disable_tx_rx(struct ixgbe_adapter *adapter)


@ -4003,7 +4003,7 @@ static bool mlxsw_sp_fi_is_gateway(const struct mlxsw_sp *mlxsw_sp,
{
const struct fib_nh *nh = fib_info_nh(fi, 0);
-	return nh->fib_nh_scope == RT_SCOPE_LINK ||
+	return nh->fib_nh_gw_family ||
mlxsw_sp_nexthop4_ipip_type(mlxsw_sp, nh, NULL);
}
@ -8038,13 +8038,14 @@ static int mlxsw_sp_dscp_init(struct mlxsw_sp *mlxsw_sp)
static int __mlxsw_sp_router_init(struct mlxsw_sp *mlxsw_sp)
{
struct net *net = mlxsw_sp_net(mlxsw_sp);
-	bool usp = net->ipv4.sysctl_ip_fwd_update_priority;
	char rgcr_pl[MLXSW_REG_RGCR_LEN];
	u64 max_rifs;
+	bool usp;

	if (!MLXSW_CORE_RES_VALID(mlxsw_sp->core, MAX_RIFS))
		return -EIO;

	max_rifs = MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_RIFS);
+	usp = READ_ONCE(net->ipv4.sysctl_ip_fwd_update_priority);
mlxsw_reg_rgcr_pack(rgcr_pl, true, true);
mlxsw_reg_rgcr_max_router_interfaces_set(rgcr_pl, max_rifs);


@ -215,6 +215,9 @@ static void dwmac4_map_mtl_dma(struct mac_device_info *hw, u32 queue, u32 chan)
if (queue == 0 || queue == 4) {
value &= ~MTL_RXQ_DMA_Q04MDMACH_MASK;
value |= MTL_RXQ_DMA_Q04MDMACH(chan);
} else if (queue > 4) {
value &= ~MTL_RXQ_DMA_QXMDMACH_MASK(queue - 4);
value |= MTL_RXQ_DMA_QXMDMACH(chan, queue - 4);
} else {
value &= ~MTL_RXQ_DMA_QXMDMACH_MASK(queue);
value |= MTL_RXQ_DMA_QXMDMACH(chan, queue);


@ -738,19 +738,10 @@ int stmmac_init_tstamp_counter(struct stmmac_priv *priv, u32 systime_flags)
struct timespec64 now;
u32 sec_inc = 0;
u64 temp = 0;
-	int ret;
if (!(priv->dma_cap.time_stamp || priv->dma_cap.atime_stamp))
return -EOPNOTSUPP;
-	ret = clk_prepare_enable(priv->plat->clk_ptp_ref);
-	if (ret < 0) {
-		netdev_warn(priv->dev,
-			    "failed to enable PTP reference clock: %pe\n",
-			    ERR_PTR(ret));
-		return ret;
-	}
stmmac_config_hw_tstamping(priv, priv->ptpaddr, systime_flags);
priv->systime_flags = systime_flags;
@ -2755,6 +2746,14 @@ static int stmmac_hw_setup(struct net_device *dev, bool ptp_register)
stmmac_mmc_setup(priv);
+	if (ptp_register) {
+		ret = clk_prepare_enable(priv->plat->clk_ptp_ref);
+		if (ret < 0)
+			netdev_warn(priv->dev,
+				    "failed to enable PTP reference clock: %pe\n",
+				    ERR_PTR(ret));
+	}
ret = stmmac_init_ptp(priv);
if (ret == -EOPNOTSUPP)
netdev_warn(priv->dev, "PTP not supported by HW\n");


@ -814,7 +814,13 @@ static int __maybe_unused stmmac_pltfr_noirq_resume(struct device *dev)
if (ret)
return ret;
stmmac_init_tstamp_counter(priv, priv->systime_flags);
ret = clk_prepare_enable(priv->plat->clk_ptp_ref);
if (ret < 0) {
netdev_warn(priv->dev,
"failed to enable PTP reference clock: %pe\n",
ERR_PTR(ret));
return ret;
}
}
return 0;


@ -1796,7 +1796,7 @@ static const struct driver_info ax88179_info = {
.link_reset = ax88179_link_reset,
.reset = ax88179_reset,
.stop = ax88179_stop,
-	.flags = FLAG_ETHER | FLAG_FRAMING_AX,
+	.flags = FLAG_ETHER | FLAG_FRAMING_AX | FLAG_SEND_ZLP,
.rx_fixup = ax88179_rx_fixup,
.tx_fixup = ax88179_tx_fixup,
};
@ -1809,7 +1809,7 @@ static const struct driver_info ax88178a_info = {
.link_reset = ax88179_link_reset,
.reset = ax88179_reset,
.stop = ax88179_stop,
-	.flags = FLAG_ETHER | FLAG_FRAMING_AX,
+	.flags = FLAG_ETHER | FLAG_FRAMING_AX | FLAG_SEND_ZLP,
.rx_fixup = ax88179_rx_fixup,
.tx_fixup = ax88179_tx_fixup,
};
@ -1822,7 +1822,7 @@ static const struct driver_info cypress_GX3_info = {
.link_reset = ax88179_link_reset,
.reset = ax88179_reset,
.stop = ax88179_stop,
-	.flags = FLAG_ETHER | FLAG_FRAMING_AX,
+	.flags = FLAG_ETHER | FLAG_FRAMING_AX | FLAG_SEND_ZLP,
.rx_fixup = ax88179_rx_fixup,
.tx_fixup = ax88179_tx_fixup,
};
@ -1835,7 +1835,7 @@ static const struct driver_info dlink_dub1312_info = {
.link_reset = ax88179_link_reset,
.reset = ax88179_reset,
.stop = ax88179_stop,
-	.flags = FLAG_ETHER | FLAG_FRAMING_AX,
+	.flags = FLAG_ETHER | FLAG_FRAMING_AX | FLAG_SEND_ZLP,
.rx_fixup = ax88179_rx_fixup,
.tx_fixup = ax88179_tx_fixup,
};
@ -1848,7 +1848,7 @@ static const struct driver_info sitecom_info = {
.link_reset = ax88179_link_reset,
.reset = ax88179_reset,
.stop = ax88179_stop,
-	.flags = FLAG_ETHER | FLAG_FRAMING_AX,
+	.flags = FLAG_ETHER | FLAG_FRAMING_AX | FLAG_SEND_ZLP,
.rx_fixup = ax88179_rx_fixup,
.tx_fixup = ax88179_tx_fixup,
};
@ -1861,7 +1861,7 @@ static const struct driver_info samsung_info = {
.link_reset = ax88179_link_reset,
.reset = ax88179_reset,
.stop = ax88179_stop,
-	.flags = FLAG_ETHER | FLAG_FRAMING_AX,
+	.flags = FLAG_ETHER | FLAG_FRAMING_AX | FLAG_SEND_ZLP,
.rx_fixup = ax88179_rx_fixup,
.tx_fixup = ax88179_tx_fixup,
};
@ -1874,7 +1874,7 @@ static const struct driver_info lenovo_info = {
.link_reset = ax88179_link_reset,
.reset = ax88179_reset,
.stop = ax88179_stop,
-	.flags = FLAG_ETHER | FLAG_FRAMING_AX,
+	.flags = FLAG_ETHER | FLAG_FRAMING_AX | FLAG_SEND_ZLP,
.rx_fixup = ax88179_rx_fixup,
.tx_fixup = ax88179_tx_fixup,
};
@ -1887,7 +1887,7 @@ static const struct driver_info belkin_info = {
.link_reset = ax88179_link_reset,
.reset = ax88179_reset,
.stop = ax88179_stop,
-	.flags = FLAG_ETHER | FLAG_FRAMING_AX,
+	.flags = FLAG_ETHER | FLAG_FRAMING_AX | FLAG_SEND_ZLP,
.rx_fixup = ax88179_rx_fixup,
.tx_fixup = ax88179_tx_fixup,
};
@ -1900,7 +1900,7 @@ static const struct driver_info toshiba_info = {
.link_reset = ax88179_link_reset,
.reset = ax88179_reset,
.stop = ax88179_stop,
-	.flags = FLAG_ETHER | FLAG_FRAMING_AX,
+	.flags = FLAG_ETHER | FLAG_FRAMING_AX | FLAG_SEND_ZLP,
.rx_fixup = ax88179_rx_fixup,
.tx_fixup = ax88179_tx_fixup,
};
@ -1913,7 +1913,7 @@ static const struct driver_info mct_info = {
.link_reset = ax88179_link_reset,
.reset = ax88179_reset,
.stop = ax88179_stop,
-	.flags = FLAG_ETHER | FLAG_FRAMING_AX,
+	.flags = FLAG_ETHER | FLAG_FRAMING_AX | FLAG_SEND_ZLP,
.rx_fixup = ax88179_rx_fixup,
.tx_fixup = ax88179_tx_fixup,
};


@ -1118,6 +1118,10 @@ static void hv_int_desc_free(struct hv_pci_dev *hpdev,
u8 buffer[sizeof(struct pci_delete_interrupt)];
} ctxt;
if (!int_desc->vector_count) {
kfree(int_desc);
return;
}
memset(&ctxt, 0, sizeof(ctxt));
int_pkt = (struct pci_delete_interrupt *)&ctxt.pkt.message;
int_pkt->message_type.type =
@ -1180,6 +1184,28 @@ static void hv_irq_mask(struct irq_data *data)
pci_msi_mask_irq(data);
}
static unsigned int hv_msi_get_int_vector(struct irq_data *data)
{
struct irq_cfg *cfg = irqd_cfg(data);
return cfg->vector;
}
static int hv_msi_prepare(struct irq_domain *domain, struct device *dev,
int nvec, msi_alloc_info_t *info)
{
int ret = pci_msi_prepare(domain, dev, nvec, info);
/*
 * By using the interrupt remapper in the hypervisor IOMMU, contiguous
 * CPU vectors are not needed for multi-MSI.
*/
if (info->type == X86_IRQ_ALLOC_TYPE_PCI_MSI)
info->flags &= ~X86_IRQ_ALLOC_CONTIGUOUS_VECTORS;
return ret;
}
/**
* hv_irq_unmask() - "Unmask" the IRQ by setting its current
* affinity.
@ -1195,6 +1221,7 @@ static void hv_irq_unmask(struct irq_data *data)
struct msi_desc *msi_desc = irq_data_get_msi_desc(data);
struct irq_cfg *cfg = irqd_cfg(data);
struct hv_retarget_device_interrupt *params;
struct tran_int_desc *int_desc;
struct hv_pcibus_device *hbus;
struct cpumask *dest;
cpumask_var_t tmp;
@ -1209,6 +1236,7 @@ static void hv_irq_unmask(struct irq_data *data)
pdev = msi_desc_to_pci_dev(msi_desc);
pbus = pdev->bus;
hbus = container_of(pbus->sysdata, struct hv_pcibus_device, sysdata);
int_desc = data->chip_data;
spin_lock_irqsave(&hbus->retarget_msi_interrupt_lock, flags);
@ -1216,7 +1244,8 @@ static void hv_irq_unmask(struct irq_data *data)
memset(params, 0, sizeof(*params));
params->partition_id = HV_PARTITION_ID_SELF;
params->int_entry.source = 1; /* MSI(-X) */
-	hv_set_msi_entry_from_desc(&params->int_entry.msi_entry, msi_desc);
+	params->int_entry.msi_entry.address = int_desc->address & 0xffffffff;
+	params->int_entry.msi_entry.data = int_desc->data;
params->device_id = (hbus->hdev->dev_instance.b[5] << 24) |
(hbus->hdev->dev_instance.b[4] << 16) |
(hbus->hdev->dev_instance.b[7] << 8) |
@ -1317,12 +1346,12 @@ static void hv_pci_compose_compl(void *context, struct pci_response *resp,
static u32 hv_compose_msi_req_v1(
struct pci_create_interrupt *int_pkt, struct cpumask *affinity,
-	u32 slot, u8 vector)
+	u32 slot, u8 vector, u8 vector_count)
{
int_pkt->message_type.type = PCI_CREATE_INTERRUPT_MESSAGE;
int_pkt->wslot.slot = slot;
int_pkt->int_desc.vector = vector;
-	int_pkt->int_desc.vector_count = 1;
+	int_pkt->int_desc.vector_count = vector_count;
int_pkt->int_desc.delivery_mode = dest_Fixed;
/*
@ -1336,14 +1365,14 @@ static u32 hv_compose_msi_req_v1(
static u32 hv_compose_msi_req_v2(
struct pci_create_interrupt2 *int_pkt, struct cpumask *affinity,
-	u32 slot, u8 vector)
+	u32 slot, u8 vector, u8 vector_count)
{
int cpu;
int_pkt->message_type.type = PCI_CREATE_INTERRUPT_MESSAGE2;
int_pkt->wslot.slot = slot;
int_pkt->int_desc.vector = vector;
-	int_pkt->int_desc.vector_count = 1;
+	int_pkt->int_desc.vector_count = vector_count;
int_pkt->int_desc.delivery_mode = dest_Fixed;
/*
@ -1371,7 +1400,6 @@ static u32 hv_compose_msi_req_v2(
*/
static void hv_compose_msi_msg(struct irq_data *data, struct msi_msg *msg)
{
-	struct irq_cfg *cfg = irqd_cfg(data);
struct hv_pcibus_device *hbus;
struct vmbus_channel *channel;
struct hv_pci_dev *hpdev;
@ -1380,6 +1408,8 @@ static void hv_compose_msi_msg(struct irq_data *data, struct msi_msg *msg)
struct cpumask *dest;
struct compose_comp_ctxt comp;
struct tran_int_desc *int_desc;
struct msi_desc *msi_desc;
u8 vector, vector_count;
struct {
struct pci_packet pci_pkt;
union {
@ -1391,7 +1421,17 @@ static void hv_compose_msi_msg(struct irq_data *data, struct msi_msg *msg)
u32 size;
int ret;
-	pdev = msi_desc_to_pci_dev(irq_data_get_msi_desc(data));
+	/* Reuse the previous allocation */
+	if (data->chip_data) {
+		int_desc = data->chip_data;
+		msg->address_hi = int_desc->address >> 32;
+		msg->address_lo = int_desc->address & 0xffffffff;
+		msg->data = int_desc->data;
+		return;
+	}
+
+	msi_desc = irq_data_get_msi_desc(data);
+	pdev = msi_desc_to_pci_dev(msi_desc);
dest = irq_data_get_effective_affinity_mask(data);
pbus = pdev->bus;
hbus = container_of(pbus->sysdata, struct hv_pcibus_device, sysdata);
@ -1400,17 +1440,40 @@ static void hv_compose_msi_msg(struct irq_data *data, struct msi_msg *msg)
if (!hpdev)
goto return_null_message;
-	/* Free any previous message that might have already been composed. */
-	if (data->chip_data) {
-		int_desc = data->chip_data;
-		data->chip_data = NULL;
-		hv_int_desc_free(hpdev, int_desc);
-	}
int_desc = kzalloc(sizeof(*int_desc), GFP_ATOMIC);
if (!int_desc)
goto drop_reference;
if (!msi_desc->msi_attrib.is_msix && msi_desc->nvec_used > 1) {
/*
* If this is not the first MSI of Multi MSI, we already have
* a mapping. Can exit early.
*/
if (msi_desc->irq != data->irq) {
data->chip_data = int_desc;
int_desc->address = msi_desc->msg.address_lo |
(u64)msi_desc->msg.address_hi << 32;
int_desc->data = msi_desc->msg.data +
(data->irq - msi_desc->irq);
msg->address_hi = msi_desc->msg.address_hi;
msg->address_lo = msi_desc->msg.address_lo;
msg->data = int_desc->data;
put_pcichild(hpdev);
return;
}
/*
* The vector we select here is a dummy value. The correct
* value gets sent to the hypervisor in unmask(). This needs
* to be aligned with the count, and also not zero. Multi-msi
* is powers of 2 up to 32, so 32 will always work here.
*/
vector = 32;
vector_count = msi_desc->nvec_used;
} else {
vector = hv_msi_get_int_vector(data);
vector_count = 1;
}
memset(&ctxt, 0, sizeof(ctxt));
init_completion(&comp.comp_pkt.host_event);
ctxt.pci_pkt.completion_func = hv_pci_compose_compl;
@ -1421,7 +1484,8 @@ static void hv_compose_msi_msg(struct irq_data *data, struct msi_msg *msg)
size = hv_compose_msi_req_v1(&ctxt.int_pkts.v1,
dest,
hpdev->desc.win_slot.slot,
-					 cfg->vector);
+					 vector,
+					 vector_count);
break;
case PCI_PROTOCOL_VERSION_1_2:
@ -1429,7 +1493,8 @@ static void hv_compose_msi_msg(struct irq_data *data, struct msi_msg *msg)
size = hv_compose_msi_req_v2(&ctxt.int_pkts.v2,
dest,
hpdev->desc.win_slot.slot,
-					 cfg->vector);
+					 vector,
+					 vector_count);
break;
default:
@ -1545,7 +1610,7 @@ static struct irq_chip hv_msi_irq_chip = {
};
static struct msi_domain_ops hv_msi_ops = {
-	.msi_prepare	= pci_msi_prepare,
+	.msi_prepare	= hv_msi_prepare,
.msi_free = hv_msi_free,
};


@ -1303,15 +1303,17 @@ static int stm32_gpiolib_register_bank(struct stm32_pinctrl *pctl,
bank->bank_ioport_nr = bank_ioport_nr;
spin_lock_init(&bank->lock);
-	/* create irq hierarchical domain */
-	bank->fwnode = of_node_to_fwnode(np);
-
-	bank->domain = irq_domain_create_hierarchy(pctl->domain, 0,
-					STM32_GPIO_IRQ_LINE, bank->fwnode,
-					&stm32_gpio_domain_ops, bank);
-
-	if (!bank->domain)
-		return -ENODEV;
+	if (pctl->domain) {
+		/* create irq hierarchical domain */
+		bank->fwnode = of_node_to_fwnode(np);
+
+		bank->domain = irq_domain_create_hierarchy(pctl->domain, 0, STM32_GPIO_IRQ_LINE,
+							   bank->fwnode, &stm32_gpio_domain_ops,
+							   bank);
+
+		if (!bank->domain)
+			return -ENODEV;
+	}
err = gpiochip_add_data(&bank->gpio_chip, bank);
if (err) {
@ -1481,6 +1483,8 @@ int stm32_pctl_probe(struct platform_device *pdev)
pctl->domain = stm32_pctrl_get_irq_domain(np);
if (IS_ERR(pctl->domain))
return PTR_ERR(pctl->domain);
if (!pctl->domain)
dev_warn(dev, "pinctrl without interrupt support\n");
/* hwspinlock is optional */
hwlock_id = of_hwspin_lock_get_id(pdev->dev.of_node, 0);


@ -146,6 +146,7 @@ static int __init versatile_reboot_probe(void)
versatile_reboot_type = (enum versatile_reboot)reboot_id->data;
syscon_regmap = syscon_node_to_regmap(np);
of_node_put(np);
if (IS_ERR(syscon_regmap))
return PTR_ERR(syscon_regmap);


@ -56,7 +56,7 @@ static inline void
kbd_put_queue(struct tty_port *port, int ch)
{
tty_insert_flip_char(port, ch, 0);
-	tty_schedule_flip(port);
+	tty_flip_buffer_push(port);
}
static inline void
@ -64,5 +64,5 @@ kbd_puts_queue(struct tty_port *port, char *cp)
{
while (*cp)
tty_insert_flip_char(port, *cp++, 0);
-	tty_schedule_flip(port);
+	tty_flip_buffer_push(port);
}


@ -1174,10 +1174,14 @@ static void bcm2835_spi_handle_err(struct spi_controller *ctlr,
struct bcm2835_spi *bs = spi_controller_get_devdata(ctlr);
/* if an error occurred and we have an active dma, then terminate */
-	dmaengine_terminate_sync(ctlr->dma_tx);
-	bs->tx_dma_active = false;
-	dmaengine_terminate_sync(ctlr->dma_rx);
-	bs->rx_dma_active = false;
+	if (ctlr->dma_tx) {
+		dmaengine_terminate_sync(ctlr->dma_tx);
+		bs->tx_dma_active = false;
+	}
+	if (ctlr->dma_rx) {
+		dmaengine_terminate_sync(ctlr->dma_rx);
+		bs->rx_dma_active = false;
+	}
bcm2835_spi_undo_prologue(bs);
/* and reset */


@ -267,6 +267,8 @@ static int rt2880_pinmux_pins(struct rt2880_priv *p)
p->func[i]->pin_count,
sizeof(int),
GFP_KERNEL);
if (!p->func[i]->pins)
return -ENOMEM;
for (j = 0; j < p->func[i]->pin_count; j++)
p->func[i]->pins[j] = p->func[i]->pin_first + j;


@ -556,7 +556,7 @@ static void cyy_chip_rx(struct cyclades_card *cinfo, int chip,
}
info->idle_stats.recv_idle = jiffies;
}
-	tty_schedule_flip(port);
+	tty_flip_buffer_push(port);
/* end of service */
cyy_writeb(info, CyRIR, save_xir & 0x3f);
@ -996,7 +996,7 @@ static void cyz_handle_rx(struct cyclades_port *info)
mod_timer(&info->rx_full_timer, jiffies + 1);
#endif
info->idle_stats.recv_idle = jiffies;
-		tty_schedule_flip(&info->port);
+		tty_flip_buffer_push(&info->port);
/* Update rx_get */
cy_writel(&buf_ctrl->rx_get, new_rx_get);
@ -1172,7 +1172,7 @@ static void cyz_handle_cmd(struct cyclades_card *cinfo)
if (delta_count)
wake_up_interruptible(&info->port.delta_msr_wait);
if (special_count)
-			tty_schedule_flip(&info->port);
+			tty_flip_buffer_push(&info->port);
}
}


@ -151,7 +151,7 @@ static irqreturn_t goldfish_tty_interrupt(int irq, void *dev_id)
address = (unsigned long)(void *)buf;
goldfish_tty_rw(qtty, address, count, 0);
-	tty_schedule_flip(&qtty->port);
+	tty_flip_buffer_push(&qtty->port);
return IRQ_HANDLED;
}


@ -1385,7 +1385,7 @@ static int moxa_poll_port(struct moxa_port *p, unsigned int handle,
if (inited && !tty_throttled(tty) &&
MoxaPortRxQueue(p) > 0) { /* RX */
MoxaPortReadData(p);
-			tty_schedule_flip(&p->port);
+			tty_flip_buffer_push(&p->port);
}
} else {
clear_bit(EMPTYWAIT, &p->statusflags);
@ -1410,7 +1410,7 @@ static int moxa_poll_port(struct moxa_port *p, unsigned int handle,
if (tty && (intr & IntrBreak) && !I_IGNBRK(tty)) { /* BREAK */
tty_insert_flip_char(&p->port, 0, TTY_BREAK);
-		tty_schedule_flip(&p->port);
+		tty_flip_buffer_push(&p->port);
}
if (intr & IntrLine)


@ -111,21 +111,11 @@ static void pty_unthrottle(struct tty_struct *tty)
static int pty_write(struct tty_struct *tty, const unsigned char *buf, int c)
{
struct tty_struct *to = tty->link;
-	unsigned long flags;
-
-	if (tty->stopped)
+	if (tty->stopped || !c)
		return 0;
-	if (c > 0) {
-		spin_lock_irqsave(&to->port->lock, flags);
-		/* Stuff the data into the input queue of the other end */
-		c = tty_insert_flip_string(to->port, buf, c);
-		spin_unlock_irqrestore(&to->port->lock, flags);
-		/* And shovel */
-		if (c)
-			tty_flip_buffer_push(to->port);
-	}
-	return c;
+	return tty_insert_flip_string_and_push_buffer(to->port, buf, c);
}
/**


@ -344,7 +344,7 @@ static irqreturn_t serial_lpc32xx_interrupt(int irq, void *dev_id)
LPC32XX_HSUART_IIR(port->membase));
port->icount.overrun++;
tty_insert_flip_char(tport, 0, TTY_OVERRUN);
-		tty_schedule_flip(tport);
+		tty_flip_buffer_push(tport);
}
/* Data received? */


@ -443,13 +443,13 @@ static void mvebu_uart_shutdown(struct uart_port *port)
}
}
-static int mvebu_uart_baud_rate_set(struct uart_port *port, unsigned int baud)
+static unsigned int mvebu_uart_baud_rate_set(struct uart_port *port, unsigned int baud)
{
unsigned int d_divisor, m_divisor;
u32 brdv, osamp;
if (!port->uartclk)
-		return -EOPNOTSUPP;
+		return 0;
/*
* The baudrate is derived from the UART clock thanks to two divisors:
@ -473,7 +473,7 @@ static int mvebu_uart_baud_rate_set(struct uart_port *port, unsigned int baud)
osamp &= ~OSAMP_DIVISORS_MASK;
writel(osamp, port->membase + UART_OSAMP);
-	return 0;
+	return DIV_ROUND_CLOSEST(port->uartclk, d_divisor * m_divisor);
}
static void mvebu_uart_set_termios(struct uart_port *port,
@ -510,15 +510,11 @@ static void mvebu_uart_set_termios(struct uart_port *port,
max_baud = 230400;
baud = uart_get_baud_rate(port, termios, old, min_baud, max_baud);
-	if (mvebu_uart_baud_rate_set(port, baud)) {
-		/* No clock available, baudrate cannot be changed */
-		if (old)
-			baud = uart_get_baud_rate(port, old, NULL,
-						  min_baud, max_baud);
-	} else {
-		tty_termios_encode_baud_rate(termios, baud, baud);
-		uart_update_timeout(port, termios->c_cflag, baud);
-	}
+	baud = mvebu_uart_baud_rate_set(port, baud);
+
+	/* In case baudrate cannot be changed, report previous old value */
+	if (baud == 0 && old)
+		baud = tty_termios_baud_rate(old);
/* Only the following flag changes are supported */
if (old) {
@ -529,6 +525,11 @@ static void mvebu_uart_set_termios(struct uart_port *port,
termios->c_cflag |= CS8;
}
if (baud != 0) {
tty_termios_encode_baud_rate(termios, baud, baud);
uart_update_timeout(port, termios->c_cflag, baud);
}
spin_unlock_irqrestore(&port->lock, flags);
}


@ -394,27 +394,6 @@ int __tty_insert_flip_char(struct tty_port *port, unsigned char ch, char flag)
}
EXPORT_SYMBOL(__tty_insert_flip_char);
-/**
- * tty_schedule_flip - push characters to ldisc
- * @port: tty port to push from
- *
- * Takes any pending buffers and transfers their ownership to the
- * ldisc side of the queue. It then schedules those characters for
- * processing by the line discipline.
- */
-void tty_schedule_flip(struct tty_port *port)
-{
-	struct tty_bufhead *buf = &port->buf;
-
-	/* paired w/ acquire in flush_to_ldisc(); ensures
-	 * flush_to_ldisc() sees buffer data.
-	 */
-	smp_store_release(&buf->tail->commit, buf->tail->used);
-	queue_work(system_unbound_wq, &buf->work);
-}
-EXPORT_SYMBOL(tty_schedule_flip);
/**
* tty_prepare_flip_string - make room for characters
* @port: tty port
@ -544,6 +523,15 @@ static void flush_to_ldisc(struct work_struct *work)
}
static inline void tty_flip_buffer_commit(struct tty_buffer *tail)
{
/*
* Paired w/ acquire in flush_to_ldisc(); ensures flush_to_ldisc() sees
* buffer data.
*/
smp_store_release(&tail->commit, tail->used);
}
/**
* tty_flip_buffer_push - terminal
* @port: tty port to push
@ -557,10 +545,44 @@ static void flush_to_ldisc(struct work_struct *work)
void tty_flip_buffer_push(struct tty_port *port)
{
-	tty_schedule_flip(port);
+	struct tty_bufhead *buf = &port->buf;
+
+	tty_flip_buffer_commit(buf->tail);
+	queue_work(system_unbound_wq, &buf->work);
}
EXPORT_SYMBOL(tty_flip_buffer_push);
/**
* tty_insert_flip_string_and_push_buffer - add characters to the tty buffer and
* push
* @port: tty port
* @chars: characters
* @size: size
*
* The function combines tty_insert_flip_string() and tty_flip_buffer_push()
* with the exception of properly holding the @port->lock.
*
* To be used only internally (by pty currently).
*
* Returns: the number added.
*/
int tty_insert_flip_string_and_push_buffer(struct tty_port *port,
const unsigned char *chars, size_t size)
{
struct tty_bufhead *buf = &port->buf;
unsigned long flags;
spin_lock_irqsave(&port->lock, flags);
size = tty_insert_flip_string(port, chars, size);
if (size)
tty_flip_buffer_commit(buf->tail);
spin_unlock_irqrestore(&port->lock, flags);
queue_work(system_unbound_wq, &buf->work);
return size;
}
/**
* tty_buffer_init - prepare a tty buffer structure
* @port: tty port to initialise


@ -311,7 +311,7 @@ int kbd_rate(struct kbd_repeat *rpt)
static void put_queue(struct vc_data *vc, int ch)
{
tty_insert_flip_char(&vc->port, ch, 0);
-	tty_schedule_flip(&vc->port);
+	tty_flip_buffer_push(&vc->port);
}
static void puts_queue(struct vc_data *vc, char *cp)
@ -320,7 +320,7 @@ static void puts_queue(struct vc_data *vc, char *cp)
tty_insert_flip_char(&vc->port, *cp, 0);
cp++;
}
-	tty_schedule_flip(&vc->port);
+	tty_flip_buffer_push(&vc->port);
}
static void applkey(struct vc_data *vc, int key, char mode)
@ -565,7 +565,7 @@ static void fn_inc_console(struct vc_data *vc)
static void fn_send_intr(struct vc_data *vc)
{
tty_insert_flip_char(&vc->port, 0, TTY_BREAK);
-	tty_schedule_flip(&vc->port);
+	tty_flip_buffer_push(&vc->port);
}
static void fn_scroll_forw(struct vc_data *vc)


@ -1834,7 +1834,7 @@ static void csi_m(struct vc_data *vc)
static void respond_string(const char *p, size_t len, struct tty_port *port)
{
tty_insert_flip_string(port, p, len);
-	tty_schedule_flip(port);
+	tty_flip_buffer_push(port);
}
static void cursor_report(struct vc_data *vc, struct tty_struct *tty)


@ -401,7 +401,8 @@ static void __unmap_grant_pages_done(int result,
unsigned int offset = data->unmap_ops - map->unmap_ops;
for (i = 0; i < data->count; i++) {
-		WARN_ON(map->unmap_ops[offset+i].status);
+		WARN_ON(map->unmap_ops[offset+i].status &&
+			map->unmap_ops[offset+i].handle != -1);
pr_debug("unmap handle=%d st=%d\n",
map->unmap_ops[offset+i].handle,
map->unmap_ops[offset+i].status);


@ -4067,13 +4067,14 @@ static void send_repeat_remove(struct dlm_ls *ls, char *ms_name, int len)
rv = _create_message(ls, sizeof(struct dlm_message) + len,
dir_nodeid, DLM_MSG_REMOVE, &ms, &mh);
if (rv)
-		return;
+		goto out;
memcpy(ms->m_extra, name, len);
ms->m_hash = hash;
send_message(mh, ms);
out:
spin_lock(&ls->ls_remove_spin);
ls->ls_remove_len = 0;
memset(ls->ls_remove_name, 0, DLM_RESNAME_MAXLEN);


@ -1325,7 +1325,7 @@ static void io_req_clean_work(struct io_kiocb *req)
*/
static bool io_identity_cow(struct io_kiocb *req)
{
-	struct io_uring_task *tctx = current->io_uring;
+	struct io_uring_task *tctx = req->task->io_uring;
const struct cred *creds = NULL;
struct io_identity *id;


@ -41,6 +41,22 @@
#define __bf_shf(x) (__builtin_ffsll(x) - 1)
#define __scalar_type_to_unsigned_cases(type) \
unsigned type: (unsigned type)0, \
signed type: (unsigned type)0
#define __unsigned_scalar_typeof(x) typeof( \
_Generic((x), \
char: (unsigned char)0, \
__scalar_type_to_unsigned_cases(char), \
__scalar_type_to_unsigned_cases(short), \
__scalar_type_to_unsigned_cases(int), \
__scalar_type_to_unsigned_cases(long), \
__scalar_type_to_unsigned_cases(long long), \
default: (x)))
#define __bf_cast_unsigned(type, x) ((__unsigned_scalar_typeof(type))(x))
#define __BF_FIELD_CHECK(_mask, _reg, _val, _pfx) \
({ \
BUILD_BUG_ON_MSG(!__builtin_constant_p(_mask), \
@ -49,7 +65,8 @@
BUILD_BUG_ON_MSG(__builtin_constant_p(_val) ? \
~((_mask) >> __bf_shf(_mask)) & (_val) : 0, \
_pfx "value too large for the field"); \
-	BUILD_BUG_ON_MSG((_mask) > (typeof(_reg))~0ull,		\
+	BUILD_BUG_ON_MSG(__bf_cast_unsigned(_mask, _mask) >	\
+			 __bf_cast_unsigned(_reg, ~0ull),	\
_pfx "type of reg too small for mask"); \
__BUILD_BUG_ON_NOT_POWER_OF_2((_mask) + \
(1ULL << __bf_shf(_mask))); \
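/* Editor's illustration of the failure mode being fixed (assumed example,
 * not taken from the patch). With signed operands the old check
 *
 *	BUILD_BUG_ON_MSG((_mask) > (typeof(_reg))~0ull, ...)
 *
 * compared against (int)~0ull == -1 for an int-typed register, so e.g.
 *
 *	int reg = some_read();
 *	FIELD_GET(0x00ff, reg);
 *
 * evaluated 0xff > -1 as signed arithmetic and failed the build with
 * "type of reg too small for mask" although an 8-bit mask plainly fits
 * in 32 bits. Casting both sides through __bf_cast_unsigned() restores
 * the intended width comparison.
 */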


@ -12,7 +12,6 @@ extern int tty_insert_flip_string_fixed_flag(struct tty_port *port,
extern int tty_prepare_flip_string(struct tty_port *port,
unsigned char **chars, size_t size);
extern void tty_flip_buffer_push(struct tty_port *port);
-void tty_schedule_flip(struct tty_port *port);
int __tty_insert_flip_char(struct tty_port *port, unsigned char ch, char flag);
static inline int tty_insert_flip_char(struct tty_port *port,
@ -40,4 +39,7 @@ static inline int tty_insert_flip_string(struct tty_port *port,
extern void tty_buffer_lock_exclusive(struct tty_port *port);
extern void tty_buffer_unlock_exclusive(struct tty_port *port);
int tty_insert_flip_string_and_push_buffer(struct tty_port *port,
const unsigned char *chars, size_t cnt);
#endif /* _LINUX_TTY_FLIP_H */
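Together with the three call-site conversions above, this header change retires tty_schedule_flip() entirely: receive paths now push data to the line discipline immediately, and callers that need the insert and the push to happen atomically under the port lock get the new tty_insert_flip_string_and_push_buffer(). A hedged sketch of the resulting pattern (demo_rx() is a hypothetical receive handler, not from the patch):

	static void demo_rx(struct tty_port *port, const unsigned char *buf,
			    size_t count)
	{
		tty_insert_flip_string(port, buf, count);
		tty_flip_buffer_push(port);	/* was: tty_schedule_flip(port) */
	}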

@@ -422,6 +422,71 @@ out:
return NULL;
}
/* Shall not be called with lock_sock held */
static inline struct sk_buff *bt_skb_sendmsg(struct sock *sk,
struct msghdr *msg,
size_t len, size_t mtu,
size_t headroom, size_t tailroom)
{
struct sk_buff *skb;
size_t size = min_t(size_t, len, mtu);
int err;
skb = bt_skb_send_alloc(sk, size + headroom + tailroom,
msg->msg_flags & MSG_DONTWAIT, &err);
if (!skb)
return ERR_PTR(err);
skb_reserve(skb, headroom);
skb_tailroom_reserve(skb, mtu, tailroom);
if (!copy_from_iter_full(skb_put(skb, size), size, &msg->msg_iter)) {
kfree_skb(skb);
return ERR_PTR(-EFAULT);
}
skb->priority = sk->sk_priority;
return skb;
}
/* Similar to bt_skb_sendmsg but can split the msg into multiple fragments
* according to the MTU.
*/
static inline struct sk_buff *bt_skb_sendmmsg(struct sock *sk,
struct msghdr *msg,
size_t len, size_t mtu,
size_t headroom, size_t tailroom)
{
struct sk_buff *skb, **frag;
skb = bt_skb_sendmsg(sk, msg, len, mtu, headroom, tailroom);
if (IS_ERR_OR_NULL(skb))
return skb;
len -= skb->len;
if (!len)
return skb;
/* Add remaining data over MTU as continuation fragments */
frag = &skb_shinfo(skb)->frag_list;
while (len) {
struct sk_buff *tmp;
tmp = bt_skb_sendmsg(sk, msg, len, mtu, headroom, tailroom);
if (IS_ERR(tmp)) {
return skb;
}
len -= tmp->len;
*frag = tmp;
frag = &(*frag)->next;
}
return skb;
}
int bt_to_errno(u16 code);
void hci_sock_set_flag(struct sock *sk, int nr);
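In short, bt_skb_sendmsg() copies at most one MTU's worth of the message into a freshly allocated skb, and bt_skb_sendmmsg() chains further MTU-sized skbs onto skb_shinfo(skb)->frag_list until the message is consumed. A hedged sketch of the intended caller shape (demo_xmit() is a made-up transmit hook; the real conversions are the RFCOMM and SCO hunks further down):

	static int demo_sendmsg(struct sock *sk, struct msghdr *msg, size_t len,
				size_t mtu)
	{
		struct sk_buff *skb;

		skb = bt_skb_sendmmsg(sk, msg, len, mtu, 0, 0);
		if (IS_ERR(skb))
			return PTR_ERR(skb);

		/* hypothetical transport hook; consumes skb and its frag_list */
		return demo_xmit(sk, skb);
	}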

@@ -107,7 +107,8 @@ static inline struct inet_request_sock *inet_rsk(const struct request_sock *sk)
static inline u32 inet_request_mark(const struct sock *sk, struct sk_buff *skb)
{
if (!sk->sk_mark && sock_net(sk)->ipv4.sysctl_tcp_fwmark_accept)
if (!sk->sk_mark &&
READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_fwmark_accept))
return skb->mark;
return sk->sk_mark;
@@ -369,7 +370,7 @@ static inline bool inet_get_convert_csum(struct sock *sk)
static inline bool inet_can_nonlocal_bind(struct net *net,
struct inet_sock *inet)
{
return net->ipv4.sysctl_ip_nonlocal_bind ||
return READ_ONCE(net->ipv4.sysctl_ip_nonlocal_bind) ||
inet->freebind || inet->transparent;
}
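This is the template for the dozens of sysctl hunks that follow: the knobs are plain integers updated locklessly by the proc handler, so every lockless reader gains a READ_ONCE() and the writer side (see the ipv4_privileged_ports() hunk further down) a matching WRITE_ONCE(), which stops the compiler from tearing, fusing, or refetching the access. A minimal sketch of both halves, using a made-up knob rather than any real sysctl:

	static int demo_sysctl_knob;	/* hypothetical lockless backing store */

	static bool demo_feature_enabled(void)
	{
		return READ_ONCE(demo_sysctl_knob) != 0;	/* reader side */
	}

	static void demo_sysctl_store(int val)
	{
		WRITE_ONCE(demo_sysctl_knob, val);	/* writer (proc handler) */
	}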

@@ -360,7 +360,7 @@ static inline bool sysctl_dev_name_is_allowed(const char *name)
static inline bool inet_port_requires_bind_service(struct net *net, unsigned short port)
{
return port < net->ipv4.sysctl_ip_prot_sock;
return port < READ_ONCE(net->ipv4.sysctl_ip_prot_sock);
}
#else
@@ -392,7 +392,7 @@ void ipfrag_init(void);
void ip_static_sysctl_init(void);
#define IP4_REPLY_MARK(net, mark) \
((net)->ipv4.sysctl_fwmark_reflect ? (mark) : 0)
(READ_ONCE((net)->ipv4.sysctl_fwmark_reflect) ? (mark) : 0)
static inline bool ip_is_fragment(const struct iphdr *iph)
{
@@ -453,7 +453,7 @@ static inline unsigned int ip_dst_mtu_maybe_forward(const struct dst_entry *dst,
struct net *net = dev_net(dst->dev);
unsigned int mtu;
if (net->ipv4.sysctl_ip_fwd_use_pmtu ||
if (READ_ONCE(net->ipv4.sysctl_ip_fwd_use_pmtu) ||
ip_mtu_locked(dst) ||
!forwarding)
return dst_mtu(dst);

@@ -1385,8 +1385,8 @@ static inline void tcp_slow_start_after_idle_check(struct sock *sk)
struct tcp_sock *tp = tcp_sk(sk);
s32 delta;
if (!sock_net(sk)->ipv4.sysctl_tcp_slow_start_after_idle || tp->packets_out ||
ca_ops->cong_control)
if (!READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_slow_start_after_idle) ||
tp->packets_out || ca_ops->cong_control)
return;
delta = tcp_jiffies32 - tp->lsndtime;
if (delta > inet_csk(sk)->icsk_rto)
@@ -1452,21 +1452,24 @@ static inline int keepalive_intvl_when(const struct tcp_sock *tp)
{
struct net *net = sock_net((struct sock *)tp);
return tp->keepalive_intvl ? : net->ipv4.sysctl_tcp_keepalive_intvl;
return tp->keepalive_intvl ? :
READ_ONCE(net->ipv4.sysctl_tcp_keepalive_intvl);
}
static inline int keepalive_time_when(const struct tcp_sock *tp)
{
struct net *net = sock_net((struct sock *)tp);
return tp->keepalive_time ? : net->ipv4.sysctl_tcp_keepalive_time;
return tp->keepalive_time ? :
READ_ONCE(net->ipv4.sysctl_tcp_keepalive_time);
}
static inline int keepalive_probes(const struct tcp_sock *tp)
{
struct net *net = sock_net((struct sock *)tp);
return tp->keepalive_probes ? : net->ipv4.sysctl_tcp_keepalive_probes;
return tp->keepalive_probes ? :
READ_ONCE(net->ipv4.sysctl_tcp_keepalive_probes);
}
static inline u32 keepalive_time_elapsed(const struct tcp_sock *tp)
@@ -1479,7 +1482,8 @@ static inline u32 keepalive_time_elapsed(const struct tcp_sock *tp)
static inline int tcp_fin_time(const struct sock *sk)
{
int fin_timeout = tcp_sk(sk)->linger2 ? : sock_net(sk)->ipv4.sysctl_tcp_fin_timeout;
int fin_timeout = tcp_sk(sk)->linger2 ? :
READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_fin_timeout);
const int rto = inet_csk(sk)->icsk_rto;
if (fin_timeout < (rto << 2) - (rto >> 1))
@@ -1974,7 +1978,7 @@ void __tcp_v4_send_check(struct sk_buff *skb, __be32 saddr, __be32 daddr);
static inline u32 tcp_notsent_lowat(const struct tcp_sock *tp)
{
struct net *net = sock_net((struct sock *)tp);
return tp->notsent_lowat ?: net->ipv4.sysctl_tcp_notsent_lowat;
return tp->notsent_lowat ?: READ_ONCE(net->ipv4.sysctl_tcp_notsent_lowat);
}
/* @wake is one when sk_stream_write_space() calls us.

@@ -259,7 +259,7 @@ static inline bool udp_sk_bound_dev_eq(struct net *net, int bound_dev_if,
int dif, int sdif)
{
#if IS_ENABLED(CONFIG_NET_L3_MASTER_DEV)
return inet_bound_dev_eq(!!net->ipv4.sysctl_udp_l3mdev_accept,
return inet_bound_dev_eq(!!READ_ONCE(net->ipv4.sysctl_udp_l3mdev_accept),
bound_dev_if, dif, sdif);
#else
return inet_bound_dev_eq(true, bound_dev_if, dif, sdif);

@@ -68,11 +68,13 @@ void *bpf_internal_load_pointer_neg_helper(const struct sk_buff *skb, int k, uns
{
u8 *ptr = NULL;
if (k >= SKF_NET_OFF)
if (k >= SKF_NET_OFF) {
ptr = skb_network_header(skb) + k - SKF_NET_OFF;
else if (k >= SKF_LL_OFF)
} else if (k >= SKF_LL_OFF) {
if (unlikely(!skb_mac_header_was_set(skb)))
return NULL;
ptr = skb_mac_header(skb) + k - SKF_LL_OFF;
}
if (ptr >= skb->head && ptr + size <= skb_tail_pointer(skb))
return ptr;

@@ -6182,10 +6182,10 @@ again:
if (!atomic_inc_not_zero(&event->rb->mmap_count)) {
/*
* Raced against perf_mmap_close() through
* perf_event_set_output(). Try again, hope for better
* luck.
* Raced against perf_mmap_close(); remove the
* event and try again.
*/
ring_buffer_attach(event, NULL);
mutex_unlock(&event->mmap_mutex);
goto again;
}
@@ -11542,14 +11542,25 @@ err_size:
goto out;
}
static void mutex_lock_double(struct mutex *a, struct mutex *b)
{
if (b < a)
swap(a, b);
mutex_lock(a);
mutex_lock_nested(b, SINGLE_DEPTH_NESTING);
}
static int
perf_event_set_output(struct perf_event *event, struct perf_event *output_event)
{
struct perf_buffer *rb = NULL;
int ret = -EINVAL;
if (!output_event)
if (!output_event) {
mutex_lock(&event->mmap_mutex);
goto set;
}
/* don't allow circular references */
if (event == output_event)
@@ -11587,8 +11598,15 @@ perf_event_set_output(struct perf_event *event, struct perf_event *output_event)
event->pmu != output_event->pmu)
goto out;
/*
* Hold both mmap_mutex to serialize against perf_mmap_close(). Since
* output_event is already on rb->event_list, and the list iteration
* restarts after every removal, it is guaranteed this new event is
* observed *OR* if output_event is already removed, it's guaranteed we
* observe !rb->mmap_count.
*/
mutex_lock_double(&event->mmap_mutex, &output_event->mmap_mutex);
set:
mutex_lock(&event->mmap_mutex);
/* Can't redirect output if we've got an active mmap() */
if (atomic_read(&event->mmap_count))
goto unlock;
@@ -11598,6 +11616,12 @@ set:
rb = ring_buffer_get(output_event);
if (!rb)
goto unlock;
/* did we race against perf_mmap_close() */
if (!atomic_read(&rb->mmap_count)) {
ring_buffer_put(rb);
goto unlock;
}
}
ring_buffer_attach(event, rb);
@@ -11605,20 +11629,13 @@ set:
ret = 0;
unlock:
mutex_unlock(&event->mmap_mutex);
if (output_event)
mutex_unlock(&output_event->mmap_mutex);
out:
return ret;
}
static void mutex_lock_double(struct mutex *a, struct mutex *b)
{
if (b < a)
swap(a, b);
mutex_lock(a);
mutex_lock_nested(b, SINGLE_DEPTH_NESTING);
}
static int perf_event_set_clock(struct perf_event *event, clockid_t clk_id)
{
bool nmi_safe = false;
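The reason the locking moved is easiest to see as a timeline. Before the fix only event->mmap_mutex was held, so the buffer could die between lookup and attach; a reconstruction of the window being closed (annotation only, not code from the patch):

	/*
	 *   CPU0: perf_event_set_output()      CPU1: perf_mmap_close()
	 *     rb = ring_buffer_get(output_event);
	 *                                         detaches rb from its events;
	 *                                         rb->mmap_count reaches zero
	 *     ring_buffer_attach(event, rb);    <- event now uses a dead buffer
	 *
	 * With both mmap_mutexes held (taken in address order by the relocated
	 * mutex_lock_double()), CPU1 must wait, and the new
	 * !atomic_read(&rb->mmap_count) re-check rejects a buffer that was
	 * already torn down before the locks were taken.
	 */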

@@ -1563,7 +1563,10 @@ static void enqueue_task_dl(struct rq *rq, struct task_struct *p, int flags)
* the throttle.
*/
p->dl.dl_throttled = 0;
BUG_ON(!is_dl_boosted(&p->dl) || flags != ENQUEUE_REPLENISH);
if (!(flags & ENQUEUE_REPLENISH))
printk_deferred_once("sched: DL de-boosted task PID %d: REPLENISH flag missing\n",
task_pid_nr(p));
return;
}

@@ -34,6 +34,27 @@ MODULE_LICENSE("GPL");
#define WATCH_QUEUE_NOTE_SIZE 128
#define WATCH_QUEUE_NOTES_PER_PAGE (PAGE_SIZE / WATCH_QUEUE_NOTE_SIZE)
/*
* This must be called under the RCU read-lock, which makes
* sure that the wqueue still exists. It can then take the lock,
* and check that the wqueue hasn't been destroyed, which in
* turn makes sure that the notification pipe still exists.
*/
static inline bool lock_wqueue(struct watch_queue *wqueue)
{
spin_lock_bh(&wqueue->lock);
if (unlikely(wqueue->defunct)) {
spin_unlock_bh(&wqueue->lock);
return false;
}
return true;
}
static inline void unlock_wqueue(struct watch_queue *wqueue)
{
spin_unlock_bh(&wqueue->lock);
}
static void watch_queue_pipe_buf_release(struct pipe_inode_info *pipe,
struct pipe_buffer *buf)
{
@@ -69,6 +90,10 @@ static const struct pipe_buf_operations watch_queue_pipe_buf_ops = {
/*
* Post a notification to a watch queue.
*
* Must be called with the RCU lock for reading, and the
* watch_queue lock held, which guarantees that the pipe
* hasn't been released.
*/
static bool post_one_notification(struct watch_queue *wqueue,
struct watch_notification *n)
@@ -85,9 +110,6 @@ static bool post_one_notification(struct watch_queue *wqueue,
spin_lock_irq(&pipe->rd_wait.lock);
if (wqueue->defunct)
goto out;
mask = pipe->ring_size - 1;
head = pipe->head;
tail = pipe->tail;
@@ -203,7 +225,10 @@ void __post_watch_notification(struct watch_list *wlist,
if (security_post_notification(watch->cred, cred, n) < 0)
continue;
post_one_notification(wqueue, n);
if (lock_wqueue(wqueue)) {
post_one_notification(wqueue, n);
unlock_wqueue(wqueue);
}
}
rcu_read_unlock();
@@ -465,11 +490,12 @@ int add_watch_to_object(struct watch *watch, struct watch_list *wlist)
return -EAGAIN;
}
spin_lock_bh(&wqueue->lock);
kref_get(&wqueue->usage);
kref_get(&watch->usage);
hlist_add_head(&watch->queue_node, &wqueue->watches);
spin_unlock_bh(&wqueue->lock);
if (lock_wqueue(wqueue)) {
kref_get(&wqueue->usage);
kref_get(&watch->usage);
hlist_add_head(&watch->queue_node, &wqueue->watches);
unlock_wqueue(wqueue);
}
hlist_add_head(&watch->list_node, &wlist->watchers);
return 0;
@@ -523,20 +549,15 @@ found:
wqueue = rcu_dereference(watch->queue);
/* We don't need the watch list lock for the next bit as RCU is
* protecting *wqueue from deallocation.
*/
if (wqueue) {
if (lock_wqueue(wqueue)) {
post_one_notification(wqueue, &n.watch);
spin_lock_bh(&wqueue->lock);
if (!hlist_unhashed(&watch->queue_node)) {
hlist_del_init_rcu(&watch->queue_node);
put_watch(watch);
}
spin_unlock_bh(&wqueue->lock);
unlock_wqueue(wqueue);
}
if (wlist->release_watch) {

@@ -374,7 +374,7 @@ static void mpol_rebind_preferred(struct mempolicy *pol,
*/
static void mpol_rebind_policy(struct mempolicy *pol, const nodemask_t *newmask)
{
if (!pol)
if (!pol || pol->mode == MPOL_LOCAL)
return;
if (!mpol_store_user_nodemask(pol) && !(pol->flags & MPOL_F_LOCAL) &&
nodes_equal(pol->w.cpuset_mems_allowed, *newmask))

@@ -278,9 +278,7 @@ static int register_vlan_device(struct net_device *real_dev, u16 vlan_id)
return 0;
out_free_newdev:
if (new_dev->reg_state == NETREG_UNINITIALIZED ||
new_dev->reg_state == NETREG_UNREGISTERED)
free_netdev(new_dev);
free_netdev(new_dev);
return err;
}

@@ -549,22 +549,58 @@ struct rfcomm_dlc *rfcomm_dlc_exists(bdaddr_t *src, bdaddr_t *dst, u8 channel)
return dlc;
}
int rfcomm_dlc_send(struct rfcomm_dlc *d, struct sk_buff *skb)
static int rfcomm_dlc_send_frag(struct rfcomm_dlc *d, struct sk_buff *frag)
{
int len = skb->len;
if (d->state != BT_CONNECTED)
return -ENOTCONN;
int len = frag->len;
BT_DBG("dlc %p mtu %d len %d", d, d->mtu, len);
if (len > d->mtu)
return -EINVAL;
rfcomm_make_uih(skb, d->addr);
skb_queue_tail(&d->tx_queue, skb);
rfcomm_make_uih(frag, d->addr);
__skb_queue_tail(&d->tx_queue, frag);
if (!test_bit(RFCOMM_TX_THROTTLED, &d->flags))
return len;
}
int rfcomm_dlc_send(struct rfcomm_dlc *d, struct sk_buff *skb)
{
unsigned long flags;
struct sk_buff *frag, *next;
int len;
if (d->state != BT_CONNECTED)
return -ENOTCONN;
frag = skb_shinfo(skb)->frag_list;
skb_shinfo(skb)->frag_list = NULL;
/* Queue all fragments atomically. */
spin_lock_irqsave(&d->tx_queue.lock, flags);
len = rfcomm_dlc_send_frag(d, skb);
if (len < 0 || !frag)
goto unlock;
for (; frag; frag = next) {
int ret;
next = frag->next;
ret = rfcomm_dlc_send_frag(d, frag);
if (ret < 0) {
kfree_skb(frag);
goto unlock;
}
len += ret;
}
unlock:
spin_unlock_irqrestore(&d->tx_queue.lock, flags);
if (len > 0 && !test_bit(RFCOMM_TX_THROTTLED, &d->flags))
rfcomm_schedule();
return len;
}

@@ -575,47 +575,21 @@ static int rfcomm_sock_sendmsg(struct socket *sock, struct msghdr *msg,
lock_sock(sk);
sent = bt_sock_wait_ready(sk, msg->msg_flags);
if (sent)
goto done;
while (len) {
size_t size = min_t(size_t, len, d->mtu);
int err;
skb = sock_alloc_send_skb(sk, size + RFCOMM_SKB_RESERVE,
msg->msg_flags & MSG_DONTWAIT, &err);
if (!skb) {
if (sent == 0)
sent = err;
break;
}
skb_reserve(skb, RFCOMM_SKB_HEAD_RESERVE);
err = memcpy_from_msg(skb_put(skb, size), msg, size);
if (err) {
kfree_skb(skb);
if (sent == 0)
sent = err;
break;
}
skb->priority = sk->sk_priority;
err = rfcomm_dlc_send(d, skb);
if (err < 0) {
kfree_skb(skb);
if (sent == 0)
sent = err;
break;
}
sent += size;
len -= size;
}
done:
release_sock(sk);
if (sent)
return sent;
skb = bt_skb_sendmmsg(sk, msg, len, d->mtu, RFCOMM_SKB_HEAD_RESERVE,
RFCOMM_SKB_TAIL_RESERVE);
if (IS_ERR(skb))
return PTR_ERR(skb);
sent = rfcomm_dlc_send(d, skb);
if (sent < 0)
kfree_skb(skb);
return sent;
}

@@ -280,12 +280,10 @@ static int sco_connect(struct hci_dev *hdev, struct sock *sk)
return err;
}
static int sco_send_frame(struct sock *sk, void *buf, int len,
unsigned int msg_flags)
static int sco_send_frame(struct sock *sk, struct sk_buff *skb)
{
struct sco_conn *conn = sco_pi(sk)->conn;
struct sk_buff *skb;
int err;
int len = skb->len;
/* Check outgoing MTU */
if (len > conn->mtu)
@@ -293,11 +291,6 @@ static int sco_send_frame(struct sock *sk, void *buf, int len,
BT_DBG("sk %p len %d", sk, len);
skb = bt_skb_send_alloc(sk, len, msg_flags & MSG_DONTWAIT, &err);
if (!skb)
return err;
memcpy(skb_put(skb, len), buf, len);
hci_send_sco(conn->hcon, skb);
return len;
@@ -727,7 +720,7 @@ static int sco_sock_sendmsg(struct socket *sock, struct msghdr *msg,
size_t len)
{
struct sock *sk = sock->sk;
void *buf;
struct sk_buff *skb;
int err;
BT_DBG("sock %p, sk %p", sock, sk);
@@ -739,24 +732,21 @@ static int sco_sock_sendmsg(struct socket *sock, struct msghdr *msg,
if (msg->msg_flags & MSG_OOB)
return -EOPNOTSUPP;
buf = kmalloc(len, GFP_KERNEL);
if (!buf)
return -ENOMEM;
if (memcpy_from_msg(buf, msg, len)) {
kfree(buf);
return -EFAULT;
}
skb = bt_skb_sendmsg(sk, msg, len, len, 0, 0);
if (IS_ERR(skb))
return PTR_ERR(skb);
lock_sock(sk);
if (sk->sk_state == BT_CONNECTED)
err = sco_send_frame(sk, buf, len, msg->msg_flags);
err = sco_send_frame(sk, skb);
else
err = -ENOTCONN;
release_sock(sk);
kfree(buf);
if (err < 0)
kfree_skb(skb);
return err;
}

@@ -5755,7 +5755,7 @@ static void flush_all_backlogs(void)
}
/* we can have in flight packet[s] on the cpus we are not flushing,
* synchronize_net() in rollback_registered_many() will take care of
* synchronize_net() in unregister_netdevice_many() will take care of
* them
*/
for_each_cpu(cpu, &flush_cpus)
@@ -9513,106 +9513,6 @@ static void net_set_todo(struct net_device *dev)
dev_net(dev)->dev_unreg_count++;
}
static void rollback_registered_many(struct list_head *head)
{
struct net_device *dev, *tmp;
LIST_HEAD(close_head);
BUG_ON(dev_boot_phase);
ASSERT_RTNL();
list_for_each_entry_safe(dev, tmp, head, unreg_list) {
/* Some devices call without registering
* for initialization unwind. Remove those
* devices and proceed with the remaining.
*/
if (dev->reg_state == NETREG_UNINITIALIZED) {
pr_debug("unregister_netdevice: device %s/%p never was registered\n",
dev->name, dev);
WARN_ON(1);
list_del(&dev->unreg_list);
continue;
}
dev->dismantle = true;
BUG_ON(dev->reg_state != NETREG_REGISTERED);
}
/* If device is running, close it first. */
list_for_each_entry(dev, head, unreg_list)
list_add_tail(&dev->close_list, &close_head);
dev_close_many(&close_head, true);
list_for_each_entry(dev, head, unreg_list) {
/* And unlink it from device chain. */
unlist_netdevice(dev);
dev->reg_state = NETREG_UNREGISTERING;
}
flush_all_backlogs();
synchronize_net();
list_for_each_entry(dev, head, unreg_list) {
struct sk_buff *skb = NULL;
/* Shutdown queueing discipline. */
dev_shutdown(dev);
dev_xdp_uninstall(dev);
/* Notify protocols, that we are about to destroy
* this device. They should clean all the things.
*/
call_netdevice_notifiers(NETDEV_UNREGISTER, dev);
if (!dev->rtnl_link_ops ||
dev->rtnl_link_state == RTNL_LINK_INITIALIZED)
skb = rtmsg_ifinfo_build_skb(RTM_DELLINK, dev, ~0U, 0,
GFP_KERNEL, NULL, 0);
/*
* Flush the unicast and multicast chains
*/
dev_uc_flush(dev);
dev_mc_flush(dev);
netdev_name_node_alt_flush(dev);
netdev_name_node_free(dev->name_node);
if (dev->netdev_ops->ndo_uninit)
dev->netdev_ops->ndo_uninit(dev);
if (skb)
rtmsg_ifinfo_send(skb, dev, GFP_KERNEL);
/* Notifier chain MUST detach us all upper devices. */
WARN_ON(netdev_has_any_upper_dev(dev));
WARN_ON(netdev_has_any_lower_dev(dev));
/* Remove entries from kobject tree */
netdev_unregister_kobject(dev);
#ifdef CONFIG_XPS
/* Remove XPS queueing entries */
netif_reset_xps_queues_gt(dev, 0);
#endif
}
synchronize_net();
list_for_each_entry(dev, head, unreg_list)
dev_put(dev);
}
static void rollback_registered(struct net_device *dev)
{
LIST_HEAD(single);
list_add(&dev->unreg_list, &single);
rollback_registered_many(&single);
list_del(&single);
}
static netdev_features_t netdev_sync_upper_features(struct net_device *lower,
struct net_device *upper, netdev_features_t features)
{
@@ -10149,17 +10049,10 @@ int register_netdevice(struct net_device *dev)
ret = call_netdevice_notifiers(NETDEV_REGISTER, dev);
ret = notifier_to_errno(ret);
if (ret) {
rollback_registered(dev);
rcu_barrier();
dev->reg_state = NETREG_UNREGISTERED;
/* We should put the kobject that hold in
* netdev_unregister_kobject(), otherwise
* the net device cannot be freed when
* driver calls free_netdev(), because the
* kobject is being hold.
*/
kobject_put(&dev->dev.kobj);
/* Expect explicit free_netdev() on failure */
dev->needs_free_netdev = false;
unregister_netdevice_queue(dev, NULL);
goto out;
}
/*
* Prevent userspace races by waiting until the network
@@ -10688,6 +10581,17 @@ void free_netdev(struct net_device *dev)
struct napi_struct *p, *n;
might_sleep();
/* When called immediately after register_netdevice() failed the unwind
* handling may still be dismantling the device. Handle that case by
* deferring the free.
*/
if (dev->reg_state == NETREG_UNREGISTERING) {
ASSERT_RTNL();
dev->needs_free_netdev = true;
return;
}
netif_free_tx_queues(dev);
netif_free_rx_queues(dev);
@@ -10754,9 +10658,10 @@ void unregister_netdevice_queue(struct net_device *dev, struct list_head *head)
if (head) {
list_move_tail(&dev->unreg_list, head);
} else {
rollback_registered(dev);
/* Finish processing unregister after unlock */
net_set_todo(dev);
LIST_HEAD(single);
list_add(&dev->unreg_list, &single);
unregister_netdevice_many(&single);
}
}
EXPORT_SYMBOL(unregister_netdevice_queue);
@@ -10770,14 +10675,100 @@ EXPORT_SYMBOL(unregister_netdevice_queue);
*/
void unregister_netdevice_many(struct list_head *head)
{
struct net_device *dev;
struct net_device *dev, *tmp;
LIST_HEAD(close_head);
if (!list_empty(head)) {
rollback_registered_many(head);
list_for_each_entry(dev, head, unreg_list)
net_set_todo(dev);
list_del(head);
BUG_ON(dev_boot_phase);
ASSERT_RTNL();
if (list_empty(head))
return;
list_for_each_entry_safe(dev, tmp, head, unreg_list) {
/* Some devices call without registering
* for initialization unwind. Remove those
* devices and proceed with the remaining.
*/
if (dev->reg_state == NETREG_UNINITIALIZED) {
pr_debug("unregister_netdevice: device %s/%p never was registered\n",
dev->name, dev);
WARN_ON(1);
list_del(&dev->unreg_list);
continue;
}
dev->dismantle = true;
BUG_ON(dev->reg_state != NETREG_REGISTERED);
}
/* If device is running, close it first. */
list_for_each_entry(dev, head, unreg_list)
list_add_tail(&dev->close_list, &close_head);
dev_close_many(&close_head, true);
list_for_each_entry(dev, head, unreg_list) {
/* And unlink it from device chain. */
unlist_netdevice(dev);
dev->reg_state = NETREG_UNREGISTERING;
}
flush_all_backlogs();
synchronize_net();
list_for_each_entry(dev, head, unreg_list) {
struct sk_buff *skb = NULL;
/* Shutdown queueing discipline. */
dev_shutdown(dev);
dev_xdp_uninstall(dev);
/* Notify protocols, that we are about to destroy
* this device. They should clean all the things.
*/
call_netdevice_notifiers(NETDEV_UNREGISTER, dev);
if (!dev->rtnl_link_ops ||
dev->rtnl_link_state == RTNL_LINK_INITIALIZED)
skb = rtmsg_ifinfo_build_skb(RTM_DELLINK, dev, ~0U, 0,
GFP_KERNEL, NULL, 0);
/*
* Flush the unicast and multicast chains
*/
dev_uc_flush(dev);
dev_mc_flush(dev);
netdev_name_node_alt_flush(dev);
netdev_name_node_free(dev->name_node);
if (dev->netdev_ops->ndo_uninit)
dev->netdev_ops->ndo_uninit(dev);
if (skb)
rtmsg_ifinfo_send(skb, dev, GFP_KERNEL);
/* Notifier chain MUST detach us all upper devices. */
WARN_ON(netdev_has_any_upper_dev(dev));
WARN_ON(netdev_has_any_lower_dev(dev));
/* Remove entries from kobject tree */
netdev_unregister_kobject(dev);
#ifdef CONFIG_XPS
/* Remove XPS queueing entries */
netif_reset_xps_queues_gt(dev, 0);
#endif
}
synchronize_net();
list_for_each_entry(dev, head, unreg_list) {
dev_put(dev);
net_set_todo(dev);
}
list_del(head);
}
EXPORT_SYMBOL(unregister_netdevice_many);
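For drivers, the visible outcome of this refactor is a simpler register_netdevice() failure contract: the notifier-failure unwind no longer frees the device behind the caller's back, and free_netdev() defers itself when the device is still NETREG_UNREGISTERING. A hedged sketch of the now always-correct error handling (demo_setup and the "demo%d" name are placeholders, not from the patch):

	static int demo_register(void)
	{
		struct net_device *dev;
		int err;

		dev = alloc_netdev(0, "demo%d", NET_NAME_UNKNOWN, demo_setup);
		if (!dev)
			return -ENOMEM;

		rtnl_lock();
		err = register_netdevice(dev);
		if (err)
			free_netdev(dev);	/* under RTNL: defers itself if the
						 * unwind is still dismantling dev
						 */
		rtnl_unlock();

		return err;
	}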

@@ -6489,7 +6489,7 @@ BPF_CALL_5(bpf_tcp_check_syncookie, struct sock *, sk, void *, iph, u32, iph_len
if (sk->sk_protocol != IPPROTO_TCP || sk->sk_state != TCP_LISTEN)
return -EINVAL;
if (!sock_net(sk)->ipv4.sysctl_tcp_syncookies)
if (!READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_syncookies))
return -EINVAL;
if (!th->ack || th->rst || th->syn)
@@ -6564,7 +6564,7 @@ BPF_CALL_5(bpf_tcp_gen_syncookie, struct sock *, sk, void *, iph, u32, iph_len,
if (sk->sk_protocol != IPPROTO_TCP || sk->sk_state != TCP_LISTEN)
return -EINVAL;
if (!sock_net(sk)->ipv4.sysctl_tcp_syncookies)
if (!READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_syncookies))
return -ENOENT;
if (!th->syn || th->ack || th->fin || th->rst)

@@ -3442,26 +3442,15 @@ replay:
dev->ifindex = ifm->ifi_index;
if (ops->newlink) {
if (ops->newlink)
err = ops->newlink(link_net ? : net, dev, tb, data, extack);
/* Drivers should call free_netdev() in ->destructor
* and unregister it on failure after registration
* so that device could be finally freed in rtnl_unlock.
*/
if (err < 0) {
/* If device is not registered at all, free it now */
if (dev->reg_state == NETREG_UNINITIALIZED ||
dev->reg_state == NETREG_UNREGISTERED)
free_netdev(dev);
goto out;
}
} else {
else
err = register_netdevice(dev);
if (err < 0) {
free_netdev(dev);
goto out;
}
if (err < 0) {
free_netdev(dev);
goto out;
}
err = rtnl_configure_link(dev, ifm);
if (err < 0)
goto out_unregister;

@@ -64,7 +64,7 @@ u32 secure_tcpv6_ts_off(const struct net *net,
.daddr = *(struct in6_addr *)daddr,
};
if (net->ipv4.sysctl_tcp_timestamps != 1)
if (READ_ONCE(net->ipv4.sysctl_tcp_timestamps) != 1)
return 0;
ts_secret_init();
@@ -120,7 +120,7 @@ EXPORT_SYMBOL(secure_ipv6_port_ephemeral);
#ifdef CONFIG_INET
u32 secure_tcp_ts_off(const struct net *net, __be32 saddr, __be32 daddr)
{
if (net->ipv4.sysctl_tcp_timestamps != 1)
if (READ_ONCE(net->ipv4.sysctl_tcp_timestamps) != 1)
return 0;
ts_secret_init();

@@ -220,7 +220,7 @@ int inet_listen(struct socket *sock, int backlog)
* because the socket was in TCP_LISTEN state previously but
* was shutdown() rather than close().
*/
tcp_fastopen = sock_net(sk)->ipv4.sysctl_tcp_fastopen;
tcp_fastopen = READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_fastopen);
if ((tcp_fastopen & TFO_SERVER_WO_SOCKOPT1) &&
(tcp_fastopen & TFO_SERVER_ENABLE) &&
!inet_csk(sk)->icsk_accept_queue.fastopenq.max_qlen) {
@@ -338,7 +338,7 @@ lookup_protocol:
inet->hdrincl = 1;
}
if (net->ipv4.sysctl_ip_no_pmtu_disc)
if (READ_ONCE(net->ipv4.sysctl_ip_no_pmtu_disc))
inet->pmtudisc = IP_PMTUDISC_DONT;
else
inet->pmtudisc = IP_PMTUDISC_WANT;

@@ -2232,7 +2232,7 @@ void fib_select_multipath(struct fib_result *res, int hash)
}
change_nexthops(fi) {
if (net->ipv4.sysctl_fib_multipath_use_neigh) {
if (READ_ONCE(net->ipv4.sysctl_fib_multipath_use_neigh)) {
if (!fib_good_nh(nexthop_nh))
continue;
if (!first) {

@@ -887,7 +887,7 @@ static bool icmp_unreach(struct sk_buff *skb)
* values please see
* Documentation/networking/ip-sysctl.rst
*/
switch (net->ipv4.sysctl_ip_no_pmtu_disc) {
switch (READ_ONCE(net->ipv4.sysctl_ip_no_pmtu_disc)) {
default:
net_dbg_ratelimited("%pI4: fragmentation needed and DF set\n",
&iph->daddr);

@@ -467,7 +467,8 @@ static struct sk_buff *add_grec(struct sk_buff *skb, struct ip_mc_list *pmc,
if (pmc->multiaddr == IGMP_ALL_HOSTS)
return skb;
if (ipv4_is_local_multicast(pmc->multiaddr) && !net->ipv4.sysctl_igmp_llm_reports)
if (ipv4_is_local_multicast(pmc->multiaddr) &&
!READ_ONCE(net->ipv4.sysctl_igmp_llm_reports))
return skb;
mtu = READ_ONCE(dev->mtu);
@@ -593,7 +594,7 @@ static int igmpv3_send_report(struct in_device *in_dev, struct ip_mc_list *pmc)
if (pmc->multiaddr == IGMP_ALL_HOSTS)
continue;
if (ipv4_is_local_multicast(pmc->multiaddr) &&
!net->ipv4.sysctl_igmp_llm_reports)
!READ_ONCE(net->ipv4.sysctl_igmp_llm_reports))
continue;
spin_lock_bh(&pmc->lock);
if (pmc->sfcount[MCAST_EXCLUDE])
@@ -736,7 +737,8 @@ static int igmp_send_report(struct in_device *in_dev, struct ip_mc_list *pmc,
if (type == IGMPV3_HOST_MEMBERSHIP_REPORT)
return igmpv3_send_report(in_dev, pmc);
if (ipv4_is_local_multicast(group) && !net->ipv4.sysctl_igmp_llm_reports)
if (ipv4_is_local_multicast(group) &&
!READ_ONCE(net->ipv4.sysctl_igmp_llm_reports))
return 0;
if (type == IGMP_HOST_LEAVE_MESSAGE)
@@ -913,7 +915,8 @@ static bool igmp_heard_report(struct in_device *in_dev, __be32 group)
if (group == IGMP_ALL_HOSTS)
return false;
if (ipv4_is_local_multicast(group) && !net->ipv4.sysctl_igmp_llm_reports)
if (ipv4_is_local_multicast(group) &&
!READ_ONCE(net->ipv4.sysctl_igmp_llm_reports))
return false;
rcu_read_lock();
@@ -1038,7 +1041,7 @@ static bool igmp_heard_query(struct in_device *in_dev, struct sk_buff *skb,
if (im->multiaddr == IGMP_ALL_HOSTS)
continue;
if (ipv4_is_local_multicast(im->multiaddr) &&
!net->ipv4.sysctl_igmp_llm_reports)
!READ_ONCE(net->ipv4.sysctl_igmp_llm_reports))
continue;
spin_lock_bh(&im->lock);
if (im->tm_running)
@@ -1289,7 +1292,8 @@ static void __igmp_group_dropped(struct ip_mc_list *im, gfp_t gfp)
#ifdef CONFIG_IP_MULTICAST
if (im->multiaddr == IGMP_ALL_HOSTS)
return;
if (ipv4_is_local_multicast(im->multiaddr) && !net->ipv4.sysctl_igmp_llm_reports)
if (ipv4_is_local_multicast(im->multiaddr) &&
!READ_ONCE(net->ipv4.sysctl_igmp_llm_reports))
return;
reporter = im->reporter;
@@ -1331,7 +1335,8 @@ static void igmp_group_added(struct ip_mc_list *im)
#ifdef CONFIG_IP_MULTICAST
if (im->multiaddr == IGMP_ALL_HOSTS)
return;
if (ipv4_is_local_multicast(im->multiaddr) && !net->ipv4.sysctl_igmp_llm_reports)
if (ipv4_is_local_multicast(im->multiaddr) &&
!READ_ONCE(net->ipv4.sysctl_igmp_llm_reports))
return;
if (in_dev->dead)
@@ -1635,7 +1640,7 @@ static void ip_mc_rejoin_groups(struct in_device *in_dev)
if (im->multiaddr == IGMP_ALL_HOSTS)
continue;
if (ipv4_is_local_multicast(im->multiaddr) &&
!net->ipv4.sysctl_igmp_llm_reports)
!READ_ONCE(net->ipv4.sysctl_igmp_llm_reports))
continue;
/* a failover is happening and switches
@@ -2185,7 +2190,7 @@ static int __ip_mc_join_group(struct sock *sk, struct ip_mreqn *imr,
count++;
}
err = -ENOBUFS;
if (count >= net->ipv4.sysctl_igmp_max_memberships)
if (count >= READ_ONCE(net->ipv4.sysctl_igmp_max_memberships))
goto done;
iml = sock_kmalloc(sk, sizeof(*iml), GFP_KERNEL);
if (!iml)
@@ -2372,7 +2377,7 @@ int ip_mc_source(int add, int omode, struct sock *sk, struct
}
/* else, add a new source to the filter */
if (psl && psl->sl_count >= net->ipv4.sysctl_igmp_max_msf) {
if (psl && psl->sl_count >= READ_ONCE(net->ipv4.sysctl_igmp_max_msf)) {
err = -ENOBUFS;
goto done;
}

@@ -251,7 +251,7 @@ next_port:
goto other_half_scan;
}
if (net->ipv4.sysctl_ip_autobind_reuse && !relax) {
if (READ_ONCE(net->ipv4.sysctl_ip_autobind_reuse) && !relax) {
/* We still have a chance to connect to different destinations */
relax = true;
goto ports_exhausted;

@@ -151,7 +151,7 @@ int ip_forward(struct sk_buff *skb)
!skb_sec_path(skb))
ip_rt_send_redirect(skb);
if (net->ipv4.sysctl_ip_fwd_update_priority)
if (READ_ONCE(net->ipv4.sysctl_ip_fwd_update_priority))
skb->priority = rt_tos2priority(iph->tos);
return NF_HOOK(NFPROTO_IPV4, NF_INET_FORWARD,

@@ -783,7 +783,7 @@ static int ip_set_mcast_msfilter(struct sock *sk, sockptr_t optval, int optlen)
/* numsrc >= (4G-140)/128 overflow in 32 bits */
err = -ENOBUFS;
if (gsf->gf_numsrc >= 0x1ffffff ||
gsf->gf_numsrc > sock_net(sk)->ipv4.sysctl_igmp_max_msf)
gsf->gf_numsrc > READ_ONCE(sock_net(sk)->ipv4.sysctl_igmp_max_msf))
goto out_free_gsf;
err = -EINVAL;
@@ -832,7 +832,7 @@ static int compat_ip_set_mcast_msfilter(struct sock *sk, sockptr_t optval,
/* numsrc >= (4G-140)/128 overflow in 32 bits */
err = -ENOBUFS;
if (n > sock_net(sk)->ipv4.sysctl_igmp_max_msf)
if (n > READ_ONCE(sock_net(sk)->ipv4.sysctl_igmp_max_msf))
goto out_free_gsf;
err = set_mcast_msfilter(sk, gf32->gf_interface, n, gf32->gf_fmode,
&gf32->gf_group, gf32->gf_slist);
@@ -1242,7 +1242,7 @@ static int do_ip_setsockopt(struct sock *sk, int level, int optname,
}
/* numsrc >= (1G-4) overflow in 32 bits */
if (msf->imsf_numsrc >= 0x3ffffffcU ||
msf->imsf_numsrc > net->ipv4.sysctl_igmp_max_msf) {
msf->imsf_numsrc > READ_ONCE(net->ipv4.sysctl_igmp_max_msf)) {
kfree(msf);
err = -ENOBUFS;
break;

@@ -1442,7 +1442,7 @@ u32 ip_mtu_from_fib_result(struct fib_result *res, __be32 daddr)
struct fib_info *fi = res->fi;
u32 mtu = 0;
if (dev_net(dev)->ipv4.sysctl_ip_fwd_use_pmtu ||
if (READ_ONCE(dev_net(dev)->ipv4.sysctl_ip_fwd_use_pmtu) ||
fi->fib_metrics->metrics[RTAX_LOCK - 1] & (1 << RTAX_MTU))
mtu = fi->fib_mtu;

@@ -249,12 +249,12 @@ bool cookie_timestamp_decode(const struct net *net,
return true;
}
if (!net->ipv4.sysctl_tcp_timestamps)
if (!READ_ONCE(net->ipv4.sysctl_tcp_timestamps))
return false;
tcp_opt->sack_ok = (options & TS_OPT_SACK) ? TCP_SACK_SEEN : 0;
if (tcp_opt->sack_ok && !net->ipv4.sysctl_tcp_sack)
if (tcp_opt->sack_ok && !READ_ONCE(net->ipv4.sysctl_tcp_sack))
return false;
if ((options & TS_OPT_WSCALE_MASK) == TS_OPT_WSCALE_MASK)
@@ -263,7 +263,7 @@ bool cookie_timestamp_decode(const struct net *net,
tcp_opt->wscale_ok = 1;
tcp_opt->snd_wscale = options & TS_OPT_WSCALE_MASK;
return net->ipv4.sysctl_tcp_window_scaling != 0;
return READ_ONCE(net->ipv4.sysctl_tcp_window_scaling) != 0;
}
EXPORT_SYMBOL(cookie_timestamp_decode);
@@ -342,7 +342,8 @@ struct sock *cookie_v4_check(struct sock *sk, struct sk_buff *skb)
struct flowi4 fl4;
u32 tsoff = 0;
if (!sock_net(sk)->ipv4.sysctl_tcp_syncookies || !th->ack || th->rst)
if (!READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_syncookies) ||
!th->ack || th->rst)
goto out;
if (tcp_synq_no_recent_overflow(sk))

@@ -95,7 +95,7 @@ static int ipv4_local_port_range(struct ctl_table *table, int write,
* port limit.
*/
if ((range[1] < range[0]) ||
(range[0] < net->ipv4.sysctl_ip_prot_sock))
(range[0] < READ_ONCE(net->ipv4.sysctl_ip_prot_sock)))
ret = -EINVAL;
else
set_local_port_range(net, range);
@@ -121,7 +121,7 @@ static int ipv4_privileged_ports(struct ctl_table *table, int write,
.extra2 = &ip_privileged_port_max,
};
pports = net->ipv4.sysctl_ip_prot_sock;
pports = READ_ONCE(net->ipv4.sysctl_ip_prot_sock);
ret = proc_dointvec_minmax(&tmp, write, buffer, lenp, ppos);
@@ -133,7 +133,7 @@ static int ipv4_privileged_ports(struct ctl_table *table, int write,
if (range[0] < pports)
ret = -EINVAL;
else
net->ipv4.sysctl_ip_prot_sock = pports;
WRITE_ONCE(net->ipv4.sysctl_ip_prot_sock, pports);
}
return ret;

@@ -442,7 +442,7 @@ void tcp_init_sock(struct sock *sk)
tp->snd_cwnd_clamp = ~0;
tp->mss_cache = TCP_MSS_DEFAULT;
tp->reordering = sock_net(sk)->ipv4.sysctl_tcp_reordering;
tp->reordering = READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_reordering);
tcp_assign_congestion_control(sk);
tp->tsoffset = 0;
@@ -1150,7 +1150,8 @@ static int tcp_sendmsg_fastopen(struct sock *sk, struct msghdr *msg,
struct sockaddr *uaddr = msg->msg_name;
int err, flags;
if (!(sock_net(sk)->ipv4.sysctl_tcp_fastopen & TFO_CLIENT_ENABLE) ||
if (!(READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_fastopen) &
TFO_CLIENT_ENABLE) ||
(uaddr && msg->msg_namelen >= sizeof(uaddr->sa_family) &&
uaddr->sa_family == AF_UNSPEC))
return -EOPNOTSUPP;
@@ -3369,7 +3370,8 @@ static int do_tcp_setsockopt(struct sock *sk, int level, int optname,
case TCP_FASTOPEN_CONNECT:
if (val > 1 || val < 0) {
err = -EINVAL;
} else if (net->ipv4.sysctl_tcp_fastopen & TFO_CLIENT_ENABLE) {
} else if (READ_ONCE(net->ipv4.sysctl_tcp_fastopen) &
TFO_CLIENT_ENABLE) {
if (sk->sk_state == TCP_CLOSE)
tp->fastopen_connect = val;
else
@@ -3706,7 +3708,7 @@ static int do_tcp_getsockopt(struct sock *sk, int level,
case TCP_LINGER2:
val = tp->linger2;
if (val >= 0)
val = (val ? : net->ipv4.sysctl_tcp_fin_timeout) / HZ;
val = (val ? : READ_ONCE(net->ipv4.sysctl_tcp_fin_timeout)) / HZ;
break;
case TCP_DEFER_ACCEPT:
val = retrans_to_secs(icsk->icsk_accept_queue.rskq_defer_accept,

@@ -349,7 +349,7 @@ static bool tcp_fastopen_no_cookie(const struct sock *sk,
const struct dst_entry *dst,
int flag)
{
return (sock_net(sk)->ipv4.sysctl_tcp_fastopen & flag) ||
return (READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_fastopen) & flag) ||
tcp_sk(sk)->fastopen_no_cookie ||
(dst && dst_metric(dst, RTAX_FASTOPEN_NO_COOKIE));
}
@@ -364,7 +364,7 @@ struct sock *tcp_try_fastopen(struct sock *sk, struct sk_buff *skb,
const struct dst_entry *dst)
{
bool syn_data = TCP_SKB_CB(skb)->end_seq != TCP_SKB_CB(skb)->seq + 1;
int tcp_fastopen = sock_net(sk)->ipv4.sysctl_tcp_fastopen;
int tcp_fastopen = READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_fastopen);
struct tcp_fastopen_cookie valid_foc = { .len = -1 };
struct sock *child;
int ret = 0;
@@ -506,7 +506,7 @@ void tcp_fastopen_active_disable(struct sock *sk)
{
struct net *net = sock_net(sk);
if (!sock_net(sk)->ipv4.sysctl_tcp_fastopen_blackhole_timeout)
if (!READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_fastopen_blackhole_timeout))
return;
/* Paired with READ_ONCE() in tcp_fastopen_active_should_disable() */
@@ -527,7 +527,8 @@ void tcp_fastopen_active_disable(struct sock *sk)
*/
bool tcp_fastopen_active_should_disable(struct sock *sk)
{
unsigned int tfo_bh_timeout = sock_net(sk)->ipv4.sysctl_tcp_fastopen_blackhole_timeout;
unsigned int tfo_bh_timeout =
READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_fastopen_blackhole_timeout);
unsigned long timeout;
int tfo_da_times;
int multiplier;

@@ -1012,7 +1012,7 @@ static void tcp_check_sack_reordering(struct sock *sk, const u32 low_seq,
tp->undo_marker ? tp->undo_retrans : 0);
#endif
tp->reordering = min_t(u32, (metric + mss - 1) / mss,
sock_net(sk)->ipv4.sysctl_tcp_max_reordering);
READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_max_reordering));
}
/* This exciting event is worth to be remembered. 8) */
@@ -1991,7 +1991,7 @@ static void tcp_check_reno_reordering(struct sock *sk, const int addend)
return;
tp->reordering = min_t(u32, tp->packets_out + addend,
sock_net(sk)->ipv4.sysctl_tcp_max_reordering);
READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_max_reordering));
tp->reord_seen++;
NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPRENOREORDER);
}
@@ -2056,7 +2056,8 @@ static inline void tcp_init_undo(struct tcp_sock *tp)
static bool tcp_is_rack(const struct sock *sk)
{
return sock_net(sk)->ipv4.sysctl_tcp_recovery & TCP_RACK_LOSS_DETECTION;
return READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_recovery) &
TCP_RACK_LOSS_DETECTION;
}
/* If we detect SACK reneging, forget all SACK information
@@ -2100,6 +2101,7 @@ void tcp_enter_loss(struct sock *sk)
struct tcp_sock *tp = tcp_sk(sk);
struct net *net = sock_net(sk);
bool new_recovery = icsk->icsk_ca_state < TCP_CA_Recovery;
u8 reordering;
tcp_timeout_mark_lost(sk);
@@ -2120,10 +2122,12 @@ void tcp_enter_loss(struct sock *sk)
/* Timeout in disordered state after receiving substantial DUPACKs
* suggests that the degree of reordering is over-estimated.
*/
reordering = READ_ONCE(net->ipv4.sysctl_tcp_reordering);
if (icsk->icsk_ca_state <= TCP_CA_Disorder &&
tp->sacked_out >= net->ipv4.sysctl_tcp_reordering)
tp->sacked_out >= reordering)
tp->reordering = min_t(unsigned int, tp->reordering,
net->ipv4.sysctl_tcp_reordering);
reordering);
tcp_set_ca_state(sk, TCP_CA_Loss);
tp->high_seq = tp->snd_nxt;
tcp_ecn_queue_cwr(tp);
@@ -3412,7 +3416,8 @@ static inline bool tcp_may_raise_cwnd(const struct sock *sk, const int flag)
* new SACK or ECE mark may first advance cwnd here and later reduce
* cwnd in tcp_fastretrans_alert() based on more states.
*/
if (tcp_sk(sk)->reordering > sock_net(sk)->ipv4.sysctl_tcp_reordering)
if (tcp_sk(sk)->reordering >
READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_reordering))
return flag & FLAG_FORWARD_PROGRESS;
return flag & FLAG_DATA_ACKED;
@@ -4004,7 +4009,7 @@ void tcp_parse_options(const struct net *net,
break;
case TCPOPT_WINDOW:
if (opsize == TCPOLEN_WINDOW && th->syn &&
!estab && net->ipv4.sysctl_tcp_window_scaling) {
!estab && READ_ONCE(net->ipv4.sysctl_tcp_window_scaling)) {
__u8 snd_wscale = *(__u8 *)ptr;
opt_rx->wscale_ok = 1;
if (snd_wscale > TCP_MAX_WSCALE) {
@@ -4020,7 +4025,7 @@ void tcp_parse_options(const struct net *net,
case TCPOPT_TIMESTAMP:
if ((opsize == TCPOLEN_TIMESTAMP) &&
((estab && opt_rx->tstamp_ok) ||
(!estab && net->ipv4.sysctl_tcp_timestamps))) {
(!estab && READ_ONCE(net->ipv4.sysctl_tcp_timestamps)))) {
opt_rx->saw_tstamp = 1;
opt_rx->rcv_tsval = get_unaligned_be32(ptr);
opt_rx->rcv_tsecr = get_unaligned_be32(ptr + 4);
@@ -4028,7 +4033,7 @@ void tcp_parse_options(const struct net *net,
break;
case TCPOPT_SACK_PERM:
if (opsize == TCPOLEN_SACK_PERM && th->syn &&
!estab && net->ipv4.sysctl_tcp_sack) {
!estab && READ_ONCE(net->ipv4.sysctl_tcp_sack)) {
opt_rx->sack_ok = TCP_SACK_SEEN;
tcp_sack_reset(opt_rx);
}
@@ -5489,7 +5494,7 @@ static void tcp_check_urg(struct sock *sk, const struct tcphdr *th)
struct tcp_sock *tp = tcp_sk(sk);
u32 ptr = ntohs(th->urg_ptr);
if (ptr && !sock_net(sk)->ipv4.sysctl_tcp_stdurg)
if (ptr && !READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_stdurg))
ptr--;
ptr += ntohl(th->seq);
@@ -6685,11 +6690,14 @@ static bool tcp_syn_flood_action(const struct sock *sk, const char *proto)
{
struct request_sock_queue *queue = &inet_csk(sk)->icsk_accept_queue;
const char *msg = "Dropping request";
bool want_cookie = false;
struct net *net = sock_net(sk);
bool want_cookie = false;
u8 syncookies;
syncookies = READ_ONCE(net->ipv4.sysctl_tcp_syncookies);
#ifdef CONFIG_SYN_COOKIES
if (net->ipv4.sysctl_tcp_syncookies) {
if (syncookies) {
msg = "Sending cookies";
want_cookie = true;
__NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPREQQFULLDOCOOKIES);
@@ -6697,8 +6705,7 @@ static bool tcp_syn_flood_action(const struct sock *sk, const char *proto)
#endif
__NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPREQQFULLDROP);
if (!queue->synflood_warned &&
net->ipv4.sysctl_tcp_syncookies != 2 &&
if (!queue->synflood_warned && syncookies != 2 &&
xchg(&queue->synflood_warned, 1) == 0)
net_info_ratelimited("%s: Possible SYN flooding on port %d. %s. Check SNMP counters.\n",
proto, sk->sk_num, msg);
@@ -6747,7 +6754,7 @@ u16 tcp_get_syncookie_mss(struct request_sock_ops *rsk_ops,
struct tcp_sock *tp = tcp_sk(sk);
u16 mss;
if (sock_net(sk)->ipv4.sysctl_tcp_syncookies != 2 &&
if (READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_syncookies) != 2 &&
!inet_csk_reqsk_queue_is_full(sk))
return 0;
@@ -6781,13 +6788,15 @@ int tcp_conn_request(struct request_sock_ops *rsk_ops,
bool want_cookie = false;
struct dst_entry *dst;
struct flowi fl;
u8 syncookies;
syncookies = READ_ONCE(net->ipv4.sysctl_tcp_syncookies);
/* TW buckets are converted to open requests without
* limitations, they conserve resources and peer is
* evidently real one.
*/
if ((net->ipv4.sysctl_tcp_syncookies == 2 ||
inet_csk_reqsk_queue_is_full(sk)) && !isn) {
if ((syncookies == 2 || inet_csk_reqsk_queue_is_full(sk)) && !isn) {
want_cookie = tcp_syn_flood_action(sk, rsk_ops->slab_name);
if (!want_cookie)
goto drop;
@@ -6841,10 +6850,12 @@ int tcp_conn_request(struct request_sock_ops *rsk_ops,
goto drop_and_free;
if (!want_cookie && !isn) {
int max_syn_backlog = READ_ONCE(net->ipv4.sysctl_max_syn_backlog);
/* Kill the following clause, if you dislike this way. */
if (!net->ipv4.sysctl_tcp_syncookies &&
(net->ipv4.sysctl_max_syn_backlog - inet_csk_reqsk_queue_len(sk) <
(net->ipv4.sysctl_max_syn_backlog >> 2)) &&
if (!syncookies &&
(max_syn_backlog - inet_csk_reqsk_queue_len(sk) <
(max_syn_backlog >> 2)) &&
!tcp_peer_is_proven(req, dst)) {
/* Without syncookies last quarter of
* backlog is filled with destinations,

@@ -106,10 +106,10 @@ static u32 tcp_v4_init_ts_off(const struct net *net, const struct sk_buff *skb)
int tcp_twsk_unique(struct sock *sk, struct sock *sktw, void *twp)
{
int reuse = READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_tw_reuse);
const struct inet_timewait_sock *tw = inet_twsk(sktw);
const struct tcp_timewait_sock *tcptw = tcp_twsk(sktw);
struct tcp_sock *tp = tcp_sk(sk);
int reuse = sock_net(sk)->ipv4.sysctl_tcp_tw_reuse;
if (reuse == 2) {
/* Still does not detect *everything* that goes through

@@ -428,7 +428,8 @@ void tcp_update_metrics(struct sock *sk)
if (!tcp_metric_locked(tm, TCP_METRIC_REORDERING)) {
val = tcp_metric_get(tm, TCP_METRIC_REORDERING);
if (val < tp->reordering &&
tp->reordering != net->ipv4.sysctl_tcp_reordering)
tp->reordering !=
READ_ONCE(net->ipv4.sysctl_tcp_reordering))
tcp_metric_set(tm, TCP_METRIC_REORDERING,
tp->reordering);
}

@@ -180,7 +180,7 @@ tcp_timewait_state_process(struct inet_timewait_sock *tw, struct sk_buff *skb,
* Oh well... nobody has a sufficient solution to this
* protocol bug yet.
*/
if (twsk_net(tw)->ipv4.sysctl_tcp_rfc1337 == 0) {
if (!READ_ONCE(twsk_net(tw)->ipv4.sysctl_tcp_rfc1337)) {
kill:
inet_twsk_deschedule_put(tw);
return TCP_TW_SUCCESS;

@@ -789,18 +789,18 @@ static unsigned int tcp_syn_options(struct sock *sk, struct sk_buff *skb,
opts->mss = tcp_advertise_mss(sk);
remaining -= TCPOLEN_MSS_ALIGNED;
if (likely(sock_net(sk)->ipv4.sysctl_tcp_timestamps && !*md5)) {
if (likely(READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_timestamps) && !*md5)) {
opts->options |= OPTION_TS;
opts->tsval = tcp_skb_timestamp(skb) + tp->tsoffset;
opts->tsecr = tp->rx_opt.ts_recent;
remaining -= TCPOLEN_TSTAMP_ALIGNED;
}
if (likely(sock_net(sk)->ipv4.sysctl_tcp_window_scaling)) {
if (likely(READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_window_scaling))) {
opts->ws = tp->rx_opt.rcv_wscale;
opts->options |= OPTION_WSCALE;
remaining -= TCPOLEN_WSCALE_ALIGNED;
}
if (likely(sock_net(sk)->ipv4.sysctl_tcp_sack)) {
if (likely(READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_sack))) {
opts->options |= OPTION_SACK_ADVERTISE;
if (unlikely(!(OPTION_TS & opts->options)))
remaining -= TCPOLEN_SACKPERM_ALIGNED;
@@ -1720,7 +1720,8 @@ static inline int __tcp_mtu_to_mss(struct sock *sk, int pmtu)
mss_now -= icsk->icsk_ext_hdr_len;
/* Then reserve room for full set of TCP options and 8 bytes of data */
mss_now = max(mss_now, sock_net(sk)->ipv4.sysctl_tcp_min_snd_mss);
mss_now = max(mss_now,
READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_min_snd_mss));
return mss_now;
}
@@ -1763,10 +1764,10 @@ void tcp_mtup_init(struct sock *sk)
struct inet_connection_sock *icsk = inet_csk(sk);
struct net *net = sock_net(sk);
icsk->icsk_mtup.enabled = net->ipv4.sysctl_tcp_mtu_probing > 1;
icsk->icsk_mtup.enabled = READ_ONCE(net->ipv4.sysctl_tcp_mtu_probing) > 1;
icsk->icsk_mtup.search_high = tp->rx_opt.mss_clamp + sizeof(struct tcphdr) +
icsk->icsk_af_ops->net_header_len;
icsk->icsk_mtup.search_low = tcp_mss_to_mtu(sk, net->ipv4.sysctl_tcp_base_mss);
icsk->icsk_mtup.search_low = tcp_mss_to_mtu(sk, READ_ONCE(net->ipv4.sysctl_tcp_base_mss));
icsk->icsk_mtup.probe_size = 0;
if (icsk->icsk_mtup.enabled)
icsk->icsk_mtup.probe_timestamp = tcp_jiffies32;
@@ -1898,7 +1899,7 @@ static void tcp_cwnd_validate(struct sock *sk, bool is_cwnd_limited)
if (tp->packets_out > tp->snd_cwnd_used)
tp->snd_cwnd_used = tp->packets_out;
if (sock_net(sk)->ipv4.sysctl_tcp_slow_start_after_idle &&
if (READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_slow_start_after_idle) &&
(s32)(tcp_jiffies32 - tp->snd_cwnd_stamp) >= inet_csk(sk)->icsk_rto &&
!ca_ops->cong_control)
tcp_cwnd_application_limited(sk);
@@ -2277,7 +2278,7 @@ static inline void tcp_mtu_check_reprobe(struct sock *sk)
u32 interval;
s32 delta;
interval = net->ipv4.sysctl_tcp_probe_interval;
interval = READ_ONCE(net->ipv4.sysctl_tcp_probe_interval);
delta = tcp_jiffies32 - icsk->icsk_mtup.probe_timestamp;
if (unlikely(delta >= interval * HZ)) {
int mss = tcp_current_mss(sk);
@@ -2359,7 +2360,7 @@ static int tcp_mtu_probe(struct sock *sk)
* probing process by not resetting search range to its original.
*/
if (probe_size > tcp_mtu_to_mss(sk, icsk->icsk_mtup.search_high) ||
interval < net->ipv4.sysctl_tcp_probe_threshold) {
interval < READ_ONCE(net->ipv4.sysctl_tcp_probe_threshold)) {
/* Check whether enough time has elapsed for
* another round of probing.
*/
@@ -2734,7 +2735,7 @@ bool tcp_schedule_loss_probe(struct sock *sk, bool advancing_rto)
if (rcu_access_pointer(tp->fastopen_rsk))
return false;
early_retrans = sock_net(sk)->ipv4.sysctl_tcp_early_retrans;
early_retrans = READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_early_retrans);
/* Schedule a loss probe in 2*RTT for SACK capable connections
* not in loss recovery, that are either limited by cwnd or application.
*/
@@ -3099,7 +3100,7 @@ static void tcp_retrans_try_collapse(struct sock *sk, struct sk_buff *to,
struct sk_buff *skb = to, *tmp;
bool first = true;
if (!sock_net(sk)->ipv4.sysctl_tcp_retrans_collapse)
if (!READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_retrans_collapse))
return;
if (TCP_SKB_CB(skb)->tcp_flags & TCPHDR_SYN)
return;
@@ -3647,7 +3648,7 @@ static void tcp_connect_init(struct sock *sk)
* See tcp_input.c:tcp_rcv_state_process case TCP_SYN_SENT.
*/
tp->tcp_header_len = sizeof(struct tcphdr);
if (sock_net(sk)->ipv4.sysctl_tcp_timestamps)
if (READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_timestamps))
tp->tcp_header_len += TCPOLEN_TSTAMP_ALIGNED;
#ifdef CONFIG_TCP_MD5SIG
@@ -3683,7 +3684,7 @@ static void tcp_connect_init(struct sock *sk)
tp->advmss - (tp->rx_opt.ts_recent_stamp ? tp->tcp_header_len - sizeof(struct tcphdr) : 0),
&tp->rcv_wnd,
&tp->window_clamp,
sock_net(sk)->ipv4.sysctl_tcp_window_scaling,
READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_window_scaling),
&rcv_wscale,
rcv_wnd);
@@ -4091,7 +4092,7 @@ void tcp_send_probe0(struct sock *sk)
icsk->icsk_probes_out++;
if (err <= 0) {
if (icsk->icsk_backoff < net->ipv4.sysctl_tcp_retries2)
if (icsk->icsk_backoff < READ_ONCE(net->ipv4.sysctl_tcp_retries2))
icsk->icsk_backoff++;
timeout = tcp_probe0_when(sk, TCP_RTO_MAX);
} else {

@@ -19,7 +19,8 @@ static u32 tcp_rack_reo_wnd(const struct sock *sk)
return 0;
if (tp->sacked_out >= tp->reordering &&
!(sock_net(sk)->ipv4.sysctl_tcp_recovery & TCP_RACK_NO_DUPTHRESH))
!(READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_recovery) &
TCP_RACK_NO_DUPTHRESH))
return 0;
}
@@ -190,7 +191,8 @@ void tcp_rack_update_reo_wnd(struct sock *sk, struct rate_sample *rs)
{
struct tcp_sock *tp = tcp_sk(sk);
if (sock_net(sk)->ipv4.sysctl_tcp_recovery & TCP_RACK_STATIC_REO_WND ||
if ((READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_recovery) &
TCP_RACK_STATIC_REO_WND) ||
!rs->prior_delivered)
return;

@@ -143,7 +143,7 @@ static int tcp_out_of_resources(struct sock *sk, bool do_reset)
*/
static int tcp_orphan_retries(struct sock *sk, bool alive)
{
int retries = sock_net(sk)->ipv4.sysctl_tcp_orphan_retries; /* May be zero. */
int retries = READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_orphan_retries); /* May be zero. */
/* We know from an ICMP that something is wrong. */
if (sk->sk_err_soft && !alive)
@@ -163,7 +163,7 @@ static void tcp_mtu_probing(struct inet_connection_sock *icsk, struct sock *sk)
int mss;
/* Black hole detection */
if (!net->ipv4.sysctl_tcp_mtu_probing)
if (!READ_ONCE(net->ipv4.sysctl_tcp_mtu_probing))
return;
if (!icsk->icsk_mtup.enabled) {
@@ -171,9 +171,9 @@ static void tcp_mtu_probing(struct inet_connection_sock *icsk, struct sock *sk)
icsk->icsk_mtup.probe_timestamp = tcp_jiffies32;
} else {
mss = tcp_mtu_to_mss(sk, icsk->icsk_mtup.search_low) >> 1;
mss = min(net->ipv4.sysctl_tcp_base_mss, mss);
mss = max(mss, net->ipv4.sysctl_tcp_mtu_probe_floor);
mss = max(mss, net->ipv4.sysctl_tcp_min_snd_mss);
mss = min(READ_ONCE(net->ipv4.sysctl_tcp_base_mss), mss);
mss = max(mss, READ_ONCE(net->ipv4.sysctl_tcp_mtu_probe_floor));
mss = max(mss, READ_ONCE(net->ipv4.sysctl_tcp_min_snd_mss));
icsk->icsk_mtup.search_low = tcp_mss_to_mtu(sk, mss);
}
tcp_sync_mss(sk, icsk->icsk_pmtu_cookie);
@@ -242,14 +242,14 @@ static int tcp_write_timeout(struct sock *sk)
retry_until = icsk->icsk_syn_retries ? : net->ipv4.sysctl_tcp_syn_retries;
expired = icsk->icsk_retransmits >= retry_until;
} else {
if (retransmits_timed_out(sk, net->ipv4.sysctl_tcp_retries1, 0)) {
if (retransmits_timed_out(sk, READ_ONCE(net->ipv4.sysctl_tcp_retries1), 0)) {
/* Black hole detection */
tcp_mtu_probing(icsk, sk);
__dst_negative_advice(sk);
}
retry_until = net->ipv4.sysctl_tcp_retries2;
retry_until = READ_ONCE(net->ipv4.sysctl_tcp_retries2);
if (sock_flag(sk, SOCK_DEAD)) {
const bool alive = icsk->icsk_rto < TCP_RTO_MAX;
@@ -380,7 +380,7 @@ static void tcp_probe_timer(struct sock *sk)
msecs_to_jiffies(icsk->icsk_user_timeout))
goto abort;
max_probes = sock_net(sk)->ipv4.sysctl_tcp_retries2;
max_probes = READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_retries2);
if (sock_flag(sk, SOCK_DEAD)) {
const bool alive = inet_csk_rto_backoff(icsk, TCP_RTO_MAX) < TCP_RTO_MAX;
@@ -574,7 +574,7 @@ out_reset_timer:
* linear-timeout retransmissions into a black hole
*/
if (sk->sk_state == TCP_ESTABLISHED &&
(tp->thin_lto || net->ipv4.sysctl_tcp_thin_linear_timeouts) &&
(tp->thin_lto || READ_ONCE(net->ipv4.sysctl_tcp_thin_linear_timeouts)) &&
tcp_stream_is_thin(tp) &&
icsk->icsk_retransmits <= TCP_THIN_LINEAR_RETRIES) {
icsk->icsk_backoff = 0;
@@ -585,7 +585,7 @@ out_reset_timer:
}
inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS,
tcp_clamp_rto_to_user_timeout(sk), TCP_RTO_MAX);
if (retransmits_timed_out(sk, net->ipv4.sysctl_tcp_retries1 + 1, 0))
if (retransmits_timed_out(sk, READ_ONCE(net->ipv4.sysctl_tcp_retries1) + 1, 0))
__sk_dst_reset(sk);
out:;

@@ -225,7 +225,7 @@ lookup_protocol:
inet->mc_list = NULL;
inet->rcv_tos = 0;
if (net->ipv4.sysctl_ip_no_pmtu_disc)
if (READ_ONCE(net->ipv4.sysctl_ip_no_pmtu_disc))
inet->pmtudisc = IP_PMTUDISC_DONT;
else
inet->pmtudisc = IP_PMTUDISC_WANT;

@@ -141,7 +141,8 @@ struct sock *cookie_v6_check(struct sock *sk, struct sk_buff *skb)
__u8 rcv_wscale;
u32 tsoff = 0;
if (!sock_net(sk)->ipv4.sysctl_tcp_syncookies || !th->ack || th->rst)
if (!READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_syncookies) ||
!th->ack || th->rst)
goto out;
if (tcp_synq_no_recent_overflow(sk))

@@ -358,7 +358,7 @@ static int sctp_v4_available(union sctp_addr *addr, struct sctp_sock *sp)
if (addr->v4.sin_addr.s_addr != htonl(INADDR_ANY) &&
ret != RTN_LOCAL &&
!sp->inet.freebind &&
!net->ipv4.sysctl_ip_nonlocal_bind)
!READ_ONCE(net->ipv4.sysctl_ip_nonlocal_bind))
return 0;
if (ipv6_only_sock(sctp_opt2sk(sp)))

@@ -1787,7 +1787,7 @@ void smc_llc_lgr_init(struct smc_link_group *lgr, struct smc_sock *smc)
init_waitqueue_head(&lgr->llc_flow_waiter);
init_waitqueue_head(&lgr->llc_msg_waiter);
mutex_init(&lgr->llc_conf_mutex);
lgr->llc_testlink_time = net->ipv4.sysctl_tcp_keepalive_time;
lgr->llc_testlink_time = READ_ONCE(net->ipv4.sysctl_tcp_keepalive_time);
}
/* called after lgr was removed from lgr_list */

@@ -97,13 +97,16 @@ static void tls_device_queue_ctx_destruction(struct tls_context *ctx)
unsigned long flags;
spin_lock_irqsave(&tls_device_lock, flags);
if (unlikely(!refcount_dec_and_test(&ctx->refcount)))
goto unlock;
list_move_tail(&ctx->list, &tls_device_gc_list);
/* schedule_work inside the spinlock
* to make sure tls_device_down waits for that work.
*/
schedule_work(&tls_device_gc_work);
unlock:
spin_unlock_irqrestore(&tls_device_lock, flags);
}
@@ -194,8 +197,7 @@ void tls_device_sk_destruct(struct sock *sk)
clean_acked_data_disable(inet_csk(sk));
}
if (refcount_dec_and_test(&tls_ctx->refcount))
tls_device_queue_ctx_destruction(tls_ctx);
tls_device_queue_ctx_destruction(tls_ctx);
}
EXPORT_SYMBOL_GPL(tls_device_sk_destruct);

@@ -2685,8 +2685,10 @@ static int xfrm_expand_policies(const struct flowi *fl, u16 family,
*num_xfrms = 0;
return 0;
}
if (IS_ERR(pols[0]))
if (IS_ERR(pols[0])) {
*num_pols = 0;
return PTR_ERR(pols[0]);
}
*num_xfrms = pols[0]->xfrm_nr;
@@ -2701,6 +2703,7 @@ static int xfrm_expand_policies(const struct flowi *fl, u16 family,
if (pols[1]) {
if (IS_ERR(pols[1])) {
xfrm_pols_put(pols, *num_pols);
*num_pols = 0;
return PTR_ERR(pols[1]);
}
(*num_pols)++;

@@ -2569,7 +2569,7 @@ int __xfrm_init_state(struct xfrm_state *x, bool init_replay, bool offload)
int err;
if (family == AF_INET &&
xs_net(x)->ipv4.sysctl_ip_no_pmtu_disc)
READ_ONCE(xs_net(x)->ipv4.sysctl_ip_no_pmtu_disc))
x->props.flags |= XFRM_STATE_NOPMTUDISC;
err = -EPROTONOSUPPORT;

@@ -1805,6 +1805,10 @@ bool ima_appraise_signature(enum kernel_read_file_id id)
if (id >= READING_MAX_ID)
return false;
if (id == READING_KEXEC_IMAGE && !(ima_appraise & IMA_APPRAISE_ENFORCE)
&& security_locked_down(LOCKDOWN_KEXEC))
return false;
func = read_idmap[id] ?: FILE_CHECK;
rcu_read_lock();

@@ -3644,8 +3644,11 @@ static int kvm_ioctl_create_device(struct kvm *kvm,
kvm_put_kvm_no_destroy(kvm);
mutex_lock(&kvm->lock);
list_del(&dev->vm_node);
if (ops->release)
ops->release(dev);
mutex_unlock(&kvm->lock);
ops->destroy(dev);
if (ops->destroy)
ops->destroy(dev);
return ret;
}
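Two behavioural points fall out of this last hunk: ->release (if provided) now runs on the create-ioctl unwind path, under kvm->lock and after the device is unlinked from vm_node, and ->destroy becomes optional. A hedged sketch of an ops table this enables (all demo_* names are illustrative, not from the patch):

	static struct kvm_device_ops demo_device_ops = {
		.name	 = "demo",
		.create	 = demo_device_create,
		.release = demo_device_release,	/* also called on create failure */
		/* .destroy left NULL: teardown lives entirely in ->release */
	};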